Commit 1946ecf6 authored by Ethen Pociask

Merge branch 'develop' of https://github.com/epociask/optimism into indexer.client

parents 7199ad85 c93a5531
@@ -863,10 +863,6 @@ jobs:
name: Lint
command: golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 4m -e "errors.As" -e "errors.Is" ./...
working_directory: indexer
- run:
name: Check generated code
command: npm run generate && git diff --exit-code
working_directory: indexer/api-ts
- run:
name: install geth
command: make install-geth
@@ -891,6 +887,18 @@ jobs:
name: Build
command: make indexer
working_directory: indexer
- run:
name: Install node_modules
command: pnpm install --frozen-lockfile --prefer-offline
working_directory: indexer/api-ts
- run:
name: Install tygo
command: go install github.com/gzuidhof/tygo@latest
working_directory: indexer/api-ts
- run:
name: Check generated code
command: npm run generate && git diff --exit-code
working_directory: indexer/api-ts
devnet:
machine:
...
@@ -13,3 +13,10 @@
[submodule "packages/contracts-bedrock/lib/forge-std"]
path = packages/contracts-bedrock/lib/forge-std
url = https://github.com/foundry-rs/forge-std
[submodule "packages/contracts-bedrock/lib/safe-contracts"]
path = packages/contracts-bedrock/lib/safe-contracts
url = https://github.com/safe-global/safe-contracts
branch = v1.4.0
@@ -18,7 +18,7 @@ tests/
# Semgrep-action log folder
.semgrep_logs/
-op-bindings/bindings
+op-bindings/bindings/
packages/*/node_modules
packages/*/test
@@ -109,7 +109,6 @@ devnet-down:
devnet-clean:
rm -rf ./packages/contracts-bedrock/deployments/devnetL1
rm -rf ./packages/contracts-bedrock/deploy-config/devnetL1.json
rm -rf ./.devnet
cd ./ops-bedrock && docker compose down
docker image ls 'ops-bedrock*' --format='{{.Repository}}' | xargs -r docker rmi
...
@@ -208,6 +208,6 @@ require (
rsc.io/tmplfunc v0.0.3 // indirect
)
-replace github.com/ethereum/go-ethereum v1.12.0 => github.com/ethereum-optimism/op-geth v1.101200.0-rc.1.0.20230818191139-f7376a28049b
+replace github.com/ethereum/go-ethereum v1.12.0 => github.com/ethereum-optimism/op-geth v1.101200.2-rc.1.0.20230914224024-b84ba11915a0
//replace github.com/ethereum/go-ethereum v1.12.0 => ../go-ethereum
@@ -162,8 +162,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
-github.com/ethereum-optimism/op-geth v1.101200.0-rc.1.0.20230818191139-f7376a28049b h1:YF2FE/QnbhvrHwDYJHnbTKgJvw2aKwB/dd7PO1zKNqY=
-github.com/ethereum-optimism/op-geth v1.101200.0-rc.1.0.20230818191139-f7376a28049b/go.mod h1:gRnPb21PoKcHm3kHqj9BQlQkwmhOGUvQoGEbC7z852Q=
+github.com/ethereum-optimism/op-geth v1.101200.2-rc.1.0.20230914224024-b84ba11915a0 h1:Qcu7OVMbKvbu7aaDC31OY0JCqFIr2N+/SGdBTnxukCs=
+github.com/ethereum-optimism/op-geth v1.101200.2-rc.1.0.20230914224024-b84ba11915a0/go.mod h1:gRnPb21PoKcHm3kHqj9BQlQkwmhOGUvQoGEbC7z852Q=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20230817174831-5d3ca1966435 h1:2CzkJkkTLuVyoVFkoW5w6vDB2Q7eJzxXw/ybA17xjqM=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20230817174831-5d3ca1966435/go.mod h1:v2YpePbdGBF0Gr6VWq49MFFmcTW0kRYZ2ingBJYWEwg=
github.com/ethereum/c-kzg-4844 v0.2.0 h1:+cUvymlnoDDQgMInp25Bo3OmLajmmY8mLJ/tLjqd77Q=
...
Generated typescript types for https://github.com/ethereum-optimism/optimism/tree/develop/indexer
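For orientation, a minimal sketch of how the published helpers and types might be consumed; the package name comes from the package.json later in this diff, the import assumes the package entry re-exports the generated types (as indexer.ts does), and the base URL and address are made-up placeholder values.

import { depositEndpoint, DepositResponse } from '@eth-optimism/indexer-api'

// Build the deposits URL for an address (placeholder values, not from this diff).
const url = depositEndpoint({
  baseUrl: 'http://localhost:8080/api/v0',
  address: '0x1234000000000000000000000000000000000000',
  limit: 10,
})

// Fetch and type the response with the generated DepositResponse interface.
const deposits = (await fetch(url).then((res) => res.json())) as DepositResponse
console.log(url, deposits.cursor)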
@@ -8,12 +8,12 @@ export interface DepositItem {
from: string;
to: string;
timestamp: number /* uint64 */;
-L1TxHash: string;
-L2TxHash: string;
-Block: string;
+l1BlockHash: string;
+l1TxHash: string;
+l2TxHash: string;
amount: string;
-l1Token: string;
-l2Token: string;
+l1TokenAddress: string;
+l2TokenAddress: string;
}
export interface DepositResponse {
cursor: string;
@@ -41,10 +41,10 @@ export interface WithdrawalItem {
timestamp: number /* uint64 */;
l2BlockHash: string;
amount: string;
-proof: string;
-claim: string;
-l1Token: string;
-l2Token: string;
+proofTransactionHash: string;
+claimTransactionHash: string;
+l1TokenAddress: string;
+l2TokenAddress: string;
}
export interface WithdrawalResponse {
cursor: string;
...
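The renames above change only the JSON key names, not the shape of the records. A sketch of partial records that match the updated interfaces; every value is a made-up placeholder, and Partial is used because fields outside this hunk are not shown here.

import type { DepositItem, WithdrawalItem } from './generated'

// Placeholder deposit using the new camelCase keys.
const deposit: Partial<DepositItem> = {
  from: '0x1111111111111111111111111111111111111111',
  to: '0x2222222222222222222222222222222222222222',
  timestamp: 1694822400,
  l1BlockHash: '0xaaaa',
  l1TxHash: '0xbbbb',
  l2TxHash: '0xcccc',
  amount: '1000000000000000000',
  l1TokenAddress: '0x3333333333333333333333333333333333333333',
  l2TokenAddress: '0x4444444444444444444444444444444444444444',
}

// Placeholder withdrawal using the renamed proof/claim and token address keys.
const withdrawal: Partial<WithdrawalItem> = {
  timestamp: 1694822400,
  l2BlockHash: '0xdddd',
  amount: '1000000000000000000',
  proofTransactionHash: '0xeeee',
  claimTransactionHash: '0xffff',
  l1TokenAddress: '0x3333333333333333333333333333333333333333',
  l2TokenAddress: '0x4444444444444444444444444444444444444444',
}

console.log(deposit, withdrawal)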
@@ -38,10 +38,10 @@ var createQueryString = ({ cursor, limit }) => {
return `?${queries.join("&")}`;
};
var depositEndpoint = ({ baseUrl = "", address, cursor, limit }) => {
-return [baseUrl, "deposits", address, createQueryString({ cursor, limit })].join("/");
+return [baseUrl, "deposits", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
var withdrawalEndoint = ({ baseUrl = "", address, cursor, limit }) => {
-return [baseUrl, "withdrawals", address, createQueryString({ cursor, limit })].join("/");
+return [baseUrl, "withdrawals", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
...
{"version":3,"sources":["indexer.ts"],"sourcesContent":["export * from './generated'\n\ntype PaginationOptions = {\n limit?: number\n cursor?: string\n}\n\ntype Options = {\n baseUrl?: string\n address: `0x${string}`\n} & PaginationOptions\n\nconst createQueryString = ({ cursor, limit }: PaginationOptions): string => {\n if (cursor === undefined && limit === undefined) {\n return ''\n }\n const queries: string[] = []\n if (cursor) {\n queries.push(`cursor=${cursor}`)\n }\n if (limit) {\n queries.push(`limit=${limit}`)\n }\n return `?${queries.join('&')}`\n}\n\nexport const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'deposits', address, createQueryString({ cursor, limit })].join('/')\n}\n\nexport const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'withdrawals', address, createQueryString({ cursor, limit })].join('/')\n}\n\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAYA,IAAM,oBAAoB,CAAC,EAAE,QAAQ,MAAM,MAAiC;AAC1E,MAAI,WAAW,UAAa,UAAU,QAAW;AAC/C,WAAO;AAAA,EACT;AACA,QAAM,UAAoB,CAAC;AAC3B,MAAI,QAAQ;AACV,YAAQ,KAAK,UAAU,MAAM,EAAE;AAAA,EACjC;AACA,MAAI,OAAO;AACT,YAAQ,KAAK,SAAS,KAAK,EAAE;AAAA,EAC/B;AACA,SAAO,IAAI,QAAQ,KAAK,GAAG,CAAC;AAC9B;AAEO,IAAM,kBAAkB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC5F,SAAO,CAAC,SAAS,YAAY,SAAS,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,KAAK,GAAG;AACtF;AAEO,IAAM,oBAAoB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC9F,SAAO,CAAC,SAAS,eAAe,SAAS,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,KAAK,GAAG;AACzF;","names":[]} {"version":3,"sources":["indexer.ts"],"sourcesContent":["export * from './generated'\n\ntype PaginationOptions = {\n limit?: number\n cursor?: string\n}\n\ntype Options = {\n baseUrl?: string\n address: `0x${string}`\n} & PaginationOptions\n\nconst createQueryString = ({ cursor, limit }: PaginationOptions): string => {\n if (cursor === undefined && limit === undefined) {\n return ''\n }\n const queries: string[] = []\n if (cursor) {\n queries.push(`cursor=${cursor}`)\n }\n if (limit) {\n queries.push(`limit=${limit}`)\n }\n return `?${queries.join('&')}`\n}\n\nexport const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'deposits', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\nexport const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'withdrawals', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAYA,IAAM,oBAAoB,CAAC,EAAE,QAAQ,MAAM,MAAiC;AAC1E,MAAI,WAAW,UAAa,UAAU,QAAW;AAC/C,WAAO;AAAA,EACT;AACA,QAAM,UAAoB,CAAC;AAC3B,MAAI,QAAQ;AACV,YAAQ,KAAK,UAAU,MAAM,EAAE;AAAA,EACjC;AACA,MAAI,OAAO;AACT,YAAQ,KAAK,SAAS,KAAK,EAAE;AAAA,EAC/B;AACA,SAAO,IAAI,QAAQ,KAAK,GAAG,CAAC;AAC9B;AAEO,IAAM,kBAAkB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC5F,SAAO,CAAC,SAAS,YAAY,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC5F;AAEO,IAAM,oBAAoB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC9F,SAAO,CAAC,SAAS,eAAe,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC/F;","names":[]}
\ No newline at end of file
@@ -13,10 +13,10 @@ var createQueryString = ({ cursor, limit }) => {
return `?${queries.join("&")}`;
};
var depositEndpoint = ({ baseUrl = "", address, cursor, limit }) => {
-return [baseUrl, "deposits", address, createQueryString({ cursor, limit })].join("/");
+return [baseUrl, "deposits", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
var withdrawalEndoint = ({ baseUrl = "", address, cursor, limit }) => {
-return [baseUrl, "withdrawals", address, createQueryString({ cursor, limit })].join("/");
+return [baseUrl, "withdrawals", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
export {
depositEndpoint,
...
{"version":3,"sources":["indexer.ts"],"sourcesContent":["export * from './generated'\n\ntype PaginationOptions = {\n limit?: number\n cursor?: string\n}\n\ntype Options = {\n baseUrl?: string\n address: `0x${string}`\n} & PaginationOptions\n\nconst createQueryString = ({ cursor, limit }: PaginationOptions): string => {\n if (cursor === undefined && limit === undefined) {\n return ''\n }\n const queries: string[] = []\n if (cursor) {\n queries.push(`cursor=${cursor}`)\n }\n if (limit) {\n queries.push(`limit=${limit}`)\n }\n return `?${queries.join('&')}`\n}\n\nexport const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'deposits', address, createQueryString({ cursor, limit })].join('/')\n}\n\nexport const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'withdrawals', address, createQueryString({ cursor, limit })].join('/')\n}\n\n"],"mappings":";AAYA,IAAM,oBAAoB,CAAC,EAAE,QAAQ,MAAM,MAAiC;AAC1E,MAAI,WAAW,UAAa,UAAU,QAAW;AAC/C,WAAO;AAAA,EACT;AACA,QAAM,UAAoB,CAAC;AAC3B,MAAI,QAAQ;AACV,YAAQ,KAAK,UAAU,MAAM,EAAE;AAAA,EACjC;AACA,MAAI,OAAO;AACT,YAAQ,KAAK,SAAS,KAAK,EAAE;AAAA,EAC/B;AACA,SAAO,IAAI,QAAQ,KAAK,GAAG,CAAC;AAC9B;AAEO,IAAM,kBAAkB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC5F,SAAO,CAAC,SAAS,YAAY,SAAS,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,KAAK,GAAG;AACtF;AAEO,IAAM,oBAAoB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC9F,SAAO,CAAC,SAAS,eAAe,SAAS,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,KAAK,GAAG;AACzF;","names":[]} {"version":3,"sources":["indexer.ts"],"sourcesContent":["export * from './generated'\n\ntype PaginationOptions = {\n limit?: number\n cursor?: string\n}\n\ntype Options = {\n baseUrl?: string\n address: `0x${string}`\n} & PaginationOptions\n\nconst createQueryString = ({ cursor, limit }: PaginationOptions): string => {\n if (cursor === undefined && limit === undefined) {\n return ''\n }\n const queries: string[] = []\n if (cursor) {\n queries.push(`cursor=${cursor}`)\n }\n if (limit) {\n queries.push(`limit=${limit}`)\n }\n return `?${queries.join('&')}`\n}\n\nexport const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'deposits', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\nexport const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'withdrawals', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\n"],"mappings":";AAYA,IAAM,oBAAoB,CAAC,EAAE,QAAQ,MAAM,MAAiC;AAC1E,MAAI,WAAW,UAAa,UAAU,QAAW;AAC/C,WAAO;AAAA,EACT;AACA,QAAM,UAAoB,CAAC;AAC3B,MAAI,QAAQ;AACV,YAAQ,KAAK,UAAU,MAAM,EAAE;AAAA,EACjC;AACA,MAAI,OAAO;AACT,YAAQ,KAAK,SAAS,KAAK,EAAE;AAAA,EAC/B;AACA,SAAO,IAAI,QAAQ,KAAK,GAAG,CAAC;AAC9B;AAEO,IAAM,kBAAkB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC5F,SAAO,CAAC,SAAS,YAAY,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC5F;AAEO,IAAM,oBAAoB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC9F,SAAO,CAAC,SAAS,eAAe,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC/F;","names":[]}
\ No newline at end of file
import { test, expect } from 'vitest'
import { depositEndpoint, withdrawalEndoint } from './indexer.ts'
test(depositEndpoint.name, () => {
expect(depositEndpoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234', cursor: '0x1235', limit: 10 })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/deposits/0x1234?cursor=0x1235&limit=10"')
expect(depositEndpoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234' })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/deposits/0x1234"')
})
test(withdrawalEndoint.name, () => {
expect(withdrawalEndoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234', cursor: '0x1235', limit: 10 })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/withdrawals/0x1234?cursor=0x1235&limit=10"')
expect(withdrawalEndoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234' })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/withdrawals/0x1234"')
})
@@ -25,10 +25,10 @@ const createQueryString = ({ cursor, limit }: PaginationOptions): string => {
}
export const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {
-return [baseUrl, 'deposits', address, createQueryString({ cursor, limit })].join('/')
+return [baseUrl, 'deposits', `${address}${createQueryString({ cursor, limit })}`].join('/')
}
export const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {
-return [baseUrl, 'withdrawals', address, createQueryString({ cursor, limit })].join('/')
+return [baseUrl, 'withdrawals', `${address}${createQueryString({ cursor, limit })}`].join('/')
}
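The endpoint change above folds the query string onto the address segment before the '/'-join, so the old stray slash between the address and the query (and the trailing slash when no cursor/limit is passed) disappears, which is what the snapshots in the new indexer.spec.ts assert. A self-contained before/after sketch with the builders inlined as plain functions and a simplified query-string helper:

// Old vs. new join behaviour, inlined for comparison (values are placeholders).
const qs = (cursor?: string, limit?: number): string => {
  const parts: string[] = []
  if (cursor) parts.push(`cursor=${cursor}`)
  if (limit) parts.push(`limit=${limit}`)
  return parts.length > 0 ? `?${parts.join('&')}` : ''
}

const oldDeposits = (baseUrl: string, address: string, cursor?: string, limit?: number): string =>
  [baseUrl, 'deposits', address, qs(cursor, limit)].join('/')

const newDeposits = (baseUrl: string, address: string, cursor?: string, limit?: number): string =>
  [baseUrl, 'deposits', `${address}${qs(cursor, limit)}`].join('/')

console.log(oldDeposits('http://localhost:8080/api/v0', '0x1234', '0x1235', 10))
// -> http://localhost:8080/api/v0/deposits/0x1234/?cursor=0x1235&limit=10 (extra "/")
console.log(newDeposits('http://localhost:8080/api/v0', '0x1234', '0x1235', 10))
// -> http://localhost:8080/api/v0/deposits/0x1234?cursor=0x1235&limit=10
console.log(oldDeposits('http://localhost:8080/api/v0', '0x1234'))
// -> http://localhost:8080/api/v0/deposits/0x1234/ (trailing "/")
console.log(newDeposits('http://localhost:8080/api/v0', '0x1234'))
// -> http://localhost:8080/api/v0/deposits/0x1234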
{
"name": "@eth-optimism/indexer-api",
-"version": "0.0.1",
+"version": "0.0.3",
"description": "[Optimism] typescript types for the indexer service",
"main": "indexer.cjs",
"module": "indexer.js",
@@ -17,7 +17,8 @@
],
"scripts": {
"clean": "rm -rf generated.ts indexer.cjs indexer.js",
-"generate": "npm run clean && tygo generate && mv ../api/routes/index.ts generated.ts && npx tsup"
+"generate": "npm run clean && tygo generate && mv ../api/routes/index.ts generated.ts && tsup",
"test": "vitest"
},
"keywords": [
"optimism",
@@ -30,4 +31,9 @@
"repository": {
"type": "git",
"url": "https://github.com/ethereum-optimism/optimism.git"
-}}
+},
"devDependencies": {
"tsup": "^7.2.0",
"vitest": "^0.34.4"
}
}
@@ -4,7 +4,7 @@
"strict": true,
"skipLibCheck": true,
"module": "ESNext",
-"moduleResolution": "bundler",
+"moduleResolution": "NodeNext",
"jsx": "react",
"target": "ESNext",
"noEmit": true
...
@@ -14,12 +14,12 @@ type DepositItem struct {
From string `json:"from"`
To string `json:"to"`
Timestamp uint64 `json:"timestamp"`
-L1TxHash string `json:"L1TxHash"`
-L2TxHash string `json:"L2TxHash"`
-L1BlockHash string `json:"Block"`
+L1BlockHash string `json:"l1BlockHash"`
+L1TxHash string `json:"l1TxHash"`
+L2TxHash string `json:"l2TxHash"`
Amount string `json:"amount"`
-L1TokenAddress string `json:"l1Token"`
-L2TokenAddress string `json:"l2Token"`
+L1TokenAddress string `json:"l1TokenAddress"`
+L2TokenAddress string `json:"l2TokenAddress"`
}
type DepositResponse struct {
...
@@ -17,10 +17,10 @@ type WithdrawalItem struct {
Timestamp uint64 `json:"timestamp"`
L2BlockHash string `json:"l2BlockHash"`
Amount string `json:"amount"`
-ProofTransactionHash string `json:"proof"`
-ClaimTransactionHash string `json:"claim"`
-L1TokenAddress string `json:"l1Token"`
-L2TokenAddress string `json:"l2Token"`
+ProofTransactionHash string `json:"proofTransactionHash"`
+ClaimTransactionHash string `json:"claimTransactionHash"`
+L1TokenAddress string `json:"l1TokenAddress"`
+L2TokenAddress string `json:"l2TokenAddress"`
}
type WithdrawalResponse struct {
...
@@ -2,10 +2,13 @@
package database
import (
"context"
"fmt" "fmt"
"github.com/ethereum-optimism/optimism/indexer/config" "github.com/ethereum-optimism/optimism/indexer/config"
_ "github.com/ethereum-optimism/optimism/indexer/database/serializers" _ "github.com/ethereum-optimism/optimism/indexer/database/serializers"
"github.com/ethereum-optimism/optimism/op-service/retry"
"github.com/pkg/errors"
"gorm.io/driver/postgres" "gorm.io/driver/postgres"
"gorm.io/gorm" "gorm.io/gorm"
...@@ -31,6 +34,8 @@ type DB struct { ...@@ -31,6 +34,8 @@ type DB struct {
} }
func NewDB(dbConfig config.DBConfig) (*DB, error) { func NewDB(dbConfig config.DBConfig) (*DB, error) {
retryStrategy := &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250}
dsn := fmt.Sprintf("host=%s port=%d dbname=%s sslmode=disable", dbConfig.Host, dbConfig.Port, dbConfig.Name)
if dbConfig.User != "" {
dsn += fmt.Sprintf(" user=%s", dbConfig.User)
@@ -38,17 +43,24 @@ func NewDB(dbConfig config.DBConfig) (*DB, error) {
if dbConfig.Password != "" {
dsn += fmt.Sprintf(" password=%s", dbConfig.Password)
}
gorm, err := gorm.Open(postgres.Open(dsn), &gorm.Config{
gormConfig := gorm.Config{
// The indexer will explicitly manage the transactions
SkipDefaultTransaction: true,
// We may choose to create an adapter such that the
// logger emits to the geth logger when on DEBUG mode
Logger: logger.Default.LogMode(logger.Silent),
}
gorm, err := retry.Do[*gorm.DB](context.Background(), 10, retryStrategy, func() (*gorm.DB, error) {
gorm, err := gorm.Open(postgres.Open(dsn), &gormConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to connect to database")
}
return gorm, nil
})
if err != nil {
-return nil, err
+return nil, errors.Wrap(err, "failed to connect to database after multiple retries")
}
db := &DB{
...
@@ -35,5 +35,9 @@
"PreimageOracle",
"BlockOracle",
"EAS",
-"SchemaRegistry"
+"SchemaRegistry",
"ProtocolVersions",
"Safe",
"SafeProxyFactory",
"DelayedVetoable"
]
This diff is collapsed.
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package bindings
import (
"encoding/json"
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const DelayedVetoableStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"src/L1/DelayedVetoable.sol:DelayedVetoable\",\"label\":\"_delay\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_uint256\"},{\"astId\":1001,\"contract\":\"src/L1/DelayedVetoable.sol:DelayedVetoable\",\"label\":\"_queuedAt\",\"offset\":0,\"slot\":\"1\",\"type\":\"t_mapping(t_bytes32,t_uint256)\"}],\"types\":{\"t_bytes32\":{\"encoding\":\"inplace\",\"label\":\"bytes32\",\"numberOfBytes\":\"32\"},\"t_mapping(t_bytes32,t_uint256)\":{\"encoding\":\"mapping\",\"label\":\"mapping(bytes32 =\u003e uint256)\",\"numberOfBytes\":\"32\",\"key\":\"t_bytes32\",\"value\":\"t_uint256\"},\"t_uint256\":{\"encoding\":\"inplace\",\"label\":\"uint256\",\"numberOfBytes\":\"32\"}}}"
var DelayedVetoableStorageLayout = new(solc.StorageLayout)
var DelayedVetoableDeployedBin = "0x608060405234801561001057600080fd5b50600436106100725760003560e01c8063b912de5d11610050578063b912de5d14610111578063d4b8399214610124578063d8bff4401461012c57610072565b806354fd4d501461007c5780635c39fcc1146100ce5780636a42b8f8146100fb575b61007a610134565b005b6100b86040518060400160405280600581526020017f312e302e3000000000000000000000000000000000000000000000000000000081525081565b6040516100c591906106a7565b60405180910390f35b6100d66104fb565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100c5565b610103610532565b6040519081526020016100c5565b61010361011f36600461071a565b610540565b6100d6610567565b6100d6610593565b361580156101425750600054155b15610298573373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016148015906101c357503373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614155b1561023d576040517f295a81c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660048201523360248201526044015b60405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000060008190556040519081527febf28bfb587e28dfffd9173cf71c32ba5d3f0544a0117b5539c9b274a5bba2a89060200160405180910390a1565b600080366040516102aa929190610733565b60405190819003902090503373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161480156103065750600081815260016020526040902054155b1561036c5760005460000361031e5761031e816105bf565b6000818152600160205260408082204290555182917f87a332a414acbc7da074543639ce7ae02ff1ea72e88379da9f261b080beb5a139161036191903690610743565b60405180910390a250565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161480156103be575060008181526001602052604090205415155b15610406576000818152600160205260408082208290555182917fbede6852c1d97d93ff557f676de76670cd0dec861e7fe8beb13aa0ba2b0ab0409161036191903690610743565b600081815260016020526040812054900361048b576040517f295a81c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000166004820152336024820152604401610234565b60008054828252600160205260409091205442916104a891610790565b10156104e0576040517f43dc986d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000818152600160205260408120556104f8816105bf565b50565b60003361052757507f000000000000000000000000000000000000000000000000000000000000000090565b61052f610134565b90565b600033610527575060005490565b60003361055a575060009081526001602052604090205490565b610562610134565b919050565b60003361052757507f000000000000000000000000000000000000000000000000000000000000000090565b60003361052757507f000000000000000000000000000000000000000000000000000000000000000090565b807f4c109d85bcd0bb5c735b4be850953d652afe4cd9aa2e0b1426a65a4dcb2e12296000366040516105f2929190610743565b60405180910390a26000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16600036604051610645929190610733565b6000604051808303816000865af19150503d8060008114610682576040519150601f19603f3d011682016040523d82523d6000602084013e610687565b606091505b50909250905081151560010361069f57805160208201f35b805160208201fd5b600060208083528351808285015260005b818110156106d4578581018301518582016040015282016106b8565b818111156106e65760006
04083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b60006020828403121561072c57600080fd5b5035919050565b8183823760009101908152919050565b60208152816020820152818360408301376000818301604090810191909152601f9092017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160101919050565b600082198211156107ca577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b50019056fea164736f6c634300080f000a"
func init() {
if err := json.Unmarshal([]byte(DelayedVetoableStorageLayoutJSON), DelayedVetoableStorageLayout); err != nil {
panic(err)
}
layouts["DelayedVetoable"] = DelayedVetoableStorageLayout
deployedBytecodes["DelayedVetoable"] = DelayedVetoableDeployedBin
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package bindings
import (
"encoding/json"
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const ProtocolVersionsStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"_initialized\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_uint8\"},{\"astId\":1001,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"_initializing\",\"offset\":1,\"slot\":\"0\",\"type\":\"t_bool\"},{\"astId\":1002,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"__gap\",\"offset\":0,\"slot\":\"1\",\"type\":\"t_array(t_uint256)50_storage\"},{\"astId\":1003,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"_owner\",\"offset\":0,\"slot\":\"51\",\"type\":\"t_address\"},{\"astId\":1004,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"__gap\",\"offset\":0,\"slot\":\"52\",\"type\":\"t_array(t_uint256)49_storage\"}],\"types\":{\"t_address\":{\"encoding\":\"inplace\",\"label\":\"address\",\"numberOfBytes\":\"20\"},\"t_array(t_uint256)49_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[49]\",\"numberOfBytes\":\"1568\",\"base\":\"t_uint256\"},\"t_array(t_uint256)50_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[50]\",\"numberOfBytes\":\"1600\",\"base\":\"t_uint256\"},\"t_bool\":{\"encoding\":\"inplace\",\"label\":\"bool\",\"numberOfBytes\":\"1\"},\"t_uint256\":{\"encoding\":\"inplace\",\"label\":\"uint256\",\"numberOfBytes\":\"32\"},\"t_uint8\":{\"encoding\":\"inplace\",\"label\":\"uint8\",\"numberOfBytes\":\"1\"}}}"
var ProtocolVersionsStorageLayout = new(solc.StorageLayout)
var ProtocolVersionsDeployedBin = "0x608060405234801561001057600080fd5b50600436106100d45760003560e01c80638da5cb5b11610081578063f2fde38b1161005b578063f2fde38b146101b8578063f7d12760146101cb578063ffa1ad74146101d357600080fd5b80638da5cb5b14610180578063d798b1ac146101a8578063dc8452cd146101b057600080fd5b80635fd579af116100b25780635fd579af14610152578063715018a6146101655780637a1ac61e1461016d57600080fd5b80630457d6f2146100d9578063206a8300146100ee57806354fd4d5014610109575b600080fd5b6100ec6100e7366004610859565b6101db565b005b6100f66101ef565b6040519081526020015b60405180910390f35b6101456040518060400160405280600581526020017f302e312e3000000000000000000000000000000000000000000000000000000081525081565b60405161010091906108dd565b6100ec610160366004610859565b61021d565b6100ec61022e565b6100ec61017b366004610920565b610242565b60335460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610100565b6100f66103ad565b6100f66103e6565b6100ec6101c6366004610953565b610416565b6100f66104ca565b6100f6600081565b6101e36104f5565b6101ec81610576565b50565b61021a60017f4aaefe95bd84fd3f32700cf3b7566bc944b73138e41958b5785826df2aecace161096e565b81565b6102256104f5565b6101ec8161062d565b6102366104f5565b61024060006106a8565b565b600054600290610100900460ff16158015610264575060005460ff8083169116105b6102f5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a656400000000000000000000000000000000000060648201526084015b60405180910390fd5b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001660ff83161761010017905561032e61071f565b61033784610416565b61034083610576565b6103498261062d565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff16905560405160ff821681527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a150505050565b60006103e16103dd60017fe314dfc40f0025322aacc0ba8ef420b62fb3b702cf01e0cdf3d829117ac2ff1b61096e565b5490565b905090565b60006103e16103dd60017f4aaefe95bd84fd3f32700cf3b7566bc944b73138e41958b5785826df2aecace161096e565b61041e6104f5565b73ffffffffffffffffffffffffffffffffffffffff81166104c1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201527f646472657373000000000000000000000000000000000000000000000000000060648201526084016102ec565b6101ec816106a8565b61021a60017fe314dfc40f0025322aacc0ba8ef420b62fb3b702cf01e0cdf3d829117ac2ff1b61096e565b60335473ffffffffffffffffffffffffffffffffffffffff163314610240576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e657260448201526064016102ec565b6105a8816105a560017f4aaefe95bd84fd3f32700cf3b7566bc944b73138e41958b5785826df2aecace161096e565b55565b6000816040516020016105bd91815260200190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152919052905060005b60007f1d2b0bda21d56b8bd12d4f94ebacffdfb35f5e226f84b461103bb8beab6353be8360405161062191906108dd565b60405180910390a35050565b61065c816105a560017fe314dfc40f0025322aacc0ba8ef420b62fb3b702cf01e0cdf3d829117ac2ff1b61096e565b60008160405160200161067191815260200190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152919052905060016105f0565b6033805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff00000000000000000000000000
00000000000000831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a35050565b600054610100900460ff166107b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e6700000000000000000000000000000000000000000060648201526084016102ec565b610240600054610100900460ff16610850576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e6700000000000000000000000000000000000000000060648201526084016102ec565b610240336106a8565b60006020828403121561086b57600080fd5b5035919050565b6000815180845260005b818110156108985760208185018101518683018201520161087c565b818111156108aa576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6020815260006108f06020830184610872565b9392505050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461091b57600080fd5b919050565b60008060006060848603121561093557600080fd5b61093e846108f7565b95602085013595506040909401359392505050565b60006020828403121561096557600080fd5b6108f0826108f7565b6000828210156109a7577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b50039056fea164736f6c634300080f000a"
func init() {
if err := json.Unmarshal([]byte(ProtocolVersionsStorageLayoutJSON), ProtocolVersionsStorageLayout); err != nil {
panic(err)
}
layouts["ProtocolVersions"] = ProtocolVersionsStorageLayout
deployedBytecodes["ProtocolVersions"] = ProtocolVersionsDeployedBin
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package bindings
import (
"encoding/json"
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const SafeProxyFactoryStorageLayoutJSON = "{\"storage\":null,\"types\":{}}"
var SafeProxyFactoryStorageLayout = new(solc.StorageLayout)
var SafeProxyFactoryDeployedBin = "0x608060405234801561001057600080fd5b50600436106100675760003560e01c806353e5d9351161005057806353e5d935146100b7578063d18af54d146100cc578063ec9e80bb146100df57600080fd5b80631688f0b91461006c5780633408e470146100a9575b600080fd5b61007f61007a3660046105d2565b6100f2565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6040514681526020016100a0565b6100bf610194565b6040516100a091906106a5565b61007f6100da3660046106bf565b6101dc565b61007f6100ed3660046105d2565b6102f8565b600080838051906020012083604051602001610118929190918252602082015260400190565b60405160208183030381529060405280519060200120905061013b85858361032a565b60405173ffffffffffffffffffffffffffffffffffffffff8781168252919350908316907f4f51faf6c4561ff95f067657e43439f0f856d97c04d9ec9070a6199ad418e2359060200160405180910390a2509392505050565b6060604051806020016101a6906104c6565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe082820381018352601f90910116604052919050565b600080838360405160200161022092919091825260601b7fffffffffffffffffffffffffffffffffffffffff00000000000000000000000016602082015260340190565b6040516020818303038152906040528051906020012060001c90506102468686836100f2565b915073ffffffffffffffffffffffffffffffffffffffff8316156102ef576040517f1e52b51800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff841690631e52b518906102bc9085908a908a908a9060040161072b565b600060405180830381600087803b1580156102d657600080fd5b505af11580156102ea573d6000803e3d6000fd5b505050505b50949350505050565b60008083805190602001208361030b4690565b6040805160208101949094528301919091526060820152608001610118565b6000833b610399576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f53696e676c65746f6e20636f6e7472616374206e6f74206465706c6f7965640060448201526064015b60405180910390fd5b6000604051806020016103ab906104c6565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe082820381018352601f909101166040819052610403919073ffffffffffffffffffffffffffffffffffffffff881690602001610775565b6040516020818303038152906040529050828151826020016000f5915073ffffffffffffffffffffffffffffffffffffffff821661049d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f437265617465322063616c6c206661696c6564000000000000000000000000006044820152606401610390565b8351156104be5760008060008651602088016000875af1036104be57600080fd5b509392505050565b61016f8061079883390190565b73ffffffffffffffffffffffffffffffffffffffff811681146104f557600080fd5b50565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600082601f83011261053857600080fd5b813567ffffffffffffffff80821115610553576105536104f8565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908282118183101715610599576105996104f8565b816040528381528660208588010111156105b257600080fd5b836020870160208301376000602085830101528094505050505092915050565b6000806000606084860312156105e757600080fd5b83356105f2816104d3565b9250602084013567ffffffffffffffff81111561060e57600080fd5b61061a86828701610527565b925050604084013590509250925092565b60005b8381101561064657818101518382015260200161062e565b83811115610655576000848401525b50505050565b6000815180845261067381602086016020860161062b565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6020815260006106b8602083018461065b565b9392505050565b600080600080608085870312156106d557600080fd5b84356106e0816104d3
565b9350602085013567ffffffffffffffff8111156106fc57600080fd5b61070887828801610527565b935050604085013591506060850135610720816104d3565b939692955090935050565b600073ffffffffffffffffffffffffffffffffffffffff808716835280861660208401525060806040830152610764608083018561065b565b905082606083015295945050505050565b6000835161078781846020880161062b565b919091019182525060200191905056fe608060405234801561001057600080fd5b5060405161016f38038061016f83398101604081905261002f916100b9565b6001600160a01b0381166100945760405162461bcd60e51b815260206004820152602260248201527f496e76616c69642073696e676c65746f6e20616464726573732070726f766964604482015261195960f21b606482015260840160405180910390fd5b600080546001600160a01b0319166001600160a01b03929092169190911790556100e9565b6000602082840312156100cb57600080fd5b81516001600160a01b03811681146100e257600080fd5b9392505050565b6078806100f76000396000f3fe6080604052600073ffffffffffffffffffffffffffffffffffffffff8154167fa619486e00000000000000000000000000000000000000000000000000000000823503604d57808252602082f35b3682833781823684845af490503d82833e806066573d82fd5b503d81f3fea164736f6c634300080f000aa164736f6c634300080f000a"
func init() {
if err := json.Unmarshal([]byte(SafeProxyFactoryStorageLayoutJSON), SafeProxyFactoryStorageLayout); err != nil {
panic(err)
}
layouts["SafeProxyFactory"] = SafeProxyFactoryStorageLayout
deployedBytecodes["SafeProxyFactory"] = SafeProxyFactoryDeployedBin
}
This diff is collapsed.
@@ -13,7 +13,7 @@ const WETH9StorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"src
var WETH9StorageLayout = new(solc.StorageLayout)
var WETH9DeployedBin = "0x6080604052600436106100bc5760003560e01c8063313ce56711610074578063a9059cbb1161004e578063a9059cbb146102cb578063d0e30db0146100bc578063dd62ed3e14610311576100bc565b8063313ce5671461024b57806370a082311461027657806395d89b41146102b6576100bc565b806318160ddd116100a557806318160ddd146101aa57806323b872dd146101d15780632e1a7d4d14610221576100bc565b806306fdde03146100c6578063095ea7b314610150575b6100c4610359565b005b3480156100d257600080fd5b506100db6103a8565b6040805160208082528351818301528351919283929083019185019080838360005b838110156101155781810151838201526020016100fd565b50505050905090810190601f1680156101425780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561015c57600080fd5b506101966004803603604081101561017357600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135169060200135610454565b604080519115158252519081900360200190f35b3480156101b657600080fd5b506101bf6104c7565b60408051918252519081900360200190f35b3480156101dd57600080fd5b50610196600480360360608110156101f457600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135811691602081013590911690604001356104cb565b34801561022d57600080fd5b506100c46004803603602081101561024457600080fd5b503561066b565b34801561025757600080fd5b50610260610700565b6040805160ff9092168252519081900360200190f35b34801561028257600080fd5b506101bf6004803603602081101561029957600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16610709565b3480156102c257600080fd5b506100db61071b565b3480156102d757600080fd5b50610196600480360360408110156102ee57600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135169060200135610793565b34801561031d57600080fd5b506101bf6004803603604081101561033457600080fd5b5073ffffffffffffffffffffffffffffffffffffffff813581169160200135166107a7565b33600081815260036020908152604091829020805434908101909155825190815291517fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c9281900390910190a2565b6000805460408051602060026001851615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f8101849004840282018401909252818152929183018282801561044c5780601f106104215761010080835404028352916020019161044c565b820191906000526020600020905b81548152906001019060200180831161042f57829003601f168201915b505050505081565b33600081815260046020908152604080832073ffffffffffffffffffffffffffffffffffffffff8716808552908352818420869055815186815291519394909390927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925928290030190a350600192915050565b4790565b73ffffffffffffffffffffffffffffffffffffffff83166000908152600360205260408120548211156104fd57600080fd5b73ffffffffffffffffffffffffffffffffffffffff84163314801590610573575073ffffffffffffffffffffffffffffffffffffffff841660009081526004602090815260408083203384529091529020547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff14155b156105ed5773ffffffffffffffffffffffffffffffffffffffff841660009081526004602090815260408083203384529091529020548211156105b557600080fd5b73ffffffffffffffffffffffffffffffffffffffff841660009081526004602090815260408083203384529091529020805483900390555b73ffffffffffffffffffffffffffffffffffffffff808516600081815260036020908152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a35060019392505050565b3360009081526003602052604090205481111561068757600080fd5b33600081815260036020526040808220805485900390555183156108fc0291849190818181858888f193505050501580156106c6573d6000803e3d6000fd5b5060408051828152905133917f7fcf532c15f0a6db0bd6d0e038bea71d3
0d808c7d98cb3bf7268a95bf5081b65919081900360200190a250565b60025460ff1681565b60036020526000908152604090205481565b60018054604080516020600284861615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f8101849004840282018401909252818152929183018282801561044c5780601f106104215761010080835404028352916020019161044c565b60006107a03384846104cb565b9392505050565b60046020908152600092835260408084209091529082529020548156fea265627a7a7231582067ace7518c12acba9306ac90d5b712c789ea3d50860ef4996a20921a3c1e61f764736f6c63430005110032" var WETH9DeployedBin = "0x6080604052600436106100bc5760003560e01c8063313ce56711610074578063a9059cbb1161004e578063a9059cbb146102cb578063d0e30db0146100bc578063dd62ed3e14610311576100bc565b8063313ce5671461024b57806370a082311461027657806395d89b41146102b6576100bc565b806318160ddd116100a557806318160ddd146101aa57806323b872dd146101d15780632e1a7d4d14610221576100bc565b806306fdde03146100c6578063095ea7b314610150575b6100c4610359565b005b3480156100d257600080fd5b506100db6103a8565b6040805160208082528351818301528351919283929083019185019080838360005b838110156101155781810151838201526020016100fd565b50505050905090810190601f1680156101425780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561015c57600080fd5b506101966004803603604081101561017357600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135169060200135610454565b604080519115158252519081900360200190f35b3480156101b657600080fd5b506101bf6104c7565b60408051918252519081900360200190f35b3480156101dd57600080fd5b50610196600480360360608110156101f457600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135811691602081013590911690604001356104cb565b34801561022d57600080fd5b506100c46004803603602081101561024457600080fd5b503561066b565b34801561025757600080fd5b50610260610700565b6040805160ff9092168252519081900360200190f35b34801561028257600080fd5b506101bf6004803603602081101561029957600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16610709565b3480156102c257600080fd5b506100db61071b565b3480156102d757600080fd5b50610196600480360360408110156102ee57600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135169060200135610793565b34801561031d57600080fd5b506101bf6004803603604081101561033457600080fd5b5073ffffffffffffffffffffffffffffffffffffffff813581169160200135166107a7565b33600081815260036020908152604091829020805434908101909155825190815291517fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c9281900390910190a2565b6000805460408051602060026001851615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f8101849004840282018401909252818152929183018282801561044c5780601f106104215761010080835404028352916020019161044c565b820191906000526020600020905b81548152906001019060200180831161042f57829003601f168201915b505050505081565b33600081815260046020908152604080832073ffffffffffffffffffffffffffffffffffffffff8716808552908352818420869055815186815291519394909390927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925928290030190a350600192915050565b4790565b73ffffffffffffffffffffffffffffffffffffffff83166000908152600360205260408120548211156104fd57600080fd5b73ffffffffffffffffffffffffffffffffffffffff84163314801590610573575073ffffffffffffffffffffffffffffffffffffffff841660009081526004602090815260408083203384529091529020547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff14155b156105ed5773ffffffffffffffffffffffffffffffffffffffff841660009081526004602090815260408083203384529091529020548211156105b557600080fd5b73ffffffffffffffffffffffffffffffffffffffff84166000
9081526004602090815260408083203384529091529020805483900390555b73ffffffffffffffffffffffffffffffffffffffff808516600081815260036020908152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a35060019392505050565b3360009081526003602052604090205481111561068757600080fd5b33600081815260036020526040808220805485900390555183156108fc0291849190818181858888f193505050501580156106c6573d6000803e3d6000fd5b5060408051828152905133917f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65919081900360200190a250565b60025460ff1681565b60036020526000908152604090205481565b60018054604080516020600284861615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f8101849004840282018401909252818152929183018282801561044c5780601f106104215761010080835404028352916020019161044c565b60006107a03384846104cb565b9392505050565b60046020908152600092835260408084209091529082529020548156fea265627a7a723158203b5b5adf37f507cc9ca5ac0b72acf7873b794171cc0eeb888a5f850e69fe540d64736f6c63430005110032"
func init() {
if err := json.Unmarshal([]byte(WETH9StorageLayoutJSON), WETH9StorageLayout); err != nil {
...
@@ -18,6 +18,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/eth"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum-optimism/optimism/op-service/opio" "github.com/ethereum-optimism/optimism/op-service/opio"
) )
...@@ -69,6 +70,17 @@ func Main(cliCtx *cli.Context) error { ...@@ -69,6 +70,17 @@ func Main(cliCtx *cli.Context) error {
go p2pNode.DiscoveryProcess(ctx, logger, config, p2pConfig.TargetPeers()) go p2pNode.DiscoveryProcess(ctx, logger, config, p2pConfig.TargetPeers())
metricsCfg := opmetrics.ReadCLIConfig(cliCtx)
if metricsCfg.Enabled {
log.Info("starting metrics server", "addr", metricsCfg.ListenAddr, "port", metricsCfg.ListenPort)
go func() {
if err := m.Serve(ctx, metricsCfg.ListenAddr, metricsCfg.ListenPort); err != nil {
log.Error("error starting metrics server", err)
}
}()
m.RecordUp()
}
opio.BlockOnInterrupts()
return nil
...
@@ -8,6 +8,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/flags"
opservice "github.com/ethereum-optimism/optimism/op-service"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
...@@ -36,5 +37,7 @@ var Flags = []cli.Flag{ ...@@ -36,5 +37,7 @@ var Flags = []cli.Flag{
} }
func init() { func init() {
Flags = append(Flags, flags.P2pFlags...)
Flags = append(Flags, opmetrics.CLIFlags(envVarPrefix)...)
Flags = append(Flags, oplog.CLIFlags(envVarPrefix)...)
}
@@ -8,7 +8,6 @@ import (
"math/big"
"os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@@ -16,6 +15,7 @@ import (
gstate "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-bindings/hardhat" "github.com/ethereum-optimism/optimism/op-bindings/hardhat"
...@@ -198,6 +198,12 @@ type DeployConfig struct { ...@@ -198,6 +198,12 @@ type DeployConfig struct {
// FundDevAccounts configures whether or not to fund the dev accounts. Should only be used // FundDevAccounts configures whether or not to fund the dev accounts. Should only be used
// during devnet deployments. // during devnet deployments.
FundDevAccounts bool `json:"fundDevAccounts"` FundDevAccounts bool `json:"fundDevAccounts"`
// RequiredProtocolVersion indicates the protocol version that
// nodes are required to adopt, to stay in sync with the network.
RequiredProtocolVersion params.ProtocolVersion `json:"requiredProtocolVersion"`
// RecommendedProtocolVersion indicates the protocol version that
// nodes are recommended to adopt, to stay in sync with the network.
RecommendedProtocolVersion params.ProtocolVersion `json:"recommendedProtocolVersion"`
}
// Copy will deeply copy the DeployConfig. This does a JSON roundtrip to copy
@@ -522,6 +528,8 @@ type L1Deployments struct {
ProxyAdmin common.Address `json:"ProxyAdmin"`
SystemConfig common.Address `json:"SystemConfig"`
SystemConfigProxy common.Address `json:"SystemConfigProxy"`
ProtocolVersions common.Address `json:"ProtocolVersions"`
ProtocolVersionsProxy common.Address `json:"ProtocolVersionsProxy"`
}
// GetName will return the name of the contract given an address.
...
@@ -89,6 +89,8 @@ func TestL1Deployments(t *testing.T) {
require.NotEqual(t, deployments.ProxyAdmin, common.Address{})
require.NotEqual(t, deployments.SystemConfig, common.Address{})
require.NotEqual(t, deployments.SystemConfigProxy, common.Address{})
require.NotEqual(t, deployments.ProtocolVersions, common.Address{})
require.NotEqual(t, deployments.ProtocolVersionsProxy, common.Address{})
require.Equal(t, "AddressManager", deployments.GetName(deployments.AddressManager)) require.Equal(t, "AddressManager", deployments.GetName(deployments.AddressManager))
require.Equal(t, "OptimismPortalProxy", deployments.GetName(deployments.OptimismPortalProxy)) require.Equal(t, "OptimismPortalProxy", deployments.GetName(deployments.OptimismPortalProxy))
......
@@ -16,5 +16,7 @@
"OptimismPortalProxy": "0xaC425EECd4Fd8E9E669a62906D99aF89B9951516",
"ProxyAdmin": "0x3218c3b0dC0386BAe83A58E5F908c4b070210b4F",
"SystemConfig": "0x36bAcDD96F28e1ac0780bB9CbE6e20780840730F",
-"SystemConfigProxy": "0x14065A373936533A0c88b7986CADabDD62d471e6"
+"SystemConfigProxy": "0x14065A373936533A0c88b7986CADabDD62d471e6",
"ProtocolVersions": "0x883C06a27D76B1CEbdf7EB376f5556c355afC8e5",
"ProtocolVersionsProxy": "0x3732a4D4Ab006cA4825822baEA1569A107683fa1"
}
@@ -67,5 +67,7 @@
"faultGameAbsolutePrestate": "0x0000000000000000000000000000000000000000000000000000000000000000",
"faultGameMaxDepth": 63,
"faultGameMaxDuration": 604800,
-"systemConfigStartBlock": 0
+"systemConfigStartBlock": 0,
"requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
"recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
@@ -24,6 +24,7 @@ var (
cannonPreState = "./pre.json"
datadir = "./test_data"
cannonL2 = "http://example.com:9545"
rollupRpc = "http://example.com:8555"
alphabetTrace = "abcdefghijz"
agreeWithProposedOutput = "true"
)
@@ -169,6 +170,26 @@ func TestMaxConcurrency(t *testing.T) {
})
}
func TestPollInterval(t *testing.T) {
t.Run("UsesDefault", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeCannon))
require.Equal(t, config.DefaultPollInterval, cfg.PollInterval)
})
t.Run("Valid", func(t *testing.T) {
expected := 100 * time.Second
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--http-poll-interval", "100s"))
require.Equal(t, expected, cfg.PollInterval)
})
t.Run("Invalid", func(t *testing.T) {
verifyArgsInvalid(
t,
"invalid value \"abc\" for flag -http-poll-interval",
addRequiredArgs(config.TraceTypeAlphabet, "--http-poll-interval", "abc"))
})
}
func TestCannonBin(t *testing.T) { func TestCannonBin(t *testing.T) {
t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) {
configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-bin")) configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-bin"))
...@@ -229,6 +250,25 @@ func TestDataDir(t *testing.T) { ...@@ -229,6 +250,25 @@ func TestDataDir(t *testing.T) {
}) })
} }
func TestRollupRpc(t *testing.T) {
t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) {
configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--rollup-rpc"))
})
t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) {
configForArgs(t, addRequiredArgsExcept(config.TraceTypeCannon, "--rollup-rpc"))
})
t.Run("RequiredForOutputCannonTrace", func(t *testing.T) {
verifyArgsInvalid(t, "flag rollup-rpc is required", addRequiredArgsExcept(config.TraceTypeOutputCannon, "--rollup-rpc"))
})
t.Run("Valid", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeOutputCannon))
require.Equal(t, rollupRpc, cfg.RollupRpc)
})
}
func TestCannonL2(t *testing.T) { func TestCannonL2(t *testing.T) {
t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) {
configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-l2")) configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-l2"))
...@@ -402,13 +442,16 @@ func requiredArgs(traceType config.TraceType) map[string]string { ...@@ -402,13 +442,16 @@ func requiredArgs(traceType config.TraceType) map[string]string {
switch traceType { switch traceType {
case config.TraceTypeAlphabet: case config.TraceTypeAlphabet:
args["--alphabet"] = alphabetTrace args["--alphabet"] = alphabetTrace
case config.TraceTypeCannon: case config.TraceTypeCannon, config.TraceTypeOutputCannon:
args["--cannon-network"] = cannonNetwork args["--cannon-network"] = cannonNetwork
args["--cannon-bin"] = cannonBin args["--cannon-bin"] = cannonBin
args["--cannon-server"] = cannonServer args["--cannon-server"] = cannonServer
args["--cannon-prestate"] = cannonPreState args["--cannon-prestate"] = cannonPreState
args["--cannon-l2"] = cannonL2 args["--cannon-l2"] = cannonL2
} }
if traceType == config.TraceTypeOutputCannon {
args["--rollup-rpc"] = rollupRpc
}
return args return args
} }
......
...@@ -32,6 +32,7 @@ var ( ...@@ -32,6 +32,7 @@ var (
ErrCannonNetworkAndRollupConfig = errors.New("only specify one of network or rollup config path") ErrCannonNetworkAndRollupConfig = errors.New("only specify one of network or rollup config path")
ErrCannonNetworkAndL2Genesis = errors.New("only specify one of network or l2 genesis path") ErrCannonNetworkAndL2Genesis = errors.New("only specify one of network or l2 genesis path")
ErrCannonNetworkUnknown = errors.New("unknown cannon network") ErrCannonNetworkUnknown = errors.New("unknown cannon network")
ErrMissingRollupRpc = errors.New("missing rollup rpc url")
) )
type TraceType string type TraceType string
...@@ -39,6 +40,7 @@ type TraceType string ...@@ -39,6 +40,7 @@ type TraceType string
const ( const (
TraceTypeAlphabet TraceType = "alphabet" TraceTypeAlphabet TraceType = "alphabet"
TraceTypeCannon TraceType = "cannon" TraceTypeCannon TraceType = "cannon"
TraceTypeOutputCannon TraceType = "output_cannon"
// Mainnet games // Mainnet games
CannonFaultGameID = 0 CannonFaultGameID = 0
...@@ -47,7 +49,7 @@ const ( ...@@ -47,7 +49,7 @@ const (
AlphabetFaultGameID = 255 AlphabetFaultGameID = 255
) )
var TraceTypes = []TraceType{TraceTypeAlphabet, TraceTypeCannon} var TraceTypes = []TraceType{TraceTypeAlphabet, TraceTypeCannon, TraceTypeOutputCannon}
// GameIdToString maps game IDs to their string representation. // GameIdToString maps game IDs to their string representation.
var GameIdToString = map[uint8]string{ var GameIdToString = map[uint8]string{
...@@ -78,6 +80,7 @@ func ValidTraceType(value TraceType) bool { ...@@ -78,6 +80,7 @@ func ValidTraceType(value TraceType) bool {
} }
const ( const (
DefaultPollInterval = time.Second * 12
DefaultCannonSnapshotFreq = uint(1_000_000_000) DefaultCannonSnapshotFreq = uint(1_000_000_000)
DefaultCannonInfoFreq = uint(10_000_000) DefaultCannonInfoFreq = uint(10_000_000)
// DefaultGameWindow is the default maximum time duration in the past // DefaultGameWindow is the default maximum time duration in the past
...@@ -98,12 +101,16 @@ type Config struct { ...@@ -98,12 +101,16 @@ type Config struct {
AgreeWithProposedOutput bool // Temporary config if we agree or disagree with the posted output AgreeWithProposedOutput bool // Temporary config if we agree or disagree with the posted output
Datadir string // Data Directory Datadir string // Data Directory
MaxConcurrency uint // Maximum number of threads to use when progressing games MaxConcurrency uint // Maximum number of threads to use when progressing games
PollInterval time.Duration // Polling interval for latest-block subscription when using an HTTP RPC provider
TraceType TraceType // Type of trace TraceType TraceType // Type of trace
// Specific to the alphabet trace provider // Specific to the alphabet trace provider
AlphabetTrace string // String for the AlphabetTraceProvider AlphabetTrace string // String for the AlphabetTraceProvider
// Specific to the output cannon trace type
RollupRpc string
// Specific to the cannon trace provider // Specific to the cannon trace provider
CannonBin string // Path to the cannon executable to run when generating trace data CannonBin string // Path to the cannon executable to run when generating trace data
CannonServer string // Path to the op-program executable that provides the pre-image oracle server CannonServer string // Path to the op-program executable that provides the pre-image oracle server
...@@ -131,6 +138,7 @@ func NewConfig( ...@@ -131,6 +138,7 @@ func NewConfig(
L1EthRpc: l1EthRpc, L1EthRpc: l1EthRpc,
GameFactoryAddress: gameFactoryAddress, GameFactoryAddress: gameFactoryAddress,
MaxConcurrency: uint(runtime.NumCPU()), MaxConcurrency: uint(runtime.NumCPU()),
PollInterval: DefaultPollInterval,
AgreeWithProposedOutput: agreeWithProposedOutput, AgreeWithProposedOutput: agreeWithProposedOutput,
...@@ -164,7 +172,12 @@ func (c Config) Check() error { ...@@ -164,7 +172,12 @@ func (c Config) Check() error {
if c.MaxConcurrency == 0 { if c.MaxConcurrency == 0 {
return ErrMaxConcurrencyZero return ErrMaxConcurrencyZero
} }
if c.TraceType == TraceTypeCannon { if c.TraceType == TraceTypeOutputCannon {
if c.RollupRpc == "" {
return ErrMissingRollupRpc
}
}
if c.TraceType == TraceTypeCannon || c.TraceType == TraceTypeOutputCannon {
if c.CannonBin == "" { if c.CannonBin == "" {
return ErrMissingCannonBin return ErrMissingCannonBin
} }
......
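As a rough sketch of what the new output_cannon trace type expects from configuration: the values below are placeholders, the field names are taken from the Config struct shown above, and Check() may also validate fields that do not appear in this diff, so the example simply prints whatever verdict it returns.

package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-challenger/config"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// All values are placeholders; field names come from the Config struct in this change.
	cfg := config.Config{
		L1EthRpc:                "http://localhost:8545",
		GameFactoryAddress:      common.HexToAddress("0x1111111111111111111111111111111111111111"),
		AgreeWithProposedOutput: true,
		Datadir:                 "/tmp/challenger-data",
		MaxConcurrency:          4,
		PollInterval:            config.DefaultPollInterval,
		TraceType:               config.TraceTypeOutputCannon,
		RollupRpc:               "http://localhost:8555", // the field the new output_cannon branch requires
		CannonBin:               "./bin/cannon",
		CannonServer:            "./bin/op-program",
		CannonAbsolutePreState:  "./pre.json",
		CannonL2:                "http://localhost:9545",
		CannonNetwork:           "goerli",
	}
	// Check() may enforce additional fields not listed here; print its verdict either way.
	fmt.Println("Check result:", cfg.Check())
}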
...@@ -20,6 +20,7 @@ var ( ...@@ -20,6 +20,7 @@ var (
validCannonAbsolutPreState = "pre.json" validCannonAbsolutPreState = "pre.json"
validDatadir = "/tmp/data" validDatadir = "/tmp/data"
validCannonL2 = "http://localhost:9545" validCannonL2 = "http://localhost:9545"
validRollupRpc = "http://localhost:8555"
agreeWithProposedOutput = true agreeWithProposedOutput = true
) )
...@@ -28,13 +29,16 @@ func validConfig(traceType TraceType) Config { ...@@ -28,13 +29,16 @@ func validConfig(traceType TraceType) Config {
switch traceType { switch traceType {
case TraceTypeAlphabet: case TraceTypeAlphabet:
cfg.AlphabetTrace = validAlphabetTrace cfg.AlphabetTrace = validAlphabetTrace
case TraceTypeCannon: case TraceTypeCannon, TraceTypeOutputCannon:
cfg.CannonBin = validCannonBin cfg.CannonBin = validCannonBin
cfg.CannonServer = validCannonOpProgramBin cfg.CannonServer = validCannonOpProgramBin
cfg.CannonAbsolutePreState = validCannonAbsolutPreState cfg.CannonAbsolutePreState = validCannonAbsolutPreState
cfg.CannonL2 = validCannonL2 cfg.CannonL2 = validCannonL2
cfg.CannonNetwork = validCannonNetwork cfg.CannonNetwork = validCannonNetwork
} }
if traceType == TraceTypeOutputCannon {
cfg.RollupRpc = validRollupRpc
}
return cfg return cfg
} }
...@@ -118,6 +122,19 @@ func TestMaxConcurrency(t *testing.T) { ...@@ -118,6 +122,19 @@ func TestMaxConcurrency(t *testing.T) {
}) })
} }
func TestHttpPollInterval(t *testing.T) {
t.Run("Default", func(t *testing.T) {
config := validConfig(TraceTypeAlphabet)
require.EqualValues(t, DefaultPollInterval, config.PollInterval)
})
}
func TestRollupRpcRequired(t *testing.T) {
config := validConfig(TraceTypeOutputCannon)
config.RollupRpc = ""
require.ErrorIs(t, config.Check(), ErrMissingRollupRpc)
}
func TestCannonL2Required(t *testing.T) { func TestCannonL2Required(t *testing.T) {
config := validConfig(TraceTypeCannon) config := validConfig(TraceTypeCannon)
config.CannonL2 = "" config.CannonL2 = ""
......
...@@ -70,6 +70,17 @@ var ( ...@@ -70,6 +70,17 @@ var (
EnvVars: prefixEnvVars("MAX_CONCURRENCY"), EnvVars: prefixEnvVars("MAX_CONCURRENCY"),
Value: uint(runtime.NumCPU()), Value: uint(runtime.NumCPU()),
} }
HTTPPollInterval = &cli.DurationFlag{
Name: "http-poll-interval",
Usage: "Polling interval for latest-block subscription when using an HTTP RPC provider.",
EnvVars: prefixEnvVars("HTTP_POLL_INTERVAL"),
Value: config.DefaultPollInterval,
}
RollupRpcFlag = &cli.StringFlag{
Name: "rollup-rpc",
Usage: "HTTP provider URL for the rollup node",
EnvVars: prefixEnvVars("ROLLUP_RPC"),
}
AlphabetFlag = &cli.StringFlag{ AlphabetFlag = &cli.StringFlag{
Name: "alphabet", Name: "alphabet",
Usage: "Correct Alphabet Trace (alphabet trace type only)", Usage: "Correct Alphabet Trace (alphabet trace type only)",
...@@ -77,7 +88,10 @@ var ( ...@@ -77,7 +88,10 @@ var (
} }
CannonNetworkFlag = &cli.StringFlag{ CannonNetworkFlag = &cli.StringFlag{
Name: "cannon-network", Name: "cannon-network",
Usage: fmt.Sprintf("Predefined network selection. Available networks: %s (cannon trace type only)", strings.Join(chaincfg.AvailableNetworks(), ", ")), Usage: fmt.Sprintf(
"Predefined network selection. Available networks: %s (cannon trace type only)",
strings.Join(chaincfg.AvailableNetworks(), ", "),
),
EnvVars: prefixEnvVars("CANNON_NETWORK"), EnvVars: prefixEnvVars("CANNON_NETWORK"),
} }
CannonRollupConfigFlag = &cli.StringFlag{ CannonRollupConfigFlag = &cli.StringFlag{
...@@ -142,6 +156,8 @@ var requiredFlags = []cli.Flag{ ...@@ -142,6 +156,8 @@ var requiredFlags = []cli.Flag{
// optionalFlags is a list of unchecked cli flags // optionalFlags is a list of unchecked cli flags
var optionalFlags = []cli.Flag{ var optionalFlags = []cli.Flag{
MaxConcurrencyFlag, MaxConcurrencyFlag,
HTTPPollInterval,
RollupRpcFlag,
AlphabetFlag, AlphabetFlag,
GameAllowlistFlag, GameAllowlistFlag,
CannonNetworkFlag, CannonNetworkFlag,
...@@ -168,15 +184,7 @@ func init() { ...@@ -168,15 +184,7 @@ func init() {
// Flags contains the list of configuration options available to the binary. // Flags contains the list of configuration options available to the binary.
var Flags []cli.Flag var Flags []cli.Flag
func CheckRequired(ctx *cli.Context) error { func CheckCannonFlags(ctx *cli.Context) error {
for _, f := range requiredFlags {
if !ctx.IsSet(f.Names()[0]) {
return fmt.Errorf("flag %s is required", f.Names()[0])
}
}
gameType := config.TraceType(strings.ToLower(ctx.String(TraceTypeFlag.Name)))
switch gameType {
case config.TraceTypeCannon:
if !ctx.IsSet(CannonNetworkFlag.Name) && if !ctx.IsSet(CannonNetworkFlag.Name) &&
!(ctx.IsSet(CannonRollupConfigFlag.Name) && ctx.IsSet(CannonL2GenesisFlag.Name)) { !(ctx.IsSet(CannonRollupConfigFlag.Name) && ctx.IsSet(CannonL2GenesisFlag.Name)) {
return fmt.Errorf("flag %v or %v and %v is required", return fmt.Errorf("flag %v or %v and %v is required",
...@@ -199,10 +207,32 @@ func CheckRequired(ctx *cli.Context) error { ...@@ -199,10 +207,32 @@ func CheckRequired(ctx *cli.Context) error {
if !ctx.IsSet(CannonL2Flag.Name) { if !ctx.IsSet(CannonL2Flag.Name) {
return fmt.Errorf("flag %s is required", CannonL2Flag.Name) return fmt.Errorf("flag %s is required", CannonL2Flag.Name)
} }
return nil
}
func CheckRequired(ctx *cli.Context) error {
for _, f := range requiredFlags {
if !ctx.IsSet(f.Names()[0]) {
return fmt.Errorf("flag %s is required", f.Names()[0])
}
}
gameType := config.TraceType(strings.ToLower(ctx.String(TraceTypeFlag.Name)))
switch gameType {
case config.TraceTypeCannon:
if err := CheckCannonFlags(ctx); err != nil {
return err
}
case config.TraceTypeAlphabet: case config.TraceTypeAlphabet:
if !ctx.IsSet(AlphabetFlag.Name) { if !ctx.IsSet(AlphabetFlag.Name) {
return fmt.Errorf("flag %s is required", "alphabet") return fmt.Errorf("flag %s is required", "alphabet")
} }
case config.TraceTypeOutputCannon:
if err := CheckCannonFlags(ctx); err != nil {
return err
}
if !ctx.IsSet(RollupRpcFlag.Name) {
return fmt.Errorf("flag %s is required", RollupRpcFlag.Name)
}
default: default:
return fmt.Errorf("invalid trace type. must be one of %v", config.TraceTypes) return fmt.Errorf("invalid trace type. must be one of %v", config.TraceTypes)
} }
...@@ -247,6 +277,8 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) { ...@@ -247,6 +277,8 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) {
GameAllowlist: allowedGames, GameAllowlist: allowedGames,
GameWindow: ctx.Duration(GameWindowFlag.Name), GameWindow: ctx.Duration(GameWindowFlag.Name),
MaxConcurrency: maxConcurrency, MaxConcurrency: maxConcurrency,
PollInterval: ctx.Duration(HTTPPollInterval.Name),
RollupRpc: ctx.String(RollupRpcFlag.Name),
AlphabetTrace: ctx.String(AlphabetFlag.Name), AlphabetTrace: ctx.String(AlphabetFlag.Name),
CannonNetwork: ctx.String(CannonNetworkFlag.Name), CannonNetwork: ctx.String(CannonNetworkFlag.Name),
CannonRollupConfigPath: ctx.String(CannonRollupConfigFlag.Name), CannonRollupConfigPath: ctx.String(CannonRollupConfigFlag.Name),
......
...@@ -4,11 +4,13 @@ import ( ...@@ -4,11 +4,13 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"sync"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/solver" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/solver"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types"
"github.com/ethereum-optimism/optimism/op-challenger/metrics" "github.com/ethereum-optimism/optimism/op-challenger/metrics"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
...@@ -17,6 +19,8 @@ import ( ...@@ -17,6 +19,8 @@ import (
type Responder interface { type Responder interface {
CallResolve(ctx context.Context) (gameTypes.GameStatus, error) CallResolve(ctx context.Context) (gameTypes.GameStatus, error)
Resolve(ctx context.Context) error Resolve(ctx context.Context) error
CallResolveClaim(ctx context.Context, claimIdx uint64) error
ResolveClaim(ctx context.Context, claimIdx uint64) error
PerformAction(ctx context.Context, action types.Action) error PerformAction(ctx context.Context, action types.Action) error
} }
...@@ -66,7 +70,12 @@ func (a *Agent) Act(ctx context.Context) error { ...@@ -66,7 +70,12 @@ func (a *Agent) Act(ctx context.Context) error {
// Perform the actions // Perform the actions
for _, action := range actions { for _, action := range actions {
log := a.log.New("action", action.Type, "is_attack", action.IsAttack, "parent", action.ParentIdx, "value", action.Value) log := a.log.New("action", action.Type, "is_attack", action.IsAttack, "parent", action.ParentIdx)
if action.Type == types.ActionTypeStep {
log = log.New("prestate", common.Bytes2Hex(action.PreState), "proof", common.Bytes2Hex(action.ProofData))
} else {
log = log.New("value", action.Value)
}
if action.OracleData != nil { if action.OracleData != nil {
a.log.Info("Updating oracle data", "oracleKey", action.OracleData.OracleKey, "oracleData", action.OracleData.OracleData) a.log.Info("Updating oracle data", "oracleKey", action.OracleData.OracleKey, "oracleData", action.OracleData.OracleData)
...@@ -106,6 +115,10 @@ func (a *Agent) shouldResolve(status gameTypes.GameStatus) bool { ...@@ -106,6 +115,10 @@ func (a *Agent) shouldResolve(status gameTypes.GameStatus) bool {
// tryResolve resolves the game if it is in a winning state // tryResolve resolves the game if it is in a winning state
// Returns true if the game is resolvable (regardless of whether it was actually resolved) // Returns true if the game is resolvable (regardless of whether it was actually resolved)
func (a *Agent) tryResolve(ctx context.Context) bool { func (a *Agent) tryResolve(ctx context.Context) bool {
if err := a.resolveClaims(ctx); err != nil {
a.log.Error("Failed to resolve claims", "err", err)
return false
}
status, err := a.responder.CallResolve(ctx) status, err := a.responder.CallResolve(ctx)
if err != nil || status == gameTypes.GameStatusInProgress { if err != nil || status == gameTypes.GameStatusInProgress {
return false return false
...@@ -120,6 +133,60 @@ func (a *Agent) tryResolve(ctx context.Context) bool { ...@@ -120,6 +133,60 @@ func (a *Agent) tryResolve(ctx context.Context) bool {
return true return true
} }
var errNoResolvableClaims = errors.New("no resolvable claims")
func (a *Agent) tryResolveClaims(ctx context.Context) error {
claims, err := a.loader.FetchClaims(ctx)
if err != nil {
return fmt.Errorf("failed to fetch claims: %w", err)
}
if len(claims) == 0 {
return errNoResolvableClaims
}
var resolvableClaims []int64
for _, claim := range claims {
a.log.Debug("checking if claim is resolvable", "claimIdx", claim.ContractIndex)
if err := a.responder.CallResolveClaim(ctx, uint64(claim.ContractIndex)); err == nil {
a.log.Info("Resolving claim", "claimIdx", claim.ContractIndex)
resolvableClaims = append(resolvableClaims, int64(claim.ContractIndex))
}
}
a.log.Info("Resolving claims", "numClaims", len(resolvableClaims))
if len(resolvableClaims) == 0 {
return errNoResolvableClaims
}
var wg sync.WaitGroup
wg.Add(len(resolvableClaims))
for _, claimIdx := range resolvableClaims {
claimIdx := claimIdx
go func() {
defer wg.Done()
err := a.responder.ResolveClaim(ctx, uint64(claimIdx))
if err != nil {
a.log.Error("Failed to resolve claim", "err", err)
}
}()
}
wg.Wait()
return nil
}
func (a *Agent) resolveClaims(ctx context.Context) error {
for {
err := a.tryResolveClaims(ctx)
switch err {
case errNoResolvableClaims:
return nil
case nil:
continue
default:
return err
}
}
}
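resolveClaims keeps draining until tryResolveClaims reports the sentinel errNoResolvableClaims, resolving each batch in parallel before looking for newly resolvable claims. The same run-until-sentinel shape in isolation (toy code, not the op-challenger types):

package main

import (
	"errors"
	"fmt"
)

var errNothingLeft = errors.New("nothing left to do")

// drain repeatedly runs step until it reports the sentinel error,
// mirroring how resolveClaims loops over tryResolveClaims.
func drain(step func() error) error {
	for {
		switch err := step(); err {
		case errNothingLeft:
			return nil // all work consumed
		case nil:
			continue // a batch was processed; look again
		default:
			return err // real failure, bubble up
		}
	}
}

func main() {
	remaining := 3
	err := drain(func() error {
		if remaining == 0 {
			return errNothingLeft
		}
		remaining--
		fmt.Println("processed a batch, remaining:", remaining)
		return nil
	})
	fmt.Println("done:", err)
}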
// newGameFromContracts initializes a new game state from the state in the contract // newGameFromContracts initializes a new game state from the state in the contract
func (a *Agent) newGameFromContracts(ctx context.Context) (types.Game, error) { func (a *Agent) newGameFromContracts(ctx context.Context) (types.Game, error) {
claims, err := a.loader.FetchClaims(ctx) claims, err := a.loader.FetchClaims(ctx)
......
...@@ -77,7 +77,7 @@ func TestDoNotMakeMovesWhenGameIsResolvable(t *testing.T) { ...@@ -77,7 +77,7 @@ func TestDoNotMakeMovesWhenGameIsResolvable(t *testing.T) {
require.NoError(t, agent.Act(ctx)) require.NoError(t, agent.Act(ctx))
require.Equal(t, 1, responder.callResolveCount, "should check if game is resolvable") require.Equal(t, 1, responder.callResolveCount, "should check if game is resolvable")
require.Zero(t, claimLoader.callCount, "should not fetch claims for resolvable game") require.Equal(t, 1, claimLoader.callCount, "should fetch claims once for resolveClaim")
if test.shouldResolve { if test.shouldResolve {
require.EqualValues(t, 1, responder.resolveCount, "should resolve winning game") require.EqualValues(t, 1, responder.resolveCount, "should resolve winning game")
...@@ -92,6 +92,7 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) { ...@@ -92,6 +92,7 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) {
// Checks that if the game isn't resolvable, that the agent continues on to start checking claims // Checks that if the game isn't resolvable, that the agent continues on to start checking claims
agent, claimLoader, responder := setupTestAgent(t, false) agent, claimLoader, responder := setupTestAgent(t, false)
responder.callResolveErr = errors.New("game is not resolvable") responder.callResolveErr = errors.New("game is not resolvable")
responder.callResolveClaimErr = errors.New("claim is not resolvable")
depth := 4 depth := 4
claimBuilder := test.NewClaimBuilder(t, depth, alphabet.NewTraceProvider("abcdefg", uint64(depth))) claimBuilder := test.NewClaimBuilder(t, depth, alphabet.NewTraceProvider("abcdefg", uint64(depth)))
...@@ -101,7 +102,9 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) { ...@@ -101,7 +102,9 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) {
require.NoError(t, agent.Act(context.Background())) require.NoError(t, agent.Act(context.Background()))
require.EqualValues(t, 1, claimLoader.callCount, "should load claims for unresolvable game") require.EqualValues(t, 2, claimLoader.callCount, "should load claims for unresolvable game")
require.EqualValues(t, responder.callResolveClaimCount, 1, "should check if claim is resolvable")
require.Zero(t, responder.resolveClaimCount, "should not send resolveClaim")
} }
func setupTestAgent(t *testing.T, agreeWithProposedOutput bool) (*Agent, *stubClaimLoader, *stubResponder) { func setupTestAgent(t *testing.T, agreeWithProposedOutput bool) (*Agent, *stubClaimLoader, *stubResponder) {
...@@ -132,6 +135,10 @@ type stubResponder struct { ...@@ -132,6 +135,10 @@ type stubResponder struct {
resolveCount int resolveCount int
resolveErr error resolveErr error
callResolveClaimCount int
callResolveClaimErr error
resolveClaimCount int
} }
func (s *stubResponder) CallResolve(ctx context.Context) (gameTypes.GameStatus, error) { func (s *stubResponder) CallResolve(ctx context.Context) (gameTypes.GameStatus, error) {
...@@ -144,8 +151,18 @@ func (s *stubResponder) Resolve(ctx context.Context) error { ...@@ -144,8 +151,18 @@ func (s *stubResponder) Resolve(ctx context.Context) error {
return s.resolveErr return s.resolveErr
} }
func (s *stubResponder) CallResolveClaim(ctx context.Context, claimIdx uint64) error {
s.callResolveClaimCount++
return s.callResolveClaimErr
}
func (s *stubResponder) ResolveClaim(ctx context.Context, claimIdx uint64) error {
s.resolveClaimCount++
return nil
}
func (s *stubResponder) PerformAction(ctx context.Context, response types.Action) error { func (s *stubResponder) PerformAction(ctx context.Context, response types.Action) error {
panic("Not implemented") return nil
} }
type stubUpdater struct { type stubUpdater struct {
......
...@@ -94,6 +94,34 @@ func (r *FaultResponder) Resolve(ctx context.Context) error { ...@@ -94,6 +94,34 @@ func (r *FaultResponder) Resolve(ctx context.Context) error {
return r.sendTxAndWait(ctx, txData) return r.sendTxAndWait(ctx, txData)
} }
// buildResolveClaimData creates the transaction data for the ResolveClaim function.
func (r *FaultResponder) buildResolveClaimData(ctx context.Context, claimIdx uint64) ([]byte, error) {
return r.fdgAbi.Pack("resolveClaim", big.NewInt(int64(claimIdx)))
}
// CallResolveClaim determines if the resolveClaim function on the fault dispute game contract
// would succeed.
func (r *FaultResponder) CallResolveClaim(ctx context.Context, claimIdx uint64) error {
txData, err := r.buildResolveClaimData(ctx, claimIdx)
if err != nil {
return err
}
_, err = r.txMgr.Call(ctx, ethereum.CallMsg{
To: &r.fdgAddr,
Data: txData,
}, nil)
return err
}
// ResolveClaim executes a resolveClaim transaction to resolve a claim in the fault dispute game.
func (r *FaultResponder) ResolveClaim(ctx context.Context, claimIdx uint64) error {
txData, err := r.buildResolveClaimData(ctx, claimIdx)
if err != nil {
return err
}
return r.sendTxAndWait(ctx, txData)
}
func (r *FaultResponder) PerformAction(ctx context.Context, action types.Action) error { func (r *FaultResponder) PerformAction(ctx context.Context, action types.Action) error {
var txData []byte var txData []byte
var err error var err error
......
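CallResolveClaim is the eth_call dry run the agent uses to filter claims before ResolveClaim actually spends gas. A hedged sketch of that filter step (the two-method interface mirrors the Responder interface from agent.go; the helper and fake below are illustrative only):

package main

import (
	"context"
	"fmt"
)

// claimResolver is the subset of the Responder interface exercised here.
type claimResolver interface {
	CallResolveClaim(ctx context.Context, claimIdx uint64) error
	ResolveClaim(ctx context.Context, claimIdx uint64) error
}

// resolveIfPossible dry-runs resolveClaim and only sends a transaction when the call succeeds.
func resolveIfPossible(ctx context.Context, r claimResolver, claimIdx uint64) error {
	if err := r.CallResolveClaim(ctx, claimIdx); err != nil {
		return nil // not resolvable yet; skip without spending gas
	}
	return r.ResolveClaim(ctx, claimIdx)
}

type fakeResolver struct{ resolvable bool }

func (f fakeResolver) CallResolveClaim(context.Context, uint64) error {
	if f.resolvable {
		return nil
	}
	return fmt.Errorf("claim not resolvable")
}

func (f fakeResolver) ResolveClaim(context.Context, uint64) error {
	fmt.Println("resolveClaim transaction sent")
	return nil
}

func main() {
	_ = resolveIfPossible(context.Background(), fakeResolver{resolvable: true}, 0)
	_ = resolveIfPossible(context.Background(), fakeResolver{resolvable: false}, 1)
}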
...@@ -73,6 +73,40 @@ func TestResolve(t *testing.T) { ...@@ -73,6 +73,40 @@ func TestResolve(t *testing.T) {
}) })
} }
func TestCallResolveClaim(t *testing.T) {
t.Run("SendFails", func(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t)
mockTxMgr.callFails = true
err := responder.CallResolveClaim(context.Background(), 0)
require.ErrorIs(t, err, mockCallError)
require.Equal(t, 0, mockTxMgr.calls)
})
t.Run("Success", func(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t)
err := responder.CallResolveClaim(context.Background(), 0)
require.NoError(t, err)
require.Equal(t, 1, mockTxMgr.calls)
})
}
func TestResolveClaim(t *testing.T) {
t.Run("SendFails", func(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t)
mockTxMgr.sendFails = true
err := responder.ResolveClaim(context.Background(), 0)
require.ErrorIs(t, err, mockSendError)
require.Equal(t, 0, mockTxMgr.sends)
})
t.Run("Success", func(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t)
err := responder.ResolveClaim(context.Background(), 0)
require.NoError(t, err)
require.Equal(t, 1, mockTxMgr.sends)
})
}
// TestRespond tests the [Responder.Respond] method. // TestRespond tests the [Responder.Respond] method.
func TestPerformAction(t *testing.T) { func TestPerformAction(t *testing.T) {
t.Run("send fails", func(t *testing.T) { t.Run("send fails", func(t *testing.T) {
......
...@@ -48,7 +48,10 @@ func (s *GameSolver) calculateStep(ctx context.Context, game types.Game, claim t ...@@ -48,7 +48,10 @@ func (s *GameSolver) calculateStep(ctx context.Context, game types.Game, claim t
if game.AgreeWithClaimLevel(claim) { if game.AgreeWithClaimLevel(claim) {
return nil, nil return nil, nil
} }
step, err := s.claimSolver.AttemptStep(ctx, claim, game.AgreeWithClaimLevel(claim)) step, err := s.claimSolver.AttemptStep(ctx, game, claim)
if err == ErrStepIgnoreInvalidPath {
return nil, nil
}
if err != nil { if err != nil {
return nil, err return nil, err
} }
...@@ -63,11 +66,14 @@ func (s *GameSolver) calculateStep(ctx context.Context, game types.Game, claim t ...@@ -63,11 +66,14 @@ func (s *GameSolver) calculateStep(ctx context.Context, game types.Game, claim t
} }
func (s *GameSolver) calculateMove(ctx context.Context, game types.Game, claim types.Claim) (*types.Action, error) { func (s *GameSolver) calculateMove(ctx context.Context, game types.Game, claim types.Claim) (*types.Action, error) {
move, err := s.claimSolver.NextMove(ctx, claim, game.AgreeWithClaimLevel(claim)) if game.AgreeWithClaimLevel(claim) {
return nil, nil
}
move, err := s.claimSolver.NextMove(ctx, claim, game)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to calculate next move for claim index %v: %w", claim.ContractIndex, err) return nil, fmt.Errorf("failed to calculate next move for claim index %v: %w", claim.ContractIndex, err)
} }
if move == nil || game.IsDuplicate(move.ClaimData) { if move == nil || game.IsDuplicate(*move) {
return nil, nil return nil, nil
} }
return &types.Action{ return &types.Action{
......
...@@ -48,7 +48,6 @@ func TestCalculateNextActions(t *testing.T) { ...@@ -48,7 +48,6 @@ func TestCalculateNextActions(t *testing.T) {
rootClaimCorrect: true, rootClaimCorrect: true,
setupGame: func(builder *faulttest.GameBuilder) {}, setupGame: func(builder *faulttest.GameBuilder) {},
}, },
{ {
name: "DoNotPerformDuplicateMoves", name: "DoNotPerformDuplicateMoves",
agreeWithOutputRoot: true, agreeWithOutputRoot: true,
...@@ -93,16 +92,15 @@ func TestCalculateNextActions(t *testing.T) { ...@@ -93,16 +92,15 @@ func TestCalculateNextActions(t *testing.T) {
maliciousStateHash := common.Hash{0x01, 0xaa} maliciousStateHash := common.Hash{0x01, 0xaa}
// Dishonest actor counters their own claims to set up a situation with an invalid prestate // Dishonest actor counters their own claims to set up a situation with an invalid prestate
// The honest actor should attack all claims that support the root claim (disagree with the output root) // The honest actor should ignore path created by the dishonest actor, only supporting its own attack on the root claim
builder.Seq().ExpectAttack(). // This expected action is the winning move. honestMove := builder.Seq().AttackCorrect() // This expected action is the winning move.
Attack(maliciousStateHash). dishonestMove := honestMove.Attack(maliciousStateHash)
Defend(maliciousStateHash).ExpectAttack(). // The expected action by the honest actor
Attack(maliciousStateHash). dishonestMove.ExpectAttack()
Attack(maliciousStateHash).ExpectStepAttack() // The honest actor will ignore this poisoned path
dishonestMove.
// The attempt to step against our malicious leaf node will fail because the pre-state won't match our Defend(maliciousStateHash).
// malicious state hash. However, it is the very first expected action, attacking the root claim with Attack(maliciousStateHash)
// the correct hash that wins the game since it will be the left-most uncountered claim.
}, },
}, },
} }
......
...@@ -13,7 +13,6 @@ var rules = []actionRule{ ...@@ -13,7 +13,6 @@ var rules = []actionRule{
parentMustExist, parentMustExist,
onlyStepAtMaxDepth, onlyStepAtMaxDepth,
onlyMoveBeforeMaxDepth, onlyMoveBeforeMaxDepth,
onlyCounterClaimsAtDisagreeingLevels,
doNotDuplicateExistingMoves, doNotDuplicateExistingMoves,
doNotDefendRootClaim, doNotDefendRootClaim,
} }
...@@ -57,20 +56,12 @@ func onlyMoveBeforeMaxDepth(game types.Game, action types.Action) error { ...@@ -57,20 +56,12 @@ func onlyMoveBeforeMaxDepth(game types.Game, action types.Action) error {
return nil return nil
} }
func onlyCounterClaimsAtDisagreeingLevels(game types.Game, action types.Action) error {
parentClaim := game.Claims()[action.ParentIdx]
if game.AgreeWithClaimLevel(parentClaim) {
return fmt.Errorf("countering a claim at depth %v that supports our view of the root", parentClaim.Position.Depth())
}
return nil
}
func doNotDuplicateExistingMoves(game types.Game, action types.Action) error { func doNotDuplicateExistingMoves(game types.Game, action types.Action) error {
newClaimData := types.ClaimData{ newClaimData := types.ClaimData{
Value: action.Value, Value: action.Value,
Position: resultingPosition(game, action), Position: resultingPosition(game, action),
} }
if game.IsDuplicate(newClaimData) { if game.IsDuplicate(types.Claim{ClaimData: newClaimData, ParentContractIndex: action.ParentIdx}) {
return fmt.Errorf("creating duplicate claim at %v with value %v", newClaimData.Position.ToGIndex(), newClaimData.Value) return fmt.Errorf("creating duplicate claim at %v with value %v", newClaimData.Position.ToGIndex(), newClaimData.Value)
} }
return nil return nil
......
...@@ -13,6 +13,7 @@ import ( ...@@ -13,6 +13,7 @@ import (
var ( var (
ErrStepNonLeafNode = errors.New("cannot step on non-leaf claims") ErrStepNonLeafNode = errors.New("cannot step on non-leaf claims")
ErrStepAgreedClaim = errors.New("cannot step on claims we agree with") ErrStepAgreedClaim = errors.New("cannot step on claims we agree with")
ErrStepIgnoreInvalidPath = errors.New("cannot step on claims that dispute invalid paths")
) )
// claimSolver uses a [TraceProvider] to determine the moves to make in a dispute game. // claimSolver uses a [TraceProvider] to determine the moves to make in a dispute game.
...@@ -30,10 +31,7 @@ func newClaimSolver(gameDepth int, traceProvider types.TraceProvider) *claimSolv ...@@ -30,10 +31,7 @@ func newClaimSolver(gameDepth int, traceProvider types.TraceProvider) *claimSolv
} }
// NextMove returns the next move to make given the current state of the game. // NextMove returns the next move to make given the current state of the game.
func (s *claimSolver) NextMove(ctx context.Context, claim types.Claim, agreeWithClaimLevel bool) (*types.Claim, error) { func (s *claimSolver) NextMove(ctx context.Context, claim types.Claim, game types.Game) (*types.Claim, error) {
if agreeWithClaimLevel {
return nil, nil
}
if claim.Depth() == s.gameDepth { if claim.Depth() == s.gameDepth {
return nil, types.ErrGameDepthReached return nil, types.ErrGameDepthReached
} }
...@@ -41,6 +39,24 @@ func (s *claimSolver) NextMove(ctx context.Context, claim types.Claim, agreeWith ...@@ -41,6 +39,24 @@ func (s *claimSolver) NextMove(ctx context.Context, claim types.Claim, agreeWith
if err != nil { if err != nil {
return nil, err return nil, err
} }
	// Before challenging this claim, first check whether the move is actually warranted.
	// If the parent claim is on a dishonest path, then we would already have countered it, so we don't move.
// Avoiding dishonest paths ensures that there's always a valid claim available to support ours during step.
if !claim.IsRoot() {
parent, err := game.GetParent(claim)
if err != nil {
return nil, err
}
agreeWithParent, err := s.agreeWithClaimPath(ctx, game, parent)
if err != nil {
return nil, err
}
if !agreeWithParent {
return nil, nil
}
}
if agree { if agree {
return s.defend(ctx, claim) return s.defend(ctx, claim)
} else { } else {
...@@ -58,13 +74,25 @@ type StepData struct { ...@@ -58,13 +74,25 @@ type StepData struct {
// AttemptStep determines what step should occur for a given leaf claim. // AttemptStep determines what step should occur for a given leaf claim.
// An error will be returned if the claim is not at the max depth. // An error will be returned if the claim is not at the max depth.
func (s *claimSolver) AttemptStep(ctx context.Context, claim types.Claim, agreeWithClaimLevel bool) (StepData, error) { // Returns ErrStepIgnoreInvalidPath if the claim disputes an invalid path
func (s *claimSolver) AttemptStep(ctx context.Context, game types.Game, claim types.Claim) (StepData, error) {
if claim.Depth() != s.gameDepth { if claim.Depth() != s.gameDepth {
return StepData{}, ErrStepNonLeafNode return StepData{}, ErrStepNonLeafNode
} }
if agreeWithClaimLevel {
return StepData{}, ErrStepAgreedClaim // Step only on claims that dispute a valid path
parent, err := game.GetParent(claim)
if err != nil {
return StepData{}, err
}
parentValid, err := s.agreeWithClaimPath(ctx, game, parent)
if err != nil {
return StepData{}, err
}
if !parentValid {
return StepData{}, ErrStepIgnoreInvalidPath
} }
claimCorrect, err := s.agreeWithClaim(ctx, claim.ClaimData) claimCorrect, err := s.agreeWithClaim(ctx, claim.ClaimData)
if err != nil { if err != nil {
return StepData{}, err return StepData{}, err
...@@ -142,3 +170,26 @@ func (s *claimSolver) traceAtPosition(ctx context.Context, p types.Position) (co ...@@ -142,3 +170,26 @@ func (s *claimSolver) traceAtPosition(ctx context.Context, p types.Position) (co
hash, err := s.trace.Get(ctx, index) hash, err := s.trace.Get(ctx, index)
return hash, err return hash, err
} }
// agreeWithClaimPath returns true if every other claim in the path to the root is correct according to the internal [TraceProvider].
func (s *claimSolver) agreeWithClaimPath(ctx context.Context, game types.Game, claim types.Claim) (bool, error) {
agree, err := s.agreeWithClaim(ctx, claim.ClaimData)
if err != nil {
return false, err
}
if !agree {
return false, nil
}
if claim.IsRoot() || claim.Parent.IsRootPosition() {
return true, nil
}
parent, err := game.GetParent(claim)
if err != nil {
return false, err
}
grandParent, err := game.GetParent(parent)
if err != nil {
return false, err
}
return s.agreeWithClaimPath(ctx, game, grandParent)
}
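agreeWithClaimPath walks towards the root two levels at a time: it checks the claim it was handed, then recurses on the grandparent, because the intervening claim belongs to the opposing side. A toy version of that alternating walk over a plain slice (indices stand in for positions on the path; none of the real types are used):

package main

import "fmt"

// pathAgreesEveryOther checks entry idx, then idx-2, idx-4, ... down to the root,
// mirroring agreeWithClaimPath's claim -> grandparent recursion.
func pathAgreesEveryOther(agree []bool, idx int) bool {
	if !agree[idx] {
		return false
	}
	if idx <= 1 { // claim is the root, or its parent sits at the root position
		return true
	}
	return pathAgreesEveryOther(agree, idx-2)
}

func main() {
	// agree[i] says whether we agree with the claim at depth i on this path (root at index 0).
	path := []bool{true, false, true, false, true}
	fmt.Println(pathAgreesEveryOther(path, 4)) // checks 4, 2, 0 -> true
	fmt.Println(pathAgreesEveryOther(path, 3)) // checks 3 -> false
}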
...@@ -79,12 +79,13 @@ func (c *ClaimBuilder) claim(idx uint64, correct bool) common.Hash { ...@@ -79,12 +79,13 @@ func (c *ClaimBuilder) claim(idx uint64, correct bool) common.Hash {
func (c *ClaimBuilder) CreateRootClaim(correct bool) types.Claim { func (c *ClaimBuilder) CreateRootClaim(correct bool) types.Claim {
value := c.claim((1<<c.maxDepth)-1, correct) value := c.claim((1<<c.maxDepth)-1, correct)
return types.Claim{ claim := types.Claim{
ClaimData: types.ClaimData{ ClaimData: types.ClaimData{
Value: value, Value: value,
Position: types.NewPosition(0, 0), Position: types.NewPosition(0, 0),
}, },
} }
return claim
} }
func (c *ClaimBuilder) CreateLeafClaim(traceIndex uint64, correct bool) types.Claim { func (c *ClaimBuilder) CreateLeafClaim(traceIndex uint64, correct bool) types.Claim {
......
...@@ -53,8 +53,6 @@ type CannonTraceProvider struct { ...@@ -53,8 +53,6 @@ type CannonTraceProvider struct {
// lastStep stores the last step in the actual trace if known. 0 indicates unknown. // lastStep stores the last step in the actual trace if known. 0 indicates unknown.
// Cached as an optimisation to avoid repeatedly attempting to execute beyond the end of the trace. // Cached as an optimisation to avoid repeatedly attempting to execute beyond the end of the trace.
lastStep uint64 lastStep uint64
// lastProof stores the proof data to use for all steps extended beyond lastStep
lastProof *proofData
} }
func NewTraceProvider(ctx context.Context, logger log.Logger, m CannonMetricer, cfg *config.Config, l1Client bind.ContractCaller, dir string, gameAddr common.Address) (*CannonTraceProvider, error) { func NewTraceProvider(ctx context.Context, logger log.Logger, m CannonMetricer, cfg *config.Config, l1Client bind.ContractCaller, dir string, gameAddr common.Address) (*CannonTraceProvider, error) {
...@@ -139,23 +137,18 @@ func (p *CannonTraceProvider) AbsolutePreStateCommitment(ctx context.Context) (c ...@@ -139,23 +137,18 @@ func (p *CannonTraceProvider) AbsolutePreStateCommitment(ctx context.Context) (c
// loadProof will attempt to load or generate the proof data at the specified index // loadProof will attempt to load or generate the proof data at the specified index
// If the requested index is beyond the end of the actual trace it is extended with no-op instructions. // If the requested index is beyond the end of the actual trace it is extended with no-op instructions.
func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofData, error) { func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofData, error) {
if p.lastProof != nil && i > p.lastStep {
// If the requested index is after the last step in the actual trace, extend the final no-op step
return p.lastProof, nil
}
// Attempt to read the last step from disk cache // Attempt to read the last step from disk cache
if p.lastProof == nil && p.lastStep == 0 { if p.lastStep == 0 {
step, err := ReadLastStep(p.dir) step, err := readLastStep(p.dir)
if err != nil { if err != nil {
p.logger.Warn("Failed to read last step from disk cache", "err", err) p.logger.Warn("Failed to read last step from disk cache", "err", err)
} else { } else {
p.lastStep = step p.lastStep = step
// If the last step is tracked, set i to the last step
// to read the correct proof from disk.
if i > p.lastStep {
i = step
} }
} }
// If the last step is tracked, set i to the last step to generate or load the final proof
if p.lastStep != 0 && i > p.lastStep {
i = p.lastStep
} }
path := filepath.Join(p.dir, proofsDir, fmt.Sprintf("%d.json.gz", i)) path := filepath.Join(p.dir, proofsDir, fmt.Sprintf("%d.json.gz", i))
file, err := ioutil.OpenDecompressed(path) file, err := ioutil.OpenDecompressed(path)
...@@ -183,9 +176,6 @@ func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofDa ...@@ -183,9 +176,6 @@ func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofDa
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot hash witness: %w", err) return nil, fmt.Errorf("cannot hash witness: %w", err)
} }
if err := WriteLastStep(p.dir, state.Step); err != nil {
p.logger.Warn("Failed to write last step to disk cache", "step", p.lastStep)
}
proof := &proofData{ proof := &proofData{
ClaimValue: witnessHash, ClaimValue: witnessHash,
StateData: hexutil.Bytes(witness), StateData: hexutil.Bytes(witness),
...@@ -194,7 +184,9 @@ func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofDa ...@@ -194,7 +184,9 @@ func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofDa
OracleValue: nil, OracleValue: nil,
OracleOffset: 0, OracleOffset: 0,
} }
p.lastProof = proof if err := writeLastStep(p.dir, proof, p.lastStep); err != nil {
p.logger.Warn("Failed to write last step to disk cache", "step", p.lastStep)
}
return proof, nil return proof, nil
} else { } else {
return nil, fmt.Errorf("expected proof not generated but final state was not exited, requested step %v, final state at step %v", i, state.Step) return nil, fmt.Errorf("expected proof not generated but final state was not exited, requested step %v, final state at step %v", i, state.Step)
...@@ -217,8 +209,8 @@ type diskStateCacheObj struct { ...@@ -217,8 +209,8 @@ type diskStateCacheObj struct {
Step uint64 `json:"step"` Step uint64 `json:"step"`
} }
// ReadLastStep reads the tracked last step from disk. // readLastStep reads the tracked last step from disk.
func ReadLastStep(dir string) (uint64, error) { func readLastStep(dir string) (uint64, error) {
state := diskStateCacheObj{} state := diskStateCacheObj{}
file, err := ioutil.OpenDecompressed(filepath.Join(dir, diskStateCache)) file, err := ioutil.OpenDecompressed(filepath.Join(dir, diskStateCache))
if err != nil { if err != nil {
...@@ -232,8 +224,15 @@ func ReadLastStep(dir string) (uint64, error) { ...@@ -232,8 +224,15 @@ func ReadLastStep(dir string) (uint64, error) {
return state.Step, nil return state.Step, nil
} }
// WriteLastStep writes the last step to disk as a persistent cache. // writeLastStep writes the last step and proof to disk as a persistent cache.
func WriteLastStep(dir string, step uint64) error { func writeLastStep(dir string, proof *proofData, step uint64) error {
state := diskStateCacheObj{Step: step} state := diskStateCacheObj{Step: step}
return ioutil.WriteCompressedJson(filepath.Join(dir, diskStateCache), state) lastStepFile := filepath.Join(dir, diskStateCache)
if err := ioutil.WriteCompressedJson(lastStepFile, state); err != nil {
return fmt.Errorf("failed to write last step to %v: %w", lastStepFile, err)
}
if err := ioutil.WriteCompressedJson(filepath.Join(dir, proofsDir, fmt.Sprintf("%d.json.gz", step)), proof); err != nil {
return fmt.Errorf("failed to write proof: %w", err)
}
return nil
} }
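readLastStep/writeLastStep persist a small JSON object (and now the matching proof file) compressed on disk via the op-service ioutil helpers. A rough standard-library equivalent of that compressed-JSON round trip, shown only to illustrate the on-disk format rather than the actual helpers (file name and location below are placeholders):

package main

import (
	"compress/gzip"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

type lastStepCache struct {
	Step uint64 `json:"step"`
}

// writeCompressedJSON gzips a JSON encoding of v to path.
func writeCompressedJSON(path string, v any) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	gz := gzip.NewWriter(f)
	if err := json.NewEncoder(gz).Encode(v); err != nil {
		return err
	}
	return gz.Close()
}

// readCompressedJSON decodes a gzipped JSON file at path into v.
func readCompressedJSON(path string, v any) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	gz, err := gzip.NewReader(f)
	if err != nil {
		return err
	}
	defer gz.Close()
	return json.NewDecoder(gz).Decode(v)
}

func main() {
	path := filepath.Join(os.TempDir(), "lastStep.json.gz") // placeholder name
	if err := writeCompressedJSON(path, lastStepCache{Step: 42}); err != nil {
		panic(err)
	}
	var cached lastStepCache
	if err := readCompressedJSON(path, &cached); err != nil {
		panic(err)
	}
	fmt.Println("cached last step:", cached.Step)
}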
...@@ -161,17 +161,13 @@ func TestGetStepData(t *testing.T) { ...@@ -161,17 +161,13 @@ func TestGetStepData(t *testing.T) {
ClaimValue: common.Hash{0xaa}, ClaimValue: common.Hash{0xaa},
StateData: []byte{0xbb}, StateData: []byte{0xbb},
ProofData: []byte{0xcc}, ProofData: []byte{0xcc},
OracleKey: common.Hash{0xdd}.Bytes(),
OracleValue: []byte{0xdd},
OracleOffset: 10,
} }
preimage, proof, data, err := provider.GetStepData(context.Background(), 7000) preimage, proof, data, err := provider.GetStepData(context.Background(), 7000)
require.NoError(t, err) require.NoError(t, err)
require.Contains(t, generator.generated, 10, "should have tried to generate the proof") require.Empty(t, generator.generated, "should not have to generate the proof again")
witness := generator.finalState.EncodeWitness() require.EqualValues(t, initGenerator.finalState.EncodeWitness(), preimage)
require.EqualValues(t, witness, preimage) require.Empty(t, proof)
require.Equal(t, []byte{}, proof)
require.Nil(t, data) require.Nil(t, data)
}) })
......
...@@ -23,8 +23,12 @@ type Game interface { ...@@ -23,8 +23,12 @@ type Game interface {
// Claims returns all of the claims in the game. // Claims returns all of the claims in the game.
Claims() []Claim Claims() []Claim
// IsDuplicate returns true if the provided [Claim] already exists in the game state. // GetParent returns the parent of the provided claim.
IsDuplicate(claim ClaimData) bool GetParent(claim Claim) (Claim, error)
// IsDuplicate returns true if the provided [Claim] already exists in the game state
// referencing the same parent claim
IsDuplicate(claim Claim) bool
// AgreeWithClaimLevel returns if the game state agrees with the provided claim level. // AgreeWithClaimLevel returns if the game state agrees with the provided claim level.
AgreeWithClaimLevel(claim Claim) bool AgreeWithClaimLevel(claim Claim) bool
...@@ -32,31 +36,37 @@ type Game interface { ...@@ -32,31 +36,37 @@ type Game interface {
MaxDepth() uint64 MaxDepth() uint64
} }
type claimEntry struct {
ClaimData
ParentContractIndex int
}
type extendedClaim struct { type extendedClaim struct {
self Claim self Claim
children []ClaimData children []claimEntry
} }
// gameState is a struct that represents the state of a dispute game. // gameState is a struct that represents the state of a dispute game.
// The game state implements the [Game] interface. // The game state implements the [Game] interface.
type gameState struct { type gameState struct {
agreeWithProposedOutput bool agreeWithProposedOutput bool
root ClaimData root claimEntry
claims map[ClaimData]*extendedClaim claims map[claimEntry]*extendedClaim
depth uint64 depth uint64
} }
// NewGameState returns a new game state. // NewGameState returns a new game state.
// The provided [Claim] is used as the root node. // The provided [Claim] is used as the root node.
func NewGameState(agreeWithProposedOutput bool, root Claim, depth uint64) *gameState { func NewGameState(agreeWithProposedOutput bool, root Claim, depth uint64) *gameState {
claims := make(map[ClaimData]*extendedClaim) claims := make(map[claimEntry]*extendedClaim)
claims[root.ClaimData] = &extendedClaim{ rootClaimEntry := makeClaimEntry(root)
claims[rootClaimEntry] = &extendedClaim{
self: root, self: root,
children: make([]ClaimData, 0), children: make([]claimEntry, 0),
} }
return &gameState{ return &gameState{
agreeWithProposedOutput: agreeWithProposedOutput, agreeWithProposedOutput: agreeWithProposedOutput,
root: root.ClaimData, root: rootClaimEntry,
claims: claims, claims: claims,
depth: depth, depth: depth,
} }
...@@ -87,29 +97,29 @@ func (g *gameState) PutAll(claims []Claim) error { ...@@ -87,29 +97,29 @@ func (g *gameState) PutAll(claims []Claim) error {
// Put adds a claim into the game state. // Put adds a claim into the game state.
func (g *gameState) Put(claim Claim) error { func (g *gameState) Put(claim Claim) error {
if claim.IsRoot() || g.IsDuplicate(claim.ClaimData) { if claim.IsRoot() || g.IsDuplicate(claim) {
return ErrClaimExists return ErrClaimExists
} }
parent, ok := g.claims[claim.Parent]
if !ok { parent := g.getParent(claim)
if parent == nil {
return errors.New("no parent claim") return errors.New("no parent claim")
} else {
parent.children = append(parent.children, claim.ClaimData)
} }
g.claims[claim.ClaimData] = &extendedClaim{ parent.children = append(parent.children, makeClaimEntry(claim))
g.claims[makeClaimEntry(claim)] = &extendedClaim{
self: claim, self: claim,
children: make([]ClaimData, 0), children: make([]claimEntry, 0),
} }
return nil return nil
} }
func (g *gameState) IsDuplicate(claim ClaimData) bool { func (g *gameState) IsDuplicate(claim Claim) bool {
_, ok := g.claims[claim] _, ok := g.claims[makeClaimEntry(claim)]
return ok return ok
} }
func (g *gameState) Claims() []Claim { func (g *gameState) Claims() []Claim {
queue := []ClaimData{g.root} queue := []claimEntry{g.root}
var out []Claim var out []Claim
for len(queue) > 0 { for len(queue) > 0 {
item := queue[0] item := queue[0]
...@@ -124,17 +134,34 @@ func (g *gameState) MaxDepth() uint64 { ...@@ -124,17 +134,34 @@ func (g *gameState) MaxDepth() uint64 {
return g.depth return g.depth
} }
func (g *gameState) getChildren(c ClaimData) []ClaimData { func (g *gameState) getChildren(c claimEntry) []claimEntry {
return g.claims[c].children return g.claims[c].children
} }
func (g *gameState) getParent(claim Claim) (Claim, error) { func (g *gameState) GetParent(claim Claim) (Claim, error) {
if claim.IsRoot() { parent := g.getParent(claim)
if parent == nil {
return Claim{}, ErrClaimNotFound return Claim{}, ErrClaimNotFound
} }
if parent, ok := g.claims[claim.Parent]; !ok {
return Claim{}, ErrClaimNotFound
} else {
return parent.self, nil return parent.self, nil
}
func (g *gameState) getParent(claim Claim) *extendedClaim {
if claim.IsRoot() {
return nil
}
// TODO(inphi): refactor gameState for faster parent lookups
for _, c := range g.claims {
if c.self.ContractIndex == claim.ParentContractIndex {
return c
}
}
return nil
}
func makeClaimEntry(claim Claim) claimEntry {
return claimEntry{
ClaimData: claim.ClaimData,
ParentContractIndex: claim.ParentContractIndex,
} }
} }
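The switch from keying claims by ClaimData to keying by claimEntry means the same claim value and position can now appear twice in the map as long as it counters different parents, which is what the updated IsDuplicate relies on. A stripped-down illustration of struct-valued map keys behaving that way (toy types, not the op-challenger ones):

package main

import "fmt"

type claimData struct {
	Value    string
	Position int
}

type claimEntry struct {
	claimData
	ParentContractIndex int
}

func main() {
	seen := map[claimEntry]bool{}

	a := claimEntry{claimData{"0xaa", 4}, 0} // counters parent 0
	b := claimEntry{claimData{"0xaa", 4}, 2} // same data, counters parent 2

	seen[a] = true
	fmt.Println("duplicate of a:", seen[a]) // true
	fmt.Println("duplicate of b:", seen[b]) // false: distinct parent, distinct key
}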
...@@ -25,6 +25,8 @@ func createTestClaims() (Claim, Claim, Claim, Claim) { ...@@ -25,6 +25,8 @@ func createTestClaims() (Claim, Claim, Claim, Claim) {
Position: NewPosition(1, 0), Position: NewPosition(1, 0),
}, },
Parent: root.ClaimData, Parent: root.ClaimData,
ContractIndex: 1,
ParentContractIndex: 0,
} }
middle := Claim{ middle := Claim{
ClaimData: ClaimData{ ClaimData: ClaimData{
...@@ -32,6 +34,8 @@ func createTestClaims() (Claim, Claim, Claim, Claim) { ...@@ -32,6 +34,8 @@ func createTestClaims() (Claim, Claim, Claim, Claim) {
Position: NewPosition(2, 2), Position: NewPosition(2, 2),
}, },
Parent: top.ClaimData, Parent: top.ClaimData,
ContractIndex: 2,
ParentContractIndex: 1,
} }
bottom := Claim{ bottom := Claim{
...@@ -40,6 +44,8 @@ func createTestClaims() (Claim, Claim, Claim, Claim) { ...@@ -40,6 +44,8 @@ func createTestClaims() (Claim, Claim, Claim, Claim) {
Position: NewPosition(3, 4), Position: NewPosition(3, 4),
}, },
Parent: middle.ClaimData, Parent: middle.ClaimData,
ContractIndex: 3,
ParentContractIndex: 2,
} }
return root, top, middle, bottom return root, top, middle, bottom
...@@ -52,12 +58,12 @@ func TestIsDuplicate(t *testing.T) { ...@@ -52,12 +58,12 @@ func TestIsDuplicate(t *testing.T) {
require.NoError(t, g.Put(top)) require.NoError(t, g.Put(top))
// Root + Top should be duplicates // Root + Top should be duplicates
require.True(t, g.IsDuplicate(root.ClaimData)) require.True(t, g.IsDuplicate(root))
require.True(t, g.IsDuplicate(top.ClaimData)) require.True(t, g.IsDuplicate(top))
// Middle + Bottom should not be a duplicate // Middle + Bottom should not be a duplicate
require.False(t, g.IsDuplicate(middle.ClaimData)) require.False(t, g.IsDuplicate(middle))
require.False(t, g.IsDuplicate(bottom.ClaimData)) require.False(t, g.IsDuplicate(bottom))
} }
// TestGame_Put_RootAlreadyExists tests the [Game.Put] method using a [gameState] // TestGame_Put_RootAlreadyExists tests the [Game.Put] method using a [gameState]
...@@ -104,20 +110,20 @@ func TestGame_PutAll_ParentsAndChildren(t *testing.T) { ...@@ -104,20 +110,20 @@ func TestGame_PutAll_ParentsAndChildren(t *testing.T) {
g := NewGameState(false, root, testMaxDepth) g := NewGameState(false, root, testMaxDepth)
// We should not be able to get the parent of the root claim. // We should not be able to get the parent of the root claim.
parent, err := g.getParent(root) parent, err := g.GetParent(root)
require.ErrorIs(t, err, ErrClaimNotFound) require.ErrorIs(t, err, ErrClaimNotFound)
require.Equal(t, parent, Claim{}) require.Equal(t, parent, Claim{})
// Put the rest of the claims in the state. // Put the rest of the claims in the state.
err = g.PutAll([]Claim{top, middle, bottom}) err = g.PutAll([]Claim{top, middle, bottom})
require.NoError(t, err) require.NoError(t, err)
parent, err = g.getParent(top) parent, err = g.GetParent(top)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, parent, root) require.Equal(t, parent, root)
parent, err = g.getParent(middle) parent, err = g.GetParent(middle)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, parent, top) require.Equal(t, parent, top)
parent, err = g.getParent(bottom) parent, err = g.GetParent(bottom)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, parent, middle) require.Equal(t, parent, middle)
} }
...@@ -145,28 +151,28 @@ func TestGame_Put_ParentsAndChildren(t *testing.T) { ...@@ -145,28 +151,28 @@ func TestGame_Put_ParentsAndChildren(t *testing.T) {
g := NewGameState(false, root, testMaxDepth) g := NewGameState(false, root, testMaxDepth)
// We should not be able to get the parent of the root claim. // We should not be able to get the parent of the root claim.
parent, err := g.getParent(root) parent, err := g.GetParent(root)
require.ErrorIs(t, err, ErrClaimNotFound) require.ErrorIs(t, err, ErrClaimNotFound)
require.Equal(t, parent, Claim{}) require.Equal(t, parent, Claim{})
// Put + Check Top // Put + Check Top
err = g.Put(top) err = g.Put(top)
require.NoError(t, err) require.NoError(t, err)
parent, err = g.getParent(top) parent, err = g.GetParent(top)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, parent, root) require.Equal(t, parent, root)
// Put + Check Top Middle // Put + Check Top Middle
err = g.Put(middle) err = g.Put(middle)
require.NoError(t, err) require.NoError(t, err)
parent, err = g.getParent(middle) parent, err = g.GetParent(middle)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, parent, top) require.Equal(t, parent, top)
// Put + Check Top Bottom // Put + Check Top Bottom
err = g.Put(bottom) err = g.Put(bottom)
require.NoError(t, err) require.NoError(t, err)
parent, err = g.getParent(bottom) parent, err = g.GetParent(bottom)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, parent, middle) require.Equal(t, parent, middle)
} }
...@@ -194,27 +200,3 @@ func TestGame_ClaimPairs(t *testing.T) { ...@@ -194,27 +200,3 @@ func TestGame_ClaimPairs(t *testing.T) {
claims := g.Claims() claims := g.Claims()
require.ElementsMatch(t, expected, claims) require.ElementsMatch(t, expected, claims)
} }
func TestAgreeWithClaimLevelDisagreeWithOutput(t *testing.T) {
// Setup the game state.
root, top, middle, bottom := createTestClaims()
g := NewGameState(false, root, testMaxDepth)
require.NoError(t, g.PutAll([]Claim{top, middle, bottom}))
require.True(t, g.AgreeWithClaimLevel(root))
require.False(t, g.AgreeWithClaimLevel(top))
require.True(t, g.AgreeWithClaimLevel(middle))
require.False(t, g.AgreeWithClaimLevel(bottom))
}
func TestAgreeWithClaimLevelAgreeWithOutput(t *testing.T) {
// Setup the game state.
root, top, middle, bottom := createTestClaims()
g := NewGameState(true, root, testMaxDepth)
require.NoError(t, g.PutAll([]Claim{top, middle, bottom}))
require.False(t, g.AgreeWithClaimLevel(root))
require.True(t, g.AgreeWithClaimLevel(top))
require.False(t, g.AgreeWithClaimLevel(middle))
require.True(t, g.AgreeWithClaimLevel(bottom))
}
...@@ -9,7 +9,12 @@ import ( ...@@ -9,7 +9,12 @@ import (
"github.com/ethereum-optimism/optimism/op-challenger/game/scheduler" "github.com/ethereum-optimism/optimism/op-challenger/game/scheduler"
"github.com/ethereum-optimism/optimism/op-service/clock" "github.com/ethereum-optimism/optimism/op-service/clock"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
ethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
...@@ -32,6 +37,20 @@ type gameMonitor struct { ...@@ -32,6 +37,20 @@ type gameMonitor struct {
gameWindow time.Duration gameWindow time.Duration
fetchBlockNumber blockNumberFetcher fetchBlockNumber blockNumberFetcher
allowedGames []common.Address allowedGames []common.Address
l1HeadsSub ethereum.Subscription
l1Source *headSource
}
type MinimalSubscriber interface {
EthSubscribe(ctx context.Context, channel interface{}, args ...interface{}) (ethereum.Subscription, error)
}
type headSource struct {
inner MinimalSubscriber
}
func (s *headSource) SubscribeNewHead(ctx context.Context, ch chan<- *ethTypes.Header) (ethereum.Subscription, error) {
return s.inner.EthSubscribe(ctx, ch, "newHeads")
} }
func newGameMonitor( func newGameMonitor(
...@@ -42,6 +61,7 @@ func newGameMonitor( ...@@ -42,6 +61,7 @@ func newGameMonitor(
gameWindow time.Duration, gameWindow time.Duration,
fetchBlockNumber blockNumberFetcher, fetchBlockNumber blockNumberFetcher,
allowedGames []common.Address, allowedGames []common.Address,
l1Source MinimalSubscriber,
) *gameMonitor { ) *gameMonitor {
return &gameMonitor{ return &gameMonitor{
logger: logger, logger: logger,
...@@ -51,6 +71,7 @@ func newGameMonitor( ...@@ -51,6 +71,7 @@ func newGameMonitor(
gameWindow: gameWindow, gameWindow: gameWindow,
fetchBlockNumber: fetchBlockNumber, fetchBlockNumber: fetchBlockNumber,
allowedGames: allowedGames, allowedGames: allowedGames,
l1Source: &headSource{inner: l1Source},
} }
} }
...@@ -99,29 +120,32 @@ func (m *gameMonitor) progressGames(ctx context.Context, blockNum uint64) error ...@@ -99,29 +120,32 @@ func (m *gameMonitor) progressGames(ctx context.Context, blockNum uint64) error
return nil return nil
} }
func (m *gameMonitor) MonitorGames(ctx context.Context) error { func (m *gameMonitor) onNewL1Head(ctx context.Context, sig eth.L1BlockRef) {
m.logger.Info("Monitoring fault dispute games") if err := m.progressGames(ctx, sig.Number); err != nil {
m.logger.Error("Failed to progress games", "err", err)
}
}
blockNum := uint64(0) func (m *gameMonitor) resubscribeFunction(ctx context.Context) event.ResubscribeErrFunc {
for { return func(innerCtx context.Context, err error) (event.Subscription, error) {
select {
case <-ctx.Done():
return ctx.Err()
default:
nextBlockNum, err := m.fetchBlockNumber(ctx)
if err != nil { if err != nil {
m.logger.Error("Failed to load current block number", "err", err) m.logger.Warn("resubscribing after failed L1 subscription", "err", err)
continue
}
if nextBlockNum > blockNum {
blockNum = nextBlockNum
if err := m.progressGames(ctx, nextBlockNum); err != nil {
m.logger.Error("Failed to progress games", "err", err)
} }
return eth.WatchHeadChanges(ctx, m.l1Source, m.onNewL1Head)
} }
if err := m.clock.SleepCtx(ctx, time.Second); err != nil { }
func (m *gameMonitor) MonitorGames(ctx context.Context) error {
m.l1HeadsSub = event.ResubscribeErr(time.Second*10, m.resubscribeFunction(ctx))
for {
select {
case <-ctx.Done():
return nil
case err, ok := <-m.l1HeadsSub.Err():
if !ok {
return err return err
} }
m.logger.Error("L1 subscription error", "err", err)
} }
} }
} }
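The MonitorGames rewrite above replaces block-number polling with a newHeads subscription kept alive by go-ethereum's event.ResubscribeErr. A minimal standalone sketch of that primitive (illustrative only, not the repository's code): the callback receives the error that ended the previous subscription and is retried with backoff.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/event"
)

func main() {
	failedOnce := false
	sub := event.ResubscribeErr(100*time.Millisecond, func(ctx context.Context, err error) (event.Subscription, error) {
		if err != nil {
			fmt.Println("resubscribing after:", err) // mirrors the monitor's warn log on resubscribe
		}
		return event.NewSubscription(func(quit <-chan struct{}) error {
			if !failedOnce {
				failedOnce = true
				return errors.New("transient newHeads failure") // surfaces on Err() and triggers a resubscribe
			}
			<-quit // healthy subscription: block until Unsubscribe
			return nil
		}), nil
	})
	time.Sleep(time.Second)
	sub.Unsubscribe()
}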
...@@ -2,35 +2,40 @@ package game ...@@ -2,35 +2,40 @@ package game
import ( import (
"context" "context"
"fmt"
"math/big" "math/big"
"testing" "testing"
"time" "time"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum/go-ethereum"
"github.com/ethereum-optimism/optimism/op-service/clock"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/clock"
) )
func TestMonitorMinGameTimestamp(t *testing.T) { func TestMonitorMinGameTimestamp(t *testing.T) {
t.Parallel() t.Parallel()
t.Run("zero game window returns zero", func(t *testing.T) { t.Run("zero game window returns zero", func(t *testing.T) {
monitor, _, _ := setupMonitorTest(t, []common.Address{}) monitor, _, _, _ := setupMonitorTest(t, []common.Address{})
monitor.gameWindow = time.Duration(0) monitor.gameWindow = time.Duration(0)
require.Equal(t, monitor.minGameTimestamp(), uint64(0)) require.Equal(t, monitor.minGameTimestamp(), uint64(0))
}) })
t.Run("non-zero game window with zero clock", func(t *testing.T) { t.Run("non-zero game window with zero clock", func(t *testing.T) {
monitor, _, _ := setupMonitorTest(t, []common.Address{}) monitor, _, _, _ := setupMonitorTest(t, []common.Address{})
monitor.gameWindow = time.Minute monitor.gameWindow = time.Minute
monitor.clock = clock.NewDeterministicClock(time.Unix(0, 0)) monitor.clock = clock.NewDeterministicClock(time.Unix(0, 0))
require.Equal(t, monitor.minGameTimestamp(), uint64(0)) require.Equal(t, monitor.minGameTimestamp(), uint64(0))
}) })
t.Run("minimum computed correctly", func(t *testing.T) { t.Run("minimum computed correctly", func(t *testing.T) {
monitor, _, _ := setupMonitorTest(t, []common.Address{}) monitor, _, _, _ := setupMonitorTest(t, []common.Address{})
monitor.gameWindow = time.Minute monitor.gameWindow = time.Minute
frozen := time.Unix(int64(time.Hour.Seconds()), 0) frozen := time.Unix(int64(time.Hour.Seconds()), 0)
monitor.clock = clock.NewDeterministicClock(frozen) monitor.clock = clock.NewDeterministicClock(frozen)
...@@ -39,29 +44,95 @@ func TestMonitorMinGameTimestamp(t *testing.T) { ...@@ -39,29 +44,95 @@ func TestMonitorMinGameTimestamp(t *testing.T) {
}) })
} }
func TestMonitorExitsWhenContextDone(t *testing.T) { // TestMonitorGames tests that the monitor can handle a new head event
monitor, _, _ := setupMonitorTest(t, []common.Address{{}}) // and resubscribe to new heads if the subscription errors.
func TestMonitorGames(t *testing.T) {
t.Run("Schedules games", func(t *testing.T) {
addr1 := common.Address{0xaa}
addr2 := common.Address{0xbb}
monitor, source, sched, mockHeadSource := setupMonitorTest(t, []common.Address{})
source.games = []FaultDisputeGame{newFDG(addr1, 9999), newFDG(addr2, 9999)}
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
headerNotSent := true
waitErr := wait.For(context.Background(), 100*time.Millisecond, func() (bool, error) {
if len(sched.scheduled) >= 1 {
return true, nil
}
if mockHeadSource.sub == nil {
return false, nil
}
if headerNotSent {
mockHeadSource.sub.headers <- &ethtypes.Header{
Number: big.NewInt(1),
}
headerNotSent = false
}
return false, nil
})
require.NoError(t, waitErr)
mockHeadSource.err = fmt.Errorf("eth subscribe test error")
cancel() cancel()
}()
err := monitor.MonitorGames(ctx) err := monitor.MonitorGames(ctx)
require.ErrorIs(t, err, context.Canceled) require.NoError(t, err)
require.Len(t, sched.scheduled, 1)
require.Equal(t, []common.Address{addr1, addr2}, sched.scheduled[0])
})
t.Run("Resubscribes on error", func(t *testing.T) {
addr1 := common.Address{0xaa}
addr2 := common.Address{0xbb}
monitor, source, sched, mockHeadSource := setupMonitorTest(t, []common.Address{})
source.games = []FaultDisputeGame{newFDG(addr1, 9999), newFDG(addr2, 9999)}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
headerNotSent := true
waitErr := wait.For(context.Background(), 100*time.Millisecond, func() (bool, error) {
return mockHeadSource.sub != nil, nil
})
require.NoError(t, waitErr)
mockHeadSource.sub.errChan <- fmt.Errorf("test error")
waitErr = wait.For(context.Background(), 100*time.Millisecond, func() (bool, error) {
if len(sched.scheduled) >= 1 {
return true, nil
}
if mockHeadSource.sub == nil {
return false, nil
}
if headerNotSent {
mockHeadSource.sub.headers <- &ethtypes.Header{
Number: big.NewInt(1),
}
headerNotSent = false
}
return false, nil
})
require.NoError(t, waitErr)
mockHeadSource.err = fmt.Errorf("eth subscribe test error")
cancel()
}()
err := monitor.MonitorGames(ctx)
require.NoError(t, err)
require.Len(t, sched.scheduled, 1)
require.Equal(t, []common.Address{addr1, addr2}, sched.scheduled[0])
})
} }
func TestMonitorCreateAndProgressGameAgents(t *testing.T) { func TestMonitorCreateAndProgressGameAgents(t *testing.T) {
monitor, source, sched := setupMonitorTest(t, []common.Address{}) monitor, source, sched, _ := setupMonitorTest(t, []common.Address{})
addr1 := common.Address{0xaa} addr1 := common.Address{0xaa}
addr2 := common.Address{0xbb} addr2 := common.Address{0xbb}
source.games = []FaultDisputeGame{ source.games = []FaultDisputeGame{newFDG(addr1, 9999), newFDG(addr2, 9999)}
{
Proxy: addr1,
Timestamp: 9999,
},
{
Proxy: addr2,
Timestamp: 9999,
},
}
require.NoError(t, monitor.progressGames(context.Background(), uint64(1))) require.NoError(t, monitor.progressGames(context.Background(), uint64(1)))
...@@ -72,18 +143,8 @@ func TestMonitorCreateAndProgressGameAgents(t *testing.T) { ...@@ -72,18 +143,8 @@ func TestMonitorCreateAndProgressGameAgents(t *testing.T) {
func TestMonitorOnlyScheduleSpecifiedGame(t *testing.T) { func TestMonitorOnlyScheduleSpecifiedGame(t *testing.T) {
addr1 := common.Address{0xaa} addr1 := common.Address{0xaa}
addr2 := common.Address{0xbb} addr2 := common.Address{0xbb}
monitor, source, sched := setupMonitorTest(t, []common.Address{addr2}) monitor, source, sched, _ := setupMonitorTest(t, []common.Address{addr2})
source.games = []FaultDisputeGame{newFDG(addr1, 9999), newFDG(addr2, 9999)}
source.games = []FaultDisputeGame{
{
Proxy: addr1,
Timestamp: 9999,
},
{
Proxy: addr2,
Timestamp: 9999,
},
}
require.NoError(t, monitor.progressGames(context.Background(), uint64(1))) require.NoError(t, monitor.progressGames(context.Background(), uint64(1)))
...@@ -91,7 +152,17 @@ func TestMonitorOnlyScheduleSpecifiedGame(t *testing.T) { ...@@ -91,7 +152,17 @@ func TestMonitorOnlyScheduleSpecifiedGame(t *testing.T) {
require.Equal(t, []common.Address{addr2}, sched.scheduled[0]) require.Equal(t, []common.Address{addr2}, sched.scheduled[0])
} }
func setupMonitorTest(t *testing.T, allowedGames []common.Address) (*gameMonitor, *stubGameSource, *stubScheduler) { func newFDG(proxy common.Address, timestamp uint64) FaultDisputeGame {
return FaultDisputeGame{
Proxy: proxy,
Timestamp: timestamp,
}
}
func setupMonitorTest(
t *testing.T,
allowedGames []common.Address,
) (*gameMonitor, *stubGameSource, *stubScheduler, *mockNewHeadSource) {
logger := testlog.Logger(t, log.LvlDebug) logger := testlog.Logger(t, log.LvlDebug)
source := &stubGameSource{} source := &stubGameSource{}
i := uint64(1) i := uint64(1)
...@@ -100,15 +171,58 @@ func setupMonitorTest(t *testing.T, allowedGames []common.Address) (*gameMonitor ...@@ -100,15 +171,58 @@ func setupMonitorTest(t *testing.T, allowedGames []common.Address) (*gameMonitor
return i, nil return i, nil
} }
sched := &stubScheduler{} sched := &stubScheduler{}
monitor := newGameMonitor(logger, clock.SystemClock, source, sched, time.Duration(0), fetchBlockNum, allowedGames) mockHeadSource := &mockNewHeadSource{}
return monitor, source, sched monitor := newGameMonitor(
logger,
clock.SystemClock,
source,
sched,
time.Duration(0),
fetchBlockNum,
allowedGames,
mockHeadSource,
)
return monitor, source, sched, mockHeadSource
}
type mockNewHeadSource struct {
sub *mockSubscription
err error
}
func (m *mockNewHeadSource) EthSubscribe(
ctx context.Context,
ch any,
args ...any,
) (ethereum.Subscription, error) {
errChan := make(chan error)
m.sub = &mockSubscription{errChan, (ch).(chan<- *ethtypes.Header)}
if m.err != nil {
return nil, m.err
}
return m.sub, nil
}
type mockSubscription struct {
errChan chan error
headers chan<- *ethtypes.Header
}
func (m *mockSubscription) Unsubscribe() {}
func (m *mockSubscription) Err() <-chan error {
return m.errChan
} }
type stubGameSource struct { type stubGameSource struct {
games []FaultDisputeGame games []FaultDisputeGame
} }
func (s *stubGameSource) FetchAllGamesAtBlock(ctx context.Context, earliest uint64, blockNumber *big.Int) ([]FaultDisputeGame, error) { func (s *stubGameSource) FetchAllGamesAtBlock(
ctx context.Context,
earliest uint64,
blockNumber *big.Int,
) ([]FaultDisputeGame, error) {
return s.games, nil return s.games, nil
} }
......
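The mockNewHeadSource/mockSubscription pair above only has to satisfy go-ethereum's two-method ethereum.Subscription contract (Err and Unsubscribe). A minimal standalone illustration of that contract (not the repository's code):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum"
)

// stubSub is a minimal ethereum.Subscription, the same contract the
// test's mockSubscription satisfies.
type stubSub struct {
	errCh chan error
}

func (s *stubSub) Unsubscribe()      { close(s.errCh) }
func (s *stubSub) Err() <-chan error { return s.errCh }

func main() {
	var sub ethereum.Subscription = &stubSub{errCh: make(chan error, 1)}
	sub.Unsubscribe()
	_, ok := <-sub.Err()
	fmt.Println("subscription closed:", !ok)
}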
...@@ -10,6 +10,7 @@ import ( ...@@ -10,6 +10,7 @@ import (
"github.com/ethereum-optimism/optimism/op-challenger/game/scheduler" "github.com/ethereum-optimism/optimism/op-challenger/game/scheduler"
"github.com/ethereum-optimism/optimism/op-challenger/metrics" "github.com/ethereum-optimism/optimism/op-challenger/metrics"
"github.com/ethereum-optimism/optimism/op-challenger/version" "github.com/ethereum-optimism/optimism/op-challenger/version"
opClient "github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/clock" "github.com/ethereum-optimism/optimism/op-service/clock"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof" oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
...@@ -34,7 +35,7 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*Se ...@@ -34,7 +35,7 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*Se
return nil, fmt.Errorf("failed to create the transaction manager: %w", err) return nil, fmt.Errorf("failed to create the transaction manager: %w", err)
} }
client, err := client.DialEthClientWithTimeout(client.DefaultDialTimeout, logger, cfg.L1EthRpc) l1Client, err := client.DialEthClientWithTimeout(client.DefaultDialTimeout, logger, cfg.L1EthRpc)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to dial L1: %w", err) return nil, fmt.Errorf("failed to dial L1: %w", err)
} }
...@@ -57,10 +58,10 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*Se ...@@ -57,10 +58,10 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*Se
logger.Error("error starting metrics server", "err", err) logger.Error("error starting metrics server", "err", err)
} }
}() }()
m.StartBalanceMetrics(ctx, logger, client, txMgr.From()) m.StartBalanceMetrics(ctx, logger, l1Client, txMgr.From())
} }
factory, err := bindings.NewDisputeGameFactory(cfg.GameFactoryAddress, client) factory, err := bindings.NewDisputeGameFactory(cfg.GameFactoryAddress, l1Client)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to bind the fault dispute game factory contract: %w", err) return nil, fmt.Errorf("failed to bind the fault dispute game factory contract: %w", err)
} }
...@@ -73,10 +74,14 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*Se ...@@ -73,10 +74,14 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*Se
disk, disk,
cfg.MaxConcurrency, cfg.MaxConcurrency,
func(addr common.Address, dir string) (scheduler.GamePlayer, error) { func(addr common.Address, dir string) (scheduler.GamePlayer, error) {
return fault.NewGamePlayer(ctx, logger, m, cfg, dir, addr, txMgr, client) return fault.NewGamePlayer(ctx, logger, m, cfg, dir, addr, txMgr, l1Client)
}) })
monitor := newGameMonitor(logger, cl, loader, sched, cfg.GameWindow, client.BlockNumber, cfg.GameAllowlist) pollClient, err := opClient.NewRPCWithClient(ctx, logger, cfg.L1EthRpc, opClient.NewBaseRPCClient(l1Client.Client()), cfg.PollInterval)
if err != nil {
return nil, fmt.Errorf("failed to create RPC client: %w", err)
}
monitor := newGameMonitor(logger, cl, loader, sched, cfg.GameWindow, l1Client.BlockNumber, cfg.GameAllowlist, pollClient)
m.RecordInfo(version.SimpleWithMeta) m.RecordInfo(version.SimpleWithMeta)
m.RecordUp() m.RecordUp()
......
...@@ -68,6 +68,12 @@ func WithAlphabet(alphabet string) Option { ...@@ -68,6 +68,12 @@ func WithAlphabet(alphabet string) Option {
} }
} }
func WithPollInterval(pollInterval time.Duration) Option {
return func(c *config.Config) {
c.PollInterval = pollInterval
}
}
func WithCannon( func WithCannon(
t *testing.T, t *testing.T,
rollupCfg *rollup.Config, rollupCfg *rollup.Config,
...@@ -98,7 +104,7 @@ func WithCannon( ...@@ -98,7 +104,7 @@ func WithCannon(
} }
func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name string, options ...Option) *Helper { func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name string, options ...Option) *Helper {
log := testlog.Logger(t, log.LvlInfo).New("role", name) log := testlog.Logger(t, log.LvlDebug).New("role", name)
log.Info("Creating challenger", "l1", l1Endpoint) log.Info("Creating challenger", "l1", l1Endpoint)
cfg := NewChallengerConfig(t, l1Endpoint, options...) cfg := NewChallengerConfig(t, l1Endpoint, options...)
...@@ -145,6 +151,10 @@ func NewChallengerConfig(t *testing.T, l1Endpoint string, options ...Option) *co ...@@ -145,6 +151,10 @@ func NewChallengerConfig(t *testing.T, l1Endpoint string, options ...Option) *co
_, err := os.Stat(cfg.CannonAbsolutePreState) _, err := os.Stat(cfg.CannonAbsolutePreState)
require.NoError(t, err, "cannon pre-state should be built. Make sure you've run make cannon-prestate") require.NoError(t, err, "cannon pre-state should be built. Make sure you've run make cannon-prestate")
} }
if cfg.PollInterval == 0 {
cfg.PollInterval = time.Second
}
return &cfg return &cfg
} }
......
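WithPollInterval follows the same functional-option shape as the existing challenger helpers, and NewChallengerConfig defaults PollInterval to one second when it is left unset. A standalone sketch of that pattern (the Config and constructor names here are illustrative, not the e2e package's types):

package main

import (
	"fmt"
	"time"
)

// Config and Option mirror the shape used by the challenger helpers above.
type Config struct {
	PollInterval time.Duration
}

type Option func(*Config)

func WithPollInterval(d time.Duration) Option {
	return func(c *Config) { c.PollInterval = d }
}

func NewConfig(options ...Option) *Config {
	cfg := &Config{}
	for _, opt := range options {
		opt(cfg)
	}
	if cfg.PollInterval == 0 {
		cfg.PollInterval = time.Second // same default the helper applies
	}
	return cfg
}

func main() {
	fmt.Println(NewConfig().PollInterval)                                         // 1s default
	fmt.Println(NewConfig(WithPollInterval(400 * time.Millisecond)).PollInterval) // overridden, as in the exhaustive-game test
}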
...@@ -4,6 +4,7 @@ import ( ...@@ -4,6 +4,7 @@ import (
"context" "context"
"github.com/ethereum-optimism/optimism/op-challenger/config" "github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/alphabet"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -33,3 +34,12 @@ func (g *AlphabetGameHelper) StartChallenger(ctx context.Context, l1Endpoint str ...@@ -33,3 +34,12 @@ func (g *AlphabetGameHelper) StartChallenger(ctx context.Context, l1Endpoint str
}) })
return c return c
} }
func (g *AlphabetGameHelper) CreateHonestActor(ctx context.Context, alphabetTrace string, depth uint64) *HonestHelper {
return &HonestHelper{
t: g.t,
require: g.require,
game: &g.FaultGameHelper,
correctTrace: alphabet.NewTraceProvider(alphabetTrace, depth),
}
}
...@@ -100,7 +100,7 @@ func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet s ...@@ -100,7 +100,7 @@ func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet s
l2BlockNumber := h.waitForProposals(ctx) l2BlockNumber := h.waitForProposals(ctx)
l1Head := h.checkpointL1Block(ctx) l1Head := h.checkpointL1Block(ctx)
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel() defer cancel()
trace := alphabet.NewTraceProvider(claimedAlphabet, alphabetGameDepth) trace := alphabet.NewTraceProvider(claimedAlphabet, alphabetGameDepth)
......
...@@ -3,7 +3,9 @@ package op_e2e ...@@ -3,7 +3,9 @@ package op_e2e
import ( import (
"context" "context"
"testing" "testing"
"time"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/disputegame" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/disputegame"
l2oo2 "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/l2oo" l2oo2 "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/l2oo"
...@@ -62,8 +64,12 @@ func TestMultipleCannonGames(t *testing.T) { ...@@ -62,8 +64,12 @@ func TestMultipleCannonGames(t *testing.T) {
sys.TimeTravelClock.AdvanceTime(gameDuration) sys.TimeTravelClock.AdvanceTime(gameDuration)
require.NoError(t, wait.ForNextBlock(ctx, l1Client)) require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game1.WaitForGameStatus(ctx, disputegame.StatusChallengerWins) game1.WaitForInactivity(ctx, 10, true)
game2.WaitForGameStatus(ctx, disputegame.StatusChallengerWins) game2.WaitForInactivity(ctx, 10, true)
game1.LogGameData(ctx)
game2.LogGameData(ctx)
require.EqualValues(t, disputegame.StatusChallengerWins, game1.Status(ctx))
require.EqualValues(t, disputegame.StatusChallengerWins, game2.Status(ctx))
// Check that the game directories are removed // Check that the game directories are removed
challenger.WaitForGameDataDeletion(ctx, game1, game2) challenger.WaitForGameDataDeletion(ctx, game1, game2)
...@@ -168,11 +174,72 @@ func TestChallengerCompleteDisputeGame(t *testing.T) { ...@@ -168,11 +174,72 @@ func TestChallengerCompleteDisputeGame(t *testing.T) {
sys.TimeTravelClock.AdvanceTime(gameDuration) sys.TimeTravelClock.AdvanceTime(gameDuration)
require.NoError(t, wait.ForNextBlock(ctx, l1Client)) require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, test.expectedResult) game.WaitForInactivity(ctx, 10, true)
game.LogGameData(ctx)
require.EqualValues(t, test.expectedResult, game.Status(ctx))
}) })
} }
} }
func TestChallengerCompleteExhaustiveDisputeGame(t *testing.T) {
InitParallel(t)
testCase := func(t *testing.T, isRootCorrect bool) {
ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t)
t.Cleanup(sys.Close)
disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys.cfg.L1Deployments, l1Client)
rootClaimedAlphabet := disputegame.CorrectAlphabet
if !isRootCorrect {
rootClaimedAlphabet = "abcdexyz"
}
game := disputeGameFactory.StartAlphabetGame(ctx, rootClaimedAlphabet)
require.NotNil(t, game)
gameDuration := game.GameDuration(ctx)
// Start honest challenger
game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "Challenger",
challenger.WithAgreeProposedOutput(!isRootCorrect),
challenger.WithAlphabet(disputegame.CorrectAlphabet),
challenger.WithPrivKey(sys.cfg.Secrets.Alice),
// Ensures the challenger responds to all claims before test timeout
challenger.WithPollInterval(time.Millisecond*400),
)
// Start dishonest challenger
correctTrace := game.CreateHonestActor(ctx, disputegame.CorrectAlphabet, 4)
dishonestHelper := disputegame.NewDishonestHelper(&game.FaultGameHelper, correctTrace, !isRootCorrect)
dishonestHelper.ExhaustDishonestClaims(ctx)
// Wait until we've reached max depth before checking for inactivity
game.WaitForClaimAtDepth(ctx, int(game.MaxDepth(ctx)))
// Wait for 4 blocks of no challenger responses. The challenger may still be stepping on invalid claims at max depth
game.WaitForInactivity(ctx, 4, false)
sys.TimeTravelClock.AdvanceTime(gameDuration)
require.NoError(t, wait.ForNextBlock(ctx, l1Client))
expectedStatus := disputegame.StatusChallengerWins
if isRootCorrect {
expectedStatus = disputegame.StatusDefenderWins
}
game.WaitForInactivity(ctx, 10, true)
game.LogGameData(ctx)
require.EqualValues(t, expectedStatus, game.Status(ctx))
}
t.Run("RootCorrect", func(t *testing.T) {
InitParallel(t)
testCase(t, true)
})
t.Run("RootIncorrect", func(t *testing.T) {
InitParallel(t)
testCase(t, false)
})
}
func TestCannonDisputeGame(t *testing.T) { func TestCannonDisputeGame(t *testing.T) {
InitParallel(t) InitParallel(t)
...@@ -217,8 +284,9 @@ func TestCannonDisputeGame(t *testing.T) { ...@@ -217,8 +284,9 @@ func TestCannonDisputeGame(t *testing.T) {
sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx)) sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx))
require.NoError(t, wait.ForNextBlock(ctx, l1Client)) require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, disputegame.StatusChallengerWins) game.WaitForInactivity(ctx, 10, true)
game.LogGameData(ctx) game.LogGameData(ctx)
require.EqualValues(t, disputegame.StatusChallengerWins, game.Status(ctx))
}) })
} }
} }
...@@ -260,8 +328,9 @@ func TestCannonDefendStep(t *testing.T) { ...@@ -260,8 +328,9 @@ func TestCannonDefendStep(t *testing.T) {
sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx)) sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx))
require.NoError(t, wait.ForNextBlock(ctx, l1Client)) require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, disputegame.StatusChallengerWins) game.WaitForInactivity(ctx, 10, true)
game.LogGameData(ctx) game.LogGameData(ctx)
require.EqualValues(t, disputegame.StatusChallengerWins, game.Status(ctx))
} }
func TestCannonProposedOutputRootInvalid(t *testing.T) { func TestCannonProposedOutputRootInvalid(t *testing.T) {
...@@ -335,12 +404,86 @@ func TestCannonProposedOutputRootInvalid(t *testing.T) { ...@@ -335,12 +404,86 @@ func TestCannonProposedOutputRootInvalid(t *testing.T) {
sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx)) sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx))
require.NoError(t, wait.ForNextBlock(ctx, l1Client)) require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, disputegame.StatusDefenderWins) game.WaitForInactivity(ctx, 10, true)
game.LogGameData(ctx) game.LogGameData(ctx)
require.EqualValues(t, disputegame.StatusDefenderWins, game.Status(ctx))
}) })
} }
} }
func TestCannonPoisonedPostState(t *testing.T) {
InitParallel(t)
ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t)
t.Cleanup(sys.Close)
l1Endpoint := sys.NodeEndpoint("l1")
l2Endpoint := sys.NodeEndpoint("sequencer")
disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys.cfg.L1Deployments, l1Client)
game, correctTrace := disputeGameFactory.StartCannonGameWithCorrectRoot(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint,
challenger.WithPrivKey(sys.cfg.Secrets.Mallory),
)
require.NotNil(t, game)
game.LogGameData(ctx)
// Honest first attack at "honest" level
correctTrace.Attack(ctx, 0)
// Honest defense at "dishonest" level
correctTrace.Defend(ctx, 1)
// Dishonest attack at "honest" level - honest move would be to ignore
game.Attack(ctx, 2, common.Hash{0x03, 0xaa})
// Honest attack at "dishonest" level - honest move would be to ignore
correctTrace.Attack(ctx, 3)
// Start the honest challenger
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Honest",
// Agree with the proposed output, so disagree with the root claim
challenger.WithAgreeProposedOutput(true),
challenger.WithPrivKey(sys.cfg.Secrets.Bob),
)
// Start dishonest challenger that posts correct claims
// It participates in the subgame rooted at the honest claim at index 4
func() {
claimCount := int64(5)
depth := game.MaxDepth(ctx)
for {
game.LogGameData(ctx)
claimCount++
// Wait for the challenger to counter
game.WaitForClaimCount(ctx, claimCount)
// Respond with our own move
correctTrace.Defend(ctx, claimCount-1)
claimCount++
game.WaitForClaimCount(ctx, claimCount)
// Defender moves last. If we're at max depth, then we're done
dishonestClaim := game.GetClaimUnsafe(ctx, claimCount-1)
pos := types.NewPositionFromGIndex(dishonestClaim.Position.Uint64())
if int64(pos.Depth()) == depth {
break
}
}
}()
// Wait for the challenger to drive the subgame at 4 to the leaf node, which should be countered
game.WaitForClaimAtMaxDepth(ctx, true)
// Time travel past when the game will be resolvable.
sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx))
require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game.WaitForInactivity(ctx, 10, true)
game.LogGameData(ctx)
require.EqualValues(t, disputegame.StatusChallengerWins, game.Status(ctx))
}
// setupDisputeGameForInvalidOutputRoot sets up an L2 chain with at least one valid output root followed by an invalid output root. // setupDisputeGameForInvalidOutputRoot sets up an L2 chain with at least one valid output root followed by an invalid output root.
// A cannon dispute game is started to dispute the invalid output root with the correct root claim provided. // A cannon dispute game is started to dispute the invalid output root with the correct root claim provided.
// An honest challenger is run to defend the root claim (ie disagree with the invalid output root). // An honest challenger is run to defend the root claim (ie disagree with the invalid output root).
...@@ -410,8 +553,9 @@ func TestCannonChallengeWithCorrectRoot(t *testing.T) { ...@@ -410,8 +553,9 @@ func TestCannonChallengeWithCorrectRoot(t *testing.T) {
sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx)) sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx))
require.NoError(t, wait.ForNextBlock(ctx, l1Client)) require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, disputegame.StatusChallengerWins) game.WaitForInactivity(ctx, 10, true)
game.LogGameData(ctx) game.LogGameData(ctx)
require.EqualValues(t, disputegame.StatusChallengerWins, game.Status(ctx))
} }
func startFaultDisputeSystem(t *testing.T) (*System, *ethclient.Client) { func startFaultDisputeSystem(t *testing.T) (*System, *ethclient.Client) {
......
...@@ -14,8 +14,8 @@ func TestTxGossip(t *testing.T) { ...@@ -14,8 +14,8 @@ func TestTxGossip(t *testing.T) {
gethOpts := []geth.GethOption{ gethOpts := []geth.GethOption{
geth.WithP2P(), geth.WithP2P(),
} }
cfg.GethOptions["sequencer"] = gethOpts cfg.GethOptions["sequencer"] = append(cfg.GethOptions["sequencer"], gethOpts...)
cfg.GethOptions["verifier"] = gethOpts cfg.GethOptions["verifier"] = append(cfg.GethOptions["verifier"], gethOpts...)
sys, err := cfg.Start(t) sys, err := cfg.Start(t)
require.NoError(t, err, "Start system") require.NoError(t, err, "Start system")
......
...@@ -418,6 +418,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste ...@@ -418,6 +418,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
DepositContractAddress: cfg.DeployConfig.OptimismPortalProxy, DepositContractAddress: cfg.DeployConfig.OptimismPortalProxy,
L1SystemConfigAddress: cfg.DeployConfig.SystemConfigProxy, L1SystemConfigAddress: cfg.DeployConfig.SystemConfigProxy,
RegolithTime: cfg.DeployConfig.RegolithTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), RegolithTime: cfg.DeployConfig.RegolithTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
ProtocolVersionsAddress: cfg.L1Deployments.ProtocolVersionsProxy,
} }
} }
defaultConfig := makeRollupConfig() defaultConfig := makeRollupConfig()
......
...@@ -308,7 +308,7 @@ var ( ...@@ -308,7 +308,7 @@ var (
// None of these flags are strictly required. // None of these flags are strictly required.
// Some are hidden if they are too technical, or not recommended. // Some are hidden if they are too technical, or not recommended.
var p2pFlags = []cli.Flag{ var P2pFlags = []cli.Flag{
DisableP2P, DisableP2P,
NoDiscovery, NoDiscovery,
P2PPrivPath, P2PPrivPath,
......
...@@ -81,6 +81,7 @@ type SequencerIface interface { ...@@ -81,6 +81,7 @@ type SequencerIface interface {
PlanNextSequencerAction() time.Duration PlanNextSequencerAction() time.Duration
RunNextSequencerAction(ctx context.Context) (*eth.ExecutionPayload, error) RunNextSequencerAction(ctx context.Context) (*eth.ExecutionPayload, error)
BuildingOnto() eth.L2BlockRef BuildingOnto() eth.L2BlockRef
CancelBuildingBlock(ctx context.Context)
} }
type Network interface { type Network interface {
......
FROM ethereum/client-go:v1.13.0 FROM ethereum/client-go:v1.13.1
RUN apk add --no-cache jq RUN apk add --no-cache jq
......