Commit fbfd6213 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into dependabot/npm_and_yarn/eslint-plugin-unicorn-48.0.1

parents fe1f3c55 4e8e61dd
---
'@eth-optimism/sdk': patch
---
Adds Sepolia & OP Sepolia support to SDK
---
'@eth-optimism/contracts-bedrock': patch
---
Bumps SDK version to have access to Sepolia deployments
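As a rough illustration of what the Sepolia support enables (not part of this diff), a consumer could point the SDK's CrossChainMessenger at the new networks. The RPC URLs and constructor options below are assumptions based on the SDK's existing public shape, not documentation of this change:

// Hypothetical consumer sketch, not from this commit.
import { CrossChainMessenger } from '@eth-optimism/sdk'
import { ethers } from 'ethers'

// Public RPC endpoints used only for illustration.
const l1Provider = new ethers.providers.JsonRpcProvider('https://rpc.sepolia.org')
const l2Provider = new ethers.providers.JsonRpcProvider('https://sepolia.optimism.io')

const messenger = new CrossChainMessenger({
  l1ChainId: 11155111, // Sepolia
  l2ChainId: 11155420, // OP Sepolia
  l1SignerOrProvider: l1Provider,
  l2SignerOrProvider: l2Provider,
})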
......@@ -887,6 +887,18 @@ jobs:
name: Build
command: make indexer
working_directory: indexer
- run:
name: Install node_modules
command: pnpm install --frozen-lockfile --prefer-offline
working_directory: indexer/api-ts
- run:
name: Install tygo
command: go install github.com/gzuidhof/tygo@latest
working_directory: indexer/api-ts
- run:
name: Check generated code
command: npm run generate && git diff --exit-code
working_directory: indexer/api-ts
devnet:
machine:
......
......@@ -109,7 +109,6 @@ devnet-down:
devnet-clean:
rm -rf ./packages/contracts-bedrock/deployments/devnetL1
rm -rf ./packages/contracts-bedrock/deploy-config/devnetL1.json
rm -rf ./.devnet
cd ./ops-bedrock && docker compose down
docker image ls 'ops-bedrock*' --format='{{.Repository}}' | xargs -r docker rmi
......
......@@ -208,6 +208,6 @@ require (
rsc.io/tmplfunc v0.0.3 // indirect
)
replace github.com/ethereum/go-ethereum v1.12.0 => github.com/ethereum-optimism/op-geth v1.101200.0-rc.1.0.20230818191139-f7376a28049b
replace github.com/ethereum/go-ethereum v1.12.0 => github.com/ethereum-optimism/op-geth v1.101200.2-rc.1.0.20230914224024-b84ba11915a0
//replace github.com/ethereum/go-ethereum v1.12.0 => ../go-ethereum
......@@ -162,8 +162,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
github.com/ethereum-optimism/op-geth v1.101200.0-rc.1.0.20230818191139-f7376a28049b h1:YF2FE/QnbhvrHwDYJHnbTKgJvw2aKwB/dd7PO1zKNqY=
github.com/ethereum-optimism/op-geth v1.101200.0-rc.1.0.20230818191139-f7376a28049b/go.mod h1:gRnPb21PoKcHm3kHqj9BQlQkwmhOGUvQoGEbC7z852Q=
github.com/ethereum-optimism/op-geth v1.101200.2-rc.1.0.20230914224024-b84ba11915a0 h1:Qcu7OVMbKvbu7aaDC31OY0JCqFIr2N+/SGdBTnxukCs=
github.com/ethereum-optimism/op-geth v1.101200.2-rc.1.0.20230914224024-b84ba11915a0/go.mod h1:gRnPb21PoKcHm3kHqj9BQlQkwmhOGUvQoGEbC7z852Q=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20230817174831-5d3ca1966435 h1:2CzkJkkTLuVyoVFkoW5w6vDB2Q7eJzxXw/ybA17xjqM=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20230817174831-5d3ca1966435/go.mod h1:v2YpePbdGBF0Gr6VWq49MFFmcTW0kRYZ2ingBJYWEwg=
github.com/ethereum/c-kzg-4844 v0.2.0 h1:+cUvymlnoDDQgMInp25Bo3OmLajmmY8mLJ/tLjqd77Q=
......
Generated typescript types for https://github.com/ethereum-optimism/optimism/tree/develop/indexer
......@@ -8,12 +8,12 @@ export interface DepositItem {
from: string;
to: string;
timestamp: number /* uint64 */;
L1TxHash: string;
L2TxHash: string;
Block: string;
l1BlockHash: string;
l1TxHash: string;
l2TxHash: string;
amount: string;
l1Token: string;
l2Token: string;
l1TokenAddress: string;
l2TokenAddress: string;
}
export interface DepositResponse {
cursor: string;
......@@ -41,10 +41,10 @@ export interface WithdrawalItem {
timestamp: number /* uint64 */;
l2BlockHash: string;
amount: string;
proof: string;
claim: string;
l1Token: string;
l2Token: string;
proofTransactionHash: string;
claimTransactionHash: string;
l1TokenAddress: string;
l2TokenAddress: string;
}
export interface WithdrawalResponse {
cursor: string;
......
......@@ -38,10 +38,10 @@ var createQueryString = ({ cursor, limit }) => {
return `?${queries.join("&")}`;
};
var depositEndpoint = ({ baseUrl = "", address, cursor, limit }) => {
return [baseUrl, "deposits", address, createQueryString({ cursor, limit })].join("/");
return [baseUrl, "deposits", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
var withdrawalEndoint = ({ baseUrl = "", address, cursor, limit }) => {
return [baseUrl, "withdrawals", address, createQueryString({ cursor, limit })].join("/");
return [baseUrl, "withdrawals", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
......
{"version":3,"sources":["indexer.ts"],"sourcesContent":["export * from './generated'\n\ntype PaginationOptions = {\n limit?: number\n cursor?: string\n}\n\ntype Options = {\n baseUrl?: string\n address: `0x${string}`\n} & PaginationOptions\n\nconst createQueryString = ({ cursor, limit }: PaginationOptions): string => {\n if (cursor === undefined && limit === undefined) {\n return ''\n }\n const queries: string[] = []\n if (cursor) {\n queries.push(`cursor=${cursor}`)\n }\n if (limit) {\n queries.push(`limit=${limit}`)\n }\n return `?${queries.join('&')}`\n}\n\nexport const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'deposits', address, createQueryString({ cursor, limit })].join('/')\n}\n\nexport const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'withdrawals', address, createQueryString({ cursor, limit })].join('/')\n}\n\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAYA,IAAM,oBAAoB,CAAC,EAAE,QAAQ,MAAM,MAAiC;AAC1E,MAAI,WAAW,UAAa,UAAU,QAAW;AAC/C,WAAO;AAAA,EACT;AACA,QAAM,UAAoB,CAAC;AAC3B,MAAI,QAAQ;AACV,YAAQ,KAAK,UAAU,MAAM,EAAE;AAAA,EACjC;AACA,MAAI,OAAO;AACT,YAAQ,KAAK,SAAS,KAAK,EAAE;AAAA,EAC/B;AACA,SAAO,IAAI,QAAQ,KAAK,GAAG,CAAC;AAC9B;AAEO,IAAM,kBAAkB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC5F,SAAO,CAAC,SAAS,YAAY,SAAS,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,KAAK,GAAG;AACtF;AAEO,IAAM,oBAAoB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC9F,SAAO,CAAC,SAAS,eAAe,SAAS,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,KAAK,GAAG;AACzF;","names":[]}
\ No newline at end of file
{"version":3,"sources":["indexer.ts"],"sourcesContent":["export * from './generated'\n\ntype PaginationOptions = {\n limit?: number\n cursor?: string\n}\n\ntype Options = {\n baseUrl?: string\n address: `0x${string}`\n} & PaginationOptions\n\nconst createQueryString = ({ cursor, limit }: PaginationOptions): string => {\n if (cursor === undefined && limit === undefined) {\n return ''\n }\n const queries: string[] = []\n if (cursor) {\n queries.push(`cursor=${cursor}`)\n }\n if (limit) {\n queries.push(`limit=${limit}`)\n }\n return `?${queries.join('&')}`\n}\n\nexport const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'deposits', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\nexport const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'withdrawals', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAYA,IAAM,oBAAoB,CAAC,EAAE,QAAQ,MAAM,MAAiC;AAC1E,MAAI,WAAW,UAAa,UAAU,QAAW;AAC/C,WAAO;AAAA,EACT;AACA,QAAM,UAAoB,CAAC;AAC3B,MAAI,QAAQ;AACV,YAAQ,KAAK,UAAU,MAAM,EAAE;AAAA,EACjC;AACA,MAAI,OAAO;AACT,YAAQ,KAAK,SAAS,KAAK,EAAE;AAAA,EAC/B;AACA,SAAO,IAAI,QAAQ,KAAK,GAAG,CAAC;AAC9B;AAEO,IAAM,kBAAkB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC5F,SAAO,CAAC,SAAS,YAAY,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC5F;AAEO,IAAM,oBAAoB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC9F,SAAO,CAAC,SAAS,eAAe,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC/F;","names":[]}
\ No newline at end of file
......@@ -13,10 +13,10 @@ var createQueryString = ({ cursor, limit }) => {
return `?${queries.join("&")}`;
};
var depositEndpoint = ({ baseUrl = "", address, cursor, limit }) => {
return [baseUrl, "deposits", address, createQueryString({ cursor, limit })].join("/");
return [baseUrl, "deposits", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
var withdrawalEndoint = ({ baseUrl = "", address, cursor, limit }) => {
return [baseUrl, "withdrawals", address, createQueryString({ cursor, limit })].join("/");
return [baseUrl, "withdrawals", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
export {
depositEndpoint,
......
{"version":3,"sources":["indexer.ts"],"sourcesContent":["export * from './generated'\n\ntype PaginationOptions = {\n limit?: number\n cursor?: string\n}\n\ntype Options = {\n baseUrl?: string\n address: `0x${string}`\n} & PaginationOptions\n\nconst createQueryString = ({ cursor, limit }: PaginationOptions): string => {\n if (cursor === undefined && limit === undefined) {\n return ''\n }\n const queries: string[] = []\n if (cursor) {\n queries.push(`cursor=${cursor}`)\n }\n if (limit) {\n queries.push(`limit=${limit}`)\n }\n return `?${queries.join('&')}`\n}\n\nexport const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'deposits', address, createQueryString({ cursor, limit })].join('/')\n}\n\nexport const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'withdrawals', address, createQueryString({ cursor, limit })].join('/')\n}\n\n"],"mappings":";AAYA,IAAM,oBAAoB,CAAC,EAAE,QAAQ,MAAM,MAAiC;AAC1E,MAAI,WAAW,UAAa,UAAU,QAAW;AAC/C,WAAO;AAAA,EACT;AACA,QAAM,UAAoB,CAAC;AAC3B,MAAI,QAAQ;AACV,YAAQ,KAAK,UAAU,MAAM,EAAE;AAAA,EACjC;AACA,MAAI,OAAO;AACT,YAAQ,KAAK,SAAS,KAAK,EAAE;AAAA,EAC/B;AACA,SAAO,IAAI,QAAQ,KAAK,GAAG,CAAC;AAC9B;AAEO,IAAM,kBAAkB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC5F,SAAO,CAAC,SAAS,YAAY,SAAS,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,KAAK,GAAG;AACtF;AAEO,IAAM,oBAAoB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC9F,SAAO,CAAC,SAAS,eAAe,SAAS,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,KAAK,GAAG;AACzF;","names":[]}
\ No newline at end of file
{"version":3,"sources":["indexer.ts"],"sourcesContent":["export * from './generated'\n\ntype PaginationOptions = {\n limit?: number\n cursor?: string\n}\n\ntype Options = {\n baseUrl?: string\n address: `0x${string}`\n} & PaginationOptions\n\nconst createQueryString = ({ cursor, limit }: PaginationOptions): string => {\n if (cursor === undefined && limit === undefined) {\n return ''\n }\n const queries: string[] = []\n if (cursor) {\n queries.push(`cursor=${cursor}`)\n }\n if (limit) {\n queries.push(`limit=${limit}`)\n }\n return `?${queries.join('&')}`\n}\n\nexport const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'deposits', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\nexport const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'withdrawals', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\n"],"mappings":";AAYA,IAAM,oBAAoB,CAAC,EAAE,QAAQ,MAAM,MAAiC;AAC1E,MAAI,WAAW,UAAa,UAAU,QAAW;AAC/C,WAAO;AAAA,EACT;AACA,QAAM,UAAoB,CAAC;AAC3B,MAAI,QAAQ;AACV,YAAQ,KAAK,UAAU,MAAM,EAAE;AAAA,EACjC;AACA,MAAI,OAAO;AACT,YAAQ,KAAK,SAAS,KAAK,EAAE;AAAA,EAC/B;AACA,SAAO,IAAI,QAAQ,KAAK,GAAG,CAAC;AAC9B;AAEO,IAAM,kBAAkB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC5F,SAAO,CAAC,SAAS,YAAY,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC5F;AAEO,IAAM,oBAAoB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC9F,SAAO,CAAC,SAAS,eAAe,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC/F;","names":[]}
\ No newline at end of file
import { test, expect } from 'vitest'
import { depositEndpoint, withdrawalEndoint } from './indexer.ts'
test(depositEndpoint.name, () => {
expect(depositEndpoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234', cursor: '0x1235', limit: 10 })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/deposits/0x1234?cursor=0x1235&limit=10"')
expect(depositEndpoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234' })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/deposits/0x1234"')
})
test(withdrawalEndoint.name, () => {
expect(withdrawalEndoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234', cursor: '0x1235', limit: 10 })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/withdrawals/0x1234?cursor=0x1235&limit=10"')
expect(withdrawalEndoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234' })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/withdrawals/0x1234"')
})
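For context, the generated types and endpoint helpers above are intended to be consumed together; a minimal sketch follows (the local base URL mirrors the tests above, and it is assumed the running indexer serves JSON matching the generated interfaces):

// Hypothetical consumer sketch, not from this commit.
import { depositEndpoint } from '@eth-optimism/indexer-api'
import type { DepositResponse } from '@eth-optimism/indexer-api'

// Build the deposits URL for an address, optionally paginated.
const url = depositEndpoint({
  baseUrl: 'http://localhost:8080/api/v0', // assumed local indexer instance
  address: '0x1234000000000000000000000000000000000000',
  limit: 10,
})

// Fetch and read the cursor from the typed response.
const res = await fetch(url)
const deposits = (await res.json()) as DepositResponse
console.log(deposits.cursor)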
......@@ -25,10 +25,10 @@ const createQueryString = ({ cursor, limit }: PaginationOptions): string => {
}
export const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {
return [baseUrl, 'deposits', address, createQueryString({ cursor, limit })].join('/')
return [baseUrl, 'deposits', `${address}${createQueryString({ cursor, limit })}`].join('/')
}
export const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {
return [baseUrl, 'withdrawals', address, createQueryString({ cursor, limit })].join('/')
return [baseUrl, 'withdrawals', `${address}${createQueryString({ cursor, limit })}`].join('/')
}
{
"name": "@eth-optimism/indexer-api",
"version": "0.0.1",
"version": "0.0.3",
"description": "[Optimism] typescript types for the indexer service",
"main": "indexer.cjs",
"module": "indexer.js",
......@@ -17,7 +17,8 @@
],
"scripts": {
"clean": "rm -rf generated.ts indexer.cjs indexer.js",
"generate": "npm run clean && tygo generate && mv ../api/routes/index.ts generated.ts && npx tsup"
"generate": "npm run clean && tygo generate && mv ../api/routes/index.ts generated.ts && tsup",
"test": "vitest"
},
"keywords": [
"optimism",
......@@ -30,4 +31,9 @@
"repository": {
"type": "git",
"url": "https://github.com/ethereum-optimism/optimism.git"
}}
},
"devDependencies": {
"tsup": "^7.2.0",
"vitest": "^0.34.4"
}
}
......@@ -4,7 +4,7 @@
"strict": true,
"skipLibCheck": true,
"module": "ESNext",
"moduleResolution": "bundler",
"moduleResolution": "NodeNext",
"jsx": "react",
"target": "ESNext",
"noEmit": true
......
......@@ -14,12 +14,12 @@ type DepositItem struct {
From string `json:"from"`
To string `json:"to"`
Timestamp uint64 `json:"timestamp"`
L1TxHash string `json:"L1TxHash"`
L2TxHash string `json:"L2TxHash"`
L1BlockHash string `json:"Block"`
L1BlockHash string `json:"l1BlockHash"`
L1TxHash string `json:"l1TxHash"`
L2TxHash string `json:"l2TxHash"`
Amount string `json:"amount"`
L1TokenAddress string `json:"l1Token"`
L2TokenAddress string `json:"l2Token"`
L1TokenAddress string `json:"l1TokenAddress"`
L2TokenAddress string `json:"l2TokenAddress"`
}
type DepositResponse struct {
......
......@@ -17,10 +17,10 @@ type WithdrawalItem struct {
Timestamp uint64 `json:"timestamp"`
L2BlockHash string `json:"l2BlockHash"`
Amount string `json:"amount"`
ProofTransactionHash string `json:"proof"`
ClaimTransactionHash string `json:"claim"`
L1TokenAddress string `json:"l1Token"`
L2TokenAddress string `json:"l2Token"`
ProofTransactionHash string `json:"proofTransactionHash"`
ClaimTransactionHash string `json:"claimTransactionHash"`
L1TokenAddress string `json:"l1TokenAddress"`
L2TokenAddress string `json:"l2TokenAddress"`
}
type WithdrawalResponse struct {
......
......@@ -35,5 +35,6 @@
"PreimageOracle",
"BlockOracle",
"EAS",
"SchemaRegistry"
"SchemaRegistry",
"ProtocolVersions"
]
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package bindings
import (
"encoding/json"
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const ProtocolVersionsStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"_initialized\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_uint8\"},{\"astId\":1001,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"_initializing\",\"offset\":1,\"slot\":\"0\",\"type\":\"t_bool\"},{\"astId\":1002,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"__gap\",\"offset\":0,\"slot\":\"1\",\"type\":\"t_array(t_uint256)50_storage\"},{\"astId\":1003,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"_owner\",\"offset\":0,\"slot\":\"51\",\"type\":\"t_address\"},{\"astId\":1004,\"contract\":\"src/L1/ProtocolVersions.sol:ProtocolVersions\",\"label\":\"__gap\",\"offset\":0,\"slot\":\"52\",\"type\":\"t_array(t_uint256)49_storage\"}],\"types\":{\"t_address\":{\"encoding\":\"inplace\",\"label\":\"address\",\"numberOfBytes\":\"20\"},\"t_array(t_uint256)49_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[49]\",\"numberOfBytes\":\"1568\",\"base\":\"t_uint256\"},\"t_array(t_uint256)50_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[50]\",\"numberOfBytes\":\"1600\",\"base\":\"t_uint256\"},\"t_bool\":{\"encoding\":\"inplace\",\"label\":\"bool\",\"numberOfBytes\":\"1\"},\"t_uint256\":{\"encoding\":\"inplace\",\"label\":\"uint256\",\"numberOfBytes\":\"32\"},\"t_uint8\":{\"encoding\":\"inplace\",\"label\":\"uint8\",\"numberOfBytes\":\"1\"}}}"
var ProtocolVersionsStorageLayout = new(solc.StorageLayout)
var ProtocolVersionsDeployedBin = "0x608060405234801561001057600080fd5b50600436106100d45760003560e01c80638da5cb5b11610081578063f2fde38b1161005b578063f2fde38b146101b8578063f7d12760146101cb578063ffa1ad74146101d357600080fd5b80638da5cb5b14610180578063d798b1ac146101a8578063dc8452cd146101b057600080fd5b80635fd579af116100b25780635fd579af14610152578063715018a6146101655780637a1ac61e1461016d57600080fd5b80630457d6f2146100d9578063206a8300146100ee57806354fd4d5014610109575b600080fd5b6100ec6100e7366004610859565b6101db565b005b6100f66101ef565b6040519081526020015b60405180910390f35b6101456040518060400160405280600581526020017f302e312e3000000000000000000000000000000000000000000000000000000081525081565b60405161010091906108dd565b6100ec610160366004610859565b61021d565b6100ec61022e565b6100ec61017b366004610920565b610242565b60335460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610100565b6100f66103ad565b6100f66103e6565b6100ec6101c6366004610953565b610416565b6100f66104ca565b6100f6600081565b6101e36104f5565b6101ec81610576565b50565b61021a60017f4aaefe95bd84fd3f32700cf3b7566bc944b73138e41958b5785826df2aecace161096e565b81565b6102256104f5565b6101ec8161062d565b6102366104f5565b61024060006106a8565b565b600054600290610100900460ff16158015610264575060005460ff8083169116105b6102f5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a656400000000000000000000000000000000000060648201526084015b60405180910390fd5b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001660ff83161761010017905561032e61071f565b61033784610416565b61034083610576565b6103498261062d565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff16905560405160ff821681527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a150505050565b60006103e16103dd60017fe314dfc40f0025322aacc0ba8ef420b62fb3b702cf01e0cdf3d829117ac2ff1b61096e565b5490565b905090565b60006103e16103dd60017f4aaefe95bd84fd3f32700cf3b7566bc944b73138e41958b5785826df2aecace161096e565b61041e6104f5565b73ffffffffffffffffffffffffffffffffffffffff81166104c1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201527f646472657373000000000000000000000000000000000000000000000000000060648201526084016102ec565b6101ec816106a8565b61021a60017fe314dfc40f0025322aacc0ba8ef420b62fb3b702cf01e0cdf3d829117ac2ff1b61096e565b60335473ffffffffffffffffffffffffffffffffffffffff163314610240576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e657260448201526064016102ec565b6105a8816105a560017f4aaefe95bd84fd3f32700cf3b7566bc944b73138e41958b5785826df2aecace161096e565b55565b6000816040516020016105bd91815260200190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152919052905060005b60007f1d2b0bda21d56b8bd12d4f94ebacffdfb35f5e226f84b461103bb8beab6353be8360405161062191906108dd565b60405180910390a35050565b61065c816105a560017fe314dfc40f0025322aacc0ba8ef420b62fb3b702cf01e0cdf3d829117ac2ff1b61096e565b60008160405160200161067191815260200190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152919052905060016105f0565b6033805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff00000000000000000000000000
00000000000000831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a35050565b600054610100900460ff166107b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e6700000000000000000000000000000000000000000060648201526084016102ec565b610240600054610100900460ff16610850576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e6700000000000000000000000000000000000000000060648201526084016102ec565b610240336106a8565b60006020828403121561086b57600080fd5b5035919050565b6000815180845260005b818110156108985760208185018101518683018201520161087c565b818111156108aa576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6020815260006108f06020830184610872565b9392505050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461091b57600080fd5b919050565b60008060006060848603121561093557600080fd5b61093e846108f7565b95602085013595506040909401359392505050565b60006020828403121561096557600080fd5b6108f0826108f7565b6000828210156109a7577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b50039056fea164736f6c634300080f000a"
func init() {
if err := json.Unmarshal([]byte(ProtocolVersionsStorageLayoutJSON), ProtocolVersionsStorageLayout); err != nil {
panic(err)
}
layouts["ProtocolVersions"] = ProtocolVersionsStorageLayout
deployedBytecodes["ProtocolVersions"] = ProtocolVersionsDeployedBin
}
......@@ -8,7 +8,6 @@ import (
"math/big"
"os"
"path/filepath"
"reflect"
"github.com/ethereum/go-ethereum/common"
......@@ -16,6 +15,7 @@ import (
gstate "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-bindings/hardhat"
......@@ -198,6 +198,12 @@ type DeployConfig struct {
// FundDevAccounts configures whether or not to fund the dev accounts. Should only be used
// during devnet deployments.
FundDevAccounts bool `json:"fundDevAccounts"`
// RequiredProtocolVersion indicates the protocol version that
// nodes are required to adopt, to stay in sync with the network.
RequiredProtocolVersion params.ProtocolVersion `json:"requiredProtocolVersion"`
// RecommendedProtocolVersion indicates the protocol version that
// nodes are recommended to adopt, to stay in sync with the network.
RecommendedProtocolVersion params.ProtocolVersion `json:"recommendedProtocolVersion"`
}
// Copy will deeply copy the DeployConfig. This does a JSON roundtrip to copy
......@@ -522,6 +528,8 @@ type L1Deployments struct {
ProxyAdmin common.Address `json:"ProxyAdmin"`
SystemConfig common.Address `json:"SystemConfig"`
SystemConfigProxy common.Address `json:"SystemConfigProxy"`
ProtocolVersions common.Address `json:"ProtocolVersions"`
ProtocolVersionsProxy common.Address `json:"ProtocolVersionsProxy"`
}
// GetName will return the name of the contract given an address.
......
......@@ -89,6 +89,8 @@ func TestL1Deployments(t *testing.T) {
require.NotEqual(t, deployments.ProxyAdmin, common.Address{})
require.NotEqual(t, deployments.SystemConfig, common.Address{})
require.NotEqual(t, deployments.SystemConfigProxy, common.Address{})
require.NotEqual(t, deployments.ProtocolVersions, common.Address{})
require.NotEqual(t, deployments.ProtocolVersionsProxy, common.Address{})
require.Equal(t, "AddressManager", deployments.GetName(deployments.AddressManager))
require.Equal(t, "OptimismPortalProxy", deployments.GetName(deployments.OptimismPortalProxy))
......
......@@ -16,5 +16,7 @@
"OptimismPortalProxy": "0xaC425EECd4Fd8E9E669a62906D99aF89B9951516",
"ProxyAdmin": "0x3218c3b0dC0386BAe83A58E5F908c4b070210b4F",
"SystemConfig": "0x36bAcDD96F28e1ac0780bB9CbE6e20780840730F",
"SystemConfigProxy": "0x14065A373936533A0c88b7986CADabDD62d471e6"
"SystemConfigProxy": "0x14065A373936533A0c88b7986CADabDD62d471e6",
"ProtocolVersions": "0x883C06a27D76B1CEbdf7EB376f5556c355afC8e5",
"ProtocolVersionsProxy": "0x3732a4D4Ab006cA4825822baEA1569A107683fa1"
}
......@@ -67,5 +67,7 @@
"faultGameAbsolutePrestate": "0x0000000000000000000000000000000000000000000000000000000000000000",
"faultGameMaxDepth": 63,
"faultGameMaxDuration": 604800,
"systemConfigStartBlock": 0
"systemConfigStartBlock": 0,
"requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
"recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
......@@ -169,6 +169,26 @@ func TestMaxConcurrency(t *testing.T) {
})
}
func TestPollInterval(t *testing.T) {
t.Run("UsesDefault", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeCannon))
require.Equal(t, config.DefaultPollInterval, cfg.PollInterval)
})
t.Run("Valid", func(t *testing.T) {
expected := 100 * time.Second
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--http-poll-interval", "100s"))
require.Equal(t, expected, cfg.PollInterval)
})
t.Run("Invalid", func(t *testing.T) {
verifyArgsInvalid(
t,
"invalid value \"abc\" for flag -http-poll-interval",
addRequiredArgs(config.TraceTypeAlphabet, "--http-poll-interval", "abc"))
})
}
func TestCannonBin(t *testing.T) {
t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) {
configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-bin"))
......
......@@ -78,6 +78,7 @@ func ValidTraceType(value TraceType) bool {
}
const (
DefaultPollInterval = time.Second * 12
DefaultCannonSnapshotFreq = uint(1_000_000_000)
DefaultCannonInfoFreq = uint(10_000_000)
// DefaultGameWindow is the default maximum time duration in the past
......@@ -98,6 +99,7 @@ type Config struct {
AgreeWithProposedOutput bool // Temporary config if we agree or disagree with the posted output
Datadir string // Data Directory
MaxConcurrency uint // Maximum number of threads to use when progressing games
PollInterval time.Duration // Polling interval for latest-block subscription when using an HTTP RPC provider
TraceType TraceType // Type of trace
......@@ -131,6 +133,7 @@ func NewConfig(
L1EthRpc: l1EthRpc,
GameFactoryAddress: gameFactoryAddress,
MaxConcurrency: uint(runtime.NumCPU()),
PollInterval: DefaultPollInterval,
AgreeWithProposedOutput: agreeWithProposedOutput,
......
......@@ -118,6 +118,13 @@ func TestMaxConcurrency(t *testing.T) {
})
}
func TestHttpPollInterval(t *testing.T) {
t.Run("Default", func(t *testing.T) {
config := validConfig(TraceTypeAlphabet)
require.EqualValues(t, DefaultPollInterval, config.PollInterval)
})
}
func TestCannonL2Required(t *testing.T) {
config := validConfig(TraceTypeCannon)
config.CannonL2 = ""
......
......@@ -70,6 +70,12 @@ var (
EnvVars: prefixEnvVars("MAX_CONCURRENCY"),
Value: uint(runtime.NumCPU()),
}
HTTPPollInterval = &cli.DurationFlag{
Name: "http-poll-interval",
Usage: "Polling interval for latest-block subscription when using an HTTP RPC provider.",
EnvVars: prefixEnvVars("HTTP_POLL_INTERVAL"),
Value: config.DefaultPollInterval,
}
AlphabetFlag = &cli.StringFlag{
Name: "alphabet",
Usage: "Correct Alphabet Trace (alphabet trace type only)",
......@@ -142,6 +148,7 @@ var requiredFlags = []cli.Flag{
// optionalFlags is a list of unchecked cli flags
var optionalFlags = []cli.Flag{
MaxConcurrencyFlag,
HTTPPollInterval,
AlphabetFlag,
GameAllowlistFlag,
CannonNetworkFlag,
......@@ -247,6 +254,7 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) {
GameAllowlist: allowedGames,
GameWindow: ctx.Duration(GameWindowFlag.Name),
MaxConcurrency: maxConcurrency,
PollInterval: ctx.Duration(HTTPPollInterval.Name),
AlphabetTrace: ctx.String(AlphabetFlag.Name),
CannonNetwork: ctx.String(CannonNetworkFlag.Name),
CannonRollupConfigPath: ctx.String(CannonRollupConfigFlag.Name),
......
......@@ -9,6 +9,7 @@ import (
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types"
"github.com/ethereum-optimism/optimism/op-challenger/metrics"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
......@@ -66,7 +67,12 @@ func (a *Agent) Act(ctx context.Context) error {
// Perform the actions
for _, action := range actions {
log := a.log.New("action", action.Type, "is_attack", action.IsAttack, "parent", action.ParentIdx, "value", action.Value)
log := a.log.New("action", action.Type, "is_attack", action.IsAttack, "parent", action.ParentIdx)
if action.Type == types.ActionTypeStep {
log = log.New("prestate", common.Bytes2Hex(action.PreState), "proof", common.Bytes2Hex(action.ProofData))
} else {
log = log.New("value", action.Value)
}
if action.OracleData != nil {
a.log.Info("Updating oracle data", "oracleKey", action.OracleData.OracleKey, "oracleData", action.OracleData.OracleData)
......
......@@ -53,8 +53,6 @@ type CannonTraceProvider struct {
// lastStep stores the last step in the actual trace if known. 0 indicates unknown.
// Cached as an optimisation to avoid repeatedly attempting to execute beyond the end of the trace.
lastStep uint64
// lastProof stores the proof data to use for all steps extended beyond lastStep
lastProof *proofData
}
func NewTraceProvider(ctx context.Context, logger log.Logger, m CannonMetricer, cfg *config.Config, l1Client bind.ContractCaller, dir string, gameAddr common.Address) (*CannonTraceProvider, error) {
......@@ -139,24 +137,19 @@ func (p *CannonTraceProvider) AbsolutePreStateCommitment(ctx context.Context) (c
// loadProof will attempt to load or generate the proof data at the specified index
// If the requested index is beyond the end of the actual trace it is extended with no-op instructions.
func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofData, error) {
if p.lastProof != nil && i > p.lastStep {
// If the requested index is after the last step in the actual trace, extend the final no-op step
return p.lastProof, nil
}
// Attempt to read the last step from disk cache
if p.lastProof == nil && p.lastStep == 0 {
step, err := ReadLastStep(p.dir)
if p.lastStep == 0 {
step, err := readLastStep(p.dir)
if err != nil {
p.logger.Warn("Failed to read last step from disk cache", "err", err)
} else {
p.lastStep = step
// If the last step is tracked, set i to the last step
// to read the correct proof from disk.
if i > p.lastStep {
i = step
}
}
}
// If the last step is tracked, set i to the last step to generate or load the final proof
if p.lastStep != 0 && i > p.lastStep {
i = p.lastStep
}
path := filepath.Join(p.dir, proofsDir, fmt.Sprintf("%d.json.gz", i))
file, err := ioutil.OpenDecompressed(path)
if errors.Is(err, os.ErrNotExist) {
......@@ -183,9 +176,6 @@ func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofDa
if err != nil {
return nil, fmt.Errorf("cannot hash witness: %w", err)
}
if err := WriteLastStep(p.dir, state.Step); err != nil {
p.logger.Warn("Failed to write last step to disk cache", "step", p.lastStep)
}
proof := &proofData{
ClaimValue: witnessHash,
StateData: hexutil.Bytes(witness),
......@@ -194,7 +184,9 @@ func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofDa
OracleValue: nil,
OracleOffset: 0,
}
p.lastProof = proof
if err := writeLastStep(p.dir, proof, p.lastStep); err != nil {
p.logger.Warn("Failed to write last step to disk cache", "step", p.lastStep)
}
return proof, nil
} else {
return nil, fmt.Errorf("expected proof not generated but final state was not exited, requested step %v, final state at step %v", i, state.Step)
......@@ -217,8 +209,8 @@ type diskStateCacheObj struct {
Step uint64 `json:"step"`
}
// ReadLastStep reads the tracked last step from disk.
func ReadLastStep(dir string) (uint64, error) {
// readLastStep reads the tracked last step from disk.
func readLastStep(dir string) (uint64, error) {
state := diskStateCacheObj{}
file, err := ioutil.OpenDecompressed(filepath.Join(dir, diskStateCache))
if err != nil {
......@@ -232,8 +224,15 @@ func ReadLastStep(dir string) (uint64, error) {
return state.Step, nil
}
// WriteLastStep writes the last step to disk as a persistent cache.
func WriteLastStep(dir string, step uint64) error {
// writeLastStep writes the last step and proof to disk as a persistent cache.
func writeLastStep(dir string, proof *proofData, step uint64) error {
state := diskStateCacheObj{Step: step}
return ioutil.WriteCompressedJson(filepath.Join(dir, diskStateCache), state)
lastStepFile := filepath.Join(dir, diskStateCache)
if err := ioutil.WriteCompressedJson(lastStepFile, state); err != nil {
return fmt.Errorf("failed to write last step to %v: %w", lastStepFile, err)
}
if err := ioutil.WriteCompressedJson(filepath.Join(dir, proofsDir, fmt.Sprintf("%d.json.gz", step)), proof); err != nil {
return fmt.Errorf("failed to write proof: %w", err)
}
return nil
}
......@@ -158,20 +158,16 @@ func TestGetStepData(t *testing.T) {
Exited: true,
}
generator.proof = &proofData{
ClaimValue: common.Hash{0xaa},
StateData: []byte{0xbb},
ProofData: []byte{0xcc},
OracleKey: common.Hash{0xdd}.Bytes(),
OracleValue: []byte{0xdd},
OracleOffset: 10,
ClaimValue: common.Hash{0xaa},
StateData: []byte{0xbb},
ProofData: []byte{0xcc},
}
preimage, proof, data, err := provider.GetStepData(context.Background(), 7000)
require.NoError(t, err)
require.Contains(t, generator.generated, 10, "should have tried to generate the proof")
require.Empty(t, generator.generated, "should not have to generate the proof again")
witness := generator.finalState.EncodeWitness()
require.EqualValues(t, witness, preimage)
require.Equal(t, []byte{}, proof)
require.EqualValues(t, initGenerator.finalState.EncodeWitness(), preimage)
require.Empty(t, proof)
require.Nil(t, data)
})
......
......@@ -9,7 +9,12 @@ import (
"github.com/ethereum-optimism/optimism/op-challenger/game/scheduler"
"github.com/ethereum-optimism/optimism/op-service/clock"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
ethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
)
......@@ -32,6 +37,20 @@ type gameMonitor struct {
gameWindow time.Duration
fetchBlockNumber blockNumberFetcher
allowedGames []common.Address
l1HeadsSub ethereum.Subscription
l1Source *headSource
}
type MinimalSubscriber interface {
EthSubscribe(ctx context.Context, channel interface{}, args ...interface{}) (ethereum.Subscription, error)
}
type headSource struct {
inner MinimalSubscriber
}
func (s *headSource) SubscribeNewHead(ctx context.Context, ch chan<- *ethTypes.Header) (ethereum.Subscription, error) {
return s.inner.EthSubscribe(ctx, ch, "newHeads")
}
func newGameMonitor(
......@@ -42,6 +61,7 @@ func newGameMonitor(
gameWindow time.Duration,
fetchBlockNumber blockNumberFetcher,
allowedGames []common.Address,
l1Source MinimalSubscriber,
) *gameMonitor {
return &gameMonitor{
logger: logger,
......@@ -51,6 +71,7 @@ func newGameMonitor(
gameWindow: gameWindow,
fetchBlockNumber: fetchBlockNumber,
allowedGames: allowedGames,
l1Source: &headSource{inner: l1Source},
}
}
......@@ -99,29 +120,32 @@ func (m *gameMonitor) progressGames(ctx context.Context, blockNum uint64) error
return nil
}
func (m *gameMonitor) MonitorGames(ctx context.Context) error {
m.logger.Info("Monitoring fault dispute games")
func (m *gameMonitor) onNewL1Head(ctx context.Context, sig eth.L1BlockRef) {
if err := m.progressGames(ctx, sig.Number); err != nil {
m.logger.Error("Failed to progress games", "err", err)
}
}
func (m *gameMonitor) resubscribeFunction(ctx context.Context) event.ResubscribeErrFunc {
return func(innerCtx context.Context, err error) (event.Subscription, error) {
if err != nil {
m.logger.Warn("resubscribing after failed L1 subscription", "err", err)
}
return eth.WatchHeadChanges(ctx, m.l1Source, m.onNewL1Head)
}
}
blockNum := uint64(0)
func (m *gameMonitor) MonitorGames(ctx context.Context) error {
m.l1HeadsSub = event.ResubscribeErr(time.Second*10, m.resubscribeFunction(ctx))
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
nextBlockNum, err := m.fetchBlockNumber(ctx)
if err != nil {
m.logger.Error("Failed to load current block number", "err", err)
continue
}
if nextBlockNum > blockNum {
blockNum = nextBlockNum
if err := m.progressGames(ctx, nextBlockNum); err != nil {
m.logger.Error("Failed to progress games", "err", err)
}
}
if err := m.clock.SleepCtx(ctx, time.Second); err != nil {
return nil
case err, ok := <-m.l1HeadsSub.Err():
if !ok {
return err
}
m.logger.Error("L1 subscription error", "err", err)
}
}
}
......@@ -2,35 +2,40 @@ package game
import (
"context"
"fmt"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/clock"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/clock"
)
func TestMonitorMinGameTimestamp(t *testing.T) {
t.Parallel()
t.Run("zero game window returns zero", func(t *testing.T) {
monitor, _, _ := setupMonitorTest(t, []common.Address{})
monitor, _, _, _ := setupMonitorTest(t, []common.Address{})
monitor.gameWindow = time.Duration(0)
require.Equal(t, monitor.minGameTimestamp(), uint64(0))
})
t.Run("non-zero game window with zero clock", func(t *testing.T) {
monitor, _, _ := setupMonitorTest(t, []common.Address{})
monitor, _, _, _ := setupMonitorTest(t, []common.Address{})
monitor.gameWindow = time.Minute
monitor.clock = clock.NewDeterministicClock(time.Unix(0, 0))
require.Equal(t, monitor.minGameTimestamp(), uint64(0))
})
t.Run("minimum computed correctly", func(t *testing.T) {
monitor, _, _ := setupMonitorTest(t, []common.Address{})
monitor, _, _, _ := setupMonitorTest(t, []common.Address{})
monitor.gameWindow = time.Minute
frozen := time.Unix(int64(time.Hour.Seconds()), 0)
monitor.clock = clock.NewDeterministicClock(frozen)
......@@ -39,29 +44,95 @@ func TestMonitorMinGameTimestamp(t *testing.T) {
})
}
func TestMonitorExitsWhenContextDone(t *testing.T) {
monitor, _, _ := setupMonitorTest(t, []common.Address{{}})
ctx, cancel := context.WithCancel(context.Background())
cancel()
err := monitor.MonitorGames(ctx)
require.ErrorIs(t, err, context.Canceled)
// TestMonitorGames tests that the monitor can handle a new head event
// and resubscribe to new heads if the subscription errors.
func TestMonitorGames(t *testing.T) {
t.Run("Schedules games", func(t *testing.T) {
addr1 := common.Address{0xaa}
addr2 := common.Address{0xbb}
monitor, source, sched, mockHeadSource := setupMonitorTest(t, []common.Address{})
source.games = []FaultDisputeGame{newFDG(addr1, 9999), newFDG(addr2, 9999)}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
headerNotSent := true
waitErr := wait.For(context.Background(), 100*time.Millisecond, func() (bool, error) {
if len(sched.scheduled) >= 1 {
return true, nil
}
if mockHeadSource.sub == nil {
return false, nil
}
if headerNotSent {
mockHeadSource.sub.headers <- &ethtypes.Header{
Number: big.NewInt(1),
}
headerNotSent = false
}
return false, nil
})
require.NoError(t, waitErr)
mockHeadSource.err = fmt.Errorf("eth subscribe test error")
cancel()
}()
err := monitor.MonitorGames(ctx)
require.NoError(t, err)
require.Len(t, sched.scheduled, 1)
require.Equal(t, []common.Address{addr1, addr2}, sched.scheduled[0])
})
t.Run("Resubscribes on error", func(t *testing.T) {
addr1 := common.Address{0xaa}
addr2 := common.Address{0xbb}
monitor, source, sched, mockHeadSource := setupMonitorTest(t, []common.Address{})
source.games = []FaultDisputeGame{newFDG(addr1, 9999), newFDG(addr2, 9999)}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
headerNotSent := true
waitErr := wait.For(context.Background(), 100*time.Millisecond, func() (bool, error) {
return mockHeadSource.sub != nil, nil
})
require.NoError(t, waitErr)
mockHeadSource.sub.errChan <- fmt.Errorf("test error")
waitErr = wait.For(context.Background(), 100*time.Millisecond, func() (bool, error) {
if len(sched.scheduled) >= 1 {
return true, nil
}
if mockHeadSource.sub == nil {
return false, nil
}
if headerNotSent {
mockHeadSource.sub.headers <- &ethtypes.Header{
Number: big.NewInt(1),
}
headerNotSent = false
}
return false, nil
})
require.NoError(t, waitErr)
mockHeadSource.err = fmt.Errorf("eth subscribe test error")
cancel()
}()
err := monitor.MonitorGames(ctx)
require.NoError(t, err)
require.Len(t, sched.scheduled, 1)
require.Equal(t, []common.Address{addr1, addr2}, sched.scheduled[0])
})
}
func TestMonitorCreateAndProgressGameAgents(t *testing.T) {
monitor, source, sched := setupMonitorTest(t, []common.Address{})
monitor, source, sched, _ := setupMonitorTest(t, []common.Address{})
addr1 := common.Address{0xaa}
addr2 := common.Address{0xbb}
source.games = []FaultDisputeGame{
{
Proxy: addr1,
Timestamp: 9999,
},
{
Proxy: addr2,
Timestamp: 9999,
},
}
source.games = []FaultDisputeGame{newFDG(addr1, 9999), newFDG(addr2, 9999)}
require.NoError(t, monitor.progressGames(context.Background(), uint64(1)))
......@@ -72,18 +143,8 @@ func TestMonitorCreateAndProgressGameAgents(t *testing.T) {
func TestMonitorOnlyScheduleSpecifiedGame(t *testing.T) {
addr1 := common.Address{0xaa}
addr2 := common.Address{0xbb}
monitor, source, sched := setupMonitorTest(t, []common.Address{addr2})
source.games = []FaultDisputeGame{
{
Proxy: addr1,
Timestamp: 9999,
},
{
Proxy: addr2,
Timestamp: 9999,
},
}
monitor, source, sched, _ := setupMonitorTest(t, []common.Address{addr2})
source.games = []FaultDisputeGame{newFDG(addr1, 9999), newFDG(addr2, 9999)}
require.NoError(t, monitor.progressGames(context.Background(), uint64(1)))
......@@ -91,7 +152,17 @@ func TestMonitorOnlyScheduleSpecifiedGame(t *testing.T) {
require.Equal(t, []common.Address{addr2}, sched.scheduled[0])
}
func setupMonitorTest(t *testing.T, allowedGames []common.Address) (*gameMonitor, *stubGameSource, *stubScheduler) {
func newFDG(proxy common.Address, timestamp uint64) FaultDisputeGame {
return FaultDisputeGame{
Proxy: proxy,
Timestamp: timestamp,
}
}
func setupMonitorTest(
t *testing.T,
allowedGames []common.Address,
) (*gameMonitor, *stubGameSource, *stubScheduler, *mockNewHeadSource) {
logger := testlog.Logger(t, log.LvlDebug)
source := &stubGameSource{}
i := uint64(1)
......@@ -100,15 +171,58 @@ func setupMonitorTest(t *testing.T, allowedGames []common.Address) (*gameMonitor
return i, nil
}
sched := &stubScheduler{}
monitor := newGameMonitor(logger, clock.SystemClock, source, sched, time.Duration(0), fetchBlockNum, allowedGames)
return monitor, source, sched
mockHeadSource := &mockNewHeadSource{}
monitor := newGameMonitor(
logger,
clock.SystemClock,
source,
sched,
time.Duration(0),
fetchBlockNum,
allowedGames,
mockHeadSource,
)
return monitor, source, sched, mockHeadSource
}
type mockNewHeadSource struct {
sub *mockSubscription
err error
}
func (m *mockNewHeadSource) EthSubscribe(
ctx context.Context,
ch any,
args ...any,
) (ethereum.Subscription, error) {
errChan := make(chan error)
m.sub = &mockSubscription{errChan, (ch).(chan<- *ethtypes.Header)}
if m.err != nil {
return nil, m.err
}
return m.sub, nil
}
type mockSubscription struct {
errChan chan error
headers chan<- *ethtypes.Header
}
func (m *mockSubscription) Unsubscribe() {}
func (m *mockSubscription) Err() <-chan error {
return m.errChan
}
type stubGameSource struct {
games []FaultDisputeGame
}
func (s *stubGameSource) FetchAllGamesAtBlock(ctx context.Context, earliest uint64, blockNumber *big.Int) ([]FaultDisputeGame, error) {
func (s *stubGameSource) FetchAllGamesAtBlock(
ctx context.Context,
earliest uint64,
blockNumber *big.Int,
) ([]FaultDisputeGame, error) {
return s.games, nil
}
......
......@@ -10,6 +10,7 @@ import (
"github.com/ethereum-optimism/optimism/op-challenger/game/scheduler"
"github.com/ethereum-optimism/optimism/op-challenger/metrics"
"github.com/ethereum-optimism/optimism/op-challenger/version"
opClient "github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/clock"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
......@@ -34,7 +35,7 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*Se
return nil, fmt.Errorf("failed to create the transaction manager: %w", err)
}
client, err := client.DialEthClientWithTimeout(client.DefaultDialTimeout, logger, cfg.L1EthRpc)
l1Client, err := client.DialEthClientWithTimeout(client.DefaultDialTimeout, logger, cfg.L1EthRpc)
if err != nil {
return nil, fmt.Errorf("failed to dial L1: %w", err)
}
......@@ -57,10 +58,10 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*Se
logger.Error("error starting metrics server", "err", err)
}
}()
m.StartBalanceMetrics(ctx, logger, client, txMgr.From())
m.StartBalanceMetrics(ctx, logger, l1Client, txMgr.From())
}
factory, err := bindings.NewDisputeGameFactory(cfg.GameFactoryAddress, client)
factory, err := bindings.NewDisputeGameFactory(cfg.GameFactoryAddress, l1Client)
if err != nil {
return nil, fmt.Errorf("failed to bind the fault dispute game factory contract: %w", err)
}
......@@ -73,10 +74,14 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*Se
disk,
cfg.MaxConcurrency,
func(addr common.Address, dir string) (scheduler.GamePlayer, error) {
return fault.NewGamePlayer(ctx, logger, m, cfg, dir, addr, txMgr, client)
return fault.NewGamePlayer(ctx, logger, m, cfg, dir, addr, txMgr, l1Client)
})
monitor := newGameMonitor(logger, cl, loader, sched, cfg.GameWindow, client.BlockNumber, cfg.GameAllowlist)
pollClient, err := opClient.NewRPCWithClient(ctx, logger, cfg.L1EthRpc, opClient.NewBaseRPCClient(l1Client.Client()), cfg.PollInterval)
if err != nil {
return nil, fmt.Errorf("failed to create RPC client: %w", err)
}
monitor := newGameMonitor(logger, cl, loader, sched, cfg.GameWindow, l1Client.BlockNumber, cfg.GameAllowlist, pollClient)
m.RecordInfo(version.SimpleWithMeta)
m.RecordUp()
......
......@@ -145,6 +145,10 @@ func NewChallengerConfig(t *testing.T, l1Endpoint string, options ...Option) *co
_, err := os.Stat(cfg.CannonAbsolutePreState)
require.NoError(t, err, "cannon pre-state should be built. Make sure you've run make cannon-prestate")
}
if cfg.PollInterval == 0 {
cfg.PollInterval = time.Second
}
return &cfg
}
......
......@@ -65,7 +65,7 @@ func (g *FaultGameHelper) MaxDepth(ctx context.Context) int64 {
}
func (g *FaultGameHelper) waitForClaim(ctx context.Context, errorMsg string, predicate func(claim ContractClaim) bool) {
timedCtx, cancel := context.WithTimeout(ctx, time.Minute)
timedCtx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
err := wait.For(timedCtx, time.Second, func() (bool, error) {
count, err := g.game.ClaimDataLen(&bind.CallOpts{Context: timedCtx})
......@@ -89,6 +89,31 @@ func (g *FaultGameHelper) waitForClaim(ctx context.Context, errorMsg string, pre
}
}
func (g *FaultGameHelper) waitForNoClaim(ctx context.Context, errorMsg string, predicate func(claim ContractClaim) bool) {
timedCtx, cancel := context.WithTimeout(ctx, 3*time.Minute)
defer cancel()
err := wait.For(timedCtx, time.Second, func() (bool, error) {
count, err := g.game.ClaimDataLen(&bind.CallOpts{Context: timedCtx})
if err != nil {
return false, fmt.Errorf("retrieve number of claims: %w", err)
}
// Search backwards because the new claims are at the end and more likely the ones we will fail on.
for i := count.Int64() - 1; i >= 0; i-- {
claimData, err := g.game.ClaimData(&bind.CallOpts{Context: timedCtx}, big.NewInt(i))
if err != nil {
return false, fmt.Errorf("retrieve claim %v: %w", i, err)
}
if predicate(claimData) {
return false, nil
}
}
return true, nil
})
if err != nil { // Avoid waiting time capturing game data when there's no error
g.require.NoErrorf(err, "%v\n%v", errorMsg, g.gameData(ctx))
}
}
func (g *FaultGameHelper) GetClaimValue(ctx context.Context, claimIdx int64) common.Hash {
g.WaitForClaimCount(ctx, claimIdx+1)
claim := g.getClaim(ctx, claimIdx)
......@@ -105,6 +130,16 @@ func (g *FaultGameHelper) getClaim(ctx context.Context, claimIdx int64) Contract
return claimData
}
func (g *FaultGameHelper) WaitForClaimAtDepth(ctx context.Context, depth int) {
g.waitForClaim(
ctx,
fmt.Sprintf("Could not find claim depth %v", depth),
func(claim ContractClaim) bool {
pos := types.NewPositionFromGIndex(claim.Position.Uint64())
return pos.Depth() == depth
})
}
func (g *FaultGameHelper) WaitForClaimAtMaxDepth(ctx context.Context, countered bool) {
maxDepth := g.MaxDepth(ctx)
g.waitForClaim(
......@@ -116,6 +151,15 @@ func (g *FaultGameHelper) WaitForClaimAtMaxDepth(ctx context.Context, countered
})
}
func (g *FaultGameHelper) WaitForAllClaimsCountered(ctx context.Context) {
g.waitForNoClaim(
ctx,
"Did not find all claims countered",
func(claim ContractClaim) bool {
return !claim.Countered
})
}
func (g *FaultGameHelper) Resolve(ctx context.Context) {
ctx, cancel := context.WithTimeout(ctx, time.Minute)
defer cancel()
......
......@@ -100,7 +100,7 @@ func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet s
l2BlockNumber := h.waitForProposals(ctx)
l1Head := h.checkpointL1Block(ctx)
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
trace := alphabet.NewTraceProvider(claimedAlphabet, alphabetGameDepth)
......
......@@ -341,6 +341,66 @@ func TestCannonProposedOutputRootInvalid(t *testing.T) {
}
}
func TestCannonPoisonedPostState(t *testing.T) {
t.Skip("Known failure case")
InitParallel(t)
ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t)
t.Cleanup(sys.Close)
l1Endpoint := sys.NodeEndpoint("l1")
l2Endpoint := sys.NodeEndpoint("sequencer")
disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys.cfg.L1Deployments, l1Client)
game, correctTrace := disputeGameFactory.StartCannonGameWithCorrectRoot(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint,
challenger.WithPrivKey(sys.cfg.Secrets.Mallory),
)
require.NotNil(t, game)
game.LogGameData(ctx)
// Honest first attack at "honest" level
correctTrace.Attack(ctx, 0)
// Honest defense at "dishonest" level
correctTrace.Defend(ctx, 1)
// Dishonest attack at "honest" level - honest move would be to defend
game.Attack(ctx, 2, common.Hash{0x03, 0xaa})
// Start the honest challenger
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Honest",
// Agree with the proposed output, so disagree with the root claim
challenger.WithAgreeProposedOutput(true),
challenger.WithPrivKey(sys.cfg.Secrets.Bob),
)
// Start dishonest challenger that posts correct claims
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "DishonestCorrect",
// Disagree with the proposed output, so agree with the root claim
challenger.WithAgreeProposedOutput(false),
challenger.WithPrivKey(sys.cfg.Secrets.Mallory),
)
// Give the challengers time to progress down the full game depth
depth := game.MaxDepth(ctx)
for i := 3; i <= int(depth); i++ {
game.WaitForClaimAtDepth(ctx, i)
game.LogGameData(ctx)
}
// Wait for all the leaf nodes to be countered
// Wait for the challengers to drive the game down to the leaf node which should be countered
game.WaitForAllClaimsCountered(ctx)
// Time travel past when the game will be resolvable.
sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx))
require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, disputegame.StatusChallengerWins)
game.LogGameData(ctx)
}
// setupDisputeGameForInvalidOutputRoot sets up an L2 chain with at least one valid output root followed by an invalid output root.
// A cannon dispute game is started to dispute the invalid output root with the correct root claim provided.
// An honest challenger is run to defend the root claim (ie disagree with the invalid output root).
......
......@@ -14,8 +14,8 @@ func TestTxGossip(t *testing.T) {
gethOpts := []geth.GethOption{
geth.WithP2P(),
}
cfg.GethOptions["sequencer"] = gethOpts
cfg.GethOptions["verifier"] = gethOpts
cfg.GethOptions["sequencer"] = append(cfg.GethOptions["sequencer"], gethOpts...)
cfg.GethOptions["verifier"] = append(cfg.GethOptions["verifier"], gethOpts...)
sys, err := cfg.Start(t)
require.NoError(t, err, "Start system")
......
......@@ -408,16 +408,17 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
L2Time: uint64(cfg.DeployConfig.L1GenesisBlockTimestamp),
SystemConfig: e2eutils.SystemConfigFromDeployConfig(cfg.DeployConfig),
},
BlockTime: cfg.DeployConfig.L2BlockTime,
MaxSequencerDrift: cfg.DeployConfig.MaxSequencerDrift,
SeqWindowSize: cfg.DeployConfig.SequencerWindowSize,
ChannelTimeout: cfg.DeployConfig.ChannelTimeout,
L1ChainID: cfg.L1ChainIDBig(),
L2ChainID: cfg.L2ChainIDBig(),
BatchInboxAddress: cfg.DeployConfig.BatchInboxAddress,
DepositContractAddress: cfg.DeployConfig.OptimismPortalProxy,
L1SystemConfigAddress: cfg.DeployConfig.SystemConfigProxy,
RegolithTime: cfg.DeployConfig.RegolithTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
BlockTime: cfg.DeployConfig.L2BlockTime,
MaxSequencerDrift: cfg.DeployConfig.MaxSequencerDrift,
SeqWindowSize: cfg.DeployConfig.SequencerWindowSize,
ChannelTimeout: cfg.DeployConfig.ChannelTimeout,
L1ChainID: cfg.L1ChainIDBig(),
L2ChainID: cfg.L2ChainIDBig(),
BatchInboxAddress: cfg.DeployConfig.BatchInboxAddress,
DepositContractAddress: cfg.DeployConfig.OptimismPortalProxy,
L1SystemConfigAddress: cfg.DeployConfig.SystemConfigProxy,
RegolithTime: cfg.DeployConfig.RegolithTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
ProtocolVersionsAddress: cfg.L1Deployments.ProtocolVersionsProxy,
}
}
defaultConfig := makeRollupConfig()
......
......@@ -2,6 +2,7 @@ package op_e2e
import (
"context"
"errors"
"fmt"
"math/big"
"os"
......@@ -261,20 +262,20 @@ func TestPendingGasLimit(t *testing.T) {
// configure the L2 gas limit to be high, and the pending gas limits to be lower for resource saving.
cfg.DeployConfig.L2GenesisBlockGasLimit = 30_000_000
cfg.GethOptions["sequencer"] = []geth.GethOption{
cfg.GethOptions["sequencer"] = append(cfg.GethOptions["sequencer"], []geth.GethOption{
func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error {
ethCfg.Miner.GasCeil = 10_000_000
ethCfg.Miner.RollupComputePendingBlock = true
return nil
},
}
cfg.GethOptions["verifier"] = []geth.GethOption{
}...)
cfg.GethOptions["verifier"] = append(cfg.GethOptions["verifier"], []geth.GethOption{
func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error {
ethCfg.Miner.GasCeil = 9_000_000
ethCfg.Miner.RollupComputePendingBlock = true
return nil
},
}
}...)
sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system")
......@@ -1434,3 +1435,118 @@ func TestRuntimeConfigReload(t *testing.T) {
})
require.NoError(t, err)
}
func TestRecommendedProtocolVersionChange(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
require.NotEqual(t, common.Address{}, cfg.L1Deployments.ProtocolVersions, "need ProtocolVersions contract deployment")
// to speed up the test, make it reload the config more often, and do not impose a long conf depth
cfg.Nodes["verifier"].RuntimeConfigReloadInterval = time.Second * 5
cfg.Nodes["verifier"].Driver.VerifierConfDepth = 1
sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system")
defer sys.Close()
runtimeConfig := sys.RollupNodes["verifier"].RuntimeConfig()
// Change the superchain-config via L1
l1 := sys.Clients["l1"]
_, build, major, minor, patch, preRelease := params.OPStackSupport.Parse()
newRecommendedProtocolVersion := params.ProtocolVersionV0{Build: build, Major: major + 1, Minor: minor, Patch: patch, PreRelease: preRelease}.Encode()
require.NotEqual(t, runtimeConfig.RecommendedProtocolVersion(), newRecommendedProtocolVersion, "changing to a different protocol version")
protVersions, err := bindings.NewProtocolVersions(cfg.L1Deployments.ProtocolVersionsProxy, l1)
require.NoError(t, err)
// ProtocolVersions contract is owned by same key as SystemConfig in devnet
opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.SysCfgOwner, cfg.L1ChainIDBig())
require.NoError(t, err)
// Change recommended protocol version
tx, err := protVersions.SetRecommended(opts, new(big.Int).SetBytes(newRecommendedProtocolVersion[:]))
require.NoError(t, err)
// wait for the change to confirm
_, err = wait.ForReceiptOK(context.Background(), l1, tx.Hash())
require.NoError(t, err)
// wait for the recommended protocol version to change
_, err = retry.Do(context.Background(), 10, retry.Fixed(time.Second*10), func() (struct{}, error) {
v := sys.RollupNodes["verifier"].RuntimeConfig().RecommendedProtocolVersion()
if v == newRecommendedProtocolVersion {
return struct{}{}, nil
}
return struct{}{}, fmt.Errorf("no change yet, seeing %s but looking for %s", v, newRecommendedProtocolVersion)
})
require.NoError(t, err)
}
func TestRequiredProtocolVersionChangeAndHalt(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
// to speed up the test, make it reload the config more often, and do not impose a long conf depth
cfg.Nodes["verifier"].RuntimeConfigReloadInterval = time.Second * 5
cfg.Nodes["verifier"].Driver.VerifierConfDepth = 1
// configure halt in verifier op-node
cfg.Nodes["verifier"].RollupHalt = "major"
// configure halt in verifier op-geth node
cfg.GethOptions["verifier"] = append(cfg.GethOptions["verifier"], []geth.GethOption{
func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error {
ethCfg.RollupHaltOnIncompatibleProtocolVersion = "major"
return nil
},
}...)
sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system")
defer sys.Close()
runtimeConfig := sys.RollupNodes["verifier"].RuntimeConfig()
// Change the superchain-config via L1
l1 := sys.Clients["l1"]
_, build, major, minor, patch, preRelease := params.OPStackSupport.Parse()
newRequiredProtocolVersion := params.ProtocolVersionV0{Build: build, Major: major + 1, Minor: minor, Patch: patch, PreRelease: preRelease}.Encode()
require.NotEqual(t, runtimeConfig.RequiredProtocolVersion(), newRequiredProtocolVersion, "changing to a different protocol version")
protVersions, err := bindings.NewProtocolVersions(cfg.L1Deployments.ProtocolVersionsProxy, l1)
require.NoError(t, err)
// ProtocolVersions contract is owned by same key as SystemConfig in devnet
opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.SysCfgOwner, cfg.L1ChainIDBig())
require.NoError(t, err)
// Change required protocol version
tx, err := protVersions.SetRequired(opts, new(big.Int).SetBytes(newRequiredProtocolVersion[:]))
require.NoError(t, err)
// wait for the change to confirm
_, err = wait.ForReceiptOK(context.Background(), l1, tx.Hash())
require.NoError(t, err)
// wait for the required protocol version to take effect by halting the verifier that opted in, and halting the op-geth node that opted in.
_, err = retry.Do(context.Background(), 10, retry.Fixed(time.Second*10), func() (struct{}, error) {
if !sys.RollupNodes["verifier"].Closed() {
return struct{}{}, errors.New("verifier rollup node is not closed yet")
}
return struct{}{}, nil
})
require.NoError(t, err)
t.Log("verified that op-node closed!")
// Checking if the engine is down is not trivial in op-e2e.
// In op-geth we have halting tests covering the Engine API, in op-e2e we instead check if the API stops.
_, err = retry.Do(context.Background(), 10, retry.Fixed(time.Second*10), func() (struct{}, error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
_, err := sys.Clients["verifier"].ChainID(ctx)
cancel()
if err != nil && !errors.Is(err, ctx.Err()) { // waiting for client to stop responding to chainID requests
return struct{}{}, nil
}
return struct{}{}, errors.New("verifier rollup node is not closed yet")
})
require.NoError(t, err)
t.Log("verified that op-geth closed!")
}
......@@ -80,9 +80,11 @@ func NewRPC(ctx context.Context, lgr log.Logger, addr string, opts ...RPCOption)
return nil, fmt.Errorf("rpc option %d failed to apply to RPC config: %w", i, err)
}
}
if cfg.backoffAttempts < 1 { // default to at least 1 attempt, or it always fails to dial.
cfg.backoffAttempts = 1
}
underlying, err := dialRPCClientWithBackoff(ctx, lgr, addr, cfg.backoffAttempts, cfg.gethRPCOptions...)
if err != nil {
return nil, err
......@@ -94,11 +96,15 @@ func NewRPC(ctx context.Context, lgr log.Logger, addr string, opts ...RPCOption)
wrapped = NewRateLimitingClient(wrapped, rate.Limit(cfg.limit), cfg.burst)
}
return NewRPCWithClient(ctx, lgr, addr, wrapped, cfg.httpPollInterval)
}
// NewRPCWithClient builds a new polling client with the given underlying RPC client.
func NewRPCWithClient(ctx context.Context, lgr log.Logger, addr string, underlying RPC, pollInterval time.Duration) (RPC, error) {
if httpRegex.MatchString(addr) {
wrapped = NewPollingClient(ctx, lgr, wrapped, WithPollRate(cfg.httpPollInterval))
underlying = NewPollingClient(ctx, lgr, underlying, WithPollRate(pollInterval))
}
return wrapped, nil
return underlying, nil
}
// Dials a JSON-RPC endpoint repeatedly, with a backoff, until a client connection is established. Auth is optional.
......
......@@ -244,6 +244,18 @@ var (
EnvVars: prefixEnvVars("BETA_EXTRA_NETWORKS"),
Hidden: true,
}
BetaRollupHalt = &cli.StringFlag{
Name: "beta.rollup.halt",
Usage: "Beta feature: opt-in option to halt on incompatible protocol version requirements of the given level (major/minor/patch/none), as signaled onchain in L1",
EnvVars: prefixEnvVars("BETA_ROLLUP_HALT"),
Hidden: true,
}
BetaRollupLoadProtocolVersions = &cli.BoolFlag{
Name: "beta.rollup.load-protocol-versions",
Usage: "Beta feature: load protocol versions from the superchain L1 ProtocolVersions contract (if available), and report in logs and metrics",
EnvVars: prefixEnvVars("BETA_ROLLUP_LOAD_PROTOCOL_VERSIONS"),
Hidden: true,
}
)
var requiredFlags = []cli.Flag{
......@@ -286,6 +298,8 @@ var optionalFlags = []cli.Flag{
L2EngineSyncEnabled,
SkipSyncStartCheck,
BetaExtraNetworks,
BetaRollupHalt,
BetaRollupLoadProtocolVersions,
}
// Flags contains the list of configuration options available to the binary.
......
......@@ -10,6 +10,8 @@ import (
"strconv"
"time"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum-optimism/optimism/op-node/p2p/store"
ophttp "github.com/ethereum-optimism/optimism/op-service/httputil"
"github.com/ethereum-optimism/optimism/op-service/metrics"
......@@ -78,6 +80,7 @@ type Metricer interface {
RecordIPUnban()
RecordDial(allow bool)
RecordAccept(allow bool)
ReportProtocolVersions(local, engine, recommended, required params.ProtocolVersion)
}
// Metrics tracks all the metrics for the op-node.
......@@ -153,6 +156,12 @@ type Metrics struct {
ChannelInputBytes prometheus.Counter
// Protocol version reporting
// Delta = params.ProtocolVersionComparison
ProtocolVersionDelta *prometheus.GaugeVec
// ProtocolVersions is a pseudo-metric to report the exact protocol version info
ProtocolVersions *prometheus.GaugeVec
registry *prometheus.Registry
factory metrics.Factory
}
......@@ -452,6 +461,24 @@ func NewMetrics(procName string) *Metrics {
Help: "Number of sequencer block sealing jobs",
}),
ProtocolVersionDelta: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "protocol_version_delta",
Help: "Difference between local and global protocol version, and execution-engine, per type of version",
}, []string{
"type",
}),
ProtocolVersions: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "protocol_versions",
Help: "Pseudo-metric tracking recommended and required protocol version info",
}, []string{
"local",
"engine",
"recommended",
"required",
}),
registry: registry,
factory: factory,
}
......@@ -747,6 +774,13 @@ func (m *Metrics) RecordAccept(allow bool) {
m.Accepts.WithLabelValues("false").Inc()
}
}
func (m *Metrics) ReportProtocolVersions(local, engine, recommended, required params.ProtocolVersion) {
m.ProtocolVersionDelta.WithLabelValues("local_recommended").Set(float64(local.Compare(recommended)))
m.ProtocolVersionDelta.WithLabelValues("local_required").Set(float64(local.Compare(required)))
m.ProtocolVersionDelta.WithLabelValues("engine_recommended").Set(float64(engine.Compare(recommended)))
m.ProtocolVersionDelta.WithLabelValues("engine_required").Set(float64(engine.Compare(required)))
m.ProtocolVersions.WithLabelValues(local.String(), engine.String(), recommended.String(), required.String()).Set(1)
}
type noopMetricer struct{}
......@@ -874,3 +908,5 @@ func (n *noopMetricer) RecordDial(allow bool) {
func (n *noopMetricer) RecordAccept(allow bool) {
}
func (n *noopMetricer) ReportProtocolVersions(local, engine, recommended, required params.ProtocolVersion) {
}
......@@ -42,7 +42,7 @@ type Config struct {
ConfigPersistence ConfigPersistence
// RuntimeConfigReloadInterval defines the interval between runtime config reloads.
// Disabled if 0.
// Disabled if <= 0.
// Runtime config changes should be picked up from log-events,
// but if log-events are not coming in (e.g. not syncing blocks) then the reload ensures the config stays accurate.
RuntimeConfigReloadInterval time.Duration
......@@ -52,6 +52,10 @@ type Config struct {
Heartbeat HeartbeatConfig
Sync sync.Config
// To halt when detecting the node does not support a signaled protocol version
// change of the given severity (major/minor/patch). Disabled if empty.
RollupHalt string
}
type RPCConfig struct {
......@@ -128,5 +132,8 @@ func (cfg *Config) Check() error {
return fmt.Errorf("p2p config error: %w", err)
}
}
if !(cfg.RollupHalt == "" || cfg.RollupHalt == "major" || cfg.RollupHalt == "minor" || cfg.RollupHalt == "patch") {
return fmt.Errorf("invalid rollup halting option: %q", cfg.RollupHalt)
}
return nil
}
......@@ -2,7 +2,9 @@ package node
import (
"context"
"errors"
"fmt"
"sync/atomic"
"time"
"github.com/hashicorp/go-multierror"
......@@ -40,10 +42,14 @@ type OpNode struct {
tracer Tracer // tracer to get events for testing/debugging
runCfg *RuntimeConfig // runtime configurables
rollupHalt string // when to halt the rollup, disabled if empty
// some resources cannot be stopped directly, like the p2p gossipsub router (not our design),
// and depend on this ctx to be closed.
resourcesCtx context.Context
resourcesClose context.CancelFunc
closed atomic.Bool
}
// The OpNode handles incoming gossip
......@@ -58,6 +64,7 @@ func New(ctx context.Context, cfg *Config, log log.Logger, snapshotLog log.Logge
log: log,
appVersion: appVersion,
metrics: m,
rollupHalt: cfg.RollupHalt,
}
// not a context leak, gossipsub is closed with a context.
n.resourcesCtx, n.resourcesClose = context.WithCancel(context.Background())
......@@ -189,6 +196,9 @@ func (n *OpNode) initRuntimeConfig(ctx context.Context, cfg *Config) error {
n.log.Error("failed to fetch runtime config data", "err", err)
return l1Head, err
}
n.handleProtocolVersionsUpdate(ctx)
return l1Head, nil
}
......@@ -446,6 +456,10 @@ func (n *OpNode) RuntimeConfig() ReadonlyRuntimeConfig {
// Close closes all resources.
func (n *OpNode) Close() error {
if n.closed.Load() {
return errors.New("node is already closed")
}
var result *multierror.Error
if n.server != nil {
......@@ -494,9 +508,18 @@ func (n *OpNode) Close() error {
if n.l1Source != nil {
n.l1Source.Close()
}
if result == nil { // mark as closed if we successfully fully closed
n.closed.Store(true)
}
return result.ErrorOrNil()
}
func (n *OpNode) Closed() bool {
return n.closed.Load()
}
func (n *OpNode) ListenAddr() string {
return n.server.listenAddr.String()
}
......
......@@ -7,6 +7,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum-optimism/optimism/op-node/p2p"
"github.com/ethereum-optimism/optimism/op-node/rollup"
......@@ -17,6 +18,14 @@ var (
// UnsafeBlockSignerAddressSystemConfigStorageSlot is the storage slot identifier of the unsafeBlockSigner
// `address` storage value in the SystemConfig L1 contract. Computed as `keccak256("systemconfig.unsafeblocksigner")`
UnsafeBlockSignerAddressSystemConfigStorageSlot = common.HexToHash("0x65a7ed542fb37fe237fdfbdd70b31598523fe5b32879e307bae27a0bd9581c08")
// RequiredProtocolVersionStorageSlot is the storage slot that the required protocol version is stored at.
// Computed as: `bytes32(uint256(keccak256("protocolversion.required")) - 1)`
RequiredProtocolVersionStorageSlot = common.HexToHash("0x4aaefe95bd84fd3f32700cf3b7566bc944b73138e41958b5785826df2aecace0")
// RecommendedProtocolVersionStorageSlot is the storage slot that the recommended protocol version is stored at.
// Computed as: `bytes32(uint256(keccak256("protocolversion.recommended")) - 1)`
RecommendedProtocolVersionStorageSlot = common.HexToHash("0xe314dfc40f0025322aacc0ba8ef420b62fb3b702cf01e0cdf3d829117ac2ff1a")
)
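As a sanity check, the two slot constants above follow directly from the `Computed as` formulas in their comments. Below is a minimal, hypothetical sketch (names are illustrative, not part of the change set) that reproduces them with go-ethereum's crypto package:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// slotFor reproduces bytes32(uint256(keccak256(name)) - 1) from the comments above.
func slotFor(name string) common.Hash {
	n := new(big.Int).SetBytes(crypto.Keccak256([]byte(name)))
	n.Sub(n, big.NewInt(1))
	return common.BigToHash(n)
}

func main() {
	// Expected: 0x4aaefe95bd84fd3f32700cf3b7566bc944b73138e41958b5785826df2aecace0
	fmt.Println(slotFor("protocolversion.required"))
	// Expected: 0xe314dfc40f0025322aacc0ba8ef420b62fb3b702cf01e0cdf3d829117ac2ff1a
	fmt.Println(slotFor("protocolversion.recommended"))
}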
type RuntimeCfgL1Source interface {
......@@ -25,6 +34,8 @@ type RuntimeCfgL1Source interface {
type ReadonlyRuntimeConfig interface {
P2PSequencerAddress() common.Address
RequiredProtocolVersion() params.ProtocolVersion
RecommendedProtocolVersion() params.ProtocolVersion
}
// RuntimeConfig maintains runtime-configurable options.
......@@ -49,6 +60,10 @@ type RuntimeConfig struct {
// runtimeConfigData is a flat bundle of configurable data, easy and light to copy around.
type runtimeConfigData struct {
p2pBlockSignerAddr common.Address
// superchain protocol version signals
recommended params.ProtocolVersion
required params.ProtocolVersion
}
var _ p2p.GossipRuntimeConfig = (*RuntimeConfig)(nil)
......@@ -67,18 +82,46 @@ func (r *RuntimeConfig) P2PSequencerAddress() common.Address {
return r.p2pBlockSignerAddr
}
func (r *RuntimeConfig) RequiredProtocolVersion() params.ProtocolVersion {
r.mu.RLock()
defer r.mu.RUnlock()
return r.required
}
func (r *RuntimeConfig) RecommendedProtocolVersion() params.ProtocolVersion {
r.mu.RLock()
defer r.mu.RUnlock()
return r.recommended
}
// Load resets the runtime configuration by fetching the latest config data from L1 at the given L1 block.
// Load is safe to call concurrently, but will lock the runtime configuration modifications only,
// and will thus not block other Load calls with possibly alternative L1 block views.
func (r *RuntimeConfig) Load(ctx context.Context, l1Ref eth.L1BlockRef) error {
val, err := r.l1Client.ReadStorageAt(ctx, r.rollupCfg.L1SystemConfigAddress, UnsafeBlockSignerAddressSystemConfigStorageSlot, l1Ref.Hash)
p2pSignerVal, err := r.l1Client.ReadStorageAt(ctx, r.rollupCfg.L1SystemConfigAddress, UnsafeBlockSignerAddressSystemConfigStorageSlot, l1Ref.Hash)
if err != nil {
return fmt.Errorf("failed to fetch unsafe block signing address from system config: %w", err)
}
// The superchain protocol version data is optional; only applicable to rollup configs that specify a ProtocolVersions address.
var requiredProtVersion, recommendedProtoVersion params.ProtocolVersion
if r.rollupCfg.ProtocolVersionsAddress != (common.Address{}) {
requiredVal, err := r.l1Client.ReadStorageAt(ctx, r.rollupCfg.ProtocolVersionsAddress, RequiredProtocolVersionStorageSlot, l1Ref.Hash)
if err != nil {
return fmt.Errorf("required-protocol-version value failed to load from L1 contract: %w", err)
}
requiredProtVersion = params.ProtocolVersion(requiredVal)
recommendedVal, err := r.l1Client.ReadStorageAt(ctx, r.rollupCfg.ProtocolVersionsAddress, RecommendedProtocolVersionStorageSlot, l1Ref.Hash)
if err != nil {
return fmt.Errorf("recommended-protocol-version value failed to load from L1 contract: %w", err)
}
recommendedProtoVersion = params.ProtocolVersion(recommendedVal)
}
r.mu.Lock()
defer r.mu.Unlock()
r.l1Ref = l1Ref
r.p2pBlockSignerAddr = common.BytesToAddress(val[:])
r.p2pBlockSignerAddr = common.BytesToAddress(p2pSignerVal[:])
r.required = requiredProtVersion
r.recommended = recommendedProtoVersion
r.log.Info("loaded new runtime config values!", "p2p_seq_address", r.p2pBlockSignerAddr)
return nil
}
package node
import (
"context"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/params"
)
func (n *OpNode) handleProtocolVersionsUpdate(ctx context.Context) {
recommended := n.runCfg.RecommendedProtocolVersion()
required := n.runCfg.RequiredProtocolVersion()
// if the protocol version sources are disabled we do not process them
if recommended == (params.ProtocolVersion{}) && required == (params.ProtocolVersion{}) {
return
}
local := rollup.OPStackSupport
// forward to execution engine, and get back the protocol version that op-geth supports
engineSupport, err := n.l2Source.SignalSuperchainV1(ctx, recommended, required)
if err != nil {
n.log.Warn("failed to notify engine of protocol version", "err", err)
// engineSupport may still be available, or otherwise zero to signal as unknown
} else {
catalyst.LogProtocolVersionSupport(n.log.New("node", "op-node"), engineSupport, recommended, "recommended")
catalyst.LogProtocolVersionSupport(n.log.New("node", "op-node"), engineSupport, required, "required")
}
n.metrics.ReportProtocolVersions(local, engineSupport, recommended, required)
catalyst.LogProtocolVersionSupport(n.log.New("node", "engine"), local, recommended, "recommended")
catalyst.LogProtocolVersionSupport(n.log.New("node", "engine"), local, required, "required")
// We may need to halt the node, if the user opted in to handling incompatible protocol-version signals
n.HaltMaybe()
}
// HaltMaybe halts the rollup node if the runtime config indicates an incompatible required protocol change
// and the node is configured to opt-in to halting at this protocol-change level.
func (n *OpNode) HaltMaybe() {
local := rollup.OPStackSupport
required := n.runCfg.RequiredProtocolVersion()
if haltMaybe(n.rollupHalt, local.Compare(required)) { // halt if we opted in to do so at this granularity
n.log.Error("Opted to halt, unprepared for protocol change", "required", required, "local", local)
if err := n.Close(); err != nil {
n.log.Error("Failed to halt rollup", "err", err)
}
}
}
// haltMaybe returns true when we should halt, given the halt-option and required-version comparison
func haltMaybe(haltOption string, reqCmp params.ProtocolVersionComparison) bool {
var needLevel int
switch haltOption {
case "major":
needLevel = 3
case "minor":
needLevel = 2
case "patch":
needLevel = 1
default:
return false // do not consider halting if not configured to
}
haveLevel := 0
switch reqCmp {
case params.OutdatedMajor:
haveLevel = 3
case params.OutdatedMinor:
haveLevel = 2
case params.OutdatedPatch:
haveLevel = 1
}
return haveLevel >= needLevel
}
package node
import (
"testing"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices"
"github.com/ethereum/go-ethereum/params"
)
func TestHaltMaybe(t *testing.T) {
haltTest := func(opt string, halts ...params.ProtocolVersionComparison) {
t.Run(opt, func(t *testing.T) {
for _, h := range []params.ProtocolVersionComparison{
params.AheadMajor,
params.OutdatedMajor,
params.AheadMinor,
params.OutdatedMinor,
params.AheadPatch,
params.OutdatedPatch,
params.AheadPrerelease,
params.OutdatedPrerelease,
params.Matching,
params.DiffVersionType,
params.DiffBuild,
params.EmptyVersion,
} {
expectedHalt := slices.Contains(halts, h)
gotHalt := haltMaybe(opt, h)
require.Equal(t, expectedHalt, gotHalt, "%s %d", opt, h)
}
})
}
haltTest("")
haltTest("major", params.OutdatedMajor)
haltTest("minor", params.OutdatedMajor, params.OutdatedMinor)
haltTest("patch", params.OutdatedMajor, params.OutdatedMinor, params.OutdatedPatch)
}
......@@ -81,6 +81,7 @@ type SequencerIface interface {
PlanNextSequencerAction() time.Duration
RunNextSequencerAction(ctx context.Context) (*eth.ExecutionPayload, error)
BuildingOnto() eth.L2BlockRef
CancelBuildingBlock(ctx context.Context)
}
type Network interface {
......
......@@ -381,6 +381,9 @@ func (s *Driver) eventLoop() {
}
s.log.Warn("Sequencer has been stopped")
s.driverConfig.SequencerStopped = true
// Cancel any inflight block building. If we don't cancel this, we can resume sequencing an old block
// even if we've received new unsafe heads in the interim, causing us to introduce a re-org.
s.sequencer.CancelBuildingBlock(ctx)
respCh <- hashAndError{hash: s.derivation.UnsafeL2Head().Hash}
}
case respCh := <-s.sequencerActive:
......
......@@ -4,12 +4,16 @@ import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/superchain-registry/superchain"
)
var OPStackSupport = params.ProtocolVersionV0{Build: [8]byte{}, Major: 3, Minor: 1, Patch: 0, PreRelease: 1}.Encode()
const (
opMainnet = 10
opGoerli = 420
......
......@@ -84,6 +84,9 @@ type Config struct {
DepositContractAddress common.Address `json:"deposit_contract_address"`
// L1 System Config Address
L1SystemConfigAddress common.Address `json:"l1_system_config_address"`
// L1 address that declares the protocol versions, optional (Beta feature)
ProtocolVersionsAddress common.Address `json:"protocol_versions_address,omitempty"`
}
// ValidateL1Config checks L1 config variables for errors.
......@@ -283,6 +286,8 @@ func (c *Config) Description(l2Chains map[string]string) string {
// Report the upgrade configuration
banner += "Post-Bedrock Network Upgrades (timestamp based):\n"
banner += fmt.Sprintf(" - Regolith: %s\n", fmtForkTimeOrUnset(c.RegolithTime))
// Report the protocol version
banner += fmt.Sprintf("Node supports up to OP-Stack Protocol Version: %s\n", OPStackSupport)
return banner
}
......
......@@ -36,6 +36,11 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
return nil, err
}
if !ctx.Bool(flags.BetaRollupLoadProtocolVersions.Name) {
log.Info("Not opted in to ProtocolVersions signal loading, disabling ProtocolVersions contract now.")
rollupConfig.ProtocolVersionsAddress = common.Address{}
}
configPersistence := NewConfigPersistence(ctx)
driverConfig := NewDriverConfig(ctx)
......@@ -61,6 +66,11 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
syncConfig := NewSyncConfig(ctx)
haltOption := ctx.String(flags.BetaRollupHalt.Name)
if haltOption == "none" {
haltOption = ""
}
cfg := &node.Config{
L1: l1Endpoint,
L2: l2Endpoint,
......@@ -93,6 +103,7 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
},
ConfigPersistence: configPersistence,
Sync: *syncConfig,
RollupHalt: haltOption,
}
if err := cfg.LoadPersisted(log); err != nil {
......
......@@ -5,6 +5,9 @@ import (
"fmt"
"time"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/sources/caching"
......@@ -125,3 +128,12 @@ func (s *EngineClient) GetPayload(ctx context.Context, payloadId eth.PayloadID)
e.Trace("Received payload")
return &result, nil
}
func (s *EngineClient) SignalSuperchainV1(ctx context.Context, recommended, required params.ProtocolVersion) (params.ProtocolVersion, error) {
var result params.ProtocolVersion
err := s.client.CallContext(ctx, &result, "engine_signalSuperchainV1", &catalyst.SuperchainSignal{
Recommended: recommended,
Required: required,
})
return result, err
}
......@@ -51,6 +51,9 @@ func (res *AccountResult) Verify(stateRoot common.Hash) error {
if err != nil {
return fmt.Errorf("failed to verify storage value %d with key %s (path %x) in storage trie %s: %w", i, entry.Key, path, res.StorageHash, err)
}
if val == nil && entry.Value.ToInt().Cmp(common.Big0) == 0 { // empty storage is zero by default
continue
}
comparison, err := rlp.EncodeToBytes(entry.Value.ToInt().Bytes())
if err != nil {
return fmt.Errorf("failed to encode storage value %d with key %s (path %x) in storage trie %s: %w", i, entry.Key, path, res.StorageHash, err)
......
......@@ -211,14 +211,8 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (*
}
gasFeeCap := calcGasFeeCap(basefee, gasTipCap)
nonce, err := m.nextNonce(ctx)
if err != nil {
return nil, err
}
rawTx := &types.DynamicFeeTx{
ChainID: m.chainID,
Nonce: nonce,
To: candidate.To,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
......@@ -247,6 +241,13 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (*
rawTx.Gas = gas
}
// Avoid bumping the nonce if the gas estimation fails.
nonce, err := m.nextNonce(ctx)
if err != nil {
return nil, err
}
rawTx.Nonce = nonce
ctx, cancel := context.WithTimeout(ctx, m.cfg.NetworkTimeout)
defer cancel()
return m.cfg.Signer(ctx, m.cfg.From, types.NewTx(rawTx))
......
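The key point of the craftTx change above is ordering: gas estimation now happens before a nonce is reserved, so a failed estimate no longer burns a nonce. A simplified, hypothetical sketch of that ordering (not the actual SimpleTxManager code):

package txsketch

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// craftTxSketch reserves a nonce only after gas estimation succeeds, so a failed
// estimate never leaves a gap in the locally tracked nonce sequence.
func craftTxSketch(
	ctx context.Context,
	estimateGas func(context.Context) (uint64, error),
	nextNonce func(context.Context) (uint64, error),
) (*types.DynamicFeeTx, error) {
	gas, err := estimateGas(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to estimate gas: %w", err)
	}
	nonce, err := nextNonce(ctx)
	if err != nil {
		return nil, err
	}
	return &types.DynamicFeeTx{Gas: gas, Nonce: nonce}, nil
}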
......@@ -93,6 +93,7 @@ type gasPricer struct {
mineAtEpoch int64
baseGasTipFee *big.Int
baseBaseFee *big.Int
err error
mu sync.Mutex
}
......@@ -206,6 +207,9 @@ func (b *mockBackend) HeaderByNumber(ctx context.Context, number *big.Int) (*typ
}
func (b *mockBackend) EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64, error) {
if b.g.err != nil {
return 0, b.g.err
}
return b.g.basefee().Uint64(), nil
}
......@@ -420,6 +424,31 @@ func TestTxMgr_EstimateGas(t *testing.T) {
require.Equal(t, gasEstimate, tx.Gas())
}
func TestTxMgr_EstimateGasFails(t *testing.T) {
t.Parallel()
h := newTestHarness(t)
candidate := h.createTxCandidate()
// Set the gas limit to zero to trigger gas estimation.
candidate.GasLimit = 0
// Craft a successful transaction.
tx, err := h.mgr.craftTx(context.Background(), candidate)
require.Nil(t, err)
lastNonce := tx.Nonce()
// Mock gas estimation failure.
h.gasPricer.err = fmt.Errorf("execution error")
_, err = h.mgr.craftTx(context.Background(), candidate)
require.ErrorContains(t, err, "failed to estimate gas")
// Ensure successful craft uses the correct nonce
h.gasPricer.err = nil
tx, err = h.mgr.craftTx(context.Background(), candidate)
require.Nil(t, err)
require.Equal(t, lastNonce+1, tx.Nonce())
}
// TestTxMgrOnlyOnePublicationSucceeds asserts that the tx manager will return a
// receipt so long as at least one of the publications is able to succeed with a
// simulated rpc failure.
......
FROM ethereum/client-go:v1.13.0
FROM ethereum/client-go:v1.13.1
RUN apk add --no-cache jq
......
......@@ -526,6 +526,10 @@ PreimageOracle_Test:test_loadKeccak256PreimagePart_outOfBoundsOffset_reverts() (
PreimageOracle_Test:test_loadKeccak256PreimagePart_succeeds() (gas: 76098)
PreimageOracle_Test:test_loadLocalData_onePart_succeeds() (gas: 75840)
PreimageOracle_Test:test_loadLocalData_outOfBoundsOffset_reverts() (gas: 8803)
ProtocolVersions_Initialize_Test:test_initialize_events_succeeds() (gas: 52175)
ProtocolVersions_Initialize_Test:test_initialize_values_succeeds() (gas: 32301)
ProtocolVersions_Setters_TestFail:test_setRecommended_notOwner_reverts() (gas: 15508)
ProtocolVersions_Setters_TestFail:test_setRequired_notOwner_reverts() (gas: 15520)
ProxyAdmin_Test:test_chugsplashChangeProxyAdmin_succeeds() (gas: 35586)
ProxyAdmin_Test:test_chugsplashGetProxyAdmin_succeeds() (gas: 15675)
ProxyAdmin_Test:test_chugsplashGetProxyImplementation_succeeds() (gas: 51084)
......@@ -666,4 +670,4 @@ TransactorTest:test_constructor_succeeds() (gas: 9739)
TransactorTest:test_delegateCall_succeeds() (gas: 20909)
TransactorTest:test_delegateCall_unauthorized_reverts() (gas: 16550)
TransferOnionTest:test_constructor_succeeds() (gas: 564855)
TransferOnionTest:test_unwrap_succeeds() (gas: 724955)
TransferOnionTest:test_unwrap_succeeds() (gas: 724955)
\ No newline at end of file
......@@ -46,5 +46,7 @@
"faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98",
"faultGameMaxDepth": 30,
"faultGameMaxDuration": 1200,
"systemConfigStartBlock": 0
"systemConfigStartBlock": 0,
"requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
"recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
......@@ -28,6 +28,7 @@ import { PreimageOracle } from "src/cannon/PreimageOracle.sol";
import { MIPS } from "src/cannon/MIPS.sol";
import { BlockOracle } from "src/dispute/BlockOracle.sol";
import { L1ERC721Bridge } from "src/L1/L1ERC721Bridge.sol";
import { ProtocolVersions, ProtocolVersion } from "src/L1/ProtocolVersions.sol";
import { Predeploys } from "src/libraries/Predeploys.sol";
import { Chains } from "./Chains.sol";
......@@ -47,7 +48,7 @@ contract Deploy is Deployer {
/// @notice The create2 salt used for deployment of the contract implementations.
/// Using this helps to reduce config across networks as the implementation
/// addresses will be the same across networks when deployed with create2.
bytes32 constant IMPL_SALT = bytes32("ether's phoenix");
bytes32 constant IMPL_SALT = keccak256(bytes("ether's phoenix"));
/// @notice The name of the script, used to ensure the right deploy artifacts
/// are used.
......@@ -80,6 +81,7 @@ contract Deploy is Deployer {
initializeL1CrossDomainMessenger();
initializeL2OutputOracle();
initializeOptimismPortal();
initializeProtocolVersions();
setAlphabetFaultGameImplementation();
setCannonFaultGameImplementation();
......@@ -103,6 +105,18 @@ contract Deploy is Deployer {
}
}
/// @notice Modifier that will only allow a function to be called on a public
/// testnet or devnet.
modifier onlyTestnetOrDevnet() {
uint256 chainid = block.chainid;
if (
chainid == Chains.Goerli || chainid == Chains.Sepolia || chainid == Chains.LocalDevnet
|| chainid == Chains.GethDevnet
) {
_;
}
}
/// @notice Deploy all of the proxies
function deployProxies() public {
deployAddressManager();
......@@ -116,6 +130,7 @@ contract Deploy is Deployer {
deployOptimismMintableERC20FactoryProxy();
deployL1ERC721BridgeProxy();
deployDisputeGameFactoryProxy();
deployProtocolVersionsProxy();
transferAddressManagerOwnership();
}
......@@ -133,6 +148,7 @@ contract Deploy is Deployer {
deployBlockOracle();
deployPreimageOracle();
deployMips();
deployProtocolVersions();
}
/// @notice Deploy the AddressManager
......@@ -291,6 +307,22 @@ contract Deploy is Deployer {
addr_ = address(proxy);
}
/// @notice Deploy the ProtocolVersionsProxy
function deployProtocolVersionsProxy() public onlyTestnetOrDevnet broadcast returns (address addr_) {
address proxyAdmin = mustGetAddress("ProxyAdmin");
Proxy proxy = new Proxy({
_admin: proxyAdmin
});
address admin = address(uint160(uint256(vm.load(address(proxy), OWNER_KEY))));
require(admin == proxyAdmin);
save("ProtocolVersionsProxy", address(proxy));
console.log("ProtocolVersionsProxy deployed at %s", address(proxy));
addr_ = address(proxy);
}
/// @notice Deploy the L1CrossDomainMessenger
function deployL1CrossDomainMessenger() public broadcast returns (address addr_) {
L1CrossDomainMessenger messenger = new L1CrossDomainMessenger{ salt: IMPL_SALT }();
......@@ -380,6 +412,15 @@ contract Deploy is Deployer {
addr_ = address(oracle);
}
/// @notice Deploy the ProtocolVersions
function deployProtocolVersions() public onlyTestnetOrDevnet broadcast returns (address addr_) {
ProtocolVersions versions = new ProtocolVersions{ salt: IMPL_SALT }();
save("ProtocolVersions", address(versions));
console.log("ProtocolVersions deployed at %s", address(versions));
addr_ = address(versions);
}
/// @notice Deploy the PreimageOracle
function deployPreimageOracle() public onlyDevnet broadcast returns (address addr_) {
PreimageOracle preimageOracle = new PreimageOracle{ salt: IMPL_SALT }();
......@@ -742,6 +783,37 @@ contract Deploy is Deployer {
require(portal.paused() == false);
}
function initializeProtocolVersions() public onlyTestnetOrDevnet broadcast {
ProxyAdmin proxyAdmin = ProxyAdmin(mustGetAddress("ProxyAdmin"));
address protocolVersionsProxy = mustGetAddress("ProtocolVersionsProxy");
address protocolVersions = mustGetAddress("ProtocolVersions");
address finalSystemOwner = cfg.finalSystemOwner();
uint256 requiredProtocolVersion = cfg.requiredProtocolVersion();
uint256 recommendedProtocolVersion = cfg.recommendedProtocolVersion();
proxyAdmin.upgradeAndCall({
_proxy: payable(protocolVersionsProxy),
_implementation: protocolVersions,
_data: abi.encodeCall(
ProtocolVersions.initialize,
(
finalSystemOwner,
ProtocolVersion.wrap(requiredProtocolVersion),
ProtocolVersion.wrap(recommendedProtocolVersion)
)
)
});
ProtocolVersions versions = ProtocolVersions(protocolVersionsProxy);
string memory version = versions.version();
console.log("ProtocolVersions version: %s", version);
require(versions.owner() == finalSystemOwner);
require(ProtocolVersion.unwrap(versions.required()) == requiredProtocolVersion);
require(ProtocolVersion.unwrap(versions.recommended()) == recommendedProtocolVersion);
}
/// @notice Transfer ownership of the ProxyAdmin contract to the final system owner
function transferProxyAdminOwnership() public broadcast {
ProxyAdmin proxyAdmin = ProxyAdmin(mustGetAddress("ProxyAdmin"));
......
......@@ -48,6 +48,8 @@ contract DeployConfig is Script {
uint256 public faultGameMaxDepth;
uint256 public faultGameMaxDuration;
uint256 public systemConfigStartBlock;
uint256 public requiredProtocolVersion;
uint256 public recommendedProtocolVersion;
constructor(string memory _path) {
console.log("DeployConfig: reading file %s", _path);
......@@ -94,6 +96,13 @@ contract DeployConfig is Script {
faultGameAbsolutePrestate = stdJson.readUint(_json, "$.faultGameAbsolutePrestate");
faultGameMaxDepth = stdJson.readUint(_json, "$.faultGameMaxDepth");
faultGameMaxDuration = stdJson.readUint(_json, "$.faultGameMaxDuration");
requiredProtocolVersion = stdJson.readUint(_json, "$.requiredProtocolVersion");
recommendedProtocolVersion = stdJson.readUint(_json, "$.recommendedProtocolVersion");
}
if (block.chainid == Chains.Goerli || block.chainid == Chains.Sepolia) {
requiredProtocolVersion = stdJson.readUint(_json, "$.requiredProtocolVersion");
recommendedProtocolVersion = stdJson.readUint(_json, "$.recommendedProtocolVersion");
}
}
......
......@@ -6,6 +6,7 @@
"src/L1/L1StandardBridge.sol": "0x12e227c6054660a83b92d823a7447db96a7d476b7a94e0f1807772d400329880",
"src/L1/L2OutputOracle.sol": "0x2e298bccb5fe73cf365e23bb56ebe974534549e0052bfe0f86372c52067153e6",
"src/L1/OptimismPortal.sol": "0xc8e41a7c3405c1b04c98f4b6bc7d20ff6f20cd3132134a3106670f5a475978c7",
"src/L1/ProtocolVersions.sol": "0x2f980d89b583936c7af5d71a1e2c73d026d895c1687abdd09815e4581032cee5",
"src/L1/SystemConfig.sol": "0xd508b84ef7c6d57be1e2e7eef92fc340f6bb3d68d999bdf2fabe12d110f9615c",
"src/L2/BaseFeeVault.sol": "0xc347c1aebe69178e72d2b1d3e700bbf84e39975319465bb85d69fd0d60fc1759",
"src/L2/GasPriceOracle.sol": "0x88efffbd40f8d012d700a5d7fde0d92266f65e9d7006cd8f034bacaa036d0eb2",
......
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol";
import { ISemver } from "src/universal/ISemver.sol";
/// @notice ProtocolVersion is a numeric identifier of the protocol version.
type ProtocolVersion is uint256;
/// @title ProtocolVersions
/// @notice The ProtocolVersions contract is used to manage superchain protocol version information.
contract ProtocolVersions is OwnableUpgradeable, ISemver {
/// @notice Enum representing different types of updates.
/// @custom:value REQUIRED_PROTOCOL_VERSION Represents an update to the required protocol version.
/// @custom:value RECOMMENDED_PROTOCOL_VERSION Represents an update to the recommended protocol version.
enum UpdateType {
REQUIRED_PROTOCOL_VERSION,
RECOMMENDED_PROTOCOL_VERSION
}
/// @notice Version identifier, used for upgrades.
uint256 public constant VERSION = 0;
/// @notice Storage slot that the required protocol version is stored at.
bytes32 public constant REQUIRED_SLOT = bytes32(uint256(keccak256("protocolversion.required")) - 1);
/// @notice Storage slot that the recommended protocol version is stored at.
bytes32 public constant RECOMMENDED_SLOT = bytes32(uint256(keccak256("protocolversion.recommended")) - 1);
/// @notice Emitted when configuration is updated.
/// @param version ProtocolVersion version.
/// @param updateType Type of update.
/// @param data Encoded update data.
event ConfigUpdate(uint256 indexed version, UpdateType indexed updateType, bytes data);
/// @notice Semantic version.
/// @custom:semver 0.1.0
string public constant version = "0.1.0";
/// @notice Constructs the ProtocolVersions contract. Cannot set
///         the owner to `address(0)` due to the Ownable contract's
///         implementation, so set it to `address(0xdEaD)`.
///         A zero version is considered empty and is ignored by nodes.
constructor() {
initialize({
_owner: address(0xdEaD),
_required: ProtocolVersion.wrap(uint256(0)),
_recommended: ProtocolVersion.wrap(uint256(0))
});
}
/// @notice Initializer.
/// @param _owner Initial owner of the contract.
/// @param _required Required protocol version to operate on this chain.
/// @param _recommended Recommended protocol version to operate on this chain.
function initialize(
address _owner,
ProtocolVersion _required,
ProtocolVersion _recommended
)
public
reinitializer(2)
{
__Ownable_init();
transferOwnership(_owner);
_setRequired(_required);
_setRecommended(_recommended);
}
/// @notice Returns a ProtocolVersion stored in an arbitrary storage slot.
/// These storage slots decouple the storage layout from solc's automation.
/// @param _slot The storage slot to retrieve the protocol version from.
function _getProtocolVersion(bytes32 _slot) internal view returns (ProtocolVersion out_) {
assembly {
out_ := sload(_slot)
}
}
/// @notice Stores a ProtocolVersion in an arbitrary storage slot, `_slot`.
/// @param _version The protocol version to store
/// @param _slot The storage slot to store the protocol version in.
/// @dev WARNING! This function must be used cautiously, as it allows for overwriting values
/// in arbitrary storage slots.
function _setProtocolVersion(ProtocolVersion _version, bytes32 _slot) internal {
assembly {
sstore(_slot, _version)
}
}
/// @notice High level getter for the required protocol version.
/// @return out_ Required protocol version to sync to the head of the chain.
function required() external view returns (ProtocolVersion out_) {
out_ = _getProtocolVersion(REQUIRED_SLOT);
}
/// @notice Updates the required protocol version. Can only be called by the owner.
/// @param _required New required protocol version.
function setRequired(ProtocolVersion _required) external onlyOwner {
_setRequired(_required);
}
/// @notice Internal function for updating the required protocol version.
/// @param _required New required protocol version.
function _setRequired(ProtocolVersion _required) internal {
_setProtocolVersion(_required, REQUIRED_SLOT);
bytes memory data = abi.encode(_required);
emit ConfigUpdate(VERSION, UpdateType.REQUIRED_PROTOCOL_VERSION, data);
}
/// @notice High level getter for the recommended protocol version.
/// @return out_ Recommended protocol version to sync to the head of the chain.
function recommended() external view returns (ProtocolVersion out_) {
out_ = _getProtocolVersion(RECOMMENDED_SLOT);
}
/// @notice Updates the recommended protocol version. Can only be called by the owner.
/// @param _recommended New recommended protocol version.
function setRecommended(ProtocolVersion _recommended) external onlyOwner {
_setRecommended(_recommended);
}
/// @notice Internal function for updating the recommended protocol version.
/// @param _recommended New recommended protocol version.
function _setRecommended(ProtocolVersion _recommended) internal {
_setProtocolVersion(_recommended, RECOMMENDED_SLOT);
bytes memory data = abi.encode(_recommended);
emit ConfigUpdate(VERSION, UpdateType.RECOMMENDED_PROTOCOL_VERSION, data);
}
}
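Because `required()` and `recommended()` are stored at fixed, well-known slots rather than in solc-assigned storage, an off-chain reader does not need the contract ABI; this mirrors how the op-node runtime config reads the values above. A minimal sketch, assuming a hypothetical RPC endpoint and ProtocolVersionsProxy address, using go-ethereum's ethclient:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Hypothetical endpoint and proxy address; substitute real values.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	proxy := common.HexToAddress("0x0000000000000000000000000000000000000000")

	// REQUIRED_SLOT = bytes32(uint256(keccak256("protocolversion.required")) - 1)
	requiredSlot := common.HexToHash("0x4aaefe95bd84fd3f32700cf3b7566bc944b73138e41958b5785826df2aecace0")
	raw, err := client.StorageAt(context.Background(), proxy, requiredSlot, nil) // nil = latest block
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("required protocol version (raw 32 bytes): %x\n", raw)
}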
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
// Testing utilities
import { CommonTest } from "./CommonTest.t.sol";
// Libraries
import { Constants } from "src/libraries/Constants.sol";
// Target contract dependencies
import { Proxy } from "src/universal/Proxy.sol";
// Target contract
import { ProtocolVersions, ProtocolVersion } from "src/L1/ProtocolVersions.sol";
contract ProtocolVersions_Init is CommonTest {
ProtocolVersions protocolVersions;
ProtocolVersions protocolVersionsImpl;
event ConfigUpdate(uint256 indexed version, ProtocolVersions.UpdateType indexed updateType, bytes data);
// Dummy values used to test getters
ProtocolVersion constant required = ProtocolVersion.wrap(0xabcd);
ProtocolVersion constant recommended = ProtocolVersion.wrap(0x1234);
function setUp() public virtual override {
super.setUp();
Proxy proxy = new Proxy(multisig);
protocolVersionsImpl = new ProtocolVersions();
vm.prank(multisig);
proxy.upgradeToAndCall(
address(protocolVersionsImpl),
abi.encodeCall(
ProtocolVersions.initialize,
(
alice, // _owner,
required,
recommended
)
)
);
protocolVersions = ProtocolVersions(address(proxy));
}
}
contract ProtocolVersions_Initialize_Test is ProtocolVersions_Init {
/// @dev Tests that initialization sets the correct values.
function test_initialize_values_succeeds() external {
assertEq(ProtocolVersion.unwrap(protocolVersions.required()), ProtocolVersion.unwrap(required));
assertEq(ProtocolVersion.unwrap(protocolVersions.recommended()), ProtocolVersion.unwrap(recommended));
assertEq(protocolVersions.owner(), alice);
assertEq(ProtocolVersion.unwrap(protocolVersionsImpl.required()), 0);
assertEq(ProtocolVersion.unwrap(protocolVersionsImpl.recommended()), 0);
assertEq(protocolVersionsImpl.owner(), address(0xdEad));
}
/// @dev Ensures that the events are emitted during initialization.
function test_initialize_events_succeeds() external {
assertEq(protocolVersionsImpl.owner(), address(0xdEad));
// Wipe out the initialized slot so the proxy can be initialized again
vm.store(address(protocolVersions), bytes32(0), bytes32(0));
// The order of the emitted events matters here
vm.expectEmit(true, true, true, true, address(protocolVersions));
emit ConfigUpdate(0, ProtocolVersions.UpdateType.REQUIRED_PROTOCOL_VERSION, abi.encode(required));
vm.expectEmit(true, true, true, true, address(protocolVersions));
emit ConfigUpdate(0, ProtocolVersions.UpdateType.RECOMMENDED_PROTOCOL_VERSION, abi.encode(recommended));
vm.prank(multisig);
Proxy(payable(address(protocolVersions))).upgradeToAndCall(
address(protocolVersionsImpl),
abi.encodeCall(
ProtocolVersions.initialize,
(
alice, // _owner
required, // _required
recommended // _recommended
)
)
);
}
}
contract ProtocolVersions_Setters_TestFail is ProtocolVersions_Init {
/// @dev Tests that `setRequired` reverts if the caller is not the owner.
function test_setRequired_notOwner_reverts() external {
vm.expectRevert("Ownable: caller is not the owner");
protocolVersions.setRequired(ProtocolVersion.wrap(0));
}
/// @dev Tests that `setRecommended` reverts if the caller is not the owner.
function test_setRecommended_notOwner_reverts() external {
vm.expectRevert("Ownable: caller is not the owner");
protocolVersions.setRecommended(ProtocolVersion.wrap(0));
}
}
contract ProtocolVersions_Setters_Test is ProtocolVersions_Init {
/// @dev Tests that `setRequired` updates the required protocol version successfully.
function testFuzz_setRequired_succeeds(uint256 _version) external {
vm.expectEmit(true, true, true, true);
emit ConfigUpdate(0, ProtocolVersions.UpdateType.REQUIRED_PROTOCOL_VERSION, abi.encode(_version));
vm.prank(protocolVersions.owner());
protocolVersions.setRequired(ProtocolVersion.wrap(_version));
assertEq(ProtocolVersion.unwrap(protocolVersions.required()), _version);
}
/// @dev Tests that `setRecommended` updates the recommended protocol version successfully.
function testFuzz_setRecommended_succeeds(uint256 _version) external {
vm.expectEmit(true, true, true, true);
emit ConfigUpdate(0, ProtocolVersions.UpdateType.RECOMMENDED_PROTOCOL_VERSION, abi.encode(_version));
vm.prank(protocolVersions.owner());
protocolVersions.setRecommended(ProtocolVersion.wrap(_version));
assertEq(ProtocolVersion.unwrap(protocolVersions.recommended()), _version);
}
}
......@@ -15,6 +15,7 @@ import { IBridgeAdapter } from './bridge-adapter'
export enum L1ChainID {
MAINNET = 1,
GOERLI = 5,
SEPOLIA = 11155111,
HARDHAT_LOCAL = 31337,
BEDROCK_LOCAL_DEVNET = 900,
}
......@@ -25,6 +26,7 @@ export enum L1ChainID {
export enum L2ChainID {
OPTIMISM = 10,
OPTIMISM_GOERLI = 420,
OPTIMISM_SEPOLIA = 11155420,
OPTIMISM_HARDHAT_LOCAL = 31337,
OPTIMISM_HARDHAT_DEVNET = 17,
OPTIMISM_BEDROCK_ALPHA_TESTNET = 28528,
......
......@@ -2,50 +2,62 @@ import { predeploys } from '@eth-optimism/core-utils'
import { ethers } from 'ethers'
import portalArtifactsMainnet from '@eth-optimism/contracts-bedrock/deployments/mainnet/OptimismPortalProxy.json'
import portalArtifactsGoerli from '@eth-optimism/contracts-bedrock/deployments/goerli/OptimismPortalProxy.json'
import portalArtifactsSepolia from '@eth-optimism/contracts-bedrock/deployments/sepolia/OptimismPortalProxy.json'
import l2OutputOracleArtifactsMainnet from '@eth-optimism/contracts-bedrock/deployments/mainnet/L2OutputOracleProxy.json'
import l2OutputOracleArtifactsGoerli from '@eth-optimism/contracts-bedrock/deployments/goerli/L2OutputOracleProxy.json'
import l2OutputOracleArtifactsSepolia from '@eth-optimism/contracts-bedrock/deployments/sepolia/L2OutputOracleProxy.json'
import addressManagerArtifactMainnet from '@eth-optimism/contracts-bedrock/deployments/mainnet/AddressManager.json'
import addressManagerArtifactGoerli from '@eth-optimism/contracts-bedrock/deployments/goerli/AddressManager.json'
import addressManagerArtifactSepolia from '@eth-optimism/contracts-bedrock/deployments/sepolia/AddressManager.json'
import l1StandardBridgeArtifactMainnet from '@eth-optimism/contracts-bedrock/deployments/mainnet/L1StandardBridgeProxy.json'
import l1StandardBridgeArtifactGoerli from '@eth-optimism/contracts-bedrock/deployments/goerli/L1StandardBridgeProxy.json'
import l1StandardBridgeArtifactSepolia from '@eth-optimism/contracts-bedrock/deployments/sepolia/L1StandardBridgeProxy.json'
import l1CrossDomainMessengerArtifactMainnet from '@eth-optimism/contracts-bedrock/deployments/mainnet/L1CrossDomainMessengerProxy.json'
import l1CrossDomainMessengerArtifactGoerli from '@eth-optimism/contracts-bedrock/deployments/goerli/L1CrossDomainMessengerProxy.json'
import l1CrossDomainMessengerArtifactSepolia from '@eth-optimism/contracts-bedrock/deployments/sepolia/L1CrossDomainMessengerProxy.json'
const portalAddresses = {
mainnet: portalArtifactsMainnet.address,
goerli: portalArtifactsGoerli.address,
sepolia: portalArtifactsSepolia.address,
}
const l2OutputOracleAddresses = {
mainnet: l2OutputOracleArtifactsMainnet.address,
goerli: l2OutputOracleArtifactsGoerli.address,
sepolia: l2OutputOracleArtifactsSepolia.address,
}
const addressManagerAddresses = {
mainnet: addressManagerArtifactMainnet.address,
goerli: addressManagerArtifactGoerli.address,
sepolia: addressManagerArtifactSepolia.address,
}
const l1StandardBridgeAddresses = {
mainnet: l1StandardBridgeArtifactMainnet.address,
goerli: l1StandardBridgeArtifactGoerli.address,
sepolia: l1StandardBridgeArtifactSepolia.address,
}
const l1CrossDomainMessengerAddresses = {
mainnet: l1CrossDomainMessengerArtifactMainnet.address,
goerli: l1CrossDomainMessengerArtifactGoerli.address,
sepolia: l1CrossDomainMessengerArtifactSepolia.address,
}
// legacy
const stateCommitmentChainAddresses = {
mainnet: '0xBe5dAb4A2e9cd0F27300dB4aB94BeE3A233AEB19',
goerli: '0x9c945aC97Baf48cB784AbBB61399beB71aF7A378',
sepolia: ethers.constants.AddressZero,
}
// legacy
const canonicalTransactionChainAddresses = {
mainnet: '0x5E4e65926BA27467555EB562121fac00D24E9dD2',
goerli: '0x607F755149cFEB3a14E1Dc3A4E2450Cde7dfb04D',
sepolia: ethers.constants.AddressZero,
}
import {
......@@ -67,6 +79,7 @@ export const DEPOSIT_CONFIRMATION_BLOCKS: {
} = {
[L2ChainID.OPTIMISM]: 50 as const,
[L2ChainID.OPTIMISM_GOERLI]: 12 as const,
[L2ChainID.OPTIMISM_SEPOLIA]: 12 as const,
[L2ChainID.OPTIMISM_HARDHAT_LOCAL]: 2 as const,
[L2ChainID.OPTIMISM_HARDHAT_DEVNET]: 2 as const,
[L2ChainID.OPTIMISM_BEDROCK_ALPHA_TESTNET]: 12 as const,
......@@ -81,6 +94,7 @@ export const CHAIN_BLOCK_TIMES: {
} = {
[L1ChainID.MAINNET]: 13 as const,
[L1ChainID.GOERLI]: 15 as const,
[L1ChainID.SEPOLIA]: 15 as const,
[L1ChainID.HARDHAT_LOCAL]: 1 as const,
[L1ChainID.BEDROCK_LOCAL_DEVNET]: 15 as const,
}
......@@ -137,6 +151,10 @@ export const CONTRACT_ADDRESSES: {
l1: getL1ContractsByNetworkName('goerli'),
l2: DEFAULT_L2_CONTRACT_ADDRESSES,
},
[L2ChainID.OPTIMISM_SEPOLIA]: {
l1: getL1ContractsByNetworkName('sepolia'),
l2: DEFAULT_L2_CONTRACT_ADDRESSES,
},
[L2ChainID.OPTIMISM_HARDHAT_LOCAL]: {
l1: {
AddressManager: '0x5FbDB2315678afecb367f032d93F642f64180aa3' as const,
......
......@@ -2,4 +2,4 @@ packages:
- 'packages/*'
- 'endpoint-monitor'
- 'op-exporter'
- 'indexer/ts'
- 'indexer/api-ts'