Commit 00c84fec authored by mergify[bot]'s avatar mergify[bot] Committed by GitHub

Merge branch 'develop' into refcell/merkle-styling

parents 5ab0c536 9e64e9d4
---
'@eth-optimism/chain-mon': patch
---
Update import path for artifact
---
'@eth-optimism/fault-detector': patch
---
Bump contracts-bedrock version
---
'@eth-optimism/sdk': patch
---
Fixed missing indexes for multicall support
---
'@eth-optimism/sdk': minor
---
Add support for claiming multicall3 withdrawals
---
'@eth-optimism/sdk': minor
---
Fixes issue with legacy withdrawal message status detection
---
'@eth-optimism/contracts-bedrock': patch
'@eth-optimism/fault-detector': patch
'@eth-optimism/core-utils': patch
'@eth-optimism/endpoint-monitor': patch
'@eth-optimism/sdk': patch
---
fix typo
---
'@eth-optimism/fault-detector': minor
---
Remove pre-bedrock support from fault detector.
---
'@eth-optimism/contracts-bedrock': minor
---
Migrate contracts periphery into bedrock
---
'@eth-optimism/contracts-bedrock': patch
'@eth-optimism/core-utils': patch
'@eth-optimism/sdk': patch
---
Delete dead typescript https://github.com/ethereum-optimism/optimism/pull/6148.
---
'@eth-optimism/sdk': patch
---
Update the addresses of the bridges on optimism and optimism goerli for the ECO bridge adapter
---
'@eth-optimism/sdk': minor
---
Added `to` and `from` block filters to several methods in CrossChainMessenger
......@@ -140,7 +140,6 @@ jobs:
- "packages/common-ts/node_modules"
- "packages/contracts-bedrock/node_modules"
- "packages/core-utils/node_modules"
- "packages/fault-detector/node_modules"
- "packages/replica-healthcheck/node_modules"
- "packages/sdk/node_modules"
- "packages/contracts-ts/node_modules"
......@@ -376,7 +375,7 @@ jobs:
contracts-bedrock-tests:
docker:
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
resource_class: large
resource_class: xlarge
steps:
- checkout
- attach_workspace: { at: "." }
......@@ -409,10 +408,12 @@ jobs:
environment:
FOUNDRY_PROFILE: ci
working_directory: packages/contracts-bedrock
no_output_timeout: 15m
contracts-bedrock-checks:
docker:
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
resource_class: xlarge
steps:
- checkout
- attach_workspace: { at: "." }
......@@ -448,6 +449,7 @@ jobs:
environment:
FOUNDRY_PROFILE: ci
working_directory: packages/contracts-bedrock
no_output_timeout: 15m
- run:
name: validate deploy configs
command: |
......@@ -1115,6 +1117,18 @@ jobs:
command: |
docker logs ops-bedrock-op-proposer-1 || echo "No logs."
when: on_fail
- run:
name: Log deployment artifact
command: |
cat broadcast/Deploy.s.sol/900/run-latest.json || echo "No deployment file found"
when: on_fail
working_directory: packages/contracts-bedrock
- run:
name: Log artifacts directory
command: |
ls -R forge-artifacts || echo "No forge artifacts found"
when: on_fail
working_directory: packages/contracts-bedrock
- when:
condition:
and:
......@@ -1348,13 +1362,6 @@ workflows:
dependencies: "(common-ts|contracts-bedrock|core-utils|sdk)"
requires:
- pnpm-monorepo
- js-lint-test:
name: fault-detector-tests
coverage_flag: fault-detector-tests
package_name: fault-detector
dependencies: "(common-ts|core-utils|sdk)"
requires:
- pnpm-monorepo
- js-lint-test:
name: contracts-ts-tests
coverage_flag: contracts-ts-tests
......@@ -1601,14 +1608,6 @@ workflows:
docker_target: wd-mon
context:
- oplabs-gcr
- docker-publish:
name: fault-detector-docker-publish
docker_file: ./ops/docker/Dockerfile.packages
docker_name: fault-detector
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
docker_target: fault-detector
context:
- oplabs-gcr
- hive-test:
name: hive-test-rpc
version: <<pipeline.git.revision>>
......@@ -1646,7 +1645,7 @@ workflows:
type: approval
filters:
tags:
only: /^(fault-detector|proxyd|indexer|ci-builder|op-[a-z0-9\-]*)\/v.*/
only: /^(proxyd|indexer|ci-builder|op-[a-z0-9\-]*)\/v.*/
branches:
ignore: /.*/
- docker-release:
......@@ -1713,23 +1712,6 @@ workflows:
- oplabs-gcr-release
requires:
- hold
- docker-release:
name: fault-detector-docker-release
filters:
tags:
only: /^fault-detector\/v.*/
branches:
ignore: /.*/
docker_file: ./ops/docker/Dockerfile.packages
docker_name: fault-detector
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
docker_target: fault-detector
docker_context: .
platforms: "linux/amd64,linux/arm64"
context:
- oplabs-gcr-release
requires:
- hold
- docker-build:
name: op-migrate-docker-release
filters:
......
......@@ -3,14 +3,14 @@
/packages/contracts @ethereum-optimism/contract-reviewers
/packages/contracts-bedrock @ethereum-optimism/contract-reviewers
/packages/core-utils @ethereum-optimism/legacy-reviewers
/packages/chain-mon @smartcontracts
/packages/fault-detector @ethereum-optimism/devxpod
/packages/chain-mon @ethereum-optimism/devxpod
/packages/replica-healthcheck @ethereum-optimism/legacy-reviewers
/packages/sdk @ethereum-optimism/devxpod
/packages/atst @ethereum-optimism/devxpod
# Bedrock codebases
/bedrock-devnet @ethereum-optimism/go-reviewers
/cannon @ethereum-optimism/go-reviewers
/op-batcher @ethereum-optimism/go-reviewers
/op-chain-ops @ethereum-optimism/go-reviewers
/op-e2e @ethereum-optimism/go-reviewers
......@@ -35,3 +35,11 @@
/infra @ethereum-optimism/infra-reviewers
/specs @ethereum-optimism/contract-reviewers @ethereum-optimism/go-reviewers
/endpoint-monitor @ethereum-optimism/infra-reviewers
# Don't add owners if only package.json is updated
/packages/*/package.json
/*/package.json
# JavaScript Releases
/packages/*/CHANGELOG.md @ethereum-optimism/release-managers
/*/CHANGELOG.md @ethereum-optimism/release-managers
......@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
# map the step outputs to job outputs
outputs:
fault-detector: ${{ steps.packages.outputs.fault-detector }}
fault-mon: ${{ steps.packages.outputs.fault-mon }}
balance-mon: ${{ steps.packages.outputs.balance-mon }}
drippie-mon: ${{ steps.packages.outputs.drippie-mon }}
wd-mon: ${{ steps.packages.outputs.wd-mon }}
......@@ -43,10 +43,10 @@ jobs:
env:
CUSTOM_IMAGE_NAME: ${{ github.event.inputs.customImageName }}
fault-detector:
name: Publish Fault Detector Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
fault-mon:
name: Publish fault-mon Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.fault-detector != ''
if: needs.canary-publish.outputs.fault-mon != ''
runs-on: ubuntu-latest
steps:
......@@ -66,9 +66,9 @@ jobs:
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: fault-detector
target: fault-mon
push: true
tags: ethereumoptimism/fault-detector:${{ needs.canary-publish.outputs.canary-docker-tag }}
tags: ethereumoptimism/fault-mon:${{ needs.canary-publish.outputs.canary-docker-tag }}
balance-mon:
name: Publish Balance Monitor Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
......
......@@ -16,7 +16,7 @@ jobs:
if: github.repository == 'ethereum-optimism/optimism'
# map the step outputs to job outputs
outputs:
fault-detector: ${{ steps.packages.outputs.fault-detector }}
fault-mon: ${{ steps.packages.outputs.fault-mon }}
balance-mon: ${{ steps.packages.outputs.drippie-mon }}
drippie-mon: ${{ steps.packages.outputs.drippie-mon }}
wd-mon: ${{ steps.packages.outputs.wd-mon }}
......@@ -100,10 +100,10 @@ jobs:
GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
GITVERSION=${{ steps.build_args.outputs.GITVERSION }}
fault-detector:
name: Publish Fault Detector Version ${{ needs.release.outputs.fault-detector }}
fault-mon:
name: Publish fault-mon Version ${{ needs.release.outputs.fault-mon }}
needs: release
if: needs.release.outputs.fault-detector != ''
if: needs.release.outputs.fault-mon != ''
runs-on: ubuntu-latest
steps:
......@@ -123,9 +123,9 @@ jobs:
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: fault-detector
target: fault-mon
push: true
tags: ethereumoptimism/fault-detector:${{ needs.release.outputs.fault-detector }},ethereumoptimism/fault-detector:latest
tags: ethereumoptimism/fault-mon:${{ needs.release.outputs.fault-mon }},ethereumoptimism/fault-mon:latest
wd-mon:
name: Publish Withdrawal Monitor Version ${{ needs.release.outputs.wd-mon }}
......
......@@ -26,6 +26,8 @@ on:
- op-proposer
- op-ufm
- proxyd
- indexer
- ci-builder
prerelease:
description: Increment major/minor/patch as prerelease?
required: false
......
......@@ -19,10 +19,6 @@
{
"directory": "packages/chain-mon",
"changeProcessCWD": true
},
{
"directory": "packages/fault-detector",
"changeProcessCWD": true
}
],
"eslint.nodePath": "./node_modules/eslint/bin/",
......
......@@ -130,4 +130,6 @@ update-op-geth:
.PHONY: update-op-geth
bedrock-markdown-links:
docker run --init -it -v `pwd`:/input lycheeverse/lychee --verbose --no-progress --exclude-loopback --exclude twitter.com --exclude explorer.optimism.io --exclude-mail /input/README.md "/input/specs/**/*.md"
docker run --init -it -v `pwd`:/input lycheeverse/lychee --verbose --no-progress --exclude-loopback \
--exclude twitter.com --exclude explorer.optimism.io --exclude linux-mips.org \
--exclude-mail /input/README.md "/input/specs/**/*.md"
......@@ -54,7 +54,6 @@ Refer to the Directory Structure section below to understand which packages are
│ ├── <a href="./packages/contracts-bedrock">contracts-bedrock</a>: Bedrock smart contracts.
│ ├── <a href="./packages/core-utils">core-utils</a>: Low-level utilities that make building Optimism easier
│ ├── <a href="./packages/chain-mon">chain-mon</a>: Chain monitoring services
│ ├── <a href="./packages/fault-detector">fault-detector</a>: Service for detecting Sequencer faults
│ ├── <a href="./packages/replica-healthcheck">replica-healthcheck</a>: Service for monitoring the health of a replica node
│ └── <a href="./packages/sdk">sdk</a>: provides a set of tools for interacting with Optimism
├── <a href="./op-bindings">op-bindings</a>: Go bindings for Bedrock smart contracts.
......@@ -80,7 +79,6 @@ Refer to the Directory Structure section below to understand which packages are
│ ├── <a href="./packages/common-ts">common-ts</a>: Common tools for building apps in TypeScript
│ ├── <a href="./packages/core-utils">core-utils</a>: Low-level utilities that make building Optimism easier
│ ├── <a href="./packages/chain-mon">chain-mon</a>: Chain monitoring services
│ ├── <a href="./packages/fault-detector">fault-detector</a>: Service for detecting Sequencer faults
│ ├── <a href="./packages/replica-healthcheck">replica-healthcheck</a>: Service for monitoring the health of a replica node
│ └── <a href="./packages/sdk">sdk</a>: provides a set of tools for interacting with Optimism
├── <a href="./indexer">indexer</a>: indexes and syncs transactions
......
......@@ -44,48 +44,17 @@ There are 3 types of witness data involved in onchain execution:
### Packed State
The following state is packed together, and provided in every executed onchain instruction:
```solidity
struct State {
bytes32 memRoot;
bytes32 preimageKey;
uint32 preimageOffset;
uint32 pc;
uint32 nextPC;
uint32 lo;
uint32 hi;
uint32 heap;
uint8 exitCode;
bool exited;
uint64 step;
uint32[32] registers;
}
```
The Packed State is provided in every executed onchain instruction.
See [Cannon VM Specs](../../specs/cannon-fault-proof-vm.md#state) for
details on the state structure.
The packed state is small! The `State` data fits into so few EVM words
that it is more efficient to provide it in full than to create a proof for each individual part involved in execution.
The memory is represented as a merkle-tree root, committing to a binary merkle tree of all memory data;
see [memory proofs](#memory-proofs).
The `State` covers all general-purpose registers, as well as the `lo`, `hi` and `pc` values.
`nextPC` pre-schedules the program-counter change, to emulate the delay-slot behavior of MIPS
after branch and jump instructions.
The program stops changing the state once `exited` is set to true. The exit code is remembered
to determine whether the program succeeded or panicked/exited in some unexpected way.
This outcome can be used to determine the truthiness of claims that are verified as part of the program execution.
The `heap` value is a special value used to emulate a growing heap, mapping new memory upon `mmap` calls by the program.
No memory reads/writes are actually illegal, however; mmap-ing exists purely to satisfy program runtimes that
need the memory-pointer result of the syscall to locate free memory.
The `preimageKey` and `preimageOffset` back the in-flight communication of [pre-image data](#pre-image-data).
The VM `stateHash` is computed as `keccak256(encoded_packed_state)`,
where `encoded_packed_state` is the concatenation of all state-values (all `uint` values are big-endian).
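For intuition, here is a minimal Go sketch of that hashing scheme. It is an illustration only, not the canonical Cannon encoder: the field order follows the `State` struct shown above, while details such as how `exited` is packed are assumptions; the linked spec is authoritative.
```go
// Sketch: stateHash = keccak256(encoded_packed_state), where the encoding is
// the big-endian concatenation of every State field (assumed layout below).
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

type state struct {
	MemRoot        common.Hash
	PreimageKey    common.Hash
	PreimageOffset uint32
	PC             uint32
	NextPC         uint32
	LO             uint32
	HI             uint32
	Heap           uint32
	ExitCode       uint8
	Exited         bool
	Step           uint64
	Registers      [32]uint32
}

// encodePacked concatenates all state values; every uint is big-endian.
func (s *state) encodePacked() []byte {
	out := make([]byte, 0, 32+32+6*4+1+1+8+32*4)
	out = append(out, s.MemRoot[:]...)
	out = append(out, s.PreimageKey[:]...)
	for _, v := range []uint32{s.PreimageOffset, s.PC, s.NextPC, s.LO, s.HI, s.Heap} {
		out = binary.BigEndian.AppendUint32(out, v)
	}
	out = append(out, s.ExitCode)
	exited := byte(0)
	if s.Exited {
		exited = 1
	}
	out = append(out, exited) // assumption: the bool is packed as a single byte
	out = binary.BigEndian.AppendUint64(out, s.Step)
	for _, r := range s.Registers {
		out = binary.BigEndian.AppendUint32(out, r)
	}
	return out
}

func main() {
	var s state // zero-valued state, just to show the call
	fmt.Println("stateHash:", crypto.Keccak256Hash(s.encodePacked()))
}
```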
### Memory proofs
......@@ -133,11 +102,7 @@ Pre-image data is accessed through syscalls exclusively.
The OP-stack fault-proof [Pre-image Oracle specs](../../specs/fault-proof.md#pre-image-oracle)
define the ABI for communicating pre-images.
This ABI is implemented by the VM by intercepting the `read`/`write` syscalls to specific file descriptors:
- `hint client read = 3`: used by the client to send hint data to the host. Optional, implemented as blocking.
- `hint client write = 4`: used by the client to wait for the host. Always instant, since the above is implemented as blocking.
- `preimage client read = 5`: used by the client to read pre-image data, starting from the latest pre-image reading offset.
- `preimage client write = 6`: used by the client to change the pre-image key. The key may change a little bit at a time. The last 32 written bytes count as key for retrieval when reading the pre-image. Changing the key also resets the read offset to 0.
This ABI is implemented by the VM by intercepting the `read`/`write` syscalls to specific file descriptors. See [Cannon VM Specs](../../specs/cannon-fault-proof-vm.md#io) for more details.
The data is loaded into `PreimageOracle.sol` using the respective loading function based on the pre-image type,
and then retrieved during execution of the `read` syscall.
......
......@@ -8,6 +8,7 @@ ignore:
- "op-bindings/bindings/*.go"
- "packages/contracts-bedrock/contracts/vendor/WETH9.sol"
- "packages/contracts-bedrock/contracts/cannon" # tested through Go tests
- 'packages/contracts-bedrock/contracts/EAS/**/*.sol'
coverage:
status:
patch:
......@@ -35,6 +36,5 @@ flag_management:
- name: core-utils-tests
- name: dtl-tests
- name: chain-mon-tests
- name: fault-detector-tests
- name: replica-healthcheck-tests
- name: sdk-tests
......@@ -13,7 +13,7 @@ OP Stack configuration is an active work in progress and will likely evolve sign
## New Blockchain Configuration
New OP Stack blockchains are currently configured with a JSON file inside the Optimism repository. The file is `<optimism repository>/packages/contracts-bedrock/deploy-config/<chain name>.json`. For example, [this is the configuration file for the tutorial blockchain](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/deploy-config/getting-started.json).
New OP Stack blockchains are currently configured with a JSON file inside the Optimism repository. The file is `<optimism repository>/packages/contracts-bedrock/deploy-config/<chain name>.json`. For example, [this is the configuration file for the tutorial blockchain](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/packages/contracts-bedrock/deploy-config/getting-started.json).
### Admin accounts
......
......@@ -40,10 +40,10 @@ An example of an EVM-Ordered Alternative DA module can be found within [this mod
### Non-EVM DA
A non-EVM DA module uses a chain not based on the EVM to manage both the ordering and storage of raw input data. Such a modification would require relatively significant modifications to the [derivation portion](https://github.com/ethereum-optimism/optimism/tree/develop/op-node/rollup/derive) of the `op-node`. No such fully-independent DA modules have been developed yet — be the first!
A non-EVM DA module uses a chain not based on the EVM to manage both the ordering and storage of raw input data. Such a modification would require relatively significant modifications to the [derivation portion](https://github.com/ethereum-optimism/optimism/tree/129032f15b76b0d2a940443a39433de931a97a44/op-node/rollup/derive) of the `op-node`. No such fully-independent DA modules have been developed yet — be the first!
### Multiple DA
It is possible to use multiple Data Availability Layer modules at the same time. For instance, one could source data from two EVM-based chains simultaneously in order to form a bridge between the two chains. When using multiple Data Availability Layer modules, it is imperative to establish a global ordering between the two chains. One option for establishing this ordering is to use the timestamps of blocks from each chain.
Like a non-EVM DA module, a system with multiple Data Availability modules would need to make significant modifications to the [derivation portion](https://github.com/ethereum-optimism/optimism/tree/develop/op-node/rollup/derive) of the `op-node`. No such projects have been constructed yet.
\ No newline at end of file
Like a non-EVM DA module, a system with multiple Data Availability modules would need to make significant modifications to the [derivation portion](https://github.com/ethereum-optimism/optimism/tree/129032f15b76b0d2a940443a39433de931a97a44/op-node/rollup/derive) of the `op-node`. No such projects have been constructed yet.
\ No newline at end of file
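To make the timestamp-ordering idea concrete, here is a minimal Go sketch (purely illustrative; none of these types exist in the OP Stack) that merges two already-ordered block streams into a single global ordering:
```go
// Sketch: establish a global ordering across two DA chains by merging their
// blocks by timestamp, with a deterministic tie-break on chain name.
package main

import "fmt"

// daBlock is a simplified view of a block from either DA chain.
type daBlock struct {
	Chain     string
	Number    uint64
	Timestamp uint64
}

// mergeByTimestamp interleaves two streams that are each already ordered by
// timestamp into one globally ordered stream.
func mergeByTimestamp(a, b []daBlock) []daBlock {
	out := make([]daBlock, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		if a[i].Timestamp < b[j].Timestamp ||
			(a[i].Timestamp == b[j].Timestamp && a[i].Chain < b[j].Chain) {
			out = append(out, a[i])
			i++
		} else {
			out = append(out, b[j])
			j++
		}
	}
	out = append(out, a[i:]...)
	out = append(out, b[j:]...)
	return out
}

func main() {
	chainA := []daBlock{{"A", 1, 100}, {"A", 2, 112}}
	chainB := []daBlock{{"B", 7, 106}, {"B", 8, 112}}
	for _, blk := range mergeByTimestamp(chainA, chainB) {
		fmt.Printf("%s#%d @ %d\n", blk.Chain, blk.Number, blk.Timestamp)
	}
}
```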
......@@ -178,7 +178,7 @@ The `cast wallet new` tool is *not* designed for production deployments. If you
## Configure your network
Once you’ve built both repositories, you’ll need to head back to the Optimism Monorepo to set up the configuration for your chain. Currently, chain configuration lives inside of the [`contracts-bedrock`](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts-bedrock) package.
Once you’ve built both repositories, you’ll need to head back to the Optimism Monorepo to set up the configuration for your chain. Currently, chain configuration lives inside of the [`contracts-bedrock`](https://github.com/ethereum-optimism/optimism/tree/129032f15b76b0d2a940443a39433de931a97a44/packages/contracts-bedrock) package.
1. Enter the Optimism Monorepo:
......@@ -226,7 +226,7 @@ Once you’ve built both repositories, you’ll need head back to the Optimism M
timestamp 1676253324
```
1. Fill out the remainder of the pre-populated config file found at [`deploy-config/getting-started.json`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/deploy-config/getting-started.json). Use the default values in the config file and make the following modifications:
1. Fill out the remainder of the pre-populated config file found at [`deploy-config/getting-started.json`](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/packages/contracts-bedrock/deploy-config/getting-started.json). Use the default values in the config file and make the following modifications:
- Replace `"ADMIN"` with the address of the Admin account you generated earlier.
- Replace `"PROPOSER"` with the address of the Proposer account you generated earlier.
......
......@@ -10,11 +10,11 @@ OP Stack Hacks are not for the faint of heart. You will not be able to receive s
:::
OP Stack blockchains have a number of [predeployed contracts](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/src/constants.ts) that provide important functionality.
OP Stack blockchains have a number of [predeployed contracts](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/packages/contracts-bedrock/src/constants.ts) that provide important functionality.
Most of those contracts are proxies that can be upgraded using the `proxyAdminOwner` which was configured when the network was initially deployed.
The predeploys are controlled from a predeploy called [`ProxyAdmin`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol), whose address is `0x4200000000000000000000000000000000000018`.
The function to call is [`upgrade(address,address)`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol#L211-L229).
The predeploys are controlled from a predeploy called [`ProxyAdmin`](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol), whose address is `0x4200000000000000000000000000000000000018`.
The function to call is [`upgrade(address,address)`](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol#L205-L229).
The first parameter is the proxy to upgrade, and the second is the address of a new implementation.
For example, the legacy `L1BlockNumber` contract is at `0x420...013`.
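As a hedged sketch of what such a call looks like at the calldata level (the implementation address below is a placeholder and this is not an official tool; in practice the transaction must be sent by the `proxyAdminOwner`):
```go
// Sketch: ABI-encode a call to ProxyAdmin.upgrade(address,address). The
// resulting calldata is what the proxyAdminOwner would send to the ProxyAdmin
// predeploy at 0x4200000000000000000000000000000000000018.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// 4-byte selector of upgrade(address,address)
	selector := crypto.Keccak256([]byte("upgrade(address,address)"))[:4]

	proxy := common.HexToAddress("0x4200000000000000000000000000000000000013") // e.g. legacy L1BlockNumber proxy
	impl := common.HexToAddress("0x1111111111111111111111111111111111111111")  // hypothetical new implementation

	// Static ABI encoding: each address argument is left-padded to 32 bytes.
	calldata := append([]byte{}, selector...)
	calldata = append(calldata, common.LeftPadBytes(proxy.Bytes(), 32)...)
	calldata = append(calldata, common.LeftPadBytes(impl.Bytes(), 32)...)

	fmt.Printf("to:   0x4200000000000000000000000000000000000018\ndata: 0x%x\n", calldata)
}
```
The same calldata can also be produced with Foundry's `cast calldata "upgrade(address,address)" <proxy> <implementation>`.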
......
......@@ -5,7 +5,7 @@ lang: en-US
## Overview
The first release of the OP Stack codebase is called **Bedrock**.
The first release of the OP Stack codebase is called **Bedrock**.
The Bedrock release primarily consists of the core software required to run L2 blockchains and was originally designed to power an upgrade to the Optimism Mainnet network.
## Resources
......@@ -20,15 +20,15 @@ Learn all about the Bedrock release of the OP Stack by reading the [Bedrock Expl
### Specifications
Dive deep into the specifications for the Bedrock release in the [specs folder of the Optimism Monorepo](https://github.com/ethereum-optimism/optimism/blob/develop/specs/README.md).
Dive deep into the specifications for the Bedrock release in the [specs folder of the Optimism Monorepo](https://github.com/ethereum-optimism/optimism/blob/d69cb12f6dcbe3d5355beca8997fbac611b7fe37/specs/README.md).
## Components
- [`op-node`](https://github.com/ethereum-optimism/optimism/tree/develop/op-node)
- [`op-node`](https://github.com/ethereum-optimism/optimism/tree/d69cb12f6dcbe3d5355beca8997fbac611b7fe37/op-node)
- [`op-geth`](https://github.com/ethereum-optimism/op-geth)
- [`op-batcher`](https://github.com/ethereum-optimism/optimism/tree/develop/op-batcher)
- [`op-proposer`](https://github.com/ethereum-optimism/optimism/tree/develop/op-proposer)
- [`contracts-bedrock`](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts-bedrock)
- [`fault-detector`](https://github.com/ethereum-optimism/optimism/tree/develop/packages/fault-detector)
- [`sdk`](https://github.com/ethereum-optimism/optimism/tree/develop/packages/sdk)
- [`chain-mon`](https://github.com/ethereum-optimism/optimism/tree/develop/packages/chain-mon)
\ No newline at end of file
- [`op-batcher`](https://github.com/ethereum-optimism/optimism/tree/d69cb12f6dcbe3d5355beca8997fbac611b7fe37/op-batcher)
- [`op-proposer`](https://github.com/ethereum-optimism/optimism/tree/d69cb12f6dcbe3d5355beca8997fbac611b7fe37/op-proposer)
- [`contracts-bedrock`](https://github.com/ethereum-optimism/optimism/tree/d69cb12f6dcbe3d5355beca8997fbac611b7fe37/packages/contracts-bedrock)
- [`fault-detector`](https://github.com/ethereum-optimism/optimism/tree/d69cb12f6dcbe3d5355beca8997fbac611b7fe37/packages/fault-detector)
- [`sdk`](https://github.com/ethereum-optimism/optimism/tree/d69cb12f6dcbe3d5355beca8997fbac611b7fe37/packages/sdk)
- [`chain-mon`](https://github.com/ethereum-optimism/optimism/tree/d69cb12f6dcbe3d5355beca8997fbac611b7fe37/packages/chain-mon)
......@@ -31,7 +31,7 @@ However, there could be edge cases we did not think about where this matters.
### Accessing L1 information
If you need the equivalent information from the latest L1 block, you can get it from [the `L1Block` contract](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/L2/L1Block.sol).
If you need the equivalent information from the latest L1 block, you can get it from [the `L1Block` contract](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/packages/contracts-bedrock/contracts/L2/L1Block.sol).
This contract is a predeploy at address [`0x4200000000000000000000000000000000000015`](https://goerli-optimism.etherscan.io/address/0x4200000000000000000000000000000000000015).
You can use [the getter functions](https://docs.soliditylang.org/en/v0.8.12/contracts.html#getter-functions) to get these parameters:
......@@ -117,7 +117,7 @@ There are several differences in the way blocks are produced between L1 Ethereum
(1) This is the ideal.
If any blocks are missed it could be an integer multiple such as 24 seconds, 36 seconds, etc.
**Note:** The L1 Ethereum parameter values are taken from [ethereum.org](https://ethereum.org/en/developers/docs/blocks/#block-time). The Optimism Bedrock values are taken from [the Optimism specs](https://github.com/ethereum-optimism/optimism/blob/develop/specs/guaranteed-gas-market.md#limiting-guaranteed-gas).
**Note:** The L1 Ethereum parameter values are taken from [ethereum.org](https://ethereum.org/en/developers/docs/blocks/#block-time). The Optimism Bedrock values are taken from [the Optimism specs](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/specs/guaranteed-gas-market.md#limiting-guaranteed-gas).
......
......@@ -19,7 +19,7 @@ The OP Stack is a decentralized development stack that powers Optimism. Componen
The security model of an OP Stack based blockchain depends on the modules used for its components. Because of the flexibility provided by OP Stack, it is always possible to set up an insecure blockchain using OP Stack components. **The goal of the OP Stack is to provide safe defaults.**
Please also keep in mind that just like any other system, **the OP Stack may contain unknown bugs** that could lead to the loss of some or all of the assets held within an OP Stack based system. [Many components of the OP Stack codebase have been audited](https://github.com/ethereum-optimism/optimism/tree/develop/technical-documents/security-reviews) but **audits are not a stamp of approval** and **a completed audit does not mean that the audited codebase is free of bugs.** It’s important to understand that using the OP Stack inherently exposes you to the risk of bugs within the OP Stack codebase.
Please also keep in mind that just like any other system, **the OP Stack may contain unknown bugs** that could lead to the loss of some or all of the assets held within an OP Stack based system. [Many components of the OP Stack codebase have been audited](https://github.com/ethereum-optimism/optimism/tree/129032f15b76b0d2a940443a39433de931a97a44/technical-documents/security-reviews) but **audits are not a stamp of approval** and **a completed audit does not mean that the audited codebase is free of bugs.** It’s important to understand that using the OP Stack inherently exposes you to the risk of bugs within the OP Stack codebase.
### Is the OP Stack safe to modify?
......
......@@ -60,7 +60,7 @@ The easiest way to withdraw ETH is to send it to the bridge, or the cross domain
transferAmt = BigInt(0.01 * 1e18)
```
1. Create a contract object for the [`OptimismPortal`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/L1/OptimismPortal.sol) contract.
1. Create a contract object for the [`OptimismPortal`](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/packages/contracts-bedrock/contracts/L1/OptimismPortal.sol) contract.
```js
optimismContracts = require("@eth-optimism/contracts-bedrock")
......
......@@ -13,8 +13,8 @@ An OP Stack chain does not have to specify a usable `GUARDIAN` address if it doe
## Who can do it?
[`OptimismPortal`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/L1/OptimismPortal.sol) has an immutable `GUARDIAN`.
That address can call [`pause`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/L1/OptimismPortal.sol#L166-L170) and [`unpause`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/L1/OptimismPortal.sol#L175-L179).
[`OptimismPortal`](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/packages/contracts-bedrock/contracts/L1/OptimismPortal.sol) has an immutable `GUARDIAN`.
That address can call [`pause`](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/packages/contracts-bedrock/contracts/L1/OptimismPortal.sol#L171-L178) and [`unpause`](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/packages/contracts-bedrock/contracts/L1/OptimismPortal.sol#L180-L187).
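For a quick sanity check of a deployment, the sketch below (assuming the portal exposes public `GUARDIAN()` and `paused()` getters; the RPC URL and portal address are placeholders) reads both values with raw `eth_call`s:
```go
// Sketch: query an OptimismPortal deployment for its guardian and pause status.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient"
)

// callSelector performs an eth_call with just the 4-byte selector of sig.
func callSelector(client *ethclient.Client, to common.Address, sig string) ([]byte, error) {
	data := crypto.Keccak256([]byte(sig))[:4]
	return client.CallContract(context.Background(), ethereum.CallMsg{To: &to, Data: data}, nil)
}

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder L1 RPC
	if err != nil {
		log.Fatal(err)
	}
	portal := common.HexToAddress("0x1111111111111111111111111111111111111111") // placeholder OptimismPortal proxy

	guardian, err := callSelector(client, portal, "GUARDIAN()")
	if err != nil {
		log.Fatal(err)
	}
	paused, err := callSelector(client, portal, "paused()")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("guardian (raw return): 0x%x\n", guardian)
	fmt.Printf("paused   (raw return): 0x%x\n", paused)
}
```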
### Changing the guardian
......@@ -23,7 +23,7 @@ The guardian created by the setup script is the admin account.
This is sufficient for testing, but for a production system you would want the guardian to be a multisig with a trusted security council.
The `GUARDIAN` variable is immutable, but the `OptimismPortal` contract sits behind a proxy, so the `GUARDIAN` can be modified by changing the `OptimismPortal` proxy to point to a new implementation contract.
You do this using the L1 [`ProxyAdmin`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol) contract.
You do this using the L1 [`ProxyAdmin`](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol) contract.
<!--
## Seeing it in action
......
......@@ -35,8 +35,8 @@ The Data Availability Layer defines where the raw inputs to an OP Stack based ch
Ethereum DA is currently the most widely used Data Availability module for the OP Stack. When using the Ethereum DA module, source data can be derived from any piece of information accessible on the Ethereum blockchain. This includes Ethereum calldata, events, and 4844 data blobs.
- [Specifications](https://github.com/ethereum-optimism/optimism/blob/develop/specs/derivation.md#batch-submission-wire-format)
- [Source code](https://github.com/ethereum-optimism/optimism/tree/develop/op-batcher)
- [Specifications](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/specs/derivation.md#batch-submission-wire-format)
- [Source code](https://github.com/ethereum-optimism/optimism/tree/129032f15b76b0d2a940443a39433de931a97a44/op-batcher)
### Sequencing
......@@ -58,8 +58,8 @@ The Derivation Layer defines how the raw data in the Data Availability Layer is
The Rollup module derives Engine API inputs from Ethereum block data, Sequencer transaction batches, Deposited transaction events, and more.
- [Specifications](https://github.com/ethereum-optimism/optimism/blob/develop/specs/derivation.md#l2-chain-derivation-pipeline)
- [Source code](https://github.com/ethereum-optimism/optimism/tree/develop/op-node)
- [Specifications](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/specs/derivation.md#l2-chain-derivation-pipeline)
- [Source code](https://github.com/ethereum-optimism/optimism/tree/129032f15b76b0d2a940443a39433de931a97a44/op-node)
#### Indexer (proposed)
......@@ -73,8 +73,8 @@ The Execution Layer defines the structure of state within an OP Stack system and
The EVM is an Execution Layer module that uses the same state representation and state transition function as the Ethereum Virtual Machine. The EVM module in the Ethereum Rollup configuration of the OP Stack is a [lightly modified](https://op-geth.optimism.io/) version of the EVM that adds support for L2 transactions initiated on Ethereum and adds an extra L1 Data Fee to each transaction to account for the cost of publishing transactions to Ethereum.
- [Specifications](https://github.com/ethereum-optimism/optimism/blob/develop/specs/exec-engine.md) (where it differs from [geth](https://geth.ethereum.org/))
- [Source code](https://github.com/ethereum-optimism/op-geth)
- [Specifications](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/specs/exec-engine.md) (where it differs from [geth](https://geth.ethereum.org/))
- [Source code](https://github.com/ethereum-optimism/op-geth/tree/09ade3df6d1d3a4f8f308553825348be132bc960)
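To illustrate the general shape of that L1 Data Fee, here is a sketch under stated assumptions: the constants are placeholders and the linked specs define the canonical formula and parameters.
```go
// Sketch: charge each L2 transaction for the cost of publishing its serialized
// data to L1, priced like Ethereum calldata plus a per-batch overhead, then
// scaled by a chain parameter.
package main

import (
	"fmt"
	"math/big"
)

// l1DataGas prices each zero byte at 4 gas and each non-zero byte at 16 gas,
// plus a fixed overhead (assumed shape, not the canonical implementation).
func l1DataGas(txData []byte, overhead uint64) *big.Int {
	var zeroes, ones uint64
	for _, b := range txData {
		if b == 0 {
			zeroes++
		} else {
			ones++
		}
	}
	return new(big.Int).SetUint64(zeroes*4 + ones*16 + overhead)
}

// l1DataFee = l1DataGas * l1BaseFee * scalar / 1e6 (scalar is a chain parameter).
func l1DataFee(txData []byte, l1BaseFee *big.Int, overhead, scalar uint64) *big.Int {
	fee := new(big.Int).Mul(l1DataGas(txData, overhead), l1BaseFee)
	fee.Mul(fee, new(big.Int).SetUint64(scalar))
	return fee.Div(fee, big.NewInt(1_000_000))
}

func main() {
	txData := []byte{0x00, 0x01, 0x02, 0x00} // example serialized tx bytes
	fee := l1DataFee(txData, big.NewInt(30_000_000_000), 2100, 684_000) // placeholder params
	fmt.Println("l1 data fee (wei):", fee)
}
```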
### Settlement Layer
......@@ -89,8 +89,8 @@ Once a transaction is published and finalized on the corresponding Data Availabi
An Attestation-based Fault Proof mechanism uses an optimistic protocol to establish a view of an OP Stack chain. In optimistic settlement mechanisms generally, **Proposer** entities can propose what they believe to be the current valid state of the OP Stack chain. If these proposals are not invalidated within a certain period of time (the “challenge period”), then the proposals are assumed by the mechanism to be correct. In the Attestation Proof mechanism in particular, a proposal can be invalidated if some threshold of pre-defined parties provide attestations to a valid state that is different than the state in the proposal. This places a trust assumption on the honesty of at least a threshold number of the pre-defined participants.
- [Specifications](https://github.com/ethereum-optimism/optimism/blob/develop/specs/withdrawals.md) (called [withdrawal transactions](https://community.optimism.io/docs/developers/bridge/messaging/#))
- [Source code](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts-bedrock/contracts)
- [Specifications](https://github.com/ethereum-optimism/optimism/blob/129032f15b76b0d2a940443a39433de931a97a44/specs/withdrawals.md) (called [withdrawal transactions](https://community.optimism.io/docs/developers/bridge/messaging/#))
- [Source code](https://github.com/ethereum-optimism/optimism/tree/129032f15b76b0d2a940443a39433de931a97a44/packages/contracts-bedrock/contracts)
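A minimal Go sketch of the described invalidation rule (illustrative only; it does not correspond to the contracts in this repository):
```go
// Sketch: a proposal stands unless, before the challenge period ends, at least
// `threshold` allow-listed attestors attest to a conflicting output root.
package main

import (
	"fmt"
	"time"
)

type proposal struct {
	OutputRoot [32]byte
	ProposedAt time.Time
}

type attestation struct {
	Attestor   string
	OutputRoot [32]byte
}

// invalidated reports whether enough allow-listed attestors claim a different
// output root than the proposal.
func invalidated(p proposal, atts []attestation, allowed map[string]bool, threshold int) bool {
	conflicting := map[string]bool{}
	for _, a := range atts {
		if allowed[a.Attestor] && a.OutputRoot != p.OutputRoot {
			conflicting[a.Attestor] = true
		}
	}
	return len(conflicting) >= threshold
}

// finalized reports whether the proposal survived the challenge period
// without being invalidated.
func finalized(p proposal, challengePeriod time.Duration, now time.Time, overturned bool) bool {
	return !overturned && now.After(p.ProposedAt.Add(challengePeriod))
}

func main() {
	p := proposal{OutputRoot: [32]byte{0x01}, ProposedAt: time.Now().Add(-8 * 24 * time.Hour)}
	allowed := map[string]bool{"alice": true, "bob": true, "carol": true}
	atts := []attestation{{"alice", [32]byte{0x02}}, {"bob", [32]byte{0x02}}}
	overturned := invalidated(p, atts, allowed, 2)
	fmt.Println("invalidated:", overturned)
	fmt.Println("finalized:  ", finalized(p, 7*24*time.Hour, time.Now(), overturned))
}
```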
#### Fault Proof Optimistic Settlement (proposed)
......
# @eth-optimism/endpoint-monitor
## 1.0.2
### Patch Changes
- [#6164](https://github.com/ethereum-optimism/optimism/pull/6164) [`c11039060`](https://github.com/ethereum-optimism/optimism/commit/c11039060bc037a88916c2cba602687b6d69ad1a) Thanks [@pengin7384](https://github.com/pengin7384)! - fix typo
## 1.0.1
### Patch Changes
......
{
"name": "@eth-optimism/endpoint-monitor",
"version": "1.0.1",
"version": "1.0.2",
"private": true,
"dependencies": {}
}
......@@ -23,6 +23,7 @@ require (
github.com/ipfs/go-datastore v0.6.0
github.com/ipfs/go-ds-leveldb v0.5.0
github.com/jackc/pgtype v1.14.0
github.com/jackc/pgx/v5 v5.3.1
github.com/lib/pq v1.10.9
github.com/libp2p/go-libp2p v0.25.1
github.com/libp2p/go-libp2p-pubsub v0.9.3
......@@ -105,7 +106,6 @@ require (
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
......
......@@ -21,8 +21,8 @@ const (
)
// DepositsByAddress mocks returning deposits by an address
func (mbv *MockBridgeView) DepositsByAddress(address common.Address) ([]*database.DepositWithTransactionHash, error) {
return []*database.DepositWithTransactionHash{
func (mbv *MockBridgeView) DepositsByAddress(address common.Address) ([]*database.DepositWithTransactionHashes, error) {
return []*database.DepositWithTransactionHashes{
{
Deposit: database.Deposit{
GUID: uuid.MustParse(guid1),
......
package cli
import (
"context"
"fmt"
"os"
"github.com/ethereum-optimism/optimism/indexer"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/opio"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
)
......@@ -20,16 +24,26 @@ type Cli struct {
func runIndexer(ctx *cli.Context) error {
configPath := ctx.String(ConfigFlag.Name)
conf, err := config.LoadConfig(configPath)
cfg, err := config.LoadConfig(configPath)
if err != nil {
return err
}
fmt.Println(conf)
// setup logger
cfg.Logger = log.NewLogger(log.ReadCLIConfig(ctx))
indexer, err := indexer.NewIndexer(cfg)
if err != nil {
log.Crit("Failed to load config", "message", err)
return err
}
// finish me
return nil
indexerCtx, indexerCancel := context.WithCancel(context.Background())
go func() {
opio.BlockOnInterrupts()
indexerCancel()
}()
return indexer.Run(indexerCtx)
}
func runApi(ctx *cli.Context) error {
......@@ -39,8 +53,9 @@ func runApi(ctx *cli.Context) error {
fmt.Println(conf)
if err != nil {
log.Crit("Failed to load config", "message", err)
panic(err)
}
// finish me
return nil
}
......@@ -71,17 +86,7 @@ func (c *Cli) Run(args []string) error {
}
func NewCli(GitVersion string, GitCommit string, GitDate string) *Cli {
log.Root().SetHandler(
log.LvlFilterHandler(
log.LvlInfo,
log.StreamHandler(os.Stdout, log.TerminalFormat(true)),
),
)
flags := []cli.Flag{
ConfigFlag,
}
flags := append([]cli.Flag{ConfigFlag}, log.CLIFlags("INDEXER")...)
app := &cli.App{
Version: fmt.Sprintf("%s-%s", GitVersion, params.VersionWithCommit(GitCommit, GitDate)),
Description: "An indexer of all optimism events with a serving api layer",
......
......@@ -4,6 +4,8 @@ import (
"os"
"github.com/BurntSushi/toml"
"github.com/ethereum/go-ethereum/log"
)
// Config represents the `indexer.toml` file used to configure the indexer
......@@ -13,6 +15,7 @@ type Config struct {
DB DBConfig
API APIConfig
Metrics MetricsConfig
Logger log.Logger `toml:"-"`
}
// ChainConfig configures of the chain being indexed
......@@ -31,6 +34,7 @@ type RPCsConfig struct {
type DBConfig struct {
Host string
Port int
Name string
User string
Password string
}
......
......@@ -3,6 +3,7 @@ package database
import (
"context"
"errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
......@@ -53,15 +54,20 @@ type LegacyStateBatch struct {
type OutputProposal struct {
OutputRoot common.Hash `gorm:"primaryKey;serializer:json"`
L2OutputIndex U256
L2BlockNumber U256
L1ContractEventGUID uuid.UUID
}
type BlocksView interface {
L1BlockHeader(*big.Int) (*L1BlockHeader, error)
LatestL1BlockHeader() (*L1BlockHeader, error)
LatestCheckpointedOutput() (*OutputProposal, error)
OutputProposal(index *big.Int) (*OutputProposal, error)
L2BlockHeader(*big.Int) (*L2BlockHeader, error)
LatestL2BlockHeader() (*L2BlockHeader, error)
}
......@@ -104,6 +110,20 @@ func (db *blocksDB) StoreOutputProposals(outputs []*OutputProposal) error {
return result.Error
}
func (db *blocksDB) L1BlockHeader(height *big.Int) (*L1BlockHeader, error) {
var l1Header L1BlockHeader
result := db.gorm.Where(&BlockHeader{Number: U256{Int: height}}).Take(&l1Header)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l1Header, nil
}
func (db *blocksDB) LatestL1BlockHeader() (*L1BlockHeader, error) {
var l1Header L1BlockHeader
result := db.gorm.Order("number DESC").Take(&l1Header)
......@@ -120,7 +140,21 @@ func (db *blocksDB) LatestL1BlockHeader() (*L1BlockHeader, error) {
func (db *blocksDB) LatestCheckpointedOutput() (*OutputProposal, error) {
var outputProposal OutputProposal
result := db.gorm.Order("l2_block_number DESC").Take(&outputProposal)
result := db.gorm.Order("l2_output_index DESC").Take(&outputProposal)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &outputProposal, nil
}
func (db *blocksDB) OutputProposal(index *big.Int) (*OutputProposal, error) {
var outputProposal OutputProposal
result := db.gorm.Where(&OutputProposal{L2OutputIndex: U256{Int: index}}).Take(&outputProposal)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......@@ -139,6 +173,20 @@ func (db *blocksDB) StoreL2BlockHeaders(headers []*L2BlockHeader) error {
return result.Error
}
func (db *blocksDB) L2BlockHeader(height *big.Int) (*L2BlockHeader, error) {
var l2Header L2BlockHeader
result := db.gorm.Where(&BlockHeader{Number: U256{Int: height}}).Take(&l2Header)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l2Header, nil
}
func (db *blocksDB) LatestL2BlockHeader() (*L2BlockHeader, error) {
var l2Header L2BlockHeader
result := db.gorm.Order("number DESC").Take(&l2Header)
......
......@@ -48,9 +48,11 @@ type Deposit struct {
TokenPair TokenPair `gorm:"embedded"`
}
type DepositWithTransactionHash struct {
type DepositWithTransactionHashes struct {
Deposit Deposit `gorm:"embedded"`
L1TransactionHash common.Hash `gorm:"serializer:json"`
FinalizedL2TransactionHash common.Hash `gorm:"serializer:json"`
}
type Withdrawal struct {
......@@ -77,12 +79,12 @@ type WithdrawalWithTransactionHashes struct {
Withdrawal Withdrawal `gorm:"embedded"`
L2TransactionHash common.Hash `gorm:"serializer:json"`
ProvenL1TransactionHash *common.Hash `gorm:"serializer:json"`
FinalizedL1TransactionHash *common.Hash `gorm:"serializer:json"`
ProvenL1TransactionHash common.Hash `gorm:"serializer:json"`
FinalizedL1TransactionHash common.Hash `gorm:"serializer:json"`
}
type BridgeView interface {
DepositsByAddress(address common.Address) ([]*DepositWithTransactionHash, error)
DepositsByAddress(address common.Address) ([]*DepositWithTransactionHashes, error)
DepositByMessageNonce(*big.Int) (*Deposit, error)
LatestDepositMessageNonce() (*big.Int, error)
......@@ -122,14 +124,16 @@ func (db *bridgeDB) StoreDeposits(deposits []*Deposit) error {
return result.Error
}
func (db *bridgeDB) DepositsByAddress(address common.Address) ([]*DepositWithTransactionHash, error) {
depositsQuery := db.gorm.Table("deposits").Select("deposits.*, l1_contract_events.transaction_hash AS l1_transaction_hash")
eventsJoinQuery := depositsQuery.Joins("LEFT JOIN l1_contract_events ON deposits.initiated_l1_event_guid = l1_contract_events.guid")
func (db *bridgeDB) DepositsByAddress(address common.Address) ([]*DepositWithTransactionHashes, error) {
depositsQuery := db.gorm.Table("deposits").Select("deposits.*, l1_contract_events.transaction_hash AS l1_transaction_hash, l2_contract_events.transaction_hash AS finalized_l2_transaction_hash")
initiatedJoinQuery := depositsQuery.Joins("LEFT JOIN l1_contract_events ON deposits.initiated_l1_event_guid = l1_contract_events.guid")
finalizedJoinQuery := initiatedJoinQuery.Joins("LEFT JOIN l2_contract_events ON deposits.finalized_l2_event_guid = l2_contract_events.guid")
// add in cursoring options
filteredQuery := eventsJoinQuery.Where(&Transaction{FromAddress: address}).Order("deposits.timestamp DESC").Limit(100)
filteredQuery := finalizedJoinQuery.Where(&Transaction{FromAddress: address}).Order("deposits.timestamp DESC").Limit(100)
deposits := make([]*DepositWithTransactionHash, 100)
deposits := make([]*DepositWithTransactionHashes, 100)
result := filteredQuery.Scan(&deposits)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
......@@ -144,7 +148,7 @@ func (db *bridgeDB) DepositsByAddress(address common.Address) ([]*DepositWithTra
func (db *bridgeDB) DepositByMessageNonce(nonce *big.Int) (*Deposit, error) {
var deposit Deposit
result := db.gorm.First(&deposit, "sent_message_nonce = ?", U256{Int: nonce})
result := db.gorm.Where(&Deposit{SentMessageNonce: U256{Int: nonce}}).Take(&deposit)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......@@ -172,7 +176,7 @@ func (db *bridgeDB) LatestDepositMessageNonce() (*big.Int, error) {
func (db *bridgeDB) MarkFinalizedDepositEvent(guid, finalizationEventGUID uuid.UUID) error {
var deposit Deposit
result := db.gorm.First(&deposit, "guid = ?", guid)
result := db.gorm.Where(&Deposit{GUID: guid}).Take(&deposit)
if result.Error != nil {
return result.Error
}
......@@ -191,7 +195,7 @@ func (db *bridgeDB) StoreWithdrawals(withdrawals []*Withdrawal) error {
func (db *bridgeDB) MarkProvenWithdrawalEvent(guid, provenL1EventGuid uuid.UUID) error {
var withdrawal Withdrawal
result := db.gorm.First(&withdrawal, "guid = ?", guid)
result := db.gorm.Where(&Withdrawal{GUID: guid}).Take(&withdrawal)
if result.Error != nil {
return result.Error
}
......@@ -203,7 +207,7 @@ func (db *bridgeDB) MarkProvenWithdrawalEvent(guid, provenL1EventGuid uuid.UUID)
func (db *bridgeDB) MarkFinalizedWithdrawalEvent(guid, finalizedL1EventGuid uuid.UUID) error {
var withdrawal Withdrawal
result := db.gorm.First(&withdrawal, "guid = ?", guid)
result := db.gorm.Where(&Withdrawal{GUID: guid}).Take(&withdrawal)
if result.Error != nil {
return result.Error
}
......@@ -242,7 +246,7 @@ func (db *bridgeDB) WithdrawalsByAddress(address common.Address) ([]*WithdrawalW
func (db *bridgeDB) WithdrawalByMessageNonce(nonce *big.Int) (*Withdrawal, error) {
var withdrawal Withdrawal
result := db.gorm.First(&withdrawal, "sent_message_nonce = ?", U256{Int: nonce})
result := db.gorm.Where(&Withdrawal{SentMessageNonce: U256{Int: nonce}}).Take(&withdrawal)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......@@ -256,7 +260,7 @@ func (db *bridgeDB) WithdrawalByMessageNonce(nonce *big.Int) (*Withdrawal, error
func (db *bridgeDB) WithdrawalByHash(hash common.Hash) (*Withdrawal, error) {
var withdrawal Withdrawal
result := db.gorm.First(&withdrawal, "withdrawal_hash = ?", hash.String())
result := db.gorm.Where(&Withdrawal{WithdrawalHash: hash}).Take(&withdrawal)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......
package database
import (
"errors"
"gorm.io/gorm"
"github.com/ethereum/go-ethereum/common"
......@@ -46,6 +48,11 @@ type L2ContractEvent struct {
}
type ContractEventsView interface {
L1ContractEvent(uuid.UUID) (*L1ContractEvent, error)
L1ContractEventByTxLogIndex(common.Hash, uint64) (*L1ContractEvent, error)
L2ContractEvent(uuid.UUID) (*L2ContractEvent, error)
L2ContractEventByTxLogIndex(common.Hash, uint64) (*L2ContractEvent, error)
}
type ContractEventsDB interface {
......@@ -74,9 +81,65 @@ func (db *contractEventsDB) StoreL1ContractEvents(events []*L1ContractEvent) err
return result.Error
}
func (db *contractEventsDB) L1ContractEvent(uuid uuid.UUID) (*L1ContractEvent, error) {
var l1ContractEvent L1ContractEvent
result := db.gorm.Where(&ContractEvent{GUID: uuid}).Take(&l1ContractEvent)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l1ContractEvent, nil
}
func (db *contractEventsDB) L1ContractEventByTxLogIndex(txHash common.Hash, logIndex uint64) (*L1ContractEvent, error) {
var l1ContractEvent L1ContractEvent
result := db.gorm.Where(&ContractEvent{TransactionHash: txHash, LogIndex: logIndex}).Take(&l1ContractEvent)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l1ContractEvent, nil
}
// L2
func (db *contractEventsDB) StoreL2ContractEvents(events []*L2ContractEvent) error {
result := db.gorm.Create(&events)
return result.Error
}
func (db *contractEventsDB) L2ContractEvent(uuid uuid.UUID) (*L2ContractEvent, error) {
var l2ContractEvent L2ContractEvent
result := db.gorm.Where(&ContractEvent{GUID: uuid}).Take(&l2ContractEvent)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l2ContractEvent, nil
}
func (db *contractEventsDB) L2ContractEventByTxLogIndex(txHash common.Hash, logIndex uint64) (*L2ContractEvent, error) {
var l2ContractEvent L2ContractEvent
result := db.gorm.Where(&ContractEvent{TransactionHash: txHash, LogIndex: logIndex}).Take(&l2ContractEvent)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l2ContractEvent, nil
}
......@@ -4,6 +4,7 @@ package database
import (
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
)
type DB struct {
......@@ -19,6 +20,10 @@ func NewDB(dsn string) (*DB, error) {
// The indexer will explicitly manage the transaction
// flow processing blocks
SkipDefaultTransaction: true,
// We may choose to create an adapter such that the
// logger emits to the geth logger when on DEBUG mode
Logger: logger.Default.LogMode(logger.Silent),
})
if err != nil {
......@@ -43,6 +48,15 @@ func (db *DB) Transaction(fn func(db *DB) error) error {
})
}
func (db *DB) Close() error {
sql, err := db.gorm.DB()
if err != nil {
return err
}
return sql.Close()
}
func dbFromGormTx(tx *gorm.DB) *DB {
return &DB{
gorm: tx,
......
package e2e_tests
import (
"context"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum-optimism/optimism/indexer/processor"
"github.com/ethereum-optimism/optimism/op-service/client/utils"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/require"
)
func TestE2EBlockHeaders(t *testing.T) {
testSuite := createE2ETestSuite(t)
l1Client := testSuite.OpSys.Clients["l1"]
l2Client := testSuite.OpSys.Clients["sequencer"]
l2OutputOracle, err := bindings.NewL2OutputOracleCaller(predeploys.DevL2OutputOracleAddr, l1Client)
require.NoError(t, err)
// a minute for total setup to finish
setupCtx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
// wait for at least 10 L2 blocks to be created & posted on L1
require.NoError(t, utils.WaitFor(setupCtx, time.Second, func() (bool, error) {
l2Height, err := l2OutputOracle.LatestBlockNumber(&bind.CallOpts{Context: setupCtx})
return l2Height != nil && l2Height.Uint64() >= 9, err
}))
// ensure the processors are caught up to this state
l1Height, err := l1Client.BlockNumber(setupCtx)
require.NoError(t, err)
require.NoError(t, utils.WaitFor(setupCtx, time.Second, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
return (l1Header != nil && l1Header.Number.Uint64() >= l1Height) && (l2Header != nil && l2Header.Number.Uint64() >= 9), nil
}))
t.Run("indexes L2 blocks", func(t *testing.T) {
latestL2Header, err := testSuite.DB.Blocks.LatestL2BlockHeader()
require.NoError(t, err)
require.NotNil(t, latestL2Header)
require.True(t, latestL2Header.Number.Int.Uint64() >= 9)
for i := int64(0); i < 10; i++ {
height := big.NewInt(i)
indexedHeader, err := testSuite.DB.Blocks.L2BlockHeader(height)
require.NoError(t, err)
require.NotNil(t, indexedHeader)
header, err := l2Client.HeaderByNumber(context.Background(), height)
require.NoError(t, err)
require.NotNil(t, header)
require.Equal(t, header.Number.Int64(), indexedHeader.Number.Int.Int64())
require.Equal(t, header.Hash(), indexedHeader.Hash)
require.Equal(t, header.ParentHash, indexedHeader.ParentHash)
require.Equal(t, header.Time, indexedHeader.Timestamp)
}
})
t.Run("indexes L2 checkpoints", func(t *testing.T) {
latestOutput, err := testSuite.DB.Blocks.LatestCheckpointedOutput()
require.NoError(t, err)
require.NotNil(t, latestOutput)
require.GreaterOrEqual(t, latestOutput.L2BlockNumber.Int.Uint64(), uint64(9))
l2EthClient, err := node.DialEthClient(testSuite.OpSys.Nodes["sequencer"].HTTPEndpoint())
require.NoError(t, err)
submissionInterval := testSuite.OpCfg.DeployConfig.L2OutputOracleSubmissionInterval
numOutputs := latestOutput.L2BlockNumber.Int.Uint64() / submissionInterval
for i := int64(0); i < int64(numOutputs); i++ {
blockNumber := big.NewInt((i + 1) * int64(submissionInterval))
output, err := testSuite.DB.Blocks.OutputProposal(big.NewInt(i))
require.NoError(t, err)
require.NotNil(t, output)
require.Equal(t, i, output.L2OutputIndex.Int.Int64())
require.Equal(t, blockNumber, output.L2BlockNumber.Int)
require.NotEmpty(t, output.L1ContractEventGUID)
// we may as well check the integrity of the output root
l2Block, err := l2Client.BlockByNumber(context.Background(), blockNumber)
require.NoError(t, err)
messagePasserStorageHash, err := l2EthClient.StorageHash(predeploys.L2ToL1MessagePasserAddr, blockNumber)
require.NoError(t, err)
// construct and check output root
outputRootPreImage := [128]byte{} // 4 words (first 32 are zero for version 0)
copy(outputRootPreImage[32:64], l2Block.Root().Bytes()) // state root
copy(outputRootPreImage[64:96], messagePasserStorageHash.Bytes()) // message passer storage root
copy(outputRootPreImage[96:128], l2Block.Hash().Bytes()) // block hash
require.Equal(t, crypto.Keccak256Hash(outputRootPreImage[:]), output.OutputRoot)
}
})
t.Run("indexes L1 logs and associated blocks", func(t *testing.T) {
testCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
devContracts := processor.DevL1Contracts().ToSlice()
logFilter := ethereum.FilterQuery{FromBlock: big.NewInt(0), ToBlock: big.NewInt(int64(l1Height)), Addresses: devContracts}
logs, err := l1Client.FilterLogs(testCtx, logFilter) // []types.Log
require.NoError(t, err)
for _, log := range logs {
contractEvent, err := testSuite.DB.ContractEvents.L1ContractEventByTxLogIndex(log.TxHash, uint64(log.Index))
require.NoError(t, err)
require.Equal(t, log.Topics[0], contractEvent.EventSignature)
require.Equal(t, log.BlockHash, contractEvent.BlockHash)
require.Equal(t, log.TxHash, contractEvent.TransactionHash)
require.Equal(t, log.Index, uint(contractEvent.LogIndex))
// ensure the block is also indexed
block, err := l1Client.BlockByNumber(testCtx, big.NewInt(int64(log.BlockNumber)))
require.NoError(t, err)
require.Equal(t, block.Time(), contractEvent.Timestamp)
l1BlockHeader, err := testSuite.DB.Blocks.L1BlockHeader(block.Number())
require.NoError(t, err)
require.Equal(t, block.Hash(), l1BlockHeader.Hash)
require.Equal(t, block.ParentHash(), l1BlockHeader.ParentHash)
require.Equal(t, block.Number(), l1BlockHeader.Number.Int)
require.Equal(t, block.Time(), l1BlockHeader.Timestamp)
}
})
}
package e2e_tests
import (
"context"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/indexer/processor"
"github.com/ethereum-optimism/optimism/op-service/client/utils"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
)
func TestE2EBridge(t *testing.T) {
testSuite := createE2ETestSuite(t)
l1Client := testSuite.OpSys.Clients["l1"]
l2Client := testSuite.OpSys.Clients["sequencer"]
l1StandardBridge, err := bindings.NewL1StandardBridge(predeploys.DevL1StandardBridgeAddr, l1Client)
require.NoError(t, err)
l2StandardBridge, err := bindings.NewL2StandardBridge(predeploys.L2StandardBridgeAddr, l2Client)
require.NoError(t, err)
// pre-emptively conduct a deposit & withdrawal to speed up the test
setupCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
aliceAddr := testSuite.OpCfg.Secrets.Addresses().Alice
l1Opts, err := bind.NewKeyedTransactorWithChainID(testSuite.OpCfg.Secrets.Alice, testSuite.OpCfg.L1ChainIDBig())
require.NoError(t, err)
l2Opts, err := bind.NewKeyedTransactorWithChainID(testSuite.OpCfg.Secrets.Alice, testSuite.OpCfg.L2ChainIDBig())
require.NoError(t, err)
l1Opts.Value = big.NewInt(params.Ether)
l2Opts.Value = big.NewInt(params.Ether)
depositTx, err := l1StandardBridge.DepositETH(l1Opts, 200_000, []byte{byte(1)})
require.NoError(t, err)
withdrawTx, err := l2StandardBridge.Withdraw(l2Opts, processor.EthAddress, big.NewInt(params.Ether), 200_000, []byte{byte(1)})
require.NoError(t, err)
depositReceipt, err := utils.WaitReceiptOK(setupCtx, l1Client, depositTx.Hash())
require.NoError(t, err)
withdrawalReceipt, err := utils.WaitReceiptOK(setupCtx, l2Client, withdrawTx.Hash())
require.NoError(t, err)
t.Run("indexes ETH deposits", func(t *testing.T) {
testCtx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
// Pause the L2Processor so that we can test for finalization separately. A pause is
// required since deposit inclusion is a part of the L2 block derivation process
testSuite.Indexer.L2Processor.PauseForTest()
// (1) Test Deposit Initiation
// wait for processor catchup
require.NoError(t, utils.WaitFor(testCtx, 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= depositReceipt.BlockNumber.Uint64(), nil
}))
aliceDeposits, err := testSuite.DB.Bridge.DepositsByAddress(aliceAddr)
require.NoError(t, err)
require.Len(t, aliceDeposits, 1)
require.Equal(t, depositTx.Hash(), aliceDeposits[0].L1TransactionHash)
require.Empty(t, aliceDeposits[0].FinalizedL2TransactionHash)
deposit := aliceDeposits[0].Deposit
require.Nil(t, deposit.FinalizedL2EventGUID)
require.Equal(t, processor.EthAddress, deposit.TokenPair.L1TokenAddress)
require.Equal(t, processor.EthAddress, deposit.TokenPair.L2TokenAddress)
require.Equal(t, big.NewInt(params.Ether), deposit.Tx.Amount.Int)
require.Equal(t, aliceAddr, deposit.Tx.FromAddress)
require.Equal(t, aliceAddr, deposit.Tx.ToAddress)
require.Equal(t, byte(1), deposit.Tx.Data[0])
// (2) Test Deposit Finalization
testSuite.Indexer.L2Processor.ResumeForTest()
// the finalization hash can be deterministically derived from the TransactionDeposited log
var depositTxHash common.Hash
for _, log := range depositReceipt.Logs {
if log.Topics[0] == derive.DepositEventABIHash {
deposit, err := derive.UnmarshalDepositLogEvent(log)
require.NoError(t, err)
depositTxHash = types.NewTx(deposit).Hash()
break
}
}
// wait for the l2 processor to catch this deposit in the derivation process
_, err = utils.WaitReceiptOK(testCtx, l2Client, depositTxHash)
require.NoError(t, err)
l2Height, err := l2Client.BlockNumber(testCtx)
require.NoError(t, err)
require.NoError(t, utils.WaitFor(testCtx, 500*time.Millisecond, func() (bool, error) {
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
return l2Header != nil && l2Header.Number.Uint64() >= l2Height, nil
}))
aliceDeposits, err = testSuite.DB.Bridge.DepositsByAddress(aliceAddr)
require.NoError(t, err)
require.Equal(t, depositTxHash, aliceDeposits[0].FinalizedL2TransactionHash)
require.NotNil(t, aliceDeposits[0].Deposit.FinalizedL2EventGUID)
})
t.Run("indexes ETH withdrawals", func(t *testing.T) {
testCtx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
// (1) Test Withdrawal Initiation
// wait for processor catchup
require.NoError(t, utils.WaitFor(testCtx, 500*time.Millisecond, func() (bool, error) {
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
return l2Header != nil && l2Header.Number.Uint64() >= withdrawalReceipt.BlockNumber.Uint64(), nil
}))
aliceWithdrawals, err := testSuite.DB.Bridge.WithdrawalsByAddress(aliceAddr)
require.NoError(t, err)
require.Len(t, aliceWithdrawals, 1)
require.Equal(t, withdrawTx.Hash(), aliceWithdrawals[0].L2TransactionHash)
require.Empty(t, aliceWithdrawals[0].ProvenL1TransactionHash)
require.Empty(t, aliceWithdrawals[0].FinalizedL1TransactionHash)
withdrawal := aliceWithdrawals[0].Withdrawal
require.Nil(t, withdrawal.ProvenL1EventGUID)
require.Nil(t, withdrawal.FinalizedL1EventGUID)
require.Equal(t, processor.EthAddress, withdrawal.TokenPair.L1TokenAddress)
require.Equal(t, processor.EthAddress, withdrawal.TokenPair.L2TokenAddress)
require.Equal(t, big.NewInt(params.Ether), withdrawal.Tx.Amount.Int)
require.Equal(t, aliceAddr, withdrawal.Tx.FromAddress)
require.Equal(t, aliceAddr, withdrawal.Tx.ToAddress)
require.Equal(t, byte(1), withdrawal.Tx.Data[0])
// (2) Test Withdrawal Proven
// prove & wait for processor catchup
withdrawParams, proveReceipt := op_e2e.ProveWithdrawal(t, *testSuite.OpCfg, l1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawalReceipt)
require.NoError(t, utils.WaitFor(testCtx, 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= proveReceipt.BlockNumber.Uint64(), nil
}))
aliceWithdrawals, err = testSuite.DB.Bridge.WithdrawalsByAddress(aliceAddr)
require.NoError(t, err)
require.Empty(t, aliceWithdrawals[0].FinalizedL1TransactionHash)
require.Equal(t, proveReceipt.TxHash, aliceWithdrawals[0].ProvenL1TransactionHash)
// (3) Test Withdrawal Finalization
// finalize & wait for processor catchup
finalizeReceipt := op_e2e.FinalizeWithdrawal(t, *testSuite.OpCfg, l1Client, testSuite.OpCfg.Secrets.Alice, withdrawalReceipt, withdrawParams)
require.NoError(t, utils.WaitFor(testCtx, 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil
}))
aliceWithdrawals, err = testSuite.DB.Bridge.WithdrawalsByAddress(aliceAddr)
require.NoError(t, err)
require.Equal(t, finalizeReceipt.TxHash, aliceWithdrawals[0].FinalizedL1TransactionHash)
})
}
package e2e_tests
import (
"context"
"database/sql"
"fmt"
"io/fs"
"os"
"path/filepath"
"testing"
"time"
"github.com/ethereum-optimism/optimism/indexer"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/log"
_ "github.com/jackc/pgx/v5/stdlib"
"github.com/stretchr/testify/require"
)
type E2ETestSuite struct {
t *testing.T
// Indexer
DB *database.DB
Indexer *indexer.Indexer
// Rollup
OpCfg *op_e2e.SystemConfig
OpSys *op_e2e.System
}
func createE2ETestSuite(t *testing.T) E2ETestSuite {
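// DB_USER must name a local Postgres role able to create and drop databases (see setupTestDatabase)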
dbUser := os.Getenv("DB_USER")
dbName := setupTestDatabase(t)
// Replace the handler of the global logger with the testlog
logger := testlog.Logger(t, log.LvlInfo)
log.Root().SetHandler(logger.GetHandler())
// Rollup System Configuration and Start
opCfg := op_e2e.DefaultSystemConfig(t)
opCfg.DeployConfig.FinalizationPeriodSeconds = 2
opSys, err := opCfg.Start()
require.NoError(t, err)
// Indexer Configuration and Start
indexerCfg := config.Config{
DB: config.DBConfig{
Host: "127.0.0.1",
Port: 5432,
Name: dbName,
User: dbUser,
},
RPCs: config.RPCsConfig{
L1RPC: opSys.Nodes["l1"].HTTPEndpoint(),
L2RPC: opSys.Nodes["sequencer"].HTTPEndpoint(),
},
Logger: logger,
}
db, err := database.NewDB(fmt.Sprintf("postgres://%s@localhost:5432/%s?sslmode=disable", dbUser, dbName))
require.NoError(t, err)
indexer, err := indexer.NewIndexer(indexerCfg)
require.NoError(t, err)
indexerCtx, indexerStop := context.WithCancel(context.Background())
go func() {
err := indexer.Run(indexerCtx)
require.NoError(t, err)
indexer.Cleanup()
}()
t.Cleanup(func() {
indexerStop()
// wait a second for the stop signal to be received
time.Sleep(1 * time.Second)
indexer.Cleanup()
db.Close()
opSys.Close()
})
return E2ETestSuite{
t: t,
DB: db,
Indexer: indexer,
OpCfg: &opCfg,
OpSys: opSys,
}
}
func setupTestDatabase(t *testing.T) string {
user := os.Getenv("DB_USER")
pg, err := sql.Open("pgx", fmt.Sprintf("postgres://%s@localhost:5432?sslmode=disable", user))
require.NoError(t, err)
require.NoError(t, pg.Ping())
// create database
dbName := fmt.Sprintf("indexer_test_%d", time.Now().UnixNano())
_, err = pg.Exec("CREATE DATABASE " + dbName)
require.NoError(t, err)
t.Cleanup(func() {
_, err := pg.Exec("DROP DATABASE " + dbName)
require.NoError(t, err)
pg.Close()
})
// setup schema; migration files are walked in lexical order
t.Logf("created database %s", dbName)
db, err := sql.Open("pgx", fmt.Sprintf("postgres://%s@localhost:5432/%s?sslmode=disable", user, dbName))
require.NoError(t, err)
require.NoError(t, db.Ping())
defer db.Close()
t.Logf("running schema migrations...")
require.NoError(t, filepath.Walk("../migrations", func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err
} else if info.IsDir() {
return nil
}
t.Logf("running schema migration: %s", path)
data, err := os.ReadFile(path)
if err != nil {
return err
}
_, err = db.Exec(string(data))
return err
}))
t.Logf("schema loaded")
return dbName
}
package indexer
import (
"context"
"fmt"
"os"
"sync"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/flags"
"github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum-optimism/optimism/indexer/processor"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli"
)
// Main is the entrypoint into the indexer service. This method returns
// a closure that executes the service and blocks until the service exits. The
// use of a closure allows the parameters bound to the top-level main package,
// e.g. GitVersion, to be captured and used once the function is executed.
func Main(gitVersion string) func(ctx *cli.Context) error {
return func(ctx *cli.Context) error {
log.Info("initializing indexer")
indexer, err := NewIndexer(ctx)
if err != nil {
log.Error("unable to initialize indexer", "err", err)
return err
}
log.Info("starting indexer")
if err := indexer.Start(); err != nil {
log.Error("unable to start indexer", "err", err)
}
defer indexer.Stop()
log.Info("indexer started")
// Never terminate
<-(chan struct{})(nil)
return nil
}
}
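For orientation, here is a minimal sketch (not part of the diff) of how an entrypoint closure like this is typically attached to a urfave/cli (v1) application. The app name, version wiring, and omission of flag registration are illustrative rather than taken from the actual cmd package.

package main

import (
	"os"

	"github.com/ethereum-optimism/optimism/indexer"
	"github.com/urfave/cli"
)

// GitVersion would normally be injected with -ldflags at build time.
var GitVersion = ""

func main() {
	app := cli.NewApp()
	app.Name = "indexer"
	app.Version = GitVersion
	// The real binary also registers the indexer's CLI flags here (omitted in this sketch).
	// Main returns the closure that actually runs the service once flags are parsed.
	app.Action = indexer.Main(GitVersion)
	if err := app.Run(os.Args); err != nil {
		os.Exit(1)
	}
}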
// Indexer is a service that configures the necessary resources for
// running the Sync and BlockHandler sub-services.
// Indexer contains the necessary resources for
// indexing the configured L1 and L2 chains
type Indexer struct {
db *database.DB
db *database.DB
log log.Logger
l1Processor *processor.L1Processor
l2Processor *processor.L2Processor
L1Processor *processor.L1Processor
L2Processor *processor.L2Processor
}
// NewIndexer initializes the Indexer, gathering any resources
// that will be needed by the TxIndexer and StateIndexer
// sub-services.
func NewIndexer(ctx *cli.Context) (*Indexer, error) {
// TODO https://linear.app/optimism/issue/DX-55/api-implement-rest-api-with-mocked-data
// do json format too
// TODO https://linear.app/optimism/issue/DX-55/api-implement-rest-api-with-mocked-data
logLevel, err := log.LvlFromString(ctx.GlobalString(flags.LogLevelFlag.Name))
if err != nil {
return nil, err
// NewIndexer initializes an instance of the Indexer
func NewIndexer(cfg config.Config) (*Indexer, error) {
dsn := fmt.Sprintf("host=%s port=%d dbname=%s sslmode=disable", cfg.DB.Host, cfg.DB.Port, cfg.DB.Name)
if cfg.DB.User != "" {
dsn += fmt.Sprintf(" user=%s", cfg.DB.User)
}
if cfg.DB.Password != "" {
dsn += fmt.Sprintf(" password=%s", cfg.DB.Password)
}
logHandler := log.StreamHandler(os.Stdout, log.TerminalFormat(true))
log.Root().SetHandler(log.LvlFilterHandler(logLevel, logHandler))
dsn := fmt.Sprintf("database=%s", ctx.GlobalString(flags.DBNameFlag.Name))
db, err := database.NewDB(dsn)
if err != nil {
return nil, err
}
// L1 Processor (hardhat devnet contracts). Make this configurable
l1Contracts := processor.L1Contracts{
OptimismPortal: common.HexToAddress("0x6900000000000000000000000000000000000000"),
L2OutputOracle: common.HexToAddress("0x6900000000000000000000000000000000000001"),
L1CrossDomainMessenger: common.HexToAddress("0x6900000000000000000000000000000000000002"),
L1StandardBridge: common.HexToAddress("0x6900000000000000000000000000000000000003"),
L1ERC721Bridge: common.HexToAddress("0x6900000000000000000000000000000000000004"),
}
l1EthClient, err := node.NewEthClient(ctx.GlobalString(flags.L1EthRPCFlag.Name))
l1Contracts := processor.DevL1Contracts()
l1EthClient, err := node.DialEthClient(cfg.RPCs.L1RPC)
if err != nil {
return nil, err
}
l1Processor, err := processor.NewL1Processor(l1EthClient, db, l1Contracts)
l1Processor, err := processor.NewL1Processor(cfg.Logger, l1EthClient, db, l1Contracts)
if err != nil {
return nil, err
}
// L2Processor
l2Contracts := processor.L2ContractPredeploys() // Make this configurable
l2EthClient, err := node.NewEthClient(ctx.GlobalString(flags.L2EthRPCFlag.Name))
// L2Processor (predeploys). Although most likely the right setting, make this configurable?
l2Contracts := processor.L2ContractPredeploys()
l2EthClient, err := node.DialEthClient(cfg.RPCs.L2RPC)
if err != nil {
return nil, err
}
l2Processor, err := processor.NewL2Processor(l2EthClient, db, l2Contracts)
l2Processor, err := processor.NewL2Processor(cfg.Logger, l2EthClient, db, l2Contracts)
if err != nil {
return nil, err
}
indexer := &Indexer{
db: db,
l1Processor: l1Processor,
l2Processor: l2Processor,
log: cfg.Logger,
L1Processor: l1Processor,
L2Processor: l2Processor,
}
return indexer, nil
}
// Serve spins up a REST API server at the given hostname and port.
func (b *Indexer) Serve() error {
return nil
}
// Run starts the indexing service on L1 and L2 chains
func (i *Indexer) Run(ctx context.Context) error {
var wg sync.WaitGroup
errCh := make(chan error)
// If either processor errors out, we stop
processorCtx, cancel := context.WithCancel(ctx)
run := func(start func(ctx context.Context) error) {
wg.Add(1)
defer wg.Done()
err := start(processorCtx)
if err != nil {
i.log.Error("halting indexer on error", "err", err)
cancel()
errCh <- err
}
}
// Start starts the indexing service on L1 and L2 chains and also
// starts the REST server.
func (b *Indexer) Start() error {
go b.l1Processor.Start()
go b.l2Processor.Start()
// Kick off the processors
go run(i.L1Processor.Start)
go run(i.L2Processor.Start)
err := <-errCh
return nil
// ensure both processors have halted before returning
wg.Wait()
return err
}
// Stop stops the indexing service on L1 and L2 chains.
func (b *Indexer) Stop() {
// Cleanup releases any resources that might be currently held by the indexer
func (i *Indexer) Cleanup() {
i.db.Close()
}
......@@ -56,6 +56,8 @@ CREATE TABLE IF NOT EXISTS legacy_state_batches (
CREATE TABLE IF NOT EXISTS output_proposals (
output_root VARCHAR NOT NULL PRIMARY KEY,
l2_output_index UINT256,
l2_block_number UINT256,
l1_contract_event_guid VARCHAR REFERENCES l1_contract_events(guid)
......
......@@ -3,6 +3,7 @@ package node
import (
"context"
"errors"
"fmt"
"math/big"
"time"
......@@ -29,6 +30,8 @@ type EthClient interface {
BlockHeadersByRange(*big.Int, *big.Int) ([]*types.Header, error)
BlockHeaderByHash(common.Hash) (*types.Header, error)
StorageHash(common.Address, *big.Int) (common.Hash, error)
RawRpcClient() *rpc.Client
}
......@@ -36,7 +39,7 @@ type client struct {
rpcClient *rpc.Client
}
func NewEthClient(rpcUrl string) (EthClient, error) {
func DialEthClient(rpcUrl string) (EthClient, error) {
ctxwt, cancel := context.WithTimeout(context.Background(), defaultDialTimeout)
defer cancel()
......@@ -49,6 +52,10 @@ func NewEthClient(rpcUrl string) (EthClient, error) {
return client, nil
}
func NewEthClient(rpcClient *rpc.Client) EthClient {
return &client{rpcClient}
}
func (c *client) RawRpcClient() *rpc.Client {
return c.rpcClient
}
......@@ -136,15 +143,33 @@ func (c *client) BlockHeadersByRange(startHeight, endHeight *big.Int) ([]*types.
return headers, nil
}
// StorageHash returns the sha3 of the storage root for the specified account
func (c *client) StorageHash(address common.Address, blockNumber *big.Int) (common.Hash, error) {
ctxwt, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout)
defer cancel()
proof := struct{ StorageHash common.Hash }{}
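// eth_getProof is invoked with no storage keys; only the account's storageHash field of the response is decoded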
err := c.rpcClient.CallContext(ctxwt, &proof, "eth_getProof", address, nil, toBlockNumArg(blockNumber))
if err != nil {
return common.Hash{}, err
}
return proof.StorageHash, nil
}
func toBlockNumArg(number *big.Int) string {
if number == nil {
return "latest"
} else if number.Sign() >= 0 {
return hexutil.EncodeBig(number)
}
pending := big.NewInt(-1)
if number.Cmp(pending) == 0 {
return "pending"
// It's negative.
if number.IsInt64() {
tag, _ := rpc.BlockNumber(number.Int64()).MarshalText()
return string(tag)
}
return hexutil.EncodeBig(number)
// It's negative and large, which is invalid.
return fmt.Sprintf("<invalid %d>", number)
}
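As a sanity check on the negative-number handling above, a minimal unit-test sketch (not part of the diff) is shown below; it assumes the function lives in the same node package and that the tag strings come from go-ethereum's rpc.BlockNumber.MarshalText, which may vary across geth versions.

package node

import (
	"math/big"
	"testing"

	"github.com/ethereum/go-ethereum/rpc"
	"github.com/stretchr/testify/require"
)

func TestToBlockNumArgSketch(t *testing.T) {
	// nil maps to the "latest" tag and non-negative numbers to hex quantities
	require.Equal(t, "latest", toBlockNumArg(nil))
	require.Equal(t, "0x10", toBlockNumArg(big.NewInt(16)))

	// negative sentinels defer to rpc.BlockNumber's textual tags
	require.Equal(t, "pending", toBlockNumArg(big.NewInt(int64(rpc.PendingBlockNumber))))
	require.Equal(t, "finalized", toBlockNumArg(big.NewInt(int64(rpc.FinalizedBlockNumber))))
}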
......@@ -31,6 +31,11 @@ func (m *MockEthClient) BlockHeaderByHash(hash common.Hash) (*types.Header, erro
return args.Get(0).(*types.Header), args.Error(1)
}
func (m *MockEthClient) StorageHash(address common.Address, blockNumber *big.Int) (common.Hash, error) {
args := m.Called(address, blockNumber)
return args.Get(0).(common.Hash), args.Error(1)
}
func (m *MockEthClient) RawRpcClient() *rpc.Client {
args := m.Called()
return args.Get(0).(*rpc.Client)
......
......@@ -4,8 +4,8 @@ import (
"math/big"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/core/types"
)
......@@ -43,8 +43,8 @@ func TestHeaderTraversalNextFinalizedHeadersNoOp(t *testing.T) {
// no new headers when matched with head
client.On("FinalizedBlockHeight").Return(big.NewInt(10), nil)
headers, err := headerTraversal.NextFinalizedHeaders(100)
assert.NoError(t, err)
assert.Empty(t, headers)
require.NoError(t, err)
require.Empty(t, headers)
}
func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
......@@ -58,16 +58,16 @@ func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
client.On("FinalizedBlockHeight").Return(big.NewInt(4), nil).Times(1) // Times so that we can override next
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil)
headers, err := headerTraversal.NextFinalizedHeaders(5)
assert.NoError(t, err)
assert.Len(t, headers, 5)
require.NoError(t, err)
require.Len(t, headers, 5)
// blocks [5..9]
headers = makeHeaders(5, headers[len(headers)-1])
client.On("FinalizedBlockHeight").Return(big.NewInt(9), nil)
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(9))).Return(headers, nil)
headers, err = headerTraversal.NextFinalizedHeaders(5)
assert.NoError(t, err)
assert.Len(t, headers, 5)
require.NoError(t, err)
require.Len(t, headers, 5)
}
func TestHeaderTraversalNextFinalizedHeadersMaxSize(t *testing.T) {
......@@ -83,15 +83,15 @@ func TestHeaderTraversalNextFinalizedHeadersMaxSize(t *testing.T) {
headers := makeHeaders(5, nil)
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil)
headers, err := headerTraversal.NextFinalizedHeaders(5)
assert.NoError(t, err)
assert.Len(t, headers, 5)
require.NoError(t, err)
require.Len(t, headers, 5)
// clamped by the supplied size. FinalizedHeight == 100
headers = makeHeaders(10, headers[len(headers)-1])
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(14))).Return(headers, nil)
headers, err = headerTraversal.NextFinalizedHeaders(10)
assert.NoError(t, err)
assert.Len(t, headers, 10)
require.NoError(t, err)
require.Len(t, headers, 10)
}
func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
......@@ -105,14 +105,14 @@ func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
client.On("FinalizedBlockHeight").Return(big.NewInt(4), nil).Times(1) // Times so that we can override next
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil)
headers, err := headerTraversal.NextFinalizedHeaders(5)
assert.NoError(t, err)
assert.Len(t, headers, 5)
require.NoError(t, err)
require.Len(t, headers, 5)
// blocks [5..9]. Next batch is not chained correctly (starts again from genesis)
headers = makeHeaders(5, nil)
client.On("FinalizedBlockHeight").Return(big.NewInt(9), nil)
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(9))).Return(headers, nil)
headers, err = headerTraversal.NextFinalizedHeaders(5)
assert.Nil(t, headers)
assert.Equal(t, ErrHeaderTraversalAndProviderMismatchedState, err)
require.Nil(t, headers)
require.Equal(t, ErrHeaderTraversalAndProviderMismatchedState, err)
}
......@@ -2,9 +2,7 @@ package processor
import (
"context"
"encoding/hex"
"errors"
"math/big"
"reflect"
"github.com/google/uuid"
......@@ -36,12 +34,17 @@ type L1Contracts struct {
// Remove afterwards?
}
type checkpointAbi struct {
l2OutputOracle *abi.ABI
legacyStateCommitmentChain *abi.ABI
func DevL1Contracts() L1Contracts {
return L1Contracts{
OptimismPortal: common.HexToAddress("0x6900000000000000000000000000000000000000"),
L2OutputOracle: common.HexToAddress("0x6900000000000000000000000000000000000001"),
L1CrossDomainMessenger: common.HexToAddress("0x6900000000000000000000000000000000000002"),
L1StandardBridge: common.HexToAddress("0x6900000000000000000000000000000000000003"),
L1ERC721Bridge: common.HexToAddress("0x6900000000000000000000000000000000000004"),
}
}
func (c L1Contracts) toSlice() []common.Address {
func (c L1Contracts) ToSlice() []common.Address {
fields := reflect.VisibleFields(reflect.TypeOf(c))
v := reflect.ValueOf(c)
......@@ -53,12 +56,17 @@ func (c L1Contracts) toSlice() []common.Address {
return contracts
}
type checkpointAbi struct {
l2OutputOracle *abi.ABI
legacyStateCommitmentChain *abi.ABI
}
type L1Processor struct {
processor
}
func NewL1Processor(ethClient node.EthClient, db *database.DB, l1Contracts L1Contracts) (*L1Processor, error) {
l1ProcessLog := log.New("processor", "l1")
func NewL1Processor(logger log.Logger, ethClient node.EthClient, db *database.DB, l1Contracts L1Contracts) (*L1Processor, error) {
l1ProcessLog := logger.New("processor", "l1")
l1ProcessLog.Info("initializing processor")
l2OutputOracleABI, err := bindings.L2OutputOracleMetaData.GetAbi()
......@@ -109,14 +117,16 @@ func NewL1Processor(ethClient node.EthClient, db *database.DB, l1Contracts L1Con
func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1Contracts, checkpointAbi checkpointAbi) ProcessFn {
rawEthClient := ethclient.NewClient(ethClient.RawRpcClient())
contractAddrs := l1Contracts.toSlice()
contractAddrs := l1Contracts.ToSlice()
processLog.Info("processor configured with contracts", "contracts", l1Contracts)
outputProposedEventSig := checkpointAbi.l2OutputOracle.Events["OutputProposed"].ID
legacyStateBatchAppendedEventSig := checkpointAbi.legacyStateCommitmentChain.Events["StateBatchAppended"].ID
outputProposedEventName := "OutputProposed"
outputProposedEventSig := checkpointAbi.l2OutputOracle.Events[outputProposedEventName].ID
legacyStateBatchAppendedEventName := "StateBatchAppended"
legacyStateBatchAppendedEventSig := checkpointAbi.legacyStateCommitmentChain.Events[legacyStateBatchAppendedEventName].ID
return func(db *database.DB, headers []*types.Header) error {
numHeaders := len(headers)
headerMap := make(map[common.Hash]*types.Header)
for _, header := range headers {
headerMap[header.Hash()] = header
......@@ -124,7 +134,7 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1
/** Watch for all Optimism Contract Events **/
logFilter := ethereum.FilterQuery{FromBlock: headers[0].Number, ToBlock: headers[numHeaders-1].Number, Addresses: contractAddrs}
logFilter := ethereum.FilterQuery{FromBlock: headers[0].Number, ToBlock: headers[len(headers)-1].Number, Addresses: contractAddrs}
logs, err := rawEthClient.FilterLogs(context.Background(), logFilter) // []types.Log
if err != nil {
return err
......@@ -138,41 +148,43 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1
l1ContractEvents := make([]*database.L1ContractEvent, len(logs))
processedContractEvents := NewProcessedContractEvents()
for i, log := range logs {
for i := range logs {
log := &logs[i]
header, ok := headerMap[log.BlockHash]
if !ok {
processLog.Error("contract event found with associated header not in the batch", "header", log.BlockHash, "log_index", log.Index)
return errors.New("parsed log with a block hash not in this batch")
}
contractEvent := processedContractEvents.AddLog(&logs[i], header.Time)
contractEvent := processedContractEvents.AddLog(log, header.Time)
l1HeadersOfInterest[log.BlockHash] = true
l1ContractEvents[i] = &database.L1ContractEvent{ContractEvent: *contractEvent}
// Track Checkpoint Events for L2
switch contractEvent.EventSignature {
case outputProposedEventSig:
if len(log.Topics) != 4 {
processLog.Error("parsed unexpected number of L2OutputOracle#OutputProposed log topics", "log_topics", log.Topics)
return errors.New("parsed unexpected OutputProposed event")
var outputProposed bindings.L2OutputOracleOutputProposed
err := UnpackLog(&outputProposed, log, outputProposedEventName, checkpointAbi.l2OutputOracle)
if err != nil {
return err
}
outputProposals = append(outputProposals, &database.OutputProposal{
OutputRoot: log.Topics[1],
L2BlockNumber: database.U256{Int: new(big.Int).SetBytes(log.Topics[2].Bytes())},
OutputRoot: outputProposed.OutputRoot,
L2OutputIndex: database.U256{Int: outputProposed.L2OutputIndex},
L2BlockNumber: database.U256{Int: outputProposed.L2BlockNumber},
L1ContractEventGUID: contractEvent.GUID,
})
case legacyStateBatchAppendedEventSig:
var stateBatchAppended legacy_bindings.StateCommitmentChainStateBatchAppended
err := checkpointAbi.l2OutputOracle.UnpackIntoInterface(&stateBatchAppended, "StateBatchAppended", log.Data)
if err != nil || len(log.Topics) != 2 {
processLog.Error("unexpected StateCommitmentChain#StateBatchAppended log data or log topics", "log_topics", log.Topics, "log_data", hex.EncodeToString(log.Data), "err", err)
err := UnpackLog(&stateBatchAppended, log, legacyStateBatchAppendedEventName, checkpointAbi.legacyStateCommitmentChain)
if err != nil {
return err
}
legacyStateBatches = append(legacyStateBatches, &database.LegacyStateBatch{
Index: new(big.Int).SetBytes(log.Topics[1].Bytes()).Uint64(),
Index: stateBatchAppended.BatchIndex.Uint64(),
Root: stateBatchAppended.BatchRoot,
Size: stateBatchAppended.BatchSize.Uint64(),
PrevTotal: stateBatchAppended.PrevTotalElements.Uint64(),
......@@ -199,7 +211,7 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1
numIndexedL1Headers := len(indexedL1Headers)
if numIndexedL1Headers > 0 {
processLog.Info("saving l1 blocks with optimism logs", "size", numIndexedL1Headers, "batch_size", numHeaders)
processLog.Info("saving l1 blocks with optimism logs", "size", numIndexedL1Headers, "batch_size", len(headers))
err = db.Blocks.StoreL1BlockHeaders(indexedL1Headers)
if err != nil {
return err
......@@ -296,15 +308,16 @@ func l1BridgeProcessContractEvents(processLog log.Logger, db *database.DB, ethCl
// Check if the L2Processor is behind or really has missed an event. We can compare against the
// OptimismPortal#ProvenWithdrawal on-chain mapping relative to the latest indexed L2 height
if withdrawal == nil {
bridgeAddress := l1Contracts.L1StandardBridge
portalAddress := l1Contracts.OptimismPortal
if provenWithdrawalEvent.From != bridgeAddress || provenWithdrawalEvent.To != bridgeAddress {
// This needs to be updated to read from config as well as correctly identify if the CrossDomainMessenger message is a standard
// bridge message. This will be easier to do once we index passed messages separately, which will include the right To/From fields
if provenWithdrawalEvent.From != common.HexToAddress("0x4200000000000000000000000000000000000007") || provenWithdrawalEvent.To != l1Contracts.L1CrossDomainMessenger {
// non-bridge withdrawal
continue
}
// Query for the proven withdrawal on-chain
provenWithdrawal, err := OptimismPortalQueryProvenWithdrawal(rawEthClient, portalAddress, withdrawalHash)
provenWithdrawal, err := OptimismPortalQueryProvenWithdrawal(rawEthClient, l1Contracts.OptimismPortal, withdrawalHash)
if err != nil {
return err
}
......@@ -349,8 +362,8 @@ func l1BridgeProcessContractEvents(processLog log.Logger, db *database.DB, ethCl
return err
}
// Since we have to prove the event on-chain first, we don't need to check if the processor is
// behind. we're definitely in an error state if we cannot find the withdrawal when parsing this even
// Since we have to prove the event on-chain first, we don't need to check if the processor is behind
// We're definitely in an error state if we cannot find the withdrawal when parsing this event
if withdrawal == nil {
processLog.Crit("missing indexed withdrawal for this finalization event")
return errors.New("missing withdrawal message")
......
......@@ -39,7 +39,7 @@ func L2ContractPredeploys() L2Contracts {
}
}
func (c L2Contracts) toSlice() []common.Address {
func (c L2Contracts) ToSlice() []common.Address {
fields := reflect.VisibleFields(reflect.TypeOf(c))
v := reflect.ValueOf(c)
......@@ -55,8 +55,8 @@ type L2Processor struct {
processor
}
func NewL2Processor(ethClient node.EthClient, db *database.DB, l2Contracts L2Contracts) (*L2Processor, error) {
l2ProcessLog := log.New("processor", "l2")
func NewL2Processor(logger log.Logger, ethClient node.EthClient, db *database.DB, l2Contracts L2Contracts) (*L2Processor, error) {
l2ProcessLog := logger.New("processor", "l2")
l2ProcessLog.Info("initializing processor")
latestHeader, err := db.Blocks.LatestL2BlockHeader()
......@@ -94,7 +94,7 @@ func NewL2Processor(ethClient node.EthClient, db *database.DB, l2Contracts L2Con
func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2Contracts) ProcessFn {
rawEthClient := ethclient.NewClient(ethClient.RawRpcClient())
contractAddrs := l2Contracts.toSlice()
contractAddrs := l2Contracts.ToSlice()
processLog.Info("processor configured with contracts", "contracts", l2Contracts)
return func(db *database.DB, headers []*types.Header) error {
numHeaders := len(headers)
......@@ -127,14 +127,15 @@ func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2
l2ContractEvents := make([]*database.L2ContractEvent, len(logs))
processedContractEvents := NewProcessedContractEvents()
for i, log := range logs {
for i := range logs {
log := &logs[i]
header, ok := l2HeaderMap[log.BlockHash]
if !ok {
processLog.Error("contract event found with associated header not in the batch", "header", header, "log_index", log.Index)
return errors.New("parsed log with a block hash not in this batch")
}
contractEvent := processedContractEvents.AddLog(&logs[i], header.Time)
contractEvent := processedContractEvents.AddLog(log, header.Time)
l2ContractEvents[i] = &database.L2ContractEvent{ContractEvent: *contractEvent}
}
......
......@@ -30,11 +30,20 @@ func OptimismPortalWithdrawalProvenEvents(events *ProcessedContractEvents) ([]Op
return nil, err
}
processedWithdrawalProvenEvents := events.eventsBySignature[optimismPortalAbi.Events["WithdrawalProven"].ID]
eventName := "WithdrawalProven"
processedWithdrawalProvenEvents := events.eventsBySignature[optimismPortalAbi.Events[eventName].ID]
provenEvents := make([]OptimismPortalWithdrawalProvenEvent, len(processedWithdrawalProvenEvents))
for i, provenEvent := range processedWithdrawalProvenEvents {
log := events.eventLog[provenEvent.GUID]
var withdrawalProven bindings.OptimismPortalWithdrawalProven
err := UnpackLog(&withdrawalProven, log, eventName, optimismPortalAbi)
if err != nil {
return nil, err
}
provenEvents[i] = OptimismPortalWithdrawalProvenEvent{nil, provenEvent}
provenEvents[i] = OptimismPortalWithdrawalProvenEvent{&withdrawalProven, provenEvent}
}
return provenEvents, nil
......
package processor
import (
"context"
"time"
"github.com/ethereum-optimism/optimism/indexer/database"
......@@ -25,46 +26,81 @@ type processor struct {
db *database.DB
processFn ProcessFn
processLog log.Logger
paused bool
latestProcessedHeader *types.Header
}
// Start kicks off the processing loop
func (p processor) Start() {
// Start kicks off the processing loop. This is a blocking operation that runs
// until the processor encounters an error, aborting the loop,
// or until the supplied context is cancelled.
func (p *processor) Start(ctx context.Context) error {
done := ctx.Done()
pollTicker := time.NewTicker(defaultLoopInterval)
defer pollTicker.Stop()
p.processLog.Info("starting processor...")
var unprocessedHeaders []*types.Header
for range pollTicker.C {
if len(unprocessedHeaders) == 0 {
newHeaders, err := p.headerTraversal.NextFinalizedHeaders(defaultHeaderBufferSize)
if err != nil {
p.processLog.Error("error querying for headers", "err", err)
continue
} else if len(newHeaders) == 0 {
// Logged as an error since this loop should be operating at a longer interval than the provider
p.processLog.Error("no new headers. processor unexpectedly at head...")
for {
select {
case <-done:
p.processLog.Info("stopping processor")
return nil
case <-pollTicker.C:
if p.paused {
p.processLog.Warn("processor is paused...")
continue
}
unprocessedHeaders = newHeaders
} else {
p.processLog.Info("retrying previous batch")
}
if len(unprocessedHeaders) == 0 {
newHeaders, err := p.headerTraversal.NextFinalizedHeaders(defaultHeaderBufferSize)
if err != nil {
p.processLog.Error("error querying for headers", "err", err)
continue
} else if len(newHeaders) == 0 {
// Logged as an error since this loop should be operating at a longer interval than the provider
p.processLog.Error("no new headers. processor unexpectedly at head...")
continue
}
unprocessedHeaders = newHeaders
} else {
p.processLog.Info("retrying previous batch")
}
firstHeader := unprocessedHeaders[0]
lastHeader := unprocessedHeaders[len(unprocessedHeaders)-1]
batchLog := p.processLog.New("batch_start_block_number", firstHeader.Number, "batch_end_block_number", lastHeader.Number)
err := p.db.Transaction(func(db *database.DB) error {
batchLog.Info("processing batch")
return p.processFn(db, unprocessedHeaders)
})
if err != nil {
batchLog.Warn("error processing batch. no operations committed", "err", err)
} else {
batchLog.Info("fully committed batch")
unprocessedHeaders = nil
firstHeader := unprocessedHeaders[0]
lastHeader := unprocessedHeaders[len(unprocessedHeaders)-1]
batchLog := p.processLog.New("batch_start_block_number", firstHeader.Number, "batch_end_block_number", lastHeader.Number)
err := p.db.Transaction(func(db *database.DB) error {
batchLog.Info("processing batch")
return p.processFn(db, unprocessedHeaders)
})
// Eventually, we want to halt the processor on any error rather than rely
// on this loop for retry functionality.
if err != nil {
batchLog.Warn("error processing batch. no operations committed", "err", err)
} else {
batchLog.Info("fully committed batch")
unprocessedHeaders = nil
p.latestProcessedHeader = lastHeader
}
}
}
}
func (p processor) LatestProcessedHeader() *types.Header {
return p.latestProcessedHeader
}
// Useful ONLY for tests!
func (p *processor) PauseForTest() {
p.paused = true
}
func (p *processor) ResumeForTest() {
p.paused = false
}
......@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"errors"
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/indexer/database"
......@@ -14,7 +15,7 @@ import (
)
var (
ethAddress = common.HexToAddress("0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000")
EthAddress = common.HexToAddress("0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000")
)
type StandardBridgeInitiatedEvent struct {
......@@ -131,7 +132,7 @@ func _standardBridgeInitiatedEvents[BridgeEvent bindings.L1StandardBridgeETHBrid
// represent eth bridge as an erc20
erc20BridgeData = &bindings.L1StandardBridgeERC20BridgeInitiated{
// Represent ETH using the hardcoded address
LocalToken: ethAddress, RemoteToken: ethAddress,
LocalToken: EthAddress, RemoteToken: EthAddress,
// Bridge data
From: ethBridgeData.From, To: ethBridgeData.To, Amount: ethBridgeData.Amount, ExtraData: ethBridgeData.ExtraData,
}
......@@ -170,8 +171,14 @@ func _standardBridgeFinalizedEvents[BridgeEvent bindings.L1StandardBridgeETHBrid
return nil, err
}
optimismPortalAbi, err := bindings.OptimismPortalMetaData.GetAbi()
if err != nil {
return nil, err
}
relayedMessageEventAbi := l1CrossDomainMessengerABI.Events["RelayedMessage"]
relayMessageMethodAbi := l1CrossDomainMessengerABI.Methods["relayMessage"]
finalizeWithdrawalTransactionMethodAbi := optimismPortalAbi.Methods["finalizeWithdrawalTransaction"]
var bridgeData BridgeEvent
var eventName string
......@@ -201,27 +208,52 @@ func _standardBridgeFinalizedEvents[BridgeEvent bindings.L1StandardBridgeETHBrid
return nil, errors.New("unexpected bridge event ordering")
}
// There's no way to extract the nonce on the relayed message event. we can extract
// the nonce by unpacking the transaction input for the `relayMessage` transaction
// There's no way to extract the nonce from the relayed message event. We can extract the nonce
// by unpacking the transaction input for the `relayMessage` transaction. Since bedrock has the OptimismPortal
// on L1 as an intermediary for finalization, we have to check both scenarios
tx, isPending, err := rawEthClient.TransactionByHash(context.Background(), relayedMsgLog.TxHash)
if err != nil || isPending {
return nil, errors.New("unable to query relayMessage tx for bridge finalization event")
}
txData := tx.Data()
if !bytes.Equal(txData[:4], relayMessageMethodAbi.ID) {
return nil, errors.New("bridge finalization event does not match relayMessage tx invocation")
// If this is a finalization step with the optimism portal, the calldata for relayMessage invocation can be
// extracted from the withdrawal transaction.
// NOTE: the L2CrossDomainMessenger nonce may not match the L2ToL1MessagePasser nonce, hence the additional
// layer of decoding vs reading the nonce of the withdrawal transaction. Both nonces have a similar but
// different lifecycle that might not match (i.e. L2ToL1MessagePasser can be invoked directly)
var relayMsgCallData []byte
switch {
case bytes.Equal(tx.Data()[:4], relayMessageMethodAbi.ID):
relayMsgCallData = tx.Data()[4:]
case bytes.Equal(tx.Data()[:4], finalizeWithdrawalTransactionMethodAbi.ID):
data, err := finalizeWithdrawalTransactionMethodAbi.Inputs.Unpack(tx.Data()[4:])
if err != nil {
return nil, err
}
finalizeWithdrawTransactionInput := new(struct {
Tx bindings.TypesWithdrawalTransaction
})
err = finalizeWithdrawalTransactionMethodAbi.Inputs.Copy(finalizeWithdrawTransactionInput, data)
if err != nil {
return nil, fmt.Errorf("unable extract withdrawal tx input from finalizeWithdrawalTransaction calldata: %w", err)
} else if !bytes.Equal(finalizeWithdrawTransactionInput.Tx.Data[:4], relayMessageMethodAbi.ID) {
return nil, errors.New("finalizeWithdrawalTransaction calldata does not match relayMessage invocation")
}
relayMsgCallData = finalizeWithdrawTransactionInput.Tx.Data[4:]
default:
return nil, errors.New("bridge finalization event does not correlate with a relayMessage tx invocation")
}
inputsMap := make(map[string]interface{})
err = relayMessageMethodAbi.Inputs.UnpackIntoMap(inputsMap, txData[4:])
err = relayMessageMethodAbi.Inputs.UnpackIntoMap(inputsMap, relayMsgCallData)
if err != nil {
return nil, err
}
nonce, ok := inputsMap["_nonce"].(*big.Int)
if !ok {
return nil, errors.New("unable to extract `_nonce` parameter from relayMessage transaction")
return nil, errors.New("unable to extract `_nonce` parameter from relayMessage calldata")
}
var erc20BridgeData *bindings.L1StandardBridgeERC20BridgeFinalized
......@@ -230,7 +262,7 @@ func _standardBridgeFinalizedEvents[BridgeEvent bindings.L1StandardBridgeETHBrid
ethBridgeData := any(bridgeData).(bindings.L1StandardBridgeETHBridgeFinalized)
erc20BridgeData = &bindings.L1StandardBridgeERC20BridgeFinalized{
// Represent ETH using the hardcoded address
LocalToken: ethAddress, RemoteToken: ethAddress,
LocalToken: EthAddress, RemoteToken: EthAddress,
// Bridge data
From: ethBridgeData.From, To: ethBridgeData.To, Amount: ethBridgeData.Amount, ExtraData: ethBridgeData.ExtraData,
}
......
......@@ -28,8 +28,11 @@
"L1BlockNumber",
"DisputeGameFactory",
"FaultDisputeGame",
"AlphabetVM",
"StandardBridge",
"CrossDomainMessenger",
"MIPS",
"PreimageOracle"
"PreimageOracle",
"EAS",
"SchemaRegistry"
]
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package bindings
import (
"encoding/json"
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const AlphabetVMStorageLayoutJSON = "{\"storage\":null,\"types\":{}}"
var AlphabetVMStorageLayout = new(solc.StorageLayout)
var AlphabetVMDeployedBin = "0x608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f8e0cb9614610030575b600080fd5b61004361003e366004610157565b610055565b60405190815260200160405180910390f35b60008060007f0000000000000000000000000000000000000000000000000000000000000000878760405161008b9291906101c3565b6040518091039020036100af57600091506100a8868801886101d3565b90506100ce565b6100bb868801886101ec565b9092509050816100ca8161023d565b9250505b816100da826001610275565b6040805160208101939093528201526060016040516020818303038152906040528051906020012092505050949350505050565b60008083601f84011261012057600080fd5b50813567ffffffffffffffff81111561013857600080fd5b60208301915083602082850101111561015057600080fd5b9250929050565b6000806000806040858703121561016d57600080fd5b843567ffffffffffffffff8082111561018557600080fd5b6101918883890161010e565b909650945060208701359150808211156101aa57600080fd5b506101b78782880161010e565b95989497509550505050565b8183823760009101908152919050565b6000602082840312156101e557600080fd5b5035919050565b600080604083850312156101ff57600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361026e5761026e61020e565b5060010190565b600082198211156102885761028861020e565b50019056fea164736f6c634300080f000a"
func init() {
if err := json.Unmarshal([]byte(AlphabetVMStorageLayoutJSON), AlphabetVMStorageLayout); err != nil {
panic(err)
}
layouts["AlphabetVM"] = AlphabetVMStorageLayout
deployedBytecodes["AlphabetVM"] = AlphabetVMDeployedBin
}
......@@ -9,7 +9,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const ERC20StorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"node_modules/.pnpm/@openzeppelin+contracts@4.7.3/node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_balances\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_mapping(t_address,t_uint256)\"},{\"astId\":1001,\"contract\":\"node_modules/.pnpm/@openzeppelin+contracts@4.7.3/node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_allowances\",\"offset\":0,\"slot\":\"1\",\"type\":\"t_mapping(t_address,t_mapping(t_address,t_uint256))\"},{\"astId\":1002,\"contract\":\"node_modules/.pnpm/@openzeppelin+contracts@4.7.3/node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_totalSupply\",\"offset\":0,\"slot\":\"2\",\"type\":\"t_uint256\"},{\"astId\":1003,\"contract\":\"node_modules/.pnpm/@openzeppelin+contracts@4.7.3/node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_name\",\"offset\":0,\"slot\":\"3\",\"type\":\"t_string_storage\"},{\"astId\":1004,\"contract\":\"node_modules/.pnpm/@openzeppelin+contracts@4.7.3/node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_symbol\",\"offset\":0,\"slot\":\"4\",\"type\":\"t_string_storage\"}],\"types\":{\"t_address\":{\"encoding\":\"inplace\",\"label\":\"address\",\"numberOfBytes\":\"20\"},\"t_mapping(t_address,t_mapping(t_address,t_uint256))\":{\"encoding\":\"mapping\",\"label\":\"mapping(address =\u003e mapping(address =\u003e uint256))\",\"numberOfBytes\":\"32\",\"key\":\"t_address\",\"value\":\"t_mapping(t_address,t_uint256)\"},\"t_mapping(t_address,t_uint256)\":{\"encoding\":\"mapping\",\"label\":\"mapping(address =\u003e uint256)\",\"numberOfBytes\":\"32\",\"key\":\"t_address\",\"value\":\"t_uint256\"},\"t_string_storage\":{\"encoding\":\"bytes\",\"label\":\"string\",\"numberOfBytes\":\"32\"},\"t_uint256\":{\"encoding\":\"inplace\",\"label\":\"uint256\",\"numberOfBytes\":\"32\"}}}"
const ERC20StorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_balances\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_mapping(t_address,t_uint256)\"},{\"astId\":1001,\"contract\":\"node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_allowances\",\"offset\":0,\"slot\":\"1\",\"type\":\"t_mapping(t_address,t_mapping(t_address,t_uint256))\"},{\"astId\":1002,\"contract\":\"node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_totalSupply\",\"offset\":0,\"slot\":\"2\",\"type\":\"t_uint256\"},{\"astId\":1003,\"contract\":\"node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_name\",\"offset\":0,\"slot\":\"3\",\"type\":\"t_string_storage\"},{\"astId\":1004,\"contract\":\"node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_symbol\",\"offset\":0,\"slot\":\"4\",\"type\":\"t_string_storage\"}],\"types\":{\"t_address\":{\"encoding\":\"inplace\",\"label\":\"address\",\"numberOfBytes\":\"20\"},\"t_mapping(t_address,t_mapping(t_address,t_uint256))\":{\"encoding\":\"mapping\",\"label\":\"mapping(address =\u003e mapping(address =\u003e uint256))\",\"numberOfBytes\":\"32\",\"key\":\"t_address\",\"value\":\"t_mapping(t_address,t_uint256)\"},\"t_mapping(t_address,t_uint256)\":{\"encoding\":\"mapping\",\"label\":\"mapping(address =\u003e uint256)\",\"numberOfBytes\":\"32\",\"key\":\"t_address\",\"value\":\"t_uint256\"},\"t_string_storage\":{\"encoding\":\"bytes\",\"label\":\"string\",\"numberOfBytes\":\"32\"},\"t_uint256\":{\"encoding\":\"inplace\",\"label\":\"uint256\",\"numberOfBytes\":\"32\"}}}"
var ERC20StorageLayout = new(solc.StorageLayout)
......
......@@ -31,7 +31,7 @@ var (
// PreimageOracleMetaData contains all meta data concerning the PreimageOracle contract.
var PreimageOracleMetaData = &bind.MetaData{
ABI: "[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"partOffset\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"key\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"part\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"size\",\"type\":\"uint256\"}],\"name\":\"cheat\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_preimage\",\"type\":\"bytes\"}],\"name\":\"computePreimageKey\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"key_\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_partOffset\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_preimage\",\"type\":\"bytes\"}],\"name\":\"loadKeccak256PreimagePart\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"preimageLengths\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"preimagePartOk\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"preimageParts\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_key\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_offset\",\"type\":\"uint256\"}],\"name\":\"readPreimage\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"dat_\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"datLen_\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
Bin: "0x608060405234801561001057600080fd5b506105e2806100206000396000f3fe608060405234801561001057600080fd5b506004361061007d5760003560e01c8063e03110e11161005b578063e03110e114610111578063e159261114610139578063fe4ac08e1461014e578063fef2b4ed146101c357600080fd5b806361238bde146100825780638542cf50146100c0578063a57c202c146100fe575b600080fd5b6100ad610090366004610433565b600160209081526000928352604080842090915290825290205481565b6040519081526020015b60405180910390f35b6100ee6100ce366004610433565b600260209081526000928352604080842090915290825290205460ff1681565b60405190151581526020016100b7565b6100ad61010c36600461049e565b6101e3565b61012461011f366004610433565b610242565b604080519283526020830191909152016100b7565b61014c6101473660046104e0565b610333565b005b61014c61015c36600461052c565b6000838152600260209081526040808320878452825280832080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660019081179091558684528252808320968352958152858220939093559283529082905291902055565b6100ad6101d136600461055e565b60006020819052908152604090205481565b60243560c081901b608052600090608881858237207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f0200000000000000000000000000000000000000000000000000000000000000179392505050565b6000828152600260209081526040808320848452909152812054819060ff166102cb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f7072652d696d616765206d757374206578697374000000000000000000000000604482015260640160405180910390fd5b50600083815260208181526040909120546102e78160086105a6565b6102f28560206105a6565b1061031057836103038260086105a6565b61030d91906105be565b91505b506000938452600160209081526040808620948652939052919092205492909150565b6044356000806008830186111561034957600080fd5b60c083901b6080526088838682378087017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80151908490207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f02000000000000000000000000000000000000000000000000000000000000001760008181526002602090815260408083208b8452825280832080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600190811790915584845282528083209a83529981528982209390935590815290819052959095209190915550505050565b6000806040838503121561044657600080fd5b50508035926020909101359150565b60008083601f84011261046757600080fd5b50813567ffffffffffffffff81111561047f57600080fd5b60208301915083602082850101111561049757600080fd5b9250929050565b600080602083850312156104b157600080fd5b823567ffffffffffffffff8111156104c857600080fd5b6104d485828601610455565b90969095509350505050565b6000806000604084860312156104f557600080fd5b83359250602084013567ffffffffffffffff81111561051357600080fd5b61051f86828701610455565b9497909650939450505050565b6000806000806080858703121561054257600080fd5b5050823594602084013594506040840135936060013592509050565b60006020828403121561057057600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600082198211156105b9576105b9610577565b500190565b6000828210156105d0576105d0610577565b50039056fea164736f6c634300080f000a",
Bin: "0x608060405234801561001057600080fd5b506105df806100206000396000f3fe608060405234801561001057600080fd5b506004361061007d5760003560e01c8063e03110e11161005b578063e03110e114610111578063e159261114610139578063fe4ac08e1461014e578063fef2b4ed146101c357600080fd5b806361238bde146100825780638542cf50146100c0578063a57c202c146100fe575b600080fd5b6100ad610090366004610433565b600160209081526000928352604080842090915290825290205481565b6040519081526020015b60405180910390f35b6100ee6100ce366004610433565b600260209081526000928352604080842090915290825290205460ff1681565b60405190151581526020016100b7565b6100ad61010c36600461049e565b6101e3565b61012461011f366004610433565b610242565b604080519283526020830191909152016100b7565b61014c6101473660046104e0565b610333565b005b61014c61015c36600461052c565b6000838152600260209081526040808320878452825280832080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660019081179091558684528252808320968352958152858220939093559283529082905291902055565b6100ad6101d136600461055e565b60006020819052908152604090205481565b60243560c081901b608052600090608881858237207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f0200000000000000000000000000000000000000000000000000000000000000179392505050565b6000828152600260209081526040808320848452909152812054819060ff166102cb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f7072652d696d616765206d757374206578697374000000000000000000000000604482015260640160405180910390fd5b50600083815260208181526040909120546102e78160086105a6565b6102f28560206105a6565b1061031057836103038260086105a6565b61030d91906105bf565b91505b506000938452600160209081526040808620948652939052919092205492909150565b6044356000806008830186111561034957600080fd5b60c083901b6080526088838682378087017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80151908490207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f02000000000000000000000000000000000000000000000000000000000000001760008181526002602090815260408083208b8452825280832080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600190811790915584845282528083209a83529981528982209390935590815290819052959095209190915550505050565b6000806040838503121561044657600080fd5b50508035926020909101359150565b60008083601f84011261046757600080fd5b50813567ffffffffffffffff81111561047f57600080fd5b60208301915083602082850101111561049757600080fd5b9250929050565b600080602083850312156104b157600080fd5b823567ffffffffffffffff8111156104c857600080fd5b6104d485828601610455565b90969095509350505050565b6000806000604084860312156104f557600080fd5b83359250602084013567ffffffffffffffff81111561051357600080fd5b61051f86828701610455565b9497909650939450505050565b6000806000806080858703121561054257600080fd5b5050823594602084013594506040840135936060013592509050565b60006020828403121561057057600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b808201808211156105b9576105b9610577565b92915050565b818103818111156105b9576105b961057756fea164736f6c6343000813000a",
}
// PreimageOracleABI is the input ABI used to generate the binding from.
......
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package bindings
import (
"encoding/json"
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const SchemaRegistryStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"contracts/EAS/SchemaRegistry.sol:SchemaRegistry\",\"label\":\"_registry\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_mapping(t_bytes32,t_struct(SchemaRecord)1003_storage)\"},{\"astId\":1001,\"contract\":\"contracts/EAS/SchemaRegistry.sol:SchemaRegistry\",\"label\":\"__gap\",\"offset\":0,\"slot\":\"1\",\"type\":\"t_array(t_uint256)49_storage\"}],\"types\":{\"t_array(t_uint256)49_storage\":{\"encoding\":\"inplace\",\"label\":\"uint256[49]\",\"numberOfBytes\":\"1568\",\"base\":\"t_uint256\"},\"t_bool\":{\"encoding\":\"inplace\",\"label\":\"bool\",\"numberOfBytes\":\"1\"},\"t_bytes32\":{\"encoding\":\"inplace\",\"label\":\"bytes32\",\"numberOfBytes\":\"32\"},\"t_contract(ISchemaResolver)1002\":{\"encoding\":\"inplace\",\"label\":\"contract ISchemaResolver\",\"numberOfBytes\":\"20\"},\"t_mapping(t_bytes32,t_struct(SchemaRecord)1003_storage)\":{\"encoding\":\"mapping\",\"label\":\"mapping(bytes32 =\u003e struct SchemaRecord)\",\"numberOfBytes\":\"32\",\"key\":\"t_bytes32\",\"value\":\"t_struct(SchemaRecord)1003_storage\"},\"t_string_storage\":{\"encoding\":\"bytes\",\"label\":\"string\",\"numberOfBytes\":\"32\"},\"t_struct(SchemaRecord)1003_storage\":{\"encoding\":\"inplace\",\"label\":\"struct SchemaRecord\",\"numberOfBytes\":\"96\"},\"t_uint256\":{\"encoding\":\"inplace\",\"label\":\"uint256\",\"numberOfBytes\":\"32\"}}}"
var SchemaRegistryStorageLayout = new(solc.StorageLayout)
var SchemaRegistryDeployedBin = "0x608060405234801561001057600080fd5b50600436106100415760003560e01c806354fd4d501461004657806360d7a27814610064578063a2ea7c6e14610085575b600080fd5b61004e6100a5565b60405161005b9190610604565b60405180910390f35b61007761007236600461061e565b610148565b60405190815260200161005b565b6100986100933660046106d0565b6102f1565b60405161005b91906106e9565b60606100d07f0000000000000000000000000000000000000000000000000000000000000000610419565b6100f97f0000000000000000000000000000000000000000000000000000000000000000610419565b6101227f0000000000000000000000000000000000000000000000000000000000000000610419565b6040516020016101349392919061073a565b604051602081830303815290604052905090565b60008060405180608001604052806000801b81526020018573ffffffffffffffffffffffffffffffffffffffff168152602001841515815260200187878080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920182905250939094525092935091506101ca905082610556565b60008181526020819052604090205490915015610213576040517f23369fa600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80825260008181526020818152604091829020845181559084015160018201805493860151151574010000000000000000000000000000000000000000027fffffffffffffffffffffff00000000000000000000000000000000000000000090941673ffffffffffffffffffffffffffffffffffffffff9092169190911792909217909155606083015183919060028201906102af9082610881565b50506040513381528291507f7d917fcbc9a29a9705ff9936ffa599500e4fd902e4486bae317414fe967b307c9060200160405180910390a29695505050505050565b604080516080810182526000808252602082018190529181019190915260608082015260008281526020818152604091829020825160808101845281548152600182015473ffffffffffffffffffffffffffffffffffffffff8116938201939093527401000000000000000000000000000000000000000090920460ff16151592820192909252600282018054919291606084019190610390906107df565b80601f01602080910402602001604051908101604052809291908181526020018280546103bc906107df565b80156104095780601f106103de57610100808354040283529160200191610409565b820191906000526020600020905b8154815290600101906020018083116103ec57829003601f168201915b5050505050815250509050919050565b60608160000361045c57505060408051808201909152600181527f3000000000000000000000000000000000000000000000000000000000000000602082015290565b8160005b81156104865780610470816109ca565b915061047f9050600a83610a31565b9150610460565b60008167ffffffffffffffff8111156104a1576104a16107b0565b6040519080825280601f01601f1916602001820160405280156104cb576020820181803683370190505b5090505b841561054e576104e0600183610a45565b91506104ed600a86610a5e565b6104f8906030610a72565b60f81b81838151811061050d5761050d610a85565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a905350610547600a86610a31565b94506104cf565b949350505050565b600081606001518260200151836040015160405160200161057993929190610ab4565b604051602081830303815290604052805190602001209050919050565b60005b838110156105b1578181015183820152602001610599565b50506000910152565b600081518084526105d2816020860160208601610596565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60208152600061061760208301846105ba565b9392505050565b6000806000806060858703121561063457600080fd5b843567ffffffffffffffff8082111561064c57600080fd5b818701915087601f83011261066057600080fd5b81358181111561066f57600080fd5b88602082850101111561068157600080fd5b6020928301965094505085013573ffffffffffffffffffffffffffffffffffffffff811681146106b057600080fd5b9150604085013580151581146106c557600080fd5b939692955090935050565b6000602082840312156106e2576000
80fd5b5035919050565b602081528151602082015273ffffffffffffffffffffffffffffffffffffffff60208301511660408201526040820151151560608201526000606083015160808084015261054e60a08401826105ba565b6000845161074c818460208901610596565b80830190507f2e000000000000000000000000000000000000000000000000000000000000008082528551610788816001850160208a01610596565b600192019182015283516107a3816002840160208801610596565b0160020195945050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600181811c908216806107f357607f821691505b60208210810361082c577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b601f82111561087c57600081815260208120601f850160051c810160208610156108595750805b601f850160051c820191505b8181101561087857828155600101610865565b5050505b505050565b815167ffffffffffffffff81111561089b5761089b6107b0565b6108af816108a984546107df565b84610832565b602080601f83116001811461090257600084156108cc5750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b178555610878565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b8281101561094f57888601518255948401946001909101908401610930565b508582101561098b57878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036109fb576109fb61099b565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b600082610a4057610a40610a02565b500490565b81810381811115610a5857610a5861099b565b92915050565b600082610a6d57610a6d610a02565b500690565b80820180821115610a5857610a5861099b565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60008451610ac6818460208901610596565b60609490941b7fffffffffffffffffffffffffffffffffffffffff000000000000000000000000169190930190815290151560f81b60148201526015019291505056fea164736f6c6343000813000a"
func init() {
if err := json.Unmarshal([]byte(SchemaRegistryStorageLayoutJSON), SchemaRegistryStorageLayout); err != nil {
panic(err)
}
layouts["SchemaRegistry"] = SchemaRegistryStorageLayout
deployedBytecodes["SchemaRegistry"] = SchemaRegistryDeployedBin
}
......@@ -9,6 +9,7 @@ import (
"os/exec"
"path"
"path/filepath"
"regexp"
"strings"
"text/template"
......@@ -84,17 +85,22 @@ func main() {
// and hold a mapping from the contract name to the contract path.
// Walk walks the directory deterministically, so the first instance
// of a contract with a given name is the one that gets used
re := regexp.MustCompile(`\.\d+\.\d+\.\d+`)
artifactPaths := make(map[string]string)
if err := filepath.Walk(f.ForgeArtifacts,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
base := filepath.Base(path)
if strings.HasSuffix(base, ".json") {
name := base[:len(base)-5]
if _, ok := artifactPaths[name]; !ok {
artifactPaths[name] = path
if strings.HasSuffix(path, ".json") {
base := filepath.Base(path)
name := strings.TrimSuffix(base, ".json")
// remove the compiler version from the name
sanitized := re.ReplaceAllString(name, "")
if _, ok := artifactPaths[sanitized]; !ok {
artifactPaths[sanitized] = path
}
}
return nil
......@@ -108,6 +114,7 @@ func main() {
artifactPath := path.Join(f.ForgeArtifacts, name+".sol", name+".json")
forgeArtifactData, err := os.ReadFile(artifactPath)
if errors.Is(err, os.ErrNotExist) {
log.Printf("cannot find forge-artifact for %s at standard path %s, trying %s\n", name, artifactPath, artifactPaths[name])
artifactPath = artifactPaths[name]
forgeArtifactData, err = os.ReadFile(artifactPath)
if errors.Is(err, os.ErrNotExist) {
......
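The hunk above strips the compiler-version suffix that forge appends to duplicated artifact file names, so that "SchemaRegistry.0.8.15.json" and "SchemaRegistry.json" resolve to the same contract name, keeping the first path encountered for each sanitized name. A standalone sketch (not part of this commit) of that sanitization; the helper name sanitizeArtifactName is hypothetical, while the regexp mirrors the one added above.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// versionRe matches a ".MAJOR.MINOR.PATCH" compiler-version suffix embedded in an artifact name.
var versionRe = regexp.MustCompile(`\.\d+\.\d+\.\d+`)

// sanitizeArtifactName drops the ".json" extension and any embedded compiler version,
// so versioned and unversioned artifact file names map to the same key.
func sanitizeArtifactName(base string) string {
	name := strings.TrimSuffix(base, ".json")
	return versionRe.ReplaceAllString(name, "")
}

func main() {
	fmt.Println(sanitizeArtifactName("SchemaRegistry.json"))        // SchemaRegistry
	fmt.Println(sanitizeArtifactName("SchemaRegistry.0.8.15.json")) // SchemaRegistry
}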
......@@ -23,6 +23,8 @@ const (
ProxyAdmin = "0x4200000000000000000000000000000000000018"
BaseFeeVault = "0x4200000000000000000000000000000000000019"
L1FeeVault = "0x420000000000000000000000000000000000001a"
SchemaRegistry = "0x4200000000000000000000000000000000000020"
EAS = "0x4200000000000000000000000000000000000021"
)
var (
......@@ -43,6 +45,8 @@ var (
ProxyAdminAddr = common.HexToAddress(ProxyAdmin)
BaseFeeVaultAddr = common.HexToAddress(BaseFeeVault)
L1FeeVaultAddr = common.HexToAddress(L1FeeVault)
SchemaRegistryAddr = common.HexToAddress(SchemaRegistry)
EASAddr = common.HexToAddress(EAS)
Predeploys = make(map[string]*common.Address)
)
......@@ -76,4 +80,6 @@ func init() {
Predeploys["ProxyAdmin"] = &ProxyAdminAddr
Predeploys["BaseFeeVault"] = &BaseFeeVaultAddr
Predeploys["L1FeeVault"] = &L1FeeVaultAddr
Predeploys["SchemaRegistry"] = &SchemaRegistryAddr
Predeploys["EAS"] = &EASAddr
}
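The new SchemaRegistry and EAS entries follow the existing predeploy pattern: a hex address constant, a parsed common.Address, and a registration in the Predeploys map. A minimal sketch (not part of this commit) of resolving the new address through that map, assuming the op-bindings/predeploys package shown above:
package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
)

func main() {
	// Look up the SchemaRegistry predeploy by name; the map is populated in the package init().
	if addr, ok := predeploys.Predeploys["SchemaRegistry"]; ok {
		fmt.Println("SchemaRegistry predeploy:", addr.Hex()) // 0x4200000000000000000000000000000000000020
	}
}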
......@@ -44,7 +44,7 @@ func Main(cliCtx *cli.Context) error {
m := metrics.NewMetrics("default")
ctx := context.Background()
config, err := opnode.NewRollupConfig(cliCtx)
config, err := opnode.NewRollupConfig(logger, cliCtx)
if err != nil {
return err
}
......
......@@ -225,6 +225,16 @@ func checkPredeployConfig(client *ethclient.Client, name string) error {
if err := checkL2ToL1MessagePasser(p, client); err != nil {
return err
}
case predeploys.SchemaRegistryAddr:
if err := checkSchemaRegistry(p, client); err != nil {
return err
}
case predeploys.EASAddr:
if err := checkEAS(p, client); err != nil {
return err
}
}
return nil
})
......@@ -712,6 +722,43 @@ func checkDeployerWhitelist(addr common.Address, client *ethclient.Client) error
return nil
}
func checkSchemaRegistry(addr common.Address, client *ethclient.Client) error {
contract, err := bindings.NewSchemaRegistry(addr, client)
if err != nil {
return err
}
version, err := contract.Version(&bind.CallOpts{})
if err != nil {
return err
}
log.Info("SchemaRegistry version", "version", version)
return nil
}
func checkEAS(addr common.Address, client *ethclient.Client) error {
contract, err := bindings.NewEAS(addr, client)
if err != nil {
return err
}
registry, err := contract.GetSchemaRegistry(&bind.CallOpts{})
if err != nil {
return err
}
if registry != predeploys.SchemaRegistryAddr {
return fmt.Errorf("Incorrect registry address %s", registry)
}
log.Info("EAS", "registry", registry)
version, err := contract.Version(&bind.CallOpts{})
if err != nil {
return err
}
log.Info("EAS version", "version", version)
return nil
}
func getEIP1967AdminAddress(client *ethclient.Client, addr common.Address) (common.Address, error) {
slot, err := client.StorageAt(context.Background(), addr, util.EIP1967AdminSlot, nil)
if err != nil {
......
......@@ -61,6 +61,7 @@ func BuildL2Genesis(config *DeployConfig, l1StartBlock *types.Block) (*core.Gene
}
db.CreateAccount(codeAddr)
db.SetState(addr, ImplementationSlot, codeAddr.Hash())
log.Info("Set proxy", "name", name, "address", addr, "implementation", codeAddr)
} else {
db.DeleteState(addr, AdminSlot)
}
......
......@@ -81,7 +81,7 @@ func TestBuildL2DeveloperGenesis(t *testing.T) {
err = config.InitDeveloperDeployedAddresses()
require.NoError(t, err)
gen := testBuildL2Genesis(t, config)
require.Equal(t, 2342, len(gen.Alloc))
require.Equal(t, 2344, len(gen.Alloc))
}
func TestBuildL2MainnetGenesis(t *testing.T) {
......@@ -90,7 +90,7 @@ func TestBuildL2MainnetGenesis(t *testing.T) {
config.EnableGovernance = true
config.FundDevAccounts = false
gen := testBuildL2Genesis(t, config)
require.Equal(t, 2064, len(gen.Alloc))
require.Equal(t, 2066, len(gen.Alloc))
}
func TestBuildL2MainnetNoGovernanceGenesis(t *testing.T) {
......@@ -99,5 +99,5 @@ func TestBuildL2MainnetNoGovernanceGenesis(t *testing.T) {
config.EnableGovernance = false
config.FundDevAccounts = false
gen := testBuildL2Genesis(t, config)
require.Equal(t, 2064, len(gen.Alloc))
require.Equal(t, 2066, len(gen.Alloc))
}
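Note that the expected alloc counts in these tests each rise by two, matching the two predeploys (EAS and SchemaRegistry) added to the generated L2 genesis in this change.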
......@@ -144,6 +144,12 @@ func BuildOptimism(immutable ImmutableConfig) (DeploymentResults, error) {
{
Name: "LegacyERC20ETH",
},
{
Name: "EAS",
},
{
Name: "SchemaRegistry",
},
}
return BuildL2(deployments)
}
......@@ -239,6 +245,10 @@ func l2Deployer(backend *backends.SimulatedBackend, opts *bind.TransactOpts, dep
_, tx, _, err = bindings.DeployOptimismMintableERC721Factory(opts, backend, bridge, remoteChainId)
case "LegacyERC20ETH":
_, tx, _, err = bindings.DeployLegacyERC20ETH(opts, backend)
case "EAS":
_, tx, _, err = bindings.DeployEAS(opts, backend)
case "SchemaRegistry":
_, tx, _, err = bindings.DeploySchemaRegistry(opts, backend)
default:
return tx, fmt.Errorf("unknown contract: %s", deployment.Name)
}
......
......@@ -63,6 +63,8 @@ func TestBuildOptimism(t *testing.T) {
"L2ERC721Bridge": true,
"OptimismMintableERC721Factory": true,
"LegacyERC20ETH": true,
"EAS": true,
"SchemaRegistry": true,
}
// Only the exact contracts that we care about are being
......
package op_challenger
import (
"context"
"fmt"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-challenger/config"
......@@ -14,7 +14,7 @@ import (
)
// Main is the programmatic entry-point for running op-challenger
func Main(logger log.Logger, cfg *config.Config) error {
func Main(ctx context.Context, logger log.Logger, cfg *config.Config) error {
client, err := ethclient.Dial(cfg.L1EthRpc)
if err != nil {
return fmt.Errorf("failed to dial L1: %w", err)
......@@ -30,26 +30,20 @@ func Main(logger log.Logger, cfg *config.Config) error {
return fmt.Errorf("failed to bind the fault dispute game contract: %w", err)
}
loader := fault.NewLoader(logger, contract)
responder, err := fault.NewFaultResponder(logger, txMgr, cfg.GameAddress)
gameLogger := logger.New("game", cfg.GameAddress)
loader := fault.NewLoader(contract)
responder, err := fault.NewFaultResponder(gameLogger, txMgr, cfg.GameAddress)
if err != nil {
return fmt.Errorf("failed to create the responder: %w", err)
}
trace := fault.NewAlphabetProvider(cfg.AlphabetTrace, uint64(cfg.GameDepth))
agent := fault.NewAgent(loader, cfg.GameDepth, trace, responder, cfg.AgreeWithProposedOutput, logger)
agent := fault.NewAgent(loader, cfg.GameDepth, trace, responder, cfg.AgreeWithProposedOutput, gameLogger)
caller, err := fault.NewFaultCallerFromBindings(cfg.GameAddress, client, logger)
caller, err := fault.NewFaultCallerFromBindings(cfg.GameAddress, client, gameLogger)
if err != nil {
return fmt.Errorf("failed to bind the fault contract: %w", err)
}
logger.Info("Fault game started")
for {
logger.Info("Performing action")
_ = agent.Act()
caller.LogGameInfo()
time.Sleep(300 * time.Millisecond)
}
return fault.MonitorGame(ctx, gameLogger, cfg.AgreeWithProposedOutput, agent, caller)
}
package main
import (
"context"
"fmt"
"os"
......@@ -41,7 +42,7 @@ func main() {
}
}
type ConfigAction func(log log.Logger, config *config.Config) error
type ConfigAction func(ctx context.Context, log log.Logger, config *config.Config) error
func run(args []string, action ConfigAction) error {
oplog.SetupDefaults()
......@@ -63,7 +64,7 @@ func run(args []string, action ConfigAction) error {
if err != nil {
return err
}
return action(logger, cfg)
return action(ctx.Context, logger, cfg)
}
return app.Run(args)
}
......
package main
import (
"context"
"fmt"
"testing"
......@@ -137,7 +138,7 @@ func runWithArgs(cliArgs []string) (log.Logger, config.Config, error) {
cfg := new(config.Config)
var logger log.Logger
fullArgs := append([]string{"op-challenger"}, cliArgs...)
err := run(fullArgs, func(log log.Logger, config *config.Config) error {
err := run(fullArgs, func(ctx context.Context, log log.Logger, config *config.Config) error {
logger = log
cfg = config
return nil
......
......@@ -17,8 +17,8 @@ type Agent struct {
log log.Logger
}
func NewAgent(loader Loader, maxDepth int, trace TraceProvider, responder Responder, agreeWithProposedOutput bool, log log.Logger) Agent {
return Agent{
func NewAgent(loader Loader, maxDepth int, trace TraceProvider, responder Responder, agreeWithProposedOutput bool, log log.Logger) *Agent {
return &Agent{
solver: NewSolver(maxDepth, trace),
loader: loader,
responder: responder,
......@@ -29,27 +29,44 @@ func NewAgent(loader Loader, maxDepth int, trace TraceProvider, responder Respon
}
// Act iterates the game & performs all of the next actions.
func (a *Agent) Act() error {
game, err := a.newGameFromContracts(context.Background())
func (a *Agent) Act(ctx context.Context) error {
if a.tryResolve(ctx) {
return nil
}
game, err := a.newGameFromContracts(ctx)
if err != nil {
a.log.Error("Failed to create new game", "err", err)
return err
return fmt.Errorf("create game from contracts: %w", err)
}
// Create counter claims
for _, claim := range game.Claims() {
if err := a.move(claim, game); err != nil {
if err := a.move(ctx, claim, game); err != nil && !errors.Is(err, ErrGameDepthReached) {
log.Error("Failed to move", "err", err)
}
}
// Step on all leaf claims
for _, claim := range game.Claims() {
if err := a.step(claim, game); err != nil {
if err := a.step(ctx, claim, game); err != nil {
log.Error("Failed to step", "err", err)
}
}
return nil
}
// tryResolve resolves the game if it is in a terminal state
// and returns true if the game resolves successfully.
func (a *Agent) tryResolve(ctx context.Context) bool {
if a.responder.CanResolve(ctx) {
a.log.Info("Resolving game")
err := a.responder.Resolve(ctx)
if err != nil {
a.log.Error("Failed to resolve the game", "err", err)
return false
}
return true
}
return false
}
// newGameFromContracts initializes a new game state from the state in the contract
func (a *Agent) newGameFromContracts(ctx context.Context) (Game, error) {
claims, err := a.loader.FetchClaims(ctx)
......@@ -67,11 +84,10 @@ func (a *Agent) newGameFromContracts(ctx context.Context) (Game, error) {
}
// move determines & executes the next move given a claim
func (a *Agent) move(claim Claim, game Game) error {
func (a *Agent) move(ctx context.Context, claim Claim, game Game) error {
nextMove, err := a.solver.NextMove(claim, game.AgreeWithClaimLevel(claim))
if err != nil {
a.log.Warn("Failed to execute the next move", "err", err)
return err
return fmt.Errorf("execute next move: %w", err)
}
if nextMove == nil {
a.log.Debug("No next move")
......@@ -82,35 +98,34 @@ func (a *Agent) move(claim Claim, game Game) error {
"value", move.Value, "trace_index", move.TraceIndex(a.maxDepth),
"parent_value", claim.Value, "parent_trace_index", claim.TraceIndex(a.maxDepth))
if game.IsDuplicate(move) {
log.Debug("Duplicate move")
log.Debug("Skipping duplicate move")
return nil
}
log.Info("Performing move")
return a.responder.Respond(context.TODO(), move)
return a.responder.Respond(ctx, move)
}
// step determines & executes the next step against a leaf claim through the responder
func (a *Agent) step(claim Claim, game Game) error {
func (a *Agent) step(ctx context.Context, claim Claim, game Game) error {
if claim.Depth() != a.maxDepth {
return nil
}
agreeWithClaimLevel := game.AgreeWithClaimLevel(claim)
if agreeWithClaimLevel {
a.log.Warn("Agree with leaf claim, skipping step", "claim_depth", claim.Depth(), "maxDepth", a.maxDepth)
a.log.Debug("Agree with leaf claim, skipping step", "claim_depth", claim.Depth(), "maxDepth", a.maxDepth)
return nil
}
if claim.Countered {
a.log.Info("Claim already stepped on", "claim_depth", claim.Depth(), "maxDepth", a.maxDepth)
a.log.Debug("Step already executed against claim", "depth", claim.Depth(), "index_at_depth", claim.IndexAtDepth(), "value", claim.Value)
return nil
}
a.log.Info("Attempting step", "claim_depth", claim.Depth(), "maxDepth", a.maxDepth)
step, err := a.solver.AttemptStep(claim, agreeWithClaimLevel)
if err != nil {
a.log.Warn("Failed to get a step", "err", err)
return err
return fmt.Errorf("attempt step: %w", err)
}
a.log.Info("Performing step", "is_attack", step.IsAttack,
......@@ -119,6 +134,7 @@ func (a *Agent) step(claim Claim, game Game) error {
ClaimIndex: uint64(step.LeafClaim.ContractIndex),
IsAttack: step.IsAttack,
StateData: step.PreState,
Proof: step.ProofData,
}
return a.responder.Step(context.TODO(), callData)
return a.responder.Step(ctx, callData)
}
......@@ -26,21 +26,21 @@ func NewAlphabetProvider(state string, depth uint64) *AlphabetProvider {
}
// GetPreimage returns the preimage for the given hash.
func (ap *AlphabetProvider) GetPreimage(i uint64) ([]byte, error) {
func (ap *AlphabetProvider) GetPreimage(i uint64) ([]byte, []byte, error) {
// The index cannot be larger than the maximum index as computed by the depth.
if i >= ap.maxLen {
return []byte{}, ErrIndexTooLarge
return nil, nil, ErrIndexTooLarge
}
// We extend the deepest hash to the maximum depth if the trace is not expansive.
if i >= uint64(len(ap.state)) {
return ap.GetPreimage(uint64(len(ap.state)) - 1)
}
return BuildAlphabetPreimage(i, ap.state[i]), nil
return BuildAlphabetPreimage(i, ap.state[i]), []byte{}, nil
}
// Get returns the claim value at the given index in the trace.
func (ap *AlphabetProvider) Get(i uint64) (common.Hash, error) {
claimBytes, err := ap.GetPreimage(i)
claimBytes, _, err := ap.GetPreimage(i)
if err != nil {
return common.Hash{}, err
}
......@@ -48,9 +48,7 @@ func (ap *AlphabetProvider) Get(i uint64) (common.Hash, error) {
}
func (ap *AlphabetProvider) AbsolutePreState() []byte {
out := make([]byte, 32)
out[31] = 96 // ascii character 96 is "`"
return out
return common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000060")
}
// BuildAlphabetPreimage constructs the claim bytes for the index and state item.
......
......@@ -55,16 +55,17 @@ func FuzzIndexToBytes(f *testing.F) {
func TestGetPreimage_Succeeds(t *testing.T) {
ap := NewAlphabetProvider("abc", 2)
expected := BuildAlphabetPreimage(0, "a'")
retrieved, err := ap.GetPreimage(uint64(0))
retrieved, proof, err := ap.GetPreimage(uint64(0))
require.NoError(t, err)
require.Equal(t, expected, retrieved)
require.Empty(t, proof)
}
// TestGetPreimage_TooLargeIndex_Fails tests that the GetPreimage
// function errors if the index is too large.
func TestGetPreimage_TooLargeIndex_Fails(t *testing.T) {
ap := NewAlphabetProvider("abc", 2)
_, err := ap.GetPreimage(4)
_, _, err := ap.GetPreimage(4)
require.ErrorIs(t, err, ErrIndexTooLarge)
}
......
......@@ -19,15 +19,13 @@ type FaultDisputeGameCaller interface {
type FaultCaller struct {
FaultDisputeGameCaller
log log.Logger
fdgAddr common.Address
log log.Logger
}
func NewFaultCaller(fdgAddr common.Address, caller FaultDisputeGameCaller, log log.Logger) *FaultCaller {
func NewFaultCaller(caller FaultDisputeGameCaller, log log.Logger) *FaultCaller {
return &FaultCaller{
caller,
log,
fdgAddr,
}
}
......@@ -39,40 +37,31 @@ func NewFaultCallerFromBindings(fdgAddr common.Address, client *ethclient.Client
return &FaultCaller{
caller,
log,
fdgAddr,
}, nil
}
// LogGameInfo logs the game info.
func (fc *FaultCaller) LogGameInfo() {
status, err := fc.GetGameStatus(context.Background())
func (fc *FaultCaller) LogGameInfo(ctx context.Context) {
status, err := fc.GetGameStatus(ctx)
if err != nil {
fc.log.Error("failed to get game status", "err", err)
return
}
claimLen, err := fc.GetClaimDataLength(context.Background())
claimLen, err := fc.GetClaimDataLength(ctx)
if err != nil {
fc.log.Error("failed to get claim count", "err", err)
return
}
fc.log.Info("Game info", "addr", fc.fdgAddr, "claims", claimLen, "status", GameStatusString(status))
fc.log.Info("Game info", "claims", claimLen, "status", GameStatusString(status))
}
// GetGameStatus returns the current game status.
// 0: In Progress
// 1: Challenger Won
// 2: Defender Won
func (fc *FaultCaller) GetGameStatus(ctx context.Context) (uint8, error) {
return fc.Status(&bind.CallOpts{Context: ctx})
}
func (fc *FaultCaller) LogGameStatus() {
status, err := fc.GetGameStatus(context.Background())
if err != nil {
fc.log.Error("failed to get game status", "err", err)
return
}
fc.log.Info("Game status", "status", GameStatusString(status))
func (fc *FaultCaller) GetGameStatus(ctx context.Context) (GameStatus, error) {
status, err := fc.Status(&bind.CallOpts{Context: ctx})
return GameStatus(status), err
}
// GetClaimDataLength returns the number of claims in the game.
......@@ -80,8 +69,8 @@ func (fc *FaultCaller) GetClaimDataLength(ctx context.Context) (*big.Int, error)
return fc.ClaimDataLen(&bind.CallOpts{Context: ctx})
}
func (fc *FaultCaller) LogClaimDataLength() {
claimLen, err := fc.GetClaimDataLength(context.Background())
func (fc *FaultCaller) LogClaimDataLength(ctx context.Context) {
claimLen, err := fc.GetClaimDataLength(ctx)
if err != nil {
fc.log.Error("failed to get claim count", "err", err)
return
......@@ -90,13 +79,13 @@ func (fc *FaultCaller) LogClaimDataLength() {
}
// GameStatusString returns the current game status as a string.
func GameStatusString(status uint8) string {
func GameStatusString(status GameStatus) string {
switch status {
case 0:
case GameStatusInProgress:
return "In Progress"
case 1:
case GameStatusChallengerWon:
return "Challenger Won"
case 2:
case GameStatusDefenderWon:
return "Defender Won"
default:
return "Unknown"
......
......@@ -7,13 +7,11 @@ import (
"testing"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
var (
testAddr = common.HexToAddress("0x1234567890123456789012345678901234567890")
errMock = errors.New("mock error")
errMock = errors.New("mock error")
)
type mockFaultDisputeGameCaller struct {
......@@ -42,7 +40,7 @@ func TestFaultCaller_GetGameStatus(t *testing.T) {
tests := []struct {
name string
caller FaultDisputeGameCaller
expectedStatus uint8
expectedStatus GameStatus
expectedErr error
}{
{
......@@ -50,7 +48,7 @@ func TestFaultCaller_GetGameStatus(t *testing.T) {
caller: &mockFaultDisputeGameCaller{
status: 1,
},
expectedStatus: 1,
expectedStatus: GameStatusChallengerWon,
expectedErr: nil,
},
{
......@@ -65,7 +63,7 @@ func TestFaultCaller_GetGameStatus(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fc := NewFaultCaller(testAddr, test.caller, nil)
fc := NewFaultCaller(test.caller, nil)
status, err := fc.GetGameStatus(context.Background())
require.Equal(t, test.expectedStatus, status)
require.Equal(t, test.expectedErr, err)
......@@ -100,7 +98,7 @@ func TestFaultCaller_GetClaimDataLength(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fc := NewFaultCaller(testAddr, test.caller, nil)
fc := NewFaultCaller(test.caller, nil)
claimDataLen, err := fc.GetClaimDataLength(context.Background())
require.Equal(t, test.expectedClaimDataLen, claimDataLen)
require.Equal(t, test.expectedErr, err)
......
......@@ -43,16 +43,20 @@ func (p *CannonTraceProvider) Get(i uint64) (common.Hash, error) {
return value, nil
}
func (p *CannonTraceProvider) GetPreimage(i uint64) ([]byte, error) {
func (p *CannonTraceProvider) GetPreimage(i uint64) ([]byte, []byte, error) {
proof, err := p.loadProof(i)
if err != nil {
return nil, err
return nil, nil, err
}
value := ([]byte)(proof.StateData)
if len(value) == 0 {
return nil, errors.New("proof missing state data")
return nil, nil, errors.New("proof missing state data")
}
return value, nil
data := ([]byte)(proof.ProofData)
if len(data) == 0 {
return nil, nil, errors.New("proof missing proof data")
}
return value, data, nil
}
func (p *CannonTraceProvider) AbsolutePreState() []byte {
......
......@@ -43,27 +43,31 @@ func TestGet(t *testing.T) {
func TestGetPreimage(t *testing.T) {
provider := setupWithTestData(t)
t.Run("ExistingProof", func(t *testing.T) {
value, err := provider.GetPreimage(0)
value, proof, err := provider.GetPreimage(0)
require.NoError(t, err)
expected := common.Hex2Bytes("b8f068de604c85ea0e2acd437cdb47add074a2d70b81d018390c504b71fe26f400000000000000000000000000000000000000000000000000000000000000000000000000")
require.Equal(t, expected, value)
expectedProof := common.Hex2Bytes("08028e3c0000000000000000000000003c01000a24210b7c00200008000000008fa40004")
require.Equal(t, expectedProof, proof)
})
t.Run("ProofUnavailable", func(t *testing.T) {
_, err := provider.GetPreimage(7)
_, _, err := provider.GetPreimage(7)
require.ErrorIs(t, err, os.ErrNotExist)
})
t.Run("MissingStateData", func(t *testing.T) {
_, err := provider.GetPreimage(1)
_, _, err := provider.GetPreimage(1)
require.ErrorContains(t, err, "missing state data")
})
t.Run("IgnoreUnknownFields", func(t *testing.T) {
value, err := provider.GetPreimage(2)
value, proof, err := provider.GetPreimage(2)
require.NoError(t, err)
expected := common.Hex2Bytes("cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc")
require.Equal(t, expected, value)
expectedProof := common.Hex2Bytes("dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
require.Equal(t, expectedProof, proof)
})
}
......
......@@ -5,7 +5,6 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/log"
)
// ClaimFetcher is a minimal interface around [bindings.FaultDisputeGameCaller].
......@@ -28,14 +27,12 @@ type Loader interface {
// loader pulls in fault dispute game claim data periodically and over subscriptions.
type loader struct {
log log.Logger
claimFetcher ClaimFetcher
}
// NewLoader creates a new [loader].
func NewLoader(log log.Logger, claimFetcher ClaimFetcher) *loader {
func NewLoader(claimFetcher ClaimFetcher) *loader {
return &loader{
log: log,
claimFetcher: claimFetcher,
}
}
......
......@@ -6,9 +6,7 @@ import (
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
......@@ -91,10 +89,9 @@ func (m *mockClaimFetcher) ClaimDataLen(opts *bind.CallOpts) (*big.Int, error) {
// TestLoader_FetchClaims_Succeeds tests [loader.FetchClaims].
func TestLoader_FetchClaims_Succeeds(t *testing.T) {
log := testlog.Logger(t, log.LvlError)
mockClaimFetcher := newMockClaimFetcher()
expectedClaims := mockClaimFetcher.returnClaims
loader := NewLoader(log, mockClaimFetcher)
loader := NewLoader(mockClaimFetcher)
claims, err := loader.FetchClaims(context.Background())
require.NoError(t, err)
require.ElementsMatch(t, []Claim{
......@@ -143,10 +140,9 @@ func TestLoader_FetchClaims_Succeeds(t *testing.T) {
// TestLoader_FetchClaims_ClaimDataErrors tests [loader.FetchClaims]
// when the claim fetcher [ClaimData] function call errors.
func TestLoader_FetchClaims_ClaimDataErrors(t *testing.T) {
log := testlog.Logger(t, log.LvlError)
mockClaimFetcher := newMockClaimFetcher()
mockClaimFetcher.claimDataError = true
loader := NewLoader(log, mockClaimFetcher)
loader := NewLoader(mockClaimFetcher)
claims, err := loader.FetchClaims(context.Background())
require.ErrorIs(t, err, mockClaimDataError)
require.Empty(t, claims)
......@@ -155,10 +151,9 @@ func TestLoader_FetchClaims_ClaimDataErrors(t *testing.T) {
// TestLoader_FetchClaims_ClaimLenErrors tests [loader.FetchClaims]
// when the claim fetcher [ClaimDataLen] function call errors.
func TestLoader_FetchClaims_ClaimLenErrors(t *testing.T) {
log := testlog.Logger(t, log.LvlError)
mockClaimFetcher := newMockClaimFetcher()
mockClaimFetcher.claimLenError = true
loader := NewLoader(log, mockClaimFetcher)
loader := NewLoader(mockClaimFetcher)
claims, err := loader.FetchClaims(context.Background())
require.ErrorIs(t, err, mockClaimLenError)
require.Empty(t, claims)
......
package fault
import (
"context"
"time"
"github.com/ethereum/go-ethereum/log"
)
type GameInfo interface {
GetGameStatus(context.Context) (GameStatus, error)
LogGameInfo(ctx context.Context)
}
type Actor interface {
Act(ctx context.Context) error
}
func MonitorGame(ctx context.Context, logger log.Logger, agreeWithProposedOutput bool, actor Actor, caller GameInfo) error {
logger.Info("Monitoring fault dispute game", "agreeWithOutput", agreeWithProposedOutput)
for {
done := progressGame(ctx, logger, agreeWithProposedOutput, actor, caller)
if done {
return nil
}
select {
case <-time.After(300 * time.Millisecond):
// Continue
case <-ctx.Done():
return ctx.Err()
}
}
}
// progressGame checks the current state of the game and attempts to progress it by performing moves, steps or resolving the game.
// It returns true if the game is complete, or false if it needs to be monitored further.
func progressGame(ctx context.Context, logger log.Logger, agreeWithProposedOutput bool, actor Actor, caller GameInfo) bool {
logger.Trace("Checking if actions are required")
if err := actor.Act(ctx); err != nil {
logger.Error("Error when acting on game", "err", err)
}
if status, err := caller.GetGameStatus(ctx); err != nil {
logger.Warn("Unable to retrieve game status", "err", err)
} else if status != 0 {
var expectedStatus GameStatus
if agreeWithProposedOutput {
expectedStatus = GameStatusChallengerWon
} else {
expectedStatus = GameStatusDefenderWon
}
if expectedStatus == status {
logger.Info("Game won", "status", GameStatusString(status))
} else {
logger.Error("Game lost", "status", GameStatusString(status))
}
return true
} else {
caller.LogGameInfo(ctx)
}
return false
}
package fault
import (
"context"
"errors"
"testing"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
func TestMonitorExitsWhenContextDone(t *testing.T) {
logger := testlog.Logger(t, log.LvlDebug)
actor := &stubActor{}
gameInfo := &stubGameInfo{}
ctx, cancel := context.WithCancel(context.Background())
cancel()
err := MonitorGame(ctx, logger, true, actor, gameInfo)
require.ErrorIs(t, err, context.Canceled)
}
func TestProgressGameAndLogState(t *testing.T) {
logger, _, actor, gameInfo := setupProgressGameTest(t)
done := progressGame(context.Background(), logger, true, actor, gameInfo)
require.False(t, done, "should not be done")
require.Equal(t, 1, actor.callCount, "should perform next actions")
require.Equal(t, 1, gameInfo.logCount, "should log latest game state")
}
func TestProgressGame_LogErrorFromAct(t *testing.T) {
logger, handler, actor, gameInfo := setupProgressGameTest(t)
actor.err = errors.New("Boom")
done := progressGame(context.Background(), logger, true, actor, gameInfo)
require.False(t, done, "should not be done")
require.Equal(t, 1, actor.callCount, "should perform next actions")
require.Equal(t, 1, gameInfo.logCount, "should log latest game state")
errLog := handler.FindLog(log.LvlError, "Error when acting on game")
require.NotNil(t, errLog, "should log error")
require.Equal(t, actor.err, errLog.GetContextValue("err"))
}
func TestProgressGame_LogErrorWhenGameLost(t *testing.T) {
tests := []struct {
name string
status GameStatus
agreeWithOutput bool
logLevel log.Lvl
logMsg string
statusText string
}{
{
name: "GameLostAsDefender",
status: GameStatusChallengerWon,
agreeWithOutput: false,
logLevel: log.LvlError,
logMsg: "Game lost",
statusText: "Challenger Won",
},
{
name: "GameLostAsChallenger",
status: GameStatusDefenderWon,
agreeWithOutput: true,
logLevel: log.LvlError,
logMsg: "Game lost",
statusText: "Defender Won",
},
{
name: "GameWonAsDefender",
status: GameStatusDefenderWon,
agreeWithOutput: false,
logLevel: log.LvlInfo,
logMsg: "Game won",
statusText: "Defender Won",
},
{
name: "GameWonAsChallenger",
status: GameStatusChallengerWon,
agreeWithOutput: true,
logLevel: log.LvlInfo,
logMsg: "Game won",
statusText: "Challenger Won",
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
logger, handler, actor, gameInfo := setupProgressGameTest(t)
gameInfo.status = test.status
done := progressGame(context.Background(), logger, test.agreeWithOutput, actor, gameInfo)
require.True(t, done, "should be done")
require.Equal(t, 0, gameInfo.logCount, "should not log latest game state")
errLog := handler.FindLog(test.logLevel, test.logMsg)
require.NotNil(t, errLog, "should log game result")
require.Equal(t, test.statusText, errLog.GetContextValue("status"))
})
}
}
func setupProgressGameTest(t *testing.T) (log.Logger, *testlog.CapturingHandler, *stubActor, *stubGameInfo) {
logger := testlog.Logger(t, log.LvlDebug)
handler := &testlog.CapturingHandler{
Delegate: logger.GetHandler(),
}
logger.SetHandler(handler)
actor := &stubActor{}
gameInfo := &stubGameInfo{}
return logger, handler, actor, gameInfo
}
type stubActor struct {
callCount int
err error
}
func (a *stubActor) Act(ctx context.Context) error {
a.callCount++
return a.err
}
type stubGameInfo struct {
status GameStatus
err error
logCount int
}
func (s *stubGameInfo) GetGameStatus(ctx context.Context) (GameStatus, error) {
return s.status, s.err
}
func (s *stubGameInfo) LogGameInfo(ctx context.Context) {
s.logCount++
}
......@@ -7,6 +7,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
......@@ -77,6 +78,20 @@ func (r *faultResponder) BuildTx(ctx context.Context, response Claim) ([]byte, e
}
}
// CanResolve determines if the resolve function on the fault dispute game contract
// would succeed. Returns true if the game can be resolved, otherwise false.
func (r *faultResponder) CanResolve(ctx context.Context) bool {
txData, err := r.buildResolveData()
if err != nil {
return false
}
_, err = r.txMgr.Call(ctx, ethereum.CallMsg{
To: &r.fdgAddr,
Data: txData,
}, nil)
return err == nil
}
// Resolve executes a resolve transaction to resolve a fault dispute game.
func (r *faultResponder) Resolve(ctx context.Context) error {
txData, err := r.buildResolveData()
......@@ -108,9 +123,9 @@ func (r *faultResponder) sendTxAndWait(ctx context.Context, txData []byte) error
return err
}
if receipt.Status == types.ReceiptStatusFailed {
r.log.Error("responder tx successfully published but reverted", "tx_hash", receipt.TxHash)
r.log.Error("Responder tx successfully published but reverted", "tx_hash", receipt.TxHash)
} else {
r.log.Info("responder tx successfully published", "tx_hash", receipt.TxHash)
r.log.Debug("Responder tx successfully published", "tx_hash", receipt.TxHash)
}
return nil
}
......
......@@ -26,6 +26,7 @@ var (
type mockTxManager struct {
from common.Address
sends int
calls int
sendFails bool
}
......@@ -42,7 +43,11 @@ func (m *mockTxManager) Send(ctx context.Context, candidate txmgr.TxCandidate) (
}
func (m *mockTxManager) Call(_ context.Context, _ ethereum.CallMsg, _ *big.Int) ([]byte, error) {
panic("unimplemented")
if m.sendFails {
return nil, mockSendError
}
m.calls++
return []byte{}, nil
}
func (m *mockTxManager) BlockNumber(ctx context.Context) (uint64, error) {
......@@ -62,6 +67,24 @@ func newTestFaultResponder(t *testing.T, sendFails bool) (*faultResponder, *mock
return responder, mockTxMgr
}
// TestResponder_CanResolve_CallFails tests that [Responder.CanResolve]
// returns false when the underlying [txmgr.Call] fails.
func TestResponder_CanResolve_CallFails(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t, true)
resolved := responder.CanResolve(context.Background())
require.False(t, resolved)
require.Equal(t, 0, mockTxMgr.sends)
}
// TestResponder_CanResolve_Success tests that [Responder.CanResolve]
// returns true when the call message is successfully sent through the txmgr.
func TestResponder_CanResolve_Success(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t, false)
resolved := responder.CanResolve(context.Background())
require.True(t, resolved)
require.Equal(t, 1, mockTxMgr.calls)
}
// TestResponder_Resolve_SendFails tests that the [Responder.Resolve] method
// bubbles up the error returned by the [txmgr.Send] method.
func TestResponder_Resolve_SendFails(t *testing.T) {
......
......@@ -2,10 +2,15 @@ package fault
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
)
var (
ErrGameDepthReached = errors.New("game depth reached")
)
// Solver uses a [TraceProvider] to determine the moves to make in a dispute game.
type Solver struct {
TraceProvider
......@@ -54,7 +59,7 @@ func (s *Solver) handleMiddle(claim Claim) (*Claim, error) {
return nil, err
}
if claim.Depth() == s.gameDepth {
return nil, errors.New("game depth reached")
return nil, ErrGameDepthReached
}
if claimCorrect {
return s.defend(claim)
......@@ -67,6 +72,7 @@ type StepData struct {
LeafClaim Claim
IsAttack bool
PreState []byte
ProofData []byte
}
// AttemptStep determines what step should occur for a given leaf claim.
......@@ -84,6 +90,7 @@ func (s *Solver) AttemptStep(claim Claim, agreeWithClaimLevel bool) (StepData, e
}
index := claim.TraceIndex(s.gameDepth)
var preState []byte
var proofData []byte
// If we are attacking index 0, we provide the absolute pre-state, not an intermediate state
if index == 0 && !claimCorrect {
preState = s.AbsolutePreState()
......@@ -92,7 +99,7 @@ func (s *Solver) AttemptStep(claim Claim, agreeWithClaimLevel bool) (StepData, e
if !claimCorrect {
index = index - 1
}
preState, err = s.GetPreimage(index)
preState, proofData, err = s.GetPreimage(index)
if err != nil {
return StepData{}, err
}
......@@ -102,6 +109,7 @@ func (s *Solver) AttemptStep(claim Claim, agreeWithClaimLevel bool) (StepData, e
LeafClaim: claim,
IsAttack: !claimCorrect,
PreState: preState,
ProofData: proofData,
}, nil
}
......@@ -110,7 +118,7 @@ func (s *Solver) attack(claim Claim) (*Claim, error) {
position := claim.Attack()
value, err := s.traceAtPosition(position)
if err != nil {
return nil, err
return nil, fmt.Errorf("attack claim: %w", err)
}
return &Claim{
ClaimData: ClaimData{Value: value, Position: position},
......@@ -124,7 +132,7 @@ func (s *Solver) defend(claim Claim) (*Claim, error) {
position := claim.Defend()
value, err := s.traceAtPosition(position)
if err != nil {
return nil, err
return nil, fmt.Errorf("defend claim: %w", err)
}
return &Claim{
ClaimData: ClaimData{Value: value, Position: position},
......
......@@ -100,7 +100,7 @@ func TestNoMoveAgainstOwnLevel(t *testing.T) {
func TestAttemptStep(t *testing.T) {
maxDepth := 3
canonicalProvider := NewAlphabetProvider("abcdefgh", uint64(maxDepth))
canonicalProvider := &alphabetWithProofProvider{NewAlphabetProvider("abcdefgh", uint64(maxDepth))}
solver := NewSolver(maxDepth, canonicalProvider)
_, _, middle, bottom := createTestClaims()
......@@ -116,6 +116,7 @@ func TestAttemptStep(t *testing.T) {
require.Equal(t, bottom, step.LeafClaim)
require.True(t, step.IsAttack)
require.Equal(t, step.PreState, BuildAlphabetPreimage(3, "d"))
require.Equal(t, step.ProofData, []byte{3})
_, err = solver.AttemptStep(middle, false)
require.Error(t, err)
......@@ -137,3 +138,15 @@ func TestAttempStep_AgreeWithClaimLevel_Fails(t *testing.T) {
require.Error(t, err)
require.Equal(t, StepData{}, step)
}
type alphabetWithProofProvider struct {
*AlphabetProvider
}
func (a *alphabetWithProofProvider) GetPreimage(i uint64) ([]byte, []byte, error) {
preimage, _, err := a.AlphabetProvider.GetPreimage(i)
if err != nil {
return nil, nil, err
}
return preimage, []byte{byte(i)}, nil
}
......@@ -8,10 +8,17 @@ import (
)
var (
ErrNegativeIndex = errors.New("index cannot be negative")
ErrIndexTooLarge = errors.New("index is larger than the maximum index")
)
type GameStatus uint8
const (
GameStatusInProgress GameStatus = iota
GameStatusChallengerWon
GameStatusDefenderWon
)
// StepCallData encapsulates the data needed to perform a step.
type StepCallData struct {
ClaimIndex uint64
......@@ -20,13 +27,17 @@ type StepCallData struct {
Proof []byte
}
// TraceProvider is a generic way to get a claim value at a specific
// step in the trace.
// Get(i) = Keccak256(GetPreimage(i))
// AbsolutePreState is the value of the trace that transitions to the trace value at index 0
// TraceProvider is a generic way to get a claim value at a specific step in the trace.
type TraceProvider interface {
// Get returns the claim value at the requested index.
// Get(i) = Keccak256(GetPreimage(i))
Get(i uint64) (common.Hash, error)
GetPreimage(i uint64) ([]byte, error)
// GetPreimage returns the pre-image for a claim at the specified trace index, along
// with any associated proof data to assist in its verification.
GetPreimage(i uint64) (preimage []byte, proofData []byte, err error)
// AbsolutePreState is the pre-image value of the trace that transitions to the trace value at index 0
AbsolutePreState() []byte
}
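The interface above documents the invariant Get(i) = Keccak256(GetPreimage(i)). A standalone sketch (not part of this commit) of a trivial in-memory provider that satisfies the updated three-return GetPreimage signature; the type name memoryTraceProvider is hypothetical.
package sketch

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

type memoryTraceProvider struct {
	preimages [][]byte // preimages[i] is the pre-image of the claim at trace index i
	prestate  []byte   // value that transitions to the trace value at index 0
}

// Get returns the claim value at index i, derived from the pre-image by hashing,
// so Get(i) = Keccak256(GetPreimage(i)) holds by construction.
func (m *memoryTraceProvider) Get(i uint64) (common.Hash, error) {
	preimage, _, err := m.GetPreimage(i)
	if err != nil {
		return common.Hash{}, err
	}
	return crypto.Keccak256Hash(preimage), nil
}

func (m *memoryTraceProvider) GetPreimage(i uint64) ([]byte, []byte, error) {
	if i >= uint64(len(m.preimages)) {
		return nil, nil, errors.New("index is larger than the maximum index")
	}
	// This in-memory provider has no accompanying proof data.
	return m.preimages[i], []byte{}, nil
}

func (m *memoryTraceProvider) AbsolutePreState() []byte {
	return m.prestate
}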
......@@ -76,6 +87,8 @@ func (c *Claim) DefendsParent() bool {
// Responder takes a response action & executes.
// For full op-challenger this means executing the transaction on chain.
type Responder interface {
CanResolve(ctx context.Context) bool
Resolve(ctx context.Context) error
Respond(ctx context.Context, response Claim) error
Step(ctx context.Context, stepData StepCallData) error
}
package challenger
import (
"context"
"errors"
"testing"
"time"
op_challenger "github.com/ethereum-optimism/optimism/op-challenger"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
type Helper struct {
log log.Logger
cancel func()
errors chan error
}
type Option func(config2 *config.Config)
func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name string, options ...Option) *Helper {
log := testlog.Logger(t, log.LvlInfo).New("role", name)
log.Info("Creating challenger", "l1", l1Endpoint)
txmgrCfg := txmgr.NewCLIConfig(l1Endpoint)
txmgrCfg.NumConfirmations = 1
txmgrCfg.ReceiptQueryInterval = 1 * time.Second
cfg := &config.Config{
L1EthRpc: l1Endpoint,
AlphabetTrace: "",
AgreeWithProposedOutput: true,
TxMgrConfig: txmgrCfg,
}
for _, option := range options {
option(cfg)
}
require.NotEmpty(t, cfg.TxMgrConfig.PrivateKey, "Missing private key for TxMgrConfig")
errCh := make(chan error, 1)
ctx, cancel := context.WithCancel(ctx)
go func() {
defer close(errCh)
errCh <- op_challenger.Main(ctx, log, cfg)
}()
return &Helper{
log: log,
cancel: cancel,
errors: errCh,
}
}
func (h *Helper) Close() error {
h.cancel()
select {
case <-time.After(1 * time.Minute):
return errors.New("timed out while stopping challenger")
case err := <-h.errors:
if !errors.Is(err, context.Canceled) {
return err
}
return nil
}
}
package disputegame
import (
"context"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-chain-ops/deployer"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/stretchr/testify/require"
)
// deployDisputeGameContracts deploys the DisputeGameFactory, AlphabetVM and FaultDisputeGame contracts
// It configures the alphabet fault game as game type 0 (faultGameType)
// If/when the dispute game factory becomes a predeployed contract, this can be removed and the
// predeployed version used instead
func deployDisputeGameContracts(require *require.Assertions, ctx context.Context, client *ethclient.Client, opts *bind.TransactOpts, gameDuration uint64) *bindings.DisputeGameFactory {
ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
defer cancel()
// Deploy the proxy
_, tx, proxy, err := bindings.DeployProxy(opts, client, deployer.TestAddress)
require.NoError(err)
proxyAddr, err := bind.WaitDeployed(ctx, client, tx)
require.NoError(err)
// Deploy the dispute game factory implementation
_, tx, _, err = bindings.DeployDisputeGameFactory(opts, client)
require.NoError(err)
factoryAddr, err := bind.WaitDeployed(ctx, client, tx)
require.NoError(err)
// Point the proxy at the implementation and create bindings going via the proxy
disputeGameFactoryAbi, err := bindings.DisputeGameFactoryMetaData.GetAbi()
require.NoError(err)
data, err := disputeGameFactoryAbi.Pack("initialize", deployer.TestAddress)
require.NoError(err)
_, err = proxy.UpgradeToAndCall(opts, factoryAddr, data)
require.NoError(err)
factory, err := bindings.NewDisputeGameFactory(proxyAddr, client)
require.NoError(err)
// Now setup the fault dispute game type
// Start by deploying the AlphabetVM
_, tx, _, err = bindings.DeployAlphabetVM(opts, client, alphabetVMAbsolutePrestateClaim)
require.NoError(err)
alphaVMAddr, err := bind.WaitDeployed(ctx, client, tx)
require.NoError(err)
// Deploy the fault dispute game implementation
_, tx, _, err = bindings.DeployFaultDisputeGame(opts, client, alphabetVMAbsolutePrestateClaim, big.NewInt(alphabetGameDepth), gameDuration, alphaVMAddr)
require.NoError(err)
faultDisputeGameAddr, err := bind.WaitDeployed(ctx, client, tx)
require.NoError(err)
// Set the fault game type implementation
_, err = factory.SetImplementation(opts, faultGameType, faultDisputeGameAddr)
require.NoError(err)
return factory
}
package disputegame
import (
"context"
"fmt"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-chain-ops/deployer"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/fault"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger"
"github.com/ethereum-optimism/optimism/op-service/client/utils"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/stretchr/testify/require"
)
const faultGameType uint8 = 0
const alphabetGameDepth = 4
const lastAlphabetTraceIndex = 1<<alphabetGameDepth - 1
type Status uint8
const (
StatusInProgress Status = iota
StatusChallengerWins
StatusDefenderWins
)
var alphaExtraData = common.Hex2Bytes("1000000000000000000000000000000000000000000000000000000000000000")
var alphabetVMAbsolutePrestate = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000060")
var alphabetVMAbsolutePrestateClaim = crypto.Keccak256Hash(alphabetVMAbsolutePrestate)
var CorrectAlphabet = "abcdefghijklmnop"
type FactoryHelper struct {
t *testing.T
require *require.Assertions
client *ethclient.Client
opts *bind.TransactOpts
factory *bindings.DisputeGameFactory
}
func NewFactoryHelper(t *testing.T, ctx context.Context, client *ethclient.Client, gameDuration uint64) *FactoryHelper {
require := require.New(t)
chainID, err := client.ChainID(ctx)
require.NoError(err)
opts, err := bind.NewKeyedTransactorWithChainID(deployer.TestKey, chainID)
require.NoError(err)
factory := deployDisputeGameContracts(require, ctx, client, opts, gameDuration)
return &FactoryHelper{
t: t,
require: require,
client: client,
opts: opts,
factory: factory,
}
}
func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet string) *FaultGameHelper {
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
trace := fault.NewAlphabetProvider(claimedAlphabet, 4)
rootClaim, err := trace.Get(lastAlphabetTraceIndex)
h.require.NoError(err)
tx, err := h.factory.Create(h.opts, faultGameType, rootClaim, alphaExtraData)
h.require.NoError(err)
rcpt, err := utils.WaitReceiptOK(ctx, h.client, tx.Hash())
h.require.NoError(err)
h.require.Len(rcpt.Logs, 1, "should have emitted a single DisputeGameCreated event")
createdEvent, err := h.factory.ParseDisputeGameCreated(*rcpt.Logs[0])
h.require.NoError(err)
game, err := bindings.NewFaultDisputeGame(createdEvent.DisputeProxy, h.client)
h.require.NoError(err)
return &FaultGameHelper{
t: h.t,
require: h.require,
client: h.client,
opts: h.opts,
game: game,
maxDepth: alphabetGameDepth,
addr: createdEvent.DisputeProxy,
claimedAlphabet: claimedAlphabet,
}
}
type FaultGameHelper struct {
t *testing.T
require *require.Assertions
client *ethclient.Client
opts *bind.TransactOpts
game *bindings.FaultDisputeGame
maxDepth int
addr common.Address
claimedAlphabet string
}
func (g *FaultGameHelper) StartChallenger(ctx context.Context, l1Endpoint string, name string, options ...challenger.Option) *challenger.Helper {
opts := []challenger.Option{
func(c *config.Config) {
c.GameAddress = g.addr
c.GameDepth = alphabetGameDepth
// By default the challenger agrees with the root claim (thus disagrees with the proposed output)
// This can be overridden by passing in options
c.AlphabetTrace = g.claimedAlphabet
c.AgreeWithProposedOutput = false
},
}
opts = append(opts, options...)
c := challenger.NewChallenger(g.t, ctx, l1Endpoint, name, opts...)
g.t.Cleanup(func() {
_ = c.Close()
})
return c
}
func (g *FaultGameHelper) WaitForClaimCount(ctx context.Context, count int64) {
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
err := utils.WaitFor(ctx, 1*time.Second, func() (bool, error) {
actual, err := g.game.ClaimDataLen(&bind.CallOpts{Context: ctx})
if err != nil {
return false, err
}
g.t.Log("Waiting for claim count", "current", actual, "expected", count, "game", g.addr)
return actual.Cmp(big.NewInt(count)) == 0, nil
})
g.require.NoError(err)
}
type ContractClaim struct {
ParentIndex uint32
Countered bool
Claim [32]byte
Position *big.Int
Clock *big.Int
}
func (g *FaultGameHelper) WaitForClaim(ctx context.Context, predicate func(claim ContractClaim) bool) {
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
err := utils.WaitFor(ctx, 1*time.Second, func() (bool, error) {
count, err := g.game.ClaimDataLen(&bind.CallOpts{Context: ctx})
if err != nil {
return false, fmt.Errorf("retrieve number of claims: %w", err)
}
// Search backwards because the new claims are at the end and more likely the ones we want.
for i := count.Int64() - 1; i >= 0; i-- {
claimData, err := g.game.ClaimData(&bind.CallOpts{Context: ctx}, big.NewInt(i))
if err != nil {
return false, fmt.Errorf("retrieve claim %v: %w", i, err)
}
if predicate(claimData) {
return true, nil
}
}
return false, nil
})
g.require.NoError(err)
}
func (g *FaultGameHelper) WaitForClaimAtMaxDepth(ctx context.Context, countered bool) {
g.WaitForClaim(ctx, func(claim ContractClaim) bool {
pos := fault.NewPositionFromGIndex(claim.Position.Uint64())
return pos.Depth() == g.maxDepth && claim.Countered == countered
})
}
func (g *FaultGameHelper) Resolve(ctx context.Context) {
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
tx, err := g.game.Resolve(g.opts)
g.require.NoError(err)
_, err = utils.WaitReceiptOK(ctx, g.client, tx.Hash())
g.require.NoError(err)
}
func (g *FaultGameHelper) WaitForGameStatus(ctx context.Context, expected Status) {
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
err := utils.WaitFor(ctx, 1*time.Second, func() (bool, error) {
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
status, err := g.game.Status(&bind.CallOpts{Context: ctx})
if err != nil {
return false, fmt.Errorf("game status unavailable: %w", err)
}
return expected == Status(status), nil
})
g.require.NoError(err, "wait for game status")
}
......@@ -137,6 +137,10 @@ func EncodePrivKey(priv *ecdsa.PrivateKey) hexutil.Bytes {
return privkey
}
func EncodePrivKeyToString(priv *ecdsa.PrivateKey) string {
return hexutil.Encode(EncodePrivKey(priv))
}
// Addresses computes the ethereum address of each account,
// which can then be kept around for fast precomputed address access.
func (s *Secrets) Addresses() *Addresses {
......
......@@ -5,37 +5,152 @@ import (
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/disputegame"
"github.com/ethereum-optimism/optimism/op-service/client/utils"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/stretchr/testify/require"
)
func TestTimeTravel(t *testing.T) {
func TestResolveDisputeGame(t *testing.T) {
InitParallel(t)
ctx := context.Background()
sys, l1Client := startL1OnlySystem(t)
t.Cleanup(sys.Close)
gameDuration := 24 * time.Hour
disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, l1Client, uint64(gameDuration.Seconds()))
game := disputeGameFactory.StartAlphabetGame(ctx, "zyxwvut")
require.NotNil(t, game)
game.WaitForGameStatus(ctx, disputegame.StatusInProgress)
game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "HonestAlice", func(c *config.Config) {
c.AgreeWithProposedOutput = true // Agree with the proposed output, so disagree with the root claim
c.AlphabetTrace = "abcdefg"
c.TxMgrConfig.PrivateKey = e2eutils.EncodePrivKeyToString(sys.cfg.Secrets.Alice)
})
game.WaitForClaimCount(ctx, 2)
sys.TimeTravelClock.AdvanceTime(gameDuration)
require.NoError(t, utils.WaitNextBlock(ctx, l1Client))
// Challenger should resolve the game now that the clocks have expired.
game.WaitForGameStatus(ctx, disputegame.StatusChallengerWins)
}
func TestChallengerCompleteDisputeGame(t *testing.T) {
InitParallel(t)
tests := []struct {
name string
rootClaimAlphabet string
otherAlphabet string
expectedResult disputegame.Status
expectStep bool
}{
{
name: "ChallengerWins_DefenseStep",
rootClaimAlphabet: "abcdexyz",
otherAlphabet: disputegame.CorrectAlphabet,
expectedResult: disputegame.StatusChallengerWins,
expectStep: true,
},
{
name: "DefenderWins_DefenseStep",
rootClaimAlphabet: disputegame.CorrectAlphabet,
otherAlphabet: "abcdexyz",
expectedResult: disputegame.StatusDefenderWins,
expectStep: false,
},
{
name: "ChallengerWins_AttackStep",
rootClaimAlphabet: "abcdefghzyx",
otherAlphabet: disputegame.CorrectAlphabet,
expectedResult: disputegame.StatusChallengerWins,
expectStep: true,
},
{
name: "DefenderWins_AttackStep",
rootClaimAlphabet: disputegame.CorrectAlphabet,
otherAlphabet: "abcdexyz",
expectedResult: disputegame.StatusDefenderWins,
expectStep: false,
},
{
name: "DefenderIncorrectAtTraceZero",
rootClaimAlphabet: "zyxwvut",
otherAlphabet: disputegame.CorrectAlphabet,
expectedResult: disputegame.StatusChallengerWins,
expectStep: true,
},
{
name: "ChallengerIncorrectAtTraceZero",
rootClaimAlphabet: disputegame.CorrectAlphabet,
otherAlphabet: "zyxwvut",
expectedResult: disputegame.StatusDefenderWins,
expectStep: false,
},
{
name: "DefenderIncorrectAtLastTraceIndex",
rootClaimAlphabet: "abcdefghijklmnoz",
otherAlphabet: disputegame.CorrectAlphabet,
expectedResult: disputegame.StatusChallengerWins,
expectStep: true,
},
{
name: "ChallengerIncorrectAtLastTraceIndex",
rootClaimAlphabet: disputegame.CorrectAlphabet,
otherAlphabet: "abcdefghijklmnoz",
expectedResult: disputegame.StatusDefenderWins,
expectStep: false,
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
InitParallel(t)
ctx := context.Background()
sys, l1Client := startL1OnlySystem(t)
t.Cleanup(sys.Close)
gameDuration := 24 * time.Hour
disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, l1Client, uint64(gameDuration.Seconds()))
game := disputeGameFactory.StartAlphabetGame(ctx, test.rootClaimAlphabet)
require.NotNil(t, game)
game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "Defender", func(c *config.Config) {
c.TxMgrConfig.PrivateKey = e2eutils.EncodePrivKeyToString(sys.cfg.Secrets.Mallory)
})
game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "Challenger", func(c *config.Config) {
c.AgreeWithProposedOutput = true // Agree with the proposed output, so disagree with the root claim
c.AlphabetTrace = test.otherAlphabet
c.TxMgrConfig.PrivateKey = e2eutils.EncodePrivKeyToString(sys.cfg.Secrets.Alice)
})
// Wait for a claim at the maximum depth that has been countered to indicate we're ready to resolve the game
game.WaitForClaimAtMaxDepth(ctx, test.expectStep)
sys.TimeTravelClock.AdvanceTime(gameDuration)
require.NoError(t, utils.WaitNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, test.expectedResult)
})
}
}
func startL1OnlySystem(t *testing.T) (*System, *ethclient.Client) {
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L1BlockTime = 1
delete(cfg.Nodes, "verifier")
delete(cfg.Nodes, "sequencer")
cfg.SupportL1TimeTravel = true
sys, err := cfg.Start()
require.Nil(t, err, "Error starting up system")
defer sys.Close()
l1Client := sys.Clients["l1"]
preTravel, err := l1Client.BlockByNumber(context.Background(), nil)
require.NoError(t, err)
sys.TimeTravelClock.AdvanceTime(24 * time.Hour)
// Check that the L1 chain reaches the new time reasonably quickly (ie without taking a week)
// It should be able to jump straight to the new time with just a single block
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
defer cancel()
err = utils.WaitFor(ctx, time.Second, func() (bool, error) {
postTravel, err := l1Client.BlockByNumber(context.Background(), nil)
if err != nil {
return false, err
}
diff := time.Duration(postTravel.Time()-preTravel.Time()) * time.Second
return diff.Hours() > 23, nil
})
require.NoError(t, err)
return sys, sys.Clients["l1"]
}
......@@ -616,6 +616,10 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
}
}
// Don't start batch submitter and proposer if there's no sequencer.
if sys.RollupNodes["sequencer"] == nil {
return sys, nil
}
// L2Output Submitter
sys.L2OutputSubmitter, err = l2os.NewL2OutputSubmitterFromCLIConfig(l2os.CLIConfig{
L1EthRpc: sys.Nodes["l1"].WSEndpoint(),
......