Commit cc5bd2b3 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into qbzzt/230712-docs-opstack-sdk-supported-chains

parents c0413f12 3ffebc90
---
'@eth-optimism/chain-mon': patch
---
Update import path for artifact
{
"$schema": "https://unpkg.com/@changesets/config@1.6.0/schema.json",
"changelog": "@changesets/cli/changelog",
"$schema": "https://unpkg.com/@changesets/config@2.3.0/schema.json",
"changelog": ["@changesets/changelog-github", { "repo": "ethereum-optimism/optimism" }],
"commit": false,
"linked": [],
"fixed": [],
"linked": [["@eth-optimism/contracts-bedrock", "@eth-optimism/contracts-ts"]],
"access": "public",
"baseBranch": "master",
"baseBranch": "develop",
"updateInternalDependencies": "patch",
"ignore": []
}
---
'@eth-optimism/contracts-periphery': patch
'@eth-optimism/contracts-bedrock': patch
'@eth-optimism/fault-detector': patch
'@eth-optimism/core-utils': patch
......
---
'@eth-optimism/contracts-bedrock': minor
---
Migrate contracts periphery into bedrock
---
'@eth-optimism/sdk': minor
---
Added to and from block filters to several methods in CrossChainMessenger
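For readers unfamiliar with the SDK change described above, here is a minimal, hedged TypeScript sketch of how such block filters might be used; the option names (`fromBlock`/`toBlock`), chain IDs, address, and RPC environment variables are illustrative assumptions based on the changeset description, not verified against the SDK source.

```ts
// Hypothetical sketch: limiting a CrossChainMessenger query to a block window.
// Option names and constructor fields below are assumptions for illustration.
import { providers } from 'ethers'
import { CrossChainMessenger } from '@eth-optimism/sdk'

const l1Provider = new providers.JsonRpcProvider(process.env.L1_RPC_URL)
const l2Provider = new providers.JsonRpcProvider(process.env.L2_RPC_URL)

const messenger = new CrossChainMessenger({
  l1ChainId: 5,   // Goerli, illustrative
  l2ChainId: 420, // OP Goerli, illustrative
  l1SignerOrProvider: l1Provider,
  l2SignerOrProvider: l2Provider,
})

async function main() {
  // Scan only a bounded block range instead of the full chain history.
  const deposits = await messenger.getDepositsByAddress(
    '0x0000000000000000000000000000000000000000', // example address
    { fromBlock: 9_000_000, toBlock: 9_100_000 }   // assumed filter options
  )
  console.log(`found ${deposits.length} deposits in range`)
}

main().catch(console.error)
```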
This diff is collapsed.
......@@ -2,11 +2,9 @@
/packages/common-ts @ethereum-optimism/typescript-reviewers
/packages/contracts @ethereum-optimism/contract-reviewers
/packages/contracts-bedrock @ethereum-optimism/contract-reviewers
/packages/contracts-periphery @ethereum-optimism/contract-reviewers
/packages/core-utils @ethereum-optimism/legacy-reviewers
/packages/chain-mon @smartcontracts
/packages/fault-detector @ethereum-optimism/devxpod
/packages/hardhat-deploy-config @ethereum-optimism/legacy-reviewers
/packages/replica-healthcheck @ethereum-optimism/legacy-reviewers
/packages/sdk @ethereum-optimism/devxpod
/packages/atst @ethereum-optimism/devxpod
......
name: Setup
description: Common setup steps used by our workflows
runs:
using: composite
steps:
- name: Setup pnpm
uses: pnpm/action-setup@v2
with:
version: 8.6.5
- name: Setup node 16.x
uses: actions/setup-node@v3
with:
node-version-file: .nvmrc
registry-url: https://registry.npmjs.org
cache: pnpm
- name: Setup foundry
uses: foundry-rs/foundry-toolchain@v1
- name: Install node dependencies
shell: bash
run: pnpm install --frozen-lockfile
- name: Derive appropriate SHAs for base and head for `nx affected` commands
uses: nrwl/nx-set-shas@v3
with:
main-branch-name: "develop"
- run: |
echo "nx using following shas:"
echo "BASE: ${{ env.NX_BASE }}"
echo "HEAD: ${{ env.NX_HEAD }}"
shell: bash
name: Publish Packages (canary)
name: Publish Docker images (canary)
on:
# enable users to manually trigger with workflow_dispatch
......@@ -31,48 +31,6 @@ jobs:
# This makes Actions fetch all Git history so that Changesets can generate changelogs with the correct commits
fetch-depth: 0
- name: Set up pnpm
uses: pnpm/action-setup@v2
with:
version: 8.6.5
- name: Setup Node.js 16.x
uses: actions/setup-node@master
with:
node-version: 16.x
cache: pnpm
- name: Install Dependencies
run: pnpm install --frozen-lockfile
- name: Install Foundry
uses: foundry-rs/foundry-toolchain@v1
with:
version: nightly
- name: Build
run: npx nx run-many --target=build --skip-nx-cache
- name: Setup Canary Snapshot
run: pnpm changeset version --snapshot
- name: Publish To NPM
uses: changesets/action@v1
id: changesets
with:
createGithubReleases: false
publish: pnpm changeset publish --tag canary
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
# If a release was executed, unbundle the publishedPackages into specific
# job outputs
- name: Get version tags from each published version
id: packages
run: |
node ops/scripts/ci-versions.js ${{ toJSON(steps.changesets.outputs.publishedPackages) }}
- name: Docker Image Name
id: docker-image-name
run: |
......
name: Release snapshot
# Releases a snapshot release when new commits merge to develop.
# This ensures the release process is working as expected and always gives us a release.
# We should move to a single branch in the near future.
on:
workflow_dispatch:
push:
branches:
- develop
jobs:
snapshot-snapshot:
name: Publish snapshot release to npm
if: github.repository == 'ethereum-optimism/optimism'
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
submodules: recursive
fetch-depth: 0
- name: Setup
uses: ./.github/actions/setup
- name: Set deployment token
run: npm config set '//registry.npmjs.org/:_authToken' "${NPM_TOKEN}"
env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Publish snapshots
uses: seek-oss/changesets-snapshot@v0
with:
pre-publish: pnpm nx run-many --target=build --skip-nx-cache
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
name: Release
name: Release and version
on:
workflow_dispatch:
push:
branches:
- master
- develop
# Always wait for previous release to finish before releasing again
concurrency: ${{ github.workflow }}-${{ github.ref }}
......@@ -12,6 +13,7 @@ jobs:
release:
name: Release
runs-on: ubuntu-latest
if: github.repository == 'ethereum-optimism/optimism'
# map the step outputs to job outputs
outputs:
fault-detector: ${{ steps.packages.outputs.fault-detector }}
......@@ -21,32 +23,23 @@ jobs:
replica-healthcheck: ${{ steps.packages.outputs.replica-healthcheck }}
op-exporter: ${{ steps.packages.outputs.op-exporter }}
endpoint-monitor: ${{ steps.packages.outputs.endpoint-monitor }}
# Permissions necessary for Changesets to push a new branch and open PRs
# (for automated Version Packages PRs), and request the JWT for provenance.
# More info: https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings
permissions:
contents: write
pull-requests: write
id-token: write
steps:
- name: Checkout Repo
uses: actions/checkout@master
uses: actions/checkout@v3
with:
# This makes Actions fetch all Git history so that Changesets can generate changelogs with the correct commits
fetch-depth: 0
ref: "develop"
- name: Set up pnpm
uses: pnpm/action-setup@v2
with:
version: 8.6.5
- name: Setup Node.js 16.x
uses: actions/setup-node@master
with:
node-version: 16.x
cache: pnpm
- name: Install Dependencies
run: pnpm install --frozen-lockfile
- name: Install Foundry
uses: foundry-rs/foundry-toolchain@v1
with:
version: nightly
- name: Setup
uses: ./.github/actions/setup
# Makes a PR to publish the changesets that, when
# merged, will publish to npm
......@@ -56,7 +49,8 @@ jobs:
id: changesets
with:
createGithubReleases: false
publish: pnpm release
publish: pnpm release:publish
version: pnpm release:version
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
......
......@@ -18,13 +18,14 @@ on:
required: true
type: choice
options:
- ci-builder
- fault-detector
- indexer
- op-node
- op-batcher
- op-proposer
- op-ufm
- proxyd
- indexer
- fault-detector
- ci-builder
prerelease:
description: Increment major/minor/patch as prerelease?
required: false
......
......@@ -17,11 +17,6 @@ dist
artifacts
cache
packages/contracts-periphery/coverage*
packages/contracts-periphery/@openzeppelin*
packages/contracts-periphery/hardhat*
packages/contracts-periphery/forge-artifacts*
packages/contracts-bedrock/deployments/devnetL1
packages/contracts-bedrock/deployments/anvil
......
[submodule "packages/contracts-periphery/lib/multicall"]
path = packages/contracts-periphery/lib/multicall
url = https://github.com/mds1/multicall
[submodule "lib/multicall"]
branch = v3.1.0
......@@ -16,10 +16,6 @@
"directory": "packages/contracts",
"changeProcessCWD": true
},
{
"directory": "packages/contracts-periphery",
"changeProcessCWD": true
},
{
"directory": "packages/chain-mon",
"changeProcessCWD": true
......
......@@ -52,7 +52,6 @@ Refer to the Directory Structure section below to understand which packages are
├── <a href="./packages">packages</a>
│ ├── <a href="./packages/common-ts">common-ts</a>: Common tools for building apps in TypeScript
│ ├── <a href="./packages/contracts-bedrock">contracts-bedrock</a>: Bedrock smart contracts.
│ ├── <a href="./packages/contracts-periphery">contracts-periphery</a>: Peripheral contracts for Optimism
│ ├── <a href="./packages/core-utils">core-utils</a>: Low-level utilities that make building Optimism easier
│ ├── <a href="./packages/chain-mon">chain-mon</a>: Chain monitoring services
│ ├── <a href="./packages/fault-detector">fault-detector</a>: Service for detecting Sequencer faults
......@@ -79,7 +78,6 @@ Refer to the Directory Structure section below to understand which packages are
~~ Pre-BEDROCK ~~
├── <a href="./packages">packages</a>
│ ├── <a href="./packages/common-ts">common-ts</a>: Common tools for building apps in TypeScript
│ ├── <a href="./packages/contracts-periphery">contracts-periphery</a>: Peripheral contracts for Optimism
│ ├── <a href="./packages/core-utils">core-utils</a>: Low-level utilities that make building Optimism easier
│ ├── <a href="./packages/chain-mon">chain-mon</a>: Chain monitoring services
│ ├── <a href="./packages/fault-detector">fault-detector</a>: Service for detecting Sequencer faults
......
......@@ -11,3 +11,4 @@ state.json
*.json
*.pprof
*.out
bin
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
VERSION := v0.0.0
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
LDFLAGSSTRING +=-X main.GitDate=$(GITDATE)
LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-program/version.Version=$(VERSION)
LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-program/version.Meta=$(VERSION_META)
LDFLAGS := -ldflags "$(LDFLAGSSTRING)"
cannon:
env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/cannon .
clean:
rm -rf bin
elf:
make -C ./example elf
test: elf
go test -v ./...
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is"
.PHONY: \
cannon \
clean \
test \
lint
......@@ -26,11 +26,11 @@ make op-program # build
# Switch back to cannon, and build the CLI
cd ../cannon
go build -o cannon .
make cannon
# Transform MIPS op-program client binary into first VM state.
# This outputs state.json (VM state) and meta.json (for debug symbols).
./cannon load-elf --path=../op-program/bin/op-program-client.elf
./bin/cannon load-elf --path=../op-program/bin/op-program-client.elf
# Run cannon emulator (with example inputs)
# Note that the server-mode op-program command is passed into cannon (after the --),
......@@ -39,7 +39,7 @@ go build -o cannon .
# Note:
# - The L2 RPC is an archive L2 node on OP goerli.
# - The L1 RPC is a non-archive RPC, also change `--l1.rpckind` to reflect the correct L1 RPC type.
./cannon run
./bin/cannon run
--pprof.cpu
--info-at '%10000000'
--proof-at never
......@@ -62,7 +62,7 @@ go build -o cannon .
# Add --proof-at '=12345' (or pick other pattern, see --help)
# to pick a step to build a proof for (e.g. exact step, every N steps, etc.)
# Also see `./cannon run --help` for more options
# Also see `./bin/cannon run --help` for more options
```
## Contracts
......
......@@ -88,6 +88,12 @@ type Proof struct {
Pre common.Hash `json:"pre"`
Post common.Hash `json:"post"`
StateData hexutil.Bytes `json:"state-data"`
ProofData hexutil.Bytes `json:"proof-data"`
OracleKey hexutil.Bytes `json:"oracle-key,omitempty"`
OracleValue hexutil.Bytes `json:"oracle-value,omitempty"`
StepInput hexutil.Bytes `json:"step-input"`
OracleInput hexutil.Bytes `json:"oracle-input"`
}
......@@ -314,6 +320,8 @@ func Run(ctx *cli.Context) error {
Step: step,
Pre: preStateHash,
Post: postStateHash,
StateData: witness.State,
ProofData: witness.MemProof,
StepInput: witness.EncodeStepInput(),
}
if witness.HasPreimage() {
......@@ -322,6 +330,8 @@ func Run(ctx *cli.Context) error {
return fmt.Errorf("failed to encode pre-image oracle input: %w", err)
}
proof.OracleInput = inp
proof.OracleKey = witness.PreimageKey[:]
proof.OracleValue = witness.PreimageValue
}
if err := writeJSON[*Proof](fmt.Sprintf(proofFmt, step), proof, true); err != nil {
return fmt.Errorf("failed to write proof data: %w", err)
......
......@@ -63,6 +63,8 @@ func TestEVM(t *testing.T) {
if strings.HasPrefix(f.Name(), "oracle") {
oracle = staticOracle(t, []byte("hello world"))
}
// Short-circuit early for exit_group.bin
exitGroup := f.Name() == "exit_group.bin"
env, evmState := NewEVMEnv(contracts, addrs)
env.Config.Tracer = tracer
......@@ -83,6 +85,9 @@ func TestEVM(t *testing.T) {
if us.state.PC == endAddr {
break
}
if exitGroup && us.state.Exited {
break
}
insn := state.Memory.GetMemory(state.PC)
t.Logf("step: %4d pc: 0x%08x insn: 0x%08x", state.Step, state.PC, insn)
......@@ -122,11 +127,17 @@ func TestEVM(t *testing.T) {
require.Equal(t, hexutil.Bytes(uniPost).String(), hexutil.Bytes(evmPost).String(),
"mipsevm produced different state than EVM")
}
require.Equal(t, uint32(endAddr), state.PC, "must reach end")
// inspect test result
done, result := state.Memory.GetMemory(baseAddrEnd+4), state.Memory.GetMemory(baseAddrEnd+8)
require.Equal(t, done, uint32(1), "must be done")
require.Equal(t, result, uint32(1), "must have success result")
if exitGroup {
require.NotEqual(t, uint32(endAddr), us.state.PC, "must not reach end")
require.True(t, us.state.Exited, "must set exited state")
require.Equal(t, uint8(1), us.state.ExitCode, "must exit with 1")
} else {
require.Equal(t, uint32(endAddr), state.PC, "must reach end")
// inspect test result
done, result := state.Memory.GetMemory(baseAddrEnd+4), state.Memory.GetMemory(baseAddrEnd+8)
require.Equal(t, done, uint32(1), "must be done")
require.Equal(t, result, uint32(1), "must have success result")
}
})
}
}
......
......@@ -8,10 +8,9 @@ md = Cs(CS_ARCH_MIPS, CS_MODE_32 + CS_MODE_BIG_ENDIAN)
def maketest(d, out):
with tempfile.NamedTemporaryFile() as nf:
path = "/Users/kafka/fun/mips/mips-gcc-4.8.1/bin/"
print("building", d, "->", out)
# which mips is go
ret = os.system("%s/mips-elf-as -defsym big_endian=1 -march=mips32r2 -o %s %s" % (path, nf.name, d))
ret = os.system("mips-linux-gnu-as -defsym big_endian=1 -march=mips32r2 -o %s %s" % (nf.name, d))
assert(ret == 0)
nf.seek(0)
elffile = ELFFile(nf)
......@@ -35,4 +34,4 @@ if __name__ == "__main__":
for d in os.listdir("test/"):
if not d.endswith(".asm"):
continue
maketest("test/"+d, "test/bin/"+(d.replace(".asm", ".bin")))
\ No newline at end of file
maketest("test/"+d, "test/bin/"+(d.replace(".asm", ".bin")))
.section .test, "x"
.balign 4
.set noreorder
.global test
.ent test
test:
li $v0, 4045
syscall
lui $t0, 0x4000
subu $v0, $v0, $t0
sltiu $v0, $v0, 1
# save results
lui $s0, 0xbfff # Load the base address 0xbffffff0
ori $s0, 0xfff0
ori $s1, $0, 1 # Prepare the 'done' status
sw $v0, 8($s0) # Set the test result
sw $s1, 4($s0) # Set 'done'
$done:
jr $ra
nop
.end test
.section .test, "x"
.balign 4
.set noreorder
.global test
.ent test
test:
li $v0, 4120
syscall
li $t0, 0x1
subu $v0, $v0, $t0
sltiu $v0, $v0, 1
# save results
lui $s0, 0xbfff # Load the base address 0xbffffff0
ori $s0, 0xfff0
ori $s1, $0, 1 # Prepare the 'done' status
sw $v0, 8($s0) # Set the test result
sw $s1, 4($s0) # Set 'done'
$done:
jr $ra
nop
.end test
.section .test, "x"
.balign 4
.set noreorder
.global test
.ent test
test:
li $a0, 1
li $v0, 4246
syscall
# Unreachable ....
# set test result to fail.
# Test runner should short-circuit before reaching this point.
li $v0, 0
# save results
lui $s0, 0xbfff # Load the base address 0xbffffff0
ori $s0, 0xfff0
ori $s1, $0, 1 # Prepare the 'done' status
sw $v0, 8($s0) # Set the test result
sw $s1, 4($s0) # Set 'done'
$done:
jr $ra
nop
.end test
.section .test, "x"
.balign 4
.set noreorder
.global test
.ent test
test:
# fcntl(0, 3)
li $v0, 4055
li $a0, 0x0
li $a1, 0x3
syscall
sltiu $v0, $v0, 1
# save results
lui $s0, 0xbfff # Load the base address 0xbffffff0
ori $s0, 0xfff0
ori $s1, $0, 1 # Prepare the 'done' status
sw $v0, 8($s0) # Set the test result
sw $s1, 4($s0) # Set 'done'
$done:
jr $ra
nop
.end test
.section .test, "x"
.balign 4
.set noreorder
.global test
.ent test
test:
li $v0, 4090
lui $a0, 0x3000
li $a1, 4096
syscall
lui $t0, 0x3000
subu $v0, $v0, $t0
sltiu $v0, $v0, 1
# save results
lui $s0, 0xbfff # Load the base address 0xbffffff0
ori $s0, 0xfff0
ori $s1, $0, 1 # Prepare the 'done' status
sw $v0, 8($s0) # Set the test result
sw $s1, 4($s0) # Set 'done'
$done:
jr $ra
nop
.end test
......@@ -69,6 +69,10 @@ $readloop:
addiu $t0, $t0, -1
bnez $t0, $readloop
nop
# reading the pre-image stream at EOF should have no effect
li $a1, 0x31000008
li $v0, 4003
syscall
# length at 0x31000000. We also check that the lower 32 bits are zero
lui $s1, 0x3100
......
.section .test, "x"
.balign 4
.set noreorder
.global test
.ent test
# load hash at 0x30001000
# 0x47173285 a8d7341e 5e972fc6 77286384 f802f8ef 42a5ec5f 03bbfa25 4cb01fad = keccak("hello world")
# 0x02173285 a8d7341e 5e972fc6 77286384 f802f8ef 42a5ec5f 03bbfa25 4cb01fad = keccak("hello world").key
test:
lui $s0, 0x3000
ori $s0, 0x1000
lui $t0, 0x0217
ori $t0, 0x3285
sw $t0, 0($s0)
lui $t0, 0xa8d7
ori $t0, 0x341e
sw $t0, 4($s0)
lui $t0, 0x5e97
ori $t0, 0x2fc6
sw $t0, 8($s0)
lui $t0, 0x7728
ori $t0, 0x6384
sw $t0, 0xc($s0)
lui $t0, 0xf802
ori $t0, 0xf8ef
sw $t0, 0x10($s0)
lui $t0, 0x42a5
ori $t0, 0xec5f
sw $t0, 0x14($s0)
lui $t0, 0x03bb
ori $t0, 0xfa25
sw $t0, 0x18($s0)
lui $t0, 0x4cb0
ori $t0, 0x1fad
sw $t0, 0x1c($s0)
# preimage request - write(fdPreimageWrite, preimageData, 32)
li $a0, 6
li $a1, 0x30001000
li $t0, 8
li $a2, 4
$writeloop:
li $v0, 4004
syscall
addiu $a1, $a1, 4
addiu $t0, $t0, -1
bnez $t0, $writeloop
nop
# preimage response to 0x30002000 - read(fdPreimageRead, addr, count)
# read preimage length to unaligned addr. This will read only up to the nearest aligned byte so we have to read again.
li $a0, 5
li $a1, 0x31000001
li $a2, 4
li $v0, 4003
syscall
li $a1, 0x31000004
li $v0, 4003
syscall
li $a1, 0x31000008
li $a2, 1
li $v0, 4003
syscall
# read the preimage data
li $a1, 0x31000009
li $t0, 11
$readloop:
li $v0, 4003
li $a2, 4
syscall
addu $a1, $a1, $v0
subu $t0, $t0, $v0
bnez $t0, $readloop
nop
# length at 0x31000001. We also check that the lower 32 bits are zero
li $s1, 0x31000001
lb $t0, 0($s1)
lb $t2, 1($s1)
sll $t2, $t2, 8
or $t0, $t0, $t2
lb $t2, 2($s1)
sll $t2, $t2, 16
or $t0, $t0, $t2
# assert len[0:3] == 0
sltiu $v0, $t0, 1
# assert len[4:8] == 0
addiu $s1, $s1, 3
lw $t1, 0($s1)
sltiu $v1, $t1, 1
and $v0, $v0, $v1
# assert len[8:9] == 11
addiu $s1, $s1, 4
lb $t2, 0($s1)
li $t4, 11
subu $t5, $t2, $t4
sltiu $v1, $t5, 1
and $v0, $v0, $v1
# data at 0x31000009
addiu $s1, $s1, 1
lb $t0, 0($s1)
lb $t2, 1($s1)
sll $t0, $t0, 8
or $t0, $t0, $t2
lb $t2, 2($s1)
sll $t0, $t0, 8
or $t0, $t0, $t2
lb $t2, 3($s1)
sll $t0, $t0, 8
or $t0, $t0, $t2
#lw $t0, 0($s1)
lui $t4, 0x6865
ori $t4, 0x6c6c
subu $t5, $t0, $t4
sltiu $v1, $t5, 1
and $v0, $v0, $v1
# save results
lui $s0, 0xbfff # Load the base address 0xbffffff0
ori $s0, 0xfff0
ori $s1, $0, 1 # Prepare the 'done' status
sw $v0, 8($s0) # Set the test result
sw $s1, 4($s0) # Set 'done'
$done:
jr $ra
nop
.end test
......@@ -35,6 +35,9 @@ func TestState(t *testing.T) {
if strings.HasPrefix(f.Name(), "oracle") {
oracle = staticOracle(t, []byte("hello world"))
}
// Short-circuit early for exit_group.bin
exitGroup := f.Name() == "exit_group.bin"
// TODO: currently tests are compiled as flat binary objects
// We can use more standard tooling to compile them to ELF files and remove maketests.py
fn := path.Join("open_mips_tests/test/bin", f.Name())
......@@ -57,14 +60,24 @@ func TestState(t *testing.T) {
if us.state.PC == endAddr {
break
}
if exitGroup && us.state.Exited {
break
}
_, err := us.Step(false)
require.NoError(t, err)
}
require.Equal(t, uint32(endAddr), us.state.PC, "must reach end")
// inspect test result
done, result := state.Memory.GetMemory(baseAddrEnd+4), state.Memory.GetMemory(baseAddrEnd+8)
require.Equal(t, done, uint32(1), "must be done")
require.Equal(t, result, uint32(1), "must have success result")
if exitGroup {
require.NotEqual(t, uint32(endAddr), us.state.PC, "must not reach end")
require.True(t, us.state.Exited, "must set exited state")
require.Equal(t, uint8(1), us.state.ExitCode, "must exit with 1")
} else {
require.Equal(t, uint32(endAddr), us.state.PC, "must reach end")
done, result := state.Memory.GetMemory(baseAddrEnd+4), state.Memory.GetMemory(baseAddrEnd+8)
// inspect test result
require.Equal(t, done, uint32(1), "must be done")
require.Equal(t, result, uint32(1), "must have success result")
}
})
}
}
......
......@@ -33,7 +33,6 @@ flag_management:
- name: common-ts-tests
- name: contracts-tests
- name: core-utils-tests
- name: contracts-periphery-tests
- name: dtl-tests
- name: chain-mon-tests
- name: fault-detector-tests
......
......@@ -30,7 +30,7 @@ You’ll need the following software installed to follow this tutorial:
- [Git](https://git-scm.com/)
- [Go](https://go.dev/)
- [Node](https://nodejs.org/en/)
- [Yarn](https://classic.yarnpkg.com/lang/en/docs/install/)
- [pnpm](https://pnpm.io/installation)
- [Foundry](https://github.com/foundry-rs/foundry#installation)
- [Make](https://linux.die.net/man/1/make)
- [jq](https://github.com/jqlang/jq)
......@@ -44,7 +44,7 @@ This tutorial was checked on:
| git, curl, jq, and make | OS default | `sudo apt install -y git curl make jq` |
| Go | 1.20 | `sudo apt update` <br> `wget https://go.dev/dl/go1.20.linux-amd64.tar.gz` <br> `tar xvzf go1.20.linux-amd64.tar.gz` <br> `sudo cp go/bin/go /usr/bin/go` <br> `sudo mv go /usr/lib` <br> `echo export GOROOT=/usr/lib/go >> ~/.bashrc`
| Node | 16.19.0 | `curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -` <br> `sudo apt-get install -y nodejs npm`
| yarn | 1.22.19 | `sudo npm install -g yarn`
| pnpm | 8.5.6 | `sudo npm install -g pnpm`
| Foundry | 0.2.0 | `yarn install:foundry`
## Build the Source Code
......@@ -69,14 +69,14 @@ We’re going to be spinning up an EVM Rollup from the OP Stack source code. Yo
1. Install required modules. This is a slow process, while it is running you can already start building `op-geth`, as shown below.
```bash
yarn install
pnpm install
```
1. Build the various packages inside of the Optimism Monorepo.
```bash
make op-node op-batcher op-proposer
yarn build
pnpm build
```
### Build op-geth
......@@ -278,7 +278,7 @@ We’ve set up the L1 side of things, but now we need to set up the L2 side of t
You should then see the `genesis.json` and `rollup.json` files inside the `op-node` package.
1. Next, generate the `jwt.txt` file with the following command:
```bash
openssl rand -hex 32 > jwt.txt
......@@ -307,24 +307,6 @@ We’re almost ready to run our chain! Now we just need to run a few commands to
mkdir datadir
```
1. Put a password file into the data directory folder:
```bash
echo "pwd" > datadir/password
```
1. Put the `Sequencer` private key into the data directory folder (don’t include a “0x” prefix):
```bash
echo "<SEQUENCER KEY HERE>" > datadir/block-signer-key
```
1. Import the key into `op-geth`:
```bash
./build/bin/geth account import --datadir=datadir --password=datadir/password datadir/block-signer-key
```
1. Next we need to initialize `op-geth` with the genesis file we generated and copied earlier:
```bash
......@@ -344,7 +326,6 @@ Set these environment variables for the configuration
| Variable | Value |
| -------------- | -
| `SEQ_ADDR` | Address of the `Sequencer` account
| `SEQ_KEY` | Private key of the `Sequencer` account
| `BATCHER_KEY` | Private key of the `Batcher` account, which should have at least 1 ETH
| `PROPOSER_KEY` | Private key of the `Proposer` account
......@@ -360,32 +341,27 @@ Run `op-geth` with the following commands.
cd ~/op-geth
./build/bin/geth \
--datadir ./datadir \
--http \
--http.corsdomain="*" \
--http.vhosts="*" \
--http.addr=0.0.0.0 \
--http.api=web3,debug,eth,txpool,net,engine \
--ws \
--ws.addr=0.0.0.0 \
--ws.port=8546 \
--ws.origins="*" \
--ws.api=debug,eth,txpool,net,engine \
--syncmode=full \
--gcmode=archive \
--nodiscover \
--maxpeers=0 \
--networkid=42069 \
--authrpc.vhosts="*" \
--authrpc.addr=0.0.0.0 \
--authrpc.port=8551 \
--authrpc.jwtsecret=./jwt.txt \
--rollup.disabletxpoolgossip=true \
--password=./datadir/password \
--allow-insecure-unlock \
--mine \
--miner.etherbase=$SEQ_ADDR \
--unlock=$SEQ_ADDR
--datadir ./datadir \
--http \
--http.corsdomain="*" \
--http.vhosts="*" \
--http.addr=0.0.0.0 \
--http.api=web3,debug,eth,txpool,net,engine \
--ws \
--ws.addr=0.0.0.0 \
--ws.port=8546 \
--ws.origins="*" \
--ws.api=debug,eth,txpool,net,engine \
--syncmode=full \
--gcmode=archive \
--nodiscover \
--maxpeers=0 \
--networkid=42069 \
--authrpc.vhosts="*" \
--authrpc.addr=0.0.0.0 \
--authrpc.port=8551 \
--authrpc.jwtsecret=./jwt.txt \
--rollup.disabletxpoolgossip=true
```
And `op-geth` should be running! You should see some output, but you won’t see any blocks being created yet because `op-geth` is driven by the `op-node`. We’ll need to get that running next.
......@@ -443,7 +419,7 @@ cd ~/optimism/op-node
./bin/op-node \
--l2=http://localhost:8551 \
--l2.jwt-secret=./jwt.txt \
--sequencer.enabled \
--sequencer.l1-confs=3 \
--verifier.l1-confs=3 \
......
......@@ -3,21 +3,27 @@ module github.com/ethereum-optimism/optimism
go 1.19
require (
github.com/BurntSushi/toml v1.3.2
github.com/btcsuite/btcd v0.23.3
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum/go-ethereum v1.11.6
github.com/fsnotify/fsnotify v1.6.0
github.com/go-chi/chi/v5 v5.0.0
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/go-cmp v0.5.9
github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/hashicorp/golang-lru/v2 v2.0.1
github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c
github.com/ipfs/go-datastore v0.6.0
github.com/ipfs/go-ds-leveldb v0.5.0
github.com/jackc/pgtype v1.14.0
github.com/lib/pq v1.10.9
github.com/libp2p/go-libp2p v0.25.1
github.com/libp2p/go-libp2p-pubsub v0.9.3
github.com/libp2p/go-libp2p-testing v0.12.0
......@@ -29,13 +35,17 @@ require (
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.7.0
github.com/prometheus/client_golang v1.14.0
github.com/rs/cors v1.8.2
github.com/stretchr/testify v1.8.1
github.com/urfave/cli v1.22.2
github.com/urfave/cli/v2 v2.25.7
golang.org/x/crypto v0.6.0
golang.org/x/crypto v0.8.0
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb
golang.org/x/sync v0.1.0
golang.org/x/term v0.6.0
golang.org/x/term v0.7.0
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af
gorm.io/driver/postgres v1.5.2
gorm.io/gorm v1.25.2
)
require (
......@@ -81,7 +91,6 @@ require (
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
......@@ -93,9 +102,15 @@ require (
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/ipfs/go-cid v0.3.2 // indirect
github.com/ipfs/go-log/v2 v2.5.1 // indirect
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/klauspost/compress v1.15.15 // indirect
github.com/klauspost/cpuid/v2 v2.2.3 // indirect
github.com/koron/go-ssdp v0.0.3 // indirect
......@@ -147,7 +162,6 @@ require (
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/rs/cors v1.8.2 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
......@@ -165,9 +179,9 @@ require (
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/net v0.9.0 // indirect
golang.org/x/sys v0.7.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/tools v0.7.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
......
This diff is collapsed.
FROM golang:1.19.0-alpine3.15 as builder
FROM --platform=$BUILDPLATFORM golang:1.19.9-alpine3.16 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
COPY ./indexer /go/indexer
COPY ./.git /go/.git
COPY ./indexer/go.mod /go/indexer/go.mod
COPY ./indexer/go.sum /go/indexer/go.sum
# build indexer with the shared go.mod & go.sum files
COPY ./indexer /app/indexer
COPY ./op-bindings /app/op-bindings
COPY ./op-service /app/op-service
COPY ./op-node /app/op-node
COPY ./go.mod /app/go.mod
COPY ./go.sum /app/go.sum
COPY ./.git /app/.git
WORKDIR /go/indexer
RUN make
WORKDIR /app/indexer
FROM alpine:3.15
RUN go mod download
COPY --from=builder /go/indexer/indexer /usr/local/bin
RUN make indexer
FROM alpine:3.16
COPY --from=builder /app/indexer/indexer /usr/local/bin
CMD ["indexer"]
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
GITVERSION := $(shell cat package.json | jq .version)
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
LDFLAGSSTRING +=-X main.GitDate=$(GITDATE)
LDFLAGSSTRING +=-X main.GitVersion=$(GITVERSION)
LDFLAGS := -ldflags "$(LDFLAGSSTRING)"
indexer:
......
module github.com/ethereum-optimism/optimism/indexer
go 1.19
replace github.com/ethereum/go-ethereum v1.11.6 => github.com/ethereum-optimism/op-geth v1.101106.0-rc.2
require (
github.com/BurntSushi/toml v1.3.0
github.com/ethereum-optimism/optimism v1.0.9
github.com/ethereum/go-ethereum v1.11.6
github.com/go-chi/chi/v5 v5.0.8
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0
github.com/jackc/pgtype v1.14.0
github.com/lib/pq v1.10.4
github.com/prometheus/client_golang v1.14.0
github.com/rs/cors v1.8.2
github.com/stretchr/testify v1.8.1
github.com/urfave/cli v1.22.9
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
gorm.io/driver/postgres v1.5.2
gorm.io/gorm v1.25.1
)
require (
github.com/DataDog/zstd v1.5.2 // indirect
github.com/VictoriaMetrics/fastcache v1.10.0 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd v0.23.3 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
github.com/btcsuite/btcd/btcutil v1.1.0 // indirect
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cockroachdb/errors v1.9.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect
github.com/cockroachdb/redact v1.1.3 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.1.0 // indirect
github.com/elastic/gosigar v0.14.2 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 // indirect
github.com/fjl/memsize v0.0.1 // indirect
github.com/flynn/noise v1.0.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/golang/mock v1.6.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-bexpr v0.1.11 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect
github.com/huin/goupnp v1.1.0 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/ipfs/go-cid v0.3.2 // indirect
github.com/ipfs/go-datastore v0.6.0 // indirect
github.com/ipfs/go-log v1.0.5 // indirect
github.com/ipfs/go-log/v2 v2.5.1 // indirect
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/klauspost/compress v1.15.15 // indirect
github.com/klauspost/cpuid/v2 v2.2.3 // indirect
github.com/koron/go-ssdp v0.0.3 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-cidranger v1.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
github.com/libp2p/go-libp2p v0.25.1 // indirect
github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect
github.com/libp2p/go-libp2p-pubsub v0.9.0 // indirect
github.com/libp2p/go-libp2p-testing v0.12.0 // indirect
github.com/libp2p/go-mplex v0.7.0 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-nat v0.1.0 // indirect
github.com/libp2p/go-netroute v0.2.1 // indirect
github.com/libp2p/go-reuseport v0.2.0 // indirect
github.com/libp2p/go-yamux/v4 v4.0.0 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/dns v1.1.50 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.1 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr v0.8.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.1.1 // indirect
github.com/multiformats/go-multicodec v0.8.1 // indirect
github.com/multiformats/go-multihash v0.2.1 // indirect
github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/ginkgo/v2 v2.8.1 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-18 v0.2.0 // indirect
github.com/quic-go/qtls-go1-19 v0.2.0 // indirect
github.com/quic-go/qtls-go1-20 v0.1.0 // indirect
github.com/quic-go/quic-go v0.32.0 // indirect
github.com/quic-go/webtransport-go v0.5.1 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.5.0 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/dig v1.16.1 // indirect
go.uber.org/fx v1.19.1 // indirect
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/crypto v0.8.0 // indirect
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.9.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.7.0 // indirect
golang.org/x/term v0.7.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
golang.org/x/tools v0.7.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.1.7 // indirect
nhooyr.io/websocket v1.8.7 // indirect
)
package integration_tests
import (
"context"
"database/sql"
"encoding/json"
"fmt"
......@@ -14,10 +13,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/indexer/db"
......@@ -29,6 +25,7 @@ import (
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/withdrawals"
"github.com/ethereum-optimism/optimism/op-service/client/utils"
_ "github.com/lib/pq"
)
......@@ -47,14 +44,12 @@ func TestBedrockIndexer(t *testing.T) {
fromAddr := cfg.Secrets.Addresses().Alice
// wait a couple of blocks
require.NoError(t, e2eutils.WaitBlock(e2eutils.TimeoutCtx(t, 30*time.Second), l2Client, 10))
require.NoError(t, utils.WaitBlock(e2eutils.TimeoutCtx(t, 30*time.Second), l2Client, 10))
l1SB, err := bindings.NewL1StandardBridge(predeploys.DevL1StandardBridgeAddr, l1Client)
require.NoError(t, err)
l2SB, err := bindings.NewL2StandardBridge(predeploys.L2StandardBridgeAddr, l2Client)
require.NoError(t, err)
portal, err := bindings.NewOptimismPortal(predeploys.DevOptimismPortalAddr, l1Client)
require.NoError(t, err)
l1Opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.Alice, cfg.L1ChainIDBig())
require.NoError(t, err)
l2Opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.Alice, cfg.L2ChainIDBig())
......@@ -104,7 +99,7 @@ func TestBedrockIndexer(t *testing.T) {
l1Opts.Value = big.NewInt(params.Ether)
depTx, err := l1SB.DepositETH(l1Opts, 200_000, nil)
require.NoError(t, err)
depReceipt, err := e2eutils.WaitReceiptOK(e2eutils.TimeoutCtx(t, 10*time.Second), l1Client, depTx.Hash())
depReceipt, err := utils.WaitReceiptOK(e2eutils.TimeoutCtx(t, 10*time.Second), l1Client, depTx.Hash())
require.NoError(t, err)
require.Greaterf(t, len(depReceipt.Logs), 0, "must have logs")
var l2Hash common.Hash
......@@ -119,12 +114,12 @@ func TestBedrockIndexer(t *testing.T) {
l2Hash = tx.Hash()
}
require.NotEqual(t, common.Hash{}, l2Hash)
_, err = e2eutils.WaitReceiptOK(e2eutils.TimeoutCtx(t, 15*time.Second), l2Client, l2Hash)
_, err = utils.WaitReceiptOK(e2eutils.TimeoutCtx(t, 15*time.Second), l2Client, l2Hash)
require.NoError(t, err)
// Poll for indexer deposit
var depPage *db.PaginatedDeposits
require.NoError(t, e2eutils.WaitFor(e2eutils.TimeoutCtx(t, 30*time.Second), 100*time.Millisecond, func() (bool, error) {
require.NoError(t, utils.WaitFor(e2eutils.TimeoutCtx(t, 30*time.Second), 100*time.Millisecond, func() (bool, error) {
res := new(db.PaginatedDeposits)
err := getJSON(makeURL(fmt.Sprintf("v1/deposits/%s", fromAddr)), res)
if err != nil {
......@@ -155,11 +150,11 @@ func TestBedrockIndexer(t *testing.T) {
l2Opts.Value = big.NewInt(0.5 * params.Ether)
wdTx, err := l2SB.Withdraw(l2Opts, predeploys.LegacyERC20ETHAddr, big.NewInt(0.5*params.Ether), 0, nil)
require.NoError(t, err)
wdReceipt, err := e2eutils.WaitReceiptOK(e2eutils.TimeoutCtx(t, 30*time.Second), l2Client, wdTx.Hash())
wdReceipt, err := utils.WaitReceiptOK(e2eutils.TimeoutCtx(t, 30*time.Second), l2Client, wdTx.Hash())
require.NoError(t, err)
var wdPage *db.PaginatedWithdrawals
require.NoError(t, e2eutils.WaitFor(e2eutils.TimeoutCtx(t, 30*time.Second), 100*time.Millisecond, func() (bool, error) {
require.NoError(t, utils.WaitFor(e2eutils.TimeoutCtx(t, 30*time.Second), 100*time.Millisecond, func() (bool, error) {
res := new(db.PaginatedWithdrawals)
err := getJSON(makeURL(fmt.Sprintf("v1/withdrawals/%s", fromAddr)), res)
if err != nil {
......@@ -189,50 +184,11 @@ func TestBedrockIndexer(t *testing.T) {
require.Equal(t, db.ETHL2Token, withdrawal.L2Token)
require.NotEmpty(t, withdrawal.GUID)
finBlockNum, err := withdrawals.WaitForFinalizationPeriod(
e2eutils.TimeoutCtx(t, time.Minute),
l1Client,
predeploys.DevOptimismPortalAddr,
wdReceipt.BlockNumber,
)
require.NoError(t, err)
finHeader, err := l2Client.HeaderByNumber(context.Background(), big.NewInt(int64(finBlockNum)))
require.NoError(t, err)
rpcClient, err := rpc.Dial(sys.Nodes["sequencer"].HTTPEndpoint())
require.NoError(t, err)
proofCl := gethclient.New(rpcClient)
receiptCl := ethclient.NewClient(rpcClient)
oracle, err := bindings.NewL2OutputOracleCaller(predeploys.DevL2OutputOracleAddr, l1Client)
require.Nil(t, err)
wParams, err := withdrawals.ProveWithdrawalParameters(context.Background(), proofCl, receiptCl, wdTx.Hash(), finHeader, oracle)
require.NoError(t, err)
l1Opts.Value = big.NewInt(0)
withdrawalTx := bindings.TypesWithdrawalTransaction{
Nonce: wParams.Nonce,
Sender: wParams.Sender,
Target: wParams.Target,
Value: wParams.Value,
GasLimit: wParams.GasLimit,
Data: wParams.Data,
}
// Prove our withdrawal
proveTx, err := portal.ProveWithdrawalTransaction(
l1Opts,
withdrawalTx,
wParams.L2OutputIndex,
wParams.OutputRootProof,
wParams.WithdrawalProof,
)
require.NoError(t, err)
proveReceipt, err := e2eutils.WaitReceiptOK(e2eutils.TimeoutCtx(t, time.Minute), l1Client, proveTx.Hash())
require.NoError(t, err)
wdParams, proveReceipt := op_e2e.ProveWithdrawal(t, cfg, l1Client, sys.Nodes["sequencer"], cfg.Secrets.Alice, wdReceipt)
wdPage = nil
require.NoError(t, e2eutils.WaitFor(e2eutils.TimeoutCtx(t, 30*time.Second), 100*time.Millisecond, func() (bool, error) {
require.NoError(t, utils.WaitFor(e2eutils.TimeoutCtx(t, 30*time.Second), 100*time.Millisecond, func() (bool, error) {
res := new(db.PaginatedWithdrawals)
err := getJSON(makeURL(fmt.Sprintf("v1/withdrawals/%s", fromAddr)), res)
if err != nil {
......@@ -251,27 +207,13 @@ func TestBedrockIndexer(t *testing.T) {
require.Equal(t, proveReceipt.TxHash.String(), *wd.BedrockProvenTxHash)
require.Nil(t, wd.BedrockFinalizedTxHash)
// Wait for the finalization period to elapse
_, err = withdrawals.WaitForFinalizationPeriod(
e2eutils.TimeoutCtx(t, time.Minute),
l1Client,
predeploys.DevOptimismPortalAddr,
finHeader.Number,
)
require.NoError(t, err)
// Send our finalize withdrawal transaction
finTx, err := portal.FinalizeWithdrawalTransaction(
l1Opts,
withdrawalTx,
)
require.NoError(t, err)
finReceipt, err := e2eutils.WaitReceiptOK(e2eutils.TimeoutCtx(t, time.Minute), l1Client, finTx.Hash())
require.NoError(t, err)
// Finalize withdrawal
err = withdrawals.WaitForFinalizationPeriod(e2eutils.TimeoutCtx(t, 30*time.Second), l1Client, predeploys.DevOptimismPortalAddr, proveReceipt.BlockNumber)
require.Nil(t, err)
finReceipt := op_e2e.FinalizeWithdrawal(t, cfg, l1Client, cfg.Secrets.Alice, wdReceipt, wdParams)
wdPage = nil
require.NoError(t, e2eutils.WaitFor(e2eutils.TimeoutCtx(t, 30*time.Second), 100*time.Millisecond, func() (bool, error) {
require.NoError(t, utils.WaitFor(e2eutils.TimeoutCtx(t, 30*time.Second), 100*time.Millisecond, func() (bool, error) {
res := new(db.PaginatedWithdrawals)
err := getJSON(makeURL(fmt.Sprintf("v1/withdrawals/%s", fromAddr)), res)
if err != nil {
......
......@@ -7,7 +7,9 @@
"nx.json": "*",
"tsconfig.json": "*",
".foundryrc": "*",
".nvmrc": "*"
"pnpm.lock.yaml": "*",
".npmrc": "*",
".nvmrc": "*"
},
"tasksRunnerOptions": {
"default": {
......@@ -54,6 +56,9 @@
"inputs": ["default", "testing", "^production"],
"dependsOn": ["^build"]
},
"generate": {
"dependsOn": ["^build"]
},
"build:contracts": {
"inputs": [
"configsProject",
......
SHELL := /bin/bash
pkg := bindings
contracts-dir := ../packages/contracts-bedrock
monorepo-base := $(shell dirname $(realpath .))
contracts-dir := $(monorepo-base)/packages/contracts-bedrock
all: version mkdir bindings
......@@ -17,11 +18,12 @@ bindings: compile bindings-build
bindings-build:
go run ./gen/main.go \
-forge-artifacts ../packages/contracts-bedrock/forge-artifacts \
-forge-artifacts $(contracts-dir)/forge-artifacts \
-out ./bindings \
-contracts ./artifacts.json \
-source-maps MIPS,PreimageOracle \
-package $(pkg)
-package $(pkg) \
-monorepo-base $(monorepo-base)
mkdir:
mkdir -p $(pkg)
......
......@@ -28,3 +28,7 @@ $ make devtools
```
The geth docs for `abigen` can be found [here](https://geth.ethereum.org/docs/dapp/native-bindings).
## See also
TypeScript bindings are also generated in [@eth-optimism/contracts-ts](../packages/contracts-ts/)
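As a rough companion to the pointer above, a hedged TypeScript sketch of consuming those generated bindings; the export names (`optimismPortalABI`, `optimismPortalAddress`) and the chain-ID keying are assumptions about what `@eth-optimism/contracts-ts` exposes and may not match the package exactly.

```ts
// Hypothetical usage of the generated TypeScript bindings.
// Export names below are assumptions, not confirmed by this diff.
import { optimismPortalABI, optimismPortalAddress } from '@eth-optimism/contracts-ts'
import { Contract, providers } from 'ethers'

const provider = new providers.JsonRpcProvider(process.env.L1_RPC_URL)

// Assumes address exports are keyed by chain ID (1 = Ethereum mainnet).
const portal = new Contract(optimismPortalAddress[1], optimismPortalABI, provider)

async function main() {
  // Bedrock contracts expose a semver string via version().
  console.log('OptimismPortal version:', await portal.version())
}

main().catch(console.error)
```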
package ast
import (
"path/filepath"
"regexp"
"sort"
"strconv"
......@@ -33,7 +34,7 @@ type typeRemapping struct {
// inefficiency comes from replaceType, which performs a linear
// search of all replacements when performing substring matches of
// composite types.
func CanonicalizeASTIDs(in *solc.StorageLayout) *solc.StorageLayout {
func CanonicalizeASTIDs(in *solc.StorageLayout, monorepoBase string) *solc.StorageLayout {
lastId := uint(1000)
astIDRemappings := make(map[uint]uint)
typeRemappings := make(map[string]string)
......@@ -83,9 +84,18 @@ func CanonicalizeASTIDs(in *solc.StorageLayout) *solc.StorageLayout {
Types: make(map[string]solc.StorageLayoutType),
}
for _, slot := range in.Storage {
contract := slot.Contract
// Normalize the name of the contract since absolute paths
// are used when there are 2 contracts imported with the same
// name
if filepath.IsAbs(contract) {
contract = strings.TrimPrefix(strings.Replace(contract, monorepoBase, "", 1), "/")
}
outLayout.Storage = append(outLayout.Storage, solc.StorageLayoutEntry{
AstId: astIDRemappings[slot.AstId],
Contract: slot.Contract,
Contract: contract,
Label: slot.Label,
Offset: slot.Offset,
Slot: slot.Slot,
......
......@@ -49,7 +49,7 @@ func TestCanonicalize(t *testing.T) {
// Run 100 times to make sure that we aren't relying
// on random map iteration order.
for i := 0; i < 100; i++ {
require.Equal(t, testData.Out, CanonicalizeASTIDs(testData.In))
require.Equal(t, testData.Out, CanonicalizeASTIDs(testData.In, ""))
}
})
}
......
......@@ -9,7 +9,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const ERC20StorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_balances\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_mapping(t_address,t_uint256)\"},{\"astId\":1001,\"contract\":\"node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_allowances\",\"offset\":0,\"slot\":\"1\",\"type\":\"t_mapping(t_address,t_mapping(t_address,t_uint256))\"},{\"astId\":1002,\"contract\":\"node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_totalSupply\",\"offset\":0,\"slot\":\"2\",\"type\":\"t_uint256\"},{\"astId\":1003,\"contract\":\"node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_name\",\"offset\":0,\"slot\":\"3\",\"type\":\"t_string_storage\"},{\"astId\":1004,\"contract\":\"node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_symbol\",\"offset\":0,\"slot\":\"4\",\"type\":\"t_string_storage\"}],\"types\":{\"t_address\":{\"encoding\":\"inplace\",\"label\":\"address\",\"numberOfBytes\":\"20\"},\"t_mapping(t_address,t_mapping(t_address,t_uint256))\":{\"encoding\":\"mapping\",\"label\":\"mapping(address =\u003e mapping(address =\u003e uint256))\",\"numberOfBytes\":\"32\",\"key\":\"t_address\",\"value\":\"t_mapping(t_address,t_uint256)\"},\"t_mapping(t_address,t_uint256)\":{\"encoding\":\"mapping\",\"label\":\"mapping(address =\u003e uint256)\",\"numberOfBytes\":\"32\",\"key\":\"t_address\",\"value\":\"t_uint256\"},\"t_string_storage\":{\"encoding\":\"bytes\",\"label\":\"string\",\"numberOfBytes\":\"32\"},\"t_uint256\":{\"encoding\":\"inplace\",\"label\":\"uint256\",\"numberOfBytes\":\"32\"}}}"
const ERC20StorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"node_modules/.pnpm/@openzeppelin+contracts@4.7.3/node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_balances\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_mapping(t_address,t_uint256)\"},{\"astId\":1001,\"contract\":\"node_modules/.pnpm/@openzeppelin+contracts@4.7.3/node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_allowances\",\"offset\":0,\"slot\":\"1\",\"type\":\"t_mapping(t_address,t_mapping(t_address,t_uint256))\"},{\"astId\":1002,\"contract\":\"node_modules/.pnpm/@openzeppelin+contracts@4.7.3/node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_totalSupply\",\"offset\":0,\"slot\":\"2\",\"type\":\"t_uint256\"},{\"astId\":1003,\"contract\":\"node_modules/.pnpm/@openzeppelin+contracts@4.7.3/node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_name\",\"offset\":0,\"slot\":\"3\",\"type\":\"t_string_storage\"},{\"astId\":1004,\"contract\":\"node_modules/.pnpm/@openzeppelin+contracts@4.7.3/node_modules/@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20\",\"label\":\"_symbol\",\"offset\":0,\"slot\":\"4\",\"type\":\"t_string_storage\"}],\"types\":{\"t_address\":{\"encoding\":\"inplace\",\"label\":\"address\",\"numberOfBytes\":\"20\"},\"t_mapping(t_address,t_mapping(t_address,t_uint256))\":{\"encoding\":\"mapping\",\"label\":\"mapping(address =\u003e mapping(address =\u003e uint256))\",\"numberOfBytes\":\"32\",\"key\":\"t_address\",\"value\":\"t_mapping(t_address,t_uint256)\"},\"t_mapping(t_address,t_uint256)\":{\"encoding\":\"mapping\",\"label\":\"mapping(address =\u003e uint256)\",\"numberOfBytes\":\"32\",\"key\":\"t_address\",\"value\":\"t_uint256\"},\"t_string_storage\":{\"encoding\":\"bytes\",\"label\":\"string\",\"numberOfBytes\":\"32\"},\"t_uint256\":{\"encoding\":\"inplace\",\"label\":\"uint256\",\"numberOfBytes\":\"32\"}}}"
var ERC20StorageLayout = new(solc.StorageLayout)
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -22,6 +22,7 @@ type flags struct {
SourceMaps string
OutDir string
Package string
MonorepoBase string
}
type data struct {
......@@ -39,8 +40,14 @@ func main() {
flag.StringVar(&f.Contracts, "contracts", "artifacts.json", "Path to file containing list of contracts to generate bindings for")
flag.StringVar(&f.SourceMaps, "source-maps", "", "Comma-separated list of contracts to generate source-maps for")
flag.StringVar(&f.Package, "package", "artifacts", "Go package name")
flag.StringVar(&f.MonorepoBase, "monorepo-base", "", "Base of the monorepo")
flag.Parse()
if f.MonorepoBase == "" {
log.Fatal("must provide -monorepo-base")
}
log.Printf("Using monorepo base %s\n", f.MonorepoBase)
contractData, err := os.ReadFile(f.Contracts)
if err != nil {
log.Fatal("error reading contract list: %w\n", err)
......@@ -72,21 +79,47 @@ func main() {
defer os.RemoveAll(dir)
log.Printf("created temp dir %s\n", dir)
// If some contracts have the same name then the path to their
// artifact depends on their full import path. Scan over all artifacts
// and hold a mapping from the contract name to the contract path.
// Walk walks the directory deterministically, so the first instance
// of a contract with a given name is the one that will be used
artifactPaths := make(map[string]string)
if err := filepath.Walk(f.ForgeArtifacts,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
base := filepath.Base(path)
if strings.HasSuffix(base, ".json") {
name := base[:len(base)-5]
if _, ok := artifactPaths[name]; !ok {
artifactPaths[name] = path
}
}
return nil
}); err != nil {
log.Fatal(err)
}
for _, name := range contracts {
log.Printf("generating code for %s\n", name)
forgeArtifactData, err := os.ReadFile(path.Join(f.ForgeArtifacts, name+".sol", name+".json"))
artifactPath := path.Join(f.ForgeArtifacts, name+".sol", name+".json")
forgeArtifactData, err := os.ReadFile(artifactPath)
if errors.Is(err, os.ErrNotExist) {
log.Fatalf("cannot find forge-artifact of %q\n", name)
artifactPath = artifactPaths[name]
forgeArtifactData, err = os.ReadFile(artifactPath)
if errors.Is(err, os.ErrNotExist) {
log.Fatalf("cannot find forge-artifact of %q\n", name)
}
}
log.Printf("using forge-artifact %s\n", artifactPath)
var artifact foundry.Artifact
if err := json.Unmarshal(forgeArtifactData, &artifact); err != nil {
log.Fatalf("failed to parse forge artifact of %q: %v\n", name, err)
}
if err != nil {
log.Fatalf("error reading storage layout %s: %v\n", name, err)
}
rawAbi := artifact.Abi
if err != nil {
......@@ -121,7 +154,7 @@ func main() {
}
storage := artifact.StorageLayout
canonicalStorage := ast.CanonicalizeASTIDs(&storage)
canonicalStorage := ast.CanonicalizeASTIDs(&storage, f.MonorepoBase)
ser, err := json.Marshal(canonicalStorage)
if err != nil {
log.Fatalf("error marshaling storage: %v\n", err)
......
......@@ -2,6 +2,9 @@ package predeploys
import "github.com/ethereum/go-ethereum/common"
// TODO - we should get a single TOML, YAML, or JSON file as the source of truth in the @eth-optimism/bedrock package
// This needs to be kept in sync with @eth-optimism/contracts-ts/wagmi.config.ts which also specifies this
// To improve robustness and maintainability contracts-bedrock should export all addresses
const (
L2ToL1MessagePasser = "0x4200000000000000000000000000000000000016"
DeployerWhitelist = "0x4200000000000000000000000000000000000002"
......
package predeploys
import "github.com/ethereum/go-ethereum/common"
const (
LegacyERC20ETH = "0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000"
)
var (
LegacyERC20ETHAddr = common.HexToAddress(LegacyERC20ETH)
)
This diff is collapsed.
......@@ -12,6 +12,7 @@
"batchSenderAddress": "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC",
"l2OutputOracleSubmissionInterval": 20,
"l2OutputOracleStartingBlockNumber": 0,
"l2OutputOracleStartingTimestamp": -1,
"l2OutputOracleProposer": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8",
"l2OutputOracleChallenger": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8",
......
......@@ -10,6 +10,7 @@
"batchInboxAddress": "0x42000000000000000000000000000000000000ff",
"batchSenderAddress": "0x0000000000000000000000000000000000000000",
"l2OutputOracleSubmissionInterval": 6,
"l2OutputOracleStartingBlockNumber": 0,
"l2OutputOracleStartingTimestamp": -1,
"l2OutputOracleProposer": "0x7770000000000000000000000000000000000001",
"l2OutputOracleChallenger": "0x7770000000000000000000000000000000000002",
......
......@@ -20,35 +20,3 @@ to see a list of available options.
`op-challenger` is configurable via command line flags and environment variables. The help menu
shows the available config options and can be accessed by running `./op-challenger --help`.
Note that there are many global options, but the most important ones are:
- `OP_CHALLENGER_L1_ETH_RPC`: An L1 Ethereum RPC URL
- `OP_CHALLENGER_ROLLUP_RPC`: A Rollup Node RPC URL
- `OP_CHALLENGER_L2OO_ADDRESS`: The L2OutputOracle Contract Address
- `OP_CHALLENGER_DGF_ADDRESS`: Dispute Game Factory Contract Address
Here is a reduced output from running `./op-challenger --help`:
```bash
NAME:
op-challenger - Modular Challenger Agent
USAGE:
main [global options] command [command options] [arguments...]
VERSION:
1.0.0
DESCRIPTION:
A modular op-stack challenge agent for output dispute games written in golang.
COMMANDS:
help, h Shows a list of commands or help for one command
GLOBAL OPTIONS:
--l1-eth-rpc value HTTP provider URL for L1. [$OP_CHALLENGER_L1_ETH_RPC]
--rollup-rpc value HTTP provider URL for the rollup node. [$OP_CHALLENGER_ROLLUP_RPC]
--l2oo-address value Address of the L2OutputOracle contract. [$OP_CHALLENGER_L2OO_ADDRESS]
--dgf-address value Address of the DisputeGameFactory contract. [$OP_CHALLENGER_DGF_ADDRESS]
...
--help, -h show help
--version, -v print the version
```
package op_challenger
import (
"fmt"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/fault"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum-optimism/optimism/op-service/txmgr/metrics"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
)
// Main is the programmatic entry-point for running op-challenger
func Main(logger log.Logger, cfg *config.Config) error {
client, err := ethclient.Dial(cfg.L1EthRpc)
if err != nil {
return fmt.Errorf("failed to dial L1: %w", err)
}
txMgr, err := txmgr.NewSimpleTxManager("challenger", logger, &metrics.NoopTxMetrics{}, cfg.TxMgrConfig)
if err != nil {
return fmt.Errorf("failed to create the transaction manager: %w", err)
}
contract, err := bindings.NewFaultDisputeGameCaller(cfg.GameAddress, client)
if err != nil {
return fmt.Errorf("failed to bind the fault dispute game contract: %w", err)
}
loader := fault.NewLoader(logger, contract)
responder, err := fault.NewFaultResponder(logger, txMgr, cfg.GameAddress)
if err != nil {
return fmt.Errorf("failed to create the responder: %w", err)
}
trace := fault.NewAlphabetProvider(cfg.AlphabetTrace, uint64(cfg.GameDepth))
agent := fault.NewAgent(loader, cfg.GameDepth, trace, responder, cfg.AgreeWithProposedOutput, logger)
caller, err := fault.NewFaultCallerFromBindings(cfg.GameAddress, client, logger)
if err != nil {
return fmt.Errorf("failed to bind the fault contract: %w", err)
}
logger.Info("Fault game started")
return nil
for {
logger.Info("Performing action")
_ = agent.Act()
caller.LogGameInfo()
time.Sleep(300 * time.Millisecond)
}
}
#!/bin/bash
set -euo pipefail
DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
cd "$DIR"
DISPUTE_GAME_PROXY="0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e"
CHARLIE_ADDRESS="0xF45B7537828CB2fffBC69996B054c2Aaf36DC778"
CHARLIE_KEY="74feb147d72bfae943e6b4e483410933d9e447d5dc47d52432dcc2c1454dabb7"
MALLORY_ADDRESS="0x4641c704a6c743f73ee1f36C7568Fbf4b80681e4"
MALLORY_KEY="28d7045146193f5f4eeb151c4843544b1b0d30a7ac1680c845a416fac65a7715"
FAULT_GAME_ADDRESS="0x8daf17a20c9dba35f005b6324f493785d239719d"
./bin/op-challenger \
--l1-eth-rpc http://localhost:8545 \
--alphabet "abcdefgh" \
--game-address $FAULT_GAME_ADDRESS \
--private-key $CHARLIE_KEY \
--num-confirmations 1 \
--game-depth 4 \
--agree-with-proposed-output=true
package main
import (
"fmt"
"testing"
"github.com/ethereum-optimism/optimism/op-challenger/config"
......@@ -11,9 +12,11 @@ import (
)
var (
l1EthRpc = "http://example.com:8545"
gameAddressValue = "0xaa00000000000000000000000000000000000000"
alphabetTrace = "abcdefghijz"
l1EthRpc = "http://example.com:8545"
gameAddressValue = "0xaa00000000000000000000000000000000000000"
alphabetTrace = "abcdefghijz"
agreeWithProposedOutput = "true"
gameDepth = "4"
)
func TestLogLevel(t *testing.T) {
......@@ -33,12 +36,12 @@ func TestLogLevel(t *testing.T) {
func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs())
defaultCfg := config.NewConfig(l1EthRpc, common.HexToAddress(gameAddressValue), alphabetTrace)
defaultCfg := config.NewConfig(l1EthRpc, common.HexToAddress(gameAddressValue), alphabetTrace, true, 4)
require.Equal(t, defaultCfg, cfg)
}
func TestDefaultConfigIsValid(t *testing.T) {
cfg := config.NewConfig(l1EthRpc, common.HexToAddress(gameAddressValue), alphabetTrace)
cfg := config.NewConfig(l1EthRpc, common.HexToAddress(gameAddressValue), alphabetTrace, true, 4)
require.NoError(t, cfg.Check())
}
......@@ -89,6 +92,36 @@ func TestTxManagerFlagsSupported(t *testing.T) {
require.Equal(t, uint64(7), cfg.TxMgrConfig.NumConfirmations)
}
func TestAgreeWithProposedOutput(t *testing.T) {
t.Run("MustBeProvided", func(t *testing.T) {
verifyArgsInvalid(t, "flag agree-with-proposed-output is required", addRequiredArgsExcept("--agree-with-proposed-output"))
})
t.Run("Enabled", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs("--agree-with-proposed-output"))
require.True(t, cfg.AgreeWithProposedOutput)
})
t.Run("EnabledWithArg", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs("--agree-with-proposed-output=true"))
require.True(t, cfg.AgreeWithProposedOutput)
})
t.Run("Disabled", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs("--agree-with-proposed-output=false"))
require.False(t, cfg.AgreeWithProposedOutput)
})
}
func TestGameDepth(t *testing.T) {
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag game-depth is required", addRequiredArgsExcept("--game-depth"))
})
t.Run("Valid", func(t *testing.T) {
value := "4"
cfg := configForArgs(t, addRequiredArgsExcept("--game-depth", "--game-depth="+value))
require.Equal(t, value, fmt.Sprint(cfg.GameDepth))
})
}
func verifyArgsInvalid(t *testing.T, messageContains string, cliArgs []string) {
_, _, err := runWithArgs(cliArgs)
require.ErrorContains(t, err, messageContains)
......@@ -103,7 +136,7 @@ func configForArgs(t *testing.T, cliArgs []string) config.Config {
func runWithArgs(cliArgs []string) (log.Logger, config.Config, error) {
cfg := new(config.Config)
var logger log.Logger
fullArgs := append([]string{"op-program"}, cliArgs...)
fullArgs := append([]string{"op-challenger"}, cliArgs...)
err := run(fullArgs, func(log log.Logger, config *config.Config) error {
logger = log
cfg = config
......@@ -126,17 +159,18 @@ func addRequiredArgsExcept(name string, optionalArgs ...string) []string {
func requiredArgs() map[string]string {
return map[string]string{
"--l1-eth-rpc": l1EthRpc,
"--game-address": gameAddressValue,
"--alphabet": alphabetTrace,
"--game-depth": gameDepth,
"--agree-with-proposed-output": agreeWithProposedOutput,
"--l1-eth-rpc": l1EthRpc,
"--game-address": gameAddressValue,
"--alphabet": alphabetTrace,
}
}
func toArgList(req map[string]string) []string {
var combined []string
for name, value := range req {
combined = append(combined, name)
combined = append(combined, value)
combined = append(combined, fmt.Sprintf("%s=%s", name, value))
}
return combined
}
......@@ -21,22 +21,29 @@ var (
// This also contains config options for auxiliary services.
// It is used to initialize the challenger.
type Config struct {
L1EthRpc string // L1 RPC Url
GameAddress common.Address // Address of the fault game
AlphabetTrace string // String for the AlphabetTraceProvider
L1EthRpc string // L1 RPC Url
GameAddress common.Address // Address of the fault game
AlphabetTrace string // String for the AlphabetTraceProvider
AgreeWithProposedOutput bool // Temporary config if we agree or disagree with the posted output
GameDepth int // Depth of the game tree
TxMgrConfig txmgr.CLIConfig
}
func NewConfig(l1EthRpc string,
func NewConfig(
l1EthRpc string,
GameAddress common.Address,
AlphabetTrace string,
AgreeWithProposedOutput bool,
GameDepth int,
) Config {
return Config{
L1EthRpc: l1EthRpc,
GameAddress: GameAddress,
AlphabetTrace: AlphabetTrace,
TxMgrConfig: txmgr.NewCLIConfig(l1EthRpc),
L1EthRpc: l1EthRpc,
GameAddress: GameAddress,
AlphabetTrace: AlphabetTrace,
TxMgrConfig: txmgr.NewCLIConfig(l1EthRpc),
AgreeWithProposedOutput: AgreeWithProposedOutput,
GameDepth: GameDepth,
}
}
......@@ -70,9 +77,11 @@ func NewConfigFromCLI(ctx *cli.Context) (*Config, error) {
return &Config{
// Required Flags
L1EthRpc: ctx.String(flags.L1EthRpcFlag.Name),
GameAddress: dgfAddress,
AlphabetTrace: ctx.String(flags.AlphabetFlag.Name),
TxMgrConfig: txMgrConfig,
L1EthRpc: ctx.String(flags.L1EthRpcFlag.Name),
GameAddress: dgfAddress,
AlphabetTrace: ctx.String(flags.AlphabetFlag.Name),
AgreeWithProposedOutput: ctx.Bool(flags.AgreeWithProposedOutputFlag.Name),
GameDepth: ctx.Int(flags.GameDepthFlag.Name),
TxMgrConfig: txMgrConfig,
}, nil
}
......@@ -9,13 +9,15 @@ import (
)
var (
validL1EthRpc = "http://localhost:8545"
validGameAddress = common.HexToAddress("0x7bdd3b028C4796eF0EAf07d11394d0d9d8c24139")
validAlphabetTrace = "abcdefgh"
validL1EthRpc = "http://localhost:8545"
validGameAddress = common.HexToAddress("0x7bdd3b028C4796eF0EAf07d11394d0d9d8c24139")
validAlphabetTrace = "abcdefgh"
agreeWithProposedOutput = true
gameDepth = 4
)
func validConfig() Config {
cfg := NewConfig(validL1EthRpc, validGameAddress, validAlphabetTrace)
cfg := NewConfig(validL1EthRpc, validGameAddress, validAlphabetTrace, agreeWithProposedOutput, gameDepth)
return cfg
}
......
......@@ -31,6 +31,7 @@ func setupFaultDisputeGame() (common.Address, *bind.TransactOpts, *backends.Simu
backend,
[32]byte{0x01},
big.NewInt(15),
uint64(604800), // 7 days
common.Address{0xdd},
)
if err != nil {
......
......@@ -3,27 +3,28 @@ package fault
import (
"context"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/log"
)
type Agent struct {
solver *Solver
trace TraceProvider
loader Loader
responder Responder
maxDepth int
log log.Logger
solver *Solver
loader Loader
responder Responder
maxDepth int
agreeWithProposedOutput bool
log log.Logger
}
func NewAgent(loader Loader, maxDepth int, trace TraceProvider, responder Responder, log log.Logger) Agent {
func NewAgent(loader Loader, maxDepth int, trace TraceProvider, responder Responder, agreeWithProposedOutput bool, log log.Logger) Agent {
return Agent{
solver: NewSolver(maxDepth, trace),
trace: trace,
loader: loader,
responder: responder,
maxDepth: maxDepth,
log: log,
solver: NewSolver(maxDepth, trace),
loader: loader,
responder: responder,
maxDepth: maxDepth,
agreeWithProposedOutput: agreeWithProposedOutput,
log: log,
}
}
......@@ -36,11 +37,15 @@ func (a *Agent) Act() error {
}
// Create counter claims
for _, claim := range game.Claims() {
_ = a.move(claim, game)
if err := a.move(claim, game); err != nil {
log.Error("Failed to move", "err", err)
}
}
// Step on all leaf claims
for _, claim := range game.Claims() {
_ = a.step(claim, game)
if err := a.step(claim, game); err != nil {
log.Error("Failed to step", "err", err)
}
}
return nil
}
......@@ -49,34 +54,33 @@ func (a *Agent) Act() error {
func (a *Agent) newGameFromContracts(ctx context.Context) (Game, error) {
claims, err := a.loader.FetchClaims(ctx)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to fetch claims: %w", err)
}
if len(claims) == 0 {
return nil, errors.New("no claims")
}
game := NewGameState(claims[0], uint64(a.maxDepth))
game := NewGameState(a.agreeWithProposedOutput, claims[0], uint64(a.maxDepth))
if err := game.PutAll(claims[1:]); err != nil {
return nil, err
return nil, fmt.Errorf("failed to load claims into the local state: %w", err)
}
return game, nil
}
// move determines & executes the next move given a claim pair
// move determines & executes the next move given a claim
func (a *Agent) move(claim Claim, game Game) error {
a.log.Info("Fetching claims")
nextMove, err := a.solver.NextMove(claim)
nextMove, err := a.solver.NextMove(claim, game.AgreeWithClaimLevel(claim))
if err != nil {
a.log.Warn("Failed to execute the next move", "err", err)
return err
}
if nextMove == nil {
a.log.Info("No next move")
a.log.Debug("No next move")
return nil
}
move := *nextMove
log := a.log.New("is_defend", move.DefendsParent(), "depth", move.Depth(), "index_at_depth", move.IndexAtDepth(), "value", move.Value,
"letter", string(move.Value[31:]), "trace_index", move.Value[30],
"parent_letter", string(claim.Value[31:]), "parent_trace_index", claim.Value[30])
log := a.log.New("is_defend", move.DefendsParent(), "depth", move.Depth(), "index_at_depth", move.IndexAtDepth(),
"value", move.Value, "trace_index", move.TraceIndex(a.maxDepth),
"parent_value", claim.Value, "parent_trace_index", claim.TraceIndex(a.maxDepth))
if game.IsDuplicate(move) {
log.Debug("Duplicate move")
return nil
......@@ -85,25 +89,36 @@ func (a *Agent) move(claim Claim, game Game) error {
return a.responder.Respond(context.TODO(), move)
}
// step attempts to execute the step through the responder
// step determines & executes the next step against a leaf claim through the responder
func (a *Agent) step(claim Claim, game Game) error {
if claim.Depth() != a.maxDepth {
return nil
}
a.log.Info("Attempting step", "claim_depth", claim.Depth(), "maxDepth", a.maxDepth)
step, err := a.solver.AttemptStep(claim)
agreeWithClaimLevel := game.AgreeWithClaimLevel(claim)
if agreeWithClaimLevel {
a.log.Warn("Agree with leaf claim, skipping step", "claim_depth", claim.Depth(), "maxDepth", a.maxDepth)
return nil
}
if claim.Countered {
a.log.Info("Claim already stepped on", "claim_depth", claim.Depth(), "maxDepth", a.maxDepth)
return nil
}
a.log.Info("Attempting step", "claim_depth", claim.Depth(), "maxDepth", a.maxDepth)
step, err := a.solver.AttemptStep(claim, agreeWithClaimLevel)
if err != nil {
a.log.Info("Failed to get a step", "err", err)
a.log.Warn("Failed to get a step", "err", err)
return err
}
a.log.Info("Performing step",
"depth", step.LeafClaim.Depth(), "index_at_depth", step.LeafClaim.IndexAtDepth(), "value", step.LeafClaim.Value,
"is_attack", step.IsAttack)
a.log.Info("Performing step", "is_attack", step.IsAttack,
"depth", step.LeafClaim.Depth(), "index_at_depth", step.LeafClaim.IndexAtDepth(), "value", step.LeafClaim.Value)
callData := StepCallData{
ClaimIndex: uint64(step.LeafClaim.ContractIndex),
IsAttack: step.IsAttack,
StateData: step.PreState,
}
return a.responder.Step(context.TODO(), callData)
}
......@@ -5,8 +5,11 @@ import (
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
var _ TraceProvider = (*AlphabetProvider)(nil)
// AlphabetProvider is a [TraceProvider] that provides claims for specific
// indices in the given trace.
type AlphabetProvider struct {
......@@ -32,7 +35,7 @@ func (ap *AlphabetProvider) GetPreimage(i uint64) ([]byte, error) {
if i >= uint64(len(ap.state)) {
return ap.GetPreimage(uint64(len(ap.state)) - 1)
}
return buildAlphabetClaimBytes(i, ap.state[i]), nil
return BuildAlphabetPreimage(i, ap.state[i]), nil
}
// Get returns the claim value at the given index in the trace.
......@@ -41,17 +44,31 @@ func (ap *AlphabetProvider) Get(i uint64) (common.Hash, error) {
if err != nil {
return common.Hash{}, err
}
return common.BytesToHash(claimBytes), nil
return crypto.Keccak256Hash(claimBytes), nil
}
func (ap *AlphabetProvider) AbsolutePreState() []byte {
out := make([]byte, 32)
out[31] = 96 // ascii character 96 is "`"
return out
}
// buildAlphabetClaimBytes constructs the claim bytes for the index and state item.
func buildAlphabetClaimBytes(i uint64, letter string) []byte {
return append(IndexToBytes(i), []byte(letter)...)
// BuildAlphabetPreimage constructs the claim bytes for the index and state item.
func BuildAlphabetPreimage(i uint64, letter string) []byte {
return append(IndexToBytes(i), LetterToBytes(letter)...)
}
// IndexToBytes converts an index to a byte slice big endian
func IndexToBytes(i uint64) []byte {
big := new(big.Int)
big.SetUint64(i)
return big.Bytes()
out := make([]byte, 32)
return big.FillBytes(out)
}
// LetterToBytes converts a letter to a 32 byte array
func LetterToBytes(letter string) []byte {
out := make([]byte, 32)
out[31] = byte(letter[0])
return out
}
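
The two helpers above fix the preimage layout at 64 bytes: a 32-byte big-endian trace index followed by a 32-byte word whose final byte holds the letter. The standalone sketch below illustrates that layout; it re-implements the helpers locally (the lowercase `indexToBytes`/`letterToBytes` names are illustrative, not part of the package) so it compiles without the op-challenger module.

```go
package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
)

// indexToBytes mirrors IndexToBytes above: a 32-byte big-endian encoding
// of the trace index instead of the minimal big.Int byte slice.
func indexToBytes(i uint64) []byte {
	out := make([]byte, 32)
	return new(big.Int).SetUint64(i).FillBytes(out)
}

// letterToBytes mirrors LetterToBytes above: the letter sits in the last
// byte of a zero-padded 32-byte word.
func letterToBytes(letter string) []byte {
	out := make([]byte, 32)
	out[31] = letter[0]
	return out
}

func main() {
	pre := append(indexToBytes(7), letterToBytes("h")...)
	fmt.Println(len(pre)) // 64
	// 31 zero bytes, 0x07, then 31 zero bytes, 0x68 ('h').
	fmt.Println(hex.EncodeToString(pre))
}
```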
......@@ -20,15 +20,15 @@ func TestAlphabetProvider_Get_ClaimsByTraceIndex(t *testing.T) {
}{
{
7,
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000768"),
alphabetClaim(7, "h"),
},
{
3,
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000364"),
alphabetClaim(3, "d"),
},
{
5,
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000566"),
alphabetClaim(5, "f"),
},
}
......@@ -54,7 +54,7 @@ func FuzzIndexToBytes(f *testing.F) {
// returns the correct pre-image for an index.
func TestGetPreimage_Succeeds(t *testing.T) {
ap := NewAlphabetProvider("abc", 2)
expected := append(IndexToBytes(uint64(0)), []byte("a")...)
expected := BuildAlphabetPreimage(0, "a")
retrieved, err := ap.GetPreimage(uint64(0))
require.NoError(t, err)
require.Equal(t, expected, retrieved)
......@@ -73,8 +73,7 @@ func TestGet_Succeeds(t *testing.T) {
ap := NewAlphabetProvider("abc", 2)
claim, err := ap.Get(0)
require.NoError(t, err)
concatenated := append(IndexToBytes(0), []byte("a")...)
expected := common.BytesToHash(concatenated)
expected := alphabetClaim(0, "a")
require.Equal(t, expected, claim)
}
......@@ -92,7 +91,6 @@ func TestGet_Extends(t *testing.T) {
ap := NewAlphabetProvider("abc", 2)
claim, err := ap.Get(3)
require.NoError(t, err)
concatenated := append(IndexToBytes(2), []byte("c")...)
expected := common.BytesToHash(concatenated)
expected := alphabetClaim(2, "c")
require.Equal(t, expected, claim)
}
package fault
import (
"context"
"math/big"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
)
type FaultDisputeGameCaller interface {
Status(opts *bind.CallOpts) (uint8, error)
ClaimDataLen(opts *bind.CallOpts) (*big.Int, error)
}
type FaultCaller struct {
FaultDisputeGameCaller
log log.Logger
fdgAddr common.Address
}
func NewFaultCaller(fdgAddr common.Address, caller FaultDisputeGameCaller, log log.Logger) *FaultCaller {
return &FaultCaller{
caller,
log,
fdgAddr,
}
}
func NewFaultCallerFromBindings(fdgAddr common.Address, client *ethclient.Client, log log.Logger) (*FaultCaller, error) {
caller, err := bindings.NewFaultDisputeGameCaller(fdgAddr, client)
if err != nil {
return nil, err
}
return &FaultCaller{
caller,
log,
fdgAddr,
}, nil
}
// LogGameInfo logs the game info.
func (fc *FaultCaller) LogGameInfo() {
status, err := fc.GetGameStatus(context.Background())
if err != nil {
fc.log.Error("failed to get game status", "err", err)
return
}
claimLen, err := fc.GetClaimDataLength(context.Background())
if err != nil {
fc.log.Error("failed to get claim count", "err", err)
return
}
fc.log.Info("Game info", "addr", fc.fdgAddr, "claims", claimLen, "status", GameStatusString(status))
}
// GetGameStatus returns the current game status.
// 0: In Progress
// 1: Challenger Won
// 2: Defender Won
func (fc *FaultCaller) GetGameStatus(ctx context.Context) (uint8, error) {
return fc.Status(&bind.CallOpts{Context: ctx})
}
func (fc *FaultCaller) LogGameStatus() {
status, err := fc.GetGameStatus(context.Background())
if err != nil {
fc.log.Error("failed to get game status", "err", err)
return
}
fc.log.Info("Game status", "status", GameStatusString(status))
}
// GetClaimDataLength returns the number of claims in the game.
func (fc *FaultCaller) GetClaimDataLength(ctx context.Context) (*big.Int, error) {
return fc.ClaimDataLen(&bind.CallOpts{Context: ctx})
}
func (fc *FaultCaller) LogClaimDataLength() {
claimLen, err := fc.GetClaimDataLength(context.Background())
if err != nil {
fc.log.Error("failed to get claim count", "err", err)
return
}
fc.log.Info("Number of claims", "length", claimLen)
}
// GameStatusString returns the current game status as a string.
func GameStatusString(status uint8) string {
switch status {
case 0:
return "In Progress"
case 1:
return "Challenger Won"
case 2:
return "Defender Won"
default:
return "Unknown"
}
}
package fault
import (
"context"
"errors"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
var (
testAddr = common.HexToAddress("0x1234567890123456789012345678901234567890")
errMock = errors.New("mock error")
)
type mockFaultDisputeGameCaller struct {
status uint8
errStatus bool
claimDataLen *big.Int
errClaimDataLen bool
}
func (m *mockFaultDisputeGameCaller) Status(opts *bind.CallOpts) (uint8, error) {
if m.errStatus {
return 0, errMock
}
return m.status, nil
}
func (m *mockFaultDisputeGameCaller) ClaimDataLen(opts *bind.CallOpts) (*big.Int, error) {
if m.errClaimDataLen {
return nil, errMock
}
return m.claimDataLen, nil
}
func TestFaultCaller_GetGameStatus(t *testing.T) {
tests := []struct {
name string
caller FaultDisputeGameCaller
expectedStatus uint8
expectedErr error
}{
{
name: "success",
caller: &mockFaultDisputeGameCaller{
status: 1,
},
expectedStatus: 1,
expectedErr: nil,
},
{
name: "error",
caller: &mockFaultDisputeGameCaller{
errStatus: true,
},
expectedStatus: 0,
expectedErr: errMock,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fc := NewFaultCaller(testAddr, test.caller, nil)
status, err := fc.GetGameStatus(context.Background())
require.Equal(t, test.expectedStatus, status)
require.Equal(t, test.expectedErr, err)
})
}
}
func TestFaultCaller_GetClaimDataLength(t *testing.T) {
tests := []struct {
name string
caller FaultDisputeGameCaller
expectedClaimDataLen *big.Int
expectedErr error
}{
{
name: "success",
caller: &mockFaultDisputeGameCaller{
claimDataLen: big.NewInt(1),
},
expectedClaimDataLen: big.NewInt(1),
expectedErr: nil,
},
{
name: "error",
caller: &mockFaultDisputeGameCaller{
errClaimDataLen: true,
},
expectedClaimDataLen: nil,
expectedErr: errMock,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fc := NewFaultCaller(testAddr, test.caller, nil)
claimDataLen, err := fc.GetClaimDataLength(context.Background())
require.Equal(t, test.expectedClaimDataLen, claimDataLen)
require.Equal(t, test.expectedErr, err)
})
}
}
package cannon
import (
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"github.com/ethereum-optimism/optimism/op-challenger/fault"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
)
const proofsDir = "proofs"
type proofData struct {
ClaimValue hexutil.Bytes `json:"post"`
StateData hexutil.Bytes `json:"state-data"`
ProofData hexutil.Bytes `json:"proof-data"`
}
type CannonTraceProvider struct {
dir string
}
func NewCannonTraceProvider(dataDir string) *CannonTraceProvider {
return &CannonTraceProvider{
dir: dataDir,
}
}
func (p *CannonTraceProvider) Get(i uint64) (common.Hash, error) {
proof, err := p.loadProof(i)
if err != nil {
return common.Hash{}, err
}
value := common.BytesToHash(proof.ClaimValue)
if value == (common.Hash{}) {
return common.Hash{}, errors.New("proof missing post hash")
}
return value, nil
}
func (p *CannonTraceProvider) GetPreimage(i uint64) ([]byte, error) {
proof, err := p.loadProof(i)
if err != nil {
return nil, err
}
value := ([]byte)(proof.StateData)
if len(value) == 0 {
return nil, errors.New("proof missing state data")
}
return value, nil
}
func (p *CannonTraceProvider) AbsolutePreState() []byte {
panic("absolute prestate not yet supported")
}
func (p *CannonTraceProvider) loadProof(i uint64) (*proofData, error) {
path := filepath.Join(p.dir, proofsDir, fmt.Sprintf("%d.json", i))
file, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("cannot open proof file (%v): %w", path, err)
}
defer file.Close()
var proof proofData
err = json.NewDecoder(file).Decode(&proof)
if err != nil {
return nil, fmt.Errorf("failed to read proof (%v): %w", path, err)
}
return &proof, nil
}
var _ fault.TraceProvider = (*CannonTraceProvider)(nil)
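
For context on the on-disk format `loadProof` expects, the hedged sketch below writes a hypothetical `proofs/0.json` and reads it back through the provider. The import path for the `cannon` package is assumed from the surrounding diff, and the proof values are made up purely for illustration.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	// Assumed import path for the CannonTraceProvider shown above.
	"github.com/ethereum-optimism/optimism/op-challenger/fault/cannon"
)

func main() {
	// The provider reads <dataDir>/proofs/<i>.json, where each file holds
	// hex-encoded "post", "state-data" and "proof-data" fields.
	dir, err := os.MkdirTemp("", "cannon-proofs")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := os.Mkdir(filepath.Join(dir, "proofs"), 0o755); err != nil {
		panic(err)
	}

	// Hypothetical proof for trace index 0.
	proof := `{"post":"0xaa","state-data":"0xbb","proof-data":"0x"}`
	if err := os.WriteFile(filepath.Join(dir, "proofs", "0.json"), []byte(proof), 0o644); err != nil {
		panic(err)
	}

	p := cannon.NewCannonTraceProvider(dir)
	claim, err := p.Get(0)
	if err != nil {
		panic(err)
	}
	fmt.Println(claim) // 0xaa zero-padded on the left into a 32-byte hash
}
```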
package cannon
import (
"embed"
_ "embed"
"os"
"path/filepath"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
//go:embed test_data
var testData embed.FS
func TestGet(t *testing.T) {
provider := setupWithTestData(t)
t.Run("ExistingProof", func(t *testing.T) {
value, err := provider.Get(0)
require.NoError(t, err)
require.Equal(t, common.HexToHash("0x45fd9aa59768331c726e719e76aa343e73123af888804604785ae19506e65e87"), value)
})
t.Run("ProofUnavailable", func(t *testing.T) {
_, err := provider.Get(7)
require.ErrorIs(t, err, os.ErrNotExist)
})
t.Run("MissingPostHash", func(t *testing.T) {
_, err := provider.Get(1)
require.ErrorContains(t, err, "missing post hash")
})
t.Run("IgnoreUnknownFields", func(t *testing.T) {
value, err := provider.Get(2)
require.NoError(t, err)
expected := common.HexToHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
require.Equal(t, expected, value)
})
}
func TestGetPreimage(t *testing.T) {
provider := setupWithTestData(t)
t.Run("ExistingProof", func(t *testing.T) {
value, err := provider.GetPreimage(0)
require.NoError(t, err)
expected := common.Hex2Bytes("b8f068de604c85ea0e2acd437cdb47add074a2d70b81d018390c504b71fe26f400000000000000000000000000000000000000000000000000000000000000000000000000")
require.Equal(t, expected, value)
})
t.Run("ProofUnavailable", func(t *testing.T) {
_, err := provider.GetPreimage(7)
require.ErrorIs(t, err, os.ErrNotExist)
})
t.Run("MissingStateData", func(t *testing.T) {
_, err := provider.GetPreimage(1)
require.ErrorContains(t, err, "missing state data")
})
t.Run("IgnoreUnknownFields", func(t *testing.T) {
value, err := provider.GetPreimage(2)
require.NoError(t, err)
expected := common.Hex2Bytes("cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc")
require.Equal(t, expected, value)
})
}
func setupWithTestData(t *testing.T) *CannonTraceProvider {
srcDir := filepath.Join("test_data", "proofs")
entries, err := testData.ReadDir(srcDir)
require.NoError(t, err)
dataDir := t.TempDir()
require.NoError(t, os.Mkdir(filepath.Join(dataDir, proofsDir), 0o777))
for _, entry := range entries {
path := filepath.Join(srcDir, entry.Name())
file, err := testData.ReadFile(path)
require.NoErrorf(t, err, "reading %v", path)
err = os.WriteFile(filepath.Join(dataDir, "proofs", entry.Name()), file, 0o644)
require.NoErrorf(t, err, "writing %v", path)
}
return NewCannonTraceProvider(dataDir)
}
{"step":0,"pre":"0x71f9eb93ff904e5c03c3425228ef75766db0c906ad239df9a7a7f0d9c6a89705","post":"0x45fd9aa59768331c726e719e76aa343e73123af888804604785ae19506e65e87","state-data":"0xb8f068de604c85ea0e2acd437cdb47add074a2d70b81d018390c504b71fe26f400000000000000000000000000000000000000000000000000000000000000000000000000","proof-data":"0x08028e3c0000000000000000000000003c01000a24210b7c00200008000000008fa40004240210960000000c0000003403e00008000000008fa100040000102571c0e460346a89963488f904199fc7b4dc3dce2ddadfe484510463ae5014a79df9d922ef2cb84325e4e13ad98828ed29937c1440d8ea9eb19cab7474243c2d0b1a83646e420529153298f3a914a2550658c930f5e519b1d8dd151cf828116697d27264e6fad331820ecf3855adcc68dc529acfc33ecfa45a3a33c9ac766edc1f437988f2abab9dce36d3bac27b0f7b58a06d125acd50a1bf14bb8c7f6c1618465a532f945043b5a9ebc800d7336673019654eb76f8c10cff4f794ee586dc9992c318cef3dfa57032e2dd2fc5cb2dcfebd05551301704dd37a7c169448ec02574f706e38c20963616dae4e03cc91f39a4c3f9608119212965b72948f0ee15feb48b758f050691197816dc3ca919bbb3b50624d195c82d644025647ac8ba07206e5eb830799dfa896506743e81856edf8a31fef737fb4f44501dc71f019bdb12ed9cf0b9fba40ef98e5091b70484ba4f6af7711ec8b0ba4f4f2c4b11455a9e071f465817724159ddeea1170f4dd912c3a5a10ec6b046aa3c4a9febddfeeaa47e3ef06e1758694515562c958dc1b018149c7e4fcd91b9033ee216fea2ea498acd065e61fd436f26c31654bfd27c13ab67707384ad7a84a4b085e890e998e8a9655da954db3d279d598343a4706a2272fca526caeddb017627ecaf0138f1446c82e16d0926c0c510773e2b439c2c71414deb9b739fa370c010380d9ed5927fd7f4bb84ac22747f1bd405830b65d9e04c5efddc2c4dc89ba294c7568b9952193172d75ed8ea3e0fe57c8ad6636da54921ab52a8a0f54920d124f43b9fd3577690140cb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0c65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2f4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd95a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e3774df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652cdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef6834d8ef8faaf96b7b45235297538a266eb882b8b5680f621aab3417d43cdc2eb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","step-input":"0xf8e0cb960000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000014200000000000000000000000000000000000000000000000000000000000000e2b8f068de604c85ea0e2acd437cdb47add074a2d70b81d018390c504b71fe26f4000000000000000000000000000000000000000000000000000000000000000000000000000a3900000a39040000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007fffd0000000000000000000000000000000000000000000000000000000000000000000000000000000070008028e3c0000000000000000000000003c01000a24210b7c00200008000000008fa40004240210960000000c0000003403e00008000000008fa100040000102571c0e460346a89963488f904199fc7b4dc3dce2ddadfe484510463ae5014a79df9d922ef2cb84325e4e13ad98828ed29937c1440d8ea9eb19cab7474243c2d0b1a83646e420529153298f3a914a2550658c930f5e519b1d8dd151cf828116697d27264e6fad331820ecf3855adcc68dc529acfc33ecfa45a3a33c9ac766edc1f437988f2abab9dce36d3bac27b0f7b58a06d125acd50a1bf14bb8c7f6c1618465a532f945043b5a9ebc800d7336673019654eb76f8c10cff4f794ee586dc9992c318cef3dfa57032e2dd2fc5cb2dcfebd05551301704dd37a7c169448ec02574f706e38c20963616dae4e03cc91f39a4c3f9608119212965b72948f0ee15feb48b758f050691197816dc3ca919bbb3b50624d195c82d644025647ac8ba07206e5eb830799dfa896506743e81856edf8a31fef737fb4f44501dc71f019bdb12ed9cf0b9fba40ef98e5091b70484ba4f6af7711ec8b0ba4f4f2c4b11455a9e071f465817724159ddeea1170f4dd912c3a5a10ec6b046aa3c4a9febddfeeaa47e3ef06e1758694515562c958dc1b018149c7e4fcd91b9033ee216fea2ea498acd065e61fd436f26c31654bfd27c13ab67707384ad7a84a4b085e890e998e8a9655da954db3d279d598343a4706a2272fca526caeddb017627ecaf0138f1446c82e16d0926c0c510773e2b439c2c71414deb9b739fa370c010380d9ed5927fd7f4bb84ac22747f1bd405830b65d9e04c5efddc2c4dc89ba294c7568b9952193172d75ed8ea3e0fe57c8ad6636da54921ab52a8a0f54920d124f43b9fd3577690140cb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0c65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2f4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd95a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e3774df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652cdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef6834d8ef8faaf96b7b45235297538a266eb882b8b5680f621aab3417d43cdc2eb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","oracle-input":"0x"}
package examples
import (
"os"
"github.com/ethereum-optimism/optimism/op-challenger/fault"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
func FullGame() {
log.Root().SetHandler(
log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stdout, log.TerminalFormat(true))),
)
canonical := "abcdefgh"
disputed := "abcdexyz"
maxDepth := uint64(3)
canonicalProvider := fault.NewAlphabetProvider(canonical, maxDepth)
disputedProvider := fault.NewAlphabetProvider(disputed, maxDepth)
root := fault.Claim{
ClaimData: fault.ClaimData{
Value: common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000077a"),
Position: fault.NewPosition(0, 0),
},
}
o := fault.NewOrchestrator(maxDepth, []fault.TraceProvider{canonicalProvider, disputedProvider}, []string{"charlie", "mallory"}, root)
o.Start()
}
package examples
import (
"github.com/ethereum-optimism/optimism/op-challenger/fault"
)
func PositionExampleOne() {
// Example 1
// abcdefgh
// abcdexyz
// go left to d, then right to f, then left to e
p := fault.Position{}
p.Print(3)
p = p.Attack()
p.Print(3)
p = p.Defend()
p.Print(3)
p = p.Attack()
p.Print(3)
}
func PositionExampleTwo() {
// Example 2
// abcdefgh
// abqrstuv
// go left to r, then left to b, then right to q
p := fault.Position{}
p.Print(3)
p = p.Attack()
p.Print(3)
p = p.Attack()
p.Print(3)
p = p.Defend()
p.Print(3)
}
package examples
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-challenger/fault"
)
func PrettyPrintAlphabetClaim(name string, claim fault.Claim) {
value := claim.Value
idx := value[30]
letter := value[31]
if claim.IsRoot() {
fmt.Printf("%s\ttrace %v letter %c\n", name, idx, letter)
} else {
fmt.Printf("%s\ttrace %v letter %c is attack %v\n", name, idx, letter, !claim.DefendsParent())
}
}
// SolverExampleOne uses the [fault.Solver] with a [fault.AlphabetProvider]
// to print out fault game traces for the "abcdexyz" counter-state.
func SolverExampleOne() {
fmt.Println("Solver: Example 1")
// Construct the fault position.
canonical := "abcdefgh"
disputed := "abcdexyz"
maxDepth := 3
// Root claim is z at trace index 7 from the disputed provider
root := fault.Claim{
ClaimData: fault.ClaimData{
Value: common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000077a"),
Position: fault.NewPosition(0, 0),
},
}
canonicalProvider := fault.NewAlphabetProvider(canonical, uint64(maxDepth))
disputedProvider := fault.NewAlphabetProvider(disputed, uint64(maxDepth))
// Create a solver with the canonical provider.
canonicalSolver := fault.NewSolver(maxDepth, canonicalProvider)
disputedSolver := fault.NewSolver(maxDepth, disputedProvider)
// Print the initial state.
fmt.Println("Canonical state: ", canonical)
fmt.Println("Disputed state: ", disputed)
fmt.Println()
fmt.Println("Proceeding with the following moves:")
fmt.Println("go left to d, then right to x (cannonical is f), then left to e")
fmt.Println()
PrettyPrintAlphabetClaim("Root claim", root)
claim1, err := canonicalSolver.NextMove(root)
if err != nil {
fmt.Printf("error getting claim from provider: %v", err)
}
PrettyPrintAlphabetClaim("Cannonical move", *claim1)
claim2, err := disputedSolver.NextMove(*claim1)
if err != nil {
fmt.Printf("error getting claim from provider: %v", err)
}
PrettyPrintAlphabetClaim("Disputed moved", *claim2)
claim3, err := canonicalSolver.NextMove(*claim2)
if err != nil {
fmt.Printf("error getting claim from provider: %v", err)
}
PrettyPrintAlphabetClaim("Cannonical move", *claim3)
}
package main
import (
"github.com/ethereum-optimism/optimism/op-challenger/fault/cmd/examples"
)
func main() {
examples.FullGame()
// examples.SolverExampleOne()
// examples.PositionExampleOne()
// examples.PositionExampleTwo()
}
......@@ -28,18 +28,14 @@ type Loader interface {
// loader pulls in fault dispute game claim data periodically and over subscriptions.
type loader struct {
log log.Logger
state Game
log log.Logger
claimFetcher ClaimFetcher
}
// NewLoader creates a new [loader].
func NewLoader(log log.Logger, state Game, claimFetcher ClaimFetcher) *loader {
func NewLoader(log log.Logger, claimFetcher ClaimFetcher) *loader {
return &loader{
log: log,
state: state,
claimFetcher: claimFetcher,
}
}
......@@ -60,6 +56,10 @@ func (l *loader) fetchClaim(ctx context.Context, arrIndex uint64) (Claim, error)
Value: fetchedClaim.Claim,
Position: NewPositionFromGIndex(fetchedClaim.Position.Uint64()),
},
Countered: fetchedClaim.Countered,
Clock: fetchedClaim.Clock.Uint64(),
ContractIndex: int(arrIndex),
ParentContractIndex: int(fetchedClaim.ParentIndex),
}
if !claim.IsRootPosition() {
......
......@@ -15,46 +15,8 @@ import (
var (
mockClaimDataError = fmt.Errorf("claim data errored")
mockClaimLenError = fmt.Errorf("claim len errored")
mockPutError = fmt.Errorf("put errored")
)
type mockGameState struct {
putCalled int
putErrors bool
}
func (m *mockGameState) Put(claim Claim) error {
m.putCalled++
if m.putErrors {
return mockPutError
}
return nil
}
func (m *mockGameState) PutAll(claims []Claim) error {
m.putCalled += len(claims)
if m.putErrors {
return mockPutError
}
return nil
}
func (m *mockGameState) Claims() []Claim {
return []Claim{}
}
func (m *mockGameState) IsDuplicate(claim Claim) bool {
return false
}
func (m *mockGameState) PreStateClaim(claim Claim) (Claim, error) {
panic("unimplemented")
}
func (m *mockGameState) PostStateClaim(claim Claim) (Claim, error) {
panic("unimplemented")
}
type mockClaimFetcher struct {
claimDataError bool
claimLenError bool
......@@ -78,16 +40,22 @@ func newMockClaimFetcher() *mockClaimFetcher {
Clock *big.Int
}{
{
Claim: [32]byte{0x00},
Position: big.NewInt(0),
Claim: [32]byte{0x00},
Position: big.NewInt(0),
Countered: false,
Clock: big.NewInt(0),
},
{
Claim: [32]byte{0x01},
Position: big.NewInt(0),
Claim: [32]byte{0x01},
Position: big.NewInt(0),
Countered: false,
Clock: big.NewInt(0),
},
{
Claim: [32]byte{0x02},
Position: big.NewInt(0),
Claim: [32]byte{0x02},
Position: big.NewInt(0),
Countered: false,
Clock: big.NewInt(0),
},
},
}
......@@ -126,7 +94,7 @@ func TestLoader_FetchClaims_Succeeds(t *testing.T) {
log := testlog.Logger(t, log.LvlError)
mockClaimFetcher := newMockClaimFetcher()
expectedClaims := mockClaimFetcher.returnClaims
loader := NewLoader(log, &mockGameState{}, mockClaimFetcher)
loader := NewLoader(log, mockClaimFetcher)
claims, err := loader.FetchClaims(context.Background())
require.NoError(t, err)
require.ElementsMatch(t, []Claim{
......@@ -139,6 +107,9 @@ func TestLoader_FetchClaims_Succeeds(t *testing.T) {
Value: expectedClaims[0].Claim,
Position: NewPositionFromGIndex(expectedClaims[0].Position.Uint64()),
},
Countered: false,
Clock: uint64(0),
ContractIndex: 0,
},
{
ClaimData: ClaimData{
......@@ -149,6 +120,9 @@ func TestLoader_FetchClaims_Succeeds(t *testing.T) {
Value: expectedClaims[0].Claim,
Position: NewPositionFromGIndex(expectedClaims[1].Position.Uint64()),
},
Countered: false,
Clock: uint64(0),
ContractIndex: 1,
},
{
ClaimData: ClaimData{
......@@ -159,6 +133,9 @@ func TestLoader_FetchClaims_Succeeds(t *testing.T) {
Value: expectedClaims[0].Claim,
Position: NewPositionFromGIndex(expectedClaims[2].Position.Uint64()),
},
Countered: false,
Clock: uint64(0),
ContractIndex: 2,
},
}, claims)
}
......@@ -169,7 +146,7 @@ func TestLoader_FetchClaims_ClaimDataErrors(t *testing.T) {
log := testlog.Logger(t, log.LvlError)
mockClaimFetcher := newMockClaimFetcher()
mockClaimFetcher.claimDataError = true
loader := NewLoader(log, &mockGameState{}, mockClaimFetcher)
loader := NewLoader(log, mockClaimFetcher)
claims, err := loader.FetchClaims(context.Background())
require.ErrorIs(t, err, mockClaimDataError)
require.Empty(t, claims)
......@@ -181,7 +158,7 @@ func TestLoader_FetchClaims_ClaimLenErrors(t *testing.T) {
log := testlog.Logger(t, log.LvlError)
mockClaimFetcher := newMockClaimFetcher()
mockClaimFetcher.claimLenError = true
loader := NewLoader(log, &mockGameState{}, mockClaimFetcher)
loader := NewLoader(log, mockClaimFetcher)
claims, err := loader.FetchClaims(context.Background())
require.ErrorIs(t, err, mockClaimLenError)
require.Empty(t, claims)
......
......@@ -22,10 +22,12 @@ type StepCallData struct {
// TraceProvider is a generic way to get a claim value at a specific
// step in the trace.
// The [AlphabetProvider] is a minimal implementation of this interface.
// Get(i) = Keccak256(GetPreimage(i))
// AbsolutePreState is the value of the trace that transitions to the trace value at index 0
type TraceProvider interface {
Get(i uint64) (common.Hash, error)
GetPreimage(i uint64) ([]byte, error)
AbsolutePreState() []byte
}
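
The `Get(i) = Keccak256(GetPreimage(i))` relationship documented above can be checked directly against the `AlphabetProvider`. A minimal sketch, assuming only that the op-challenger module resolves at the import path used elsewhere in this diff:

```go
package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-challenger/fault"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Depth-3 game over the trace "abcdefgh" (2^3 = 8 leaves).
	ap := fault.NewAlphabetProvider("abcdefgh", 3)

	for i := uint64(0); i < 8; i++ {
		pre, err := ap.GetPreimage(i)
		if err != nil {
			panic(err)
		}
		claim, err := ap.Get(i)
		if err != nil {
			panic(err)
		}
		// Each committed claim should be the keccak256 hash of its preimage.
		fmt.Printf("index %d: %v\n", i, claim == crypto.Keccak256Hash(pre))
	}
}
```

This is the same relationship the updated alphabet provider tests exercise via their `alphabetClaim` helper.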
// ClaimData is the core of a claim. It must be unique inside a specific game.
......@@ -47,7 +49,13 @@ func (c *ClaimData) ValueBytes() [32]byte {
// and the Parent field is empty & meaningless.
type Claim struct {
ClaimData
Parent ClaimData
// WARN: Countered is a mutable field in the FaultDisputeGame contract
// and we rely on it for determining whether to step on leaf claims.
// When caching is implemented for the Challenger, this will need
// to be changed/removed to avoid invalid/stale contract state.
Countered bool
Clock uint64
Parent ClaimData
// Location of the claim & it's parent inside the contract. Does not exist
// for claims that have not made it to the contract.
ContractIndex int
......