Commit 47b4ef92 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into aj/invalid-byte

parents eef2af0c dd59c06f
......@@ -56,7 +56,7 @@ jobs:
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
......@@ -83,7 +83,7 @@ jobs:
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
......@@ -110,7 +110,7 @@ jobs:
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
......@@ -137,7 +137,7 @@ jobs:
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
......@@ -164,7 +164,7 @@ jobs:
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
......@@ -191,7 +191,7 @@ jobs:
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
......@@ -229,7 +229,7 @@ jobs:
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
......
......@@ -81,7 +81,7 @@ jobs:
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
......@@ -118,7 +118,7 @@ jobs:
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
......@@ -145,7 +145,7 @@ jobs:
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
......@@ -172,7 +172,7 @@ jobs:
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
......@@ -199,7 +199,7 @@ jobs:
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
......@@ -226,7 +226,7 @@ jobs:
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
......@@ -253,7 +253,7 @@ jobs:
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
......
......@@ -65,7 +65,7 @@ cannon:
cannon-prestate: op-program cannon
./cannon/bin/cannon load-elf --path op-program/bin/op-program-client.elf --out op-program/bin/prestate.json --meta op-program/bin/meta.json
./cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input op-program/bin/prestate.json --meta op-program/bin/meta.json --proof-fmt 'op-program/bin/%d.json' --output /dev/null
./cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input op-program/bin/prestate.json --meta op-program/bin/meta.json --proof-fmt 'op-program/bin/%d.json' --output ""
mv op-program/bin/0.json op-program/bin/prestate-proof.json
mod-tidy:
......
......@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"os"
"path"
"github.com/ethereum-optimism/optimism/op-service/ioutil"
)
......@@ -27,19 +28,27 @@ func loadJSON[X any](inputPath string) (*X, error) {
return &state, nil
}
func writeJSON[X any](outputPath string, value X, outIfEmpty bool) error {
func writeJSON[X any](outputPath string, value X) error {
if outputPath == "" {
return nil
}
var out io.Writer
if outputPath != "" {
f, err := ioutil.OpenCompressed(outputPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
finish := func() error { return nil }
if outputPath != "-" {
// Write to a tmp file but preserve the file extension if present
tmpPath := outputPath + "-tmp" + path.Ext(outputPath)
f, err := ioutil.OpenCompressed(tmpPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
if err != nil {
return fmt.Errorf("failed to open output file: %w", err)
}
defer f.Close()
out = f
} else if outIfEmpty {
out = os.Stdout
finish = func() error {
// Rename the file into place as atomically as the OS will allow
return os.Rename(tmpPath, outputPath)
}
} else {
return nil
out = os.Stdout
}
enc := json.NewEncoder(out)
if err := enc.Encode(value); err != nil {
......@@ -49,5 +58,8 @@ func writeJSON[X any](outputPath string, value X, outIfEmpty bool) error {
if err != nil {
return fmt.Errorf("failed to append new-line: %w", err)
}
if err := finish(); err != nil {
return fmt.Errorf("failed to finish write: %w", err)
}
return nil
}
......@@ -13,7 +13,7 @@ func TestRoundTripJSON(t *testing.T) {
dir := t.TempDir()
file := filepath.Join(dir, "test.json")
data := &jsonTestData{A: "yay", B: 3}
err := writeJSON(file, data, false)
err := writeJSON(file, data)
require.NoError(t, err)
// Confirm the file is uncompressed
......@@ -32,7 +32,7 @@ func TestRoundTripJSONWithGzip(t *testing.T) {
dir := t.TempDir()
file := filepath.Join(dir, "test.json.gz")
data := &jsonTestData{A: "yay", B: 3}
err := writeJSON(file, data, false)
err := writeJSON(file, data)
require.NoError(t, err)
// Confirm the file isn't raw JSON
......
......@@ -24,7 +24,7 @@ var (
}
LoadELFOutFlag = &cli.PathFlag{
Name: "out",
Usage: "Output path to write JSON state to. State is dumped to stdout if set to empty string.",
Usage: "Output path to write JSON state to. State is dumped to stdout if set to -. Not written if empty.",
Value: "state.json",
Required: false,
}
......@@ -66,10 +66,10 @@ func LoadELF(ctx *cli.Context) error {
if err != nil {
return fmt.Errorf("failed to compute program metadata: %w", err)
}
if err := writeJSON[*mipsevm.Metadata](ctx.Path(LoadELFMetaFlag.Name), meta, false); err != nil {
if err := writeJSON[*mipsevm.Metadata](ctx.Path(LoadELFMetaFlag.Name), meta); err != nil {
return fmt.Errorf("failed to output metadata: %w", err)
}
return writeJSON[*mipsevm.State](ctx.Path(LoadELFOutFlag.Name), state, true)
return writeJSON[*mipsevm.State](ctx.Path(LoadELFOutFlag.Name), state)
}
var LoadELFCommand = &cli.Command{
......
......@@ -28,7 +28,7 @@ var (
}
RunOutputFlag = &cli.PathFlag{
Name: "output",
Usage: "path of output JSON state. Stdout if left empty.",
Usage: "path of output JSON state. Not written if empty, use - to write to Stdout.",
TakesFile: true,
Value: "out.json",
Required: false,
......@@ -42,7 +42,7 @@ var (
}
RunProofFmtFlag = &cli.StringFlag{
Name: "proof-fmt",
Usage: "format for proof data output file names. Proof data is written to stdout if empty.",
Usage: "format for proof data output file names. Proof data is written to stdout if -.",
Value: "proof-%d.json",
Required: false,
}
......@@ -66,7 +66,7 @@ var (
}
RunMetaFlag = &cli.PathFlag{
Name: "meta",
Usage: "path to metadata file for symbol lookup for enhanced debugging info durign execution.",
Usage: "path to metadata file for symbol lookup for enhanced debugging info during execution.",
Value: "meta.json",
Required: false,
}
......@@ -324,7 +324,7 @@ func Run(ctx *cli.Context) error {
}
if snapshotAt(state) {
if err := writeJSON(fmt.Sprintf(snapshotFmt, step), state, false); err != nil {
if err := writeJSON(fmt.Sprintf(snapshotFmt, step), state); err != nil {
return fmt.Errorf("failed to write state snapshot: %w", err)
}
}
......@@ -360,7 +360,7 @@ func Run(ctx *cli.Context) error {
proof.OracleValue = witness.PreimageValue
proof.OracleOffset = witness.PreimageOffset
}
if err := writeJSON(fmt.Sprintf(proofFmt, step), proof, true); err != nil {
if err := writeJSON(fmt.Sprintf(proofFmt, step), proof); err != nil {
return fmt.Errorf("failed to write proof data: %w", err)
}
} else {
......@@ -371,7 +371,7 @@ func Run(ctx *cli.Context) error {
}
}
if err := writeJSON(ctx.Path(RunOutputFlag.Name), state, true); err != nil {
if err := writeJSON(ctx.Path(RunOutputFlag.Name), state); err != nil {
return fmt.Errorf("failed to write state output: %w", err)
}
return nil
......
......@@ -84,6 +84,27 @@ func TestCalculateNextActions(t *testing.T) {
lastHonestClaim.Attack(common.Hash{0xdd}).ExpectStepAttack()
},
},
{
name: "PoisonedPreState",
agreeWithOutputRoot: true,
setupGame: func(builder *faulttest.GameBuilder) {
// A claim hash that has no pre-image
maliciousStateHash := common.Hash{0x01, 0xaa}
// Dishonest actor counters their own claims to set up a situation with an invalid prestate
// The honest actor should attack all claims that support the root claim (disagree with the output root)
builder.Seq().ExpectAttack(). // This expected action is the winning move.
Attack(maliciousStateHash).
Defend(maliciousStateHash).ExpectAttack().
Attack(maliciousStateHash).
Attack(maliciousStateHash).ExpectStepAttack()
// The attempt to step against our malicious leaf node will fail because the pre-state won't match our
// malicious state hash. However, it is the very first expected action, attacking the root claim with
// the correct hash that wins the game since it will be the left-most uncountered claim.
},
},
}
for _, test := range tests {
......@@ -93,7 +114,8 @@ func TestCalculateNextActions(t *testing.T) {
test.setupGame(builder)
game := builder.Game
for i, claim := range game.Claims() {
t.Logf("Claim %v: Pos: %v ParentIdx: %v, Countered: %v, Value: %v", i, claim.Position.ToGIndex(), claim.ParentContractIndex, claim.Countered, claim.Value)
t.Logf("Claim %v: Pos: %v TraceIdx: %v ParentIdx: %v, Countered: %v, Value: %v",
i, claim.Position.ToGIndex(), claim.Position.TraceIndex(maxDepth), claim.ParentContractIndex, claim.Countered, claim.Value)
}
solver := NewGameSolver(maxDepth, claimBuilder.CorrectTraceProvider())
......
......@@ -22,7 +22,8 @@ import (
)
const (
proofsDir = "proofs"
proofsDir = "proofs"
diskStateCache = "state.json.gz"
)
type proofData struct {
......@@ -142,6 +143,20 @@ func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofDa
// If the requested index is after the last step in the actual trace, extend the final no-op step
return p.lastProof, nil
}
// Attempt to read the last step from disk cache
if p.lastProof == nil && p.lastStep == 0 {
step, err := ReadLastStep(p.dir)
if err != nil {
p.logger.Warn("Failed to read last step from disk cache", "err", err)
} else {
p.lastStep = step
// If the last step is tracked, set i to the last step
// to read the correct proof from disk.
if i > p.lastStep {
i = step
}
}
}
path := filepath.Join(p.dir, proofsDir, fmt.Sprintf("%d.json.gz", i))
file, err := ioutil.OpenDecompressed(path)
if errors.Is(err, os.ErrNotExist) {
......@@ -168,6 +183,9 @@ func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofDa
if err != nil {
return nil, fmt.Errorf("cannot hash witness: %w", err)
}
if err := WriteLastStep(p.dir, state.Step); err != nil {
p.logger.Warn("Failed to write last step to disk cache", "step", p.lastStep)
}
proof := &proofData{
ClaimValue: witnessHash,
StateData: hexutil.Bytes(witness),
......@@ -194,3 +212,28 @@ func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofDa
}
return &proof, nil
}
type diskStateCacheObj struct {
Step uint64 `json:"step"`
}
// ReadLastStep reads the tracked last step from disk.
func ReadLastStep(dir string) (uint64, error) {
state := diskStateCacheObj{}
file, err := ioutil.OpenDecompressed(filepath.Join(dir, diskStateCache))
if err != nil {
return 0, err
}
defer file.Close()
err = json.NewDecoder(file).Decode(&state)
if err != nil {
return 0, err
}
return state.Step, nil
}
// WriteLastStep writes the last step to disk as a persistent cache.
func WriteLastStep(dir string, step uint64) error {
state := diskStateCacheObj{Step: step}
return ioutil.WriteCompressedJson(filepath.Join(dir, diskStateCache), state)
}
......@@ -65,8 +65,8 @@ func TestGet(t *testing.T) {
}
func TestGetStepData(t *testing.T) {
dataDir, prestate := setupTestData(t)
t.Run("ExistingProof", func(t *testing.T) {
dataDir, prestate := setupTestData(t)
provider, generator := setupWithTestData(t, dataDir, prestate)
value, proof, data, err := provider.GetStepData(context.Background(), 0)
require.NoError(t, err)
......@@ -80,6 +80,7 @@ func TestGetStepData(t *testing.T) {
})
t.Run("GenerateProof", func(t *testing.T) {
dataDir, prestate := setupTestData(t)
provider, generator := setupWithTestData(t, dataDir, prestate)
generator.finalState = &mipsevm.State{
Memory: &mipsevm.Memory{},
......@@ -105,6 +106,7 @@ func TestGetStepData(t *testing.T) {
})
t.Run("ProofAfterEndOfTrace", func(t *testing.T) {
dataDir, prestate := setupTestData(t)
provider, generator := setupWithTestData(t, dataDir, prestate)
generator.finalState = &mipsevm.State{
Memory: &mipsevm.Memory{},
......@@ -129,7 +131,52 @@ func TestGetStepData(t *testing.T) {
require.Nil(t, data)
})
t.Run("ReadLastStepFromDisk", func(t *testing.T) {
dataDir, prestate := setupTestData(t)
provider, initGenerator := setupWithTestData(t, dataDir, prestate)
initGenerator.finalState = &mipsevm.State{
Memory: &mipsevm.Memory{},
Step: 10,
Exited: true,
}
initGenerator.proof = &proofData{
ClaimValue: common.Hash{0xaa},
StateData: []byte{0xbb},
ProofData: []byte{0xcc},
OracleKey: common.Hash{0xdd}.Bytes(),
OracleValue: []byte{0xdd},
OracleOffset: 10,
}
_, _, _, err := provider.GetStepData(context.Background(), 7000)
require.NoError(t, err)
require.Contains(t, initGenerator.generated, 7000, "should have tried to generate the proof")
provider, generator := setupWithTestData(t, dataDir, prestate)
generator.finalState = &mipsevm.State{
Memory: &mipsevm.Memory{},
Step: 10,
Exited: true,
}
generator.proof = &proofData{
ClaimValue: common.Hash{0xaa},
StateData: []byte{0xbb},
ProofData: []byte{0xcc},
OracleKey: common.Hash{0xdd}.Bytes(),
OracleValue: []byte{0xdd},
OracleOffset: 10,
}
preimage, proof, data, err := provider.GetStepData(context.Background(), 7000)
require.NoError(t, err)
require.Contains(t, generator.generated, 10, "should have tried to generate the proof")
witness := generator.finalState.EncodeWitness()
require.EqualValues(t, witness, preimage)
require.Equal(t, []byte{}, proof)
require.Nil(t, data)
})
t.Run("MissingStateData", func(t *testing.T) {
dataDir, prestate := setupTestData(t)
provider, generator := setupWithTestData(t, dataDir, prestate)
_, _, _, err := provider.GetStepData(context.Background(), 1)
require.ErrorContains(t, err, "missing state data")
......@@ -137,6 +184,7 @@ func TestGetStepData(t *testing.T) {
})
t.Run("IgnoreUnknownFields", func(t *testing.T) {
dataDir, prestate := setupTestData(t)
provider, generator := setupWithTestData(t, dataDir, prestate)
value, proof, data, err := provider.GetStepData(context.Background(), 2)
require.NoError(t, err)
......
......@@ -2,6 +2,7 @@ package ioutil
import (
"compress/gzip"
"encoding/json"
"fmt"
"io"
"os"
......@@ -38,6 +39,20 @@ func OpenCompressed(file string, flag int, perm os.FileMode) (io.WriteCloser, er
return out, nil
}
// WriteCompressedJson writes the object to the specified file as a gzip-compressed JSON object.
// The file name must have a .gz extension; otherwise an error is returned.
func WriteCompressedJson(file string, obj any) error {
if !IsGzip(file) {
return fmt.Errorf("file %v does not have .gz extension", file)
}
out, err := OpenCompressed(file, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return err
}
defer out.Close()
return json.NewEncoder(out).Encode(obj)
}
// IsGzip determines if a path points to a gzip compressed file.
// Returns true when the file has a .gz extension.
func IsGzip(path string) bool {
......
package ioutil
import (
"encoding/json"
"io"
"os"
"path/filepath"
......@@ -47,3 +48,43 @@ func TestReadWriteWithOptionalCompression(t *testing.T) {
})
}
}
func TestWriteReadCompressedJson(t *testing.T) {
tests := []struct {
name string
filename string
err string
}{
{"Uncompressed", "test.notgz", "does not have .gz extension"},
{"Gzipped", "test.gz", ""},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
dir := t.TempDir()
path := filepath.Join(dir, test.filename)
err := WriteCompressedJson(path, struct {
A int
B string
}{A: 1, B: "test"})
if test.err != "" {
require.ErrorContains(t, err, test.err)
return
}
require.NoError(t, err)
var read struct {
A int
B string
}
in, err := OpenDecompressed(path)
require.NoError(t, err)
err = json.NewDecoder(in).Decode(&read)
require.NoError(t, err)
require.Equal(t, struct {
A int
B string
}{A: 1, B: "test"}, read)
})
}
}
FROM ethereum/client-go:v1.12.2
FROM ethereum/client-go:v1.13.0
RUN apk add --no-cache jq
......
......@@ -54,7 +54,7 @@
"mocha": "^10.2.0",
"nyc": "^15.1.0",
"ts-node": "^10.9.1",
"typedoc": "^0.24.8",
"typedoc": "^0.25.1",
"typescript": "^5.1.6",
"viem": "^1.6.0",
"vitest": "^0.34.2",
......
......@@ -50,7 +50,7 @@ importers:
version: 1.4.3
doctoc:
specifier: ^2.2.0
version: 2.2.0
version: 2.2.1
eslint:
specifier: ^8.43.0
version: 8.47.0
......@@ -508,8 +508,8 @@ importers:
specifier: ^10.9.1
version: 10.9.1(@types/node@20.5.0)(typescript@5.1.6)
typedoc:
specifier: ^0.24.8
version: 0.24.8(typescript@5.1.6)
specifier: ^0.25.1
version: 0.25.1(typescript@5.1.6)
typescript:
specifier: ^5.1.6
version: 5.1.6
......@@ -5394,10 +5394,10 @@ packages:
json-schema-traverse: 0.4.1
uri-js: 4.4.1
/anchor-markdown-header@0.5.7:
resolution: {integrity: sha512-AmikqcK15r3q99hPvTa1na9n3eLkW0uE+RL9BZMSgwYalQeDnNXbYrN06BIcBPfGlmsGIE2jvkuvl/x0hyPF5Q==}
/anchor-markdown-header@0.6.0:
resolution: {integrity: sha512-v7HJMtE1X7wTpNFseRhxsY/pivP4uAJbidVhPT+yhz4i/vV1+qx371IXuV9V7bN6KjFtheLJxqaSm0Y/8neJTA==}
dependencies:
emoji-regex: 6.1.3
emoji-regex: 10.1.0
dev: true
/ansi-colors@4.1.1:
......@@ -6921,14 +6921,14 @@ packages:
dependencies:
path-type: 4.0.0
/doctoc@2.2.0:
resolution: {integrity: sha512-PtiyaS+S3kcMbpx6x2V0S+PeDKisxmjEFnZsuYkkj4Lh3ObozJuuYh9dM4+sX02Ouuty8RF2LOCnIbpu/hWy/A==}
/doctoc@2.2.1:
resolution: {integrity: sha512-qNJ1gsuo7hH40vlXTVVrADm6pdg30bns/Mo7Nv1SxuXSM1bwF9b4xQ40a6EFT/L1cI+Yylbyi8MPI4G4y7XJzQ==}
hasBin: true
dependencies:
'@textlint/markdown-to-ast': 12.2.1
anchor-markdown-header: 0.5.7
anchor-markdown-header: 0.6.0
htmlparser2: 7.2.0
minimist: 1.2.6
minimist: 1.2.8
underscore: 1.13.4
update-section: 0.3.3
transitivePeerDependencies:
......@@ -7068,8 +7068,8 @@ packages:
engines: {node: '>=12'}
dev: true
/emoji-regex@6.1.3:
resolution: {integrity: sha512-73/zxHTjP2N2FQf0J5ngNjxP9LqG2krUshxYaowI8HxZQsiL2pYJc3k9/O93fc5/lCSkZv+bQ5Esk6k6msiSvg==}
/emoji-regex@10.1.0:
resolution: {integrity: sha512-xAEnNCT3w2Tg6MA7ly6QqYJvEoY1tm9iIjJ3yMKK9JPlWuRHAMoe5iETwQnx3M9TVbFMfsrBgWKR+IsmswwNjg==}
dev: true
/emoji-regex@8.0.0:
......@@ -11051,10 +11051,6 @@ packages:
kind-of: 6.0.3
dev: false
/minimist@1.2.6:
resolution: {integrity: sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==}
dev: true
/minimist@1.2.8:
resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
......@@ -14240,12 +14236,12 @@ packages:
dependencies:
is-typedarray: 1.0.0
/typedoc@0.24.8(typescript@5.1.6):
resolution: {integrity: sha512-ahJ6Cpcvxwaxfu4KtjA8qZNqS43wYt6JL27wYiIgl1vd38WW/KWX11YuAeZhuz9v+ttrutSsgK+XO1CjL1kA3w==}
engines: {node: '>= 14.14'}
/typedoc@0.25.1(typescript@5.1.6):
resolution: {integrity: sha512-c2ye3YUtGIadxN2O6YwPEXgrZcvhlZ6HlhWZ8jQRNzwLPn2ylhdGqdR8HbyDRyALP8J6lmSANILCkkIdNPFxqA==}
engines: {node: '>= 16'}
hasBin: true
peerDependencies:
typescript: 4.6.x || 4.7.x || 4.8.x || 4.9.x || 5.0.x || 5.1.x
typescript: 4.6.x || 4.7.x || 4.8.x || 4.9.x || 5.0.x || 5.1.x || 5.2.x
dependencies:
lunr: 2.3.9
marked: 4.3.0
......
......@@ -15,7 +15,7 @@ that maintains 1:1 compatibility with Ethereum.
- [Rollup Node](rollup-node.md)
- [Rollup Node P2p](rollup-node-p2p.md)
- [L2 Chain Derivation](derivation.md)
- [Network Upgrades](network-upgrades.md)
- [Superchain Upgrades](superchain-upgrades.md)
- [System Config](system_config.md)
- [Batch Submitter](batcher.md)
- [Guaranteed Gas Market](guaranteed-gas-market.md)
......
......@@ -16,6 +16,7 @@
- [Extended PayloadAttributesV1](#extended-payloadattributesv1)
- [`engine_newPayloadV1`](#engine_newpayloadv1)
- [`engine_getPayloadV1`](#engine_getpayloadv1)
- [`engine_signalSuperchainV1`](#engine_signalsuperchainv1)
- [Networking](#networking)
- [Sync](#sync)
- [Happy-path sync](#happy-path-sync)
......@@ -198,6 +199,37 @@ Applies a L2 block to the engine state.
No modifications to [`engine_getPayloadV1`][engine_getPayloadV1].
Retrieves a payload by ID, prepared by `engine_forkchoiceUpdatedV1` when called with `payloadAttributes`.
### `engine_signalSuperchainV1`
Optional extension to the Engine API. Signals superchain information to the Engine:
V1 signals which protocol version is recommended and required.
Types:
```javascript
SuperchainSignal: {
recommended: ProtocolVersion
required: ProtocolVersion
}
```
`ProtocolVersion`: encoded for RPC as defined in
[Protocol Version format specification](./superchain-upgrades.md#protocol-version-format).
Parameters:
- `signal`: `SuperchainSignal`, the signaled superchain information.
Returns:
- `ProtocolVersion`: the latest supported OP-Stack protocol version of the execution engine.
The execution engine SHOULD warn the user when the recommended version is newer than
the current version supported by the execution engine.
The execution engine SHOULD take safety precautions if it does not meet the required protocol version.
This may include halting the engine, with consent of the execution engine operator.
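As a non-normative illustration, the sketch below shows how a caller might invoke this method over JSON-RPC using go-ethereum's generic `rpc` client. The endpoint URL, the omission of JWT authentication, and the hex-string encoding of `ProtocolVersion` are assumptions made purely for illustration; only the method name and the `SuperchainSignal` shape come from this section.

```go
// Sketch only: signal superchain protocol versions to an execution engine.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/ethereum/go-ethereum/rpc"
)

// superchainSignal mirrors the SuperchainSignal type defined above.
type superchainSignal struct {
	Recommended string `json:"recommended"` // ProtocolVersion, hex-encoded (assumed encoding)
	Required    string `json:"required"`    // ProtocolVersion, hex-encoded (assumed encoding)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Assumed local Engine API endpoint; JWT authentication is omitted for brevity.
	client, err := rpc.DialContext(ctx, "http://localhost:8551")
	if err != nil {
		log.Fatalf("dial engine API: %v", err)
	}
	defer client.Close()

	signal := superchainSignal{
		Recommended: "0x0000000000000000000000000000000000000000000000000000000000000000",
		Required:    "0x0000000000000000000000000000000000000000000000000000000000000000",
	}

	// The engine responds with its own latest supported protocol version.
	var engineVersion string
	if err := client.CallContext(ctx, &engineVersion, "engine_signalSuperchainV1", signal); err != nil {
		log.Fatalf("engine_signalSuperchainV1: %v", err)
	}
	fmt.Println("engine supports protocol version:", engineVersion)
}
```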
## Networking
The execution engine can acquire all data through the rollup node, as derived from L1:
......
......@@ -7,6 +7,7 @@
- [Overview](#overview)
- [Definitions](#definitions)
- [Virtual Machine (VM)](#virtual-machine-vm)
- [PreimageOracle](#preimageoracle)
- [Execution Trace](#execution-trace)
- [Claims](#claims)
- [DAG](#dag)
......@@ -20,6 +21,7 @@
- [Defend](#defend)
- [Step](#step)
- [Step Types](#step-types)
- [PreimageOracle Interaction](#preimageoracle-interaction)
- [Team Dynamics](#team-dynamics)
- [Game Clock](#game-clock)
- [Resolution](#resolution)
......@@ -44,7 +46,7 @@ claims made that are disputed and which aren't, to determine the winners of the
### Virtual Machine (VM)
This is a state transition function (STF) that takes a _pre-state_ and computes the post-state.
The VM may reference external data during the STF and as such, it also accepts a _proof_ of this data.
The VM may access data referenced during the STF and as such, it also accepts a _proof_ of this data.
Typically, the pre-state contains a commitment to the _proof_ to verify the integrity of the data referenced.
Mathematically, we define the STF as $VM(S_i,P_i)$ where
......@@ -52,6 +54,12 @@ Mathemtically, we define the STF as $VM(S_i,P_i)$ where
- $S_i$ is the pre-state
- $P_i$ is an optional proof needed for the transition from $S_i$ to $S_{i+1}$.
### PreimageOracle
This is a pre-image data store. It is often used by VMs to read external data during the STF.
Before successfully executing a VM STF, it may be necessary to preload the PreimageOracle with pertinent data.
The method for key-based retrieval of these pre-images varies according to the specific VM.
### Execution Trace
An execution trace $T$ is a sequence $(S_0,S_1,S_2,...,S_n)$ where each $S_i$ is a VM state and
......@@ -62,7 +70,7 @@ We refer to this state as the **ABSOLUTE\_PRESTATE**.
### Claims
Claims assert an execution trace. This is represented as `ClaimHash`, a `bytes32` commitment to
the last VM state in the trace. A FDG is initialized with a root claim, which commits to the entire
the last VM state in a trace. A FDG is initialized with a root claim, which commits to the entire
execution trace. As we'll see later, there can be multiple claims, committing to different states in the FDG.
### DAG
......@@ -97,6 +105,7 @@ Positions higher up the game tree also cover the deepest, right-most positions r
We refer to this coverage as the **trace index** of a Position.
> This means claims commit to an execution trace that terminates at the same index as their Position's trace index.
> That is, for a given trace index $n$, its ClaimHash corresponds to state $S_n$ in the trace.
Note that there can be multiple positions covering the same _trace index_.
......@@ -198,6 +207,22 @@ Players interface with `step` by providing an indicator of attack and state data
that corresponds to the expected pre/post state (depending on whether it's an attack or defend).
The FDG will assert that an existing claim commits to the state data provided by players.
### PreimageOracle Interaction
Certain steps (VM state transitions) require external data to be available via the `PreimageOracle`.
To ensure a successful state transition, players should provide this data in advance.
The FDG provides the following interface to manage data loaded to the `PreimageOracle`:
```solidity
/// @notice Posts the requested local data to the VM's `PreimageOracle`.
/// @param _ident The local identifier of the data to post.
/// @param _partOffset The offset of the data to post.
function addLocalData(uint256 _ident, uint256 _partOffset) external;
```
The `addLocalData` function loads parts of a pre-image into the VM's `PreimageOracle`.
Players use this to ensure pre-image parts are available to the VM during a step.
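For illustration only, a minimal sketch of preparing calldata for `addLocalData` with go-ethereum's `abi` package follows. The ABI fragment is derived from the interface above; the identifier and offset values are placeholders, and transaction submission is out of scope here.

```go
// Sketch only: ABI-encode a call to addLocalData ahead of an on-chain step.
package main

import (
	"fmt"
	"log"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

// Minimal ABI fragment for the addLocalData function shown above.
const fdgABI = `[{"type":"function","name":"addLocalData","inputs":[
	{"name":"_ident","type":"uint256"},{"name":"_partOffset","type":"uint256"}],"outputs":[]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(fdgABI))
	if err != nil {
		log.Fatalf("parse ABI: %v", err)
	}
	// Placeholder values: local identifier 1, part offset 0.
	calldata, err := parsed.Pack("addLocalData", big.NewInt(1), big.NewInt(0))
	if err != nil {
		log.Fatalf("pack calldata: %v", err)
	}
	fmt.Printf("addLocalData calldata: 0x%x\n", calldata)
}
```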
### Team Dynamics
Challengers seek to dispute the root claim, while Defenders aim to support it.
......
# Honest Challenger (Fault Dispute Game)
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**
- [Overview](#overview)
- [FDG Responses](#fdg-responses)
- [Root Claims](#root-claims)
- [Counter Claims](#counter-claims)
- [Steps](#steps)
- [Resolution](#resolution)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
The honest challenger is an agent interacting in the [Fault Dispute Game](./fault-dispute-game.md)
(FDG) that supports honest claims and disputes false claims.
An honest challenger strives to ensure a correct, truthful, game resolution.
The honest challenger is also _rational_, as any deviation from this behavior results in
negative outcomes.
This document specifies the expected behavior of an honest challenger.
## Overview
The Honest Challenger has two primary duties:
1. Support valid root claims in Fault Dispute Games.
2. Dispute invalid root claims in Fault Dispute Games.
The honest challenger polls the `DisputeGameFactory` contract for new and on-going Fault
Dispute Games.
For verifying the legitimacy of claims, it relies on a synced, trusted rollup node
as well as a trace provider (ex: [Cannon](./cannon-fault-proof-vm.md)).
The trace provider must be configured with the [ABSOLUTE_PRESTATE](./fault-dispute-game.md#execution-trace)
of the FDG being interacted with to generate the traces needed to make truthful claims.
## FDG Responses
### Root Claims
When a `FaultDisputeGame` is created, the honest challenger has two possible correct responses
to its root claim:
1. [**Attack**](./fault-dispute-game.md#attack) if they disagree with the root claim.
The root claim commits to the entire execution trace, so the first move here is to
attack with the [ClaimHash](./fault-dispute-game.md#claims) at the midpoint
instruction within their execution trace.
2. **Do Nothing** if they agree with the root claim. They do nothing because if the root
claim is left un-countered, the game resolves to their agreement.
NOTE: The honest challenger will still track this game in order to defend any subsequent
claims made against the root claim - in effect, "playing the game".
### Counter Claims
For every claim made in a dispute game with a [game tree](./fault-dispute-game.md#game-tree)
depth in the range of `[1, MAX_DEPTH]`, the honest challenger processes it and issues a response.
To determine the appropriate response, the challenger first needs to know which
[_team_](./fault-dispute-game.md#team-dynamics) it belongs to.
This determines the set of claims it should respond to in the FDG.
If the agent determines itself to be a Defender, which aims to support the root claim,
then it must dispute claims positioned at odd depths in the game tree.
Otherwise, it disputes claims positioned at even depths in the game tree.
This means an honest challenger only responds to claims made by the opposing team.
The next step is to determine whether the claim has a valid commitment (i.e. `ClaimHash`).
If the `ClaimHash` matches the honest challenger's at the same trace index, then we
disagree with the claim's stance by moving to [defend](./fault-dispute-game.md#defend).
Otherwise, the claim is [attacked](./fault-dispute-game.md#attack).
The following pseudocode illustrates the response logic.
```python
class Team(Enum):
DEFENDER = 0
CHALLENGER = 1
class Claim:
position: uint64
claim_hash: ClaimHash
MAX_TRACE = 2**MAX_GAME_DEPTH
def respond(claim: Claim, chal: Team, chal_trace: List[ClaimHash, MAX_TRACE]):
if depth(claim.position) % 2 != chal.value:
if chal_trace[trace_index(claim.position)] == claim.claim_hash:
defend()
else:
attack()
else: pass # no response
```
In attack or defense, the honest challenger submits a `ClaimHash` corresponding to the
state identified by the trace index of their response position.
The honest challenger responds to claims as soon as possible to prevent the clock of its
counter-claim from expiring.
### Steps
At the max depth of the game, claims represent commitments to the state of the fault proof VM
at a single instruction step interval.
Because the game can no longer bisect further, when the honest challenger has a valid move
against these claims (valid defined by the response in [Counter Claims](#counter-claims)),
the only option for an honest challenger is to execute a VM step on-chain to disprove the claim at `MAX_GAME_DEPTH`.
Similar to the above section, the honest challenger issues an
[attack step](./fault-dispute-game.md#step-types) in response to such claims with
invalid `ClaimHash` commitments. Otherwise, it issues a _defense step_.
## Resolution
When the [chess clock](./fault-dispute-game.md#game-clock) of a `FaultDisputeGame` team
runs out, the game can be resolved.
The honest challenger does this by calling the `resolve` function on the
`FaultDisputeGame` contract.
The `FaultDisputeGame` does not put a time cap on resolution - because of the liveness
assumption on honest challengers and the bonds attached to the claims they’ve countered,
challengers are economically incentivized to resolve the game promptly to capture the bonds.
# Network Upgrades
Network upgrades, also known as forks or hardforks, implement consensus-breaking changes.
These changes are activated deterministically across all nodes through an activation rule.
This document lists the network upgrades of the OP Stack, starting after the Bedrock upgrade.
Prospective upgrades may be listed as proposals, but are not governed through these specifications.
Activation rule parameters of network upgrades are configured in respective chain configurations,
and not part of this specification.
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**
- [Activation rules](#activation-rules)
- [L2 Block-number based activation](#l2-block-number-based-activation)
- [L2 Block-timestamp based activation](#l2-block-timestamp-based-activation)
- [Post-Bedrock Network upgrades](#post-bedrock-network-upgrades)
- [Regolith](#regolith)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Activation rules
The below L2-block based activation rules may be applied in two contexts:
- The rollup node, specified through the rollup configuration (known as `rollup.json`),
referencing L2 blocks (or block input-attributes) that pass through the derivation pipeline.
- The execution engine, specified through the chain configuration (known as the `config` part of `genesis.json`),
referencing blocks or input-attributes that are part of, or applied to, the L2 chain.
### L2 Block-number based activation
Activation rule: `upgradeNumber != null && block.number >= upgradeNumber`
Starting at, and including, the L2 `block` with `block.number == upgradeNumber`, the upgrade rules apply.
If the upgrade block-number `upgradeNumber` is not specified in the configuration, the upgrade is ignored.
This applies to the L2 block number, not to the L1-origin block number.
This means that an L2 upgrade may be inactive, and then active, without changing the L1-origin.
This block number based method has commonly been used in L1 up until the Bellatrix/Paris upgrade, a.k.a. The Merge,
which was upgraded through special rules.
### L2 Block-timestamp based activation
Activation rule: `upgradeTime != null && block.timestamp >= upgradeTime`
Starting at, and including, the L2 `block` with `block.timestamp == upgradeTime`, the upgrade rules apply.
If the upgrade block-timestamp `upgradeTime` is not specified in the configuration, the upgrade is ignored.
This applies to the L2 block timestamp, not to the L1-origin block timestamp.
This means that an L2 upgrade may be inactive, and then active, without changing the L1-origin.
This timestamp based method has become the default on L1 after the Bellatrix/Paris upgrade, a.k.a. The Merge,
because it can be planned in accordance with beacon-chain epochs and slots.
Note that the L2 version is not limited to timestamps that match L1 beacon-chain slots or epochs.
A timestamp may be chosen to be synchronous with a specific slot or epoch on L1,
but the matching L1-origin information may not be present at the time of activation on L2.
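To make the two rules concrete, here is a small, hypothetical Go sketch of the activation checks. The `Config` field names are illustrative; nil pointers model an unspecified (`null`) upgrade parameter, in which case the upgrade is ignored.

```go
// Sketch only: the two activation rules described above.
package upgrades

// Config holds illustrative upgrade activation parameters.
type Config struct {
	UpgradeNumber *uint64 // L2 block-number based activation (nil = not specified)
	UpgradeTime   *uint64 // L2 block-timestamp based activation (nil = not specified)
}

// IsActivatedByNumber reports whether the upgrade applies to the L2 block
// with the given number.
func (c *Config) IsActivatedByNumber(blockNumber uint64) bool {
	return c.UpgradeNumber != nil && blockNumber >= *c.UpgradeNumber
}

// IsActivatedByTimestamp reports whether the upgrade applies to the L2 block
// with the given timestamp.
func (c *Config) IsActivatedByTimestamp(timestamp uint64) bool {
	return c.UpgradeTime != nil && timestamp >= *c.UpgradeTime
}
```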
## Post-Bedrock Network upgrades
### Regolith
The Regolith upgrade, named after a material best described as "deposited dust on top of a layer of bedrock",
implements minor changes to deposit processing, based on reports of the Sherlock Audit-contest and findings in
the Bedrock Optimism Goerli testnet.
Summary of changes:
- The `isSystemTx` boolean is disabled, system transactions now use the same gas accounting rules as regular deposits.
- The actual deposit gas-usage is recorded in the receipt of the deposit transaction,
and subtracted from the L2 block gas-pool.
Unused gas of deposits is not refunded with ETH however, as it is burned on L1.
- The `nonce` value of the deposit sender account, before the transaction state-transition, is recorded in a new
optional field (`depositNonce`), extending the transaction receipt (i.e. not present in pre-Regolith receipts).
- The recorded deposit `nonce` is used to correct the transaction and receipt metadata in RPC responses,
including the `contractAddress` field of deposits that deploy contracts.
- The `gas` and `depositNonce` data is committed to as part of the consensus-representation of the receipt,
enabling the data to be safely synced between independent L2 nodes.
- The L1-cost function was corrected to more closely match pre-Bedrock behavior.
The [deposit specification](./deposits.md) specifies the deposit changes of the Regolith upgrade in more detail.
The [execution engine specification](./exec-engine.md) specifies the L1 cost function difference.
The Regolith upgrade uses a *L2 block-timestamp* activation-rule, and is specified in both the
rollup-node (`regolith_time`) and execution engine (`config.regolithTime`).
......@@ -24,6 +24,7 @@ currently only concerned with the specification of the rollup driver.
- [Derivation](#derivation)
- [L2 Output RPC method](#l2-output-rpc-method)
- [Output Method API](#output-method-api)
- [Protocol Version tracking](#protocol-version-tracking)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
......@@ -72,3 +73,20 @@ The input and return types here are as defined by the [engine API specs][engine-
- returns:
1. `version`: `DATA`, 32 Bytes - the output root version number, beginning with 0.
1. `l2OutputRoot`: `DATA`, 32 Bytes - the output root.
## Protocol Version tracking
The rollup node should track the recommended and required protocol versions by monitoring
the Protocol Version contract on L1, as specified in the [Superchain Version Signaling specifications].
[Superchain Version Signaling specifications]: ./superchain-upgrades.md#superchain-version-signaling
This can be implemented through polling in the [Driver](#driver) loop.
After polling the Protocol Version, the rollup node SHOULD communicate it with the execution-engine through an
[`engine_signalSuperchainV1`](./exec-engine.md#enginesignalsuperchainv1) call.
The rollup node SHOULD warn the user when the recommended version is newer than
the current version supported by the rollup node.
The rollup node SHOULD take safety precautions if it does not meet the required protocol version.
This may include halting the engine, with consent of the rollup node operator.
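A hedged sketch of how this polling could look inside the driver loop follows. The `VersionSource` and `Engine` interfaces, the polling interval, and the version comparison are illustrative stand-ins, not the actual rollup-node APIs.

```go
// Sketch only: poll protocol versions from L1 and forward them to the engine.
package driver

import (
	"context"
	"log"
	"time"
)

// ProtocolVersion stands in for the bytes32 encoding described in the
// superchain-upgrades specification.
type ProtocolVersion [32]byte

// VersionSource is an assumed abstraction over the L1 Protocol Version contract.
type VersionSource interface {
	RecommendedAndRequired(ctx context.Context) (recommended, required ProtocolVersion, err error)
}

// Engine is an assumed abstraction over engine_signalSuperchainV1.
type Engine interface {
	SignalSuperchain(ctx context.Context, recommended, required ProtocolVersion) (ProtocolVersion, error)
}

func trackProtocolVersions(ctx context.Context, src VersionSource, eng Engine, supported ProtocolVersion) {
	ticker := time.NewTicker(time.Minute) // polling interval is an assumption
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			recommended, required, err := src.RecommendedAndRequired(ctx)
			if err != nil {
				log.Printf("failed to poll protocol versions: %v", err)
				continue
			}
			if _, err := eng.SignalSuperchain(ctx, recommended, required); err != nil {
				log.Printf("failed to signal superchain versions to engine: %v", err)
			}
			// Byte-wise comparison is a placeholder; a real implementation would
			// compare decoded version components and also act on `required`.
			if recommended != supported {
				log.Printf("protocol version update available; node supports a different version")
			}
		}
	}
}
```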