Commit e6103778 authored by Tarun Khasnavis, committed by GitHub

Merge branch 'ethereum-optimism:develop' into develop

parents 51bc29dd a762fa74
@@ -85,13 +85,16 @@ nuke: clean devnet-clean
 	git clean -Xdf
 .PHONY: nuke
 
-devnet-up:
+pre-devnet:
 	@if ! [ -x "$(command -v geth)" ]; then \
 		make install-geth; \
 	fi
 	@if [ ! -e op-program/bin ]; then \
 		make cannon-prestate; \
 	fi
+.PHONY: pre-devnet
+
+devnet-up: pre-devnet
 	./ops/scripts/newer-file.sh .devnet/allocs-l1.json ./packages/contracts-bedrock \
 		|| make devnet-allocs
 	PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=.
@@ -100,7 +103,7 @@ devnet-up:
 # alias for devnet-up
 devnet-up-deploy: devnet-up
 
-devnet-test:
+devnet-test: pre-devnet
 	PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. --test
 .PHONY: devnet-test
@@ -116,7 +119,7 @@ devnet-clean:
 	docker volume ls --filter name=ops-bedrock --format='{{.Name}}' | xargs -r docker volume rm
 .PHONY: devnet-clean
 
-devnet-allocs:
+devnet-allocs: pre-devnet
 	PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. --allocs
 
 devnet-logs:
......
@@ -7,6 +7,12 @@ test the fault proof system.
 The overall design of this system along with the APIs and interfaces it exposes are not
 finalized and may change without notice.
 
+### Getting Started
+
+* [Architecture Overview Video](https://www.youtube.com/watch?v=nIN5sNc6nQM)
+* [Fault Proof Alpha Deployment Information (Goerli)](./deployments.md)
+* [Security Researchers - Bug Bounty Program](./immunefi.md)
+
 ### Contents
 
 * Specifications
......
@@ -2,7 +2,21 @@
 
 ### Goerli
 
-Information on the fault proofs alpha deployment to Goerli is not yet available.
+**Deployments**
+
+| Contract                   | Address                                      |
+|----------------------------|----------------------------------------------|
+| DisputeGameFactory (proxy) | `0xad9e5E6b39F55EE7220A3dC21a640B089196a89f` |
+| DisputeGameFactory (impl)  | `0x666C3d298B9c360990F901b3Eded4e7a9d7AD446` |
+| BlockOracle                | `0x7979b2D824A6A682D1dA25bD02E544bB66536032` |
+| PreimageOracle             | `0xE214d974dE12Cc8d096170AbC5EEBD18F08a044a` |
+| MIPS VM                    | `0x78760b9A1Df5DDe037D64376BD4d1d675dC30f0f` |
+| FaultDisputeGame (impl)    | `0xBE2827A6c62d39b4C7933D592B6913412D5aBC77` |
+
+**Configuration**
+
+- Absolute prestate hash: `0x0357393f50acca498e446f69292fce66c93a6d9038aa277b47c93fa46ce85108`
+- Max game depth: `40`
+- Supports an instruction trace up to `1,099,511,627,776` instructions long.
+- Max game duration: `172,800 seconds` (2 days)
 
 ### Local Devnet
......
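Quick check on the configuration above: a max game depth of `40` corresponds to `2^40 = 1,099,511,627,776` leaf positions in the dispute game, which matches the stated instruction-trace bound.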
This diff is collapsed.
...@@ -65,7 +65,7 @@ require ( ...@@ -65,7 +65,7 @@ require (
github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 // indirect github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 // indirect
github.com/cockroachdb/redact v1.1.3 // indirect github.com/cockroachdb/redact v1.1.3 // indirect
github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.10.0 // indirect github.com/consensys/gnark-crypto v0.12.0 // indirect
github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
......
...@@ -98,8 +98,8 @@ github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ ...@@ -98,8 +98,8 @@ github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.10.0 h1:zRh22SR7o4K35SoNqouS9J/TKHTyU2QWaj5ldehyXtA= github.com/consensys/gnark-crypto v0.12.0 h1:1OnSpOykNkUIBIBJKdhwy2p0JlW5o+Az02ICzZmvvdg=
github.com/consensys/gnark-crypto v0.10.0/go.mod h1:Iq/P3HHl0ElSjsg2E1gsMwhAyxnxoKK5nVyZKd+/KhU= github.com/consensys/gnark-crypto v0.12.0/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
......
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	_ "net/http/pprof"
 
-	gethrpc "github.com/ethereum/go-ethereum/rpc"
 	"github.com/urfave/cli/v2"
 
 	"github.com/ethereum-optimism/optimism/op-batcher/flags"
@@ -27,11 +26,15 @@ func Main(version string, cliCtx *cli.Context) error {
 		return err
 	}
 	cfg := NewConfig(cliCtx)
+	if err := cfg.Check(); err != nil {
+		return fmt.Errorf("invalid CLI flags: %w", err)
+	}
 	l := oplog.NewLogger(oplog.AppOut(cliCtx), cfg.LogConfig)
 	oplog.SetGlobalLogHandler(l.GetHandler())
 	opservice.ValidateEnvVars(flags.EnvVarPrefix, flags.Flags, l)
-	m := metrics.NewMetrics("default")
+	procName := "default"
+	m := metrics.NewMetrics(procName)
 
 	l.Info("Initializing Batch Submitter")
 	batchSubmitter, err := NewBatchSubmitterFromCLIConfig(cfg, l, m)
@@ -72,18 +75,15 @@ func Main(version string, cliCtx *cli.Context) error {
 		m.StartBalanceMetrics(ctx, l, batchSubmitter.L1Client, batchSubmitter.TxManager.From())
 	}
 
-	rpcCfg := cfg.RPCConfig
 	server := oprpc.NewServer(
-		rpcCfg.ListenAddr,
-		rpcCfg.ListenPort,
+		cfg.RPCFlag.ListenAddr,
+		cfg.RPCFlag.ListenPort,
 		version,
 		oprpc.WithLogger(l),
 	)
-	if rpcCfg.EnableAdmin {
-		server.AddAPI(gethrpc.API{
-			Namespace: "admin",
-			Service:   rpc.NewAdminAPI(batchSubmitter),
-		})
+	if cfg.RPCFlag.EnableAdmin {
+		adminAPI := rpc.NewAdminAPI(batchSubmitter, &m.RPCMetrics, l)
+		server.AddAPI(rpc.GetAdminAPI(adminAPI))
 		l.Info("Admin RPC enabled")
 	}
 	if err := server.Start(); err != nil {
......
...@@ -10,12 +10,12 @@ import ( ...@@ -10,12 +10,12 @@ import (
"github.com/ethereum-optimism/optimism/op-batcher/compressor" "github.com/ethereum-optimism/optimism/op-batcher/compressor"
"github.com/ethereum-optimism/optimism/op-batcher/flags" "github.com/ethereum-optimism/optimism/op-batcher/flags"
"github.com/ethereum-optimism/optimism/op-batcher/metrics" "github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-batcher/rpc"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/sources" "github.com/ethereum-optimism/optimism/op-node/sources"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof" oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
"github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum-optimism/optimism/op-service/txmgr"
) )
...@@ -88,17 +88,16 @@ type CLIConfig struct { ...@@ -88,17 +88,16 @@ type CLIConfig struct {
Stopped bool Stopped bool
TxMgrConfig txmgr.CLIConfig TxMgrConfig txmgr.CLIConfig
RPCConfig rpc.CLIConfig
LogConfig oplog.CLIConfig LogConfig oplog.CLIConfig
MetricsConfig opmetrics.CLIConfig MetricsConfig opmetrics.CLIConfig
PprofConfig oppprof.CLIConfig PprofConfig oppprof.CLIConfig
CompressorConfig compressor.CLIConfig CompressorConfig compressor.CLIConfig
RPCFlag oprpc.CLIConfig
} }
func (c CLIConfig) Check() error { func (c CLIConfig) Check() error {
if err := c.RPCConfig.Check(); err != nil { // TODO: check the sanity of flags loaded directly https://github.com/ethereum-optimism/optimism/issues/7512
return err
}
if err := c.MetricsConfig.Check(); err != nil { if err := c.MetricsConfig.Check(); err != nil {
return err return err
} }
...@@ -108,6 +107,9 @@ func (c CLIConfig) Check() error { ...@@ -108,6 +107,9 @@ func (c CLIConfig) Check() error {
if err := c.TxMgrConfig.Check(); err != nil { if err := c.TxMgrConfig.Check(); err != nil {
return err return err
} }
if err := c.RPCFlag.Check(); err != nil {
return err
}
return nil return nil
} }
...@@ -127,10 +129,10 @@ func NewConfig(ctx *cli.Context) CLIConfig { ...@@ -127,10 +129,10 @@ func NewConfig(ctx *cli.Context) CLIConfig {
MaxL1TxSize: ctx.Uint64(flags.MaxL1TxSizeBytesFlag.Name), MaxL1TxSize: ctx.Uint64(flags.MaxL1TxSizeBytesFlag.Name),
Stopped: ctx.Bool(flags.StoppedFlag.Name), Stopped: ctx.Bool(flags.StoppedFlag.Name),
TxMgrConfig: txmgr.ReadCLIConfig(ctx), TxMgrConfig: txmgr.ReadCLIConfig(ctx),
RPCConfig: rpc.ReadCLIConfig(ctx),
LogConfig: oplog.ReadCLIConfig(ctx), LogConfig: oplog.ReadCLIConfig(ctx),
MetricsConfig: opmetrics.ReadCLIConfig(ctx), MetricsConfig: opmetrics.ReadCLIConfig(ctx),
PprofConfig: oppprof.ReadCLIConfig(ctx), PprofConfig: oppprof.ReadCLIConfig(ctx),
CompressorConfig: compressor.ReadCLIConfig(ctx), CompressorConfig: compressor.ReadCLIConfig(ctx),
RPCFlag: oprpc.ReadCLIConfig(ctx),
} }
} }
...@@ -7,7 +7,6 @@ import ( ...@@ -7,7 +7,6 @@ import (
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-batcher/compressor" "github.com/ethereum-optimism/optimism/op-batcher/compressor"
"github.com/ethereum-optimism/optimism/op-batcher/rpc"
opservice "github.com/ethereum-optimism/optimism/op-service" opservice "github.com/ethereum-optimism/optimism/op-service"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
...@@ -102,7 +101,6 @@ func init() { ...@@ -102,7 +101,6 @@ func init() {
optionalFlags = append(optionalFlags, oplog.CLIFlags(EnvVarPrefix)...) optionalFlags = append(optionalFlags, oplog.CLIFlags(EnvVarPrefix)...)
optionalFlags = append(optionalFlags, opmetrics.CLIFlags(EnvVarPrefix)...) optionalFlags = append(optionalFlags, opmetrics.CLIFlags(EnvVarPrefix)...)
optionalFlags = append(optionalFlags, oppprof.CLIFlags(EnvVarPrefix)...) optionalFlags = append(optionalFlags, oppprof.CLIFlags(EnvVarPrefix)...)
optionalFlags = append(optionalFlags, rpc.CLIFlags(EnvVarPrefix)...)
optionalFlags = append(optionalFlags, txmgr.CLIFlags(EnvVarPrefix)...) optionalFlags = append(optionalFlags, txmgr.CLIFlags(EnvVarPrefix)...)
optionalFlags = append(optionalFlags, compressor.CLIFlags(EnvVarPrefix)...) optionalFlags = append(optionalFlags, compressor.CLIFlags(EnvVarPrefix)...)
......
...@@ -51,6 +51,7 @@ type Metrics struct { ...@@ -51,6 +51,7 @@ type Metrics struct {
opmetrics.RefMetrics opmetrics.RefMetrics
txmetrics.TxMetrics txmetrics.TxMetrics
opmetrics.RPCMetrics
info prometheus.GaugeVec info prometheus.GaugeVec
up prometheus.Gauge up prometheus.Gauge
...@@ -93,6 +94,7 @@ func NewMetrics(procName string) *Metrics { ...@@ -93,6 +94,7 @@ func NewMetrics(procName string) *Metrics {
RefMetrics: opmetrics.MakeRefMetrics(ns, factory), RefMetrics: opmetrics.MakeRefMetrics(ns, factory),
TxMetrics: txmetrics.MakeTxMetrics(ns, factory), TxMetrics: txmetrics.MakeTxMetrics(ns, factory),
RPCMetrics: opmetrics.MakeRPCMetrics(ns, factory),
info: *factory.NewGaugeVec(prometheus.GaugeOpts{ info: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Namespace: ns,
......
@@ -2,6 +2,12 @@ package rpc
 
 import (
 	"context"
+
+	"github.com/ethereum/go-ethereum/log"
+	gethrpc "github.com/ethereum/go-ethereum/rpc"
+
+	"github.com/ethereum-optimism/optimism/op-service/metrics"
+	"github.com/ethereum-optimism/optimism/op-service/rpc"
 )
 
 type batcherClient interface {
@@ -10,12 +16,21 @@ type batcherClient interface {
 }
 
 type adminAPI struct {
+	*rpc.CommonAdminAPI
 	b batcherClient
 }
 
-func NewAdminAPI(dr batcherClient) *adminAPI {
+func NewAdminAPI(dr batcherClient, m metrics.RPCMetricer, log log.Logger) *adminAPI {
 	return &adminAPI{
-		b: dr,
+		CommonAdminAPI: rpc.NewCommonAdminAPI(m, log),
+		b:              dr,
+	}
+}
+
+func GetAdminAPI(api *adminAPI) gethrpc.API {
+	return gethrpc.API{
+		Namespace: "admin",
+		Service:   api,
 	}
 }
......
package rpc
import (
"github.com/urfave/cli/v2"
opservice "github.com/ethereum-optimism/optimism/op-service"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
)
const (
EnableAdminFlagName = "rpc.enable-admin"
)
func CLIFlags(envPrefix string) []cli.Flag {
return []cli.Flag{
&cli.BoolFlag{
Name: EnableAdminFlagName,
Usage: "Enable the admin API (experimental)",
EnvVars: opservice.PrefixEnvVar(envPrefix, "RPC_ENABLE_ADMIN"),
},
}
}
type CLIConfig struct {
oprpc.CLIConfig
EnableAdmin bool
}
func ReadCLIConfig(ctx *cli.Context) CLIConfig {
return CLIConfig{
CLIConfig: oprpc.ReadCLIConfig(ctx),
EnableAdmin: ctx.Bool(EnableAdminFlagName),
}
}
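For reference, a minimal, self-contained sketch of the flag pattern this new file uses (a hypothetical demo app, not the batcher itself; the `OP_BATCHER` env prefix and the `prefixEnvVar` helper are assumptions that mirror `opservice.PrefixEnvVar`):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// prefixEnvVar mimics opservice.PrefixEnvVar: it namespaces an env var
// suffix with the service prefix (e.g. OP_BATCHER_RPC_ENABLE_ADMIN).
func prefixEnvVar(prefix, suffix string) []string {
	return []string{prefix + "_" + suffix}
}

func main() {
	app := &cli.App{
		Name: "enable-admin-demo", // hypothetical demo, not part of the diff
		Flags: []cli.Flag{
			&cli.BoolFlag{
				Name:    "rpc.enable-admin",
				Usage:   "Enable the admin API (experimental)",
				EnvVars: prefixEnvVar("OP_BATCHER", "RPC_ENABLE_ADMIN"),
			},
		},
		Action: func(ctx *cli.Context) error {
			// The batcher reads this into CLIConfig.EnableAdmin via ReadCLIConfig.
			fmt.Println("admin API enabled:", ctx.Bool("rpc.enable-admin"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```

Running it with `--rpc.enable-admin` (or the prefixed env var set to `true`) prints `admin API enabled: true`.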
@@ -2,6 +2,7 @@ package main
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"os"
 	"slices"
@@ -38,6 +39,11 @@ func main() {
 			Name:  "chain-ids",
 			Usage: "L2 Chain IDs corresponding to chains to upgrade. Corresponds to all chains if empty",
 		},
+		&cli.StringFlag{
+			Name:    "superchain-target",
+			Usage:   "The name of the superchain to upgrade",
+			EnvVars: []string{"SUPERCHAIN_TARGET"},
+		},
 		&cli.PathFlag{
 			Name:  "deploy-config",
 			Usage: "The path to the deploy config file",
@@ -70,9 +76,13 @@ func entrypoint(ctx *cli.Context) error {
 	if err != nil {
 		return err
 	}
 
-	superchainName, err := toSuperchainName(l1ChainID.Uint64())
-	if err != nil {
-		return err
+	superchainName := ctx.String("superchain-target")
+	if superchainName == "" {
+		superchainName, err = toSuperchainName(l1ChainID.Uint64())
+		if err != nil {
+			return err
+		}
 	}
 
 	chainIDs := ctx.Uint64Slice("chain-ids")
@@ -116,23 +126,35 @@ func entrypoint(ctx *cli.Context) error {
 	if err != nil {
 		return fmt.Errorf("cannot create RPC clients: %w", err)
 	}
+	// The L1Client is required
+	if clients.L1Client == nil {
+		return errors.New("Cannot create L1 client")
+	}
 	l1ChainID, err := clients.L1Client.ChainID(ctx.Context)
 	if err != nil {
 		return fmt.Errorf("cannot fetch L1 chain ID: %w", err)
 	}
-	l2ChainID, err := clients.L2Client.ChainID(ctx.Context)
-	if err != nil {
-		return fmt.Errorf("cannot fetch L2 chain ID: %w", err)
+
+	// The L2Client is not required, but double check the chain id matches if possible
+	if clients.L2Client != nil {
+		l2ChainID, err := clients.L2Client.ChainID(ctx.Context)
+		if err != nil {
+			return fmt.Errorf("cannot fetch L2 chain ID: %w", err)
+		}
+		if chainConfig.ChainID != l2ChainID.Uint64() {
+			return fmt.Errorf("Mismatched chain IDs: %d != %d", chainConfig.ChainID, l2ChainID)
+		}
 	}
-	log.Info(chainConfig.Name, "l1-chain-id", l1ChainID, "l2-chain-id", l2ChainID)
+
+	log.Info(chainConfig.Name, "l1-chain-id", l1ChainID, "l2-chain-id", chainConfig.ChainID)
 
 	log.Info("Detecting on chain contracts")
 	// Tracking the individual addresses can be deprecated once the system is upgraded
 	// to the new contracts where the system config has a reference to each address.
-	addresses, ok := superchain.Addresses[l2ChainID.Uint64()]
+	addresses, ok := superchain.Addresses[chainConfig.ChainID]
 	if !ok {
-		return fmt.Errorf("no addresses for chain ID %d", l2ChainID.Uint64())
+		return fmt.Errorf("no addresses for chain ID %d", chainConfig.ChainID)
 	}
 	versions, err := upgrades.GetContractVersions(ctx.Context, addresses, chainConfig, clients.L1Client)
 	if err != nil {
@@ -235,7 +257,7 @@ func toSuperchainName(chainID uint64) (string, error) {
 }
 
 func writeJSON(outfile string, input interface{}) error {
-	f, err := os.OpenFile(outfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
+	f, err := os.OpenFile(outfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o666)
 	if err != nil {
 		return err
 	}
......
...@@ -55,7 +55,7 @@ func main() { ...@@ -55,7 +55,7 @@ func main() {
log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd())))) log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd()))))
app := &cli.App{ app := &cli.App{
Name: "registry-genesis", Name: "registry-data",
Usage: "Prepare superchain-registry genesis data files based on full genesis dump", Usage: "Prepare superchain-registry genesis data files based on full genesis dump",
Flags: []cli.Flag{ Flags: []cli.Flag{
L2GenesisFlag, L2GenesisFlag,
...@@ -65,6 +65,16 @@ func main() { ...@@ -65,6 +65,16 @@ func main() {
}, },
Action: entrypoint, Action: entrypoint,
} }
app.Commands = []*cli.Command{
{
Name: "bytecode",
Usage: "Generate a single gzipped data file from a bytecode hex string",
Flags: []cli.Flag{
BytecodesDirFlag,
},
Action: bytecode,
},
}
if err := app.Run(os.Args); err != nil { if err := app.Run(os.Args); err != nil {
log.Crit("error while generating registry data", "err", err) log.Crit("error while generating registry data", "err", err)
...@@ -188,30 +198,10 @@ func entrypoint(ctx *cli.Context) error { ...@@ -188,30 +198,10 @@ func entrypoint(ctx *cli.Context) error {
} }
for addr, account := range genesis.Alloc { for addr, account := range genesis.Alloc {
if len(account.Code) > 0 { if len(account.Code) > 0 {
codeHash := crypto.Keccak256Hash(account.Code) err = writeBytecode(bytecodesDir, account.Code, addr)
name := filepath.Join(bytecodesDir, fmt.Sprintf("%s.bin.gz", codeHash))
_, err := os.Stat(name)
if err != nil { if err != nil {
if os.IsNotExist(err) { return err
var buf bytes.Buffer }
w, err := gzip.NewWriterLevel(&buf, 9)
if err != nil {
return fmt.Errorf("failed to construct gzip writer for bytecode %s: %w", codeHash, err)
}
if _, err := w.Write(account.Code); err != nil {
return fmt.Errorf("failed to write bytecode %s to gzip writer: %w", codeHash, err)
}
if err := w.Close(); err != nil {
return fmt.Errorf("failed to close gzip writer: %w", err)
}
// new bytecode
if err := os.WriteFile(name, buf.Bytes(), 0755); err != nil {
return fmt.Errorf("failed to write bytecode %s of account %s: %w", codeHash, addr, err)
}
} else {
return fmt.Errorf("failed to check for pre-existing bytecode %s for address %s: %w", codeHash, addr, err)
}
} // else: already exists
} }
} }
...@@ -259,3 +249,47 @@ func entrypoint(ctx *cli.Context) error { ...@@ -259,3 +249,47 @@ func entrypoint(ctx *cli.Context) error {
} }
return nil return nil
} }
func bytecode(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return fmt.Errorf("expected hex-encoded bytecode as single argument; received %d arguments", ctx.NArg())
}
bc, err := hexutil.Decode(ctx.Args().First())
if err != nil {
return fmt.Errorf("failed to decode hex: %w", err)
}
bytecodesDir := ctx.Path(BytecodesDirFlag.Name)
if err := os.MkdirAll(bytecodesDir, 0755); err != nil {
return fmt.Errorf("failed to make bytecodes dir: %w", err)
}
return writeBytecode(bytecodesDir, bc, common.Address{})
}
func writeBytecode(bytecodesDir string, code []byte, addr common.Address) error {
codeHash := crypto.Keccak256Hash(code)
name := filepath.Join(bytecodesDir, fmt.Sprintf("%s.bin.gz", codeHash))
_, err := os.Stat(name)
if err == nil {
// file already exists
return nil
}
if !os.IsNotExist(err) {
return fmt.Errorf("failed to check for pre-existing bytecode %s for address %s: %w", codeHash, addr, err)
}
var buf bytes.Buffer
w, err := gzip.NewWriterLevel(&buf, 9)
if err != nil {
return fmt.Errorf("failed to construct gzip writer for bytecode %s: %w", codeHash, err)
}
if _, err := w.Write(code); err != nil {
return fmt.Errorf("failed to write bytecode %s to gzip writer: %w", codeHash, err)
}
if err := w.Close(); err != nil {
return fmt.Errorf("failed to close gzip writer: %w", err)
}
// new bytecode
if err := os.WriteFile(name, buf.Bytes(), 0755); err != nil {
return fmt.Errorf("failed to write bytecode %s of account %s: %w", codeHash, addr, err)
}
return nil
}
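A note on the refactor above: bytecode files are keyed by the keccak256 hash of the code, so identical bytecode shared by many accounts is written only once. A small self-contained sketch of that file-name derivation, using the same calls as `writeBytecode` (the sample bytecode and `bytecodes` directory are made up):

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Hypothetical bytecode blob; real input comes from genesis.Alloc or the
	// new `bytecode` subcommand's hex argument.
	code := hexutil.MustDecode("0x6080604052")

	// The dedup key: keccak256 of the code, used as the gzip file name.
	codeHash := crypto.Keccak256Hash(code)
	name := filepath.Join("bytecodes", fmt.Sprintf("%s.bin.gz", codeHash))
	fmt.Println(name) // e.g. bytecodes/0x<keccak hash>.bin.gz
}
```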
...@@ -6,6 +6,7 @@ package safe ...@@ -6,6 +6,7 @@ package safe
import ( import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"math/big" "math/big"
"strings" "strings"
...@@ -19,7 +20,12 @@ import ( ...@@ -19,7 +20,12 @@ import (
) )
// Batch represents a Safe tx-builder transaction. // Batch represents a Safe tx-builder transaction.
// SkipCalldata will skip adding the calldata to the BatchTransaction.
// This is useful for when using the Safe UI because it prefers using
// the raw calldata when both the calldata and ABIs with arguments are
// present.
type Batch struct { type Batch struct {
SkipCalldata bool
Version string `json:"version"` Version string `json:"version"`
ChainID *big.Int `json:"chainId"` ChainID *big.Int `json:"chainId"`
CreatedAt uint64 `json:"createdAt"` CreatedAt uint64 `json:"createdAt"`
...@@ -29,7 +35,10 @@ type Batch struct { ...@@ -29,7 +35,10 @@ type Batch struct {
// AddCall will add a call to the batch. After a series of calls are // AddCall will add a call to the batch. After a series of calls are
// added to the batch, it can be serialized to JSON. // added to the batch, it can be serialized to JSON.
func (b *Batch) AddCall(to common.Address, value *big.Int, sig string, args []any, iface abi.ABI) error { func (b *Batch) AddCall(to common.Address, value *big.Int, sig string, args []any, iface *abi.ABI) error {
if iface == nil {
return errors.New("abi cannot be nil")
}
// Attempt to pull out the signature from the top level methods. // Attempt to pull out the signature from the top level methods.
// The abi package uses normalization that we do not want to be // The abi package uses normalization that we do not want to be
// coupled to, so attempt to search for the raw name if the top // coupled to, so attempt to search for the raw name if the top
...@@ -87,10 +96,13 @@ func (b *Batch) AddCall(to common.Address, value *big.Int, sig string, args []an ...@@ -87,10 +96,13 @@ func (b *Batch) AddCall(to common.Address, value *big.Int, sig string, args []an
To: to, To: to,
Value: value, Value: value,
Method: contractMethod, Method: contractMethod,
Data: data,
InputValues: inputValues, InputValues: inputValues,
} }
if !b.SkipCalldata {
batchTransaction.Data = data
}
b.Transactions = append(b.Transactions, batchTransaction) b.Transactions = append(b.Transactions, batchTransaction)
return nil return nil
......
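To make the `AddCall` change above concrete, here is a minimal sketch of the new call shape (pointer ABI, optional `SkipCalldata`). The ABI JSON, addresses, the `op-chain-ops/safe` import path, and the assumption that a zero-value `Batch` is usable are illustrative, not taken from the diff:

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"

	"github.com/ethereum-optimism/optimism/op-chain-ops/safe"
)

func main() {
	// Minimal ABI containing the one method we want to call.
	parsed, err := abi.JSON(strings.NewReader(`[{"type":"function","name":"transfer","inputs":[{"name":"to","type":"address"},{"name":"amount","type":"uint256"}]}]`))
	if err != nil {
		panic(err)
	}

	batch := safe.Batch{
		// With SkipCalldata set, AddCall leaves the raw calldata out of the
		// BatchTransaction so the Safe UI builds it from the ABI + arguments.
		SkipCalldata: true,
	}

	to := common.Address{19: 0x01}
	args := []any{common.Address{19: 0x02}, big.NewInt(1)}
	// AddCall now takes *abi.ABI and rejects a nil ABI.
	if err := batch.AddCall(to, common.Big0, "transfer(address,uint256)", args, &parsed); err != nil {
		panic(err)
	}
	fmt.Println("transactions in batch:", len(batch.Transactions))
}
```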
...@@ -58,7 +58,7 @@ func TestBatchAddCallFinalizeWithdrawalTransaction(t *testing.T) { ...@@ -58,7 +58,7 @@ func TestBatchAddCallFinalizeWithdrawalTransaction(t *testing.T) {
to := common.Address{19: 0x01} to := common.Address{19: 0x01}
value := big.NewInt(222) value := big.NewInt(222)
require.NoError(t, batch.AddCall(to, value, sig, argument, portalABI)) require.NoError(t, batch.AddCall(to, value, sig, argument, &portalABI))
require.NoError(t, batch.Check()) require.NoError(t, batch.Check())
require.Equal(t, batch.Transactions[0].Signature(), "finalizeWithdrawalTransaction((uint256,address,address,uint256,uint256,bytes))") require.Equal(t, batch.Transactions[0].Signature(), "finalizeWithdrawalTransaction((uint256,address,address,uint256,uint256,bytes))")
...@@ -89,7 +89,7 @@ func TestBatchAddCallDespositTransaction(t *testing.T) { ...@@ -89,7 +89,7 @@ func TestBatchAddCallDespositTransaction(t *testing.T) {
[]byte{}, []byte{},
} }
require.NoError(t, batch.AddCall(to, value, sig, argument, portalABI)) require.NoError(t, batch.AddCall(to, value, sig, argument, &portalABI))
require.NoError(t, batch.Check()) require.NoError(t, batch.Check())
require.Equal(t, batch.Transactions[0].Signature(), "depositTransaction(address,uint256,uint64,bool,bytes)") require.Equal(t, batch.Transactions[0].Signature(), "depositTransaction(address,uint256,uint64,bool,bytes)")
......
...@@ -63,6 +63,7 @@ func L1CrossDomainMessenger(batch *safe.Batch, implementations superchain.Implem ...@@ -63,6 +63,7 @@ func L1CrossDomainMessenger(batch *safe.Batch, implementations superchain.Implem
if err != nil { if err != nil {
return err return err
} }
calldata = append(initialize.ID, calldata...)
args := []any{ args := []any{
common.HexToAddress(list.L1CrossDomainMessengerProxy.String()), common.HexToAddress(list.L1CrossDomainMessengerProxy.String()),
...@@ -72,7 +73,7 @@ func L1CrossDomainMessenger(batch *safe.Batch, implementations superchain.Implem ...@@ -72,7 +73,7 @@ func L1CrossDomainMessenger(batch *safe.Batch, implementations superchain.Implem
proxyAdmin := common.HexToAddress(list.ProxyAdmin.String()) proxyAdmin := common.HexToAddress(list.ProxyAdmin.String())
sig := "upgradeAndCall(address,address,bytes)" sig := "upgradeAndCall(address,address,bytes)"
if err := batch.AddCall(proxyAdmin, common.Big0, sig, args, *proxyAdminABI); err != nil { if err := batch.AddCall(proxyAdmin, common.Big0, sig, args, proxyAdminABI); err != nil {
return err return err
} }
...@@ -102,6 +103,7 @@ func L1ERC721Bridge(batch *safe.Batch, implementations superchain.Implementation ...@@ -102,6 +103,7 @@ func L1ERC721Bridge(batch *safe.Batch, implementations superchain.Implementation
if err != nil { if err != nil {
return err return err
} }
calldata = append(initialize.ID, calldata...)
args := []any{ args := []any{
common.HexToAddress(list.L1ERC721BridgeProxy.String()), common.HexToAddress(list.L1ERC721BridgeProxy.String()),
...@@ -111,7 +113,7 @@ func L1ERC721Bridge(batch *safe.Batch, implementations superchain.Implementation ...@@ -111,7 +113,7 @@ func L1ERC721Bridge(batch *safe.Batch, implementations superchain.Implementation
proxyAdmin := common.HexToAddress(list.ProxyAdmin.String()) proxyAdmin := common.HexToAddress(list.ProxyAdmin.String())
sig := "upgradeAndCall(address,address,bytes)" sig := "upgradeAndCall(address,address,bytes)"
if err := batch.AddCall(proxyAdmin, common.Big0, sig, args, *proxyAdminABI); err != nil { if err := batch.AddCall(proxyAdmin, common.Big0, sig, args, proxyAdminABI); err != nil {
return err return err
} }
...@@ -141,6 +143,7 @@ func L1StandardBridge(batch *safe.Batch, implementations superchain.Implementati ...@@ -141,6 +143,7 @@ func L1StandardBridge(batch *safe.Batch, implementations superchain.Implementati
if err != nil { if err != nil {
return err return err
} }
calldata = append(initialize.ID, calldata...)
args := []any{ args := []any{
common.HexToAddress(list.L1StandardBridgeProxy.String()), common.HexToAddress(list.L1StandardBridgeProxy.String()),
...@@ -150,7 +153,7 @@ func L1StandardBridge(batch *safe.Batch, implementations superchain.Implementati ...@@ -150,7 +153,7 @@ func L1StandardBridge(batch *safe.Batch, implementations superchain.Implementati
proxyAdmin := common.HexToAddress(list.ProxyAdmin.String()) proxyAdmin := common.HexToAddress(list.ProxyAdmin.String())
sig := "upgradeAndCall(address,address,bytes)" sig := "upgradeAndCall(address,address,bytes)"
if err := batch.AddCall(proxyAdmin, common.Big0, sig, args, *proxyAdminABI); err != nil { if err := batch.AddCall(proxyAdmin, common.Big0, sig, args, proxyAdminABI); err != nil {
return err return err
} }
...@@ -219,6 +222,7 @@ func L2OutputOracle(batch *safe.Batch, implementations superchain.Implementation ...@@ -219,6 +222,7 @@ func L2OutputOracle(batch *safe.Batch, implementations superchain.Implementation
if err != nil { if err != nil {
return err return err
} }
calldata = append(initialize.ID, calldata...)
args := []any{ args := []any{
common.HexToAddress(list.L2OutputOracleProxy.String()), common.HexToAddress(list.L2OutputOracleProxy.String()),
...@@ -228,7 +232,7 @@ func L2OutputOracle(batch *safe.Batch, implementations superchain.Implementation ...@@ -228,7 +232,7 @@ func L2OutputOracle(batch *safe.Batch, implementations superchain.Implementation
proxyAdmin := common.HexToAddress(list.ProxyAdmin.String()) proxyAdmin := common.HexToAddress(list.ProxyAdmin.String())
sig := "upgradeAndCall(address,address,bytes)" sig := "upgradeAndCall(address,address,bytes)"
if err := batch.AddCall(proxyAdmin, common.Big0, sig, args, *proxyAdminABI); err != nil { if err := batch.AddCall(proxyAdmin, common.Big0, sig, args, proxyAdminABI); err != nil {
return err return err
} }
...@@ -258,6 +262,7 @@ func OptimismMintableERC20Factory(batch *safe.Batch, implementations superchain. ...@@ -258,6 +262,7 @@ func OptimismMintableERC20Factory(batch *safe.Batch, implementations superchain.
if err != nil { if err != nil {
return err return err
} }
calldata = append(initialize.ID, calldata...)
args := []any{ args := []any{
common.HexToAddress(list.OptimismMintableERC20FactoryProxy.String()), common.HexToAddress(list.OptimismMintableERC20FactoryProxy.String()),
...@@ -267,7 +272,7 @@ func OptimismMintableERC20Factory(batch *safe.Batch, implementations superchain. ...@@ -267,7 +272,7 @@ func OptimismMintableERC20Factory(batch *safe.Batch, implementations superchain.
proxyAdmin := common.HexToAddress(list.ProxyAdmin.String()) proxyAdmin := common.HexToAddress(list.ProxyAdmin.String())
sig := "upgradeAndCall(address,address,bytes)" sig := "upgradeAndCall(address,address,bytes)"
if err := batch.AddCall(proxyAdmin, common.Big0, sig, args, *proxyAdminABI); err != nil { if err := batch.AddCall(proxyAdmin, common.Big0, sig, args, proxyAdminABI); err != nil {
return err return err
} }
...@@ -315,6 +320,7 @@ func OptimismPortal(batch *safe.Batch, implementations superchain.Implementation ...@@ -315,6 +320,7 @@ func OptimismPortal(batch *safe.Batch, implementations superchain.Implementation
if err != nil { if err != nil {
return err return err
} }
calldata = append(initialize.ID, calldata...)
args := []any{ args := []any{
common.HexToAddress(list.OptimismPortalProxy.String()), common.HexToAddress(list.OptimismPortalProxy.String()),
...@@ -324,7 +330,7 @@ func OptimismPortal(batch *safe.Batch, implementations superchain.Implementation ...@@ -324,7 +330,7 @@ func OptimismPortal(batch *safe.Batch, implementations superchain.Implementation
proxyAdmin := common.HexToAddress(list.ProxyAdmin.String()) proxyAdmin := common.HexToAddress(list.ProxyAdmin.String())
sig := "upgradeAndCall(address,address,bytes)" sig := "upgradeAndCall(address,address,bytes)"
if err := batch.AddCall(proxyAdmin, common.Big0, sig, args, *proxyAdminABI); err != nil { if err := batch.AddCall(proxyAdmin, common.Big0, sig, args, proxyAdminABI); err != nil {
return err return err
} }
...@@ -434,6 +440,7 @@ func SystemConfig(batch *safe.Batch, implementations superchain.ImplementationLi ...@@ -434,6 +440,7 @@ func SystemConfig(batch *safe.Batch, implementations superchain.ImplementationLi
if err != nil { if err != nil {
return err return err
} }
calldata = append(initialize.ID, calldata...)
args := []any{ args := []any{
common.HexToAddress(chainConfig.SystemConfigAddr.String()), common.HexToAddress(chainConfig.SystemConfigAddr.String()),
...@@ -443,7 +450,7 @@ func SystemConfig(batch *safe.Batch, implementations superchain.ImplementationLi ...@@ -443,7 +450,7 @@ func SystemConfig(batch *safe.Batch, implementations superchain.ImplementationLi
proxyAdmin := common.HexToAddress(list.ProxyAdmin.String()) proxyAdmin := common.HexToAddress(list.ProxyAdmin.String())
sig := "upgradeAndCall(address,address,bytes)" sig := "upgradeAndCall(address,address,bytes)"
if err := batch.AddCall(proxyAdmin, common.Big0, sig, args, *proxyAdminABI); err != nil { if err := batch.AddCall(proxyAdmin, common.Big0, sig, args, proxyAdminABI); err != nil {
return err return err
} }
......
...@@ -196,9 +196,6 @@ func (a *Agent) newGameFromContracts(ctx context.Context) (types.Game, error) { ...@@ -196,9 +196,6 @@ func (a *Agent) newGameFromContracts(ctx context.Context) (types.Game, error) {
if len(claims) == 0 { if len(claims) == 0 {
return nil, errors.New("no claims") return nil, errors.New("no claims")
} }
game := types.NewGameState(a.agreeWithProposedOutput, claims[0], uint64(a.maxDepth)) game := types.NewGameState(a.agreeWithProposedOutput, claims, uint64(a.maxDepth))
if err := game.PutAll(claims[1:]); err != nil {
return nil, fmt.Errorf("failed to load claims into the local state: %w", err)
}
return game, nil return game, nil
} }
...@@ -119,10 +119,6 @@ func TestLoader_FetchClaims(t *testing.T) { ...@@ -119,10 +119,6 @@ func TestLoader_FetchClaims(t *testing.T) {
Value: expectedClaims[0].Claim, Value: expectedClaims[0].Claim,
Position: types.NewPositionFromGIndex(expectedClaims[0].Position.Uint64()), Position: types.NewPositionFromGIndex(expectedClaims[0].Position.Uint64()),
}, },
Parent: types.ClaimData{
Value: expectedClaims[0].Claim,
Position: types.NewPositionFromGIndex(expectedClaims[0].Position.Uint64()),
},
Countered: false, Countered: false,
Clock: uint64(0), Clock: uint64(0),
ContractIndex: 0, ContractIndex: 0,
...@@ -134,11 +130,12 @@ func TestLoader_FetchClaims(t *testing.T) { ...@@ -134,11 +130,12 @@ func TestLoader_FetchClaims(t *testing.T) {
}, },
Parent: types.ClaimData{ Parent: types.ClaimData{
Value: expectedClaims[0].Claim, Value: expectedClaims[0].Claim,
Position: types.NewPositionFromGIndex(expectedClaims[1].Position.Uint64()), Position: types.NewPositionFromGIndex(expectedClaims[0].Position.Uint64()),
}, },
Countered: false, Countered: false,
Clock: uint64(0), Clock: uint64(0),
ContractIndex: 1, ContractIndex: 1,
ParentContractIndex: 0,
}, },
{ {
ClaimData: types.ClaimData{ ClaimData: types.ClaimData{
...@@ -146,12 +143,13 @@ func TestLoader_FetchClaims(t *testing.T) { ...@@ -146,12 +143,13 @@ func TestLoader_FetchClaims(t *testing.T) {
Position: types.NewPositionFromGIndex(expectedClaims[2].Position.Uint64()), Position: types.NewPositionFromGIndex(expectedClaims[2].Position.Uint64()),
}, },
Parent: types.ClaimData{ Parent: types.ClaimData{
Value: expectedClaims[0].Claim, Value: expectedClaims[1].Claim,
Position: types.NewPositionFromGIndex(expectedClaims[2].Position.Uint64()), Position: types.NewPositionFromGIndex(expectedClaims[1].Position.Uint64()),
}, },
Countered: false, Countered: false,
Clock: uint64(0), Clock: uint64(0),
ContractIndex: 2, ContractIndex: 2,
ParentContractIndex: 1,
}, },
}, claims) }, claims)
}) })
...@@ -204,21 +202,23 @@ func newMockCaller() *mockCaller { ...@@ -204,21 +202,23 @@ func newMockCaller() *mockCaller {
}{ }{
{ {
Claim: [32]byte{0x00}, Claim: [32]byte{0x00},
Position: big.NewInt(0), Position: big.NewInt(1),
Countered: false, Countered: false,
Clock: big.NewInt(0), Clock: big.NewInt(0),
}, },
{ {
Claim: [32]byte{0x01}, Claim: [32]byte{0x01},
Position: big.NewInt(0), Position: big.NewInt(2),
Countered: false, Countered: false,
Clock: big.NewInt(0), Clock: big.NewInt(0),
ParentIndex: 0,
}, },
{ {
Claim: [32]byte{0x02}, Claim: [32]byte{0x02},
Position: big.NewInt(0), Position: big.NewInt(3),
Countered: false, Countered: false,
Clock: big.NewInt(0), Clock: big.NewInt(0),
ParentIndex: 1,
}, },
}, },
} }
...@@ -240,7 +240,7 @@ func (m *mockCaller) ClaimData(opts *bind.CallOpts, arg0 *big.Int) (struct { ...@@ -240,7 +240,7 @@ func (m *mockCaller) ClaimData(opts *bind.CallOpts, arg0 *big.Int) (struct {
Clock *big.Int Clock *big.Int
}{}, mockClaimDataError }{}, mockClaimDataError
} }
returnClaim := m.returnClaims[m.currentIndex] returnClaim := m.returnClaims[arg0.Uint64()]
m.currentIndex++ m.currentIndex++
return returnClaim, nil return returnClaim, nil
} }
......
...@@ -56,7 +56,6 @@ func TestCalculateNextActions(t *testing.T) { ...@@ -56,7 +56,6 @@ func TestCalculateNextActions(t *testing.T) {
builder.Seq().AttackCorrect() builder.Seq().AttackCorrect()
}, },
}, },
{ {
name: "RespondToAllClaimsAtDisagreeingLevel", name: "RespondToAllClaimsAtDisagreeingLevel",
agreeWithOutputRoot: true, agreeWithOutputRoot: true,
...@@ -70,7 +69,6 @@ func TestCalculateNextActions(t *testing.T) { ...@@ -70,7 +69,6 @@ func TestCalculateNextActions(t *testing.T) {
honestClaim.Defend(common.Hash{0xdd}).ExpectAttack() honestClaim.Defend(common.Hash{0xdd}).ExpectAttack()
}, },
}, },
{ {
name: "StepAtMaxDepth", name: "StepAtMaxDepth",
agreeWithOutputRoot: true, agreeWithOutputRoot: true,
...@@ -83,7 +81,6 @@ func TestCalculateNextActions(t *testing.T) { ...@@ -83,7 +81,6 @@ func TestCalculateNextActions(t *testing.T) {
lastHonestClaim.Attack(common.Hash{0xdd}).ExpectStepAttack() lastHonestClaim.Attack(common.Hash{0xdd}).ExpectStepAttack()
}, },
}, },
{ {
name: "PoisonedPreState", name: "PoisonedPreState",
agreeWithOutputRoot: true, agreeWithOutputRoot: true,
......
...@@ -2,6 +2,7 @@ package solver ...@@ -2,6 +2,7 @@ package solver
import ( import (
"context" "context"
"math/big"
"testing" "testing"
faulttest "github.com/ethereum-optimism/optimism/op-challenger/game/fault/test" faulttest "github.com/ethereum-optimism/optimism/op-challenger/game/fault/test"
...@@ -16,7 +17,8 @@ func TestAttemptStep(t *testing.T) { ...@@ -16,7 +17,8 @@ func TestAttemptStep(t *testing.T) {
// Last accessible leaf is the second last trace index // Last accessible leaf is the second last trace index
// The root node is used for the last trace index and can only be attacked. // The root node is used for the last trace index and can only be attacked.
lastLeafTraceIndex := uint64(1<<maxDepth - 2) lastLeafTraceIndex := big.NewInt(1<<maxDepth - 2)
lastLeafTraceIndexPlusOne := big.NewInt(1<<maxDepth - 1)
ctx := context.Background() ctx := context.Background()
tests := []struct { tests := []struct {
...@@ -32,9 +34,9 @@ func TestAttemptStep(t *testing.T) { ...@@ -32,9 +34,9 @@ func TestAttemptStep(t *testing.T) {
{ {
name: "AttackFirstTraceIndex", name: "AttackFirstTraceIndex",
expectAttack: true, expectAttack: true,
expectPreState: claimBuilder.CorrectPreState(0), expectPreState: claimBuilder.CorrectPreState(common.Big0),
expectProofData: claimBuilder.CorrectProofData(0), expectProofData: claimBuilder.CorrectProofData(common.Big0),
expectedOracleData: claimBuilder.CorrectOracleData(0), expectedOracleData: claimBuilder.CorrectOracleData(common.Big0),
setupGame: func(builder *faulttest.GameBuilder) { setupGame: func(builder *faulttest.GameBuilder) {
builder.Seq(). builder.Seq().
Attack(common.Hash{0xaa}). Attack(common.Hash{0xaa}).
...@@ -45,9 +47,9 @@ func TestAttemptStep(t *testing.T) { ...@@ -45,9 +47,9 @@ func TestAttemptStep(t *testing.T) {
{ {
name: "DefendFirstTraceIndex", name: "DefendFirstTraceIndex",
expectAttack: false, expectAttack: false,
expectPreState: claimBuilder.CorrectPreState(1), expectPreState: claimBuilder.CorrectPreState(big.NewInt(1)),
expectProofData: claimBuilder.CorrectProofData(1), expectProofData: claimBuilder.CorrectProofData(big.NewInt(1)),
expectedOracleData: claimBuilder.CorrectOracleData(1), expectedOracleData: claimBuilder.CorrectOracleData(big.NewInt(1)),
setupGame: func(builder *faulttest.GameBuilder) { setupGame: func(builder *faulttest.GameBuilder) {
builder.Seq(). builder.Seq().
Attack(common.Hash{0xaa}). Attack(common.Hash{0xaa}).
...@@ -58,9 +60,9 @@ func TestAttemptStep(t *testing.T) { ...@@ -58,9 +60,9 @@ func TestAttemptStep(t *testing.T) {
{ {
name: "AttackMiddleTraceIndex", name: "AttackMiddleTraceIndex",
expectAttack: true, expectAttack: true,
expectPreState: claimBuilder.CorrectPreState(4), expectPreState: claimBuilder.CorrectPreState(big.NewInt(4)),
expectProofData: claimBuilder.CorrectProofData(4), expectProofData: claimBuilder.CorrectProofData(big.NewInt(4)),
expectedOracleData: claimBuilder.CorrectOracleData(4), expectedOracleData: claimBuilder.CorrectOracleData(big.NewInt(4)),
setupGame: func(builder *faulttest.GameBuilder) { setupGame: func(builder *faulttest.GameBuilder) {
builder.Seq(). builder.Seq().
AttackCorrect(). AttackCorrect().
...@@ -71,9 +73,9 @@ func TestAttemptStep(t *testing.T) { ...@@ -71,9 +73,9 @@ func TestAttemptStep(t *testing.T) {
{ {
name: "DefendMiddleTraceIndex", name: "DefendMiddleTraceIndex",
expectAttack: false, expectAttack: false,
expectPreState: claimBuilder.CorrectPreState(5), expectPreState: claimBuilder.CorrectPreState(big.NewInt(5)),
expectProofData: claimBuilder.CorrectProofData(5), expectProofData: claimBuilder.CorrectProofData(big.NewInt(5)),
expectedOracleData: claimBuilder.CorrectOracleData(5), expectedOracleData: claimBuilder.CorrectOracleData(big.NewInt(5)),
setupGame: func(builder *faulttest.GameBuilder) { setupGame: func(builder *faulttest.GameBuilder) {
builder.Seq(). builder.Seq().
AttackCorrect(). AttackCorrect().
...@@ -97,9 +99,9 @@ func TestAttemptStep(t *testing.T) { ...@@ -97,9 +99,9 @@ func TestAttemptStep(t *testing.T) {
{ {
name: "DefendLastTraceIndex", name: "DefendLastTraceIndex",
expectAttack: false, expectAttack: false,
expectPreState: claimBuilder.CorrectPreState(lastLeafTraceIndex + 1), expectPreState: claimBuilder.CorrectPreState(lastLeafTraceIndexPlusOne),
expectProofData: claimBuilder.CorrectProofData(lastLeafTraceIndex + 1), expectProofData: claimBuilder.CorrectProofData(lastLeafTraceIndexPlusOne),
expectedOracleData: claimBuilder.CorrectOracleData(lastLeafTraceIndex + 1), expectedOracleData: claimBuilder.CorrectOracleData(lastLeafTraceIndexPlusOne),
setupGame: func(builder *faulttest.GameBuilder) { setupGame: func(builder *faulttest.GameBuilder) {
builder.Seq(). builder.Seq().
AttackCorrect(). AttackCorrect().
...@@ -140,9 +142,9 @@ func TestAttemptStep(t *testing.T) { ...@@ -140,9 +142,9 @@ func TestAttemptStep(t *testing.T) {
{ {
name: "CannotStepNearlyValidPath", name: "CannotStepNearlyValidPath",
expectAttack: true, expectAttack: true,
expectPreState: claimBuilder.CorrectPreState(4), expectPreState: claimBuilder.CorrectPreState(big.NewInt(4)),
expectProofData: claimBuilder.CorrectProofData(4), expectProofData: claimBuilder.CorrectProofData(big.NewInt(4)),
expectedOracleData: claimBuilder.CorrectOracleData(4), expectedOracleData: claimBuilder.CorrectOracleData(big.NewInt(4)),
setupGame: func(builder *faulttest.GameBuilder) { setupGame: func(builder *faulttest.GameBuilder) {
builder.Seq(). builder.Seq().
AttackCorrect(). AttackCorrect().
......
...@@ -32,7 +32,7 @@ func (a *alphabetWithProofProvider) GetStepData(ctx context.Context, i types.Pos ...@@ -32,7 +32,7 @@ func (a *alphabetWithProofProvider) GetStepData(ctx context.Context, i types.Pos
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
traceIndex := i.TraceIndex(int(a.depth)) traceIndex := i.TraceIndex(int(a.depth)).Uint64()
data := types.NewPreimageOracleData([]byte{byte(traceIndex)}, []byte{byte(traceIndex - 1)}, uint32(traceIndex-1)) data := types.NewPreimageOracleData([]byte{byte(traceIndex)}, []byte{byte(traceIndex - 1)}, uint32(traceIndex-1))
return preimage, []byte{byte(traceIndex - 1)}, data, nil return preimage, []byte{byte(traceIndex - 1)}, data, nil
} }
...@@ -39,30 +39,30 @@ func (c *ClaimBuilder) CorrectClaimAtPosition(pos types.Position) common.Hash { ...@@ -39,30 +39,30 @@ func (c *ClaimBuilder) CorrectClaimAtPosition(pos types.Position) common.Hash {
} }
// CorrectPreState returns the pre-state (not hashed) required to execute the valid step at the specified trace index // CorrectPreState returns the pre-state (not hashed) required to execute the valid step at the specified trace index
func (c *ClaimBuilder) CorrectPreState(idx uint64) []byte { func (c *ClaimBuilder) CorrectPreState(idx *big.Int) []byte {
pos := types.NewPosition(c.maxDepth, int(idx)) pos := types.NewPosition(c.maxDepth, idx)
preimage, _, _, err := c.correct.GetStepData(context.Background(), pos) preimage, _, _, err := c.correct.GetStepData(context.Background(), pos)
c.require.NoError(err) c.require.NoError(err)
return preimage return preimage
} }
// CorrectProofData returns the proof-data required to execute the valid step at the specified trace index // CorrectProofData returns the proof-data required to execute the valid step at the specified trace index
func (c *ClaimBuilder) CorrectProofData(idx uint64) []byte { func (c *ClaimBuilder) CorrectProofData(idx *big.Int) []byte {
pos := types.NewPosition(c.maxDepth, int(idx)) pos := types.NewPosition(c.maxDepth, idx)
_, proof, _, err := c.correct.GetStepData(context.Background(), pos) _, proof, _, err := c.correct.GetStepData(context.Background(), pos)
c.require.NoError(err) c.require.NoError(err)
return proof return proof
} }
func (c *ClaimBuilder) CorrectOracleData(idx uint64) *types.PreimageOracleData { func (c *ClaimBuilder) CorrectOracleData(idx *big.Int) *types.PreimageOracleData {
pos := types.NewPosition(c.maxDepth, int(idx)) pos := types.NewPosition(c.maxDepth, idx)
_, _, data, err := c.correct.GetStepData(context.Background(), pos) _, _, data, err := c.correct.GetStepData(context.Background(), pos)
c.require.NoError(err) c.require.NoError(err)
return data return data
} }
func (c *ClaimBuilder) incorrectClaim(pos types.Position) common.Hash { func (c *ClaimBuilder) incorrectClaim(pos types.Position) common.Hash {
return common.BigToHash(new(big.Int).SetUint64(pos.TraceIndex(c.maxDepth))) return common.BigToHash(pos.TraceIndex(c.maxDepth))
} }
func (c *ClaimBuilder) claim(pos types.Position, correct bool) common.Hash { func (c *ClaimBuilder) claim(pos types.Position, correct bool) common.Hash {
...@@ -78,15 +78,15 @@ func (c *ClaimBuilder) CreateRootClaim(correct bool) types.Claim { ...@@ -78,15 +78,15 @@ func (c *ClaimBuilder) CreateRootClaim(correct bool) types.Claim {
claim := types.Claim{ claim := types.Claim{
ClaimData: types.ClaimData{ ClaimData: types.ClaimData{
Value: value, Value: value,
Position: types.NewPosition(0, 0), Position: types.NewPosition(0, common.Big0),
}, },
} }
return claim return claim
} }
func (c *ClaimBuilder) CreateLeafClaim(traceIndex uint64, correct bool) types.Claim { func (c *ClaimBuilder) CreateLeafClaim(traceIndex *big.Int, correct bool) types.Claim {
parentPos := types.NewPosition(c.maxDepth-1, 0) parentPos := types.NewPosition(c.maxDepth-1, common.Big0)
pos := types.NewPosition(c.maxDepth, int(traceIndex)) pos := types.NewPosition(c.maxDepth, traceIndex)
return types.Claim{ return types.Claim{
ClaimData: types.ClaimData{ ClaimData: types.ClaimData{
Value: c.claim(pos, correct), Value: c.claim(pos, correct),
......
package test package test
import ( import (
"math/big"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
) )
type GameBuilder struct { type GameBuilder struct {
builder *ClaimBuilder builder *ClaimBuilder
Game types.Game Game types.Game
ExpectedActions []types.Action ExpectedActions []types.Action
agreeWithOutputRoot bool
} }
func (c *ClaimBuilder) GameBuilder(agreeWithOutputRoot bool, rootCorrect bool) *GameBuilder { func (c *ClaimBuilder) GameBuilder(agreeWithOutputRoot bool, rootCorrect bool) *GameBuilder {
return &GameBuilder{ return &GameBuilder{
builder: c, builder: c,
Game: types.NewGameState(agreeWithOutputRoot, c.CreateRootClaim(rootCorrect), uint64(c.maxDepth)), agreeWithOutputRoot: agreeWithOutputRoot,
Game: types.NewGameState(agreeWithOutputRoot, []types.Claim{c.CreateRootClaim(rootCorrect)}, uint64(c.maxDepth)),
} }
} }
...@@ -22,62 +26,60 @@ type GameBuilderSeq struct { ...@@ -22,62 +26,60 @@ type GameBuilderSeq struct {
gameBuilder *GameBuilder gameBuilder *GameBuilder
builder *ClaimBuilder builder *ClaimBuilder
lastClaim types.Claim lastClaim types.Claim
game types.Game
} }
func (g *GameBuilder) Seq() *GameBuilderSeq { func (g *GameBuilder) Seq() *GameBuilderSeq {
return &GameBuilderSeq{ return &GameBuilderSeq{
gameBuilder: g, gameBuilder: g,
builder: g.builder, builder: g.builder,
game: g.Game,
lastClaim: g.Game.Claims()[0], lastClaim: g.Game.Claims()[0],
} }
} }
// addClaimToGame replaces the game being built with a new instance that has claim as the latest claim.
// The ContractIndex in claim is updated with its position in the game's claim array.
func (s *GameBuilderSeq) addClaimToGame(claim *types.Claim) {
claim.ContractIndex = len(s.gameBuilder.Game.Claims())
claims := append(s.gameBuilder.Game.Claims(), *claim)
s.gameBuilder.Game = types.NewGameState(s.gameBuilder.agreeWithOutputRoot, claims, uint64(s.builder.maxDepth))
}
func (s *GameBuilderSeq) AttackCorrect() *GameBuilderSeq { func (s *GameBuilderSeq) AttackCorrect() *GameBuilderSeq {
claim := s.builder.AttackClaim(s.lastClaim, true) claim := s.builder.AttackClaim(s.lastClaim, true)
claim.ContractIndex = len(s.game.Claims()) s.addClaimToGame(&claim)
s.builder.require.NoError(s.game.Put(claim))
return &GameBuilderSeq{ return &GameBuilderSeq{
gameBuilder: s.gameBuilder, gameBuilder: s.gameBuilder,
builder: s.builder, builder: s.builder,
game: s.game,
lastClaim: claim, lastClaim: claim,
} }
} }
func (s *GameBuilderSeq) Attack(value common.Hash) *GameBuilderSeq { func (s *GameBuilderSeq) Attack(value common.Hash) *GameBuilderSeq {
claim := s.builder.AttackClaimWithValue(s.lastClaim, value) claim := s.builder.AttackClaimWithValue(s.lastClaim, value)
claim.ContractIndex = len(s.game.Claims()) s.addClaimToGame(&claim)
s.builder.require.NoError(s.game.Put(claim))
return &GameBuilderSeq{ return &GameBuilderSeq{
gameBuilder: s.gameBuilder, gameBuilder: s.gameBuilder,
builder: s.builder, builder: s.builder,
game: s.game,
lastClaim: claim, lastClaim: claim,
} }
} }
func (s *GameBuilderSeq) DefendCorrect() *GameBuilderSeq { func (s *GameBuilderSeq) DefendCorrect() *GameBuilderSeq {
claim := s.builder.DefendClaim(s.lastClaim, true) claim := s.builder.DefendClaim(s.lastClaim, true)
claim.ContractIndex = len(s.game.Claims()) s.addClaimToGame(&claim)
s.builder.require.NoError(s.game.Put(claim))
return &GameBuilderSeq{ return &GameBuilderSeq{
gameBuilder: s.gameBuilder, gameBuilder: s.gameBuilder,
builder: s.builder, builder: s.builder,
game: s.game,
lastClaim: claim, lastClaim: claim,
} }
} }
func (s *GameBuilderSeq) Defend(value common.Hash) *GameBuilderSeq { func (s *GameBuilderSeq) Defend(value common.Hash) *GameBuilderSeq {
claim := s.builder.DefendClaimWithValue(s.lastClaim, value) claim := s.builder.DefendClaimWithValue(s.lastClaim, value)
claim.ContractIndex = len(s.game.Claims()) s.addClaimToGame(&claim)
s.builder.require.NoError(s.game.Put(claim))
return &GameBuilderSeq{ return &GameBuilderSeq{
gameBuilder: s.gameBuilder, gameBuilder: s.gameBuilder,
builder: s.builder, builder: s.builder,
game: s.game,
lastClaim: claim, lastClaim: claim,
} }
} }
...@@ -120,7 +122,7 @@ func (s *GameBuilderSeq) ExpectStepAttack() *GameBuilderSeq { ...@@ -120,7 +122,7 @@ func (s *GameBuilderSeq) ExpectStepAttack() *GameBuilderSeq {
} }
func (s *GameBuilderSeq) ExpectStepDefend() *GameBuilderSeq { func (s *GameBuilderSeq) ExpectStepDefend() *GameBuilderSeq {
traceIdx := s.lastClaim.TraceIndex(s.builder.maxDepth) + 1 traceIdx := new(big.Int).Add(s.lastClaim.TraceIndex(s.builder.maxDepth), big.NewInt(1))
s.gameBuilder.ExpectedActions = append(s.gameBuilder.ExpectedActions, types.Action{ s.gameBuilder.ExpectedActions = append(s.gameBuilder.ExpectedActions, types.Action{
Type: types.ActionTypeStep, Type: types.ActionTypeStep,
ParentIdx: s.lastClaim.ContractIndex, ParentIdx: s.lastClaim.ContractIndex,
......
...@@ -35,7 +35,7 @@ func NewTraceProvider(state string, depth uint64) *AlphabetTraceProvider { ...@@ -35,7 +35,7 @@ func NewTraceProvider(state string, depth uint64) *AlphabetTraceProvider {
func (ap *AlphabetTraceProvider) GetStepData(ctx context.Context, i types.Position) ([]byte, []byte, *types.PreimageOracleData, error) { func (ap *AlphabetTraceProvider) GetStepData(ctx context.Context, i types.Position) ([]byte, []byte, *types.PreimageOracleData, error) {
traceIndex := i.TraceIndex(int(ap.depth)) traceIndex := i.TraceIndex(int(ap.depth))
if traceIndex == 0 { if traceIndex.Cmp(common.Big0) == 0 {
prestate, err := ap.AbsolutePreState(ctx) prestate, err := ap.AbsolutePreState(ctx)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
...@@ -43,22 +43,23 @@ func (ap *AlphabetTraceProvider) GetStepData(ctx context.Context, i types.Positi ...@@ -43,22 +43,23 @@ func (ap *AlphabetTraceProvider) GetStepData(ctx context.Context, i types.Positi
return prestate, []byte{}, nil, nil return prestate, []byte{}, nil, nil
} }
// We want the pre-state which is the value prior to the one requested // We want the pre-state which is the value prior to the one requested
traceIndex-- traceIndex = traceIndex.Sub(traceIndex, big.NewInt(1))
// The index cannot be larger than the maximum index as computed by the depth. // The index cannot be larger than the maximum index as computed by the depth.
if traceIndex >= ap.maxLen { if traceIndex.Cmp(big.NewInt(int64(ap.maxLen))) >= 0 {
return nil, nil, nil, ErrIndexTooLarge return nil, nil, nil, ErrIndexTooLarge
} }
// We extend the deepest hash to the maximum depth if the trace is not expansive. // We extend the deepest hash to the maximum depth if the trace is not expansive.
if traceIndex >= uint64(len(ap.state)) { if traceIndex.Cmp(big.NewInt(int64(len(ap.state)))) >= 0 {
return ap.GetStepData(ctx, types.NewPosition(int(ap.depth), len(ap.state))) return ap.GetStepData(ctx, types.NewPosition(int(ap.depth), big.NewInt(int64(len(ap.state)))))
} }
return BuildAlphabetPreimage(traceIndex, ap.state[traceIndex]), []byte{}, nil, nil return BuildAlphabetPreimage(traceIndex, ap.state[traceIndex.Uint64()]), []byte{}, nil, nil
} }
// Get returns the claim value at the given index in the trace. // Get returns the claim value at the given index in the trace.
func (ap *AlphabetTraceProvider) Get(ctx context.Context, i types.Position) (common.Hash, error) { func (ap *AlphabetTraceProvider) Get(ctx context.Context, i types.Position) (common.Hash, error) {
// Step data returns the pre-state, so add 1 to get the state for index i // Step data returns the pre-state, so add 1 to get the state for index i
postPosition := types.NewPosition(int(ap.depth), int(i.TraceIndex(int(ap.depth)))+1) ti := i.TraceIndex(int(ap.depth))
postPosition := types.NewPosition(int(ap.depth), new(big.Int).Add(ti, big.NewInt(1)))
claimBytes, _, _, err := ap.GetStepData(ctx, postPosition) claimBytes, _, _, err := ap.GetStepData(ctx, postPosition)
if err != nil { if err != nil {
return common.Hash{}, err return common.Hash{}, err
...@@ -82,8 +83,8 @@ func (ap *AlphabetTraceProvider) AbsolutePreStateCommitment(ctx context.Context) ...@@ -82,8 +83,8 @@ func (ap *AlphabetTraceProvider) AbsolutePreStateCommitment(ctx context.Context)
} }
// BuildAlphabetPreimage constructs the claim bytes for the index and state item. // BuildAlphabetPreimage constructs the claim bytes for the index and state item.
func BuildAlphabetPreimage(i uint64, letter string) []byte { func BuildAlphabetPreimage(i *big.Int, letter string) []byte {
return append(IndexToBytes(i), LetterToBytes(letter)...) return append(i.FillBytes(make([]byte, 32)), LetterToBytes(letter)...)
} }
func alphabetStateHash(state []byte) common.Hash { func alphabetStateHash(state []byte) common.Hash {
...@@ -92,14 +93,6 @@ func alphabetStateHash(state []byte) common.Hash { ...@@ -92,14 +93,6 @@ func alphabetStateHash(state []byte) common.Hash {
return h return h
} }
// IndexToBytes converts an index to a byte slice big endian
func IndexToBytes(i uint64) []byte {
big := new(big.Int)
big.SetUint64(i)
out := make([]byte, 32)
return big.FillBytes(out)
}
// LetterToBytes converts a letter to a 32 byte array // LetterToBytes converts a letter to a 32 byte array
func LetterToBytes(letter string) []byte { func LetterToBytes(letter string) []byte {
out := make([]byte, 32) out := make([]byte, 32)
......
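For context on the change above: the deleted IndexToBytes helper big-endian packed a uint64 into a 32-byte word, and BuildAlphabetPreimage now does the same thing directly on the *big.Int trace index via FillBytes. A standalone sketch (not part of this commit) showing that the two encodings agree for any index that fits in 64 bits:

package main

import (
	"bytes"
	"fmt"
	"math/big"
)

// oldIndexToBytes mirrors the removed IndexToBytes helper: uint64 -> 32-byte big-endian.
func oldIndexToBytes(i uint64) []byte {
	return new(big.Int).SetUint64(i).FillBytes(make([]byte, 32))
}

// newIndexToBytes is what BuildAlphabetPreimage now inlines for a *big.Int index.
func newIndexToBytes(i *big.Int) []byte {
	return i.FillBytes(make([]byte, 32))
}

func main() {
	idx := uint64(7)
	oldEnc := oldIndexToBytes(idx)
	newEnc := newIndexToBytes(new(big.Int).SetUint64(idx))
	fmt.Printf("old: %x\nnew: %x\nequal: %v\n", oldEnc, newEnc, bytes.Equal(oldEnc, newEnc))
}

The big.Int form additionally covers trace indices beyond uint64, which positions in very deep games can produce.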
...@@ -10,7 +10,7 @@ import ( ...@@ -10,7 +10,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func alphabetClaim(index uint64, letter string) common.Hash { func alphabetClaim(index *big.Int, letter string) common.Hash {
return alphabetStateHash(BuildAlphabetPreimage(index, letter)) return alphabetStateHash(BuildAlphabetPreimage(index, letter))
} }
...@@ -26,16 +26,16 @@ func TestAlphabetProvider_Get_ClaimsByTraceIndex(t *testing.T) { ...@@ -26,16 +26,16 @@ func TestAlphabetProvider_Get_ClaimsByTraceIndex(t *testing.T) {
expectedHash common.Hash expectedHash common.Hash
}{ }{
{ {
types.NewPosition(depth, 7), types.NewPosition(depth, big.NewInt(7)),
alphabetClaim(7, "h"), alphabetClaim(big.NewInt(7), "h"),
}, },
{ {
types.NewPosition(depth, 3), types.NewPosition(depth, big.NewInt(3)),
alphabetClaim(3, "d"), alphabetClaim(big.NewInt(3), "d"),
}, },
{ {
types.NewPosition(depth, 5), types.NewPosition(depth, big.NewInt(5)),
alphabetClaim(5, "f"), alphabetClaim(big.NewInt(5), "f"),
}, },
} }
...@@ -47,23 +47,13 @@ func TestAlphabetProvider_Get_ClaimsByTraceIndex(t *testing.T) { ...@@ -47,23 +47,13 @@ func TestAlphabetProvider_Get_ClaimsByTraceIndex(t *testing.T) {
} }
} }
// FuzzIndexToBytes tests the IndexToBytes function.
func FuzzIndexToBytes(f *testing.F) {
f.Fuzz(func(t *testing.T, index uint64) {
translated := IndexToBytes(index)
original := new(big.Int)
original.SetBytes(translated)
require.Equal(t, original.Uint64(), index)
})
}
// TestGetStepData_Succeeds tests the GetStepData function // TestGetStepData_Succeeds tests the GetStepData function
// returns the correct pre-image for an index. // returns the correct pre-image for an index.
func TestGetStepData_Succeeds(t *testing.T) { func TestGetStepData_Succeeds(t *testing.T) {
depth := 2 depth := 2
ap := NewTraceProvider("abc", uint64(depth)) ap := NewTraceProvider("abc", uint64(depth))
expected := BuildAlphabetPreimage(0, "a") expected := BuildAlphabetPreimage(big.NewInt(0), "a")
pos := types.NewPosition(depth, 1) pos := types.NewPosition(depth, big.NewInt(1))
retrieved, proof, data, err := ap.GetStepData(context.Background(), pos) retrieved, proof, data, err := ap.GetStepData(context.Background(), pos)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, expected, retrieved) require.Equal(t, expected, retrieved)
...@@ -76,7 +66,7 @@ func TestGetStepData_Succeeds(t *testing.T) { ...@@ -76,7 +66,7 @@ func TestGetStepData_Succeeds(t *testing.T) {
func TestGetStepData_TooLargeIndex_Fails(t *testing.T) { func TestGetStepData_TooLargeIndex_Fails(t *testing.T) {
depth := 2 depth := 2
ap := NewTraceProvider("abc", uint64(depth)) ap := NewTraceProvider("abc", uint64(depth))
pos := types.NewPosition(depth, 5) pos := types.NewPosition(depth, big.NewInt(5))
_, _, _, err := ap.GetStepData(context.Background(), pos) _, _, _, err := ap.GetStepData(context.Background(), pos)
require.ErrorIs(t, err, ErrIndexTooLarge) require.ErrorIs(t, err, ErrIndexTooLarge)
} }
...@@ -85,10 +75,10 @@ func TestGetStepData_TooLargeIndex_Fails(t *testing.T) { ...@@ -85,10 +75,10 @@ func TestGetStepData_TooLargeIndex_Fails(t *testing.T) {
func TestGet_Succeeds(t *testing.T) { func TestGet_Succeeds(t *testing.T) {
depth := 2 depth := 2
ap := NewTraceProvider("abc", uint64(depth)) ap := NewTraceProvider("abc", uint64(depth))
pos := types.NewPosition(depth, 0) pos := types.NewPosition(depth, big.NewInt(0))
claim, err := ap.Get(context.Background(), pos) claim, err := ap.Get(context.Background(), pos)
require.NoError(t, err) require.NoError(t, err)
expected := alphabetClaim(0, "a") expected := alphabetClaim(big.NewInt(0), "a")
require.Equal(t, expected, claim) require.Equal(t, expected, claim)
} }
...@@ -97,7 +87,7 @@ func TestGet_Succeeds(t *testing.T) { ...@@ -97,7 +87,7 @@ func TestGet_Succeeds(t *testing.T) {
func TestGet_IndexTooLarge(t *testing.T) { func TestGet_IndexTooLarge(t *testing.T) {
depth := 2 depth := 2
ap := NewTraceProvider("abc", uint64(depth)) ap := NewTraceProvider("abc", uint64(depth))
pos := types.NewPosition(depth, 4) pos := types.NewPosition(depth, big.NewInt(4))
_, err := ap.Get(context.Background(), pos) _, err := ap.Get(context.Background(), pos)
require.ErrorIs(t, err, ErrIndexTooLarge) require.ErrorIs(t, err, ErrIndexTooLarge)
} }
...@@ -107,9 +97,9 @@ func TestGet_IndexTooLarge(t *testing.T) { ...@@ -107,9 +97,9 @@ func TestGet_IndexTooLarge(t *testing.T) {
func TestGet_Extends(t *testing.T) { func TestGet_Extends(t *testing.T) {
depth := 2 depth := 2
ap := NewTraceProvider("abc", uint64(depth)) ap := NewTraceProvider("abc", uint64(depth))
pos := types.NewPosition(depth, 3) pos := types.NewPosition(depth, big.NewInt(3))
claim, err := ap.Get(context.Background(), pos) claim, err := ap.Get(context.Background(), pos)
require.NoError(t, err) require.NoError(t, err)
expected := alphabetClaim(2, "c") expected := alphabetClaim(big.NewInt(2), "c")
require.Equal(t, expected, claim) require.Equal(t, expected, claim)
} }
...@@ -88,7 +88,7 @@ func (p *CannonTraceProvider) SetMaxDepth(gameDepth uint64) { ...@@ -88,7 +88,7 @@ func (p *CannonTraceProvider) SetMaxDepth(gameDepth uint64) {
} }
func (p *CannonTraceProvider) Get(ctx context.Context, pos types.Position) (common.Hash, error) { func (p *CannonTraceProvider) Get(ctx context.Context, pos types.Position) (common.Hash, error) {
proof, err := p.loadProof(ctx, pos.TraceIndex(int(p.gameDepth))) proof, err := p.loadProof(ctx, pos.UnsafeTraceIndex(int(p.gameDepth)))
if err != nil { if err != nil {
return common.Hash{}, err return common.Hash{}, err
} }
...@@ -101,7 +101,7 @@ func (p *CannonTraceProvider) Get(ctx context.Context, pos types.Position) (comm ...@@ -101,7 +101,7 @@ func (p *CannonTraceProvider) Get(ctx context.Context, pos types.Position) (comm
} }
func (p *CannonTraceProvider) GetStepData(ctx context.Context, pos types.Position) ([]byte, []byte, *types.PreimageOracleData, error) { func (p *CannonTraceProvider) GetStepData(ctx context.Context, pos types.Position) ([]byte, []byte, *types.PreimageOracleData, error) {
proof, err := p.loadProof(ctx, pos.TraceIndex(int(p.gameDepth))) proof, err := p.loadProof(ctx, pos.UnsafeTraceIndex(int(p.gameDepth)))
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
......
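UnsafeTraceIndex, used above when loading Cannon proofs, collapses the big.Int trace index back into a uint64 and silently truncates once a position is deep enough that its trace index no longer fits in 64 bits. A minimal sketch, not code from this commit, of the bounds check a caller could apply first:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Trace index of the root of a 100-deep game: 2^100 - 1, far outside uint64 range.
	huge := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 100), big.NewInt(1))
	// A small trace index that fits comfortably.
	small := big.NewInt(15)

	for _, ti := range []*big.Int{small, huge} {
		if !ti.IsUint64() {
			fmt.Printf("trace index %v overflows uint64; refusing to truncate\n", ti)
			continue
		}
		fmt.Printf("trace index %d is safe to hand to a uint64-based API\n", ti.Uint64())
	}
}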
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
_ "embed" _ "embed"
"encoding/json" "encoding/json"
"fmt" "fmt"
"math/big"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
...@@ -22,7 +23,7 @@ import ( ...@@ -22,7 +23,7 @@ import (
//go:embed test_data //go:embed test_data
var testData embed.FS var testData embed.FS
func PositionFromTraceIndex(provider *CannonTraceProvider, idx int) types.Position { func PositionFromTraceIndex(provider *CannonTraceProvider, idx *big.Int) types.Position {
return types.NewPosition(int(provider.gameDepth), idx) return types.NewPosition(int(provider.gameDepth), idx)
} }
...@@ -30,7 +31,7 @@ func TestGet(t *testing.T) { ...@@ -30,7 +31,7 @@ func TestGet(t *testing.T) {
dataDir, prestate := setupTestData(t) dataDir, prestate := setupTestData(t)
t.Run("ExistingProof", func(t *testing.T) { t.Run("ExistingProof", func(t *testing.T) {
provider, generator := setupWithTestData(t, dataDir, prestate) provider, generator := setupWithTestData(t, dataDir, prestate)
value, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, 0)) value, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, common.Big0))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, common.HexToHash("0x45fd9aa59768331c726e719e76aa343e73123af888804604785ae19506e65e87"), value) require.Equal(t, common.HexToHash("0x45fd9aa59768331c726e719e76aa343e73123af888804604785ae19506e65e87"), value)
require.Empty(t, generator.generated) require.Empty(t, generator.generated)
...@@ -43,7 +44,7 @@ func TestGet(t *testing.T) { ...@@ -43,7 +44,7 @@ func TestGet(t *testing.T) {
Step: 10, Step: 10,
Exited: true, Exited: true,
} }
value, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, 7000)) value, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, big.NewInt(7000)))
require.NoError(t, err) require.NoError(t, err)
require.Contains(t, generator.generated, 7000, "should have tried to generate the proof") require.Contains(t, generator.generated, 7000, "should have tried to generate the proof")
stateHash, err := generator.finalState.EncodeWitness().StateHash() stateHash, err := generator.finalState.EncodeWitness().StateHash()
...@@ -53,14 +54,14 @@ func TestGet(t *testing.T) { ...@@ -53,14 +54,14 @@ func TestGet(t *testing.T) {
t.Run("MissingPostHash", func(t *testing.T) { t.Run("MissingPostHash", func(t *testing.T) {
provider, generator := setupWithTestData(t, dataDir, prestate) provider, generator := setupWithTestData(t, dataDir, prestate)
_, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, 1)) _, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, big.NewInt(1)))
require.ErrorContains(t, err, "missing post hash") require.ErrorContains(t, err, "missing post hash")
require.Empty(t, generator.generated) require.Empty(t, generator.generated)
}) })
t.Run("IgnoreUnknownFields", func(t *testing.T) { t.Run("IgnoreUnknownFields", func(t *testing.T) {
provider, generator := setupWithTestData(t, dataDir, prestate) provider, generator := setupWithTestData(t, dataDir, prestate)
value, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, 2)) value, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, big.NewInt(2)))
require.NoError(t, err) require.NoError(t, err)
expected := common.HexToHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") expected := common.HexToHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
require.Equal(t, expected, value) require.Equal(t, expected, value)
...@@ -72,7 +73,7 @@ func TestGetStepData(t *testing.T) { ...@@ -72,7 +73,7 @@ func TestGetStepData(t *testing.T) {
t.Run("ExistingProof", func(t *testing.T) { t.Run("ExistingProof", func(t *testing.T) {
dataDir, prestate := setupTestData(t) dataDir, prestate := setupTestData(t)
provider, generator := setupWithTestData(t, dataDir, prestate) provider, generator := setupWithTestData(t, dataDir, prestate)
value, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, 0)) value, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, new(big.Int)))
require.NoError(t, err) require.NoError(t, err)
expected := common.Hex2Bytes("b8f068de604c85ea0e2acd437cdb47add074a2d70b81d018390c504b71fe26f400000000000000000000000000000000000000000000000000000000000000000000000000") expected := common.Hex2Bytes("b8f068de604c85ea0e2acd437cdb47add074a2d70b81d018390c504b71fe26f400000000000000000000000000000000000000000000000000000000000000000000000000")
require.Equal(t, expected, value) require.Equal(t, expected, value)
...@@ -99,7 +100,7 @@ func TestGetStepData(t *testing.T) { ...@@ -99,7 +100,7 @@ func TestGetStepData(t *testing.T) {
OracleValue: []byte{0xdd}, OracleValue: []byte{0xdd},
OracleOffset: 10, OracleOffset: 10,
} }
preimage, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, 4)) preimage, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(4)))
require.NoError(t, err) require.NoError(t, err)
require.Contains(t, generator.generated, 4, "should have tried to generate the proof") require.Contains(t, generator.generated, 4, "should have tried to generate the proof")
...@@ -125,7 +126,7 @@ func TestGetStepData(t *testing.T) { ...@@ -125,7 +126,7 @@ func TestGetStepData(t *testing.T) {
OracleValue: []byte{0xdd}, OracleValue: []byte{0xdd},
OracleOffset: 10, OracleOffset: 10,
} }
preimage, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, 7000)) preimage, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(7000)))
require.NoError(t, err) require.NoError(t, err)
require.Contains(t, generator.generated, 7000, "should have tried to generate the proof") require.Contains(t, generator.generated, 7000, "should have tried to generate the proof")
...@@ -151,7 +152,7 @@ func TestGetStepData(t *testing.T) { ...@@ -151,7 +152,7 @@ func TestGetStepData(t *testing.T) {
OracleValue: []byte{0xdd}, OracleValue: []byte{0xdd},
OracleOffset: 10, OracleOffset: 10,
} }
_, _, _, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, 7000)) _, _, _, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(7000)))
require.NoError(t, err) require.NoError(t, err)
require.Contains(t, initGenerator.generated, 7000, "should have tried to generate the proof") require.Contains(t, initGenerator.generated, 7000, "should have tried to generate the proof")
...@@ -166,7 +167,7 @@ func TestGetStepData(t *testing.T) { ...@@ -166,7 +167,7 @@ func TestGetStepData(t *testing.T) {
StateData: []byte{0xbb}, StateData: []byte{0xbb},
ProofData: []byte{0xcc}, ProofData: []byte{0xcc},
} }
preimage, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, 7000)) preimage, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(7000)))
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, generator.generated, "should not have to generate the proof again") require.Empty(t, generator.generated, "should not have to generate the proof again")
...@@ -178,7 +179,7 @@ func TestGetStepData(t *testing.T) { ...@@ -178,7 +179,7 @@ func TestGetStepData(t *testing.T) {
t.Run("MissingStateData", func(t *testing.T) { t.Run("MissingStateData", func(t *testing.T) {
dataDir, prestate := setupTestData(t) dataDir, prestate := setupTestData(t)
provider, generator := setupWithTestData(t, dataDir, prestate) provider, generator := setupWithTestData(t, dataDir, prestate)
_, _, _, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, 1)) _, _, _, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(1)))
require.ErrorContains(t, err, "missing state data") require.ErrorContains(t, err, "missing state data")
require.Empty(t, generator.generated) require.Empty(t, generator.generated)
}) })
...@@ -186,7 +187,7 @@ func TestGetStepData(t *testing.T) { ...@@ -186,7 +187,7 @@ func TestGetStepData(t *testing.T) {
t.Run("IgnoreUnknownFields", func(t *testing.T) { t.Run("IgnoreUnknownFields", func(t *testing.T) {
dataDir, prestate := setupTestData(t) dataDir, prestate := setupTestData(t)
provider, generator := setupWithTestData(t, dataDir, prestate) provider, generator := setupWithTestData(t, dataDir, prestate)
value, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, 2)) value, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(2)))
require.NoError(t, err) require.NoError(t, err)
expected := common.Hex2Bytes("cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") expected := common.Hex2Bytes("cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc")
require.Equal(t, expected, value) require.Equal(t, expected, value)
......
...@@ -3,6 +3,7 @@ package outputs ...@@ -3,6 +3,7 @@ package outputs
import ( import (
"context" "context"
"fmt" "fmt"
"math"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/client"
...@@ -52,7 +53,11 @@ func NewTraceProviderFromInputs(logger log.Logger, rollupClient OutputRollupClie ...@@ -52,7 +53,11 @@ func NewTraceProviderFromInputs(logger log.Logger, rollupClient OutputRollupClie
} }
func (o *OutputTraceProvider) Get(ctx context.Context, pos types.Position) (common.Hash, error) { func (o *OutputTraceProvider) Get(ctx context.Context, pos types.Position) (common.Hash, error) {
outputBlock := pos.TraceIndex(int(o.gameDepth)) + o.prestateBlock + 1 traceIndex := pos.TraceIndex(int(o.gameDepth))
if traceIndex.Cmp(new(big.Int).SetUint64(math.MaxUint64)) > 0 {
return common.Hash{}, fmt.Errorf("trace index %v is greater than max uint64", traceIndex)
}
outputBlock := traceIndex.Uint64() + o.prestateBlock + 1
if outputBlock > o.poststateBlock { if outputBlock > o.poststateBlock {
outputBlock = o.poststateBlock outputBlock = o.poststateBlock
} }
......
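The arithmetic above maps a trace index to an L2 block number: the output at trace index i corresponds to block prestateBlock + i + 1, clamped to poststateBlock, after first checking that the index fits in a uint64. A standalone sketch of that mapping (the function name is illustrative, not the provider's API):

package main

import (
	"fmt"
	"math/big"
)

// outputBlockFor mirrors the mapping in OutputTraceProvider.Get:
// block = prestateBlock + traceIndex + 1, capped at poststateBlock.
func outputBlockFor(traceIndex *big.Int, prestateBlock, poststateBlock uint64) (uint64, error) {
	if !traceIndex.IsUint64() {
		return 0, fmt.Errorf("trace index %v is greater than max uint64", traceIndex)
	}
	block := traceIndex.Uint64() + prestateBlock + 1
	if block > poststateBlock {
		block = poststateBlock
	}
	return block, nil
}

func main() {
	for _, ti := range []*big.Int{big.NewInt(0), big.NewInt(5), big.NewInt(1 << 20)} {
		block, err := outputBlockFor(ti, 100, 110)
		fmt.Println(block, err) // 101, 106, then clamped to 110
	}
}

Using IsUint64 keeps the sketch free of a max-uint64 constant; it is one reasonable alternative to the explicit comparison the provider performs.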
...@@ -3,6 +3,7 @@ package outputs ...@@ -3,6 +3,7 @@ package outputs
import ( import (
"context" "context"
"fmt" "fmt"
"math/big"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
...@@ -26,13 +27,21 @@ var ( ...@@ -26,13 +27,21 @@ var (
func TestGet(t *testing.T) { func TestGet(t *testing.T) {
t.Run("PrePrestateErrors", func(t *testing.T) { t.Run("PrePrestateErrors", func(t *testing.T) {
provider, _ := setupWithTestData(t, 0, poststateBlock) provider, _ := setupWithTestData(t, 0, poststateBlock)
_, err := provider.Get(context.Background(), types.NewPosition(1, 0)) _, err := provider.Get(context.Background(), types.NewPosition(1, common.Big0))
require.ErrorAs(t, fmt.Errorf("no output at block %d", 1), &err) require.ErrorAs(t, fmt.Errorf("no output at block %d", 1), &err)
}) })
t.Run("ErrorsTraceIndexOutOfBounds", func(t *testing.T) {
deepGame := uint64(64)
provider, _ := setupWithTestData(t, prestateBlock, poststateBlock, deepGame)
pos := types.NewPosition(0, big.NewInt(0))
_, err := provider.Get(context.Background(), pos)
require.ErrorAs(t, fmt.Errorf("trace index %v is greater than max uint64", pos.TraceIndex(int(deepGame))), &err)
})
t.Run("MisconfiguredPoststateErrors", func(t *testing.T) { t.Run("MisconfiguredPoststateErrors", func(t *testing.T) {
provider, _ := setupWithTestData(t, 0, 0) provider, _ := setupWithTestData(t, 0, 0)
_, err := provider.Get(context.Background(), types.NewPosition(1, 0)) _, err := provider.Get(context.Background(), types.NewPosition(1, common.Big0))
require.ErrorAs(t, fmt.Errorf("no output at block %d", 0), &err) require.ErrorAs(t, fmt.Errorf("no output at block %d", 0), &err)
}) })
...@@ -82,7 +91,7 @@ func TestAbsolutePreStateCommitment(t *testing.T) { ...@@ -82,7 +91,7 @@ func TestAbsolutePreStateCommitment(t *testing.T) {
func TestGetStepData(t *testing.T) { func TestGetStepData(t *testing.T) {
provider, _ := setupWithTestData(t, prestateBlock, poststateBlock) provider, _ := setupWithTestData(t, prestateBlock, poststateBlock)
_, _, _, err := provider.GetStepData(context.Background(), types.NewPosition(1, 0)) _, _, _, err := provider.GetStepData(context.Background(), types.NewPosition(1, common.Big0))
require.ErrorIs(t, err, GetStepDataErr) require.ErrorIs(t, err, GetStepDataErr)
} }
...@@ -92,7 +101,7 @@ func TestAbsolutePreState(t *testing.T) { ...@@ -92,7 +101,7 @@ func TestAbsolutePreState(t *testing.T) {
require.ErrorIs(t, err, AbsolutePreStateErr) require.ErrorIs(t, err, AbsolutePreStateErr)
} }
func setupWithTestData(t *testing.T, prestateBlock, poststateBlock uint64) (*OutputTraceProvider, *stubRollupClient) { func setupWithTestData(t *testing.T, prestateBlock, poststateBlock uint64, customGameDepth ...uint64) (*OutputTraceProvider, *stubRollupClient) {
rollupClient := stubRollupClient{ rollupClient := stubRollupClient{
outputs: map[uint64]*eth.OutputResponse{ outputs: map[uint64]*eth.OutputResponse{
prestateBlock: { prestateBlock: {
...@@ -106,12 +115,16 @@ func setupWithTestData(t *testing.T, prestateBlock, poststateBlock uint64) (*Out ...@@ -106,12 +115,16 @@ func setupWithTestData(t *testing.T, prestateBlock, poststateBlock uint64) (*Out
}, },
}, },
} }
inputGameDepth := gameDepth
if len(customGameDepth) > 0 {
inputGameDepth = customGameDepth[0]
}
return &OutputTraceProvider{ return &OutputTraceProvider{
logger: testlog.Logger(t, log.LvlInfo), logger: testlog.Logger(t, log.LvlInfo),
rollupClient: &rollupClient, rollupClient: &rollupClient,
prestateBlock: prestateBlock, prestateBlock: prestateBlock,
poststateBlock: poststateBlock, poststateBlock: poststateBlock,
gameDepth: gameDepth, gameDepth: inputGameDepth,
}, &rollupClient }, &rollupClient
} }
......
package split
import (
"context"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
var _ types.TraceProvider = (*SplitTraceProvider)(nil)
// SplitTraceProvider is a [types.TraceProvider] implementation that
// routes requests to the correct internal trace provider based on the
// depth of the requested trace.
type SplitTraceProvider struct {
logger log.Logger
topProvider types.TraceProvider
bottomProvider types.TraceProvider
topDepth uint64
}
// NewTraceProvider creates a new [SplitTraceProvider] instance.
// The [topDepth] parameter specifies the depth at which the internal
// [types.TraceProvider] should be switched.
func NewTraceProvider(logger log.Logger, topProvider types.TraceProvider, bottomProvider types.TraceProvider, topDepth uint64) *SplitTraceProvider {
return &SplitTraceProvider{
logger: logger,
topProvider: topProvider,
bottomProvider: bottomProvider,
topDepth: topDepth,
}
}
func (s *SplitTraceProvider) providerForDepth(depth uint64) (uint64, types.TraceProvider) {
if depth <= s.topDepth {
return 0, s.topProvider
}
return s.topDepth, s.bottomProvider
}
// Get routes the Get request to the internal [types.TraceProvider] that
// serves the trace index at the depth.
func (s *SplitTraceProvider) Get(ctx context.Context, pos types.Position) (common.Hash, error) {
ancestorDepth, provider := s.providerForDepth(uint64(pos.Depth()))
relativePosition, err := pos.RelativeToAncestorAtDepth(ancestorDepth)
if err != nil {
return common.Hash{}, err
}
return provider.Get(ctx, relativePosition)
}
// AbsolutePreStateCommitment returns the absolute prestate commitment from the lowest internal [types.TraceProvider].
func (s *SplitTraceProvider) AbsolutePreStateCommitment(ctx context.Context) (hash common.Hash, err error) {
return s.bottomProvider.AbsolutePreStateCommitment(ctx)
}
// AbsolutePreState routes the AbsolutePreState request to the lowest internal [types.TraceProvider].
func (s *SplitTraceProvider) AbsolutePreState(ctx context.Context) (preimage []byte, err error) {
return s.bottomProvider.AbsolutePreState(ctx)
}
// GetStepData routes the GetStepData request to the internal [types.TraceProvider] that serves the trace index at the depth.
func (s *SplitTraceProvider) GetStepData(ctx context.Context, pos types.Position) (prestate []byte, proofData []byte, preimageData *types.PreimageOracleData, err error) {
ancestorDepth, provider := s.providerForDepth(uint64(pos.Depth()))
relativePosition, err := pos.RelativeToAncestorAtDepth(ancestorDepth)
if err != nil {
return nil, nil, nil, err
}
return provider.GetStepData(ctx, relativePosition)
}
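The routing above has two steps: providerForDepth picks the top provider for positions at or above topDepth and the bottom provider below it, and the position is then re-expressed relative to the subtree root at that depth. A self-contained sketch of the relative-position arithmetic (it mirrors Position.RelativeToAncestorAtDepth: subtract the ancestor depth and keep the index modulo the subtree width):

package main

import (
	"fmt"
	"math/big"
)

// relativeToAncestorAtDepth mirrors the Position helper the split provider relies on:
// a node at (depth, index) seen from an ancestor at ancestorDepth sits at
// depth-ancestorDepth, with its index taken modulo the number of nodes at that relative depth.
func relativeToAncestorAtDepth(depth uint64, index *big.Int, ancestorDepth uint64) (uint64, *big.Int) {
	relDepth := depth - ancestorDepth
	width := new(big.Int).Lsh(big.NewInt(1), uint(relDepth)) // 2^relDepth nodes at this depth
	return relDepth, new(big.Int).Mod(index, width)
}

func main() {
	// The test below queries depth 42, index 17 with topDepth 40:
	// the bottom provider sees depth 2, index 17 mod 4 = 1.
	d, i := relativeToAncestorAtDepth(42, big.NewInt(17), 40)
	fmt.Println(d, i) // 2 1
}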
package split
import (
"context"
"errors"
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
var (
mockGetError = errors.New("mock get error")
mockOutput = common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
mockCommitment = common.HexToHash("0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
)
func TestGet(t *testing.T) {
t.Run("ErrorBubblesUp", func(t *testing.T) {
mockOutputProvider := mockTraceProvider{getError: mockGetError}
splitProvider := newSplitTraceProvider(t, &mockOutputProvider, nil, 40)
_, err := splitProvider.Get(context.Background(), types.NewPosition(1, common.Big0))
require.ErrorIs(t, err, mockGetError)
})
t.Run("ReturnsCorrectOutputFromTopProvider", func(t *testing.T) {
mockOutputProvider := mockTraceProvider{getOutput: mockOutput}
splitProvider := newSplitTraceProvider(t, &mockOutputProvider, &mockTraceProvider{}, 40)
output, err := splitProvider.Get(context.Background(), types.NewPosition(6, big.NewInt(3)))
require.NoError(t, err)
expectedGIndex := types.NewPosition(6, big.NewInt(3)).ToGIndex()
require.Equal(t, common.BigToHash(expectedGIndex), output)
})
t.Run("ReturnsCorrectOutputWithMultipleProviders", func(t *testing.T) {
bottomProvider := mockTraceProvider{getOutput: mockOutput}
splitProvider := newSplitTraceProvider(t, &mockTraceProvider{}, &bottomProvider, 40)
output, err := splitProvider.Get(context.Background(), types.NewPosition(42, big.NewInt(17)))
require.NoError(t, err)
expectedGIndex := types.NewPosition(2, big.NewInt(1)).ToGIndex()
require.Equal(t, common.BigToHash(expectedGIndex), output)
})
}
func TestAbsolutePreStateCommitment(t *testing.T) {
t.Run("ErrorBubblesUp", func(t *testing.T) {
mockOutputProvider := mockTraceProvider{absolutePreStateCommitmentError: mockGetError}
splitProvider := newSplitTraceProvider(t, nil, &mockOutputProvider, 40)
_, err := splitProvider.AbsolutePreStateCommitment(context.Background())
require.ErrorIs(t, err, mockGetError)
})
t.Run("ReturnsCorrectOutput", func(t *testing.T) {
mockOutputProvider := mockTraceProvider{absolutePreStateCommitment: mockCommitment}
splitProvider := newSplitTraceProvider(t, nil, &mockOutputProvider, 40)
output, err := splitProvider.AbsolutePreStateCommitment(context.Background())
require.NoError(t, err)
require.Equal(t, mockCommitment, output)
})
}
func TestAbsolutePreState(t *testing.T) {
t.Run("ErrorBubblesUp", func(t *testing.T) {
mockOutputProvider := mockTraceProvider{absolutePreStateError: mockGetError}
splitProvider := newSplitTraceProvider(t, nil, &mockOutputProvider, 40)
_, err := splitProvider.AbsolutePreState(context.Background())
require.ErrorIs(t, err, mockGetError)
})
t.Run("ReturnsCorrectPreimageData", func(t *testing.T) {
expectedPreimage := []byte{1, 2, 3, 4}
mockOutputProvider := mockTraceProvider{preImageData: expectedPreimage}
splitProvider := newSplitTraceProvider(t, nil, &mockOutputProvider, 40)
output, err := splitProvider.AbsolutePreState(context.Background())
require.NoError(t, err)
require.Equal(t, expectedPreimage, output)
})
}
func TestGetStepData(t *testing.T) {
t.Run("ErrorBubblesUp", func(t *testing.T) {
mockOutputProvider := mockTraceProvider{getStepDataError: mockGetError}
splitProvider := newSplitTraceProvider(t, &mockOutputProvider, nil, 40)
_, _, _, err := splitProvider.GetStepData(context.Background(), types.NewPosition(0, common.Big0))
require.ErrorIs(t, err, mockGetError)
})
t.Run("ReturnsCorrectStepData", func(t *testing.T) {
expectedStepData := []byte{1, 2, 3, 4}
mockOutputProvider := mockTraceProvider{stepPrestateData: expectedStepData}
splitProvider := newSplitTraceProvider(t, nil, &mockOutputProvider, 40)
output, _, _, err := splitProvider.GetStepData(context.Background(), types.NewPosition(41, common.Big0))
require.NoError(t, err)
require.Equal(t, expectedStepData, output)
})
}
type mockTraceProvider struct {
getOutput common.Hash
getError error
absolutePreStateCommitmentError error
absolutePreStateCommitment common.Hash
absolutePreStateError error
preImageData []byte
getStepDataError error
stepPrestateData []byte
}
func newSplitTraceProvider(t *testing.T, tp *mockTraceProvider, bp *mockTraceProvider, topDepth uint64) SplitTraceProvider {
return SplitTraceProvider{
logger: testlog.Logger(t, log.LvlInfo),
topProvider: tp,
bottomProvider: bp,
topDepth: topDepth,
}
}
func (m *mockTraceProvider) Get(ctx context.Context, pos types.Position) (common.Hash, error) {
if m.getError != nil {
return common.Hash{}, m.getError
}
return common.BigToHash(pos.ToGIndex()), nil
}
func (m *mockTraceProvider) AbsolutePreStateCommitment(ctx context.Context) (hash common.Hash, err error) {
if m.absolutePreStateCommitmentError != nil {
return common.Hash{}, m.absolutePreStateCommitmentError
}
return m.absolutePreStateCommitment, nil
}
func (m *mockTraceProvider) AbsolutePreState(ctx context.Context) (preimage []byte, err error) {
if m.absolutePreStateError != nil {
return []byte{}, m.absolutePreStateError
}
return m.preImageData, nil
}
func (m *mockTraceProvider) GetStepData(ctx context.Context, pos types.Position) ([]byte, []byte, *types.PreimageOracleData, error) {
if m.getStepDataError != nil {
return nil, nil, nil, m.getStepDataError
}
return m.stepPrestateData, nil, nil, nil
}
...@@ -2,6 +2,10 @@ package types ...@@ -2,6 +2,10 @@ package types
import ( import (
"errors" "errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
) )
var ( var (
...@@ -14,12 +18,6 @@ var ( ...@@ -14,12 +18,6 @@ var (
// Game is an interface that represents the state of a dispute game. // Game is an interface that represents the state of a dispute game.
type Game interface { type Game interface {
// Put adds a claim into the game state.
Put(claim Claim) error
// PutAll adds a list of claims into the game state.
PutAll(claims []Claim) error
// Claims returns all of the claims in the game. // Claims returns all of the claims in the game.
Claims() []Claim Claims() []Claim
...@@ -36,45 +34,37 @@ type Game interface { ...@@ -36,45 +34,37 @@ type Game interface {
MaxDepth() uint64 MaxDepth() uint64
} }
type claimEntry struct { type claimID common.Hash
ClaimData
ParentContractIndex int
}
type extendedClaim struct { func computeClaimID(claim Claim) claimID {
self Claim return claimID(crypto.Keccak256Hash(
children []claimEntry claim.Position.ToGIndex().Bytes(),
claim.Value.Bytes(),
big.NewInt(int64(claim.ParentContractIndex)).Bytes(),
))
} }
// gameState is a struct that represents the state of a dispute game. // gameState is a struct that represents the state of a dispute game.
// The game state implements the [Game] interface. // The game state implements the [Game] interface.
type gameState struct { type gameState struct {
agreeWithProposedOutput bool agreeWithProposedOutput bool
root claimEntry // claims is the list of claims in the same order as the contract
// contractIndicies maps a contract index to it's extended claim. claims []Claim
// This is used to perform O(1) parent lookups. claimIDs map[claimID]bool
contractIndicies map[int]*extendedClaim depth uint64
// claims maps a claim entry to it's extended claim.
claims map[claimEntry]*extendedClaim
depth uint64
} }
// NewGameState returns a new game state. // NewGameState returns a new game state.
// The provided claims are stored in the same order as the contract. // The provided claims are stored in the same order as the contract.
func NewGameState(agreeWithProposedOutput bool, root Claim, depth uint64) *gameState { func NewGameState(agreeWithProposedOutput bool, claims []Claim, depth uint64) *gameState {
claims := make(map[claimEntry]*extendedClaim) claimIDs := make(map[claimID]bool)
parents := make(map[int]*extendedClaim) for _, claim := range claims {
rootClaimEntry := makeClaimEntry(root) claimIDs[computeClaimID(claim)] = true
claims[rootClaimEntry] = &extendedClaim{
self: root,
children: make([]claimEntry, 0),
} }
parents[root.ContractIndex] = claims[rootClaimEntry]
return &gameState{ return &gameState{
agreeWithProposedOutput: agreeWithProposedOutput, agreeWithProposedOutput: agreeWithProposedOutput,
root: rootClaimEntry,
claims: claims, claims: claims,
contractIndicies: parents, claimIDs: claimIDs,
depth: depth, depth: depth,
} }
} }
...@@ -91,83 +81,34 @@ func (g *gameState) AgreeWithClaimLevel(claim Claim) bool { ...@@ -91,83 +81,34 @@ func (g *gameState) AgreeWithClaimLevel(claim Claim) bool {
} }
} }
// PutAll adds a list of claims into the [Game] state.
// If any of the claims already exist in the game state, an error is returned.
func (g *gameState) PutAll(claims []Claim) error {
for _, claim := range claims {
if err := g.Put(claim); err != nil {
return err
}
}
return nil
}
// Put adds a claim into the game state.
func (g *gameState) Put(claim Claim) error {
if claim.IsRoot() || g.IsDuplicate(claim) {
return ErrClaimExists
}
parent := g.getParent(claim)
if parent == nil {
return errors.New("no parent claim")
}
parent.children = append(parent.children, makeClaimEntry(claim))
claimWithExtension := &extendedClaim{
self: claim,
children: make([]claimEntry, 0),
}
g.claims[makeClaimEntry(claim)] = claimWithExtension
g.contractIndicies[claim.ContractIndex] = claimWithExtension
return nil
}
func (g *gameState) IsDuplicate(claim Claim) bool { func (g *gameState) IsDuplicate(claim Claim) bool {
_, ok := g.claims[makeClaimEntry(claim)] return g.claimIDs[computeClaimID(claim)]
return ok
} }
func (g *gameState) Claims() []Claim { func (g *gameState) Claims() []Claim {
queue := []claimEntry{g.root} // Defensively copy to avoid modifications to the underlying array.
var out []Claim return append([]Claim(nil), g.claims...)
for len(queue) > 0 {
item := queue[0]
queue = queue[1:]
queue = append(queue, g.getChildren(item)...)
out = append(out, g.claims[item].self)
}
return out
} }
func (g *gameState) MaxDepth() uint64 { func (g *gameState) MaxDepth() uint64 {
return g.depth return g.depth
} }
func (g *gameState) getChildren(c claimEntry) []claimEntry {
return g.claims[c].children
}
func (g *gameState) GetParent(claim Claim) (Claim, error) { func (g *gameState) GetParent(claim Claim) (Claim, error) {
parent := g.getParent(claim) parent := g.getParent(claim)
if parent == nil { if parent == nil {
return Claim{}, ErrClaimNotFound return Claim{}, ErrClaimNotFound
} }
return parent.self, nil return *parent, nil
} }
func (g *gameState) getParent(claim Claim) *extendedClaim { func (g *gameState) getParent(claim Claim) *Claim {
if claim.IsRoot() { if claim.IsRoot() {
return nil return nil
} }
if parent, ok := g.contractIndicies[claim.ParentContractIndex]; ok { if claim.ParentContractIndex >= len(g.claims) || claim.ParentContractIndex < 0 {
return parent return nil
}
return nil
}
func makeClaimEntry(claim Claim) claimEntry {
return claimEntry{
ClaimData: claim.ClaimData,
ParentContractIndex: claim.ParentContractIndex,
} }
parent := g.claims[claim.ParentContractIndex]
return &parent
} }
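With Put and PutAll removed, duplicate detection rests entirely on computeClaimID: the keccak256 hash of the claim's generalized index, its value, and its parent contract index, recorded in the claimIDs set. A standalone sketch of that hashing scheme using go-ethereum's crypto package (the field values are made up for illustration):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// claimID mirrors computeClaimID: keccak256(gindex || value || parentContractIndex).
func claimID(gindex *big.Int, value common.Hash, parentContractIndex int) common.Hash {
	return crypto.Keccak256Hash(
		gindex.Bytes(),
		value.Bytes(),
		big.NewInt(int64(parentContractIndex)).Bytes(),
	)
}

func main() {
	seen := map[common.Hash]bool{}
	value := common.HexToHash("0x77a")

	id := claimID(big.NewInt(4), value, 1)
	fmt.Println("duplicate before insert:", seen[id]) // false
	seen[id] = true
	fmt.Println("duplicate after insert:", seen[id]) // true
}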
package types package types
import ( import (
"math/big"
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -15,14 +16,14 @@ func createTestClaims() (Claim, Claim, Claim, Claim) { ...@@ -15,14 +16,14 @@ func createTestClaims() (Claim, Claim, Claim, Claim) {
root := Claim{ root := Claim{
ClaimData: ClaimData{ ClaimData: ClaimData{
Value: common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000077a"), Value: common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000077a"),
Position: NewPosition(0, 0), Position: NewPosition(0, common.Big0),
}, },
// Root claim has no parent // Root claim has no parent
} }
top := Claim{ top := Claim{
ClaimData: ClaimData{ ClaimData: ClaimData{
Value: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000364"), Value: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000364"),
Position: NewPosition(1, 0), Position: NewPosition(1, common.Big0),
}, },
Parent: root.ClaimData, Parent: root.ClaimData,
ContractIndex: 1, ContractIndex: 1,
...@@ -31,7 +32,7 @@ func createTestClaims() (Claim, Claim, Claim, Claim) { ...@@ -31,7 +32,7 @@ func createTestClaims() (Claim, Claim, Claim, Claim) {
middle := Claim{ middle := Claim{
ClaimData: ClaimData{ ClaimData: ClaimData{
Value: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000578"), Value: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000578"),
Position: NewPosition(2, 2), Position: NewPosition(2, big.NewInt(2)),
}, },
Parent: top.ClaimData, Parent: top.ClaimData,
ContractIndex: 2, ContractIndex: 2,
...@@ -41,7 +42,7 @@ func createTestClaims() (Claim, Claim, Claim, Claim) { ...@@ -41,7 +42,7 @@ func createTestClaims() (Claim, Claim, Claim, Claim) {
bottom := Claim{ bottom := Claim{
ClaimData: ClaimData{ ClaimData: ClaimData{
Value: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000465"), Value: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000465"),
Position: NewPosition(3, 4), Position: NewPosition(3, big.NewInt(4)),
}, },
Parent: middle.ClaimData, Parent: middle.ClaimData,
ContractIndex: 3, ContractIndex: 3,
...@@ -52,10 +53,8 @@ func createTestClaims() (Claim, Claim, Claim, Claim) { ...@@ -52,10 +53,8 @@ func createTestClaims() (Claim, Claim, Claim, Claim) {
} }
func TestIsDuplicate(t *testing.T) { func TestIsDuplicate(t *testing.T) {
// Setup the game state.
root, top, middle, bottom := createTestClaims() root, top, middle, bottom := createTestClaims()
g := NewGameState(false, root, testMaxDepth) g := NewGameState(false, []Claim{root, top}, testMaxDepth)
require.NoError(t, g.Put(top))
// Root + Top should be duplicates // Root + Top should be duplicates
require.True(t, g.IsDuplicate(root)) require.True(t, g.IsDuplicate(root))
...@@ -66,137 +65,13 @@ func TestIsDuplicate(t *testing.T) { ...@@ -66,137 +65,13 @@ func TestIsDuplicate(t *testing.T) {
require.False(t, g.IsDuplicate(bottom)) require.False(t, g.IsDuplicate(bottom))
} }
// TestGame_Put_RootAlreadyExists tests the [Game.Put] method using a [gameState] func TestGame_Claims(t *testing.T) {
// instance errors when the root claim already exists in state.
func TestGame_Put_RootAlreadyExists(t *testing.T) {
// Setup the game state.
top, _, _, _ := createTestClaims()
g := NewGameState(false, top, testMaxDepth)
// Try to put the root claim into the game state again.
err := g.Put(top)
require.ErrorIs(t, err, ErrClaimExists)
}
// TestGame_PutAll_RootAlreadyExists tests the [Game.PutAll] method using a [gameState]
// instance errors when the root claim already exists in state.
func TestGame_PutAll_RootAlreadyExists(t *testing.T) {
// Setup the game state.
root, _, _, _ := createTestClaims()
g := NewGameState(false, root, testMaxDepth)
// Try to put the root claim into the game state again.
err := g.PutAll([]Claim{root})
require.ErrorIs(t, err, ErrClaimExists)
}
// TestGame_PutAll_AlreadyExists tests the [Game.PutAll] method using a [gameState]
// instance errors when the given claim already exists in state.
func TestGame_PutAll_AlreadyExists(t *testing.T) {
root, top, middle, bottom := createTestClaims()
g := NewGameState(false, root, testMaxDepth)
err := g.PutAll([]Claim{top, middle})
require.NoError(t, err)
err = g.PutAll([]Claim{middle, bottom})
require.ErrorIs(t, err, ErrClaimExists)
}
// TestGame_PutAll_ParentsAndChildren tests the [Game.PutAll] method using a [gameState] instance.
func TestGame_PutAll_ParentsAndChildren(t *testing.T) {
// Setup the game state.
root, top, middle, bottom := createTestClaims()
g := NewGameState(false, root, testMaxDepth)
// We should not be able to get the parent of the root claim.
parent, err := g.GetParent(root)
require.ErrorIs(t, err, ErrClaimNotFound)
require.Equal(t, parent, Claim{})
// Put the rest of the claims in the state.
err = g.PutAll([]Claim{top, middle, bottom})
require.NoError(t, err)
parent, err = g.GetParent(top)
require.NoError(t, err)
require.Equal(t, parent, root)
parent, err = g.GetParent(middle)
require.NoError(t, err)
require.Equal(t, parent, top)
parent, err = g.GetParent(bottom)
require.NoError(t, err)
require.Equal(t, parent, middle)
}
// TestGame_Put_AlreadyExists tests the [Game.Put] method using a [gameState]
// instance errors when the given claim already exists in state.
func TestGame_Put_AlreadyExists(t *testing.T) {
// Setup the game state.
top, middle, _, _ := createTestClaims()
g := NewGameState(false, top, testMaxDepth)
// Put the next claim into state.
err := g.Put(middle)
require.NoError(t, err)
// Put the claim into the game state again.
err = g.Put(middle)
require.ErrorIs(t, err, ErrClaimExists)
}
// TestGame_Put_ParentsAndChildren tests the [Game.Put] method using a [gameState] instance.
func TestGame_Put_ParentsAndChildren(t *testing.T) {
// Setup the game state. // Setup the game state.
root, top, middle, bottom := createTestClaims() root, top, middle, bottom := createTestClaims()
g := NewGameState(false, root, testMaxDepth) expected := []Claim{root, top, middle, bottom}
g := NewGameState(false, expected, testMaxDepth)
// We should not be able to get the parent of the root claim.
parent, err := g.GetParent(root)
require.ErrorIs(t, err, ErrClaimNotFound)
require.Equal(t, parent, Claim{})
// Put + Check Top
err = g.Put(top)
require.NoError(t, err)
parent, err = g.GetParent(top)
require.NoError(t, err)
require.Equal(t, parent, root)
// Put + Check Top Middle
err = g.Put(middle)
require.NoError(t, err)
parent, err = g.GetParent(middle)
require.NoError(t, err)
require.Equal(t, parent, top)
// Put + Check Top Bottom
err = g.Put(bottom)
require.NoError(t, err)
parent, err = g.GetParent(bottom)
require.NoError(t, err)
require.Equal(t, parent, middle)
}
// TestGame_ClaimPairs tests the [Game.ClaimPairs] method using a [gameState] instance.
func TestGame_ClaimPairs(t *testing.T) {
// Setup the game state.
root, top, middle, bottom := createTestClaims()
g := NewGameState(false, root, testMaxDepth)
// Add top claim to the game state.
err := g.Put(top)
require.NoError(t, err)
// Add the middle claim to the game state.
err = g.Put(middle)
require.NoError(t, err)
// Add the bottom claim to the game state.
err = g.Put(bottom)
require.NoError(t, err)
// Validate claim pairs. // Validate claim pairs.
expected := []Claim{root, top, middle, bottom} actual := g.Claims()
claims := g.Claims() require.ElementsMatch(t, expected, actual)
require.ElementsMatch(t, expected, claims)
} }
package types package types
import "fmt" import (
"errors"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
)
var (
ErrPositionDepthTooSmall = errors.New("Position depth is too small")
)
// Position is a golang wrapper around the dispute game Position type. // Position is a golang wrapper around the dispute game Position type.
type Position struct { type Position struct {
depth int depth int
indexAtDepth int indexAtDepth *big.Int
} }
func NewPosition(depth, indexAtDepth int) Position { func NewPosition(depth int, indexAtDepth *big.Int) Position {
return Position{depth, indexAtDepth} return Position{
depth: depth,
indexAtDepth: indexAtDepth,
}
} }
func NewLargePositionFromGIndex(x *big.Int) Position {
depth := bigMSB(x)
indexAtDepth := new(big.Int).Sub(x, new(big.Int).Lsh(big.NewInt(1), uint(depth)))
return NewPosition(depth, indexAtDepth)
}
// todo(client-pod#80): remove this in favor of NewLargePositionFromGIndex.
func NewPositionFromGIndex(x uint64) Position { func NewPositionFromGIndex(x uint64) Position {
depth := MSBIndex(x) depth := MSBIndex(x)
indexAtDepth := ^(1 << depth) & x indexAtDepth := ^(1 << depth) & x
return NewPosition(depth, int(indexAtDepth)) return NewPosition(depth, big.NewInt(int64(indexAtDepth)))
} }
func (p Position) MoveRight() Position { func (p Position) MoveRight() Position {
return Position{ return Position{
depth: p.depth, depth: p.depth,
indexAtDepth: int(p.indexAtDepth + 1), indexAtDepth: new(big.Int).Add(p.indexAtDepth, big.NewInt(1)),
} }
} }
// RelativeToAncestorAtDepth returns a new position for a subtree.
// [ancestor] is the depth of the subtree root node.
func (p Position) RelativeToAncestorAtDepth(ancestor uint64) (Position, error) {
if ancestor > uint64(p.depth) {
return Position{}, ErrPositionDepthTooSmall
}
newPosDepth := uint64(p.depth) - ancestor
nodesAtDepth := 1 << newPosDepth
newIndexAtDepth := new(big.Int).Mod(p.indexAtDepth, big.NewInt(int64(nodesAtDepth)))
return NewPosition(int(newPosDepth), newIndexAtDepth), nil
}
func (p Position) Depth() int { func (p Position) Depth() int {
return p.depth return p.depth
} }
func (p Position) IndexAtDepth() int { func (p Position) IndexAtDepth() *big.Int {
if p.indexAtDepth == nil {
return common.Big0
}
return p.indexAtDepth return p.indexAtDepth
} }
func (p Position) IsRootPosition() bool { func (p Position) IsRootPosition() bool {
return p.depth == 0 && p.indexAtDepth == 0 return p.depth == 0 && common.Big0.Cmp(p.indexAtDepth) == 0
}
func (p Position) lshIndex(amount int) *big.Int {
return new(big.Int).Lsh(p.IndexAtDepth(), uint(amount))
} }
// TraceIndex calculates what the index of the claim value would be inside the trace. // TraceIndex calculates what the index of the claim value would be inside the trace.
// It is equivalent to going right until the final depth has been reached. // It is equivalent to going right until the final depth has been reached.
func (p Position) TraceIndex(maxDepth int) uint64 { func (p Position) TraceIndex(maxDepth int) *big.Int {
// When we go right, we do a shift left and set the bottom bit to be 1. // When we go right, we do a shift left and set the bottom bit to be 1.
// To do this in a single step, do all the shifts at once & or in all 1s for the bottom bits. // To do this in a single step, do all the shifts at once & or in all 1s for the bottom bits.
rd := maxDepth - p.depth rd := maxDepth - p.depth
return uint64(p.indexAtDepth<<rd | ((1 << rd) - 1)) rhs := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), uint(rd)), big.NewInt(1))
ti := new(big.Int).Or(p.lshIndex(rd), rhs)
return ti
}
// UnsafeTraceIndex returns a uint64 representation of the trace index.
// todo(refcell): This should be removed in a follow-on pr and any invocations
// should be updated to use TraceIndex.
func (p Position) UnsafeTraceIndex(maxDepth int) uint64 {
return p.TraceIndex(maxDepth).Uint64()
} }
// move returns a new position at the left or right child. // move returns a new position at the left or right child.
func (p Position) move(right bool) Position { func (p Position) move(right bool) Position {
return Position{ return Position{
depth: p.depth + 1, depth: p.depth + 1,
indexAtDepth: (p.indexAtDepth << 1) | boolToInt(right), indexAtDepth: new(big.Int).Or(p.lshIndex(1), big.NewInt(int64(boolToInt(right)))),
} }
} }
...@@ -62,11 +110,19 @@ func boolToInt(b bool) int { ...@@ -62,11 +110,19 @@ func boolToInt(b bool) int {
} }
} }
func (p Position) parentIndexAtDepth() *big.Int {
return new(big.Int).Div(p.IndexAtDepth(), big.NewInt(2))
}
func (p Position) RightOf(parent Position) bool {
return p.parentIndexAtDepth().Cmp(parent.IndexAtDepth()) != 0
}
// parent returns a new position that is the parent of this Position. // parent returns a new position that is the parent of this Position.
func (p Position) parent() Position { func (p Position) parent() Position {
return Position{ return Position{
depth: p.depth - 1, depth: p.depth - 1,
indexAtDepth: p.indexAtDepth >> 1, indexAtDepth: p.parentIndexAtDepth(),
} }
} }
...@@ -84,8 +140,20 @@ func (p Position) Print(maxDepth int) { ...@@ -84,8 +140,20 @@ func (p Position) Print(maxDepth int) {
fmt.Printf("GIN: %4b\tTrace Position is %4b\tTrace Depth is: %d\tTrace Index is: %d\n", p.ToGIndex(), p.indexAtDepth, p.depth, p.TraceIndex(maxDepth)) fmt.Printf("GIN: %4b\tTrace Position is %4b\tTrace Depth is: %d\tTrace Index is: %d\n", p.ToGIndex(), p.indexAtDepth, p.depth, p.TraceIndex(maxDepth))
} }
func (p Position) ToGIndex() uint64 { func (p Position) ToGIndex() *big.Int {
return uint64(1<<p.depth | p.indexAtDepth) return new(big.Int).Or(new(big.Int).Lsh(big.NewInt(1), uint(p.depth)), p.IndexAtDepth())
}
// bigMSB returns the index of the most significant bit
func bigMSB(x *big.Int) int {
if x.Cmp(common.Big0) == 0 {
return 0
}
out := 0
for ; x.Cmp(common.Big0) != 0; out++ {
x = new(big.Int).Rsh(x, 1)
}
return out - 1
} }
// MSBIndex returns the index of the most significant bit // MSBIndex returns the index of the most significant bit
......
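The Position refactor keeps the same generalized-index arithmetic, just over big.Int: the gindex of a node is 2^depth | indexAtDepth, NewLargePositionFromGIndex inverts that by taking the most significant bit as the depth, and TraceIndex shifts the index down to maxDepth with the freed low bits set to 1 (go right until the bottom). A small standalone round-trip sketch under those definitions:

package main

import (
	"fmt"
	"math/big"
)

// toGIndex mirrors Position.ToGIndex: 2^depth | indexAtDepth.
func toGIndex(depth int, index *big.Int) *big.Int {
	return new(big.Int).Or(new(big.Int).Lsh(big.NewInt(1), uint(depth)), index)
}

// fromGIndex mirrors NewLargePositionFromGIndex: the depth is the MSB position
// (BitLen-1 matches bigMSB for non-zero inputs) and the index is what remains
// after removing 2^depth.
func fromGIndex(g *big.Int) (int, *big.Int) {
	depth := g.BitLen() - 1
	return depth, new(big.Int).Sub(g, new(big.Int).Lsh(big.NewInt(1), uint(depth)))
}

// traceIndex mirrors Position.TraceIndex: shift left by the remaining depth and fill with 1s.
func traceIndex(depth int, index *big.Int, maxDepth int) *big.Int {
	rd := uint(maxDepth - depth)
	ones := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), rd), big.NewInt(1))
	return new(big.Int).Or(new(big.Int).Lsh(index, rd), ones)
}

func main() {
	g := big.NewInt(6) // depth 2, index 2 in the test table below
	d, i := fromGIndex(g)
	fmt.Println(d, i, toGIndex(d, i), traceIndex(d, i, 4)) // 2 2 6 11
}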
package types package types
import ( import (
"math"
"math/big"
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -28,79 +30,93 @@ func TestMSBIndex(t *testing.T) { ...@@ -28,79 +30,93 @@ func TestMSBIndex(t *testing.T) {
t.Errorf("MSBIndex(%d) expected %d, but got %d", test.input, test.expected, result) t.Errorf("MSBIndex(%d) expected %d, but got %d", test.input, test.expected, result)
} }
} }
}
func bi(i int) *big.Int {
return big.NewInt(int64(i))
} }
type testNodeInfo struct { type testNodeInfo struct {
GIndex uint64 GIndex *big.Int
Depth int Depth int
IndexAtDepth int MaxDepth int
TraceIndex uint64 IndexAtDepth *big.Int
AttackGIndex uint64 // 0 indicates attack is not possible from this node TraceIndex *big.Int
DefendGIndex uint64 // 0 indicates defend is not possible from this node AttackGIndex *big.Int // 0 indicates attack is not possible from this node
DefendGIndex *big.Int // 0 indicates defend is not possible from this node
} }
var treeNodesMaxDepth4 = []testNodeInfo{ var treeNodes = []testNodeInfo{
{GIndex: 1, Depth: 0, IndexAtDepth: 0, TraceIndex: 15, AttackGIndex: 2}, {GIndex: bi(1), Depth: 0, MaxDepth: 4, IndexAtDepth: bi(0), TraceIndex: bi(15), AttackGIndex: bi(2)},
{GIndex: 2, Depth: 1, IndexAtDepth: 0, TraceIndex: 7, AttackGIndex: 4, DefendGIndex: 6}, {GIndex: bi(2), Depth: 1, MaxDepth: 4, IndexAtDepth: bi(0), TraceIndex: bi(7), AttackGIndex: bi(4), DefendGIndex: bi(6)},
{GIndex: 3, Depth: 1, IndexAtDepth: 1, TraceIndex: 15, AttackGIndex: 6}, {GIndex: bi(3), Depth: 1, MaxDepth: 4, IndexAtDepth: bi(1), TraceIndex: bi(15), AttackGIndex: bi(6)},
{GIndex: 4, Depth: 2, IndexAtDepth: 0, TraceIndex: 3, AttackGIndex: 8, DefendGIndex: 10}, {GIndex: bi(4), Depth: 2, MaxDepth: 4, IndexAtDepth: bi(0), TraceIndex: bi(3), AttackGIndex: bi(8), DefendGIndex: bi(10)},
{GIndex: 5, Depth: 2, IndexAtDepth: 1, TraceIndex: 7, AttackGIndex: 10}, {GIndex: bi(5), Depth: 2, MaxDepth: 4, IndexAtDepth: bi(1), TraceIndex: bi(7), AttackGIndex: bi(10)},
{GIndex: 6, Depth: 2, IndexAtDepth: 2, TraceIndex: 11, AttackGIndex: 12, DefendGIndex: 14}, {GIndex: bi(6), Depth: 2, MaxDepth: 4, IndexAtDepth: bi(2), TraceIndex: bi(11), AttackGIndex: bi(12), DefendGIndex: bi(14)},
{GIndex: 7, Depth: 2, IndexAtDepth: 3, TraceIndex: 15, AttackGIndex: 14}, {GIndex: bi(7), Depth: 2, MaxDepth: 4, IndexAtDepth: bi(3), TraceIndex: bi(15), AttackGIndex: bi(14)},
{GIndex: 8, Depth: 3, IndexAtDepth: 0, TraceIndex: 1, AttackGIndex: 16, DefendGIndex: 18}, {GIndex: bi(8), Depth: 3, MaxDepth: 4, IndexAtDepth: bi(0), TraceIndex: bi(1), AttackGIndex: bi(16), DefendGIndex: bi(18)},
{GIndex: 9, Depth: 3, IndexAtDepth: 1, TraceIndex: 3, AttackGIndex: 18}, {GIndex: bi(9), Depth: 3, MaxDepth: 4, IndexAtDepth: bi(1), TraceIndex: bi(3), AttackGIndex: bi(18)},
{GIndex: 10, Depth: 3, IndexAtDepth: 2, TraceIndex: 5, AttackGIndex: 20, DefendGIndex: 22}, {GIndex: bi(10), Depth: 3, MaxDepth: 4, IndexAtDepth: bi(2), TraceIndex: bi(5), AttackGIndex: bi(20), DefendGIndex: bi(22)},
{GIndex: 11, Depth: 3, IndexAtDepth: 3, TraceIndex: 7, AttackGIndex: 22}, {GIndex: bi(11), Depth: 3, MaxDepth: 4, IndexAtDepth: bi(3), TraceIndex: bi(7), AttackGIndex: bi(22)},
{GIndex: 12, Depth: 3, IndexAtDepth: 4, TraceIndex: 9, AttackGIndex: 24, DefendGIndex: 26}, {GIndex: bi(12), Depth: 3, MaxDepth: 4, IndexAtDepth: bi(4), TraceIndex: bi(9), AttackGIndex: bi(24), DefendGIndex: bi(26)},
{GIndex: 13, Depth: 3, IndexAtDepth: 5, TraceIndex: 11, AttackGIndex: 26}, {GIndex: bi(13), Depth: 3, MaxDepth: 4, IndexAtDepth: bi(5), TraceIndex: bi(11), AttackGIndex: bi(26)},
{GIndex: 14, Depth: 3, IndexAtDepth: 6, TraceIndex: 13, AttackGIndex: 28, DefendGIndex: 30}, {GIndex: bi(14), Depth: 3, MaxDepth: 4, IndexAtDepth: bi(6), TraceIndex: bi(13), AttackGIndex: bi(28), DefendGIndex: bi(30)},
{GIndex: 15, Depth: 3, IndexAtDepth: 7, TraceIndex: 15, AttackGIndex: 30}, {GIndex: bi(15), Depth: 3, MaxDepth: 4, IndexAtDepth: bi(7), TraceIndex: bi(15), AttackGIndex: bi(30)},
{GIndex: 16, Depth: 4, IndexAtDepth: 0, TraceIndex: 0}, {GIndex: bi(16), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(0), TraceIndex: bi(0)},
{GIndex: 17, Depth: 4, IndexAtDepth: 1, TraceIndex: 1}, {GIndex: bi(17), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(1), TraceIndex: bi(1)},
{GIndex: 18, Depth: 4, IndexAtDepth: 2, TraceIndex: 2}, {GIndex: bi(18), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(2), TraceIndex: bi(2)},
{GIndex: 19, Depth: 4, IndexAtDepth: 3, TraceIndex: 3}, {GIndex: bi(19), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(3), TraceIndex: bi(3)},
{GIndex: 20, Depth: 4, IndexAtDepth: 4, TraceIndex: 4}, {GIndex: bi(20), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(4), TraceIndex: bi(4)},
{GIndex: 21, Depth: 4, IndexAtDepth: 5, TraceIndex: 5}, {GIndex: bi(21), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(5), TraceIndex: bi(5)},
{GIndex: 22, Depth: 4, IndexAtDepth: 6, TraceIndex: 6}, {GIndex: bi(22), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(6), TraceIndex: bi(6)},
{GIndex: 23, Depth: 4, IndexAtDepth: 7, TraceIndex: 7}, {GIndex: bi(23), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(7), TraceIndex: bi(7)},
{GIndex: 24, Depth: 4, IndexAtDepth: 8, TraceIndex: 8}, {GIndex: bi(24), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(8), TraceIndex: bi(8)},
{GIndex: 25, Depth: 4, IndexAtDepth: 9, TraceIndex: 9}, {GIndex: bi(25), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(9), TraceIndex: bi(9)},
{GIndex: 26, Depth: 4, IndexAtDepth: 10, TraceIndex: 10}, {GIndex: bi(26), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(10), TraceIndex: bi(10)},
{GIndex: 27, Depth: 4, IndexAtDepth: 11, TraceIndex: 11}, {GIndex: bi(27), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(11), TraceIndex: bi(11)},
{GIndex: 28, Depth: 4, IndexAtDepth: 12, TraceIndex: 12}, {GIndex: bi(28), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(12), TraceIndex: bi(12)},
{GIndex: 29, Depth: 4, IndexAtDepth: 13, TraceIndex: 13}, {GIndex: bi(29), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(13), TraceIndex: bi(13)},
{GIndex: 30, Depth: 4, IndexAtDepth: 14, TraceIndex: 14}, {GIndex: bi(30), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(14), TraceIndex: bi(14)},
{GIndex: 31, Depth: 4, IndexAtDepth: 15, TraceIndex: 15}, {GIndex: bi(31), Depth: 4, MaxDepth: 4, IndexAtDepth: bi(15), TraceIndex: bi(15)},
{GIndex: bi(0).Mul(bi(math.MaxInt64), bi(2)), Depth: 63, MaxDepth: 64, IndexAtDepth: bi(9223372036854775806), TraceIndex: bi(0).Sub(bi(0).Mul(bi(math.MaxInt64), bi(2)), bi(1))},
} }
// TestGINConversions does To & From the generalized index on the treeNodes data // TestGINConversions does To & From the generalized index on the treeNodes data
func TestGINConversions(t *testing.T) { func TestGINConversions(t *testing.T) {
for _, test := range treeNodesMaxDepth4 { for _, test := range treeNodes {
from := NewPositionFromGIndex(test.GIndex) from := NewLargePositionFromGIndex(test.GIndex)
pos := NewPosition(test.Depth, test.IndexAtDepth) pos := NewPosition(test.Depth, test.IndexAtDepth)
require.Equal(t, pos, from) require.EqualValuesf(t, pos.Depth(), from.Depth(), "From GIndex %v vs pos %v", from.Depth(), pos.Depth())
require.Zerof(t, pos.IndexAtDepth().Cmp(from.IndexAtDepth()), "From GIndex %v vs pos %v", from.IndexAtDepth(), pos.IndexAtDepth())
to := pos.ToGIndex() to := pos.ToGIndex()
require.Equal(t, test.GIndex, to) require.Equal(t, test.GIndex, to)
} }
} }
func TestTraceIndexOfRootWithLargeDepth(t *testing.T) {
traceIdx := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 100), big.NewInt(1))
pos := NewLargePositionFromGIndex(big.NewInt(1))
actual := pos.TraceIndex(100)
require.Equal(t, traceIdx, actual)
}
// TestTraceIndex creates the position & then tests the trace index function on the treeNodesMaxDepth4 data // TestTraceIndex creates the position & then tests the trace index function on the treeNodesMaxDepth4 data
func TestTraceIndex(t *testing.T) { func TestTraceIndex(t *testing.T) {
for _, test := range treeNodesMaxDepth4 { for _, test := range treeNodes {
pos := NewPosition(test.Depth, test.IndexAtDepth) pos := NewPosition(test.Depth, test.IndexAtDepth)
result := pos.TraceIndex(4) result := pos.TraceIndex(test.MaxDepth)
require.Equal(t, test.TraceIndex, result) require.Equal(t, test.TraceIndex, result)
} }
} }
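The table above packs in the generalized-index arithmetic these tests exercise. As a minimal sketch (assuming the standard layout; the helper names `gindexOf` and `traceIndexOf` are illustrative and not part of the package): a node's gindex is `2^depth + indexAtDepth`, and its trace index is the rightmost leaf beneath it at the maximum depth.

```go
package main

import (
	"fmt"
	"math/big"
)

// gindexOf computes the generalized index of a node: 2^depth + indexAtDepth.
func gindexOf(depth uint, indexAtDepth *big.Int) *big.Int {
	g := new(big.Int).Lsh(big.NewInt(1), depth)
	return g.Add(g, indexAtDepth)
}

// traceIndexOf computes the trace index a node claims: the rightmost leaf in its
// subtree at maxDepth, i.e. indexAtDepth*2^(maxDepth-depth) + 2^(maxDepth-depth) - 1.
func traceIndexOf(depth, maxDepth uint, indexAtDepth *big.Int) *big.Int {
	width := new(big.Int).Lsh(big.NewInt(1), maxDepth-depth)
	ti := new(big.Int).Mul(indexAtDepth, width)
	ti.Add(ti, width)
	return ti.Sub(ti, big.NewInt(1))
}

func main() {
	// Matches the table row {GIndex: 12, Depth: 3, IndexAtDepth: 4, TraceIndex: 9}.
	fmt.Println(gindexOf(3, big.NewInt(4)))        // 12
	fmt.Println(traceIndexOf(3, 4, big.NewInt(4))) // 9
	// And the large-depth case: the root claims trace index 2^100 - 1 at maxDepth 100.
	fmt.Println(traceIndexOf(0, 100, big.NewInt(0)))
}
```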
func TestAttack(t *testing.T) { func TestAttack(t *testing.T) {
for _, test := range treeNodesMaxDepth4 { for _, test := range treeNodes {
if test.AttackGIndex == 0 { if test.AttackGIndex == nil || test.AttackGIndex.Cmp(big.NewInt(0)) == 0 {
continue continue
} }
pos := NewPosition(test.Depth, test.IndexAtDepth) pos := NewPosition(test.Depth, test.IndexAtDepth)
...@@ -110,8 +126,8 @@ func TestAttack(t *testing.T) { ...@@ -110,8 +126,8 @@ func TestAttack(t *testing.T) {
} }
func TestDefend(t *testing.T) { func TestDefend(t *testing.T) {
for _, test := range treeNodesMaxDepth4 { for _, test := range treeNodes {
if test.DefendGIndex == 0 { if test.DefendGIndex == nil || test.DefendGIndex.Cmp(big.NewInt(0)) == 0 {
continue continue
} }
pos := NewPosition(test.Depth, test.IndexAtDepth) pos := NewPosition(test.Depth, test.IndexAtDepth)
...@@ -119,3 +135,19 @@ func TestDefend(t *testing.T) { ...@@ -119,3 +135,19 @@ func TestDefend(t *testing.T) {
require.Equalf(t, test.DefendGIndex, result.ToGIndex(), "Defend from GIndex %v", pos.ToGIndex()) require.Equalf(t, test.DefendGIndex, result.ToGIndex(), "Defend from GIndex %v", pos.ToGIndex())
} }
} }
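The `AttackGIndex` and `DefendGIndex` columns in the table follow similarly simple gindex arithmetic. As a hedged sketch (the helper names are illustrative, not package API): attacking a claim moves to its left child, `2*g`; defending moves to the left child of its right sibling, `2*(g+1)`. In the table, defend entries appear only for left children (even index at depth) above the maximum depth.

```go
// attackGIndexOf mirrors the AttackGIndex column: the left child of g.
func attackGIndexOf(g *big.Int) *big.Int {
	return new(big.Int).Mul(g, big.NewInt(2))
}

// defendGIndexOf mirrors the DefendGIndex column: the left child of g's right
// sibling, i.e. 2*(g+1). The table only lists it for even-index positions.
func defendGIndexOf(g *big.Int) *big.Int {
	return new(big.Int).Mul(new(big.Int).Add(g, big.NewInt(1)), big.NewInt(2))
}
```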
func TestRelativeToAncestorAtDepth(t *testing.T) {
t.Run("ErrorsForDeepAncestor", func(t *testing.T) {
pos := NewPosition(1, big.NewInt(1))
_, err := pos.RelativeToAncestorAtDepth(2)
require.ErrorIs(t, err, ErrPositionDepthTooSmall)
})
t.Run("Success", func(t *testing.T) {
pos := NewPosition(2, big.NewInt(1))
expectedRelativePosition := NewPosition(1, big.NewInt(1))
relativePosition, err := pos.RelativeToAncestorAtDepth(1)
require.NoError(t, err)
require.Equal(t, expectedRelativePosition, relativePosition)
})
}
...@@ -119,5 +119,5 @@ func (c *Claim) IsRoot() bool { ...@@ -119,5 +119,5 @@ func (c *Claim) IsRoot() bool {
// DefendsParent returns true if the claim is a defense (i.e. goes right) of the // DefendsParent returns true if the claim is a defense (i.e. goes right) of the
// parent. It returns false if the claim is an attack (i.e. goes left) of the parent. // parent. It returns false if the claim is an attack (i.e. goes left) of the parent.
func (c *Claim) DefendsParent() bool { func (c *Claim) DefendsParent() bool {
return (c.IndexAtDepth() >> 1) != c.Parent.IndexAtDepth() return c.RightOf(c.Parent.Position)
} }
...@@ -23,3 +23,89 @@ func TestNewPreimageOracleData(t *testing.T) { ...@@ -23,3 +23,89 @@ func TestNewPreimageOracleData(t *testing.T) {
require.Equal(t, uint32(7), data.OracleOffset) require.Equal(t, uint32(7), data.OracleOffset)
}) })
} }
func TestIsRootPosition(t *testing.T) {
tests := []struct {
name string
position Position
expected bool
}{
{
name: "ZeroRoot",
position: NewPositionFromGIndex(0),
expected: true,
},
{
name: "ValidRoot",
position: NewPositionFromGIndex(1),
expected: true,
},
{
name: "NotRoot",
position: NewPositionFromGIndex(2),
expected: false,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
require.Equal(t, test.expected, test.position.IsRootPosition())
})
}
}
func buildClaim(gindex uint64, parentGIndex uint64) Claim {
return Claim{
ClaimData: ClaimData{
Position: NewPositionFromGIndex(gindex),
},
Parent: ClaimData{
Position: NewPositionFromGIndex(parentGIndex),
},
}
}
func TestDefendsParent(t *testing.T) {
tests := []struct {
name string
claim Claim
expected bool
}{
{
name: "LeftChildAttacks",
claim: buildClaim(2, 1),
expected: false,
},
{
name: "RightChildDoesntDefend",
claim: buildClaim(3, 1),
expected: false,
},
{
name: "SubChildDoesntDefend",
claim: buildClaim(4, 1),
expected: false,
},
{
name: "SubSecondChildDoesntDefend",
claim: buildClaim(5, 1),
expected: false,
},
{
name: "RightLeftChildDefendsParent",
claim: buildClaim(6, 1),
expected: true,
},
{
name: "SubThirdChildDefends",
claim: buildClaim(7, 1),
expected: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
require.Equal(t, test.expected, test.claim.DefendsParent())
})
}
}
...@@ -34,7 +34,8 @@ import ( ...@@ -34,7 +34,8 @@ import (
const alphabetGameType uint8 = 255 const alphabetGameType uint8 = 255
const cannonGameType uint8 = 0 const cannonGameType uint8 = 0
const alphabetGameDepth = 4 const alphabetGameDepth = 4
const lastAlphabetTraceIndex = 1<<alphabetGameDepth - 1
var lastAlphabetTraceIndex = big.NewInt(1<<alphabetGameDepth - 1)
// rootPosition is the position of the root claim. // rootPosition is the position of the root claim.
var rootPosition = faultTypes.NewPositionFromGIndex(1) var rootPosition = faultTypes.NewPositionFromGIndex(1)
......
...@@ -3,8 +3,6 @@ package metrics ...@@ -3,8 +3,6 @@ package metrics
import ( import (
"context" "context"
"errors"
"fmt"
"net" "net"
"strconv" "strconv"
"time" "time"
...@@ -21,9 +19,7 @@ import ( ...@@ -21,9 +19,7 @@ import (
"github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
) )
...@@ -31,9 +27,6 @@ import ( ...@@ -31,9 +27,6 @@ import (
const ( const (
Namespace = "op_node" Namespace = "op_node"
RPCServerSubsystem = "rpc_server"
RPCClientSubsystem = "rpc_client"
BatchMethod = "<batch>" BatchMethod = "<batch>"
) )
...@@ -87,11 +80,7 @@ type Metrics struct { ...@@ -87,11 +80,7 @@ type Metrics struct {
Info *prometheus.GaugeVec Info *prometheus.GaugeVec
Up prometheus.Gauge Up prometheus.Gauge
RPCServerRequestsTotal *prometheus.CounterVec metrics.RPCMetrics
RPCServerRequestDurationSeconds *prometheus.HistogramVec
RPCClientRequestsTotal *prometheus.CounterVec
RPCClientRequestDurationSeconds *prometheus.HistogramVec
RPCClientResponsesTotal *prometheus.CounterVec
L1SourceCache *CacheMetrics L1SourceCache *CacheMetrics
L2SourceCache *CacheMetrics L2SourceCache *CacheMetrics
...@@ -186,49 +175,7 @@ func NewMetrics(procName string) *Metrics { ...@@ -186,49 +175,7 @@ func NewMetrics(procName string) *Metrics {
Help: "1 if the op node has finished starting up", Help: "1 if the op node has finished starting up",
}), }),
RPCServerRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{ RPCMetrics: metrics.MakeRPCMetrics(ns, factory),
Namespace: ns,
Subsystem: RPCServerSubsystem,
Name: "requests_total",
Help: "Total requests to the RPC server",
}, []string{
"method",
}),
RPCServerRequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{
Namespace: ns,
Subsystem: RPCServerSubsystem,
Name: "request_duration_seconds",
Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
Help: "Histogram of RPC server request durations",
}, []string{
"method",
}),
RPCClientRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: RPCClientSubsystem,
Name: "requests_total",
Help: "Total RPC requests initiated by the opnode's RPC client",
}, []string{
"method",
}),
RPCClientRequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{
Namespace: ns,
Subsystem: RPCClientSubsystem,
Name: "request_duration_seconds",
Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
Help: "Histogram of RPC client request durations",
}, []string{
"method",
}),
RPCClientResponsesTotal: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: RPCClientSubsystem,
Name: "responses_total",
Help: "Total RPC request responses received by the opnode's RPC client",
}, []string{
"method",
"error",
}),
L1SourceCache: NewCacheMetrics(factory, ns, "l1_source_cache", "L1 Source cache"), L1SourceCache: NewCacheMetrics(factory, ns, "l1_source_cache", "L1 Source cache"),
L2SourceCache: NewCacheMetrics(factory, ns, "l2_source_cache", "L2 Source cache"), L2SourceCache: NewCacheMetrics(factory, ns, "l2_source_cache", "L2 Source cache"),
...@@ -467,53 +414,6 @@ func (m *Metrics) RecordUp() { ...@@ -467,53 +414,6 @@ func (m *Metrics) RecordUp() {
m.Up.Set(1) m.Up.Set(1)
} }
// RecordRPCServerRequest is a helper method to record an incoming RPC
// call to the opnode's RPC server. It bumps the requests metric,
// and tracks how long it takes to serve a response.
func (m *Metrics) RecordRPCServerRequest(method string) func() {
m.RPCServerRequestsTotal.WithLabelValues(method).Inc()
timer := prometheus.NewTimer(m.RPCServerRequestDurationSeconds.WithLabelValues(method))
return func() {
timer.ObserveDuration()
}
}
// RecordRPCClientRequest is a helper method to record an RPC client
// request. It bumps the requests metric, tracks the response
// duration, and records the response's error code.
func (m *Metrics) RecordRPCClientRequest(method string) func(err error) {
m.RPCClientRequestsTotal.WithLabelValues(method).Inc()
timer := prometheus.NewTimer(m.RPCClientRequestDurationSeconds.WithLabelValues(method))
return func(err error) {
m.RecordRPCClientResponse(method, err)
timer.ObserveDuration()
}
}
// RecordRPCClientResponse records an RPC response. It will
// convert the passed-in error into something metrics friendly.
// Nil errors get converted into <nil>, RPC errors are converted
// into rpc_<error code>, HTTP errors are converted into
// http_<status code>, and everything else is converted into
// <unknown>.
func (m *Metrics) RecordRPCClientResponse(method string, err error) {
var errStr string
var rpcErr rpc.Error
var httpErr rpc.HTTPError
if err == nil {
errStr = "<nil>"
} else if errors.As(err, &rpcErr) {
errStr = fmt.Sprintf("rpc_%d", rpcErr.ErrorCode())
} else if errors.As(err, &httpErr) {
errStr = fmt.Sprintf("http_%d", httpErr.StatusCode)
} else if errors.Is(err, ethereum.NotFound) {
errStr = "<not found>"
} else {
errStr = "<unknown>"
}
m.RPCClientResponsesTotal.WithLabelValues(method, errStr).Inc()
}
func (m *Metrics) SetDerivationIdle(status bool) { func (m *Metrics) SetDerivationIdle(status bool) {
var val float64 var val float64
if status { if status {
......
...@@ -11,7 +11,8 @@ import ( ...@@ -11,7 +11,8 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/version" "github.com/ethereum-optimism/optimism/op-node/version"
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
oplog "github.com/ethereum-optimism/optimism/op-service/log" "github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum-optimism/optimism/op-service/rpc"
) )
type l2EthClient interface { type l2EthClient interface {
...@@ -31,79 +32,51 @@ type driverClient interface { ...@@ -31,79 +32,51 @@ type driverClient interface {
SequencerActive(context.Context) (bool, error) SequencerActive(context.Context) (bool, error)
} }
type rpcMetrics interface {
// RecordRPCServerRequest returns a function that records the duration of serving the given RPC method
RecordRPCServerRequest(method string) func()
}
type adminAPI struct { type adminAPI struct {
dr driverClient *rpc.CommonAdminAPI
m rpcMetrics dr driverClient
log log.Logger
} }
func NewAdminAPI(dr driverClient, m rpcMetrics, log log.Logger) *adminAPI { func NewAdminAPI(dr driverClient, m metrics.RPCMetricer, log log.Logger) *adminAPI {
return &adminAPI{ return &adminAPI{
dr: dr, CommonAdminAPI: rpc.NewCommonAdminAPI(m, log),
m: m, dr: dr,
log: log,
} }
} }
func (n *adminAPI) ResetDerivationPipeline(ctx context.Context) error { func (n *adminAPI) ResetDerivationPipeline(ctx context.Context) error {
recordDur := n.m.RecordRPCServerRequest("admin_resetDerivationPipeline") recordDur := n.M.RecordRPCServerRequest("admin_resetDerivationPipeline")
defer recordDur() defer recordDur()
return n.dr.ResetDerivationPipeline(ctx) return n.dr.ResetDerivationPipeline(ctx)
} }
func (n *adminAPI) StartSequencer(ctx context.Context, blockHash common.Hash) error { func (n *adminAPI) StartSequencer(ctx context.Context, blockHash common.Hash) error {
recordDur := n.m.RecordRPCServerRequest("admin_startSequencer") recordDur := n.M.RecordRPCServerRequest("admin_startSequencer")
defer recordDur() defer recordDur()
return n.dr.StartSequencer(ctx, blockHash) return n.dr.StartSequencer(ctx, blockHash)
} }
func (n *adminAPI) StopSequencer(ctx context.Context) (common.Hash, error) { func (n *adminAPI) StopSequencer(ctx context.Context) (common.Hash, error) {
recordDur := n.m.RecordRPCServerRequest("admin_stopSequencer") recordDur := n.M.RecordRPCServerRequest("admin_stopSequencer")
defer recordDur() defer recordDur()
return n.dr.StopSequencer(ctx) return n.dr.StopSequencer(ctx)
} }
func (n *adminAPI) SequencerActive(ctx context.Context) (bool, error) { func (n *adminAPI) SequencerActive(ctx context.Context) (bool, error) {
recordDur := n.m.RecordRPCServerRequest("admin_sequencerActive") recordDur := n.M.RecordRPCServerRequest("admin_sequencerActive")
defer recordDur() defer recordDur()
return n.dr.SequencerActive(ctx) return n.dr.SequencerActive(ctx)
} }
func (n *adminAPI) SetLogLevel(ctx context.Context, lvlStr string) error {
recordDur := n.m.RecordRPCServerRequest("admin_setLogLevel")
defer recordDur()
h := n.log.GetHandler()
lvl, err := log.LvlFromString(lvlStr)
if err != nil {
return err
}
// We set the log level, and do not wrap the handler with an additional filter handler,
// as the underlying handler would otherwise also still filter with the previous log level.
lvlSetter, ok := h.(oplog.LvlSetter)
if !ok {
return fmt.Errorf("log handler type %T cannot change log level", h)
}
lvlSetter.SetLogLevel(lvl)
return nil
}
type nodeAPI struct { type nodeAPI struct {
config *rollup.Config config *rollup.Config
client l2EthClient client l2EthClient
dr driverClient dr driverClient
log log.Logger log log.Logger
m rpcMetrics m metrics.RPCMetricer
} }
func NewNodeAPI(config *rollup.Config, l2Client l2EthClient, dr driverClient, log log.Logger, m rpcMetrics) *nodeAPI { func NewNodeAPI(config *rollup.Config, l2Client l2EthClient, dr driverClient, log log.Logger, m metrics.RPCMetricer) *nodeAPI {
return &nodeAPI{ return &nodeAPI{
config: config, config: config,
client: l2Client, client: l2Client,
......
...@@ -2,17 +2,34 @@ package sources ...@@ -2,17 +2,34 @@ package sources
import ( import (
"context" "context"
"net"
"sync" "sync"
"github.com/ethereum-optimism/optimism/op-node/client" "github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"golang.org/x/sync/semaphore"
) )
type limitClient struct { type limitClient struct {
c client.RPC mutex sync.Mutex
sema chan struct{} closed bool
wg sync.WaitGroup c client.RPC
sema *semaphore.Weighted
wg sync.WaitGroup
}
// joinWaitGroup will add the caller to the waitgroup if the client has not
// already been told to shutdown. If the client has shut down, false is
// returned, otherwise true.
func (lc *limitClient) joinWaitGroup() bool {
lc.mutex.Lock()
defer lc.mutex.Unlock()
if lc.closed {
return false
}
lc.wg.Add(1)
return true
} }
// LimitRPC limits concurrent RPC requests (excluding subscriptions) to a given number by wrapping the client with a semaphore. // LimitRPC limits concurrent RPC requests (excluding subscriptions) to a given number by wrapping the client with a semaphore.
...@@ -20,33 +37,47 @@ func LimitRPC(c client.RPC, concurrentRequests int) client.RPC { ...@@ -20,33 +37,47 @@ func LimitRPC(c client.RPC, concurrentRequests int) client.RPC {
return &limitClient{ return &limitClient{
c: c, c: c,
// the capacity of the channel determines how many go-routines can concurrently execute requests with the wrapped client. // the capacity of the channel determines how many go-routines can concurrently execute requests with the wrapped client.
sema: make(chan struct{}, concurrentRequests), sema: semaphore.NewWeighted(int64(concurrentRequests)),
} }
} }
func (lc *limitClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { func (lc *limitClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error {
lc.wg.Add(1) if !lc.joinWaitGroup() {
return net.ErrClosed
}
defer lc.wg.Done() defer lc.wg.Done()
lc.sema <- struct{}{} if err := lc.sema.Acquire(ctx, 1); err != nil {
defer func() { <-lc.sema }() return err
}
defer lc.sema.Release(1)
return lc.c.BatchCallContext(ctx, b) return lc.c.BatchCallContext(ctx, b)
} }
func (lc *limitClient) CallContext(ctx context.Context, result any, method string, args ...any) error { func (lc *limitClient) CallContext(ctx context.Context, result any, method string, args ...any) error {
lc.wg.Add(1) if !lc.joinWaitGroup() {
return net.ErrClosed
}
defer lc.wg.Done() defer lc.wg.Done()
lc.sema <- struct{}{} if err := lc.sema.Acquire(ctx, 1); err != nil {
defer func() { <-lc.sema }() return err
}
defer lc.sema.Release(1)
return lc.c.CallContext(ctx, result, method, args...) return lc.c.CallContext(ctx, result, method, args...)
} }
func (lc *limitClient) EthSubscribe(ctx context.Context, channel any, args ...any) (ethereum.Subscription, error) { func (lc *limitClient) EthSubscribe(ctx context.Context, channel any, args ...any) (ethereum.Subscription, error) {
if !lc.joinWaitGroup() {
return nil, net.ErrClosed
}
defer lc.wg.Done()
// subscription doesn't count towards request limit // subscription doesn't count towards request limit
return lc.c.EthSubscribe(ctx, channel, args...) return lc.c.EthSubscribe(ctx, channel, args...)
} }
func (lc *limitClient) Close() { func (lc *limitClient) Close() {
lc.wg.Wait() lc.mutex.Lock()
close(lc.sema) lc.closed = true // No new waitgroup members after this is set
lc.mutex.Unlock()
lc.wg.Wait() // All waitgroup members exited, means no more dereferences of the client
lc.c.Close() lc.c.Close()
} }
package sources
import (
"context"
"fmt"
"net"
"sync/atomic"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/rpc"
"github.com/stretchr/testify/require"
)
type MockRPC struct {
t *testing.T
blockedCallers atomic.Int32
errC chan error
}
func (m *MockRPC) Close() {}
func (m *MockRPC) CallContext(ctx context.Context, result any, method string, args ...any) error {
m.blockedCallers.Add(1)
defer m.blockedCallers.Add(-1)
return <-m.errC
}
func (m *MockRPC) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error {
m.blockedCallers.Add(1)
defer m.blockedCallers.Add(-1)
return <-m.errC
}
func (m *MockRPC) EthSubscribe(ctx context.Context, channel any, args ...any) (ethereum.Subscription, error) {
m.t.Fatal("EthSubscribe should not be called")
return nil, nil
}
func asyncCallContext(ctx context.Context, lc client.RPC) chan error {
errC := make(chan error)
go func() {
errC <- lc.CallContext(ctx, 0, "fake_method")
}()
return errC
}
func TestLimitClient(t *testing.T) {
// The MockRPC will block all calls until errC is written to
m := &MockRPC{
t: t,
errC: make(chan error),
}
lc := LimitRPC(m, 2).(*limitClient)
errC1 := asyncCallContext(context.Background(), lc)
errC2 := asyncCallContext(context.Background(), lc)
require.Eventually(t, func() bool { return m.blockedCallers.Load() == 2 }, time.Second, 10*time.Millisecond)
// Once the limit of 2 clients has been reached, we enqueue two more,
// one with a context that will expire
tCtx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
defer cancel()
errC3 := asyncCallContext(tCtx, lc)
errC4 := asyncCallContext(context.Background(), lc)
select {
case err := <-errC3:
require.ErrorIs(t, err, context.DeadlineExceeded)
case <-time.After(time.Second):
t.Fatalf("context should have expired and the call returned")
}
// No further clients should be allowed after this block, but existing
// clients should persist until their contexts close
go lc.Close()
require.Eventually(t, func() bool {
lc.mutex.Lock()
defer lc.mutex.Unlock()
return lc.closed
}, time.Second, 10*time.Millisecond)
err := lc.CallContext(context.Background(), 0, "fake_method")
require.ErrorIs(t, err, net.ErrClosed, "Calls after close should return immediately with error")
// Existing clients should all remain blocked
select {
case err := <-errC1:
t.Fatalf("client should not have returned: %s", err)
case err := <-errC2:
t.Fatalf("client should not have returned: %s", err)
case err := <-errC4:
t.Fatalf("client should not have returned: %s", err)
case <-time.After(50 * time.Millisecond):
// None of the clients should return yet
}
m.errC <- fmt.Errorf("fake-error")
m.errC <- fmt.Errorf("fake-error")
require.Eventually(t, func() bool { return m.blockedCallers.Load() == 1 }, time.Second, 10*time.Millisecond)
m.errC <- fmt.Errorf("fake-error")
require.ErrorContains(t, <-errC1, "fake-error")
require.ErrorContains(t, <-errC2, "fake-error")
require.ErrorContains(t, <-errC4, "fake-error")
require.Eventually(t, func() bool { return m.blockedCallers.Load() == 0 }, time.Second, 10*time.Millisecond)
}
...@@ -58,3 +58,9 @@ type TestRPCMetrics struct{} ...@@ -58,3 +58,9 @@ type TestRPCMetrics struct{}
func (n *TestRPCMetrics) RecordRPCServerRequest(method string) func() { func (n *TestRPCMetrics) RecordRPCServerRequest(method string) func() {
return func() {} return func() {}
} }
func (n *TestRPCMetrics) RecordRPCClientRequest(method string) func(err error) {
return func(err error) {}
}
func (n *TestRPCMetrics) RecordRPCClientResponse(method string, err error) {}
package metrics
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/rpc"
"github.com/prometheus/client_golang/prometheus"
)
const (
RPCServerSubsystem = "rpc_server"
RPCClientSubsystem = "rpc_client"
)
type RPCMetricer interface {
RecordRPCServerRequest(method string) func()
RecordRPCClientRequest(method string) func(err error)
RecordRPCClientResponse(method string, err error)
}
// RPCMetrics tracks all the RPC metrics for the op-service RPC.
type RPCMetrics struct {
RPCServerRequestsTotal *prometheus.CounterVec
RPCServerRequestDurationSeconds *prometheus.HistogramVec
RPCClientRequestsTotal *prometheus.CounterVec
RPCClientRequestDurationSeconds *prometheus.HistogramVec
RPCClientResponsesTotal *prometheus.CounterVec
}
// MakeRPCMetrics creates a new RPCMetrics instance with the given process name, and
// namespace for the service.
func MakeRPCMetrics(ns string, factory Factory) RPCMetrics {
return RPCMetrics{
RPCServerRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: RPCServerSubsystem,
Name: "requests_total",
Help: "Total requests to the RPC server",
}, []string{
"method",
}),
RPCServerRequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{
Namespace: ns,
Subsystem: RPCServerSubsystem,
Name: "request_duration_seconds",
Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
Help: "Histogram of RPC server request durations",
}, []string{
"method",
}),
RPCClientRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: RPCClientSubsystem,
Name: "requests_total",
Help: "Total RPC requests initiated by the opnode's RPC client",
}, []string{
"method",
}),
RPCClientRequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{
Namespace: ns,
Subsystem: RPCClientSubsystem,
Name: "request_duration_seconds",
Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
Help: "Histogram of RPC client request durations",
}, []string{
"method",
}),
RPCClientResponsesTotal: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: RPCClientSubsystem,
Name: "responses_total",
Help: "Total RPC request responses received by the opnode's RPC client",
}, []string{
"method",
"error",
}),
}
}
// RecordRPCServerRequest is a helper method to record an incoming RPC
// call to the opnode's RPC server. It bumps the requests metric,
// and tracks how long it takes to serve a response.
func (m *RPCMetrics) RecordRPCServerRequest(method string) func() {
m.RPCServerRequestsTotal.WithLabelValues(method).Inc()
timer := prometheus.NewTimer(m.RPCServerRequestDurationSeconds.WithLabelValues(method))
return func() {
timer.ObserveDuration()
}
}
// RecordRPCClientRequest is a helper method to record an RPC client
// request. It bumps the requests metric, tracks the response
// duration, and records the response's error code.
func (m *RPCMetrics) RecordRPCClientRequest(method string) func(err error) {
m.RPCClientRequestsTotal.WithLabelValues(method).Inc()
timer := prometheus.NewTimer(m.RPCClientRequestDurationSeconds.WithLabelValues(method))
return func(err error) {
m.RecordRPCClientResponse(method, err)
timer.ObserveDuration()
}
}
// RecordRPCClientResponse records an RPC response. It will
// convert the passed-in error into something metrics friendly.
// Nil errors get converted into <nil>, RPC errors are converted
// into rpc_<error code>, HTTP errors are converted into
// http_<status code>, and everything else is converted into
// <unknown>.
func (m *RPCMetrics) RecordRPCClientResponse(method string, err error) {
var errStr string
var rpcErr rpc.Error
var httpErr rpc.HTTPError
if err == nil {
errStr = "<nil>"
} else if errors.As(err, &rpcErr) {
errStr = fmt.Sprintf("rpc_%d", rpcErr.ErrorCode())
} else if errors.As(err, &httpErr) {
errStr = fmt.Sprintf("http_%d", httpErr.StatusCode)
} else if errors.Is(err, ethereum.NotFound) {
errStr = "<not found>"
} else {
errStr = "<unknown>"
}
m.RPCClientResponsesTotal.WithLabelValues(method, errStr).Inc()
}
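Taken together with the op-node changes earlier in the diff, the intended consumption pattern looks roughly like the following. This is a hedged sketch written as if inside the same `metrics` package; the namespace `"op_example"`, the method string, and `handleSomething` are illustrative, not real API.

```go
// ServiceMetrics embeds the shared RPC metrics so the service satisfies
// RPCMetricer without re-declaring the counters and histograms.
type ServiceMetrics struct {
	RPCMetrics
}

func NewServiceMetrics(factory Factory) *ServiceMetrics {
	return &ServiceMetrics{
		RPCMetrics: MakeRPCMetrics("op_example", factory),
	}
}

// handleSomething shows the record-then-defer pattern used by the admin APIs:
// bump the request counter, then observe the duration when the handler returns.
func (m *ServiceMetrics) handleSomething() {
	recordDur := m.RecordRPCServerRequest("example_method")
	defer recordDur()
	// ... serve the request ...
}
```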
package rpc
import (
"context"
"fmt"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum/go-ethereum/log"
)
type CommonAdminAPI struct {
M metrics.RPCMetricer
log log.Logger
}
func NewCommonAdminAPI(m metrics.RPCMetricer, log log.Logger) *CommonAdminAPI {
return &CommonAdminAPI{
M: m,
log: log,
}
}
func (n *CommonAdminAPI) SetLogLevel(ctx context.Context, lvlStr string) error {
recordDur := n.M.RecordRPCServerRequest("admin_setLogLevel")
defer recordDur()
h := n.log.GetHandler()
lvl, err := log.LvlFromString(lvlStr)
if err != nil {
return err
}
// We set the log level, and do not wrap the handler with an additional filter handler,
// as the underlying handler would otherwise also still filter with the previous log level.
lvlSetter, ok := h.(oplog.LvlSetter)
if !ok {
return fmt.Errorf("log handler type %T cannot change log level", h)
}
lvlSetter.SetLogLevel(lvl)
return nil
}
...@@ -9,8 +9,9 @@ import ( ...@@ -9,8 +9,9 @@ import (
) )
const ( const (
ListenAddrFlagName = "rpc.addr" ListenAddrFlagName = "rpc.addr"
PortFlagName = "rpc.port" PortFlagName = "rpc.port"
EnableAdminFlagName = "rpc.enable-admin"
) )
func CLIFlags(envPrefix string) []cli.Flag { func CLIFlags(envPrefix string) []cli.Flag {
...@@ -27,12 +28,18 @@ func CLIFlags(envPrefix string) []cli.Flag { ...@@ -27,12 +28,18 @@ func CLIFlags(envPrefix string) []cli.Flag {
Value: 8545, Value: 8545,
EnvVars: opservice.PrefixEnvVar(envPrefix, "RPC_PORT"), EnvVars: opservice.PrefixEnvVar(envPrefix, "RPC_PORT"),
}, },
&cli.BoolFlag{
Name: EnableAdminFlagName,
Usage: "Enable the admin API",
EnvVars: opservice.PrefixEnvVar(envPrefix, "RPC_ENABLE_ADMIN"),
},
} }
} }
type CLIConfig struct { type CLIConfig struct {
ListenAddr string ListenAddr string
ListenPort int ListenPort int
EnableAdmin bool
} }
func (c CLIConfig) Check() error { func (c CLIConfig) Check() error {
...@@ -45,7 +52,8 @@ func (c CLIConfig) Check() error { ...@@ -45,7 +52,8 @@ func (c CLIConfig) Check() error {
func ReadCLIConfig(ctx *cli.Context) CLIConfig { func ReadCLIConfig(ctx *cli.Context) CLIConfig {
return CLIConfig{ return CLIConfig{
ListenAddr: ctx.String(ListenAddrFlagName), ListenAddr: ctx.String(ListenAddrFlagName),
ListenPort: ctx.Int(PortFlagName), ListenPort: ctx.Int(PortFlagName),
EnableAdmin: ctx.Bool(EnableAdminFlagName),
} }
} }
...@@ -28,13 +28,12 @@ require ( ...@@ -28,13 +28,12 @@ require (
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect
github.com/cockroachdb/redact v1.1.3 // indirect github.com/cockroachdb/redact v1.1.3 // indirect
github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.10.0 // indirect github.com/consensys/gnark-crypto v0.12.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect
github.com/deckarep/golang-set/v2 v2.1.0 // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/ethereum/c-kzg-4844 v0.3.0 // indirect github.com/ethereum/c-kzg-4844 v0.3.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect github.com/go-stack/stack v1.8.1 // indirect
......
...@@ -64,8 +64,8 @@ github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ ...@@ -64,8 +64,8 @@ github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.10.0 h1:zRh22SR7o4K35SoNqouS9J/TKHTyU2QWaj5ldehyXtA= github.com/consensys/gnark-crypto v0.12.0 h1:1OnSpOykNkUIBIBJKdhwy2p0JlW5o+Az02ICzZmvvdg=
github.com/consensys/gnark-crypto v0.10.0/go.mod h1:Iq/P3HHl0ElSjsg2E1gsMwhAyxnxoKK5nVyZKd+/KhU= github.com/consensys/gnark-crypto v0.12.0/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
...@@ -109,7 +109,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo ...@@ -109,7 +109,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays=
github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c=
...@@ -353,8 +352,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ ...@@ -353,8 +352,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI=
...@@ -498,7 +497,6 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc ...@@ -498,7 +497,6 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
......
...@@ -8,5 +8,5 @@ PyGithub==1.57 ...@@ -8,5 +8,5 @@ PyGithub==1.57
PyJWT==2.6.0 PyJWT==2.6.0
PyNaCl==1.5.0 PyNaCl==1.5.0
requests==2.28.1 requests==2.28.1
urllib3==1.26.13 urllib3==1.26.17
wrapt==1.14.1 wrapt==1.14.1
...@@ -46,7 +46,7 @@ ...@@ -46,7 +46,7 @@
"express-prom-bundle": "^6.6.0", "express-prom-bundle": "^6.6.0",
"lodash": "^4.17.21", "lodash": "^4.17.21",
"morgan": "^1.10.0", "morgan": "^1.10.0",
"pino": "^8.15.3", "pino": "^8.15.4",
"pino-multi-stream": "^6.0.0", "pino-multi-stream": "^6.0.0",
"pino-sentry": "^0.14.0", "pino-sentry": "^0.14.0",
"prom-client": "^14.2.0" "prom-client": "^14.2.0"
......
...@@ -172,7 +172,7 @@ contract Deploy is Deployer { ...@@ -172,7 +172,7 @@ contract Deploy is Deployer {
? safeProxyFactory_ = new SafeProxyFactory() ? safeProxyFactory_ = new SafeProxyFactory()
: safeProxyFactory_ = SafeProxyFactory(safeProxyFactory); : safeProxyFactory_ = SafeProxyFactory(safeProxyFactory);
safeSingleton.code.length == 0 ? safeSingleton_ = new Safe() : safeSingleton_ = Safe(payable(safeSingleton_)); safeSingleton.code.length == 0 ? safeSingleton_ = new Safe() : safeSingleton_ = Safe(payable(safeSingleton));
save("SafeProxyFactory", address(safeProxyFactory_)); save("SafeProxyFactory", address(safeProxyFactory_));
save("SafeSingleton", address(safeSingleton_)); save("SafeSingleton", address(safeSingleton_));
......
...@@ -54,14 +54,14 @@ ...@@ -54,14 +54,14 @@
"@vitest/coverage-istanbul": "^0.34.6", "@vitest/coverage-istanbul": "^0.34.6",
"@wagmi/cli": "^1.5.2", "@wagmi/cli": "^1.5.2",
"@wagmi/core": "^1.4.3", "@wagmi/core": "^1.4.3",
"abitype": "^0.9.3", "abitype": "^0.9.9",
"glob": "^10.3.10", "glob": "^10.3.10",
"isomorphic-fetch": "^3.0.0", "isomorphic-fetch": "^3.0.0",
"jest-dom": "link:@types/@testing-library/jest-dom", "jest-dom": "link:@types/@testing-library/jest-dom",
"jsdom": "^22.1.0", "jsdom": "^22.1.0",
"tsup": "^7.2.0", "tsup": "^7.2.0",
"typescript": "^5.2.2", "typescript": "^5.2.2",
"vite": "^4.4.6", "vite": "^4.4.10",
"vitest": "^0.34.2" "vitest": "^0.34.2"
}, },
"peerDependencies": { "peerDependencies": {
...@@ -80,6 +80,6 @@ ...@@ -80,6 +80,6 @@
"@testing-library/react": "^14.0.0", "@testing-library/react": "^14.0.0",
"react": "^18.2.0", "react": "^18.2.0",
"react-dom": "^18.2.0", "react-dom": "^18.2.0",
"viem": "^1.14.0" "viem": "^1.15.1"
} }
} }
...@@ -38,14 +38,14 @@ ...@@ -38,14 +38,14 @@
"@testing-library/jest-dom": "^6.0.1", "@testing-library/jest-dom": "^6.0.1",
"@testing-library/react-hooks": "^8.0.1", "@testing-library/react-hooks": "^8.0.1",
"@vitest/coverage-istanbul": "^0.34.6", "@vitest/coverage-istanbul": "^0.34.6",
"abitype": "^0.9.3", "abitype": "^0.9.9",
"isomorphic-fetch": "^3.0.0", "isomorphic-fetch": "^3.0.0",
"jest-dom": "link:@types/@testing-library/jest-dom", "jest-dom": "link:@types/@testing-library/jest-dom",
"jsdom": "^22.1.0", "jsdom": "^22.1.0",
"tsup": "^7.2.0", "tsup": "^7.2.0",
"typescript": "^5.2.2", "typescript": "^5.2.2",
"viem": "^1.14.0", "viem": "^1.15.1",
"vite": "^4.4.6", "vite": "^4.4.10",
"vitest": "^0.34.2" "vitest": "^0.34.2"
}, },
"peerDependencies": { "peerDependencies": {
......
...@@ -56,9 +56,9 @@ ...@@ -56,9 +56,9 @@
"ts-node": "^10.9.1", "ts-node": "^10.9.1",
"typedoc": "^0.25.1", "typedoc": "^0.25.1",
"typescript": "^5.2.2", "typescript": "^5.2.2",
"viem": "^1.14.0", "viem": "^1.15.1",
"vitest": "^0.34.2", "vitest": "^0.34.2",
"zod": "^3.22.1" "zod": "^3.22.3"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/contracts": "0.6.0", "@eth-optimism/contracts": "0.6.0",
......
...@@ -37,10 +37,10 @@ ...@@ -37,10 +37,10 @@
"@vitest/coverage-istanbul": "^0.34.6", "@vitest/coverage-istanbul": "^0.34.6",
"tsup": "^7.2.0", "tsup": "^7.2.0",
"typescript": "^5.2.2", "typescript": "^5.2.2",
"viem": "^1.14.0", "viem": "^1.15.1",
"vite": "^4.4.9", "vite": "^4.4.10",
"vitest": "^0.34.1", "vitest": "^0.34.1",
"zod": "^3.22.0" "zod": "^3.22.3"
}, },
"dependencies": { "dependencies": {
"@ethereumjs/rlp": "^5.0.0", "@ethereumjs/rlp": "^5.0.0",
......
...@@ -28,7 +28,6 @@ require ( ...@@ -28,7 +28,6 @@ require (
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.7.0 // indirect github.com/bits-and-blooms/bitset v1.7.0 // indirect
github.com/btcsuite/btcd v0.22.0-beta // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/errors v1.9.1 // indirect
......
...@@ -57,9 +57,6 @@ enum GameStatus { ...@@ -57,9 +57,6 @@ enum GameStatus {
DEFENDER_WINS DEFENDER_WINS
} }
/// @notice A dedicated timestamp type.
type Timestamp is uint64;
/// @notice A `Claim` type represents a 32 byte hash or other unique identifier for a claim about /// @notice A `Claim` type represents a 32 byte hash or other unique identifier for a claim about
/// a certain piece of information. /// a certain piece of information.
/// @dev For the `FAULT` `GameType`, this will be a root of the merklized state of the fault proof /// @dev For the `FAULT` `GameType`, this will be a root of the merklized state of the fault proof
......
...@@ -208,7 +208,7 @@ A sequencing window is a range of L1 blocks from which a [sequencing epoch][sequ ...@@ -208,7 +208,7 @@ A sequencing window is a range of L1 blocks from which a [sequencing epoch][sequ
A sequencing window whose first L1 block has number `N` contains [batcher transactions][batcher-transaction] for epoch A sequencing window whose first L1 block has number `N` contains [batcher transactions][batcher-transaction] for epoch
`N`. The window contains blocks `[N, N + SWS)` where `SWS` is the sequencer window size. `N`. The window contains blocks `[N, N + SWS)` where `SWS` is the sequencer window size.
> **TODO** specify sequencer window size The current default `sws` is 3600 epochs.
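For example, with `SWS = 3600` and a sequencing window whose first L1 block has number `N = 1000`, the window spans L1 blocks `1000` through `4599` and carries the batcher transactions for epoch `1000`.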
Additionally, the first block in the window defines the [depositing transactions][depositing-tx] which determine the Additionally, the first block in the window defines the [depositing transactions][depositing-tx] which determine the
[deposits] to be included in the first L2 block of the epoch. [deposits] to be included in the first L2 block of the epoch.
......
...@@ -18,7 +18,6 @@ ...@@ -18,7 +18,6 @@
"editable": true, "editable": true,
"fiscalYearStartMonth": 0, "fiscalYearStartMonth": 0,
"graphTooltip": 0, "graphTooltip": 0,
"id": 1,
"links": [], "links": [],
"liveNow": false, "liveNow": false,
"panels": [ "panels": [
...@@ -36,6 +35,7 @@ ...@@ -36,6 +35,7 @@
"axisCenteredZero": true, "axisCenteredZero": true,
"axisColorMode": "series", "axisColorMode": "series",
"axisGridShow": false, "axisGridShow": false,
"axisLabel": "",
"axisPlacement": "auto", "axisPlacement": "auto",
"barAlignment": 0, "barAlignment": 0,
"drawStyle": "line", "drawStyle": "line",
...@@ -134,6 +134,229 @@ ...@@ -134,6 +134,229 @@
], ],
"title": "Self Transferring on OP Goerli (positive number = success, negative = failures)", "title": "Self Transferring on OP Goerli (positive number = success, negative = failures)",
"type": "timeseries" "type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "fixed"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisGridShow": false,
"axisLabel": "",
"axisPlacement": "auto",
"axisSoftMin": 0,
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "stepAfter",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"decimals": 0,
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "text",
"value": null
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "metamask_self_send_fee_estimation_low"
},
"properties": [
{
"id": "displayName",
"value": "Low (Slow 🐢)"
},
{
"id": "color",
"value": {
"fixedColor": "green",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "metamask_self_send_fee_estimation_medium"
},
"properties": [
{
"id": "displayName",
"value": "Medium (Market 🦊)"
},
{
"id": "color",
"value": {
"fixedColor": "orange",
"mode": "fixed"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "metamask_self_send_fee_estimation_high"
},
"properties": [
{
"id": "displayName",
"value": "High (Aggressive 🦍)"
}
]
},
{
"matcher": {
"id": "byName",
"options": "metamask_self_send_fee_estimation_actual"
},
"properties": [
{
"id": "displayName",
"value": "Actual transaction fee"
},
{
"id": "color",
"value": {
"fixedColor": "blue",
"mode": "fixed"
}
}
]
}
]
},
"gridPos": {
"h": 11,
"w": 24,
"x": 0,
"y": 8
},
"id": 2,
"options": {
"legend": {
"calcs": [
"last"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"timezone": [
"browser"
],
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
"editorMode": "builder",
"expr": "metamask_self_send_fee_estimation_low",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
"editorMode": "builder",
"expr": "metamask_self_send_fee_estimation_medium",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "B",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
"editorMode": "builder",
"expr": "metamask_self_send_fee_estimation_high",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "C",
"useBackend": false
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
"editorMode": "builder",
"expr": "metamask_self_send_fee_estimation_actual",
"fullMetaSearch": false,
"hide": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "D",
"useBackend": false
}
],
"title": "Self Transferring on OP Goerli Fee Estimates",
"type": "timeseries"
} }
], ],
"refresh": "5s", "refresh": "5s",
...@@ -144,7 +367,7 @@ ...@@ -144,7 +367,7 @@
"list": [] "list": []
}, },
"time": { "time": {
"from": "now-30m", "from": "now-3h",
"to": "now" "to": "now"
}, },
"timepicker": {}, "timepicker": {},
......
...@@ -13,7 +13,7 @@ importers: ...@@ -13,7 +13,7 @@ importers:
version: 14.2.0 version: 14.2.0
zod: zod:
specifier: ^3.22.2 specifier: ^3.22.2
version: 3.22.2 version: 3.22.3
devDependencies: devDependencies:
'@metamask/test-dapp': '@metamask/test-dapp':
specifier: ^7.1.0 specifier: ^7.1.0
...@@ -23,7 +23,7 @@ importers: ...@@ -23,7 +23,7 @@ importers:
version: 1.37.1 version: 1.37.1
'@synthetixio/synpress': '@synthetixio/synpress':
specifier: 3.7.2-beta.7 specifier: 3.7.2-beta.7
version: 3.7.2-beta.7(@babel/core@7.22.20)(@babel/preset-env@7.22.20)(babel-loader@9.1.3)(react-dom@18.2.0)(react@18.2.0)(typescript@5.1.6)(webpack@5.88.2)(zod@3.22.2) version: 3.7.2-beta.7(@babel/core@7.22.20)(@babel/preset-env@7.22.20)(babel-loader@9.1.3)(react-dom@18.2.0)(react@18.2.0)(typescript@5.1.6)(webpack@5.88.2)(zod@3.22.3)
dotenv: dotenv:
specifier: ^16.3.1 specifier: ^16.3.1
version: 16.3.1 version: 16.3.1
...@@ -35,7 +35,7 @@ importers: ...@@ -35,7 +35,7 @@ importers:
version: 5.1.6 version: 5.1.6
viem: viem:
specifier: ^1.10.8 specifier: ^1.10.8
version: 1.10.8(typescript@5.1.6)(zod@3.22.2) version: 1.10.8(typescript@5.1.6)(zod@3.22.3)
packages: packages:
...@@ -353,7 +353,6 @@ packages: ...@@ -353,7 +353,6 @@ packages:
/@babel/plugin-proposal-private-property-in-object@7.21.0-placeholder-for-preset-env.2(@babel/core@7.22.20): /@babel/plugin-proposal-private-property-in-object@7.21.0-placeholder-for-preset-env.2(@babel/core@7.22.20):
resolution: {integrity: sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==} resolution: {integrity: sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==}
engines: {node: '>=6.9.0'} engines: {node: '>=6.9.0'}
deprecated: This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-private-property-in-object instead.
peerDependencies: peerDependencies:
'@babel/core': ^7.0.0-0 '@babel/core': ^7.0.0-0
dependencies: dependencies:
...@@ -1512,7 +1511,7 @@ packages: ...@@ -1512,7 +1511,7 @@ packages:
engines: {node: '>=4'} engines: {node: '>=4'}
dev: true dev: true
/@synthetixio/synpress@3.7.2-beta.7(@babel/core@7.22.20)(@babel/preset-env@7.22.20)(babel-loader@9.1.3)(react-dom@18.2.0)(react@18.2.0)(typescript@5.1.6)(webpack@5.88.2)(zod@3.22.2): /@synthetixio/synpress@3.7.2-beta.7(@babel/core@7.22.20)(@babel/preset-env@7.22.20)(babel-loader@9.1.3)(react-dom@18.2.0)(react@18.2.0)(typescript@5.1.6)(webpack@5.88.2)(zod@3.22.3):
resolution: {integrity: sha512-V5Z59fbzMIv3BVjzfMElD+eR+3C1q3mKPR6RJh9C0GpAnsBLtH2c2cPgBV2QgHBdmaJLFxEQBBud6Sb05w2jcw==} resolution: {integrity: sha512-V5Z59fbzMIv3BVjzfMElD+eR+3C1q3mKPR6RJh9C0GpAnsBLtH2c2cPgBV2QgHBdmaJLFxEQBBud6Sb05w2jcw==}
engines: {node: '>=14'} engines: {node: '>=14'}
hasBin: true hasBin: true
...@@ -1547,7 +1546,7 @@ packages: ...@@ -1547,7 +1546,7 @@ packages:
get-port: 7.0.0 get-port: 7.0.0
node-fetch: 2.7.0 node-fetch: 2.7.0
underscore: 1.13.6 underscore: 1.13.6
viem: 1.10.8(typescript@5.1.6)(zod@3.22.2) viem: 1.10.8(typescript@5.1.6)(zod@3.22.3)
wait-on: 7.0.1(debug@4.3.4) wait-on: 7.0.1(debug@4.3.4)
transitivePeerDependencies: transitivePeerDependencies:
- '@babel/core' - '@babel/core'
...@@ -2036,7 +2035,7 @@ packages: ...@@ -2036,7 +2035,7 @@ packages:
resolution: {integrity: sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==} resolution: {integrity: sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==}
dev: true dev: true
/abitype@0.9.8(typescript@5.1.6)(zod@3.22.2): /abitype@0.9.8(typescript@5.1.6)(zod@3.22.3):
resolution: {integrity: sha512-puLifILdm+8sjyss4S+fsUN09obiT1g2YW6CtcQF+QDzxR0euzgEB29MZujC6zMk2a6SVmtttq1fc6+YFA7WYQ==} resolution: {integrity: sha512-puLifILdm+8sjyss4S+fsUN09obiT1g2YW6CtcQF+QDzxR0euzgEB29MZujC6zMk2a6SVmtttq1fc6+YFA7WYQ==}
peerDependencies: peerDependencies:
typescript: '>=5.0.4' typescript: '>=5.0.4'
...@@ -2048,7 +2047,7 @@ packages: ...@@ -2048,7 +2047,7 @@ packages:
optional: true optional: true
dependencies: dependencies:
typescript: 5.1.6 typescript: 5.1.6
zod: 3.22.2 zod: 3.22.3
dev: true dev: true
/accepts@1.3.8: /accepts@1.3.8:
...@@ -7104,7 +7103,7 @@ packages: ...@@ -7104,7 +7103,7 @@ packages:
extsprintf: 1.3.0 extsprintf: 1.3.0
dev: true dev: true
/viem@1.10.8(typescript@5.1.6)(zod@3.22.2): /viem@1.10.8(typescript@5.1.6)(zod@3.22.3):
resolution: {integrity: sha512-/kVDjc9j1OVoDsxV0E1iw1ehPuWPXv5x/9Yc1H0wKky6ACWRoKsURDeLi0Xwtli7vmFcJne+MMPhA96zVu5iIg==} resolution: {integrity: sha512-/kVDjc9j1OVoDsxV0E1iw1ehPuWPXv5x/9Yc1H0wKky6ACWRoKsURDeLi0Xwtli7vmFcJne+MMPhA96zVu5iIg==}
peerDependencies: peerDependencies:
typescript: '>=5.0.4' typescript: '>=5.0.4'
...@@ -7118,7 +7117,7 @@ packages: ...@@ -7118,7 +7117,7 @@ packages:
'@scure/bip32': 1.3.2 '@scure/bip32': 1.3.2
'@scure/bip39': 1.2.1 '@scure/bip39': 1.2.1
'@types/ws': 8.5.5 '@types/ws': 8.5.5
abitype: 0.9.8(typescript@5.1.6)(zod@3.22.2) abitype: 0.9.8(typescript@5.1.6)(zod@3.22.3)
isomorphic-ws: 5.0.0(ws@8.13.0) isomorphic-ws: 5.0.0(ws@8.13.0)
typescript: 5.1.6 typescript: 5.1.6
ws: 8.13.0 ws: 8.13.0
...@@ -7469,5 +7468,5 @@ packages: ...@@ -7469,5 +7468,5 @@ packages:
engines: {node: '>=12.20'} engines: {node: '>=12.20'}
dev: true dev: true
/zod@3.22.2: /zod@3.22.3:
resolution: {integrity: sha512-wvWkphh5WQsJbVk1tbx1l1Ly4yg+XecD+Mq280uBGt9wa5BKSWf4Mhp6GmrkPixhMxmabYY7RbzlwVP32pbGCg==} resolution: {integrity: sha512-EjIevzuJRiRPbVH4mGc8nApb/lVLKVpmUhAaR5R5doKGfAnGJ6Gr3CViAVjP+4FWSxCsybeWQdcgCtbX+7oZug==}
import 'dotenv/config' import 'dotenv/config'
import { z } from 'zod' import { z } from 'zod'
import metamask from '@synthetixio/synpress/commands/metamask.js' import metamask from '@synthetixio/synpress/commands/metamask.js'
import synpressPlaywright from '@synthetixio/synpress/commands/playwright.js'
import { confirmPageElements } from '@synthetixio/synpress/pages/metamask/notification-page.js'
import { expect, test, type Page } from '@playwright/test' import { expect, test, type Page } from '@playwright/test'
import { mnemonicToAccount, privateKeyToAccount } from 'viem/accounts' import { mnemonicToAccount, privateKeyToAccount } from 'viem/accounts'
import { formatGwei, parseGwei } from 'viem'
import { testWithSynpress } from './testWithSynpressUtil' import { testWithSynpress } from './testWithSynpressUtil'
import { import {
incrementSelfSendTxGauge, incrementSelfSendTxGauge,
setFeeEstimationGauge,
} from './prometheusUtils' } from './prometheusUtils'
const env = z.object({ const env = z
METAMASK_SECRET_WORDS_OR_PRIVATEKEY: z.string(), .object({
METAMASK_OP_GOERLI_RPC_URL: z.string().url(), METAMASK_SECRET_WORDS_OR_PRIVATEKEY: z.string(),
METAMASK_DAPP_URL: z.string().url() METAMASK_OP_GOERLI_RPC_URL: z.string().url(),
}).parse(process.env) METAMASK_DAPP_URL: z.string().url(),
})
const expectedSender = .parse(process.env)
env.METAMASK_SECRET_WORDS_OR_PRIVATEKEY?.startsWith('0x')
? privateKeyToAccount( const expectedSender = env.METAMASK_SECRET_WORDS_OR_PRIVATEKEY?.startsWith('0x')
env.METAMASK_SECRET_WORDS_OR_PRIVATEKEY as `0x${string}` ? privateKeyToAccount(
).address.toLowerCase() env.METAMASK_SECRET_WORDS_OR_PRIVATEKEY as `0x${string}`
: mnemonicToAccount( ).address.toLowerCase()
env.METAMASK_SECRET_WORDS_OR_PRIVATEKEY as string : mnemonicToAccount(
).address.toLowerCase() env.METAMASK_SECRET_WORDS_OR_PRIVATEKEY as string
).address.toLowerCase()
const expectedRecipient = expectedSender const expectedRecipient = expectedSender
let sharedPage: Page let sharedPage: Page
...@@ -119,25 +124,51 @@ test('Send an EIP-1559 transaction and verify success', async () => { ...@@ -119,25 +124,51 @@ test('Send an EIP-1559 transaction and verify success', async () => {
}) })
}) })
const notificationPage =
await synpressPlaywright.switchToMetamaskNotification()
const lowFeeEstimate = await getFeeEstimateInGwei(
confirmPageElements.gasOptionLowButton,
'Low',
notificationPage
)
const highFeeEstimate = await getFeeEstimateInGwei(
confirmPageElements.gasOptionHighButton,
'Aggressive',
notificationPage
)
// Medium needs to be last because that's the gas option we want to submit the tx with
const mediumFeeEstimate = await getFeeEstimateInGwei(
confirmPageElements.gasOptionMediumButton,
'Market',
notificationPage
)
await metamask.confirmTransactionAndWaitForMining() await metamask.confirmTransactionAndWaitForMining()
const txHash = await txHashPromise const txHash = await txHashPromise
const transactionReceiptPromise = new Promise<Record<string, string>>(
(resolve) => {
sharedPage.on('load', async () => {
const responseText = await sharedPage.locator('body > main').innerText()
const transactionReceipt = JSON.parse(
responseText.replace('Response: ', '')
)
resolve(transactionReceipt)
})
}
)
// Metamask test dApp allows us access to the Metamask RPC provider via loading this URL. // Metamask test dApp allows us access to the Metamask RPC provider via loading this URL.
// The RPC reponse will be populated onto the page that's loaded. // The RPC response will be populated onto the page that's loaded.
// More info here: https://github.com/MetaMask/test-dapp/tree/main#usage // More info here: https://github.com/MetaMask/test-dapp/tree/main#usage
await sharedPage.goto( await sharedPage.goto(
`${env.METAMASK_DAPP_URL}/request.html?method=eth_getTransactionReceipt&params=["${txHash}"]` `${env.METAMASK_DAPP_URL}/request.html?method=eth_getTransactionReceipt&params=["${txHash}"]`
) )
// Waiting for RPC response to be populated on the page const transactionReceipt = await transactionReceiptPromise
await sharedPage.waitForTimeout(2_000)
const transactionReceipt = JSON.parse(
(await sharedPage.locator('body > main').innerText()).replace(
'Response: ',
''
)
)
try { try {
expect(transactionReceipt.status).toBe('0x1') expect(transactionReceipt.status).toBe('0x1')
...@@ -149,4 +180,41 @@ test('Send an EIP-1559 transaction and verify success', async () => { ...@@ -149,4 +180,41 @@ test('Send an EIP-1559 transaction and verify success', async () => {
throw error throw error
} }
console.log('Sent an EIP-1559 transaction and verified success') console.log('Sent an EIP-1559 transaction and verified success')
await setFeeEstimationGauge('low', lowFeeEstimate)
await setFeeEstimationGauge('medium', mediumFeeEstimate)
await setFeeEstimationGauge('high', highFeeEstimate)
await setFeeEstimationGauge('actual', getActualTransactionFee(transactionReceipt))
}) })
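// Opens MetaMask's edit-gas-fee dialog on the confirmation popup, selects the given
// gas option, and returns the displayed total fee estimate as a number of gwei.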
const getFeeEstimateInGwei = async (
gasOptionButton: string,
waitForText: 'Low' | 'Market' | 'Aggressive',
notificationPage: Page
) => {
const regexParseEtherValue = /(\d+\.\d+)\sOPG/
await synpressPlaywright.waitAndClick(
confirmPageElements.editGasFeeButton,
notificationPage
)
await synpressPlaywright.waitAndClick(gasOptionButton, notificationPage)
await synpressPlaywright.waitForText(
`${confirmPageElements.editGasFeeButton} .edit-gas-fee-button__label`,
waitForText,
notificationPage
)
const feeValue = (
await synpressPlaywright.waitAndGetValue(
confirmPageElements.totalLabel,
notificationPage
)
).match(regexParseEtherValue)[1]
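// The matched total appears to be denominated in ether (the OPG ticker on OP Goerli);
// parseGwei scales the string by 1e9, so the returned integer reads as a gwei amount.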
return parseInt(parseGwei(feeValue).toString())
}
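// Total cost paid for the transaction: the L2 execution fee (effectiveGasPrice * gasUsed)
// plus the L1 data fee reported on OP Stack receipts, returned in gwei.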
const getActualTransactionFee = (transactionReceipt: Record<string, string>) => {
const effectiveGasPrice = BigInt(transactionReceipt.effectiveGasPrice)
const l2GasUsed = BigInt(transactionReceipt.gasUsed)
const l1Fee = BigInt(transactionReceipt.l1Fee)
return parseInt(formatGwei(effectiveGasPrice * l2GasUsed + l1Fee, 'wei'))
}
import 'dotenv/config' import 'dotenv/config'
import { z } from 'zod' import { z } from 'zod'
import { Gauge, Pushgateway } from 'prom-client' import { Gauge, Pushgateway, Registry } from 'prom-client'
const env = z const env = z
.object({ .object({
...@@ -10,10 +10,41 @@ const env = z ...@@ -10,10 +10,41 @@ const env = z
.parse(process.env) .parse(process.env)
const selfSendTransactionMetricName = 'metamask_self_send' const selfSendTransactionMetricName = 'metamask_self_send'
const feeEstimateLowMetricName = 'metamask_self_send_fee_estimation_low'
const feeEstimateMediumMetricName = 'metamask_self_send_fee_estimation_medium'
const feeEstimateHighMetricName = 'metamask_self_send_fee_estimation_high'
const feeEstimateActualMetricName = 'metamask_self_send_fee_estimation_actual'
const selfSendRegistry = new Registry()
const feeEstimateLowRegistry = new Registry()
const feeEstimateMediumRegistry = new Registry()
const feeEstimateHighRegistry = new Registry()
const feeEstimateActualRegistry = new Registry()
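// Each gauge gets its own registry so that a Pushgateway push (which sends every metric
// in the registry handed to the client) carries only the one metric it is meant to update.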
const selfSendGauge = new Gauge({ const selfSendGauge = new Gauge({
name: selfSendTransactionMetricName, name: selfSendTransactionMetricName,
help: 'A gauge signifying the number of transactions sent with Metamask', help: 'A gauge signifying the number of transactions sent with Metamask',
registers: [selfSendRegistry]
})
const feeEstimateLowGauge = new Gauge({
name: feeEstimateLowMetricName,
help: 'A gauge signifying the latest fee estimation from Metamask for Low transaction speed',
registers: [feeEstimateLowRegistry]
})
const feeEstimateMediumGauge = new Gauge({
name: feeEstimateMediumMetricName,
help: 'A gauge signifying the latest fee estimation from Metamask for Medium transaction speed',
registers: [feeEstimateMediumRegistry]
})
const feeEstimateHighGauge = new Gauge({
name: feeEstimateHighMetricName,
help: 'A gauge signifying the latest fee estimation from Metamask for High transaction speed',
registers: [feeEstimateHighRegistry]
})
const feeEstimateActualGauge = new Gauge({
name: feeEstimateActualMetricName,
help: 'A gauge signifying the latest actual transaction fee',
registers: [feeEstimateActualRegistry]
}) })
export const getSelfSendGaugeValue = async () => { export const getSelfSendGaugeValue = async () => {
...@@ -71,10 +102,10 @@ export const getSelfSendGaugeValue = async () => { ...@@ -71,10 +102,10 @@ export const getSelfSendGaugeValue = async () => {
} }
export const setSelfSendTxGauge = async (valueToSetTo: number) => { export const setSelfSendTxGauge = async (valueToSetTo: number) => {
console.log(`Setting ${selfSendTransactionMetricName} to ${valueToSetTo}`) console.log(`Setting ${selfSendTransactionMetricName} to ${valueToSetTo}...`)
selfSendGauge.set(valueToSetTo) selfSendGauge.set(valueToSetTo)
const pushGateway = new Pushgateway(env.PROMETHEUS_PUSHGATEWAY_URL) const pushGateway = new Pushgateway(env.PROMETHEUS_PUSHGATEWAY_URL, undefined, selfSendRegistry)
await pushGateway.pushAdd({ jobName: 'metamask_self_send_tx_count' }) await pushGateway.pushAdd({ jobName: 'metamask_self_send_tx_count' })
} }
...@@ -89,7 +120,41 @@ export const incrementSelfSendTxGauge = async (isSuccess: boolean) => { ...@@ -89,7 +120,41 @@ export const incrementSelfSendTxGauge = async (isSuccess: boolean) => {
} }
console.log( console.log(
`Current value of ${selfSendTransactionMetricName} is ${currentMetricValue}, incrementing to ${newMetricValue}` `Current value of ${selfSendTransactionMetricName} is ${currentMetricValue}, incrementing to ${newMetricValue}...`
) )
await setSelfSendTxGauge(newMetricValue) await setSelfSendTxGauge(newMetricValue)
} }
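// Pushes a fee value (in gwei) to the Pushgateway under the gauge matching the given
// transaction speed, or under the "actual" gauge for the fee actually paid.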
export const setFeeEstimationGauge = async (txSpeed: 'low' | 'medium' | 'high' | 'actual', fee: number) => {
console.log(
txSpeed !== 'actual'
? `Setting Metamask fee estimation for ${txSpeed} to ${fee}...`
: `Setting actual transaction fee to ${fee}`
)
let prometheusRegistry: Registry
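// Select the gauge to set and the registry to push; only that registry's metrics
// are sent in the pushAdd call below.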
switch (txSpeed) {
case 'low':
feeEstimateLowGauge.set(fee)
prometheusRegistry = feeEstimateLowRegistry
break
case 'medium':
feeEstimateMediumGauge.set(fee)
prometheusRegistry = feeEstimateMediumRegistry
break
case 'high':
feeEstimateHighGauge.set(fee)
prometheusRegistry = feeEstimateHighRegistry
break
case 'actual':
feeEstimateActualGauge.set(fee)
prometheusRegistry = feeEstimateActualRegistry
break
default:
throw new Error(`unsupported transaction speed given: ${txSpeed}`)
}
const pushGateway = new Pushgateway(env.PROMETHEUS_PUSHGATEWAY_URL, undefined, prometheusRegistry)
await pushGateway.pushAdd({ jobName: `metamask_self_send_tx_fee_estimation_${txSpeed}` })
}