Commit 54cfdcf7 authored by mergify[bot]'s avatar mergify[bot] Committed by GitHub

Merge branch 'develop' into engine-api-utils

parents db9aa65e a2b595c3
---
'@eth-optimism/sdk': patch
---
Updated npm dependencies to latest
---
'@eth-optimism/common-ts': patch
---
Updated npm dependencies of common-ts
......@@ -775,11 +775,8 @@ jobs:
module:
description: Go Module Name
type: string
use_http:
description: If the op-e2e package should use HTTP clients
type: string
use_external:
description: The extra-process shim (if any) that should be used
target:
description: The make target to execute
type: string
docker:
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
......@@ -791,13 +788,6 @@ jobs:
- run:
name: prep results dir
command: mkdir -p /tmp/test-results
- when:
condition: <<parameters.use_external>>
steps:
- run:
name: Build Shim
command: make -C <<parameters.use_external>>
working_directory: <<parameters.module>>
- run:
name: install geth
command: make install-geth
......@@ -807,21 +797,12 @@ jobs:
- run:
name: print go's available MIPS targets
command: go tool dist list | grep mips
- run:
name: Run all init steps for op-e2e
command: make pre-test
working_directory: <<parameters.module>>
- run:
name: run tests
command: |
command:
# Note: We don't use Circle CI test splits because we need to split by test name, not by package. There is an additional
# constraint that gotestsum does not currently (nor likely ever will) accept files from different packages when building.
# Note: -parallel must be set to match the number of cores in the resource class
export TEST_SUFFIX="<<parameters.use_external>>"
export EXTERNAL_L2="$(test -z '<<parameters.use_external>>' || echo '<<parameters.use_external>>/shim')"
OP_TESTLOG_DISABLE_COLOR=true OP_E2E_DISABLE_PARALLEL=false OP_E2E_USE_HTTP=<<parameters.use_http>> gotestsum \
--format=standard-verbose --junitfile=/tmp/test-results/<<parameters.module>>_http_<<parameters.use_http>>$TEST_SUFFIX.xml \
-- -timeout=20m -parallel=8 --externalL2 "$EXTERNAL_L2" ./...
JUNIT_FILE=/tmp/test-results/<<parameters.module>>_<<parameters.target>>.xml make <<parameters.target>>
working_directory: <<parameters.module>>
- store_test_results:
path: /tmp/test-results
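The net effect of this refactor is that each e2e job now just shells out to a Makefile target, so a CI failure can be reproduced locally with an invocation along these lines (paths illustrative):

```bash
cd op-e2e
JUNIT_FILE=/tmp/test-results/op-e2e_test-http.xml make test-http
```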
......@@ -1232,18 +1213,15 @@ workflows:
- go-e2e-test:
name: op-e2e-WS-tests
module: op-e2e
use_http: "false"
use_external: ""
target: test-ws
- go-e2e-test:
name: op-e2e-HTTP-tests
module: op-e2e
use_http: "true"
use_external: ""
target: test-http
- go-e2e-test:
name: op-e2e-WS-tests-external-geth
name: op-e2e-ext-geth-tests
module: op-e2e
use_http: "false"
use_external: "external_geth"
target: test-external-geth
- bedrock-go-tests:
requires:
- op-batcher-lint
......
......@@ -119,7 +119,7 @@ Note that these environment variables significantly speed up build time.
cd ops-bedrock
export COMPOSE_DOCKER_CLI_BUILD=1
export DOCKER_BUILDKIT=1
docker-compose build
docker compose build
```
Source code changes can have an impact on more than one container.
......@@ -127,9 +127,9 @@ Source code changes can have an impact on more than one container.
```bash
cd ops-bedrock
docker-compose down
docker-compose build
docker-compose up
docker compose down
docker compose build
docker compose up
```
**If a node process exits with exit code 137**, you may need to increase the default memory limit of Docker containers
......@@ -141,18 +141,18 @@ cd optimism
pnpm clean
pnpm build
cd ops
docker-compose down -v
docker-compose build
docker-compose up
docker compose down -v
docker compose build
docker compose up
```
#### Viewing docker container logs
By default, the `docker-compose up` command will show logs from all services, and that
By default, the `docker compose up` command will show logs from all services, and that
can be hard to filter through. In order to view the logs from a specific service, you can run:
```bash
docker-compose logs --follow <service name>
docker compose logs --follow <service name>
```
### Running tests
......
......@@ -101,13 +101,13 @@ devnet-test:
.PHONY: devnet-test
devnet-down:
@(cd ./ops-bedrock && GENESIS_TIMESTAMP=$(shell date +%s) docker-compose stop)
@(cd ./ops-bedrock && GENESIS_TIMESTAMP=$(shell date +%s) docker compose stop)
.PHONY: devnet-down
devnet-clean:
rm -rf ./packages/contracts-bedrock/deployments/devnetL1
rm -rf ./.devnet
cd ./ops-bedrock && docker-compose down
cd ./ops-bedrock && docker compose down
docker image ls 'ops-bedrock*' --format='{{.Repository}}' | xargs -r docker rmi
docker volume ls --filter name=ops-bedrock --format='{{.Name}}' | xargs -r docker volume rm
.PHONY: devnet-clean
......@@ -116,7 +116,7 @@ devnet-allocs:
PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. --allocs
devnet-logs:
@(cd ./ops-bedrock && docker-compose logs -f)
@(cd ./ops-bedrock && docker compose logs -f)
.PHONY: devnet-logs
test-unit:
......
......@@ -93,7 +93,7 @@ def main():
return
log.info('Building docker images')
run_command(['docker-compose', 'build', '--progress', 'plain'], cwd=paths.ops_bedrock_dir, env={
run_command(['docker', 'compose', 'build', '--progress', 'plain'], cwd=paths.ops_bedrock_dir, env={
'PWD': paths.ops_bedrock_dir
})
......@@ -173,7 +173,7 @@ def devnet_deploy(paths):
], cwd=paths.op_node_dir)
log.info('Starting L1.')
run_command(['docker-compose', 'up', '-d', 'l1'], cwd=paths.ops_bedrock_dir, env={
run_command(['docker', 'compose', 'up', '-d', 'l1'], cwd=paths.ops_bedrock_dir, env={
'PWD': paths.ops_bedrock_dir
})
wait_up(8545)
......@@ -196,7 +196,7 @@ def devnet_deploy(paths):
addresses = read_json(paths.addresses_json_path)
log.info('Bringing up L2.')
run_command(['docker-compose', 'up', '-d', 'l2'], cwd=paths.ops_bedrock_dir, env={
run_command(['docker', 'compose', 'up', '-d', 'l2'], cwd=paths.ops_bedrock_dir, env={
'PWD': paths.ops_bedrock_dir
})
wait_up(9545)
......@@ -208,7 +208,7 @@ def devnet_deploy(paths):
log.info(f'Using batch inbox {batch_inbox_address}')
log.info('Bringing up everything else.')
run_command(['docker-compose', 'up', '-d', 'op-node', 'op-proposer', 'op-batcher'], cwd=paths.ops_bedrock_dir, env={
run_command(['docker', 'compose', 'up', '-d', 'op-node', 'op-proposer', 'op-batcher'], cwd=paths.ops_bedrock_dir, env={
'PWD': paths.ops_bedrock_dir,
'L2OO_ADDRESS': l2_output_oracle,
'SEQUENCER_BATCH_INBOX_ADDRESS': batch_inbox_address
......
package cmd
import (
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"strings"
"github.com/ethereum-optimism/optimism/op-service/ioutil"
)
func loadJSON[X any](inputPath string) (*X, error) {
......@@ -15,18 +15,11 @@ func loadJSON[X any](inputPath string) (*X, error) {
return nil, errors.New("no path specified")
}
var f io.ReadCloser
f, err := os.OpenFile(inputPath, os.O_RDONLY, 0)
f, err := ioutil.OpenDecompressed(inputPath)
if err != nil {
return nil, fmt.Errorf("failed to open file %q: %w", inputPath, err)
}
defer f.Close()
if isGzip(inputPath) {
f, err = gzip.NewReader(f)
if err != nil {
return nil, fmt.Errorf("create gzip reader: %w", err)
}
defer f.Close()
}
var state X
if err := json.NewDecoder(f).Decode(&state); err != nil {
return nil, fmt.Errorf("failed to decode file %q: %w", inputPath, err)
......@@ -37,17 +30,12 @@ func loadJSON[X any](inputPath string) (*X, error) {
func writeJSON[X any](outputPath string, value X, outIfEmpty bool) error {
var out io.Writer
if outputPath != "" {
f, err := os.OpenFile(outputPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
f, err := ioutil.OpenCompressed(outputPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
if err != nil {
return fmt.Errorf("failed to open output file: %w", err)
}
defer f.Close()
out = f
if isGzip(outputPath) {
g := gzip.NewWriter(f)
defer g.Close()
out = g
}
} else if outIfEmpty {
out = os.Stdout
} else {
......@@ -63,7 +51,3 @@ func writeJSON[X any](outputPath string, value X, outIfEmpty bool) error {
}
return nil
}
func isGzip(path string) bool {
return strings.HasSuffix(path, ".gz")
}
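The gzip plumbing removed here is what the new `op-service/ioutil` helpers are expected to encapsulate. Below is a minimal sketch of the read side, assuming the helper gates on the `.gz` suffix exactly as the deleted `isGzip` did (the real package may differ; `OpenCompressed` would be the analogous write-side wrapper around `gzip.NewWriter`):

```go
package ioutil

import (
	"compress/gzip"
	"io"
	"os"
	"strings"
)

// gzReadCloser closes both the gzip stream and the underlying file,
// since (*gzip.Reader).Close does not close its source.
type gzReadCloser struct {
	*gzip.Reader
	file *os.File
}

func (g gzReadCloser) Close() error {
	gzErr := g.Reader.Close()
	if err := g.file.Close(); err != nil {
		return err
	}
	return gzErr
}

// OpenDecompressed opens path for reading, transparently decompressing
// the stream when the file name carries a .gz suffix.
func OpenDecompressed(path string) (io.ReadCloser, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	if !strings.HasSuffix(path, ".gz") {
		return f, nil
	}
	gr, err := gzip.NewReader(f)
	if err != nil {
		f.Close()
		return nil, err
	}
	return gzReadCloser{Reader: gr, file: f}, nil
}
```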
package cmd
import (
"context"
"fmt"
"os"
"os/exec"
......@@ -112,11 +113,15 @@ func (rk rawKey) PreimageKey() [32]byte {
}
type ProcessPreimageOracle struct {
pCl *preimage.OracleClient
hCl *preimage.HintWriter
cmd *exec.Cmd
pCl *preimage.OracleClient
hCl *preimage.HintWriter
cmd *exec.Cmd
waitErr chan error
cancelIO context.CancelCauseFunc
}
const clientPollTimeout = time.Second * 15
func NewProcessPreimageOracle(name string, args []string) (*ProcessPreimageOracle, error) {
if name == "" {
return &ProcessPreimageOracle{}, nil
......@@ -140,10 +145,18 @@ func NewProcessPreimageOracle(name string, args []string) (*ProcessPreimageOracl
pOracleRW.Reader(),
pOracleRW.Writer(),
}
// Note that the client file descriptors are not closed when the pre-image server exits.
// So we use the FilePoller to ensure that we don't get stuck in a blocking read/write.
ctx, cancelIO := context.WithCancelCause(context.Background())
preimageClientIO := preimage.NewFilePoller(ctx, pClientRW, clientPollTimeout)
hostClientIO := preimage.NewFilePoller(ctx, hClientRW, clientPollTimeout)
out := &ProcessPreimageOracle{
pCl: preimage.NewOracleClient(pClientRW),
hCl: preimage.NewHintWriter(hClientRW),
cmd: cmd,
pCl: preimage.NewOracleClient(preimageClientIO),
hCl: preimage.NewHintWriter(hostClientIO),
cmd: cmd,
waitErr: make(chan error),
cancelIO: cancelIO,
}
return out, nil
}
......@@ -166,23 +179,30 @@ func (p *ProcessPreimageOracle) Start() error {
if p.cmd == nil {
return nil
}
return p.cmd.Start()
err := p.cmd.Start()
go p.wait()
return err
}
func (p *ProcessPreimageOracle) Close() error {
if p.cmd == nil {
return nil
}
// Give the pre-image server time to exit cleanly before killing it.
time.Sleep(time.Second * 1)
_ = p.cmd.Process.Signal(os.Interrupt)
// Go 1.20 feature, to introduce later
//p.cmd.WaitDelay = time.Second * 10
return <-p.waitErr
}
func (p *ProcessPreimageOracle) wait() {
err := p.cmd.Wait()
if err, ok := err.(*exec.ExitError); ok {
if err.Success() {
return nil
}
var waitErr error
if err, ok := err.(*exec.ExitError); !ok || !err.Success() {
waitErr = err
}
return err
p.cancelIO(fmt.Errorf("%w: pre-image server has exited", waitErr))
p.waitErr <- waitErr
close(p.waitErr)
}
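// Shutdown sequencing, as sketched by Close and wait above: Close signals the
// child with SIGINT; wait observes the process exit, cancels the poller
// context so any preimage read/write blocked on the dead pipe unwinds, and
// then hands the exit error back to Close over the waitErr channel.
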
type StepFn func(proof bool) (*mipsevm.StepWitness, error)
......
FROM golang:1.19.9-alpine3.16 as builder
FROM golang:1.20.7-alpine3.18 as builder
RUN apk --no-cache add make jq bash git alpine-sdk
......@@ -16,7 +16,7 @@ RUN go mod download
RUN make build
FROM alpine:3.16
FROM alpine:3.18
RUN apk --no-cache add ca-certificates
RUN addgroup -S app && adduser -S app -G app
......
module github.com/ethereum-optimism/optimism
go 1.19
go 1.20
require (
github.com/BurntSushi/toml v1.3.2
......
docker-compose.dev.yml
.env
indexer
/indexer
FROM --platform=$BUILDPLATFORM golang:1.19.9-alpine3.16 as builder
FROM --platform=$BUILDPLATFORM golang:1.20.7-alpine3.18 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
......@@ -18,8 +18,8 @@ RUN go mod download
RUN make indexer
FROM alpine:3.16
FROM alpine:3.18
COPY --from=builder /app/indexer/indexer /usr/local/bin
CMD ["indexer", "--config", "/app/indexer/indexer.toml"]
CMD ["indexer", "all", "--config", "/app/indexer/indexer.toml"]
......@@ -6,6 +6,9 @@
### Setup env
The `indexer.toml` file stores a set of preset environment variables that can be used to run the indexer, with the exception of the network-specific `l1-rpc` and `l2-rpc` variables. `indexer.toml` serves as the default config; otherwise, a custom `.toml` config can be provided via the `--config` flag when running the application. An optional `l1-starting-height` value can be provided to specify the L1 block height to begin indexing from. Ideally this is an L1 block that holds a correlated L2 genesis commitment, and it must be less than the current L1 block height to pass validation. If no starting height is provided and the database is empty, the indexer will begin processing sequentially from L1 genesis.
### Setup polling intervals
The indexer polls and processes batches of headers from the L1 and L2 chains on a set interval. The default polling interval is 5 seconds for both chains, with a default header buffer size of 500. The polling frequency can be changed by setting the `l1-polling-interval` and `l2-polling-interval` values in the `indexer.toml` file, and the buffer size by setting the `l1-header-buffer-size` and `l2-header-buffer-size` values.
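For example, a custom config that polls every second with smaller header batches might look like this (values illustrative; the keys mirror the `ChainConfig` TOML tags):

```toml
[chain]
l1-polling-interval = 1000   # milliseconds
l2-polling-interval = 1000
l1-header-buffer-size = 100  # headers fetched per poll
l2-header-buffer-size = 100
```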
### Testing
All tests can be run with `make test` from the `/indexer` directory. This will run all unit and e2e tests.
......@@ -16,7 +19,7 @@ All tests can be ran by running `make test` from the `/indexer` directory. This
- install docker
- `cp example.env .env`
- fill in .env
- run `docker-compose up` to start the indexer vs optimism goerli network
- run `docker compose up` to start the indexer against the Optimism Goerli network
### Run indexer with go
......
package api
import (
"context"
"fmt"
"net/http"
"github.com/ethereum-optimism/optimism/indexer/api/routes"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-service/httputil"
"github.com/ethereum/go-ethereum/log"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
)
const ethereumAddressRegex = `^0x[a-fA-F0-9]{40}$`
type Api struct {
log log.Logger
Router *chi.Mux
}
func NewApi(bv database.BridgeTransfersView, logger log.Logger) *Api {
logger.Info("Initializing API...")
func NewApi(logger log.Logger, bv database.BridgeTransfersView) *Api {
r := chi.NewRouter()
h := routes.NewRoutes(logger, bv, r)
api := &Api{Router: r}
r.Use(middleware.Heartbeat("/healthz"))
r.Get("/healthz", h.HealthzHandler)
r.Get(fmt.Sprintf("/api/v0/deposits/{address:%s}", ethereumAddressRegex), h.L1DepositsHandler)
r.Get(fmt.Sprintf("/api/v0/withdrawals/{address:%s}", ethereumAddressRegex), h.L2WithdrawalsHandler)
return api
return &Api{log: logger, Router: r}
}
func (a *Api) Listen(port string) error {
return http.ListenAndServe(port, a.Router)
func (a *Api) Listen(ctx context.Context, port int) error {
a.log.Info("api server listening...", "port", port)
server := http.Server{Addr: fmt.Sprintf(":%d", port), Handler: a.Router}
err := httputil.ListenAndServeContext(ctx, &server)
if err != nil {
a.log.Error("api server stopped", "err", err)
} else {
a.log.Info("api server stopped")
}
return err
}
......@@ -77,7 +77,7 @@ func (mbv *MockBridgeTransfersView) L2BridgeWithdrawalsByAddress(address common.
}
func TestHealthz(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
api := NewApi(&MockBridgeTransfersView{}, logger)
api := NewApi(logger, &MockBridgeTransfersView{})
request, err := http.NewRequest("GET", "/healthz", nil)
assert.Nil(t, err)
......@@ -89,7 +89,7 @@ func TestHealthz(t *testing.T) {
func TestL1BridgeDepositsHandler(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
api := NewApi(&MockBridgeTransfersView{}, logger)
api := NewApi(logger, &MockBridgeTransfersView{})
request, err := http.NewRequest("GET", fmt.Sprintf("/api/v0/deposits/%s", mockAddress), nil)
assert.Nil(t, err)
......@@ -101,7 +101,7 @@ func TestL1BridgeDepositsHandler(t *testing.T) {
func TestL2BridgeWithdrawalsByAddressHandler(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
api := NewApi(&MockBridgeTransfersView{}, logger)
api := NewApi(logger, &MockBridgeTransfersView{})
request, err := http.NewRequest("GET", fmt.Sprintf("/api/v0/withdrawals/%s", mockAddress), nil)
assert.Nil(t, err)
......
......@@ -46,7 +46,7 @@ func newDepositResponse(deposits *database.L1BridgeDepositsResponse) DepositResp
},
From: deposit.L1BridgeDeposit.Tx.FromAddress.String(),
To: deposit.L1BridgeDeposit.Tx.ToAddress.String(),
Amount: deposit.L1BridgeDeposit.Tx.Amount.Int.String(),
Amount: deposit.L1BridgeDeposit.Tx.Amount.String(),
L1Token: TokenInfo{
ChainId: 1,
Address: deposit.L1BridgeDeposit.TokenPair.LocalTokenAddress.String(),
......
package routes
import (
"net/http"
)
func (h Routes) HealthzHandler(w http.ResponseWriter, r *http.Request) {
jsonResponse(w, h.Logger, "ok", http.StatusOK)
}
......@@ -60,7 +60,7 @@ func newWithdrawalResponse(withdrawals *database.L2BridgeWithdrawalsResponse) Wi
From: withdrawal.L2BridgeWithdrawal.Tx.FromAddress.String(),
To: withdrawal.L2BridgeWithdrawal.Tx.ToAddress.String(),
TransactionHash: withdrawal.L2TransactionHash.String(),
Amount: withdrawal.L2BridgeWithdrawal.Tx.Amount.Int.String(),
Amount: withdrawal.L2BridgeWithdrawal.Tx.Amount.String(),
Proof: Proof{
TransactionHash: withdrawal.ProvenL1TransactionHash.String(),
BlockTimestamp: withdrawal.L2BridgeWithdrawal.Tx.Timestamp,
......
package cli
package main
import (
"context"
"fmt"
"strconv"
"sync"
"github.com/ethereum-optimism/optimism/indexer"
"github.com/ethereum-optimism/optimism/indexer/api"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/opio"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
)
type Cli struct {
GitVersion string
GitCommit string
GitDate string
app *cli.App
Flags []cli.Flag
}
var (
ConfigFlag = &cli.StringFlag{
Name: "config",
Value: "./indexer.toml",
Aliases: []string{"c"},
Usage: "path to config file",
EnvVars: []string{"INDEXER_CONFIG"},
}
)
func runIndexer(ctx *cli.Context) error {
logger := log.NewLogger(log.ReadCLIConfig(ctx))
configPath := ctx.String(ConfigFlag.Name)
cfg, err := config.LoadConfig(logger, configPath)
log := log.NewLogger(log.ReadCLIConfig(ctx)).New("role", "indexer")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil {
logger.Error("failed to load config", "err", err)
log.Error("failed to load config", "err", err)
return err
}
db, err := database.NewDB(cfg.DB)
if err != nil {
log.Error("failed to connect to database", "err", err)
return err
}
defer db.Close()
indexer, err := indexer.NewIndexer(logger, cfg.Chain, cfg.RPCs, db)
indexer, err := indexer.NewIndexer(log, db, cfg.Chain, cfg.RPCs, cfg.Metrics)
if err != nil {
log.Error("failed to create indexer", "err", err)
return err
}
indexerCtx, indexerCancel := context.WithCancel(context.Background())
go func() {
opio.BlockOnInterrupts()
indexerCancel()
}()
return indexer.Run(indexerCtx)
return indexer.Run(ctx.Context)
}
func runApi(ctx *cli.Context) error {
logger := log.NewLogger(log.ReadCLIConfig(ctx))
configPath := ctx.String(ConfigFlag.Name)
cfg, err := config.LoadConfig(logger, configPath)
log := log.NewLogger(log.ReadCLIConfig(ctx)).New("role", "api")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil {
logger.Error("failed to load config", "err", err)
log.Error("failed to load config", "err", err)
return err
}
db, err := database.NewDB(cfg.DB)
if err != nil {
logger.Crit("Failed to connect to database", "err", err)
log.Error("failed to connect to database", "err", err)
return err
}
defer db.Close()
server := api.NewApi(db.BridgeTransfers, logger)
return server.Listen(strconv.Itoa(cfg.API.Port))
api := api.NewApi(log, db.BridgeTransfers)
return api.Listen(ctx.Context, cfg.API.Port)
}
var (
ConfigFlag = &cli.StringFlag{
Name: "config",
Value: "./indexer.toml",
Aliases: []string{"c"},
Usage: "path to config file",
EnvVars: []string{"INDEXER_CONFIG"},
}
)
func runAll(ctx *cli.Context) error {
log := log.NewLogger(log.ReadCLIConfig(ctx))
// Ensure both processes complete before returning.
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
err := runApi(ctx)
if err != nil {
log.Error("api process non-zero exit", "err", err)
}
}()
go func() {
defer wg.Done()
err := runIndexer(ctx)
if err != nil {
log.Error("indexer process non-zero exit", "err", err)
}
}()
// make an instance method on Cli called Run that runs the cli
// and returns an error
func (c *Cli) Run(args []string) error {
return c.app.Run(args)
// We purposefully return no error since the indexer and api
// have no inter-dependencies. We simply rely on the logs to
// report a non-zero exit for either process.
wg.Wait()
return nil
}
func NewCli(GitVersion string, GitCommit string, GitDate string) *Cli {
func newCli(GitCommit string, GitDate string) *cli.App {
flags := []cli.Flag{ConfigFlag}
flags = append(flags, log.CLIFlags("INDEXER")...)
app := &cli.App{
Version: fmt.Sprintf("%s-%s", GitVersion, params.VersionWithCommit(GitCommit, GitDate)),
Description: "An indexer of all optimism events with a serving api layer",
return &cli.App{
Version: params.VersionWithCommit(GitCommit, GitDate),
Description: "An indexer of all optimism events with a serving api layer",
EnableBashCompletion: true,
Commands: []*cli.Command{
{
Name: "api",
......@@ -110,11 +115,20 @@ func NewCli(GitVersion string, GitCommit string, GitDate string) *Cli {
Description: "Runs the indexing service",
Action: runIndexer,
},
{
Name: "all",
Flags: flags,
Description: "Runs both the api service and the indexing service",
Action: runAll,
},
{
Name: "version",
Description: "print version",
Action: func(ctx *cli.Context) error {
cli.ShowVersion(ctx)
return nil
},
},
},
}
return &Cli{
app: app,
Flags: flags,
}
}
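With these subcommands wired up, typical invocations look like the following (config path illustrative; `INDEXER_CONFIG` can be set instead of passing `--config`):

```bash
# run the indexer and api together, as the Dockerfile CMD now does
indexer all --config ./indexer.toml

# or run either service on its own
indexer indexer --config ./indexer.toml
indexer api --config ./indexer.toml

indexer version
```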
package main
import (
"context"
"os"
"github.com/ethereum-optimism/optimism/indexer/cli"
"github.com/ethereum-optimism/optimism/op-service/opio"
"github.com/ethereum/go-ethereum/log"
)
var (
GitVersion = ""
GitCommit = ""
GitDate = ""
GitCommit = ""
GitDate = ""
)
func main() {
app := cli.NewCli(GitVersion, GitCommit, GitDate)
// This is the root context, used to propagate
// cancellation to all spawned application-level goroutines
ctx, cancel := context.WithCancel(context.Background())
go func() {
opio.BlockOnInterrupts()
cancel()
}()
if err := app.Run(os.Args); err != nil {
log.Crit("Application failed", "message", err)
app := newCli(GitCommit, GitDate)
if err := app.RunContext(ctx, os.Args); err != nil {
log.Error("application failed", "err", err)
}
}
......@@ -11,6 +11,12 @@ import (
geth_log "github.com/ethereum/go-ethereum/log"
)
const (
// default to 5 seconds (value is in milliseconds)
defaultLoopInterval = 5000
defaultHeaderBufferSize = 500
)
// In the future, presets can just be onchain config fetched on initialization
// Config represents the `indexer.toml` file used to configure the indexer
......@@ -59,13 +65,19 @@ func (c *L1Contracts) AsSlice() ([]common.Address, error) {
// ChainConfig configures the chain being indexed
type ChainConfig struct {
// Configure known chains with the l2 chain id
// NOTE - This currently performs no lookups to extract known L1 contracts by l2 chain id
Preset int
L1Contracts L1Contracts `toml:"l1-contracts"`
// L1StartingHeight is the block height to start indexing from
L1StartingHeight uint `toml:"l1-starting-height"`
L1PollingInterval uint `toml:"l1-polling-interval"`
L2PollingInterval uint `toml:"l2-polling-interval"`
L1HeaderBufferSize uint `toml:"l1-header-buffer-size"`
L2HeaderBufferSize uint `toml:"l2-header-buffer-size"`
}
// L1StartHeight returns the block height to start indexing from
func (cc *ChainConfig) L1StartHeight() *big.Int {
return big.NewInt(int64(cc.L1StartingHeight))
}
......@@ -99,20 +111,18 @@ type MetricsConfig struct {
// LoadConfig loads the `indexer.toml` config file from a given path
func LoadConfig(logger geth_log.Logger, path string) (Config, error) {
logger.Info("Loading config file", "path", path)
var conf Config
logger.Debug("loading config", "path", path)
var conf Config
data, err := os.ReadFile(path)
if err != nil {
return conf, err
}
data = []byte(os.ExpandEnv(string(data)))
logger.Debug("Decoding config file", "data", string(data))
logger.Debug("parsed config file", "data", string(data))
if _, err := toml.Decode(string(data), &conf); err != nil {
logger.Info("Failed to decode config file", "message", err)
logger.Info("failed to decode config file", "err", err)
return conf, err
}
......@@ -125,7 +135,27 @@ func LoadConfig(logger geth_log.Logger, path string) (Config, error) {
}
}
logger.Debug("Loaded config file", conf)
// Set polling defaults if not set
if conf.Chain.L1PollingInterval == 0 {
logger.Info("setting default L1 polling interval", "interval", defaultLoopInterval)
conf.Chain.L1PollingInterval = defaultLoopInterval
}
if conf.Chain.L2PollingInterval == 0 {
logger.Info("setting default L2 polling interval", "interval", defaultLoopInterval)
conf.Chain.L2PollingInterval = defaultLoopInterval
}
if conf.Chain.L1HeaderBufferSize == 0 {
logger.Info("setting default L1 header buffer", "size", defaultHeaderBufferSize)
conf.Chain.L1HeaderBufferSize = defaultHeaderBufferSize
}
if conf.Chain.L2HeaderBufferSize == 0 {
logger.Info("setting default L2 header buffer", "size", defaultHeaderBufferSize)
conf.Chain.L2HeaderBufferSize = defaultHeaderBufferSize
}
logger.Info("loaded config")
return conf, nil
}
......@@ -79,6 +79,7 @@ func TestLoadConfig_WithoutPreset(t *testing.T) {
testData := `
[chain]
[chain.l1-contracts]
optimism-portal = "0x4205Fc579115071764c7423A4f12eDde41f106Ed"
l2-output-oracle = "0x42097868233d1aa22e815a266982f2cf17685a27"
......@@ -102,11 +103,18 @@ func TestLoadConfig_WithoutPreset(t *testing.T) {
conf, err := LoadConfig(logger, tmpfile.Name())
require.NoError(t, err)
// Enforce default values
require.Equal(t, conf.Chain.L1Contracts.OptimismPortalProxy.String(), common.HexToAddress("0x4205Fc579115071764c7423A4f12eDde41f106Ed").String())
require.Equal(t, conf.Chain.L1Contracts.L2OutputOracleProxy.String(), common.HexToAddress("0x42097868233d1aa22e815a266982f2cf17685a27").String())
require.Equal(t, conf.Chain.L1Contracts.L1CrossDomainMessengerProxy.String(), common.HexToAddress("0x420ce71c97B33Cc4729CF772ae268934F7ab5fA1").String())
require.Equal(t, conf.Chain.L1Contracts.L1StandardBridgeProxy.String(), common.HexToAddress("0x4209fc46f92E8a1c0deC1b1747d010903E884bE1").String())
require.Equal(t, conf.Chain.Preset, 0)
// Enforce polling default values
require.Equal(t, conf.Chain.L1PollingInterval, uint(5000))
require.Equal(t, conf.Chain.L2PollingInterval, uint(5000))
require.Equal(t, conf.Chain.L1HeaderBufferSize, uint(500))
require.Equal(t, conf.Chain.L2HeaderBufferSize, uint(500))
}
func TestLoadConfig_WithUnknownPreset(t *testing.T) {
......@@ -140,6 +148,37 @@ func TestLoadConfig_WithUnknownPreset(t *testing.T) {
require.Equal(t, fmt.Sprintf("unknown preset: %d", faultyPreset), err.Error())
}
func Test_LoadConfig_PollingValues(t *testing.T) {
tmpfile, err := os.CreateTemp("", "test_user_values.toml")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
defer tmpfile.Close()
testData := `
[chain]
l1-polling-interval = 1000
l2-polling-interval = 1005
l1-header-buffer-size = 100
l2-header-buffer-size = 105`
data := []byte(testData)
err = os.WriteFile(tmpfile.Name(), data, 0644)
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
err = tmpfile.Close()
require.NoError(t, err)
logger := testlog.Logger(t, log.LvlInfo)
conf, err := LoadConfig(logger, tmpfile.Name())
require.NoError(t, err)
require.Equal(t, conf.Chain.L1PollingInterval, uint(1000))
require.Equal(t, conf.Chain.L2PollingInterval, uint(1005))
require.Equal(t, conf.Chain.L1HeaderBufferSize, uint(100))
require.Equal(t, conf.Chain.L2HeaderBufferSize, uint(105))
}
func Test_AsSliceSuccess(t *testing.T) {
// error cases are intentionally ignored for testing since they can only be
// generated when the L1Contracts struct is developer modified to hold a non-address var field
......
......@@ -20,7 +20,7 @@ import (
type BlockHeader struct {
Hash common.Hash `gorm:"primaryKey;serializer:bytes"`
ParentHash common.Hash `gorm:"serializer:bytes"`
Number U256
Number *big.Int `gorm:"serializer:u256"`
Timestamp uint64
RLPHeader *RLPHeader `gorm:"serializer:rlp;column:rlp_bytes"`
......@@ -30,7 +30,7 @@ func BlockHeaderFromHeader(header *types.Header) BlockHeader {
return BlockHeader{
Hash: header.Hash(),
ParentHash: header.ParentHash,
Number: U256{Int: header.Number},
Number: header.Number,
Timestamp: header.Time,
RLPHeader: (*RLPHeader)(header),
......@@ -58,8 +58,8 @@ type LegacyStateBatch struct {
type OutputProposal struct {
OutputRoot common.Hash `gorm:"primaryKey;serializer:bytes"`
L2OutputIndex U256
L2BlockNumber U256
L2OutputIndex *big.Int `gorm:"serializer:u256"`
L2BlockNumber *big.Int `gorm:"serializer:u256"`
L1ContractEventGUID uuid.UUID
}
......@@ -165,7 +165,7 @@ func (db *blocksDB) LatestCheckpointedOutput() (*OutputProposal, error) {
func (db *blocksDB) OutputProposal(index *big.Int) (*OutputProposal, error) {
var outputProposal OutputProposal
result := db.gorm.Where(&OutputProposal{L2OutputIndex: U256{Int: index}}).Take(&outputProposal)
result := db.gorm.Where(&OutputProposal{L2OutputIndex: index}).Take(&outputProposal)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......
......@@ -3,6 +3,7 @@ package database
import (
"errors"
"fmt"
"math/big"
"gorm.io/gorm"
......@@ -17,13 +18,13 @@ import (
type BridgeMessage struct {
MessageHash common.Hash `gorm:"primaryKey;serializer:bytes"`
Nonce U256
Nonce *big.Int `gorm:"serializer:u256"`
SentMessageEventGUID uuid.UUID
RelayedMessageEventGUID *uuid.UUID
Tx Transaction `gorm:"embedded"`
GasLimit U256
GasLimit *big.Int `gorm:"serializer:u256"`
}
type L1BridgeMessage struct {
......
......@@ -3,6 +3,7 @@ package database
import (
"errors"
"fmt"
"math/big"
"github.com/google/uuid"
"gorm.io/gorm"
......@@ -17,8 +18,8 @@ import (
type Transaction struct {
FromAddress common.Address `gorm:"serializer:bytes"`
ToAddress common.Address `gorm:"serializer:bytes"`
Amount U256
Data Bytes `gorm:"serializer:bytes"`
Amount *big.Int `gorm:"serializer:u256"`
Data Bytes `gorm:"serializer:bytes"`
Timestamp uint64
}
......@@ -28,12 +29,12 @@ type L1TransactionDeposit struct {
InitiatedL1EventGUID uuid.UUID
Tx Transaction `gorm:"embedded"`
GasLimit U256
GasLimit *big.Int `gorm:"serializer:u256"`
}
type L2TransactionWithdrawal struct {
WithdrawalHash common.Hash `gorm:"serializer:bytes;primaryKey"`
Nonce U256
Nonce *big.Int `gorm:"serializer:u256"`
InitiatedL2EventGUID uuid.UUID
ProvenL1EventGUID *uuid.UUID
......@@ -41,7 +42,7 @@ type L2TransactionWithdrawal struct {
Succeeded *bool
Tx Transaction `gorm:"embedded"`
GasLimit U256
GasLimit *big.Int `gorm:"serializer:u256"`
}
type BridgeTransactionsView interface {
......
package serializers
import (
"context"
"fmt"
"math/big"
"reflect"
"github.com/jackc/pgtype"
"gorm.io/gorm/schema"
)
var (
big10 = big.NewInt(10)
u256BigIntOverflow = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
)
type U256Serializer struct{}
func init() {
schema.RegisterSerializer("u256", U256Serializer{})
}
func (U256Serializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
if dbValue == nil {
return nil
} else if field.FieldType != reflect.TypeOf((*big.Int)(nil)) {
return fmt.Errorf("can only deserialize into a *big.Int: %T", field.FieldType)
}
numeric := new(pgtype.Numeric)
err := numeric.Scan(dbValue)
if err != nil {
return err
}
bigInt := numeric.Int
if numeric.Exp > 0 {
factor := new(big.Int).Exp(big10, big.NewInt(int64(numeric.Exp)), nil)
bigInt.Mul(bigInt, factor)
}
if bigInt.Cmp(u256BigIntOverflow) >= 0 {
return fmt.Errorf("deserialized number larger than u256 can hold: %s", bigInt)
}
field.ReflectValueOf(ctx, dst).Set(reflect.ValueOf(bigInt))
return nil
}
func (U256Serializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
if fieldValue == nil || (field.FieldType.Kind() == reflect.Pointer && reflect.ValueOf(fieldValue).IsNil()) {
return nil, nil
} else if field.FieldType != reflect.TypeOf((*big.Int)(nil)) {
return nil, fmt.Errorf("can only serialize a *big.Int: %T", field.FieldType)
}
numeric := pgtype.Numeric{Int: fieldValue.(*big.Int), Status: pgtype.Present}
return numeric.Value()
}
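Models opt into this serializer through the struct tag registered in `init`. A minimal sketch (hypothetical model; the indexer's real models use the same tag on their `*big.Int` fields):

```go
package database

import "math/big"

// ExampleTransfer shows the opt-in: the tag value must match the name
// passed to schema.RegisterSerializer ("u256").
type ExampleTransfer struct {
	Amount *big.Int `gorm:"serializer:u256"`
}
```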
package database
import (
"database/sql/driver"
"errors"
"io"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/jackc/pgtype"
)
var u256BigIntOverflow = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
var big10 = big.NewInt(10)
var ErrU256Overflow = errors.New("number exceeds u256")
var ErrU256ContainsDecimal = errors.New("number contains fractional digits")
var ErrU256Null = errors.New("number cannot be null")
// U256 is a wrapper over big.Int that conforms to the database U256 numeric domain type
type U256 struct {
Int *big.Int
}
// Scan implements the database/sql Scanner interface.
func (u256 *U256) Scan(src interface{}) error {
// deserialize as a numeric
var numeric pgtype.Numeric
err := numeric.Scan(src)
if err != nil {
return err
} else if numeric.Exp < 0 {
return ErrU256ContainsDecimal
} else if numeric.Status == pgtype.Null {
return ErrU256Null
}
// factor in the powers of 10
num := numeric.Int
if numeric.Exp > 0 {
factor := new(big.Int).Exp(big10, big.NewInt(int64(numeric.Exp)), nil)
num.Mul(num, factor)
}
// check bounds before setting the u256
if num.Cmp(u256BigIntOverflow) >= 0 {
return ErrU256Overflow
} else {
u256.Int = num
}
return nil
}
// Value implements the database/sql/driver Valuer interface.
func (u256 U256) Value() (driver.Value, error) {
// check bounds
if u256.Int == nil {
return nil, ErrU256Null
} else if u256.Int.Cmp(u256BigIntOverflow) >= 0 {
return nil, ErrU256Overflow
}
// simply encode as a numeric with no Exp set (non-decimal)
numeric := pgtype.Numeric{Int: u256.Int, Status: pgtype.Present}
return numeric.Value()
}
// Wrapper over types.Header such that we can get an RLP
// encoder over it via a `types.Block` wrapper
type RLPHeader types.Header
......@@ -94,6 +37,9 @@ func (h *RLPHeader) Hash() common.Hash {
return h.Header().Hash()
}
// Type definition for []byte to conform to the
// interface expected by the `bytes` serializer
type Bytes []byte
func (b Bytes) Bytes() []byte {
......
......@@ -59,9 +59,9 @@ func TestE2EBridgeL1CrossDomainMessenger(t *testing.T) {
require.NotNil(t, sentMessage)
require.NotNil(t, sentMessage.SentMessageEventGUID)
require.Equal(t, depositInfo.DepositTx.SourceHash, sentMessage.TransactionSourceHash)
require.Equal(t, nonce.Uint64(), sentMessage.Nonce.Int.Uint64())
require.Equal(t, uint64(100_000), sentMessage.GasLimit.Int.Uint64())
require.Equal(t, big.NewInt(params.Ether), sentMessage.Tx.Amount.Int)
require.Equal(t, nonce.Uint64(), sentMessage.Nonce.Uint64())
require.Equal(t, uint64(100_000), sentMessage.GasLimit.Uint64())
require.Equal(t, uint64(params.Ether), sentMessage.Tx.Amount.Uint64())
require.Equal(t, aliceAddr, sentMessage.Tx.FromAddress)
require.Equal(t, aliceAddr, sentMessage.Tx.ToAddress)
require.ElementsMatch(t, calldata, sentMessage.Tx.Data)
......@@ -146,9 +146,9 @@ func TestE2EBridgeL2CrossDomainMessenger(t *testing.T) {
require.NotNil(t, sentMessage)
require.NotNil(t, sentMessage.SentMessageEventGUID)
require.Equal(t, withdrawalHash, sentMessage.TransactionWithdrawalHash)
require.Equal(t, nonce.Uint64(), sentMessage.Nonce.Int.Uint64())
require.Equal(t, uint64(100_000), sentMessage.GasLimit.Int.Uint64())
require.Equal(t, big.NewInt(params.Ether), sentMessage.Tx.Amount.Int)
require.Equal(t, nonce.Uint64(), sentMessage.Nonce.Uint64())
require.Equal(t, uint64(100_000), sentMessage.GasLimit.Uint64())
require.Equal(t, uint64(params.Ether), sentMessage.Tx.Amount.Uint64())
require.Equal(t, aliceAddr, sentMessage.Tx.FromAddress)
require.Equal(t, aliceAddr, sentMessage.Tx.ToAddress)
require.ElementsMatch(t, calldata, sentMessage.Tx.Data)
......
......@@ -54,8 +54,8 @@ func TestE2EBridgeTransactionsOptimismPortalDeposits(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, deposit)
require.Equal(t, depositL2TxHash, deposit.L2TransactionHash)
require.Equal(t, big.NewInt(100_000), deposit.GasLimit.Int)
require.Equal(t, big.NewInt(params.Ether), deposit.Tx.Amount.Int)
require.Equal(t, uint64(100_000), deposit.GasLimit.Uint64())
require.Equal(t, uint64(params.Ether), deposit.Tx.Amount.Uint64())
require.Equal(t, aliceAddr, deposit.Tx.FromAddress)
require.Equal(t, aliceAddr, deposit.Tx.ToAddress)
require.ElementsMatch(t, calldata, deposit.Tx.Data)
......@@ -113,9 +113,9 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserWithdrawal(t *testing.T) {
withdraw, err := testSuite.DB.BridgeTransactions.L2TransactionWithdrawal(withdrawalHash)
require.NoError(t, err)
require.NotNil(t, withdraw)
require.Equal(t, msgPassed.Nonce.Uint64(), withdraw.Nonce.Int.Uint64())
require.Equal(t, big.NewInt(100_000), withdraw.GasLimit.Int)
require.Equal(t, big.NewInt(params.Ether), withdraw.Tx.Amount.Int)
require.Equal(t, msgPassed.Nonce.Uint64(), withdraw.Nonce.Uint64())
require.Equal(t, uint64(100_000), withdraw.GasLimit.Uint64())
require.Equal(t, uint64(params.Ether), withdraw.Tx.Amount.Uint64())
require.Equal(t, aliceAddr, withdraw.Tx.FromAddress)
require.Equal(t, aliceAddr, withdraw.Tx.ToAddress)
require.ElementsMatch(t, calldata, withdraw.Tx.Data)
......
......@@ -63,7 +63,7 @@ func TestE2EBridgeTransfersStandardBridgeETHDeposit(t *testing.T) {
require.Equal(t, depositInfo.DepositTx.SourceHash, deposit.TransactionSourceHash)
require.Equal(t, predeploys.LegacyERC20ETHAddr, deposit.TokenPair.LocalTokenAddress)
require.Equal(t, predeploys.LegacyERC20ETHAddr, deposit.TokenPair.RemoteTokenAddress)
require.Equal(t, big.NewInt(params.Ether), deposit.Tx.Amount.Int)
require.Equal(t, uint64(params.Ether), deposit.Tx.Amount.Uint64())
require.Equal(t, aliceAddr, deposit.Tx.FromAddress)
require.Equal(t, aliceAddr, deposit.Tx.ToAddress)
require.Equal(t, byte(1), deposit.Tx.Data[0])
......@@ -227,13 +227,15 @@ func TestE2EBridgeTransfersOptimismPortalETHReceive(t *testing.T) {
aliceDeposits, err := testSuite.DB.BridgeTransfers.L1BridgeDepositsByAddress(aliceAddr, "", 0)
require.NoError(t, err)
require.NotNil(t, aliceDeposits)
require.Len(t, aliceDeposits.Deposits, 1)
require.Equal(t, portalDepositTx.Hash(), aliceDeposits.Deposits[0].L1TransactionHash)
deposit := aliceDeposits.Deposits[0].L1BridgeDeposit
require.Equal(t, depositInfo.DepositTx.SourceHash, deposit.TransactionSourceHash)
require.Equal(t, predeploys.LegacyERC20ETHAddr, deposit.TokenPair.LocalTokenAddress)
require.Equal(t, predeploys.LegacyERC20ETHAddr, deposit.TokenPair.RemoteTokenAddress)
require.Equal(t, big.NewInt(params.Ether), deposit.Tx.Amount.Int)
require.Equal(t, uint64(params.Ether), deposit.Tx.Amount.Uint64())
require.Equal(t, aliceAddr, deposit.Tx.FromAddress)
require.Equal(t, aliceAddr, deposit.Tx.ToAddress)
require.Len(t, deposit.Tx.Data, 0)
......@@ -304,7 +306,7 @@ func TestE2EBridgeTransfersStandardBridgeETHWithdrawal(t *testing.T) {
require.Equal(t, withdrawalHash, withdrawal.TransactionWithdrawalHash)
require.Equal(t, predeploys.LegacyERC20ETHAddr, withdrawal.TokenPair.LocalTokenAddress)
require.Equal(t, predeploys.LegacyERC20ETHAddr, withdrawal.TokenPair.RemoteTokenAddress)
require.Equal(t, big.NewInt(params.Ether), withdrawal.Tx.Amount.Int)
require.Equal(t, uint64(params.Ether), withdrawal.Tx.Amount.Uint64())
require.Equal(t, aliceAddr, withdrawal.Tx.FromAddress)
require.Equal(t, aliceAddr, withdrawal.Tx.ToAddress)
require.Equal(t, byte(1), withdrawal.Tx.Data[0])
......@@ -339,7 +341,7 @@ func TestE2EBridgeTransfersStandardBridgeETHWithdrawal(t *testing.T) {
require.NotNil(t, crossDomainBridgeMessage.RelayedMessageEventGUID)
}
func TestE2EBridgeTransfersL2ToL1MessagePasserReceive(t *testing.T) {
func TestE2EBridgeTransfersL2ToL1MessagePasserETHReceive(t *testing.T) {
testSuite := createE2ETestSuite(t)
optimismPortal, err := bindings.NewOptimismPortal(testSuite.OpCfg.L1Deployments.OptimismPortalProxy, testSuite.L1Client)
......@@ -376,6 +378,7 @@ func TestE2EBridgeTransfersL2ToL1MessagePasserReceive(t *testing.T) {
aliceWithdrawals, err := testSuite.DB.BridgeTransfers.L2BridgeWithdrawalsByAddress(aliceAddr, "", 0)
require.NoError(t, err)
require.Len(t, aliceWithdrawals.Withdrawals, 1)
require.Equal(t, l2ToL1MessagePasserWithdrawTx.Hash().String(), aliceWithdrawals.Withdrawals[0].L2TransactionHash.String())
msgPassed, err := withdrawals.ParseMessagePassed(l2ToL1WithdrawReceipt)
......@@ -387,7 +390,7 @@ func TestE2EBridgeTransfersL2ToL1MessagePasserReceive(t *testing.T) {
require.Equal(t, withdrawalHash, withdrawal.TransactionWithdrawalHash)
require.Equal(t, predeploys.LegacyERC20ETHAddr, withdrawal.TokenPair.LocalTokenAddress)
require.Equal(t, predeploys.LegacyERC20ETHAddr, withdrawal.TokenPair.RemoteTokenAddress)
require.Equal(t, big.NewInt(params.Ether), withdrawal.Tx.Amount.Int)
require.Equal(t, uint64(params.Ether), withdrawal.Tx.Amount.Uint64())
require.Equal(t, aliceAddr, withdrawal.Tx.FromAddress)
require.Equal(t, aliceAddr, withdrawal.Tx.ToAddress)
require.Len(t, withdrawal.Tx.Data, 0)
......
......@@ -40,19 +40,19 @@ func TestE2EETL(t *testing.T) {
l2Header, err := testSuite.DB.Blocks.L2LatestBlockHeader()
require.NoError(t, err)
return (l1Header != nil && l1Header.Number.Int.Uint64() >= l1Height) && (l2Header != nil && l2Header.Number.Int.Uint64() >= 9), nil
return (l1Header != nil && l1Header.Number.Uint64() >= l1Height) && (l2Header != nil && l2Header.Number.Uint64() >= 9), nil
}))
t.Run("indexes all L2 blocks", func(t *testing.T) {
latestL2Header, err := testSuite.DB.Blocks.L2LatestBlockHeader()
require.NoError(t, err)
require.NotNil(t, latestL2Header)
require.True(t, latestL2Header.Number.Int.Uint64() >= 9)
require.True(t, latestL2Header.Number.Uint64() >= 9)
for i := int64(0); i < 10; i++ {
height := big.NewInt(i)
indexedHeader, err := testSuite.DB.Blocks.L2BlockHeaderWithFilter(database.BlockHeader{Number: database.U256{Int: height}})
indexedHeader, err := testSuite.DB.Blocks.L2BlockHeaderWithFilter(database.BlockHeader{Number: height})
require.NoError(t, err)
require.NotNil(t, indexedHeader)
......@@ -60,7 +60,7 @@ func TestE2EETL(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, indexedHeader)
require.Equal(t, header.Number.Int64(), indexedHeader.Number.Int.Int64())
require.Equal(t, header.Number.Int64(), indexedHeader.Number.Int64())
require.Equal(t, header.Hash(), indexedHeader.Hash)
require.Equal(t, header.ParentHash, indexedHeader.ParentHash)
require.Equal(t, header.Time, indexedHeader.Timestamp)
......@@ -144,7 +144,7 @@ func TestE2EETL(t *testing.T) {
require.NoError(t, err)
require.Equal(t, block.Hash(), l1BlockHeader.Hash)
require.Equal(t, block.ParentHash(), l1BlockHeader.ParentHash)
require.Equal(t, block.Number(), l1BlockHeader.Number.Int)
require.Equal(t, block.Number().Uint64(), l1BlockHeader.Number.Uint64())
require.Equal(t, block.Time(), l1BlockHeader.Timestamp)
// ensure the right rlp encoding is stored. checking the hashes
......
......@@ -52,6 +52,7 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
opCfg.DeployConfig.FinalizationPeriodSeconds = 2
opSys, err := opCfg.Start(t)
require.NoError(t, err)
t.Cleanup(func() { opSys.Close() })
// E2E tests can run on the order of minutes. Once
// the system is running, mark this test for Parallel execution
......@@ -59,7 +60,6 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
// Indexer Configuration and Start
indexerCfg := config.Config{
DB: config.DBConfig{
Host: "127.0.0.1",
Port: 5432,
......@@ -71,6 +71,8 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
L2RPC: opSys.EthInstances["sequencer"].HTTPEndpoint(),
},
Chain: config.ChainConfig{
L1PollingInterval: 1000,
L2PollingInterval: 1000,
L1Contracts: config.L1Contracts{
OptimismPortalProxy: opCfg.L1Deployments.OptimismPortalProxy,
L2OutputOracleProxy: opCfg.L1Deployments.L2OutputOracleProxy,
......@@ -78,30 +80,26 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
L1StandardBridgeProxy: opCfg.L1Deployments.L1StandardBridgeProxy,
},
},
Metrics: config.MetricsConfig{
Host: "127.0.0.1",
Port: 0,
},
}
db, err := database.NewDB(indexerCfg.DB)
require.NoError(t, err)
indexer, err := indexer.NewIndexer(logger, indexerCfg.Chain, indexerCfg.RPCs, db)
t.Cleanup(func() { db.Close() })
indexer, err := indexer.NewIndexer(logger, db, indexerCfg.Chain, indexerCfg.RPCs, indexerCfg.Metrics)
require.NoError(t, err)
indexerStoppedCh := make(chan interface{}, 1)
indexerCtx, indexerStop := context.WithCancel(context.Background())
t.Cleanup(func() { indexerStop() })
go func() {
err := indexer.Run(indexerCtx)
require.NoError(t, err)
indexerStoppedCh <- nil
}()
t.Cleanup(func() {
indexerStop()
<-indexerStoppedCh
indexer.Cleanup()
db.Close()
opSys.Close()
})
return E2ETestSuite{
t: t,
DB: db,
......
......@@ -3,6 +3,7 @@ package etl
import (
"context"
"errors"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/indexer/node"
......@@ -13,21 +14,22 @@ import (
"github.com/ethereum/go-ethereum/log"
)
const (
// NOTE - These values can be made configurable to allow for more fine-grained control
// Additionally, a default interval of 5 seconds may be too slow for reading L2 blocks given
// the current rate of L2 block production on OP Stack chains (2 seconds per block)
defaultLoopInterval = 5 * time.Second
defaultHeaderBufferSize = 500
)
type Config struct {
LoopIntervalMsec uint
HeaderBufferSize uint
StartHeight *big.Int
}
type ETL struct {
log log.Logger
log log.Logger
metrics Metricer
headerTraversal *node.HeaderTraversal
ethClient *ethclient.Client
contracts []common.Address
loopInterval time.Duration
headerBufferSize uint64
headerTraversal *node.HeaderTraversal
ethClient *ethclient.Client
contracts []common.Address
etlBatches chan ETLBatch
}
......@@ -43,11 +45,14 @@ type ETLBatch struct {
func (etl *ETL) Start(ctx context.Context) error {
done := ctx.Done()
pollTicker := time.NewTicker(defaultLoopInterval)
pollTicker := time.NewTicker(etl.loopInterval)
defer pollTicker.Stop()
etl.log.Info("starting etl...")
// A reference that'll stay populated between intervals
// in the event of failures in order to retry.
var headers []types.Header
etl.log.Info("starting etl...")
for {
select {
case <-done:
......@@ -55,61 +60,74 @@ func (etl *ETL) Start(ctx context.Context) error {
return nil
case <-pollTicker.C:
if len(headers) == 0 {
newHeaders, err := etl.headerTraversal.NextFinalizedHeaders(defaultHeaderBufferSize)
done := etl.metrics.RecordInterval()
if len(headers) > 0 {
etl.log.Info("retrying previous batch")
} else {
newHeaders, err := etl.headerTraversal.NextFinalizedHeaders(etl.headerBufferSize)
if err != nil {
etl.log.Error("error querying for headers", "err", err)
continue
}
if len(newHeaders) == 0 {
// Logged as an error since this loop should be operating at a longer interval than the provider
etl.log.Error("no new headers. processor unexpectedly at head...")
continue
} else if len(newHeaders) == 0 {
etl.log.Warn("no new headers. processor unexpectedly at head...")
}
headers = newHeaders
} else {
etl.log.Info("retrying previous batch")
etl.metrics.RecordBatchHeaders(len(newHeaders))
}
firstHeader := headers[0]
lastHeader := headers[len(headers)-1]
batchLog := etl.log.New("batch_start_block_number", firstHeader.Number, "batch_end_block_number", lastHeader.Number)
batchLog.Info("extracting batch", "size", len(headers))
headerMap := make(map[common.Hash]*types.Header, len(headers))
for i := range headers {
headerMap[headers[i].Hash()] = &headers[i]
// only clear the reference if we were able to process this batch
err := etl.processBatch(headers)
if err == nil {
headers = nil
}
headersWithLog := make(map[common.Hash]bool, len(headers))
logFilter := ethereum.FilterQuery{FromBlock: firstHeader.Number, ToBlock: lastHeader.Number, Addresses: etl.contracts}
logs, err := etl.ethClient.FilterLogs(context.Background(), logFilter)
if err != nil {
batchLog.Info("unable to extract logs within batch", "err", err)
continue // spin and try again
}
done(err)
}
}
}
for i := range logs {
if _, ok := headerMap[logs[i].BlockHash]; !ok {
// NOTE: This is definitely an error state if none of the headers were re-orged out in between
// the block and log retrieval operations. However, we need to gracefully handle reorgs
batchLog.Error("log found with block hash not in the batch", "block_hash", logs[i].BlockHash, "log_index", logs[i].Index)
return errors.New("parsed log with a block hash not in the fetched batch")
}
headersWithLog[logs[i].BlockHash] = true
}
func (etl *ETL) processBatch(headers []types.Header) error {
if len(headers) == 0 {
return nil
}
if len(logs) > 0 {
batchLog.Info("detected logs", "size", len(logs))
}
firstHeader, lastHeader := headers[0], headers[len(headers)-1]
batchLog := etl.log.New("batch_start_block_number", firstHeader.Number, "batch_end_block_number", lastHeader.Number)
batchLog.Info("extracting batch", "size", len(headers))
// create a new reference such that subsequent changes to `headers` do not affect the reference
headersRef := headers
batch := ETLBatch{Logger: batchLog, Headers: headersRef, HeaderMap: headerMap, Logs: logs, HeadersWithLog: headersWithLog}
etl.metrics.RecordBatchLatestHeight(lastHeader.Number)
headerMap := make(map[common.Hash]*types.Header, len(headers))
for i := range headers {
header := headers[i]
headerMap[header.Hash()] = &header
}
headersWithLog := make(map[common.Hash]bool, len(headers))
logFilter := ethereum.FilterQuery{FromBlock: firstHeader.Number, ToBlock: lastHeader.Number, Addresses: etl.contracts}
logs, err := etl.ethClient.FilterLogs(context.Background(), logFilter)
if err != nil {
batchLog.Info("unable to extract logs", "err", err)
return err
}
if len(logs) > 0 {
batchLog.Info("detected logs", "size", len(logs))
}
headers = nil
etl.etlBatches <- batch
for i := range logs {
log := logs[i]
if _, ok := headerMap[log.BlockHash]; !ok {
// NOTE: This is definitely an error state if none of the headers were re-orged out in between
// the block and log retrieval operations. However, we need to gracefully handle reorgs
batchLog.Error("log found with block hash not in the batch", "block_hash", logs[i].BlockHash, "log_index", logs[i].Index)
return errors.New("parsed log with a block hash not in the batch")
}
etl.metrics.RecordBatchLog(log.Address)
headersWithLog[log.BlockHash] = true
}
// ensure we use unique downstream references for the etl batch
headersRef := headers
etl.etlBatches <- ETLBatch{Logger: batchLog, Headers: headersRef, HeaderMap: headerMap, Logs: logs, HeadersWithLog: headersWithLog}
return nil
}
......@@ -3,7 +3,7 @@ package etl
import (
"context"
"fmt"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
......@@ -21,8 +21,7 @@ type L1ETL struct {
// NewL1ETL creates a new L1ETL instance that will start indexing from different starting points
// depending on the state of the database and the supplied start height.
func NewL1ETL(log log.Logger, db *database.DB, client node.EthClient, startHeight *big.Int,
contracts config.L1Contracts) (*L1ETL, error) {
func NewL1ETL(cfg Config, log log.Logger, db *database.DB, metrics Metrics, client node.EthClient, contracts config.L1Contracts) (*L1ETL, error) {
log = log.New("etl", "l1")
latestHeader, err := db.Blocks.L1LatestBlockHeader()
......@@ -38,12 +37,12 @@ func NewL1ETL(log log.Logger, db *database.DB, client node.EthClient, startHeigh
// Determine the starting height for traversal
var fromHeader *types.Header
if latestHeader != nil {
log.Info("detected last indexed block", "number", latestHeader.Number.Int, "hash", latestHeader.Hash)
log.Info("detected last indexed block", "number", latestHeader.Number, "hash", latestHeader.Hash)
fromHeader = latestHeader.RLPHeader.Header()
} else if startHeight.BitLen() > 0 {
log.Info("no indexed state in storage, starting from supplied L1 height", "height", startHeight.String())
header, err := client.BlockHeaderByNumber(startHeight)
} else if cfg.StartHeight.BitLen() > 0 {
log.Info("no indexed state in storage, starting from supplied L1 height", "height", cfg.StartHeight.String())
header, err := client.BlockHeaderByNumber(cfg.StartHeight)
if err != nil {
return nil, fmt.Errorf("could not fetch starting block header: %w", err)
}
......@@ -58,7 +57,11 @@ func NewL1ETL(log log.Logger, db *database.DB, client node.EthClient, startHeigh
// will be able to keep up with the rate of incoming batches
etlBatches := make(chan ETLBatch)
etl := ETL{
loopInterval: time.Duration(cfg.LoopIntervalMsec) * time.Millisecond,
headerBufferSize: uint64(cfg.HeaderBufferSize),
log: log,
metrics: metrics.newMetricer("l1"),
headerTraversal: node.NewHeaderTraversal(client, fromHeader),
ethClient: client.GethEthClient(),
contracts: cSlice,
......@@ -79,16 +82,14 @@ func (l1Etl *L1ETL) Start(ctx context.Context) error {
case err := <-errCh:
return err
// Index incoming batches
// Index incoming batches (only L1 blocks that have an emitted log)
case batch := <-l1Etl.etlBatches:
// Pull out only L1 blocks that have emitted a log ( <= batch.Headers )
l1BlockHeaders := make([]database.L1BlockHeader, 0, len(batch.Headers))
for i := range batch.Headers {
if _, ok := batch.HeadersWithLog[batch.Headers[i].Hash()]; ok {
l1BlockHeaders = append(l1BlockHeaders, database.L1BlockHeader{BlockHeader: database.BlockHeaderFromHeader(&batch.Headers[i])})
}
}
if len(l1BlockHeaders) == 0 {
batch.Logger.Info("no l1 blocks with logs in batch")
continue
......@@ -102,29 +103,28 @@ func (l1Etl *L1ETL) Start(ctx context.Context) error {
// Continually try to persist this batch. If it fails after 10 attempts, we simply error out
retryStrategy := &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250}
_, err := retry.Do[interface{}](ctx, 10, retryStrategy, func() (interface{}, error) {
err := l1Etl.db.Transaction(func(tx *database.DB) error {
if _, err := retry.Do[interface{}](ctx, 10, retryStrategy, func() (interface{}, error) {
if err := l1Etl.db.Transaction(func(tx *database.DB) error {
if err := tx.Blocks.StoreL1BlockHeaders(l1BlockHeaders); err != nil {
return err
}
// we must have logs if we have l1 blocks
if err := tx.ContractEvents.StoreL1ContractEvents(l1ContractEvents); err != nil {
return err
}
return nil
})
if err != nil {
}); err != nil {
batch.Logger.Error("unable to persist batch", "err", err)
return nil, err
}
// a-ok! Can merge with the above block but being explicit
return nil, nil
})
l1Etl.ETL.metrics.RecordIndexedHeaders(len(l1BlockHeaders))
l1Etl.ETL.metrics.RecordIndexedLatestHeight(l1BlockHeaders[len(l1BlockHeaders)-1].Number)
l1Etl.ETL.metrics.RecordIndexedLogs(len(l1ContractEvents))
if err != nil {
// a-ok!
return nil, nil
}); err != nil {
return err
}
......
......@@ -4,6 +4,7 @@ import (
"math/big"
"github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/mock"
......@@ -17,6 +18,8 @@ import (
)
func Test_L1ETL_Construction(t *testing.T) {
etlMetrics := NewMetrics(metrics.NewRegistry())
type testSuite struct {
db *database.MockDB
client *node.MockEthClient
......@@ -98,8 +101,9 @@ func Test_L1ETL_Construction(t *testing.T) {
ts := test.construction()
logger := log.NewLogger(log.DefaultCLIConfig())
cfg := Config{StartHeight: ts.start}
etl, err := NewL1ETL(logger, ts.db.DB, ts.client, ts.start, ts.contracts)
etl, err := NewL1ETL(cfg, logger, ts.db.DB, etlMetrics, ts.client, ts.contracts)
test.assertion(etl, err)
})
}
......
......@@ -2,6 +2,7 @@ package etl
import (
"context"
"time"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/node"
......@@ -18,7 +19,7 @@ type L2ETL struct {
db *database.DB
}
func NewL2ETL(log log.Logger, db *database.DB, client node.EthClient) (*L2ETL, error) {
func NewL2ETL(cfg Config, log log.Logger, db *database.DB, metrics Metrics, client node.EthClient) (*L2ETL, error) {
log = log.New("etl", "l2")
// allow predeploys to be overridable
......@@ -35,7 +36,7 @@ func NewL2ETL(log log.Logger, db *database.DB, client node.EthClient) (*L2ETL, e
var fromHeader *types.Header
if latestHeader != nil {
log.Info("detected last indexed block", "number", latestHeader.Number.Int, "hash", latestHeader.Hash)
log.Info("detected last indexed block", "number", latestHeader.Number, "hash", latestHeader.Hash)
fromHeader = latestHeader.RLPHeader.Header()
} else {
log.Info("no indexed state, starting from genesis")
......@@ -43,7 +44,11 @@ func NewL2ETL(log log.Logger, db *database.DB, client node.EthClient) (*L2ETL, e
etlBatches := make(chan ETLBatch)
etl := ETL{
loopInterval: time.Duration(cfg.LoopIntervalMsec) * time.Millisecond,
headerBufferSize: uint64(cfg.HeaderBufferSize),
log: log,
metrics: metrics.newMetricer("l2"),
headerTraversal: node.NewHeaderTraversal(client, fromHeader),
ethClient: client.GethEthClient(),
contracts: l2Contracts,
......@@ -64,9 +69,8 @@ func (l2Etl *L2ETL) Start(ctx context.Context) error {
case err := <-errCh:
return err
// Index incoming batches
// Index incoming batches (all L2 Blocks)
case batch := <-l2Etl.etlBatches:
// We're indexing every L2 block.
l2BlockHeaders := make([]database.L2BlockHeader, len(batch.Headers))
for i := range batch.Headers {
l2BlockHeaders[i] = database.L2BlockHeader{BlockHeader: database.BlockHeaderFromHeader(&batch.Headers[i])}
......@@ -78,10 +82,10 @@ func (l2Etl *L2ETL) Start(ctx context.Context) error {
l2ContractEvents[i] = database.L2ContractEvent{ContractEvent: database.ContractEventFromLog(&batch.Logs[i], timestamp)}
}
// Continually try to persist this batch. If it fails after 5 attempts, we simply error out
// Continually try to persist this batch. If it fails after 10 attempts, we simply error out
retryStrategy := &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250}
_, err := retry.Do[interface{}](ctx, 10, retryStrategy, func() (interface{}, error) {
err := l2Etl.db.Transaction(func(tx *database.DB) error {
if _, err := retry.Do[interface{}](ctx, 10, retryStrategy, func() (interface{}, error) {
if err := l2Etl.db.Transaction(func(tx *database.DB) error {
if err := tx.Blocks.StoreL2BlockHeaders(l2BlockHeaders); err != nil {
return err
}
......@@ -91,18 +95,20 @@ func (l2Etl *L2ETL) Start(ctx context.Context) error {
}
}
return nil
})
if err != nil {
}); err != nil {
batch.Logger.Error("unable to persist batch", "err", err)
return nil, err
}
// a-ok! Can merge with the above block but being explicit
return nil, nil
})
l2Etl.ETL.metrics.RecordIndexedHeaders(len(l2BlockHeaders))
l2Etl.ETL.metrics.RecordIndexedLatestHeight(l2BlockHeaders[len(l2BlockHeaders)-1].Number)
if len(l2ContractEvents) > 0 {
l2Etl.ETL.metrics.RecordIndexedLogs(len(l2ContractEvents))
}
if err != nil {
// a-ok!
return nil, nil
}); err != nil {
return err
}
......
package etl
import (
"math/big"
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum/go-ethereum/common"
"github.com/prometheus/client_golang/prometheus"
)
var (
MetricsNamespace string = "etl"
_ Metricer = &metricer{}
)
type Metrics interface {
newMetricer(etl string) Metricer
}
type Metricer interface {
RecordInterval() (done func(err error))
// Batch Extraction
RecordBatchFailure()
RecordBatchLatestHeight(height *big.Int)
RecordBatchHeaders(size int)
RecordBatchLog(contractAddress common.Address)
// Indexed Batches
RecordIndexedLatestHeight(height *big.Int)
RecordIndexedHeaders(size int)
RecordIndexedLogs(size int)
}
type etlMetrics struct {
intervalTick *prometheus.CounterVec
intervalDuration *prometheus.HistogramVec
batchFailures *prometheus.CounterVec
batchLatestHeight *prometheus.GaugeVec
batchHeaders *prometheus.CounterVec
batchLogs *prometheus.CounterVec
indexedLatestHeight *prometheus.GaugeVec
indexedHeaders *prometheus.CounterVec
indexedLogs *prometheus.CounterVec
}
type metricerFactory struct {
metrics *etlMetrics
}
type metricer struct {
etl string
metrics *etlMetrics
}
func NewMetrics(registry *prometheus.Registry) Metrics {
return &metricerFactory{metrics: newMetrics(registry)}
}
func (factory *metricerFactory) newMetricer(etl string) Metricer {
return &metricer{etl, factory.metrics}
}
func newMetrics(registry *prometheus.Registry) *etlMetrics {
factory := metrics.With(registry)
return &etlMetrics{
intervalTick: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "intervals_total",
Help: "number of times the etl has run its extraction loop",
}, []string{
"etl",
}),
intervalDuration: factory.NewHistogramVec(prometheus.HistogramOpts{
Namespace: MetricsNamespace,
Name: "interval_seconds",
Help: "duration elapsed for during the processing loop",
}, []string{
"etl",
}),
batchFailures: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "failures_total",
Help: "number of times the etl encountered a failure to extract a batch",
}, []string{
"etl",
}),
batchLatestHeight: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Name: "height",
Help: "the latest block height observed by an etl interval",
}, []string{
"etl",
}),
batchHeaders: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "headers_total",
Help: "number of headers observed by the etl",
}, []string{
"etl",
}),
batchLogs: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "logs_total",
Help: "number of logs observed by the etl",
}, []string{
"etl",
"contract",
}),
indexedLatestHeight: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Name: "indexed_height",
Help: "the latest block height indexed into the database",
}, []string{
"etl",
}),
indexedHeaders: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "indexed_headers_total",
Help: "number of headers indexed by the etl",
}, []string{
"etl",
}),
indexedLogs: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "indexed_logs_total",
Help: "number of logs indexed by the etl",
}, []string{
"etl",
}),
}
}
func (m *metricer) RecordInterval() func(error) {
m.metrics.intervalTick.WithLabelValues(m.etl).Inc()
timer := prometheus.NewTimer(m.metrics.intervalDuration.WithLabelValues(m.etl))
return func(err error) {
if err != nil {
m.RecordBatchFailure()
}
timer.ObserveDuration()
}
}
func (m *metricer) RecordBatchFailure() {
m.metrics.batchFailures.WithLabelValues(m.etl).Inc()
}
func (m *metricer) RecordBatchLatestHeight(height *big.Int) {
m.metrics.batchLatestHeight.WithLabelValues(m.etl).Set(float64(height.Uint64()))
}
func (m *metricer) RecordBatchHeaders(size int) {
m.metrics.batchHeaders.WithLabelValues(m.etl).Add(float64(size))
}
func (m *metricer) RecordBatchLog(contractAddress common.Address) {
m.metrics.batchLogs.WithLabelValues(m.etl, contractAddress.String()).Inc()
}
func (m *metricer) RecordIndexedLatestHeight(height *big.Int) {
m.metrics.indexedLatestHeight.WithLabelValues(m.etl).Set(float64(height.Uint64()))
}
func (m *metricer) RecordIndexedHeaders(size int) {
m.metrics.indexedHeaders.WithLabelValues(m.etl).Add(float64(size))
}
func (m *metricer) RecordIndexedLogs(size int) {
m.metrics.indexedLogs.WithLabelValues(m.etl).Add(float64(size))
}
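A short sketch of how this factory is meant to be wired up (mirroring `NewIndexer` further down; `newMetricer` is unexported, so this wiring lives inside the `etl` package). One registry-backed set of collectors is created once, and each ETL gets a `Metricer` bound to its own `etl` label value:

```go
// Shared collectors, one Metricer per ETL label.
registry := prometheus.NewRegistry()
etlMetrics := NewMetrics(registry)

l1Metricer := etlMetrics.newMetricer("l1") // records under etl="l1"
l2Metricer := etlMetrics.newMetricer("l2") // records under etl="l2"
_ = l2Metricer

done := l1Metricer.RecordInterval() // bumps etl_intervals_total{etl="l1"} and starts a timer
// ... one extraction pass ...
done(nil) // observes etl_interval_seconds; a non-nil error would also bump etl_failures_total
```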
......@@ -7,19 +7,24 @@ import (
"sync"
"github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/etl"
"github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum-optimism/optimism/indexer/processors"
"github.com/ethereum-optimism/optimism/op-service/metrics"
)
// Indexer contains the necessary resources for
// indexing the configured L1 and L2 chains
type Indexer struct {
db *database.DB
log log.Logger
db *database.DB
metricsConfig config.MetricsConfig
metricsRegistry *prometheus.Registry
L1ETL *etl.L1ETL
L2ETL *etl.L2ETL
......@@ -28,36 +33,44 @@ type Indexer struct {
}
// NewIndexer initializes an instance of the Indexer
func NewIndexer(logger log.Logger, chainConfig config.ChainConfig, rpcsConfig config.RPCsConfig, db *database.DB) (*Indexer, error) {
func NewIndexer(logger log.Logger, db *database.DB, chainConfig config.ChainConfig, rpcsConfig config.RPCsConfig, metricsConfig config.MetricsConfig) (*Indexer, error) {
metricsRegistry := metrics.NewRegistry()
etlMetrics := etl.NewMetrics(metricsRegistry)
// L1
l1EthClient, err := node.DialEthClient(rpcsConfig.L1RPC)
if err != nil {
return nil, err
}
l1Etl, err := etl.NewL1ETL(logger, db, l1EthClient, chainConfig.L1StartHeight(), chainConfig.L1Contracts)
l1Cfg := etl.Config{LoopIntervalMsec: chainConfig.L1PollingInterval, HeaderBufferSize: chainConfig.L1HeaderBufferSize, StartHeight: chainConfig.L1StartHeight()}
l1Etl, err := etl.NewL1ETL(l1Cfg, logger, db, etlMetrics, l1EthClient, chainConfig.L1Contracts)
if err != nil {
return nil, err
}
// L2 (defaults to predeploy contracts)
l2EthClient, err := node.DialEthClient(rpcsConfig.L2RPC)
if err != nil {
return nil, err
}
// Currently defaults to the predeploys
l2Etl, err := etl.NewL2ETL(logger, db, l2EthClient)
l2Cfg := etl.Config{LoopIntervalMsec: chainConfig.L2PollingInterval, HeaderBufferSize: chainConfig.L2HeaderBufferSize}
l2Etl, err := etl.NewL2ETL(l2Cfg, logger, db, etlMetrics, l2EthClient)
if err != nil {
return nil, err
}
// Bridge
bridgeProcessor, err := processors.NewBridgeProcessor(logger, db, chainConfig)
if err != nil {
return nil, err
}
indexer := &Indexer{
db: db,
log: logger,
db: db,
metricsConfig: metricsConfig,
metricsRegistry: metricsRegistry,
L1ETL: l1Etl,
L2ETL: l2Etl,
......@@ -67,48 +80,56 @@ func NewIndexer(logger log.Logger, chainConfig config.ChainConfig, rpcsConfig co
return indexer, nil
}
func (i *Indexer) startMetricsServer(ctx context.Context) error {
i.log.Info("starting metrics server...", "port", i.metricsConfig.Port)
err := metrics.ListenAndServe(ctx, i.metricsRegistry, i.metricsConfig.Host, i.metricsConfig.Port)
if err != nil {
i.log.Error("metrics server stopped", "err", err)
} else {
i.log.Info("metrics server stopped")
}
return err
}
// Start starts the indexing service on L1 and L2 chains
func (i *Indexer) Run(ctx context.Context) error {
var wg sync.WaitGroup
errCh := make(chan error, 3)
errCh := make(chan error, 4)
// If either processor errors out, we stop
subCtx, cancel := context.WithCancel(ctx)
run := func(start func(ctx context.Context) error) {
// if any goroutine halts, we stop the entire indexer
processCtx, processCancel := context.WithCancel(ctx)
runProcess := func(start func(ctx context.Context) error) {
wg.Add(1)
defer func() {
if err := recover(); err != nil {
i.log.Error("halting indexer on panic", "err", err)
debug.PrintStack()
errCh <- fmt.Errorf("panic: %v", err)
}
cancel()
wg.Done()
go func() {
defer func() {
if err := recover(); err != nil {
i.log.Error("halting indexer on panic", "err", err)
debug.PrintStack()
errCh <- fmt.Errorf("panic: %v", err)
}
processCancel()
wg.Done()
}()
errCh <- start(processCtx)
}()
err := start(subCtx)
if err != nil {
i.log.Error("halting indexer on error", "err", err)
}
// Send a value down regardless if we've received an error
// or halted via cancellation where err == nil
errCh <- err
}
// Kick off all the dependent routines
go run(i.L1ETL.Start)
go run(i.L2ETL.Start)
go run(i.BridgeProcessor.Start)
runProcess(i.L1ETL.Start)
runProcess(i.L2ETL.Start)
runProcess(i.BridgeProcessor.Start)
runProcess(i.startMetricsServer)
wg.Wait()
err := <-errCh
if err != nil {
i.log.Error("indexer stopped", "err", err)
} else {
i.log.Info("indexer stopped")
}
wg.Wait()
i.log.Info("indexer stopped")
return err
}
// Cleanup releases any resources that might be currently held by the indexer
func (i *Indexer) Cleanup() {
i.db.Close()
}
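The `Run` hunk above likewise interleaves old and new lines; the post-change supervisor reduces to the sketch below. The design detail worth noting: `errCh` is buffered with one slot per process and each goroutine sends exactly one value (its return error, or the recovered panic), so no sender ever blocks and `wg.Wait()` can complete before the first error is drained. Any exit, clean or not, cancels `processCtx`, which halts the sibling processes.

```go
// Sketch of the post-change supervisor, assembled from the added lines above.
processCtx, processCancel := context.WithCancel(ctx)
var wg sync.WaitGroup
errCh := make(chan error, 4) // one slot each: L1 ETL, L2 ETL, bridge processor, metrics server

runProcess := func(start func(ctx context.Context) error) {
	wg.Add(1)
	go func() {
		defer func() {
			if err := recover(); err != nil {
				debug.PrintStack()
				errCh <- fmt.Errorf("panic: %v", err)
			}
			processCancel() // any exit halts the siblings
			wg.Done()
		}()
		errCh <- start(processCtx)
	}()
}
```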
# Chain configures L1 chain addresses
# They can be configured manually, or via a preset L2 chain ID for known chains including OP Mainnet, OP Goerli, Base, Base Goerli, Zora, and Zora Goerli
[chain]
l1-polling-interval = 0
l1-header-buffer-size = 0
l2-polling-interval = 0
l2-header-buffer-size = 0
# OP Goerli
preset = 420
l1-starting-height = 0
......
......@@ -107,7 +107,7 @@ func (b *BridgeProcessor) Start(ctx context.Context) error {
continue
}
toL1Height, toL2Height := latestEpoch.L1BlockHeader.Number.Int, latestEpoch.L2BlockHeader.Number.Int
toL1Height, toL2Height := latestEpoch.L1BlockHeader.Number, latestEpoch.L2BlockHeader.Number
fromL1Height, fromL2Height := big.NewInt(0), big.NewInt(0)
if b.LatestL1Header != nil {
// `NewBridgeProcessor` ensures that LatestL2Header must not be nil if LatestL1Header is set
......
......@@ -26,7 +26,6 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, chainConfig
return err
}
ethDeposits := []database.L1BridgeDeposit{}
portalDeposits := make(map[logKey]*contracts.OptimismPortalTransactionDepositEvent, len(optimismPortalTxDeposits))
transactionDeposits := make([]database.L1TransactionDeposit, len(optimismPortalTxDeposits))
for i := range optimismPortalTxDeposits {
......@@ -39,14 +38,6 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, chainConfig
GasLimit: depositTx.GasLimit,
Tx: depositTx.Tx,
}
// catch ETH transfers to the portal contract.
if len(depositTx.DepositTx.Data) == 0 && depositTx.DepositTx.Value.BitLen() > 0 {
ethDeposits = append(ethDeposits, database.L1BridgeDeposit{
TransactionSourceHash: depositTx.DepositTx.SourceHash,
BridgeTransfer: database.BridgeTransfer{Tx: transactionDeposits[i].Tx, TokenPair: database.ETHTokenPair},
})
}
}
if len(transactionDeposits) > 0 {
......@@ -54,12 +45,6 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, chainConfig
if err := db.BridgeTransactions.StoreL1TransactionDeposits(transactionDeposits); err != nil {
return err
}
if len(ethDeposits) > 0 {
log.Info("detected portal ETH transfers", "size", len(ethDeposits))
if err := db.BridgeTransfers.StoreL1BridgeDeposits(ethDeposits); err != nil {
return err
}
}
}
// (2) L1CrossDomainMessenger
......@@ -151,7 +136,7 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, chainConfig
if err != nil {
return err
} else if withdrawal == nil {
log.Crit("missing indexed withdrawal on prove event!", "withdrawal_hash", proven.WithdrawalHash, "tx_hash", proven.Event.TransactionHash)
log.Error("missing indexed withdrawal on prove event!", "withdrawal_hash", proven.WithdrawalHash, "tx_hash", proven.Event.TransactionHash)
return errors.New("missing indexed withdrawal")
}
......@@ -176,7 +161,7 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, chainConfig
if err != nil {
return err
} else if withdrawal == nil {
log.Crit("missing indexed withdrawal on finalization event!", "withdrawal_hash", finalized.WithdrawalHash, "tx_hash", finalized.Event.TransactionHash)
log.Error("missing indexed withdrawal on finalization event!", "withdrawal_hash", finalized.WithdrawalHash, "tx_hash", finalized.Event.TransactionHash)
return errors.New("missing indexed withdrawal")
}
......@@ -203,7 +188,7 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, chainConfig
if err != nil {
return err
} else if message == nil {
log.Crit("missing indexed L2CrossDomainMessenger message", "message_hash", relayed.MessageHash, "tx_hash", relayed.Event.TransactionHash)
log.Error("missing indexed L2CrossDomainMessenger message", "message_hash", relayed.MessageHash, "tx_hash", relayed.Event.TransactionHash)
return fmt.Errorf("missing indexed L2CrossDomainMessager message")
}
......@@ -240,7 +225,7 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, chainConfig
if err != nil {
return err
} else if withdrawal == nil {
log.Crit("missing L2StandardBridge withdrawal on L1 finalization", "tx_hash", finalizedBridge.Event.TransactionHash)
log.Error("missing L2StandardBridge withdrawal on L1 finalization", "tx_hash", finalizedBridge.Event.TransactionHash)
return errors.New("missing L2StandardBridge withdrawal on L1 finalization")
}
}
......
......@@ -26,7 +26,6 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
return err
}
ethWithdrawals := []database.L2BridgeWithdrawal{}
messagesPassed := make(map[logKey]*contracts.L2ToL1MessagePasserMessagePassed, len(l2ToL1MPMessagesPassed))
transactionWithdrawals := make([]database.L2TransactionWithdrawal, len(l2ToL1MPMessagesPassed))
for i := range l2ToL1MPMessagesPassed {
......@@ -39,13 +38,6 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
GasLimit: messagePassed.GasLimit,
Tx: messagePassed.Tx,
}
if len(messagePassed.Tx.Data) == 0 && messagePassed.Tx.Amount.Int.BitLen() > 0 {
ethWithdrawals = append(ethWithdrawals, database.L2BridgeWithdrawal{
TransactionWithdrawalHash: messagePassed.WithdrawalHash,
BridgeTransfer: database.BridgeTransfer{Tx: transactionWithdrawals[i].Tx, TokenPair: database.ETHTokenPair},
})
}
}
if len(messagesPassed) > 0 {
......@@ -53,12 +45,6 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
if err := db.BridgeTransactions.StoreL2TransactionWithdrawals(transactionWithdrawals); err != nil {
return err
}
if len(ethWithdrawals) > 0 {
log.Info("detected L2ToL1MessagePasser ETH transfers", "size", len(ethWithdrawals))
if err := db.BridgeTransfers.StoreL2BridgeWithdrawals(ethWithdrawals); err != nil {
return err
}
}
}
// (2) L2CrossDomainMessenger
......@@ -151,7 +137,7 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, fromHeight
if err != nil {
return err
} else if message == nil {
log.Crit("missing indexed L1CrossDomainMessenger message", "message_hash", relayed.MessageHash, "tx_hash", relayed.Event.TransactionHash)
log.Error("missing indexed L1CrossDomainMessenger message", "message_hash", relayed.MessageHash, "tx_hash", relayed.Event.TransactionHash)
return fmt.Errorf("missing indexed L1CrossDomainMessager message")
}
......@@ -188,7 +174,7 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, fromHeight
if err != nil {
return err
} else if deposit == nil {
log.Crit("missing L1StandardBridge deposit on L2 finalization", "tx_hash", finalizedBridge.Event.TransactionHash)
log.Error("missing L1StandardBridge deposit on L2 finalization", "tx_hash", finalizedBridge.Event.TransactionHash)
return errors.New("missing L1StandardBridge deposit on L2 finalization")
}
}
......
......@@ -94,13 +94,13 @@ func CrossDomainMessengerSentMessageEvents(chainSelector string, contractAddress
Event: &sentMessageEvents[i],
BridgeMessage: database.BridgeMessage{
MessageHash: messageHash,
Nonce: database.U256{Int: sentMessage.MessageNonce},
Nonce: sentMessage.MessageNonce,
SentMessageEventGUID: sentMessageEvents[i].GUID,
GasLimit: database.U256{Int: sentMessage.GasLimit},
GasLimit: sentMessage.GasLimit,
Tx: database.Transaction{
FromAddress: sentMessage.Sender,
ToAddress: sentMessage.Target,
Amount: database.U256{Int: sentMessageExtension.Value},
Amount: sentMessageExtension.Value,
Data: sentMessage.Message,
Timestamp: sentMessageEvents[i].Timestamp,
},
......
......@@ -11,8 +11,8 @@ import (
type L2ToL1MessagePasserMessagePassed struct {
Event *database.ContractEvent
WithdrawalHash common.Hash
GasLimit database.U256
Nonce database.U256
GasLimit *big.Int
Nonce *big.Int
Tx database.Transaction
}
......@@ -40,12 +40,12 @@ func L2ToL1MessagePasserMessagePassedEvents(contractAddress common.Address, db *
messagesPassed[i] = L2ToL1MessagePasserMessagePassed{
Event: &messagePassedEvents[i].ContractEvent,
WithdrawalHash: messagePassed.WithdrawalHash,
Nonce: database.U256{Int: messagePassed.Nonce},
GasLimit: database.U256{Int: messagePassed.GasLimit},
Nonce: messagePassed.Nonce,
GasLimit: messagePassed.GasLimit,
Tx: database.Transaction{
FromAddress: messagePassed.Sender,
ToAddress: messagePassed.Target,
Amount: database.U256{Int: messagePassed.Value},
Amount: messagePassed.Value,
Data: messagePassed.Data,
Timestamp: messagePassedEvents[i].Timestamp,
},
......
......@@ -16,7 +16,7 @@ type OptimismPortalTransactionDepositEvent struct {
Event *database.ContractEvent
DepositTx *types.DepositTx
Tx database.Transaction
GasLimit database.U256
GasLimit *big.Int
}
type OptimismPortalWithdrawalProvenEvent struct {
......@@ -67,11 +67,11 @@ func OptimismPortalTransactionDepositEvents(contractAddress common.Address, db *
optimismPortalTxDeposits[i] = OptimismPortalTransactionDepositEvent{
Event: &transactionDepositEvents[i].ContractEvent,
DepositTx: depositTx,
GasLimit: database.U256{Int: new(big.Int).SetUint64(depositTx.Gas)},
GasLimit: new(big.Int).SetUint64(depositTx.Gas),
Tx: database.Transaction{
FromAddress: txDeposit.From,
ToAddress: txDeposit.To,
Amount: database.U256{Int: depositTx.Value},
Amount: depositTx.Value,
Data: depositTx.Data,
Timestamp: transactionDepositEvents[i].Timestamp,
},
......
......@@ -101,7 +101,7 @@ func _standardBridgeInitiatedEvents[BridgeEventType bindings.StandardBridgeETHBr
Tx: database.Transaction{
FromAddress: erc20Bridge.From,
ToAddress: erc20Bridge.To,
Amount: database.U256{Int: erc20Bridge.Amount},
Amount: erc20Bridge.Amount,
Data: erc20Bridge.ExtraData,
Timestamp: initiatedBridgeEvents[i].Timestamp,
},
......@@ -161,7 +161,7 @@ func _standardBridgeFinalizedEvents[BridgeEventType bindings.StandardBridgeETHBr
Tx: database.Transaction{
FromAddress: erc20Bridge.From,
ToAddress: erc20Bridge.To,
Amount: database.U256{Int: erc20Bridge.Amount},
Amount: erc20Bridge.Amount,
Data: erc20Bridge.ExtraData,
Timestamp: bridgeFinalizedEvents[i].Timestamp,
},
......
......@@ -7,7 +7,7 @@ A simple UI for exploring the indexer DB using [Prisma studio](https://www.prism
Included in the docker-compose file as the `ui` service
```bash
docker-compose up
docker compose up
```
Prisma Studio can be viewed at [localhost:5555](http://localhost:5555)
......
......@@ -7,24 +7,24 @@ datasource db {
url = env("DATABASE_URL")
}
model l1_tokens {
model l1_bridged_tokens {
address String @id @db.VarChar
bridge_address String @db.VarChar
l2_token_address String @db.VarChar
name String @db.VarChar
symbol String @db.VarChar
decimals Int
l2_tokens l2_tokens[]
l2_bridged_tokens l2_bridged_tokens[]
}
model l2_tokens {
model l2_bridged_tokens {
address String @id @db.VarChar
bridge_address String @db.VarChar
l1_token_address String? @db.VarChar
name String @db.VarChar
symbol String @db.VarChar
decimals Int
l1_tokens l1_tokens? @relation(fields: [l1_token_address], references: [address], onDelete: NoAction, onUpdate: NoAction)
l1_bridged_tokens l1_bridged_tokens? @relation(fields: [l1_token_address], references: [address], onDelete: NoAction, onUpdate: NoAction)
}
/// This table contains check constraints and requires additional setup for migrations. Visit https://pris.ly/d/check-constraints for more info.
......
FROM --platform=$BUILDPLATFORM golang:1.19.9-alpine3.16 as builder
FROM --platform=$BUILDPLATFORM golang:1.20.7-alpine3.18 as builder
ARG VERSION=v0.0.0
......@@ -23,7 +23,7 @@ ARG TARGETOS TARGETARCH
RUN make op-batcher VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH
FROM alpine:3.16
FROM alpine:3.18
COPY --from=builder /app/op-batcher/bin/op-batcher /usr/local/bin
......
......@@ -3,8 +3,8 @@ package compressor_test
import (
"bytes"
"compress/zlib"
"crypto/rand"
"io"
"math/rand"
"testing"
"github.com/ethereum-optimism/optimism/op-batcher/compressor"
......
......@@ -4,46 +4,134 @@
package safe
import (
"bytes"
"encoding/json"
"fmt"
"math/big"
"strings"
"golang.org/x/exp/maps"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
)
// BatchFile represents a Safe tx-builder transaction.
type BatchFile struct {
// Batch represents a Safe tx-builder transaction.
type Batch struct {
Version string `json:"version"`
ChainID *big.Int `json:"chainId"`
CreatedAt uint64 `json:"createdAt"`
Meta BatchFileMeta `json:"meta"`
Meta BatchMeta `json:"meta"`
Transactions []BatchTransaction `json:"transactions"`
}
// AddCall will add a call to the batch. After a series of calls are
// added to the batch, it can be serialized to JSON.
func (b *Batch) AddCall(to common.Address, value *big.Int, sig string, args []any, iface abi.ABI) error {
// Attempt to pull out the signature from the top level methods.
// The abi package uses normalization that we do not want to be
// coupled to, so attempt to search for the raw name if the top
// level name is not found to handle overloading more gracefully.
method, ok := iface.Methods[sig]
if !ok {
for _, m := range iface.Methods {
if m.RawName == sig || m.Sig == sig {
method = m
ok = true
}
}
}
if !ok {
keys := maps.Keys(iface.Methods)
methods := strings.Join(keys, ",")
return fmt.Errorf("%s not found in abi, options are %s", sig, methods)
}
if len(args) != len(method.Inputs) {
return fmt.Errorf("requires %d inputs but got %d for %s", len(method.Inputs), len(args), method.RawName)
}
contractMethod := ContractMethod{
Name: method.RawName,
Payable: method.Payable,
}
inputValues := make(map[string]string)
contractInputs := make([]ContractInput, 0)
for i, input := range method.Inputs {
contractInput, err := createContractInput(input, contractInputs)
if err != nil {
return err
}
contractMethod.Inputs = append(contractMethod.Inputs, contractInput...)
str, err := stringifyArg(args[i])
if err != nil {
return err
}
inputValues[input.Name] = str
}
encoded, err := method.Inputs.PackValues(args)
if err != nil {
return err
}
data := make([]byte, len(method.ID)+len(encoded))
copy(data, method.ID)
copy(data[len(method.ID):], encoded)
batchTransaction := BatchTransaction{
To: to,
Value: value,
Method: contractMethod,
Data: data,
InputValues: inputValues,
}
b.Transactions = append(b.Transactions, batchTransaction)
return nil
}
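As a usage sketch (mirroring the tests further down; `portalABIJSON` is an assumed variable holding the raw OptimismPortal ABI bytes, and the address values are illustrative):

```go
portalABI, err := abi.JSON(bytes.NewReader(portalABIJSON)) // assumed: raw ABI file contents
if err != nil {
	return err
}

batch := new(Batch)
to := common.Address{19: 0x01}
args := []any{common.Address{0x01}, big.NewInt(2), uint64(100), false, []byte{}}

// The signature may be the method name, its raw name, or the full "name(types)" form.
if err := batch.AddCall(to, big.NewInt(222), "depositTransaction", args, portalABI); err != nil {
	return err
}
if err := batch.Check(); err != nil { // re-packs the stringified inputs and compares calldata
	return err
}
out, err := json.Marshal(batch) // Safe tx-builder compatible JSON
if err != nil {
	return err
}
_ = out
```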
// Check will check the batch for errors
func (b *Batch) Check() error {
for _, tx := range b.Transactions {
if err := tx.Check(); err != nil {
return err
}
}
return nil
}
// batchMarshaling is a helper type used for JSON marshaling.
type batchFileMarshaling struct {
type batchMarshaling struct {
Version string `json:"version"`
ChainID string `json:"chainId"`
CreatedAt uint64 `json:"createdAt"`
Meta BatchFileMeta `json:"meta"`
Meta BatchMeta `json:"meta"`
Transactions []BatchTransaction `json:"transactions"`
}
// MarshalJSON will marshal a BatchFile to JSON.
func (b *BatchFile) MarshalJSON() ([]byte, error) {
return json.Marshal(batchFileMarshaling{
// MarshalJSON will marshal a Batch to JSON.
func (b *Batch) MarshalJSON() ([]byte, error) {
batch := batchMarshaling{
Version: b.Version,
ChainID: b.ChainID.String(),
CreatedAt: b.CreatedAt,
Meta: b.Meta,
Transactions: b.Transactions,
})
}
if b.ChainID != nil {
batch.ChainID = b.ChainID.String()
}
return json.Marshal(batch)
}
// UnmarshalJSON will unmarshal a BatchFile from JSON.
func (b *BatchFile) UnmarshalJSON(data []byte) error {
var bf batchFileMarshaling
// UnmarshalJSON will unmarshal a Batch from JSON.
func (b *Batch) UnmarshalJSON(data []byte) error {
var bf batchMarshaling
if err := json.Unmarshal(data, &bf); err != nil {
return err
}
......@@ -59,9 +147,9 @@ func (b *BatchFile) UnmarshalJSON(data []byte) error {
return nil
}
// BatchFileMeta contains metadata about a BatchFile. Not all
// BatchMeta contains metadata about a Batch. Not all
// of the fields are required.
type BatchFileMeta struct {
type BatchMeta struct {
TxBuilderVersion string `json:"txBuilderVersion,omitempty"`
Checksum string `json:"checksum,omitempty"`
CreatedFromSafeAddress string `json:"createdFromSafeAddress"`
......@@ -79,6 +167,81 @@ type BatchTransaction struct {
InputValues map[string]string `json:"contractInputsValues"`
}
// Check will check the batch transaction for errors.
// An error is defined by:
// - incorrectly encoded calldata
// - mismatch in number of arguments
// It does not currently work on structs; it returns no error when a "tuple"
// is used as an argument. A generic way to handle structs is still needed.
func (bt *BatchTransaction) Check() error {
if len(bt.Method.Inputs) != len(bt.InputValues) {
return fmt.Errorf("expected %d inputs but got %d", len(bt.Method.Inputs), len(bt.InputValues))
}
if len(bt.Data) > 0 && bt.Method.Name != "fallback" {
if len(bt.Data) < 4 {
return fmt.Errorf("must have at least 4 bytes of calldata, got %d", len(bt.Data))
}
sig := bt.Signature()
selector := crypto.Keccak256([]byte(sig))[0:4]
if !bytes.Equal(bt.Data[0:4], selector) {
return fmt.Errorf("data does not match signature")
}
// Check the calldata
values := make([]any, len(bt.Method.Inputs))
for i, input := range bt.Method.Inputs {
value, ok := bt.InputValues[input.Name]
if !ok {
return fmt.Errorf("missing input %s", input.Name)
}
// Need to figure out better way to handle tuples in a generic way
if input.Type == "tuple" {
return nil
}
arg, err := unstringifyArg(value, input.Type)
if err != nil {
return err
}
values[i] = arg
}
calldata, err := bt.Arguments().PackValues(values)
if err != nil {
return err
}
if !bytes.Equal(bt.Data[4:], calldata) {
return fmt.Errorf("calldata does not match inputs, expected %s, got %s", hexutil.Encode(bt.Data[4:]), hexutil.Encode(calldata))
}
}
return nil
}
// Signature returns the function signature of the batch transaction.
func (bt *BatchTransaction) Signature() string {
types := make([]string, len(bt.Method.Inputs))
for i, input := range bt.Method.Inputs {
types[i] = buildFunctionSignature(input)
}
return fmt.Sprintf("%s(%s)", bt.Method.Name, strings.Join(types, ","))
}
func (bt *BatchTransaction) Arguments() abi.Arguments {
arguments := make(abi.Arguments, len(bt.Method.Inputs))
for i, input := range bt.Method.Inputs {
serialized, err := json.Marshal(input)
if err != nil {
panic(err)
}
var arg abi.Argument
if err := json.Unmarshal(serialized, &arg); err != nil {
panic(err)
}
arguments[i] = arg
}
return arguments
}
// UnmarshalJSON will unmarshal a BatchTransaction from JSON.
func (b *BatchTransaction) UnmarshalJSON(data []byte) error {
var bt batchTransactionMarshaling
......@@ -87,6 +250,9 @@ func (b *BatchTransaction) UnmarshalJSON(data []byte) error {
}
b.To = common.HexToAddress(bt.To)
b.Value = new(big.Int).SetUint64(bt.Value)
if bt.Data != nil {
b.Data = common.CopyBytes(*bt.Data)
}
b.Method = bt.Method
b.InputValues = bt.InputValues
return nil
......@@ -101,8 +267,8 @@ func (b *BatchTransaction) MarshalJSON() ([]byte, error) {
InputValues: b.InputValues,
}
if len(b.Data) != 0 {
hex := hexutil.Encode(b.Data)
batch.Data = &hex
data := hexutil.Bytes(b.Data)
batch.Data = &data
}
return json.Marshal(batch)
}
......@@ -111,7 +277,7 @@ func (b *BatchTransaction) MarshalJSON() ([]byte, error) {
type batchTransactionMarshaling struct {
To string `json:"to"`
Value uint64 `json:"value,string"`
Data *string `json:"data"`
Data *hexutil.Bytes `json:"data"`
Method ContractMethod `json:"contractMethod"`
InputValues map[string]string `json:"contractInputsValues"`
}
......
package safe
import (
"bytes"
"encoding/json"
"os"
"testing"
"github.com/stretchr/testify/require"
)
func TestBatchFileJSONPrepareBedrock(t *testing.T) {
testBatchFileJSON(t, "testdata/batch-prepare-bedrock.json")
}
func TestBatchFileJSONL2OO(t *testing.T) {
testBatchFileJSON(t, "testdata/l2-output-oracle.json")
}
func testBatchFileJSON(t *testing.T, path string) {
b, err := os.ReadFile(path)
require.NoError(t, err)
dec := json.NewDecoder(bytes.NewReader(b))
decoded := new(BatchFile)
require.NoError(t, dec.Decode(decoded))
data, err := json.Marshal(decoded)
require.NoError(t, err)
require.JSONEq(t, string(b), string(data))
}
package safe
import (
"fmt"
"math/big"
"reflect"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
)
// stringifyArg converts a Go type to a string that is representable by ABI.
// To do so, this function must be recursive to handle nested tuples.
func stringifyArg(argument any) (string, error) {
switch arg := argument.(type) {
case common.Address:
return arg.String(), nil
case *common.Address:
return arg.String(), nil
case *big.Int:
return arg.String(), nil
case big.Int:
return arg.String(), nil
case bool:
if arg {
return "true", nil
}
return "false", nil
case int64:
return strconv.FormatInt(arg, 10), nil
case int32:
return strconv.FormatInt(int64(arg), 10), nil
case int16:
return strconv.FormatInt(int64(arg), 10), nil
case int8:
return strconv.FormatInt(int64(arg), 10), nil
case int:
return strconv.FormatInt(int64(arg), 10), nil
case uint64:
return strconv.FormatUint(uint64(arg), 10), nil
case uint32:
return strconv.FormatUint(uint64(arg), 10), nil
case uint16:
return strconv.FormatUint(uint64(arg), 10), nil
case uint8:
return strconv.FormatUint(uint64(arg), 10), nil
case uint:
return strconv.FormatUint(uint64(arg), 10), nil
case []byte:
return hexutil.Encode(arg), nil
case []any:
ret := make([]string, len(arg))
for i, v := range arg {
str, err := stringifyArg(v)
if err != nil {
return "", err
}
ret[i] = str
}
return "[" + strings.Join(ret, ",") + "]", nil
default:
typ := reflect.TypeOf(argument)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
if typ.Kind() == reflect.Struct {
v := reflect.ValueOf(argument)
numField := v.NumField()
ret := make([]string, numField)
for i := 0; i < numField; i++ {
val := v.Field(i).Interface()
str, err := stringifyArg(val)
if err != nil {
return "", err
}
ret[i] = str
}
return "[" + strings.Join(ret, ",") + "]", nil
}
return "", fmt.Errorf("unknown type as argument: %T", arg)
}
}
// unstringifyArg converts a string to a Go type.
func unstringifyArg(arg string, typ string) (any, error) {
switch typ {
case "address":
return common.HexToAddress(arg), nil
case "bool":
return strconv.ParseBool(arg)
case "uint8":
val, err := strconv.ParseUint(arg, 10, 8)
return uint8(val), err
case "uint16":
val, err := strconv.ParseUint(arg, 10, 16)
return uint16(val), err
case "uint32":
val, err := strconv.ParseUint(arg, 10, 32)
return uint32(val), err
case "uint64":
val, err := strconv.ParseUint(arg, 10, 64)
return val, err
case "int8":
val, err := strconv.ParseInt(arg, 10, 8)
return val, err
case "int16":
val, err := strconv.ParseInt(arg, 10, 16)
return val, err
case "int32":
val, err := strconv.ParseInt(arg, 10, 32)
return val, err
case "int64":
val, err := strconv.ParseInt(arg, 10, 64)
return val, err
case "uint256", "int256":
val, ok := new(big.Int).SetString(arg, 10)
if !ok {
return nil, fmt.Errorf("failed to parse %s as big.Int", arg)
}
return val, nil
case "string":
return arg, nil
case "bytes":
return hexutil.Decode(arg)
default:
return nil, fmt.Errorf("unknown type: %s", typ)
}
}
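For the scalar types these two helpers are inverses of one another; a quick illustration under the definitions above. Tuples are the one asymmetry: `stringifyArg` renders structs as a bracketed list, but `unstringifyArg` has no `tuple` case, which is why `BatchTransaction.Check` above skips calldata verification for tuple inputs.

```go
s, _ := stringifyArg(big.NewInt(42))     // "42"
v, _ := unstringifyArg(s, "uint256")     // *big.Int with value 42
h, _ := stringifyArg([]byte{0xde, 0xad}) // "0xdead"
raw, _ := unstringifyArg(h, "bytes")     // []byte{0xde, 0xad}
_, _ = v, raw
```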
// createContractInput converts an abi.Argument to one or more ContractInputs.
func createContractInput(input abi.Argument, inputs []ContractInput) ([]ContractInput, error) {
inputType, err := stringifyType(input.Type)
if err != nil {
return nil, err
}
// TODO: could probably do better than string comparison?
internalType := input.Type.String()
if inputType == "tuple" {
internalType = input.Type.TupleRawName
}
components := make([]ContractInput, 0)
for i, elem := range input.Type.TupleElems {
e := *elem
arg := abi.Argument{
Name: input.Type.TupleRawNames[i],
Type: e,
}
component, err := createContractInput(arg, inputs)
if err != nil {
return nil, err
}
components = append(components, component...)
}
contractInput := ContractInput{
InternalType: internalType,
Name: input.Name,
Type: inputType,
Components: components,
}
inputs = append(inputs, contractInput)
return inputs, nil
}
// stringifyType turns an abi.Type into a string
func stringifyType(t abi.Type) (string, error) {
switch t.T {
case abi.TupleTy:
return "tuple", nil
case abi.BoolTy:
return t.String(), nil
case abi.AddressTy:
return t.String(), nil
case abi.UintTy:
return t.String(), nil
case abi.IntTy:
return t.String(), nil
case abi.StringTy:
return t.String(), nil
case abi.BytesTy:
return t.String(), nil
default:
return "", fmt.Errorf("unknown type: %d", t.T)
}
}
// buildFunctionSignature builds a function signature from a ContractInput.
// It is recursive to handle tuples.
func buildFunctionSignature(input ContractInput) string {
if input.Type == "tuple" {
types := make([]string, len(input.Components))
for i, component := range input.Components {
types[i] = buildFunctionSignature(component)
}
return fmt.Sprintf("(%s)", strings.Join(types, ","))
}
return input.InternalType
}
package safe
import (
"bytes"
"encoding/json"
"errors"
"math/big"
"os"
"testing"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
func TestBatchJSONPrepareBedrock(t *testing.T) {
testBatchJSON(t, "testdata/batch-prepare-bedrock.json")
}
func TestBatchJSONL2OO(t *testing.T) {
testBatchJSON(t, "testdata/l2-output-oracle.json")
}
func testBatchJSON(t *testing.T, path string) {
b, err := os.ReadFile(path)
require.NoError(t, err)
dec := json.NewDecoder(bytes.NewReader(b))
decoded := new(Batch)
require.NoError(t, dec.Decode(decoded))
data, err := json.Marshal(decoded)
require.NoError(t, err)
require.JSONEq(t, string(b), string(data))
}
// TestBatchAddCallFinalizeWithdrawalTransaction ensures that structs can be serialized correctly.
func TestBatchAddCallFinalizeWithdrawalTransaction(t *testing.T) {
file, err := os.ReadFile("testdata/portal-abi.json")
require.NoError(t, err)
portalABI, err := abi.JSON(bytes.NewReader(file))
require.NoError(t, err)
sig := "finalizeWithdrawalTransaction"
argument := []any{
bindings.TypesWithdrawalTransaction{
Nonce: big.NewInt(0),
Sender: common.Address{19: 0x01},
Target: common.Address{19: 0x02},
Value: big.NewInt(1),
GasLimit: big.NewInt(2),
Data: []byte{},
},
}
batch := new(Batch)
to := common.Address{19: 0x01}
value := big.NewInt(222)
require.NoError(t, batch.AddCall(to, value, sig, argument, portalABI))
require.NoError(t, batch.Check())
require.Equal(t, batch.Transactions[0].Signature(), "finalizeWithdrawalTransaction((uint256,address,address,uint256,uint256,bytes))")
expected, err := os.ReadFile("testdata/finalize-withdrawal-tx.json")
require.NoError(t, err)
serialized, err := json.Marshal(batch)
require.NoError(t, err)
require.JSONEq(t, string(expected), string(serialized))
}
// TestBatchAddCallDepositTransaction ensures that simple calls can be serialized correctly.
func TestBatchAddCallDepositTransaction(t *testing.T) {
file, err := os.ReadFile("testdata/portal-abi.json")
require.NoError(t, err)
portalABI, err := abi.JSON(bytes.NewReader(file))
require.NoError(t, err)
batch := new(Batch)
to := common.Address{19: 0x01}
value := big.NewInt(222)
sig := "depositTransaction"
argument := []any{
common.Address{01},
big.NewInt(2),
uint64(100),
false,
[]byte{},
}
require.NoError(t, batch.AddCall(to, value, sig, argument, portalABI))
require.NoError(t, batch.Check())
require.Equal(t, batch.Transactions[0].Signature(), "depositTransaction(address,uint256,uint64,bool,bytes)")
expected, err := os.ReadFile("testdata/deposit-tx.json")
require.NoError(t, err)
serialized, err := json.Marshal(batch)
require.NoError(t, err)
require.JSONEq(t, string(expected), string(serialized))
}
// TestBatchCheck checks for the various failure cases of Batch.Check
// as well as a simple check for a valid batch.
func TestBatchCheck(t *testing.T) {
cases := []struct {
name string
bt BatchTransaction
err error
}{
{
name: "bad-input-count",
bt: BatchTransaction{
Method: ContractMethod{},
InputValues: map[string]string{
"foo": "bar",
},
},
err: errors.New("expected 0 inputs but got 1"),
},
{
name: "bad-calldata-too-small",
bt: BatchTransaction{
Data: []byte{0x01},
},
err: errors.New("must have at least 4 bytes of calldata, got 1"),
},
{
name: "bad-calldata-mismatch",
bt: BatchTransaction{
Data: []byte{0x01, 0x02, 0x03, 0x04},
Method: ContractMethod{
Name: "foo",
},
},
err: errors.New("data does not match signature"),
},
{
name: "good-calldata",
bt: BatchTransaction{
Data: []byte{0xc2, 0x98, 0x55, 0x78},
Method: ContractMethod{
Name: "foo",
},
},
err: nil,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
require.Equal(t, tc.err, tc.bt.Check())
})
}
}
{
"version": "",
"chainId": "",
"createdAt": 0,
"meta": {
"createdFromSafeAddress": "",
"createdFromOwnerAddress": "",
"name": "",
"description": ""
},
"transactions": [
{
"to": "0x0000000000000000000000000000000000000001",
"value": "222",
"data": "0xe9e05c42000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000",
"contractMethod": {
"inputs": [
{
"internalType": "address",
"name": "_to",
"type": "address"
},
{
"internalType": "uint256",
"name": "_value",
"type": "uint256"
},
{
"internalType": "uint64",
"name": "_gasLimit",
"type": "uint64"
},
{
"internalType": "bool",
"name": "_isCreation",
"type": "bool"
},
{
"internalType": "bytes",
"name": "_data",
"type": "bytes"
}
],
"name": "depositTransaction",
"payable": false
},
"contractInputsValues": {
"_data": "0x",
"_gasLimit": "100",
"_isCreation": "false",
"_to": "0x0100000000000000000000000000000000000000",
"_value": "2"
}
}
]
}
{
"version": "",
"chainId": "",
"createdAt": 0,
"meta": {
"createdFromSafeAddress": "",
"createdFromOwnerAddress": "",
"name": "",
"description": ""
},
"transactions": [
{
"to": "0x0000000000000000000000000000000000000001",
"value": "222",
"data": "0x8c3152e900000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000",
"contractMethod": {
"inputs": [
{
"internalType": "TypesWithdrawalTransaction",
"name": "_tx",
"type": "tuple",
"components": [
{
"internalType": "uint256",
"name": "nonce",
"type": "uint256"
},
{
"internalType": "address",
"name": "sender",
"type": "address"
},
{
"internalType": "address",
"name": "target",
"type": "address"
},
{
"internalType": "uint256",
"name": "value",
"type": "uint256"
},
{
"internalType": "uint256",
"name": "gasLimit",
"type": "uint256"
},
{
"internalType": "bytes",
"name": "data",
"type": "bytes"
}
]
}
],
"name": "finalizeWithdrawalTransaction",
"payable": false
},
"contractInputsValues": {
"_tx": "[0,0x0000000000000000000000000000000000000001,0x0000000000000000000000000000000000000002,1,2,0x]"
}
}
]
}
package state_test
import (
crand "crypto/rand"
"math/big"
"math/rand"
"testing"
......@@ -47,7 +48,8 @@ func TestCode(t *testing.T) {
require.Nil(t, pre)
code := make([]byte, rand.Intn(1024))
rand.Read(code)
_, err := crand.Read(code)
require.NoError(t, err)
db.SetCode(addr, code)
......
FROM --platform=$BUILDPLATFORM golang:1.19.0-alpine3.15 as builder
FROM --platform=$BUILDPLATFORM golang:1.20.7-alpine3.18 as builder
ARG VERSION=v0.0.0
......@@ -6,6 +6,8 @@ RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
# build op-challenger with the shared go.mod & go.sum files
COPY ./op-challenger /app/op-challenger
COPY ./op-program /app/op-program
COPY ./op-preimage /app/op-preimage
COPY ./op-bindings /app/op-bindings
COPY ./op-node /app/op-node
COPY ./op-service /app/op-service
......@@ -19,16 +21,25 @@ COPY ./cannon /app/cannon
COPY ./op-preimage /app/op-preimage
COPY ./op-chain-ops /app/op-chain-ops
WORKDIR /app/op-challenger
WORKDIR /app/op-program
RUN go mod download
ARG TARGETOS TARGETARCH
RUN make op-program-host VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH
WORKDIR /app/op-challenger
RUN make op-challenger VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH
FROM alpine:3.15
FROM alpine:3.18
# Make the bundled op-program the default cannon server
ENV OP_CHALLENGER_CANNON_SERVER /usr/local/bin/op-program
COPY --from=builder /app/op-challenger/bin/op-challenger /usr/local/bin
COPY --from=builder /app/op-program/bin/op-program /usr/local/bin
CMD ["op-challenger"]
......@@ -5,13 +5,16 @@ import (
"fmt"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/fault"
"github.com/ethereum-optimism/optimism/op-challenger/game"
"github.com/ethereum/go-ethereum/log"
)
// Main is the programmatic entry-point for running op-challenger
func Main(ctx context.Context, logger log.Logger, cfg *config.Config) error {
service, err := fault.NewService(ctx, logger, cfg)
if err := cfg.Check(); err != nil {
return err
}
service, err := game.NewService(ctx, logger, cfg)
if err != nil {
return fmt.Errorf("failed to create the fault service: %w", err)
}
......
package op_challenger
import (
"context"
"testing"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
func TestMainShouldReturnErrorWhenConfigInvalid(t *testing.T) {
cfg := &config.Config{}
err := Main(context.Background(), testlog.Logger(t, log.LvlInfo), cfg)
require.ErrorIs(t, err, cfg.Check())
}
......@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
......@@ -21,7 +22,7 @@ var (
cannonBin = "./bin/cannon"
cannonServer = "./bin/op-program"
cannonPreState = "./pre.json"
cannonDatadir = "./test_data"
datadir = "./test_data"
cannonL2 = "http://example.com:9545"
alphabetTrace = "abcdefghijz"
agreeWithProposedOutput = "true"
......@@ -44,14 +45,14 @@ func TestLogLevel(t *testing.T) {
func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet))
defaultCfg := config.NewConfig(common.HexToAddress(gameFactoryAddressValue), l1EthRpc, config.TraceTypeAlphabet, true)
defaultCfg := config.NewConfig(common.HexToAddress(gameFactoryAddressValue), l1EthRpc, config.TraceTypeAlphabet, true, datadir)
// Add in the extra CLI options required when using alphabet trace type
defaultCfg.AlphabetTrace = alphabetTrace
require.Equal(t, defaultCfg, cfg)
}
func TestDefaultConfigIsValid(t *testing.T) {
cfg := config.NewConfig(common.HexToAddress(gameFactoryAddressValue), l1EthRpc, config.TraceTypeAlphabet, true)
cfg := config.NewConfig(common.HexToAddress(gameFactoryAddressValue), l1EthRpc, config.TraceTypeAlphabet, true, datadir)
// Add in options that are required based on the specific trace type
// To avoid needing to specify unused options, these aren't included in the params for NewConfig
cfg.AlphabetTrace = alphabetTrace
......@@ -146,6 +147,28 @@ func TestAgreeWithProposedOutput(t *testing.T) {
})
}
func TestMaxConcurrency(t *testing.T) {
t.Run("Valid", func(t *testing.T) {
expected := uint(345)
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--max-concurrency", "345"))
require.Equal(t, expected, cfg.MaxConcurrency)
})
t.Run("Invalid", func(t *testing.T) {
verifyArgsInvalid(
t,
"invalid value \"abc\" for flag -max-concurrency",
addRequiredArgs(config.TraceTypeAlphabet, "--max-concurrency", "abc"))
})
t.Run("Zero", func(t *testing.T) {
verifyArgsInvalid(
t,
"max-concurrency must not be 0",
addRequiredArgs(config.TraceTypeAlphabet, "--max-concurrency", "0"))
})
}
func TestCannonBin(t *testing.T) {
t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) {
configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-bin"))
......@@ -191,18 +214,18 @@ func TestCannonAbsolutePrestate(t *testing.T) {
})
}
func TestCannonDataDir(t *testing.T) {
t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) {
configForArgs(t, addRequiredArgsExcept(config.TraceTypeAlphabet, "--cannon-datadir"))
func TestDataDir(t *testing.T) {
t.Run("RequiredForAlphabetTrace", func(t *testing.T) {
verifyArgsInvalid(t, "flag datadir is required", addRequiredArgsExcept(config.TraceTypeAlphabet, "--datadir"))
})
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag cannon-datadir is required", addRequiredArgsExcept(config.TraceTypeCannon, "--cannon-datadir"))
t.Run("RequiredForCannonTrace", func(t *testing.T) {
verifyArgsInvalid(t, "flag datadir is required", addRequiredArgsExcept(config.TraceTypeCannon, "--datadir"))
})
t.Run("Valid", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeCannon, "--cannon-datadir", "--cannon-datadir=/foo/bar/cannon"))
require.Equal(t, "/foo/bar/cannon", cfg.CannonDatadir)
cfg := configForArgs(t, addRequiredArgsExcept(config.TraceTypeCannon, "--datadir", "--datadir=/foo/bar/cannon"))
require.Equal(t, "/foo/bar/cannon", cfg.Datadir)
})
}
......@@ -233,6 +256,23 @@ func TestCannonSnapshotFreq(t *testing.T) {
})
}
func TestGameWindow(t *testing.T) {
t.Run("UsesDefault", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet))
require.Equal(t, config.DefaultGameWindow, cfg.GameWindow)
})
t.Run("Valid", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--game-window=1m"))
require.Equal(t, time.Duration(time.Minute), cfg.GameWindow)
})
t.Run("ParsesDefault", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--game-window=264h"))
require.Equal(t, config.DefaultGameWindow, cfg.GameWindow)
})
}
func TestRequireEitherCannonNetworkOrRollupAndGenesis(t *testing.T) {
verifyArgsInvalid(
t,
......@@ -335,6 +375,7 @@ func requiredArgs(traceType config.TraceType) map[string]string {
"--l1-eth-rpc": l1EthRpc,
"--game-factory-address": gameFactoryAddressValue,
"--trace-type": traceType.String(),
"--datadir": datadir,
}
switch traceType {
case config.TraceTypeAlphabet:
......@@ -344,7 +385,6 @@ func requiredArgs(traceType config.TraceType) map[string]string {
args["--cannon-bin"] = cannonBin
args["--cannon-server"] = cannonServer
args["--cannon-prestate"] = cannonPreState
args["--cannon-datadir"] = cannonDatadir
args["--cannon-l2"] = cannonL2
}
return args
......
......@@ -3,6 +3,8 @@ package config
import (
"errors"
"fmt"
"runtime"
"time"
"github.com/ethereum/go-ethereum/common"
......@@ -14,7 +16,8 @@ import (
var (
ErrMissingTraceType = errors.New("missing trace type")
ErrMissingCannonDatadir = errors.New("missing cannon datadir")
ErrMissingDatadir = errors.New("missing datadir")
ErrMaxConcurrencyZero = errors.New("max concurrency must not be 0")
ErrMissingCannonL2 = errors.New("missing cannon L2")
ErrMissingCannonBin = errors.New("missing cannon bin")
ErrMissingCannonServer = errors.New("missing cannon server")
......@@ -73,7 +76,14 @@ func ValidTraceType(value TraceType) bool {
return false
}
const DefaultCannonSnapshotFreq = uint(1_000_000_000)
const (
DefaultCannonSnapshotFreq = uint(1_000_000_000)
// DefaultGameWindow is the default maximum time duration in the past
// that the challenger will look for games to progress.
// The default value is 11 days, which is a 4 day resolution buffer
// plus the 7 day game finalization window.
DefaultGameWindow = time.Duration(11 * 24 * time.Hour)
)
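For reference, 11 days is 264 hours, which is why the `ParsesDefault` CLI test above expects `--game-window=264h` to equal `config.DefaultGameWindow`.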
// Config is a well typed config that is parsed from the CLI params.
// This also contains config options for auxiliary services.
......@@ -82,7 +92,10 @@ type Config struct {
L1EthRpc string // L1 RPC Url
GameFactoryAddress common.Address // Address of the dispute game factory
GameAllowlist []common.Address // Allowlist of fault game addresses
GameWindow time.Duration // Maximum time duration to look for games to progress
AgreeWithProposedOutput bool // Temporary config if we agree or disagree with the posted output
Datadir string // Data Directory
MaxConcurrency uint // Maximum number of threads to use when progressing games
TraceType TraceType // Type of trace
......@@ -96,7 +109,6 @@ type Config struct {
CannonNetwork string
CannonRollupConfigPath string
CannonL2GenesisPath string
CannonDatadir string // Cannon Data Directory
CannonL2 string // L2 RPC Url
CannonSnapshotFreq uint // Frequency of snapshots to create when executing cannon (in VM instructions)
......@@ -110,10 +122,12 @@ func NewConfig(
l1EthRpc string,
traceType TraceType,
agreeWithProposedOutput bool,
datadir string,
) Config {
return Config{
L1EthRpc: l1EthRpc,
GameFactoryAddress: gameFactoryAddress,
MaxConcurrency: uint(runtime.NumCPU()),
AgreeWithProposedOutput: agreeWithProposedOutput,
......@@ -123,7 +137,10 @@ func NewConfig(
MetricsConfig: opmetrics.DefaultCLIConfig(),
PprofConfig: oppprof.DefaultCLIConfig(),
Datadir: datadir,
CannonSnapshotFreq: DefaultCannonSnapshotFreq,
GameWindow: DefaultGameWindow,
}
}
......@@ -137,6 +154,12 @@ func (c Config) Check() error {
if c.TraceType == "" {
return ErrMissingTraceType
}
if c.Datadir == "" {
return ErrMissingDatadir
}
if c.MaxConcurrency == 0 {
return ErrMaxConcurrencyZero
}
if c.TraceType == TraceTypeCannon {
if c.CannonBin == "" {
return ErrMissingCannonBin
......@@ -165,9 +188,6 @@ func (c Config) Check() error {
if c.CannonAbsolutePreState == "" {
return ErrMissingCannonAbsolutePreState
}
if c.CannonDatadir == "" {
return ErrMissingCannonDatadir
}
if c.CannonL2 == "" {
return ErrMissingCannonL2
}
......
package config
import (
"runtime"
"testing"
"github.com/ethereum/go-ethereum/common"
......@@ -17,13 +18,13 @@ var (
validCannonOpProgramBin = "./bin/op-program"
validCannonNetwork = "mainnet"
validCannonAbsolutePreState = "pre.json"
validCannonDatadir = "/tmp/cannon"
validDatadir = "/tmp/data"
validCannonL2 = "http://localhost:9545"
agreeWithProposedOutput = true
)
func validConfig(traceType TraceType) Config {
cfg := NewConfig(validGameFactoryAddress, validL1EthRpc, traceType, agreeWithProposedOutput)
cfg := NewConfig(validGameFactoryAddress, validL1EthRpc, traceType, agreeWithProposedOutput, validDatadir)
switch traceType {
case TraceTypeAlphabet:
cfg.AlphabetTrace = validAlphabetTrace
......@@ -31,7 +32,6 @@ func validConfig(traceType TraceType) Config {
cfg.CannonBin = validCannonBin
cfg.CannonServer = validCannonOpProgramBin
cfg.CannonAbsolutePreState = validCannonAbsolutePreState
cfg.CannonDatadir = validCannonDatadir
cfg.CannonL2 = validCannonL2
cfg.CannonNetwork = validCannonNetwork
}
......@@ -99,10 +99,23 @@ func TestCannonAbsolutePreStateRequired(t *testing.T) {
require.ErrorIs(t, config.Check(), ErrMissingCannonAbsolutePreState)
}
func TestCannonDatadirRequired(t *testing.T) {
config := validConfig(TraceTypeCannon)
config.CannonDatadir = ""
require.ErrorIs(t, config.Check(), ErrMissingCannonDatadir)
func TestDatadirRequired(t *testing.T) {
config := validConfig(TraceTypeAlphabet)
config.Datadir = ""
require.ErrorIs(t, config.Check(), ErrMissingDatadir)
}
func TestMaxConcurrency(t *testing.T) {
t.Run("Required", func(t *testing.T) {
config := validConfig(TraceTypeAlphabet)
config.MaxConcurrency = 0
require.ErrorIs(t, config.Check(), ErrMaxConcurrencyZero)
})
t.Run("DefaultToNumberOfCPUs", func(t *testing.T) {
config := validConfig(TraceTypeAlphabet)
require.EqualValues(t, runtime.NumCPU(), config.MaxConcurrency)
})
}
func TestCannonL2Required(t *testing.T) {
......
package fault
import (
"context"
"math/big"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
)
type FaultDisputeGameCaller interface {
Status(opts *bind.CallOpts) (uint8, error)
ClaimDataLen(opts *bind.CallOpts) (*big.Int, error)
}
type FaultCaller struct {
contract FaultDisputeGameCaller
}
func NewFaultCaller(caller FaultDisputeGameCaller) *FaultCaller {
return &FaultCaller{
caller,
}
}
func NewFaultCallerFromBindings(fdgAddr common.Address, client *ethclient.Client) (*FaultCaller, error) {
caller, err := bindings.NewFaultDisputeGameCaller(fdgAddr, client)
if err != nil {
return nil, err
}
return &FaultCaller{
caller,
}, nil
}
// GetGameStatus returns the current game status.
// 0: In Progress
// 1: Challenger Won
// 2: Defender Won
func (fc *FaultCaller) GetGameStatus(ctx context.Context) (types.GameStatus, error) {
status, err := fc.contract.Status(&bind.CallOpts{Context: ctx})
return types.GameStatus(status), err
}
// GetClaimCount returns the number of claims in the game.
func (fc *FaultCaller) GetClaimCount(ctx context.Context) (uint64, error) {
count, err := fc.contract.ClaimDataLen(&bind.CallOpts{Context: ctx})
if err != nil {
return 0, err
}
return count.Uint64(), nil
}
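A brief usage sketch under the definitions above (the RPC endpoint and game address are illustrative placeholders, and `fmt` is assumed imported):

```go
client, err := ethclient.Dial("http://localhost:8545") // placeholder L1 RPC endpoint
if err != nil {
	return err
}
fc, err := NewFaultCallerFromBindings(common.HexToAddress("0x1234"), client) // illustrative address
if err != nil {
	return err
}
status, err := fc.GetGameStatus(ctx) // types.GameStatus per the mapping documented above
if err != nil {
	return err
}
count, err := fc.GetClaimCount(ctx)
if err != nil {
	return err
}
fmt.Println("status:", status, "claims:", count)
```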
package fault
import (
"context"
"errors"
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/stretchr/testify/require"
)
var (
errMock = errors.New("mock error")
)
type mockFaultDisputeGameCaller struct {
status uint8
errStatus bool
claimDataLen *big.Int
errClaimDataLen bool
}
func (m *mockFaultDisputeGameCaller) Status(opts *bind.CallOpts) (uint8, error) {
if m.errStatus {
return 0, errMock
}
return m.status, nil
}
func (m *mockFaultDisputeGameCaller) ClaimDataLen(opts *bind.CallOpts) (*big.Int, error) {
if m.errClaimDataLen {
return nil, errMock
}
return m.claimDataLen, nil
}
func TestFaultCaller_GetGameStatus(t *testing.T) {
tests := []struct {
name string
caller FaultDisputeGameCaller
expectedStatus types.GameStatus
expectedErr error
}{
{
name: "success",
caller: &mockFaultDisputeGameCaller{
status: 1,
},
expectedStatus: types.GameStatusChallengerWon,
expectedErr: nil,
},
{
name: "error",
caller: &mockFaultDisputeGameCaller{
errStatus: true,
},
expectedStatus: 0,
expectedErr: errMock,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fc := NewFaultCaller(test.caller)
status, err := fc.GetGameStatus(context.Background())
require.Equal(t, test.expectedStatus, status)
require.Equal(t, test.expectedErr, err)
})
}
}
func TestFaultCaller_GetClaimCount(t *testing.T) {
tests := []struct {
name string
caller FaultDisputeGameCaller
expectedClaimDataLen uint64
expectedErr error
}{
{
name: "success",
caller: &mockFaultDisputeGameCaller{
claimDataLen: big.NewInt(1),
},
expectedClaimDataLen: 1,
expectedErr: nil,
},
{
name: "error",
caller: &mockFaultDisputeGameCaller{
errClaimDataLen: true,
},
expectedClaimDataLen: 0,
expectedErr: errMock,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fc := NewFaultCaller(test.caller)
claimDataLen, err := fc.GetClaimCount(context.Background())
require.Equal(t, test.expectedClaimDataLen, claimDataLen)
require.Equal(t, test.expectedErr, err)
})
}
}
package fault
import (
"context"
"errors"
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
var (
mockFdgAddress = common.HexToAddress("0x1234")
mockSendError = errors.New("mock send error")
)
type mockTxManager struct {
from common.Address
sends int
calls int
sendFails bool
}
func (m *mockTxManager) Send(ctx context.Context, candidate txmgr.TxCandidate) (*ethtypes.Receipt, error) {
if m.sendFails {
return nil, mockSendError
}
m.sends++
return ethtypes.NewReceipt(
[]byte{},
false,
0,
), nil
}
func (m *mockTxManager) Call(_ context.Context, _ ethereum.CallMsg, _ *big.Int) ([]byte, error) {
if m.sendFails {
return nil, mockSendError
}
m.calls++
return []byte{}, nil
}
func (m *mockTxManager) BlockNumber(ctx context.Context) (uint64, error) {
panic("not implemented")
}
func (m *mockTxManager) From() common.Address {
return m.from
}
func newTestFaultResponder(t *testing.T, sendFails bool) (*faultResponder, *mockTxManager) {
log := testlog.Logger(t, log.LvlError)
mockTxMgr := &mockTxManager{}
mockTxMgr.sendFails = sendFails
responder, err := NewFaultResponder(log, mockTxMgr, mockFdgAddress)
require.NoError(t, err)
return responder, mockTxMgr
}
// TestResponder_CanResolve_CallFails tests that the [Responder.CanResolve]
// method returns false when the underlying [txmgr.Call] fails.
func TestResponder_CanResolve_CallFails(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t, true)
resolved := responder.CanResolve(context.Background())
require.False(t, resolved)
require.Equal(t, 0, mockTxMgr.sends)
}
// TestResponder_CanResolve_Success tests that the [Responder.CanResolve]
// method returns true when the call message is successfully sent through the txmgr.
func TestResponder_CanResolve_Success(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t, false)
resolved := responder.CanResolve(context.Background())
require.True(t, resolved)
require.Equal(t, 1, mockTxMgr.calls)
}
// TestResponder_Resolve_SendFails tests that the [Responder.Resolve] method
// bubbles up the error returned by the [txmgr.Send] method.
func TestResponder_Resolve_SendFails(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t, true)
err := responder.Resolve(context.Background())
require.ErrorIs(t, err, mockSendError)
require.Equal(t, 0, mockTxMgr.sends)
}
// TestResponder_Resolve_Success tests that the [Responder.Resolve] method
// succeeds when the tx candidate is successfully sent through the txmgr.
func TestResponder_Resolve_Success(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t, false)
err := responder.Resolve(context.Background())
require.NoError(t, err)
require.Equal(t, 1, mockTxMgr.sends)
}
// TestResponder_Respond_SendFails tests that the [Responder.Respond] method
// bubbles up the error returned by the [txmgr.Send] method.
func TestResponder_Respond_SendFails(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t, true)
err := responder.Respond(context.Background(), types.Claim{
ClaimData: types.ClaimData{
Value: common.Hash{0x01},
Position: types.NewPositionFromGIndex(2),
},
Parent: types.ClaimData{
Value: common.Hash{0x02},
Position: types.NewPositionFromGIndex(1),
},
ContractIndex: 0,
ParentContractIndex: 0,
})
require.ErrorIs(t, err, mockSendError)
require.Equal(t, 0, mockTxMgr.sends)
}
// TestResponder_Respond_Success tests that the [Responder.Respond] method
// succeeds when the tx candidate is successfully sent through the txmgr.
func TestResponder_Respond_Success(t *testing.T) {
responder, mockTxMgr := newTestFaultResponder(t, false)
err := responder.Respond(context.Background(), types.Claim{
ClaimData: types.ClaimData{
Value: common.Hash{0x01},
Position: types.NewPositionFromGIndex(2),
},
Parent: types.ClaimData{
Value: common.Hash{0x02},
Position: types.NewPositionFromGIndex(1),
},
ContractIndex: 0,
ParentContractIndex: 0,
})
require.NoError(t, err)
require.Equal(t, 1, mockTxMgr.sends)
}
// TestResponder_BuildTx_Attack tests that the [Responder.BuildTx] method
// returns a tx candidate with the correct data for an attack tx.
func TestResponder_BuildTx_Attack(t *testing.T) {
responder, _ := newTestFaultResponder(t, false)
responseClaim := types.Claim{
ClaimData: types.ClaimData{
Value: common.Hash{0x01},
Position: types.NewPositionFromGIndex(2),
},
Parent: types.ClaimData{
Value: common.Hash{0x02},
Position: types.NewPositionFromGIndex(1),
},
ContractIndex: 0,
ParentContractIndex: 7,
}
tx, err := responder.BuildTx(context.Background(), responseClaim)
require.NoError(t, err)
// Pack the tx data manually.
fdgAbi, err := bindings.FaultDisputeGameMetaData.GetAbi()
require.NoError(t, err)
expected, err := fdgAbi.Pack(
"attack",
big.NewInt(int64(7)),
responseClaim.ValueBytes(),
)
require.NoError(t, err)
require.Equal(t, expected, tx)
}
// TestResponder_BuildTx_Defend tests that the [Responder.BuildTx] method
// returns a tx candidate with the correct data for a defend tx.
func TestResponder_BuildTx_Defend(t *testing.T) {
responder, _ := newTestFaultResponder(t, false)
responseClaim := types.Claim{
ClaimData: types.ClaimData{
Value: common.Hash{0x01},
Position: types.NewPositionFromGIndex(3),
},
Parent: types.ClaimData{
Value: common.Hash{0x02},
Position: types.NewPositionFromGIndex(6),
},
ContractIndex: 0,
ParentContractIndex: 7,
}
tx, err := responder.BuildTx(context.Background(), responseClaim)
require.NoError(t, err)
// Pack the tx data manually.
fdgAbi, err := bindings.FaultDisputeGameMetaData.GetAbi()
require.NoError(t, err)
expected, err := fdgAbi.Pack(
"defend",
big.NewInt(int64(7)),
responseClaim.ValueBytes(),
)
require.NoError(t, err)
require.Equal(t, expected, tx)
}
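Both BuildTx tests pin the expected calldata by packing the FaultDisputeGame ABI directly, so the on-wire encoding is easy to reproduce in isolation. A sketch of that packing follows; note that choosing between "attack" and "defend" is left to the caller here, since the responder's position-based dispatch logic is not part of this diff, and packResponse is a hypothetical helper.

```go
package main

import (
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum-optimism/optimism/op-bindings/bindings"
	"github.com/ethereum/go-ethereum/common"
)

// packResponse reproduces the calldata the BuildTx tests expect:
// the parent claim's contract index plus the claimed value.
func packResponse(method string, parentContractIndex int64, value common.Hash) ([]byte, error) {
	fdgAbi, err := bindings.FaultDisputeGameMetaData.GetAbi()
	if err != nil {
		return nil, err
	}
	return fdgAbi.Pack(method, big.NewInt(parentContractIndex), value)
}

func main() {
	// Mirrors TestResponder_BuildTx_Attack: parent contract index 7, value 0x01...
	data, err := packResponse("attack", 7, common.Hash{0x01})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("attack calldata: 0x%x\n", data)
}
```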
package fault
import (
"context"
"fmt"
"testing"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/require"
)
var (
mockTraceProviderError = fmt.Errorf("mock trace provider error")
mockLoaderError = fmt.Errorf("mock loader error")
)
// TestValidateAbsolutePrestate tests that the absolute prestate is validated
// correctly by the service component.
func TestValidateAbsolutePrestate(t *testing.T) {
t.Run("ValidPrestates", func(t *testing.T) {
prestate := []byte{0x00, 0x01, 0x02, 0x03}
prestateHash := crypto.Keccak256(prestate)
mockTraceProvider := newMockTraceProvider(false, prestate)
mockLoader := newMockLoader(false, prestateHash)
err := ValidateAbsolutePrestate(context.Background(), mockTraceProvider, mockLoader)
require.NoError(t, err)
})
t.Run("TraceProviderErrors", func(t *testing.T) {
prestate := []byte{0x00, 0x01, 0x02, 0x03}
mockTraceProvider := newMockTraceProvider(true, prestate)
mockLoader := newMockLoader(false, prestate)
err := ValidateAbsolutePrestate(context.Background(), mockTraceProvider, mockLoader)
require.ErrorIs(t, err, mockTraceProviderError)
})
t.Run("LoaderErrors", func(t *testing.T) {
prestate := []byte{0x00, 0x01, 0x02, 0x03}
mockTraceProvider := newMockTraceProvider(false, prestate)
mockLoader := newMockLoader(true, prestate)
err := ValidateAbsolutePrestate(context.Background(), mockTraceProvider, mockLoader)
require.ErrorIs(t, err, mockLoaderError)
})
t.Run("PrestateMismatch", func(t *testing.T) {
mockTraceProvider := newMockTraceProvider(false, []byte{0x00, 0x01, 0x02, 0x03})
mockLoader := newMockLoader(false, []byte{0x00})
err := ValidateAbsolutePrestate(context.Background(), mockTraceProvider, mockLoader)
require.Error(t, err)
})
}
type mockTraceProvider struct {
prestateErrors bool
prestate []byte
}
func newMockTraceProvider(prestateErrors bool, prestate []byte) *mockTraceProvider {
return &mockTraceProvider{
prestateErrors: prestateErrors,
prestate: prestate,
}
}
func (m *mockTraceProvider) Get(ctx context.Context, i uint64) (common.Hash, error) {
panic("not implemented")
}
func (m *mockTraceProvider) GetStepData(ctx context.Context, i uint64) (prestate []byte, proofData []byte, preimageData *types.PreimageOracleData, err error) {
panic("not implemented")
}
func (m *mockTraceProvider) AbsolutePreState(ctx context.Context) ([]byte, error) {
if m.prestateErrors {
return nil, mockTraceProviderError
}
return m.prestate, nil
}
type mockLoader struct {
prestateError bool
prestate []byte
}
func newMockLoader(prestateError bool, prestate []byte) *mockLoader {
return &mockLoader{
prestateError: prestateError,
prestate: prestate,
}
}
func (m *mockLoader) FetchClaims(ctx context.Context) ([]types.Claim, error) {
panic("not implemented")
}
func (m *mockLoader) FetchGameDepth(ctx context.Context) (uint64, error) {
panic("not implemented")
}
func (m *mockLoader) FetchAbsolutePrestateHash(ctx context.Context) ([]byte, error) {
if m.prestateError {
return nil, mockLoaderError
}
return m.prestate, nil
}
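The body of ValidateAbsolutePrestate itself is in one of the collapsed diffs, but the four test cases above pin its observable behaviour. A plausible sketch consistent with those cases, assuming a plain keccak256 equality check; the narrowed parameter interfaces below are hypothetical and may differ from the real signatures:

```go
package fault

import (
	"bytes"
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

// Hypothetical parameter interfaces, narrowed to just the methods the
// validation needs.
type prestateProvider interface {
	AbsolutePreState(ctx context.Context) ([]byte, error)
}

type prestateLoader interface {
	FetchAbsolutePrestateHash(ctx context.Context) ([]byte, error)
}

// validateAbsolutePrestateSketch mirrors the behaviour pinned by the tests:
// hash the provider's prestate with keccak256 and require it to equal the
// hash fetched from the game contract.
func validateAbsolutePrestateSketch(ctx context.Context, trace prestateProvider, loader prestateLoader) error {
	prestate, err := trace.AbsolutePreState(ctx)
	if err != nil {
		return fmt.Errorf("failed to get absolute prestate from trace provider: %w", err)
	}
	onchainHash, err := loader.FetchAbsolutePrestateHash(ctx)
	if err != nil {
		return fmt.Errorf("failed to fetch on-chain absolute prestate hash: %w", err)
	}
	if providerHash := crypto.Keccak256(prestate); !bytes.Equal(providerHash, onchainHash) {
		return fmt.Errorf("trace provider prestate %x does not match on-chain prestate %x", providerHash, onchainHash)
	}
	return nil
}
```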
......@@ -2,6 +2,7 @@ package flags
import (
"fmt"
"runtime"
"strings"
"github.com/ethereum/go-ethereum/common"
......@@ -57,7 +58,18 @@ var (
Usage: "Temporary hardcoded flag if we agree or disagree with the proposed output.",
EnvVars: prefixEnvVars("AGREE_WITH_PROPOSED_OUTPUT"),
}
DatadirFlag = &cli.StringFlag{
Name: "datadir",
Usage: "Directory to store data generated as part of responding to games",
EnvVars: prefixEnvVars("DATADIR"),
}
// Optional Flags
MaxConcurrencyFlag = &cli.UintFlag{
Name: "max-concurrency",
Usage: "Maximum number of threads to use when progressing games",
EnvVars: prefixEnvVars("MAX_CONCURRENCY"),
Value: uint(runtime.NumCPU()),
}
AlphabetFlag = &cli.StringFlag{
Name: "alphabet",
Usage: "Correct Alphabet Trace (alphabet trace type only)",
......@@ -93,11 +105,6 @@ var (
Usage: "Path to absolute prestate to use when generating trace data (cannon trace type only)",
EnvVars: prefixEnvVars("CANNON_PRESTATE"),
}
CannonDatadirFlag = &cli.StringFlag{
Name: "cannon-datadir",
Usage: "Directory to store data generated by cannon (cannon trace type only)",
EnvVars: prefixEnvVars("CANNON_DATADIR"),
}
CannonL2Flag = &cli.StringFlag{
Name: "cannon-l2",
Usage: "L2 Address of L2 JSON-RPC endpoint to use (eth and debug namespace required) (cannon trace type only)",
......@@ -109,6 +116,12 @@ var (
EnvVars: prefixEnvVars("CANNON_SNAPSHOT_FREQ"),
Value: config.DefaultCannonSnapshotFreq,
}
GameWindowFlag = &cli.DurationFlag{
Name: "game-window",
Usage: "The time window which the challenger will look for games to progress.",
EnvVars: prefixEnvVars("GAME_WINDOW"),
Value: config.DefaultGameWindow,
}
)
// requiredFlags are checked by [CheckRequired]
......@@ -117,10 +130,12 @@ var requiredFlags = []cli.Flag{
FactoryAddressFlag,
TraceTypeFlag,
AgreeWithProposedOutputFlag,
DatadirFlag,
}
// optionalFlags is a list of unchecked cli flags
var optionalFlags = []cli.Flag{
MaxConcurrencyFlag,
AlphabetFlag,
GameAllowlistFlag,
CannonNetworkFlag,
......@@ -129,9 +144,9 @@ var optionalFlags = []cli.Flag{
CannonBinFlag,
CannonServerFlag,
CannonPreStateFlag,
CannonDatadirFlag,
CannonL2Flag,
CannonSnapshotFreqFlag,
GameWindowFlag,
}
func init() {
......@@ -174,9 +189,6 @@ func CheckRequired(ctx *cli.Context) error {
if !ctx.IsSet(CannonPreStateFlag.Name) {
return fmt.Errorf("flag %s is required", CannonPreStateFlag.Name)
}
if !ctx.IsSet(CannonDatadirFlag.Name) {
return fmt.Errorf("flag %s is required", CannonDatadirFlag.Name)
}
if !ctx.IsSet(CannonL2Flag.Name) {
return fmt.Errorf("flag %s is required", CannonL2Flag.Name)
}
......@@ -216,12 +228,18 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) {
traceTypeFlag := config.TraceType(strings.ToLower(ctx.String(TraceTypeFlag.Name)))
maxConcurrency := ctx.Uint(MaxConcurrencyFlag.Name)
if maxConcurrency == 0 {
return nil, fmt.Errorf("%v must not be 0", MaxConcurrencyFlag.Name)
}
return &config.Config{
// Required Flags
L1EthRpc: ctx.String(L1EthRpcFlag.Name),
TraceType: traceTypeFlag,
GameFactoryAddress: gameFactoryAddress,
GameAllowlist: allowedGames,
GameWindow: ctx.Duration(GameWindowFlag.Name),
MaxConcurrency: maxConcurrency,
AlphabetTrace: ctx.String(AlphabetFlag.Name),
CannonNetwork: ctx.String(CannonNetworkFlag.Name),
CannonRollupConfigPath: ctx.String(CannonRollupConfigFlag.Name),
......@@ -229,7 +247,7 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) {
CannonBin: ctx.String(CannonBinFlag.Name),
CannonServer: ctx.String(CannonServerFlag.Name),
CannonAbsolutePreState: ctx.String(CannonPreStateFlag.Name),
CannonDatadir: ctx.String(CannonDatadirFlag.Name),
Datadir: ctx.String(DatadirFlag.Name),
CannonL2: ctx.String(CannonL2Flag.Name),
CannonSnapshotFreq: ctx.Uint(CannonSnapshotFreqFlag.Name),
AgreeWithProposedOutput: ctx.Bool(AgreeWithProposedOutputFlag.Name),
......
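The interaction between MaxConcurrencyFlag's default and the zero-check in NewConfigFromCLI is easy to miss: an unset flag yields runtime.NumCPU(), so only an explicit --max-concurrency=0 is rejected. A standalone sketch of that behaviour using urfave/cli/v2 directly; the OP_CHALLENGER_ env-var prefix is an assumption mirroring prefixEnvVars:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"runtime"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.UintFlag{
				Name:    "max-concurrency",
				Usage:   "Maximum number of threads to use when progressing games",
				EnvVars: []string{"OP_CHALLENGER_MAX_CONCURRENCY"}, // assumed prefix
				Value:   uint(runtime.NumCPU()),                    // default mirrors MaxConcurrencyFlag
			},
		},
		Action: func(ctx *cli.Context) error {
			maxConcurrency := ctx.Uint("max-concurrency")
			if maxConcurrency == 0 {
				// Mirrors the guard in NewConfigFromCLI: an explicit 0 is rejected.
				return fmt.Errorf("max-concurrency must not be 0")
			}
			fmt.Println("max-concurrency:", maxConcurrency)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```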
......@@ -5,7 +5,7 @@ import (
"errors"
"fmt"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum/go-ethereum/common"
)
......
......@@ -5,9 +5,9 @@ import (
"errors"
"testing"
"github.com/ethereum-optimism/optimism/op-challenger/fault/solver"
"github.com/ethereum-optimism/optimism/op-challenger/fault/test"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/solver"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/test"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/stretchr/testify/require"
)
......
......@@ -5,7 +5,7 @@ import (
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
......