Commit 7f941b84 authored by protolambda, committed by GitHub

Interop: local devnet (#11590)

* local interop devnet

* interop-devnet: experimental op-geth docker image, connect with op-supervisor

* interop-devnet: port and path fixes

* interop-devnet: datadir fix

* interop-local: more fixes

* interop-devnet: connect op-supervisor to L2 EL nodes using RPC

* more fixes

* ops-bedrock: fix l2 op geth dockerfile for interop

* interop-devnet: fix supervisor RPC add workaround

* interop-devnet: implement review suggestions

* fixes from run-testing

* Add op-deployer to dockerignore exceptions

* use latest geth rc

* use RW Locks in Update Functions

* add log for new cross-safe head

* make updates much more frequent

* use LocalDB for LastDerivedFrom

* Add log message for finalization update

* op-supervisor: fix db locking, fix crossdb usage

* interop-devnet: use chain IDs as chain indices, since it's not translated everywhere yet

* op-supervisor: cross-derived-from RPC method

* Work Process ErrFuture to Debug Log

---------
Co-authored-by: axelKingsley <axel.kingsley@gmail.com>
parent caf63ce1
#!/bin/bash
set -eu

# Generates the interop devnet chain configurations (L1/L2 genesis, rollup
# configs, deployment addresses, and per-service env files).
# Run this from the interop-devnet directory; the location is verified by
# checking for versions.json in the parent (repo root) directory.
if [ -f "../versions.json" ]; then
  echo "Running create-chains script."
else
  echo "Cannot run create-chains script, must be in interop-devnet dir, but currently in:"
  pwd
  exit 1
fi

# Work from the repository root from here on.
cd ..

# Bail out if the devnet configs were already generated.
if [ -d ".devnet-interop" ]; then
  echo "Already created chains."
  exit 1
else
  echo "Creating new interop devnet chain configs"
fi

export OP_INTEROP_MNEMONIC="test test test test test test test test test test test junk"

# Generate L1 genesis, per-L2 genesis/rollup configs, and deployment address files.
go run ./op-node/cmd interop dev-setup \
  --artifacts-dir=packages/contracts-bedrock/forge-artifacts \
  --foundry-dir=packages/contracts-bedrock \
  --l1.chainid=900100 \
  --l2.chainids=900200,900201 \
  --out-dir=".devnet-interop" \
  --log.format=logfmt \
  --log.level=info

# Create the L1 CL genesis (beacon chain state).
eth2-testnet-genesis deneb \
  --config=./ops-bedrock/beacon-data/config.yaml \
  --preset-phase0=minimal \
  --preset-altair=minimal \
  --preset-bellatrix=minimal \
  --preset-capella=minimal \
  --preset-deneb=minimal \
  --eth1-config=.devnet-interop/genesis/l1/genesis.json \
  --state-output=.devnet-interop/genesis/l1/beaconstate.ssz \
  --tranches-dir=.devnet-interop/genesis/l1/tranches \
  --mnemonics=./ops-bedrock/mnemonics.yaml \
  --eth1-withdrawal-address=0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \
  --eth1-match-genesis-time

echo "Writing env files now..."

# Write env files for each L2 service.
# (This was previously duplicated verbatim per chain; a loop keeps both chains in sync.)
for chain_id in 900200 900201; do
  chain_env=".devnet-interop/env/l2/${chain_id}"
  mkdir -p "$chain_env"
  key_cmd="go run ./op-node/cmd interop devkey secret --domain=chain-operator --chainid=${chain_id}"
  # op-node
  echo "OP_NODE_P2P_SEQUENCER_KEY=$($key_cmd --name=sequencer-p2p)" >> "$chain_env/op-node.env"
  # proposer
  echo "OP_PROPOSER_PRIVATE_KEY=$($key_cmd --name=proposer)" >> "$chain_env/op-proposer.env"
  echo "OP_PROPOSER_GAME_FACTORY_ADDRESS=$(jq -r .DisputeGameFactoryProxy ".devnet-interop/deployments/l2/${chain_id}/addresses.json")" >> "$chain_env/op-proposer.env"
  # batcher
  echo "OP_BATCHER_PRIVATE_KEY=$($key_cmd --name=batcher)" >> "$chain_env/op-batcher.env"
done

echo "Interop devnet setup is complete!"
{
"dependencies": {
"900200": {
"chainIndex": "900200",
"activationTime": 0,
"historyMinTime": 0
},
"900201": {
"chainIndex": "900201",
"activationTime": 0,
"historyMinTime": 0
}
}
}
# This Compose file is expected to be used with the devnet-up.sh script.
# The volumes below mount the configs generated by the script into each
# service.
# Named docker volumes, so chain databases survive container restarts.
volumes:
  l1_data:
  l1_bn_data:
  l1_vc_data:
  l2_a_data:
  safedb_a_data:
  l2_b_data:
  safedb_b_data:
  supervisor_data:
  op_log_a:
  op_log_b:

services:
  # L1 execution-layer client (geth).
  l1:
    build:
      context: ../ops-bedrock
      dockerfile: l1-geth.Dockerfile
    ports:
      - "8545:8545"
      - "8546:8546"
      - "7060:6060"
    volumes:
      - "l1_data:/db"
      - "${PWD}/../.devnet-interop/genesis/l1/genesis.json:/genesis.json"
      - "${PWD}/../ops-bedrock/test-jwt-secret.txt:/config/jwt-secret.txt"
    environment:
      GETH_MINER_RECOMMIT: 100ms

  # L1 consensus-layer beacon node (lighthouse).
  l1-bn:
    depends_on:
      - l1
    build:
      context: ../ops-bedrock
      dockerfile: l1-lighthouse.Dockerfile
    ports:
      - "9000:9000"
      - "5052:5052"
    volumes:
      - "l1_bn_data:/db"
      - "${PWD}/../ops-bedrock/test-jwt-secret.txt:/config/jwt-secret.txt"
      - "${PWD}/../ops-bedrock/beacon-data/config.yaml:/genesis/config.yaml"
      - "${PWD}/../ops-bedrock/beacon-data/deposit_contract_block.txt:/genesis/deposit_contract_block.txt"
      - "${PWD}/../.devnet-interop/genesis/l1/beaconstate.ssz:/genesis/genesis.ssz"
    environment:
      LH_EXECUTION_ENDPOINT: "http://l1:8551"
    entrypoint:
      - "/bin/sh"
      - "/entrypoint-bn.sh"

  # L1 validator client, driving the beacon node.
  l1-vc:
    depends_on:
      - l1
      - l1-bn
    build:
      context: ../ops-bedrock
      dockerfile: l1-lighthouse.Dockerfile
    volumes:
      - "l1_vc_data:/db"
      - "${PWD}/../ops-bedrock/beacon-data/data/keys:/validator_setup/validators"
      - "${PWD}/../ops-bedrock/beacon-data/data/secrets:/validator_setup/secrets"
      - "${PWD}/../ops-bedrock/beacon-data/config.yaml:/genesis/config.yaml"
      - "${PWD}/../ops-bedrock/beacon-data/deposit_contract_block.txt:/genesis/deposit_contract_block.txt"
      - "${PWD}/../.devnet-interop/genesis/l1/beaconstate.ssz:/genesis/genesis.ssz"
    environment:
      LH_BEACON_NODES: "http://l1-bn:5052/"
    entrypoint:
      - "/bin/sh"
      - "/entrypoint-vc.sh"
op-supervisor:
depends_on:
- l1
build:
context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile
target: op-supervisor-target
ports:
- "9045:8545"
volumes:
- "supervisor_data:/db"
- "./depset.json:/depset.json"
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-supervisor:devnet
command: >
op-supervisor
--datadir="/db"
--dependency-set="/depset.json"
--l2-rpcs=""
--rpc.addr="0.0.0.0"
--rpc.port=8545
--rpc.enable-admin
--l2-rpcs="ws://l2-a:8546,ws://l2-b:8546"
l2-a:
depends_on:
- op-supervisor
build:
context: ../ops-bedrock/
dockerfile: l2-op-geth-interop.Dockerfile
ports:
- "9145:8545"
- "8160:6060"
volumes:
- "l2_a_data:/db"
- "${PWD}/../.devnet-interop/genesis/l2/900200/genesis.json:/genesis.json"
- "${PWD}/../ops-bedrock/test-jwt-secret.txt:/config/jwt-secret.txt"
entrypoint: # pass the L2 specific flags by overriding the entry-point and adding extra arguments
- "/bin/sh"
- "/entrypoint.sh"
environment:
GETH_MINER_RECOMMIT: 100ms
GETH_ROLLUP_INTEROPRPC: "ws://op-supervisor:8545"
l2-b:
depends_on:
- op-supervisor
build:
context: ../ops-bedrock/
dockerfile: l2-op-geth-interop.Dockerfile
ports:
- "9245:8545"
- "8260:6060"
volumes:
- "l2_b_data:/db"
- "${PWD}/../.devnet-interop/genesis/l2/900201/genesis.json:/genesis.json"
- "${PWD}/../ops-bedrock/test-jwt-secret.txt:/config/jwt-secret.txt"
entrypoint: # pass the L2 specific flags by overriding the entry-point and adding extra arguments
- "/bin/sh"
- "/entrypoint.sh"
environment:
GETH_MINER_RECOMMIT: 100ms
GETH_ROLLUP_INTEROPRPC: "ws://op-supervisor:8545"
op-node-a:
depends_on:
- l1
- l1-bn
- l1-vc
- l2-a
- op-supervisor
build:
context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile
target: op-node-target
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:devnet
command: >
op-node
--l1=ws://l1:8546
--l1.beacon=http://l1-bn:5052
--l1.epoch-poll-interval=12s
--l1.http-poll-interval=6s
--l2=http://l2-a:8551
--l2.jwt-secret=/config/jwt-secret.txt
--supervisor=http://op-supervisor:8545
--sequencer.enabled
--sequencer.l1-confs=0
--verifier.l1-confs=0
--rollup.config=/rollup.json
--rpc.addr=0.0.0.0
--rpc.port=8545
--p2p.listen.ip=0.0.0.0
--p2p.listen.tcp=9003
--p2p.listen.udp=9003
--p2p.scoring.peers=light
--p2p.ban.peers=true
--metrics.enabled
--metrics.addr=0.0.0.0
--metrics.port=7300
--pprof.enabled
--rpc.enable-admin
--safedb.path=/db
ports:
- "7145:8545"
- "9103:9003"
- "7100:7300"
- "6160:6060"
volumes:
- "safedb_a_data:/db"
- "${PWD}/../ops-bedrock/test-jwt-secret.txt:/config/jwt-secret.txt"
- "${PWD}/../.devnet-interop/genesis/l2/900200/rollup.json:/rollup.json"
- op_log_a:/op_log
env_file:
- "${PWD}/../.devnet-interop/env/l2/900200/op-node.env"
op-node-b:
depends_on:
- l1
- l1-bn
- l1-vc
- l2-b
- op-supervisor
build:
context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile
target: op-node-target
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:devnet
command: >
op-node
--l1=ws://l1:8546
--l1.beacon=http://l1-bn:5052
--l1.epoch-poll-interval=12s
--l1.http-poll-interval=6s
--l2=http://l2-b:8551
--l2.jwt-secret=/config/jwt-secret.txt
--supervisor=http://op-supervisor:8545
--sequencer.enabled
--sequencer.l1-confs=0
--verifier.l1-confs=0
--rollup.config=/rollup.json
--rpc.addr=0.0.0.0
--rpc.port=8545
--p2p.listen.ip=0.0.0.0
--p2p.listen.tcp=9003
--p2p.listen.udp=9003
--p2p.scoring.peers=light
--p2p.ban.peers=true
--metrics.enabled
--metrics.addr=0.0.0.0
--metrics.port=7300
--pprof.enabled
--rpc.enable-admin
--safedb.path=/db
ports:
- "7245:8545"
- "9203:9003"
- "7200:7300"
- "6260:6060"
volumes:
- "safedb_b_data:/db"
- "${PWD}/../ops-bedrock/test-jwt-secret.txt:/config/jwt-secret.txt"
- "${PWD}/../.devnet-interop/genesis/l2/900201/rollup.json:/rollup.json"
- op_log_b:/op_log
env_file:
- "${PWD}/../.devnet-interop/env/l2/900201/op-node.env"
op-proposer-a:
depends_on:
- l1
- op-node-a
build:
context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile
target: op-proposer-target
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-proposer:devnet
ports:
- "6162:6060"
- "7102:7300"
- "6146:8545"
environment:
OP_PROPOSER_L1_ETH_RPC: http://l1:8545
OP_PROPOSER_ROLLUP_RPC: http://op-node-a:8545
OP_PROPOSER_POLL_INTERVAL: 1s
OP_PROPOSER_NUM_CONFIRMATIONS: 1
OP_PROPOSER_GAME_TYPE: "254"
OP_PROPOSER_PROPOSAL_INTERVAL: "12s"
OP_PROPOSER_PPROF_ENABLED: "true"
OP_PROPOSER_METRICS_ENABLED: "true"
OP_PROPOSER_ALLOW_NON_FINALIZED: "true"
OP_PROPOSER_RPC_ENABLE_ADMIN: "true"
env_file:
- "${PWD}/../.devnet-interop/env/l2/900200/op-proposer.env"
op-proposer-b:
depends_on:
- l1
- op-node-b
build:
context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile
target: op-proposer-target
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-proposer:devnet
ports:
- "6262:6060"
- "7202:7300"
- "6246:8545"
environment:
OP_PROPOSER_L1_ETH_RPC: http://l1:8545
OP_PROPOSER_ROLLUP_RPC: http://op-node-b:8545
OP_PROPOSER_POLL_INTERVAL: 1s
OP_PROPOSER_NUM_CONFIRMATIONS: 1
OP_PROPOSER_GAME_TYPE: "254"
OP_PROPOSER_PROPOSAL_INTERVAL: "12s"
OP_PROPOSER_PPROF_ENABLED: "true"
OP_PROPOSER_METRICS_ENABLED: "true"
OP_PROPOSER_ALLOW_NON_FINALIZED: "true"
OP_PROPOSER_RPC_ENABLE_ADMIN: "true"
env_file:
- "${PWD}/../.devnet-interop/env/l2/900201/op-proposer.env"
op-batcher-a:
depends_on:
- l1
- l2-a
- op-node-a
build:
context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile
target: op-batcher-target
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-batcher:devnet
ports:
- "6161:6060"
- "7101:7300"
- "6145:8545"
environment:
OP_BATCHER_L1_ETH_RPC: http://l1:8545
OP_BATCHER_L2_ETH_RPC: http://l2-a:8545
OP_BATCHER_ROLLUP_RPC: http://op-node-a:8545
OP_BATCHER_MAX_CHANNEL_DURATION: 2
OP_BATCHER_SUB_SAFETY_MARGIN: 4 # SWS is 15, ChannelTimeout is 40
OP_BATCHER_POLL_INTERVAL: 1s
OP_BATCHER_NUM_CONFIRMATIONS: 1
OP_BATCHER_PPROF_ENABLED: "true"
OP_BATCHER_METRICS_ENABLED: "true"
OP_BATCHER_RPC_ENABLE_ADMIN: "true"
OP_BATCHER_BATCH_TYPE:
# uncomment to use blobs
# OP_BATCHER_DATA_AVAILABILITY_TYPE: blobs
env_file:
- "${PWD}/../.devnet-interop/env/l2/900200/op-batcher.env"
op-batcher-b:
depends_on:
- l1
- l2-b
- op-node-b
build:
context: ../
dockerfile: ops/docker/op-stack-go/Dockerfile
target: op-batcher-target
image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-batcher:devnet
ports:
- "6261:6060"
- "7201:7300"
- "6245:8545"
environment:
OP_BATCHER_L1_ETH_RPC: http://l1:8545
OP_BATCHER_L2_ETH_RPC: http://l2-b:8545
OP_BATCHER_ROLLUP_RPC: http://op-node-b:8545
OP_BATCHER_MAX_CHANNEL_DURATION: 2
OP_BATCHER_SUB_SAFETY_MARGIN: 4 # SWS is 15, ChannelTimeout is 40
OP_BATCHER_POLL_INTERVAL: 1s
OP_BATCHER_NUM_CONFIRMATIONS: 1
OP_BATCHER_PPROF_ENABLED: "true"
OP_BATCHER_METRICS_ENABLED: "true"
OP_BATCHER_RPC_ENABLE_ADMIN: "true"
OP_BATCHER_BATCH_TYPE:
# uncomment to use blobs
# OP_BATCHER_DATA_AVAILABILITY_TYPE: blobs
env_file:
- "${PWD}/../.devnet-interop/env/l2/900201/op-batcher.env"
# Generate the interop devnet chain configs (genesis files, rollup configs, env files).
devnet-setup:
    bash create-chains.sh

# Build the docker images for all interop devnet services.
# NOTE(review): `{git_commit}` / `{git_date}` appear with single braces here; `just`
# interpolation is normally `{{...}}` — confirm a brace pair was not lost in transit.
devnet-build-images:
    PWD="$(pwd)" DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 \
    docker compose build --progress plain \
    --build-arg GIT_COMMIT={git_commit} \
    --build-arg GIT_DATE={git_date}

# Bring up the L1 stack first, then the interop L2 services
# (the L2 ELs start implicitly via the op-node depends_on chain).
devnet-up:
    docker compose up -d l1 l1-bn l1-vc
    docker compose up -d \
    op-supervisor \
    op-node-a op-batcher-a op-proposer-a \
    op-node-b op-batcher-b op-proposer-b

devnet-down:
    # stops services, does not remove containers/networks
    docker compose stop

# Remove generated configs, then containers/networks, then images and volumes.
devnet-clean:
    rm -rf ../.devnet-interop
    # Stops services, and removes containers/networks
    docker compose down
    # Now manually clean up the related images and volumes
    # Note: `justfile` interprets the curly brackets. So we escape them, by wrapping it with more, as a string, like Jinja2.
    docker image ls 'interop-devnet*' --format='{{ '{{.Repository}}' }}' | xargs -r docker rmi
    docker volume ls --filter name=interop-devnet --format='{{ '{{.Name}}' }}' | xargs -r docker volume rm

# Tail the logs of all running devnet services.
devnet-logs:
    docker compose logs -f
...@@ -93,6 +93,21 @@ func (role SuperchainOperatorRole) Key(chainID *big.Int) Key { ...@@ -93,6 +93,21 @@ func (role SuperchainOperatorRole) Key(chainID *big.Int) Key {
} }
} }
// UnmarshalText parses a role name into a SuperchainOperatorRole by scanning
// the enum values and comparing their String() representation to the input.
// Returns an error if no role matches.
func (role *SuperchainOperatorRole) UnmarshalText(data []byte) error {
	v := string(data)
	// 20 is assumed to be a safe upper bound on the number of defined roles — TODO confirm.
	for i := SuperchainOperatorRole(0); i < 20; i++ {
		if i.String() == v {
			*role = i
			return nil
		}
	}
	return fmt.Errorf("unknown superchain operator role %q", v)
}
// MarshalText encodes the role as its String() name. It never returns an error.
func (role *SuperchainOperatorRole) MarshalText() ([]byte, error) {
	return []byte(role.String()), nil
}
// SuperchainOperatorKey is an account specific to an OperationRole of a given OP-Stack chain. // SuperchainOperatorKey is an account specific to an OperationRole of a given OP-Stack chain.
type SuperchainOperatorKey struct { type SuperchainOperatorKey struct {
ChainID *big.Int ChainID *big.Int
...@@ -181,6 +196,21 @@ func (role ChainOperatorRole) Key(chainID *big.Int) Key { ...@@ -181,6 +196,21 @@ func (role ChainOperatorRole) Key(chainID *big.Int) Key {
} }
} }
// UnmarshalText parses a role name into a ChainOperatorRole by scanning the
// enum values and comparing their String() representation to the input.
// Returns an error if no role matches.
func (role *ChainOperatorRole) UnmarshalText(data []byte) error {
	v := string(data)
	// 20 is assumed to be a safe upper bound on the number of defined roles — TODO confirm.
	for i := ChainOperatorRole(0); i < 20; i++ {
		if i.String() == v {
			*role = i
			return nil
		}
	}
	return fmt.Errorf("unknown chain operator role %q", v)
}
// MarshalText encodes the role as its String() name. It never returns an error.
func (role *ChainOperatorRole) MarshalText() ([]byte, error) {
	return []byte(role.String()), nil
}
// ChainOperatorKey is an account specific to an OperationRole of a given OP-Stack chain. // ChainOperatorKey is an account specific to an OperationRole of a given OP-Stack chain.
type ChainOperatorKey struct { type ChainOperatorKey struct {
ChainID *big.Int ChainID *big.Int
......
package interop
import (
"fmt"
"math/big"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-chain-ops/devkeys"
"github.com/ethereum-optimism/optimism/op-chain-ops/foundry"
"github.com/ethereum-optimism/optimism/op-chain-ops/interopgen"
op_service "github.com/ethereum-optimism/optimism/op-service"
"github.com/ethereum-optimism/optimism/op-service/cliapp"
"github.com/ethereum-optimism/optimism/op-service/ioutil"
"github.com/ethereum-optimism/optimism/op-service/jsonutil"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/crypto"
)
// EnvPrefix is the prefix for all environment variables read by the interop CLI.
var EnvPrefix = "OP_INTEROP"

var (
	// l1ChainIDFlag selects the chain ID of the generated L1 chain.
	l1ChainIDFlag = &cli.Uint64Flag{
		Name:    "l1.chainid",
		Value:   900100,
		EnvVars: op_service.PrefixEnvVar(EnvPrefix, "L1_CHAINID"),
	}
	// l2ChainIDsFlag selects the chain IDs of the generated L2 chains.
	l2ChainIDsFlag = &cli.Uint64SliceFlag{
		Name:    "l2.chainids",
		Value:   cli.NewUint64Slice(900200, 900201),
		EnvVars: op_service.PrefixEnvVar(EnvPrefix, "L2_CHAINIDS"),
	}
	// timestampFlag sets the genesis timestamp; 0 means "now + 5 seconds".
	timestampFlag = &cli.Uint64Flag{
		Name:    "timestamp",
		Value:   0,
		EnvVars: op_service.PrefixEnvVar(EnvPrefix, "TIMESTAMP"),
		Usage:   "Will use current timestamp, plus 5 seconds, if not set",
	}
	// artifactsDirFlag points at the forge build artifacts used for contract deployment.
	artifactsDirFlag = &cli.StringFlag{
		Name:    "artifacts-dir",
		Value:   "packages/contracts-bedrock/forge-artifacts",
		EnvVars: op_service.PrefixEnvVar(EnvPrefix, "ARTIFACTS_DIR"),
	}
	// foundryDirFlag points at the foundry project, used for optional source maps.
	foundryDirFlag = &cli.StringFlag{
		Name:    "foundry-dir",
		Value:   "packages/contracts-bedrock",
		EnvVars: op_service.PrefixEnvVar(EnvPrefix, "FOUNDRY_DIR"),
		Usage:   "Optional, for source-map info during genesis generation",
	}
	// outDirFlag is the directory all generated configs are written into.
	outDirFlag = &cli.StringFlag{
		Name:    "out-dir",
		Value:   ".interop-devnet",
		EnvVars: op_service.PrefixEnvVar(EnvPrefix, "OUT_DIR"),
	}
	// mnemonicFlag is used in both dev-setup and devkey commands.
	mnemonicFlag = &cli.StringFlag{
		Name:    "mnemonic",
		Value:   devkeys.TestMnemonic,
		EnvVars: op_service.PrefixEnvVar(EnvPrefix, "MNEMONIC"),
	}
	// devkeyDomainFlag (devkey command): key domain, e.g. "user", "chain-operator", "superchain-operator".
	devkeyDomainFlag = &cli.StringFlag{
		Name:    "domain",
		Value:   "chain-operator",
		EnvVars: op_service.PrefixEnvVar(EnvPrefix, "DEVKEY_DOMAIN"),
	}
	// devkeyChainIdFlag (devkey command): chain ID the key is scoped to.
	devkeyChainIdFlag = &cli.Uint64Flag{
		Name:    "chainid",
		Value:   0,
		EnvVars: op_service.PrefixEnvVar(EnvPrefix, "DEVKEY_CHAINID"),
	}
	// devkeyNameFlag (devkey command): role name or user index of the key.
	devkeyNameFlag = &cli.StringFlag{
		Name:    "name",
		EnvVars: op_service.PrefixEnvVar(EnvPrefix, "DEVKEY_NAME"),
	}
)
// InteropDevSetup generates the interop devnet configuration: it builds
// deploy configs from the recipe, deploys the contracts, and writes the
// deployment addresses, genesis files, and rollup configs to the out-dir.
var InteropDevSetup = &cli.Command{
	Name:  "dev-setup",
	Usage: "Generate devnet genesis configs with one L1 and multiple L2s",
	Flags: cliapp.ProtectFlags(append([]cli.Flag{
		l1ChainIDFlag,
		l2ChainIDsFlag,
		timestampFlag,
		mnemonicFlag,
		artifactsDirFlag,
		foundryDirFlag,
		outDirFlag,
	}, oplog.CLIFlags(EnvPrefix)...)),
	Action: func(cliCtx *cli.Context) error {
		logCfg := oplog.ReadCLIConfig(cliCtx)
		logger := oplog.NewLogger(cliCtx.App.Writer, logCfg)
		recipe := &interopgen.InteropDevRecipe{
			L1ChainID:        cliCtx.Uint64(l1ChainIDFlag.Name),
			L2ChainIDs:       cliCtx.Uint64Slice(l2ChainIDsFlag.Name),
			GenesisTimestamp: cliCtx.Uint64(timestampFlag.Name),
		}
		// Default genesis time: shortly in the future (see timestampFlag usage text).
		if recipe.GenesisTimestamp == 0 {
			recipe.GenesisTimestamp = uint64(time.Now().Unix() + 5)
		}
		mnemonic := strings.TrimSpace(cliCtx.String(mnemonicFlag.Name))
		if mnemonic == devkeys.TestMnemonic {
			logger.Warn("Using default test mnemonic!")
		}
		keys, err := devkeys.NewMnemonicDevKeys(mnemonic)
		if err != nil {
			return fmt.Errorf("failed to setup dev keys from mnemonic: %w", err)
		}
		worldCfg, err := recipe.Build(keys)
		if err != nil {
			return fmt.Errorf("failed to build deploy configs from interop recipe: %w", err)
		}
		if err := worldCfg.Check(logger); err != nil {
			return fmt.Errorf("invalid deploy configs: %w", err)
		}
		artifactsDir := cliCtx.String(artifactsDirFlag.Name)
		af := foundry.OpenArtifactsDir(artifactsDir)
		// Source maps are optional; only loaded when the foundry dir is set explicitly.
		var srcFs *foundry.SourceMapFS
		if cliCtx.IsSet(foundryDirFlag.Name) {
			srcDir := cliCtx.String(foundryDirFlag.Name)
			srcFs = foundry.NewSourceMapFS(os.DirFS(srcDir))
		}
		worldDeployment, worldOutput, err := interopgen.Deploy(logger, af, srcFs, worldCfg)
		if err != nil {
			return fmt.Errorf("failed to deploy interop dev setup: %w", err)
		}
		outDir := cliCtx.String(outDirFlag.Name)
		// Write deployments: contract addresses for L1, superchain, and each L2.
		{
			deploymentsDir := filepath.Join(outDir, "deployments")
			l1Dir := filepath.Join(deploymentsDir, "l1")
			if err := writeJson(filepath.Join(l1Dir, "common.json"), worldDeployment.L1); err != nil {
				return fmt.Errorf("failed to write L1 deployment data: %w", err)
			}
			if err := writeJson(filepath.Join(l1Dir, "superchain.json"), worldDeployment.Superchain); err != nil {
				return fmt.Errorf("failed to write Superchain deployment data: %w", err)
			}
			l2sDir := filepath.Join(deploymentsDir, "l2")
			for id, dep := range worldDeployment.L2s {
				l2Dir := filepath.Join(l2sDir, id)
				if err := writeJson(filepath.Join(l2Dir, "addresses.json"), dep); err != nil {
					return fmt.Errorf("failed to write L2 %s deployment data: %w", id, err)
				}
			}
		}
		// Write genesis: L1 genesis, and per-L2 genesis + rollup config.
		{
			genesisDir := filepath.Join(outDir, "genesis")
			l1Dir := filepath.Join(genesisDir, "l1")
			if err := writeJson(filepath.Join(l1Dir, "genesis.json"), worldOutput.L1.Genesis); err != nil {
				return fmt.Errorf("failed to write L1 genesis data: %w", err)
			}
			l2sDir := filepath.Join(genesisDir, "l2")
			for id, dep := range worldOutput.L2s {
				l2Dir := filepath.Join(l2sDir, id)
				if err := writeJson(filepath.Join(l2Dir, "genesis.json"), dep.Genesis); err != nil {
					return fmt.Errorf("failed to write L2 %s genesis config: %w", id, err)
				}
				if err := writeJson(filepath.Join(l2Dir, "rollup.json"), dep.RollupCfg); err != nil {
					return fmt.Errorf("failed to write L2 %s rollup config: %w", id, err)
				}
			}
		}
		return nil
	},
}
// writeJson writes content as JSON to path, creating parent directories as needed.
// NOTE(review): 0o755 also sets execute bits on the written JSON file itself —
// presumably only the directory permissions matter here; confirm whether 0o644 was intended.
func writeJson(path string, content any) error {
	return jsonutil.WriteJSON[any](content, ioutil.ToBasicFile(path, 0o755))
}
// DevKeySecretCmd prints the hex-encoded private key of the devkey identified
// by (domain, chainid, name), derived deterministically from the mnemonic.
var DevKeySecretCmd = &cli.Command{
	Name:  "secret",
	Usage: "Retrieve devkey secret, by specifying domain, chain ID, name.",
	Flags: cliapp.ProtectFlags([]cli.Flag{
		mnemonicFlag,
		devkeyDomainFlag,
		devkeyChainIdFlag,
		devkeyNameFlag,
	}),
	Action: func(context *cli.Context) error {
		mnemonic := context.String(mnemonicFlag.Name)
		domain := context.String(devkeyDomainFlag.Name)
		chainID := context.Uint64(devkeyChainIdFlag.Name)
		chainIDBig := new(big.Int).SetUint64(chainID)
		name := context.String(devkeyNameFlag.Name)
		k, err := parseKey(domain, chainIDBig, name)
		if err != nil {
			return err
		}
		mnemonicKeys, err := devkeys.NewMnemonicDevKeys(mnemonic)
		if err != nil {
			return err
		}
		secret, err := mnemonicKeys.Secret(k)
		if err != nil {
			return err
		}
		secretBin := crypto.FromECDSA(secret)
		// Hex output without 0x prefix or trailing newline, for easy shell capture.
		_, err = fmt.Fprintf(context.App.Writer, "%x", secretBin)
		if err != nil {
			return fmt.Errorf("failed to output secret key: %w", err)
		}
		return nil
	},
}
// DevKeyAddressCmd prints the address of the devkey identified by
// (domain, chainid, name), derived deterministically from the mnemonic.
var DevKeyAddressCmd = &cli.Command{
	Name:  "address",
	Usage: "Retrieve devkey address, by specifying domain, chain ID, name.",
	Flags: cliapp.ProtectFlags([]cli.Flag{
		mnemonicFlag,
		devkeyDomainFlag,
		devkeyChainIdFlag,
		devkeyNameFlag,
	}),
	Action: func(context *cli.Context) error {
		mnemonic := context.String(mnemonicFlag.Name)
		domain := context.String(devkeyDomainFlag.Name)
		chainID := context.Uint64(devkeyChainIdFlag.Name)
		chainIDBig := new(big.Int).SetUint64(chainID)
		name := context.String(devkeyNameFlag.Name)
		k, err := parseKey(domain, chainIDBig, name)
		if err != nil {
			return err
		}
		mnemonicKeys, err := devkeys.NewMnemonicDevKeys(mnemonic)
		if err != nil {
			return err
		}
		addr, err := mnemonicKeys.Address(k)
		if err != nil {
			return err
		}
		// Address output without trailing newline, for easy shell capture.
		_, err = fmt.Fprintf(context.App.Writer, "%s", addr)
		if err != nil {
			return fmt.Errorf("failed to output address: %w", err)
		}
		return nil
	},
}
// DevKeyCmd groups the devkey sub-commands: secret and address retrieval.
var DevKeyCmd = &cli.Command{
	Name:  "devkey",
	Usage: "Retrieve devkey secret or address",
	Subcommands: cli.Commands{
		DevKeySecretCmd,
		DevKeyAddressCmd,
	},
}
// parseKey maps a (domain, chainID, name) triple to a devkeys.Key.
// Supported domains: "user" (name is a decimal user index),
// "chain-operator" and "superchain-operator" (name is a role name, parsed
// via the role's UnmarshalText). Returns an error for unknown domains or
// unparseable names.
func parseKey(domain string, chainID *big.Int, name string) (devkeys.Key, error) {
	switch domain {
	case "user":
		index, err := strconv.ParseUint(name, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("failed to parse user index: %w", err)
		}
		return devkeys.ChainUserKey{
			ChainID: chainID,
			Index:   index,
		}, nil
	case "chain-operator":
		var role devkeys.ChainOperatorRole
		if err := role.UnmarshalText([]byte(name)); err != nil {
			return nil, fmt.Errorf("failed to parse chain operator role: %w", err)
		}
		return devkeys.ChainOperatorKey{
			ChainID: chainID,
			Role:    role,
		}, nil
	case "superchain-operator":
		var role devkeys.SuperchainOperatorRole
		if err := role.UnmarshalText([]byte(name)); err != nil {
			// Fixed copy-paste: this previously said "chain operator role".
			return nil, fmt.Errorf("failed to parse superchain operator role: %w", err)
		}
		return devkeys.SuperchainOperatorKey{
			ChainID: chainID,
			Role:    role,
		}, nil
	default:
		return nil, fmt.Errorf("unknown devkey domain %q", domain)
	}
}
// InteropCmd is the top-level "interop" CLI command, grouping the
// dev-setup and devkey tools.
var InteropCmd = &cli.Command{
	Name:  "interop",
	Usage: "Experimental tools for OP-Stack interop networks.",
	Subcommands: cli.Commands{
		InteropDevSetup,
		DevKeyCmd,
	},
}
...@@ -12,6 +12,7 @@ import ( ...@@ -12,6 +12,7 @@ import (
opnode "github.com/ethereum-optimism/optimism/op-node" opnode "github.com/ethereum-optimism/optimism/op-node"
"github.com/ethereum-optimism/optimism/op-node/chaincfg" "github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/cmd/genesis" "github.com/ethereum-optimism/optimism/op-node/cmd/genesis"
"github.com/ethereum-optimism/optimism/op-node/cmd/interop"
"github.com/ethereum-optimism/optimism/op-node/cmd/networks" "github.com/ethereum-optimism/optimism/op-node/cmd/networks"
"github.com/ethereum-optimism/optimism/op-node/cmd/p2p" "github.com/ethereum-optimism/optimism/op-node/cmd/p2p"
"github.com/ethereum-optimism/optimism/op-node/flags" "github.com/ethereum-optimism/optimism/op-node/flags"
...@@ -62,6 +63,7 @@ func main() { ...@@ -62,6 +63,7 @@ func main() {
Name: "networks", Name: "networks",
Subcommands: networks.Subcommands, Subcommands: networks.Subcommands,
}, },
interop.InteropCmd,
} }
ctx := ctxinterrupt.WithSignalWaiterMain(context.Background()) ctx := ctxinterrupt.WithSignalWaiterMain(context.Background())
......
...@@ -73,6 +73,7 @@ var ( ...@@ -73,6 +73,7 @@ var (
EnvVars: prefixEnvVars("L1_BEACON"), EnvVars: prefixEnvVars("L1_BEACON"),
Category: RollupCategory, Category: RollupCategory,
} }
/* Optional Flags */
SupervisorAddr = &cli.StringFlag{ SupervisorAddr = &cli.StringFlag{
Name: "supervisor", Name: "supervisor",
Usage: "RPC address of interop supervisor service for cross-chain safety verification." + Usage: "RPC address of interop supervisor service for cross-chain safety verification." +
...@@ -80,7 +81,6 @@ var ( ...@@ -80,7 +81,6 @@ var (
Hidden: true, // hidden for now during early testing. Hidden: true, // hidden for now during early testing.
EnvVars: prefixEnvVars("SUPERVISOR"), EnvVars: prefixEnvVars("SUPERVISOR"),
} }
/* Optional Flags */
BeaconHeader = &cli.StringFlag{ BeaconHeader = &cli.StringFlag{
Name: "l1.beacon-header", Name: "l1.beacon-header",
Usage: "Optional HTTP header to add to all requests to the L1 Beacon endpoint. Format: 'X-Key: Value'", Usage: "Optional HTTP header to add to all requests to the L1 Beacon endpoint. Format: 'X-Key: Value'",
......
...@@ -27,7 +27,7 @@ type InteropBackend interface { ...@@ -27,7 +27,7 @@ type InteropBackend interface {
SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error)
Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error)
DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error)
UpdateLocalUnsafe(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error UpdateLocalUnsafe(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error
UpdateLocalSafe(ctx context.Context, chainID types.ChainID, derivedFrom eth.L1BlockRef, lastDerived eth.BlockRef) error UpdateLocalSafe(ctx context.Context, chainID types.ChainID, derivedFrom eth.L1BlockRef, lastDerived eth.BlockRef) error
...@@ -232,10 +232,11 @@ func (d *InteropDeriver) onCrossSafeUpdateEvent(x engine.CrossSafeUpdateEvent) e ...@@ -232,10 +232,11 @@ func (d *InteropDeriver) onCrossSafeUpdateEvent(x engine.CrossSafeUpdateEvent) e
Hash: result.Cross.Hash, Hash: result.Cross.Hash,
Number: result.Cross.Number, Number: result.Cross.Number,
} }
derivedFrom, err := d.backend.DerivedFrom(ctx, d.chainID, derived) derivedFrom, err := d.backend.CrossDerivedFrom(ctx, d.chainID, derived)
if err != nil { if err != nil {
return fmt.Errorf("failed to get derived-from of %s: %w", result.Cross, err) return fmt.Errorf("failed to get derived-from of %s: %w", result.Cross, err)
} }
d.log.Info("New cross-safe block", "block", result.Cross.Number)
ref, err := d.l2.L2BlockRefByHash(ctx, result.Cross.Hash) ref, err := d.l2.L2BlockRefByHash(ctx, result.Cross.Hash)
if err != nil { if err != nil {
return fmt.Errorf("failed to get block ref of %s: %w", result.Cross, err) return fmt.Errorf("failed to get block ref of %s: %w", result.Cross, err)
...@@ -272,6 +273,7 @@ func (d *InteropDeriver) onFinalizedUpdate(x engine.FinalizedUpdateEvent) error ...@@ -272,6 +273,7 @@ func (d *InteropDeriver) onFinalizedUpdate(x engine.FinalizedUpdateEvent) error
if err != nil { if err != nil {
return fmt.Errorf("failed to get block ref of %s: %w", finalized, err) return fmt.Errorf("failed to get block ref of %s: %w", finalized, err)
} }
d.log.Info("New finalized block from supervisor", "block", finalized.Number)
d.emitter.Emit(engine.PromoteFinalizedEvent{ d.emitter.Emit(engine.PromoteFinalizedEvent{
Ref: ref, Ref: ref,
}) })
......
package ioutil package ioutil
import ( import (
"fmt"
"io" "io"
"os" "os"
"path/filepath"
) )
var ( var (
...@@ -21,6 +23,20 @@ func NoOutputStream() OutputTarget { ...@@ -21,6 +23,20 @@ func NoOutputStream() OutputTarget {
} }
} }
// ToBasicFile returns an OutputTarget that writes to a regular file at path,
// creating any missing parent directories first. An existing file is
// truncated; a new file is created with the given permission bits.
func ToBasicFile(path string, perm os.FileMode) OutputTarget {
	return func() (io.Writer, io.Closer, Aborter, error) {
		outDir := filepath.Dir(path)
		// Directories need the execute (search) bit to be traversable; ensure it
		// rather than reusing the file perms verbatim, so a non-executable file
		// mode such as 0o644 does not yield unusable directories.
		if err := os.MkdirAll(outDir, perm|0o111); err != nil {
			return nil, nil, nil, fmt.Errorf("failed to create dir %q: %w", outDir, err)
		}
		f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, perm)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("failed to open %q: %w", path, err)
		}
		// The file serves as both writer and closer; aborting is a no-op
		// (no temp-file/rename like the atomic variant below).
		return f, f, func() {}, nil
	}
}
func ToAtomicFile(path string, perm os.FileMode) OutputTarget { func ToAtomicFile(path string, perm os.FileMode) OutputTarget {
return func() (io.Writer, io.Closer, Aborter, error) { return func() (io.Writer, io.Closer, Aborter, error) {
f, err := NewAtomicWriterCompressed(path, perm) f, err := NewAtomicWriterCompressed(path, perm)
......
...@@ -67,6 +67,7 @@ type jsonEncoder struct { ...@@ -67,6 +67,7 @@ type jsonEncoder struct {
func newJSONEncoder(w io.Writer) Encoder { func newJSONEncoder(w io.Writer) Encoder {
e := json.NewEncoder(w) e := json.NewEncoder(w)
e.SetIndent("", " ") e.SetIndent("", " ")
e.SetEscapeHTML(false)
return &jsonEncoder{ return &jsonEncoder{
e: e, e: e,
} }
......
package locks
import "sync"
// RWMap is a map guarded by a single read-write mutex.
// For heavily concurrent reads/writes a sync.Map may perform better,
// at the cost of losing Go generics.
// The zero value is ready to use: no initialization is needed before
// reading or writing.
type RWMap[K comparable, V any] struct {
	inner map[K]V
	mu    sync.RWMutex
}

// Has reports whether key is present in the map.
func (m *RWMap[K, V]) Has(key K) (ok bool) {
	_, ok = m.Get(key)
	return
}

// Get returns the value stored under key, and whether it was present.
func (m *RWMap[K, V]) Get(key K) (value V, ok bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	value, ok = m.inner[key]
	return
}

// Set stores value under key, lazily allocating the inner map on first write.
func (m *RWMap[K, V]) Set(key K, value V) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.inner == nil {
		m.inner = make(map[K]V)
	}
	m.inner[key] = value
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, iteration stops.
// The read-lock is held for the whole iteration: f must not call back into
// write methods of the same map (e.g. Set), or it will deadlock.
func (m *RWMap[K, V]) Range(f func(key K, value V) bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	for key, value := range m.inner {
		if !f(key, value) {
			return
		}
	}
}
package locks
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestRWMap covers zero-value usability, get/set/overwrite, and both full
// and early-stopped Range iteration of RWMap.
func TestRWMap(t *testing.T) {
	m := &RWMap[uint64, int64]{}
	// get on new map: zero value is usable without initialization
	v, ok := m.Get(123)
	require.False(t, ok)
	require.Equal(t, int64(0), v)
	// set a value
	m.Set(123, 42)
	v, ok = m.Get(123)
	require.True(t, ok)
	require.Equal(t, int64(42), v)
	// overwrite a value
	m.Set(123, -42)
	v, ok = m.Get(123)
	require.True(t, ok)
	require.Equal(t, int64(-42), v)
	// add a value
	m.Set(10, 100)
	// range over values; each key must be visited exactly once
	got := make(map[uint64]int64)
	m.Range(func(key uint64, value int64) bool {
		if _, ok := got[key]; ok {
			panic("duplicate")
		}
		got[key] = value
		return true
	})
	require.Len(t, got, 2)
	require.Equal(t, int64(100), got[uint64(10)])
	require.Equal(t, int64(-42), got[uint64(123)])
	// range and stop early: returning false halts iteration after one entry
	clear(got)
	m.Range(func(key uint64, value int64) bool {
		got[key] = value
		return false
	})
	require.Len(t, got, 1, "stop early")
}
package locks
import "sync"
// RWValue wraps a single value of type E behind a read-write lock,
// so callers can share it without locking up a bigger structure.
// The embedded RWMutex and the Value field are exported for direct
// access where the Get/Set helpers are not enough.
type RWValue[E any] struct {
	sync.RWMutex
	Value E
}

// Get returns the current value under the read-lock.
func (c *RWValue[E]) Get() E {
	c.RLock()
	defer c.RUnlock()
	return c.Value
}

// Set replaces the current value under the write-lock.
func (c *RWValue[E]) Set(v E) {
	c.Lock()
	defer c.Unlock()
	c.Value = v
}
package locks
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestRWValue checks the zero value and successive Set/Get round-trips.
func TestRWValue(t *testing.T) {
	val := &RWValue[uint64]{}
	require.Equal(t, uint64(0), val.Get())
	for _, want := range []uint64{123, 42} {
		val.Set(want)
		require.Equal(t, want, val.Get())
	}
}
...@@ -114,12 +114,12 @@ func (cl *SupervisorClient) Finalized(ctx context.Context, chainID types.ChainID ...@@ -114,12 +114,12 @@ func (cl *SupervisorClient) Finalized(ctx context.Context, chainID types.ChainID
return result, err return result, err
} }
func (cl *SupervisorClient) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.BlockRef, error) { func (cl *SupervisorClient) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.BlockRef, error) {
var result eth.BlockRef var result eth.BlockRef
err := cl.client.CallContext( err := cl.client.CallContext(
ctx, ctx,
&result, &result,
"supervisor_derivedFrom", "supervisor_crossDerivedFrom",
chainID, chainID,
derived) derived)
return result, err return result, err
......
...@@ -29,7 +29,7 @@ func (m *FakeInteropBackend) Finalized(ctx context.Context, chainID types.ChainI ...@@ -29,7 +29,7 @@ func (m *FakeInteropBackend) Finalized(ctx context.Context, chainID types.ChainI
return m.FinalizedFn(ctx, chainID) return m.FinalizedFn(ctx, chainID)
} }
func (m *FakeInteropBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) { func (m *FakeInteropBackend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) {
return m.DerivedFromFn(ctx, chainID, derived) return m.DerivedFromFn(ctx, chainID, derived)
} }
......
...@@ -58,13 +58,13 @@ func (m *MockInteropBackend) ExpectFinalized(chainID types.ChainID, result eth.B ...@@ -58,13 +58,13 @@ func (m *MockInteropBackend) ExpectFinalized(chainID types.ChainID, result eth.B
m.Mock.On("Finalized", chainID).Once().Return(result, &err) m.Mock.On("Finalized", chainID).Once().Return(result, &err)
} }
func (m *MockInteropBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) { func (m *MockInteropBackend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) {
result := m.Mock.MethodCalled("DerivedFrom", chainID, derived) result := m.Mock.MethodCalled("CrossDerivedFrom", chainID, derived)
return result.Get(0).(eth.L1BlockRef), *result.Get(1).(*error) return result.Get(0).(eth.L1BlockRef), *result.Get(1).(*error)
} }
func (m *MockInteropBackend) ExpectDerivedFrom(chainID types.ChainID, derived eth.BlockID, result eth.L1BlockRef, err error) { func (m *MockInteropBackend) ExpectDerivedFrom(chainID types.ChainID, derived eth.BlockID, result eth.L1BlockRef, err error) {
m.Mock.On("DerivedFrom", chainID, derived).Once().Return(result, &err) m.Mock.On("CrossDerivedFrom", chainID, derived).Once().Return(result, &err)
} }
func (m *MockInteropBackend) UpdateLocalUnsafe(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error { func (m *MockInteropBackend) UpdateLocalUnsafe(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error {
......
...@@ -402,11 +402,11 @@ func (su *SupervisorBackend) Finalized(ctx context.Context, chainID types.ChainI ...@@ -402,11 +402,11 @@ func (su *SupervisorBackend) Finalized(ctx context.Context, chainID types.ChainI
return v.ID(), nil return v.ID(), nil
} }
func (su *SupervisorBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { func (su *SupervisorBackend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) {
su.mu.RLock() su.mu.RLock()
defer su.mu.RUnlock() defer su.mu.RUnlock()
v, err := su.chainDBs.DerivedFrom(chainID, derived) v, err := su.chainDBs.CrossDerivedFromBlockRef(chainID, derived)
if err != nil { if err != nil {
return eth.BlockRef{}, err return eth.BlockRef{}, err
} }
......
...@@ -223,7 +223,7 @@ func TestCrossSafeHazards(t *testing.T) { ...@@ -223,7 +223,7 @@ func TestCrossSafeHazards(t *testing.T) {
require.ErrorContains(t, err, "some error") require.ErrorContains(t, err, "some error")
require.Empty(t, hazards) require.Empty(t, hazards)
}) })
t.Run("timestamp is less, DerivedFrom returns error", func(t *testing.T) { t.Run("timestamp is less, CrossDerivedFrom returns error", func(t *testing.T) {
ssd := &mockSafeStartDeps{} ssd := &mockSafeStartDeps{}
sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})} sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})}
ssd.checkFn = func() (includedIn types.BlockSeal, err error) { ssd.checkFn = func() (includedIn types.BlockSeal, err error) {
...@@ -245,7 +245,7 @@ func TestCrossSafeHazards(t *testing.T) { ...@@ -245,7 +245,7 @@ func TestCrossSafeHazards(t *testing.T) {
require.ErrorContains(t, err, "some error") require.ErrorContains(t, err, "some error")
require.Empty(t, hazards) require.Empty(t, hazards)
}) })
t.Run("timestamp is less, DerivedFrom Number is greater", func(t *testing.T) { t.Run("timestamp is less, CrossDerivedFrom Number is greater", func(t *testing.T) {
ssd := &mockSafeStartDeps{} ssd := &mockSafeStartDeps{}
sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})} sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})}
ssd.checkFn = func() (includedIn types.BlockSeal, err error) { ssd.checkFn = func() (includedIn types.BlockSeal, err error) {
...@@ -268,7 +268,7 @@ func TestCrossSafeHazards(t *testing.T) { ...@@ -268,7 +268,7 @@ func TestCrossSafeHazards(t *testing.T) {
require.ErrorIs(t, err, types.ErrOutOfScope) require.ErrorIs(t, err, types.ErrOutOfScope)
require.Empty(t, hazards) require.Empty(t, hazards)
}) })
t.Run("timestamp is less, DerivedFrom Number less", func(t *testing.T) { t.Run("timestamp is less, CrossDerivedFrom Number less", func(t *testing.T) {
ssd := &mockSafeStartDeps{} ssd := &mockSafeStartDeps{}
sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})} sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})}
ssd.checkFn = func() (includedIn types.BlockSeal, err error) { ssd.checkFn = func() (includedIn types.BlockSeal, err error) {
...@@ -291,7 +291,7 @@ func TestCrossSafeHazards(t *testing.T) { ...@@ -291,7 +291,7 @@ func TestCrossSafeHazards(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, hazards) require.Empty(t, hazards)
}) })
t.Run("timestamp is less, DerivedFrom Number equal", func(t *testing.T) { t.Run("timestamp is less, CrossDerivedFrom Number equal", func(t *testing.T) {
ssd := &mockSafeStartDeps{} ssd := &mockSafeStartDeps{}
sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})} sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})}
ssd.checkFn = func() (includedIn types.BlockSeal, err error) { ssd.checkFn = func() (includedIn types.BlockSeal, err error) {
......
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
...@@ -37,7 +38,7 @@ func NewWorker(log log.Logger, workFn workFn) *Worker { ...@@ -37,7 +38,7 @@ func NewWorker(log log.Logger, workFn workFn) *Worker {
log: log, log: log,
poke: make(chan struct{}, 1), poke: make(chan struct{}, 1),
// The data may have changed, and we may have missed a poke, so re-attempt regularly. // The data may have changed, and we may have missed a poke, so re-attempt regularly.
pollDuration: time.Second * 4, pollDuration: 250 * time.Millisecond,
ctx: ctx, ctx: ctx,
cancel: cancel, cancel: cancel,
} }
...@@ -69,7 +70,11 @@ func (s *Worker) worker() { ...@@ -69,7 +70,11 @@ func (s *Worker) worker() {
if errors.Is(err, s.ctx.Err()) { if errors.Is(err, s.ctx.Err()) {
return return
} }
s.log.Error("Failed to process work", "err", err) if errors.Is(err, types.ErrFuture) {
s.log.Debug("Failed to process work", "err", err)
} else {
s.log.Warn("Failed to process work", "err", err)
}
} }
// await next time we process, or detect shutdown // await next time we process, or detect shutdown
......
...@@ -4,12 +4,12 @@ import ( ...@@ -4,12 +4,12 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"sync"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/locks"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/fromda" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/fromda"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset"
...@@ -73,28 +73,23 @@ var _ LogStorage = (*logs.DB)(nil) ...@@ -73,28 +73,23 @@ var _ LogStorage = (*logs.DB)(nil)
// ChainsDB is a database that stores logs and derived-from data for multiple chains. // ChainsDB is a database that stores logs and derived-from data for multiple chains.
// it implements the LogStorage interface, as well as several DB interfaces needed by the cross package. // it implements the LogStorage interface, as well as several DB interfaces needed by the cross package.
type ChainsDB struct { type ChainsDB struct {
// RW mutex:
// Read = chains can be read / mutated.
// Write = set of chains is changing.
mu sync.RWMutex
// unsafe info: the sequence of block seals and events // unsafe info: the sequence of block seals and events
logDBs map[types.ChainID]LogStorage logDBs locks.RWMap[types.ChainID, LogStorage]
// cross-unsafe: how far we have processed the unsafe data. // cross-unsafe: how far we have processed the unsafe data.
// If present but set to a zeroed value the cross-unsafe will fallback to cross-safe. // If present but set to a zeroed value the cross-unsafe will fallback to cross-safe.
crossUnsafe map[types.ChainID]types.BlockSeal crossUnsafe locks.RWMap[types.ChainID, *locks.RWValue[types.BlockSeal]]
// local-safe: index of what we optimistically know about L2 blocks being derived from L1 // local-safe: index of what we optimistically know about L2 blocks being derived from L1
localDBs map[types.ChainID]LocalDerivedFromStorage localDBs locks.RWMap[types.ChainID, LocalDerivedFromStorage]
// cross-safe: index of L2 blocks we know to only have cross-L2 valid dependencies // cross-safe: index of L2 blocks we know to only have cross-L2 valid dependencies
crossDBs map[types.ChainID]CrossDerivedFromStorage crossDBs locks.RWMap[types.ChainID, CrossDerivedFromStorage]
// finalized: the L1 finality progress. This can be translated into what may be considered as finalized in L2. // finalized: the L1 finality progress. This can be translated into what may be considered as finalized in L2.
// It is initially zeroed, and the L2 finality query will return // It is initially zeroed, and the L2 finality query will return
// an error until it has this L1 finality to work with. // an error until it has this L1 finality to work with.
finalizedL1 eth.L1BlockRef finalizedL1 locks.RWValue[eth.L1BlockRef]
// depSet is the dependency set, used to determine what may be tracked, // depSet is the dependency set, used to determine what may be tracked,
// what is missing, and to provide it to DB users. // what is missing, and to provide it to DB users.
...@@ -105,78 +100,62 @@ type ChainsDB struct { ...@@ -105,78 +100,62 @@ type ChainsDB struct {
func NewChainsDB(l log.Logger, depSet depset.DependencySet) *ChainsDB { func NewChainsDB(l log.Logger, depSet depset.DependencySet) *ChainsDB {
return &ChainsDB{ return &ChainsDB{
logDBs: make(map[types.ChainID]LogStorage), logger: l,
logger: l, depSet: depSet,
localDBs: make(map[types.ChainID]LocalDerivedFromStorage),
crossDBs: make(map[types.ChainID]CrossDerivedFromStorage),
crossUnsafe: make(map[types.ChainID]types.BlockSeal),
depSet: depSet,
} }
} }
func (db *ChainsDB) AddLogDB(chainID types.ChainID, logDB LogStorage) { func (db *ChainsDB) AddLogDB(chainID types.ChainID, logDB LogStorage) {
db.mu.Lock() if db.logDBs.Has(chainID) {
defer db.mu.Unlock()
if _, ok := db.logDBs[chainID]; ok {
db.logger.Warn("overwriting existing log DB for chain", "chain", chainID) db.logger.Warn("overwriting existing log DB for chain", "chain", chainID)
} }
db.logDBs[chainID] = logDB db.logDBs.Set(chainID, logDB)
} }
func (db *ChainsDB) AddLocalDerivedFromDB(chainID types.ChainID, dfDB LocalDerivedFromStorage) { func (db *ChainsDB) AddLocalDerivedFromDB(chainID types.ChainID, dfDB LocalDerivedFromStorage) {
db.mu.Lock() if db.localDBs.Has(chainID) {
defer db.mu.Unlock()
if _, ok := db.localDBs[chainID]; ok {
db.logger.Warn("overwriting existing local derived-from DB for chain", "chain", chainID) db.logger.Warn("overwriting existing local derived-from DB for chain", "chain", chainID)
} }
db.localDBs[chainID] = dfDB db.localDBs.Set(chainID, dfDB)
} }
func (db *ChainsDB) AddCrossDerivedFromDB(chainID types.ChainID, dfDB CrossDerivedFromStorage) { func (db *ChainsDB) AddCrossDerivedFromDB(chainID types.ChainID, dfDB CrossDerivedFromStorage) {
db.mu.Lock() if db.crossDBs.Has(chainID) {
defer db.mu.Unlock()
if _, ok := db.crossDBs[chainID]; ok {
db.logger.Warn("overwriting existing cross derived-from DB for chain", "chain", chainID) db.logger.Warn("overwriting existing cross derived-from DB for chain", "chain", chainID)
} }
db.crossDBs[chainID] = dfDB db.crossDBs.Set(chainID, dfDB)
} }
func (db *ChainsDB) AddCrossUnsafeTracker(chainID types.ChainID) { func (db *ChainsDB) AddCrossUnsafeTracker(chainID types.ChainID) {
db.mu.Lock() if db.crossUnsafe.Has(chainID) {
defer db.mu.Unlock()
if _, ok := db.crossUnsafe[chainID]; ok {
db.logger.Warn("overwriting existing cross-unsafe tracker for chain", "chain", chainID) db.logger.Warn("overwriting existing cross-unsafe tracker for chain", "chain", chainID)
} }
db.crossUnsafe[chainID] = types.BlockSeal{} db.crossUnsafe.Set(chainID, &locks.RWValue[types.BlockSeal]{})
} }
// ResumeFromLastSealedBlock prepares the chains db to resume recording events after a restart. // ResumeFromLastSealedBlock prepares the chains db to resume recording events after a restart.
// It rewinds the database to the last block that is guaranteed to have been fully recorded to the database, // It rewinds the database to the last block that is guaranteed to have been fully recorded to the database,
// to ensure it can resume recording from the first log of the next block. // to ensure it can resume recording from the first log of the next block.
func (db *ChainsDB) ResumeFromLastSealedBlock() error { func (db *ChainsDB) ResumeFromLastSealedBlock() error {
db.mu.RLock() var result error
defer db.mu.RUnlock() db.logDBs.Range(func(chain types.ChainID, logStore LogStorage) bool {
for chain, logStore := range db.logDBs {
headNum, ok := logStore.LatestSealedBlockNum() headNum, ok := logStore.LatestSealedBlockNum()
if !ok { if !ok {
// db must be empty, nothing to rewind to // db must be empty, nothing to rewind to
db.logger.Info("Resuming, but found no DB contents", "chain", chain) db.logger.Info("Resuming, but found no DB contents", "chain", chain)
continue return true
} }
db.logger.Info("Resuming, starting from last sealed block", "head", headNum) db.logger.Info("Resuming, starting from last sealed block", "head", headNum)
if err := logStore.Rewind(headNum); err != nil { if err := logStore.Rewind(headNum); err != nil {
return fmt.Errorf("failed to rewind chain %s to sealed block %d", chain, headNum) result = fmt.Errorf("failed to rewind chain %s to sealed block %d", chain, headNum)
return false
} }
} return true
return nil })
return result
} }
func (db *ChainsDB) DependencySet() depset.DependencySet { func (db *ChainsDB) DependencySet() depset.DependencySet {
...@@ -184,14 +163,12 @@ func (db *ChainsDB) DependencySet() depset.DependencySet { ...@@ -184,14 +163,12 @@ func (db *ChainsDB) DependencySet() depset.DependencySet {
} }
func (db *ChainsDB) Close() error { func (db *ChainsDB) Close() error {
db.mu.Lock()
defer db.mu.Unlock()
var combined error var combined error
for id, logDB := range db.logDBs { db.logDBs.Range(func(id types.ChainID, logDB LogStorage) bool {
if err := logDB.Close(); err != nil { if err := logDB.Close(); err != nil {
combined = errors.Join(combined, fmt.Errorf("failed to close log db for chain %v: %w", id, err)) combined = errors.Join(combined, fmt.Errorf("failed to close log db for chain %v: %w", id, err))
} }
} return true
})
return combined return combined
} }
...@@ -64,7 +64,7 @@ func TestBadUpdates(t *testing.T) { ...@@ -64,7 +64,7 @@ func TestBadUpdates(t *testing.T) {
assertFn: noChange, assertFn: noChange,
}, },
{ {
name: "DerivedFrom with conflicting parent root, same L1 height, new L2: accepted, L1 parent-hash is used only on L1 increments.", name: "CrossDerivedFrom with conflicting parent root, same L1 height, new L2: accepted, L1 parent-hash is used only on L1 increments.",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddDerived(toRef(dDerivedFrom, common.Hash{0x42}), toRef(eDerived, dDerived.Hash)), types.ErrConflict) require.NoError(t, db.AddDerived(toRef(dDerivedFrom, common.Hash{0x42}), toRef(eDerived, dDerived.Hash)), types.ErrConflict)
}, },
......
...@@ -12,10 +12,7 @@ import ( ...@@ -12,10 +12,7 @@ import (
) )
func (db *ChainsDB) FindSealedBlock(chain types.ChainID, number uint64) (seal types.BlockSeal, err error) { func (db *ChainsDB) FindSealedBlock(chain types.ChainID, number uint64) (seal types.BlockSeal, err error) {
db.mu.RLock() logDB, ok := db.logDBs.Get(chain)
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chain]
if !ok { if !ok {
return types.BlockSeal{}, fmt.Errorf("%w: %v", types.ErrUnknownChain, chain) return types.BlockSeal{}, fmt.Errorf("%w: %v", types.ErrUnknownChain, chain)
} }
...@@ -26,10 +23,7 @@ func (db *ChainsDB) FindSealedBlock(chain types.ChainID, number uint64) (seal ty ...@@ -26,10 +23,7 @@ func (db *ChainsDB) FindSealedBlock(chain types.ChainID, number uint64) (seal ty
// for the given chain. It does not contain safety guarantees. // for the given chain. It does not contain safety guarantees.
// The block number might not be available (empty database, or non-existent chain). // The block number might not be available (empty database, or non-existent chain).
func (db *ChainsDB) LatestBlockNum(chain types.ChainID) (num uint64, ok bool) { func (db *ChainsDB) LatestBlockNum(chain types.ChainID) (num uint64, ok bool) {
db.mu.RLock() logDB, knownChain := db.logDBs.Get(chain)
defer db.mu.RUnlock()
logDB, knownChain := db.logDBs[chain]
if !knownChain { if !knownChain {
return 0, false return 0, false
} }
...@@ -37,16 +31,15 @@ func (db *ChainsDB) LatestBlockNum(chain types.ChainID) (num uint64, ok bool) { ...@@ -37,16 +31,15 @@ func (db *ChainsDB) LatestBlockNum(chain types.ChainID) (num uint64, ok bool) {
} }
func (db *ChainsDB) IsCrossUnsafe(chainID types.ChainID, block eth.BlockID) error { func (db *ChainsDB) IsCrossUnsafe(chainID types.ChainID, block eth.BlockID) error {
db.mu.RLock() v, ok := db.crossUnsafe.Get(chainID)
defer db.mu.RUnlock()
v, ok := db.crossUnsafe[chainID]
if !ok { if !ok {
return types.ErrUnknownChain return types.ErrUnknownChain
} }
if v == (types.BlockSeal{}) { crossUnsafe := v.Get()
if crossUnsafe == (types.BlockSeal{}) {
return types.ErrFuture return types.ErrFuture
} }
if block.Number > v.Number { if block.Number > crossUnsafe.Number {
return types.ErrFuture return types.ErrFuture
} }
// TODO(#11693): make cross-unsafe reorg safe // TODO(#11693): make cross-unsafe reorg safe
...@@ -54,9 +47,7 @@ func (db *ChainsDB) IsCrossUnsafe(chainID types.ChainID, block eth.BlockID) erro ...@@ -54,9 +47,7 @@ func (db *ChainsDB) IsCrossUnsafe(chainID types.ChainID, block eth.BlockID) erro
} }
func (db *ChainsDB) ParentBlock(chainID types.ChainID, parentOf eth.BlockID) (parent eth.BlockID, err error) { func (db *ChainsDB) ParentBlock(chainID types.ChainID, parentOf eth.BlockID) (parent eth.BlockID, err error) {
db.mu.RLock() logDB, ok := db.logDBs.Get(chainID)
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chainID]
if !ok { if !ok {
return eth.BlockID{}, types.ErrUnknownChain return eth.BlockID{}, types.ErrUnknownChain
} }
...@@ -72,9 +63,7 @@ func (db *ChainsDB) ParentBlock(chainID types.ChainID, parentOf eth.BlockID) (pa ...@@ -72,9 +63,7 @@ func (db *ChainsDB) ParentBlock(chainID types.ChainID, parentOf eth.BlockID) (pa
} }
func (db *ChainsDB) IsLocalUnsafe(chainID types.ChainID, block eth.BlockID) error { func (db *ChainsDB) IsLocalUnsafe(chainID types.ChainID, block eth.BlockID) error {
db.mu.RLock() logDB, ok := db.logDBs.Get(chainID)
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chainID]
if !ok { if !ok {
return types.ErrUnknownChain return types.ErrUnknownChain
} }
...@@ -89,10 +78,7 @@ func (db *ChainsDB) IsLocalUnsafe(chainID types.ChainID, block eth.BlockID) erro ...@@ -89,10 +78,7 @@ func (db *ChainsDB) IsLocalUnsafe(chainID types.ChainID, block eth.BlockID) erro
} }
func (db *ChainsDB) LocalUnsafe(chainID types.ChainID) (types.BlockSeal, error) { func (db *ChainsDB) LocalUnsafe(chainID types.ChainID) (types.BlockSeal, error) {
db.mu.RLock() eventsDB, ok := db.logDBs.Get(chainID)
defer db.mu.RUnlock()
eventsDB, ok := db.logDBs[chainID]
if !ok { if !ok {
return types.BlockSeal{}, types.ErrUnknownChain return types.BlockSeal{}, types.ErrUnknownChain
} }
...@@ -104,29 +90,24 @@ func (db *ChainsDB) LocalUnsafe(chainID types.ChainID) (types.BlockSeal, error) ...@@ -104,29 +90,24 @@ func (db *ChainsDB) LocalUnsafe(chainID types.ChainID) (types.BlockSeal, error)
} }
func (db *ChainsDB) CrossUnsafe(chainID types.ChainID) (types.BlockSeal, error) { func (db *ChainsDB) CrossUnsafe(chainID types.ChainID) (types.BlockSeal, error) {
db.mu.RLock() result, ok := db.crossUnsafe.Get(chainID)
defer db.mu.RUnlock()
result, ok := db.crossUnsafe[chainID]
if !ok { if !ok {
return types.BlockSeal{}, types.ErrUnknownChain return types.BlockSeal{}, types.ErrUnknownChain
} }
crossUnsafe := result.Get()
// Fall back to cross-safe if cross-unsafe is not known yet // Fall back to cross-safe if cross-unsafe is not known yet
if result == (types.BlockSeal{}) { if crossUnsafe == (types.BlockSeal{}) {
_, crossSafe, err := db.CrossSafe(chainID) _, crossSafe, err := db.CrossSafe(chainID)
if err != nil { if err != nil {
return types.BlockSeal{}, fmt.Errorf("no cross-unsafe known for chain %s, and failed to fall back to cross-safe value: %w", chainID, err) return types.BlockSeal{}, fmt.Errorf("no cross-unsafe known for chain %s, and failed to fall back to cross-safe value: %w", chainID, err)
} }
return crossSafe, nil return crossSafe, nil
} }
return result, nil return crossUnsafe, nil
} }
func (db *ChainsDB) LocalSafe(chainID types.ChainID) (derivedFrom types.BlockSeal, derived types.BlockSeal, err error) { func (db *ChainsDB) LocalSafe(chainID types.ChainID) (derivedFrom types.BlockSeal, derived types.BlockSeal, err error) {
db.mu.RLock() localDB, ok := db.localDBs.Get(chainID)
defer db.mu.RUnlock()
localDB, ok := db.localDBs[chainID]
if !ok { if !ok {
return types.BlockSeal{}, types.BlockSeal{}, types.ErrUnknownChain return types.BlockSeal{}, types.BlockSeal{}, types.ErrUnknownChain
} }
...@@ -134,10 +115,7 @@ func (db *ChainsDB) LocalSafe(chainID types.ChainID) (derivedFrom types.BlockSea ...@@ -134,10 +115,7 @@ func (db *ChainsDB) LocalSafe(chainID types.ChainID) (derivedFrom types.BlockSea
} }
func (db *ChainsDB) CrossSafe(chainID types.ChainID) (derivedFrom types.BlockSeal, derived types.BlockSeal, err error) { func (db *ChainsDB) CrossSafe(chainID types.ChainID) (derivedFrom types.BlockSeal, derived types.BlockSeal, err error) {
db.mu.RLock() crossDB, ok := db.crossDBs.Get(chainID)
defer db.mu.RUnlock()
crossDB, ok := db.crossDBs[chainID]
if !ok { if !ok {
return types.BlockSeal{}, types.BlockSeal{}, types.ErrUnknownChain return types.BlockSeal{}, types.BlockSeal{}, types.ErrUnknownChain
} }
...@@ -145,10 +123,7 @@ func (db *ChainsDB) CrossSafe(chainID types.ChainID) (derivedFrom types.BlockSea ...@@ -145,10 +123,7 @@ func (db *ChainsDB) CrossSafe(chainID types.ChainID) (derivedFrom types.BlockSea
} }
func (db *ChainsDB) Finalized(chainID types.ChainID) (types.BlockSeal, error) { func (db *ChainsDB) Finalized(chainID types.ChainID) (types.BlockSeal, error) {
db.mu.RLock() finalizedL1 := db.finalizedL1.Get()
defer db.mu.RUnlock()
finalizedL1 := db.finalizedL1
if finalizedL1 == (eth.L1BlockRef{}) { if finalizedL1 == (eth.L1BlockRef{}) {
return types.BlockSeal{}, errors.New("no finalized L1 signal, cannot determine L2 finality yet") return types.BlockSeal{}, errors.New("no finalized L1 signal, cannot determine L2 finality yet")
} }
...@@ -160,26 +135,25 @@ func (db *ChainsDB) Finalized(chainID types.ChainID) (types.BlockSeal, error) { ...@@ -160,26 +135,25 @@ func (db *ChainsDB) Finalized(chainID types.ChainID) (types.BlockSeal, error) {
} }
func (db *ChainsDB) LastDerivedFrom(chainID types.ChainID, derivedFrom eth.BlockID) (derived types.BlockSeal, err error) { func (db *ChainsDB) LastDerivedFrom(chainID types.ChainID, derivedFrom eth.BlockID) (derived types.BlockSeal, err error) {
crossDB, ok := db.crossDBs[chainID] crossDB, ok := db.crossDBs.Get(chainID)
if !ok { if !ok {
return types.BlockSeal{}, types.ErrUnknownChain return types.BlockSeal{}, types.ErrUnknownChain
} }
return crossDB.LastDerivedAt(derivedFrom) return crossDB.LastDerivedAt(derivedFrom)
} }
func (db *ChainsDB) DerivedFrom(chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { // CrossDerivedFromBlockRef returns the block that the given block was derived from, if it exists in the cross derived-from storage.
db.mu.RLock() // This includes the parent-block lookup. Use CrossDerivedFrom if no parent-block info is needed.
defer db.mu.RUnlock() func (db *ChainsDB) CrossDerivedFromBlockRef(chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) {
xdb, ok := db.crossDBs.Get(chainID)
localDB, ok := db.localDBs[chainID]
if !ok { if !ok {
return eth.BlockRef{}, types.ErrUnknownChain return eth.BlockRef{}, types.ErrUnknownChain
} }
res, err := localDB.DerivedFrom(derived) res, err := xdb.DerivedFrom(derived)
if err != nil { if err != nil {
return eth.BlockRef{}, err return eth.BlockRef{}, err
} }
parent, err := localDB.PreviousDerivedFrom(res.ID()) parent, err := xdb.PreviousDerivedFrom(res.ID())
if err != nil { if err != nil {
return eth.BlockRef{}, err return eth.BlockRef{}, err
} }
...@@ -189,10 +163,7 @@ func (db *ChainsDB) DerivedFrom(chainID types.ChainID, derived eth.BlockID) (der ...@@ -189,10 +163,7 @@ func (db *ChainsDB) DerivedFrom(chainID types.ChainID, derived eth.BlockID) (der
// Check calls the underlying logDB to determine if the given log entry exists at the given location. // Check calls the underlying logDB to determine if the given log entry exists at the given location.
// If the block-seal of the block that includes the log is known, it is returned. It is fully zeroed otherwise, if the block is in-progress. // If the block-seal of the block that includes the log is known, it is returned. It is fully zeroed otherwise, if the block is in-progress.
func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (includedIn types.BlockSeal, err error) { func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (includedIn types.BlockSeal, err error) {
db.mu.RLock() logDB, ok := db.logDBs.Get(chain)
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chain]
if !ok { if !ok {
return types.BlockSeal{}, fmt.Errorf("%w: %v", types.ErrUnknownChain, chain) return types.BlockSeal{}, fmt.Errorf("%w: %v", types.ErrUnknownChain, chain)
} }
...@@ -202,10 +173,7 @@ func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, l ...@@ -202,10 +173,7 @@ func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, l
// OpenBlock returns the Executing Messages for the block at the given number on the given chain. // OpenBlock returns the Executing Messages for the block at the given number on the given chain.
// it routes the request to the appropriate logDB. // it routes the request to the appropriate logDB.
func (db *ChainsDB) OpenBlock(chainID types.ChainID, blockNum uint64) (seal eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error) { func (db *ChainsDB) OpenBlock(chainID types.ChainID, blockNum uint64) (seal eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error) {
db.mu.RLock() logDB, ok := db.logDBs.Get(chainID)
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chainID]
if !ok { if !ok {
return eth.BlockRef{}, 0, nil, types.ErrUnknownChain return eth.BlockRef{}, 0, nil, types.ErrUnknownChain
} }
...@@ -215,10 +183,7 @@ func (db *ChainsDB) OpenBlock(chainID types.ChainID, blockNum uint64) (seal eth. ...@@ -215,10 +183,7 @@ func (db *ChainsDB) OpenBlock(chainID types.ChainID, blockNum uint64) (seal eth.
// LocalDerivedFrom returns the block that the given block was derived from, if it exists in the local derived-from storage. // LocalDerivedFrom returns the block that the given block was derived from, if it exists in the local derived-from storage.
// it routes the request to the appropriate localDB. // it routes the request to the appropriate localDB.
func (db *ChainsDB) LocalDerivedFrom(chain types.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) { func (db *ChainsDB) LocalDerivedFrom(chain types.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) {
db.mu.RLock() lDB, ok := db.localDBs.Get(chain)
defer db.mu.RUnlock()
lDB, ok := db.localDBs[chain]
if !ok { if !ok {
return types.BlockSeal{}, types.ErrUnknownChain return types.BlockSeal{}, types.ErrUnknownChain
} }
...@@ -228,10 +193,7 @@ func (db *ChainsDB) LocalDerivedFrom(chain types.ChainID, derived eth.BlockID) ( ...@@ -228,10 +193,7 @@ func (db *ChainsDB) LocalDerivedFrom(chain types.ChainID, derived eth.BlockID) (
// CrossDerivedFrom returns the block that the given block was derived from, if it exists in the cross derived-from storage. // CrossDerivedFrom returns the block that the given block was derived from, if it exists in the cross derived-from storage.
// it routes the request to the appropriate crossDB. // it routes the request to the appropriate crossDB.
func (db *ChainsDB) CrossDerivedFrom(chain types.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) { func (db *ChainsDB) CrossDerivedFrom(chain types.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) {
db.mu.RLock() xDB, ok := db.crossDBs.Get(chain)
defer db.mu.RUnlock()
xDB, ok := db.crossDBs[chain]
if !ok { if !ok {
return types.BlockSeal{}, types.ErrUnknownChain return types.BlockSeal{}, types.ErrUnknownChain
} }
...@@ -247,15 +209,12 @@ func (db *ChainsDB) CrossDerivedFrom(chain types.ChainID, derived eth.BlockID) ( ...@@ -247,15 +209,12 @@ func (db *ChainsDB) CrossDerivedFrom(chain types.ChainID, derived eth.BlockID) (
// Or ErrOutOfScope, with non-zero derivedFromScope, // Or ErrOutOfScope, with non-zero derivedFromScope,
// if additional L1 data is needed to cross-verify the candidate L2 block. // if additional L1 data is needed to cross-verify the candidate L2 block.
func (db *ChainsDB) CandidateCrossSafe(chain types.ChainID) (derivedFromScope, crossSafe eth.BlockRef, err error) { func (db *ChainsDB) CandidateCrossSafe(chain types.ChainID) (derivedFromScope, crossSafe eth.BlockRef, err error) {
db.mu.RLock() xDB, ok := db.crossDBs.Get(chain)
defer db.mu.RUnlock()
xDB, ok := db.crossDBs[chain]
if !ok { if !ok {
return eth.BlockRef{}, eth.BlockRef{}, types.ErrUnknownChain return eth.BlockRef{}, eth.BlockRef{}, types.ErrUnknownChain
} }
lDB, ok := db.localDBs[chain] lDB, ok := db.localDBs.Get(chain)
if !ok { if !ok {
return eth.BlockRef{}, eth.BlockRef{}, types.ErrUnknownChain return eth.BlockRef{}, eth.BlockRef{}, types.ErrUnknownChain
} }
...@@ -323,9 +282,7 @@ func (db *ChainsDB) CandidateCrossSafe(chain types.ChainID) (derivedFromScope, c ...@@ -323,9 +282,7 @@ func (db *ChainsDB) CandidateCrossSafe(chain types.ChainID) (derivedFromScope, c
} }
func (db *ChainsDB) PreviousDerived(chain types.ChainID, derived eth.BlockID) (prevDerived types.BlockSeal, err error) { func (db *ChainsDB) PreviousDerived(chain types.ChainID, derived eth.BlockID) (prevDerived types.BlockSeal, err error) {
db.mu.RLock() lDB, ok := db.localDBs.Get(chain)
defer db.mu.RUnlock()
lDB, ok := db.localDBs[chain]
if !ok { if !ok {
return types.BlockSeal{}, types.ErrUnknownChain return types.BlockSeal{}, types.ErrUnknownChain
} }
...@@ -333,9 +290,7 @@ func (db *ChainsDB) PreviousDerived(chain types.ChainID, derived eth.BlockID) (p ...@@ -333,9 +290,7 @@ func (db *ChainsDB) PreviousDerived(chain types.ChainID, derived eth.BlockID) (p
} }
func (db *ChainsDB) PreviousDerivedFrom(chain types.ChainID, derivedFrom eth.BlockID) (prevDerivedFrom types.BlockSeal, err error) { func (db *ChainsDB) PreviousDerivedFrom(chain types.ChainID, derivedFrom eth.BlockID) (prevDerivedFrom types.BlockSeal, err error) {
db.mu.RLock() lDB, ok := db.localDBs.Get(chain)
defer db.mu.RUnlock()
lDB, ok := db.localDBs[chain]
if !ok { if !ok {
return types.BlockSeal{}, types.ErrUnknownChain return types.BlockSeal{}, types.ErrUnknownChain
} }
...@@ -343,9 +298,7 @@ func (db *ChainsDB) PreviousDerivedFrom(chain types.ChainID, derivedFrom eth.Blo ...@@ -343,9 +298,7 @@ func (db *ChainsDB) PreviousDerivedFrom(chain types.ChainID, derivedFrom eth.Blo
} }
func (db *ChainsDB) NextDerivedFrom(chain types.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, err error) { func (db *ChainsDB) NextDerivedFrom(chain types.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, err error) {
db.mu.RLock() lDB, ok := db.localDBs.Get(chain)
defer db.mu.RUnlock()
lDB, ok := db.localDBs[chain]
if !ok { if !ok {
return eth.BlockRef{}, types.ErrUnknownChain return eth.BlockRef{}, types.ErrUnknownChain
} }
...@@ -360,9 +313,6 @@ func (db *ChainsDB) NextDerivedFrom(chain types.ChainID, derivedFrom eth.BlockID ...@@ -360,9 +313,6 @@ func (db *ChainsDB) NextDerivedFrom(chain types.ChainID, derivedFrom eth.BlockID
// it assumes the log entry has already been checked and is valid, this function only checks safety levels. // it assumes the log entry has already been checked and is valid, this function only checks safety levels.
// Safety levels are assumed to graduate from LocalUnsafe to LocalSafe to CrossUnsafe to CrossSafe, with Finalized as the strongest. // Safety levels are assumed to graduate from LocalUnsafe to LocalSafe to CrossUnsafe to CrossSafe, with Finalized as the strongest.
func (db *ChainsDB) Safest(chainID types.ChainID, blockNum uint64, index uint32) (safest types.SafetyLevel, err error) { func (db *ChainsDB) Safest(chainID types.ChainID, blockNum uint64, index uint32) (safest types.SafetyLevel, err error) {
db.mu.RLock()
defer db.mu.RUnlock()
if finalized, err := db.Finalized(chainID); err == nil { if finalized, err := db.Finalized(chainID); err == nil {
if finalized.Number >= blockNum { if finalized.Number >= blockNum {
return types.Finalized, nil return types.Finalized, nil
...@@ -395,7 +345,7 @@ func (db *ChainsDB) Safest(chainID types.ChainID, blockNum uint64, index uint32) ...@@ -395,7 +345,7 @@ func (db *ChainsDB) Safest(chainID types.ChainID, blockNum uint64, index uint32)
} }
func (db *ChainsDB) IteratorStartingAt(chain types.ChainID, sealedNum uint64, logIndex uint32) (logs.Iterator, error) { func (db *ChainsDB) IteratorStartingAt(chain types.ChainID, sealedNum uint64, logIndex uint32) (logs.Iterator, error) {
logDB, ok := db.logDBs[chain] logDB, ok := db.logDBs.Get(chain)
if !ok { if !ok {
return nil, fmt.Errorf("%w: %v", types.ErrUnknownChain, chain) return nil, fmt.Errorf("%w: %v", types.ErrUnknownChain, chain)
} }
......
...@@ -15,10 +15,7 @@ func (db *ChainsDB) AddLog( ...@@ -15,10 +15,7 @@ func (db *ChainsDB) AddLog(
parentBlock eth.BlockID, parentBlock eth.BlockID,
logIdx uint32, logIdx uint32,
execMsg *types.ExecutingMessage) error { execMsg *types.ExecutingMessage) error {
db.mu.RLock() logDB, ok := db.logDBs.Get(chain)
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chain]
if !ok { if !ok {
return fmt.Errorf("cannot AddLog: %w: %v", types.ErrUnknownChain, chain) return fmt.Errorf("cannot AddLog: %w: %v", types.ErrUnknownChain, chain)
} }
...@@ -26,10 +23,7 @@ func (db *ChainsDB) AddLog( ...@@ -26,10 +23,7 @@ func (db *ChainsDB) AddLog(
} }
func (db *ChainsDB) SealBlock(chain types.ChainID, block eth.BlockRef) error { func (db *ChainsDB) SealBlock(chain types.ChainID, block eth.BlockRef) error {
db.mu.RLock() logDB, ok := db.logDBs.Get(chain)
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chain]
if !ok { if !ok {
return fmt.Errorf("cannot SealBlock: %w: %v", types.ErrUnknownChain, chain) return fmt.Errorf("cannot SealBlock: %w: %v", types.ErrUnknownChain, chain)
} }
...@@ -42,10 +36,7 @@ func (db *ChainsDB) SealBlock(chain types.ChainID, block eth.BlockRef) error { ...@@ -42,10 +36,7 @@ func (db *ChainsDB) SealBlock(chain types.ChainID, block eth.BlockRef) error {
} }
func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error { func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error {
db.mu.RLock() logDB, ok := db.logDBs.Get(chain)
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chain]
if !ok { if !ok {
return fmt.Errorf("cannot Rewind: %w: %s", types.ErrUnknownChain, chain) return fmt.Errorf("cannot Rewind: %w: %s", types.ErrUnknownChain, chain)
} }
...@@ -53,10 +44,7 @@ func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error { ...@@ -53,10 +44,7 @@ func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error {
} }
func (db *ChainsDB) UpdateLocalSafe(chain types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error { func (db *ChainsDB) UpdateLocalSafe(chain types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error {
db.mu.RLock() localDB, ok := db.localDBs.Get(chain)
defer db.mu.RUnlock()
localDB, ok := db.localDBs[chain]
if !ok { if !ok {
return fmt.Errorf("cannot UpdateLocalSafe: %w: %v", types.ErrUnknownChain, chain) return fmt.Errorf("cannot UpdateLocalSafe: %w: %v", types.ErrUnknownChain, chain)
} }
...@@ -65,22 +53,17 @@ func (db *ChainsDB) UpdateLocalSafe(chain types.ChainID, derivedFrom eth.BlockRe ...@@ -65,22 +53,17 @@ func (db *ChainsDB) UpdateLocalSafe(chain types.ChainID, derivedFrom eth.BlockRe
} }
func (db *ChainsDB) UpdateCrossUnsafe(chain types.ChainID, crossUnsafe types.BlockSeal) error { func (db *ChainsDB) UpdateCrossUnsafe(chain types.ChainID, crossUnsafe types.BlockSeal) error {
db.mu.RLock() v, ok := db.crossUnsafe.Get(chain)
defer db.mu.RUnlock() if !ok {
if _, ok := db.crossUnsafe[chain]; !ok {
return fmt.Errorf("cannot UpdateCrossUnsafe: %w: %s", types.ErrUnknownChain, chain) return fmt.Errorf("cannot UpdateCrossUnsafe: %w: %s", types.ErrUnknownChain, chain)
} }
db.logger.Debug("Updating cross unsafe", "chain", chain, "crossUnsafe", crossUnsafe) db.logger.Debug("Updating cross unsafe", "chain", chain, "crossUnsafe", crossUnsafe)
db.crossUnsafe[chain] = crossUnsafe v.Set(crossUnsafe)
return nil return nil
} }
func (db *ChainsDB) UpdateCrossSafe(chain types.ChainID, l1View eth.BlockRef, lastCrossDerived eth.BlockRef) error { func (db *ChainsDB) UpdateCrossSafe(chain types.ChainID, l1View eth.BlockRef, lastCrossDerived eth.BlockRef) error {
db.mu.RLock() crossDB, ok := db.crossDBs.Get(chain)
defer db.mu.RUnlock()
crossDB, ok := db.crossDBs[chain]
if !ok { if !ok {
return fmt.Errorf("cannot UpdateCrossSafe: %w: %s", types.ErrUnknownChain, chain) return fmt.Errorf("cannot UpdateCrossSafe: %w: %s", types.ErrUnknownChain, chain)
} }
...@@ -89,13 +72,14 @@ func (db *ChainsDB) UpdateCrossSafe(chain types.ChainID, l1View eth.BlockRef, la ...@@ -89,13 +72,14 @@ func (db *ChainsDB) UpdateCrossSafe(chain types.ChainID, l1View eth.BlockRef, la
} }
func (db *ChainsDB) UpdateFinalizedL1(finalized eth.BlockRef) error { func (db *ChainsDB) UpdateFinalizedL1(finalized eth.BlockRef) error {
db.mu.RLock() // Lock, so we avoid race-conditions in-between getting (for comparison) and setting.
defer db.mu.RUnlock() db.finalizedL1.Lock()
defer db.finalizedL1.Unlock()
if db.finalizedL1.Number > finalized.Number { if v := db.finalizedL1.Value; v.Number > finalized.Number {
return fmt.Errorf("cannot rewind finalized L1 head from %s to %s", db.finalizedL1, finalized) return fmt.Errorf("cannot rewind finalized L1 head from %s to %s", v, finalized)
} }
db.logger.Debug("Updating finalized L1", "finalizedL1", finalized) db.logger.Debug("Updating finalized L1", "finalizedL1", finalized)
db.finalizedL1 = finalized db.finalizedL1.Value = finalized
return nil return nil
} }
...@@ -62,7 +62,7 @@ func (m *MockBackend) Finalized(ctx context.Context, chainID types.ChainID) (eth ...@@ -62,7 +62,7 @@ func (m *MockBackend) Finalized(ctx context.Context, chainID types.ChainID) (eth
return eth.BlockID{}, nil return eth.BlockID{}, nil
} }
func (m *MockBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { func (m *MockBackend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) {
return eth.BlockRef{}, nil return eth.BlockRef{}, nil
} }
......
...@@ -17,7 +17,7 @@ type AdminBackend interface { ...@@ -17,7 +17,7 @@ type AdminBackend interface {
type QueryBackend interface { type QueryBackend interface {
CheckMessage(identifier types.Identifier, payloadHash common.Hash) (types.SafetyLevel, error) CheckMessage(identifier types.Identifier, payloadHash common.Hash) (types.SafetyLevel, error)
CheckMessages(messages []types.Message, minSafety types.SafetyLevel) error CheckMessages(messages []types.Message, minSafety types.SafetyLevel) error
DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error)
UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error) UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error)
SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error)
Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error)
...@@ -67,8 +67,8 @@ func (q *QueryFrontend) Finalized(ctx context.Context, chainID types.ChainID) (e ...@@ -67,8 +67,8 @@ func (q *QueryFrontend) Finalized(ctx context.Context, chainID types.ChainID) (e
return q.Supervisor.Finalized(ctx, chainID) return q.Supervisor.Finalized(ctx, chainID)
} }
func (q *QueryFrontend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { func (q *QueryFrontend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) {
return q.Supervisor.DerivedFrom(ctx, chainID, derived) return q.Supervisor.CrossDerivedFrom(ctx, chainID, derived)
} }
type AdminFrontend struct { type AdminFrontend struct {
......
...@@ -72,6 +72,17 @@ func (su *SupervisorService) initBackend(ctx context.Context, cfg *config.Config ...@@ -72,6 +72,17 @@ func (su *SupervisorService) initBackend(ctx context.Context, cfg *config.Config
su.backend = backend.NewMockBackend() su.backend = backend.NewMockBackend()
return nil return nil
} }
// the flag is a string slice, which has the potential to have empty strings
filterBlank := func(in []string) []string {
out := make([]string, 0, len(in))
for _, s := range in {
if s != "" {
out = append(out, s)
}
}
return out
}
cfg.L2RPCs = filterBlank(cfg.L2RPCs)
be, err := backend.NewSupervisorBackend(ctx, su.log, su.metrics, cfg) be, err := backend.NewSupervisorBackend(ctx, su.log, su.metrics, cfg)
if err != nil { if err != nil {
return fmt.Errorf("failed to create supervisor backend: %w", err) return fmt.Errorf("failed to create supervisor backend: %w", err)
......
FROM us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101411.1-rc.3
# Note: depend on dev-release for sequencer interop message checks
RUN apk add --no-cache jq
COPY l2-op-geth-entrypoint.sh /entrypoint.sh
VOLUME ["/db"]
ENTRYPOINT ["/bin/sh", "/entrypoint.sh"]
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
!/op-batcher !/op-batcher
!/op-bootnode !/op-bootnode
!/op-chain-ops !/op-chain-ops
!/op-deployer
!/op-challenger !/op-challenger
!/packages/contracts-bedrock/snapshots !/packages/contracts-bedrock/snapshots
!/op-dispute-mon !/op-dispute-mon
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment