Commit 7f941b84 authored by protolambda, committed by GitHub

Interop: local devnet (#11590)

* local interop devnet

* interop-devnet: experimental op-geth docker image, connect with op-supervisor

* interop-devnet: port and path fixes

* interop-devnet: datadir fix

* interop-local: more fixes

* interop-devnet: connect op-supervisor to L2 EL nodes using RPC

* more fixes

* ops-bedrock: fix l2 op geth dockerfile for interop

* interop-devnet: fix supervisor RPC add workaround

* interop-devnet: implement review suggestions

* fixes from run-testing

* Add op-deployer to dockerignore exceptions

* use latest geth rc

* use RW Locks in Update Functions

* add log for new cross-safe head

* make updates much more frequent

* use LocalDB for LastDerivedFrom

* Add log message for finalization update

* op-supervisor: fix db locking, fix crossdb usage

* interop-devnet: use chain IDs as chain indices, since it's not translated everywhere yet

* op-supervisor: cross-derived-from RPC method

* Work Process ErrFuture to Debug Log

---------
Co-authored-by: axelKingsley <axel.kingsley@gmail.com>
parent caf63ce1
#!/bin/bash
set -eu

# Generates the local interop devnet configuration:
#  - L1 + L2 genesis/rollup configs and deployment address files (via op-node interop dev-setup)
#  - L1 consensus-layer genesis state (via eth2-testnet-genesis)
#  - per-L2 env files with service keys for op-node / op-proposer / op-batcher
# Run this from the interop-devnet directory; it writes into <repo-root>/.devnet-interop.

# Sanity-check the working directory: the repo root (one level up) contains versions.json.
if [ -f "../versions.json" ]; then
  echo "Running create-chains script."
else
  echo "Cannot run create-chains script, must be in interop-devnet dir, but currently in:"
  pwd
  exit 1
fi

# Navigate to repository root
cd ..

# Abort if the devnet state already exists: this script is not idempotent
# (env files are appended to, and genesis generation is not re-runnable in place).
if [ -d ".devnet-interop" ]; then
  echo "Already created chains."
  exit 1
else
  echo "Creating new interop devnet chain configs"
fi

export OP_INTEROP_MNEMONIC="test test test test test test test test test test test junk"

# Generate L1/L2 genesis and rollup configs, plus contract deployment address files.
go run ./op-node/cmd interop dev-setup \
  --artifacts-dir=packages/contracts-bedrock/forge-artifacts \
  --foundry-dir=packages/contracts-bedrock \
  --l1.chainid=900100 \
  --l2.chainids=900200,900201 \
  --out-dir=".devnet-interop" \
  --log.format=logfmt \
  --log.level=info

# create L1 CL genesis
eth2-testnet-genesis deneb \
  --config=./ops-bedrock/beacon-data/config.yaml \
  --preset-phase0=minimal \
  --preset-altair=minimal \
  --preset-bellatrix=minimal \
  --preset-capella=minimal \
  --preset-deneb=minimal \
  --eth1-config=.devnet-interop/genesis/l1/genesis.json \
  --state-output=.devnet-interop/genesis/l1/beaconstate.ssz \
  --tranches-dir=.devnet-interop/genesis/l1/tranches \
  --mnemonics=./ops-bedrock/mnemonics.yaml \
  --eth1-withdrawal-address=0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \
  --eth1-match-genesis-time

echo "Writing env files now..."

# Write env files for each L2 service.
# The chain IDs here must match the --l2.chainids passed to dev-setup above.
for chain_id in 900200 900201; do
  chain_env=".devnet-interop/env/l2/$chain_id"
  mkdir -p "$chain_env"
  key_cmd="go run ./op-node/cmd interop devkey secret --domain=chain-operator --chainid=$chain_id"
  # op-node
  echo "OP_NODE_P2P_SEQUENCER_KEY=$($key_cmd --name=sequencer-p2p)" >> "$chain_env/op-node.env"
  # proposer
  echo "OP_PROPOSER_PRIVATE_KEY=$($key_cmd --name=proposer)" >> "$chain_env/op-proposer.env"
  echo "OP_PROPOSER_GAME_FACTORY_ADDRESS=$(jq -r .DisputeGameFactoryProxy ".devnet-interop/deployments/l2/$chain_id/addresses.json")" >> "$chain_env/op-proposer.env"
  # batcher
  echo "OP_BATCHER_PRIVATE_KEY=$($key_cmd --name=batcher)" >> "$chain_env/op-batcher.env"
done

echo "Interop devnet setup is complete!"
{
"dependencies": {
"900200": {
"chainIndex": "900200",
"activationTime": 0,
"historyMinTime": 0
},
"900201": {
"chainIndex": "900201",
"activationTime": 0,
"historyMinTime": 0
}
}
}
This diff is collapsed.
# One-time generation of the interop devnet configs and env files (see create-chains.sh).
devnet-setup:
bash create-chains.sh
# Build the docker images for the devnet services, stamping git metadata into the build.
devnet-build-images:
PWD="$(pwd)" DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 \
docker compose build --progress plain \
--build-arg GIT_COMMIT={git_commit} \
--build-arg GIT_DATE={git_date}
# Start the devnet: L1 (EL, beacon node, validator client) first, then the L2 services.
devnet-up:
docker compose up -d l1 l1-bn l1-vc
docker compose up -d \
op-supervisor \
op-node-a op-batcher-a op-proposer-a \
op-node-b op-batcher-b op-proposer-b
devnet-down:
# stops services, does not remove containers/networks
docker compose stop
# Full teardown: removes the generated configs, containers, networks, images and volumes.
devnet-clean:
rm -rf ../.devnet-interop
# Stops services, and removes containers/networks
docker compose down
# Now manually clean up the related images and volumes
# Note: `justfile` interprets the curly brackets. So we escape them, by wrapping it with more, as a string, like Jinja2.
docker image ls 'interop-devnet*' --format='{{ '{{.Repository}}' }}' | xargs -r docker rmi
docker volume ls --filter name=interop-devnet --format='{{ '{{.Name}}' }}' | xargs -r docker volume rm
# Tail the logs of all devnet services.
devnet-logs:
docker compose logs -f
......@@ -93,6 +93,21 @@ func (role SuperchainOperatorRole) Key(chainID *big.Int) Key {
}
}
// UnmarshalText sets the role from its textual name, the inverse of String.
// It returns an error if the text matches no known role name.
func (role *SuperchainOperatorRole) UnmarshalText(data []byte) error {
	name := string(data)
	// Probe candidate enum values; 20 is assumed to cover all defined roles — TODO confirm against the role enum.
	for candidate := SuperchainOperatorRole(0); candidate < 20; candidate++ {
		if candidate.String() != name {
			continue
		}
		*role = candidate
		return nil
	}
	return fmt.Errorf("unknown superchain operator role %q", name)
}
// MarshalText encodes the role as its string name (the inverse of UnmarshalText).
// It never fails.
func (role *SuperchainOperatorRole) MarshalText() ([]byte, error) {
return []byte(role.String()), nil
}
// SuperchainOperatorKey is an account specific to an OperationRole of a given OP-Stack chain.
type SuperchainOperatorKey struct {
ChainID *big.Int
......@@ -181,6 +196,21 @@ func (role ChainOperatorRole) Key(chainID *big.Int) Key {
}
}
// UnmarshalText sets the role from its textual name, the inverse of String.
// It returns an error if the text matches no known role name.
func (role *ChainOperatorRole) UnmarshalText(data []byte) error {
	name := string(data)
	// Probe candidate enum values; 20 is assumed to cover all defined roles — TODO confirm against the role enum.
	for candidate := ChainOperatorRole(0); candidate < 20; candidate++ {
		if candidate.String() != name {
			continue
		}
		*role = candidate
		return nil
	}
	return fmt.Errorf("unknown chain operator role %q", name)
}
// MarshalText encodes the role as its string name (the inverse of UnmarshalText).
// It never fails.
func (role *ChainOperatorRole) MarshalText() ([]byte, error) {
return []byte(role.String()), nil
}
// ChainOperatorKey is an account specific to an OperationRole of a given OP-Stack chain.
type ChainOperatorKey struct {
ChainID *big.Int
......
package interop
import (
"fmt"
"math/big"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-chain-ops/devkeys"
"github.com/ethereum-optimism/optimism/op-chain-ops/foundry"
"github.com/ethereum-optimism/optimism/op-chain-ops/interopgen"
op_service "github.com/ethereum-optimism/optimism/op-service"
"github.com/ethereum-optimism/optimism/op-service/cliapp"
"github.com/ethereum-optimism/optimism/op-service/ioutil"
"github.com/ethereum-optimism/optimism/op-service/jsonutil"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/crypto"
)
// EnvPrefix is the prefix for all environment-variable names of the interop commands.
var EnvPrefix = "OP_INTEROP"
// CLI flags shared by the interop subcommands.
var (
// l1ChainIDFlag sets the chain ID of the generated L1 chain.
l1ChainIDFlag = &cli.Uint64Flag{
Name: "l1.chainid",
Value: 900100,
EnvVars: op_service.PrefixEnvVar(EnvPrefix, "L1_CHAINID"),
}
// l2ChainIDsFlag sets the chain IDs of the generated L2 chains.
l2ChainIDsFlag = &cli.Uint64SliceFlag{
Name: "l2.chainids",
Value: cli.NewUint64Slice(900200, 900201),
EnvVars: op_service.PrefixEnvVar(EnvPrefix, "L2_CHAINIDS"),
}
// timestampFlag overrides the genesis timestamp; 0 means "now plus 5 seconds" (see usage).
timestampFlag = &cli.Uint64Flag{
Name: "timestamp",
Value: 0,
EnvVars: op_service.PrefixEnvVar(EnvPrefix, "TIMESTAMP"),
Usage: "Will use current timestamp, plus 5 seconds, if not set",
}
// artifactsDirFlag points at the forge build artifacts used for contract deployment.
artifactsDirFlag = &cli.StringFlag{
Name: "artifacts-dir",
Value: "packages/contracts-bedrock/forge-artifacts",
EnvVars: op_service.PrefixEnvVar(EnvPrefix, "ARTIFACTS_DIR"),
}
foundryDirFlag = &cli.StringFlag{
Name: "foundry-dir",
Value: "packages/contracts-bedrock",
EnvVars: op_service.PrefixEnvVar(EnvPrefix, "FOUNDRY_DIR"),
Usage: "Optional, for source-map info during genesis generation",
}
// outDirFlag is where all generated deployments/genesis files are written.
// NOTE(review): default is ".interop-devnet", while the devnet script passes
// ".devnet-interop" explicitly — confirm which naming is canonical.
outDirFlag = &cli.StringFlag{
Name: "out-dir",
Value: ".interop-devnet",
EnvVars: op_service.PrefixEnvVar(EnvPrefix, "OUT_DIR"),
}
// used in both dev-setup and devkey commands
mnemonicFlag = &cli.StringFlag{
Name: "mnemonic",
Value: devkeys.TestMnemonic,
EnvVars: op_service.PrefixEnvVar(EnvPrefix, "MNEMONIC"),
}
// for devkey command
// devkeyDomainFlag selects the key domain: "user", "chain-operator", or "superchain-operator" (see parseKey).
devkeyDomainFlag = &cli.StringFlag{
Name: "domain",
Value: "chain-operator",
EnvVars: op_service.PrefixEnvVar(EnvPrefix, "DEVKEY_DOMAIN"),
}
// devkeyChainIdFlag selects the chain the key applies to.
devkeyChainIdFlag = &cli.Uint64Flag{
Name: "chainid",
Value: 0,
EnvVars: op_service.PrefixEnvVar(EnvPrefix, "DEVKEY_CHAINID"),
}
// devkeyNameFlag names the key within the domain (role name, or user index).
devkeyNameFlag = &cli.StringFlag{
Name: "name",
EnvVars: op_service.PrefixEnvVar(EnvPrefix, "DEVKEY_NAME"),
}
)
// InteropDevSetup generates the full local interop devnet configuration:
// deploy configs, contract deployments, and genesis/rollup files
// for one L1 chain and multiple L2 chains, written under --out-dir.
var InteropDevSetup = &cli.Command{
Name: "dev-setup",
Usage: "Generate devnet genesis configs with one L1 and multiple L2s",
Flags: cliapp.ProtectFlags(append([]cli.Flag{
l1ChainIDFlag,
l2ChainIDsFlag,
timestampFlag,
mnemonicFlag,
artifactsDirFlag,
foundryDirFlag,
outDirFlag,
}, oplog.CLIFlags(EnvPrefix)...)),
Action: func(cliCtx *cli.Context) error {
logCfg := oplog.ReadCLIConfig(cliCtx)
logger := oplog.NewLogger(cliCtx.App.Writer, logCfg)
// Assemble the recipe of chains to set up, from CLI flags.
recipe := &interopgen.InteropDevRecipe{
L1ChainID: cliCtx.Uint64(l1ChainIDFlag.Name),
L2ChainIDs: cliCtx.Uint64Slice(l2ChainIDsFlag.Name),
GenesisTimestamp: cliCtx.Uint64(timestampFlag.Name),
}
// Default genesis timestamp: slightly in the future (per the timestamp flag usage note).
if recipe.GenesisTimestamp == 0 {
recipe.GenesisTimestamp = uint64(time.Now().Unix() + 5)
}
mnemonic := strings.TrimSpace(cliCtx.String(mnemonicFlag.Name))
if mnemonic == devkeys.TestMnemonic {
logger.Warn("Using default test mnemonic!")
}
keys, err := devkeys.NewMnemonicDevKeys(mnemonic)
if err != nil {
return fmt.Errorf("failed to setup dev keys from mnemonic: %w", err)
}
// Build concrete deploy configs from the recipe, and sanity-check them before deploying.
worldCfg, err := recipe.Build(keys)
if err != nil {
return fmt.Errorf("failed to build deploy configs from interop recipe: %w", err)
}
if err := worldCfg.Check(logger); err != nil {
return fmt.Errorf("invalid deploy configs: %w", err)
}
artifactsDir := cliCtx.String(artifactsDirFlag.Name)
af := foundry.OpenArtifactsDir(artifactsDir)
// Optional source-map FS, only set when --foundry-dir was given explicitly.
var srcFs *foundry.SourceMapFS
if cliCtx.IsSet(foundryDirFlag.Name) {
srcDir := cliCtx.String(foundryDirFlag.Name)
srcFs = foundry.NewSourceMapFS(os.DirFS(srcDir))
}
worldDeployment, worldOutput, err := interopgen.Deploy(logger, af, srcFs, worldCfg)
if err != nil {
return fmt.Errorf("failed to deploy interop dev setup: %w", err)
}
outDir := cliCtx.String(outDirFlag.Name)
// Write deployments
{
deploymentsDir := filepath.Join(outDir, "deployments")
l1Dir := filepath.Join(deploymentsDir, "l1")
if err := writeJson(filepath.Join(l1Dir, "common.json"), worldDeployment.L1); err != nil {
return fmt.Errorf("failed to write L1 deployment data: %w", err)
}
if err := writeJson(filepath.Join(l1Dir, "superchain.json"), worldDeployment.Superchain); err != nil {
return fmt.Errorf("failed to write Superchain deployment data: %w", err)
}
// One addresses.json per L2 chain, in a directory named after the chain ID.
l2sDir := filepath.Join(deploymentsDir, "l2")
for id, dep := range worldDeployment.L2s {
l2Dir := filepath.Join(l2sDir, id)
if err := writeJson(filepath.Join(l2Dir, "addresses.json"), dep); err != nil {
return fmt.Errorf("failed to write L2 %s deployment data: %w", id, err)
}
}
}
// write genesis
{
genesisDir := filepath.Join(outDir, "genesis")
l1Dir := filepath.Join(genesisDir, "l1")
if err := writeJson(filepath.Join(l1Dir, "genesis.json"), worldOutput.L1.Genesis); err != nil {
return fmt.Errorf("failed to write L1 genesis data: %w", err)
}
// genesis.json and rollup.json per L2 chain.
l2sDir := filepath.Join(genesisDir, "l2")
for id, dep := range worldOutput.L2s {
l2Dir := filepath.Join(l2sDir, id)
if err := writeJson(filepath.Join(l2Dir, "genesis.json"), dep.Genesis); err != nil {
return fmt.Errorf("failed to write L2 %s genesis config: %w", id, err)
}
if err := writeJson(filepath.Join(l2Dir, "rollup.json"), dep.RollupCfg); err != nil {
return fmt.Errorf("failed to write L2 %s rollup config: %w", id, err)
}
}
}
return nil
},
}
// writeJson marshals content to JSON and writes it to path,
// creating parent directories as needed (via ioutil.ToBasicFile).
// NOTE(review): 0o755 is passed as the mode for both the created directories
// and the file itself, so the JSON files end up with the execute bit set —
// presumably a convenience so directory creation works; confirm intent.
func writeJson(path string, content any) error {
return jsonutil.WriteJSON[any](content, ioutil.ToBasicFile(path, 0o755))
}
// DevKeySecretCmd prints the hex-encoded private key of the devkey selected
// by domain, chain ID and name, derived from the configured mnemonic.
var DevKeySecretCmd = &cli.Command{
	Name:  "secret",
	Usage: "Retrieve devkey secret, by specifying domain, chain ID, name.",
	Flags: cliapp.ProtectFlags([]cli.Flag{
		mnemonicFlag,
		devkeyDomainFlag,
		devkeyChainIdFlag,
		devkeyNameFlag,
	}),
	Action: func(cliCtx *cli.Context) error {
		// Resolve the flag selection into a typed key descriptor.
		k, err := parseKey(
			cliCtx.String(devkeyDomainFlag.Name),
			new(big.Int).SetUint64(cliCtx.Uint64(devkeyChainIdFlag.Name)),
			cliCtx.String(devkeyNameFlag.Name),
		)
		if err != nil {
			return err
		}
		mnemonicKeys, err := devkeys.NewMnemonicDevKeys(cliCtx.String(mnemonicFlag.Name))
		if err != nil {
			return err
		}
		secret, err := mnemonicKeys.Secret(k)
		if err != nil {
			return err
		}
		// Emit the raw key bytes as hex, without prefix or trailing newline.
		if _, err := fmt.Fprintf(cliCtx.App.Writer, "%x", crypto.FromECDSA(secret)); err != nil {
			return fmt.Errorf("failed to output secret key: %w", err)
		}
		return nil
	},
}
// DevKeyAddressCmd prints the address of the devkey selected
// by domain, chain ID and name, derived from the configured mnemonic.
var DevKeyAddressCmd = &cli.Command{
	Name:  "address",
	Usage: "Retrieve devkey address, by specifying domain, chain ID, name.",
	Flags: cliapp.ProtectFlags([]cli.Flag{
		mnemonicFlag,
		devkeyDomainFlag,
		devkeyChainIdFlag,
		devkeyNameFlag,
	}),
	Action: func(cliCtx *cli.Context) error {
		// Resolve the flag selection into a typed key descriptor.
		k, err := parseKey(
			cliCtx.String(devkeyDomainFlag.Name),
			new(big.Int).SetUint64(cliCtx.Uint64(devkeyChainIdFlag.Name)),
			cliCtx.String(devkeyNameFlag.Name),
		)
		if err != nil {
			return err
		}
		mnemonicKeys, err := devkeys.NewMnemonicDevKeys(cliCtx.String(mnemonicFlag.Name))
		if err != nil {
			return err
		}
		addr, err := mnemonicKeys.Address(k)
		if err != nil {
			return err
		}
		// Print the address without a trailing newline.
		if _, err := fmt.Fprintf(cliCtx.App.Writer, "%s", addr); err != nil {
			return fmt.Errorf("failed to output address: %w", err)
		}
		return nil
	},
}
// DevKeyCmd groups the devkey subcommands: "secret" and "address".
var DevKeyCmd = &cli.Command{
Name: "devkey",
Usage: "Retrieve devkey secret or address",
Subcommands: cli.Commands{
DevKeySecretCmd,
DevKeyAddressCmd,
},
}
// parseKey resolves a devkey selection (domain, chain ID, key name) into a typed devkeys.Key.
// Supported domains:
//   - "user": name is a numeric account index
//   - "chain-operator": name is a ChainOperatorRole string
//   - "superchain-operator": name is a SuperchainOperatorRole string
//
// An unknown domain, or a name that does not parse within its domain, yields an error.
func parseKey(domain string, chainID *big.Int, name string) (devkeys.Key, error) {
	switch domain {
	case "user":
		index, err := strconv.ParseUint(name, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("failed to parse user index: %w", err)
		}
		return devkeys.ChainUserKey{
			ChainID: chainID,
			Index:   index,
		}, nil
	case "chain-operator":
		var role devkeys.ChainOperatorRole
		if err := role.UnmarshalText([]byte(name)); err != nil {
			return nil, fmt.Errorf("failed to parse chain operator role: %w", err)
		}
		return devkeys.ChainOperatorKey{
			ChainID: chainID,
			Role:    role,
		}, nil
	case "superchain-operator":
		var role devkeys.SuperchainOperatorRole
		if err := role.UnmarshalText([]byte(name)); err != nil {
			// Fixed copy-paste: this previously reported "chain operator role"
			// even though we are in the superchain-operator domain.
			return nil, fmt.Errorf("failed to parse superchain operator role: %w", err)
		}
		return devkeys.SuperchainOperatorKey{
			ChainID: chainID,
			Role:    role,
		}, nil
	default:
		return nil, fmt.Errorf("unknown devkey domain %q", domain)
	}
}
// InteropCmd is the top-level "interop" command, grouping the dev-setup and devkey tools.
var InteropCmd = &cli.Command{
Name: "interop",
Usage: "Experimental tools for OP-Stack interop networks.",
Subcommands: cli.Commands{
InteropDevSetup,
DevKeyCmd,
},
}
......@@ -12,6 +12,7 @@ import (
opnode "github.com/ethereum-optimism/optimism/op-node"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/cmd/genesis"
"github.com/ethereum-optimism/optimism/op-node/cmd/interop"
"github.com/ethereum-optimism/optimism/op-node/cmd/networks"
"github.com/ethereum-optimism/optimism/op-node/cmd/p2p"
"github.com/ethereum-optimism/optimism/op-node/flags"
......@@ -62,6 +63,7 @@ func main() {
Name: "networks",
Subcommands: networks.Subcommands,
},
interop.InteropCmd,
}
ctx := ctxinterrupt.WithSignalWaiterMain(context.Background())
......
......@@ -73,6 +73,7 @@ var (
EnvVars: prefixEnvVars("L1_BEACON"),
Category: RollupCategory,
}
/* Optional Flags */
SupervisorAddr = &cli.StringFlag{
Name: "supervisor",
Usage: "RPC address of interop supervisor service for cross-chain safety verification." +
......@@ -80,7 +81,6 @@ var (
Hidden: true, // hidden for now during early testing.
EnvVars: prefixEnvVars("SUPERVISOR"),
}
/* Optional Flags */
BeaconHeader = &cli.StringFlag{
Name: "l1.beacon-header",
Usage: "Optional HTTP header to add to all requests to the L1 Beacon endpoint. Format: 'X-Key: Value'",
......
......@@ -27,7 +27,7 @@ type InteropBackend interface {
SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error)
Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error)
DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error)
CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error)
UpdateLocalUnsafe(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error
UpdateLocalSafe(ctx context.Context, chainID types.ChainID, derivedFrom eth.L1BlockRef, lastDerived eth.BlockRef) error
......@@ -232,10 +232,11 @@ func (d *InteropDeriver) onCrossSafeUpdateEvent(x engine.CrossSafeUpdateEvent) e
Hash: result.Cross.Hash,
Number: result.Cross.Number,
}
derivedFrom, err := d.backend.DerivedFrom(ctx, d.chainID, derived)
derivedFrom, err := d.backend.CrossDerivedFrom(ctx, d.chainID, derived)
if err != nil {
return fmt.Errorf("failed to get derived-from of %s: %w", result.Cross, err)
}
d.log.Info("New cross-safe block", "block", result.Cross.Number)
ref, err := d.l2.L2BlockRefByHash(ctx, result.Cross.Hash)
if err != nil {
return fmt.Errorf("failed to get block ref of %s: %w", result.Cross, err)
......@@ -272,6 +273,7 @@ func (d *InteropDeriver) onFinalizedUpdate(x engine.FinalizedUpdateEvent) error
if err != nil {
return fmt.Errorf("failed to get block ref of %s: %w", finalized, err)
}
d.log.Info("New finalized block from supervisor", "block", finalized.Number)
d.emitter.Emit(engine.PromoteFinalizedEvent{
Ref: ref,
})
......
package ioutil
import (
"fmt"
"io"
"os"
"path/filepath"
)
var (
......@@ -21,6 +23,20 @@ func NoOutputStream() OutputTarget {
}
}
// ToBasicFile returns an OutputTarget that writes to a regular file at path,
// truncating (or creating) it with the given permission bits.
// The parent directory is created if it does not exist yet.
// The abort function is a no-op: unlike ToAtomicFile, partial writes are left in place.
func ToBasicFile(path string, perm os.FileMode) OutputTarget {
	return func() (io.Writer, io.Closer, Aborter, error) {
		outDir := filepath.Dir(path)
		// Directories need the execute/search bits to be traversable.
		// Add them in case perm is a plain file mode like 0o644;
		// for modes that already carry them (e.g. 0o755) this is a no-op.
		if err := os.MkdirAll(outDir, perm|0o111); err != nil {
			return nil, nil, nil, fmt.Errorf("failed to create dir %q: %w", outDir, err)
		}
		f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, perm)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("failed to open %q: %w", path, err)
		}
		// The file serves as both writer and closer.
		return f, f, func() {}, nil
	}
}
func ToAtomicFile(path string, perm os.FileMode) OutputTarget {
return func() (io.Writer, io.Closer, Aborter, error) {
f, err := NewAtomicWriterCompressed(path, perm)
......
......@@ -67,6 +67,7 @@ type jsonEncoder struct {
func newJSONEncoder(w io.Writer) Encoder {
e := json.NewEncoder(w)
e.SetIndent("", " ")
e.SetEscapeHTML(false)
return &jsonEncoder{
e: e,
}
......
package locks
import "sync"
// RWMap is a simple wrapper around a map, with global Read-Write protection.
// For many concurrent reads/writes a sync.Map may be more performant,
// although it does not utilize Go generics.
// The RWMap does not have to be initialized,
// it is immediately ready for reads/writes.
type RWMap[K comparable, V any] struct {
	inner map[K]V
	mu    sync.RWMutex
}

// Has reports whether the given key is present.
func (m *RWMap[K, V]) Has(key K) bool {
	m.mu.RLock()
	defer m.mu.RUnlock()
	_, found := m.inner[key]
	return found
}

// Get returns the value stored under key, and whether it was present.
// A missing key yields the zero value of V.
func (m *RWMap[K, V]) Get(key K) (V, bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	v, found := m.inner[key]
	return v, found
}

// Set stores value under key, lazily allocating the inner map on first write.
func (m *RWMap[K, V]) Set(key K, value V) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.inner == nil {
		m.inner = make(map[K]V)
	}
	m.inner[key] = value
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
// The read-lock is held for the whole iteration, so f must not call
// back into write methods of the same map (e.g. Set), or it will deadlock.
func (m *RWMap[K, V]) Range(f func(key K, value V) bool) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	for k, v := range m.inner {
		if !f(k, v) {
			return
		}
	}
}
package locks
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestRWMap covers zero-value reads, writes, overwrites, and Range behavior.
func TestRWMap(t *testing.T) {
	m := &RWMap[uint64, int64]{}
	// reads on an untouched map return the zero value and !ok
	value, ok := m.Get(123)
	require.False(t, ok)
	require.Equal(t, int64(0), value)
	// first write, then read it back
	m.Set(123, 42)
	value, ok = m.Get(123)
	require.True(t, ok)
	require.Equal(t, int64(42), value)
	// overwriting replaces the stored value
	m.Set(123, -42)
	value, ok = m.Get(123)
	require.True(t, ok)
	require.Equal(t, int64(-42), value)
	// a second, independent entry
	m.Set(10, 100)
	// Range visits every entry exactly once
	seen := make(map[uint64]int64)
	m.Range(func(key uint64, v int64) bool {
		if _, dup := seen[key]; dup {
			panic("duplicate")
		}
		seen[key] = v
		return true
	})
	require.Len(t, seen, 2)
	require.Equal(t, int64(100), seen[uint64(10)])
	require.Equal(t, int64(-42), seen[uint64(123)])
	// returning false stops the iteration after the first visit
	clear(seen)
	m.Range(func(key uint64, v int64) bool {
		seen[key] = v
		return false
	})
	require.Len(t, seen, 1, "stop early")
}
package locks
import "sync"
// RWValue is a simple container struct, to deconflict reads/writes of the value,
// without locking up a bigger structure in the caller.
// It exposes the underlying RWLock and Value for direct access where needed.
type RWValue[E any] struct {
	sync.RWMutex
	Value E
}

// Get returns the current value, taking the read-lock for the read.
func (c *RWValue[E]) Get() E {
	c.RLock()
	defer c.RUnlock()
	return c.Value
}

// Set replaces the value, taking the write-lock for the update.
func (c *RWValue[E]) Set(v E) {
	c.Lock()
	defer c.Unlock()
	c.Value = v
}
package locks
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestRWValue checks the zero value and repeated Set/Get round-trips.
func TestRWValue(t *testing.T) {
	v := &RWValue[uint64]{}
	require.Equal(t, uint64(0), v.Get(), "zero value by default")
	for _, want := range []uint64{123, 42} {
		v.Set(want)
		require.Equal(t, want, v.Get())
	}
}
......@@ -114,12 +114,12 @@ func (cl *SupervisorClient) Finalized(ctx context.Context, chainID types.ChainID
return result, err
}
func (cl *SupervisorClient) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.BlockRef, error) {
func (cl *SupervisorClient) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.BlockRef, error) {
var result eth.BlockRef
err := cl.client.CallContext(
ctx,
&result,
"supervisor_derivedFrom",
"supervisor_crossDerivedFrom",
chainID,
derived)
return result, err
......
......@@ -29,7 +29,7 @@ func (m *FakeInteropBackend) Finalized(ctx context.Context, chainID types.ChainI
return m.FinalizedFn(ctx, chainID)
}
func (m *FakeInteropBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) {
func (m *FakeInteropBackend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) {
return m.DerivedFromFn(ctx, chainID, derived)
}
......
......@@ -58,13 +58,13 @@ func (m *MockInteropBackend) ExpectFinalized(chainID types.ChainID, result eth.B
m.Mock.On("Finalized", chainID).Once().Return(result, &err)
}
func (m *MockInteropBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) {
result := m.Mock.MethodCalled("DerivedFrom", chainID, derived)
func (m *MockInteropBackend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (eth.L1BlockRef, error) {
result := m.Mock.MethodCalled("CrossDerivedFrom", chainID, derived)
return result.Get(0).(eth.L1BlockRef), *result.Get(1).(*error)
}
func (m *MockInteropBackend) ExpectDerivedFrom(chainID types.ChainID, derived eth.BlockID, result eth.L1BlockRef, err error) {
m.Mock.On("DerivedFrom", chainID, derived).Once().Return(result, &err)
m.Mock.On("CrossDerivedFrom", chainID, derived).Once().Return(result, &err)
}
func (m *MockInteropBackend) UpdateLocalUnsafe(ctx context.Context, chainID types.ChainID, head eth.BlockRef) error {
......
......@@ -402,11 +402,11 @@ func (su *SupervisorBackend) Finalized(ctx context.Context, chainID types.ChainI
return v.ID(), nil
}
func (su *SupervisorBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) {
func (su *SupervisorBackend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) {
su.mu.RLock()
defer su.mu.RUnlock()
v, err := su.chainDBs.DerivedFrom(chainID, derived)
v, err := su.chainDBs.CrossDerivedFromBlockRef(chainID, derived)
if err != nil {
return eth.BlockRef{}, err
}
......
......@@ -223,7 +223,7 @@ func TestCrossSafeHazards(t *testing.T) {
require.ErrorContains(t, err, "some error")
require.Empty(t, hazards)
})
t.Run("timestamp is less, DerivedFrom returns error", func(t *testing.T) {
t.Run("timestamp is less, CrossDerivedFrom returns error", func(t *testing.T) {
ssd := &mockSafeStartDeps{}
sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})}
ssd.checkFn = func() (includedIn types.BlockSeal, err error) {
......@@ -245,7 +245,7 @@ func TestCrossSafeHazards(t *testing.T) {
require.ErrorContains(t, err, "some error")
require.Empty(t, hazards)
})
t.Run("timestamp is less, DerivedFrom Number is greater", func(t *testing.T) {
t.Run("timestamp is less, CrossDerivedFrom Number is greater", func(t *testing.T) {
ssd := &mockSafeStartDeps{}
sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})}
ssd.checkFn = func() (includedIn types.BlockSeal, err error) {
......@@ -268,7 +268,7 @@ func TestCrossSafeHazards(t *testing.T) {
require.ErrorIs(t, err, types.ErrOutOfScope)
require.Empty(t, hazards)
})
t.Run("timestamp is less, DerivedFrom Number less", func(t *testing.T) {
t.Run("timestamp is less, CrossDerivedFrom Number less", func(t *testing.T) {
ssd := &mockSafeStartDeps{}
sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})}
ssd.checkFn = func() (includedIn types.BlockSeal, err error) {
......@@ -291,7 +291,7 @@ func TestCrossSafeHazards(t *testing.T) {
require.NoError(t, err)
require.Empty(t, hazards)
})
t.Run("timestamp is less, DerivedFrom Number equal", func(t *testing.T) {
t.Run("timestamp is less, CrossDerivedFrom Number equal", func(t *testing.T) {
ssd := &mockSafeStartDeps{}
sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})}
ssd.checkFn = func() (includedIn types.BlockSeal, err error) {
......
......@@ -6,6 +6,7 @@ import (
"sync"
"time"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/log"
)
......@@ -37,7 +38,7 @@ func NewWorker(log log.Logger, workFn workFn) *Worker {
log: log,
poke: make(chan struct{}, 1),
// The data may have changed, and we may have missed a poke, so re-attempt regularly.
pollDuration: time.Second * 4,
pollDuration: 250 * time.Millisecond,
ctx: ctx,
cancel: cancel,
}
......@@ -69,7 +70,11 @@ func (s *Worker) worker() {
if errors.Is(err, s.ctx.Err()) {
return
}
s.log.Error("Failed to process work", "err", err)
if errors.Is(err, types.ErrFuture) {
s.log.Debug("Failed to process work", "err", err)
} else {
s.log.Warn("Failed to process work", "err", err)
}
}
// await next time we process, or detect shutdown
......
......@@ -4,12 +4,12 @@ import (
"errors"
"fmt"
"io"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/locks"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/fromda"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset"
......@@ -73,28 +73,23 @@ var _ LogStorage = (*logs.DB)(nil)
// ChainsDB is a database that stores logs and derived-from data for multiple chains.
// it implements the LogStorage interface, as well as several DB interfaces needed by the cross package.
type ChainsDB struct {
// RW mutex:
// Read = chains can be read / mutated.
// Write = set of chains is changing.
mu sync.RWMutex
// unsafe info: the sequence of block seals and events
logDBs map[types.ChainID]LogStorage
logDBs locks.RWMap[types.ChainID, LogStorage]
// cross-unsafe: how far we have processed the unsafe data.
// If present but set to a zeroed value the cross-unsafe will fallback to cross-safe.
crossUnsafe map[types.ChainID]types.BlockSeal
crossUnsafe locks.RWMap[types.ChainID, *locks.RWValue[types.BlockSeal]]
// local-safe: index of what we optimistically know about L2 blocks being derived from L1
localDBs map[types.ChainID]LocalDerivedFromStorage
localDBs locks.RWMap[types.ChainID, LocalDerivedFromStorage]
// cross-safe: index of L2 blocks we know to only have cross-L2 valid dependencies
crossDBs map[types.ChainID]CrossDerivedFromStorage
crossDBs locks.RWMap[types.ChainID, CrossDerivedFromStorage]
// finalized: the L1 finality progress. This can be translated into what may be considered as finalized in L2.
// It is initially zeroed, and the L2 finality query will return
// an error until it has this L1 finality to work with.
finalizedL1 eth.L1BlockRef
finalizedL1 locks.RWValue[eth.L1BlockRef]
// depSet is the dependency set, used to determine what may be tracked,
// what is missing, and to provide it to DB users.
......@@ -105,78 +100,62 @@ type ChainsDB struct {
func NewChainsDB(l log.Logger, depSet depset.DependencySet) *ChainsDB {
return &ChainsDB{
logDBs: make(map[types.ChainID]LogStorage),
logger: l,
localDBs: make(map[types.ChainID]LocalDerivedFromStorage),
crossDBs: make(map[types.ChainID]CrossDerivedFromStorage),
crossUnsafe: make(map[types.ChainID]types.BlockSeal),
depSet: depSet,
}
}
func (db *ChainsDB) AddLogDB(chainID types.ChainID, logDB LogStorage) {
db.mu.Lock()
defer db.mu.Unlock()
if _, ok := db.logDBs[chainID]; ok {
if db.logDBs.Has(chainID) {
db.logger.Warn("overwriting existing log DB for chain", "chain", chainID)
}
db.logDBs[chainID] = logDB
db.logDBs.Set(chainID, logDB)
}
func (db *ChainsDB) AddLocalDerivedFromDB(chainID types.ChainID, dfDB LocalDerivedFromStorage) {
db.mu.Lock()
defer db.mu.Unlock()
if _, ok := db.localDBs[chainID]; ok {
if db.localDBs.Has(chainID) {
db.logger.Warn("overwriting existing local derived-from DB for chain", "chain", chainID)
}
db.localDBs[chainID] = dfDB
db.localDBs.Set(chainID, dfDB)
}
func (db *ChainsDB) AddCrossDerivedFromDB(chainID types.ChainID, dfDB CrossDerivedFromStorage) {
db.mu.Lock()
defer db.mu.Unlock()
if _, ok := db.crossDBs[chainID]; ok {
if db.crossDBs.Has(chainID) {
db.logger.Warn("overwriting existing cross derived-from DB for chain", "chain", chainID)
}
db.crossDBs[chainID] = dfDB
db.crossDBs.Set(chainID, dfDB)
}
func (db *ChainsDB) AddCrossUnsafeTracker(chainID types.ChainID) {
db.mu.Lock()
defer db.mu.Unlock()
if _, ok := db.crossUnsafe[chainID]; ok {
if db.crossUnsafe.Has(chainID) {
db.logger.Warn("overwriting existing cross-unsafe tracker for chain", "chain", chainID)
}
db.crossUnsafe[chainID] = types.BlockSeal{}
db.crossUnsafe.Set(chainID, &locks.RWValue[types.BlockSeal]{})
}
// ResumeFromLastSealedBlock prepares the chains db to resume recording events after a restart.
// It rewinds the database to the last block that is guaranteed to have been fully recorded to the database,
// to ensure it can resume recording from the first log of the next block.
func (db *ChainsDB) ResumeFromLastSealedBlock() error {
db.mu.RLock()
defer db.mu.RUnlock()
for chain, logStore := range db.logDBs {
var result error
db.logDBs.Range(func(chain types.ChainID, logStore LogStorage) bool {
headNum, ok := logStore.LatestSealedBlockNum()
if !ok {
// db must be empty, nothing to rewind to
db.logger.Info("Resuming, but found no DB contents", "chain", chain)
continue
return true
}
db.logger.Info("Resuming, starting from last sealed block", "head", headNum)
if err := logStore.Rewind(headNum); err != nil {
return fmt.Errorf("failed to rewind chain %s to sealed block %d", chain, headNum)
}
result = fmt.Errorf("failed to rewind chain %s to sealed block %d", chain, headNum)
return false
}
return nil
return true
})
return result
}
func (db *ChainsDB) DependencySet() depset.DependencySet {
......@@ -184,14 +163,12 @@ func (db *ChainsDB) DependencySet() depset.DependencySet {
}
func (db *ChainsDB) Close() error {
db.mu.Lock()
defer db.mu.Unlock()
var combined error
for id, logDB := range db.logDBs {
db.logDBs.Range(func(id types.ChainID, logDB LogStorage) bool {
if err := logDB.Close(); err != nil {
combined = errors.Join(combined, fmt.Errorf("failed to close log db for chain %v: %w", id, err))
}
}
return true
})
return combined
}
......@@ -64,7 +64,7 @@ func TestBadUpdates(t *testing.T) {
assertFn: noChange,
},
{
name: "DerivedFrom with conflicting parent root, same L1 height, new L2: accepted, L1 parent-hash is used only on L1 increments.",
name: "CrossDerivedFrom with conflicting parent root, same L1 height, new L2: accepted, L1 parent-hash is used only on L1 increments.",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddDerived(toRef(dDerivedFrom, common.Hash{0x42}), toRef(eDerived, dDerived.Hash)), types.ErrConflict)
},
......
......@@ -15,10 +15,7 @@ func (db *ChainsDB) AddLog(
parentBlock eth.BlockID,
logIdx uint32,
execMsg *types.ExecutingMessage) error {
db.mu.RLock()
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chain]
logDB, ok := db.logDBs.Get(chain)
if !ok {
return fmt.Errorf("cannot AddLog: %w: %v", types.ErrUnknownChain, chain)
}
......@@ -26,10 +23,7 @@ func (db *ChainsDB) AddLog(
}
func (db *ChainsDB) SealBlock(chain types.ChainID, block eth.BlockRef) error {
db.mu.RLock()
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chain]
logDB, ok := db.logDBs.Get(chain)
if !ok {
return fmt.Errorf("cannot SealBlock: %w: %v", types.ErrUnknownChain, chain)
}
......@@ -42,10 +36,7 @@ func (db *ChainsDB) SealBlock(chain types.ChainID, block eth.BlockRef) error {
}
func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error {
db.mu.RLock()
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chain]
logDB, ok := db.logDBs.Get(chain)
if !ok {
return fmt.Errorf("cannot Rewind: %w: %s", types.ErrUnknownChain, chain)
}
......@@ -53,10 +44,7 @@ func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error {
}
func (db *ChainsDB) UpdateLocalSafe(chain types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error {
db.mu.RLock()
defer db.mu.RUnlock()
localDB, ok := db.localDBs[chain]
localDB, ok := db.localDBs.Get(chain)
if !ok {
return fmt.Errorf("cannot UpdateLocalSafe: %w: %v", types.ErrUnknownChain, chain)
}
......@@ -65,22 +53,17 @@ func (db *ChainsDB) UpdateLocalSafe(chain types.ChainID, derivedFrom eth.BlockRe
}
func (db *ChainsDB) UpdateCrossUnsafe(chain types.ChainID, crossUnsafe types.BlockSeal) error {
db.mu.RLock()
defer db.mu.RUnlock()
if _, ok := db.crossUnsafe[chain]; !ok {
v, ok := db.crossUnsafe.Get(chain)
if !ok {
return fmt.Errorf("cannot UpdateCrossUnsafe: %w: %s", types.ErrUnknownChain, chain)
}
db.logger.Debug("Updating cross unsafe", "chain", chain, "crossUnsafe", crossUnsafe)
db.crossUnsafe[chain] = crossUnsafe
v.Set(crossUnsafe)
return nil
}
func (db *ChainsDB) UpdateCrossSafe(chain types.ChainID, l1View eth.BlockRef, lastCrossDerived eth.BlockRef) error {
db.mu.RLock()
defer db.mu.RUnlock()
crossDB, ok := db.crossDBs[chain]
crossDB, ok := db.crossDBs.Get(chain)
if !ok {
return fmt.Errorf("cannot UpdateCrossSafe: %w: %s", types.ErrUnknownChain, chain)
}
......@@ -89,13 +72,14 @@ func (db *ChainsDB) UpdateCrossSafe(chain types.ChainID, l1View eth.BlockRef, la
}
func (db *ChainsDB) UpdateFinalizedL1(finalized eth.BlockRef) error {
db.mu.RLock()
defer db.mu.RUnlock()
// Lock, so we avoid race-conditions in-between getting (for comparison) and setting.
db.finalizedL1.Lock()
defer db.finalizedL1.Unlock()
if db.finalizedL1.Number > finalized.Number {
return fmt.Errorf("cannot rewind finalized L1 head from %s to %s", db.finalizedL1, finalized)
if v := db.finalizedL1.Value; v.Number > finalized.Number {
return fmt.Errorf("cannot rewind finalized L1 head from %s to %s", v, finalized)
}
db.logger.Debug("Updating finalized L1", "finalizedL1", finalized)
db.finalizedL1 = finalized
db.finalizedL1.Value = finalized
return nil
}
......@@ -62,7 +62,7 @@ func (m *MockBackend) Finalized(ctx context.Context, chainID types.ChainID) (eth
return eth.BlockID{}, nil
}
func (m *MockBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) {
func (m *MockBackend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) {
return eth.BlockRef{}, nil
}
......
......@@ -17,7 +17,7 @@ type AdminBackend interface {
type QueryBackend interface {
CheckMessage(identifier types.Identifier, payloadHash common.Hash) (types.SafetyLevel, error)
CheckMessages(messages []types.Message, minSafety types.SafetyLevel) error
DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error)
CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error)
UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error)
SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error)
Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error)
......@@ -67,8 +67,8 @@ func (q *QueryFrontend) Finalized(ctx context.Context, chainID types.ChainID) (e
return q.Supervisor.Finalized(ctx, chainID)
}
func (q *QueryFrontend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) {
return q.Supervisor.DerivedFrom(ctx, chainID, derived)
func (q *QueryFrontend) CrossDerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) {
return q.Supervisor.CrossDerivedFrom(ctx, chainID, derived)
}
type AdminFrontend struct {
......
......@@ -72,6 +72,17 @@ func (su *SupervisorService) initBackend(ctx context.Context, cfg *config.Config
su.backend = backend.NewMockBackend()
return nil
}
// the flag is a string slice, which has the potential to have empty strings
filterBlank := func(in []string) []string {
out := make([]string, 0, len(in))
for _, s := range in {
if s != "" {
out = append(out, s)
}
}
return out
}
cfg.L2RPCs = filterBlank(cfg.L2RPCs)
be, err := backend.NewSupervisorBackend(ctx, su.log, su.metrics, cfg)
if err != nil {
return fmt.Errorf("failed to create supervisor backend: %w", err)
......
FROM us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101411.1-rc.3
# Note: depend on dev-release for sequencer interop message checks
RUN apk add --no-cache jq
COPY l2-op-geth-entrypoint.sh /entrypoint.sh
VOLUME ["/db"]
ENTRYPOINT ["/bin/sh", "/entrypoint.sh"]
......@@ -6,6 +6,7 @@
!/op-batcher
!/op-bootnode
!/op-chain-ops
!/op-deployer
!/op-challenger
!/packages/contracts-bedrock/snapshots
!/op-dispute-mon
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment