Commit cb066834 authored by OptimismBot's avatar OptimismBot Committed by GitHub

Merge pull request #7111 from ethereum-optimism/runtime-config-reloading

op-node: implement runtime config reloading
parents db94109b 1752ffc5
...@@ -120,8 +120,9 @@ func DefaultSystemConfig(t *testing.T) SystemConfig { ...@@ -120,8 +120,9 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
ListenPort: 0, ListenPort: 0,
EnableAdmin: true, EnableAdmin: true,
}, },
L1EpochPollInterval: time.Second * 2, L1EpochPollInterval: time.Second * 2,
ConfigPersistence: &rollupNode.DisabledConfigPersistence{}, RuntimeConfigReloadInterval: time.Minute * 10,
ConfigPersistence: &rollupNode.DisabledConfigPersistence{},
}, },
"verifier": { "verifier": {
Driver: driver.Config{ Driver: driver.Config{
...@@ -129,8 +130,9 @@ func DefaultSystemConfig(t *testing.T) SystemConfig { ...@@ -129,8 +130,9 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
SequencerConfDepth: 0, SequencerConfDepth: 0,
SequencerEnabled: false, SequencerEnabled: false,
}, },
L1EpochPollInterval: time.Second * 4, L1EpochPollInterval: time.Second * 4,
ConfigPersistence: &rollupNode.DisabledConfigPersistence{}, RuntimeConfigReloadInterval: time.Minute * 10,
ConfigPersistence: &rollupNode.DisabledConfigPersistence{},
}, },
}, },
Loggers: map[string]log.Logger{ Loggers: map[string]log.Logger{
......
...@@ -30,6 +30,7 @@ import ( ...@@ -30,6 +30,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys" "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-e2e/config" "github.com/ethereum-optimism/optimism/op-e2e/config"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/client" "github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/metrics" "github.com/ethereum-optimism/optimism/op-node/metrics"
rollupNode "github.com/ethereum-optimism/optimism/op-node/node" rollupNode "github.com/ethereum-optimism/optimism/op-node/node"
...@@ -1389,3 +1390,47 @@ func TestPendingBlockIsLatest(t *testing.T) { ...@@ -1389,3 +1390,47 @@ func TestPendingBlockIsLatest(t *testing.T) {
t.Fatal("failed to get pending header with same number as latest header") t.Fatal("failed to get pending header with same number as latest header")
}) })
} }
// TestRuntimeConfigReload checks that the op-node picks up a SystemConfig
// change made on L1 purely through the periodic runtime-config reload loop,
// i.e. without relying on block derivation to deliver the log event (the
// verifier's EL node is closed to guarantee derivation cannot advance).
func TestRuntimeConfigReload(t *testing.T) {
	InitParallel(t)
	cfg := DefaultSystemConfig(t)
	// to speed up the test, make it reload the config more often, and do not impose a long conf depth
	cfg.Nodes["verifier"].RuntimeConfigReloadInterval = time.Second * 5
	cfg.Nodes["verifier"].Driver.VerifierConfDepth = 1

	sys, err := cfg.Start(t)
	require.NoError(t, err, "Error starting up system")
	defer sys.Close()

	initialRuntimeConfig := sys.RollupNodes["verifier"].RuntimeConfig()

	// close the EL node, since we want to block derivation, to solely rely on the reloading mechanism for updates.
	sys.EthInstances["verifier"].Close()

	l1 := sys.Clients["l1"]

	// Change the system-config via L1
	sysCfgContract, err := bindings.NewSystemConfig(cfg.L1Deployments.SystemConfigProxy, l1)
	require.NoError(t, err)

	newUnsafeBlocksSigner := common.Address{0x12, 0x23, 0x45}
	require.NotEqual(t, initialRuntimeConfig.P2PSequencerAddress(), newUnsafeBlocksSigner, "changing to a different address")
	opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.SysCfgOwner, cfg.L1ChainIDBig())
	require.NoError(t, err)
	// the unsafe signer address is part of the runtime config
	tx, err := sysCfgContract.SetUnsafeBlockSigner(opts, newUnsafeBlocksSigner)
	require.NoError(t, err)

	// wait for the change to confirm
	_, err = wait.ForReceiptOK(context.Background(), l1, tx.Hash())
	require.NoError(t, err)

	// wait for the address to change: poll the verifier's runtime config until
	// the reload loop has observed the new unsafe-block signer on L1.
	_, err = retry.Do(context.Background(), 10, retry.Fixed(time.Second*10), func() (struct{}, error) {
		v := sys.RollupNodes["verifier"].RuntimeConfig().P2PSequencerAddress()
		if v == newUnsafeBlocksSigner {
			return struct{}{}, nil
		}
		return struct{}{}, fmt.Errorf("no change yet, seeing %s but looking for %s", v, newUnsafeBlocksSigner)
	})
	require.NoError(t, err)
}
...@@ -146,6 +146,13 @@ var ( ...@@ -146,6 +146,13 @@ var (
Required: false, Required: false,
Value: time.Second * 12 * 32, Value: time.Second * 12 * 32,
} }
// RuntimeConfigReloadIntervalFlag sets how often the node re-fetches the
// runtime config from L1, as a safety net when the config-change log events
// are not being picked up (e.g. the node is not syncing L1 blocks).
// A zero or negative value disables the periodic reload (per Usage).
RuntimeConfigReloadIntervalFlag = &cli.DurationFlag{
Name: "l1.runtime-config-reload-interval",
Usage: "Poll interval for reloading the runtime config, useful when config events are not being picked up. Disabled if 0 or negative.",
EnvVars: prefixEnvVars("L1_RUNTIME_CONFIG_RELOAD_INTERVAL"),
Required: false,
Value: time.Minute * 10,
}
MetricsEnabledFlag = &cli.BoolFlag{ MetricsEnabledFlag = &cli.BoolFlag{
Name: "metrics.enabled", Name: "metrics.enabled",
Usage: "Enable the metrics server", Usage: "Enable the metrics server",
...@@ -261,6 +268,7 @@ var optionalFlags = []cli.Flag{ ...@@ -261,6 +268,7 @@ var optionalFlags = []cli.Flag{
SequencerMaxSafeLagFlag, SequencerMaxSafeLagFlag,
SequencerL1Confs, SequencerL1Confs,
L1EpochPollIntervalFlag, L1EpochPollIntervalFlag,
RuntimeConfigReloadIntervalFlag,
RPCEnableAdmin, RPCEnableAdmin,
RPCAdminPersistence, RPCAdminPersistence,
MetricsEnabledFlag, MetricsEnabledFlag,
......
...@@ -41,6 +41,12 @@ type Config struct { ...@@ -41,6 +41,12 @@ type Config struct {
ConfigPersistence ConfigPersistence ConfigPersistence ConfigPersistence
// RuntimeConfigReloadInterval defines the interval between runtime config reloads.
// Disabled if 0.
// Runtime config changes should be picked up from log-events,
// but if log-events are not coming in (e.g. not syncing blocks) then the reload ensures the config stays accurate.
RuntimeConfigReloadInterval time.Duration
// Optional // Optional
Tracer Tracer Tracer Tracer
Heartbeat HeartbeatConfig Heartbeat HeartbeatConfig
......
...@@ -2,7 +2,6 @@ package node ...@@ -2,7 +2,6 @@ package node
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"time" "time"
...@@ -19,6 +18,7 @@ import ( ...@@ -19,6 +18,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/driver" "github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/sources" "github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/retry"
) )
type OpNode struct { type OpNode struct {
...@@ -159,27 +159,70 @@ func (n *OpNode) initRuntimeConfig(ctx context.Context, cfg *Config) error { ...@@ -159,27 +159,70 @@ func (n *OpNode) initRuntimeConfig(ctx context.Context, cfg *Config) error {
// attempt to load runtime config, repeat N times // attempt to load runtime config, repeat N times
n.runCfg = NewRuntimeConfig(n.log, n.l1Source, &cfg.Rollup) n.runCfg = NewRuntimeConfig(n.log, n.l1Source, &cfg.Rollup)
for i := 0; i < 5; i++ { confDepth := cfg.Driver.VerifierConfDepth
reload := func(ctx context.Context) (eth.L1BlockRef, error) {
fetchCtx, fetchCancel := context.WithTimeout(ctx, time.Second*10) fetchCtx, fetchCancel := context.WithTimeout(ctx, time.Second*10)
l1Head, err := n.l1Source.L1BlockRefByLabel(fetchCtx, eth.Unsafe) l1Head, err := n.l1Source.L1BlockRefByLabel(fetchCtx, eth.Unsafe)
fetchCancel() fetchCancel()
if err != nil { if err != nil {
n.log.Error("failed to fetch L1 head for runtime config initialization", "err", err) n.log.Error("failed to fetch L1 head for runtime config initialization", "err", err)
continue return eth.L1BlockRef{}, err
}
// Apply confirmation-distance
blNum := l1Head.Number
if blNum >= confDepth {
blNum -= confDepth
}
fetchCtx, fetchCancel = context.WithTimeout(ctx, time.Second*10)
confirmed, err := n.l1Source.L1BlockRefByNumber(fetchCtx, blNum)
fetchCancel()
if err != nil {
n.log.Error("failed to fetch confirmed L1 block for runtime config loading", "err", err, "number", blNum)
return eth.L1BlockRef{}, err
} }
fetchCtx, fetchCancel = context.WithTimeout(ctx, time.Second*10) fetchCtx, fetchCancel = context.WithTimeout(ctx, time.Second*10)
err = n.runCfg.Load(fetchCtx, l1Head) err = n.runCfg.Load(fetchCtx, confirmed)
fetchCancel() fetchCancel()
if err != nil { if err != nil {
n.log.Error("failed to fetch runtime config data", "err", err) n.log.Error("failed to fetch runtime config data", "err", err)
continue return l1Head, err
} }
return l1Head, nil
}
return nil // initialize the runtime config before unblocking
if _, err := retry.Do(ctx, 5, retry.Fixed(time.Second*10), func() (eth.L1BlockRef, error) {
return reload(ctx)
}); err != nil {
return fmt.Errorf("failed to load runtime configuration repeatedly, last error: %w", err)
} }
return errors.New("failed to load runtime configuration repeatedly") // start a background loop, to keep reloading it at the configured reload interval
go func(ctx context.Context, reloadInterval time.Duration) {
if reloadInterval <= 0 {
n.log.Debug("not running runtime-config reloading background loop")
return
}
ticker := time.NewTicker(reloadInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// If the reload fails, we will try again the next interval.
// Missing a runtime-config update is not critical, and we do not want to overwhelm the L1 RPC.
if l1Head, err := reload(ctx); err != nil {
n.log.Warn("failed to reload runtime config", "err", err)
} else {
n.log.Debug("reloaded runtime config", "l1_head", l1Head)
}
case <-ctx.Done():
return
}
}
}(n.resourcesCtx, cfg.RuntimeConfigReloadInterval) // this keeps running after initialization
return nil
} }
func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger) error { func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger) error {
...@@ -397,6 +440,10 @@ func (n *OpNode) P2P() p2p.Node { ...@@ -397,6 +440,10 @@ func (n *OpNode) P2P() p2p.Node {
return n.p2pNode return n.p2pNode
} }
// RuntimeConfig returns a read-only view of the node's runtime config,
// so callers (e.g. e2e tests) can inspect it without being able to mutate it.
func (n *OpNode) RuntimeConfig() ReadonlyRuntimeConfig {
return n.runCfg
}
// Close closes all resources. // Close closes all resources.
func (n *OpNode) Close() error { func (n *OpNode) Close() error {
var result *multierror.Error var result *multierror.Error
......
...@@ -23,6 +23,10 @@ type RuntimeCfgL1Source interface { ...@@ -23,6 +23,10 @@ type RuntimeCfgL1Source interface {
ReadStorageAt(ctx context.Context, address common.Address, storageSlot common.Hash, blockHash common.Hash) (common.Hash, error) ReadStorageAt(ctx context.Context, address common.Address, storageSlot common.Hash, blockHash common.Hash) (common.Hash, error)
} }
// ReadonlyRuntimeConfig is the read-only accessor surface of the runtime
// config; it currently exposes only the P2P sequencer (unsafe block signer)
// address.
type ReadonlyRuntimeConfig interface {
P2PSequencerAddress() common.Address
}
// RuntimeConfig maintains runtime-configurable options. // RuntimeConfig maintains runtime-configurable options.
// These options are loaded based on initial loading + updates for every subsequent L1 block. // These options are loaded based on initial loading + updates for every subsequent L1 block.
// Only the *latest* values are maintained however, the runtime config has no concept of chain history, // Only the *latest* values are maintained however, the runtime config has no concept of chain history,
......
...@@ -82,9 +82,10 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { ...@@ -82,9 +82,10 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
ListenAddr: ctx.String(flags.PprofAddrFlag.Name), ListenAddr: ctx.String(flags.PprofAddrFlag.Name),
ListenPort: ctx.Int(flags.PprofPortFlag.Name), ListenPort: ctx.Int(flags.PprofPortFlag.Name),
}, },
P2P: p2pConfig, P2P: p2pConfig,
P2PSigner: p2pSignerSetup, P2PSigner: p2pSignerSetup,
L1EpochPollInterval: ctx.Duration(flags.L1EpochPollIntervalFlag.Name), L1EpochPollInterval: ctx.Duration(flags.L1EpochPollIntervalFlag.Name),
RuntimeConfigReloadInterval: ctx.Duration(flags.RuntimeConfigReloadIntervalFlag.Name),
Heartbeat: node.HeartbeatConfig{ Heartbeat: node.HeartbeatConfig{
Enabled: ctx.Bool(flags.HeartbeatEnabledFlag.Name), Enabled: ctx.Bool(flags.HeartbeatEnabledFlag.Name),
Moniker: ctx.String(flags.HeartbeatMonikerFlag.Name), Moniker: ctx.String(flags.HeartbeatMonikerFlag.Name),
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment