Commit 2db03c7f authored by mergify[bot]'s avatar mergify[bot] Committed by GitHub

Merge branch 'develop' into aj/fpvm-status-e2e

parents 023b3d5e cb066834
......@@ -121,6 +121,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
EnableAdmin: true,
},
L1EpochPollInterval: time.Second * 2,
RuntimeConfigReloadInterval: time.Minute * 10,
ConfigPersistence: &rollupNode.DisabledConfigPersistence{},
},
"verifier": {
......@@ -130,6 +131,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
SequencerEnabled: false,
},
L1EpochPollInterval: time.Second * 4,
RuntimeConfigReloadInterval: time.Minute * 10,
ConfigPersistence: &rollupNode.DisabledConfigPersistence{},
},
},
......
......@@ -30,6 +30,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-e2e/config"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/metrics"
rollupNode "github.com/ethereum-optimism/optimism/op-node/node"
......@@ -1389,3 +1390,47 @@ func TestPendingBlockIsLatest(t *testing.T) {
t.Fatal("failed to get pending header with same number as latest header")
})
}
// TestRuntimeConfigReload checks that the verifier op-node picks up a runtime-config
// change on L1 (here: the unsafe-block-signer address in the SystemConfig contract)
// purely through the periodic reload loop, i.e. without deriving the L1 blocks that
// carry the config-change log-event (derivation is blocked by closing the EL node).
func TestRuntimeConfigReload(t *testing.T) {
	InitParallel(t)
	cfg := DefaultSystemConfig(t)
	// to speed up the test, make it reload the config more often, and do not impose a long conf depth
	cfg.Nodes["verifier"].RuntimeConfigReloadInterval = time.Second * 5
	cfg.Nodes["verifier"].Driver.VerifierConfDepth = 1

	sys, err := cfg.Start(t)
	require.NoError(t, err, "Error starting up system")
	defer sys.Close()

	initialRuntimeConfig := sys.RollupNodes["verifier"].RuntimeConfig()

	// close the EL node, since we want to block derivation, to solely rely on the reloading mechanism for updates.
	sys.EthInstances["verifier"].Close()

	l1 := sys.Clients["l1"]

	// Change the system-config via L1
	sysCfgContract, err := bindings.NewSystemConfig(cfg.L1Deployments.SystemConfigProxy, l1)
	require.NoError(t, err)

	newUnsafeBlocksSigner := common.Address{0x12, 0x23, 0x45}
	require.NotEqual(t, initialRuntimeConfig.P2PSequencerAddress(), newUnsafeBlocksSigner, "changing to a different address")
	opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.SysCfgOwner, cfg.L1ChainIDBig())
	require.NoError(t, err)
	// the unsafe signer address is part of the runtime config
	tx, err := sysCfgContract.SetUnsafeBlockSigner(opts, newUnsafeBlocksSigner)
	require.NoError(t, err)

	// wait for the change to confirm
	_, err = wait.ForReceiptOK(context.Background(), l1, tx.Hash())
	require.NoError(t, err)

	// wait for the address to change, polling well past the 5s reload interval set above
	_, err = retry.Do(context.Background(), 10, retry.Fixed(time.Second*10), func() (struct{}, error) {
		v := sys.RollupNodes["verifier"].RuntimeConfig().P2PSequencerAddress()
		if v == newUnsafeBlocksSigner {
			return struct{}{}, nil
		}
		return struct{}{}, fmt.Errorf("no change yet, seeing %s but looking for %s", v, newUnsafeBlocksSigner)
	})
	require.NoError(t, err)
}
......@@ -146,6 +146,13 @@ var (
Required: false,
Value: time.Second * 12 * 32,
}
RuntimeConfigReloadIntervalFlag = &cli.DurationFlag{
Name: "l1.runtime-config-reload-interval",
Usage: "Poll interval for reloading the runtime config, useful when config events are not being picked up. Disabled if 0 or negative.",
EnvVars: prefixEnvVars("L1_RUNTIME_CONFIG_RELOAD_INTERVAL"),
Required: false,
Value: time.Minute * 10,
}
MetricsEnabledFlag = &cli.BoolFlag{
Name: "metrics.enabled",
Usage: "Enable the metrics server",
......@@ -261,6 +268,7 @@ var optionalFlags = []cli.Flag{
SequencerMaxSafeLagFlag,
SequencerL1Confs,
L1EpochPollIntervalFlag,
RuntimeConfigReloadIntervalFlag,
RPCEnableAdmin,
RPCAdminPersistence,
MetricsEnabledFlag,
......
......@@ -41,6 +41,12 @@ type Config struct {
ConfigPersistence ConfigPersistence
// RuntimeConfigReloadInterval defines the interval between runtime config reloads.
// Disabled if 0.
// Runtime config changes should be picked up from log-events,
// but if log-events are not coming in (e.g. not syncing blocks) then the reload ensures the config stays accurate.
RuntimeConfigReloadInterval time.Duration
// Optional
Tracer Tracer
Heartbeat HeartbeatConfig
......
......@@ -2,7 +2,6 @@ package node
import (
"context"
"errors"
"fmt"
"time"
......@@ -19,6 +18,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/retry"
)
type OpNode struct {
......@@ -159,27 +159,70 @@ func (n *OpNode) initRuntimeConfig(ctx context.Context, cfg *Config) error {
// attempt to load runtime config, repeat N times
n.runCfg = NewRuntimeConfig(n.log, n.l1Source, &cfg.Rollup)
for i := 0; i < 5; i++ {
confDepth := cfg.Driver.VerifierConfDepth
reload := func(ctx context.Context) (eth.L1BlockRef, error) {
fetchCtx, fetchCancel := context.WithTimeout(ctx, time.Second*10)
l1Head, err := n.l1Source.L1BlockRefByLabel(fetchCtx, eth.Unsafe)
fetchCancel()
if err != nil {
n.log.Error("failed to fetch L1 head for runtime config initialization", "err", err)
continue
return eth.L1BlockRef{}, err
}
// Apply confirmation-distance
blNum := l1Head.Number
if blNum >= confDepth {
blNum -= confDepth
}
fetchCtx, fetchCancel = context.WithTimeout(ctx, time.Second*10)
err = n.runCfg.Load(fetchCtx, l1Head)
confirmed, err := n.l1Source.L1BlockRefByNumber(fetchCtx, blNum)
fetchCancel()
if err != nil {
n.log.Error("failed to fetch confirmed L1 block for runtime config loading", "err", err, "number", blNum)
return eth.L1BlockRef{}, err
}
fetchCtx, fetchCancel = context.WithTimeout(ctx, time.Second*10)
err = n.runCfg.Load(fetchCtx, confirmed)
fetchCancel()
if err != nil {
n.log.Error("failed to fetch runtime config data", "err", err)
continue
return l1Head, err
}
return l1Head, nil
}
return nil
// initialize the runtime config before unblocking
if _, err := retry.Do(ctx, 5, retry.Fixed(time.Second*10), func() (eth.L1BlockRef, error) {
return reload(ctx)
}); err != nil {
return fmt.Errorf("failed to load runtime configuration repeatedly, last error: %w", err)
}
return errors.New("failed to load runtime configuration repeatedly")
// start a background loop, to keep reloading it at the configured reload interval
go func(ctx context.Context, reloadInterval time.Duration) {
if reloadInterval <= 0 {
n.log.Debug("not running runtime-config reloading background loop")
return
}
ticker := time.NewTicker(reloadInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// If the reload fails, we will try again the next interval.
// Missing a runtime-config update is not critical, and we do not want to overwhelm the L1 RPC.
if l1Head, err := reload(ctx); err != nil {
n.log.Warn("failed to reload runtime config", "err", err)
} else {
n.log.Debug("reloaded runtime config", "l1_head", l1Head)
}
case <-ctx.Done():
return
}
}
}(n.resourcesCtx, cfg.RuntimeConfigReloadInterval) // this keeps running after initialization
return nil
}
func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger) error {
......@@ -397,6 +440,10 @@ func (n *OpNode) P2P() p2p.Node {
return n.p2pNode
}
// RuntimeConfig returns a read-only view of the node's runtime config,
// e.g. so tests can inspect the currently loaded values.
func (n *OpNode) RuntimeConfig() ReadonlyRuntimeConfig {
	return n.runCfg
}
// Close closes all resources.
func (n *OpNode) Close() error {
var result *multierror.Error
......
......@@ -23,6 +23,10 @@ type RuntimeCfgL1Source interface {
ReadStorageAt(ctx context.Context, address common.Address, storageSlot common.Hash, blockHash common.Hash) (common.Hash, error)
}
// ReadonlyRuntimeConfig is the subset of the runtime config that consumers
// may read but not modify.
type ReadonlyRuntimeConfig interface {
	P2PSequencerAddress() common.Address
}
// RuntimeConfig maintains runtime-configurable options.
// These options are loaded based on initial loading + updates for every subsequent L1 block.
// Only the *latest* values are maintained however, the runtime config has no concept of chain history,
......
......@@ -85,6 +85,7 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
P2P: p2pConfig,
P2PSigner: p2pSignerSetup,
L1EpochPollInterval: ctx.Duration(flags.L1EpochPollIntervalFlag.Name),
RuntimeConfigReloadInterval: ctx.Duration(flags.RuntimeConfigReloadIntervalFlag.Name),
Heartbeat: node.HeartbeatConfig{
Enabled: ctx.Bool(flags.HeartbeatEnabledFlag.Name),
Moniker: ctx.String(flags.HeartbeatMonikerFlag.Name),
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment