Commit cb453762 authored by protolambda

op-node: shutdown cleanup review fixes

parent ccfb3119
@@ -135,12 +135,8 @@ func (n *OpNode) init(ctx context.Context, cfg *Config, snapshotLog log.Logger)
 	}
 	n.metrics.RecordInfo(n.appVersion)
 	n.metrics.RecordUp()
-	if err := n.initHeartbeat(ctx, cfg); err != nil {
-		return fmt.Errorf("failed to init the heartbeat service: %w", err)
-	}
-	if err := n.initPProf(ctx, cfg); err != nil {
-		return fmt.Errorf("failed to init pprof server: %w", err)
-	}
+	n.initHeartbeat(cfg)
+	n.initPProf(cfg)
 	return nil
 }
 
@@ -257,18 +253,20 @@ func (n *OpNode) initRuntimeConfig(ctx context.Context, cfg *Config) error {
 				// If the reload fails, we will try again the next interval.
 				// Missing a runtime-config update is not critical, and we do not want to overwhelm the L1 RPC.
 				l1Head, err := reload(ctx)
-				switch err {
-				case errNodeHalt, nil:
-					n.log.Debug("reloaded runtime config", "l1_head", l1Head)
-					if err == errNodeHalt {
+				if err != nil {
+					if errors.Is(err, errNodeHalt) {
 						n.halted = true
-						if n.cancel != nil {
+						if n.cancel != nil { // node cancellation is always available when started as CLI app
 							n.cancel(errNodeHalt)
+							return
+						} else {
+							n.log.Debug("opted to halt, but cannot halt node", "l1_head", l1Head)
 						}
-						return
+					} else {
+						n.log.Warn("failed to reload runtime config", "err", err)
 					}
-				default:
-					n.log.Warn("failed to reload runtime config", "err", err)
+				} else {
+					n.log.Debug("reloaded runtime config", "l1_head", l1Head)
 				}
 			case <-ctx.Done():
 				return
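Note: the halt path above works by cancelling a shared context with an error cause. A minimal sketch of that pattern, assuming n.cancel was created with context.WithCancelCause (Go 1.20+); the error text and main() scaffolding here are illustrative, not op-node code:

// Sketch: halting a process by cancelling a shared context with a cause.
package main

import (
	"context"
	"errors"
	"fmt"
)

var errNodeHalt = errors.New("node opted to halt")

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())

	// Some background task decides the node must halt: it cancels the
	// shared context and records why.
	go cancel(errNodeHalt)

	<-ctx.Done()
	// The top level distinguishes a deliberate halt from any other
	// shutdown by inspecting the cancellation cause, as the diff does
	// with errors.Is.
	if errors.Is(context.Cause(ctx), errNodeHalt) {
		fmt.Println("shutting down: node opted to halt")
	}
}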
@@ -357,9 +355,9 @@ func (n *OpNode) initMetricsServer(ctx context.Context, cfg *Config) error {
 	return nil
 }
 
-func (n *OpNode) initHeartbeat(_ context.Context, cfg *Config) error {
+func (n *OpNode) initHeartbeat(cfg *Config) {
 	if !cfg.Heartbeat.Enabled {
-		return nil
+		return
 	}
 	var peerID string
 	if cfg.P2P.Disabled() {
@@ -381,12 +379,11 @@ func (n *OpNode) initHeartbeat(_ context.Context, cfg *Config) error {
 			log.Error("heartbeat goroutine crashed", "err", err)
 		}
 	}(cfg.Heartbeat.URL)
-	return nil
 }
 
-func (n *OpNode) initPProf(_ context.Context, cfg *Config) error {
+func (n *OpNode) initPProf(cfg *Config) {
 	if !cfg.Pprof.Enabled {
-		return nil
+		return
 	}
 	log.Info("pprof server started", "addr", net.JoinHostPort(cfg.Pprof.ListenAddr, strconv.Itoa(cfg.Pprof.ListenPort)))
 	go func(listenAddr string, listenPort int) {
@@ -394,7 +391,6 @@ func (n *OpNode) initPProf(_ context.Context, cfg *Config) error {
 			log.Error("error starting pprof", "err", err)
 		}
 	}(cfg.Pprof.ListenAddr, cfg.Pprof.ListenPort)
-	return nil
 }
 
 func (n *OpNode) initP2P(ctx context.Context, cfg *Config) error {
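Note: both init functions now follow the same best-effort shape: an optional service no longer returns an error that could abort node startup; failures surface as logs from the goroutine that runs the service. A self-contained sketch of that shape for a pprof server, using the standard library as a stand-in for op-node's helpers:

// Sketch: fire-and-forget init for an optional service. A failure is
// logged from the goroutine instead of being returned, so it cannot
// fail node startup. Standard-library stand-in, not op-node code.
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof handlers on the default mux
)

func initPProf(enabled bool, addr string) {
	if !enabled {
		return // disabled: nothing to start, nothing to report
	}
	log.Printf("pprof server started, addr=%s", addr)
	go func() {
		// ListenAndServe blocks until the server exits; any error is
		// logged here rather than propagated to the caller.
		if err := http.ListenAndServe(addr, nil); err != nil {
			log.Printf("error starting pprof: %v", err)
		}
	}()
}

func main() {
	initPProf(true, "127.0.0.1:6060")
	select {} // block forever so the demo server stays up
}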
...
@@ -383,9 +383,9 @@ type publisher struct {
 	log log.Logger
 	cfg *rollup.Config
 
-	// p2pCtx is used for downstream message-handling resources,
-	// these can be cancelled without blocking.
-	p2pCtx    context.Context
+	// p2pCancel cancels the downstream gossip event-handling functions, independent of the sources.
+	// A closed gossip event source (event handler or subscription) does not stop any open event iteration,
+	// thus we have to stop it ourselves this way.
 	p2pCancel context.CancelFunc
 
 	// blocks topic, main handle on block gossip
@@ -475,7 +475,6 @@ func JoinGossip(self peer.ID, ps *pubsub.PubSub, log log.Logger, cfg *rollup.Config,
 		blocksTopic:  blocksTopic,
 		blocksEvents: blocksTopicEvents,
 		blocksSub:    subscription,
-		p2pCtx:       p2pCtx,
 		p2pCancel:    p2pCancel,
 		runCfg:       runCfg,
 	}, nil
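Note: the rewritten comment is the crux of this change: calling p2pCancel is what unblocks any goroutine still iterating events, since merely closing the subscription or handler does not. A minimal sketch of that shutdown shape, with a plain channel as a hypothetical stand-in for the gossip event source (not the libp2p API):

// Sketch: a consumer loop that only stops via context cancellation.
// Closing or abandoning the event source alone would leave the loop
// blocked, which is why the publisher keeps the cancel function around.
package main

import (
	"context"
	"fmt"
	"time"
)

func consumeEvents(ctx context.Context, events <-chan string) {
	for {
		select {
		case <-ctx.Done():
			// The cancel function, not a closed source, ends iteration.
			fmt.Println("event iteration stopped")
			return
		case ev := <-events:
			fmt.Println("handled event:", ev)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	events := make(chan string)
	go consumeEvents(ctx, events)

	events <- "peer joined"
	cancel() // the publisher's close path would call p2pCancel like this
	time.Sleep(50 * time.Millisecond) // give the consumer time to observe Done
}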