metrics.go 26.6 KB
Newer Older
1
// Package metrics provides a set of metrics for the op-node.
2 3 4 5 6 7 8 9
package metrics

import (
	"context"
	"errors"
	"fmt"
	"net"
	"strconv"
10
	"time"
11

12 13
	"github.com/ethereum/go-ethereum/params"

14
	"github.com/ethereum-optimism/optimism/op-node/p2p/store"
Flocqst's avatar
Flocqst committed
15
	ophttp "github.com/ethereum-optimism/optimism/op-service/httputil"
16 17
	"github.com/ethereum-optimism/optimism/op-service/metrics"

18
	pb "github.com/libp2p/go-libp2p-pubsub/pb"
19
	libp2pmetrics "github.com/libp2p/go-libp2p/core/metrics"
20 21 22
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
23

24
	"github.com/ethereum/go-ethereum"
25
	"github.com/ethereum/go-ethereum/common"
26
	"github.com/ethereum/go-ethereum/rpc"
27

28
	"github.com/ethereum-optimism/optimism/op-service/eth"
29 30 31 32 33 34 35 36 37 38 39
)

const (
	// Namespace is the metrics namespace prefix shared by all op-node metrics.
	Namespace = "op_node"

	// Subsystem names for the RPC server/client metric families.
	RPCServerSubsystem = "rpc_server"
	RPCClientSubsystem = "rpc_client"

	// BatchMethod is a pseudo method label; presumably used to record batched
	// RPC calls — not referenced in this file, verify against callers.
	BatchMethod = "<batch>"
)

// Metricer is the full instrumentation surface of the op-node: derivation,
// sequencing, RPC client/server, channel-bank, and p2p metrics. It is
// implemented by both Metrics and noopMetricer in this file.
type Metricer interface {
	RecordInfo(version string)
	RecordUp()
	RecordRPCServerRequest(method string) func()
	RecordRPCClientRequest(method string) func(err error)
	RecordRPCClientResponse(method string, err error)
	SetDerivationIdle(status bool)
	RecordPipelineReset()
	RecordSequencingError()
	RecordPublishingError()
	RecordDerivationError()
	RecordReceivedUnsafePayload(payload *eth.ExecutionPayload)
	RecordRef(layer string, name string, num uint64, timestamp uint64, h common.Hash)
	RecordL1Ref(name string, ref eth.L1BlockRef)
	RecordL2Ref(name string, ref eth.L2BlockRef)
	RecordUnsafePayloadsBuffer(length uint64, memSize uint64, next eth.BlockID)
	CountSequencedTxs(count int)
	RecordL1ReorgDepth(d uint64)
	RecordSequencerInconsistentL1Origin(from eth.BlockID, to eth.BlockID)
	RecordSequencerReset()
	RecordGossipEvent(evType int32)
	IncPeerCount()
	DecPeerCount()
	IncStreamCount()
	DecStreamCount()
	RecordBandwidth(ctx context.Context, bwc *libp2pmetrics.BandwidthCounter)
	RecordSequencerBuildingDiffTime(duration time.Duration)
	RecordSequencerSealingTime(duration time.Duration)
	Document() []metrics.DocumentedMetric
	RecordChannelInputBytes(num int)
	RecordHeadChannelOpened()
	RecordChannelTimedOut()
	RecordFrame()
	// P2P Metrics
	SetPeerScores(allScores []store.PeerScores)
	ClientPayloadByNumberEvent(num uint64, resultCode byte, duration time.Duration)
	ServerPayloadByNumberEvent(num uint64, resultCode byte, duration time.Duration)
	PayloadsQuarantineSize(n int)
	RecordPeerUnban()
	RecordIPUnban()
	RecordDial(allow bool)
	RecordAccept(allow bool)
	ReportProtocolVersions(local, engine, recommended, required params.ProtocolVersion)
}

// Metrics tracks all the metrics for the op-node.
type Metrics struct {
	Info *prometheus.GaugeVec
	Up   prometheus.Gauge

	RPCServerRequestsTotal          *prometheus.CounterVec
	RPCServerRequestDurationSeconds *prometheus.HistogramVec
	RPCClientRequestsTotal          *prometheus.CounterVec
	RPCClientRequestDurationSeconds *prometheus.HistogramVec
	RPCClientResponsesTotal         *prometheus.CounterVec

	L1SourceCache *CacheMetrics
	L2SourceCache *CacheMetrics

	DerivationIdle prometheus.Gauge

	// Derivation / sequencing event counters.
	PipelineResets   *metrics.Event
	UnsafePayloads   *metrics.Event
	DerivationErrors *metrics.Event
	SequencingErrors *metrics.Event
	PublishingErrors *metrics.Event

	P2PReqDurationSeconds *prometheus.HistogramVec
	P2PReqTotal           *prometheus.CounterVec
	P2PPayloadByNumber    *prometheus.GaugeVec

	PayloadsQuarantineTotal prometheus.Gauge

	SequencerInconsistentL1Origin *metrics.Event
	SequencerResets               *metrics.Event

	L1RequestDurationSeconds *prometheus.HistogramVec

	SequencerBuildingDiffDurationSeconds prometheus.Histogram
	SequencerBuildingDiffTotal           prometheus.Counter

	SequencerSealingDurationSeconds prometheus.Histogram
	SequencerSealingTotal           prometheus.Counter

	UnsafePayloadsBufferLen     prometheus.Gauge
	UnsafePayloadsBufferMemSize prometheus.Gauge

	// RefMetrics embeds the shared L1/L2 block-reference metrics.
	metrics.RefMetrics

	L1ReorgDepth prometheus.Histogram

	// NOTE(review): declared as prometheus.Counter but constructed with
	// factory.NewGauge in NewMetrics (valid because Gauge structurally
	// satisfies Counter) — confirm which exposition type is intended.
	TransactionsSequencedTotal prometheus.Counter

	// Channel Bank Metrics
	headChannelOpenedEvent *metrics.Event
	channelTimedOutEvent   *metrics.Event
	frameAddedEvent        *metrics.Event

	// P2P Metrics
	PeerCount         prometheus.Gauge
	StreamCount       prometheus.Gauge
	GossipEventsTotal *prometheus.CounterVec
	BandwidthTotal    *prometheus.GaugeVec
	PeerUnbans        prometheus.Counter
	IPUnbans          prometheus.Counter
	Dials             *prometheus.CounterVec
	Accepts           *prometheus.CounterVec
	PeerScores        *prometheus.HistogramVec

	ChannelInputBytes prometheus.Counter

	// Protocol version reporting
	// Delta = params.ProtocolVersionComparison
	ProtocolVersionDelta *prometheus.GaugeVec
	// ProtocolVersions is pseudo-metric to report the exact protocol version info
	ProtocolVersions *prometheus.GaugeVec

	// registry holds all metrics above; factory wraps it and records
	// documentation for Document().
	registry *prometheus.Registry
	factory  metrics.Factory
}

// Compile-time check that Metrics implements the Metricer interface.
var _ Metricer = (*Metrics)(nil)

Andreas Bigger's avatar
Andreas Bigger committed
163
// NewMetrics creates a new [Metrics] instance with the given process name.
164 165 166 167 168 169 170 171 172
func NewMetrics(procName string) *Metrics {
	if procName == "" {
		procName = "default"
	}
	ns := Namespace + "_" + procName

	registry := prometheus.NewRegistry()
	registry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
	registry.MustRegister(collectors.NewGoCollector())
173
	factory := metrics.With(registry)
174

175
	return &Metrics{
176
		Info: factory.NewGaugeVec(prometheus.GaugeOpts{
177 178 179 180 181 182
			Namespace: ns,
			Name:      "info",
			Help:      "Pseudo-metric tracking version and config info",
		}, []string{
			"version",
		}),
183
		Up: factory.NewGauge(prometheus.GaugeOpts{
184 185 186 187
			Namespace: ns,
			Name:      "up",
			Help:      "1 if the op node has finished starting up",
		}),
188

189
		RPCServerRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{
190 191 192 193 194 195 196
			Namespace: ns,
			Subsystem: RPCServerSubsystem,
			Name:      "requests_total",
			Help:      "Total requests to the RPC server",
		}, []string{
			"method",
		}),
197
		RPCServerRequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{
198 199 200 201 202 203 204 205
			Namespace: ns,
			Subsystem: RPCServerSubsystem,
			Name:      "request_duration_seconds",
			Buckets:   []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
			Help:      "Histogram of RPC server request durations",
		}, []string{
			"method",
		}),
206
		RPCClientRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{
207 208 209 210 211 212 213
			Namespace: ns,
			Subsystem: RPCClientSubsystem,
			Name:      "requests_total",
			Help:      "Total RPC requests initiated by the opnode's RPC client",
		}, []string{
			"method",
		}),
214
		RPCClientRequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{
215 216 217 218 219 220 221 222
			Namespace: ns,
			Subsystem: RPCClientSubsystem,
			Name:      "request_duration_seconds",
			Buckets:   []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
			Help:      "Histogram of RPC client request durations",
		}, []string{
			"method",
		}),
223
		RPCClientResponsesTotal: factory.NewCounterVec(prometheus.CounterOpts{
224 225 226 227 228 229 230 231
			Namespace: ns,
			Subsystem: RPCClientSubsystem,
			Name:      "responses_total",
			Help:      "Total RPC request responses received by the opnode's RPC client",
		}, []string{
			"method",
			"error",
		}),
232

233 234
		L1SourceCache: NewCacheMetrics(factory, ns, "l1_source_cache", "L1 Source cache"),
		L2SourceCache: NewCacheMetrics(factory, ns, "l2_source_cache", "L2 Source cache"),
235

236
		DerivationIdle: factory.NewGauge(prometheus.GaugeOpts{
237 238 239 240
			Namespace: ns,
			Name:      "derivation_idle",
			Help:      "1 if the derivation pipeline is idle",
		}),
241

242 243 244 245 246
		PipelineResets:   metrics.NewEvent(factory, ns, "", "pipeline_resets", "derivation pipeline resets"),
		UnsafePayloads:   metrics.NewEvent(factory, ns, "", "unsafe_payloads", "unsafe payloads"),
		DerivationErrors: metrics.NewEvent(factory, ns, "", "derivation_errors", "derivation errors"),
		SequencingErrors: metrics.NewEvent(factory, ns, "", "sequencing_errors", "sequencing errors"),
		PublishingErrors: metrics.NewEvent(factory, ns, "", "publishing_errors", "p2p publishing errors"),
247

248 249
		SequencerInconsistentL1Origin: metrics.NewEvent(factory, ns, "", "sequencer_inconsistent_l1_origin", "events when the sequencer selects an inconsistent L1 origin"),
		SequencerResets:               metrics.NewEvent(factory, ns, "", "sequencer_resets", "sequencer resets"),
250

251
		UnsafePayloadsBufferLen: factory.NewGauge(prometheus.GaugeOpts{
252 253 254 255
			Namespace: ns,
			Name:      "unsafe_payloads_buffer_len",
			Help:      "Number of buffered L2 unsafe payloads",
		}),
256
		UnsafePayloadsBufferMemSize: factory.NewGauge(prometheus.GaugeOpts{
257 258 259 260 261
			Namespace: ns,
			Name:      "unsafe_payloads_buffer_mem_size",
			Help:      "Total estimated memory size of buffered L2 unsafe payloads",
		}),

262
		RefMetrics: metrics.MakeRefMetrics(ns, factory),
263

264
		L1ReorgDepth: factory.NewHistogram(prometheus.HistogramOpts{
265 266 267 268 269
			Namespace: ns,
			Name:      "l1_reorg_depth",
			Buckets:   []float64{0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 20.5, 50.5, 100.5},
			Help:      "Histogram of L1 Reorg Depths",
		}),
270

271
		TransactionsSequencedTotal: factory.NewGauge(prometheus.GaugeOpts{
272 273 274 275 276
			Namespace: ns,
			Name:      "transactions_sequenced_total",
			Help:      "Count of total transactions sequenced",
		}),

277
		PeerCount: factory.NewGauge(prometheus.GaugeOpts{
278 279 280 281 282
			Namespace: ns,
			Subsystem: "p2p",
			Name:      "peer_count",
			Help:      "Count of currently connected p2p peers",
		}),
283
		PeerScores: factory.NewHistogramVec(prometheus.HistogramOpts{
asnared's avatar
asnared committed
284 285
			Namespace: ns,
			Name:      "peer_scores",
286 287 288
			Help:      "Histogram of currrently connected peer scores",
			Buckets:   []float64{-100, -40, -20, -10, -5, -2, -1, -0.5, -0.05, 0, 0.05, 0.5, 1, 2, 5, 10, 20, 40},
		}, []string{"type"}),
289 290 291 292 293
		StreamCount: factory.NewGauge(prometheus.GaugeOpts{
			Namespace: ns,
			Subsystem: "p2p",
			Name:      "stream_count",
			Help:      "Count of currently connected p2p streams",
asnared's avatar
asnared committed
294
		}),
295
		GossipEventsTotal: factory.NewCounterVec(prometheus.CounterOpts{
296 297 298 299 300 301 302
			Namespace: ns,
			Subsystem: "p2p",
			Name:      "gossip_events_total",
			Help:      "Count of gossip events by type",
		}, []string{
			"type",
		}),
303
		BandwidthTotal: factory.NewGaugeVec(prometheus.GaugeOpts{
304 305 306 307 308 309 310
			Namespace: ns,
			Subsystem: "p2p",
			Name:      "bandwidth_bytes_total",
			Help:      "P2P bandwidth by direction",
		}, []string{
			"direction",
		}),
311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334
		PeerUnbans: factory.NewCounter(prometheus.CounterOpts{
			Namespace: ns,
			Subsystem: "p2p",
			Name:      "peer_unbans",
			Help:      "Count of peer unbans",
		}),
		IPUnbans: factory.NewCounter(prometheus.CounterOpts{
			Namespace: ns,
			Subsystem: "p2p",
			Name:      "ip_unbans",
			Help:      "Count of IP unbans",
		}),
		Dials: factory.NewCounterVec(prometheus.CounterOpts{
			Namespace: ns,
			Subsystem: "p2p",
			Name:      "dials",
			Help:      "Count of outgoing dial attempts, with label to filter to allowed attempts",
		}, []string{"allow"}),
		Accepts: factory.NewCounterVec(prometheus.CounterOpts{
			Namespace: ns,
			Subsystem: "p2p",
			Name:      "accepts",
			Help:      "Count of incoming dial attempts to accept, with label to filter to allowed attempts",
		}, []string{"allow"}),
335

336 337 338
		headChannelOpenedEvent: metrics.NewEvent(factory, ns, "", "head_channel", "New channel at the front of the channel bank"),
		channelTimedOutEvent:   metrics.NewEvent(factory, ns, "", "channel_timeout", "Channel has timed out"),
		frameAddedEvent:        metrics.NewEvent(factory, ns, "", "frame_added", "New frame ingested in the channel bank"),
339

340
		ChannelInputBytes: factory.NewCounter(prometheus.CounterOpts{
341 342 343 344 345
			Namespace: ns,
			Name:      "channel_input_bytes",
			Help:      "Number of compressed bytes added to the channel",
		}),

346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383
		P2PReqDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: ns,
			Subsystem: "p2p",
			Name:      "req_duration_seconds",
			Buckets:   []float64{},
			Help:      "Duration of P2P requests",
		}, []string{
			"p2p_role", // "client" or "server"
			"p2p_method",
			"result_code",
		}),

		P2PReqTotal: factory.NewCounterVec(prometheus.CounterOpts{
			Namespace: ns,
			Subsystem: "p2p",
			Name:      "req_total",
			Help:      "Number of P2P requests",
		}, []string{
			"p2p_role", // "client" or "server"
			"p2p_method",
			"result_code",
		}),

		P2PPayloadByNumber: factory.NewGaugeVec(prometheus.GaugeOpts{
			Namespace: ns,
			Subsystem: "p2p",
			Name:      "payload_by_number",
			Help:      "Payload by number requests",
		}, []string{
			"p2p_role", // "client" or "server"
		}),
		PayloadsQuarantineTotal: factory.NewGauge(prometheus.GaugeOpts{
			Namespace: ns,
			Subsystem: "p2p",
			Name:      "payloads_quarantine_total",
			Help:      "number of unverified execution payloads buffered in quarantine",
		}),

384 385 386 387 388 389 390 391
		L1RequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: ns,
			Name:      "l1_request_seconds",
			Buckets: []float64{
				.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
			Help: "Histogram of L1 request time",
		}, []string{"request"}),

392
		SequencerBuildingDiffDurationSeconds: factory.NewHistogram(prometheus.HistogramOpts{
393 394 395 396 397 398 399
			Namespace: ns,
			Name:      "sequencer_building_diff_seconds",
			Buckets: []float64{
				-10, -5, -2.5, -1, -.5, -.25, -.1, -0.05, -0.025, -0.01, -0.005,
				.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
			Help: "Histogram of Sequencer building time, minus block time",
		}),
400
		SequencerBuildingDiffTotal: factory.NewCounter(prometheus.CounterOpts{
401 402 403 404
			Namespace: ns,
			Name:      "sequencer_building_diff_total",
			Help:      "Number of sequencer block building jobs",
		}),
405
		SequencerSealingDurationSeconds: factory.NewHistogram(prometheus.HistogramOpts{
406 407 408 409 410
			Namespace: ns,
			Name:      "sequencer_sealing_seconds",
			Buckets:   []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
			Help:      "Histogram of Sequencer block sealing time",
		}),
411
		SequencerSealingTotal: factory.NewCounter(prometheus.CounterOpts{
412 413 414 415 416
			Namespace: ns,
			Name:      "sequencer_sealing_total",
			Help:      "Number of sequencer block sealing jobs",
		}),

417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434
		ProtocolVersionDelta: factory.NewGaugeVec(prometheus.GaugeOpts{
			Namespace: ns,
			Name:      "protocol_version_delta",
			Help:      "Difference between local and global protocol version, and execution-engine, per type of version",
		}, []string{
			"type",
		}),
		ProtocolVersions: factory.NewGaugeVec(prometheus.GaugeOpts{
			Namespace: ns,
			Name:      "protocol_versions",
			Help:      "Pseudo-metric tracking recommended and required protocol version info",
		}, []string{
			"local",
			"engine",
			"recommended",
			"required",
		}),

435
		registry: registry,
436
		factory:  factory,
437 438 439
	}
}

440 441 442
// SetPeerScores updates the peer score metrics.
// Accepts a slice of peer scores in any order.
func (m *Metrics) SetPeerScores(allScores []store.PeerScores) {
443
	for _, scores := range allScores {
444 445 446 447 448 449 450
		m.PeerScores.WithLabelValues("total").Observe(scores.Gossip.Total)
		m.PeerScores.WithLabelValues("ipColocation").Observe(scores.Gossip.IPColocationFactor)
		m.PeerScores.WithLabelValues("behavioralPenalty").Observe(scores.Gossip.BehavioralPenalty)
		m.PeerScores.WithLabelValues("blocksFirstMessage").Observe(scores.Gossip.Blocks.FirstMessageDeliveries)
		m.PeerScores.WithLabelValues("blocksTimeInMesh").Observe(scores.Gossip.Blocks.TimeInMesh)
		m.PeerScores.WithLabelValues("blocksMessageDeliveries").Observe(scores.Gossip.Blocks.MeshMessageDeliveries)
		m.PeerScores.WithLabelValues("blocksInvalidMessageDeliveries").Observe(scores.Gossip.Blocks.InvalidMessageDeliveries)
451 452 453 454

		m.PeerScores.WithLabelValues("reqRespValidResponses").Observe(scores.ReqResp.ValidResponses)
		m.PeerScores.WithLabelValues("reqRespErrorResponses").Observe(scores.ReqResp.ErrorResponses)
		m.PeerScores.WithLabelValues("reqRespRejectedPayloads").Observe(scores.ReqResp.RejectedPayloads)
455 456 457
	}
}

458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516
// RecordInfo sets a pseudo-metric that contains versioning and
// config info for the opnode.
func (m *Metrics) RecordInfo(version string) {
	m.Info.WithLabelValues(version).Set(1)
}

// RecordUp sets the up metric to 1.
func (m *Metrics) RecordUp() {
	// Removed a stray zero-argument prometheus.MustRegister() call: it
	// registered nothing on the global default registry (a no-op), and this
	// struct registers everything on its own registry in NewMetrics.
	m.Up.Set(1)
}

// RecordRPCServerRequest is a helper method to record an incoming RPC
// call to the opnode's RPC server. It bumps the requests metric and
// returns a done-callback that records how long serving the response took.
func (m *Metrics) RecordRPCServerRequest(method string) func() {
	m.RPCServerRequestsTotal.WithLabelValues(method).Inc()
	durationObserver := m.RPCServerRequestDurationSeconds.WithLabelValues(method)
	startedAt := prometheus.NewTimer(durationObserver)
	return func() {
		startedAt.ObserveDuration()
	}
}

// RecordRPCClientRequest is a helper method to record an RPC client
// request. It bumps the requests metric and returns a done-callback that
// records the response's error code and the request duration.
func (m *Metrics) RecordRPCClientRequest(method string) func(err error) {
	m.RPCClientRequestsTotal.WithLabelValues(method).Inc()
	durationObserver := m.RPCClientRequestDurationSeconds.WithLabelValues(method)
	startedAt := prometheus.NewTimer(durationObserver)
	return func(err error) {
		// Record the response outcome first, then the elapsed time.
		m.RecordRPCClientResponse(method, err)
		startedAt.ObserveDuration()
	}
}

// RecordRPCClientResponse records an RPC response. It will
// convert the passed-in error into something metrics friendly.
// Nil errors get converted into <nil>, RPC errors are converted
// into rpc_<error code>, HTTP errors are converted into
// http_<status code>, and everything else is converted into
// <unknown>.
func (m *Metrics) RecordRPCClientResponse(method string, err error) {
	var rpcErr rpc.Error
	var httpErr rpc.HTTPError
	// Classification order matters: rpc.Error before rpc.HTTPError before NotFound.
	label := "<unknown>"
	switch {
	case err == nil:
		label = "<nil>"
	case errors.As(err, &rpcErr):
		label = fmt.Sprintf("rpc_%d", rpcErr.ErrorCode())
	case errors.As(err, &httpErr):
		label = fmt.Sprintf("http_%d", httpErr.StatusCode)
	case errors.Is(err, ethereum.NotFound):
		label = "<not found>"
	}
	m.RPCClientResponsesTotal.WithLabelValues(method, label).Inc()
}

517 518 519 520 521 522 523 524
func (m *Metrics) SetDerivationIdle(status bool) {
	var val float64
	if status {
		val = 1
	}
	m.DerivationIdle.Set(val)
}

// RecordPipelineReset records a derivation pipeline reset event.
func (m *Metrics) RecordPipelineReset() {
	m.PipelineResets.Record()
}

// RecordSequencingError records a sequencing error event.
func (m *Metrics) RecordSequencingError() {
	m.SequencingErrors.Record()
}

// RecordPublishingError records a p2p publishing error event.
func (m *Metrics) RecordPublishingError() {
	m.PublishingErrors.Record()
}

// RecordDerivationError records a derivation error event.
func (m *Metrics) RecordDerivationError() {
	m.DerivationErrors.Record()
}

// RecordReceivedUnsafePayload records an unsafe-payload event and the
// block reference (number, timestamp, hash) of the received payload.
func (m *Metrics) RecordReceivedUnsafePayload(payload *eth.ExecutionPayload) {
	m.UnsafePayloads.Record()
	m.RecordRef("l2", "received_payload", uint64(payload.BlockNumber), uint64(payload.Timestamp), payload.BlockHash)
}

// RecordUnsafePayloadsBuffer records the unsafe-payload buffer's length and
// estimated memory footprint, plus the next buffered block reference
// (timestamp recorded as 0 since only number/hash are known here).
func (m *Metrics) RecordUnsafePayloadsBuffer(length uint64, memSize uint64, next eth.BlockID) {
	m.RecordRef("l2", "l2_buffer_unsafe", next.Number, 0, next.Hash)
	m.UnsafePayloadsBufferLen.Set(float64(length))
	m.UnsafePayloadsBufferMemSize.Set(float64(memSize))
}

// CountSequencedTxs adds count to the total-transactions-sequenced metric.
// NOTE(review): the underlying metric is constructed as a gauge in NewMetrics
// despite the Counter-typed field — confirm intended metric type.
func (m *Metrics) CountSequencedTxs(count int) {
	m.TransactionsSequencedTotal.Add(float64(count))
}

// RecordL1ReorgDepth observes the depth of an L1 reorg in the depth histogram.
func (m *Metrics) RecordL1ReorgDepth(d uint64) {
	m.L1ReorgDepth.Observe(float64(d))
}

// RecordSequencerInconsistentL1Origin records an inconsistent-L1-origin event
// along with the from/to block references involved (timestamps unknown, 0).
func (m *Metrics) RecordSequencerInconsistentL1Origin(from eth.BlockID, to eth.BlockID) {
	m.SequencerInconsistentL1Origin.Record()
	m.RecordRef("l1_origin", "inconsistent_from", from.Number, 0, from.Hash)
	m.RecordRef("l1_origin", "inconsistent_to", to.Number, 0, to.Hash)
}

// RecordSequencerReset records a sequencer reset event.
func (m *Metrics) RecordSequencerReset() {
	m.SequencerResets.Record()
}

// RecordGossipEvent bumps the gossip event counter for the given pubsub trace
// event type, labeled by the event type's protobuf name.
func (m *Metrics) RecordGossipEvent(evType int32) {
	m.GossipEventsTotal.WithLabelValues(pb.TraceEvent_Type_name[evType]).Inc()
}

// IncPeerCount increments the connected-peer count gauge.
func (m *Metrics) IncPeerCount() {
	m.PeerCount.Inc()
}

// DecPeerCount decrements the connected-peer count gauge.
func (m *Metrics) DecPeerCount() {
	m.PeerCount.Dec()
}

// IncStreamCount increments the open-stream count gauge.
func (m *Metrics) IncStreamCount() {
	m.StreamCount.Inc()
}

// DecStreamCount decrements the open-stream count gauge.
func (m *Metrics) DecStreamCount() {
	m.StreamCount.Dec()
}

func (m *Metrics) RecordBandwidth(ctx context.Context, bwc *libp2pmetrics.BandwidthCounter) {
591 592 593 594 595 596 597 598 599 600 601 602 603 604 605
	tick := time.NewTicker(10 * time.Second)
	defer tick.Stop()

	for {
		select {
		case <-tick.C:
			bwTotals := bwc.GetBandwidthTotals()
			m.BandwidthTotal.WithLabelValues("in").Set(float64(bwTotals.TotalIn))
			m.BandwidthTotal.WithLabelValues("out").Set(float64(bwTotals.TotalOut))
		case <-ctx.Done():
			return
		}
	}
}

// RecordL1RequestTime tracks the amount of time the derivation pipeline spent waiting for L1 data requests.
func (m *Metrics) RecordL1RequestTime(method string, duration time.Duration) {
	m.L1RequestDurationSeconds.WithLabelValues(method).Observe(float64(duration) / float64(time.Second))
}

// RecordSequencerBuildingDiffTime tracks the amount of time the sequencer was allowed between
// start to finish, incl. sealing, minus the block time.
// Ideally this is 0, realistically the sequencer scheduler may be busy with other jobs like syncing sometimes.
func (m *Metrics) RecordSequencerBuildingDiffTime(duration time.Duration) {
	m.SequencerBuildingDiffTotal.Inc()
	m.SequencerBuildingDiffDurationSeconds.Observe(float64(duration) / float64(time.Second))
}

// RecordSequencerSealingTime tracks the amount of time the sequencer took to finish sealing the block.
// Ideally this is 0, realistically it may take some time.
func (m *Metrics) RecordSequencerSealingTime(duration time.Duration) {
	m.SequencerSealingTotal.Inc()
	m.SequencerSealingDurationSeconds.Observe(float64(duration) / float64(time.Second))
}

// Serve starts the metrics server on the given hostname and port.
// The server will be closed when the passed-in context is cancelled.
// Blocking; returns the error from ListenAndServe.
func (m *Metrics) Serve(ctx context.Context, hostname string, port int) error {
	addr := net.JoinHostPort(hostname, strconv.Itoa(port))
	// Instrument the handler so scrapes of the metrics endpoint are themselves counted.
	server := ophttp.NewHttpServer(promhttp.InstrumentMetricHandler(
		m.registry, promhttp.HandlerFor(m.registry, promhttp.HandlerOpts{}),
	))
	server.Addr = addr
	// Close the server once ctx is cancelled, which unblocks ListenAndServe.
	go func() {
		<-ctx.Done()
		server.Close()
	}()
	return server.ListenAndServe()
}
640

// Document returns the documentation for every metric created via the factory.
func (m *Metrics) Document() []metrics.DocumentedMetric {
	return m.factory.Document()
}

645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665
func (m *Metrics) ClientPayloadByNumberEvent(num uint64, resultCode byte, duration time.Duration) {
	if resultCode > 4 { // summarize all high codes to reduce metrics overhead
		resultCode = 5
	}
	code := strconv.FormatUint(uint64(resultCode), 10)
	m.P2PReqTotal.WithLabelValues("client", "payload_by_number", code).Inc()
	m.P2PReqDurationSeconds.WithLabelValues("client", "payload_by_number", code).Observe(float64(duration) / float64(time.Second))
	m.P2PPayloadByNumber.WithLabelValues("client").Set(float64(num))
}

// ServerPayloadByNumberEvent records a payload-by-number request served by this
// node as a p2p server. Unlike the client variant, result codes are not
// clamped — presumably server-side codes are already bounded; verify callers.
func (m *Metrics) ServerPayloadByNumberEvent(num uint64, resultCode byte, duration time.Duration) {
	label := strconv.FormatUint(uint64(resultCode), 10)
	m.P2PReqTotal.WithLabelValues("server", "payload_by_number", label).Inc()
	m.P2PReqDurationSeconds.WithLabelValues("server", "payload_by_number", label).Observe(float64(duration) / float64(time.Second))
	m.P2PPayloadByNumber.WithLabelValues("server").Set(float64(num))
}

// PayloadsQuarantineSize sets the gauge tracking how many unverified execution
// payloads are currently quarantined.
func (m *Metrics) PayloadsQuarantineSize(n int) {
	m.PayloadsQuarantineTotal.Set(float64(n))
}

// RecordChannelInputBytes adds the number of compressed bytes ingested into
// the channel to the channel-input-bytes counter.
func (m *Metrics) RecordChannelInputBytes(inputCompressedBytes int) {
	m.ChannelInputBytes.Add(float64(inputCompressedBytes))
}

// RecordHeadChannelOpened records a new channel appearing at the front of the channel bank.
func (m *Metrics) RecordHeadChannelOpened() {
	m.headChannelOpenedEvent.Record()
}

// RecordChannelTimedOut records a channel timing out in the channel bank.
func (m *Metrics) RecordChannelTimedOut() {
	m.channelTimedOutEvent.Record()
}

// RecordFrame records a new frame being ingested into the channel bank.
func (m *Metrics) RecordFrame() {
	m.frameAddedEvent.Record()
}

// RecordPeerUnban counts a peer being unbanned.
func (m *Metrics) RecordPeerUnban() {
	m.PeerUnbans.Inc()
}

// RecordIPUnban counts an IP address being unbanned.
func (m *Metrics) RecordIPUnban() {
	m.IPUnbans.Inc()
}

// RecordDial counts an outgoing dial attempt, labeled "true"/"false" by
// whether the dial was allowed.
func (m *Metrics) RecordDial(allow bool) {
	m.Dials.WithLabelValues(strconv.FormatBool(allow)).Inc()
}

// RecordAccept counts an incoming dial attempt, labeled "true"/"false" by
// whether the connection was allowed.
func (m *Metrics) RecordAccept(allow bool) {
	m.Accepts.WithLabelValues(strconv.FormatBool(allow)).Inc()
}
// ReportProtocolVersions publishes the comparison deltas between the local /
// engine versions and the recommended / required versions, plus a pseudo-metric
// carrying the exact version strings as labels.
func (m *Metrics) ReportProtocolVersions(local, engine, recommended, required params.ProtocolVersion) {
	m.ProtocolVersionDelta.WithLabelValues("local_recommended").Set(float64(local.Compare(recommended)))
	m.ProtocolVersionDelta.WithLabelValues("local_required").Set(float64(local.Compare(required)))
	m.ProtocolVersionDelta.WithLabelValues("engine_recommended").Set(float64(engine.Compare(recommended)))
	m.ProtocolVersionDelta.WithLabelValues("engine_required").Set(float64(engine.Compare(required)))
	m.ProtocolVersions.WithLabelValues(local.String(), engine.String(), recommended.String(), required.String()).Set(1)
}
712

// noopMetricer implements Metricer with no-op methods, for use when metrics
// collection is disabled.
type noopMetricer struct{}

// NoopMetrics is a shared no-op Metricer instance.
var NoopMetrics Metricer = new(noopMetricer)

func (n *noopMetricer) RecordInfo(version string) {
}

func (n *noopMetricer) RecordUp() {
}

func (n *noopMetricer) RecordRPCServerRequest(method string) func() {
	return func() {}
}

func (n *noopMetricer) RecordRPCClientRequest(method string) func(err error) {
	return func(err error) {}
}

func (n *noopMetricer) RecordRPCClientResponse(method string, err error) {
}

func (n *noopMetricer) SetDerivationIdle(status bool) {
}

func (n *noopMetricer) RecordPipelineReset() {
}

func (n *noopMetricer) RecordSequencingError() {
}

func (n *noopMetricer) RecordPublishingError() {
}

func (n *noopMetricer) RecordDerivationError() {
}

func (n *noopMetricer) RecordReceivedUnsafePayload(payload *eth.ExecutionPayload) {
}

func (n *noopMetricer) RecordRef(layer string, name string, num uint64, timestamp uint64, h common.Hash) {
}

func (n *noopMetricer) RecordL1Ref(name string, ref eth.L1BlockRef) {
}

func (n *noopMetricer) RecordL2Ref(name string, ref eth.L2BlockRef) {
}

func (n *noopMetricer) RecordUnsafePayloadsBuffer(length uint64, memSize uint64, next eth.BlockID) {
}

func (n *noopMetricer) CountSequencedTxs(count int) {
}

func (n *noopMetricer) RecordL1ReorgDepth(d uint64) {
}

func (n *noopMetricer) RecordSequencerInconsistentL1Origin(from eth.BlockID, to eth.BlockID) {
}

func (n *noopMetricer) RecordSequencerReset() {
}

func (n *noopMetricer) RecordGossipEvent(evType int32) {
}

func (n *noopMetricer) SetPeerScores(allScores []store.PeerScores) {
}

func (n *noopMetricer) IncPeerCount() {
}

func (n *noopMetricer) DecPeerCount() {
}

func (n *noopMetricer) IncStreamCount() {
}

func (n *noopMetricer) DecStreamCount() {
}

func (n *noopMetricer) RecordBandwidth(ctx context.Context, bwc *libp2pmetrics.BandwidthCounter) {
}

func (n *noopMetricer) RecordSequencerBuildingDiffTime(duration time.Duration) {
}

func (n *noopMetricer) RecordSequencerSealingTime(duration time.Duration) {
}

func (n *noopMetricer) Document() []metrics.DocumentedMetric {
	return nil
}

func (n *noopMetricer) ClientPayloadByNumberEvent(num uint64, resultCode byte, duration time.Duration) {
}

func (n *noopMetricer) ServerPayloadByNumberEvent(num uint64, resultCode byte, duration time.Duration) {
}

func (n *noopMetricer) PayloadsQuarantineSize(int) {
}

func (n *noopMetricer) RecordChannelInputBytes(int) {
}

func (n *noopMetricer) RecordHeadChannelOpened() {
}

func (n *noopMetricer) RecordChannelTimedOut() {
}

func (n *noopMetricer) RecordFrame() {
}

func (n *noopMetricer) RecordPeerUnban() {
}

func (n *noopMetricer) RecordIPUnban() {
}

func (n *noopMetricer) RecordDial(allow bool) {
}

func (n *noopMetricer) RecordAccept(allow bool) {
}

func (n *noopMetricer) ReportProtocolVersions(local, engine, recommended, required params.ProtocolVersion) {
}