Commit 1abe7a59 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into fix/cdm-json

parents de181eac 38a6b213
package doc
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/olekukonko/tablewriter"
"github.com/urfave/cli"
)
var Subcommands = cli.Commands{
{
Name: "metrics",
Usage: "Dumps a list of supported metrics to stdout",
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Value: "markdown",
Usage: "Output format (json|markdown)",
},
},
Action: func(ctx *cli.Context) error {
m := metrics.NewMetrics("default")
supportedMetrics := m.Document()
format := ctx.String("format")
if format != "markdown" && format != "json" {
return fmt.Errorf("invalid format: %s", format)
}
if format == "json" {
enc := json.NewEncoder(os.Stdout)
return enc.Encode(supportedMetrics)
}
table := tablewriter.NewWriter(os.Stdout)
table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
table.SetCenterSeparator("|")
table.SetAutoWrapText(false)
table.SetHeader([]string{"Metric", "Description", "Labels", "Type"})
var data [][]string
for _, metric := range supportedMetrics {
labels := strings.Join(metric.Labels, ",")
data = append(data, []string{metric.Name, metric.Help, labels, metric.Type})
}
table.AppendBulk(data)
table.Render()
return nil
},
},
}
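
As a hedged illustration only (not part of the commit), the new "doc" subcommand could be exercised end-to-end from Go roughly as below; the app name "op-node" and the test name are placeholders for this sketch:

package doc

import (
    "testing"

    "github.com/urfave/cli"
)

// TestMetricsDocJSON is a sketch: it mounts Subcommands in a throwaway cli app and
// runs "doc metrics --format json", which documents every registered metric and
// encodes the list as JSON to stdout.
func TestMetricsDocJSON(t *testing.T) {
    app := cli.NewApp()
    app.Name = "op-node"
    app.Commands = cli.Commands{{Name: "doc", Subcommands: Subcommands}}
    if err := app.Run([]string{"op-node", "doc", "metrics", "--format", "json"}); err != nil {
        t.Fatalf("doc metrics failed: %v", err)
    }
}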
@@ -9,6 +9,8 @@ import (
"syscall"
"time"
"github.com/ethereum-optimism/optimism/op-node/cmd/doc"
"github.com/urfave/cli"
opnode "github.com/ethereum-optimism/optimism/op-node"
@@ -68,6 +70,10 @@ func main() {
Name: "genesis",
Subcommands: genesis.Subcommands,
},
{
Name: "doc",
Subcommands: doc.Subcommands,
},
}
err := app.Run(os.Args)
......
@@ -23,6 +23,7 @@ require (
github.com/libp2p/go-libp2p-testing v0.12.0
github.com/multiformats/go-multiaddr v0.7.0
github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/olekukonko/tablewriter v0.0.5
github.com/prometheus/client_golang v1.13.0
github.com/stretchr/testify v1.8.1
github.com/urfave/cli v1.22.9
@@ -115,7 +116,6 @@ require (
github.com/multiformats/go-multistream v0.3.3 // indirect
github.com/multiformats/go-varint v0.0.6 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
......
package metrics
import (
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
// CacheMetrics implements the Metrics interface in the caching package,
@@ -34,16 +34,16 @@ func (m *CacheMetrics) CacheGet(typeLabel string, hit bool) {
}
}
func NewCacheMetrics(registry prometheus.Registerer, ns string, name string, displayName string) *CacheMetrics {
func NewCacheMetrics(factory metrics.Factory, ns string, name string, displayName string) *CacheMetrics {
return &CacheMetrics{
SizeVec: promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
SizeVec: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: name + "_size",
Help: displayName + " cache size",
}, []string{
"type",
}),
GetVec: promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
GetVec: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Name: name + "_get",
Help: displayName + " lookups, hitting or not",
@@ -51,7 +51,7 @@ func NewCacheMetrics(registry prometheus.Registerer, ns string, name string, dis
"type",
"hit",
}),
AddVec: promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
AddVec: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Name: name + "_add",
Help: displayName + " additions, evicting previous values or not",
......
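
As a minimal sketch of the new constructor signature (an assumption for illustration, not taken from the diff), CacheMetrics can now be built from the documenting factory rather than a raw Prometheus registerer; the "op_node" namespace and "header" type label below are placeholders:

package metrics

import (
    "github.com/ethereum-optimism/optimism/op-service/metrics"
    "github.com/prometheus/client_golang/prometheus"
)

// exampleCacheMetrics wires CacheMetrics through metrics.With so the created
// gauges and counters are both registered and documented.
func exampleCacheMetrics() *CacheMetrics {
    registry := prometheus.NewRegistry()
    factory := metrics.With(registry)
    cm := NewCacheMetrics(factory, "op_node", "l1_source_cache", "L1 Source cache")
    cm.CacheGet("header", true) // record a hit for the placeholder "header" type label
    return cm
}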
@@ -4,8 +4,9 @@ import (
"fmt"
"time"
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type EventMetrics struct {
@@ -18,14 +19,14 @@ func (e *EventMetrics) RecordEvent() {
e.LastTime.Set(float64(time.Now().Unix()))
}
func NewEventMetrics(registry prometheus.Registerer, ns string, name string, displayName string) *EventMetrics {
func NewEventMetrics(factory metrics.Factory, ns string, name string, displayName string) *EventMetrics {
return &EventMetrics{
Total: promauto.With(registry).NewCounter(prometheus.CounterOpts{
Total: factory.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: fmt.Sprintf("%s_total", name),
Help: fmt.Sprintf("Count of %s events", displayName),
}),
LastTime: promauto.With(registry).NewGauge(prometheus.GaugeOpts{
LastTime: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: fmt.Sprintf("last_%s_unix", name),
Help: fmt.Sprintf("Timestamp of last %s event", displayName),
......
@@ -10,11 +10,12 @@ import (
"strconv"
"time"
"github.com/ethereum-optimism/optimism/op-service/metrics"
pb "github.com/libp2p/go-libp2p-pubsub/pb"
libp2pmetrics "github.com/libp2p/go-libp2p/core/metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/ethereum/go-ethereum"
@@ -59,6 +60,7 @@ type Metricer interface {
RecordBandwidth(ctx context.Context, bwc *libp2pmetrics.BandwidthCounter)
RecordSequencerBuildingDiffTime(duration time.Duration)
RecordSequencerSealingTime(duration time.Duration)
Document() []metrics.DocumentedMetric
}
type Metrics struct {
@@ -111,6 +113,7 @@ type Metrics struct {
BandwidthTotal *prometheus.GaugeVec
registry *prometheus.Registry
factory metrics.Factory
}
var _ Metricer = (*Metrics)(nil)
@@ -124,21 +127,22 @@ func NewMetrics(procName string) *Metrics {
registry := prometheus.NewRegistry()
registry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
registry.MustRegister(collectors.NewGoCollector())
factory := metrics.With(registry)
return &Metrics{
Info: promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
Info: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "info",
Help: "Pseudo-metric tracking version and config info",
}, []string{
"version",
}),
Up: promauto.With(registry).NewGauge(prometheus.GaugeOpts{
Up: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "up",
Help: "1 if the op node has finished starting up",
}),
RPCServerRequestsTotal: promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
RPCServerRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: RPCServerSubsystem,
Name: "requests_total",
@@ -146,7 +150,7 @@ func NewMetrics(procName string) *Metrics {
}, []string{
"method",
}),
RPCServerRequestDurationSeconds: promauto.With(registry).NewHistogramVec(prometheus.HistogramOpts{
RPCServerRequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{
Namespace: ns,
Subsystem: RPCServerSubsystem,
Name: "request_duration_seconds",
@@ -155,7 +159,7 @@ func NewMetrics(procName string) *Metrics {
}, []string{
"method",
}),
RPCClientRequestsTotal: promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
RPCClientRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: RPCClientSubsystem,
Name: "requests_total",
@@ -163,7 +167,7 @@ func NewMetrics(procName string) *Metrics {
}, []string{
"method",
}),
RPCClientRequestDurationSeconds: promauto.With(registry).NewHistogramVec(prometheus.HistogramOpts{
RPCClientRequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{
Namespace: ns,
Subsystem: RPCClientSubsystem,
Name: "request_duration_seconds",
@@ -172,7 +176,7 @@ func NewMetrics(procName string) *Metrics {
}, []string{
"method",
}),
RPCClientResponsesTotal: promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
RPCClientResponsesTotal: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: RPCClientSubsystem,
Name: "responses_total",
@@ -182,33 +186,33 @@ func NewMetrics(procName string) *Metrics {
"error",
}),
L1SourceCache: NewCacheMetrics(registry, ns, "l1_source_cache", "L1 Source cache"),
L2SourceCache: NewCacheMetrics(registry, ns, "l2_source_cache", "L2 Source cache"),
L1SourceCache: NewCacheMetrics(factory, ns, "l1_source_cache", "L1 Source cache"),
L2SourceCache: NewCacheMetrics(factory, ns, "l2_source_cache", "L2 Source cache"),
DerivationIdle: promauto.With(registry).NewGauge(prometheus.GaugeOpts{
DerivationIdle: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "derivation_idle",
Help: "1 if the derivation pipeline is idle",
}),
PipelineResets: NewEventMetrics(registry, ns, "pipeline_resets", "derivation pipeline resets"),
UnsafePayloads: NewEventMetrics(registry, ns, "unsafe_payloads", "unsafe payloads"),
DerivationErrors: NewEventMetrics(registry, ns, "derivation_errors", "derivation errors"),
SequencingErrors: NewEventMetrics(registry, ns, "sequencing_errors", "sequencing errors"),
PublishingErrors: NewEventMetrics(registry, ns, "publishing_errors", "p2p publishing errors"),
PipelineResets: NewEventMetrics(factory, ns, "pipeline_resets", "derivation pipeline resets"),
UnsafePayloads: NewEventMetrics(factory, ns, "unsafe_payloads", "unsafe payloads"),
DerivationErrors: NewEventMetrics(factory, ns, "derivation_errors", "derivation errors"),
SequencingErrors: NewEventMetrics(factory, ns, "sequencing_errors", "sequencing errors"),
PublishingErrors: NewEventMetrics(factory, ns, "publishing_errors", "p2p publishing errors"),
UnsafePayloadsBufferLen: promauto.With(registry).NewGauge(prometheus.GaugeOpts{
UnsafePayloadsBufferLen: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "unsafe_payloads_buffer_len",
Help: "Number of buffered L2 unsafe payloads",
}),
UnsafePayloadsBufferMemSize: promauto.With(registry).NewGauge(prometheus.GaugeOpts{
UnsafePayloadsBufferMemSize: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "unsafe_payloads_buffer_mem_size",
Help: "Total estimated memory size of buffered L2 unsafe payloads",
}),
RefsNumber: promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
RefsNumber: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "refs_number",
Help: "Gauge representing the different L1/L2 reference block numbers",
@@ -216,7 +220,7 @@ func NewMetrics(procName string) *Metrics {
"layer",
"type",
}),
RefsTime: promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
RefsTime: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "refs_time",
Help: "Gauge representing the different L1/L2 reference block timestamps",
@@ -224,7 +228,7 @@ func NewMetrics(procName string) *Metrics {
"layer",
"type",
}),
RefsHash: promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
RefsHash: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "refs_hash",
Help: "Gauge representing the different L1/L2 reference block hashes truncated to float values",
@@ -232,14 +236,14 @@ func NewMetrics(procName string) *Metrics {
"layer",
"type",
}),
RefsSeqNr: promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
RefsSeqNr: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "refs_seqnr",
Help: "Gauge representing the different L2 reference sequence numbers",
}, []string{
"type",
}),
RefsLatency: promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
RefsLatency: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "refs_latency",
Help: "Gauge representing the different L1/L2 reference block timestamps minus current time, in seconds",
@@ -249,32 +253,32 @@ func NewMetrics(procName string) *Metrics {
}),
LatencySeen: make(map[string]common.Hash),
L1ReorgDepth: promauto.With(registry).NewHistogram(prometheus.HistogramOpts{
L1ReorgDepth: factory.NewHistogram(prometheus.HistogramOpts{
Namespace: ns,
Name: "l1_reorg_depth",
Buckets: []float64{0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 20.5, 50.5, 100.5},
Help: "Histogram of L1 Reorg Depths",
}),
TransactionsSequencedTotal: promauto.With(registry).NewGauge(prometheus.GaugeOpts{
TransactionsSequencedTotal: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "transactions_sequenced_total",
Help: "Count of total transactions sequenced",
}),
PeerCount: promauto.With(registry).NewGauge(prometheus.GaugeOpts{
PeerCount: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Subsystem: "p2p",
Name: "peer_count",
Help: "Count of currently connected p2p peers",
}),
StreamCount: promauto.With(registry).NewGauge(prometheus.GaugeOpts{
StreamCount: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Subsystem: "p2p",
Name: "stream_count",
Help: "Count of currently connected p2p streams",
}),
GossipEventsTotal: promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
GossipEventsTotal: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: "p2p",
Name: "gossip_events_total",
@@ -282,7 +286,7 @@ func NewMetrics(procName string) *Metrics {
}, []string{
"type",
}),
BandwidthTotal: promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
BandwidthTotal: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Subsystem: "p2p",
Name: "bandwidth_bytes_total",
@@ -291,7 +295,7 @@ func NewMetrics(procName string) *Metrics {
"direction",
}),
SequencerBuildingDiffDurationSeconds: promauto.With(registry).NewHistogram(prometheus.HistogramOpts{
SequencerBuildingDiffDurationSeconds: factory.NewHistogram(prometheus.HistogramOpts{
Namespace: ns,
Name: "sequencer_building_diff_seconds",
Buckets: []float64{
@@ -299,24 +303,25 @@ func NewMetrics(procName string) *Metrics {
.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
Help: "Histogram of Sequencer building time, minus block time",
}),
SequencerBuildingDiffTotal: promauto.With(registry).NewCounter(prometheus.CounterOpts{
SequencerBuildingDiffTotal: factory.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "sequencer_building_diff_total",
Help: "Number of sequencer block building jobs",
}),
SequencerSealingDurationSeconds: promauto.With(registry).NewHistogram(prometheus.HistogramOpts{
SequencerSealingDurationSeconds: factory.NewHistogram(prometheus.HistogramOpts{
Namespace: ns,
Name: "sequencer_sealing_seconds",
Buckets: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
Help: "Histogram of Sequencer block sealing time",
}),
SequencerSealingTotal: promauto.With(registry).NewCounter(prometheus.CounterOpts{
SequencerSealingTotal: factory.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "sequencer_sealing_total",
Help: "Number of sequencer block sealing jobs",
}),
registry: registry,
factory: factory,
}
}
@@ -515,6 +520,10 @@ func (m *Metrics) Serve(ctx context.Context, hostname string, port int) error {
return server.ListenAndServe()
}
func (m *Metrics) Document() []metrics.DocumentedMetric {
return m.factory.Document()
}
type noopMetricer struct{}
var NoopMetrics Metricer = new(noopMetricer)
@@ -595,3 +604,7 @@ func (n *noopMetricer) RecordSequencerBuildingDiffTime(duration time.Duration) {
func (n *noopMetricer) RecordSequencerSealingTime(duration time.Duration) {
}
func (n *noopMetricer) Document() []metrics.DocumentedMetric {
return nil
}
@@ -27,6 +27,17 @@ var (
ConfigUpdateEventVersion0 = common.Hash{}
)
var (
// A left-padded uint256 equal to 32.
oneWordUint = common.Hash{31: 32}
// A left-padded uint256 equal to 64.
twoWordUint = common.Hash{31: 64}
// 24 zero bytes (the padding for a uint64 in a 32 byte word)
uint64Padding = make([]byte, 24)
// 12 zero bytes (the padding for an Ethereum address in a 32 byte word)
addressPadding = make([]byte, 12)
)
// UpdateSystemConfigWithL1Receipts filters all L1 receipts to find config updates and applies the config updates to the given sysCfg
func UpdateSystemConfigWithL1Receipts(sysCfg *eth.SystemConfig, receipts []*types.Receipt, cfg *rollup.Config) error {
var result error
@@ -69,50 +80,94 @@ func ProcessSystemConfigUpdateLogEvent(destSysCfg *eth.SystemConfig, ev *types.L
}
// indexed 1
updateType := ev.Topics[2]
// unindexed data
// Create a reader of the unindexed data
reader := bytes.NewReader(ev.Data)
// Counter for the number of bytes read from `reader` via `readWord`
countReadBytes := 0
// Helper function to read a word from the log data reader
readWord := func() (b [32]byte) {
if _, err := reader.Read(b[:]); err != nil {
// If there is an error reading the next 32 bytes from the reader, return an empty
// 32 byte array. We always check that the number of bytes read (`countReadBytes`)
// is equal to the expected amount at the end of each switch case.
return b
}
countReadBytes += 32
return b
}
// Attempt to read unindexed data
switch updateType {
case SystemConfigUpdateBatcher:
if len(ev.Data) != 32*3 {
return fmt.Errorf("expected 32*3 bytes in batcher hash update, but got %d bytes", len(ev.Data))
// Read the pointer, it should always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected offset to point to length location, but got %s", word)
}
if x := common.BytesToHash(ev.Data[:32]); x != (common.Hash{31: 32}) {
return fmt.Errorf("expected offset to point to length location, but got %s", x)
// Read the length, it should also always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected length to be 32 bytes, but got %s", word)
}
if x := common.BytesToHash(ev.Data[32:64]); x != (common.Hash{31: 32}) {
return fmt.Errorf("expected length of 1 bytes32, but got %s", x)
// Indexing `word` directly is always safe here, it is guaranteed to be 32 bytes in length.
// Check that the batcher address is correctly zero-padded.
word := readWord()
if !bytes.Equal(word[:12], addressPadding) {
return fmt.Errorf("expected version 0 batcher hash with zero padding, but got %x", word)
}
if !bytes.Equal(ev.Data[64:64+12], make([]byte, 12)) {
return fmt.Errorf("expected version 0 batcher hash with zero padding, but got %x", ev.Data)
destSysCfg.BatcherAddr.SetBytes(word[12:])
if countReadBytes != 32*3 {
return NewCriticalError(fmt.Errorf("expected 32*3 bytes in batcher hash update, but got %d bytes", len(ev.Data)))
}
destSysCfg.BatcherAddr.SetBytes(ev.Data[64+12:])
return nil
case SystemConfigUpdateGasConfig: // left padded uint8
if len(ev.Data) != 32*4 {
return fmt.Errorf("expected 32*4 bytes in GPO params update data, but got %d", len(ev.Data))
case SystemConfigUpdateGasConfig:
// Read the pointer, it should always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected offset to point to length location, but got %s", word)
}
if x := common.BytesToHash(ev.Data[:32]); x != (common.Hash{31: 32}) {
return fmt.Errorf("expected offset to point to length location, but got %s", x)
// Read the length, it should always equal 64.
if word := readWord(); word != twoWordUint {
return fmt.Errorf("expected length to be 64 bytes, but got %s", word)
}
if x := common.BytesToHash(ev.Data[32:64]); x != (common.Hash{31: 64}) {
return fmt.Errorf("expected length of 2 bytes32, but got %s", x)
// Set the system config's overhead and scalar values to the values read from the log
destSysCfg.Overhead = readWord()
destSysCfg.Scalar = readWord()
if countReadBytes != 32*4 {
return NewCriticalError(fmt.Errorf("expected 32*4 bytes in GPO params update data, but got %d", len(ev.Data)))
}
copy(destSysCfg.Overhead[:], ev.Data[64:96])
copy(destSysCfg.Scalar[:], ev.Data[96:128])
return nil
case SystemConfigUpdateGasLimit:
if len(ev.Data) != 32*3 {
return fmt.Errorf("expected 32*3 bytes in gas limit update, but got %d bytes", len(ev.Data))
// Read the pointer, it should always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected offset to point to length location, but got %s", word)
}
if x := common.BytesToHash(ev.Data[:32]); x != (common.Hash{31: 32}) {
return fmt.Errorf("expected offset to point to length location, but got %s", x)
// Read the length, it should also always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected length to be 32 bytes, but got %s", word)
}
if x := common.BytesToHash(ev.Data[32:64]); x != (common.Hash{31: 32}) {
return fmt.Errorf("expected length of 1 bytes32, but got %s", x)
// Indexing `word` directly is always safe here, it is guaranteed to be 32 bytes in length.
// Check that the gas limit is correctly zero-padded.
word := readWord()
if !bytes.Equal(word[:24], uint64Padding) {
return fmt.Errorf("expected zero padding for gaslimit, but got %x", word)
}
if !bytes.Equal(ev.Data[64:64+24], make([]byte, 24)) {
return fmt.Errorf("expected zero padding for gaslimit, but got %x", ev.Data)
destSysCfg.GasLimit = binary.BigEndian.Uint64(word[24:])
if countReadBytes != 32*3 {
return NewCriticalError(fmt.Errorf("expected 32*3 bytes in gas limit update, but got %d bytes", len(ev.Data)))
}
destSysCfg.GasLimit = binary.BigEndian.Uint64(ev.Data[64+24:])
return nil
case SystemConfigUpdateUnsafeBlockSigner:
// Ignored in derivation. This configurable applies to runtime configuration outside of the derivation.
......
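
For reference, the log data the rewritten batcher-hash branch accepts is an ABI-encoded bytes payload of exactly three 32-byte words: an offset word equal to 32, a length word equal to 32, then 12 zero bytes of padding followed by the 20-byte batcher address. The sketch below (a hypothetical helper with an assumed package name, not part of the commit) builds such a payload:

package derive

import (
    "github.com/ethereum/go-ethereum/common"
)

// exampleBatcherUpdateData builds the 96-byte payload that readWord consumes in
// the SystemConfigUpdateBatcher case: offset word, length word, padded address.
func exampleBatcherUpdateData(batcher common.Address) []byte {
    data := make([]byte, 0, 32*3)
    data = append(data, common.Hash{31: 32}.Bytes()...) // offset word (oneWordUint)
    data = append(data, common.Hash{31: 32}.Bytes()...) // length word (oneWordUint)
    data = append(data, make([]byte, 12)...)            // 12 zero bytes (addressPadding)
    data = append(data, batcher.Bytes()...)             // 20-byte batcher address
    return data
}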
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type Factory interface {
NewCounter(opts prometheus.CounterOpts) prometheus.Counter
NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec
NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge
NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec
NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram
NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec
NewSummary(opts prometheus.SummaryOpts) prometheus.Summary
NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec
Document() []DocumentedMetric
}
type DocumentedMetric struct {
Type string `json:"type"`
Name string `json:"name"`
Help string `json:"help"`
Labels []string `json:"labels"`
}
type documentor struct {
metrics []DocumentedMetric
factory promauto.Factory
}
func With(registry *prometheus.Registry) Factory {
return &documentor{
factory: promauto.With(registry),
}
}
func (d *documentor) NewCounter(opts prometheus.CounterOpts) prometheus.Counter {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "counter",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
})
return d.factory.NewCounter(opts)
}
func (d *documentor) NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "counter",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
Labels: labelNames,
})
return d.factory.NewCounterVec(opts, labelNames)
}
func (d *documentor) NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "gauge",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
})
return d.factory.NewGauge(opts)
}
func (d *documentor) NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "gauge",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
Labels: labelNames,
})
return d.factory.NewGaugeVec(opts, labelNames)
}
func (d *documentor) NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "histogram",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
})
return d.factory.NewHistogram(opts)
}
func (d *documentor) NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "histogram",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
Labels: labelNames,
})
return d.factory.NewHistogramVec(opts, labelNames)
}
func (d *documentor) NewSummary(opts prometheus.SummaryOpts) prometheus.Summary {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "summary",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
})
return d.factory.NewSummary(opts)
}
func (d *documentor) NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "summary",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
Labels: labelNames,
})
return d.factory.NewSummaryVec(opts, labelNames)
}
func (d *documentor) Document() []DocumentedMetric {
return d.metrics
}
func fullName(ns, subsystem, name string) string {
out := ns
if subsystem != "" {
out += "_" + subsystem
}
return out + "_" + name
}
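
A minimal usage sketch of the documenting factory (not part of the diff; the metric names below are placeholders): register one metric through With and read back the documentation entry it records.

package metrics

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

// exampleDocumentedCounter registers a counter vec via the documentor and prints
// the DocumentedMetric entry recorded for it.
func exampleDocumentedCounter() {
    registry := prometheus.NewRegistry()
    factory := With(registry)
    factory.NewCounterVec(prometheus.CounterOpts{
        Namespace: "example",
        Name:      "requests_total",
        Help:      "Count of requests",
    }, []string{"method"})
    for _, m := range factory.Document() {
        fmt.Println(m.Type, m.Name, m.Help, m.Labels)
        // Prints: counter example_requests_total Count of requests [method]
    }
}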