Commit e4579740 authored by mergify[bot], committed by GitHub

Merge pull request #5165 from ethereum-optimism/jg/op_node_logging

op-node: More logging
parents 9a85422f 8f1c9303
@@ -7,6 +7,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/sources"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/urfave/cli"
)
@@ -113,23 +114,6 @@ var (
Required: false,
Value: time.Second * 12 * 32,
}
- LogLevelFlag = cli.StringFlag{
- 	Name: "log.level",
- 	Usage: "The lowest log level that will be output",
- 	Value: "info",
- 	EnvVar: prefixEnvVar("LOG_LEVEL"),
- }
- LogFormatFlag = cli.StringFlag{
- 	Name: "log.format",
- 	Usage: "Format the log output. Supported formats: 'text', 'json'",
- 	Value: "text",
- 	EnvVar: prefixEnvVar("LOG_FORMAT"),
- }
- LogColorFlag = cli.BoolFlag{
- 	Name: "log.color",
- 	Usage: "Color the log output",
- 	EnvVar: prefixEnvVar("LOG_COLOR"),
- }
MetricsEnabledFlag = cli.BoolFlag{
Name: "metrics.enabled",
Usage: "Enable the metrics server",
@@ -200,7 +184,7 @@ var requiredFlags = []cli.Flag{
RPCListenPort,
}
- var optionalFlags = append([]cli.Flag{
+ var optionalFlags = []cli.Flag{
RollupConfig,
Network,
L1TrustRPC,
@@ -211,9 +195,6 @@ var optionalFlags = append([]cli.Flag{
SequencerStoppedFlag,
SequencerL1Confs,
L1EpochPollIntervalFlag,
- LogLevelFlag,
- LogFormatFlag,
- LogColorFlag,
RPCEnableAdmin,
MetricsEnabledFlag,
MetricsAddrFlag,
@@ -226,10 +207,16 @@ var optionalFlags = append([]cli.Flag{
HeartbeatMonikerFlag,
HeartbeatURLFlag,
BackupL2UnsafeSyncRPC,
- }, p2pFlags...)
+ }
// Flags contains the list of configuration options available to the binary.
- var Flags = append(requiredFlags, optionalFlags...)
+ var Flags []cli.Flag
+ func init() {
+ 	optionalFlags = append(optionalFlags, p2pFlags...)
+ 	optionalFlags = append(optionalFlags, oplog.CLIFlags(envVarPrefix)...)
+ 	Flags = append(requiredFlags, optionalFlags...)
+ }
func CheckRequired(ctx *cli.Context) error {
l1NodeAddr := ctx.GlobalString(L1NodeAddr.Name)
......
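Note: the three flag definitions removed above are now produced by `oplog.CLIFlags(envVarPrefix)` and spliced in via `init()`. A minimal sketch of what such a helper could look like, reconstructed from the deleted `LogLevelFlag`/`LogFormatFlag`/`LogColorFlag` definitions; the actual op-service/log implementation may differ in detail:

```go
// Package oplog: a sketch of the shared log-flag helper. Names, usage
// strings, and defaults are copied from the flags removed above; the
// env-var prefixing scheme here is an assumption.
package oplog

import "github.com/urfave/cli"

// CLIFlags returns the log flags with env vars derived from the caller's
// prefix, so each op-* binary can reuse a single definition.
func CLIFlags(envPrefix string) []cli.Flag {
	prefixEnvVar := func(name string) string { return envPrefix + "_" + name }
	return []cli.Flag{
		cli.StringFlag{
			Name:   "log.level",
			Usage:  "The lowest log level that will be output",
			Value:  "info",
			EnvVar: prefixEnvVar("LOG_LEVEL"),
		},
		cli.StringFlag{
			Name:   "log.format",
			Usage:  "Format the log output. Supported formats: 'text', 'json'",
			Value:  "text",
			EnvVar: prefixEnvVar("LOG_FORMAT"),
		},
		cli.BoolFlag{
			Name:   "log.color",
			Usage:  "Color the log output",
			EnvVar: prefixEnvVar("LOG_COLOR"),
		},
	}
}
```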
@@ -139,6 +139,7 @@ func (bq *BatchQueue) AddBatch(batch *BatchData, l2SafeHead eth.L2BlockRef) {
if validity == BatchDrop {
return // if we do drop the batch, CheckBatch will log the drop reason with WARN level.
}
+ bq.log.Debug("Adding batch", "batch_timestamp", batch.Timestamp, "parent_hash", batch.ParentHash, "batch_epoch", batch.Epoch(), "txs", len(batch.Transactions))
bq.batches[batch.Timestamp] = append(bq.batches[batch.Timestamp], &data)
}
@@ -212,7 +213,7 @@ batchLoop:
if nextBatch.Batch.EpochNum == rollup.Epoch(epoch.Number)+1 {
bq.l1Blocks = bq.l1Blocks[1:]
}
- bq.log.Trace("Returning found batch", "epoch", epoch, "batch_epoch", nextBatch.Batch.EpochNum, "batch_timestamp", nextBatch.Batch.Timestamp)
+ bq.log.Info("Found next batch", "epoch", epoch, "batch_epoch", nextBatch.Batch.EpochNum, "batch_timestamp", nextBatch.Batch.Timestamp)
return nextBatch.Batch, nil
}
@@ -241,7 +242,7 @@ batchLoop:
// to preserve that L2 time >= L1 time. If this is the first block of the epoch, always generate a
// batch to ensure that we at least have one batch per epoch.
if nextTimestamp < nextEpoch.Time || firstOfEpoch {
- bq.log.Trace("Generating next batch", "epoch", epoch, "timestamp", nextTimestamp)
+ bq.log.Info("Generating next batch", "epoch", epoch, "timestamp", nextTimestamp)
return &BatchData{
BatchV1{
ParentHash: l2SafeHead.Hash,
......
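The level changes in this file (Trace to Info, plus the new Debug line) matter because visibility is gated by a level filter on the logger's handler. A minimal sketch using the go-ethereum log package these files already use, assuming the pre-slog handler API current at the time of this commit; the stage name and field values are placeholders:

```go
package main

import (
	"os"

	"github.com/ethereum/go-ethereum/log"
)

func main() {
	// With the filter at LvlInfo, Trace and Debug records are dropped;
	// promoting a message to Info is what makes it visible under a
	// typical production log level.
	log.Root().SetHandler(log.LvlFilterHandler(
		log.LvlInfo,
		log.StreamHandler(os.Stderr, log.TerminalFormat(true)),
	))

	// Structured calls take a message plus alternating key/value pairs,
	// mirroring the bq.log.Info(...) calls above. Values here are made up.
	logger := log.New("stage", "batch-queue")
	logger.Info("Found next batch", "epoch", 42, "batch_timestamp", 1700000000)
	logger.Trace("Returning found batch") // filtered out at LvlInfo
}
```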
@@ -69,6 +69,7 @@ func (cb *ChannelBank) prune() {
ch := cb.channels[id]
cb.channelQueue = cb.channelQueue[1:]
delete(cb.channels, id)
cb.log.Info("pruning channel", "channel", id, "totalSize", totalSize, "channel_size", ch.size, "remaining_channel_count", len(cb.channels))
totalSize -= ch.size
}
}
@@ -86,6 +87,7 @@ func (cb *ChannelBank) IngestFrame(f Frame) {
currentCh = NewChannel(f.ID, origin)
cb.channels[f.ID] = currentCh
cb.channelQueue = append(cb.channelQueue, f.ID)
log.Info("created new channel")
}
// check if the channel is not timed out
@@ -114,7 +116,7 @@ func (cb *ChannelBank) Read() (data []byte, err error) {
ch := cb.channels[first]
timedOut := ch.OpenBlockNumber()+cb.cfg.ChannelTimeout < cb.Origin().Number
if timedOut {
cb.log.Debug("channel timed out", "channel", first, "frames", len(ch.inputs))
cb.log.Info("channel timed out", "channel", first, "frames", len(ch.inputs))
delete(cb.channels, first)
cb.channelQueue = cb.channelQueue[1:]
return nil, nil // multiple different channels may all be timed out
@@ -137,7 +139,6 @@ func (cb *ChannelBank) Read() (data []byte, err error) {
// consistency around channel bank pruning which depends upon the order
// of operations.
func (cb *ChannelBank) NextData(ctx context.Context) ([]byte, error) {
- // Do the read from the channel bank first
data, err := cb.Read()
if err == io.EOF {
......
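For reference, the timeout rule behind the log line promoted above: a channel opened at L1 block `open` is considered timed out once the bank's origin block number exceeds `open + ChannelTimeout`. A self-contained sketch with hypothetical numbers:

```go
package main

import "fmt"

// timedOut mirrors the check in ChannelBank.Read above:
// ch.OpenBlockNumber()+cb.cfg.ChannelTimeout < cb.Origin().Number.
func timedOut(openBlock, channelTimeout, originBlock uint64) bool {
	return openBlock+channelTimeout < originBlock
}

func main() {
	// A channel opened at block 100 with a timeout of 50 blocks:
	fmt.Println(timedOut(100, 50, 150)) // false: origin 150 is still in time
	fmt.Println(timedOut(100, 50, 151)) // true: channel is dropped and logged at Info
}
```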