Commit 698c8949 authored by amber guru, committed by GitHub

proposer&batcher: Log fix (#10840)

* batcher driver log cap fixed

* batcher service log fix

* proposer service log cap fix
parent 0fb2bb18
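
The diff below only changes the capitalization of the first word of each log message; log levels, call sites, and the structured key-value arguments are untouched. As a rough illustration of the convention (not code from the repository), here is a minimal Go sketch using the standard library's log/slog in place of the service logger, with a made-up listen address:

package main

import "log/slog"

func main() {
	addr := "127.0.0.1:7300" // hypothetical address, for illustration only

	// Before this change: message starts lowercase.
	slog.Info("started metrics server", "addr", addr)

	// After this change: message starts with a capital letter;
	// the key-value arguments stay the same.
	slog.Info("Started metrics server", "addr", addr)
}
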
@@ -167,7 +167,7 @@ func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context) error {
			l.lastStoredBlock = eth.BlockID{}
			return err
		} else if err != nil {
-			l.Log.Warn("failed to load block into state", "err", err)
+			l.Log.Warn("Failed to load block into state", "err", err)
			return err
		}
		l.lastStoredBlock = eth.ToBlockID(block)
@@ -203,7 +203,7 @@ func (l *BatchSubmitter) loadBlockIntoState(ctx context.Context, blockNumber uin
		return nil, fmt.Errorf("adding L2 block to state: %w", err)
	}
-	l.Log.Info("added L2 block to local state", "block", eth.ToBlockID(block), "tx_count", len(block.Transactions()), "time", block.Time())
+	l.Log.Info("Added L2 block to local state", "block", eth.ToBlockID(block), "tx_count", len(block.Transactions()), "time", block.Time())
	return block, nil
}
@@ -233,7 +233,7 @@ func (l *BatchSubmitter) calculateL2BlockRangeToStore(ctx context.Context) (eth.
		l.Log.Info("Starting batch-submitter work at safe-head", "safe", syncStatus.SafeL2)
		l.lastStoredBlock = syncStatus.SafeL2.ID()
	} else if l.lastStoredBlock.Number < syncStatus.SafeL2.Number {
-		l.Log.Warn("last submitted block lagged behind L2 safe head: batch submission will continue from the safe head now", "last", l.lastStoredBlock, "safe", syncStatus.SafeL2)
+		l.Log.Warn("Last submitted block lagged behind L2 safe head: batch submission will continue from the safe head now", "last", l.lastStoredBlock, "safe", syncStatus.SafeL2)
		l.lastStoredBlock = syncStatus.SafeL2.ID()
	}
@@ -276,10 +276,10 @@ func (l *BatchSubmitter) loop() {
		for {
			select {
			case r := <-receiptsCh:
-				l.Log.Info("handling receipt", "id", r.ID)
+				l.Log.Info("Handling receipt", "id", r.ID)
				l.handleReceipt(r)
			case <-receiptLoopDone:
-				l.Log.Info("receipt processing loop done")
+				l.Log.Info("Receipt processing loop done")
				return
			}
		}
@@ -382,7 +382,7 @@ func (l *BatchSubmitter) publishStateToL1(queue *txmgr.Queue[txID], receiptsCh c
		err := l.publishTxToL1(l.killCtx, queue, receiptsCh)
		if err != nil {
			if err != io.EOF {
-				l.Log.Error("error publishing tx to l1", "err", err)
+				l.Log.Error("Error publishing tx to l1", "err", err)
			}
			return
		}
@@ -442,10 +442,10 @@ func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[t
	txdata, err := l.state.TxData(l1tip.ID())
	if err == io.EOF {
-		l.Log.Trace("no transaction data available")
+		l.Log.Trace("No transaction data available")
		return err
	} else if err != nil {
-		l.Log.Error("unable to get tx data", "err", err)
+		l.Log.Error("Unable to get tx data", "err", err)
		return err
	}
@@ -497,7 +497,7 @@ func (l *BatchSubmitter) sendTransaction(ctx context.Context, txdata txData, que
	} else {
		// sanity check
		if nf := len(txdata.frames); nf != 1 {
-			l.Log.Crit("unexpected number of frames in calldata tx", "num_frames", nf)
+			l.Log.Crit("Unexpected number of frames in calldata tx", "num_frames", nf)
		}
		data := txdata.CallData()
		// if plasma DA is enabled we post the txdata to the DA Provider and replace it with the commitment.
@@ -534,7 +534,7 @@ func (l *BatchSubmitter) blobTxCandidate(data txData) (*txmgr.TxCandidate, error
	}
	size := data.Len()
	lastSize := len(data.frames[len(data.frames)-1].data)
-	l.Log.Info("building Blob transaction candidate",
+	l.Log.Info("Building Blob transaction candidate",
		"size", size, "last_size", lastSize, "num_blobs", len(blobs))
	l.Metr.RecordBlobUsedBytes(lastSize)
	return &txmgr.TxCandidate{
@@ -544,7 +544,7 @@ func (l *BatchSubmitter) blobTxCandidate(data txData) (*txmgr.TxCandidate, error
}

func (l *BatchSubmitter) calldataTxCandidate(data []byte) *txmgr.TxCandidate {
-	l.Log.Info("building Calldata transaction candidate", "size", len(data))
+	l.Log.Info("Building Calldata transaction candidate", "size", len(data))
	return &txmgr.TxCandidate{
		To: &l.RollupConfig.BatchInboxAddress,
		TxData: data,
@@ -282,19 +282,19 @@ func (bs *BatcherService) initPProf(cfg *CLIConfig) error {
func (bs *BatcherService) initMetricsServer(cfg *CLIConfig) error {
	if !cfg.MetricsConfig.Enabled {
-		bs.Log.Info("metrics disabled")
+		bs.Log.Info("Metrics disabled")
		return nil
	}
	m, ok := bs.Metrics.(opmetrics.RegistryMetricer)
	if !ok {
		return fmt.Errorf("metrics were enabled, but metricer %T does not expose registry for metrics-server", bs.Metrics)
	}
-	bs.Log.Debug("starting metrics server", "addr", cfg.MetricsConfig.ListenAddr, "port", cfg.MetricsConfig.ListenPort)
+	bs.Log.Debug("Starting metrics server", "addr", cfg.MetricsConfig.ListenAddr, "port", cfg.MetricsConfig.ListenPort)
	metricsSrv, err := opmetrics.StartServer(m.Registry(), cfg.MetricsConfig.ListenAddr, cfg.MetricsConfig.ListenPort)
	if err != nil {
		return fmt.Errorf("failed to start metrics server: %w", err)
	}
-	bs.Log.Info("started metrics server", "addr", metricsSrv.Addr())
+	bs.Log.Info("Started metrics server", "addr", metricsSrv.Addr())
	bs.metricsSrv = metricsSrv
	return nil
}
@@ -186,19 +186,19 @@ func (ps *ProposerService) initPProf(cfg *CLIConfig) error {
func (ps *ProposerService) initMetricsServer(cfg *CLIConfig) error {
	if !cfg.MetricsConfig.Enabled {
-		ps.Log.Info("metrics disabled")
+		ps.Log.Info("Metrics disabled")
		return nil
	}
	m, ok := ps.Metrics.(opmetrics.RegistryMetricer)
	if !ok {
		return fmt.Errorf("metrics were enabled, but metricer %T does not expose registry for metrics-server", ps.Metrics)
	}
-	ps.Log.Debug("starting metrics server", "addr", cfg.MetricsConfig.ListenAddr, "port", cfg.MetricsConfig.ListenPort)
+	ps.Log.Debug("Starting metrics server", "addr", cfg.MetricsConfig.ListenAddr, "port", cfg.MetricsConfig.ListenPort)
	metricsSrv, err := opmetrics.StartServer(m.Registry(), cfg.MetricsConfig.ListenAddr, cfg.MetricsConfig.ListenPort)
	if err != nil {
		return fmt.Errorf("failed to start metrics server: %w", err)
	}
-	ps.Log.Info("started metrics server", "addr", metricsSrv.Addr())
+	ps.Log.Info("Started metrics server", "addr", metricsSrv.Addr())
	ps.metricsSrv = metricsSrv
	return nil
}