Commit 3658b171 authored by Arman Mazdaee

op-node: Update golang-lru to v2

parent 7b30659f
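golang-lru v2 replaces the v1 `interface{}`-based API with Go generics (requires Go 1.18+): `lru.New[K, V](size)` returns a `*lru.Cache[K, V]` whose `Get` and `Add` are typed, so callers no longer need type assertions on cached values. A minimal sketch of the difference (the cache contents here are purely illustrative):

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// v1: lru.New(10) returned a *lru.Cache storing interface{} keys and values,
	// so every read needed a type assertion, e.g. v.(string).
	// v2: the cache is parameterized and Get returns the concrete type.
	cache, err := lru.New[uint64, string](10)
	if err != nil {
		panic(err) // New only errors on a non-positive size
	}
	cache.Add(1, "one")
	if v, ok := cache.Get(1); ok {
		fmt.Println(v) // v is already a string: no assertion needed
	}
}
```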
@@ -10,7 +10,7 @@ import (
 	"time"
 	"github.com/golang/snappy"
-	lru "github.com/hashicorp/golang-lru"
+	lru "github.com/hashicorp/golang-lru/v2"
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	pb "github.com/libp2p/go-libp2p-pubsub/pb"
 	"github.com/libp2p/go-libp2p/core/host"
@@ -242,7 +242,7 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti
 	// Seen block hashes per block height
 	// uint64 -> *seenBlocks
-	blockHeightLRU, err := lru.New(1000)
+	blockHeightLRU, err := lru.New[uint64, *seenBlocks](1000)
 	if err != nil {
 		panic(fmt.Errorf("failed to set up block height LRU cache: %w", err))
 	}
@@ -315,7 +315,7 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti
 		blockHeightLRU.Add(uint64(payload.BlockNumber), seen)
 	}
-	if count, hasSeen := seen.(*seenBlocks).hasSeen(payload.BlockHash); count > 5 {
+	if count, hasSeen := seen.hasSeen(payload.BlockHash); count > 5 {
 		// [REJECT] if more than 5 blocks have been seen with the same block height
 		log.Warn("seen too many different blocks at same height", "height", payload.BlockNumber)
 		return pubsub.ValidationReject
@@ -327,7 +327,7 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti
 	// mark it as seen. (note: with concurrent validation more than 5 blocks may be marked as seen still,
 	// but validator concurrency is limited anyway)
-	seen.(*seenBlocks).markSeen(payload.BlockHash)
+	seen.markSeen(payload.BlockHash)
 	// remember the decoded payload for later usage in topic subscriber.
 	message.ValidatorData = &payload
......
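With the cache typed as `lru.New[uint64, *seenBlocks]`, values come back from `Get` as `*seenBlocks` directly, which is what lets the hunks above drop the `seen.(*seenBlocks)` assertions. A sketch of the get-or-create pattern, using a simplified stand-in for `seenBlocks` since its definition is not part of this diff:

```go
package main

import lru "github.com/hashicorp/golang-lru/v2"

// seenBlocks is a simplified stand-in: the real type (not shown in this diff)
// tracks which block hashes have been seen at one height.
type seenBlocks struct {
	hashes [][32]byte
}

func main() {
	blockHeightLRU, err := lru.New[uint64, *seenBlocks](1000)
	if err != nil {
		panic(err)
	}
	height := uint64(42)
	seen, ok := blockHeightLRU.Get(height)
	if !ok {
		seen = new(seenBlocks)
		blockHeightLRU.Add(height, seen)
	}
	// Under v1 this required seen.(*seenBlocks); under v2 `seen` is
	// already a *seenBlocks.
	_ = seen
}
```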
 package caching

-import lru "github.com/hashicorp/golang-lru"
+import lru "github.com/hashicorp/golang-lru/v2"

 type Metrics interface {
 	CacheAdd(label string, cacheSize int, evicted bool)
@@ -8,13 +8,13 @@ type Metrics interface {
 }

 // LRUCache wraps hashicorp *lru.Cache and tracks cache metrics
-type LRUCache struct {
+type LRUCache[K comparable, V any] struct {
 	m     Metrics
 	label string
-	inner *lru.Cache
+	inner *lru.Cache[K, V]
 }

-func (c *LRUCache) Get(key any) (value any, ok bool) {
+func (c *LRUCache[K, V]) Get(key K) (value V, ok bool) {
 	value, ok = c.inner.Get(key)
 	if c.m != nil {
 		c.m.CacheGet(c.label, ok)
@@ -22,7 +22,7 @@ func (c *LRUCache) Get(key any) (value any, ok bool) {
 	return value, ok
 }

-func (c *LRUCache) Add(key, value any) (evicted bool) {
+func (c *LRUCache[K, V]) Add(key K, value V) (evicted bool) {
 	evicted = c.inner.Add(key, value)
 	if c.m != nil {
 		c.m.CacheAdd(c.label, c.inner.Len(), evicted)
@@ -32,10 +32,10 @@ func (c *LRUCache) Add(key, value any) (evicted bool) {
 // NewLRUCache creates a LRU cache with the given metrics, labeling the cache adds/gets.
 // Metrics are optional: no metrics will be tracked if m == nil.
-func NewLRUCache(m Metrics, label string, maxSize int) *LRUCache {
+func NewLRUCache[K comparable, V any](m Metrics, label string, maxSize int) *LRUCache[K, V] {
 	// no errors if the size is positive
-	cache, _ := lru.New(maxSize)
-	return &LRUCache{
+	cache, _ := lru.New[K, V](maxSize)
+	return &LRUCache[K, V]{
 		m:     m,
 		label: label,
 		inner: cache,
......
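After this change, the wrapper's key and value types are fixed at construction and `Get`/`Add` are fully typed. A usage sketch, assuming the op-node import path for the `caching` package at the time of this commit (it may have moved since) and a hypothetical `Metrics` implementation; the `CacheGet` signature is inferred from the `c.m.CacheGet(c.label, ok)` call site above:

```go
package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-node/sources/caching"
)

// countingMetrics is a hypothetical Metrics implementation for illustration.
type countingMetrics struct{ adds, gets int }

func (m *countingMetrics) CacheAdd(label string, cacheSize int, evicted bool) { m.adds++ }
func (m *countingMetrics) CacheGet(label string, hit bool)                    { m.gets++ }

func main() {
	m := &countingMetrics{}
	// Key and value types are chosen once, at construction.
	c := caching.NewLRUCache[string, int](m, "example", 10)
	c.Add("answer", 42)
	if v, ok := c.Get("answer"); ok {
		fmt.Println(v, m.adds, m.gets) // v is an int: no type assertion
	}
}
```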
@@ -106,19 +106,19 @@ type EthClient struct {
 	// cache receipts in bundles per block hash
 	// We cache the receipts fetching job to not lose progress when we have to retry the `Fetch` call
 	// common.Hash -> *receiptsFetchingJob
-	receiptsCache *caching.LRUCache
+	receiptsCache *caching.LRUCache[common.Hash, *receiptsFetchingJob]

 	// cache transactions in bundles per block hash
 	// common.Hash -> types.Transactions
-	transactionsCache *caching.LRUCache
+	transactionsCache *caching.LRUCache[common.Hash, types.Transactions]

 	// cache block headers of blocks by hash
 	// common.Hash -> *HeaderInfo
-	headersCache *caching.LRUCache
+	headersCache *caching.LRUCache[common.Hash, eth.BlockInfo]

 	// cache payloads by hash
 	// common.Hash -> *eth.ExecutionPayload
-	payloadsCache *caching.LRUCache
+	payloadsCache *caching.LRUCache[common.Hash, *eth.ExecutionPayload]

 	// availableReceiptMethods tracks which receipt methods can be used for fetching receipts
 	// This may be modified concurrently, but we don't lock since it's a single
@@ -172,10 +172,10 @@ func NewEthClient(client client.RPC, log log.Logger, metrics caching.Metrics, co
 		mustBePostMerge:   config.MustBePostMerge,
 		provKind:          config.RPCProviderKind,
 		log:               log,
-		receiptsCache:     caching.NewLRUCache(metrics, "receipts", config.ReceiptsCacheSize),
-		transactionsCache: caching.NewLRUCache(metrics, "txs", config.TransactionsCacheSize),
-		headersCache:      caching.NewLRUCache(metrics, "headers", config.HeadersCacheSize),
-		payloadsCache:     caching.NewLRUCache(metrics, "payloads", config.PayloadsCacheSize),
+		receiptsCache:     caching.NewLRUCache[common.Hash, *receiptsFetchingJob](metrics, "receipts", config.ReceiptsCacheSize),
+		transactionsCache: caching.NewLRUCache[common.Hash, types.Transactions](metrics, "txs", config.TransactionsCacheSize),
+		headersCache:      caching.NewLRUCache[common.Hash, eth.BlockInfo](metrics, "headers", config.HeadersCacheSize),
+		payloadsCache:     caching.NewLRUCache[common.Hash, *eth.ExecutionPayload](metrics, "payloads", config.PayloadsCacheSize),
 		availableReceiptMethods: AvailableReceiptsFetchingMethods(config.RPCProviderKind),
 		lastMethodsReset:        time.Now(),
 		methodResetDuration:     config.MethodResetDuration,
@@ -292,7 +292,7 @@ func (s *EthClient) ChainID(ctx context.Context) (*big.Int, error) {
 func (s *EthClient) InfoByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, error) {
 	if header, ok := s.headersCache.Get(hash); ok {
-		return header.(eth.BlockInfo), nil
+		return header, nil
 	}
 	return s.headerCall(ctx, "eth_getBlockByHash", hashID(hash))
 }
@@ -310,7 +310,7 @@ func (s *EthClient) InfoByLabel(ctx context.Context, label eth.BlockLabel) (eth.
 func (s *EthClient) InfoAndTxsByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, types.Transactions, error) {
 	if header, ok := s.headersCache.Get(hash); ok {
 		if txs, ok := s.transactionsCache.Get(hash); ok {
-			return header.(eth.BlockInfo), txs.(types.Transactions), nil
+			return header, txs, nil
 		}
 	}
 	return s.blockCall(ctx, "eth_getBlockByHash", hashID(hash))
@@ -328,7 +328,7 @@ func (s *EthClient) InfoAndTxsByLabel(ctx context.Context, label eth.BlockLabel)
 func (s *EthClient) PayloadByHash(ctx context.Context, hash common.Hash) (*eth.ExecutionPayload, error) {
 	if payload, ok := s.payloadsCache.Get(hash); ok {
-		return payload.(*eth.ExecutionPayload), nil
+		return payload, nil
 	}
 	return s.payloadCall(ctx, "eth_getBlockByHash", hashID(hash))
 }
@@ -354,7 +354,7 @@ func (s *EthClient) FetchReceipts(ctx context.Context, blockHash common.Hash) (e
 	// The underlying fetcher uses the receipts hash to verify receipt integrity.
 	var job *receiptsFetchingJob
 	if v, ok := s.receiptsCache.Get(blockHash); ok {
-		job = v.(*receiptsFetchingJob)
+		job = v
 	} else {
 		txHashes := eth.TransactionsToHashes(txs)
 		job = NewReceiptsFetchingJob(s, s.client, s.maxBatchSize, eth.ToBlockID(info), info.ReceiptHash(), txHashes)
......
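The EthClient comment above explains why the *fetching job* is cached rather than the receipts themselves: a partially-complete job survives a failed `Fetch`, so a retry resumes instead of starting over. With the typed cache, the job also comes back without an assertion. A sketch of that get-or-create pattern, with hypothetical stand-ins for the unexported `receiptsFetchingJob`:

```go
package main

import lru "github.com/hashicorp/golang-lru/v2"

type blockHash [32]byte

// fetchJob stands in for *receiptsFetchingJob: it keeps partial progress
// across retries, which is why the job (not the result) is cached.
type fetchJob struct{ fetched int }

// getOrCreateJob mirrors the lookup in FetchReceipts above.
func getOrCreateJob(cache *lru.Cache[blockHash, *fetchJob], h blockHash) *fetchJob {
	if job, ok := cache.Get(h); ok {
		return job // typed *fetchJob; v1 needed job.(*receiptsFetchingJob)
	}
	job := &fetchJob{}
	cache.Add(h, job)
	return job
}

func main() {
	cache, _ := lru.New[blockHash, *fetchJob](100) // positive size: no error
	_ = getOrCreateJob(cache, blockHash{1})
}
```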
@@ -56,7 +56,7 @@ type L1Client struct {
 	// cache L1BlockRef by hash
 	// common.Hash -> eth.L1BlockRef
-	l1BlockRefsCache *caching.LRUCache
+	l1BlockRefsCache *caching.LRUCache[common.Hash, eth.L1BlockRef]
 }

 // NewL1Client wraps a RPC with bindings to fetch L1 data, while logging errors, tracking metrics (optional), and caching.
@@ -68,7 +68,7 @@ func NewL1Client(client client.RPC, log log.Logger, metrics caching.Metrics, con
 	return &L1Client{
 		EthClient:        ethClient,
-		l1BlockRefsCache: caching.NewLRUCache(metrics, "blockrefs", config.L1BlockRefsCacheSize),
+		l1BlockRefsCache: caching.NewLRUCache[common.Hash, eth.L1BlockRef](metrics, "blockrefs", config.L1BlockRefsCacheSize),
 	}, nil
 }
@@ -105,7 +105,7 @@ func (s *L1Client) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1Bl
 // We cache the block reference by hash as it is safe to assume collision will not occur.
 func (s *L1Client) L1BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L1BlockRef, error) {
 	if v, ok := s.l1BlockRefsCache.Get(hash); ok {
-		return v.(eth.L1BlockRef), nil
+		return v, nil
 	}
 	info, err := s.InfoByHash(ctx, hash)
 	if err != nil {
......
@@ -68,11 +68,11 @@ type L2Client struct {
 	// cache L2BlockRef by hash
 	// common.Hash -> eth.L2BlockRef
-	l2BlockRefsCache *caching.LRUCache
+	l2BlockRefsCache *caching.LRUCache[common.Hash, eth.L2BlockRef]

 	// cache SystemConfig by L2 hash
 	// common.Hash -> eth.SystemConfig
-	systemConfigsCache *caching.LRUCache
+	systemConfigsCache *caching.LRUCache[common.Hash, eth.SystemConfig]
 }

 // NewL2Client constructs a new L2Client instance. The L2Client is a thin wrapper around the EthClient with added functions
@@ -87,8 +87,8 @@ func NewL2Client(client client.RPC, log log.Logger, metrics caching.Metrics, con
 	return &L2Client{
 		EthClient:          ethClient,
 		rollupCfg:          config.RollupCfg,
-		l2BlockRefsCache:   caching.NewLRUCache(metrics, "blockrefs", config.L2BlockRefsCacheSize),
-		systemConfigsCache: caching.NewLRUCache(metrics, "systemconfigs", config.L1ConfigsCacheSize),
+		l2BlockRefsCache:   caching.NewLRUCache[common.Hash, eth.L2BlockRef](metrics, "blockrefs", config.L2BlockRefsCacheSize),
+		systemConfigsCache: caching.NewLRUCache[common.Hash, eth.SystemConfig](metrics, "systemconfigs", config.L1ConfigsCacheSize),
 	}, nil
 }
@@ -131,7 +131,7 @@ func (s *L2Client) L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2Bl
 // The returned BlockRef may not be in the canonical chain.
 func (s *L2Client) L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L2BlockRef, error) {
 	if ref, ok := s.l2BlockRefsCache.Get(hash); ok {
-		return ref.(eth.L2BlockRef), nil
+		return ref, nil
 	}
 	payload, err := s.PayloadByHash(ctx, hash)
@@ -151,7 +151,7 @@ func (s *L2Client) L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.
 // The returned [eth.SystemConfig] may not be in the canonical chain when the hash is not canonical.
 func (s *L2Client) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (eth.SystemConfig, error) {
 	if ref, ok := s.systemConfigsCache.Get(hash); ok {
-		return ref.(eth.SystemConfig), nil
+		return ref, nil
 	}
 	payload, err := s.PayloadByHash(ctx, hash)
......