package sources

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-service/client"
	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum-optimism/optimism/op-service/sources/caching"
)

type L1ClientConfig struct {
	EthClientConfig

	L1BlockRefsCacheSize int
}

func L1ClientDefaultConfig(config *rollup.Config, trustRPC bool, kind RPCProviderKind) *L1ClientConfig {
	// Cache 3/2 of a sequencing window's worth of receipts and txs
	span := int(config.SeqWindowSize) * 3 / 2
	return L1ClientSimpleConfig(trustRPC, kind, span)
}

func L1ClientSimpleConfig(trustRPC bool, kind RPCProviderKind, cacheSize int) *L1ClientConfig {
	span := cacheSize
	if span > 1000 { // sanity cap. If a large sequencing window is configured, do not make the cache too large
		span = 1000
	}
	return &L1ClientConfig{
		EthClientConfig: EthClientConfig{
			// receipts and transactions are cached per block
			ReceiptsCacheSize:     span,
			TransactionsCacheSize: span,
			HeadersCacheSize:      span,
			PayloadsCacheSize:     span,
			MaxRequestsPerBatch:   20, // TODO: tune batch param
			MaxConcurrentRequests: 10,
			TrustRPC:              trustRPC,
			MustBePostMerge:       false,
			RPCProviderKind:       kind,
			MethodResetDuration:   time.Minute,
		},
		// Not bounded by span, to cover the find-sync-start range fully for speedy recovery after errors.
		L1BlockRefsCacheSize: cacheSize,
	}
}
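
// Illustrative cache sizing (a sketch with assumed numbers, not taken from this file):
// with a sequencing window of SeqWindowSize = 3600 L1 blocks, L1ClientDefaultConfig
// computes span = 3600 * 3 / 2 = 5400. L1ClientSimpleConfig then caps the per-block
// receipt, transaction, header, and payload caches at 1000 entries, while
// L1BlockRefsCacheSize keeps the full 5400 so find-sync-start lookups remain cached.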

// L1Client provides typed bindings to retrieve L1 data from an RPC source,
// with optimized batch requests, cached results, and a flag to not trust the RPC
// (i.e. to verify all returned contents against corresponding block hashes).
type L1Client struct {
	*EthClient

	// cache L1BlockRef by hash
	// common.Hash -> eth.L1BlockRef
	l1BlockRefsCache *caching.LRUCache[common.Hash, eth.L1BlockRef]
}

// NewL1Client wraps an RPC with bindings to fetch L1 data, while logging errors, tracking metrics (optional), and caching.
func NewL1Client(client client.RPC, log log.Logger, metrics caching.Metrics, config *L1ClientConfig) (*L1Client, error) {
	ethClient, err := NewEthClient(client, log, metrics, &config.EthClientConfig)
	if err != nil {
		return nil, err
	}

	return &L1Client{
		EthClient:        ethClient,
		l1BlockRefsCache: caching.NewLRUCache[common.Hash, eth.L1BlockRef](metrics, "blockrefs", config.L1BlockRefsCacheSize),
	}, nil
}
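
// Example wiring (a minimal sketch, not part of this file; rpcClient, logger, metrics,
// rollupCfg, and ctx are assumed to be constructed by the caller, e.g. via
// op-service/client and op-node/rollup):
//
//	cfg := L1ClientDefaultConfig(rollupCfg, false, RPCKindBasic)
//	l1, err := NewL1Client(rpcClient, logger, metrics, cfg)
//	if err != nil {
//		// handle construction error
//	}
//	ref, err := l1.L1BlockRefByLabel(ctx, eth.Finalized)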

// L1BlockRefByLabel returns the [eth.L1BlockRef] for the given block label.
// Note that we cannot cache a block reference by label, since labels are not guaranteed to be unique.
func (s *L1Client) L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L1BlockRef, error) {
	info, err := s.InfoByLabel(ctx, label)
	if err != nil {
		// Both geth and erigon like to serve non-standard errors for the safe and finalized heads; correct for that here.
		// This happens when the chain just started and nothing is marked as safe/finalized yet.
		if strings.Contains(err.Error(), "block not found") || strings.Contains(err.Error(), "Unknown block") {
			err = ethereum.NotFound
		}
		return eth.L1BlockRef{}, fmt.Errorf("failed to fetch head header: %w", err)
	}
	ref := eth.InfoToL1BlockRef(info)
	s.l1BlockRefsCache.Add(ref.Hash, ref)
	return ref, nil
}
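
// Because the normalized error is wrapped with %w above, callers can detect a label
// that is not yet available without string matching (sketch, assuming the standard
// library errors package):
//
//	ref, err := l1.L1BlockRefByLabel(ctx, eth.Finalized)
//	if errors.Is(err, ethereum.NotFound) {
//		// no finalized block yet; fall back or retry later
//	}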

// L1BlockRefByNumber returns an [eth.L1BlockRef] for the given block number.
// Note that we cannot cache a block reference by number, since an L1 re-org can invalidate the cached block reference.
func (s *L1Client) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1BlockRef, error) {
	info, err := s.InfoByNumber(ctx, num)
	if err != nil {
		return eth.L1BlockRef{}, fmt.Errorf("failed to fetch header by num %d: %w", num, err)
	}
	ref := eth.InfoToL1BlockRef(info)
	s.l1BlockRefsCache.Add(ref.Hash, ref)
	return ref, nil
}

// L1BlockRefByHash returns the [eth.L1BlockRef] for the given block hash.
// We cache the block reference by hash, as it is safe to assume hash collisions will not occur.
func (s *L1Client) L1BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L1BlockRef, error) {
	if v, ok := s.l1BlockRefsCache.Get(hash); ok {
		return v, nil
	}
	info, err := s.InfoByHash(ctx, hash)
	if err != nil {
		return eth.L1BlockRef{}, fmt.Errorf("failed to fetch header by hash %v: %w", hash, err)
	}
	ref := eth.InfoToL1BlockRef(info)
	s.l1BlockRefsCache.Add(ref.Hash, ref)
	return ref, nil
}
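
// Cache behavior sketch (illustrative, not part of the original file): only the by-hash
// lookup is served from l1BlockRefsCache on repeat calls, since a hash uniquely
// identifies a block. By-number and by-label lookups always hit the RPC, but still
// populate the cache, so a later by-hash lookup for the same block is served locally.
//
//	ref, _ := l1.L1BlockRefByNumber(ctx, 19_000_000) // fetched via RPC, then cached by ref.Hash
//	same, _ := l1.L1BlockRefByHash(ctx, ref.Hash)    // served from the LRU cache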