package sources

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-service/client"
	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum-optimism/optimism/op-service/sources/caching"
)

type L1ClientConfig struct {
	EthClientConfig

	L1BlockRefsCacheSize int
	PrefetchingWindow    uint64
	PrefetchingTimeout   time.Duration
}

func L1ClientDefaultConfig(config *rollup.Config, trustRPC bool, kind RPCProviderKind) *L1ClientConfig {
	// Cache 3/2 of a sequencing window's worth of receipts and txs
	span := int(config.SeqWindowSize) * 3 / 2
	fullSpan := span
	if span > 1000 { // sanity cap. If a large sequencing window is configured, do not make the cache too large
		span = 1000
	}
	return &L1ClientConfig{
		EthClientConfig: EthClientConfig{
			// receipts and transactions are cached per block
			ReceiptsCacheSize:     span,
			TransactionsCacheSize: span,
			HeadersCacheSize:      span,
			PayloadsCacheSize:     span,
			MaxRequestsPerBatch:   20, // TODO: tune batch param
			MaxConcurrentRequests: 10,
			TrustRPC:              trustRPC,
			MustBePostMerge:       false,
			RPCProviderKind:       kind,
			MethodResetDuration:   time.Minute,
		},
		// Not bounded by span, to cover the find-sync-start range fully for speedy recovery after errors.
		L1BlockRefsCacheSize: fullSpan,
		PrefetchingWindow:    0, // no prefetching by default
		PrefetchingTimeout:   0, // no prefetching by default
	}
}
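
// The helper below is an illustrative sketch, not part of the package API: it shows how a caller
// could start from L1ClientDefaultConfig and opt into prefetching by overriding the two zero-valued
// fields. The window and timeout values are assumptions picked for the example only.
func examplePrefetchingL1ClientConfig(config *rollup.Config, trustRPC bool, kind RPCProviderKind) *L1ClientConfig {
	cfg := L1ClientDefaultConfig(config, trustRPC, kind)
	// Both fields must be non-zero for NewL1Client (below) to wrap the EthClient in a PrefetchingEthClient.
	cfg.PrefetchingWindow = 10                // hypothetical: prefetch up to 10 blocks ahead
	cfg.PrefetchingTimeout = 30 * time.Second // hypothetical: per-prefetch timeout
	return cfg
}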

// L1Client provides typed bindings to retrieve L1 data from an RPC source,
// with optimized batch requests, cached results, and a flag to not trust the RPC
// (i.e. to verify all returned contents against the corresponding block hashes).
type L1Client struct {
	EthClientInterface

	// cache L1BlockRef by hash
	// common.Hash -> eth.L1BlockRef
	l1BlockRefsCache *caching.LRUCache[common.Hash, eth.L1BlockRef]
}

// NewL1Client wraps an RPC with bindings to fetch L1 data, while logging errors, tracking metrics (optional), and caching.
// If both PrefetchingWindow and PrefetchingTimeout are set, the underlying EthClient is wrapped in a PrefetchingEthClient.
func NewL1Client(client client.RPC, log log.Logger, metrics caching.Metrics, config *L1ClientConfig) (*L1Client, error) {
	ethClient, err := NewEthClient(client, log, metrics, &config.EthClientConfig)
	if err != nil {
		return nil, err
	}

	var clientToUse EthClientInterface

	if config.PrefetchingTimeout > 0 && config.PrefetchingWindow > 0 {
		prefetchingEthClient, err := NewPrefetchingEthClient(ethClient, config.PrefetchingWindow, config.PrefetchingTimeout)
		if err != nil {
			return nil, err
		}
		clientToUse = prefetchingEthClient
	} else {
		clientToUse = ethClient
	}

	return &L1Client{
		EthClientInterface: clientToUse,
		l1BlockRefsCache:   caching.NewLRUCache[common.Hash, eth.L1BlockRef](metrics, "blockrefs", config.L1BlockRefsCacheSize),
	}, nil
}
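
// exampleNewL1Client is a hypothetical usage sketch, assuming the caller already holds a client.RPC,
// a logger, and an (optional) metrics implementation; none of these are constructed here.
// It builds a default config and fetches the current unsafe head reference.
func exampleNewL1Client(ctx context.Context, rpcClient client.RPC, logger log.Logger, m caching.Metrics,
	rollupCfg *rollup.Config, kind RPCProviderKind) (eth.L1BlockRef, error) {
	cfg := L1ClientDefaultConfig(rollupCfg, false, kind)
	l1, err := NewL1Client(rpcClient, logger, m, cfg)
	if err != nil {
		return eth.L1BlockRef{}, err
	}
	return l1.L1BlockRefByLabel(ctx, eth.Unsafe)
}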

// L1BlockRefByLabel returns the [eth.L1BlockRef] for the given block label.
// Note that we cannot cache a block reference by label: a label is not a unique identifier and may refer to different blocks over time.
func (s *L1Client) L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L1BlockRef, error) {
	info, err := s.InfoByLabel(ctx, label)
	if err != nil {
		// Both geth and erigon like to serve non-standard errors for the safe and finalized heads; correct for that here.
		// This happens when the chain has just started and nothing is marked as safe/finalized yet.
		if strings.Contains(err.Error(), "block not found") || strings.Contains(err.Error(), "Unknown block") {
			err = ethereum.NotFound
		}
		return eth.L1BlockRef{}, fmt.Errorf("failed to fetch head header: %w", err)
	}
	ref := eth.InfoToL1BlockRef(info)
	s.l1BlockRefsCache.Add(ref.Hash, ref)
	return ref, nil
}

// L1BlockRefByNumber returns an [eth.L1BlockRef] for the given block number.
// Note that we cannot cache a block reference by number, since L1 re-orgs can invalidate the cached block reference.
func (s *L1Client) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1BlockRef, error) {
	info, err := s.InfoByNumber(ctx, num)
	if err != nil {
		return eth.L1BlockRef{}, fmt.Errorf("failed to fetch header by num %d: %w", num, err)
	}
	ref := eth.InfoToL1BlockRef(info)
	s.l1BlockRefsCache.Add(ref.Hash, ref)
	return ref, nil
}

// L1BlockRefByHash returns the [eth.L1BlockRef] for the given block hash.
// We cache the block reference by hash, as it is safe to assume a hash collision will not occur.
func (s *L1Client) L1BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L1BlockRef, error) {
	if v, ok := s.l1BlockRefsCache.Get(hash); ok {
		return v, nil
	}
	info, err := s.InfoByHash(ctx, hash)
	if err != nil {
		return eth.L1BlockRef{}, fmt.Errorf("failed to fetch header by hash %v: %w", hash, err)
	}
	ref := eth.InfoToL1BlockRef(info)
	s.l1BlockRefsCache.Add(ref.Hash, ref)
	return ref, nil
}
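
// exampleBlockRefCaching is an illustrative sketch (the block number is arbitrary) showing which of the
// lookups above can be served from l1BlockRefsCache: only by-hash lookups hit the cache, while by-number
// and by-label lookups always go to the RPC, as explained in the comments on the methods above.
func exampleBlockRefCaching(ctx context.Context, l1 *L1Client) error {
	ref, err := l1.L1BlockRefByNumber(ctx, 100) // always fetched from the RPC; populates the by-hash cache
	if err != nil {
		return err
	}
	if _, err := l1.L1BlockRefByHash(ctx, ref.Hash); err != nil { // served from l1BlockRefsCache
		return err
	}
	_, err = l1.L1BlockRefByLabel(ctx, eth.Finalized) // labels move over time, so this is never cached
	return err
}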