package sources

import (
	"context"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-service/client"
	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum-optimism/optimism/op-service/sources/caching"
)

type L1ClientConfig struct {
	EthClientConfig
}

func L1ClientDefaultConfig(config *rollup.Config, trustRPC bool, kind RPCProviderKind) *L1ClientConfig {
	// Cache 3/2 of a sequencing window's worth of receipts and txs
	span := int(config.SeqWindowSize) * 3 / 2
	return L1ClientSimpleConfig(trustRPC, kind, span)
}

func L1ClientSimpleConfig(trustRPC bool, kind RPCProviderKind, cacheSize int) *L1ClientConfig {
	span := cacheSize
	if span > 1000 { // sanity cap. If a large sequencing window is configured, do not make the cache too large
		span = 1000
	}
	return &L1ClientConfig{
		EthClientConfig: EthClientConfig{
			// receipts and transactions are cached per block
			ReceiptsCacheSize:     span,
			TransactionsCacheSize: span,
			HeadersCacheSize:      span,
			PayloadsCacheSize:     span,
			MaxRequestsPerBatch:   20, // TODO: tune batch param
			MaxConcurrentRequests: 10,
			TrustRPC:              trustRPC,
			MustBePostMerge:       false,
			RPCProviderKind:       kind,
			MethodResetDuration:   time.Minute,
			// Not bounded by span, to cover find-sync-start range fully for speedy recovery after errors.
			BlockRefsCacheSize: cacheSize,
		},
	}
}
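
// exampleDefaultCacheSizing is an illustrative sketch of the sizing behaviour above,
// using an assumed sequencing window of 3600 L1 blocks (the value is illustrative only):
// the heavier per-block caches are capped at 1000 entries, while the block-ref cache
// keeps the full 3/2 window.
func exampleDefaultCacheSizing() {
	cfg := L1ClientDefaultConfig(&rollup.Config{SeqWindowSize: 3600}, false, RPCKindBasic)
	_ = cfg.ReceiptsCacheSize  // 1000: 3/2 * 3600 = 5400, reduced by the sanity cap
	_ = cfg.BlockRefsCacheSize // 5400: not bounded by the cap, to cover find-sync-start
}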

// L1Client provides typed bindings to retrieve L1 data from an RPC source,
// with optimized batch requests, cached results, and a flag to not trust the RPC
// (i.e. to verify all returned contents against the corresponding block hashes).
type L1Client struct {
	*EthClient
}

// NewL1Client wraps an RPC with bindings to fetch L1 data, while logging errors, tracking metrics (optional), and caching.
func NewL1Client(client client.RPC, log log.Logger, metrics caching.Metrics, config *L1ClientConfig) (*L1Client, error) {
	ethClient, err := NewEthClient(client, log, metrics, &config.EthClientConfig)
	if err != nil {
		return nil, err
	}

	return &L1Client{
		EthClient: ethClient,
	}, nil
}
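
// newL1ClientFromAddr is an illustrative sketch (assumptions noted inline) of how the
// constructor is typically wired up: dial the L1 RPC endpoint, build the default config
// from the rollup config, and wrap it all in an L1Client.
func newL1ClientFromAddr(ctx context.Context, lgr log.Logger, rollupCfg *rollup.Config, addr string) (*L1Client, error) {
	rpcClient, err := client.NewRPC(ctx, lgr, addr) // assumes the op-service client.NewRPC dialer
	if err != nil {
		return nil, err
	}
	// trustRPC=false verifies responses against block hashes; RPCKindBasic avoids
	// provider-specific optimizations. A nil caching.Metrics (assumed to be accepted)
	// simply skips metrics tracking.
	cfg := L1ClientDefaultConfig(rollupCfg, false, RPCKindBasic)
	return NewL1Client(rpcClient, lgr, nil, cfg)
}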

// L1BlockRefByLabel returns the [eth.L1BlockRef] for the given block label.
// Note that we cannot cache a block reference by label, since labels are not guaranteed to be unique.
func (s *L1Client) L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L1BlockRef, error) {
	return s.BlockRefByLabel(ctx, label)
}

// L1BlockRefByNumber returns an [eth.L1BlockRef] for the given block number.
// Note that we cannot cache a block reference by number, since L1 re-orgs can invalidate the cached block reference.
func (s *L1Client) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1BlockRef, error) {
	return s.BlockRefByNumber(ctx, num)
}

// L1BlockRefByHash returns the [eth.L1BlockRef] for the given block hash.
// We cache the block reference by hash, as it is safe to assume a collision will not occur.
func (s *L1Client) L1BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L1BlockRef, error) {
	return s.BlockRefByHash(ctx, hash)
}
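
// fetchLatestL1Ref is an illustrative sketch of how the accessors above are used:
// fetch the latest ("unsafe") L1 head with a bounded timeout. The 10-second timeout
// is an arbitrary assumption.
func fetchLatestL1Ref(ctx context.Context, l1 *L1Client) (eth.L1BlockRef, error) {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	return l1.L1BlockRefByLabel(ctx, eth.Unsafe)
}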