Commit 1c013805 authored by Joshua Gutow's avatar Joshua Gutow Committed by GitHub

Merge pull request #8130 from ethereum-optimism/seb/receipts-provider

op-service: Refactor `EthClient` with `ReceiptsProvider` abstraction
parents 658d2a68 0745c50e
......@@ -63,7 +63,9 @@ type EthClientConfig struct {
// If this is 0 then the client does not fall back to less optimal but available methods.
MethodResetDuration time.Duration
// [OPTIONAL] The reth DB path to fetch receipts from
// [OPTIONAL] The reth DB path to fetch receipts from.
// If it is specified, the rethdb receipts fetcher will be used
// and the RPC configuration parameters don't need to be set.
RethDBPath string
}
......@@ -80,6 +82,15 @@ func (c *EthClientConfig) Check() error {
if c.PayloadsCacheSize < 0 {
return fmt.Errorf("invalid payloads cache size: %d", c.PayloadsCacheSize)
}
if c.RethDBPath != "" {
if buildRethdb {
// If the rethdb path is set, we use the rethdb receipts fetcher and skip creating
// an RPC receipts fetcher, so below rpc config parameters don't need to be checked.
return nil
} else {
return fmt.Errorf("rethdb path specified, but built without rethdb support")
}
}
if c.MaxConcurrentRequests < 1 {
return fmt.Errorf("expected at least 1 concurrent request, but max is %d", c.MaxConcurrentRequests)
}
......@@ -96,21 +107,14 @@ func (c *EthClientConfig) Check() error {
type EthClient struct {
client client.RPC
maxBatchSize int
recProvider ReceiptsProvider
trustRPC bool
mustBePostMerge bool
provKind RPCProviderKind
log log.Logger
// cache receipts in bundles per block hash
// We cache the receipts fetching job to not lose progress when we have to retry the `Fetch` call
// common.Hash -> *receiptsFetchingJob
receiptsCache *caching.LRUCache[common.Hash, *receiptsFetchingJob]
// cache transactions in bundles per block hash
// common.Hash -> types.Transactions
transactionsCache *caching.LRUCache[common.Hash, types.Transactions]
......@@ -122,46 +126,6 @@ type EthClient struct {
// cache payloads by hash
// common.Hash -> *eth.ExecutionPayload
payloadsCache *caching.LRUCache[common.Hash, *eth.ExecutionPayload]
// availableReceiptMethods tracks which receipt methods can be used for fetching receipts
// This may be modified concurrently, but we don't lock since it's a single
// uint64 that's not critical (fine to miss or mix up a modification)
availableReceiptMethods ReceiptsFetchingMethod
// lastMethodsReset tracks when availableReceiptMethods was last reset.
// When receipt-fetching fails it falls back to available methods,
// but periodically it will try to reset to the preferred optimal methods.
lastMethodsReset time.Time
// methodResetDuration defines how long we take till we reset lastMethodsReset
methodResetDuration time.Duration
// [OPTIONAL] The reth DB path to fetch receipts from
rethDbPath string
}
func (s *EthClient) PickReceiptsMethod(txCount uint64) ReceiptsFetchingMethod {
if now := time.Now(); now.Sub(s.lastMethodsReset) > s.methodResetDuration {
m := AvailableReceiptsFetchingMethods(s.provKind)
if s.availableReceiptMethods != m {
s.log.Warn("resetting back RPC preferences, please review RPC provider kind setting", "kind", s.provKind.String())
}
s.availableReceiptMethods = m
s.lastMethodsReset = now
}
return PickBestReceiptsFetchingMethod(s.provKind, s.availableReceiptMethods, txCount)
}
func (s *EthClient) OnReceiptsMethodErr(m ReceiptsFetchingMethod, err error) {
if unusableMethod(err) {
// clear the bit of the method that errored
s.availableReceiptMethods &^= m
s.log.Warn("failed to use selected RPC method for receipt fetching, temporarily falling back to alternatives",
"provider_kind", s.provKind, "failed_method", m, "fallback", s.availableReceiptMethods, "err", err)
} else {
s.log.Debug("failed to use selected RPC method for receipt fetching, but method does appear to be available, so we continue to use it",
"provider_kind", s.provKind, "failed_method", m, "fallback", s.availableReceiptMethods&^m, "err", err)
}
}
// NewEthClient returns an [EthClient], wrapping an RPC with bindings to fetch ethereum data with added error logging,
......@@ -170,22 +134,18 @@ func NewEthClient(client client.RPC, log log.Logger, metrics caching.Metrics, co
if err := config.Check(); err != nil {
return nil, fmt.Errorf("bad config, cannot create L1 source: %w", err)
}
client = LimitRPC(client, config.MaxConcurrentRequests)
recProvider := newRecProviderFromConfig(client, log, metrics, config)
return &EthClient{
client: client,
maxBatchSize: config.MaxRequestsPerBatch,
trustRPC: config.TrustRPC,
mustBePostMerge: config.MustBePostMerge,
provKind: config.RPCProviderKind,
log: log,
receiptsCache: caching.NewLRUCache[common.Hash, *receiptsFetchingJob](metrics, "receipts", config.ReceiptsCacheSize),
transactionsCache: caching.NewLRUCache[common.Hash, types.Transactions](metrics, "txs", config.TransactionsCacheSize),
headersCache: caching.NewLRUCache[common.Hash, eth.BlockInfo](metrics, "headers", config.HeadersCacheSize),
payloadsCache: caching.NewLRUCache[common.Hash, *eth.ExecutionPayload](metrics, "payloads", config.PayloadsCacheSize),
availableReceiptMethods: AvailableReceiptsFetchingMethods(config.RPCProviderKind),
lastMethodsReset: time.Now(),
methodResetDuration: config.MethodResetDuration,
rethDbPath: config.RethDBPath,
client: client,
recProvider: recProvider,
trustRPC: config.TrustRPC,
mustBePostMerge: config.MustBePostMerge,
log: log,
transactionsCache: caching.NewLRUCache[common.Hash, types.Transactions](metrics, "txs", config.TransactionsCacheSize),
headersCache: caching.NewLRUCache[common.Hash, eth.BlockInfo](metrics, "headers", config.HeadersCacheSize),
payloadsCache: caching.NewLRUCache[common.Hash, *eth.ExecutionPayload](metrics, "payloads", config.PayloadsCacheSize),
}, nil
}
......@@ -354,24 +314,21 @@ func (s *EthClient) PayloadByLabel(ctx context.Context, label eth.BlockLabel) (*
func (s *EthClient) FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error) {
info, txs, err := s.InfoAndTxsByHash(ctx, blockHash)
if err != nil {
return nil, nil, err
return nil, nil, fmt.Errorf("querying block: %w", err)
}
// Try to reuse the receipts fetcher because it caches the results of intermediate calls. This means
// that if just one of many calls fail, we only retry the failed call rather than all of the calls.
// The underlying fetcher uses the receipts hash to verify receipt integrity.
var job *receiptsFetchingJob
if v, ok := s.receiptsCache.Get(blockHash); ok {
job = v
} else {
txHashes := eth.TransactionsToHashes(txs)
job = NewReceiptsFetchingJob(s, s.client, s.maxBatchSize, eth.ToBlockID(info), info.ReceiptHash(), txHashes, s.rethDbPath)
s.receiptsCache.Add(blockHash, job)
}
receipts, err := job.Fetch(ctx)
txHashes, block := eth.TransactionsToHashes(txs), eth.ToBlockID(info)
receipts, err := s.recProvider.FetchReceipts(ctx, block, txHashes)
if err != nil {
return nil, nil, err
}
if !s.trustRPC {
if err := validateReceipts(block, info.ReceiptHash(), txHashes, receipts); err != nil {
return info, nil, fmt.Errorf("invalid receipts: %w", err)
}
}
return info, receipts, nil
}
......
......@@ -4,6 +4,7 @@ import (
"context"
crand "crypto/rand"
"math/big"
"math/rand"
"testing"
"github.com/stretchr/testify/mock"
......@@ -18,6 +19,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/sources/caching"
)
type mockRPC struct {
......@@ -177,3 +179,45 @@ func TestEthClient_WrongInfoByHash(t *testing.T) {
require.Error(t, err, "cannot accept the wrong block")
m.Mock.AssertExpectations(t)
}
// TestEthClient_validateReceipts checks that EthClient.FetchReceipts verifies
// the fetched receipts against the block's receipt hash when trustRPC is
// false, and rejects a receipt set that was tampered with.
func TestEthClient_validateReceipts(t *testing.T) {
	require := require.New(t)
	mrpc := new(mockRPC)
	mrp := new(mockReceiptsProvider)
	const numTxs = 4
	block, receipts := randomRpcBlockAndReceipts(rand.New(rand.NewSource(420)), numTxs)
	txHashes := receiptTxHashes(receipts)
	ctx := context.Background()
	// mutate a field to make validation fail.
	receipts[2].Bloom[0] = 1
	// The block itself is served by the RPC mock ...
	mrpc.On("CallContext", ctx, mock.AnythingOfType("**sources.rpcBlock"),
		"eth_getBlockByHash", []any{block.Hash, true}).
		Run(func(args mock.Arguments) {
			*(args[1].(**rpcBlock)) = block
		}).
		Return([]error{nil}).Once()
	// ... while the (corrupted) receipts come from the receipts-provider mock.
	mrp.On("FetchReceipts", ctx, block.BlockID(), txHashes).
		Return(types.Receipts(receipts), error(nil)).Once()
	ethcl := newEthClientWithCaches(nil, numTxs)
	ethcl.client = mrpc
	ethcl.recProvider = mrp
	// trustRPC = false enables receipt validation in FetchReceipts.
	ethcl.trustRPC = false
	_, _, err := ethcl.FetchReceipts(ctx, block.Hash)
	require.ErrorContains(err, "invalid receipts")
	mrpc.AssertExpectations(t)
	mrp.AssertExpectations(t)
}
// newEthClientWithCaches constructs a bare EthClient with only its LRU caches
// initialized, for tests that populate the remaining fields directly.
func newEthClientWithCaches(metrics caching.Metrics, cacheSize int) *EthClient {
	cl := new(EthClient)
	cl.transactionsCache = caching.NewLRUCache[common.Hash, types.Transactions](metrics, "txs", cacheSize)
	cl.headersCache = caching.NewLRUCache[common.Hash, eth.BlockInfo](metrics, "headers", cacheSize)
	cl.payloadsCache = caching.NewLRUCache[common.Hash, *eth.ExecutionPayload](metrics, "payloads", cacheSize)
	return cl
}
This diff is collapsed.
package sources
import (
"context"
"io"
"sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/sources/batching"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rpc"
)
// receiptsBatchCall iteratively fetches one receipt per transaction hash via
// batched `eth_getTransactionReceipt` requests.
type receiptsBatchCall = batching.IterativeBatchCall[common.Hash, *types.Receipt]
// BasicRPCReceiptsFetcher fetches receipts over RPC, one receipt per
// transaction hash, using batched requests. Uncompleted batch calls are cached
// per block hash so a retried fetch resumes instead of starting over.
type BasicRPCReceiptsFetcher struct {
	client       rpcClient
	maxBatchSize int
	// calls caches uncompleted batch calls
	calls   map[common.Hash]*receiptsBatchCall
	callsMu sync.Mutex
}
// NewBasicRPCReceiptsFetcher creates a BasicRPCReceiptsFetcher that issues
// batched eth_getTransactionReceipt requests of at most maxBatchSize elements
// over the given RPC client.
func NewBasicRPCReceiptsFetcher(client rpcClient, maxBatchSize int) *BasicRPCReceiptsFetcher {
	fetcher := &BasicRPCReceiptsFetcher{
		client:       client,
		maxBatchSize: maxBatchSize,
		calls:        make(map[common.Hash]*receiptsBatchCall),
	}
	return fetcher
}
// FetchReceipts retrieves the receipts for the given block's transactions via
// batched eth_getTransactionReceipt requests. A partially completed batch call
// stays cached on error, so a retry only re-requests the receipts that are
// still missing; the cache entry is removed once the full result is available.
func (f *BasicRPCReceiptsFetcher) FetchReceipts(ctx context.Context, block eth.BlockID, txHashes []common.Hash) (types.Receipts, error) {
	call := f.getOrCreateBatchCall(block.Hash, txHashes)

	// Drive the iterative call until it signals completion with io.EOF.
	for {
		err := call.Fetch(ctx)
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
	}

	receipts, err := call.Result()
	if err != nil {
		return nil, err
	}
	// Success: the cached call is no longer needed.
	f.deleteBatchCall(block.Hash)
	return receipts, nil
}
// getOrCreateBatchCall returns the cached batch call for blockHash, creating
// and caching a fresh one if none exists yet.
func (f *BasicRPCReceiptsFetcher) getOrCreateBatchCall(blockHash common.Hash, txHashes []common.Hash) *receiptsBatchCall {
	f.callsMu.Lock()
	defer f.callsMu.Unlock()
	call, ok := f.calls[blockHash]
	if !ok {
		call = batching.NewIterativeBatchCall[common.Hash, *types.Receipt](
			txHashes,
			makeReceiptRequest,
			f.client.BatchCallContext,
			f.client.CallContext,
			f.maxBatchSize,
		)
		f.calls[blockHash] = call
	}
	return call
}
// deleteBatchCall removes the cached batch call for blockHash, if present.
func (f *BasicRPCReceiptsFetcher) deleteBatchCall(blockHash common.Hash) {
	f.callsMu.Lock()
	delete(f.calls, blockHash)
	f.callsMu.Unlock()
}
// makeReceiptRequest builds the batch element requesting the receipt for
// txHash, together with the receipt value the response is decoded into.
func makeReceiptRequest(txHash common.Hash) (*types.Receipt, rpc.BatchElem) {
	receipt := new(types.Receipt)
	elem := rpc.BatchElem{
		Method: "eth_getTransactionReceipt",
		Args:   []any{txHash},
		Result: &receipt, // receipt may become nil, double pointer is intentional
	}
	return receipt, elem
}
package sources
import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)
// simpleMockRPC is needed for some tests where the return value dynamically
// depends on the input, so that the test can set the function.
type simpleMockRPC struct {
	// callFn handles single CallContext invocations.
	callFn func(ctx context.Context, result any, method string, args ...any) error
	// batchCallFn handles BatchCallContext invocations.
	batchCallFn func(ctx context.Context, b []rpc.BatchElem) error
}
// CallContext delegates to the test-configured callFn.
func (m *simpleMockRPC) CallContext(ctx context.Context, result any, method string, args ...any) error {
	fn := m.callFn
	return fn(ctx, result, method, args...)
}
// BatchCallContext delegates to the test-configured batchCallFn.
func (m *simpleMockRPC) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error {
	fn := m.batchCallFn
	return fn(ctx, b)
}
// TestBasicRPCReceiptsFetcher_Reuse checks that a partially failed receipts
// fetch is resumed on retry: only the previously failed receipts are
// re-requested, while already-fetched intermediate results are reused.
func TestBasicRPCReceiptsFetcher_Reuse(t *testing.T) {
	require := require.New(t)
	batchSize, txCount := 2, uint64(4)
	block, receipts := randomRpcBlockAndReceipts(rand.New(rand.NewSource(123)), txCount)
	blockid := block.BlockID()
	txHashes := make([]common.Hash, 0, len(receipts))
	recMap := make(map[common.Hash]*types.Receipt, len(receipts))
	for _, rec := range receipts {
		txHashes = append(txHashes, rec.TxHash)
		recMap[rec.TxHash] = rec
	}
	mrpc := new(simpleMockRPC)
	rp := NewBasicRPCReceiptsFetcher(mrpc, batchSize)
	// prepare mock
	ctx, done := context.WithTimeout(context.Background(), 10*time.Second)
	defer done()
	// 1st fetching
	// response controls, per tx hash, whether the mock serves the receipt or fails.
	response := map[common.Hash]bool{
		txHashes[0]: true,
		txHashes[1]: true,
		txHashes[2]: false,
		txHashes[3]: false,
	}
	var numCalls int
	mrpc.batchCallFn = func(_ context.Context, b []rpc.BatchElem) (err error) {
		numCalls++
		for i, el := range b {
			if el.Method == "eth_getTransactionReceipt" {
				txHash := el.Args[0].(common.Hash)
				if response[txHash] {
					// The IterativeBatchCall expects that the values are written
					// to the fields of the allocated *types.Receipt.
					**(el.Result.(**types.Receipt)) = *recMap[txHash]
				} else {
					err = errors.Join(err, fmt.Errorf("receipt[%d] error, hash %x", i, txHash))
				}
			} else {
				err = errors.Join(err, fmt.Errorf("unknown method %s", el.Method))
			}
		}
		return err
	}
	// 1st fetching should result in errors
	recs, err := rp.FetchReceipts(ctx, blockid, txHashes)
	require.Error(err)
	require.Nil(recs)
	// 4 txs at batch size 2 -> 2 batch calls for the first fetch.
	require.Equal(2, numCalls)
	// prepare 2nd fetching - all should succeed now
	response[txHashes[2]] = true
	response[txHashes[3]] = true
	recs, err = rp.FetchReceipts(ctx, blockid, txHashes)
	require.NoError(err)
	require.NotNil(recs)
	for i, rec := range recs {
		requireEqualReceipt(t, receipts[i], rec)
	}
	// Only one more batch call: just the 2 previously failed receipts were re-requested.
	require.Equal(3, numCalls)
}
// TestBasicRPCReceiptsFetcher_Concurrency checks that concurrent fetches for
// the same block share the underlying IterativeBatchCall instead of each
// fetcher issuing its own full set of batch requests.
func TestBasicRPCReceiptsFetcher_Concurrency(t *testing.T) {
	require := require.New(t)
	const numFetchers = 32
	batchSize, txCount := 4, uint64(18) // 4.5 * 4
	block, receipts := randomRpcBlockAndReceipts(rand.New(rand.NewSource(123)), txCount)
	recMap := make(map[common.Hash]*types.Receipt, len(receipts))
	for _, rec := range receipts {
		recMap[rec.TxHash] = rec
	}
	mrpc := new(mockRPC)
	rp := NewBasicRPCReceiptsFetcher(mrpc, batchSize)
	// prepare mock
	// numCalls is incremented from the mock's Run callback, which may execute
	// concurrently from many fetcher goroutines, so it must be atomic to avoid
	// a data race under the race detector.
	var numCalls atomic.Int64
	mrpc.On("BatchCallContext", mock.Anything, mock.AnythingOfType("[]rpc.BatchElem")).
		Run(func(args mock.Arguments) {
			numCalls.Add(1)
			els := args.Get(1).([]rpc.BatchElem)
			for _, el := range els {
				if el.Method == "eth_getTransactionReceipt" {
					txHash := el.Args[0].(common.Hash)
					// The IterativeBatchCall expects that the values are written
					// to the fields of the allocated *types.Receipt.
					**(el.Result.(**types.Receipt)) = *recMap[txHash]
				}
			}
		}).
		Return([]error{nil})
	runConcurrentFetchingTest(t, rp, numFetchers, receipts, block)
	mrpc.AssertExpectations(t)
	require.NotZero(numCalls.Load(), "BatchCallContext should have been called.")
	require.Less(int(numCalls.Load()), numFetchers, "Some IterativeBatchCalls should have been shared.")
}
// runConcurrentFetchingTest starts numFetchers goroutines that concurrently
// fetch the same block's receipts from rp, and asserts that every fetcher
// receives the complete, correct receipt set without error.
func runConcurrentFetchingTest(t *testing.T, rp ReceiptsProvider, numFetchers int, receipts types.Receipts, block *rpcBlock) {
	require := require.New(t)
	txHashes := receiptTxHashes(receipts)
	// start n fetchers
	type fetchResult struct {
		rs  types.Receipts
		err error
	}
	fetchResults := make(chan fetchResult, numFetchers)
	barrier := make(chan struct{})
	ctx, done := context.WithTimeout(context.Background(), 10*time.Second)
	defer done()
	for i := 0; i < numFetchers; i++ {
		go func() {
			// Block on the barrier so all fetchers start as simultaneously as possible.
			<-barrier
			recs, err := rp.FetchReceipts(ctx, block.BlockID(), txHashes)
			fetchResults <- fetchResult{rs: recs, err: err}
		}()
	}
	close(barrier) // Go!
	// assert results
	for i := 0; i < numFetchers; i++ {
		select {
		case f := <-fetchResults:
			require.NoError(f.err)
			require.Len(f.rs, len(receipts))
			for j, r := range receipts {
				requireEqualReceipt(t, r, f.rs[j])
			}
		case <-ctx.Done():
			t.Fatal("Test timeout")
		}
	}
}
// receiptTxHashes extracts the transaction hash of each receipt, in order.
func receiptTxHashes(receipts types.Receipts) []common.Hash {
	hashes := make([]common.Hash, len(receipts))
	for i, rec := range receipts {
		hashes[i] = rec.TxHash
	}
	return hashes
}
package sources
import (
"context"
"sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/sources/caching"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// A CachingReceiptsProvider caches successful receipt fetches from the inner
// ReceiptsProvider. It also avoids duplicate in-flight requests per block hash.
type CachingReceiptsProvider struct {
	// inner is the wrapped provider that performs the actual fetching.
	inner ReceiptsProvider
	// cache holds successfully fetched receipts, keyed by block hash.
	cache *caching.LRUCache[common.Hash, types.Receipts]
	// lock fetching process for each block hash to avoid duplicate requests
	fetching   map[common.Hash]*sync.Mutex
	fetchingMu sync.Mutex // only protects map
}
// NewCachingReceiptsProvider wraps inner with an LRU receipts cache of the
// given size, registering cache metrics with m (which may be nil).
func NewCachingReceiptsProvider(inner ReceiptsProvider, m caching.Metrics, cacheSize int) *CachingReceiptsProvider {
	p := &CachingReceiptsProvider{
		inner:    inner,
		cache:    caching.NewLRUCache[common.Hash, types.Receipts](m, "receipts", cacheSize),
		fetching: make(map[common.Hash]*sync.Mutex),
	}
	return p
}
// NewCachingRPCReceiptsProvider creates an RPC-based receipts fetcher wrapped
// in a caching layer.
func NewCachingRPCReceiptsProvider(client rpcClient, log log.Logger, config RPCReceiptsConfig, m caching.Metrics, cacheSize int) *CachingReceiptsProvider {
	inner := NewRPCReceiptsFetcher(client, log, config)
	return NewCachingReceiptsProvider(inner, m, cacheSize)
}
// getOrCreateFetchingLock returns the per-block fetching lock for blockHash,
// creating it on first use.
func (p *CachingReceiptsProvider) getOrCreateFetchingLock(blockHash common.Hash) *sync.Mutex {
	p.fetchingMu.Lock()
	defer p.fetchingMu.Unlock()
	mu, ok := p.fetching[blockHash]
	if !ok {
		mu = new(sync.Mutex)
		p.fetching[blockHash] = mu
	}
	return mu
}
// deleteFetchingLock drops the per-block fetching lock for blockHash, if present.
func (p *CachingReceiptsProvider) deleteFetchingLock(blockHash common.Hash) {
	p.fetchingMu.Lock()
	delete(p.fetching, blockHash)
	p.fetchingMu.Unlock()
}
// FetchReceipts returns the cached receipts for block if available, and
// otherwise fetches them via the inner provider, caching a successful result.
// Concurrent fetches for the same block hash are serialized on a per-block
// mutex so at most one inner request per block is in flight at a time.
func (p *CachingReceiptsProvider) FetchReceipts(ctx context.Context, block eth.BlockID, txHashes []common.Hash) (types.Receipts, error) {
	// Fast path: cache hit without touching any fetching lock.
	if r, ok := p.cache.Get(block.Hash); ok {
		return r, nil
	}
	mu := p.getOrCreateFetchingLock(block.Hash)
	mu.Lock()
	defer mu.Unlock()
	// Other routine might have fetched in the meantime
	if r, ok := p.cache.Get(block.Hash); ok {
		// we might have created a new lock above while the old
		// fetching job completed.
		p.deleteFetchingLock(block.Hash)
		return r, nil
	}
	r, err := p.inner.FetchReceipts(ctx, block, txHashes)
	if err != nil {
		// NOTE(review): the fetching-lock map entry is kept on error, so a
		// retry reuses the same lock — presumably intentional; confirm that
		// failed block hashes are eventually retried or evicted.
		return nil, err
	}
	p.cache.Add(block.Hash, r)
	// result now in cache, can delete fetching lock
	p.deleteFetchingLock(block.Hash)
	return r, nil
}
package sources
import (
"context"
"math/rand"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// mockReceiptsProvider is a testify mock implementing the ReceiptsProvider interface.
type mockReceiptsProvider struct {
	mock.Mock
}
// FetchReceipts records the call and returns the mock-configured receipts and error.
func (m *mockReceiptsProvider) FetchReceipts(ctx context.Context, block eth.BlockID, txHashes []common.Hash) (types.Receipts, error) {
	out := m.Called(ctx, block, txHashes)
	recs := out.Get(0).(types.Receipts)
	return recs, out.Error(1)
}
// TestCachingReceiptsProvider_Caching checks that receipts for a block are
// fetched from the inner provider exactly once and afterwards served from the
// cache.
func TestCachingReceiptsProvider_Caching(t *testing.T) {
	block, receipts := randomRpcBlockAndReceipts(rand.New(rand.NewSource(69)), 4)
	txHashes := receiptTxHashes(receipts)
	blockid := block.BlockID()
	inner := new(mockReceiptsProvider)
	provider := NewCachingReceiptsProvider(inner, nil, 1)
	ctx, done := context.WithTimeout(context.Background(), 10*time.Second)
	defer done()
	// The inner provider must only see a single fetch; every later call has to
	// be served from the cache.
	inner.On("FetchReceipts", ctx, blockid, txHashes).
		Return(types.Receipts(receipts), error(nil)).
		Once()
	for fetch := 0; fetch < 4; fetch++ {
		gotRecs, err := provider.FetchReceipts(ctx, blockid, txHashes)
		require.NoError(t, err)
		for j, gotRec := range gotRecs {
			requireEqualReceipt(t, receipts[j], gotRec)
		}
	}
	inner.AssertExpectations(t)
}
// TestCachingReceiptsProvider_Concurrency checks that many concurrent fetches
// for the same block trigger only a single fetch on the inner provider.
func TestCachingReceiptsProvider_Concurrency(t *testing.T) {
	block, receipts := randomRpcBlockAndReceipts(rand.New(rand.NewSource(69)), 4)
	txHashes := receiptTxHashes(receipts)
	blockid := block.BlockID()
	inner := new(mockReceiptsProvider)
	provider := NewCachingReceiptsProvider(inner, nil, 1)
	// A single inner fetch must serve all concurrent callers.
	inner.On("FetchReceipts", mock.Anything, blockid, txHashes).
		Return(types.Receipts(receipts), error(nil)).
		Once()
	runConcurrentFetchingTest(t, provider, 32, receipts, block)
	inner.AssertExpectations(t)
}
This diff is collapsed.
......@@ -171,15 +171,13 @@ func (tc *ReceiptsTestCase) Run(t *testing.T) {
for i, req := range requests {
info, result, err := ethCl.FetchReceipts(context.Background(), block.Hash)
if err == nil {
require.Nil(t, req.err, "error")
require.NoError(t, req.err, "error")
require.Equal(t, block.Hash, info.Hash(), fmt.Sprintf("req %d blockhash", i))
expectedJson, err := json.MarshalIndent(req.result, "", " ")
require.NoError(t, err)
gotJson, err := json.MarshalIndent(result, "", " ")
require.NoError(t, err)
require.Equal(t, string(expectedJson), string(gotJson), fmt.Sprintf("req %d result", i))
for j, rec := range req.result {
requireEqualReceipt(t, rec, result[j], "req %d result %d", i, j)
}
} else {
require.NotNil(t, req.err, "error")
require.Error(t, req.err, "error")
require.Equal(t, req.err.Error(), err.Error(), fmt.Sprintf("req %d err", i))
}
}
......@@ -570,3 +568,12 @@ func TestVerifyReceipts(t *testing.T) {
require.ErrorContains(t, err, "must never be removed due to reorg")
})
}
// requireEqualReceipt asserts that two receipts are equal by comparing their
// pretty-printed JSON encodings, which gives a readable diff on mismatch.
func requireEqualReceipt(t *testing.T, exp, act *types.Receipt, msgAndArgs ...any) {
	t.Helper()
	marshal := func(r *types.Receipt) string {
		j, err := json.MarshalIndent(r, "", " ")
		require.NoError(t, err, msgAndArgs...)
		return string(j)
	}
	require.Equal(t, marshal(exp), marshal(act), msgAndArgs...)
}
......@@ -3,13 +3,17 @@
package sources
import (
"context"
"encoding/json"
"fmt"
"unsafe"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/sources/caching"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
/*
......@@ -63,3 +67,35 @@ func FetchRethReceipts(dbPath string, blockHash *common.Hash) (types.Receipts, e
return receipts, nil
}
// RethDBReceiptsFetcher is a receipts provider that reads receipts directly
// from a local Reth database, identified by its filesystem path.
type RethDBReceiptsFetcher struct {
	dbPath string
	// TODO(8225): Now that we have reading from a Reth DB encapsulated here,
	// We could store a reference to the RethDB here instead of just a db path,
	// which would be more optimal.
	// We could move the opening of the RethDB and creation of the db reference
	// into NewRethDBReceiptsFetcher.
}
// NewRethDBReceiptsFetcher creates a receipts fetcher reading from the Reth DB
// at dbPath.
func NewRethDBReceiptsFetcher(dbPath string) *RethDBReceiptsFetcher {
	f := new(RethDBReceiptsFetcher)
	f.dbPath = dbPath
	return f
}
// FetchReceipts reads the block's receipts directly from the Reth DB. The
// lookup is by block hash only; the context and transaction hashes are unused.
func (f *RethDBReceiptsFetcher) FetchReceipts(ctx context.Context, block eth.BlockID, txHashes []common.Hash) (types.Receipts, error) {
	hash := block.Hash
	return FetchRethReceipts(f.dbPath, &hash)
}
// NewCachingRethDBReceiptsFetcher creates a Reth DB receipts fetcher wrapped
// in a caching layer.
func NewCachingRethDBReceiptsFetcher(dbPath string, m caching.Metrics, cacheSize int) *CachingReceiptsProvider {
	inner := NewRethDBReceiptsFetcher(dbPath)
	return NewCachingReceiptsProvider(inner, m, cacheSize)
}
// buildRethdb signals that this binary was built with the `rethdb` build tag,
// i.e. Reth DB receipt reading is available.
const buildRethdb = true
// newRecProviderFromConfig selects the receipts provider: if a Reth DB path is
// configured, receipts are read directly from the Reth DB; otherwise an
// RPC-based provider is used.
func newRecProviderFromConfig(client client.RPC, log log.Logger, metrics caching.Metrics, config *EthClientConfig) *CachingReceiptsProvider {
	if dbPath := config.RethDBPath; dbPath != "" {
		return NewCachingRethDBReceiptsFetcher(dbPath, metrics, config.ReceiptsCacheSize)
	}
	return newRPCRecProviderFromConfig(client, log, metrics, config)
}
......@@ -3,11 +3,13 @@
package sources
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/sources/caching"
"github.com/ethereum/go-ethereum/log"
)
// FetchRethReceipts stub; Not available without `rethdb` build tag.
func FetchRethReceipts(dbPath string, blockHash *common.Hash) (types.Receipts, error) {
panic("unimplemented! Did you forget to enable the `rethdb` build tag?")
// buildRethdb signals that this binary was built without the `rethdb` build
// tag; a configured RethDBPath is rejected at config-check time.
const buildRethdb = false
// newRecProviderFromConfig always creates the RPC-based receipts provider,
// since Reth DB support is not compiled in.
func newRecProviderFromConfig(client client.RPC, log log.Logger, metrics caching.Metrics, config *EthClientConfig) *CachingReceiptsProvider {
	return newRPCRecProviderFromConfig(client, log, metrics, config)
}
......@@ -190,6 +190,13 @@ func (hdr *rpcHeader) Info(trustCache bool, mustBePostMerge bool) (eth.BlockInfo
return &headerInfo{hdr.Hash, hdr.createGethHeader()}, nil
}
// BlockID returns the header's hash and number as an eth.BlockID.
func (hdr *rpcHeader) BlockID() eth.BlockID {
	id := eth.BlockID{Hash: hdr.Hash}
	id.Number = uint64(hdr.Number)
	return id
}
type rpcBlock struct {
rpcHeader
Transactions []*types.Transaction `json:"transactions"`
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment