Commit 93b66b10 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into aj/fpp-caching

parents 319421c0 fdcb651b
......@@ -61,7 +61,7 @@ jobs:
yarn-monorepo:
docker:
- image: ethereumoptimism/ci-builder:latest
resource_class: xlarge
resource_class: large
steps:
- checkout
- check-changed:
......@@ -142,7 +142,7 @@ jobs:
default: "oplabs-tools-artifacts/images"
machine:
image: ubuntu-2204:2022.07.1
resource_class: xlarge
resource_class: medium
steps:
- checkout
- run:
......@@ -207,7 +207,7 @@ jobs:
default: "linux/amd64"
machine:
image: ubuntu-2204:2022.07.1
resource_class: xlarge
resource_class: medium
steps:
- gcp-oidc-authenticate
# Below is CircleCI recommended way of specifying nameservers on an Ubuntu box:
......@@ -261,7 +261,7 @@ jobs:
default: "linux/amd64"
machine:
image: ubuntu-2204:2022.07.1
resource_class: xlarge
resource_class: medium
steps:
- gcp-cli/install
- gcp-oidc-authenticate
......@@ -379,7 +379,7 @@ jobs:
contracts-bedrock-slither:
docker:
- image: ethereumoptimism/ci-builder:latest
resource_class: xlarge
resource_class: large
steps:
- checkout
- attach_workspace: { at: "." }
......@@ -900,7 +900,7 @@ jobs:
docker:
- image: returntocorp/semgrep
resource_class: xlarge
resource_class: medium
steps:
- checkout
- unless:
......@@ -942,7 +942,7 @@ jobs:
machine:
image: ubuntu-2204:2022.07.1
docker_layer_caching: true
resource_class: xlarge
resource_class: large
steps:
- attach_workspace:
at: /tmp/docker_images
......
......@@ -28,12 +28,11 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
signer := types.LatestSigner(sd.L2Cfg.Config)
cl := sequencerEngine.EthClient()
aliceNonce := uint64(0) // manage the nonce manually, to avoid flakiness from geth's non-deterministic pending-tx nonce
aliceTx := func() {
n, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
require.NoError(t, err)
tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
Nonce: aliceNonce,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
......@@ -41,6 +40,7 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
Value: e2eutils.Ether(2),
})
require.NoError(gt, cl.SendTransaction(t.Ctx(), tx))
aliceNonce += 1
}
makeL2BlockWithAliceTx := func() {
aliceTx()
......
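The `aliceNonce` counter above replaces the per-send `PendingNonceAt` query. A minimal, self-contained sketch of the same pattern, assuming go-ethereum's client types (the `sendN` helper is hypothetical, not part of this diff):

```go
package example

import (
	"context"
	"crypto/ecdsa"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/params"
)

// sendN submits n value transfers, tracking the nonce locally so repeated
// runs are deterministic even if the node's pending pool lags or reorders.
func sendN(ctx context.Context, cl *ethclient.Client, key *ecdsa.PrivateKey, chainID *big.Int, to common.Address, n int) error {
	signer := types.LatestSignerForChainID(chainID)
	nonce := uint64(0) // assumes a fresh sender account, as in the test above
	for i := 0; i < n; i++ {
		tx := types.MustSignNewTx(key, signer, &types.DynamicFeeTx{
			ChainID:   chainID,
			Nonce:     nonce, // local counter: no PendingNonceAt round-trips
			GasTipCap: big.NewInt(2 * params.GWei),
			GasFeeCap: big.NewInt(20 * params.GWei),
			Gas:       params.TxGas,
			To:        &to,
			Value:     big.NewInt(1),
		})
		if err := cl.SendTransaction(ctx, tx); err != nil {
			return err // do not advance the nonce on a failed submission
		}
		nonce++ // advance only after a successful submission
	}
	return nil
}
```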
......@@ -49,3 +49,46 @@ func TestDerivationWithFlakyL1RPC(gt *testing.T) {
// Verifier should be synced, even though it hit lots of temporary L1 RPC errors
require.Equal(t, sequencer.L2Unsafe(), verifier.L2Safe(), "verifier is synced")
}
func TestFinalizeWhileSyncing(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) // mute all the temporary derivation errors that we forcefully create
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
verifierStartStatus := verifier.SyncStatus()
// Build an L1 chain with 64 + 1 blocks, containing batches of L2 chain.
// Enough to go past the finalityDelay of the engine queue,
// to make the verifier finalize while it syncs.
miner.ActEmptyBlock(t)
for i := 0; i < 64+1; i++ {
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t)
sequencer.ActBuildToL1Head(t)
batcher.ActSubmitAll(t)
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(batcher.batcherAddr)(t)
miner.ActL1EndBlock(t)
}
l1Head := miner.l1Chain.CurrentHeader()
// finalize all of L1
miner.ActL1Safe(t, l1Head.Number.Uint64())
miner.ActL1Finalize(t, l1Head.Number.Uint64())
// Now signal L1 finality to the verifier, while the verifier is not synced.
verifier.ActL1HeadSignal(t)
verifier.ActL1SafeSignal(t)
verifier.ActL1FinalizedSignal(t)
// Now sync the verifier, without repeating the signal.
// While it's syncing, it should now finalize on an interval, based on the future finalized L1 block it remembered.
verifier.ActL2PipelineFull(t)
// Verify the verifier finalized something new
require.Less(t, verifierStartStatus.FinalizedL2.Number, verifier.SyncStatus().FinalizedL2.Number, "verifier finalized L2 blocks during sync")
}
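The 64 + 1 loop above is sized against the finalityDelay constant introduced in the engine queue below: the verifier only re-attempts finalization after 64 more L1 origins have been traversed, so building 65 origins guarantees at least one attempt while syncing. A paraphrase of that gating predicate (illustrative, not the literal source):

```go
// finalizeDue mirrors the check in tryFinalizePastL2Blocks: skip unless we
// have traversed more than finalityDelay origins since the last attempt.
func finalizeDue(originNum, triedFinalizeAtNum uint64) bool {
	const finalityDelay = 64
	return originNum > triedFinalizeAtNum+finalityDelay
}
```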
......@@ -76,6 +76,10 @@ const maxUnsafePayloadsMemory = 500 * 1024 * 1024
// And then we add 1 to make pruning easier by leaving room for a new item without pruning the 32*4.
const finalityLookback = 4*32 + 1
// finalityDelay is the number of L1 blocks to traverse before trying to finalize L2 blocks again.
// We do not want to do this too often, since each attempt requires fetching an L1 block by number, which is not served from cache.
const finalityDelay = 64
type FinalityData struct {
// The last L2 block that was fully derived and inserted into the L2 engine while processing this L1 block.
L2Block eth.L2BlockRef
......@@ -102,8 +106,13 @@ type EngineQueue struct {
// This update may repeat if the engine returns a temporary error.
needForkchoiceUpdate bool
// finalizedL1 is the currently perceived finalized L1 block.
// This may be ahead of the current traversed origin when syncing.
finalizedL1 eth.L1BlockRef
// triedFinalizeAt tracks at which origin we last tried to finalize during sync.
triedFinalizeAt eth.L1BlockRef
// The queued-up attributes
safeAttributesParent eth.L2BlockRef
safeAttributes *eth.PayloadAttributes
......@@ -171,17 +180,23 @@ func (eq *EngineQueue) Finalize(l1Origin eth.L1BlockRef) {
eq.log.Error("ignoring old L1 finalized block signal! Is the L1 provider corrupted?", "prev_finalized_l1", eq.finalizedL1, "signaled_finalized_l1", l1Origin)
return
}
// Perform a safety check: the L1 finalization signal is only accepted if we previously processed the L1 block.
// This prevents a corrupt L1 provider from tricking us into recognizing an L1 block inconsistent with the L1 chain we are on.
// Missing a finality signal due to an empty buffer is fine; it will finalize when the buffer is filled again.
// remember the L1 finalization signal
eq.finalizedL1 = l1Origin
// Sanity check: we only try to finalize L2 immediately, without fetching additional data,
// if we are on the same chain as the signal.
// If we are on a different chain, the signal will be ignored here,
// and tryFinalizeL1Origin() will eventually detect that we are on the wrong chain,
// unless a reorg elsewhere has already triggered a reset.
for _, fd := range eq.finalityData {
if fd.L1Block == l1Origin.ID() {
eq.finalizedL1 = l1Origin
eq.tryFinalizeL2()
return
}
}
eq.log.Warn("ignoring finalization signal for unknown L1 block, waiting for new L1 blocks in buffer", "prev_finalized_l1", eq.finalizedL1, "signaled_finalized_l1", l1Origin)
eq.log.Info("received L1 finality signal, but missing data for immediate L2 finalization", "prev_finalized_l1", eq.finalizedL1, "signaled_finalized_l1", l1Origin)
}
// FinalizedL1 identifies the L1 chain (incl.) that included and/or produced all the finalized L2 blocks.
......@@ -217,6 +232,10 @@ func (eq *EngineQueue) Step(ctx context.Context) error {
}
eq.origin = newOrigin
eq.postProcessSafeL2() // make sure we track the last L2 safe head for every new L1 block
// try to finalize the L2 blocks we have synced so far (no-op if L1 finality is behind)
if err := eq.tryFinalizePastL2Blocks(ctx); err != nil {
return err
}
if next, err := eq.prev.NextAttributes(ctx, eq.safeHead); err == io.EOF {
outOfData = true
} else if err != nil {
......@@ -271,6 +290,38 @@ func (eq *EngineQueue) verifyNewL1Origin(ctx context.Context, newOrigin eth.L1Bl
return nil
}
func (eq *EngineQueue) tryFinalizePastL2Blocks(ctx context.Context) error {
if eq.finalizedL1 == (eth.L1BlockRef{}) {
return nil
}
// If L1 is finalized beyond the point we are traversing (e.g. during sync),
// then we should check if we can finalize the L1 block we are traversing.
// Otherwise there is nothing to act on here; we will finalize later, on a new finality signal that matches recent history.
if eq.finalizedL1.Number < eq.origin.Number {
return nil
}
// If we recently tried finalizing, then don't try again just yet, but traverse more of L1 first.
if eq.triedFinalizeAt != (eth.L1BlockRef{}) && eq.origin.Number <= eq.triedFinalizeAt.Number+finalityDelay {
return nil
}
eq.log.Info("processing L1 finality information", "l1_finalized", eq.finalizedL1, "l1_origin", eq.origin, "previous", eq.triedFinalizeAt)
// Sanity check we are indeed on the finalizing chain, and not stuck on something else.
// We assume that the block-by-number query is consistent with the previously received finalized chain signal.
ref, err := eq.l1Fetcher.L1BlockRefByNumber(ctx, eq.origin.Number)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to check if on finalizing L1 chain: %w", err))
}
if ref.Hash != eq.origin.Hash {
return NewResetError(fmt.Errorf("need to reset, we are on %s, not on the finalizing L1 chain %s (towards %s)", eq.origin, ref, eq.finalizedL1))
}
eq.tryFinalizeL2()
return nil
}
// tryFinalizeL2 traverses the past L1 blocks, checks if any has been finalized,
// and then marks the latest fully derived L2 block from this as finalized,
// or defaults to the current finalized L2 block.
......@@ -278,6 +329,7 @@ func (eq *EngineQueue) tryFinalizeL2() {
if eq.finalizedL1 == (eth.L1BlockRef{}) {
return // if no L1 information is finalized yet, then skip this
}
eq.triedFinalizeAt = eq.origin
// default to keep the same finalized block
finalizedL2 := eq.finalized
// go through the latest inclusion data, and find the last L2 block that was derived from a finalized L1 block
......@@ -668,6 +720,7 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System
eq.resetBuildingState()
eq.needForkchoiceUpdate = true
eq.finalityData = eq.finalityData[:0]
// note: finalizedL1 and triedFinalizeAt do not reset, since these do not change between reorgs.
// note: we do not clear the unsafe payloads queue; if the payloads are not applicable anymore the parent hash checks will clear out the old payloads.
eq.origin = pipelineOrigin
eq.sysCfg = l1Cfg
......
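Putting the engine-queue changes together: the finality signal is now remembered even when it points ahead of the traversed origin, and each Step decides whether a finalize attempt is due. A compact toy model of that control flow, using plain uint64 stand-ins for the eth.L1BlockRef values (illustrative only; the real code also verifies the chain by hash before finalizing):

```go
// engineQueueModel is a toy: zero values stand in for "unset" block refs.
type engineQueueModel struct {
	origin          uint64 // L1 block currently being traversed
	finalizedL1     uint64 // remembered finality signal (may be ahead of origin)
	triedFinalizeAt uint64 // origin of the last finalize attempt
}

const finalityDelayModel = 64

func (eq *engineQueueModel) step(tryFinalizeL2 func()) {
	eq.origin++ // advance to the next L1 origin
	if eq.finalizedL1 == 0 || eq.finalizedL1 < eq.origin {
		return // no signal yet, or finality is behind us: nothing to do
	}
	if eq.triedFinalizeAt != 0 && eq.origin <= eq.triedFinalizeAt+finalityDelayModel {
		return // throttled: traverse more L1 before trying again
	}
	// real code: fetch L1BlockRefByNumber(origin) and reset if the hash mismatches
	eq.triedFinalizeAt = eq.origin
	tryFinalizeL2()
}
```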
......@@ -24,6 +24,7 @@ type IterativeBatchCall[K any, V any] struct {
makeRequest func(K) (V, rpc.BatchElem)
getBatch BatchCallContextFn
getSingle CallContextFn
requestsValues []V
scheduled chan rpc.BatchElem
......@@ -35,6 +36,7 @@ func NewIterativeBatchCall[K any, V any](
requestsKeys []K,
makeRequest func(K) (V, rpc.BatchElem),
getBatch BatchCallContextFn,
getSingle CallContextFn,
batchSize int) *IterativeBatchCall[K, V] {
if len(requestsKeys) < batchSize {
......@@ -47,6 +49,7 @@ func NewIterativeBatchCall[K any, V any](
out := &IterativeBatchCall[K, V]{
completed: 0,
getBatch: getBatch,
getSingle: getSingle,
requestsKeys: requestsKeys,
batchSize: batchSize,
makeRequest: makeRequest,
......@@ -84,6 +87,11 @@ func (ibc *IterativeBatchCall[K, V]) Fetch(ctx context.Context) error {
ibc.resetLock.RLock()
defer ibc.resetLock.RUnlock()
// return early if context is Done
if ctx.Err() != nil {
return ctx.Err()
}
// collect a batch from the requests channel
batch := make([]rpc.BatchElem, 0, ibc.batchSize)
// wait for first element
......@@ -119,12 +127,24 @@ func (ibc *IterativeBatchCall[K, V]) Fetch(ctx context.Context) error {
break
}
if len(batch) == 0 {
return nil
}
if ibc.batchSize == 1 {
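// Unbatched mode: issue a plain RPC call for the single element; on failure it is re-scheduled so a later Fetch retries it.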
first := batch[0]
if err := ibc.getSingle(ctx, &first.Result, first.Method, first.Args...); err != nil {
ibc.scheduled <- first
return err
}
} else {
if err := ibc.getBatch(ctx, batch); err != nil {
for _, r := range batch {
ibc.scheduled <- r
}
return fmt.Errorf("failed batch-retrieval: %w", err)
}
}
var result error
for _, elem := range batch {
if elem.Error != nil {
......
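With the new getSingle parameter, a batchSize of 1 makes the iterator issue plain CallContext requests instead of one-element batches, which some RPC endpoints reject or throttle. A usage sketch mirroring the receipts-fetching call site later in this diff (assumes makeReceiptRequest as used there, and that Result returns the collected values):

```go
import (
	"context"
	"io"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
)

// fetchAllReceipts drains the iterator until io.EOF signals completion.
func fetchAllReceipts(ctx context.Context, client *rpc.Client, txHashes []common.Hash, maxBatchSize int) ([]*types.Receipt, error) {
	ibc := NewIterativeBatchCall[common.Hash, *types.Receipt](
		txHashes,
		makeReceiptRequest,      // func(common.Hash) (*types.Receipt, rpc.BatchElem)
		client.BatchCallContext, // used when batchSize > 1
		client.CallContext,      // used when batchSize == 1 (plain, unbatched RPC)
		maxBatchSize,
	)
	for {
		err := ibc.Fetch(ctx)
		if err == io.EOF {
			break // every request key has been completed
		}
		if err != nil {
			return nil, err // failed elements were re-scheduled; a caller may retry Fetch
		}
	}
	return ibc.Result()
}
```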
......@@ -35,6 +35,7 @@ type batchTestCase struct {
batchSize int
batchCalls []batchCall
singleCalls []elemCall
mock.Mock
}
......@@ -53,7 +54,14 @@ func (tc *batchTestCase) GetBatch(ctx context.Context, b []rpc.BatchElem) error
if ctx.Err() != nil {
return ctx.Err()
}
return tc.Mock.MethodCalled("get", b).Get(0).([]error)[0]
return tc.Mock.MethodCalled("getBatch", b).Get(0).([]error)[0]
}
func (tc *batchTestCase) GetSingle(ctx context.Context, result any, method string, args ...any) error {
if ctx.Err() != nil {
return ctx.Err()
}
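// Fetch passes &elem.Result, a *interface{} wrapping the request's *string; unwrap both layers so the mock can match the concrete pointer.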
return tc.Mock.MethodCalled("getSingle", (*(result.(*interface{}))).(*string), method, args[0]).Get(0).([]error)[0]
}
var mockErr = errors.New("mockErr")
......@@ -64,7 +72,7 @@ func (tc *batchTestCase) Run(t *testing.T) {
keys[i] = i
}
makeMock := func(bci int, bc batchCall) func(args mock.Arguments) {
makeBatchMock := func(bc batchCall) func(args mock.Arguments) {
return func(args mock.Arguments) {
batch := args[0].([]rpc.BatchElem)
for i, elem := range batch {
......@@ -83,7 +91,7 @@ func (tc *batchTestCase) Run(t *testing.T) {
}
}
// mock all the results of the batch calls
for bci, bc := range tc.batchCalls {
for _, bc := range tc.batchCalls {
var batch []rpc.BatchElem
for _, elem := range bc.elems {
batch = append(batch, rpc.BatchElem{
......@@ -94,10 +102,30 @@ func (tc *batchTestCase) Run(t *testing.T) {
})
}
if len(bc.elems) > 0 {
tc.On("get", batch).Once().Run(makeMock(bci, bc)).Return([]error{bc.rpcErr}) // wrap to preserve nil as type of error
tc.On("getBatch", batch).Once().Run(makeBatchMock(bc)).Return([]error{bc.rpcErr}) // wrap to preserve nil as type of error
}
}
makeSingleMock := func(ec elemCall) func(args mock.Arguments) {
return func(args mock.Arguments) {
result := args[0].(*string)
id := args[2].(int)
require.Equal(t, ec.id, id, "element should match expected element")
if ec.err {
*result = ""
} else {
*result = fmt.Sprintf("mock result id %d", id)
}
}
}
// mock the results of unbatched calls
for _, ec := range tc.singleCalls {
var ret error
if ec.err {
ret = mockErr
}
tc.On("getSingle", new(string), "testing_foobar", ec.id).Once().Run(makeSingleMock(ec)).Return([]error{ret})
}
iter := NewIterativeBatchCall[int, *string](keys, makeTestRequest, tc.GetBatch, tc.batchSize)
iter := NewIterativeBatchCall[int, *string](keys, makeTestRequest, tc.GetBatch, tc.GetSingle, tc.batchSize)
for i, bc := range tc.batchCalls {
ctx := context.Background()
if bc.makeCtx != nil {
......@@ -116,6 +144,20 @@ func (tc *batchTestCase) Run(t *testing.T) {
}
}
}
for i, ec := range tc.singleCalls {
ctx := context.Background()
err := iter.Fetch(ctx)
if err == io.EOF {
require.Equal(t, i, len(tc.singleCalls)-1, "EOF only on last call")
} else {
require.False(t, iter.Complete())
if ec.err {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}
require.True(t, iter.Complete(), "batch iter should be complete after the expected calls")
out, err := iter.Result()
require.NoError(t, err)
......@@ -154,6 +196,37 @@ func TestFetchBatched(t *testing.T) {
},
},
},
{
name: "single element",
items: 1,
batchSize: 4,
singleCalls: []elemCall{
{id: 0, err: false},
},
},
{
name: "unbatched",
items: 4,
batchSize: 1,
singleCalls: []elemCall{
{id: 0, err: false},
{id: 1, err: false},
{id: 2, err: false},
{id: 3, err: false},
},
},
{
name: "unbatched with retry",
items: 4,
batchSize: 1,
singleCalls: []elemCall{
{id: 0, err: false},
{id: 1, err: true},
{id: 2, err: false},
{id: 3, err: false},
{id: 1, err: false},
},
},
{
name: "split",
items: 5,
......@@ -240,7 +313,7 @@ func TestFetchBatched(t *testing.T) {
},
{
name: "context timeout",
items: 1,
items: 2,
batchSize: 3,
batchCalls: []batchCall{
{
......@@ -255,6 +328,7 @@ func TestFetchBatched(t *testing.T) {
{
elems: []elemCall{
{id: 0, err: false},
{id: 1, err: false},
},
err: "",
},
......
......@@ -373,6 +373,7 @@ func (job *receiptsFetchingJob) runFetcher(ctx context.Context) error {
job.txHashes,
makeReceiptRequest,
job.client.BatchCallContext,
job.client.CallContext,
job.maxBatchSize,
)
}
......
......@@ -31,7 +31,7 @@ func NewOracleL1Client(logger log.Logger, oracle Oracle, l1Head common.Hash) *Or
}
}
func (o OracleL1Client) L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L1BlockRef, error) {
func (o *OracleL1Client) L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L1BlockRef, error) {
if label != eth.Unsafe && label != eth.Safe && label != eth.Finalized {
return eth.L1BlockRef{}, fmt.Errorf("%w: %s", ErrUnknownLabel, label)
}
......@@ -39,7 +39,7 @@ func (o OracleL1Client) L1BlockRefByLabel(ctx context.Context, label eth.BlockLa
return o.head, nil
}
func (o OracleL1Client) L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error) {
func (o *OracleL1Client) L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error) {
if number > o.head.Number {
return eth.L1BlockRef{}, fmt.Errorf("%w: block number %d", ErrNotFound, number)
}
......@@ -50,20 +50,20 @@ func (o OracleL1Client) L1BlockRefByNumber(ctx context.Context, number uint64) (
return block, nil
}
func (o OracleL1Client) L1BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L1BlockRef, error) {
func (o *OracleL1Client) L1BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L1BlockRef, error) {
return eth.InfoToL1BlockRef(o.oracle.HeaderByBlockHash(hash)), nil
}
func (o OracleL1Client) InfoByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, error) {
func (o *OracleL1Client) InfoByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, error) {
return o.oracle.HeaderByBlockHash(hash), nil
}
func (o OracleL1Client) FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error) {
func (o *OracleL1Client) FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error) {
info, rcpts := o.oracle.ReceiptsByBlockHash(blockHash)
return info, rcpts, nil
}
func (o OracleL1Client) InfoAndTxsByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, types.Transactions, error) {
func (o *OracleL1Client) InfoAndTxsByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, types.Transactions, error) {
info, txs := o.oracle.TransactionsByBlockHash(hash)
return info, txs, nil
}
......@@ -33,19 +33,19 @@ func NewOracleEngine(rollupCfg *rollup.Config, logger log.Logger, backend engine
}
}
func (o OracleEngine) GetPayload(ctx context.Context, payloadId eth.PayloadID) (*eth.ExecutionPayload, error) {
func (o *OracleEngine) GetPayload(ctx context.Context, payloadId eth.PayloadID) (*eth.ExecutionPayload, error) {
return o.api.GetPayloadV1(ctx, payloadId)
}
func (o OracleEngine) ForkchoiceUpdate(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) {
func (o *OracleEngine) ForkchoiceUpdate(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) {
return o.api.ForkchoiceUpdatedV1(ctx, state, attr)
}
func (o OracleEngine) NewPayload(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) {
func (o *OracleEngine) NewPayload(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) {
return o.api.NewPayloadV1(ctx, payload)
}
func (o OracleEngine) PayloadByHash(ctx context.Context, hash common.Hash) (*eth.ExecutionPayload, error) {
func (o *OracleEngine) PayloadByHash(ctx context.Context, hash common.Hash) (*eth.ExecutionPayload, error) {
block := o.backend.GetBlockByHash(hash)
if block == nil {
return nil, ErrNotFound
......@@ -53,7 +53,7 @@ func (o OracleEngine) PayloadByHash(ctx context.Context, hash common.Hash) (*eth
return eth.BlockAsPayload(block)
}
func (o OracleEngine) PayloadByNumber(ctx context.Context, n uint64) (*eth.ExecutionPayload, error) {
func (o *OracleEngine) PayloadByNumber(ctx context.Context, n uint64) (*eth.ExecutionPayload, error) {
hash := o.backend.GetCanonicalHash(n)
if hash == (common.Hash{}) {
return nil, ErrNotFound
......@@ -61,7 +61,7 @@ func (o OracleEngine) PayloadByNumber(ctx context.Context, n uint64) (*eth.Execu
return o.PayloadByHash(ctx, hash)
}
func (o OracleEngine) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error) {
func (o *OracleEngine) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error) {
var header *types.Header
switch label {
case eth.Unsafe:
......@@ -83,7 +83,7 @@ func (o OracleEngine) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabe
return derive.L2BlockToBlockRef(block, &o.rollupCfg.Genesis)
}
func (o OracleEngine) L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) (eth.L2BlockRef, error) {
func (o *OracleEngine) L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) (eth.L2BlockRef, error) {
block := o.backend.GetBlockByHash(l2Hash)
if block == nil {
return eth.L2BlockRef{}, ErrNotFound
......@@ -91,7 +91,7 @@ func (o OracleEngine) L2BlockRefByHash(ctx context.Context, l2Hash common.Hash)
return derive.L2BlockToBlockRef(block, &o.rollupCfg.Genesis)
}
func (o OracleEngine) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (eth.SystemConfig, error) {
func (o *OracleEngine) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (eth.SystemConfig, error) {
payload, err := o.PayloadByHash(ctx, hash)
if err != nil {
return eth.SystemConfig{}, err
......
......@@ -30,7 +30,7 @@ func NewFetchingL1Oracle(ctx context.Context, logger log.Logger, source Source)
}
}
func (o FetchingL1Oracle) HeaderByBlockHash(blockHash common.Hash) eth.BlockInfo {
func (o *FetchingL1Oracle) HeaderByBlockHash(blockHash common.Hash) eth.BlockInfo {
o.logger.Trace("HeaderByBlockHash", "hash", blockHash)
info, err := o.source.InfoByHash(o.ctx, blockHash)
if err != nil {
......@@ -42,7 +42,7 @@ func (o FetchingL1Oracle) HeaderByBlockHash(blockHash common.Hash) eth.BlockInfo
return info
}
func (o FetchingL1Oracle) TransactionsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Transactions) {
func (o *FetchingL1Oracle) TransactionsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Transactions) {
o.logger.Trace("TransactionsByBlockHash", "hash", blockHash)
info, txs, err := o.source.InfoAndTxsByHash(o.ctx, blockHash)
if err != nil {
......@@ -54,7 +54,7 @@ func (o FetchingL1Oracle) TransactionsByBlockHash(blockHash common.Hash) (eth.Bl
return info, txs
}
func (o FetchingL1Oracle) ReceiptsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Receipts) {
func (o *FetchingL1Oracle) ReceiptsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Receipts) {
o.logger.Trace("ReceiptsByBlockHash", "hash", blockHash)
info, rcpts, err := o.source.FetchReceipts(o.ctx, blockHash)
if err != nil {
......
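The receiver changes across OracleL1Client, OracleEngine, and FetchingL1Oracle all follow the same Go guideline: once a type has any pointer-receiver method, use pointer receivers everywhere, both to avoid copying the struct on every call and so that mutations are visible to all callers (with pointer receivers, only *T satisfies the relevant interfaces). A minimal illustration, not taken from the diff:

```go
package main

import "fmt"

type counter struct{ n int }

// Pointer receiver: the mutation is visible to every caller holding *counter.
// With a value receiver, Inc would mutate a copy and n would stay 0.
func (c *counter) Inc() { c.n++ }

func main() {
	c := &counter{}
	c.Inc()
	c.Inc()
	fmt.Println(c.n) // prints 2
}
```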