Commit 0db615dc authored by Brian Bland, committed by GitHub

Add buffered caching of L1 block refs to the confDepth-aware fetcher (#11142)

* Add buffered caching of L1 block refs to the confDepth-aware fetcher

* Refactor l1 head buffer into helper structs

* Fix linting errors

* Move L1 block caching from confDepth into an event-driven L1Tracker

* Fix l1HeadBuffer locking

* Better handle non-shallow reorgs

* Improve test naming

* Explicitly rewind cache when old head received
parent 979b5f8d
@@ -28,9 +28,6 @@ func NewConfDepth(depth uint64, l1Head func() eth.L1BlockRef, fetcher derive.L1F
// Any block numbers that are within confirmation depth of the L1 head are mocked to be "not found",
// effectively hiding the uncertain part of the L1 chain.
func (c *confDepth) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1BlockRef, error) {
-	// TODO: performance optimization: buffer the l1Unsafe, invalidate any reorged previous buffer content,
-	// and instantly return the origin by number from the buffer if we can.
// Don't apply the conf depth if l1Head is empty (as it is during the startup case before the l1State is initialized).
l1Head := c.l1Head()
if l1Head == (eth.L1BlockRef{}) {
......
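For context, the body of L1BlockRefByNumber is truncated above. Judging from the test table below, the guard it applies amounts to roughly the following sketch; the exact short-circuit conditions, and the c.depth and c.fetcher field names, are inferred from NewConfDepth's signature and the tests, not shown in this hunk:

func (c *confDepth) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1BlockRef, error) {
	l1Head := c.l1Head()
	if l1Head == (eth.L1BlockRef{}) {
		// No L1 state yet (startup): pass the request through unfiltered.
		return c.fetcher.L1BlockRefByNumber(ctx, num)
	}
	if num == 0 || c.depth == 0 || num+c.depth <= l1Head.Number {
		// Genesis, zero conf depth, or deep enough below the head to be safe.
		return c.fetcher.L1BlockRefByNumber(ctx, num)
	}
	// Within the confirmation window: report the block as not found yet.
	return eth.L1BlockRef{}, ethereum.NotFound
}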
@@ -13,32 +13,36 @@ import (
"github.com/ethereum-optimism/optimism/op-service/testutils"
)
-var exHash = common.Hash{0xff}
type confTest struct {
name string
head uint64
-	hash common.Hash // hash of head block
req uint64
depth uint64
pass bool
}
+func mockL1BlockRef(num uint64) eth.L1BlockRef {
+	return eth.L1BlockRef{Number: num, Hash: common.Hash{byte(num)}}
+}
func (ct *confTest) Run(t *testing.T) {
l1Fetcher := &testutils.MockL1Source{}
-	l1Head := eth.L1BlockRef{Number: ct.head, Hash: ct.hash}
+	var l1Head eth.L1BlockRef
+	if ct.head != 0 {
+		l1Head = mockL1BlockRef(ct.head)
+	}
l1HeadGetter := func() eth.L1BlockRef { return l1Head }
cd := NewConfDepth(ct.depth, l1HeadGetter, l1Fetcher)
if ct.pass {
// no calls to the l1Fetcher are made if the confirmation depth of the request is not met
-		l1Fetcher.ExpectL1BlockRefByNumber(ct.req, eth.L1BlockRef{Number: ct.req}, nil)
+		l1Fetcher.ExpectL1BlockRefByNumber(ct.req, mockL1BlockRef(ct.req), nil)
}
out, err := cd.L1BlockRefByNumber(context.Background(), ct.req)
l1Fetcher.AssertExpectations(t)
if ct.pass {
require.NoError(t, err)
-		require.Equal(t, out, eth.L1BlockRef{Number: ct.req})
+		require.Equal(t, out, mockL1BlockRef(ct.req))
} else {
require.Equal(t, ethereum.NotFound, err)
}
@@ -48,18 +52,18 @@ func TestConfDepth(t *testing.T) {
// note: we're not testing overflows.
// If a request is large enough to overflow the conf depth check, it's not returning anything anyway.
testCases := []confTest{
{name: "zero conf future", head: 4, hash: exHash, req: 5, depth: 0, pass: true},
{name: "zero conf present", head: 4, hash: exHash, req: 4, depth: 0, pass: true},
{name: "zero conf past", head: 4, hash: exHash, req: 4, depth: 0, pass: true},
{name: "one conf future", head: 4, hash: exHash, req: 5, depth: 1, pass: false},
{name: "one conf present", head: 4, hash: exHash, req: 4, depth: 1, pass: false},
{name: "one conf past", head: 4, hash: exHash, req: 3, depth: 1, pass: true},
{name: "two conf future", head: 4, hash: exHash, req: 5, depth: 2, pass: false},
{name: "two conf present", head: 4, hash: exHash, req: 4, depth: 2, pass: false},
{name: "two conf not like 1", head: 4, hash: exHash, req: 3, depth: 2, pass: false},
{name: "two conf pass", head: 4, hash: exHash, req: 2, depth: 2, pass: true},
{name: "easy pass", head: 100, hash: exHash, req: 20, depth: 5, pass: true},
{name: "genesis case", head: 0, hash: exHash, req: 0, depth: 4, pass: true},
{name: "zero conf future", head: 4, req: 5, depth: 0, pass: true},
{name: "zero conf present", head: 4, req: 4, depth: 0, pass: true},
{name: "zero conf past", head: 4, req: 3, depth: 0, pass: true},
{name: "one conf future", head: 4, req: 5, depth: 1, pass: false},
{name: "one conf present", head: 4, req: 4, depth: 1, pass: false},
{name: "one conf past", head: 4, req: 3, depth: 1, pass: true},
{name: "two conf future", head: 4, req: 5, depth: 2, pass: false},
{name: "two conf present", head: 4, req: 4, depth: 2, pass: false},
{name: "two conf not like 1", head: 4, req: 3, depth: 2, pass: false},
{name: "two conf pass", head: 4, req: 2, depth: 2, pass: true},
{name: "easy pass", head: 100, req: 20, depth: 5, pass: true},
{name: "genesis case", head: 0, req: 0, depth: 4, pass: true},
{name: "no L1 state", req: 10, depth: 4, pass: true},
}
for _, tc := range testCases {
......
@@ -184,7 +184,10 @@ func NewDriver(
statusTracker := status.NewStatusTracker(log, metrics)
sys.Register("status", statusTracker, opts)
-	l1 = NewMeteredL1Fetcher(l1, metrics)
+	l1Tracker := status.NewL1Tracker(l1)
+	sys.Register("l1-blocks", l1Tracker, opts)
+	l1 = NewMeteredL1Fetcher(l1Tracker, metrics)
verifConfDepth := confdepth.NewConfDepth(driverCfg.VerifierConfDepth, statusTracker.L1Head, l1)
ec := engine.NewEngineController(l2, log, metrics, cfg, syncCfg,
......
package status
import (
"sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
// l1HeadBuffer is a thread-safe cache for L1 block references, which contains a series of blocks with a valid chain of parent hashes.
type l1HeadBuffer struct {
rb *ringbuffer[eth.L1BlockRef]
minBlockNumber uint64
mu sync.RWMutex
}
func newL1HeadBuffer(size int) *l1HeadBuffer {
return &l1HeadBuffer{rb: newRingBuffer[eth.L1BlockRef](size)}
}
// Get returns the L1 block reference for the given block number, if it exists in the cache.
func (lhb *l1HeadBuffer) Get(num uint64) (eth.L1BlockRef, bool) {
lhb.mu.RLock()
defer lhb.mu.RUnlock()
return lhb.get(num)
}
func (lhb *l1HeadBuffer) get(num uint64) (eth.L1BlockRef, bool) {
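	// Note: for num below the cached window, the uint64 subtraction wraps and the
	// conversion to int yields a negative index, so rb.Get reports a miss rather than panicking.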
return lhb.rb.Get(int(num - lhb.minBlockNumber))
}
// Insert inserts a new L1 block reference into the cache, removing any entries invalidated by a reorg.
// If the new head's parent is found in the cache with a matching hash, any cached refs at or after the
// new head's block number are evicted before the new head is appended. Otherwise the chain can no
// longer be validated, so the entire cache is reset.
func (lhb *l1HeadBuffer) Insert(l1Head eth.L1BlockRef) {
lhb.mu.Lock()
defer lhb.mu.Unlock()
if ref, ok := lhb.get(l1Head.Number - 1); ok && ref.Hash == l1Head.ParentHash {
// Parent hash is found, so we can safely add the new head to the cache after the parent.
// Remove any L1 refs from the cache after or conflicting with the new head.
if ref, ok := lhb.rb.End(); ok && ref.Number >= l1Head.Number {
for ref, ok = lhb.rb.Pop(); ok && ref.Number > l1Head.Number; ref, ok = lhb.rb.Pop() {
}
}
} else {
// Parent not found or doesn't match, so invalidate the entire cache.
lhb.rb.Reset()
}
lhb.rb.Push(l1Head)
start, _ := lhb.rb.Start()
lhb.minBlockNumber = start.Number
}
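To illustrate the invalidation rules above, a short sketch with hypothetical block numbers; mockL1BlockRef here is the test helper defined further down, which chains parent hashes by number:

buf := newL1HeadBuffer(1000)
buf.Insert(mockL1BlockRef(100))
buf.Insert(mockL1BlockRef(101))
buf.Insert(mockL1BlockRef(102))

// Same-height replacement: the parent hash still matches the cached 101,
// so only the old 102 is evicted; 100 and 101 survive.
reorged := mockL1BlockRef(102)
reorged.Hash = common.Hash{0xbe, 0xef}
buf.Insert(reorged)

// Unknown parent: the chain of hashes can no longer be validated,
// so the whole buffer is reset before the new head is pushed.
deep := mockL1BlockRef(102)
deep.ParentHash = common.Hash{0xde, 0xad}
buf.Insert(deep)
_, ok := buf.Get(101) // ok == false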
package status
import (
"context"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/event"
"github.com/ethereum-optimism/optimism/op-service/eth"
)
// L1Tracker implements the L1Fetcher interface while proactively maintaining a reorg-aware cache
// of L1 block references by number. It handles L1UnsafeEvent events in order to populate the cache
// with the latest L1 block references.
type L1Tracker struct {
derive.L1Fetcher
cache *l1HeadBuffer
}
func NewL1Tracker(inner derive.L1Fetcher) *L1Tracker {
return &L1Tracker{
L1Fetcher: inner,
cache: newL1HeadBuffer(1000),
}
}
func (st *L1Tracker) OnEvent(ev event.Event) bool {
switch x := ev.(type) {
case L1UnsafeEvent:
st.cache.Insert(x.L1Unsafe)
default:
return false
}
return true
}
func (l *L1Tracker) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1BlockRef, error) {
if ref, ok := l.cache.Get(num); ok {
return ref, nil
}
return l.L1Fetcher.L1BlockRefByNumber(ctx, num)
}
package status
import (
"context"
"testing"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
func mockL1BlockRef(num uint64) eth.L1BlockRef {
return eth.L1BlockRef{Number: num, Hash: common.Hash{byte(num)}, ParentHash: common.Hash{byte(num - 1)}}
}
func newL1HeadEvent(l1Tracker *L1Tracker, head eth.L1BlockRef) {
l1Tracker.OnEvent(L1UnsafeEvent{
L1Unsafe: head,
})
}
func TestCachingHeadReorg(t *testing.T) {
ctx := context.Background()
l1Fetcher := &testutils.MockL1Source{}
l1Tracker := NewL1Tracker(l1Fetcher)
// no blocks added to cache yet
l1Head := mockL1BlockRef(99)
l1Fetcher.ExpectL1BlockRefByNumber(99, l1Head, nil)
ret, err := l1Tracker.L1BlockRefByNumber(ctx, 99)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
l1Fetcher.AssertExpectations(t)
// from cache
l1Head = mockL1BlockRef(100)
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 100)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// from cache
l1Head = mockL1BlockRef(101)
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// from cache
l1Head = mockL1BlockRef(102)
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 102)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// trigger a reorg of block 102
l1Head = mockL1BlockRef(102)
l1Head.Hash = common.Hash{0xde, 0xad, 0xbe, 0xef}
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 102)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// confirm that 101 is still in the cache
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101)
require.NoError(t, err)
require.Equal(t, mockL1BlockRef(101), ret)
}
func TestCachingHeadRewind(t *testing.T) {
ctx := context.Background()
l1Fetcher := &testutils.MockL1Source{}
l1Tracker := NewL1Tracker(l1Fetcher)
// no blocks added to cache yet
l1Head := mockL1BlockRef(99)
l1Fetcher.ExpectL1BlockRefByNumber(99, l1Head, nil)
ret, err := l1Tracker.L1BlockRefByNumber(ctx, 99)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
l1Fetcher.AssertExpectations(t)
// from cache
l1Head = mockL1BlockRef(100)
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 100)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// from cache
l1Head = mockL1BlockRef(101)
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// from cache
l1Head = mockL1BlockRef(102)
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 102)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// 101 is the new head, invalidating 102
l1Head = mockL1BlockRef(101)
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// confirm that 102 is no longer in the cache
l1Head = mockL1BlockRef(102)
l1Fetcher.ExpectL1BlockRefByNumber(102, l1Head, nil)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 102)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
l1Fetcher.AssertExpectations(t)
// confirm that 101 is still in the cache
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101)
require.NoError(t, err)
require.Equal(t, mockL1BlockRef(101), ret)
}
func TestCachingChainShorteningReorg(t *testing.T) {
ctx := context.Background()
l1Fetcher := &testutils.MockL1Source{}
l1Tracker := NewL1Tracker(l1Fetcher)
// no blocks added to cache yet
l1Head := mockL1BlockRef(99)
l1Fetcher.ExpectL1BlockRefByNumber(99, l1Head, nil)
ret, err := l1Tracker.L1BlockRefByNumber(ctx, 99)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
l1Fetcher.AssertExpectations(t)
// from cache
l1Head = mockL1BlockRef(100)
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 100)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// from cache
l1Head = mockL1BlockRef(101)
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// from cache
l1Head = mockL1BlockRef(102)
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 102)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// trigger a reorg of block 101, invalidating the following cache elements (102)
l1Head = mockL1BlockRef(101)
l1Head.Hash = common.Hash{0xde, 0xad, 0xbe, 0xef}
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// confirm that 102 has been removed
l1Fetcher.ExpectL1BlockRefByNumber(102, mockL1BlockRef(102), nil)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 102)
require.NoError(t, err)
require.Equal(t, mockL1BlockRef(102), ret)
l1Fetcher.AssertExpectations(t)
}
func TestCachingDeepReorg(t *testing.T) {
ctx := context.Background()
l1Fetcher := &testutils.MockL1Source{}
l1Tracker := NewL1Tracker(l1Fetcher)
// from cache
l1Head := mockL1BlockRef(100)
newL1HeadEvent(l1Tracker, l1Head)
ret, err := l1Tracker.L1BlockRefByNumber(ctx, 100)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// from cache
l1Head = mockL1BlockRef(101)
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// from cache
l1Head = mockL1BlockRef(102)
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 102)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// append a new block 102 based on a different 101, invalidating the entire cache
parentHash := common.Hash{0xde, 0xad, 0xbe, 0xef}
l1Head = mockL1BlockRef(102)
l1Head.ParentHash = parentHash
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 102)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// confirm that the cache contains no 101
l1Fetcher.ExpectL1BlockRefByNumber(101, mockL1BlockRef(101), nil)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101)
require.NoError(t, err)
require.Equal(t, mockL1BlockRef(101), ret)
l1Fetcher.AssertExpectations(t)
// confirm that the cache contains no 100
l1Fetcher.ExpectL1BlockRefByNumber(100, mockL1BlockRef(100), nil)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 100)
require.NoError(t, err)
require.Equal(t, mockL1BlockRef(100), ret)
l1Fetcher.AssertExpectations(t)
}
func TestCachingSkipAhead(t *testing.T) {
ctx := context.Background()
l1Fetcher := &testutils.MockL1Source{}
l1Tracker := NewL1Tracker(l1Fetcher)
// from cache
l1Head := mockL1BlockRef(100)
newL1HeadEvent(l1Tracker, l1Head)
ret, err := l1Tracker.L1BlockRefByNumber(ctx, 100)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// from cache
l1Head = mockL1BlockRef(101)
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101)
require.NoError(t, err)
require.Equal(t, l1Head, ret)
// head jumps ahead from 101->103, invalidating the entire cache
l1Head = mockL1BlockRef(103)
newL1HeadEvent(l1Tracker, l1Head)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 103)
require.NoError(t, err)
require.Equal(t, mockL1BlockRef(103), ret)
l1Fetcher.AssertExpectations(t)
// confirm that the cache contains no 101
l1Fetcher.ExpectL1BlockRefByNumber(101, mockL1BlockRef(101), nil)
ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101)
require.NoError(t, err)
require.Equal(t, mockL1BlockRef(101), ret)
l1Fetcher.AssertExpectations(t)
}
func TestCacheSizeEviction(t *testing.T) {
ctx := context.Background()
l1Fetcher := &testutils.MockL1Source{}
l1Tracker := NewL1Tracker(l1Fetcher)
// insert 1000 elements into the cache
for idx := 1000; idx < 2000; idx++ {
l1Head := mockL1BlockRef(uint64(idx))
newL1HeadEvent(l1Tracker, l1Head)
}
// request each element from cache
for idx := 1000; idx < 2000; idx++ {
ret, err := l1Tracker.L1BlockRefByNumber(ctx, uint64(idx))
require.NoError(t, err)
require.Equal(t, mockL1BlockRef(uint64(idx)), ret)
}
// insert 1001st element, removing the first
l1Head := mockL1BlockRef(2000)
newL1HeadEvent(l1Tracker, l1Head)
// request first element, which now requires a live fetch instead
l1Fetcher.ExpectL1BlockRefByNumber(1000, mockL1BlockRef(1000), nil)
ret, err := l1Tracker.L1BlockRefByNumber(ctx, 1000)
require.NoError(t, err)
require.Equal(t, mockL1BlockRef(1000), ret)
}
package status
// ringbuffer is a circular buffer that can be used to store a fixed number of
// elements. When the buffer is full, the oldest element is overwritten.
// This buffer implementation supports indexed access to elements, as well as
// access to the first and last elements.
type ringbuffer[T any] struct {
contents []T
start int
size int
}
func newRingBuffer[T any](size int) *ringbuffer[T] {
return &ringbuffer[T]{
contents: make([]T, size),
}
}
func (rb *ringbuffer[T]) Len() int {
return rb.size
}
func (rb *ringbuffer[T]) Get(idx int) (T, bool) {
if idx < 0 || idx >= rb.size {
var zero T
return zero, false
}
return rb.contents[(rb.start+idx)%len(rb.contents)], true
}
func (rb *ringbuffer[T]) Start() (T, bool) {
if rb.size == 0 {
var zero T
return zero, false
}
return rb.contents[rb.start], true
}
func (rb *ringbuffer[T]) End() (T, bool) {
if rb.size == 0 {
var zero T
return zero, false
}
return rb.contents[(rb.start+rb.size+len(rb.contents)-1)%len(rb.contents)], true
}
func (rb *ringbuffer[T]) Push(val T) {
rb.contents[(rb.start+rb.size)%len(rb.contents)] = val
if rb.size == len(rb.contents) {
rb.start = (rb.start + 1) % len(rb.contents)
} else {
rb.size++
}
}
func (rb *ringbuffer[T]) Pop() (T, bool) {
end, ok := rb.End()
if ok {
rb.size--
}
return end, ok
}
func (rb *ringbuffer[T]) Reset() {
rb.start = 0
rb.size = 0
}
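A quick illustration of the wraparound semantics (a sketch, not part of the commit):

rb := newRingBuffer[int](3)
rb.Push(1)
rb.Push(2)
rb.Push(3)
rb.Push(4) // buffer full: 4 overwrites the oldest element (1)

first, _ := rb.Start() // first == 2
last, _ := rb.End()    // last == 4
v, ok := rb.Get(1)     // v == 3, ok == true (Get indexes from Start)

popped, _ := rb.Pop() // popped == 4, leaving [2, 3]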