Commit c19d51bf authored by protolambda, committed by GitHub

op-supervisor: head pointers, refactor block processor (#12031)

* op-supervisor: supervisor-head-pointers squashed

change entry indices to head pointers, refactor block processor, backend fixes
Co-authored-by: Axel Kingsley <axel.kingsley@gmail.com>

* use ticker instead of time.After

---------
Co-authored-by: Axel Kingsley <axel.kingsley@gmail.com>
parent d90e4340
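The central change is replacing raw entrydb.EntryIdx head values with explicit head pointers. For orientation, this is a minimal restatement of the HeadPointer type that the heads package introduces further down in this diff (it is not itself part of any hunk):

package heads

import "github.com/ethereum/go-ethereum/common"

// HeadPointer marks how far a chain has been processed at one safety level:
// the last fully-processed (sealed) block, plus how many logs of the block
// built on top of it have already been verified.
type HeadPointer struct {
	LastSealedBlockHash common.Hash
	LastSealedBlockNum  uint64
	LogsSince           uint32
}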
......@@ -48,7 +48,7 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg
}
// create the head tracker
headTracker, err := heads.NewHeadTracker(filepath.Join(cfg.Datadir, "heads.json"))
headTracker, err := heads.NewHeadTracker(logger, filepath.Join(cfg.Datadir, "heads.json"))
if err != nil {
return nil, fmt.Errorf("failed to load existing heads: %w", err)
}
......@@ -190,7 +190,7 @@ func (su *SupervisorBackend) CheckMessage(identifier types.Identifier, payloadHa
chainID := identifier.ChainID
blockNum := identifier.BlockNumber
logIdx := identifier.LogIndex
i, err := su.db.Check(chainID, blockNum, uint32(logIdx), payloadHash)
_, err := su.db.Check(chainID, blockNum, uint32(logIdx), payloadHash)
if errors.Is(err, logs.ErrFuture) {
return types.Unsafe, nil
}
......@@ -207,8 +207,15 @@ func (su *SupervisorBackend) CheckMessage(identifier types.Identifier, payloadHa
db.NewSafetyChecker(types.Safe, su.db),
db.NewSafetyChecker(types.Finalized, su.db),
} {
if i <= checker.CrossHeadForChain(chainID) {
safest = checker.SafetyLevel()
// check local safety limit first as it's more permissive
localPtr := checker.LocalHead(chainID)
if localPtr.WithinRange(blockNum, uint32(logIdx)) {
safest = checker.LocalSafetyLevel()
}
// check cross safety level
crossPtr := checker.CrossHead(chainID)
if crossPtr.WithinRange(blockNum, uint32(logIdx)) {
safest = checker.CrossSafetyLevel()
}
}
return safest, nil
......@@ -239,7 +246,7 @@ func (su *SupervisorBackend) CheckBlock(chainID *hexutil.U256, blockHash common.
safest := types.CrossUnsafe
// find the last log index in the block
id := eth.BlockID{Hash: blockHash, Number: uint64(blockNumber)}
i, err := su.db.FindSealedBlock(types.ChainID(*chainID), id)
_, err := su.db.FindSealedBlock(types.ChainID(*chainID), id)
if errors.Is(err, logs.ErrFuture) {
return types.Unsafe, nil
}
......@@ -256,8 +263,15 @@ func (su *SupervisorBackend) CheckBlock(chainID *hexutil.U256, blockHash common.
db.NewSafetyChecker(types.Safe, su.db),
db.NewSafetyChecker(types.Finalized, su.db),
} {
if i <= checker.CrossHeadForChain(types.ChainID(*chainID)) {
safest = checker.SafetyLevel()
// check local safety limit first as it's more permissive
localPtr := checker.LocalHead(types.ChainID(*chainID))
if localPtr.IsSealed(uint64(blockNumber)) {
safest = checker.LocalSafetyLevel()
}
// check cross safety level
crossPtr := checker.CrossHead(types.ChainID(*chainID))
if crossPtr.IsSealed(uint64(blockNumber)) {
safest = checker.CrossSafetyLevel()
}
}
return safest, nil
......
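The CheckMessage and CheckBlock loops above walk the checkers from least to most safe and escalate safest whenever the queried log or block falls within that checker's local or cross head; the local view is consulted first because it is the more permissive of the two. A standalone sketch of that escalation order with made-up head values and simplified types (none of these numbers come from the diff; the level names follow the local/cross pairing wired up in NewChecker later in this change):

package main

import "fmt"

// Simplified stand-ins for types.SafetyLevel and heads.HeadPointer (illustrative only).
type SafetyLevel string

type HeadPointer struct {
	LastSealedBlockNum uint64
}

// withinRange is a whole-block simplification of HeadPointer.WithinRange.
func (p HeadPointer) withinRange(blockNum uint64) bool {
	return blockNum <= p.LastSealedBlockNum
}

func main() {
	blockNum := uint64(42)
	// One local/cross head pair per safety checker, most permissive first (made-up numbers).
	levels := []struct {
		local, cross           HeadPointer
		localLevel, crossLevel SafetyLevel
	}{
		{HeadPointer{100}, HeadPointer{90}, "unsafe", "cross-unsafe"},
		{HeadPointer{60}, HeadPointer{45}, "safe", "cross-safe"},
		{HeadPointer{30}, HeadPointer{20}, "finalized", "cross-finalized"},
	}
	safest := SafetyLevel("cross-unsafe")
	for _, lvl := range levels {
		if lvl.local.withinRange(blockNum) {
			safest = lvl.localLevel // local head is checked first: it is more permissive
		}
		if lvl.cross.withinRange(blockNum) {
			safest = lvl.crossLevel
		}
	}
	fmt.Println("safest:", safest) // prints "safest: cross-safe"
}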
......@@ -39,7 +39,7 @@ type LogStorage interface {
// returns ErrDifferent if the known block does not match
FindSealedBlock(block eth.BlockID) (nextEntry entrydb.EntryIdx, err error)
IteratorStartingAt(i entrydb.EntryIdx) (logs.Iterator, error)
IteratorStartingAt(sealedNum uint64, logsSince uint32) (logs.Iterator, error)
// returns ErrConflict if the log does not match the canonical chain.
// returns ErrFuture if the log is out of reach.
......@@ -50,8 +50,20 @@ type LogStorage interface {
var _ LogStorage = (*logs.DB)(nil)
type HeadsStorage interface {
Current() *heads.Heads
Apply(op heads.Operation) error
CrossUnsafe(id types.ChainID) heads.HeadPointer
CrossSafe(id types.ChainID) heads.HeadPointer
CrossFinalized(id types.ChainID) heads.HeadPointer
LocalUnsafe(id types.ChainID) heads.HeadPointer
LocalSafe(id types.ChainID) heads.HeadPointer
LocalFinalized(id types.ChainID) heads.HeadPointer
UpdateCrossUnsafe(id types.ChainID, pointer heads.HeadPointer) error
UpdateCrossSafe(id types.ChainID, pointer heads.HeadPointer) error
UpdateCrossFinalized(id types.ChainID, pointer heads.HeadPointer) error
UpdateLocalUnsafe(id types.ChainID, pointer heads.HeadPointer) error
UpdateLocalSafe(id types.ChainID, pointer heads.HeadPointer) error
UpdateLocalFinalized(id types.ChainID, pointer heads.HeadPointer) error
}
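HeadsStorage now exposes a getter and an updater per (local/cross) x (unsafe/safe/finalized) combination instead of the old Apply(Operation) entry point. A hedged sketch of how a caller in this package might promote a cross-safe head through it; the helper name and the backwards-move guard are assumptions of the sketch, not something the interface enforces:

package db

import (
	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)

// promoteCrossSafe is illustrative only: it reads the current cross-safe head of a
// chain and, if the candidate pointer is not behind it, writes it back via the updater.
func promoteCrossSafe(store HeadsStorage, id types.ChainID, ptr heads.HeadPointer) error {
	if ptr.LastSealedBlockNum < store.CrossSafe(id).LastSealedBlockNum {
		return nil // never move a head backwards in this sketch
	}
	return store.UpdateCrossSafe(id, ptr)
}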
// ChainsDB is a database that stores logs and heads for multiple chains.
......@@ -85,7 +97,7 @@ func (db *ChainsDB) AddLogDB(chain types.ChainID, logDB LogStorage) {
func (db *ChainsDB) ResumeFromLastSealedBlock() error {
for chain, logStore := range db.logDBs {
headNum, ok := logStore.LatestSealedBlockNum()
if ok {
if !ok {
// db must be empty, nothing to rewind to
db.logger.Info("Resuming, but found no DB contents", "chain", chain)
continue
......@@ -155,7 +167,7 @@ func (db *ChainsDB) updateAllHeads() error {
safeChecker,
finalizedChecker} {
if err := db.UpdateCrossHeads(checker); err != nil {
return fmt.Errorf("failed to update cross-heads for safety level %v: %w", checker.Name(), err)
return fmt.Errorf("failed to update cross-heads for safety level %s: %w", checker, err)
}
}
return nil
......@@ -165,13 +177,14 @@ func (db *ChainsDB) updateAllHeads() error {
// the provided checker controls which heads are considered.
func (db *ChainsDB) UpdateCrossHeadsForChain(chainID types.ChainID, checker SafetyChecker) error {
// start with the xsafe head of the chain
xHead := checker.CrossHeadForChain(chainID)
xHead := checker.CrossHead(chainID)
// advance as far as the local head
localHead := checker.LocalHeadForChain(chainID)
// get an iterator for the last checkpoint behind the x-head
iter, err := db.logDBs[chainID].IteratorStartingAt(xHead)
localHead := checker.LocalHead(chainID)
// get an iterator for the next item
iter, err := db.logDBs[chainID].IteratorStartingAt(xHead.LastSealedBlockNum, xHead.LogsSince)
if err != nil {
return fmt.Errorf("failed to rewind cross-safe head for chain %v: %w", chainID, err)
return fmt.Errorf("failed to open iterator at sealed block %d logsSince %d for chain %v: %w",
xHead.LastSealedBlockNum, xHead.LogsSince, chainID, err)
}
// track if we updated the cross-head
updated := false
......@@ -181,51 +194,92 @@ func (db *ChainsDB) UpdateCrossHeadsForChain(chainID types.ChainID, checker Safe
// - when we reach a message that is not safe
// - if an error occurs
for {
if err := iter.NextExecMsg(); err == io.EOF {
if err := iter.NextInitMsg(); errors.Is(err, logs.ErrFuture) {
// We ran out of events, but there can still be empty blocks.
// Take the last block we've processed, and try to update the x-head with it.
sealedBlockHash, sealedBlockNum, ok := iter.SealedBlock()
if !ok {
break
}
// We can only drop the logsSince value to 0 if the block is not seen.
if sealedBlockNum > xHead.LastSealedBlockNum {
// if we would exceed the local head, then abort
if !localHead.WithinRange(sealedBlockNum, 0) {
break
}
xHead = heads.HeadPointer{
LastSealedBlockHash: sealedBlockHash,
LastSealedBlockNum: sealedBlockNum,
LogsSince: 0,
}
updated = true
}
break
} else if err != nil {
return fmt.Errorf("failed to read next executing message for chain %v: %w", chainID, err)
}
// if we would exceed the local head, then abort
if iter.NextIndex() > localHead {
xHead = localHead // clip to local head
updated = localHead != xHead
sealedBlockHash, sealedBlockNum, ok := iter.SealedBlock()
if !ok {
break
}
exec := iter.ExecMessage()
if exec == nil {
panic("expected executing message after traversing to one without error")
_, logIdx, ok := iter.InitMessage()
if !ok {
break
}
// use the checker to determine if this message is safe
safe := checker.Check(
types.ChainIDFromUInt64(uint64(exec.Chain)),
exec.BlockNum,
exec.LogIdx,
exec.Hash)
if !safe {
// if we would exceed the local head, then abort
if !localHead.WithinRange(sealedBlockNum, logIdx) {
break
}
// Check the executing message, if any
exec := iter.ExecMessage()
if exec != nil {
// Use the checker to determine if this message exists in the canonical chain,
// within the view of the checker's safety level
if err := checker.CheckCross(
types.ChainIDFromUInt64(uint64(exec.Chain)),
exec.BlockNum,
exec.LogIdx,
exec.Hash); err != nil {
if errors.Is(err, logs.ErrConflict) {
db.logger.Error("Bad executing message!", "err", err)
} else if errors.Is(err, logs.ErrFuture) {
db.logger.Warn("Executing message references future message", "err", err)
} else {
db.logger.Error("Failed to check executing message")
}
break
}
}
// if all is well, prepare the x-head update to this point
xHead = iter.NextIndex()
xHead = heads.HeadPointer{
LastSealedBlockHash: sealedBlockHash,
LastSealedBlockNum: sealedBlockNum,
LogsSince: logIdx + 1,
}
updated = true
}
// have the checker create an update to the x-head in question, and apply that update
err = db.heads.Apply(checker.Update(chainID, xHead))
if err != nil {
return fmt.Errorf("failed to update cross-head for chain %v: %w", chainID, err)
}
// if any chain was updated, we can trigger a maintenance request
// this allows for the maintenance loop to handle cascading updates
// instead of waiting for the next scheduled update
if updated {
db.logger.Info("Promoting cross-head", "head", xHead, "safety-level", checker.SafetyLevel())
db.logger.Info("Promoting cross-head", "chain", chainID, "head", xHead, "safety-level", checker.CrossSafetyLevel())
err = checker.UpdateCross(chainID, xHead)
if err != nil {
return fmt.Errorf("failed to update cross-head for chain %v: %w", chainID, err)
}
db.RequestMaintenance()
} else {
db.logger.Info("No cross-head update", "head", xHead, "safety-level", checker.SafetyLevel())
db.logger.Debug("No cross-head update", "chain", chainID, "head", xHead, "safety-level", checker.CrossSafetyLevel())
}
return nil
}
func (db *ChainsDB) Heads() HeadsStorage {
return db.heads
}
// UpdateCrossHeads updates the cross-heads of all chains
// based on the provided SafetyChecker. The SafetyChecker is used to determine
// the safety of each log entry in the database, and the cross-head associated with it.
......
package db
/*
import (
"errors"
"fmt"
"io"
"math/rand" // nosemgrep
"testing"
......@@ -182,9 +184,9 @@ func TestChainsDB_UpdateCrossHeadsError(t *testing.T) {
// but readability and maintainability would be improved by making this function more configurable.
func setupStubbedForUpdateHeads(chainID types.ChainID) (*stubLogDB, *stubChecker, *heads.Heads) {
// the last known cross-safe head is at 20
cross := entrydb.EntryIdx(20)
cross := heads.HeadPointer{LastSealedBlockNum: 20}
// the local head (the limit of the update) is at 40
local := entrydb.EntryIdx(40)
local := heads.HeadPointer{LastSealedBlockNum: 40}
// the number of executing messages to make available (this should be more than the number of safety checks performed)
numExecutingMessages := 30
// number of safety checks that will pass before returning false
......@@ -245,39 +247,57 @@ func setupStubbedForUpdateHeads(chainID types.ChainID) (*stubLogDB, *stubChecker
}
type stubChecker struct {
localHeadForChain entrydb.EntryIdx
crossHeadForChain entrydb.EntryIdx
localHeadForChain heads.HeadPointer
crossHeadForChain heads.HeadPointer
numSafe int
checkCalls int
updated entrydb.EntryIdx
updated heads.HeadPointer
}
func (s *stubChecker) LocalHeadForChain(chainID types.ChainID) entrydb.EntryIdx {
return s.localHeadForChain
func (s *stubChecker) String() string {
return "stubChecker"
}
func (s *stubChecker) Name() string {
return "stubChecker"
func (s *stubChecker) LocalSafetyLevel() types.SafetyLevel {
return types.Safe
}
func (s *stubChecker) CrossHeadForChain(chainID types.ChainID) entrydb.EntryIdx {
func (s *stubChecker) CrossSafetyLevel() types.SafetyLevel {
return types.Safe
}
func (s *stubChecker) LocalHead(chainID types.ChainID) heads.HeadPointer {
return s.localHeadForChain
}
func (s *stubChecker) CrossHead(chainID types.ChainID) heads.HeadPointer {
return s.crossHeadForChain
}
// stubbed Check returns true for the first numSafe calls, and false thereafter
func (s *stubChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool {
if s.checkCalls >= s.numSafe {
return false
return fmt.Errorf("safety check failed")
}
s.checkCalls++
return true
return nil
}
func (s *stubChecker) CheckCross(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) error {
return s.check(chain, blockNum, logIdx, logHash)
}
func (s *stubChecker) CheckLocal(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) error {
return s.check(chain, blockNum, logIdx, logHash)
}
func (s *stubChecker) Update(chain types.ChainID, index entrydb.EntryIdx) heads.OperationFn {
s.updated = index
return func(heads *heads.Heads) error {
return nil
}
func (s *stubChecker) Update(chain types.ChainID, h heads.HeadPointer) error {
s.updated = h
return nil
}
func (s *stubChecker) UpdateCross(chain types.ChainID, h heads.HeadPointer) error {
return s.Update(chain, h)
}
func (s *stubChecker) UpdateLocal(chain types.ChainID, h heads.HeadPointer) error {
return s.Update(chain, h)
}
func (s *stubChecker) SafetyLevel() types.SafetyLevel {
......@@ -288,6 +308,54 @@ type stubHeadStorage struct {
heads *heads.Heads
}
func (s *stubHeadStorage) UpdateLocalUnsafe(chainID types.ChainID, h heads.HeadPointer) error {
panic("not implemented")
}
func (s *stubHeadStorage) UpdateLocalSafe(chainID types.ChainID, h heads.HeadPointer) error {
panic("not implemented")
}
func (s *stubHeadStorage) UpdateLocalFinalized(chainID types.ChainID, h heads.HeadPointer) error {
panic("not implemented")
}
func (s *stubHeadStorage) UpdateCrossUnsafe(chainID types.ChainID, h heads.HeadPointer) error {
panic("not implemented")
}
func (s *stubHeadStorage) UpdateCrossSafe(chainID types.ChainID, h heads.HeadPointer) error {
panic("not implemented")
}
func (s *stubHeadStorage) UpdateCrossFinalized(chainID types.ChainID, h heads.HeadPointer) error {
panic("not implemented")
}
func (s *stubHeadStorage) LocalUnsafe(chainID types.ChainID) heads.HeadPointer {
panic("not implemented")
}
func (s *stubHeadStorage) LocalSafe(chainID types.ChainID) heads.HeadPointer {
panic("not implemented")
}
func (s *stubHeadStorage) LocalFinalized(chainID types.ChainID) heads.HeadPointer {
panic("not implemented")
}
func (s *stubHeadStorage) CrossUnsafe(chainID types.ChainID) heads.HeadPointer {
panic("not implemented")
}
func (s *stubHeadStorage) CrossSafe(chainID types.ChainID) heads.HeadPointer {
panic("not implemented")
}
func (s *stubHeadStorage) CrossFinalized(chainID types.ChainID) heads.HeadPointer {
panic("not implemented")
}
func (s *stubHeadStorage) Apply(heads.Operation) error {
return nil
}
......@@ -415,10 +483,10 @@ func (s *stubLogDB) FindSealedBlock(block eth.BlockID) (nextEntry entrydb.EntryI
panic("not implemented")
}
func (s *stubLogDB) IteratorStartingAt(i entrydb.EntryIdx) (logs.Iterator, error) {
func (s *stubLogDB) IteratorStartingAt(sealedNum uint64, logIndex uint32) (logs.Iterator, error) {
return &stubIterator{
index: i - 1,
db: s,
//index: i - 1, // TODO broken
db: s,
}, nil
}
......@@ -447,3 +515,4 @@ func (s *stubLogDB) LatestBlockNum() uint64 {
func (s *stubLogDB) Close() error {
return nil
}
*/
......@@ -7,8 +7,12 @@ import (
"os"
"sync"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/ioutil"
"github.com/ethereum-optimism/optimism/op-service/jsonutil"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
// HeadTracker records the current chain head pointers for a single chain.
......@@ -18,9 +22,95 @@ type HeadTracker struct {
path string
current *Heads
logger log.Logger
}
func (t *HeadTracker) CrossUnsafe(id types.ChainID) HeadPointer {
return t.current.Get(id).CrossUnsafe
}
func (t *HeadTracker) CrossSafe(id types.ChainID) HeadPointer {
return t.current.Get(id).CrossSafe
}
func (t *HeadTracker) CrossFinalized(id types.ChainID) HeadPointer {
return t.current.Get(id).CrossFinalized
}
func (t *HeadTracker) LocalUnsafe(id types.ChainID) HeadPointer {
return t.current.Get(id).Unsafe
}
func (t *HeadTracker) LocalSafe(id types.ChainID) HeadPointer {
return t.current.Get(id).LocalSafe
}
func (t *HeadTracker) LocalFinalized(id types.ChainID) HeadPointer {
return t.current.Get(id).LocalFinalized
}
func (t *HeadTracker) UpdateCrossUnsafe(id types.ChainID, pointer HeadPointer) error {
return t.Apply(OperationFn(func(heads *Heads) error {
t.logger.Info("Cross-unsafe update", "pointer", pointer)
h := heads.Get(id)
h.CrossUnsafe = pointer
heads.Put(id, h)
return nil
}))
}
func (t *HeadTracker) UpdateCrossSafe(id types.ChainID, pointer HeadPointer) error {
return t.Apply(OperationFn(func(heads *Heads) error {
t.logger.Info("Cross-safe update", "pointer", pointer)
h := heads.Get(id)
h.CrossSafe = pointer
heads.Put(id, h)
return nil
}))
}
func (t *HeadTracker) UpdateCrossFinalized(id types.ChainID, pointer HeadPointer) error {
return t.Apply(OperationFn(func(heads *Heads) error {
t.logger.Info("Cross-finalized update", "pointer", pointer)
h := heads.Get(id)
h.CrossFinalized = pointer
heads.Put(id, h)
return nil
}))
}
func (t *HeadTracker) UpdateLocalUnsafe(id types.ChainID, pointer HeadPointer) error {
return t.Apply(OperationFn(func(heads *Heads) error {
t.logger.Info("Local-unsafe update", "pointer", pointer)
h := heads.Get(id)
h.Unsafe = pointer
heads.Put(id, h)
return nil
}))
}
func (t *HeadTracker) UpdateLocalSafe(id types.ChainID, pointer HeadPointer) error {
return t.Apply(OperationFn(func(heads *Heads) error {
t.logger.Info("Local-safe update", "pointer", pointer)
h := heads.Get(id)
h.LocalSafe = pointer
heads.Put(id, h)
return nil
}))
}
func (t *HeadTracker) UpdateLocalFinalized(id types.ChainID, pointer HeadPointer) error {
return t.Apply(OperationFn(func(heads *Heads) error {
t.logger.Info("Local-finalized update", "pointer", pointer)
h := heads.Get(id)
h.LocalFinalized = pointer
heads.Put(id, h)
return nil
}))
}
func NewHeadTracker(path string) (*HeadTracker, error) {
func NewHeadTracker(logger log.Logger, path string) (*HeadTracker, error) {
current := NewHeads()
if data, err := os.ReadFile(path); errors.Is(err, os.ErrNotExist) {
// No existing file, just use empty heads
......@@ -34,6 +124,7 @@ func NewHeadTracker(path string) (*HeadTracker, error) {
return &HeadTracker{
path: path,
current: current,
logger: logger,
}, nil
}
......
package heads
/*
import (
"errors"
"os"
......@@ -99,3 +100,4 @@ func TestHeads_NoChangesMadeIfWriteFails(t *testing.T) {
require.ErrorIs(t, err, os.ErrNotExist)
require.Equal(t, ChainHeads{}, orig.Current().Get(chainA))
}
*/
......@@ -3,23 +3,48 @@ package heads
import (
"encoding/json"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
type HeadPointer struct {
// LastSealedBlockHash is the last fully-processed block
LastSealedBlockHash common.Hash
LastSealedBlockNum uint64
// Number of logs that have been verified since the LastSealedBlock.
// These logs are contained in the block that builds on top of the LastSealedBlock.
LogsSince uint32
}
// WithinRange checks if the given log, in the given block,
// is within range (i.e. before or equal to the head-pointer).
// This does not guarantee that the log exists.
func (ptr *HeadPointer) WithinRange(blockNum uint64, logIdx uint32) bool {
if ptr.LastSealedBlockHash == (common.Hash{}) {
return false // no block yet
}
return blockNum <= ptr.LastSealedBlockNum ||
(blockNum+1 == ptr.LastSealedBlockNum && logIdx < ptr.LogsSince)
}
func (ptr *HeadPointer) IsSealed(blockNum uint64) bool {
if ptr.LastSealedBlockHash == (common.Hash{}) {
return false // no block yet
}
return blockNum <= ptr.LastSealedBlockNum
}
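A few concrete evaluations of the two predicates above, using hypothetical pointer values and following the code exactly as written:

package heads

import "github.com/ethereum/go-ethereum/common"

// headPointerExamples is illustrative only; all values are made up.
func headPointerExamples() {
	ptr := HeadPointer{
		LastSealedBlockHash: common.Hash{0x01},
		LastSealedBlockNum:  10,
		LogsSince:           2,
	}
	_ = ptr.IsSealed(10)      // true:  block 10 is the last sealed block
	_ = ptr.IsSealed(11)      // false: block 11 has not been sealed yet
	_ = ptr.WithinRange(9, 7) // true:  any log in an already-sealed block is in range
	var empty HeadPointer
	_ = empty.IsSealed(5) // false: a zero hash means no block has been recorded yet
}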
// ChainHeads provides the serialization format for the current chain heads.
// The values here could be block numbers or just the index of entries in the log db.
// If they're log db entries, we can't detect if things changed because of a reorg though (if the logdb write succeeded and head update failed).
// So we probably need to store actual block IDs here... but then we don't have the block hash for every block in the log db.
// Only jumping the head forward on checkpoint blocks doesn't work though...
type ChainHeads struct {
Unsafe entrydb.EntryIdx `json:"localUnsafe"`
CrossUnsafe entrydb.EntryIdx `json:"crossUnsafe"`
LocalSafe entrydb.EntryIdx `json:"localSafe"`
CrossSafe entrydb.EntryIdx `json:"crossSafe"`
LocalFinalized entrydb.EntryIdx `json:"localFinalized"`
CrossFinalized entrydb.EntryIdx `json:"crossFinalized"`
Unsafe HeadPointer `json:"localUnsafe"`
CrossUnsafe HeadPointer `json:"crossUnsafe"`
LocalSafe HeadPointer `json:"localSafe"`
CrossSafe HeadPointer `json:"crossSafe"`
LocalFinalized HeadPointer `json:"localFinalized"`
CrossFinalized HeadPointer `json:"crossFinalized"`
}
type Heads struct {
......@@ -35,6 +60,26 @@ func (h *Heads) Get(id types.ChainID) ChainHeads {
if !ok {
return ChainHeads{}
}
// init to genesis
if chain.LocalFinalized == (HeadPointer{}) && chain.Unsafe.LastSealedBlockNum == 0 {
chain.LocalFinalized = chain.Unsafe
}
// Make sure the data is consistent
if chain.LocalSafe == (HeadPointer{}) {
chain.LocalSafe = chain.LocalFinalized
}
if chain.Unsafe == (HeadPointer{}) {
chain.Unsafe = chain.LocalSafe
}
if chain.CrossFinalized == (HeadPointer{}) && chain.LocalFinalized.LastSealedBlockNum == 0 {
chain.CrossFinalized = chain.LocalFinalized
}
if chain.CrossSafe == (HeadPointer{}) {
chain.CrossSafe = chain.CrossFinalized
}
if chain.CrossUnsafe == (HeadPointer{}) {
chain.CrossUnsafe = chain.CrossSafe
}
return chain
}
......@@ -50,7 +95,7 @@ func (h *Heads) Copy() *Heads {
return c
}
func (h Heads) MarshalJSON() ([]byte, error) {
func (h *Heads) MarshalJSON() ([]byte, error) {
data := make(map[hexutil.U256]ChainHeads)
for id, heads := range h.Chains {
data[hexutil.U256(id)] = heads
......
......@@ -3,38 +3,52 @@ package heads
import (
"encoding/json"
"fmt"
"math/rand" // nosemgrep
"testing"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
func TestHeads(t *testing.T) {
rng := rand.New(rand.NewSource(1234))
randHeadPtr := func() HeadPointer {
var h common.Hash
rng.Read(h[:])
return HeadPointer{
LastSealedBlockHash: h,
LastSealedBlockNum: rng.Uint64(),
LogsSince: rng.Uint32(),
}
}
t.Run("RoundTripViaJson", func(t *testing.T) {
heads := NewHeads()
heads.Put(types.ChainIDFromUInt64(3), ChainHeads{
Unsafe: 10,
CrossUnsafe: 9,
LocalSafe: 8,
CrossSafe: 7,
LocalFinalized: 6,
CrossFinalized: 5,
Unsafe: randHeadPtr(),
CrossUnsafe: randHeadPtr(),
LocalSafe: randHeadPtr(),
CrossSafe: randHeadPtr(),
LocalFinalized: randHeadPtr(),
CrossFinalized: randHeadPtr(),
})
heads.Put(types.ChainIDFromUInt64(9), ChainHeads{
Unsafe: 90,
CrossUnsafe: 80,
LocalSafe: 70,
CrossSafe: 60,
LocalFinalized: 50,
CrossFinalized: 40,
Unsafe: randHeadPtr(),
CrossUnsafe: randHeadPtr(),
LocalSafe: randHeadPtr(),
CrossSafe: randHeadPtr(),
LocalFinalized: randHeadPtr(),
CrossFinalized: randHeadPtr(),
})
heads.Put(types.ChainIDFromUInt64(4892497242424), ChainHeads{
Unsafe: 1000,
CrossUnsafe: 900,
LocalSafe: 800,
CrossSafe: 700,
LocalFinalized: 600,
CrossFinalized: 400,
Unsafe: randHeadPtr(),
CrossUnsafe: randHeadPtr(),
LocalSafe: randHeadPtr(),
CrossSafe: randHeadPtr(),
LocalFinalized: randHeadPtr(),
CrossFinalized: randHeadPtr(),
})
j, err := json.Marshal(heads)
......@@ -51,16 +65,16 @@ func TestHeads(t *testing.T) {
chainA := types.ChainIDFromUInt64(3)
chainB := types.ChainIDFromUInt64(4)
chainAOrigHeads := ChainHeads{
Unsafe: 1,
Unsafe: randHeadPtr(),
}
chainAModifiedHeads1 := ChainHeads{
Unsafe: 2,
Unsafe: randHeadPtr(),
}
chainAModifiedHeads2 := ChainHeads{
Unsafe: 4,
Unsafe: randHeadPtr(),
}
chainBModifiedHeads := ChainHeads{
Unsafe: 2,
Unsafe: randHeadPtr(),
}
heads := NewHeads()
......
......@@ -149,37 +149,10 @@ func (db *DB) updateEntryCountMetric() {
db.m.RecordDBEntryCount(db.store.Size())
}
func (db *DB) IteratorStartingAt(i entrydb.EntryIdx) (Iterator, error) {
func (db *DB) IteratorStartingAt(sealedNum uint64, logsSince uint32) (Iterator, error) {
db.rwLock.RLock()
defer db.rwLock.RUnlock()
if i > db.lastEntryContext.nextEntryIndex {
return nil, ErrFuture
}
// TODO(#12031): Workaround while we not have IteratorStartingAt(heads.HeadPointer):
// scroll back from the index, to find block info.
idx := i
for ; idx >= 0; i-- {
entry, err := db.store.Read(idx)
if err != nil {
if errors.Is(err, io.EOF) {
continue // traverse to when we did have blocks
}
return nil, err
}
if entry.Type() == entrydb.TypeSearchCheckpoint {
break
}
if idx == 0 {
return nil, fmt.Errorf("empty DB, no block entry, cannot start at %d", i)
}
}
iter := db.newIterator(idx)
for iter.NextIndex() < i {
if _, err := iter.next(); err != nil {
return nil, errors.New("failed to process back up to the head pointer")
}
}
return iter, nil
return db.newIteratorAt(sealedNum, logsSince)
}
// FindSealedBlock finds the requested block, to check if it exists,
......
package db
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
......@@ -18,173 +17,137 @@ const (
)
// SafetyChecker is an interface for checking the safety of a log entry
// and updating the local head for a chain.
// it maintains a consistent view between local and cross chain for a given safety level
type SafetyChecker interface {
LocalHeadForChain(chainID types.ChainID) entrydb.EntryIdx
CrossHeadForChain(chainID types.ChainID) entrydb.EntryIdx
Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool
Update(chain types.ChainID, index entrydb.EntryIdx) heads.OperationFn
Name() string
SafetyLevel() types.SafetyLevel
}
// unsafeChecker is a SafetyChecker that uses the unsafe head as the view into the database
type unsafeChecker struct {
chainsDB *ChainsDB
}
// safeChecker is a SafetyChecker that uses the safe head as the view into the database
type safeChecker struct {
chainsDB *ChainsDB
}
// finalizedChecker is a SafetyChecker that uses the finalized head as the view into the database
type finalizedChecker struct {
chainsDB *ChainsDB
LocalHead(chainID types.ChainID) heads.HeadPointer
CrossHead(chainID types.ChainID) heads.HeadPointer
CheckLocal(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error
CheckCross(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error
UpdateLocal(chain types.ChainID, pointer heads.HeadPointer) error
UpdateCross(chain types.ChainID, pointer heads.HeadPointer) error
String() string
LocalSafetyLevel() types.SafetyLevel
CrossSafetyLevel() types.SafetyLevel
}
// NewSafetyChecker creates a new SafetyChecker of the given type
func NewSafetyChecker(t types.SafetyLevel, chainsDB *ChainsDB) SafetyChecker {
switch t {
case Unsafe:
return &unsafeChecker{
chainsDB: chainsDB,
}
case Safe:
return &safeChecker{
chainsDB: chainsDB,
}
case Finalized:
return &finalizedChecker{
chainsDB: chainsDB,
}
default:
panic("unknown safety checker type")
}
}
// Name returns the safety checker type, using the same strings as the constants used in construction
func (c *unsafeChecker) Name() string {
return Unsafe
}
func (c *safeChecker) Name() string {
return Safe
}
func (c *finalizedChecker) Name() string {
return Finalized
}
// LocalHeadForChain returns the local head for the given chain
// based on the type of SafetyChecker
func (c *unsafeChecker) LocalHeadForChain(chainID types.ChainID) entrydb.EntryIdx {
heads := c.chainsDB.heads.Current().Get(chainID)
return heads.Unsafe
}
func (c *safeChecker) LocalHeadForChain(chainID types.ChainID) entrydb.EntryIdx {
heads := c.chainsDB.heads.Current().Get(chainID)
return heads.LocalSafe
}
func (c *finalizedChecker) LocalHeadForChain(chainID types.ChainID) entrydb.EntryIdx {
heads := c.chainsDB.heads.Current().Get(chainID)
return heads.LocalFinalized
}
// CrossHeadForChain returns the x-head for the given chain
// based on the type of SafetyChecker
func (c *unsafeChecker) CrossHeadForChain(chainID types.ChainID) entrydb.EntryIdx {
heads := c.chainsDB.heads.Current().Get(chainID)
return heads.CrossUnsafe
}
func (c *safeChecker) CrossHeadForChain(chainID types.ChainID) entrydb.EntryIdx {
heads := c.chainsDB.heads.Current().Get(chainID)
return heads.CrossSafe
}
func (c *finalizedChecker) CrossHeadForChain(chainID types.ChainID) entrydb.EntryIdx {
heads := c.chainsDB.heads.Current().Get(chainID)
return heads.CrossFinalized
}
func (c *unsafeChecker) SafetyLevel() types.SafetyLevel {
return types.CrossUnsafe
}
func (c *safeChecker) SafetyLevel() types.SafetyLevel {
return types.CrossSafe
}
func (c *finalizedChecker) SafetyLevel() types.SafetyLevel {
return types.CrossFinalized
return NewChecker(t, chainsDB)
}
// check checks if the log entry is safe, provided a local head for the chain
// it is used by the individual SafetyCheckers to determine if a log entry is safe
func check(
chainsDB *ChainsDB,
localHead entrydb.EntryIdx,
head heads.HeadPointer,
chain types.ChainID,
blockNum uint64,
logIdx uint32,
logHash common.Hash) bool {
logHash common.Hash) error {
// for the Check to be valid, the log must:
// exist at the blockNum and logIdx
// have a hash that matches the provided hash (implicit in the Contains call), and
// be less than or equal to the local head for the chain
index, err := chainsDB.logDBs[chain].Contains(blockNum, logIdx, logHash)
// 1. have the expected logHash at the indicated blockNum and logIdx
_, err := chainsDB.logDBs[chain].Contains(blockNum, logIdx, logHash)
if err != nil {
if errors.Is(err, logs.ErrFuture) {
return false // TODO(#12031)
}
if errors.Is(err, logs.ErrConflict) {
return false // TODO(#12031)
}
return false
return err
}
return index <= localHead
// 2. be within the range of the given head
if !head.WithinRange(blockNum, logIdx) {
return logs.ErrFuture
}
return nil
}
// Check checks if the log entry is safe, provided a local head for the chain
// it passes on the local head this checker is concerned with, along with its view of the database
func (c *unsafeChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool {
return check(c.chainsDB, c.LocalHeadForChain(chain), chain, blockNum, logIdx, logHash)
// checker is a composition of accessor and update functions for a given safety level.
// they implement the SafetyChecker interface.
// checkers can be made with NewChecker.
type checker struct {
chains *ChainsDB
localSafety types.SafetyLevel
crossSafety types.SafetyLevel
updateCross func(chain types.ChainID, pointer heads.HeadPointer) error
updateLocal func(chain types.ChainID, pointer heads.HeadPointer) error
localHead func(chain types.ChainID) heads.HeadPointer
crossHead func(chain types.ChainID) heads.HeadPointer
checkCross func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error
checkLocal func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error
}
func (c *safeChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool {
return check(c.chainsDB, c.LocalHeadForChain(chain), chain, blockNum, logIdx, logHash)
func (c *checker) String() string {
return fmt.Sprintf("%s+%s", c.localSafety.String(), c.crossSafety.String())
}
func (c *finalizedChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool {
return check(c.chainsDB, c.LocalHeadForChain(chain), chain, blockNum, logIdx, logHash)
func (c *checker) LocalSafetyLevel() types.SafetyLevel {
return c.localSafety
}
// Update creates an Operation that updates the x-head for the chain, given an index to set it to
func (c *unsafeChecker) Update(chain types.ChainID, index entrydb.EntryIdx) heads.OperationFn {
return func(heads *heads.Heads) error {
chainHeads := heads.Get(chain)
chainHeads.CrossUnsafe = index
heads.Put(chain, chainHeads)
return nil
}
func (c *checker) CrossSafetyLevel() types.SafetyLevel {
return c.crossSafety
}
func (c *safeChecker) Update(chain types.ChainID, index entrydb.EntryIdx) heads.OperationFn {
return func(heads *heads.Heads) error {
chainHeads := heads.Get(chain)
chainHeads.CrossSafe = index
heads.Put(chain, chainHeads)
return nil
}
func (c *checker) UpdateCross(chain types.ChainID, pointer heads.HeadPointer) error {
return c.updateCross(chain, pointer)
}
func (c *checker) UpdateLocal(chain types.ChainID, pointer heads.HeadPointer) error {
return c.updateLocal(chain, pointer)
}
func (c *checker) LocalHead(chain types.ChainID) heads.HeadPointer {
return c.localHead(chain)
}
func (c *checker) CrossHead(chain types.ChainID) heads.HeadPointer {
return c.crossHead(chain)
}
func (c *checker) CheckCross(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error {
return c.checkCross(chain, blockNum, logIdx, logHash)
}
func (c *checker) CheckLocal(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error {
return c.checkLocal(chain, blockNum, logIdx, logHash)
}
func (c *finalizedChecker) Update(chain types.ChainID, index entrydb.EntryIdx) heads.OperationFn {
return func(heads *heads.Heads) error {
chainHeads := heads.Get(chain)
chainHeads.CrossFinalized = index
heads.Put(chain, chainHeads)
return nil
func NewChecker(t types.SafetyLevel, c *ChainsDB) SafetyChecker {
// checkWith creates a check function bound to the given head-getter: the returned function checks a log against the head returned for that chain
checkWith := func(getHead func(chain types.ChainID) heads.HeadPointer) func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error {
return func(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) error {
return check(c, getHead(chain), chain, blockNum, logIdx, logHash)
}
}
switch t {
case Unsafe:
return &checker{
chains: c,
localSafety: types.Unsafe,
crossSafety: types.CrossUnsafe,
updateCross: c.heads.UpdateCrossUnsafe,
updateLocal: c.heads.UpdateLocalUnsafe,
crossHead: c.heads.CrossUnsafe,
localHead: c.heads.LocalUnsafe,
checkCross: checkWith(c.heads.CrossUnsafe),
checkLocal: checkWith(c.heads.LocalUnsafe),
}
case Safe:
return &checker{
chains: c,
localSafety: types.Safe,
crossSafety: types.CrossSafe,
updateCross: c.heads.UpdateCrossSafe,
updateLocal: c.heads.UpdateLocalSafe,
crossHead: c.heads.CrossSafe,
localHead: c.heads.LocalSafe,
checkCross: checkWith(c.heads.CrossSafe),
checkLocal: checkWith(c.heads.LocalSafe),
}
case Finalized:
return &checker{
chains: c,
localSafety: types.Finalized,
crossSafety: types.CrossFinalized,
updateCross: c.heads.UpdateCrossFinalized,
updateLocal: c.heads.UpdateLocalFinalized,
crossHead: c.heads.CrossFinalized,
localHead: c.heads.LocalFinalized,
checkCross: checkWith(c.heads.CrossFinalized),
checkLocal: checkWith(c.heads.LocalFinalized),
}
}
return &checker{}
}
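Usage keeps the same shape as before: callers construct a checker for a named safety level and ask it questions, as backend.go does above. A minimal illustrative helper (the helper name is an assumption; the nil-error-means-safe convention follows the check function above):

package db

import (
	"github.com/ethereum/go-ethereum/common"

	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)

// crossSafeAt is illustrative only: it builds the Safe-level checker and reports
// whether the given log is already cross-safe on the given chain.
func crossSafeAt(chainsDB *ChainsDB, chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool {
	checker := NewSafetyChecker(Safe, chainsDB)
	return checker.CheckCross(chain, blockNum, logIdx, logHash) == nil
}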
package db
/*
import (
"errors"
"testing"
......@@ -211,3 +212,4 @@ func TestCheck(t *testing.T) {
})
}
}
*/
......@@ -5,16 +5,17 @@ import (
"fmt"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-service/sources/caching"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/log"
)
// TODO(optimism#11032) Make these configurable and a sensible default
const epochPollInterval = 30 * time.Second
const epochPollInterval = 3 * time.Second
const pollInterval = 2 * time.Second
const trustRpc = false
const rpcKind = sources.RPCKindStandard
......@@ -25,6 +26,7 @@ type Metrics interface {
type Storage interface {
LogStorage
Heads() db.HeadsStorage
DatabaseRewinder
LatestBlockNum(chainID types.ChainID) (num uint64, ok bool)
}
......@@ -32,8 +34,9 @@ type Storage interface {
// ChainMonitor monitors a source L2 chain, retrieving the data required to populate the database and perform
// interop consolidation. It detects and notifies when reorgs occur.
type ChainMonitor struct {
log log.Logger
headMonitor *HeadMonitor
log log.Logger
headMonitor *HeadMonitor
chainProcessor *ChainProcessor
}
func NewChainMonitor(ctx context.Context, logger log.Logger, m Metrics, chainID types.ChainID, rpc string, client client.RPC, store Storage) (*ChainMonitor, error) {
......@@ -43,26 +46,26 @@ func NewChainMonitor(ctx context.Context, logger log.Logger, m Metrics, chainID
return nil, err
}
latest, ok := store.LatestBlockNum(chainID)
if !ok {
logger.Warn("")
}
// Create the log processor and fetcher
processLogs := newLogProcessor(chainID, store)
unsafeBlockProcessor := NewChainProcessor(logger, cl, chainID, processLogs, store)
startingHead := eth.L1BlockRef{
Number: latest,
}
// create head processors which only update the head
unsafeHeadProcessor := OnNewHead(chainID, store.Heads().UpdateLocalUnsafe)
safeHeadProcessor := OnNewHead(chainID, store.Heads().UpdateLocalSafe)
finalizedHeadProcessor := OnNewHead(chainID, store.Heads().UpdateLocalFinalized)
processLogs := newLogProcessor(chainID, store)
fetchReceipts := newLogFetcher(cl, processLogs)
unsafeBlockProcessor := NewChainProcessor(logger, cl, chainID, startingHead, fetchReceipts, store)
unsafeProcessors := []HeadProcessor{unsafeBlockProcessor, unsafeHeadProcessor}
safeProcessors := []HeadProcessor{safeHeadProcessor}
finalizedProcessors := []HeadProcessor{finalizedHeadProcessor}
unsafeProcessors := []HeadProcessor{unsafeBlockProcessor}
callback := newHeadUpdateProcessor(logger, unsafeProcessors, nil, nil)
callback := newHeadUpdateProcessor(logger, unsafeProcessors, safeProcessors, finalizedProcessors)
headMonitor := NewHeadMonitor(logger, epochPollInterval, cl, callback)
return &ChainMonitor{
log: logger,
headMonitor: headMonitor,
log: logger,
headMonitor: headMonitor,
chainProcessor: unsafeBlockProcessor,
}, nil
}
......@@ -72,6 +75,7 @@ func (c *ChainMonitor) Start() error {
}
func (c *ChainMonitor) Stop() error {
c.chainProcessor.Close()
return c.headMonitor.Stop()
}
......
......@@ -2,22 +2,31 @@ package source
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/log"
)
type BlockByNumberSource interface {
type Source interface {
L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error)
FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, gethtypes.Receipts, error)
}
type BlockProcessor interface {
ProcessBlock(ctx context.Context, block eth.L1BlockRef) error
type LogProcessor interface {
ProcessLogs(ctx context.Context, block eth.L1BlockRef, receipts gethtypes.Receipts) error
}
type DatabaseRewinder interface {
Rewind(chain types.ChainID, headBlockNum uint64) error
LatestBlockNum(chain types.ChainID) (num uint64, ok bool)
}
type BlockProcessorFn func(ctx context.Context, block eth.L1BlockRef) error
......@@ -29,58 +38,145 @@ func (fn BlockProcessorFn) ProcessBlock(ctx context.Context, block eth.L1BlockRe
// ChainProcessor is a HeadProcessor that fills in any skipped blocks between head update events.
// It ensures that, absent reorgs, every block in the chain is processed even if some head advancements are skipped.
type ChainProcessor struct {
log log.Logger
client BlockByNumberSource
chain types.ChainID
lastBlock eth.L1BlockRef
processor BlockProcessor
log log.Logger
client Source
chain types.ChainID
processor LogProcessor
rewinder DatabaseRewinder
// the last known head. May be 0 if not known.
lastHead atomic.Uint64
// channel with capacity of 1, full if there is work to do
newHead chan struct{}
// bool to indicate if calls are synchronous
synchronous bool
// channel with capacity of 1, to signal work complete if running in synchronous mode
out chan struct{}
// lifetime management of the chain processor
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
}
func NewChainProcessor(log log.Logger, client BlockByNumberSource, chain types.ChainID, startingHead eth.L1BlockRef, processor BlockProcessor, rewinder DatabaseRewinder) *ChainProcessor {
return &ChainProcessor{
func NewChainProcessor(log log.Logger, client Source, chain types.ChainID, processor LogProcessor, rewinder DatabaseRewinder) *ChainProcessor {
ctx, cancel := context.WithCancel(context.Background())
out := &ChainProcessor{
log: log,
client: client,
chain: chain,
lastBlock: startingHead,
processor: processor,
rewinder: rewinder,
newHead: make(chan struct{}, 1),
// default to synchronous because we want other processors to wait for this
// in the future we could make this async and have a separate mechanism which forwards the work signal to other processors
synchronous: true,
out: make(chan struct{}, 1),
ctx: ctx,
cancel: cancel,
}
out.wg.Add(1)
go out.worker()
return out
}
func (s *ChainProcessor) OnNewHead(ctx context.Context, head eth.L1BlockRef) {
s.log.Debug("Processing chain", "chain", s.chain, "head", head, "last", s.lastBlock)
if head.Number <= s.lastBlock.Number {
s.log.Info("head is not newer than last processed block", "head", head, "lastBlock", s.lastBlock)
return
func (s *ChainProcessor) nextNum() uint64 {
headNum, ok := s.rewinder.LatestBlockNum(s.chain)
if !ok {
return 0 // genesis. We could change this to start at a later block.
}
for s.lastBlock.Number+1 < head.Number {
s.log.Debug("Filling in skipped block", "chain", s.chain, "lastBlock", s.lastBlock, "head", head)
blockNum := s.lastBlock.Number + 1
nextBlock, err := s.client.L1BlockRefByNumber(ctx, blockNum)
if err != nil {
s.log.Error("Failed to fetch block info", "number", blockNum, "err", err)
return headNum + 1
}
func (s *ChainProcessor) worker() {
defer s.wg.Done()
delay := time.NewTicker(time.Second * 5)
for {
if s.ctx.Err() != nil { // check if we are closing down
return
}
if ok := s.processBlock(ctx, nextBlock); !ok {
target := s.nextNum()
if err := s.update(target); err != nil {
s.log.Error("Failed to process new block", "err", err)
// idle until next update trigger
} else if x := s.lastHead.Load(); target+1 <= x {
s.log.Debug("Continuing with next block",
"newTarget", target+1, "lastHead", x)
continue // instantly continue processing, no need to idle
} else {
s.log.Debug("Idling block-processing, reached latest block", "head", target)
}
if s.synchronous {
s.out <- struct{}{}
}
// await next time we process, or detect shutdown
select {
case <-s.ctx.Done():
delay.Stop()
return
case <-s.newHead:
s.log.Debug("Responding to new head signal")
continue
case <-delay.C:
s.log.Debug("Checking for updates")
continue
}
}
s.processBlock(ctx, head)
}
func (s *ChainProcessor) processBlock(ctx context.Context, block eth.L1BlockRef) bool {
if err := s.processor.ProcessBlock(ctx, block); err != nil {
s.log.Error("Failed to process block", "block", block, "err", err)
func (s *ChainProcessor) update(nextNum uint64) error {
ctx, cancel := context.WithTimeout(s.ctx, time.Second*10)
next, err := s.client.L1BlockRefByNumber(ctx, nextNum)
cancel()
if err != nil {
return fmt.Errorf("failed to fetch next block: %w", err)
}
// Try and fetch the receipts
ctx, cancel = context.WithTimeout(s.ctx, time.Second*10)
_, receipts, err := s.client.FetchReceipts(ctx, next.Hash)
cancel()
if err != nil {
return fmt.Errorf("failed to fetch receipts of block: %w", err)
}
if err := s.processor.ProcessLogs(ctx, next, receipts); err != nil {
s.log.Error("Failed to process block", "block", next, "err", err)
if next.Number == 0 { // cannot rewind genesis
return nil
}
// Try to rewind the database to the previous block to remove any logs from this block that were written
if err := s.rewinder.Rewind(s.chain, s.lastBlock.Number); err != nil {
if err := s.rewinder.Rewind(s.chain, nextNum-1); err != nil {
// If any logs were written, our next attempt to write will fail and we'll retry this rewind.
// If no logs were written successfully then the rewind wouldn't have done anything anyway.
s.log.Error("Failed to rewind after error processing block", "block", block, "err", err)
s.log.Error("Failed to rewind after error processing block", "block", next, "err", err)
}
return false // Don't update the last processed block so we will retry on next update
}
s.lastBlock = block
return true
return nil
}
func (s *ChainProcessor) OnNewHead(ctx context.Context, head eth.L1BlockRef) error {
// update the latest target
s.lastHead.Store(head.Number)
// signal that we have something to process
select {
case s.newHead <- struct{}{}:
default:
// already requested an update
}
// if we are running synchronously, wait for the work to complete
if s.synchronous {
<-s.out
}
return nil
}
func (s *ChainProcessor) Close() {
s.cancel()
s.wg.Wait()
}
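The processor above replaces per-head callbacks with a single worker goroutine: OnNewHead only stores the latest target in an atomic value and pokes a capacity-1 channel, so bursts of head updates coalesce into one wake-up, while a ticker provides a fallback poll. A stripped-down sketch of that pattern in isolation (names, durations, and the printed "processing" step are illustrative, not from the diff):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// worker drains a capacity-1 "kick" channel: many signals sent while it is busy
// collapse into a single pending wake-up.
type worker struct {
	target atomic.Uint64
	kick   chan struct{}
	done   chan struct{}
	wg     sync.WaitGroup
}

func newWorker() *worker {
	w := &worker{kick: make(chan struct{}, 1), done: make(chan struct{})}
	w.wg.Add(1)
	go w.run()
	return w
}

func (w *worker) run() {
	defer w.wg.Done()
	poll := time.NewTicker(50 * time.Millisecond) // fallback poll, like the 5s ticker above
	defer poll.Stop()
	for {
		fmt.Println("processing up to", w.target.Load())
		select {
		case <-w.done:
			return
		case <-w.kick: // woken by an OnNewHead-style signal
		case <-poll.C: // or by the periodic fallback
		}
	}
}

// onNewHead mirrors ChainProcessor.OnNewHead: record the target, signal without blocking.
func (w *worker) onNewHead(n uint64) {
	w.target.Store(n)
	select {
	case w.kick <- struct{}{}:
	default: // a wake-up is already pending; coalesce
	}
}

func (w *worker) close() {
	close(w.done)
	w.wg.Wait()
}

func main() {
	w := newWorker()
	for i := uint64(1); i <= 3; i++ {
		w.onNewHead(i)
	}
	time.Sleep(100 * time.Millisecond)
	w.close()
}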
package source
/* TODO
import (
"context"
"errors"
......@@ -22,7 +23,7 @@ func TestUnsafeBlocksStage(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
client := &stubBlockByNumberSource{}
processor := &stubBlockProcessor{}
stage := NewChainProcessor(logger, client, processorChainID, eth.L1BlockRef{Number: 100}, processor, &stubRewinder{})
stage := NewChainProcessor(logger, client, processorChainID, processor, &stubRewinder{})
stage.OnNewHead(ctx, eth.L1BlockRef{Number: 100})
stage.OnNewHead(ctx, eth.L1BlockRef{Number: 99})
......@@ -185,3 +186,4 @@ func (s *stubRewinder) Rewind(chainID types.ChainID, headBlockNum uint64) error
s.rewindCalled = true
return nil
}
*/
package source
import (
"context"
"fmt"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
type LogSource interface {
FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error)
}
type ReceiptProcessor interface {
ProcessLogs(ctx context.Context, block eth.L1BlockRef, rcpts types.Receipts) error
}
type ReceiptProcessorFn func(ctx context.Context, block eth.L1BlockRef, rcpts types.Receipts) error
func (r ReceiptProcessorFn) ProcessLogs(ctx context.Context, block eth.L1BlockRef, rcpts types.Receipts) error {
return r(ctx, block, rcpts)
}
type logFetcher struct {
client LogSource
processor ReceiptProcessor
}
func newLogFetcher(client LogSource, processor ReceiptProcessor) *logFetcher {
return &logFetcher{
client: client,
processor: processor,
}
}
var _ BlockProcessor = (*logFetcher)(nil)
func (l *logFetcher) ProcessBlock(ctx context.Context, block eth.L1BlockRef) error {
_, rcpts, err := l.client.FetchReceipts(ctx, block.Hash)
if err != nil {
return fmt.Errorf("failed to fetch receipts for block %v: %w", block, err)
}
return l.processor.ProcessLogs(ctx, block, rcpts)
}
package source
import (
"context"
"errors"
"testing"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/require"
)
func TestFetchLogs(t *testing.T) {
ctx := context.Background()
rcpts := types.Receipts{&types.Receipt{Type: 3}, &types.Receipt{Type: 4}}
t.Run("Success", func(t *testing.T) {
client := &stubLogSource{
rcpts: rcpts,
}
var processed []types.Receipts
processor := ReceiptProcessorFn(func(ctx context.Context, block eth.L1BlockRef, rcpts types.Receipts) error {
processed = append(processed, rcpts)
return nil
})
fetcher := newLogFetcher(client, processor)
block := eth.L1BlockRef{Number: 11, Hash: common.Hash{0xaa}}
err := fetcher.ProcessBlock(ctx, block)
require.NoError(t, err)
require.Equal(t, []types.Receipts{rcpts}, processed)
})
t.Run("ReceiptFetcherError", func(t *testing.T) {
client := &stubLogSource{
err: errors.New("boom"),
}
processor := ReceiptProcessorFn(func(ctx context.Context, block eth.L1BlockRef, rcpts types.Receipts) error {
t.Fatal("should not be called")
return nil
})
fetcher := newLogFetcher(client, processor)
block := eth.L1BlockRef{Number: 11, Hash: common.Hash{0xaa}}
err := fetcher.ProcessBlock(ctx, block)
require.ErrorIs(t, err, client.err)
})
t.Run("ProcessorError", func(t *testing.T) {
expectedErr := errors.New("boom")
client := &stubLogSource{
rcpts: rcpts,
}
processor := ReceiptProcessorFn(func(ctx context.Context, block eth.L1BlockRef, rcpts types.Receipts) error {
return expectedErr
})
fetcher := newLogFetcher(client, processor)
block := eth.L1BlockRef{Number: 11, Hash: common.Hash{0xaa}}
err := fetcher.ProcessBlock(ctx, block)
require.ErrorIs(t, err, expectedErr)
})
}
type stubLogSource struct {
err error
rcpts types.Receipts
}
func (s *stubLogSource) FetchReceipts(_ context.Context, _ common.Hash) (eth.BlockInfo, types.Receipts, error) {
if s.err != nil {
return nil, nil, s.err
}
return nil, s.rcpts, nil
}
......@@ -3,18 +3,21 @@ package source
import (
"context"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
type HeadProcessor interface {
OnNewHead(ctx context.Context, head eth.L1BlockRef)
OnNewHead(ctx context.Context, head eth.L1BlockRef) error
}
type HeadProcessorFn func(ctx context.Context, head eth.L1BlockRef)
type HeadProcessorFn func(ctx context.Context, head eth.L1BlockRef) error
func (f HeadProcessorFn) OnNewHead(ctx context.Context, head eth.L1BlockRef) {
f(ctx, head)
func (f HeadProcessorFn) OnNewHead(ctx context.Context, head eth.L1BlockRef) error {
return f(ctx, head)
}
// headUpdateProcessor handles head update events and routes them to the appropriate handlers
......@@ -37,19 +40,37 @@ func newHeadUpdateProcessor(log log.Logger, unsafeProcessors []HeadProcessor, sa
func (n *headUpdateProcessor) OnNewUnsafeHead(ctx context.Context, block eth.L1BlockRef) {
n.log.Debug("New unsafe head", "block", block)
for _, processor := range n.unsafeProcessors {
processor.OnNewHead(ctx, block)
if err := processor.OnNewHead(ctx, block); err != nil {
n.log.Error("unsafe-head processing failed", "err", err)
}
}
}
func (n *headUpdateProcessor) OnNewSafeHead(ctx context.Context, block eth.L1BlockRef) {
n.log.Debug("New safe head", "block", block)
for _, processor := range n.safeProcessors {
processor.OnNewHead(ctx, block)
if err := processor.OnNewHead(ctx, block); err != nil {
n.log.Error("safe-head processing failed", "err", err)
}
}
}
func (n *headUpdateProcessor) OnNewFinalizedHead(ctx context.Context, block eth.L1BlockRef) {
n.log.Debug("New finalized head", "block", block)
for _, processor := range n.finalizedProcessors {
processor.OnNewHead(ctx, block)
if err := processor.OnNewHead(ctx, block); err != nil {
n.log.Error("finalized-head processing failed", "err", err)
}
}
}
// OnNewHead is a util function to turn a head-signal processor into head-pointer updater
func OnNewHead(id types.ChainID, apply func(id types.ChainID, v heads.HeadPointer) error) HeadProcessorFn {
return func(ctx context.Context, head eth.L1BlockRef) error {
return apply(id, heads.HeadPointer{
LastSealedBlockHash: head.Hash,
LastSealedBlockNum: head.Number,
LogsSince: 0,
})
}
}
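This helper is what lets chain_monitor.go (above) turn plain unsafe/safe/finalized head signals into local head-pointer writes. A small illustrative wiring; the function name and the *heads.HeadTracker argument are assumptions of the sketch, chosen because HeadTracker implements the update methods shown earlier in this diff:

package source

import (
	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
	"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)

// wireLocalHeadProcessors is illustrative only: it mirrors the chain_monitor.go wiring,
// binding each head signal to the matching local head-pointer update for one chain.
func wireLocalHeadProcessors(chainID types.ChainID, tracker *heads.HeadTracker) (unsafeProc, safeProc, finalizedProc HeadProcessor) {
	return OnNewHead(chainID, tracker.UpdateLocalUnsafe),
		OnNewHead(chainID, tracker.UpdateLocalSafe),
		OnNewHead(chainID, tracker.UpdateLocalFinalized)
}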
......@@ -16,8 +16,9 @@ func TestHeadUpdateProcessor(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
processed := make([]eth.L1BlockRef, 3)
makeProcessor := func(idx int) HeadProcessor {
return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) {
return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) error {
processed[idx] = head
return nil
})
}
headUpdates := newHeadUpdateProcessor(logger, []HeadProcessor{makeProcessor(0), makeProcessor(1), makeProcessor(2)}, nil, nil)
......@@ -30,8 +31,9 @@ func TestHeadUpdateProcessor(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
processed := make([]eth.L1BlockRef, 3)
makeProcessor := func(idx int) HeadProcessor {
return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) {
return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) error {
processed[idx] = head
return nil
})
}
headUpdates := newHeadUpdateProcessor(logger, nil, []HeadProcessor{makeProcessor(0), makeProcessor(1), makeProcessor(2)}, nil)
......@@ -44,8 +46,9 @@ func TestHeadUpdateProcessor(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
processed := make([]eth.L1BlockRef, 3)
makeProcessor := func(idx int) HeadProcessor {
return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) {
return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) error {
processed[idx] = head
return nil
})
}
headUpdates := newHeadUpdateProcessor(logger, nil, nil, []HeadProcessor{makeProcessor(0), makeProcessor(1), makeProcessor(2)})
......
......@@ -73,7 +73,7 @@ func (lvl SafetyLevel) String() string {
func (lvl SafetyLevel) Valid() bool {
switch lvl {
case Finalized, Safe, CrossUnsafe, Unsafe:
case CrossFinalized, Finalized, Safe, CrossUnsafe, Unsafe:
return true
default:
return false
......