Commit 745b251d authored by protolambda, committed by GitHub

op-supervisor: cleanup, refactor, local-safe info from op-node (#12427)

* op-supervisor: cleanup, refactor to take local-safe info from op-node

* Refactor ChainProcessor Worker

* remove unneeded err check

* semgrep

---------
Co-authored-by: axelKingsley <axel.kingsley@gmail.com>
parent 77289937
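
The substance of the refactor: the supervisor no longer derives local-safe data itself, it accepts it from op-node through the new Update methods on the backend. A minimal sketch of the reporting flow, assuming a backend wired up as in this diff; the helper itself is illustrative and not part of the commit:

// reportToSupervisor is a hypothetical op-node-side helper pushing safety
// updates into the supervisor backend added in this commit.
func reportToSupervisor(su *SupervisorBackend, chainID types.ChainID,
	newHead, derivedFrom, lastDerived eth.BlockRef) error {
	// new unsafe head: the chain processor will fetch and index its logs
	if err := su.UpdateLocalUnsafe(chainID, newHead); err != nil {
		return err
	}
	// local-safe progress: which L1 block the latest L2 block was derived from
	return su.UpdateLocalSafe(chainID, derivedFrom, lastDerived)
}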
@@ -2,7 +2,6 @@ package interop
import (
"context"
"fmt"
"math/big"
"testing"
"time"
@@ -86,14 +85,12 @@ func TestInteropTrivial(t *testing.T) {
require.Equal(t, expectedBalance, bobBalance)
s2.DeployEmitterContract(chainA, "Alice")
rec := s2.EmitData(chainA, "Alice", "0x1234567890abcdef")
fmt.Println("Result of emitting event:", rec)
s2.DeployEmitterContract(chainB, "Alice")
rec = s2.EmitData(chainB, "Alice", "0x1234567890abcdef")
for i := 0; i < 1; i++ {
s2.EmitData(chainA, "Alice", "0x1234567890abcdef")
fmt.Println("Result of emitting event:", rec)
s2.EmitData(chainB, "Alice", "0x1234567890abcdef")
}
time.Sleep(60 * time.Second)
......
@@ -5,7 +5,6 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/eth"
@@ -68,7 +67,12 @@ func (cl *SupervisorClient) AddL2RPC(
func (cl *SupervisorClient) UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error) {
var result types.ReferenceView
err := cl.client.CallContext(ctx, &result, "supervisor_unsafeView", (*hexutil.U256)(&chainID), unsafe)
err := cl.client.CallContext(
ctx,
&result,
"supervisor_unsafeView",
chainID,
unsafe)
if err != nil {
return types.ReferenceView{}, fmt.Errorf("failed to share unsafe block view %s (chain %s): %w", unsafe, chainID, err)
}
@@ -77,7 +81,12 @@ func (cl *SupervisorClient) UnsafeView(ctx context.Context, chainID types.ChainI
func (cl *SupervisorClient) SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) {
var result types.ReferenceView
err := cl.client.CallContext(ctx, &result, "supervisor_safeView", (*hexutil.U256)(&chainID), safe)
err := cl.client.CallContext(
ctx,
&result,
"supervisor_safeView",
chainID,
safe)
if err != nil {
return types.ReferenceView{}, fmt.Errorf("failed to share safe block view %s (chain %s): %w", safe, chainID, err)
}
......
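
A hedged usage sketch of the updated client methods; the wrapper is illustrative, and a zero ReferenceView is assumed here to mean "no prior view to reconcile" (ReferenceView carries Local and Cross block IDs, per the backend changes later in this diff):

// checkUnsafeView is a hypothetical helper querying the supervisor's unsafe view.
func checkUnsafeView(ctx context.Context, cl *SupervisorClient, chainID types.ChainID) error {
	view, err := cl.UnsafeView(ctx, chainID, types.ReferenceView{})
	if err != nil {
		return err
	}
	fmt.Println("local:", view.Local, "cross:", view.Cross)
	return nil
}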
@@ -4,21 +4,22 @@ import (
"context"
"errors"
"fmt"
"io"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/dial"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-supervisor/config"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/source"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/processors"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/frontend"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
@@ -29,13 +30,21 @@ type SupervisorBackend struct {
m Metrics
dataDir string
chainMonitors map[types.ChainID]*source.ChainMonitor
db *db.ChainsDB
// RW lock to avoid concurrent map mutations.
// Read = any chain may be used and mutated.
// Write = set of chains is changing.
mu sync.RWMutex
// db holds on to the DB indices for each chain
db *db.ChainsDB
// chainProcessors are notified of new unsafe blocks, and add the unsafe log events data into the events DB
chainProcessors map[types.ChainID]*processors.ChainProcessor
}
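
The lock convention documented above recurs in every method below; condensed into a sketch with hypothetical method names for illustration:

// read path: the set of chains is stable, individual chains may be used and mutated
func (su *SupervisorBackend) useChain(id types.ChainID) error {
	su.mu.RLock()
	defer su.mu.RUnlock()
	proc, ok := su.chainProcessors[id]
	if !ok {
		return db.ErrUnknownChain
	}
	_ = proc // operate on the processor under the read lock
	return nil
}

// write path: the map itself changes, so take the exclusive lock
func (su *SupervisorBackend) addChain(id types.ChainID, p *processors.ChainProcessor) {
	su.mu.Lock()
	defer su.mu.Unlock()
	su.chainProcessors[id] = p
}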
var _ frontend.Backend = (*SupervisorBackend)(nil)
var _ io.Closer = (*SupervisorBackend)(nil)
var errAlreadyStopped = errors.New("already stopped")
func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg *config.Config) (*SupervisorBackend, error) {
// attempt to prepare the data directory
@@ -44,18 +53,18 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg
}
// create the chains db
db := db.NewChainsDB(map[types.ChainID]db.LogStorage{}, logger)
chainsDB := db.NewChainsDB(logger)
// create an empty map of chain monitors
chainMonitors := make(map[types.ChainID]*source.ChainMonitor, len(cfg.L2RPCs))
chainProcessors := make(map[types.ChainID]*processors.ChainProcessor, len(cfg.L2RPCs))
// create the supervisor backend
super := &SupervisorBackend{
logger: logger,
m: m,
dataDir: cfg.Datadir,
chainMonitors: chainMonitors,
db: db,
logger: logger,
m: m,
dataDir: cfg.Datadir,
chainProcessors: chainProcessors,
db: chainsDB,
}
// from the RPC strings, have the supervisor backend create a chain monitor
@@ -72,9 +81,9 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg
// addFromRPC adds a chain processor to the supervisor backend from an rpc endpoint
// it does not expect to be called after the backend has been started
// the shouldStart flag is now ignored; processor lifecycle is managed by the backend
func (su *SupervisorBackend) addFromRPC(ctx context.Context, logger log.Logger, rpc string, shouldStart bool) error {
func (su *SupervisorBackend) addFromRPC(ctx context.Context, logger log.Logger, rpc string, _ bool) error {
// create the rpc client, which yields the chain id
rpcClient, chainID, err := createRpcClient(ctx, logger, rpc)
rpcClient, chainID, err := clientForL2(ctx, logger, rpc)
if err != nil {
return err
}
@@ -89,25 +98,29 @@ func (su *SupervisorBackend) addFromRPC(ctx context.Context, logger log.Logger,
if err != nil {
return fmt.Errorf("failed to create logdb for chain %v at %v: %w", chainID, path, err)
}
if su.chainMonitors[chainID] != nil {
if su.chainProcessors[chainID] != nil {
return fmt.Errorf("chain monitor for chain %v already exists", chainID)
}
monitor, err := source.NewChainMonitor(ctx, logger, cm, chainID, rpc, rpcClient, su.db)
// create a client like the monitor would have
cl, err := processors.NewEthClient(
ctx,
logger,
cm,
rpc,
rpcClient, 2*time.Second,
false,
sources.RPCKindStandard)
if err != nil {
return fmt.Errorf("failed to create monitor for rpc %v: %w", rpc, err)
}
// start the monitor if requested
if shouldStart {
if err := monitor.Start(); err != nil {
return fmt.Errorf("failed to start monitor for rpc %v: %w", rpc, err)
}
return err
}
su.chainMonitors[chainID] = monitor
logProcessor := processors.NewLogProcessor(chainID, su.db)
chainProcessor := processors.NewChainProcessor(logger, cl, chainID, logProcessor, su.db)
su.chainProcessors[chainID] = chainProcessor
su.db.AddLogDB(chainID, logDB)
return nil
}
func createRpcClient(ctx context.Context, logger log.Logger, rpc string) (client.RPC, types.ChainID, error) {
func clientForL2(ctx context.Context, logger log.Logger, rpc string) (client.RPC, types.ChainID, error) {
ethClient, err := dial.DialEthClientWithTimeout(ctx, 10*time.Second, logger, rpc)
if err != nil {
return nil, types.ChainID{}, fmt.Errorf("failed to connect to rpc %v: %w", rpc, err)
@@ -120,6 +133,9 @@ func createRpcClient(ctx context.Context, logger log.Logger, rpc string) (client
}
func (su *SupervisorBackend) Start(ctx context.Context) error {
su.mu.Lock()
defer su.mu.Unlock()
// ensure we only start once
if !su.started.CompareAndSwap(false, true) {
return errors.New("already started")
@@ -129,68 +145,66 @@ func (su *SupervisorBackend) Start(ctx context.Context) error {
if err := su.db.ResumeFromLastSealedBlock(); err != nil {
return fmt.Errorf("failed to resume chains db: %w", err)
}
// start chain monitors
for _, monitor := range su.chainMonitors {
if err := monitor.Start(); err != nil {
return fmt.Errorf("failed to start chain monitor: %w", err)
}
}
// TODO(#12423): init background processors, de-dup with constructor
return nil
}
var errAlreadyStopped = errors.New("already stopped")
func (su *SupervisorBackend) Stop(ctx context.Context) error {
su.mu.Lock()
defer su.mu.Unlock()
if !su.started.CompareAndSwap(true, false) {
return errAlreadyStopped
}
// collect errors from stopping chain monitors
var errs error
for _, monitor := range su.chainMonitors {
if err := monitor.Stop(); err != nil {
errs = errors.Join(errs, fmt.Errorf("failed to stop chain monitor: %w", err))
}
// close all processors
for id, processor := range su.chainProcessors {
su.logger.Info("stopping chain processor", "chainID", id)
processor.Close()
}
// close the database
if err := su.db.Close(); err != nil {
errs = errors.Join(errs, fmt.Errorf("failed to close database: %w", err))
}
return errs
}
func (su *SupervisorBackend) Close() error {
// TODO(protocol-quest#288): close logdb of all chains
return nil
clear(su.chainProcessors)
// close the databases
return su.db.Close()
}
// AddL2RPC adds a new L2 chain to the supervisor backend
// the new chain starts processing immediately, as the backend is assumed to already be running
func (su *SupervisorBackend) AddL2RPC(ctx context.Context, rpc string) error {
su.mu.Lock()
defer su.mu.Unlock()
// start the processor immediately, as the backend is assumed to already be running
return su.addFromRPC(ctx, su.logger, rpc, true)
}
// Query methods
// ----------------------------
func (su *SupervisorBackend) CheckMessage(identifier types.Identifier, payloadHash common.Hash) (types.SafetyLevel, error) {
su.mu.RLock()
defer su.mu.RUnlock()
chainID := identifier.ChainID
blockNum := identifier.BlockNumber
logIdx := identifier.LogIndex
_, err := su.db.Check(chainID, blockNum, uint32(logIdx), payloadHash)
if errors.Is(err, logs.ErrFuture) {
if errors.Is(err, entrydb.ErrFuture) {
return types.LocalUnsafe, nil
}
if errors.Is(err, logs.ErrConflict) {
if errors.Is(err, entrydb.ErrConflict) {
return types.Invalid, nil
}
if err != nil {
return types.Invalid, fmt.Errorf("failed to check log: %w", err)
}
safest := su.db.Safest(chainID, blockNum, uint32(logIdx))
return safest, nil
return su.db.Safest(chainID, blockNum, uint32(logIdx))
}
func (su *SupervisorBackend) CheckMessages(
messages []types.Message,
minSafety types.SafetyLevel) error {
su.mu.RLock()
defer su.mu.RUnlock()
for _, msg := range messages {
safety, err := su.CheckMessage(msg.Identifier, msg.PayloadHash)
if err != nil {
@@ -206,32 +220,85 @@ func (su *SupervisorBackend) CheckMessages(
return nil
}
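
A caller-side sketch of the bulk check; the wrapper is illustrative, while Message, Identifier and the safety levels are the types used above:

// requireCrossSafe builds one message and requires it to be at least cross-safe.
func requireCrossSafe(su *SupervisorBackend, id types.Identifier, payloadHash common.Hash) error {
	msgs := []types.Message{{Identifier: id, PayloadHash: payloadHash}}
	return su.CheckMessages(msgs, types.CrossSafe)
}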
// CheckBlock checks if the block is safe according to the safety level
// The block is considered safe if all logs in the block are safe
// this is decided by finding the last log in the block and checking the safety of that log
func (su *SupervisorBackend) CheckBlock(chainID *hexutil.U256, blockHash common.Hash, blockNumber hexutil.Uint64) (types.SafetyLevel, error) {
// find the last log index in the block
id := eth.BlockID{Hash: blockHash, Number: uint64(blockNumber)}
_, err := su.db.FindSealedBlock(types.ChainID(*chainID), id)
if errors.Is(err, logs.ErrFuture) {
return types.LocalUnsafe, nil
func (su *SupervisorBackend) UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error) {
su.mu.RLock()
defer su.mu.RUnlock()
head, err := su.db.LocalUnsafe(chainID)
if err != nil {
return types.ReferenceView{}, fmt.Errorf("failed to get local-unsafe head: %w", err)
}
if errors.Is(err, logs.ErrConflict) {
return types.Invalid, nil
cross, err := su.db.CrossUnsafe(chainID)
if err != nil {
return types.ReferenceView{}, fmt.Errorf("failed to get cross-unsafe head: %w", err)
}
// TODO(#11693): check `unsafe` input to detect reorg conflicts
return types.ReferenceView{
Local: head.ID(),
Cross: cross.ID(),
}, nil
}
func (su *SupervisorBackend) SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) {
su.mu.RLock()
defer su.mu.RUnlock()
_, localSafe, err := su.db.LocalSafe(chainID)
if err != nil {
return types.ReferenceView{}, fmt.Errorf("failed to get local-safe head: %w", err)
}
_, crossSafe, err := su.db.CrossSafe(chainID)
if err != nil {
su.logger.Error("failed to scan block", "err", err)
return "", err
return types.ReferenceView{}, fmt.Errorf("failed to get cross-safe head: %w", err)
}
// TODO(#11693): check `safe` input to detect reorg conflicts
return types.ReferenceView{
Local: localSafe.ID(),
Cross: crossSafe.ID(),
}, nil
}
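
An illustrative invariant on the returned views: for any chain the cross head should trail (or equal) the local head. This sketch is a consumer-side sanity check under that assumption, not something this commit enforces:

func sanityCheckSafeView(ctx context.Context, su *SupervisorBackend, chainID types.ChainID) error {
	view, err := su.SafeView(ctx, chainID, types.ReferenceView{})
	if err != nil {
		return err
	}
	// cross-safe can only lag local-safe, never lead it
	if view.Cross.Number > view.Local.Number {
		return fmt.Errorf("inconsistent safe view: cross %s ahead of local %s", view.Cross, view.Local)
	}
	return nil
}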
func (su *SupervisorBackend) Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) {
su.mu.RLock()
defer su.mu.RUnlock()
return su.db.Finalized(chainID)
}
func (su *SupervisorBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockID, err error) {
su.mu.RLock()
defer su.mu.RUnlock()
return su.db.DerivedFrom(chainID, derived)
}
// Update methods
// ----------------------------
func (su *SupervisorBackend) UpdateLocalUnsafe(chainID types.ChainID, head eth.BlockRef) error {
su.mu.RLock()
defer su.mu.RUnlock()
ch, ok := su.chainProcessors[chainID]
if !ok {
return db.ErrUnknownChain
}
safest := su.db.Safest(types.ChainID(*chainID), uint64(blockNumber), 0)
return safest, nil
return ch.OnNewHead(head)
}
func (su *SupervisorBackend) UpdateLocalSafe(chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error {
su.mu.RLock()
defer su.mu.RUnlock()
return su.db.UpdateLocalSafe(chainID, derivedFrom, lastDerived)
}
func (su *SupervisorBackend) DerivedFrom(
ctx context.Context,
chainID types.ChainID,
blockHash common.Hash,
blockNumber uint64) (eth.BlockRef, error) {
// TODO(#12358): attach to backend
return eth.BlockRef{}, nil
func (su *SupervisorBackend) UpdateFinalizedL1(chainID types.ChainID, finalized eth.BlockRef) error {
su.mu.RLock()
defer su.mu.RUnlock()
return su.db.UpdateFinalizedL1(finalized)
}
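
Of the three update paths above, only UpdateLocalUnsafe is routed through a chain processor; the safe and finalized updates write straight to the DB. A sketch of how L1 finality then surfaces as L2 finality, using only names from this diff (the wrapper is illustrative):

// finalityRoundTrip feeds in L1 finality, then reads back the derived L2 finalized block.
func finalityRoundTrip(ctx context.Context, su *SupervisorBackend,
	chainID types.ChainID, finalizedL1 eth.BlockRef) (eth.BlockID, error) {
	if err := su.UpdateFinalizedL1(chainID, finalizedL1); err != nil {
		return eth.BlockID{}, err
	}
	// errors here are expected until enough derived-from data exists
	// to translate L1 finality into an L2 block
	return su.Finalized(ctx, chainID)
}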
@@ -4,20 +4,17 @@ import (
"errors"
"fmt"
"io"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/safety"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
var (
ErrUnknownChain = errors.New("unknown chain")
)
var ErrUnknownChain = errors.New("unknown chain")
type LogStorage interface {
io.Closer
@@ -31,58 +28,92 @@ type LogStorage interface {
LatestSealedBlockNum() (n uint64, ok bool)
// FindSealedBlock finds the requested block, to check if it exists,
// returning the next index after it where things continue from.
// returns ErrFuture if the block is too new to be able to tell
// returns ErrDifferent if the known block does not match
FindSealedBlock(block eth.BlockID) (nextEntry entrydb.EntryIdx, err error)
// FindSealedBlock finds the requested block by number, to check if it exists,
// returning the block seal if it was found.
// returns ErrFuture if the block is too new to be able to tell.
FindSealedBlock(number uint64) (block types.BlockSeal, err error)
IteratorStartingAt(sealedNum uint64, logsSince uint32) (logs.Iterator, error)
// returns ErrConflict if the log does not match the canonical chain.
// returns ErrFuture if the log is out of reach.
// returns nil if the log is known and matches the canonical chain.
Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (nextIndex entrydb.EntryIdx, err error)
// Contains returns no error iff the specified logHash is recorded in the specified blockNum and logIdx.
// If the log is out of reach, then ErrFuture is returned.
// If the log is determined to conflict with the canonical chain, then ErrConflict is returned.
// logIdx is the index of the log in the array of all logs in the block.
// This can be used to check the validity of cross-chain interop events.
// The block-seal of the blockNum block, that the log was included in, is returned.
// This seal may be fully zeroed, without error, if the block isn't fully known yet.
Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (includedIn types.BlockSeal, err error)
}
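
The Contains contract above pairs with the entrydb sentinel errors introduced later in this diff; a hedged consumer sketch, mirroring the mapping CheckMessage uses in the backend:

// classify is a hypothetical consumer of LogStorage.Contains.
func classify(logDB LogStorage, blockNum uint64, logIdx uint32, logHash common.Hash) types.SafetyLevel {
	_, err := logDB.Contains(blockNum, logIdx, logHash)
	switch {
	case errors.Is(err, entrydb.ErrFuture):
		return types.LocalUnsafe // too new to verify yet
	case errors.Is(err, entrydb.ErrConflict):
		return types.Invalid // contradicts the canonical chain
	case err != nil:
		return types.Invalid // storage failure; treated as invalid here
	default:
		return types.LocalUnsafe // canonical: at least local-unsafe; Safest picks the exact level
	}
}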
type LocalDerivedFromStorage interface {
Last() (derivedFrom eth.BlockRef, derived eth.BlockRef, err error)
AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error
LastDerived(derivedFrom eth.BlockID) (derived eth.BlockID, err error)
DerivedFrom(derived eth.BlockID) (derivedFrom eth.BlockID, err error)
}
type CrossDerivedFromStorage interface {
LocalDerivedFromStorage
// This will start to differ with reorg support
}
var _ LogStorage = (*logs.DB)(nil)
// ChainsDB is a database that stores logs and heads for multiple chains.
// ChainsDB is a database that stores logs and derived-from data for multiple chains.
// it implements the ChainsStorage interface.
type ChainsDB struct {
logDBs map[types.ChainID]LogStorage
safetyIndex safety.SafetyIndex
logger log.Logger
// RW mutex:
// Read = chains can be read / mutated.
// Write = set of chains is changing.
mu sync.RWMutex
// unsafe info: the sequence of block seals and events
logDBs map[types.ChainID]LogStorage
// cross-unsafe: how far we have processed the unsafe data.
crossUnsafe map[types.ChainID]types.BlockSeal
// local-safe: index of what we optimistically know about L2 blocks being derived from L1
localDBs map[types.ChainID]LocalDerivedFromStorage
// cross-safe: index of L2 blocks we know to only have cross-L2 valid dependencies
crossDBs map[types.ChainID]CrossDerivedFromStorage
// finalized: the L1 finality progress. This can be translated into what may be considered as finalized in L2.
// It is initially zeroed, and the L2 finality query will return
// an error until it has this L1 finality to work with.
finalizedL1 eth.L1BlockRef
logger log.Logger
}
func NewChainsDB(logDBs map[types.ChainID]LogStorage, l log.Logger) *ChainsDB {
ret := &ChainsDB{
logDBs: logDBs,
logger: l,
func NewChainsDB(l log.Logger) *ChainsDB {
return &ChainsDB{
logDBs: make(map[types.ChainID]LogStorage),
logger: l,
localDBs: make(map[types.ChainID]LocalDerivedFromStorage),
crossDBs: make(map[types.ChainID]CrossDerivedFromStorage),
crossUnsafe: make(map[types.ChainID]types.BlockSeal),
}
ret.safetyIndex = safety.NewSafetyIndex(l, ret)
return ret
}
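
How the per-chain indices above combine into one verdict is easiest to see in the pre-refactor Safest logic; a hedged re-sketch against head pointers, modeled on that code rather than taken verbatim from this commit:

func safest(crossUnsafe, localSafe, crossSafe heads.HeadPointer, finalizedNum uint64,
	blockNum uint64, logIdx uint32) types.SafetyLevel {
	// walk the indices from weakest to strongest guarantee
	level := types.LocalUnsafe
	if crossUnsafe.WithinRange(blockNum, logIdx) {
		level = types.CrossUnsafe
	}
	if localSafe.WithinRange(blockNum, logIdx) {
		level = types.LocalSafe
	}
	if crossSafe.WithinRange(blockNum, logIdx) {
		level = types.CrossSafe
	}
	if finalizedNum >= blockNum {
		level = types.Finalized
	}
	return level
}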
func (db *ChainsDB) AddLogDB(chain types.ChainID, logDB LogStorage) {
db.mu.Lock()
defer db.mu.Unlock()
if db.logDBs[chain] != nil {
db.logger.Warn("overwriting existing logDB for chain", "chain", chain)
}
db.logDBs[chain] = logDB
}
func (db *ChainsDB) IteratorStartingAt(chain types.ChainID, sealedNum uint64, logIndex uint32) (logs.Iterator, error) {
logDB, ok := db.logDBs[chain]
if !ok {
return nil, fmt.Errorf("%w: %v", ErrUnknownChain, chain)
}
return logDB.IteratorStartingAt(sealedNum, logIndex)
}
// ResumeFromLastSealedBlock prepares the chains db to resume recording events after a restart.
// It rewinds the database to the last block that is guaranteed to have been fully recorded to the database,
// to ensure it can resume recording from the first log of the next block.
func (db *ChainsDB) ResumeFromLastSealedBlock() error {
db.mu.RLock()
defer db.mu.RUnlock()
for chain, logStore := range db.logDBs {
headNum, ok := logStore.LatestSealedBlockNum()
if !ok {
@@ -98,100 +129,10 @@ func (db *ChainsDB) ResumeFromLastSealedBlock() error {
return nil
}
// Check calls the underlying logDB to determine if the given log entry is safe with respect to the checker's criteria.
func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (common.Hash, error) {
logDB, ok := db.logDBs[chain]
if !ok {
return common.Hash{}, fmt.Errorf("%w: %v", ErrUnknownChain, chain)
}
_, err := logDB.Contains(blockNum, logIdx, logHash)
if err != nil {
return common.Hash{}, err
}
// TODO(#11693): need to get the actual block hash for this log entry for reorg detection
return common.Hash{}, nil
}
// Safest returns the strongest safety level that can be guaranteed for the given log entry.
// it assumes the log entry has already been checked and is valid; this function only checks safety levels.
func (db *ChainsDB) Safest(chainID types.ChainID, blockNum uint64, index uint32) (safest types.SafetyLevel) {
safest = types.LocalUnsafe
if crossUnsafe, err := db.safetyIndex.CrossUnsafeL2(chainID); err == nil && crossUnsafe.WithinRange(blockNum, index) {
safest = types.CrossUnsafe
}
if localSafe, err := db.safetyIndex.LocalSafeL2(chainID); err == nil && localSafe.WithinRange(blockNum, index) {
safest = types.LocalSafe
}
if crossSafe, err := db.safetyIndex.LocalSafeL2(chainID); err == nil && crossSafe.WithinRange(blockNum, index) {
safest = types.CrossSafe
}
if finalized, err := db.safetyIndex.FinalizedL2(chainID); err == nil {
if finalized.Number >= blockNum {
safest = types.Finalized
}
}
return
}
func (db *ChainsDB) FindSealedBlock(chain types.ChainID, block eth.BlockID) (nextEntry entrydb.EntryIdx, err error) {
logDB, ok := db.logDBs[chain]
if !ok {
return 0, fmt.Errorf("%w: %v", ErrUnknownChain, chain)
}
return logDB.FindSealedBlock(block)
}
// LatestBlockNum returns the latest fully-sealed block number that has been recorded to the logs db
// for the given chain. It does not contain safety guarantees.
// The block number might not be available (empty database, or non-existent chain).
func (db *ChainsDB) LatestBlockNum(chain types.ChainID) (num uint64, ok bool) {
logDB, knownChain := db.logDBs[chain]
if !knownChain {
return 0, false
}
return logDB.LatestSealedBlockNum()
}
func (db *ChainsDB) AddLog(
chain types.ChainID,
logHash common.Hash,
parentBlock eth.BlockID,
logIdx uint32,
execMsg *types.ExecutingMessage) error {
logDB, ok := db.logDBs[chain]
if !ok {
return fmt.Errorf("%w: %v", ErrUnknownChain, chain)
}
return logDB.AddLog(logHash, parentBlock, logIdx, execMsg)
}
func (db *ChainsDB) SealBlock(
chain types.ChainID,
block eth.BlockRef) error {
logDB, ok := db.logDBs[chain]
if !ok {
return fmt.Errorf("%w: %v", ErrUnknownChain, chain)
}
err := logDB.SealBlock(block.ParentHash, block.ID(), block.Time)
if err != nil {
return fmt.Errorf("failed to seal block %v: %w", block, err)
}
err = db.safetyIndex.UpdateLocalUnsafe(chain, block)
if err != nil {
return fmt.Errorf("failed to update local-unsafe: %w", err)
}
return nil
}
func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error {
logDB, ok := db.logDBs[chain]
if !ok {
return fmt.Errorf("%w: %v", ErrUnknownChain, chain)
}
return logDB.Rewind(headBlockNum)
}
func (db *ChainsDB) Close() error {
db.mu.Lock()
defer db.mu.Unlock()
var combined error
for id, logDB := range db.logDBs {
if err := logDB.Close(); err != nil {
......
package db
/*
import (
"errors"
"fmt"
"io"
"math/rand" // nosemgrep
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
func TestChainsDB_AddLog(t *testing.T) {
t.Run("UnknownChain", func(t *testing.T) {
db := NewChainsDB(nil, &stubHeadStorage{}, testlog.Logger(t, log.LevelDebug))
err := db.AddLog(types.ChainIDFromUInt64(2), common.Hash{}, eth.BlockID{}, 33, nil)
require.ErrorIs(t, err, ErrUnknownChain)
})
t.Run("KnownChain", func(t *testing.T) {
chainID := types.ChainIDFromUInt64(1)
logDB := &stubLogDB{}
db := NewChainsDB(map[types.ChainID]LogStorage{
chainID: logDB,
}, &stubHeadStorage{}, testlog.Logger(t, log.LevelDebug))
bl10 := eth.BlockID{Hash: common.Hash{0x10}, Number: 10}
err := db.SealBlock(chainID, common.Hash{0x9}, bl10, 1234)
require.NoError(t, err, err)
err = db.AddLog(chainID, common.Hash{}, bl10, 0, nil)
require.NoError(t, err, err)
require.Equal(t, 1, logDB.addLogCalls)
require.Equal(t, 1, logDB.sealBlockCalls)
})
}
func TestChainsDB_Rewind(t *testing.T) {
t.Run("UnknownChain", func(t *testing.T) {
db := NewChainsDB(nil, &stubHeadStorage{}, testlog.Logger(t, log.LevelDebug))
err := db.Rewind(types.ChainIDFromUInt64(2), 42)
require.ErrorIs(t, err, ErrUnknownChain)
})
t.Run("KnownChain", func(t *testing.T) {
chainID := types.ChainIDFromUInt64(1)
logDB := &stubLogDB{}
db := NewChainsDB(map[types.ChainID]LogStorage{
chainID: logDB,
}, &stubHeadStorage{},
testlog.Logger(t, log.LevelDebug))
err := db.Rewind(chainID, 23)
require.NoError(t, err, err)
require.EqualValues(t, 23, logDB.headBlockNum)
})
}
func TestChainsDB_UpdateCrossHeads(t *testing.T) {
// using a chainID of 1 for simplicity
chainID := types.ChainIDFromUInt64(1)
// get default stubbed components
logDB, checker, h := setupStubbedForUpdateHeads(chainID)
checker.numSafe = 1
xSafe := checker.crossHeadForChain
// The ChainsDB is real, but uses only stubbed components
db := NewChainsDB(
map[types.ChainID]LogStorage{
chainID: logDB},
&stubHeadStorage{h},
testlog.Logger(t, log.LevelDebug))
err := db.UpdateCrossHeads(checker)
require.NoError(t, err)
// found a safe executing message, and no new initiating messages
require.Equal(t, xSafe+1, checker.updated)
}
func TestChainsDB_UpdateCrossHeadsBeyondLocal(t *testing.T) {
// using a chainID of 1 for simplicity
chainID := types.ChainIDFromUInt64(1)
// get default stubbed components
logDB, checker, h := setupStubbedForUpdateHeads(chainID)
// set the safety checker to pass 99 times, effectively allowing all messages to be safe
checker.numSafe = 99
startLocalSafe := checker.localHeadForChain
// The ChainsDB is real, but uses only stubbed components
db := NewChainsDB(
map[types.ChainID]LogStorage{
chainID: logDB},
&stubHeadStorage{h},
testlog.Logger(t, log.LevelDebug))
// Update cross-heads is expected to:
// 1. get a last checkpoint iterator from the logDB (stubbed to be at 15)
// 2. progress the iterator to repeatedly, as the safety check will pass 99 times.
// 3. exceed the local head, and update the cross-head to the local head (40)
err := db.UpdateCrossHeads(checker)
require.NoError(t, err)
require.Equal(t, startLocalSafe, checker.updated)
}
func TestChainsDB_UpdateCrossHeadsEOF(t *testing.T) {
// using a chainID of 1 for simplicity
chainID := types.ChainIDFromUInt64(1)
// get default stubbed components
logDB, checker, h := setupStubbedForUpdateHeads(chainID)
// set the log DB to return an EOF error when trying to get the next executing message
// after processing 10 message (with more messages available to be safe)
logDB.nextLogs = logDB.nextLogs[:checker.crossHeadForChain+11]
// This is a legacy test, the local head is further than the DB content...
checker.numSafe = 99
// The ChainsDB is real, but uses only stubbed components
db := NewChainsDB(
map[types.ChainID]LogStorage{
chainID: logDB},
&stubHeadStorage{h},
testlog.Logger(t, log.LevelDebug))
// Update cross-heads is expected to:
// - process 10 logs as safe, 5 of which execute something
// - update cross-safe to what was there
err := db.UpdateCrossHeads(checker)
require.NoError(t, err)
require.Equal(t, checker.crossHeadForChain+11, checker.updated)
}
func TestChainsDB_UpdateCrossHeadsError(t *testing.T) {
// using a chainID of 1 for simplicity
chainID := types.ChainIDFromUInt64(1)
// get default stubbed components
logDB, checker, h := setupStubbedForUpdateHeads(chainID)
// set the log DB to return an error when trying to get the next executing message
// after processing 3 messages as safe (with more messages available to be safe)
executed := 0
for i, e := range logDB.nextLogs {
if executed == 3 {
logDB.nextLogs[i].err = errors.New("some error")
}
if entrydb.EntryIdx(i) > checker.crossHeadForChain && e.execIdx >= 0 {
executed++
}
}
// everything is safe until error
checker.numSafe = 99
// The ChainsDB is real, but uses only stubbed components
db := NewChainsDB(
map[types.ChainID]LogStorage{
chainID: logDB},
&stubHeadStorage{h},
testlog.Logger(t, log.LevelDebug))
// Update cross-heads is expected to:
// 1. get a last checkpoint iterator from the logDB (stubbed to be at 10)
// 2. fail during execution, even after processing 3 messages as safe
// 3. exit without updating, returning the error
err := db.UpdateCrossHeads(checker)
require.Error(t, err)
// the update was never set (aka 0-value)
require.Equal(t, entrydb.EntryIdx(0), checker.updated)
}
// setupStubbedForUpdateHeads sets up stubbed components for testing the UpdateCrossHeads method
// it returns stubbed structs which are suitable for their interfaces, and can be modified before testing
// TODO: the variables at the top of this function should be configurable by the caller.
// this isn't an issue for now, as all tests can modify the stubbed components directly after calling this function.
// but readability and maintainability would be improved by making this function more configurable.
func setupStubbedForUpdateHeads(chainID types.ChainID) (*stubLogDB, *stubChecker, *heads.Heads) {
// the last known cross-safe head is at 20
cross := heads.HeadPointer{LastSealedBlockNum: 20}
// the local head (the limit of the update) is at 40
local := heads.HeadPointer{LastSealedBlockNum: 40}
// the number of executing messages to make available (this should be more than the number of safety checks performed)
numExecutingMessages := 30
// number of safety checks that will pass before returning false
numSafe := 1
// set up stubbed logDB
logDB := &stubLogDB{}
// set up stubbed executing messages that the ChainsDB can pass to the checker
logDB.executingMessages = []*types.ExecutingMessage{}
for i := 0; i < numExecutingMessages; i++ {
// executing messages are packed in groups of 3, with block numbers increasing by 1
logDB.executingMessages = append(logDB.executingMessages, &types.ExecutingMessage{
BlockNum: uint64(100 + int(i/3)),
LogIdx: uint32(i),
Hash: common.Hash{},
})
}
rng := rand.New(rand.NewSource(123))
blockNum := uint64(100)
logIndex := uint32(0)
executedCount := 0
for i := entrydb.EntryIdx(0); i <= local; i++ {
var logHash common.Hash
rng.Read(logHash[:])
execIndex := -1
// All the even messages have an executing message
if i%2 == 0 {
execIndex = rng.Intn(len(logDB.executingMessages))
executedCount += 1
}
var msgErr error
logDB.nextLogs = append(logDB.nextLogs, nextLogResponse{
blockNum: blockNum,
logIdx: logIndex,
evtHash: logHash,
err: msgErr,
execIdx: execIndex,
})
}
// set up stubbed checker
checker := &stubChecker{
localHeadForChain: local,
crossHeadForChain: cross,
// the first safety check will return true, the second false
numSafe: numSafe,
}
// set up stubbed heads with sample values
h := heads.NewHeads()
h.Chains[chainID] = heads.ChainHeads{}
return logDB, checker, h
}
type stubChecker struct {
localHeadForChain heads.HeadPointer
crossHeadForChain heads.HeadPointer
numSafe int
checkCalls int
updated heads.HeadPointer
}
func (s *stubChecker) String() string {
return "stubChecker"
}
func (s *stubChecker) LocalSafetyLevel() types.SafetyLevel {
return types.Safe
}
func (s *stubChecker) CrossSafetyLevel() types.SafetyLevel {
return types.Safe
}
func (s *stubChecker) LocalHead(chainID types.ChainID) heads.HeadPointer {
return s.localHeadForChain
}
func (s *stubChecker) CrossHead(chainID types.ChainID) heads.HeadPointer {
return s.crossHeadForChain
}
// stubbed Check returns true for the first numSafe calls, and false thereafter
func (s *stubChecker) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) bool {
if s.checkCalls >= s.numSafe {
return fmt.Errorf("safety check failed")
}
s.checkCalls++
return nil
}
func (s *stubChecker) CheckCross(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) error {
return s.check(chain, blockNum, logIdx, logHash)
}
func (s *stubChecker) CheckLocal(chain types.ChainID, blockNum uint64, logIdx uint32, logHash backendTypes.TruncatedHash) error {
return s.check(chain, blockNum, logIdx, logHash)
}
func (s *stubChecker) Update(chain types.ChainID, h heads.HeadPointer) error {
s.updated = h
return nil
}
func (s *stubChecker) UpdateCross(chain types.ChainID, h heads.HeadPointer) error {
return s.Update(chain, h)
}
func (s *stubChecker) UpdateLocal(chain types.ChainID, h heads.HeadPointer) error {
return s.Update(chain, h)
}
func (s *stubChecker) SafetyLevel() types.SafetyLevel {
return types.CrossSafe
}
type stubHeadStorage struct {
heads *heads.Heads
}
func (s *stubHeadStorage) UpdateLocalUnsafe(chainID types.ChainID, h heads.HeadPointer) error {
panic("not implemented")
}
func (s *stubHeadStorage) UpdateLocalSafe(chainID types.ChainID, h heads.HeadPointer) error {
panic("not implemented")
}
func (s *stubHeadStorage) UpdateLocalFinalized(chainID types.ChainID, h heads.HeadPointer) error {
panic("not implemented")
}
func (s *stubHeadStorage) UpdateCrossUnsafe(chainID types.ChainID, h heads.HeadPointer) error {
panic("not implemented")
}
func (s *stubHeadStorage) UpdateCrossSafe(chainID types.ChainID, h heads.HeadPointer) error {
panic("not implemented")
}
func (s *stubHeadStorage) UpdateCrossFinalized(chainID types.ChainID, h heads.HeadPointer) error {
panic("not implemented")
}
func (s *stubHeadStorage) LocalUnsafe(chainID types.ChainID) heads.HeadPointer {
panic("not implemented")
}
func (s *stubHeadStorage) LocalSafe(chainID types.ChainID) heads.HeadPointer {
panic("not implemented")
}
func (s *stubHeadStorage) LocalFinalized(chainID types.ChainID) heads.HeadPointer {
panic("not implemented")
}
func (s *stubHeadStorage) CrossUnsafe(chainID types.ChainID) heads.HeadPointer {
panic("not implemented")
}
func (s *stubHeadStorage) CrossSafe(chainID types.ChainID) heads.HeadPointer {
panic("not implemented")
}
func (s *stubHeadStorage) CrossFinalized(chainID types.ChainID) heads.HeadPointer {
panic("not implemented")
}
func (s *stubHeadStorage) Apply(heads.Operation) error {
return nil
}
func (s *stubHeadStorage) Current() *heads.Heads {
if s.heads == nil {
s.heads = heads.NewHeads()
}
return s.heads.Copy()
}
type nextLogResponse struct {
blockNum uint64
logIdx uint32
evtHash common.Hash
err error
// -1 if not executing
execIdx int
}
type stubIterator struct {
index entrydb.EntryIdx
db *stubLogDB
}
func (s *stubIterator) End() error {
return nil // only used for DB-loading. The stub is already loaded
}
func (s *stubIterator) NextInitMsg() error {
s.index += 1
if s.index >= entrydb.EntryIdx(len(s.db.nextLogs)) {
return io.EOF
}
e := s.db.nextLogs[s.index]
return e.err
}
func (s *stubIterator) NextExecMsg() error {
for {
s.index += 1
if s.index >= entrydb.EntryIdx(len(s.db.nextLogs)) {
return io.EOF
}
e := s.db.nextLogs[s.index]
if e.err != nil {
return e.err
}
if e.execIdx >= 0 {
return nil
}
}
}
func (s *stubIterator) NextBlock() error {
panic("not yet supported")
}
func (s *stubIterator) NextIndex() entrydb.EntryIdx {
return s.index + 1
}
func (s *stubIterator) SealedBlock() (hash common.Hash, num uint64, ok bool) {
panic("not yet supported")
}
func (s *stubIterator) InitMessage() (hash common.Hash, logIndex uint32, ok bool) {
if s.index < 0 {
return common.Hash{}, 0, false
}
if s.index >= entrydb.EntryIdx(len(s.db.nextLogs)) {
return common.Hash{}, 0, false
}
e := s.db.nextLogs[s.index]
return e.evtHash, e.logIdx, true
}
func (s *stubIterator) ExecMessage() *types.ExecutingMessage {
if s.index < 0 {
return nil
}
if s.index >= entrydb.EntryIdx(len(s.db.nextLogs)) {
return nil
}
e := s.db.nextLogs[s.index]
if e.execIdx < 0 {
return nil
}
return s.db.executingMessages[e.execIdx]
}
var _ logs.Iterator = (*stubIterator)(nil)
type stubLogDB struct {
addLogCalls int
sealBlockCalls int
headBlockNum uint64
executingMessages []*types.ExecutingMessage
nextLogs []nextLogResponse
containsResponse containsResponse
}
func (s *stubLogDB) AddLog(logHash common.Hash, parentBlock eth.BlockID, logIdx uint32, execMsg *types.ExecutingMessage) error {
s.addLogCalls++
return nil
}
func (s *stubLogDB) SealBlock(parentHash common.Hash, block eth.BlockID, timestamp uint64) error {
s.sealBlockCalls++
return nil
}
func (s *stubLogDB) LatestSealedBlockNum() (n uint64, ok bool) {
return s.headBlockNum, true
}
func (s *stubLogDB) FindSealedBlock(block eth.BlockID) (nextEntry entrydb.EntryIdx, err error) {
panic("not implemented")
}
func (s *stubLogDB) IteratorStartingAt(sealedNum uint64, logIndex uint32) (logs.Iterator, error) {
return &stubIterator{
//index: i - 1, // TODO broken
db: s,
}, nil
}
var _ LogStorage = (*stubLogDB)(nil)
type containsResponse struct {
index entrydb.EntryIdx
err error
}
// stubbed Contains records the arguments passed to it
// it returns the response set in the struct, or an empty response
func (s *stubLogDB) Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (nextIndex entrydb.EntryIdx, err error) {
return s.containsResponse.index, s.containsResponse.err
}
func (s *stubLogDB) Rewind(newHeadBlockNum uint64) error {
s.headBlockNum = newHeadBlockNum
return nil
}
func (s *stubLogDB) LatestBlockNum() uint64 {
return s.headBlockNum
}
func (s *stubLogDB) Close() error {
return nil
}
*/
@@ -9,71 +9,19 @@ import (
"github.com/ethereum/go-ethereum/log"
)
const (
EntrySize = 34
)
const EntrySize = 34
type EntryIdx int64
type Entry [EntrySize]byte
func (entry Entry) Type() EntryType {
return EntryType(entry[0])
}
type EntryType interface {
String() string
~uint8
}
type EntryTypeFlag uint8
const (
FlagSearchCheckpoint EntryTypeFlag = 1 << TypeSearchCheckpoint
FlagCanonicalHash EntryTypeFlag = 1 << TypeCanonicalHash
FlagInitiatingEvent EntryTypeFlag = 1 << TypeInitiatingEvent
FlagExecutingLink EntryTypeFlag = 1 << TypeExecutingLink
FlagExecutingCheck EntryTypeFlag = 1 << TypeExecutingCheck
FlagPadding EntryTypeFlag = 1 << TypePadding
// for additional padding
FlagPadding2 EntryTypeFlag = FlagPadding << 1
)
func (ex EntryTypeFlag) Any(v EntryTypeFlag) bool {
return ex&v != 0
}
func (ex *EntryTypeFlag) Add(v EntryTypeFlag) {
*ex = *ex | v
}
type Entry[T EntryType] [EntrySize]byte
func (ex *EntryTypeFlag) Remove(v EntryTypeFlag) {
*ex = *ex &^ v
}
type EntryType uint8
const (
TypeSearchCheckpoint EntryType = iota
TypeCanonicalHash
TypeInitiatingEvent
TypeExecutingLink
TypeExecutingCheck
TypePadding
)
func (d EntryType) String() string {
switch d {
case TypeSearchCheckpoint:
return "searchCheckpoint"
case TypeCanonicalHash:
return "canonicalHash"
case TypeInitiatingEvent:
return "initiatingEvent"
case TypeExecutingLink:
return "executingLink"
case TypeExecutingCheck:
return "executingCheck"
case TypePadding:
return "padding"
default:
return fmt.Sprintf("unknown-%d", uint8(d))
}
}
func (entry Entry[T]) Type() T {
return T(entry[0])
}
// dataAccess defines a minimal API required to manipulate the actual stored data.
@@ -85,7 +33,7 @@ type dataAccess interface {
Truncate(size int64) error
}
type EntryDB struct {
type EntryDB[T EntryType] struct {
data dataAccess
lastEntryIdx EntryIdx
@@ -97,7 +45,7 @@ type EntryDB struct {
// If the file exists it will be used as the existing data.
// Returns ErrRecoveryRequired if the existing file is not a valid entry db. A EntryDB is still returned but all
// operations will return ErrRecoveryRequired until the Recover method is called.
func NewEntryDB(logger log.Logger, path string) (*EntryDB, error) {
func NewEntryDB[T EntryType](logger log.Logger, path string) (*EntryDB[T], error) {
logger.Info("Opening entry database", "path", path)
file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0o644)
if err != nil {
@@ -108,7 +56,7 @@ func NewEntryDB(logger log.Logger, path string) (*EntryDB, error) {
return nil, fmt.Errorf("failed to stat database at %v: %w", path, err)
}
size := info.Size() / EntrySize
db := &EntryDB{
db := &EntryDB[T]{
data: file,
lastEntryIdx: EntryIdx(size - 1),
}
@@ -121,24 +69,24 @@ func NewEntryDB(logger log.Logger, path string) (*EntryDB, error) {
return db, nil
}
func (e *EntryDB) Size() int64 {
func (e *EntryDB[T]) Size() int64 {
return int64(e.lastEntryIdx) + 1
}
func (e *EntryDB) LastEntryIdx() EntryIdx {
func (e *EntryDB[T]) LastEntryIdx() EntryIdx {
return e.lastEntryIdx
}
// Read an entry from the database by index. Returns io.EOF iff idx is after the last entry.
func (e *EntryDB) Read(idx EntryIdx) (Entry, error) {
func (e *EntryDB[T]) Read(idx EntryIdx) (Entry[T], error) {
if idx > e.lastEntryIdx {
return Entry{}, io.EOF
return Entry[T]{}, io.EOF
}
var out Entry
var out Entry[T]
read, err := e.data.ReadAt(out[:], int64(idx)*EntrySize)
// Ignore io.EOF if we read the entire last entry as ReadAt may return io.EOF or nil when it reads the last byte
if err != nil && !(errors.Is(err, io.EOF) && read == EntrySize) {
return Entry{}, fmt.Errorf("failed to read entry %v: %w", idx, err)
return Entry[T]{}, fmt.Errorf("failed to read entry %v: %w", idx, err)
}
return out, nil
}
@@ -147,7 +95,7 @@ func (e *EntryDB) Read(idx EntryIdx) (Entry, error) {
// The entries are combined in memory and passed to a single Write invocation.
// If the write fails, it will attempt to truncate any partially written data.
// Subsequent writes to this instance will fail until partially written data is truncated.
func (e *EntryDB) Append(entries ...Entry) error {
func (e *EntryDB[T]) Append(entries ...Entry[T]) error {
if e.cleanupFailedWrite {
// Try to rollback partially written data from a previous Append
if truncateErr := e.Truncate(e.lastEntryIdx); truncateErr != nil {
@@ -177,7 +125,7 @@ func (e *EntryDB) Append(entries ...Entry) error {
}
// Truncate the database so that the last retained entry is idx. Any entries after idx are deleted.
func (e *EntryDB) Truncate(idx EntryIdx) error {
func (e *EntryDB[T]) Truncate(idx EntryIdx) error {
if err := e.data.Truncate((int64(idx) + 1) * EntrySize); err != nil {
return fmt.Errorf("failed to truncate to entry %v: %w", idx, err)
}
@@ -188,13 +136,13 @@ func (e *EntryDB) Truncate(idx EntryIdx) error {
}
// recover an invalid database by truncating back to the last complete event.
func (e *EntryDB) recover() error {
func (e *EntryDB[T]) recover() error {
if err := e.data.Truncate((e.Size()) * EntrySize); err != nil {
return fmt.Errorf("failed to truncate trailing partial entries: %w", err)
}
return nil
}
func (e *EntryDB) Close() error {
func (e *EntryDB[T]) Close() error {
return e.data.Close()
}
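
With the type parameter, each log-DB variant can define its own entry tag set, checked at compile time. A minimal sketch of instantiating the generic store; the type name and helper are illustrative, following the pattern the test file below uses:

// MyEntryType satisfies the EntryType constraint: ~uint8 plus String().
type MyEntryType uint8

func (t MyEntryType) String() string { return fmt.Sprintf("my-entry-%d", uint8(t)) }

func openMyEntryDB(logger log.Logger, path string) (*EntryDB[MyEntryType], error) {
	// every Entry[MyEntryType] is EntrySize bytes; byte 0 carries the type tag
	return NewEntryDB[MyEntryType](logger, path)
}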
@@ -3,6 +3,7 @@ package entrydb
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"path/filepath"
@@ -13,6 +14,14 @@ import (
"github.com/stretchr/testify/require"
)
type TestEntryType uint8
func (typ TestEntryType) String() string {
return fmt.Sprintf("%d", uint8(typ))
}
type TestEntry = Entry[TestEntryType]
func TestReadWrite(t *testing.T) {
t.Run("BasicReadWrite", func(t *testing.T) {
db := createEntryDB(t)
@@ -114,7 +123,7 @@ func TestTruncateTrailingPartialEntries(t *testing.T) {
copy(invalidData[EntrySize:], entry2[:])
invalidData[len(invalidData)-1] = 3 // Some invalid trailing data
require.NoError(t, os.WriteFile(file, invalidData, 0o644))
db, err := NewEntryDB(logger, file)
db, err := NewEntryDB[TestEntryType](logger, file)
require.NoError(t, err)
defer db.Close()
@@ -177,19 +186,19 @@ func TestWriteErrors(t *testing.T) {
})
}
func requireRead(t *testing.T, db *EntryDB, idx EntryIdx, expected Entry) {
func requireRead(t *testing.T, db *EntryDB[TestEntryType], idx EntryIdx, expected TestEntry) {
actual, err := db.Read(idx)
require.NoError(t, err)
require.Equal(t, expected, actual)
}
func createEntry(i byte) Entry {
return Entry(bytes.Repeat([]byte{i}, EntrySize))
func createEntry(i byte) TestEntry {
return TestEntry(bytes.Repeat([]byte{i}, EntrySize))
}
func createEntryDB(t *testing.T) *EntryDB {
func createEntryDB(t *testing.T) *EntryDB[TestEntryType] {
logger := testlog.Logger(t, log.LvlInfo)
db, err := NewEntryDB(logger, filepath.Join(t.TempDir(), "entries.db"))
db, err := NewEntryDB[TestEntryType](logger, filepath.Join(t.TempDir(), "entries.db"))
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, db.Close())
@@ -197,9 +206,9 @@ func createEntryDB(t *testing.T) *EntryDB {
return db
}
func createEntryDBWithStubData() (*EntryDB, *stubDataAccess) {
func createEntryDBWithStubData() (*EntryDB[TestEntryType], *stubDataAccess) {
stubData := &stubDataAccess{}
db := &EntryDB{data: stubData, lastEntryIdx: -1}
db := &EntryDB[TestEntryType]{data: stubData, lastEntryIdx: -1}
return db, stubData
}
......
package entrydb
import "errors"
var (
// ErrOutOfOrder happens when you try to add data to the DB,
// but it does not actually fit onto the latest data (by being too old or new).
ErrOutOfOrder = errors.New("data out of order")
// ErrDataCorruption happens when the underlying DB has some I/O issue
ErrDataCorruption = errors.New("data corruption")
// ErrSkipped happens when we try to retrieve data that is not available (pruned)
// It may also happen if we erroneously skip data, that was not considered a conflict, if the DB is corrupted.
ErrSkipped = errors.New("skipped data")
// ErrFuture happens when data is just not yet available
ErrFuture = errors.New("future data")
// ErrConflict happens when we know for sure that there is different canonical data
ErrConflict = errors.New("conflicting data")
// ErrStop can be used in iterators to indicate iteration has to stop
ErrStop = errors.New("iter stop")
)
package heads
import (
"encoding/json"
"errors"
"fmt"
"os"
"sync"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/ioutil"
"github.com/ethereum-optimism/optimism/op-service/jsonutil"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
// HeadTracker records the current chain head pointers for a single chain.
type HeadTracker struct {
rwLock sync.RWMutex
path string
current *Heads
logger log.Logger
}
func (t *HeadTracker) CrossUnsafe(id types.ChainID) HeadPointer {
return t.current.Get(id).CrossUnsafe
}
func (t *HeadTracker) CrossSafe(id types.ChainID) HeadPointer {
return t.current.Get(id).CrossSafe
}
func (t *HeadTracker) CrossFinalized(id types.ChainID) HeadPointer {
return t.current.Get(id).CrossFinalized
}
func (t *HeadTracker) LocalUnsafe(id types.ChainID) HeadPointer {
return t.current.Get(id).Unsafe
}
func (t *HeadTracker) LocalSafe(id types.ChainID) HeadPointer {
return t.current.Get(id).LocalSafe
}
func (t *HeadTracker) LocalFinalized(id types.ChainID) HeadPointer {
return t.current.Get(id).LocalFinalized
}
func (t *HeadTracker) UpdateCrossUnsafe(id types.ChainID, pointer HeadPointer) error {
return t.Apply(OperationFn(func(heads *Heads) error {
t.logger.Info("Cross-unsafe update", "pointer", pointer)
h := heads.Get(id)
h.CrossUnsafe = pointer
heads.Put(id, h)
return nil
}))
}
func (t *HeadTracker) UpdateCrossSafe(id types.ChainID, pointer HeadPointer) error {
return t.Apply(OperationFn(func(heads *Heads) error {
t.logger.Info("Cross-safe update", "pointer", pointer)
h := heads.Get(id)
h.CrossSafe = pointer
heads.Put(id, h)
return nil
}))
}
func (t *HeadTracker) UpdateCrossFinalized(id types.ChainID, pointer HeadPointer) error {
return t.Apply(OperationFn(func(heads *Heads) error {
t.logger.Info("Cross-finalized update", "pointer", pointer)
h := heads.Get(id)
h.CrossFinalized = pointer
heads.Put(id, h)
return nil
}))
}
func (t *HeadTracker) UpdateLocalUnsafe(id types.ChainID, pointer HeadPointer) error {
return t.Apply(OperationFn(func(heads *Heads) error {
t.logger.Info("Local-unsafe update", "pointer", pointer)
h := heads.Get(id)
h.Unsafe = pointer
heads.Put(id, h)
return nil
}))
}
func (t *HeadTracker) UpdateLocalSafe(id types.ChainID, pointer HeadPointer) error {
return t.Apply(OperationFn(func(heads *Heads) error {
t.logger.Info("Local-safe update", "pointer", pointer)
h := heads.Get(id)
h.LocalSafe = pointer
heads.Put(id, h)
return nil
}))
}
func (t *HeadTracker) UpdateLocalFinalized(id types.ChainID, pointer HeadPointer) error {
return t.Apply(OperationFn(func(heads *Heads) error {
t.logger.Info("Local-finalized update", "pointer", pointer)
h := heads.Get(id)
h.LocalFinalized = pointer
heads.Put(id, h)
return nil
}))
}
func NewHeadTracker(logger log.Logger, path string) (*HeadTracker, error) {
current := NewHeads()
if data, err := os.ReadFile(path); errors.Is(err, os.ErrNotExist) {
// No existing file, just use empty heads
} else if err != nil {
return nil, fmt.Errorf("failed to read existing heads from %v: %w", path, err)
} else {
if err := json.Unmarshal(data, current); err != nil {
return nil, fmt.Errorf("invalid existing heads file %v: %w", path, err)
}
}
return &HeadTracker{
path: path,
current: current,
logger: logger,
}, nil
}
func (t *HeadTracker) Apply(op Operation) error {
t.rwLock.Lock()
defer t.rwLock.Unlock()
// Store a copy of the heads prior to changing so we can roll back if needed.
modified := t.current.Copy()
if err := op.Apply(modified); err != nil {
return fmt.Errorf("operation failed: %w", err)
}
if err := t.write(modified); err != nil {
return fmt.Errorf("failed to store updated heads: %w", err)
}
t.current = modified
return nil
}
func (t *HeadTracker) Current() *Heads {
t.rwLock.RLock()
defer t.rwLock.RUnlock()
return t.current.Copy()
}
func (t *HeadTracker) write(heads *Heads) error {
if err := jsonutil.WriteJSON(heads, ioutil.ToAtomicFile(t.path, 0o644)); err != nil {
return fmt.Errorf("failed to write new heads: %w", err)
}
return nil
}
func (t *HeadTracker) Close() error {
return nil
}
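
Apply above is the only mutation path: the operation runs on a copy, the copy is persisted atomically, and only then replaces the current heads. A hedged sketch of the rollback behavior this buys (the helper is illustrative; the disabled test below exercises the same property):

// failedOp never mutates the tracker: Apply runs the operation on a copy,
// and discards the copy when the operation (or the atomic write) fails.
func failedOp(tracker *HeadTracker, id types.ChainID, ptr HeadPointer) error {
	return tracker.Apply(OperationFn(func(heads *Heads) error {
		h := heads.Get(id)
		h.CrossSafe = ptr
		heads.Put(id, h)
		return errors.New("abort: current heads and on-disk file stay unchanged")
	}))
}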
package heads
/*
import (
"errors"
"os"
"path/filepath"
"testing"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/stretchr/testify/require"
)
func TestHeads_SaveAndReload(t *testing.T) {
dir := t.TempDir()
path := filepath.Join(dir, "heads.json")
chainA := types.ChainIDFromUInt64(3)
chainAHeads := ChainHeads{
Unsafe: 1,
CrossUnsafe: 2,
LocalSafe: 3,
CrossSafe: 4,
LocalFinalized: 5,
CrossFinalized: 6,
}
chainB := types.ChainIDFromUInt64(5)
chainBHeads := ChainHeads{
Unsafe: 11,
CrossUnsafe: 12,
LocalSafe: 13,
CrossSafe: 14,
LocalFinalized: 15,
CrossFinalized: 16,
}
orig, err := NewHeadTracker(path)
require.NoError(t, err)
err = orig.Apply(OperationFn(func(heads *Heads) error {
heads.Put(chainA, chainAHeads)
heads.Put(chainB, chainBHeads)
return nil
}))
require.NoError(t, err)
require.Equal(t, orig.Current().Get(chainA), chainAHeads)
require.Equal(t, orig.Current().Get(chainB), chainBHeads)
loaded, err := NewHeadTracker(path)
require.NoError(t, err)
require.EqualValues(t, loaded.Current(), orig.Current())
}
func TestHeads_NoChangesMadeIfOperationFails(t *testing.T) {
dir := t.TempDir()
path := filepath.Join(dir, "heads.json")
chainA := types.ChainIDFromUInt64(3)
chainAHeads := ChainHeads{
Unsafe: 1,
CrossUnsafe: 2,
LocalSafe: 3,
CrossSafe: 4,
LocalFinalized: 5,
CrossFinalized: 6,
}
orig, err := NewHeadTracker(path)
require.NoError(t, err)
boom := errors.New("boom")
err = orig.Apply(OperationFn(func(heads *Heads) error {
heads.Put(chainA, chainAHeads)
return boom
}))
require.ErrorIs(t, err, boom)
require.Equal(t, ChainHeads{}, orig.Current().Get(chainA))
// Should be able to load from disk too
loaded, err := NewHeadTracker(path)
require.NoError(t, err)
require.EqualValues(t, loaded.Current(), orig.Current())
}
func TestHeads_NoChangesMadeIfWriteFails(t *testing.T) {
dir := t.TempDir()
path := filepath.Join(dir, "invalid/heads.json")
chainA := types.ChainIDFromUInt64(3)
chainAHeads := ChainHeads{
Unsafe: 1,
CrossUnsafe: 2,
LocalSafe: 3,
CrossSafe: 4,
LocalFinalized: 5,
CrossFinalized: 6,
}
orig, err := NewHeadTracker(path)
require.NoError(t, err)
err = orig.Apply(OperationFn(func(heads *Heads) error {
heads.Put(chainA, chainAHeads)
return nil
}))
require.ErrorIs(t, err, os.ErrNotExist)
require.Equal(t, ChainHeads{}, orig.Current().Get(chainA))
}
*/
package heads
import (
"encoding/json"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
type HeadPointer struct {
// LastSealedBlockHash is the last fully-processed block
LastSealedBlockHash common.Hash
LastSealedBlockNum uint64
LastSealedTimestamp uint64
// Number of logs that have been verified since the LastSealedBlock.
// These logs are contained in the block that builds on top of the LastSealedBlock.
LogsSince uint32
}
// WithinRange checks if the given log, in the given block,
// is within range (i.e. before or equal to the head-pointer).
// This does not guarantee that the log exists.
func (ptr *HeadPointer) WithinRange(blockNum uint64, logIdx uint32) bool {
if ptr.LastSealedBlockHash == (common.Hash{}) {
return false // no block yet
}
return blockNum <= ptr.LastSealedBlockNum ||
(blockNum == ptr.LastSealedBlockNum+1 && logIdx < ptr.LogsSince)
}
func (ptr *HeadPointer) IsSealed(blockNum uint64) bool {
if ptr.LastSealedBlockHash == (common.Hash{}) {
return false // no block yet
}
return blockNum <= ptr.LastSealedBlockNum
}
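
A worked example of the pointer semantics; the numbers are illustrative, and recall that LogsSince counts verified logs of the block building on LastSealedBlock:

func exampleWithinRange() {
	// last sealed block is 10; 3 logs of block 11 have been verified since
	ptr := HeadPointer{
		LastSealedBlockHash: common.Hash{0x01}, // any non-zero hash
		LastSealedBlockNum:  10,
		LogsSince:           3,
	}
	fmt.Println(ptr.WithinRange(10, 99)) // true: block 10 is fully sealed
	fmt.Println(ptr.WithinRange(11, 2))  // true: among the 3 verified logs of block 11
	fmt.Println(ptr.WithinRange(11, 3))  // false: that log is not verified yet
	fmt.Println(ptr.WithinRange(12, 0))  // false: beyond the pointer
}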
// ChainHeads provides the serialization format for the current chain heads.
type ChainHeads struct {
Unsafe HeadPointer `json:"localUnsafe"`
CrossUnsafe HeadPointer `json:"crossUnsafe"`
LocalSafe HeadPointer `json:"localSafe"`
CrossSafe HeadPointer `json:"crossSafe"`
LocalFinalized HeadPointer `json:"localFinalized"`
CrossFinalized HeadPointer `json:"crossFinalized"`
}
type Heads struct {
Chains map[types.ChainID]ChainHeads
}
func NewHeads() *Heads {
return &Heads{Chains: make(map[types.ChainID]ChainHeads)}
}
func (h *Heads) Get(id types.ChainID) ChainHeads {
chain, ok := h.Chains[id]
if !ok {
return ChainHeads{}
}
// init to genesis
if chain.LocalFinalized == (HeadPointer{}) && chain.Unsafe.LastSealedBlockNum == 0 {
chain.LocalFinalized = chain.Unsafe
}
// Make sure the data is consistent
if chain.LocalSafe == (HeadPointer{}) {
chain.LocalSafe = chain.LocalFinalized
}
if chain.Unsafe == (HeadPointer{}) {
chain.Unsafe = chain.LocalSafe
}
if chain.CrossFinalized == (HeadPointer{}) && chain.LocalFinalized.LastSealedBlockNum == 0 {
chain.CrossFinalized = chain.LocalFinalized
}
if chain.CrossSafe == (HeadPointer{}) {
chain.CrossSafe = chain.CrossFinalized
}
if chain.CrossUnsafe == (HeadPointer{}) {
chain.CrossUnsafe = chain.CrossSafe
}
return chain
}
func (h *Heads) Put(id types.ChainID, head ChainHeads) {
h.Chains[id] = head
}
func (h *Heads) Copy() *Heads {
c := &Heads{Chains: make(map[types.ChainID]ChainHeads)}
for id, heads := range h.Chains {
c.Chains[id] = heads
}
return c
}
func (h *Heads) MarshalJSON() ([]byte, error) {
data := make(map[hexutil.U256]ChainHeads)
for id, heads := range h.Chains {
data[hexutil.U256(id)] = heads
}
return json.Marshal(data)
}
func (h *Heads) UnmarshalJSON(data []byte) error {
hexData := make(map[hexutil.U256]ChainHeads)
if err := json.Unmarshal(data, &hexData); err != nil {
return err
}
h.Chains = make(map[types.ChainID]ChainHeads)
for id, heads := range hexData {
h.Put(types.ChainID(id), heads)
}
return nil
}
type Operation interface {
Apply(head *Heads) error
}
type OperationFn func(heads *Heads) error
func (f OperationFn) Apply(heads *Heads) error {
return f(heads)
}
package heads
import (
"encoding/json"
"fmt"
"math/rand" // nosemgrep
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
func TestHeads(t *testing.T) {
rng := rand.New(rand.NewSource(1234))
randHeadPtr := func() HeadPointer {
var h common.Hash
rng.Read(h[:])
return HeadPointer{
LastSealedBlockHash: h,
LastSealedBlockNum: rng.Uint64(),
LogsSince: rng.Uint32(),
}
}
t.Run("RoundTripViaJson", func(t *testing.T) {
heads := NewHeads()
heads.Put(types.ChainIDFromUInt64(3), ChainHeads{
Unsafe: randHeadPtr(),
CrossUnsafe: randHeadPtr(),
LocalSafe: randHeadPtr(),
CrossSafe: randHeadPtr(),
LocalFinalized: randHeadPtr(),
CrossFinalized: randHeadPtr(),
})
heads.Put(types.ChainIDFromUInt64(9), ChainHeads{
Unsafe: randHeadPtr(),
CrossUnsafe: randHeadPtr(),
LocalSafe: randHeadPtr(),
CrossSafe: randHeadPtr(),
LocalFinalized: randHeadPtr(),
CrossFinalized: randHeadPtr(),
})
heads.Put(types.ChainIDFromUInt64(4892497242424), ChainHeads{
Unsafe: randHeadPtr(),
CrossUnsafe: randHeadPtr(),
LocalSafe: randHeadPtr(),
CrossSafe: randHeadPtr(),
LocalFinalized: randHeadPtr(),
CrossFinalized: randHeadPtr(),
})
j, err := json.Marshal(heads)
require.NoError(t, err)
fmt.Println(string(j))
var result Heads
err = json.Unmarshal(j, &result)
require.NoError(t, err)
require.Equal(t, heads.Chains, result.Chains)
})
t.Run("Copy", func(t *testing.T) {
chainA := types.ChainIDFromUInt64(3)
chainB := types.ChainIDFromUInt64(4)
chainAOrigHeads := ChainHeads{
Unsafe: randHeadPtr(),
}
chainAModifiedHeads1 := ChainHeads{
Unsafe: randHeadPtr(),
}
chainAModifiedHeads2 := ChainHeads{
Unsafe: randHeadPtr(),
}
chainBModifiedHeads := ChainHeads{
Unsafe: randHeadPtr(),
}
heads := NewHeads()
heads.Put(chainA, chainAOrigHeads)
otherHeads := heads.Copy()
otherHeads.Put(chainA, chainAModifiedHeads1)
otherHeads.Put(chainB, chainBModifiedHeads)
require.Equal(t, heads.Get(chainA), chainAOrigHeads)
require.Equal(t, heads.Get(chainB), ChainHeads{})
heads.Put(chainA, chainAModifiedHeads2)
require.Equal(t, heads.Get(chainA), chainAModifiedHeads2)
require.Equal(t, otherHeads.Get(chainA), chainAModifiedHeads1)
require.Equal(t, otherHeads.Get(chainB), chainBModifiedHeads)
})
}
......@@ -20,21 +20,6 @@ const (
eventFlagHasExecutingMessage = byte(1)
)
var (
// ErrLogOutOfOrder happens when you try to add a log to the DB,
// but it does not actually fit onto the latest data (by being too old or new).
ErrLogOutOfOrder = errors.New("log out of order")
// ErrDataCorruption happens when the underlying DB has some I/O issue
ErrDataCorruption = errors.New("data corruption")
// ErrSkipped happens when we try to retrieve data that is not available (pruned)
// It may also happen if we erroneously skip data that was not considered a conflict, when the DB is corrupted.
ErrSkipped = errors.New("skipped data")
// ErrFuture happens when data is just not yet available
ErrFuture = errors.New("future data")
// ErrConflict happens when we know for sure that there is different canonical data
ErrConflict = errors.New("conflicting data")
)
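These sentinels move out of the logs package; the rest of the diff references them as entrydb.ErrFuture, entrydb.ErrConflict, entrydb.ErrSkipped, entrydb.ErrDataCorruption and entrydb.ErrOutOfOrder. A minimal sketch of how callers branch on them with errors.Is, assuming the same set is exported from entrydb:

// classify is illustrative, not part of this diff.
func classify(err error) string {
	switch {
	case err == nil:
		return "found"
	case errors.Is(err, entrydb.ErrFuture):
		return "not yet available" // data newer than what the DB has
	case errors.Is(err, entrydb.ErrSkipped):
		return "pruned/skipped"
	case errors.Is(err, entrydb.ErrConflict):
		return "conflicts with canonical data"
	case errors.Is(err, entrydb.ErrDataCorruption):
		return "data corruption"
	default:
		return "unknown: " + err.Error()
	}
}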
type Metrics interface {
RecordDBEntryCount(count int64)
RecordDBSearchEntriesRead(count int64)
......@@ -43,8 +28,8 @@ type Metrics interface {
type EntryStore interface {
Size() int64
LastEntryIdx() entrydb.EntryIdx
Read(idx entrydb.EntryIdx) (entrydb.Entry, error)
Append(entries ...entrydb.Entry) error
Read(idx entrydb.EntryIdx) (Entry, error)
Append(entries ...Entry) error
Truncate(idx entrydb.EntryIdx) error
Close() error
}
......@@ -66,7 +51,7 @@ type DB struct {
}
func NewFromFile(logger log.Logger, m Metrics, path string, trimToLastSealed bool) (*DB, error) {
store, err := entrydb.NewEntryDB(logger, path)
store, err := entrydb.NewEntryDB[EntryType](logger, path)
if err != nil {
return nil, fmt.Errorf("failed to open DB: %w", err)
}
......@@ -117,7 +102,7 @@ func (db *DB) init(trimToLastSealed bool) error {
// and then apply any remaining changes on top, to hydrate the state.
lastCheckpoint := (db.lastEntryIdx() / searchCheckpointFrequency) * searchCheckpointFrequency
i := db.newIterator(lastCheckpoint)
i.current.need.Add(entrydb.FlagCanonicalHash)
i.current.need.Add(FlagCanonicalHash)
if err := i.End(); err != nil {
return fmt.Errorf("failed to init from remaining trailing data: %w", err)
}
......@@ -132,7 +117,7 @@ func (db *DB) trimToLastSealed() error {
if err != nil {
return fmt.Errorf("failed to read %v to check for trailing entries: %w", i, err)
}
if entry.Type() == entrydb.TypeCanonicalHash {
if entry.Type() == TypeCanonicalHash {
// only a canonical hash, indicating a sealed block, is a valid point for restart
break
}
......@@ -159,23 +144,31 @@ func (db *DB) IteratorStartingAt(sealedNum uint64, logsSince uint32) (Iterator,
// returning the block seal if it is known.
// returns entrydb.ErrFuture if the block is too new to be able to tell
func (db *DB) FindSealedBlock(block eth.BlockID) (nextEntry entrydb.EntryIdx, err error) {
func (db *DB) FindSealedBlock(number uint64) (seal types.BlockSeal, err error) {
db.rwLock.RLock()
defer db.rwLock.RUnlock()
iter, err := db.newIteratorAt(block.Number, 0)
if errors.Is(err, ErrFuture) {
return 0, fmt.Errorf("block %d is not known yet: %w", block.Number, ErrFuture)
iter, err := db.newIteratorAt(number, 0)
if errors.Is(err, entrydb.ErrFuture) {
return types.BlockSeal{}, fmt.Errorf("block %d is not known yet: %w", number, entrydb.ErrFuture)
} else if err != nil {
return 0, fmt.Errorf("failed to find sealed block %d: %w", block.Number, err)
return types.BlockSeal{}, fmt.Errorf("failed to find sealed block %d: %w", number, err)
}
h, _, ok := iter.SealedBlock()
h, n, ok := iter.SealedBlock()
if !ok {
panic("expected block")
}
if block.Hash != h {
return 0, fmt.Errorf("queried %s but got %s at number %d: %w", block.Hash, h, block.Number, ErrConflict)
if n != number {
panic(fmt.Errorf("found block seal %s %d does not match expected block number %d", h, n, number))
}
timestamp, ok := iter.SealedTimestamp()
if !ok {
panic("expected timestamp")
}
return iter.NextIndex(), nil
return types.BlockSeal{
Hash: h,
Number: n,
Timestamp: timestamp,
}, nil
}
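With the lookup now keyed by number, a caller that still has a full block ID does the hash comparison itself, mirroring the removed behavior. A hedged sketch (checkSealedBlock is illustrative, not part of this diff):

func checkSealedBlock(db *DB, block eth.BlockID) error {
	seal, err := db.FindSealedBlock(block.Number)
	if err != nil {
		return err // e.g. entrydb.ErrFuture if the block is not known yet
	}
	if seal.Hash != block.Hash {
		return fmt.Errorf("queried %s but got %s at number %d: %w",
			block.Hash, seal.Hash, block.Number, entrydb.ErrConflict)
	}
	return nil
}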
// LatestSealedBlockNum returns the block number of the block that was last sealed,
......@@ -206,31 +199,63 @@ func (db *DB) Get(blockNum uint64, logIdx uint32) (common.Hash, error) {
// If the log is determined to conflict with the canonical chain, then ErrConflict is returned.
// logIdx is the index of the log in the array of all logs in the block.
// This can be used to check the validity of cross-chain interop events.
func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (entrydb.EntryIdx, error) {
// The block-seal of the block (at height blockNum) that the log was included in is returned.
// This seal may be fully zeroed, without error, if the block isn't fully known yet.
func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (types.BlockSeal, error) {
db.rwLock.RLock()
defer db.rwLock.RUnlock()
db.log.Trace("Checking for log", "blockNum", blockNum, "logIdx", logIdx, "hash", logHash)
evtHash, iter, err := db.findLogInfo(blockNum, logIdx)
if err != nil {
return 0, err // may be ErrConflict if the block does not have as many logs
return types.BlockSeal{}, err // may be ErrConflict if the block does not have as many logs
}
db.log.Trace("Found initiatingEvent", "blockNum", blockNum, "logIdx", logIdx, "hash", evtHash)
// Found the requested block and log index, check if the hash matches
if evtHash != logHash {
return 0, fmt.Errorf("payload hash mismatch: expected %s, got %s", logHash, evtHash)
return types.BlockSeal{}, fmt.Errorf("payload hash mismatch: expected %s, got %s", logHash, evtHash)
}
return iter.NextIndex(), nil
// Now find the block seal after the log, to identify the block that the log was included in.
err = iter.TraverseConditional(func(state IteratorState) error {
_, n, ok := state.SealedBlock()
if !ok { // incomplete block data
return nil
}
if n == blockNum {
return entrydb.ErrStop
}
if n > blockNum {
return entrydb.ErrDataCorruption
}
return nil
})
if err == nil {
panic("expected iterator to stop with error")
}
if errors.Is(err, entrydb.ErrFuture) {
// Log is known, but as part of an unsealed block.
return types.BlockSeal{}, nil
}
if errors.Is(err, entrydb.ErrStop) {
h, n, _ := iter.SealedBlock()
timestamp, _ := iter.SealedTimestamp()
return types.BlockSeal{
Hash: h,
Number: n,
Timestamp: timestamp,
}, nil
}
return types.BlockSeal{}, err
}
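Callers now have three outcomes to distinguish: an error, a zeroed seal (log known, block not sealed yet), and a full seal. A sketch, assuming types.BlockSeal is a comparable hash/number/timestamp struct as used above (whereIncluded is illustrative, not part of this diff):

func whereIncluded(db *DB, blockNum uint64, logIdx uint32, logHash common.Hash) (sealed bool, seal types.BlockSeal, err error) {
	seal, err = db.Contains(blockNum, logIdx, logHash)
	if err != nil {
		return false, types.BlockSeal{}, err // e.g. entrydb.ErrConflict or entrydb.ErrFuture
	}
	if seal == (types.BlockSeal{}) {
		return false, seal, nil // log exists, but its block is still being built
	}
	return true, seal, nil // log included in sealed block seal.Number
}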
func (db *DB) findLogInfo(blockNum uint64, logIdx uint32) (common.Hash, Iterator, error) {
if blockNum == 0 {
return common.Hash{}, nil, ErrConflict // no logs in block 0
return common.Hash{}, nil, entrydb.ErrConflict // no logs in block 0
}
// blockNum-1, such that we find a log that came after the parent num-1 was sealed.
// logIdx, such that all entries before logIdx can be skipped, but logIdx itself is still readable.
iter, err := db.newIteratorAt(blockNum-1, logIdx)
if errors.Is(err, ErrFuture) {
if errors.Is(err, entrydb.ErrFuture) {
db.log.Trace("Could not find log yet", "blockNum", blockNum, "logIdx", logIdx)
return common.Hash{}, nil, err
} else if err != nil {
......@@ -245,7 +270,7 @@ func (db *DB) findLogInfo(blockNum uint64, logIdx uint32) (common.Hash, Iterator
} else if x < blockNum-1 {
panic(fmt.Errorf("bug in newIteratorAt, expected to have found parent block %d but got %d", blockNum-1, x))
} else if x > blockNum-1 {
return common.Hash{}, nil, fmt.Errorf("log does not exist, found next block already: %w", ErrConflict)
return common.Hash{}, nil, fmt.Errorf("log does not exist, found next block already: %w", entrydb.ErrConflict)
}
logHash, x, ok := iter.InitMessage()
if !ok {
......@@ -266,17 +291,14 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error)
searchCheckpointIndex, err := db.searchCheckpoint(blockNum, logIndex)
if errors.Is(err, io.EOF) {
// Did not find a checkpoint to start reading from so the log cannot be present.
return nil, ErrFuture
return nil, entrydb.ErrFuture
} else if err != nil {
return nil, err
}
// The iterator did not consume the checkpoint yet, it's positioned right at it.
// So we can call NextBlock() and get the checkpoint itself as first entry.
iter := db.newIterator(searchCheckpointIndex)
if err != nil {
return nil, err
}
iter.current.need.Add(entrydb.FlagCanonicalHash)
iter.current.need.Add(FlagCanonicalHash)
defer func() {
db.m.RecordDBSearchEntriesRead(iter.entriesRead)
}()
......@@ -285,9 +307,9 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error)
if _, n, _ := iter.SealedBlock(); n == blockNum { // we may already have it exactly
break
}
if err := iter.NextBlock(); errors.Is(err, ErrFuture) {
if err := iter.NextBlock(); errors.Is(err, entrydb.ErrFuture) {
db.log.Trace("ran out of data, could not find block", "nextIndex", iter.NextIndex(), "target", blockNum)
return nil, ErrFuture
return nil, entrydb.ErrFuture
} else if err != nil {
db.log.Error("failed to read next block", "nextIndex", iter.NextIndex(), "target", blockNum, "err", err)
return nil, err
......@@ -301,7 +323,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error)
continue
}
if num != blockNum { // block does not contain
return nil, fmt.Errorf("looking for %d, but already at %d: %w", blockNum, num, ErrConflict)
return nil, fmt.Errorf("looking for %d, but already at %d: %w", blockNum, num, entrydb.ErrConflict)
}
break
}
......@@ -310,7 +332,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error)
// so two logs before quitting (and not 3 to then quit after).
for iter.current.logsSince < logIndex {
if err := iter.NextInitMsg(); err == io.EOF {
return nil, ErrFuture
return nil, entrydb.ErrFuture
} else if err != nil {
return nil, err
}
......@@ -320,7 +342,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error)
}
if num > blockNum {
// we overshot, the block did not contain as many seen log events as requested
return nil, ErrConflict
return nil, entrydb.ErrConflict
}
_, idx, ok := iter.InitMessage()
if !ok {
......@@ -354,7 +376,7 @@ func (db *DB) newIterator(index entrydb.EntryIdx) *iterator {
// Returns the index of the searchCheckpoint to begin reading from or an error.
func (db *DB) searchCheckpoint(sealedBlockNum uint64, logsSince uint32) (entrydb.EntryIdx, error) {
if db.lastEntryContext.nextEntryIndex == 0 {
return 0, ErrFuture // empty DB, everything is in the future
return 0, entrydb.ErrFuture // empty DB, everything is in the future
}
n := (db.lastEntryIdx() / searchCheckpointFrequency) + 1
// Define: x is the array of known checkpoints
......@@ -391,7 +413,7 @@ func (db *DB) searchCheckpoint(sealedBlockNum uint64, logsSince uint32) (entrydb
if checkpoint.blockNum > sealedBlockNum ||
(checkpoint.blockNum == sealedBlockNum && checkpoint.logsSince > logsSince) {
return 0, fmt.Errorf("missing data, earliest search checkpoint is %d with %d logs, cannot find something before or at %d with %d logs: %w",
checkpoint.blockNum, checkpoint.logsSince, sealedBlockNum, logsSince, ErrSkipped)
checkpoint.blockNum, checkpoint.logsSince, sealedBlockNum, logsSince, entrydb.ErrSkipped)
}
return result, nil
}
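Since checkpoints land at every multiple of searchCheckpointFrequency, both the init path above and this binary search reduce to integer division. A worked sketch with a hypothetical frequency of 256 (the real constant is defined elsewhere in this package):

// With freq = 256 and lastEntryIdx = 1000:
//   last checkpoint index: (1000 / 256) * 256 = 768
//   checkpoints to search: (1000 / 256) + 1 = 4 (at 0, 256, 512, 768)
func lastCheckpointIdx(lastEntryIdx, freq int64) int64 {
	return (lastEntryIdx / freq) * freq
}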
......
......@@ -7,12 +7,13 @@ import (
"os"
"testing"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
)
type statInvariant func(stat os.FileInfo, m *stubMetrics) error
type entryInvariant func(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error
type entryInvariant func(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error
// checkDBInvariants reads the database log directly and asserts a set of invariants on the data.
func checkDBInvariants(t *testing.T, dbPath string, m *stubMetrics) {
......@@ -30,7 +31,7 @@ func checkDBInvariants(t *testing.T, dbPath string, m *stubMetrics) {
// Read all entries as binary blobs
file, err := os.OpenFile(dbPath, os.O_RDONLY, 0o644)
require.NoError(t, err)
entries := make([]entrydb.Entry, stat.Size()/entrydb.EntrySize)
entries := make([]Entry, stat.Size()/entrydb.EntrySize)
for i := range entries {
n, err := io.ReadFull(file, entries[i][:])
require.NoErrorf(t, err, "failed to read entry %v", i)
......@@ -56,7 +57,7 @@ func checkDBInvariants(t *testing.T, dbPath string, m *stubMetrics) {
}
}
func fmtEntries(entries []entrydb.Entry) string {
func fmtEntries(entries []Entry) string {
out := ""
for i, entry := range entries {
out += fmt.Sprintf("%v: %x\n", i, entry)
......@@ -80,44 +81,44 @@ func invariantFileSizeMatchesEntryCountMetric(stat os.FileInfo, m *stubMetrics)
return nil
}
func invariantSearchCheckpointAtEverySearchCheckpointFrequency(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entryIdx%searchCheckpointFrequency == 0 && entry.Type() != entrydb.TypeSearchCheckpoint {
func invariantSearchCheckpointAtEverySearchCheckpointFrequency(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error {
if entryIdx%searchCheckpointFrequency == 0 && entry.Type() != TypeSearchCheckpoint {
return fmt.Errorf("should have search checkpoints every %v entries but entry %v was %x", searchCheckpointFrequency, entryIdx, entry)
}
return nil
}
func invariantCanonicalHashOrCheckpointAfterEverySearchCheckpoint(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entry.Type() != entrydb.TypeSearchCheckpoint {
func invariantCanonicalHashOrCheckpointAfterEverySearchCheckpoint(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error {
if entry.Type() != TypeSearchCheckpoint {
return nil
}
if entryIdx+1 >= len(entries) {
return fmt.Errorf("expected canonical hash or checkpoint after search checkpoint at entry %v but no further entries found", entryIdx)
}
nextEntry := entries[entryIdx+1]
if nextEntry.Type() != entrydb.TypeCanonicalHash && nextEntry.Type() != entrydb.TypeSearchCheckpoint {
if nextEntry.Type() != TypeCanonicalHash && nextEntry.Type() != TypeSearchCheckpoint {
return fmt.Errorf("expected canonical hash or checkpoint after search checkpoint at entry %v but got %x", entryIdx, nextEntry)
}
return nil
}
// invariantSearchCheckpointBeforeEveryCanonicalHash ensures we don't have extra canonical-hash entries
func invariantSearchCheckpointBeforeEveryCanonicalHash(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entry.Type() != entrydb.TypeCanonicalHash {
func invariantSearchCheckpointBeforeEveryCanonicalHash(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error {
if entry.Type() != TypeCanonicalHash {
return nil
}
if entryIdx == 0 {
return fmt.Errorf("expected search checkpoint before canonical hash at entry %v but no previous entries present", entryIdx)
}
prevEntry := entries[entryIdx-1]
if prevEntry.Type() != entrydb.TypeSearchCheckpoint {
if prevEntry.Type() != TypeSearchCheckpoint {
return fmt.Errorf("expected search checkpoint before canonical hash at entry %v but got %x", entryIdx, prevEntry)
}
return nil
}
func invariantExecLinkAfterInitEventWithFlagSet(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entry.Type() != entrydb.TypeInitiatingEvent {
func invariantExecLinkAfterInitEventWithFlagSet(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error {
if entry.Type() != TypeInitiatingEvent {
return nil
}
hasExecMessage := entry[1]&eventFlagHasExecutingMessage != 0
......@@ -131,14 +132,14 @@ func invariantExecLinkAfterInitEventWithFlagSet(entryIdx int, entry entrydb.Entr
if len(entries) <= linkIdx {
return fmt.Errorf("expected executing link after initiating event with exec msg flag set at entry %v but there were no more events", entryIdx)
}
if entries[linkIdx].Type() != entrydb.TypeExecutingLink {
if entries[linkIdx].Type() != TypeExecutingLink {
return fmt.Errorf("expected executing link at idx %v after initiating event with exec msg flag set at entry %v but got type %v", linkIdx, entryIdx, entries[linkIdx][0])
}
return nil
}
func invariantExecLinkOnlyAfterInitiatingEventWithFlagSet(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entry.Type() != entrydb.TypeExecutingLink {
func invariantExecLinkOnlyAfterInitiatingEventWithFlagSet(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error {
if entry.Type() != TypeExecutingLink {
return nil
}
if entryIdx == 0 {
......@@ -152,7 +153,7 @@ func invariantExecLinkOnlyAfterInitiatingEventWithFlagSet(entryIdx int, entry en
return fmt.Errorf("found executing link without a preceding initiating event at entry %v", entryIdx)
}
initEntry := entries[initIdx]
if initEntry.Type() != entrydb.TypeInitiatingEvent {
if initEntry.Type() != TypeInitiatingEvent {
return fmt.Errorf("expected initiating event at entry %v prior to executing link at %v but got %x", initIdx, entryIdx, initEntry[0])
}
flags := initEntry[1]
......@@ -162,8 +163,8 @@ func invariantExecLinkOnlyAfterInitiatingEventWithFlagSet(entryIdx int, entry en
return nil
}
func invariantExecCheckAfterExecLink(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entry.Type() != entrydb.TypeExecutingLink {
func invariantExecCheckAfterExecLink(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error {
if entry.Type() != TypeExecutingLink {
return nil
}
checkIdx := entryIdx + 1
......@@ -174,14 +175,14 @@ func invariantExecCheckAfterExecLink(entryIdx int, entry entrydb.Entry, entries
return fmt.Errorf("expected executing link at %v to be followed by executing check at %v but ran out of entries", entryIdx, checkIdx)
}
checkEntry := entries[checkIdx]
if checkEntry.Type() != entrydb.TypeExecutingCheck {
if checkEntry.Type() != TypeExecutingCheck {
return fmt.Errorf("expected executing link at %v to be followed by executing check at %v but got type %v", entryIdx, checkIdx, checkEntry[0])
}
return nil
}
func invariantExecCheckOnlyAfterExecLink(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entry.Type() != entrydb.TypeExecutingCheck {
func invariantExecCheckOnlyAfterExecLink(entryIdx int, entry Entry, entries []Entry, m *stubMetrics) error {
if entry.Type() != TypeExecutingCheck {
return nil
}
if entryIdx == 0 {
......@@ -195,7 +196,7 @@ func invariantExecCheckOnlyAfterExecLink(entryIdx int, entry entrydb.Entry, entr
return fmt.Errorf("found executing link without a preceding initiating event at entry %v", entryIdx)
}
linkEntry := entries[linkIdx]
if linkEntry.Type() != entrydb.TypeExecutingLink {
if linkEntry.Type() != TypeExecutingLink {
return fmt.Errorf("expected executing link at entry %v prior to executing check at %v but got %x", linkIdx, entryIdx, linkEntry[0])
}
return nil
......
......@@ -90,7 +90,7 @@ func TestLatestSealedBlockNum(t *testing.T) {
require.False(t, ok, "empty db expected")
require.Zero(t, n)
idx, err := db.searchCheckpoint(0, 0)
require.ErrorIs(t, err, ErrFuture, "no checkpoint in empty db")
require.ErrorIs(t, err, entrydb.ErrFuture, "no checkpoint in empty db")
require.Zero(t, idx)
})
})
......@@ -123,7 +123,7 @@ func TestLatestSealedBlockNum(t *testing.T) {
require.NoError(t, err)
require.Zero(t, idx, "anchor block as checkpoint 0")
_, err = db.searchCheckpoint(0, 0)
require.ErrorIs(t, err, ErrSkipped, "no checkpoint before genesis")
require.ErrorIs(t, err, entrydb.ErrSkipped, "no checkpoint before genesis")
})
})
t.Run("Block 1 case", func(t *testing.T) {
......@@ -175,7 +175,7 @@ func TestAddLog(t *testing.T) {
func(t *testing.T, db *DB, m *stubMetrics) {
genesis := eth.BlockID{Hash: createHash(15), Number: 0}
err := db.AddLog(createHash(1), genesis, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
require.ErrorIs(t, err, entrydb.ErrOutOfOrder)
})
})
......@@ -265,7 +265,7 @@ func TestAddLog(t *testing.T) {
func(t *testing.T, db *DB, m *stubMetrics) {
bl14 := eth.BlockID{Hash: createHash(14), Number: 14}
err := db.SealBlock(createHash(13), bl14, 5000)
require.ErrorIs(t, err, ErrConflict)
require.ErrorIs(t, err, entrydb.ErrConflict)
})
})
......@@ -282,7 +282,7 @@ func TestAddLog(t *testing.T) {
func(t *testing.T, db *DB, m *stubMetrics) {
onto := eth.BlockID{Hash: createHash(14), Number: 14}
err := db.AddLog(createHash(1), onto, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder, "cannot build logs on 14 when 15 is already sealed")
require.ErrorIs(t, err, entrydb.ErrOutOfOrder, "cannot build logs on 14 when 15 is already sealed")
})
})
......@@ -298,7 +298,7 @@ func TestAddLog(t *testing.T) {
func(t *testing.T, db *DB, m *stubMetrics) {
bl15 := eth.BlockID{Hash: createHash(15), Number: 15}
err := db.AddLog(createHash(1), bl15, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder, "already at log index 2")
require.ErrorIs(t, err, entrydb.ErrOutOfOrder, "already at log index 2")
})
})
......@@ -313,7 +313,7 @@ func TestAddLog(t *testing.T) {
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createHash(1), eth.BlockID{Hash: createHash(16), Number: 16}, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
require.ErrorIs(t, err, entrydb.ErrOutOfOrder)
})
})
......@@ -329,7 +329,7 @@ func TestAddLog(t *testing.T) {
func(t *testing.T, db *DB, m *stubMetrics) {
bl15 := eth.BlockID{Hash: createHash(15), Number: 15}
err := db.AddLog(createHash(1), bl15, 1, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder, "already at log index 2")
require.ErrorIs(t, err, entrydb.ErrOutOfOrder, "already at log index 2")
})
})
......@@ -345,7 +345,7 @@ func TestAddLog(t *testing.T) {
func(t *testing.T, db *DB, m *stubMetrics) {
bl15 := eth.BlockID{Hash: createHash(16), Number: 16}
err := db.AddLog(createHash(1), bl15, 2, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
require.ErrorIs(t, err, entrydb.ErrOutOfOrder)
})
})
......@@ -360,7 +360,7 @@ func TestAddLog(t *testing.T) {
func(t *testing.T, db *DB, m *stubMetrics) {
bl15 := eth.BlockID{Hash: createHash(15), Number: 15}
err := db.AddLog(createHash(1), bl15, 2, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
require.ErrorIs(t, err, entrydb.ErrOutOfOrder)
})
})
......@@ -373,7 +373,7 @@ func TestAddLog(t *testing.T) {
func(t *testing.T, db *DB, m *stubMetrics) {
bl15 := eth.BlockID{Hash: createHash(15), Number: 15}
err := db.AddLog(createHash(1), bl15, 5, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
require.ErrorIs(t, err, entrydb.ErrOutOfOrder)
})
})
......@@ -394,7 +394,7 @@ func TestAddLog(t *testing.T) {
err = db.SealBlock(bl15.Hash, bl16, 5001)
require.NoError(t, err)
err = db.AddLog(createHash(1), bl16, 1, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
require.ErrorIs(t, err, entrydb.ErrOutOfOrder)
})
})
......@@ -698,9 +698,8 @@ func TestGetBlockInfo(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {},
func(t *testing.T, db *DB, m *stubMetrics) {
bl10 := eth.BlockID{Hash: createHash(10), Number: 10}
_, err := db.FindSealedBlock(bl10)
require.ErrorIs(t, err, ErrFuture)
_, err := db.FindSealedBlock(10)
require.ErrorIs(t, err, entrydb.ErrFuture)
})
})
......@@ -714,9 +713,8 @@ func TestGetBlockInfo(t *testing.T) {
},
func(t *testing.T, db *DB, m *stubMetrics) {
// if the DB starts at 11, then shouldn't find 10
bl10 := eth.BlockID{Hash: createHash(10), Number: 10}
_, err := db.FindSealedBlock(bl10)
require.ErrorIs(t, err, ErrSkipped)
_, err := db.FindSealedBlock(10)
require.ErrorIs(t, err, entrydb.ErrSkipped)
})
})
......@@ -727,10 +725,10 @@ func TestGetBlockInfo(t *testing.T) {
require.NoError(t, db.SealBlock(common.Hash{}, block, 500))
},
func(t *testing.T, db *DB, m *stubMetrics) {
index, err := db.FindSealedBlock(block)
seal, err := db.FindSealedBlock(block.Number)
require.NoError(t, err)
require.Equal(t, entrydb.EntryIdx(2), index,
"expecting to continue after search checkpoint that declared the block")
require.Equal(t, block, seal.ID())
require.Equal(t, uint64(500), seal.Timestamp)
})
})
}
......@@ -755,7 +753,7 @@ func requireConflicts(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logH
m, ok := db.m.(*stubMetrics)
require.True(t, ok, "Did not get the expected metrics type")
_, err := db.Contains(blockNum, logIdx, logHash)
require.ErrorIs(t, err, ErrConflict, "canonical chain must not include this log")
require.ErrorIs(t, err, entrydb.ErrConflict, "canonical chain must not include this log")
require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency*2), "Should not need to read more than between two checkpoints")
}
......@@ -763,7 +761,7 @@ func requireFuture(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash
m, ok := db.m.(*stubMetrics)
require.True(t, ok, "Did not get the expected metrics type")
_, err := db.Contains(blockNum, logIdx, logHash)
require.ErrorIs(t, err, ErrFuture, "canonical chain does not yet include this log")
require.ErrorIs(t, err, entrydb.ErrFuture, "canonical chain does not yet include this log")
require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency*2), "Should not need to read more than between two checkpoints")
}
......@@ -791,7 +789,7 @@ func TestRecoverOnCreate(t *testing.T) {
return db, m, err
}
storeWithEvents := func(evts ...entrydb.Entry) *stubEntryStore {
storeWithEvents := func(evts ...Entry) *stubEntryStore {
store := &stubEntryStore{}
store.entries = append(store.entries, evts...)
return store
......@@ -924,9 +922,9 @@ func TestRewind(t *testing.T) {
t.Run("WhenEmpty", func(t *testing.T) {
runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) {},
func(t *testing.T, db *DB, m *stubMetrics) {
require.ErrorIs(t, db.Rewind(100), ErrFuture)
require.ErrorIs(t, db.Rewind(100), entrydb.ErrFuture)
// Genesis is a block too, not present in an empty DB
require.ErrorIs(t, db.Rewind(0), ErrFuture)
require.ErrorIs(t, db.Rewind(0), entrydb.ErrFuture)
})
})
......@@ -944,7 +942,7 @@ func TestRewind(t *testing.T) {
require.NoError(t, db.SealBlock(bl51.Hash, bl52, 504))
require.NoError(t, db.AddLog(createHash(4), bl52, 0, nil))
// cannot rewind to a block that is not sealed yet
require.ErrorIs(t, db.Rewind(53), ErrFuture)
require.ErrorIs(t, db.Rewind(53), entrydb.ErrFuture)
},
func(t *testing.T, db *DB, m *stubMetrics) {
requireContains(t, db, 51, 0, createHash(1))
......@@ -963,7 +961,7 @@ func TestRewind(t *testing.T) {
require.NoError(t, db.AddLog(createHash(1), bl50, 0, nil))
require.NoError(t, db.AddLog(createHash(2), bl50, 1, nil))
// cannot go back to an unknown block
require.ErrorIs(t, db.Rewind(25), ErrSkipped)
require.ErrorIs(t, db.Rewind(25), entrydb.ErrSkipped)
},
func(t *testing.T, db *DB, m *stubMetrics) {
requireContains(t, db, 51, 0, createHash(1))
......@@ -1088,12 +1086,12 @@ func TestRewind(t *testing.T) {
bl29 := eth.BlockID{Hash: createHash(29), Number: 29}
// 29 was deleted
err := db.AddLog(createHash(2), bl29, 1, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder, "Cannot add log on removed block")
require.ErrorIs(t, err, entrydb.ErrOutOfOrder, "Cannot add log on removed block")
// 15 is older, we have up to 16
bl15 := eth.BlockID{Hash: createHash(15), Number: 15}
// try to add a third log to 15
err = db.AddLog(createHash(10), bl15, 2, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
require.ErrorIs(t, err, entrydb.ErrOutOfOrder)
bl16 := eth.BlockID{Hash: createHash(16), Number: 16}
// try to add a log to 17, on top of 16
err = db.AddLog(createHash(42), bl16, 0, nil)
......@@ -1119,7 +1117,7 @@ func (s *stubMetrics) RecordDBSearchEntriesRead(count int64) {
var _ Metrics = (*stubMetrics)(nil)
type stubEntryStore struct {
entries []entrydb.Entry
entries []Entry
}
func (s *stubEntryStore) Size() int64 {
......@@ -1130,14 +1128,14 @@ func (s *stubEntryStore) LastEntryIdx() entrydb.EntryIdx {
return entrydb.EntryIdx(s.Size() - 1)
}
func (s *stubEntryStore) Read(idx entrydb.EntryIdx) (entrydb.Entry, error) {
func (s *stubEntryStore) Read(idx entrydb.EntryIdx) (Entry, error) {
if idx < entrydb.EntryIdx(len(s.entries)) {
return s.entries[idx], nil
}
return entrydb.Entry{}, io.EOF
return Entry{}, io.EOF
}
func (s *stubEntryStore) Append(entries ...entrydb.Entry) error {
func (s *stubEntryStore) Append(entries ...Entry) error {
s.entries = append(s.entries, entries...)
return nil
}
......
......@@ -27,9 +27,9 @@ func newSearchCheckpoint(blockNum uint64, logsSince uint32, timestamp uint64) se
}
}
func newSearchCheckpointFromEntry(data entrydb.Entry) (searchCheckpoint, error) {
if data.Type() != entrydb.TypeSearchCheckpoint {
return searchCheckpoint{}, fmt.Errorf("%w: attempting to decode search checkpoint but was type %s", ErrDataCorruption, data.Type())
func newSearchCheckpointFromEntry(data Entry) (searchCheckpoint, error) {
if data.Type() != TypeSearchCheckpoint {
return searchCheckpoint{}, fmt.Errorf("%w: attempting to decode search checkpoint but was type %s", entrydb.ErrDataCorruption, data.Type())
}
return searchCheckpoint{
blockNum: binary.LittleEndian.Uint64(data[1:9]),
......@@ -40,9 +40,9 @@ func newSearchCheckpointFromEntry(data entrydb.Entry) (searchCheckpoint, error)
// encode creates a checkpoint entry
// type 0: "search checkpoint" <type><uint64 block number: 8 bytes><uint32 logsSince count: 4 bytes><uint64 timestamp: 8 bytes> = 21 bytes
func (s searchCheckpoint) encode() entrydb.Entry {
var data entrydb.Entry
data[0] = uint8(entrydb.TypeSearchCheckpoint)
func (s searchCheckpoint) encode() Entry {
var data Entry
data[0] = uint8(TypeSearchCheckpoint)
binary.LittleEndian.PutUint64(data[1:9], s.blockNum)
binary.LittleEndian.PutUint32(data[9:13], s.logsSince)
binary.LittleEndian.PutUint64(data[13:21], s.timestamp)
......@@ -57,16 +57,16 @@ func newCanonicalHash(hash common.Hash) canonicalHash {
return canonicalHash{hash: hash}
}
func newCanonicalHashFromEntry(data entrydb.Entry) (canonicalHash, error) {
if data.Type() != entrydb.TypeCanonicalHash {
return canonicalHash{}, fmt.Errorf("%w: attempting to decode canonical hash but was type %s", ErrDataCorruption, data.Type())
func newCanonicalHashFromEntry(data Entry) (canonicalHash, error) {
if data.Type() != TypeCanonicalHash {
return canonicalHash{}, fmt.Errorf("%w: attempting to decode canonical hash but was type %s", entrydb.ErrDataCorruption, data.Type())
}
return newCanonicalHash(common.Hash(data[1:33])), nil
}
func (c canonicalHash) encode() entrydb.Entry {
var entry entrydb.Entry
entry[0] = uint8(entrydb.TypeCanonicalHash)
func (c canonicalHash) encode() Entry {
var entry Entry
entry[0] = uint8(TypeCanonicalHash)
copy(entry[1:33], c.hash[:])
return entry
}
......@@ -76,9 +76,9 @@ type initiatingEvent struct {
logHash common.Hash
}
func newInitiatingEventFromEntry(data entrydb.Entry) (initiatingEvent, error) {
if data.Type() != entrydb.TypeInitiatingEvent {
return initiatingEvent{}, fmt.Errorf("%w: attempting to decode initiating event but was type %s", ErrDataCorruption, data.Type())
func newInitiatingEventFromEntry(data Entry) (initiatingEvent, error) {
if data.Type() != TypeInitiatingEvent {
return initiatingEvent{}, fmt.Errorf("%w: attempting to decode initiating event but was type %s", entrydb.ErrDataCorruption, data.Type())
}
flags := data[1]
return initiatingEvent{
......@@ -96,9 +96,9 @@ func newInitiatingEvent(logHash common.Hash, hasExecMsg bool) initiatingEvent {
// encode creates an initiating event entry
// type 2: "initiating event" <type><flags><event-hash: 20 bytes> = 22 bytes
func (i initiatingEvent) encode() entrydb.Entry {
var data entrydb.Entry
data[0] = uint8(entrydb.TypeInitiatingEvent)
func (i initiatingEvent) encode() Entry {
var data Entry
data[0] = uint8(TypeInitiatingEvent)
flags := byte(0)
if i.hasExecMsg {
flags = flags | eventFlagHasExecutingMessage
......@@ -127,9 +127,9 @@ func newExecutingLink(msg types.ExecutingMessage) (executingLink, error) {
}, nil
}
func newExecutingLinkFromEntry(data entrydb.Entry) (executingLink, error) {
if data.Type() != entrydb.TypeExecutingLink {
return executingLink{}, fmt.Errorf("%w: attempting to decode executing link but was type %s", ErrDataCorruption, data.Type())
func newExecutingLinkFromEntry(data Entry) (executingLink, error) {
if data.Type() != TypeExecutingLink {
return executingLink{}, fmt.Errorf("%w: attempting to decode executing link but was type %s", entrydb.ErrDataCorruption, data.Type())
}
timestamp := binary.LittleEndian.Uint64(data[16:24])
return executingLink{
......@@ -142,9 +142,9 @@ func newExecutingLinkFromEntry(data entrydb.Entry) (executingLink, error) {
// encode creates an executing link entry
// type 3: "executing link" <type><chain: 4 bytes><blocknum: 8 bytes><event index: 3 bytes><uint64 timestamp: 8 bytes> = 24 bytes
func (e executingLink) encode() entrydb.Entry {
var entry entrydb.Entry
entry[0] = uint8(entrydb.TypeExecutingLink)
func (e executingLink) encode() Entry {
var entry Entry
entry[0] = uint8(TypeExecutingLink)
binary.LittleEndian.PutUint32(entry[1:5], e.chain)
binary.LittleEndian.PutUint64(entry[5:13], e.blockNum)
......@@ -164,18 +164,18 @@ func newExecutingCheck(hash common.Hash) executingCheck {
return executingCheck{hash: hash}
}
func newExecutingCheckFromEntry(data entrydb.Entry) (executingCheck, error) {
if data.Type() != entrydb.TypeExecutingCheck {
return executingCheck{}, fmt.Errorf("%w: attempting to decode executing check but was type %s", ErrDataCorruption, data.Type())
func newExecutingCheckFromEntry(data Entry) (executingCheck, error) {
if data.Type() != TypeExecutingCheck {
return executingCheck{}, fmt.Errorf("%w: attempting to decode executing check but was type %s", entrydb.ErrDataCorruption, data.Type())
}
return newExecutingCheck(common.Hash(data[1:33])), nil
}
// encode creates an executing check entry
// type 4: "executing check" <type><event-hash: 32 bytes> = 33 bytes
func (e executingCheck) encode() entrydb.Entry {
var entry entrydb.Entry
entry[0] = uint8(entrydb.TypeExecutingCheck)
func (e executingCheck) encode() Entry {
var entry Entry
entry[0] = uint8(TypeExecutingCheck)
copy(entry[1:33], e.hash[:])
return entry
}
......@@ -184,8 +184,8 @@ type paddingEntry struct{}
// encoding of the padding entry
// type 5: "padding" <type><padding: 33 bytes> = 34 bytes
func (e paddingEntry) encode() entrydb.Entry {
var entry entrydb.Entry
entry[0] = uint8(entrydb.TypePadding)
func (e paddingEntry) encode() Entry {
var entry Entry
entry[0] = uint8(TypePadding)
return entry
}
package logs
import (
"fmt"
"strings"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
)
type EntryObj interface {
encode() Entry
}
type Entry = entrydb.Entry[EntryType]
type EntryTypeFlag uint8
const (
FlagSearchCheckpoint EntryTypeFlag = 1 << TypeSearchCheckpoint
FlagCanonicalHash EntryTypeFlag = 1 << TypeCanonicalHash
FlagInitiatingEvent EntryTypeFlag = 1 << TypeInitiatingEvent
FlagExecutingLink EntryTypeFlag = 1 << TypeExecutingLink
FlagExecutingCheck EntryTypeFlag = 1 << TypeExecutingCheck
FlagPadding EntryTypeFlag = 1 << TypePadding
// for additional padding
FlagPadding2 EntryTypeFlag = FlagPadding << 1
)
func (x EntryTypeFlag) String() string {
	var out []string
	for t := TypeSearchCheckpoint; t <= TypePadding; t++ { // one name per one-hot type bit
		if x.Any(EntryTypeFlag(1) << t) {
			out = append(out, t.String())
		}
	}
	if x.Any(FlagPadding2) { // FlagPadding2 has no EntryType counterpart
		out = append(out, "padding2")
	}
	return strings.Join(out, "|")
}
func (x EntryTypeFlag) Any(v EntryTypeFlag) bool {
return x&v != 0
}
func (x *EntryTypeFlag) Add(v EntryTypeFlag) {
*x = *x | v
}
func (x *EntryTypeFlag) Remove(v EntryTypeFlag) {
*x = *x &^ v
}
type EntryType uint8
const (
TypeSearchCheckpoint EntryType = iota
TypeCanonicalHash
TypeInitiatingEvent
TypeExecutingLink
TypeExecutingCheck
TypePadding
)
func (x EntryType) String() string {
switch x {
case TypeSearchCheckpoint:
return "searchCheckpoint"
case TypeCanonicalHash:
return "canonicalHash"
case TypeInitiatingEvent:
return "initiatingEvent"
case TypeExecutingLink:
return "executingLink"
case TypeExecutingCheck:
return "executingCheck"
case TypePadding:
return "padding"
default:
return fmt.Sprintf("unknown-%d", uint8(x))
}
}
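The Flag* values are one-hot encodings of the Type* values, so a single byte can track which entry kinds are still outstanding. A small sketch of the Add/Any/Remove flow used by the state machine (exampleFlags is illustrative, not part of this diff):

func exampleFlags() {
	var need EntryTypeFlag
	need.Add(FlagSearchCheckpoint | FlagCanonicalHash) // checkpoint due, hash must follow
	fmt.Println(need.String())                         // searchCheckpoint|canonicalHash
	fmt.Println(need.Any(FlagInitiatingEvent))         // false
	need.Remove(FlagSearchCheckpoint)
	fmt.Println(need.Any(FlagSearchCheckpoint))        // false
}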
......@@ -8,14 +8,13 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
type IteratorState interface {
NextIndex() entrydb.EntryIdx
HeadPointer() (heads.HeadPointer, error)
SealedBlock() (hash common.Hash, num uint64, ok bool)
SealedTimestamp() (timestamp uint64, ok bool)
InitMessage() (hash common.Hash, logIndex uint32, ok bool)
ExecMessage() *types.ExecutingMessage
}
......@@ -42,7 +41,7 @@ type traverseConditionalFn func(state IteratorState) error
func (i *iterator) End() error {
for {
_, err := i.next()
if errors.Is(err, ErrFuture) {
if errors.Is(err, entrydb.ErrFuture) {
return nil
} else if err != nil {
return err
......@@ -59,7 +58,7 @@ func (i *iterator) NextInitMsg() error {
if err != nil {
return err
}
if typ == entrydb.TypeInitiatingEvent {
if typ == TypeInitiatingEvent {
seenLog = true
}
if !i.current.hasCompleteBlock() {
......@@ -98,7 +97,7 @@ func (i *iterator) NextBlock() error {
if err != nil {
return err
}
if typ == entrydb.TypeSearchCheckpoint {
if typ == TypeSearchCheckpoint {
seenBlock = true
}
if !i.current.hasCompleteBlock() {
......@@ -130,12 +129,12 @@ func (i *iterator) TraverseConditional(fn traverseConditionalFn) error {
}
// Read and apply the next entry.
func (i *iterator) next() (entrydb.EntryType, error) {
func (i *iterator) next() (EntryType, error) {
index := i.current.nextEntryIndex
entry, err := i.db.store.Read(index)
if err != nil {
if errors.Is(err, io.EOF) {
return 0, ErrFuture
return 0, entrydb.ErrFuture
}
return 0, fmt.Errorf("failed to read entry %d: %w", index, err)
}
......@@ -157,6 +156,11 @@ func (i *iterator) SealedBlock() (hash common.Hash, num uint64, ok bool) {
return i.current.SealedBlock()
}
// SealedTimestamp returns the timestamp of SealedBlock
func (i *iterator) SealedTimestamp() (timestamp uint64, ok bool) {
return i.current.SealedTimestamp()
}
// InitMessage returns the current initiating message, if any is available.
func (i *iterator) InitMessage() (hash common.Hash, logIndex uint32, ok bool) {
return i.current.InitMessage()
......@@ -166,7 +170,3 @@ func (i *iterator) InitMessage() (hash common.Hash, logIndex uint32, ok bool) {
func (i *iterator) ExecMessage() *types.ExecutingMessage {
return i.current.ExecMessage()
}
func (i *iterator) HeadPointer() (heads.HeadPointer, error) {
return i.current.HeadPointer()
}
......@@ -9,7 +9,6 @@ import (
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
......@@ -73,18 +72,14 @@ type logContext struct {
// then we know an executing message is still coming.
execMsg *types.ExecutingMessage
need entrydb.EntryTypeFlag
need EntryTypeFlag
// buffer of entries not yet in the DB.
// This is generated as objects are applied.
// E.g. you can build multiple hypothetical blocks with log events on top of the state,
// before flushing the entries to a DB.
// However, no entries can be read from the DB while objects are being applied.
out []entrydb.Entry
}
type EntryObj interface {
encode() entrydb.Entry
out []Entry
}
func (l *logContext) NextIndex() entrydb.EntryIdx {
......@@ -99,12 +94,19 @@ func (l *logContext) SealedBlock() (hash common.Hash, num uint64, ok bool) {
return l.blockHash, l.blockNum, true
}
func (l *logContext) SealedTimestamp() (timestamp uint64, ok bool) {
if !l.hasCompleteBlock() {
return 0, false
}
return l.timestamp, true
}
func (l *logContext) hasCompleteBlock() bool {
return !l.need.Any(entrydb.FlagCanonicalHash)
return !l.need.Any(FlagCanonicalHash)
}
func (l *logContext) hasIncompleteLog() bool {
return l.need.Any(entrydb.FlagInitiatingEvent | entrydb.FlagExecutingLink | entrydb.FlagExecutingCheck)
return l.need.Any(FlagInitiatingEvent | FlagExecutingLink | FlagExecutingCheck)
}
func (l *logContext) hasReadableLog() bool {
......@@ -127,20 +129,8 @@ func (l *logContext) ExecMessage() *types.ExecutingMessage {
return nil
}
func (l *logContext) HeadPointer() (heads.HeadPointer, error) {
if l.need != 0 {
return heads.HeadPointer{}, errors.New("cannot provide head pointer while state is incomplete")
}
return heads.HeadPointer{
LastSealedBlockHash: l.blockHash,
LastSealedBlockNum: l.blockNum,
LastSealedTimestamp: l.timestamp,
LogsSince: l.logsSince,
}, nil
}
// ApplyEntry applies an entry on top of the current state.
func (l *logContext) ApplyEntry(entry entrydb.Entry) error {
func (l *logContext) ApplyEntry(entry Entry) error {
// Wrap processEntry to add common useful error message info
err := l.processEntry(entry)
if err != nil {
......@@ -152,28 +142,28 @@ func (l *logContext) ApplyEntry(entry entrydb.Entry) error {
// processEntry decodes and applies an entry to the state.
// Entries may not be applied if we are in the process of generating entries from objects.
// These outputs need to be flushed before inputs can be accepted.
func (l *logContext) processEntry(entry entrydb.Entry) error {
func (l *logContext) processEntry(entry Entry) error {
if len(l.out) != 0 {
panic("can only apply without appending if the state is still empty")
}
switch entry.Type() {
case entrydb.TypeSearchCheckpoint:
case TypeSearchCheckpoint:
current, err := newSearchCheckpointFromEntry(entry)
if err != nil {
return err
}
l.blockNum = current.blockNum
l.blockHash = common.Hash{}
l.logsSince = current.logsSince // TODO this is bumping the logsSince?
l.logsSince = current.logsSince
l.timestamp = current.timestamp
l.need.Add(entrydb.FlagCanonicalHash)
l.need.Add(FlagCanonicalHash)
// Log data after the block we are sealing remains to be seen
if l.logsSince == 0 {
l.logHash = common.Hash{}
l.execMsg = nil
}
case entrydb.TypeCanonicalHash:
if !l.need.Any(entrydb.FlagCanonicalHash) {
case TypeCanonicalHash:
if !l.need.Any(FlagCanonicalHash) {
return errors.New("not ready for canonical hash entry, already sealed the last block")
}
canonHash, err := newCanonicalHashFromEntry(entry)
......@@ -181,8 +171,8 @@ func (l *logContext) processEntry(entry entrydb.Entry) error {
return err
}
l.blockHash = canonHash.hash
l.need.Remove(entrydb.FlagCanonicalHash)
case entrydb.TypeInitiatingEvent:
l.need.Remove(FlagCanonicalHash)
case TypeInitiatingEvent:
if !l.hasCompleteBlock() {
return errors.New("did not complete block seal, cannot add log")
}
......@@ -196,13 +186,13 @@ func (l *logContext) processEntry(entry entrydb.Entry) error {
l.execMsg = nil // clear the old state
l.logHash = evt.logHash
if evt.hasExecMsg {
l.need.Add(entrydb.FlagExecutingLink | entrydb.FlagExecutingCheck)
l.need.Add(FlagExecutingLink | FlagExecutingCheck)
} else {
l.logsSince += 1
}
l.need.Remove(entrydb.FlagInitiatingEvent)
case entrydb.TypeExecutingLink:
if !l.need.Any(entrydb.FlagExecutingLink) {
l.need.Remove(FlagInitiatingEvent)
case TypeExecutingLink:
if !l.need.Any(FlagExecutingLink) {
return errors.New("unexpected executing-link")
}
link, err := newExecutingLinkFromEntry(entry)
......@@ -216,13 +206,13 @@ func (l *logContext) processEntry(entry entrydb.Entry) error {
Timestamp: link.timestamp,
Hash: common.Hash{}, // not known yet
}
l.need.Remove(entrydb.FlagExecutingLink)
l.need.Add(entrydb.FlagExecutingCheck)
case entrydb.TypeExecutingCheck:
if l.need.Any(entrydb.FlagExecutingLink) {
l.need.Remove(FlagExecutingLink)
l.need.Add(FlagExecutingCheck)
case TypeExecutingCheck:
if l.need.Any(FlagExecutingLink) {
return errors.New("need executing link to be applied before the check part")
}
if !l.need.Any(entrydb.FlagExecutingCheck) {
if !l.need.Any(FlagExecutingCheck) {
return errors.New("unexpected executing check")
}
link, err := newExecutingCheckFromEntry(entry)
......@@ -230,13 +220,13 @@ func (l *logContext) processEntry(entry entrydb.Entry) error {
return err
}
l.execMsg.Hash = link.hash
l.need.Remove(entrydb.FlagExecutingCheck)
l.need.Remove(FlagExecutingCheck)
l.logsSince += 1
case entrydb.TypePadding:
if l.need.Any(entrydb.FlagPadding) {
l.need.Remove(entrydb.FlagPadding)
case TypePadding:
if l.need.Any(FlagPadding) {
l.need.Remove(FlagPadding)
} else {
l.need.Remove(entrydb.FlagPadding2)
l.need.Remove(FlagPadding2)
}
default:
return fmt.Errorf("unknown entry type: %s", entry.Type())
......@@ -253,77 +243,75 @@ func (l *logContext) appendEntry(obj EntryObj) {
l.nextEntryIndex += 1
}
// infer advances the logContext in cases where multiple entries are to be appended implicitly
// depending on the last type of entry, a new entry is appended,
// or when the searchCheckpoint should be inserted.
// This can be done repeatedly until there is no more implied data to extend.
// infer advances the logContext in cases where entries imply additional entries,
// e.g. a SearchCheckpoint implies a CanonicalHash will follow.
// This also handles inserting the searchCheckpoint at the set frequency, and padding entries.
func (l *logContext) infer() error {
// We force-insert a checkpoint whenever we hit the known fixed interval.
if l.nextEntryIndex%searchCheckpointFrequency == 0 {
l.need.Add(entrydb.FlagSearchCheckpoint)
l.need.Add(FlagSearchCheckpoint)
}
if l.need.Any(entrydb.FlagSearchCheckpoint) {
if l.need.Any(FlagSearchCheckpoint) {
l.appendEntry(newSearchCheckpoint(l.blockNum, l.logsSince, l.timestamp))
l.need.Add(entrydb.FlagCanonicalHash) // always follow with a canonical hash
l.need.Remove(entrydb.FlagSearchCheckpoint)
l.need.Add(FlagCanonicalHash) // always follow with a canonical hash
l.need.Remove(FlagSearchCheckpoint)
return nil
}
if l.need.Any(entrydb.FlagCanonicalHash) {
if l.need.Any(FlagCanonicalHash) {
l.appendEntry(newCanonicalHash(l.blockHash))
l.need.Remove(entrydb.FlagCanonicalHash)
l.need.Remove(FlagCanonicalHash)
return nil
}
if l.need.Any(entrydb.FlagPadding) {
if l.need.Any(FlagPadding) {
l.appendEntry(paddingEntry{})
l.need.Remove(entrydb.FlagPadding)
l.need.Remove(FlagPadding)
return nil
}
if l.need.Any(entrydb.FlagPadding2) {
if l.need.Any(FlagPadding2) {
l.appendEntry(paddingEntry{})
l.need.Remove(entrydb.FlagPadding2)
l.need.Remove(FlagPadding2)
return nil
}
if l.need.Any(entrydb.FlagInitiatingEvent) {
if l.need.Any(FlagInitiatingEvent) {
// If we are running out of space for log-event data,
// write padding entries to get past the checkpoint.
if l.execMsg != nil { // takes 3 total. Need to avoid the checkpoint.
switch l.nextEntryIndex % searchCheckpointFrequency {
case searchCheckpointFrequency - 1:
l.need.Add(entrydb.FlagPadding)
l.need.Add(FlagPadding)
return nil
case searchCheckpointFrequency - 2:
l.need.Add(entrydb.FlagPadding | entrydb.FlagPadding2)
l.need.Add(FlagPadding | FlagPadding2)
return nil
}
}
evt := newInitiatingEvent(l.logHash, l.execMsg != nil)
l.appendEntry(evt)
l.need.Remove(entrydb.FlagInitiatingEvent)
l.need.Remove(FlagInitiatingEvent)
if l.execMsg == nil {
l.logsSince += 1
}
return nil
}
if l.need.Any(entrydb.FlagExecutingLink) {
if l.need.Any(FlagExecutingLink) {
link, err := newExecutingLink(*l.execMsg)
if err != nil {
return fmt.Errorf("failed to create executing link: %w", err)
}
l.appendEntry(link)
l.need.Remove(entrydb.FlagExecutingLink)
l.need.Remove(FlagExecutingLink)
return nil
}
if l.need.Any(entrydb.FlagExecutingCheck) {
if l.need.Any(FlagExecutingCheck) {
l.appendEntry(newExecutingCheck(l.execMsg.Hash))
l.need.Remove(entrydb.FlagExecutingCheck)
l.need.Remove(FlagExecutingCheck)
l.logsSince += 1
return nil
}
return io.EOF
}
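The two padding cases above exist because an executing log occupies three consecutive entries (initiating event, executing link, executing check), and a forced checkpoint plus canonical hash must not split that triple. A hedged sketch of the arithmetic, with freq standing in for searchCheckpointFrequency (paddingNeeded is illustrative, not part of this diff):

func paddingNeeded(nextEntryIndex, freq int64) int64 {
	switch nextEntryIndex % freq {
	case freq - 1:
		return 1 // one pad: the checkpoint lands on the boundary, the triple follows intact
	case freq - 2:
		return 2 // two pads: otherwise the link/check would be split off from the event
	default:
		return 0
	}
}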
// inferFull advances the queued entries held by the log context repeatedly
// until no more implied entries can be added
// inferFull advances the logContext until it cannot infer any more entries.
func (l *logContext) inferFull() error {
for i := 0; i < 10; i++ {
err := l.infer()
......@@ -364,13 +352,13 @@ func (l *logContext) SealBlock(parent common.Hash, upd eth.BlockID, timestamp ui
return err
}
if l.blockHash != parent {
return fmt.Errorf("%w: cannot apply block %s (parent %s) on top of %s", ErrConflict, upd, parent, l.blockHash)
return fmt.Errorf("%w: cannot apply block %s (parent %s) on top of %s", entrydb.ErrConflict, upd, parent, l.blockHash)
}
if l.blockHash != (common.Hash{}) && l.blockNum+1 != upd.Number {
return fmt.Errorf("%w: cannot apply block %d on top of %d", ErrConflict, upd.Number, l.blockNum)
return fmt.Errorf("%w: cannot apply block %d on top of %d", entrydb.ErrConflict, upd.Number, l.blockNum)
}
if l.timestamp > timestamp {
return fmt.Errorf("%w: block timestamp %d must be equal or larger than current timestamp %d", ErrConflict, timestamp, l.timestamp)
return fmt.Errorf("%w: block timestamp %d must be equal or larger than current timestamp %d", entrydb.ErrConflict, timestamp, l.timestamp)
}
}
l.blockHash = upd.Hash
......@@ -379,7 +367,7 @@ func (l *logContext) SealBlock(parent common.Hash, upd eth.BlockID, timestamp ui
l.logsSince = 0
l.execMsg = nil
l.logHash = common.Hash{}
l.need.Add(entrydb.FlagSearchCheckpoint)
l.need.Add(FlagSearchCheckpoint)
return l.inferFull() // apply to the state as much as possible
}
......@@ -387,34 +375,34 @@ func (l *logContext) SealBlock(parent common.Hash, upd eth.BlockID, timestamp ui
// The parent-block that the log comes after must be applied with ApplyBlock first.
func (l *logContext) ApplyLog(parentBlock eth.BlockID, logIdx uint32, logHash common.Hash, execMsg *types.ExecutingMessage) error {
if parentBlock == (eth.BlockID{}) {
return fmt.Errorf("genesis does not have logs: %w", ErrLogOutOfOrder)
return fmt.Errorf("genesis does not have logs: %w", entrydb.ErrOutOfOrder)
}
if err := l.inferFull(); err != nil { // ensure we can start applying
return err
}
if !l.hasCompleteBlock() {
if l.blockNum == 0 {
return fmt.Errorf("%w: should not have logs in block 0", ErrLogOutOfOrder)
return fmt.Errorf("%w: should not have logs in block 0", entrydb.ErrOutOfOrder)
} else {
return errors.New("cannot append log before last known block is sealed")
}
}
// check parent block
if l.blockHash != parentBlock.Hash {
return fmt.Errorf("%w: log builds on top of block %s, but have block %s", ErrLogOutOfOrder, parentBlock, l.blockHash)
return fmt.Errorf("%w: log builds on top of block %s, but have block %s", entrydb.ErrOutOfOrder, parentBlock, l.blockHash)
}
if l.blockNum != parentBlock.Number {
return fmt.Errorf("%w: log builds on top of block %d, but have block %d", ErrLogOutOfOrder, parentBlock.Number, l.blockNum)
return fmt.Errorf("%w: log builds on top of block %d, but have block %d", entrydb.ErrOutOfOrder, parentBlock.Number, l.blockNum)
}
// check if log fits on top. The length so far == the index of the next log.
if logIdx != l.logsSince {
return fmt.Errorf("%w: expected event index %d, cannot append %d", ErrLogOutOfOrder, l.logsSince, logIdx)
return fmt.Errorf("%w: expected event index %d, cannot append %d", entrydb.ErrOutOfOrder, l.logsSince, logIdx)
}
l.logHash = logHash
l.execMsg = execMsg
l.need.Add(entrydb.FlagInitiatingEvent)
l.need.Add(FlagInitiatingEvent)
if execMsg != nil {
l.need.Add(entrydb.FlagExecutingLink | entrydb.FlagExecutingCheck)
l.need.Add(FlagExecutingLink | FlagExecutingCheck)
}
return l.inferFull() // apply to the state as much as possible
}
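Taken together, the state machine expects seals and logs in a strict order: seal block N, apply the logs of block N+1 on top of it, then seal N+1. A hedged sketch of the call order (buildTwoBlocks is illustrative; hashes, numbers and timestamps are made up):

func buildTwoBlocks(l *logContext) error {
	bl1 := eth.BlockID{Hash: common.Hash{0x01}, Number: 1}
	if err := l.SealBlock(common.Hash{}, bl1, 1000); err != nil { // first block in an empty state
		return err
	}
	// a log of block 2 builds on top of its sealed parent, block 1
	if err := l.ApplyLog(bl1, 0, common.Hash{0xaa}, nil); err != nil {
		return err
	}
	bl2 := eth.BlockID{Hash: common.Hash{0x02}, Number: 2}
	return l.SealBlock(bl1.Hash, bl2, 1002) // seal block 2 on top of block 1
}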
package db
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
func (db *ChainsDB) FindSealedBlock(chain types.ChainID, number uint64) (seal types.BlockSeal, err error) {
db.mu.RLock()
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chain]
if !ok {
return types.BlockSeal{}, fmt.Errorf("%w: %v", ErrUnknownChain, chain)
}
return logDB.FindSealedBlock(number)
}
// LatestBlockNum returns the latest fully-sealed block number that has been recorded to the logs db
// for the given chain. It carries no safety guarantees.
// The block number might not be available (empty database, or non-existent chain).
func (db *ChainsDB) LatestBlockNum(chain types.ChainID) (num uint64, ok bool) {
db.mu.RLock()
defer db.mu.RUnlock()
logDB, knownChain := db.logDBs[chain]
if !knownChain {
return 0, false
}
return logDB.LatestSealedBlockNum()
}
func (db *ChainsDB) LocalUnsafe(chainID types.ChainID) (types.BlockSeal, error) {
db.mu.RLock()
defer db.mu.RUnlock()
eventsDB, ok := db.logDBs[chainID]
if !ok {
return types.BlockSeal{}, ErrUnknownChain
}
n, ok := eventsDB.LatestSealedBlockNum()
if !ok {
return types.BlockSeal{}, entrydb.ErrFuture
}
return eventsDB.FindSealedBlock(n)
}
func (db *ChainsDB) CrossUnsafe(chainID types.ChainID) (types.BlockSeal, error) {
db.mu.RLock()
defer db.mu.RUnlock()
result, ok := db.crossUnsafe[chainID]
if !ok {
return types.BlockSeal{}, ErrUnknownChain
}
return result, nil
}
func (db *ChainsDB) LocalSafe(chainID types.ChainID) (derivedFrom eth.BlockRef, derived eth.BlockRef, err error) {
db.mu.RLock()
defer db.mu.RUnlock()
localDB, ok := db.localDBs[chainID]
if !ok {
return eth.BlockRef{}, eth.BlockRef{}, ErrUnknownChain
}
return localDB.Last()
}
func (db *ChainsDB) CrossSafe(chainID types.ChainID) (derivedFrom eth.BlockRef, derived eth.BlockRef, err error) {
db.mu.RLock()
defer db.mu.RUnlock()
crossDB, ok := db.crossDBs[chainID]
if !ok {
return eth.BlockRef{}, eth.BlockRef{}, ErrUnknownChain
}
return crossDB.Last()
}
func (db *ChainsDB) Finalized(chainID types.ChainID) (eth.BlockID, error) {
db.mu.RLock()
defer db.mu.RUnlock()
finalizedL1 := db.finalizedL1
if finalizedL1 == (eth.L1BlockRef{}) {
return eth.BlockID{}, errors.New("no finalized L1 signal, cannot determine L2 finality yet")
}
derived, err := db.LastDerivedFrom(chainID, finalizedL1.ID())
if err != nil {
return eth.BlockID{}, errors.New("could not find what was last derived from the finalized L1 block")
}
return derived, nil
}
func (db *ChainsDB) LastDerivedFrom(chainID types.ChainID, derivedFrom eth.BlockID) (derived eth.BlockID, err error) {
crossDB, ok := db.crossDBs[chainID]
if !ok {
return eth.BlockID{}, ErrUnknownChain
}
return crossDB.LastDerived(derivedFrom)
}
func (db *ChainsDB) DerivedFrom(chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockID, err error) {
db.mu.RLock()
defer db.mu.RUnlock()
localDB, ok := db.localDBs[chainID]
if !ok {
return eth.BlockID{}, ErrUnknownChain
}
return localDB.DerivedFrom(derived)
}
// Check calls the underlying logDB to determine if the given log entry exists at the given location.
// If the block-seal of the block that includes the log is known, it is returned; it is fully zeroed, without error, if the block is still in progress.
func (db *ChainsDB) Check(chain types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (includedIn types.BlockSeal, err error) {
db.mu.RLock()
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chain]
if !ok {
return types.BlockSeal{}, fmt.Errorf("%w: %v", ErrUnknownChain, chain)
}
return logDB.Contains(blockNum, logIdx, logHash)
}
// Safest returns the strongest safety level that can be guaranteed for the given log entry.
// It assumes the log entry has already been checked and is valid; this function only checks safety levels.
// Cross-safety levels are all considered safer than any form of local-safety.
func (db *ChainsDB) Safest(chainID types.ChainID, blockNum uint64, index uint32) (safest types.SafetyLevel, err error) {
db.mu.RLock()
defer db.mu.RUnlock()
if finalized, err := db.Finalized(chainID); err == nil {
if finalized.Number >= blockNum {
return types.Finalized, nil
}
}
_, crossSafe, err := db.CrossSafe(chainID)
if err != nil {
return types.Invalid, err
}
if crossSafe.Number >= blockNum {
return types.CrossSafe, nil
}
crossUnsafe, err := db.CrossUnsafe(chainID)
if err != nil {
return types.Invalid, err
}
// TODO(#12425): API: "index" for in-progress block building shouldn't be exposed from DB.
// For now we're not counting anything cross-safe until the block is sealed.
if blockNum <= crossUnsafe.Number {
return types.CrossUnsafe, nil
}
_, localSafe, err := db.LocalSafe(chainID)
if err != nil {
return types.Invalid, err
}
if blockNum <= localSafe.Number {
return types.LocalSafe, nil
}
return types.LocalUnsafe, nil
}
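Because the levels are strictly ordered (Finalized, CrossSafe, CrossUnsafe, LocalSafe, LocalUnsafe, from strongest to weakest), callers gate a message on a minimum level. A hedged sketch in the spirit of CheckMessages, assuming an AtLeastAsSafe-style comparison on types.SafetyLevel (the real helper may be named differently; requireSafety is illustrative, not part of this diff):

func requireSafety(db *ChainsDB, chainID types.ChainID, blockNum uint64, logIdx uint32, minSafety types.SafetyLevel) error {
	safest, err := db.Safest(chainID, blockNum, logIdx)
	if err != nil {
		return err
	}
	if !safest.AtLeastAsSafe(minSafety) {
		return fmt.Errorf("message at block %d is %s, need at least %s", blockNum, safest, minSafety)
	}
	return nil
}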
func (db *ChainsDB) IteratorStartingAt(chain types.ChainID, sealedNum uint64, logIndex uint32) (logs.Iterator, error) {
logDB, ok := db.logDBs[chain]
if !ok {
return nil, fmt.Errorf("%w: %v", ErrUnknownChain, chain)
}
return logDB.IteratorStartingAt(sealedNum, logIndex)
}
package db
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
func (db *ChainsDB) AddLog(
chain types.ChainID,
logHash common.Hash,
parentBlock eth.BlockID,
logIdx uint32,
execMsg *types.ExecutingMessage) error {
db.mu.RLock()
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chain]
if !ok {
return fmt.Errorf("cannot AddLog: %w: %v", ErrUnknownChain, chain)
}
return logDB.AddLog(logHash, parentBlock, logIdx, execMsg)
}
func (db *ChainsDB) SealBlock(chain types.ChainID, block eth.BlockRef) error {
db.mu.RLock()
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chain]
if !ok {
return fmt.Errorf("cannot SealBlock: %w: %v", ErrUnknownChain, chain)
}
err := logDB.SealBlock(block.ParentHash, block.ID(), block.Time)
if err != nil {
return fmt.Errorf("failed to seal block %v: %w", block, err)
}
return nil
}
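// A sketch of the per-block write sequence (illustrative only; logHash and
// execMsgOrNil are assumed helpers, not part of this change):
//
//	for i, l := range blockLogs {
//		if err := db.AddLog(chain, logHash(l), parentBlock, uint32(i), execMsgOrNil(l)); err != nil {
//			return err
//		}
//	}
//	if err := db.SealBlock(chain, blockRef); err != nil {
//		return err
//	}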
func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error {
db.mu.RLock()
defer db.mu.RUnlock()
logDB, ok := db.logDBs[chain]
if !ok {
return fmt.Errorf("cannot Rewind: %w: %s", ErrUnknownChain, chain)
}
return logDB.Rewind(headBlockNum)
}
func (db *ChainsDB) UpdateLocalSafe(chain types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error {
db.mu.RLock()
defer db.mu.RUnlock()
localDB, ok := db.localDBs[chain]
if !ok {
return fmt.Errorf("cannot UpdateLocalSafe: %w: %v", ErrUnknownChain, chain)
}
return localDB.AddDerived(derivedFrom, lastDerived)
}
func (db *ChainsDB) UpdateCrossUnsafe(chain types.ChainID, crossUnsafe types.BlockSeal) error {
db.mu.Lock() // write-lock: this mutates the crossUnsafe map entry directly
defer db.mu.Unlock()
if _, ok := db.crossUnsafe[chain]; !ok {
return fmt.Errorf("cannot UpdateCrossUnsafe: %w: %s", ErrUnknownChain, chain)
}
db.crossUnsafe[chain] = crossUnsafe
return nil
}
func (db *ChainsDB) UpdateCrossSafe(chain types.ChainID, l1View eth.BlockRef, lastCrossDerived eth.BlockRef) error {
db.mu.RLock()
defer db.mu.RUnlock()
crossDB, ok := db.crossDBs[chain]
if !ok {
return fmt.Errorf("cannot UpdateCrossSafe: %w: %s", ErrUnknownChain, chain)
}
return crossDB.AddDerived(l1View, lastCrossDerived)
}
func (db *ChainsDB) UpdateFinalizedL1(finalized eth.BlockRef) error {
db.mu.Lock() // write-lock: this mutates db.finalizedL1 directly
defer db.mu.Unlock()
if db.finalizedL1.Number > finalized.Number {
return fmt.Errorf("cannot rewind finalized L1 head from %s to %s", db.finalizedL1, finalized)
}
db.finalizedL1 = finalized
return nil
}
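// Note on the write path: the local-unsafe, local-safe and finalized-L1 updates
// above are driven by op-node via the supervisor RPC (see the UpdatesFrontend),
// while the cross-unsafe and cross-safe updates are expected to be advanced by
// the supervisor's own cross-safety logic.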
......@@ -6,12 +6,10 @@ import (
"io"
"sync/atomic"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/frontend"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/common"
)
type MockBackend struct {
......@@ -52,12 +50,32 @@ func (m *MockBackend) CheckMessages(messages []types.Message, minSafety types.Sa
return nil
}
func (m *MockBackend) CheckBlock(chainID *hexutil.U256, blockHash common.Hash, blockNumber hexutil.Uint64) (types.SafetyLevel, error) {
return types.CrossUnsafe, nil
func (m *MockBackend) UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error) {
return types.ReferenceView{}, nil
}
func (m *MockBackend) DerivedFrom(ctx context.Context, t types.ChainID, parentHash common.Hash, n uint64) (eth.BlockRef, error) {
return eth.BlockRef{}, nil
func (m *MockBackend) SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) {
return types.ReferenceView{}, nil
}
func (m *MockBackend) Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) {
return eth.BlockID{}, nil
}
func (m *MockBackend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockID, err error) {
return eth.BlockID{}, nil
}
func (m *MockBackend) UpdateLocalUnsafe(chainID types.ChainID, head eth.BlockRef) error {
return nil
}
func (m *MockBackend) UpdateLocalSafe(chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error {
return nil
}
func (m *MockBackend) UpdateFinalizedL1(chainID types.ChainID, finalized eth.BlockRef) error {
return nil
}
func (m *MockBackend) Close() error {
......
package source
package processors
import (
"context"
......@@ -91,10 +91,35 @@ func (s *ChainProcessor) nextNum() uint64 {
return headNum + 1
}
// worker is the main loop of the chain processor's worker.
// It manages work by request or on a timer, and watches for shutdown.
func (s *ChainProcessor) worker() {
defer s.wg.Done()
delay := time.NewTicker(time.Second * 5)
for {
// await next time we process, or detect shutdown
select {
case <-s.ctx.Done():
delay.Stop()
return
case <-s.newHead:
s.log.Debug("Responding to new head signal")
s.work()
// if this chain processor is synchronous, signal completion
// to be picked up by the caller (ChainProcessor.OnNewHead)
if s.synchronous {
s.out <- struct{}{}
}
case <-delay.C:
s.log.Debug("Checking for updates")
s.work()
}
}
}
// work processes the next block in the chain repeatedly until it reaches the head
func (s *ChainProcessor) work() {
for {
if s.ctx.Err() != nil { // check if we are closing down
return
......@@ -104,27 +129,12 @@ func (s *ChainProcessor) worker() {
s.log.Error("Failed to process new block", "err", err)
// idle until next update trigger
} else if x := s.lastHead.Load(); target+1 <= x {
s.log.Debug("Continuing with next block",
"newTarget", target+1, "lastHead", x)
s.log.Debug("Continuing with next block", "newTarget", target+1, "lastHead", x)
continue // instantly continue processing, no need to idle
} else {
s.log.Debug("Idling block-processing, reached latest block", "head", target)
}
if s.synchronous {
s.out <- struct{}{}
}
// await next time we process, or detect shutdown
select {
case <-s.ctx.Done():
delay.Stop()
return
case <-s.newHead:
s.log.Debug("Responding to new head signal")
continue
case <-delay.C:
s.log.Debug("Checking for updates")
continue
}
return
}
}
......@@ -166,7 +176,7 @@ func (s *ChainProcessor) update(nextNum uint64) error {
return nil
}
func (s *ChainProcessor) OnNewHead(ctx context.Context, head eth.BlockRef) error {
func (s *ChainProcessor) OnNewHead(head eth.BlockRef) error {
// update the latest target
s.lastHead.Store(head.Number)
// signal that we have something to process
......
package processors
import (
"context"
"fmt"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-service/sources/caching"
)
// NewEthClient creates an Eth RPC client for event-log fetching.
func NewEthClient(ctx context.Context, logger log.Logger, m caching.Metrics, rpc string, rpcClient client.RPC,
pollRate time.Duration, trustRPC bool, kind sources.RPCProviderKind) (*sources.L1Client, error) {
c, err := client.NewRPCWithClient(ctx, logger, rpc, rpcClient, pollRate)
if err != nil {
return nil, fmt.Errorf("failed to create new RPC client: %w", err)
}
l1Client, err := sources.NewL1Client(c, logger, m, sources.L1ClientSimpleConfig(trustRPC, kind, 100))
if err != nil {
return nil, fmt.Errorf("failed to connect client: %w", err)
}
return l1Client, nil
}
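// Hypothetical wiring sketch (rpcURL, rpcClient and metrics are assumed to be
// provided by the supervisor backend setup):
//
//	cl, err := NewEthClient(ctx, logger, metrics, rpcURL, rpcClient,
//		2*time.Second, false, sources.RPCKindStandard)
//	if err != nil {
//		return nil, err
//	}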
......@@ -123,7 +123,7 @@ func identifierFromBytes(identifierBytes io.Reader) (contractIdentifier, error)
// which is then hashed again. This is the hash that is stored in the log storage.
// The logHash can then be used to traverse from the executing message
// to the log of the referenced initiating message.
// TODO: this function is duplicated between contracts and backend/source/log_processor.go
// TODO(#12424): this function is duplicated between contracts and backend/source/log_processor.go
// to avoid a circular dependency. It should be reorganized to avoid this duplication.
func payloadHashToLogHash(payloadHash common.Hash, addr common.Address) common.Hash {
msg := make([]byte, 0, 2*common.HashLength)
......
package source
package processors
import (
"context"
......@@ -10,7 +10,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/source/contracts"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/processors/contracts"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
......@@ -34,7 +34,7 @@ type logProcessor struct {
eventDecoder EventDecoder
}
func newLogProcessor(chain types.ChainID, logStore LogStorage) *logProcessor {
func NewLogProcessor(chain types.ChainID, logStore LogStorage) LogProcessor {
return &logProcessor{
chain: chain,
logStore: logStore,
......
package source
package processors
import (
"context"
......@@ -25,7 +25,7 @@ func TestLogProcessor(t *testing.T) {
}
t.Run("NoOutputWhenLogsAreEmpty", func(t *testing.T) {
store := &stubLogStorage{}
processor := newLogProcessor(logProcessorChainID, store)
processor := NewLogProcessor(logProcessorChainID, store)
err := processor.ProcessLogs(ctx, block1, ethTypes.Receipts{})
require.NoError(t, err)
......@@ -59,7 +59,7 @@ func TestLogProcessor(t *testing.T) {
},
}
store := &stubLogStorage{}
processor := newLogProcessor(logProcessorChainID, store)
processor := NewLogProcessor(logProcessorChainID, store)
err := processor.ProcessLogs(ctx, block1, rcpts)
require.NoError(t, err)
......@@ -115,7 +115,7 @@ func TestLogProcessor(t *testing.T) {
Hash: common.Hash{0xaa},
}
store := &stubLogStorage{}
processor := newLogProcessor(types.ChainID{4}, store)
processor := NewLogProcessor(types.ChainID{4}, store).(*logProcessor)
processor.eventDecoder = EventDecoderFn(func(l *ethTypes.Log) (types.ExecutingMessage, error) {
require.Equal(t, rcpts[0].Logs[0], l)
return execMsg, nil
......
package safety
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
type SafetyIndex interface {
// Updaters for the latest local safety status of each chain
UpdateLocalUnsafe(chainID types.ChainID, ref eth.BlockRef) error
UpdateLocalSafe(chainID types.ChainID, at eth.BlockRef, ref eth.BlockRef) error
UpdateFinalizeL1(ref eth.BlockRef) error
// Getters for the latest safety status of each chain
UnsafeL2(chainID types.ChainID) (heads.HeadPointer, error)
CrossUnsafeL2(chainID types.ChainID) (heads.HeadPointer, error)
LocalSafeL2(chainID types.ChainID) (heads.HeadPointer, error)
CrossSafeL2(chainID types.ChainID) (heads.HeadPointer, error)
// We only finalize on full L2 block boundaries, hence not a heads.HeadPointer return.
FinalizedL2(chainId types.ChainID) (eth.BlockID, error)
}
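// Rough interaction sketch (illustrative only): local safety updates feed the
// index, and the derived cross-safety status is read back:
//
//	if err := idx.UpdateLocalSafe(chainID, l1Ref, l2Ref); err != nil {
//		return err
//	}
//	crossSafe, err := idx.CrossSafeL2(chainID)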
type ChainsDBClient interface {
IteratorStartingAt(chainID types.ChainID, sealedNum uint64, logIndex uint32) (logs.Iterator, error)
Check(chainID types.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (h common.Hash, err error)
}
type safetyIndex struct {
log log.Logger
chains ChainsDBClient
unsafe map[types.ChainID]*View
safe map[types.ChainID]*View
finalized map[types.ChainID]eth.BlockID
// remember what each non-finalized L2 block is derived from
derivedFrom map[types.ChainID]map[common.Hash]eth.BlockRef
// the last received L1 finality signal.
finalizedL1 eth.BlockRef
}
func NewSafetyIndex(log log.Logger, chains ChainsDBClient) *safetyIndex {
return &safetyIndex{
log: log,
chains: chains,
unsafe: make(map[types.ChainID]*View),
safe: make(map[types.ChainID]*View),
finalized: make(map[types.ChainID]eth.BlockID),
derivedFrom: make(map[types.ChainID]map[common.Hash]eth.BlockRef),
}
}
// UpdateLocalUnsafe updates the local-unsafe view for the given chain, and advances the cross-unsafe status.
func (r *safetyIndex) UpdateLocalUnsafe(chainID types.ChainID, ref eth.BlockRef) error {
view, ok := r.unsafe[chainID]
if !ok {
iter, err := r.chains.IteratorStartingAt(chainID, ref.Number, 0)
if err != nil {
return fmt.Errorf("failed to open iterator for chain %s block %d", chainID, ref.Number)
}
view = &View{
chainID: chainID,
iter: iter,
localView: heads.HeadPointer{
LastSealedBlockHash: ref.Hash,
LastSealedBlockNum: ref.Number,
LastSealedTimestamp: ref.Time,
LogsSince: 0,
},
localDerivedFrom: eth.BlockRef{},
validWithinView: r.ValidWithinUnsafeView,
}
r.unsafe[chainID] = view
} else if err := view.UpdateLocal(eth.BlockRef{}, ref); err != nil {
return fmt.Errorf("failed to update local-unsafe: %w", err)
}
local, _ := r.unsafe[chainID].Local()
r.log.Debug("Updated local unsafe head", "chainID", chainID, "local", local)
r.advanceCrossUnsafe()
return nil
}
// advanceCrossUnsafe calls Process on all cross-unsafe views.
func (r *safetyIndex) advanceCrossUnsafe() {
for chainID, view := range r.unsafe {
if err := view.Process(); err != nil {
r.log.Error("Failed to update cross-unsafe view", "chain", chainID, "err", err)
}
cross, _ := r.unsafe[chainID].Cross()
r.log.Debug("Updated cross unsafe head", "chainID", chainID, "cross", cross)
}
}
// UpdateLocalSafe updates the local-safe view for the given chain, and advances the cross-safe status.
func (r *safetyIndex) UpdateLocalSafe(
chainID types.ChainID, at eth.BlockRef, ref eth.BlockRef) error {
view, ok := r.safe[chainID]
if !ok {
iter, err := r.chains.IteratorStartingAt(chainID, ref.Number, 0)
if err != nil {
return fmt.Errorf("failed to open iterator for chain %s block %d", chainID, ref.Number)
}
view = &View{
chainID: chainID,
iter: iter,
localView: heads.HeadPointer{
LastSealedBlockHash: ref.Hash,
LastSealedBlockNum: ref.Number,
LastSealedTimestamp: ref.Time,
LogsSince: 0,
},
localDerivedFrom: at,
validWithinView: r.ValidWithinSafeView,
}
r.safe[chainID] = view
} else if err := view.UpdateLocal(at, ref); err != nil {
return fmt.Errorf("failed to update local-safe: %w", err)
}
// register what this L2 block is derived from
m, ok := r.derivedFrom[chainID]
if !ok {
m = make(map[common.Hash]eth.BlockRef)
r.derivedFrom[chainID] = m
}
m[ref.Hash] = at
local, _ := r.safe[chainID].Local()
r.log.Debug("Updated local safe head", "chainID", chainID, "local", local)
r.advanceCrossSafe()
return nil
}
// advanceCrossSafe calls Process on all cross-safe views, and advances the finalized safety status.
func (r *safetyIndex) advanceCrossSafe() {
for chainID, view := range r.safe {
if err := view.Process(); err != nil {
r.log.Error("Failed to update cross-safe view", "chain", chainID, "err", err)
}
cross, _ := r.safe[chainID].Cross()
r.log.Debug("Updated local safe head", "chainID", chainID, "cross", cross)
}
r.advanceFinalized()
}
// UpdateFinalizeL1 updates the finalized L1 block, and advances the finalized safety status.
func (r *safetyIndex) UpdateFinalizeL1(ref eth.BlockRef) error {
if ref.Number <= r.finalizedL1.Number {
return fmt.Errorf("ignoring old L1 finality signal of %s, already have %s", ref, r.finalizedL1)
}
r.finalizedL1 = ref
r.log.Debug("Updated L1 finalized head", "L1finalized", ref)
r.advanceFinalized()
return nil
}
// advanceFinalized should be called whenever the finalized L1 block, or the cross-safe history, changes.
// This then promotes the irreversible cross-safe L2 blocks to a finalized safety status.
func (r *safetyIndex) advanceFinalized() {
// Whatever was considered cross-safe at the finalized block-height can
// now be considered finalized, since the inputs have become irreversible.
for chainID, view := range r.safe {
crossSafe, err := view.Cross()
if err != nil {
r.log.Info("Failed to get cross-safe data, cannot finalize", "chain", chainID, "err", err)
continue
}
// TODO(#12184): we need to consider older cross-safe data,
// if we want to finalize anything at all when the finality signal lags further behind.
// Could consider just iterating over all derivedFrom contents?
l1Dep := r.derivedFrom[chainID][crossSafe.LastSealedBlockHash]
if l1Dep.Number < r.finalizedL1.Number {
r.finalized[chainID] = eth.BlockID{Hash: crossSafe.LastSealedBlockHash, Number: crossSafe.LastSealedBlockNum}
finalized := r.finalized[chainID]
r.log.Debug("Updated finalized head", "chainID", chainID, "finalized", finalized)
}
}
}
// UnsafeL2 returns the latest unsafe L2 block of the given chain.
func (r *safetyIndex) UnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) {
view, ok := r.unsafe[chainID]
if !ok {
return heads.HeadPointer{}, fmt.Errorf("no unsafe data for chain %s", chainID)
}
return view.Local()
}
// CrossUnsafeL2 returns the latest cross-unsafe L2 block of the given chain.
func (r *safetyIndex) CrossUnsafeL2(chainID types.ChainID) (heads.HeadPointer, error) {
view, ok := r.unsafe[chainID]
if !ok {
return heads.HeadPointer{}, fmt.Errorf("no cross-unsafe data for chain %s", chainID)
}
return view.Cross()
}
// LocalSafeL2 returns the latest local-safe L2 block of the given chain.
func (r *safetyIndex) LocalSafeL2(chainID types.ChainID) (heads.HeadPointer, error) {
view, ok := r.safe[chainID]
if !ok {
return heads.HeadPointer{}, fmt.Errorf("no local-safe data for chain %s", chainID)
}
return view.Local()
}
// CrossSafeL2 returns the latest cross-safe L2 block of the given chain.
func (r *safetyIndex) CrossSafeL2(chainID types.ChainID) (heads.HeadPointer, error) {
view, ok := r.safe[chainID]
if !ok {
return heads.HeadPointer{}, fmt.Errorf("no cross-safe data for chain %s", chainID)
}
return view.Cross()
}
// FinalizedL2 returns the latest finalized L2 block of the given chain.
func (r *safetyIndex) FinalizedL2(chainId types.ChainID) (eth.BlockID, error) {
finalized, ok := r.finalized[chainId]
if !ok {
return eth.BlockID{}, fmt.Errorf("not seen finalized data of chain %s at finalized L1 block %s", chainId, r.finalizedL1)
}
return finalized, nil
}
// ValidWithinUnsafeView checks if the given executing message is in the database.
// The unsafe view is meant to represent all of the database, so no boundary checks are needed.
func (r *safetyIndex) ValidWithinUnsafeView(_ uint64, execMsg *types.ExecutingMessage) error {
execChainID := types.ChainIDFromUInt64(uint64(execMsg.Chain))
_, err := r.chains.Check(execChainID, execMsg.BlockNum, execMsg.LogIdx, execMsg.Hash)
return err
}
// ValidWithinSafeView checks if the given executing message is within the database,
// and within the L1 view of the caller.
func (r *safetyIndex) ValidWithinSafeView(l1View uint64, execMsg *types.ExecutingMessage) error {
execChainID := types.ChainIDFromUInt64(uint64(execMsg.Chain))
// Check that the initiating message, which was pulled in by the executing message,
// does indeed exist. And in which L2 block it exists (if any).
l2BlockHash, err := r.chains.Check(execChainID, execMsg.BlockNum, execMsg.LogIdx, execMsg.Hash)
if err != nil {
return err
}
// if the executing message falls within the execFinalized range, then nothing to check
execFinalized, ok := r.finalized[execChainID]
if ok && execFinalized.Number > execMsg.BlockNum {
return nil
}
// check if the L1 block of the executing message is known
execL1Block, ok := r.derivedFrom[execChainID][l2BlockHash]
if !ok {
return logs.ErrFuture // TODO(#12185) need to distinguish between same-data future, and new-data future
}
// check if the L1 block is within the view
if execL1Block.Number > l1View {
return fmt.Errorf("exec message depends on L2 block %s:%d, derived from L1 block %s, not within view yet: %w",
l2BlockHash, execMsg.BlockNum, execL1Block, logs.ErrFuture)
}
return nil
}
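// compile-time assertion that *safetyIndex implements the SafetyIndex interface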
var _ SafetyIndex = (*safetyIndex)(nil)
package safety
import (
"errors"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
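// View tracks the cross-safety progress of a single chain: localView bounds what
// may be considered, iter traverses the log DB, localDerivedFrom records the L1
// block the local head was derived from, and validWithinView checks executing
// messages against the caller's L1 view.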
type View struct {
chainID types.ChainID
iter logs.Iterator
localView heads.HeadPointer
localDerivedFrom eth.BlockRef
validWithinView func(l1View uint64, execMsg *types.ExecutingMessage) error
}
func (vi *View) Cross() (heads.HeadPointer, error) {
return vi.iter.HeadPointer()
}
func (vi *View) Local() (heads.HeadPointer, error) {
if vi.localView == (heads.HeadPointer{}) {
return heads.HeadPointer{}, logs.ErrFuture
}
return vi.localView, nil
}
func (vi *View) UpdateLocal(at eth.BlockRef, ref eth.BlockRef) error {
vi.localView = heads.HeadPointer{
LastSealedBlockHash: ref.Hash,
LastSealedBlockNum: ref.Number,
//LastSealedTimestamp: ref.Time,
LogsSince: 0,
}
vi.localDerivedFrom = at
// TODO(#11693): reorg check against existing DB
// TODO(#12186): localView may be larger than what DB contents we have
return nil
}
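// Process traverses the log DB until the iterator exits with an error:
// logs.ErrFuture means it caught up with the local view (the success case),
// while any other error aborts cross-safety promotion at that point.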
func (vi *View) Process() error {
err := vi.iter.TraverseConditional(func(state logs.IteratorState) error {
hash, num, ok := state.SealedBlock()
if !ok {
return logs.ErrFuture // maybe a more specific error for no-genesis case?
}
// TODO(#11693): reorg check in the future. To make sure that what we traverse is still canonical.
_ = hash
// check if L2 block is within view
if !vi.localView.WithinRange(num, 0) {
return logs.ErrFuture
}
_, initLogIndex, ok := state.InitMessage()
if !ok {
return nil // no readable message, just an empty block
}
// check if the message is within view
if !vi.localView.WithinRange(num, initLogIndex) {
return logs.ErrFuture
}
// check if it is an executing message. If so, check the dependency
if execMsg := state.ExecMessage(); execMsg != nil {
// Check if executing message is within cross L2 view,
// relative to the L1 view of current message.
// And check if the message is valid to execute at all
// (i.e. if it exists on the initiating side).
// TODO(#12187): it's inaccurate to check with the view of the local-unsafe;
// it should be limited to the L1 view at the time the executing message was included.
err := vi.validWithinView(vi.localDerivedFrom.Number, execMsg)
if err != nil {
return err
}
}
return nil
})
if err == nil {
panic("expected reader to complete with an exit-error")
}
if errors.Is(err, logs.ErrFuture) {
// register the new cross-safe block as cross-safe up to the current L1 view
return nil
}
return err
}
package source
import (
"context"
"fmt"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-service/sources/caching"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
// TODO(optimism#11032) Make these configurable and a sensible default
const epochPollInterval = 3 * time.Second
const pollInterval = 2 * time.Second
const trustRpc = false
const rpcKind = sources.RPCKindStandard
type Metrics interface {
caching.Metrics
}
type Storage interface {
ChainsDBClientForLogProcessor
DatabaseRewinder
LatestBlockNum(chainID types.ChainID) (num uint64, ok bool)
}
// ChainMonitor monitors a source L2 chain, retrieving the data required to populate the database and perform
// interop consolidation. It detects and notifies when reorgs occur.
type ChainMonitor struct {
log log.Logger
headMonitor *HeadMonitor
chainProcessor *ChainProcessor
}
func NewChainMonitor(ctx context.Context, logger log.Logger, m Metrics, chainID types.ChainID, rpc string, client client.RPC, store Storage) (*ChainMonitor, error) {
logger = logger.New("chainID", chainID)
cl, err := newClient(ctx, logger, m, rpc, client, pollInterval, trustRpc, rpcKind)
if err != nil {
return nil, err
}
// Create the log processor and fetcher
processLogs := newLogProcessor(chainID, store)
unsafeBlockProcessor := NewChainProcessor(logger, cl, chainID, processLogs, store)
unsafeProcessors := []HeadProcessor{unsafeBlockProcessor}
callback := newHeadUpdateProcessor(logger, unsafeProcessors, nil, nil)
headMonitor := NewHeadMonitor(logger, epochPollInterval, cl, callback)
return &ChainMonitor{
log: logger,
headMonitor: headMonitor,
chainProcessor: unsafeBlockProcessor,
}, nil
}
func (c *ChainMonitor) Start() error {
c.log.Info("Started monitoring chain")
return c.headMonitor.Start()
}
func (c *ChainMonitor) Stop() error {
c.chainProcessor.Close()
return c.headMonitor.Stop()
}
func newClient(ctx context.Context, logger log.Logger, m caching.Metrics, rpc string, rpcClient client.RPC, pollRate time.Duration, trustRPC bool, kind sources.RPCProviderKind) (*sources.L1Client, error) {
c, err := client.NewRPCWithClient(ctx, logger, rpc, rpcClient, pollRate)
if err != nil {
return nil, fmt.Errorf("failed to create new RPC client: %w", err)
}
l1Client, err := sources.NewL1Client(c, logger, m, sources.L1ClientSimpleConfig(trustRPC, kind, 100))
if err != nil {
return nil, fmt.Errorf("failed to connect client: %w", err)
}
return l1Client, nil
}
package source
/* TODO
import (
"context"
"errors"
"fmt"
"testing"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
var processorChainID = types.ChainIDFromUInt64(4)
func TestUnsafeBlocksStage(t *testing.T) {
t.Run("IgnoreEventsAtOrPriorToStartingHead", func(t *testing.T) {
ctx := context.Background()
logger := testlog.Logger(t, log.LvlInfo)
client := &stubBlockByNumberSource{}
processor := &stubBlockProcessor{}
stage := NewChainProcessor(logger, client, processorChainID, processor, &stubRewinder{})
stage.OnNewHead(ctx, eth.L1BlockRef{Number: 100})
stage.OnNewHead(ctx, eth.L1BlockRef{Number: 99})
require.Empty(t, processor.processed)
require.Zero(t, client.calls)
})
t.Run("OutputNewHeadsWithNoMissedBlocks", func(t *testing.T) {
ctx := context.Background()
logger := testlog.Logger(t, log.LvlInfo)
client := &stubBlockByNumberSource{}
block0 := eth.L1BlockRef{Number: 100}
block1 := eth.L1BlockRef{Number: 101}
block2 := eth.L1BlockRef{Number: 102}
block3 := eth.L1BlockRef{Number: 103}
processor := &stubBlockProcessor{}
stage := NewChainProcessor(logger, client, processorChainID, block0, processor, &stubRewinder{})
stage.OnNewHead(ctx, block1)
require.Equal(t, []eth.L1BlockRef{block1}, processor.processed)
stage.OnNewHead(ctx, block2)
require.Equal(t, []eth.L1BlockRef{block1, block2}, processor.processed)
stage.OnNewHead(ctx, block3)
require.Equal(t, []eth.L1BlockRef{block1, block2, block3}, processor.processed)
require.Zero(t, client.calls, "should not need to request block info")
})
t.Run("IgnoreEventsAtOrPriorToPreviousHead", func(t *testing.T) {
ctx := context.Background()
logger := testlog.Logger(t, log.LvlInfo)
client := &stubBlockByNumberSource{}
block0 := eth.L1BlockRef{Number: 100}
block1 := eth.L1BlockRef{Number: 101}
processor := &stubBlockProcessor{}
stage := NewChainProcessor(logger, client, processorChainID, block0, processor, &stubRewinder{})
stage.OnNewHead(ctx, block1)
require.NotEmpty(t, processor.processed)
require.Equal(t, []eth.L1BlockRef{block1}, processor.processed)
stage.OnNewHead(ctx, block0)
stage.OnNewHead(ctx, block1)
require.Equal(t, []eth.L1BlockRef{block1}, processor.processed)
require.Zero(t, client.calls, "should not need to request block info")
})
t.Run("OutputSkippedBlocks", func(t *testing.T) {
ctx := context.Background()
logger := testlog.Logger(t, log.LvlInfo)
client := &stubBlockByNumberSource{}
block0 := eth.L1BlockRef{Number: 100}
block3 := eth.L1BlockRef{Number: 103}
processor := &stubBlockProcessor{}
stage := NewChainProcessor(logger, client, processorChainID, block0, processor, &stubRewinder{})
stage.OnNewHead(ctx, block3)
require.Equal(t, []eth.L1BlockRef{makeBlockRef(101), makeBlockRef(102), block3}, processor.processed)
require.Equal(t, 2, client.calls, "should only request the two missing blocks")
})
t.Run("DoNotUpdateLastBlockOnFetchError", func(t *testing.T) {
ctx := context.Background()
logger := testlog.Logger(t, log.LvlInfo)
client := &stubBlockByNumberSource{err: errors.New("boom")}
block0 := eth.L1BlockRef{Number: 100}
block3 := eth.L1BlockRef{Number: 103}
processor := &stubBlockProcessor{}
rewinder := &stubRewinder{}
stage := NewChainProcessor(logger, client, processorChainID, block0, processor, rewinder)
stage.OnNewHead(ctx, block3)
require.Empty(t, processor.processed, "should not update any blocks because backfill failed")
client.err = nil
stage.OnNewHead(ctx, block3)
require.Equal(t, []eth.L1BlockRef{makeBlockRef(101), makeBlockRef(102), block3}, processor.processed)
require.False(t, rewinder.rewindCalled, "should not rewind because no logs could have been written")
})
t.Run("DoNotUpdateLastBlockOnProcessorError", func(t *testing.T) {
ctx := context.Background()
logger := testlog.Logger(t, log.LvlInfo)
client := &stubBlockByNumberSource{}
block0 := eth.L1BlockRef{Number: 100}
block3 := eth.L1BlockRef{Number: 103}
processor := &stubBlockProcessor{err: errors.New("boom")}
rewinder := &stubRewinder{}
stage := NewChainProcessor(logger, client, processorChainID, block0, processor, rewinder)
stage.OnNewHead(ctx, block3)
require.Equal(t, []eth.L1BlockRef{makeBlockRef(101)}, processor.processed, "Attempted to process block 101")
require.Equal(t, block0.Number, rewinder.rewoundTo, "should rewind to block before error")
processor.err = nil
stage.OnNewHead(ctx, block3)
// Attempts to process block 101 again, then carries on
require.Equal(t, []eth.L1BlockRef{makeBlockRef(101), makeBlockRef(101), makeBlockRef(102), block3}, processor.processed)
})
t.Run("RewindWhenNewHeadProcessingFails", func(t *testing.T) {
ctx := context.Background()
logger := testlog.Logger(t, log.LvlInfo)
client := &stubBlockByNumberSource{}
block0 := eth.L1BlockRef{Number: 100}
block1 := eth.L1BlockRef{Number: 101}
processor := &stubBlockProcessor{err: errors.New("boom")}
rewinder := &stubRewinder{}
stage := NewChainProcessor(logger, client, processorChainID, block0, processor, rewinder)
// No skipped blocks
stage.OnNewHead(ctx, block1)
require.Equal(t, []eth.L1BlockRef{block1}, processor.processed, "Attempted to process block 101")
require.Equal(t, block0.Number, rewinder.rewoundTo, "should rewind to block before error")
})
}
type stubBlockByNumberSource struct {
calls int
err error
}
func (s *stubBlockByNumberSource) L1BlockRefByNumber(_ context.Context, number uint64) (eth.L1BlockRef, error) {
s.calls++
if s.err != nil {
return eth.L1BlockRef{}, s.err
}
return makeBlockRef(number), nil
}
type stubBlockProcessor struct {
processed []eth.L1BlockRef
err error
}
func (s *stubBlockProcessor) ProcessBlock(_ context.Context, block eth.L1BlockRef) error {
s.processed = append(s.processed, block)
return s.err
}
func makeBlockRef(number uint64) eth.L1BlockRef {
return eth.L1BlockRef{
Number: number,
Hash: common.Hash{byte(number)},
ParentHash: common.Hash{byte(number - 1)},
Time: number * 1000,
}
}
type stubRewinder struct {
rewoundTo uint64
rewindCalled bool
}
func (s *stubRewinder) Rewind(chainID types.ChainID, headBlockNum uint64) error {
if chainID != processorChainID {
return fmt.Errorf("chainID mismatch, expected %v but was %v", processorChainID, chainID)
}
s.rewoundTo = headBlockNum
s.rewindCalled = true
return nil
}
*/
package source
import (
"context"
"errors"
"sync/atomic"
"time"
"github.com/ethereum-optimism/optimism/op-service/eth"
ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
)
type HeadMonitorClient interface {
eth.NewHeadSource
eth.L1BlockRefsSource
}
type HeadChangeCallback interface {
OnNewUnsafeHead(ctx context.Context, block eth.L1BlockRef)
OnNewSafeHead(ctx context.Context, block eth.L1BlockRef)
OnNewFinalizedHead(ctx context.Context, block eth.L1BlockRef)
}
// HeadMonitor monitors an L2 chain and sends notifications when the unsafe, safe or finalized head changes.
// Head updates may be coalesced, allowing the head block to skip forward multiple blocks.
// Reorgs are not identified.
type HeadMonitor struct {
log log.Logger
epochPollInterval time.Duration
rpc HeadMonitorClient
callback HeadChangeCallback
started atomic.Bool
headsSub event.Subscription
safeSub ethereum.Subscription
finalizedSub ethereum.Subscription
}
func NewHeadMonitor(logger log.Logger, epochPollInterval time.Duration, rpc HeadMonitorClient, callback HeadChangeCallback) *HeadMonitor {
return &HeadMonitor{
log: logger,
epochPollInterval: epochPollInterval,
rpc: rpc,
callback: callback,
}
}
func (h *HeadMonitor) Start() error {
if !h.started.CompareAndSwap(false, true) {
return errors.New("already started")
}
// Keep subscribed to the unsafe head, which changes frequently.
h.headsSub = event.ResubscribeErr(time.Second*10, func(ctx context.Context, err error) (event.Subscription, error) {
if err != nil {
h.log.Warn("Resubscribing after failed heads subscription", "err", err)
}
return eth.WatchHeadChanges(ctx, h.rpc, h.callback.OnNewUnsafeHead)
})
go func() {
err, ok := <-h.headsSub.Err()
if !ok {
return
}
h.log.Error("Heads subscription error", "err", err)
}()
// Poll for the safe block and finalized block, which only change once per epoch at most and may be delayed.
h.safeSub = eth.PollBlockChanges(h.log, h.rpc, h.callback.OnNewSafeHead, eth.Safe,
h.epochPollInterval, time.Second*10)
h.finalizedSub = eth.PollBlockChanges(h.log, h.rpc, h.callback.OnNewFinalizedHead, eth.Finalized,
h.epochPollInterval, time.Second*10)
h.log.Info("Chain head monitoring started")
return nil
}
func (h *HeadMonitor) Stop() error {
if !h.started.CompareAndSwap(true, false) {
return errors.New("already stopped")
}
// stop heads feed
if h.headsSub != nil {
h.headsSub.Unsubscribe()
}
// stop polling for safe-head changes
if h.safeSub != nil {
h.safeSub.Unsubscribe()
}
// stop polling for finalized-head changes
if h.finalizedSub != nil {
h.finalizedSub.Unsubscribe()
}
return nil
}
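// Minimal usage sketch (illustrative only; rpcClient and callback are assumed
// to satisfy HeadMonitorClient and HeadChangeCallback):
//
//	mon := NewHeadMonitor(logger, 3*time.Second, rpcClient, callback)
//	if err := mon.Start(); err != nil {
//		return err
//	}
//	defer mon.Stop()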
package source
import (
"context"
"errors"
"fmt"
"math/rand"
"sync"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
const waitDuration = 10 * time.Second
const checkInterval = 10 * time.Millisecond
func TestUnsafeHeadUpdates(t *testing.T) {
rng := rand.New(rand.NewSource(0x1337))
header1 := testutils.RandomHeader(rng)
header2 := testutils.RandomHeader(rng)
t.Run("NotifyOfNewHeads", func(t *testing.T) {
rpc, callback := startHeadMonitor(t)
rpc.NewUnsafeHead(t, header1)
callback.RequireUnsafeHeaders(t, header1)
rpc.NewUnsafeHead(t, header2)
callback.RequireUnsafeHeaders(t, header1, header2)
})
t.Run("ResubscribeOnError", func(t *testing.T) {
rpc, callback := startHeadMonitor(t)
rpc.SubscriptionError(t)
rpc.NewUnsafeHead(t, header1)
callback.RequireUnsafeHeaders(t, header1)
})
}
func TestSafeHeadUpdates(t *testing.T) {
rpc, callback := startHeadMonitor(t)
head1 := eth.L1BlockRef{
Hash: common.Hash{0xaa},
Number: 1,
}
head2 := eth.L1BlockRef{
Hash: common.Hash{0xbb},
Number: 2,
}
rpc.SetSafeHead(head1)
callback.RequireSafeHeaders(t, head1)
rpc.SetSafeHead(head2)
callback.RequireSafeHeaders(t, head1, head2)
}
func TestFinalizedHeadUpdates(t *testing.T) {
rpc, callback := startHeadMonitor(t)
head1 := eth.L1BlockRef{
Hash: common.Hash{0xaa},
Number: 1,
}
head2 := eth.L1BlockRef{
Hash: common.Hash{0xbb},
Number: 2,
}
rpc.SetFinalizedHead(head1)
callback.RequireFinalizedHeaders(t, head1)
rpc.SetFinalizedHead(head2)
callback.RequireFinalizedHeaders(t, head1, head2)
}
func startHeadMonitor(t *testing.T) (*stubRPC, *stubCallback) {
logger := testlog.Logger(t, log.LvlInfo)
rpc := &stubRPC{}
callback := &stubCallback{}
monitor := NewHeadMonitor(logger, 50*time.Millisecond, rpc, callback)
require.NoError(t, monitor.Start())
t.Cleanup(func() {
require.NoError(t, monitor.Stop())
})
return rpc, callback
}
type stubCallback struct {
sync.Mutex
unsafe []eth.L1BlockRef
safe []eth.L1BlockRef
finalized []eth.L1BlockRef
}
func (s *stubCallback) RequireUnsafeHeaders(t *testing.T, heads ...*types.Header) {
expected := make([]eth.L1BlockRef, len(heads))
for i, head := range heads {
expected[i] = eth.InfoToL1BlockRef(eth.HeaderBlockInfo(head))
}
s.requireHeaders(t, func(s *stubCallback) []eth.L1BlockRef { return s.unsafe }, expected)
}
func (s *stubCallback) RequireSafeHeaders(t *testing.T, expected ...eth.L1BlockRef) {
s.requireHeaders(t, func(s *stubCallback) []eth.L1BlockRef { return s.safe }, expected)
}
func (s *stubCallback) RequireFinalizedHeaders(t *testing.T, expected ...eth.L1BlockRef) {
s.requireHeaders(t, func(s *stubCallback) []eth.L1BlockRef { return s.finalized }, expected)
}
func (s *stubCallback) requireHeaders(t *testing.T, getter func(*stubCallback) []eth.L1BlockRef, expected []eth.L1BlockRef) {
require.Eventually(t, func() bool {
s.Lock()
defer s.Unlock()
return len(getter(s)) >= len(expected)
}, waitDuration, checkInterval)
s.Lock()
defer s.Unlock()
require.Equal(t, expected, getter(s))
}
func (s *stubCallback) OnNewUnsafeHead(ctx context.Context, block eth.L1BlockRef) {
s.Lock()
defer s.Unlock()
s.unsafe = append(s.unsafe, block)
}
func (s *stubCallback) OnNewSafeHead(ctx context.Context, block eth.L1BlockRef) {
s.Lock()
defer s.Unlock()
s.safe = append(s.safe, block)
}
func (s *stubCallback) OnNewFinalizedHead(ctx context.Context, block eth.L1BlockRef) {
s.Lock()
defer s.Unlock()
s.finalized = append(s.finalized, block)
}
var _ HeadChangeCallback = (*stubCallback)(nil)
type stubRPC struct {
sync.Mutex
sub *mockSubscription
safeHead eth.L1BlockRef
finalizedHead eth.L1BlockRef
}
func (s *stubRPC) SubscribeNewHead(_ context.Context, unsafeCh chan<- *types.Header) (ethereum.Subscription, error) {
s.Lock()
defer s.Unlock()
if s.sub != nil {
return nil, errors.New("already subscribed to unsafe heads")
}
errChan := make(chan error)
s.sub = &mockSubscription{errChan, unsafeCh, s}
return s.sub, nil
}
func (s *stubRPC) SetSafeHead(head eth.L1BlockRef) {
s.Lock()
defer s.Unlock()
s.safeHead = head
}
func (s *stubRPC) SetFinalizedHead(head eth.L1BlockRef) {
s.Lock()
defer s.Unlock()
s.finalizedHead = head
}
func (s *stubRPC) L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L1BlockRef, error) {
s.Lock()
defer s.Unlock()
switch label {
case eth.Safe:
if s.safeHead == (eth.L1BlockRef{}) {
return eth.L1BlockRef{}, errors.New("no unsafe head")
}
return s.safeHead, nil
case eth.Finalized:
if s.finalizedHead == (eth.L1BlockRef{}) {
return eth.L1BlockRef{}, errors.New("no finalized head")
}
return s.finalizedHead, nil
default:
return eth.L1BlockRef{}, fmt.Errorf("unknown label: %v", label)
}
}
func (s *stubRPC) NewUnsafeHead(t *testing.T, header *types.Header) {
s.WaitForSub(t)
s.Lock()
defer s.Unlock()
require.NotNil(t, s.sub, "Attempting to publish a header with no subscription")
s.sub.headers <- header
}
func (s *stubRPC) SubscriptionError(t *testing.T) {
s.WaitForSub(t)
s.Lock()
defer s.Unlock()
s.sub.errChan <- errors.New("subscription error")
s.sub = nil
}
func (s *stubRPC) WaitForSub(t *testing.T) {
require.Eventually(t, func() bool {
s.Lock()
defer s.Unlock()
return s.sub != nil
}, waitDuration, checkInterval, "Head monitor did not subscribe to unsafe head")
}
var _ HeadMonitorClient = (*stubRPC)(nil)
type mockSubscription struct {
errChan chan error
headers chan<- *types.Header
rpc *stubRPC
}
func (m *mockSubscription) Unsubscribe() {
fmt.Println("Unsubscribed")
m.rpc.Lock()
defer m.rpc.Unlock()
m.rpc.sub = nil
}
func (m *mockSubscription) Err() <-chan error {
return m.errChan
}
package source
import (
"context"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
type HeadProcessor interface {
OnNewHead(ctx context.Context, head eth.L1BlockRef) error
}
type HeadProcessorFn func(ctx context.Context, head eth.L1BlockRef) error
func (f HeadProcessorFn) OnNewHead(ctx context.Context, head eth.L1BlockRef) error {
return f(ctx, head)
}
// headUpdateProcessor handles head update events and routes them to the appropriate handlers
type headUpdateProcessor struct {
log log.Logger
unsafeProcessors []HeadProcessor
safeProcessors []HeadProcessor
finalizedProcessors []HeadProcessor
}
func newHeadUpdateProcessor(log log.Logger, unsafeProcessors []HeadProcessor, safeProcessors []HeadProcessor, finalizedProcessors []HeadProcessor) *headUpdateProcessor {
return &headUpdateProcessor{
log: log,
unsafeProcessors: unsafeProcessors,
safeProcessors: safeProcessors,
finalizedProcessors: finalizedProcessors,
}
}
func (n *headUpdateProcessor) OnNewUnsafeHead(ctx context.Context, block eth.L1BlockRef) {
n.log.Debug("New unsafe head", "block", block)
for _, processor := range n.unsafeProcessors {
if err := processor.OnNewHead(ctx, block); err != nil {
n.log.Error("unsafe-head processing failed", "err", err)
}
}
}
func (n *headUpdateProcessor) OnNewSafeHead(ctx context.Context, block eth.L1BlockRef) {
n.log.Debug("New safe head", "block", block)
for _, processor := range n.safeProcessors {
if err := processor.OnNewHead(ctx, block); err != nil {
n.log.Error("safe-head processing failed", "err", err)
}
}
}
func (n *headUpdateProcessor) OnNewFinalizedHead(ctx context.Context, block eth.L1BlockRef) {
n.log.Debug("New finalized head", "block", block)
for _, processor := range n.finalizedProcessors {
if err := processor.OnNewHead(ctx, block); err != nil {
n.log.Error("finalized-head processing failed", "err", err)
}
}
}
// OnNewHead is a util function to turn a head-pointer updater into a head-signal processor
func OnNewHead(id types.ChainID, apply func(id types.ChainID, v heads.HeadPointer) error) HeadProcessorFn {
return func(ctx context.Context, head eth.L1BlockRef) error {
return apply(id, heads.HeadPointer{
LastSealedBlockHash: head.Hash,
LastSealedBlockNum: head.Number,
LogsSince: 0,
})
}
}
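// Usage sketch (illustrative only; the apply callback is assumed to persist
// the head pointer, e.g. into a heads DB):
//
//	proc := OnNewHead(chainID, func(id types.ChainID, v heads.HeadPointer) error {
//		return headsDB.Apply(id, v)
//	})
//	callback := newHeadUpdateProcessor(logger, []HeadProcessor{proc}, nil, nil)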
package source
import (
"context"
"testing"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
func TestHeadUpdateProcessor(t *testing.T) {
t.Run("NotifyUnsafeHeadProcessors", func(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
processed := make([]eth.L1BlockRef, 3)
makeProcessor := func(idx int) HeadProcessor {
return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) error {
processed[idx] = head
return nil
})
}
headUpdates := newHeadUpdateProcessor(logger, []HeadProcessor{makeProcessor(0), makeProcessor(1), makeProcessor(2)}, nil, nil)
block := eth.L1BlockRef{Number: 110, Hash: common.Hash{0xaa}}
headUpdates.OnNewUnsafeHead(context.Background(), block)
require.Equal(t, []eth.L1BlockRef{block, block, block}, processed)
})
t.Run("NotifySafeHeadProcessors", func(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
processed := make([]eth.L1BlockRef, 3)
makeProcessor := func(idx int) HeadProcessor {
return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) error {
processed[idx] = head
return nil
})
}
headUpdates := newHeadUpdateProcessor(logger, nil, []HeadProcessor{makeProcessor(0), makeProcessor(1), makeProcessor(2)}, nil)
block := eth.L1BlockRef{Number: 110, Hash: common.Hash{0xaa}}
headUpdates.OnNewSafeHead(context.Background(), block)
require.Equal(t, []eth.L1BlockRef{block, block, block}, processed)
})
t.Run("NotifyFinalizedHeadProcessors", func(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
processed := make([]eth.L1BlockRef, 3)
makeProcessor := func(idx int) HeadProcessor {
return HeadProcessorFn(func(_ context.Context, head eth.L1BlockRef) error {
processed[idx] = head
return nil
})
}
headUpdates := newHeadUpdateProcessor(logger, nil, nil, []HeadProcessor{makeProcessor(0), makeProcessor(1), makeProcessor(2)})
block := eth.L1BlockRef{Number: 110, Hash: common.Hash{0xaa}}
headUpdates.OnNewFinalizedHead(context.Background(), block)
require.Equal(t, []eth.L1BlockRef{block, block, block}, processed)
})
}
......@@ -3,11 +3,9 @@ package frontend
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/common"
)
type AdminBackend interface {
......@@ -19,19 +17,22 @@ type AdminBackend interface {
type QueryBackend interface {
CheckMessage(identifier types.Identifier, payloadHash common.Hash) (types.SafetyLevel, error)
CheckMessages(messages []types.Message, minSafety types.SafetyLevel) error
CheckBlock(chainID *hexutil.U256, blockHash common.Hash, blockNumber hexutil.Uint64) (types.SafetyLevel, error)
DerivedFrom(ctx context.Context, chainID types.ChainID, blockHash common.Hash, blockNumber uint64) (eth.BlockRef, error)
DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockID, err error)
UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error)
SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error)
Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error)
}
type UpdatesBackend interface {
UpdateLocalUnsafe(chainID types.ChainID, head eth.BlockRef)
UpdateLocalSafe(chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef)
UpdateFinalizedL1(chainID types.ChainID, finalized eth.BlockRef)
UpdateLocalUnsafe(chainID types.ChainID, head eth.BlockRef) error
UpdateLocalSafe(chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error
UpdateFinalizedL1(chainID types.ChainID, finalized eth.BlockRef) error
}
type Backend interface {
AdminBackend
QueryBackend
UpdatesBackend
}
type QueryFrontend struct {
......@@ -53,23 +54,19 @@ func (q *QueryFrontend) CheckMessages(
}
func (q *QueryFrontend) UnsafeView(ctx context.Context, chainID types.ChainID, unsafe types.ReferenceView) (types.ReferenceView, error) {
// TODO(#12358): attach to backend
return types.ReferenceView{}, nil
return q.Supervisor.UnsafeView(ctx, chainID, unsafe)
}
func (q *QueryFrontend) SafeView(ctx context.Context, chainID types.ChainID, safe types.ReferenceView) (types.ReferenceView, error) {
// TODO(#12358): attach to backend
return types.ReferenceView{}, nil
return q.Supervisor.SafeView(ctx, chainID, safe)
}
func (q *QueryFrontend) Finalized(ctx context.Context, chainID types.ChainID) (eth.BlockID, error) {
// TODO(#12358): attach to backend
return eth.BlockID{}, nil
return q.Supervisor.Finalized(ctx, chainID)
}
func (q *QueryFrontend) DerivedFrom(ctx context.Context, chainID types.ChainID, blockHash common.Hash, blockNumber uint64) (eth.BlockRef, error) {
// TODO(#12358): attach to backend
return eth.BlockRef{}, nil
func (q *QueryFrontend) DerivedFrom(ctx context.Context, chainID types.ChainID, derived eth.BlockID) (derivedFrom eth.BlockID, err error) {
return q.Supervisor.DerivedFrom(ctx, chainID, derived)
}
type AdminFrontend struct {
......@@ -95,14 +92,14 @@ type UpdatesFrontend struct {
Supervisor UpdatesBackend
}
func (u *UpdatesFrontend) UpdateLocalUnsafe(chainID types.ChainID, head eth.BlockRef) {
u.Supervisor.UpdateLocalUnsafe(chainID, head)
func (u *UpdatesFrontend) UpdateLocalUnsafe(chainID types.ChainID, head eth.BlockRef) error {
return u.Supervisor.UpdateLocalUnsafe(chainID, head)
}
func (u *UpdatesFrontend) UpdateLocalSafe(chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) {
u.Supervisor.UpdateLocalSafe(chainID, derivedFrom, lastDerived)
func (u *UpdatesFrontend) UpdateLocalSafe(chainID types.ChainID, derivedFrom eth.BlockRef, lastDerived eth.BlockRef) error {
return u.Supervisor.UpdateLocalSafe(chainID, derivedFrom, lastDerived)
}
func (u *UpdatesFrontend) UpdateFinalizedL1(chainID types.ChainID, finalized eth.BlockRef) {
u.Supervisor.UpdateFinalizedL1(chainID, finalized)
func (u *UpdatesFrontend) UpdateFinalizedL1(chainID types.ChainID, finalized eth.BlockRef) error {
return u.Supervisor.UpdateFinalizedL1(chainID, finalized)
}
......@@ -4,10 +4,8 @@ import (
"context"
"errors"
"fmt"
"io"
"sync/atomic"
"github.com/ethereum-optimism/optimism/op-supervisor/config"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
......@@ -16,6 +14,7 @@ import (
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum-optimism/optimism/op-service/oppprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
"github.com/ethereum-optimism/optimism/op-supervisor/config"
"github.com/ethereum-optimism/optimism/op-supervisor/metrics"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/frontend"
......@@ -23,7 +22,6 @@ import (
type Backend interface {
frontend.Backend
io.Closer
}
// SupervisorService implements the full-environment bells and whistles around the Supervisor.
......@@ -149,6 +147,11 @@ func (su *SupervisorService) initRPCServer(cfg *config.Config) error {
Service: &frontend.QueryFrontend{Supervisor: su.backend},
Authenticated: false,
})
server.AddAPI(rpc.API{
Namespace: "supervisor",
Service: &frontend.UpdatesFrontend{Supervisor: su.backend},
Authenticated: false,
})
su.rpcServer = server
return nil
}
......@@ -179,16 +182,19 @@ func (su *SupervisorService) Stop(ctx context.Context) error {
result = errors.Join(result, fmt.Errorf("failed to stop RPC server: %w", err))
}
}
su.log.Info("Stopped RPC Server")
if su.backend != nil {
if err := su.backend.Close(); err != nil {
if err := su.backend.Stop(ctx); err != nil {
result = errors.Join(result, fmt.Errorf("failed to close supervisor backend: %w", err))
}
}
su.log.Info("Stopped Backend")
if su.pprofService != nil {
if err := su.pprofService.Stop(ctx); err != nil {
result = errors.Join(result, fmt.Errorf("failed to stop PProf server: %w", err))
}
}
su.log.Info("Stopped PProf")
if su.metricsSrv != nil {
if err := su.metricsSrv.Stop(ctx); err != nil {
result = errors.Join(result, fmt.Errorf("failed to stop metrics server: %w", err))
......
......@@ -23,6 +23,11 @@ type ExecutingMessage struct {
Hash common.Hash
}
func (s *ExecutingMessage) String() string {
return fmt.Sprintf("ExecMsg(chain: %d, block: %d, log: %d, time: %d, logHash: %s)",
s.Chain, s.BlockNum, s.LogIdx, s.Timestamp, s.Hash)
}
type Message struct {
Identifier Identifier `json:"identifier"`
PayloadHash common.Hash `json:"payloadHash"`
......@@ -171,3 +176,17 @@ type ReferenceView struct {
func (v ReferenceView) String() string {
return fmt.Sprintf("View(local: %s, cross: %s)", v.Local, v.Cross)
}
type BlockSeal struct {
Hash common.Hash
Number uint64
Timestamp uint64
}
func (s BlockSeal) String() string {
return fmt.Sprintf("BlockSeal(hash:%s, number:%d, time:%d)", s.Hash, s.Number, s.Timestamp)
}
func (s BlockSeal) ID() eth.BlockID {
return eth.BlockID{Hash: s.Hash, Number: s.Number}
}