Commit 98e1cf26 authored by Adrian Sutton's avatar Adrian Sutton Committed by GitHub

op-supervisor: Wire in head tracking (#11139)

* op-supervisor: Introduce head tracker

* op-supervisor: Move log db to subpackage of db.

* op-supervisor: Route all updates through a common db that can track heads

* op-supervisor: Remove unused error.

* op-supervisor: Remove operations - it fits into a later PR.

* op-supervisor: Fix semgrep

* op-supervisor: Move resuming databases into ChainsDB so it can later update the chain heads too.
parent 89f75545
......@@ -43,8 +43,10 @@ func WriteJSON[X any](outputPath string, value X, perm os.FileMode) error {
if err != nil {
return fmt.Errorf("failed to open output file: %w", err)
}
// Ensure we close the stream even if failures occur.
defer f.Close()
// Ensure we close the stream without renaming even if failures occur.
defer func() {
_ = f.Abort()
}()
out = f
// Closing the file causes it to be renamed to the final destination
// so make sure we handle any errors it returns
......
......@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"path/filepath"
"sync/atomic"
"time"
......@@ -12,8 +13,9 @@ import (
"github.com/ethereum-optimism/optimism/op-service/dial"
"github.com/ethereum-optimism/optimism/op-supervisor/config"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/heads"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/source"
backendTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/frontend"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/common"
......@@ -21,18 +23,12 @@ import (
"github.com/ethereum/go-ethereum/log"
)
type LogStore interface {
io.Closer
ClosestBlockInfo(blockNum uint64) (uint64, backendTypes.TruncatedHash, error)
Rewind(headBlockNum uint64) error
}
type SupervisorBackend struct {
started atomic.Bool
logger log.Logger
chainMonitors []*source.ChainMonitor
logDBs []LogStore
db *db.ChainsDB
}
var _ frontend.Backend = (*SupervisorBackend)(nil)
......@@ -40,9 +36,17 @@ var _ frontend.Backend = (*SupervisorBackend)(nil)
var _ io.Closer = (*SupervisorBackend)(nil)
func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg *config.Config) (*SupervisorBackend, error) {
chainMonitors := make([]*source.ChainMonitor, len(cfg.L2RPCs))
logDBs := make([]LogStore, len(cfg.L2RPCs))
for i, rpc := range cfg.L2RPCs {
if err := prepDataDir(cfg.Datadir); err != nil {
return nil, err
}
headTracker, err := heads.NewHeadTracker(filepath.Join(cfg.Datadir, "heads.json"))
if err != nil {
return nil, fmt.Errorf("failed to load existing heads: %w", err)
}
logDBs := make(map[types.ChainID]db.LogStorage)
chainRPCs := make(map[types.ChainID]string)
chainClients := make(map[types.ChainID]client.RPC)
for _, rpc := range cfg.L2RPCs {
rpcClient, chainID, err := createRpcClient(ctx, logger, rpc)
if err != nil {
return nil, err
......@@ -52,26 +56,32 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg
if err != nil {
return nil, fmt.Errorf("failed to create datadir for chain %v: %w", chainID, err)
}
logDB, err := db.NewFromFile(logger, cm, path)
logDB, err := logs.NewFromFile(logger, cm, path)
if err != nil {
return nil, fmt.Errorf("failed to create logdb for chain %v at %v: %w", chainID, path, err)
}
logDBs[i] = logDB
block, err := Resume(logDB)
if err != nil {
return nil, err
logDBs[chainID] = logDB
chainRPCs[chainID] = rpc
chainClients[chainID] = rpcClient
}
chainsDB := db.NewChainsDB(logDBs, headTracker)
if err := chainsDB.Resume(); err != nil {
return nil, fmt.Errorf("failed to resume chains db: %w", err)
}
monitor, err := source.NewChainMonitor(ctx, logger, cm, chainID, rpc, rpcClient, logDB, block)
chainMonitors := make([]*source.ChainMonitor, 0, len(cfg.L2RPCs))
for chainID, rpc := range chainRPCs {
cm := newChainMetrics(chainID, m)
monitor, err := source.NewChainMonitor(ctx, logger, cm, chainID, rpc, chainClients[chainID], chainsDB)
if err != nil {
return nil, fmt.Errorf("failed to create monitor for rpc %v: %w", rpc, err)
}
chainMonitors[i] = monitor
chainMonitors = append(chainMonitors, monitor)
}
return &SupervisorBackend{
logger: logger,
chainMonitors: chainMonitors,
logDBs: logDBs,
db: chainsDB,
}, nil
}
......@@ -109,10 +119,8 @@ func (su *SupervisorBackend) Stop(ctx context.Context) error {
errs = errors.Join(errs, fmt.Errorf("failed to stop chain monitor: %w", err))
}
}
for _, logDB := range su.logDBs {
if err := logDB.Close(); err != nil {
errs = errors.Join(errs, fmt.Errorf("failed to close logdb: %w", err))
}
if err := su.db.Close(); err != nil {
errs = errors.Join(errs, fmt.Errorf("failed to close database: %w", err))
}
return errs
}
......
......@@ -2,7 +2,7 @@ package backend
import (
"github.com/ethereum-optimism/optimism/op-service/sources/caching"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
......@@ -45,4 +45,4 @@ func (c *chainMetrics) RecordDBSearchEntriesRead(count int64) {
}
var _ caching.Metrics = (*chainMetrics)(nil)
var _ db.Metrics = (*chainMetrics)(nil)
var _ logs.Metrics = (*chainMetrics)(nil)
......@@ -4,491 +4,81 @@ import (
"errors"
"fmt"
"io"
"math"
"sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
"github.com/ethereum/go-ethereum/log"
)
const (
searchCheckpointFrequency = 256
eventFlagIncrementLogIdx = byte(1)
eventFlagHasExecutingMessage = byte(1) << 1
)
const (
typeSearchCheckpoint byte = iota
typeCanonicalHash
typeInitiatingEvent
typeExecutingLink
typeExecutingCheck
backendTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
)
var (
ErrLogOutOfOrder = errors.New("log out of order")
ErrDataCorruption = errors.New("data corruption")
ErrNotFound = errors.New("not found")
ErrUnknownChain = errors.New("unknown chain")
)
type Metrics interface {
RecordDBEntryCount(count int64)
RecordDBSearchEntriesRead(count int64)
type LogStorage interface {
io.Closer
AddLog(logHash backendTypes.TruncatedHash, block eth.BlockID, timestamp uint64, logIdx uint32, execMsg *backendTypes.ExecutingMessage) error
Rewind(newHeadBlockNum uint64) error
LatestBlockNum() uint64
ClosestBlockInfo(blockNum uint64) (uint64, backendTypes.TruncatedHash, error)
}
type logContext struct {
blockNum uint64
logIdx uint32
type HeadsStorage interface {
}
type EntryStore interface {
Size() int64
LastEntryIdx() entrydb.EntryIdx
Read(idx entrydb.EntryIdx) (entrydb.Entry, error)
Append(entries ...entrydb.Entry) error
Truncate(idx entrydb.EntryIdx) error
Close() error
type ChainsDB struct {
logDBs map[types.ChainID]LogStorage
heads HeadsStorage
}
// DB implements an append only database for log data and cross-chain dependencies.
//
// To keep the append-only format, reduce data size, and support reorg detection and registering of executing-messages:
//
// Use a fixed 24 bytes per entry.
//
// Data is an append-only log, that can be binary searched for any necessary event data.
//
// Rules:
// if entry_index % 256 == 0: must be type 0. For easy binary search.
// type 1 always adjacent to type 0
// type 2 "diff" values are offsets from type 0 values (always within 256 entries range)
// type 3 always after type 2
// type 4 always after type 3
//
// Types (<type> = 1 byte):
// type 0: "search checkpoint" <type><uint64 block number: 8 bytes><uint32 event index offset: 4 bytes><uint64 timestamp: 8 bytes> = 20 bytes
// type 1: "canonical hash" <type><parent blockhash truncated: 20 bytes> = 21 bytes
// type 2: "initiating event" <type><blocknum diff: 1 byte><event flags: 1 byte><event-hash: 20 bytes> = 23 bytes
// type 3: "executing link" <type><chain: 4 bytes><blocknum: 8 bytes><event index: 3 bytes><uint64 timestamp: 8 bytes> = 24 bytes
// type 4: "executing check" <type><event-hash: 20 bytes> = 21 bytes
// other types: future compat. E.g. for linking to L1, registering block-headers as a kind of initiating-event, tracking safe-head progression, etc.
//
// Right-pad each entry that is not 24 bytes.
//
// event-flags: each bit represents a boolean value, currently only two are defined
// * event-flags & 0x01 - true if the log index should increment. Should only be false when the event is immediately after a search checkpoint and canonical hash
// * event-flags & 0x02 - true if the initiating event has an executing link that should follow. Allows detecting when the executing link failed to write.
// event-hash: H(origin, timestamp, payloadhash); enough to check identifier matches & payload matches.
type DB struct {
log log.Logger
m Metrics
store EntryStore
rwLock sync.RWMutex
lastEntryContext logContext
}
// NewFromFile opens (or creates) the entry database file at path and wraps it
// in a DB ready for reads and appends.
func NewFromFile(logger log.Logger, m Metrics, path string) (*DB, error) {
	entryStore, err := entrydb.NewEntryDB(logger, path)
	if err != nil {
		return nil, fmt.Errorf("failed to open DB: %w", err)
	}
	return NewFromEntryStore(logger, m, entryStore)
}
func NewFromEntryStore(logger log.Logger, m Metrics, store EntryStore) (*DB, error) {
db := &DB{
log: logger,
m: m,
store: store,
}
if err := db.init(); err != nil {
return nil, fmt.Errorf("failed to init database: %w", err)
func NewChainsDB(logDBs map[types.ChainID]LogStorage, heads HeadsStorage) *ChainsDB {
return &ChainsDB{
logDBs: logDBs,
heads: heads,
}
return db, nil
}
// lastEntryIdx returns the index of the last entry currently recorded,
// delegating to the underlying entry store.
func (db *DB) lastEntryIdx() entrydb.EntryIdx {
	return db.store.LastEntryIdx()
}
// init rebuilds the in-memory context (lastEntryContext) from the entries
// already on disk so appends can continue correctly after a restart.
// Any invalid trailing entries (e.g. from a partial write) are trimmed first.
func (db *DB) init() error {
	defer db.updateEntryCountMetric() // Always update the entry count metric after init completes
	if err := db.trimInvalidTrailingEntries(); err != nil {
		return fmt.Errorf("failed to trim invalid trailing entries: %w", err)
	}
	if db.lastEntryIdx() < 0 {
		// Database is empty so no context to load
		return nil
	}
	// Replay from the most recent search checkpoint to recover the block/log context.
	lastCheckpoint := (db.lastEntryIdx() / searchCheckpointFrequency) * searchCheckpointFrequency
	i, err := db.newIterator(lastCheckpoint)
	if err != nil {
		return fmt.Errorf("failed to create iterator at last search checkpoint: %w", err)
	}
	// Read all entries until the end of the file
	for {
		_, _, _, err := i.NextLog()
		if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			return fmt.Errorf("failed to init from existing entries: %w", err)
		}
	}
	db.lastEntryContext = i.current
	return nil
}
func (db *DB) trimInvalidTrailingEntries() error {
i := db.lastEntryIdx()
for ; i >= 0; i-- {
entry, err := db.store.Read(i)
if err != nil {
return fmt.Errorf("failed to read %v to check for trailing entries: %w", i, err)
}
if entry[0] == typeExecutingCheck {
// executing check is a valid final entry
break
// Resume prepares the chains db to resume recording events after a restart.
// It rewinds the database to the last block that is guaranteed to have been fully recorded to the database
// to ensure it can resume recording from the first log of the next block.
func (db *ChainsDB) Resume() error {
for chain, logStore := range db.logDBs {
if err := Resume(logStore); err != nil {
return fmt.Errorf("failed to resume chain %v: %w", chain, err)
}
if entry[0] == typeInitiatingEvent {
evt, err := newInitiatingEventFromEntry(entry)
if err != nil {
// Entry is invalid, keep walking backwards
continue
}
if !evt.hasExecMsg {
// init event with no exec msg is a valid final entry
break
}
}
}
if i < db.lastEntryIdx() {
db.log.Warn("Truncating unexpected trailing entries", "prev", db.lastEntryIdx(), "new", i)
return db.store.Truncate(i)
}
return nil
}
// updateEntryCountMetric reports the current number of stored entries to the metrics sink.
func (db *DB) updateEntryCountMetric() {
	db.m.RecordDBEntryCount(db.store.Size())
}
// ClosestBlockInfo returns the block number and hash of the highest recorded block at or before blockNum.
// Since block data is only recorded in search checkpoints, this may return an earlier block even if log data is
// recorded for the requested block.
func (db *DB) ClosestBlockInfo(blockNum uint64) (uint64, types.TruncatedHash, error) {
db.rwLock.RLock()
defer db.rwLock.RUnlock()
checkpointIdx, err := db.searchCheckpoint(blockNum, math.MaxUint32)
if err != nil {
return 0, types.TruncatedHash{}, fmt.Errorf("no checkpoint at or before block %v found: %w", blockNum, err)
}
checkpoint, err := db.readSearchCheckpoint(checkpointIdx)
if err != nil {
return 0, types.TruncatedHash{}, fmt.Errorf("failed to reach checkpoint: %w", err)
func (db *ChainsDB) LatestBlockNum(chain types.ChainID) uint64 {
logDB, ok := db.logDBs[chain]
if !ok {
return 0
}
entry, err := db.readCanonicalHash(checkpointIdx + 1)
if err != nil {
return 0, types.TruncatedHash{}, fmt.Errorf("failed to read canonical hash: %w", err)
}
return checkpoint.blockNum, entry.hash, nil
return logDB.LatestBlockNum()
}
// Contains return true iff the specified logHash is recorded in the specified blockNum and logIdx.
// logIdx is the index of the log in the array of all logs the block.
// This can be used to check the validity of cross-chain interop events.
func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash types.TruncatedHash) (bool, error) {
db.rwLock.RLock()
defer db.rwLock.RUnlock()
db.log.Trace("Checking for log", "blockNum", blockNum, "logIdx", logIdx, "hash", logHash)
evtHash, _, err := db.findLogInfo(blockNum, logIdx)
if errors.Is(err, ErrNotFound) {
// Did not find a log at blockNum and logIdx
return false, nil
} else if err != nil {
return false, err
func (db *ChainsDB) AddLog(chain types.ChainID, logHash backendTypes.TruncatedHash, block eth.BlockID, timestamp uint64, logIdx uint32, execMsg *backendTypes.ExecutingMessage) error {
logDB, ok := db.logDBs[chain]
if !ok {
return fmt.Errorf("%w: %v", ErrUnknownChain, chain)
}
db.log.Trace("Found initiatingEvent", "blockNum", blockNum, "logIdx", logIdx, "hash", evtHash)
// Found the requested block and log index, check if the hash matches
return evtHash == logHash, nil
return logDB.AddLog(logHash, block, timestamp, logIdx, execMsg)
}
// Executes checks if the log identified by the specific block number and log index, has an ExecutingMessage associated
// with it that needs to be checked as part of interop validation.
// logIdx is the index of the log in the array of all logs the block.
// Returns the ExecutingMessage if it exists, or ExecutingMessage{} if the log is found but has no ExecutingMessage.
// Returns ErrNotFound if the specified log does not exist in the database.
func (db *DB) Executes(blockNum uint64, logIdx uint32) (types.ExecutingMessage, error) {
db.rwLock.RLock()
defer db.rwLock.RUnlock()
_, iter, err := db.findLogInfo(blockNum, logIdx)
if err != nil {
return types.ExecutingMessage{}, err
}
execMsg, err := iter.ExecMessage()
if err != nil {
return types.ExecutingMessage{}, fmt.Errorf("failed to read executing message: %w", err)
func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error {
logDB, ok := db.logDBs[chain]
if !ok {
return fmt.Errorf("%w: %v", ErrUnknownChain, chain)
}
return execMsg, nil
return logDB.Rewind(headBlockNum)
}
// findLogInfo locates the initiating-event entry for the log at (blockNum, logIdx).
// It binary-searches the checkpoints for a starting point, then scans forward.
// Returns the event hash and an iterator positioned at the entry, or ErrNotFound
// if the log is not in the database.
func (db *DB) findLogInfo(blockNum uint64, logIdx uint32) (types.TruncatedHash, *iterator, error) {
	entryIdx, err := db.searchCheckpoint(blockNum, logIdx)
	if errors.Is(err, io.EOF) {
		// Did not find a checkpoint to start reading from so the log cannot be present.
		return types.TruncatedHash{}, nil, ErrNotFound
	} else if err != nil {
		return types.TruncatedHash{}, nil, err
	}
	i, err := db.newIterator(entryIdx)
	if err != nil {
		return types.TruncatedHash{}, nil, fmt.Errorf("failed to create iterator: %w", err)
	}
	db.log.Trace("Starting search", "entry", entryIdx, "blockNum", i.current.blockNum, "logIdx", i.current.logIdx)
	// Record how many entries the search read, regardless of outcome.
	defer func() {
		db.m.RecordDBSearchEntriesRead(i.entriesRead)
	}()
	for {
		evtBlockNum, evtLogIdx, evtHash, err := i.NextLog()
		if errors.Is(err, io.EOF) {
			// Reached end of log without finding the event
			return types.TruncatedHash{}, nil, ErrNotFound
		} else if err != nil {
			return types.TruncatedHash{}, nil, fmt.Errorf("failed to read next log: %w", err)
		}
		if evtBlockNum == blockNum && evtLogIdx == logIdx {
			db.log.Trace("Found initiatingEvent", "blockNum", evtBlockNum, "logIdx", evtLogIdx, "hash", evtHash)
			return evtHash, i, nil
		}
		if evtBlockNum > blockNum || (evtBlockNum == blockNum && evtLogIdx > logIdx) {
			// Progressed past the requested log without finding it.
			return types.TruncatedHash{}, nil, ErrNotFound
		}
	}
}
func (db *DB) newIterator(startCheckpointEntry entrydb.EntryIdx) (*iterator, error) {
checkpoint, err := db.readSearchCheckpoint(startCheckpointEntry)
if err != nil {
return nil, fmt.Errorf("failed to read search checkpoint entry %v: %w", startCheckpointEntry, err)
}
startIdx := startCheckpointEntry + 2
firstEntry, err := db.store.Read(startIdx)
if errors.Is(err, io.EOF) {
// There should always be an entry after a checkpoint and canonical hash so an EOF here is data corruption
return nil, fmt.Errorf("%w: no entry after checkpoint and canonical hash at %v", ErrDataCorruption, startCheckpointEntry)
} else if err != nil {
return nil, fmt.Errorf("failed to read first entry to iterate %v: %w", startCheckpointEntry+2, err)
}
startLogCtx := logContext{
blockNum: checkpoint.blockNum,
logIdx: checkpoint.logIdx,
}
// Handle starting from a checkpoint after initiating-event but before its executing-link or executing-check
if firstEntry[0] == typeExecutingLink || firstEntry[0] == typeExecutingCheck {
if firstEntry[0] == typeExecutingLink {
// The start checkpoint was between the initiating event and the executing link
// Step back to read the initiating event. The checkpoint block data will be for the initiating event
startIdx = startCheckpointEntry - 1
} else {
// The start checkpoint was between the executing link and the executing check
// Step back to read the initiating event. The checkpoint block data will be for the initiating event
startIdx = startCheckpointEntry - 2
}
initEntry, err := db.store.Read(startIdx)
if err != nil {
return nil, fmt.Errorf("failed to read prior initiating event: %w", err)
}
initEvt, err := newInitiatingEventFromEntry(initEntry)
if err != nil {
return nil, fmt.Errorf("invalid initiating event at idx %v: %w", startIdx, err)
}
startLogCtx = initEvt.preContext(startLogCtx)
func (db *ChainsDB) Close() error {
var combined error
for id, logDB := range db.logDBs {
if err := logDB.Close(); err != nil {
combined = errors.Join(combined, fmt.Errorf("failed to close log db for chain %v: %w", id, err))
}
i := &iterator{
db: db,
// +2 to skip the initial search checkpoint and the canonical hash event after it
nextEntryIdx: startIdx,
current: startLogCtx,
}
return i, nil
}
// searchCheckpoint performs a binary search of the searchCheckpoint entries to find the closest one at or before
// the requested log.
// Returns the index of the searchCheckpoint to begin reading from or an error.
// Returns io.EOF when there is no checkpoint at or before the requested position.
func (db *DB) searchCheckpoint(blockNum uint64, logIdx uint32) (entrydb.EntryIdx, error) {
	// n is the number of checkpoints; checkpoints sit at every searchCheckpointFrequency-th entry.
	n := (db.lastEntryIdx() / searchCheckpointFrequency) + 1
	// Define x[-1] < target and x[n] >= target.
	// Invariant: x[i-1] < target, x[j] >= target.
	i, j := entrydb.EntryIdx(0), n
	for i < j {
		h := entrydb.EntryIdx(uint64(i+j) >> 1) // avoid overflow when computing h
		checkpoint, err := db.readSearchCheckpoint(h * searchCheckpointFrequency)
		if err != nil {
			return 0, fmt.Errorf("failed to read entry %v: %w", h, err)
		}
		// i ≤ h < j
		if checkpoint.blockNum < blockNum || (checkpoint.blockNum == blockNum && checkpoint.logIdx < logIdx) {
			i = h + 1 // preserves x[i-1] < target
		} else {
			j = h // preserves x[j] >= target
		}
	}
	if i < n {
		checkpoint, err := db.readSearchCheckpoint(i * searchCheckpointFrequency)
		if err != nil {
			return 0, fmt.Errorf("failed to read entry %v: %w", i, err)
		}
		if checkpoint.blockNum == blockNum && checkpoint.logIdx == logIdx {
			// Found entry at requested block number and log index
			return i * searchCheckpointFrequency, nil
		}
	}
	if i == 0 {
		// There are no checkpoints before the requested blocks
		return 0, io.EOF
	}
	// Not found, need to start reading from the entry prior
	return (i - 1) * searchCheckpointFrequency, nil
}
// AddLog appends an initiating-event entry for the given log (plus executing
// link/check entries when execMsg is non-nil). Logs must arrive strictly in
// order: block numbers never decrease, the first log of a new block has index
// zero, and within a block the index increments by exactly one.
// A search checkpoint + canonical hash pair is inserted whenever the next entry
// would land on a checkpoint boundary (every searchCheckpointFrequency entries).
func (db *DB) AddLog(logHash types.TruncatedHash, block eth.BlockID, timestamp uint64, logIdx uint32, execMsg *types.ExecutingMessage) error {
	db.rwLock.Lock()
	defer db.rwLock.Unlock()
	// postState is the context the database will be in once this log is recorded.
	postState := logContext{
		blockNum: block.Number,
		logIdx:   logIdx,
	}
	if block.Number == 0 {
		return fmt.Errorf("%w: should not have logs in block 0", ErrLogOutOfOrder)
	}
	if db.lastEntryContext.blockNum > block.Number {
		return fmt.Errorf("%w: adding block %v, head block: %v", ErrLogOutOfOrder, block.Number, db.lastEntryContext.blockNum)
	}
	if db.lastEntryContext.blockNum == block.Number && db.lastEntryContext.logIdx+1 != logIdx {
		return fmt.Errorf("%w: adding log %v in block %v, but currently at log %v", ErrLogOutOfOrder, logIdx, block.Number, db.lastEntryContext.logIdx)
	}
	if db.lastEntryContext.blockNum < block.Number && logIdx != 0 {
		return fmt.Errorf("%w: adding log %v as first log in block %v", ErrLogOutOfOrder, logIdx, block.Number)
	}
	var entriesToAdd []entrydb.Entry
	newContext := db.lastEntryContext
	lastEntryIdx := db.lastEntryIdx()
	// addEntry stages an entry and tracks the index it will occupy once written.
	addEntry := func(entry entrydb.Entry) {
		entriesToAdd = append(entriesToAdd, entry)
		lastEntryIdx++
	}
	// maybeAddCheckpoint stages a checkpoint + canonical hash pair when the next
	// entry would fall on a checkpoint boundary.
	maybeAddCheckpoint := func() {
		if (lastEntryIdx+1)%searchCheckpointFrequency == 0 {
			addEntry(newSearchCheckpoint(block.Number, logIdx, timestamp).encode())
			addEntry(newCanonicalHash(types.TruncateHash(block.Hash)).encode())
			newContext = postState
		}
	}
	maybeAddCheckpoint()
	evt, err := newInitiatingEvent(newContext, postState.blockNum, postState.logIdx, logHash, execMsg != nil)
	if err != nil {
		return fmt.Errorf("failed to create initiating event: %w", err)
	}
	addEntry(evt.encode())
	if execMsg != nil {
		// The executing link and check may each also straddle a checkpoint boundary.
		maybeAddCheckpoint()
		link, err := newExecutingLink(*execMsg)
		if err != nil {
			return fmt.Errorf("failed to create executing link: %w", err)
		}
		addEntry(link.encode())
		maybeAddCheckpoint()
		addEntry(newExecutingCheck(execMsg.Hash).encode())
	}
	// Write all staged entries with a single Append call.
	if err := db.store.Append(entriesToAdd...); err != nil {
		return fmt.Errorf("failed to append entries: %w", err)
	}
	db.lastEntryContext = postState
	db.updateEntryCountMetric()
	return nil
}
// Rewind the database to remove any blocks after headBlockNum.
// The block at headBlockNum itself is not removed.
// After truncating, the in-memory context is rebuilt via init().
func (db *DB) Rewind(headBlockNum uint64) error {
	db.rwLock.Lock()
	defer db.rwLock.Unlock()
	if headBlockNum >= db.lastEntryContext.blockNum {
		// Nothing to do
		return nil
	}
	// Find the last checkpoint before the block to remove
	idx, err := db.searchCheckpoint(headBlockNum+1, 0)
	if errors.Is(err, io.EOF) {
		// Requested a block prior to the first checkpoint
		// Delete everything without scanning forward
		idx = -1
	} else if err != nil {
		return fmt.Errorf("failed to find checkpoint prior to block %v: %w", headBlockNum, err)
	} else {
		// Scan forward from the checkpoint to find the first entry about a block after headBlockNum
		i, err := db.newIterator(idx)
		if err != nil {
			return fmt.Errorf("failed to create iterator when searching for rewind point: %w", err)
		}
		// If we don't find any useful logs after the checkpoint, we should delete the checkpoint itself
		// So move our delete marker back to include it as a starting point
		idx--
		for {
			blockNum, _, _, err := i.NextLog()
			if errors.Is(err, io.EOF) {
				// Reached end of file, we need to keep everything
				return nil
			} else if err != nil {
				return fmt.Errorf("failed to find rewind point: %w", err)
			}
			if blockNum > headBlockNum {
				// Found the first entry we don't need, so stop searching and delete everything after idx
				break
			}
			// Otherwise we need all of the entries the iterator just read
			idx = i.nextEntryIdx - 1
		}
	}
	// Truncate to contain idx+1 entries, since indices are 0 based, this deletes everything after idx
	if err := db.store.Truncate(idx); err != nil {
		return fmt.Errorf("failed to truncate to block %v: %w", headBlockNum, err)
	}
	// Use db.init() to find the log context for the new latest log entry
	if err := db.init(); err != nil {
		return fmt.Errorf("failed to find new last entry context: %w", err)
	}
	return nil
}
// readSearchCheckpoint reads and decodes the search-checkpoint entry at entryIdx.
func (db *DB) readSearchCheckpoint(entryIdx entrydb.EntryIdx) (searchCheckpoint, error) {
	entry, err := db.store.Read(entryIdx)
	if err != nil {
		return searchCheckpoint{}, fmt.Errorf("failed to read entry %v: %w", entryIdx, err)
	}
	return newSearchCheckpointFromEntry(entry)
}
// readCanonicalHash reads and decodes the canonical-hash entry at entryIdx.
func (db *DB) readCanonicalHash(entryIdx entrydb.EntryIdx) (canonicalHash, error) {
	entry, err := db.store.Read(entryIdx)
	if err != nil {
		return canonicalHash{}, fmt.Errorf("failed to read entry %v: %w", entryIdx, err)
	}
	return newCanonicalHashFromEntry(entry)
}
func (db *DB) Close() error {
return db.store.Close()
return combined
}
package db
import (
"bytes"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"testing"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
backendTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/stretchr/testify/require"
)
// createTruncatedHash returns the truncated form of the deterministic test hash for i.
func createTruncatedHash(i int) types.TruncatedHash {
	full := createHash(i)
	return types.TruncateHash(full)
}
// createHash builds a deterministic test hash whose every byte is byte(i).
func createHash(i int) common.Hash {
	repeated := bytes.Repeat([]byte{byte(i)}, common.HashLength)
	return common.BytesToHash(repeated)
}
// TestErrorOpeningDatabase verifies that opening a database in a non-existent
// directory surfaces os.ErrNotExist rather than silently creating it.
func TestErrorOpeningDatabase(t *testing.T) {
	dir := t.TempDir()
	_, err := NewFromFile(testlog.Logger(t, log.LvlInfo), &stubMetrics{}, filepath.Join(dir, "missing-dir", "file.db"))
	require.ErrorIs(t, err, os.ErrNotExist)
}
// runDBTest runs the setup/assert pair twice: once against a freshly created
// database ("New"), and once where the database is closed and reopened between
// setup and assert ("Existing") to verify state survives a restart.
// File invariants are checked after each close in the "Existing" case.
func runDBTest(t *testing.T, setup func(t *testing.T, db *DB, m *stubMetrics), assert func(t *testing.T, db *DB, m *stubMetrics)) {
	// createDb builds a file-backed DB in dir and registers a cleanup that
	// tolerates the test having already closed it.
	createDb := func(t *testing.T, dir string) (*DB, *stubMetrics, string) {
		logger := testlog.Logger(t, log.LvlInfo)
		path := filepath.Join(dir, "test.db")
		m := &stubMetrics{}
		db, err := NewFromFile(logger, m, path)
		require.NoError(t, err, "Failed to create database")
		t.Cleanup(func() {
			err := db.Close()
			if err != nil {
				// Already closed by the test body is the only acceptable failure.
				require.ErrorIs(t, err, fs.ErrClosed)
			}
		})
		return db, m, path
	}
	t.Run("New", func(t *testing.T) {
		db, m, _ := createDb(t, t.TempDir())
		setup(t, db, m)
		assert(t, db, m)
	})
	t.Run("Existing", func(t *testing.T) {
		dir := t.TempDir()
		db, m, path := createDb(t, dir)
		setup(t, db, m)
		// Close and recreate the database
		require.NoError(t, db.Close())
		checkDBInvariants(t, path, m)
		db2, m, path := createDb(t, dir)
		assert(t, db2, m)
		checkDBInvariants(t, path, m)
	})
}
// TestEmptyDbDoesNotFindEntry verifies lookups against an empty database
// report "not contained" instead of erroring.
func TestEmptyDbDoesNotFindEntry(t *testing.T) {
	runDBTest(t,
		func(t *testing.T, db *DB, m *stubMetrics) {},
		func(t *testing.T, db *DB, m *stubMetrics) {
			requireNotContains(t, db, 0, 0, createHash(1))
			requireNotContains(t, db, 0, 0, common.Hash{})
		})
}
func TestAddLog(t *testing.T) {
t.Run("BlockZero", func(t *testing.T) {
// There are no logs in the genesis block so recording an entry for block 0 should be rejected.
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 0}, 5000, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("FirstEntry", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
requireContains(t, db, 15, 0, createHash(1))
})
})
t.Run("MultipleEntriesFromSameBlock", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2, nil)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
require.EqualValues(t, 5, m.entryCount, "should not output new searchCheckpoint for every log")
requireContains(t, db, 15, 0, createHash(1))
requireContains(t, db, 15, 1, createHash(2))
requireContains(t, db, 15, 2, createHash(3))
})
})
t.Run("MultipleEntriesFromMultipleBlocks", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(16), Number: 16}, 5002, 0, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(4), eth.BlockID{Hash: createHash(16), Number: 16}, 5002, 1, nil)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
require.EqualValues(t, 6, m.entryCount, "should not output new searchCheckpoint for every block")
requireContains(t, db, 15, 0, createHash(1))
requireContains(t, db, 15, 1, createHash(2))
requireContains(t, db, 16, 0, createHash(3))
requireContains(t, db, 16, 1, createHash(4))
})
})
t.Run("ErrorWhenBeforeCurrentBlock", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4998, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenBeforeCurrentBlockButAfterLastCheckpoint", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(13), Number: 13}, 5000, 0, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4998, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenBeforeCurrentLogEvent", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil))
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenBeforeCurrentLogEventButAfterLastCheckpoint", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2, nil)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 1, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenAtCurrentLogEvent", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil))
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 1, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenAtCurrentLogEventButAfterLastCheckpoint", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2, nil))
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 2, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenSkippingLogEvent", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 2, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenFirstLogIsNotLogIdxZero", func(t *testing.T) {
runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) {},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 5, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenFirstLogOfNewBlockIsNotLogIdxZero", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4996, 0, nil))
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 1, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("MultipleSearchCheckpoints", func(t *testing.T) {
block1 := eth.BlockID{Hash: createHash(11), Number: 11}
block2 := eth.BlockID{Hash: createHash(12), Number: 12}
block3 := eth.BlockID{Hash: createHash(15), Number: 15}
block4 := eth.BlockID{Hash: createHash(16), Number: 16}
// First checkpoint is at entry idx 0
// Block 1 logs don't reach the second checkpoint
block1LogCount := searchCheckpointFrequency - 10
// Block 2 logs extend to just after the third checkpoint
block2LogCount := searchCheckpointFrequency + 20
// Block 3 logs extend to immediately before the fourth checkpoint
block3LogCount := searchCheckpointFrequency - 16
block4LogCount := 2
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
for i := 0; i < block1LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block1, 3000, uint32(i), nil)
require.NoErrorf(t, err, "failed to add log %v of block 1", i)
}
for i := 0; i < block2LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block2, 3002, uint32(i), nil)
require.NoErrorf(t, err, "failed to add log %v of block 2", i)
}
for i := 0; i < block3LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block3, 3004, uint32(i), nil)
require.NoErrorf(t, err, "failed to add log %v of block 3", i)
}
// Verify that we're right before the fourth checkpoint will be written.
// entryCount is the number of entries, so given 0 based indexing is the index of the next entry
// the first checkpoint is at entry 0, the second at entry searchCheckpointFrequency etc
// so the fourth is at entry 3*searchCheckpointFrequency
require.EqualValues(t, 3*searchCheckpointFrequency, m.entryCount)
for i := 0; i < block4LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block4, 3006, uint32(i), nil)
require.NoErrorf(t, err, "failed to add log %v of block 4", i)
}
},
func(t *testing.T, db *DB, m *stubMetrics) {
// Check that we wrote additional search checkpoints
expectedCheckpointCount := 4
expectedEntryCount := block1LogCount + block2LogCount + block3LogCount + block4LogCount + (2 * expectedCheckpointCount)
require.EqualValues(t, expectedEntryCount, m.entryCount)
// Check we can find all the logs.
for i := 0; i < block1LogCount; i++ {
requireContains(t, db, block1.Number, uint32(i), createHash(i))
}
// Block 2 logs extend to just after the third checkpoint
for i := 0; i < block2LogCount; i++ {
requireContains(t, db, block2.Number, uint32(i), createHash(i))
}
// Block 3 logs extend to immediately before the fourth checkpoint
for i := 0; i < block3LogCount; i++ {
requireContains(t, db, block3.Number, uint32(i), createHash(i))
}
// Block 4 logs start immediately after the fourth checkpoint
for i := 0; i < block4LogCount; i++ {
requireContains(t, db, block4.Number, uint32(i), createHash(i))
}
})
})
}
// TestAddDependentLog verifies adding logs that carry an executing message,
// including the cases where a search checkpoint must be inserted between the
// initiating-event entry and the executing-link entry, or between the
// executing-link entry and the executing-check entry.
func TestAddDependentLog(t *testing.T) {
	execMsg := types.ExecutingMessage{
		Chain:     3,
		BlockNum:  42894,
		LogIdx:    42,
		Timestamp: 8742482,
		Hash:      types.TruncateHash(createHash(8844)),
	}
	t.Run("FirstEntry", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, &execMsg)
				require.NoError(t, err)
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 15, 0, createHash(1), execMsg)
			})
	})
	t.Run("CheckpointBetweenInitEventAndExecLink", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				// Pad the DB to one entry short of a checkpoint boundary so the
				// checkpoint lands between the init event and the exec link.
				for i := uint32(0); m.entryCount < searchCheckpointFrequency-1; i++ {
					require.NoError(t, db.AddLog(createTruncatedHash(9), eth.BlockID{Hash: createHash(9), Number: 1}, 500, i, nil))
				}
				err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, &execMsg)
				require.NoError(t, err)
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 15, 0, createHash(1), execMsg)
			})
	})
	t.Run("CheckpointBetweenInitEventAndExecLinkNotIncrementingBlock", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				// Same checkpoint position, but the dependent log continues the
				// same block (block 1, log index 253) instead of starting a new one.
				for i := uint32(0); m.entryCount < searchCheckpointFrequency-1; i++ {
					require.NoError(t, db.AddLog(createTruncatedHash(9), eth.BlockID{Hash: createHash(9), Number: 1}, 500, i, nil))
				}
				err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 1}, 5000, 253, &execMsg)
				require.NoError(t, err)
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 1, 253, createHash(1), execMsg)
			})
	})
	t.Run("CheckpointBetweenExecLinkAndExecCheck", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				// Pad to two entries short of the boundary so the checkpoint lands
				// between the exec link and the exec check.
				for i := uint32(0); m.entryCount < searchCheckpointFrequency-2; i++ {
					require.NoError(t, db.AddLog(createTruncatedHash(9), eth.BlockID{Hash: createHash(9), Number: 1}, 500, i, nil))
				}
				err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, &execMsg)
				require.NoError(t, err)
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 15, 0, createHash(1), execMsg)
			})
	})
	t.Run("CheckpointBetweenExecLinkAndExecCheckNotIncrementingBlock", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				// As above, but without starting a new block (block 1, log index 252).
				for i := uint32(0); m.entryCount < searchCheckpointFrequency-2; i++ {
					require.NoError(t, db.AddLog(createTruncatedHash(9), eth.BlockID{Hash: createHash(9), Number: 1}, 500, i, nil))
				}
				err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 1}, 5000, 252, &execMsg)
				require.NoError(t, err)
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 1, 252, createHash(1), execMsg)
			})
	})
}
// TestContains checks that Contains finds exactly the logs that were added
// and rejects lookups for block numbers, log indices or hashes that were not.
func TestContains(t *testing.T) {
	runDBTest(t,
		func(t *testing.T, db *DB, m *stubMetrics) {
			require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
			require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
			require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 2, nil))
			require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(52), Number: 52}, 500, 0, nil))
			require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(52), Number: 52}, 500, 1, nil))
		},
		func(t *testing.T, db *DB, m *stubMetrics) {
			// Should find added logs
			requireContains(t, db, 50, 0, createHash(1))
			requireContains(t, db, 50, 1, createHash(3))
			requireContains(t, db, 50, 2, createHash(2))
			requireContains(t, db, 52, 0, createHash(1))
			requireContains(t, db, 52, 1, createHash(3))
			// Should not find log when block number too low
			requireNotContains(t, db, 49, 0, createHash(1))
			// Should not find log when block number too high
			requireNotContains(t, db, 51, 0, createHash(1))
			// Should not find log when requested log after end of database
			requireNotContains(t, db, 52, 2, createHash(3))
			requireNotContains(t, db, 53, 0, createHash(3))
			// Should not find log when log index too high
			requireNotContains(t, db, 50, 3, createHash(2))
			// Should not find log when hash doesn't match log at block number and index
			requireWrongHash(t, db, 50, 0, createHash(5), types.ExecutingMessage{})
		})
}
// TestExecutes checks that Executes returns the executing message stored with
// a log, or the zero-value ExecutingMessage for logs added without one, and
// errors for log positions that do not exist.
func TestExecutes(t *testing.T) {
	execMsg1 := types.ExecutingMessage{
		Chain:     33,
		BlockNum:  22,
		LogIdx:    99,
		Timestamp: 948294,
		Hash:      createTruncatedHash(332299),
	}
	execMsg2 := types.ExecutingMessage{
		Chain:     44,
		BlockNum:  55,
		LogIdx:    66,
		Timestamp: 77777,
		Hash:      createTruncatedHash(445566),
	}
	execMsg3 := types.ExecutingMessage{
		Chain:     77,
		BlockNum:  88,
		LogIdx:    89,
		Timestamp: 6578567,
		Hash:      createTruncatedHash(778889),
	}
	runDBTest(t,
		func(t *testing.T, db *DB, m *stubMetrics) {
			require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
			require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, &execMsg1))
			require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 2, nil))
			require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(52), Number: 52}, 500, 0, &execMsg2))
			require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(52), Number: 52}, 500, 1, &execMsg3))
		},
		func(t *testing.T, db *DB, m *stubMetrics) {
			// Should find added logs
			// Logs added with a nil exec message yield the zero-value ExecutingMessage.
			requireExecutingMessage(t, db, 50, 0, types.ExecutingMessage{})
			requireExecutingMessage(t, db, 50, 1, execMsg1)
			requireExecutingMessage(t, db, 50, 2, types.ExecutingMessage{})
			requireExecutingMessage(t, db, 52, 0, execMsg2)
			requireExecutingMessage(t, db, 52, 1, execMsg3)
			// Should not find log when block number too low
			requireNotContains(t, db, 49, 0, createHash(1))
			// Should not find log when block number too high
			requireNotContains(t, db, 51, 0, createHash(1))
			// Should not find log when requested log after end of database
			requireNotContains(t, db, 52, 2, createHash(3))
			requireNotContains(t, db, 53, 0, createHash(3))
			// Should not find log when log index too high
			requireNotContains(t, db, 50, 3, createHash(2))
		})
}
// TestGetBlockInfo checks ClosestBlockInfo: it returns the block recorded in
// the newest search checkpoint at or before the requested block number, and
// io.EOF when no checkpoint precedes the request.
func TestGetBlockInfo(t *testing.T) {
	t.Run("ReturnsEOFWhenEmpty", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {},
			func(t *testing.T, db *DB, m *stubMetrics) {
				_, _, err := db.ClosestBlockInfo(10)
				require.ErrorIs(t, err, io.EOF)
			})
	})
	t.Run("ReturnsEOFWhenRequestedBlockBeforeFirstSearchCheckpoint", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(11), Number: 11}, 500, 0, nil)
				require.NoError(t, err)
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				// First checkpoint records block 11, so block 10 precedes all checkpoints.
				_, _, err := db.ClosestBlockInfo(10)
				require.ErrorIs(t, err, io.EOF)
			})
	})
	t.Run("ReturnFirstBlockInfo", func(t *testing.T) {
		block := eth.BlockID{Hash: createHash(11), Number: 11}
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				err := db.AddLog(createTruncatedHash(1), block, 500, 0, nil)
				require.NoError(t, err)
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				// Any request at or after the only checkpoint returns its block.
				requireClosestBlockInfo(t, db, 11, block.Number, block.Hash)
				requireClosestBlockInfo(t, db, 12, block.Number, block.Hash)
				requireClosestBlockInfo(t, db, 200, block.Number, block.Hash)
			})
	})
	t.Run("ReturnClosestCheckpointBlockInfo", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				for i := 1; i < searchCheckpointFrequency+3; i++ {
					block := eth.BlockID{Hash: createHash(i), Number: uint64(i)}
					err := db.AddLog(createTruncatedHash(i), block, uint64(i)*2, 0, nil)
					require.NoError(t, err)
				}
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				// Expect block from the first checkpoint
				requireClosestBlockInfo(t, db, 1, 1, createHash(1))
				requireClosestBlockInfo(t, db, 10, 1, createHash(1))
				requireClosestBlockInfo(t, db, searchCheckpointFrequency-3, 1, createHash(1))
				// Expect block from the second checkpoint
				// 2 entries used for initial checkpoint but we start at block 1
				secondCheckpointBlockNum := searchCheckpointFrequency - 1
				requireClosestBlockInfo(t, db, uint64(secondCheckpointBlockNum), uint64(secondCheckpointBlockNum), createHash(secondCheckpointBlockNum))
				requireClosestBlockInfo(t, db, uint64(secondCheckpointBlockNum)+1, uint64(secondCheckpointBlockNum), createHash(secondCheckpointBlockNum))
				requireClosestBlockInfo(t, db, uint64(secondCheckpointBlockNum)+2, uint64(secondCheckpointBlockNum), createHash(secondCheckpointBlockNum))
			})
	})
}
// requireClosestBlockInfo asserts that ClosestBlockInfo(searchFor) succeeds and
// reports the expected block number and the truncated form of expectedHash.
func requireClosestBlockInfo(t *testing.T, db *DB, searchFor uint64, expectedBlockNum uint64, expectedHash common.Hash) {
	num, truncated, err := db.ClosestBlockInfo(searchFor)
	require.NoError(t, err)
	require.Equal(t, expectedBlockNum, num)
	require.Equal(t, types.TruncateHash(expectedHash), truncated)
}
// requireContains asserts that the log (blockNum, logIdx, logHash) is present
// in the database, that the search stayed within a single checkpoint span, and
// that the stored executing message matches the optional execMsg (at most one;
// the zero value is expected when omitted).
func requireContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash, execMsg ...types.ExecutingMessage) {
	require.LessOrEqual(t, len(execMsg), 1, "cannot have multiple executing messages for a single log")
	metrics, ok := db.m.(*stubMetrics)
	require.True(t, ok, "Did not get the expected metrics type")
	found, err := db.Contains(blockNum, logIdx, types.TruncateHash(logHash))
	require.NoErrorf(t, err, "Error searching for log %v in block %v", logIdx, blockNum)
	require.Truef(t, found, "Did not find log %v in block %v with hash %v", logIdx, blockNum, logHash)
	require.LessOrEqual(t, metrics.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
	require.NotZero(t, metrics.entriesReadForSearch, "Must read at least some entries to find the log")
	var expectedExecMsg types.ExecutingMessage
	if len(execMsg) == 1 {
		expectedExecMsg = execMsg[0]
	}
	requireExecutingMessage(t, db, blockNum, logIdx, expectedExecMsg)
}
// requireNotContains asserts that the log (blockNum, logIdx, logHash) is not in
// the database: Contains reports false and Executes reports ErrNotFound. Both
// lookups must stay within a single checkpoint span.
func requireNotContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash) {
	metrics, ok := db.m.(*stubMetrics)
	require.True(t, ok, "Did not get the expected metrics type")
	found, err := db.Contains(blockNum, logIdx, types.TruncateHash(logHash))
	require.NoErrorf(t, err, "Error searching for log %v in block %v", logIdx, blockNum)
	require.Falsef(t, found, "Found unexpected log %v in block %v with hash %v", logIdx, blockNum, logHash)
	require.LessOrEqual(t, metrics.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
	_, err = db.Executes(blockNum, logIdx)
	require.ErrorIs(t, err, ErrNotFound, "Found unexpected log when getting executing message")
	require.LessOrEqual(t, metrics.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
}
// requireExecutingMessage asserts that Executes returns execMsg for the given
// log position and that the search stayed within a single checkpoint span.
func requireExecutingMessage(t *testing.T, db *DB, blockNum uint64, logIdx uint32, execMsg types.ExecutingMessage) {
	metrics, ok := db.m.(*stubMetrics)
	require.True(t, ok, "Did not get the expected metrics type")
	actual, err := db.Executes(blockNum, logIdx)
	require.NoError(t, err, "Error when searching for executing message")
	require.Equal(t, execMsg, actual, "Should return matching executing message")
	require.LessOrEqual(t, metrics.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
	require.NotZero(t, metrics.entriesReadForSearch, "Must read at least some entries to find the log")
}
// requireWrongHash asserts that a log exists at (blockNum, logIdx) but does not
// match logHash: Contains reports false, while Executes still succeeds and
// returns execMsg (the zero value when the stored log has no executing message).
//
// Fix: the execMsg parameter was previously accepted but never used and the
// Executes result was discarded; it is now asserted against execMsg.
func requireWrongHash(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash, execMsg types.ExecutingMessage) {
	m, ok := db.m.(*stubMetrics)
	require.True(t, ok, "Did not get the expected metrics type")
	result, err := db.Contains(blockNum, logIdx, types.TruncateHash(logHash))
	require.NoErrorf(t, err, "Error searching for log %v in block %v", logIdx, blockNum)
	require.Falsef(t, result, "Found unexpected log %v in block %v with hash %v", logIdx, blockNum, logHash)
	actual, err := db.Executes(blockNum, logIdx)
	require.NoError(t, err, "Error when searching for executing message")
	require.Equal(t, execMsg, actual, "Should return the executing message of the log that is present")
	require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
}
// TestRecoverOnCreate checks that opening a database over an entry store whose
// last entries form an incomplete record truncates back to the last complete
// record, while complete records are preserved. Each case is run with and
// without a valid prefix of events.
//
// NOTE(review): this region contains merge damage — subtests of
// TestChainsDB_AddLog (a different file's content) are interleaved into this
// function body below, and the braces no longer balance. The code is preserved
// byte-for-byte here; the interleaving must be resolved manually against the
// original commit.
func TestRecoverOnCreate(t *testing.T) {
	createDb := func(t *testing.T, store *stubEntryStore) (*DB, *stubMetrics, error) {
		logger := testlog.Logger(t, log.LvlInfo)
		m := &stubMetrics{}
		db, err := NewFromEntryStore(logger, m, store)
		return db, m, err
	}
	validInitEvent, err := newInitiatingEvent(logContext{blockNum: 1, logIdx: 0}, 1, 0, createTruncatedHash(1), false)
	require.NoError(t, err)
	validEventSequence := []entrydb.Entry{
		newSearchCheckpoint(1, 0, 100).encode(),
		newCanonicalHash(createTruncatedHash(344)).encode(),
		validInitEvent.encode(),
	}
	var emptyEventSequence []entrydb.Entry
	for _, prefixEvents := range [][]entrydb.Entry{emptyEventSequence, validEventSequence} {
		prefixEvents := prefixEvents
		storeWithEvents := func(evts ...entrydb.Entry) *stubEntryStore {
			store := &stubEntryStore{}
			store.entries = append(store.entries, prefixEvents...)
			store.entries = append(store.entries, evts...)
			return store
		}
		t.Run(fmt.Sprintf("PrefixEvents-%v", len(prefixEvents)), func(t *testing.T) {
			t.Run("NoTruncateWhenLastEntryIsLogWithNoExecMessage", func(t *testing.T) {
				initEvent, err := newInitiatingEvent(logContext{blockNum: 3, logIdx: 0}, 3, 0, createTruncatedHash(1), false)
				require.NoError(t, err)
				store := storeWithEvents(
					newSearchCheckpoint(3, 0, 100).encode(),
					newCanonicalHash(createTruncatedHash(344)).encode(),
					initEvent.encode(),
				)
				db, m, err := createDb(t, store)
				require.NoError(t, err)
				require.EqualValues(t, len(prefixEvents)+3, m.entryCount)
				requireContains(t, db, 3, 0, createHash(1))
			})
			t.Run("NoTruncateWhenLastEntryIsExecutingCheck", func(t *testing.T) {
				initEvent, err := newInitiatingEvent(logContext{blockNum: 3, logIdx: 0}, 3, 0, createTruncatedHash(1), true)
				execMsg := types.ExecutingMessage{
					Chain:     4,
					BlockNum:  10,
					LogIdx:    4,
					Timestamp: 1288,
					Hash:      createTruncatedHash(4),
				}
				require.NoError(t, err)
				linkEvt, err := newExecutingLink(execMsg)
				require.NoError(t, err)
				store := storeWithEvents(
					newSearchCheckpoint(3, 0, 100).encode(),
					newCanonicalHash(createTruncatedHash(344)).encode(),
					initEvent.encode(),
					linkEvt.encode(),
					newExecutingCheck(execMsg.Hash).encode(),
				)
				db, m, err := createDb(t, store)
				require.NoError(t, err)
				require.EqualValues(t, len(prefixEvents)+5, m.entryCount)
				requireContains(t, db, 3, 0, createHash(1), execMsg)
// NOTE(review): merge damage — TestChainsDB_AddLog from another file begins
// here, cutting off the subtest above (its closing `})` lines are missing).
func TestChainsDB_AddLog(t *testing.T) {
	t.Run("UnknownChain", func(t *testing.T) {
		db := NewChainsDB(nil, &stubHeadStorage{})
		err := db.AddLog(types.ChainIDFromUInt64(2), backendTypes.TruncatedHash{}, eth.BlockID{}, 1234, 33, nil)
		require.ErrorIs(t, err, ErrUnknownChain)
	})
			t.Run("TruncateWhenLastEntrySearchCheckpoint", func(t *testing.T) {
				store := storeWithEvents(newSearchCheckpoint(3, 0, 100).encode())
				_, m, err := createDb(t, store)
				require.NoError(t, err)
				require.EqualValues(t, len(prefixEvents), m.entryCount)
	// NOTE(review): merge damage — "KnownChain" subtest of TestChainsDB_AddLog
	// spliced into the middle of "TruncateWhenLastEntrySearchCheckpoint".
	t.Run("KnownChain", func(t *testing.T) {
		chainID := types.ChainIDFromUInt64(1)
		logDB := &stubLogDB{}
		db := NewChainsDB(map[types.ChainID]LogStorage{
			chainID: logDB,
		}, &stubHeadStorage{})
		err := db.AddLog(chainID, backendTypes.TruncatedHash{}, eth.BlockID{}, 1234, 33, nil)
		require.NoError(t, err, err)
		require.Equal(t, 1, logDB.addLogCalls)
	})
			t.Run("TruncateWhenLastEntryCanonicalHash", func(t *testing.T) {
				store := storeWithEvents(
					newSearchCheckpoint(3, 0, 100).encode(),
					newCanonicalHash(createTruncatedHash(344)).encode(),
				)
				_, m, err := createDb(t, store)
				require.NoError(t, err)
				require.EqualValues(t, len(prefixEvents), m.entryCount)
			})
			t.Run("TruncateWhenLastEntryInitEventWithExecMsg", func(t *testing.T) {
				initEvent, err := newInitiatingEvent(logContext{blockNum: 3, logIdx: 0}, 3, 0, createTruncatedHash(1), true)
				require.NoError(t, err)
				store := storeWithEvents(
					newSearchCheckpoint(3, 0, 100).encode(),
					newCanonicalHash(createTruncatedHash(344)).encode(),
					initEvent.encode(),
				)
				_, m, err := createDb(t, store)
				require.NoError(t, err)
				require.EqualValues(t, len(prefixEvents), m.entryCount)
			})
			t.Run("TruncateWhenLastEntryInitEventWithExecLink", func(t *testing.T) {
				initEvent, err := newInitiatingEvent(logContext{blockNum: 3, logIdx: 0}, 3, 0, createTruncatedHash(1), true)
				require.NoError(t, err)
				execMsg := types.ExecutingMessage{
					Chain:     4,
					BlockNum:  10,
					LogIdx:    4,
					Timestamp: 1288,
					Hash:      createTruncatedHash(4),
				}
				require.NoError(t, err)
				linkEvt, err := newExecutingLink(execMsg)
				require.NoError(t, err)
				store := storeWithEvents(
					newSearchCheckpoint(3, 0, 100).encode(),
					newCanonicalHash(createTruncatedHash(344)).encode(),
					initEvent.encode(),
					linkEvt.encode(),
				)
				_, m, err := createDb(t, store)
				require.NoError(t, err)
				require.EqualValues(t, len(prefixEvents), m.entryCount)
			})
		})
	}
}
// TestRewind checks that Rewind truncates the database back to the given head
// block number, that rewinding keeps earlier logs intact, and that rewound
// blocks can be re-added.
//
// NOTE(review): this region contains merge damage — subtests of
// TestChainsDB_Rewind (a different file's content) are interleaved into this
// function body below, and the braces no longer balance. The code is preserved
// byte-for-byte here; the interleaving must be resolved manually against the
// original commit.
func TestRewind(t *testing.T) {
	t.Run("WhenEmpty", func(t *testing.T) {
		runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) {},
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.Rewind(100))
				require.NoError(t, db.Rewind(0))
			})
	})
	t.Run("AfterLastBlock", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(4), eth.BlockID{Hash: createHash(74), Number: 74}, 700, 0, nil))
				require.NoError(t, db.Rewind(75))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 50, 0, createHash(1))
				requireContains(t, db, 50, 1, createHash(2))
				requireContains(t, db, 51, 0, createHash(3))
				requireContains(t, db, 74, 0, createHash(4))
			})
// NOTE(review): merge damage — TestChainsDB_Rewind from another file begins
// here; the closing `})` of the "AfterLastBlock" subtest is missing.
func TestChainsDB_Rewind(t *testing.T) {
	t.Run("UnknownChain", func(t *testing.T) {
		db := NewChainsDB(nil, &stubHeadStorage{})
		err := db.Rewind(types.ChainIDFromUInt64(2), 42)
		require.ErrorIs(t, err, ErrUnknownChain)
	})
	t.Run("BeforeFirstBlock", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
				require.NoError(t, db.Rewind(25))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireNotContains(t, db, 50, 0, createHash(1))
				requireNotContains(t, db, 50, 0, createHash(1))
				require.Zero(t, m.entryCount)
			})
	})
	t.Run("AtFirstBlock", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 1, nil))
				require.NoError(t, db.Rewind(50))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 50, 0, createHash(1))
				requireContains(t, db, 50, 1, createHash(2))
				requireNotContains(t, db, 51, 0, createHash(1))
				requireNotContains(t, db, 51, 1, createHash(2))
			})
	})
	t.Run("AtSecondCheckpoint", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				for i := uint32(0); m.entryCount < searchCheckpointFrequency; i++ {
					require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, i, nil))
				}
				require.EqualValues(t, searchCheckpointFrequency, m.entryCount)
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0, nil))
				require.EqualValues(t, searchCheckpointFrequency+3, m.entryCount, "Should have inserted new checkpoint and extra log")
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 1, nil))
				require.NoError(t, db.Rewind(50))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.EqualValues(t, searchCheckpointFrequency, m.entryCount, "Should have deleted second checkpoint")
				requireContains(t, db, 50, 0, createHash(1))
				requireContains(t, db, 50, 1, createHash(1))
				requireNotContains(t, db, 51, 0, createHash(1))
				requireNotContains(t, db, 51, 1, createHash(2))
			})
	})
	t.Run("BetweenLogEntries", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil))
				require.NoError(t, db.Rewind(55))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 50, 0, createHash(1))
				requireContains(t, db, 50, 1, createHash(2))
				requireNotContains(t, db, 60, 0, createHash(1))
				requireNotContains(t, db, 60, 1, createHash(2))
			})
	// NOTE(review): merge damage — "KnownChain" subtest of TestChainsDB_Rewind
	// spliced in here; the closing `})` of "BetweenLogEntries" is missing.
	t.Run("KnownChain", func(t *testing.T) {
		chainID := types.ChainIDFromUInt64(1)
		logDB := &stubLogDB{}
		db := NewChainsDB(map[types.ChainID]LogStorage{
			chainID: logDB,
		}, &stubHeadStorage{})
		err := db.Rewind(chainID, 23)
		require.NoError(t, err, err)
		require.EqualValues(t, 23, logDB.headBlockNum)
	})
	t.Run("AtExistingLogEntry", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 1, nil))
				require.NoError(t, db.Rewind(60))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 59, 0, createHash(1))
				requireContains(t, db, 59, 1, createHash(2))
				requireContains(t, db, 60, 0, createHash(1))
				requireContains(t, db, 60, 1, createHash(2))
				requireNotContains(t, db, 61, 0, createHash(1))
				requireNotContains(t, db, 61, 1, createHash(2))
			})
	})
	t.Run("AtLastEntry", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(70), Number: 70}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(70), Number: 70}, 502, 1, nil))
				require.NoError(t, db.Rewind(70))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 50, 0, createHash(1))
				requireContains(t, db, 50, 1, createHash(2))
				requireContains(t, db, 60, 0, createHash(1))
				requireContains(t, db, 60, 1, createHash(2))
				requireContains(t, db, 70, 0, createHash(1))
				requireContains(t, db, 70, 1, createHash(2))
			})
	})
	t.Run("ReaddDeletedBlocks", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 1, nil))
				require.NoError(t, db.Rewind(60))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				err := db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1, nil)
				require.ErrorIs(t, err, ErrLogOutOfOrder, "Cannot add block before rewound head")
				err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil)
				require.ErrorIs(t, err, ErrLogOutOfOrder, "Cannot add block that was rewound to")
				err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 61}, 502, 0, nil)
				require.NoError(t, err, "Can re-add deleted block")
			})
	})
}
// stubMetrics captures the Metrics callbacks so tests can assert on the number
// of entries written and the number of entries read during a search.
type stubMetrics struct {
	// entryCount is the last value reported via RecordDBEntryCount.
	entryCount int64
	// entriesReadForSearch is the last value reported via RecordDBSearchEntriesRead.
	entriesReadForSearch int64
}

// RecordDBEntryCount stores the reported total entry count.
func (s *stubMetrics) RecordDBEntryCount(count int64) {
	s.entryCount = count
}

// RecordDBSearchEntriesRead stores the reported number of entries read by a search.
func (s *stubMetrics) RecordDBSearchEntriesRead(count int64) {
	s.entriesReadForSearch = count
}

// Compile-time check that stubMetrics satisfies the Metrics interface.
var _ Metrics = (*stubMetrics)(nil)
// stubHeadStorage is a no-op head storage used when constructing a ChainsDB in tests.
type stubHeadStorage struct{}

// NOTE(review): merge damage below — the declarations of stubEntryStore and
// stubLogDB (from two different files) are interleaved and several closing
// braces are missing. Code preserved byte-for-byte; resolve against the
// original commit.
type stubEntryStore struct {
	entries []entrydb.Entry
type stubLogDB struct {
	addLogCalls  int
	headBlockNum uint64
}
func (s *stubEntryStore) Size() int64 {
	return int64(len(s.entries))
func (s *stubLogDB) ClosestBlockInfo(_ uint64) (uint64, backendTypes.TruncatedHash, error) {
	panic("not implemented")
}
func (s *stubEntryStore) LastEntryIdx() entrydb.EntryIdx {
	return entrydb.EntryIdx(s.Size() - 1)
}
func (s *stubEntryStore) Read(idx entrydb.EntryIdx) (entrydb.Entry, error) {
	if idx < entrydb.EntryIdx(len(s.entries)) {
		return s.entries[idx], nil
	}
	return entrydb.Entry{}, io.EOF
func (s *stubLogDB) AddLog(logHash backendTypes.TruncatedHash, block eth.BlockID, timestamp uint64, logIdx uint32, execMsg *backendTypes.ExecutingMessage) error {
	s.addLogCalls++
	return nil
}
func (s *stubEntryStore) Append(entries ...entrydb.Entry) error {
	s.entries = append(s.entries, entries...)
func (s *stubLogDB) Rewind(newHeadBlockNum uint64) error {
	s.headBlockNum = newHeadBlockNum
	return nil
}
func (s *stubEntryStore) Truncate(idx entrydb.EntryIdx) error {
	s.entries = s.entries[:min(s.Size()-1, int64(idx+1))]
	return nil
func (s *stubLogDB) LatestBlockNum() uint64 {
	return s.headBlockNum
}
func (s *stubEntryStore) Close() error {
func (s *stubLogDB) Close() error {
	return nil
}
var _ EntryStore = (*stubEntryStore)(nil)
package heads
import (
"encoding/json"
"errors"
"fmt"
"os"
"sync"
"github.com/ethereum-optimism/optimism/op-service/jsonutil"
)
// HeadTracker records the current chain head pointers for a single chain.
type HeadTracker struct {
	// rwLock guards reads and replacements of current.
	rwLock sync.RWMutex

	// path is the file the heads are persisted to as JSON.
	path string

	// current is the latest committed set of heads; it is replaced wholesale
	// (never mutated in place) when an Apply succeeds.
	current *Heads
}
// NewHeadTracker creates a HeadTracker persisting to the file at path. Existing
// heads are loaded from that file; a missing file is not an error and yields
// empty heads.
func NewHeadTracker(path string) (*HeadTracker, error) {
	heads := NewHeads()
	data, err := os.ReadFile(path)
	switch {
	case errors.Is(err, os.ErrNotExist):
		// Nothing persisted yet; start from empty heads.
	case err != nil:
		return nil, fmt.Errorf("failed to read existing heads from %v: %w", path, err)
	default:
		if err := json.Unmarshal(data, heads); err != nil {
			return nil, fmt.Errorf("invalid existing heads file %v: %w", path, err)
		}
	}
	return &HeadTracker{path: path, current: heads}, nil
}
// Apply runs op against a copy of the current heads, persists the result to
// disk, and only then commits it as the new current heads. If the operation or
// the write fails, the in-memory state is left unchanged.
func (t *HeadTracker) Apply(op Operation) error {
	t.rwLock.Lock()
	defer t.rwLock.Unlock()
	// Mutate a copy so a failure at any step leaves t.current untouched.
	updated := t.current.Copy()
	if err := op.Apply(updated); err != nil {
		return fmt.Errorf("operation failed: %w", err)
	}
	if err := t.write(updated); err != nil {
		return fmt.Errorf("failed to store updated heads: %w", err)
	}
	// Persisted successfully; commit the update in memory.
	t.current = updated
	return nil
}
// Current returns a copy of the current heads, safe for the caller to read or
// modify without affecting the tracker.
func (t *HeadTracker) Current() *Heads {
	t.rwLock.RLock()
	defer t.rwLock.RUnlock()
	return t.current.Copy()
}
// write persists the given heads to t.path as JSON. It does not take the lock;
// callers (Apply) must hold it.
func (t *HeadTracker) write(heads *Heads) error {
	if err := jsonutil.WriteJSON(t.path, heads, 0o644); err != nil {
		return fmt.Errorf("failed to write new heads: %w", err)
	}
	return nil
}
// Close is a no-op: every successful Apply has already persisted its state.
func (t *HeadTracker) Close() error {
	return nil
}
package heads
import (
"errors"
"os"
"path/filepath"
"testing"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/stretchr/testify/require"
)
// TestHeads_SaveAndReload verifies that heads applied via Apply are both
// visible through Current and persisted, so a new tracker over the same file
// reloads identical state.
func TestHeads_SaveAndReload(t *testing.T) {
	path := filepath.Join(t.TempDir(), "heads.json")
	chainA := types.ChainIDFromUInt64(3)
	headsA := ChainHeads{
		Unsafe:         1,
		CrossUnsafe:    2,
		LocalSafe:      3,
		CrossSafe:      4,
		LocalFinalized: 5,
		CrossFinalized: 6,
	}
	chainB := types.ChainIDFromUInt64(5)
	headsB := ChainHeads{
		Unsafe:         11,
		CrossUnsafe:    12,
		LocalSafe:      13,
		CrossSafe:      14,
		LocalFinalized: 15,
		CrossFinalized: 16,
	}
	orig, err := NewHeadTracker(path)
	require.NoError(t, err)
	require.NoError(t, orig.Apply(OperationFn(func(heads *Heads) error {
		heads.Put(chainA, headsA)
		heads.Put(chainB, headsB)
		return nil
	})))
	require.Equal(t, orig.Current().Get(chainA), headsA)
	require.Equal(t, orig.Current().Get(chainB), headsB)
	// A fresh tracker must see exactly the same heads from disk.
	reloaded, err := NewHeadTracker(path)
	require.NoError(t, err)
	require.EqualValues(t, reloaded.Current(), orig.Current())
}
// TestHeads_NoChangesMadeIfOperationFails checks that when the operation given
// to Apply returns an error, the mutations it made are rolled back both in
// memory and on disk.
func TestHeads_NoChangesMadeIfOperationFails(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "heads.json")
	chainA := types.ChainIDFromUInt64(3)
	chainAHeads := ChainHeads{
		Unsafe:         1,
		CrossUnsafe:    2,
		LocalSafe:      3,
		CrossSafe:      4,
		LocalFinalized: 5,
		CrossFinalized: 6,
	}
	orig, err := NewHeadTracker(path)
	require.NoError(t, err)
	boom := errors.New("boom")
	err = orig.Apply(OperationFn(func(heads *Heads) error {
		// Mutate the heads then fail - the mutation must not stick.
		heads.Put(chainA, chainAHeads)
		return boom
	}))
	require.ErrorIs(t, err, boom)
	// In-memory state must still be the zero value.
	require.Equal(t, ChainHeads{}, orig.Current().Get(chainA))
	// Should be able to load from disk too
	loaded, err := NewHeadTracker(path)
	require.NoError(t, err)
	require.EqualValues(t, loaded.Current(), orig.Current())
}
// TestHeads_NoChangesMadeIfWriteFails checks that a failure to persist the
// updated heads leaves the in-memory state unchanged. The path points into a
// directory that does not exist, so the write is guaranteed to fail.
func TestHeads_NoChangesMadeIfWriteFails(t *testing.T) {
	dir := t.TempDir()
	// "invalid" is never created, so writing this file fails with ErrNotExist.
	path := filepath.Join(dir, "invalid/heads.json")
	chainA := types.ChainIDFromUInt64(3)
	chainAHeads := ChainHeads{
		Unsafe:         1,
		CrossUnsafe:    2,
		LocalSafe:      3,
		CrossSafe:      4,
		LocalFinalized: 5,
		CrossFinalized: 6,
	}
	orig, err := NewHeadTracker(path)
	require.NoError(t, err)
	err = orig.Apply(OperationFn(func(heads *Heads) error {
		heads.Put(chainA, chainAHeads)
		return nil
	}))
	require.ErrorIs(t, err, os.ErrNotExist)
	// The failed write must not have updated the in-memory heads.
	require.Equal(t, ChainHeads{}, orig.Current().Get(chainA))
}
package heads
import (
"encoding/json"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/common/hexutil"
)
// ChainHeads provides the serialization format for the current chain heads.
// Each field is an entrydb.EntryIdx into the chain's log database.
//
// Design notes (unresolved): the values here could be block numbers or just
// the index of entries in the log db. If they're log db entries, we can't
// detect if things changed because of a reorg though (if the logdb write
// succeeded and head update failed). So we probably need to store actual
// block IDs here... but then we don't have the block hash for every block in
// the log db. Only jumping the head forward on checkpoint blocks doesn't work
// though...
type ChainHeads struct {
	Unsafe         entrydb.EntryIdx `json:"localUnsafe"`
	CrossUnsafe    entrydb.EntryIdx `json:"crossUnsafe"`
	LocalSafe      entrydb.EntryIdx `json:"localSafe"`
	CrossSafe      entrydb.EntryIdx `json:"crossSafe"`
	LocalFinalized entrydb.EntryIdx `json:"localFinalized"`
	CrossFinalized entrydb.EntryIdx `json:"crossFinalized"`
}
// Heads tracks the current head indices for each known chain.
type Heads struct {
	Chains map[types.ChainID]ChainHeads
}
// NewHeads creates an empty Heads collection ready to record chain heads.
func NewHeads() *Heads {
	heads := &Heads{}
	heads.Chains = make(map[types.ChainID]ChainHeads)
	return heads
}
// Get returns the heads recorded for the given chain, or a zero-valued
// ChainHeads if the chain is unknown.
func (h *Heads) Get(id types.ChainID) ChainHeads {
	// A missing key yields the zero ChainHeads, which is exactly the value
	// the explicit existence check would return.
	return h.Chains[id]
}
// Put records the heads for the given chain, replacing any previous value.
func (h *Heads) Put(id types.ChainID, head ChainHeads) {
	h.Chains[id] = head
}
// Copy returns a deep copy of the heads so the caller can modify it without
// affecting the original (ChainHeads values are plain data, so copying the
// map entries is sufficient).
func (h *Heads) Copy() *Heads {
	// Pre-size the map to the source size to avoid rehashing while copying.
	c := &Heads{Chains: make(map[types.ChainID]ChainHeads, len(h.Chains))}
	for id, heads := range h.Chains {
		c.Chains[id] = heads
	}
	return c
}
// MarshalJSON encodes the heads as a JSON object keyed by hex-encoded chain ID.
// NOTE(review): value receiver here (vs pointer receivers elsewhere) means both
// Heads and *Heads use this custom encoding - presumably intentional; confirm.
func (h Heads) MarshalJSON() ([]byte, error) {
	data := make(map[hexutil.U256]ChainHeads)
	for id, heads := range h.Chains {
		data[hexutil.U256(id)] = heads
	}
	return json.Marshal(data)
}
// UnmarshalJSON decodes heads keyed by hex-encoded chain ID, as produced by
// MarshalJSON, replacing any existing contents of h.
func (h *Heads) UnmarshalJSON(data []byte) error {
	var hexData map[hexutil.U256]ChainHeads
	if err := json.Unmarshal(data, &hexData); err != nil {
		return err
	}
	h.Chains = make(map[types.ChainID]ChainHeads)
	for id, heads := range hexData {
		h.Put(types.ChainID(id), heads)
	}
	return nil
}
// Operation mutates a Heads instance; used with HeadTracker.Apply which
// persists the result atomically.
type Operation interface {
	Apply(head *Heads) error
}

// OperationFn adapts a plain function to the Operation interface.
type OperationFn func(heads *Heads) error

// Apply invokes the wrapped function.
func (f OperationFn) Apply(heads *Heads) error {
	return f(heads)
}
package heads
import (
"encoding/json"
"fmt"
"testing"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/stretchr/testify/require"
)
// TestHeads covers the Heads container: JSON round-tripping of multiple chain
// entries and isolation of copies produced by Copy.
func TestHeads(t *testing.T) {
	t.Run("RoundTripViaJson", func(t *testing.T) {
		heads := NewHeads()
		heads.Put(types.ChainIDFromUInt64(3), ChainHeads{
			Unsafe:         10,
			CrossUnsafe:    9,
			LocalSafe:      8,
			CrossSafe:      7,
			LocalFinalized: 6,
			CrossFinalized: 5,
		})
		heads.Put(types.ChainIDFromUInt64(9), ChainHeads{
			Unsafe:         90,
			CrossUnsafe:    80,
			LocalSafe:      70,
			CrossSafe:      60,
			LocalFinalized: 50,
			CrossFinalized: 40,
		})
		// Large chain ID to exercise the hex encoding of big values.
		heads.Put(types.ChainIDFromUInt64(4892497242424), ChainHeads{
			Unsafe:         1000,
			CrossUnsafe:    900,
			LocalSafe:      800,
			CrossSafe:      700,
			LocalFinalized: 600,
			CrossFinalized: 400,
		})
		j, err := json.Marshal(heads)
		require.NoError(t, err)
		fmt.Println(string(j))
		var result Heads
		err = json.Unmarshal(j, &result)
		require.NoError(t, err)
		require.Equal(t, heads.Chains, result.Chains)
	})
	t.Run("Copy", func(t *testing.T) {
		chainA := types.ChainIDFromUInt64(3)
		chainB := types.ChainIDFromUInt64(4)
		chainAOrigHeads := ChainHeads{
			Unsafe: 1,
		}
		chainAModifiedHeads1 := ChainHeads{
			Unsafe: 2,
		}
		chainAModifiedHeads2 := ChainHeads{
			Unsafe: 4,
		}
		chainBModifiedHeads := ChainHeads{
			Unsafe: 2,
		}
		heads := NewHeads()
		heads.Put(chainA, chainAOrigHeads)
		// Mutating the copy must not be visible in the original...
		otherHeads := heads.Copy()
		otherHeads.Put(chainA, chainAModifiedHeads1)
		otherHeads.Put(chainB, chainBModifiedHeads)
		require.Equal(t, heads.Get(chainA), chainAOrigHeads)
		require.Equal(t, heads.Get(chainB), ChainHeads{})
		// ...and mutating the original must not be visible in the copy.
		heads.Put(chainA, chainAModifiedHeads2)
		require.Equal(t, heads.Get(chainA), chainAModifiedHeads2)
		require.Equal(t, otherHeads.Get(chainA), chainAModifiedHeads1)
		require.Equal(t, otherHeads.Get(chainB), chainBModifiedHeads)
	})
}
package backend
package db
import (
"errors"
......@@ -10,7 +10,7 @@ import (
// Resume prepares the given LogStore to resume recording events.
// It returns the block number of the last block that is guaranteed to have been fully recorded to the database
// and rewinds the database to ensure it can resume recording from the first log of the next block.
func Resume(logDB LogStore) (uint64, error) {
func Resume(logDB LogStorage) error {
// Get the last checkpoint that was written then Rewind the db
// to the block prior to that block and start from there.
// Guarantees we will always roll back at least one block
......@@ -18,17 +18,17 @@ func Resume(logDB LogStore) (uint64, error) {
checkPointBlock, _, err := logDB.ClosestBlockInfo(math.MaxUint64)
if errors.Is(err, io.EOF) {
// No blocks recorded in the database, start from genesis
return 0, nil
return nil
} else if err != nil {
return 0, fmt.Errorf("failed to get block from checkpoint: %w", err)
return fmt.Errorf("failed to get block from checkpoint: %w", err)
}
if checkPointBlock == 0 {
return 0, nil
return nil
}
block := checkPointBlock - 1
err = logDB.Rewind(block)
if err != nil {
return 0, fmt.Errorf("failed to 'Rewind' the database: %w", err)
return fmt.Errorf("failed to rewind the database: %w", err)
}
return block, nil
return nil
}
package backend
package db
import (
"fmt"
"io"
"testing"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
"github.com/stretchr/testify/require"
)
......@@ -13,34 +14,29 @@ func TestRecover(t *testing.T) {
tests := []struct {
name string
stubDB *stubLogStore
expectedBlockNum uint64
expectRewoundTo uint64
}{
{
name: "emptydb",
stubDB: &stubLogStore{closestBlockErr: fmt.Errorf("no entries: %w", io.EOF)},
expectedBlockNum: 0,
expectRewoundTo: 0,
},
{
name: "genesis",
stubDB: &stubLogStore{},
expectedBlockNum: 0,
expectRewoundTo: 0,
},
{
name: "with_blocks",
stubDB: &stubLogStore{closestBlockNumber: 15},
expectedBlockNum: 14,
expectRewoundTo: 14,
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
block, err := Resume(test.stubDB)
err := Resume(test.stubDB)
require.NoError(t, err)
require.Equal(t, test.expectedBlockNum, block)
require.Equal(t, test.expectRewoundTo, test.stubDB.rewoundTo)
})
}
......@@ -52,10 +48,6 @@ type stubLogStore struct {
rewoundTo uint64
}
func (s *stubLogStore) Close() error {
return nil
}
func (s *stubLogStore) ClosestBlockInfo(blockNum uint64) (uint64, types.TruncatedHash, error) {
if s.closestBlockErr != nil {
return 0, types.TruncatedHash{}, s.closestBlockErr
......@@ -67,3 +59,15 @@ func (s *stubLogStore) Rewind(headBlockNum uint64) error {
s.rewoundTo = headBlockNum
return nil
}
// AddLog is not exercised by Resume; panic to surface unexpected use.
func (s *stubLogStore) AddLog(logHash types.TruncatedHash, block eth.BlockID, timestamp uint64, logIdx uint32, execMsg *types.ExecutingMessage) error {
	panic("not supported")
}

// LatestBlockNum is not exercised by Resume; panic to surface unexpected use.
func (s *stubLogStore) LatestBlockNum() uint64 {
	panic("not supported")
}

// Close is a no-op; the stub holds no resources.
func (s *stubLogStore) Close() error {
	return nil
}
package logs
import (
"errors"
"fmt"
"io"
"math"
"sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
"github.com/ethereum/go-ethereum/log"
)
const (
	// searchCheckpointFrequency is the spacing of search checkpoint entries:
	// every entry index divisible by this must be a checkpoint so binary
	// search can land on one directly.
	searchCheckpointFrequency = 256

	// Bit flags stored in the initiating event's flags byte.
	eventFlagIncrementLogIdx     = byte(1)
	eventFlagHasExecutingMessage = byte(1) << 1
)

const (
	// Entry type discriminators, stored as the first byte of each entry.
	typeSearchCheckpoint byte = iota
	typeCanonicalHash
	typeInitiatingEvent
	typeExecutingLink
	typeExecutingCheck
)

var (
	// ErrLogOutOfOrder is returned when a log does not directly follow the
	// last recorded position.
	ErrLogOutOfOrder = errors.New("log out of order")
	// ErrDataCorruption indicates the entry store contains malformed data.
	ErrDataCorruption = errors.New("data corruption")
	// ErrNotFound indicates the requested log is not in the database.
	ErrNotFound = errors.New("not found")
)
// Metrics records database entry counts and search statistics.
type Metrics interface {
	RecordDBEntryCount(count int64)
	RecordDBSearchEntriesRead(count int64)
}
// logContext is the position of the most recently processed log: the block it
// belongs to and its index within that block's logs.
type logContext struct {
	blockNum uint64
	logIdx   uint32
}

// EntryStore is the append-only fixed-size-entry storage the DB is built on.
type EntryStore interface {
	Size() int64
	LastEntryIdx() entrydb.EntryIdx
	Read(idx entrydb.EntryIdx) (entrydb.Entry, error)
	Append(entries ...entrydb.Entry) error
	Truncate(idx entrydb.EntryIdx) error
	Close() error
}
// DB implements an append only database for log data and cross-chain dependencies.
//
// To keep the append-only format, reduce data size, and support reorg detection and registering of executing-messages:
//
// Use a fixed 24 bytes per entry.
//
// Data is an append-only log, that can be binary searched for any necessary event data.
//
// Rules:
// if entry_index % 256 == 0: must be type 0. For easy binary search.
// type 1 always adjacent to type 0
// type 2 "diff" values are offsets from type 0 values (always within 256 entries range)
// type 3 always after type 2
// type 4 always after type 3
//
// Types (<type> = 1 byte):
// type 0: "search checkpoint" <type><uint64 block number: 8 bytes><uint32 event index offset: 4 bytes><uint64 timestamp: 8 bytes> = 20 bytes
// type 1: "canonical hash" <type><parent blockhash truncated: 20 bytes> = 21 bytes
// type 2: "initiating event" <type><blocknum diff: 1 byte><event flags: 1 byte><event-hash: 20 bytes> = 23 bytes
// type 3: "executing link" <type><chain: 4 bytes><blocknum: 8 bytes><event index: 3 bytes><uint64 timestamp: 8 bytes> = 24 bytes
// type 4: "executing check" <type><event-hash: 20 bytes> = 21 bytes
// other types: future compat. E.g. for linking to L1, registering block-headers as a kind of initiating-event, tracking safe-head progression, etc.
//
// Right-pad each entry that is not 24 bytes.
//
// event-flags: each bit represents a boolean value, currently only two are defined
// * event-flags & 0x01 - true if the log index should increment. Should only be false when the event is immediately after a search checkpoint and canonical hash
// * event-flags & 0x02 - true if the initiating event has an executing link that should follow. Allows detecting when the executing link failed to write.
// event-hash: H(origin, timestamp, payloadhash); enough to check identifier matches & payload matches.
type DB struct {
	log   log.Logger
	m     Metrics
	store EntryStore
	// rwLock guards store access and lastEntryContext.
	rwLock sync.RWMutex
	// lastEntryContext is the block/log position of the last recorded log.
	lastEntryContext logContext
}
// NewFromFile opens the entry DB at path and wraps it in a log DB,
// recovering the last recorded position from the stored entries.
func NewFromFile(logger log.Logger, m Metrics, path string) (*DB, error) {
	store, err := entrydb.NewEntryDB(logger, path)
	if err != nil {
		return nil, fmt.Errorf("failed to open DB: %w", err)
	}
	return NewFromEntryStore(logger, m, store)
}
// NewFromEntryStore wraps an existing entry store in a log DB, scanning the
// stored entries to recover the last recorded log position.
func NewFromEntryStore(logger log.Logger, m Metrics, store EntryStore) (*DB, error) {
	db := &DB{log: logger, m: m, store: store}
	if err := db.init(); err != nil {
		return nil, fmt.Errorf("failed to init database: %w", err)
	}
	return db, nil
}
// lastEntryIdx returns the index of the last entry in the store; callers
// treat a negative value as an empty database (see init).
func (db *DB) lastEntryIdx() entrydb.EntryIdx {
	return db.store.LastEntryIdx()
}
// init derives lastEntryContext from the stored entries so recording can
// resume where it left off: it drops any invalid trailing entries, then
// replays logs from the last search checkpoint to the end of the file.
func (db *DB) init() error {
	defer db.updateEntryCountMetric() // Always update the entry count metric after init completes
	if err := db.trimInvalidTrailingEntries(); err != nil {
		return fmt.Errorf("failed to trim invalid trailing entries: %w", err)
	}
	if db.lastEntryIdx() < 0 {
		// Database is empty so no context to load
		return nil
	}
	// Start from the last checkpoint - it carries a full block/log position
	// from which the iterator can track subsequent logs.
	lastCheckpoint := (db.lastEntryIdx() / searchCheckpointFrequency) * searchCheckpointFrequency
	i, err := db.newIterator(lastCheckpoint)
	if err != nil {
		return fmt.Errorf("failed to create iterator at last search checkpoint: %w", err)
	}
	// Read all entries until the end of the file
	for {
		_, _, _, err := i.NextLog()
		if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			return fmt.Errorf("failed to init from existing entries: %w", err)
		}
	}
	// After EOF the iterator's position is the last fully recorded log.
	db.lastEntryContext = i.current
	return nil
}
// trimInvalidTrailingEntries walks backwards from the end of the store and
// truncates any partially written tail. A valid final entry is either an
// executing check, or an initiating event with no executing message pending;
// anything after the last such entry is removed.
func (db *DB) trimInvalidTrailingEntries() error {
	i := db.lastEntryIdx()
	for ; i >= 0; i-- {
		entry, err := db.store.Read(i)
		if err != nil {
			return fmt.Errorf("failed to read %v to check for trailing entries: %w", i, err)
		}
		if entry[0] == typeExecutingCheck {
			// executing check is a valid final entry
			break
		}
		if entry[0] == typeInitiatingEvent {
			evt, err := newInitiatingEventFromEntry(entry)
			if err != nil {
				// Entry is invalid, keep walking backwards
				continue
			}
			if !evt.hasExecMsg {
				// init event with no exec msg is a valid final entry
				break
			}
		}
	}
	if i < db.lastEntryIdx() {
		db.log.Warn("Truncating unexpected trailing entries", "prev", db.lastEntryIdx(), "new", i)
		return db.store.Truncate(i)
	}
	return nil
}
// updateEntryCountMetric reports the current store size to the metrics sink.
func (db *DB) updateEntryCountMetric() {
	db.m.RecordDBEntryCount(db.store.Size())
}
// LatestBlockNum returns the block number of the last log recorded.
// Takes the read lock: lastEntryContext is mutated by AddLog and Rewind under
// the write lock, so an unguarded read here would be a data race.
func (db *DB) LatestBlockNum() uint64 {
	db.rwLock.RLock()
	defer db.rwLock.RUnlock()
	return db.lastEntryContext.blockNum
}
// ClosestBlockInfo returns the block number and hash of the highest recorded block at or before blockNum.
// Since block data is only recorded in search checkpoints, this may return an earlier block even if log data is
// recorded for the requested block.
func (db *DB) ClosestBlockInfo(blockNum uint64) (uint64, types.TruncatedHash, error) {
	db.rwLock.RLock()
	defer db.rwLock.RUnlock()
	checkpointIdx, err := db.searchCheckpoint(blockNum, math.MaxUint32)
	if err != nil {
		return 0, types.TruncatedHash{}, fmt.Errorf("no checkpoint at or before block %v found: %w", blockNum, err)
	}
	checkpoint, err := db.readSearchCheckpoint(checkpointIdx)
	if err != nil {
		// "read", not "reach": this wraps a read/decode failure.
		return 0, types.TruncatedHash{}, fmt.Errorf("failed to read checkpoint: %w", err)
	}
	// The canonical hash entry always directly follows its checkpoint.
	entry, err := db.readCanonicalHash(checkpointIdx + 1)
	if err != nil {
		return 0, types.TruncatedHash{}, fmt.Errorf("failed to read canonical hash: %w", err)
	}
	return checkpoint.blockNum, entry.hash, nil
}
// Contains returns true iff the specified logHash is recorded at the given
// blockNum and logIdx. logIdx is the index of the log in the array of all
// logs in the block. This can be used to check the validity of cross-chain
// interop events.
func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash types.TruncatedHash) (bool, error) {
	db.rwLock.RLock()
	defer db.rwLock.RUnlock()
	db.log.Trace("Checking for log", "blockNum", blockNum, "logIdx", logIdx, "hash", logHash)
	hash, _, err := db.findLogInfo(blockNum, logIdx)
	switch {
	case errors.Is(err, ErrNotFound):
		// No log recorded at that position.
		return false, nil
	case err != nil:
		return false, err
	}
	db.log.Trace("Found initiatingEvent", "blockNum", blockNum, "logIdx", logIdx, "hash", hash)
	// The position exists; report whether the recorded hash matches.
	return hash == logHash, nil
}
// Executes checks if the log identified by blockNum and logIdx has an
// ExecutingMessage associated with it that needs to be checked as part of
// interop validation. logIdx is the index of the log in the array of all logs
// in the block. Returns the ExecutingMessage if it exists, or a zero
// ExecutingMessage if the log is found but has none.
// Returns ErrNotFound if the specified log does not exist in the database.
func (db *DB) Executes(blockNum uint64, logIdx uint32) (types.ExecutingMessage, error) {
	db.rwLock.RLock()
	defer db.rwLock.RUnlock()
	_, it, err := db.findLogInfo(blockNum, logIdx)
	if err != nil {
		return types.ExecutingMessage{}, err
	}
	msg, err := it.ExecMessage()
	if err != nil {
		return types.ExecutingMessage{}, fmt.Errorf("failed to read executing message: %w", err)
	}
	return msg, nil
}
// findLogInfo locates the initiating event for the log at blockNum/logIdx,
// returning its hash and an iterator positioned at that event (so callers can
// read any associated executing message). Returns ErrNotFound if no such log
// is recorded.
func (db *DB) findLogInfo(blockNum uint64, logIdx uint32) (types.TruncatedHash, *iterator, error) {
	entryIdx, err := db.searchCheckpoint(blockNum, logIdx)
	if errors.Is(err, io.EOF) {
		// Did not find a checkpoint to start reading from so the log cannot be present.
		return types.TruncatedHash{}, nil, ErrNotFound
	} else if err != nil {
		return types.TruncatedHash{}, nil, err
	}
	i, err := db.newIterator(entryIdx)
	if err != nil {
		return types.TruncatedHash{}, nil, fmt.Errorf("failed to create iterator: %w", err)
	}
	db.log.Trace("Starting search", "entry", entryIdx, "blockNum", i.current.blockNum, "logIdx", i.current.logIdx)
	defer func() {
		db.m.RecordDBSearchEntriesRead(i.entriesRead)
	}()
	// Linear scan forward from the checkpoint; entries are ordered by
	// (blockNum, logIdx) so we can stop as soon as we pass the target.
	for {
		evtBlockNum, evtLogIdx, evtHash, err := i.NextLog()
		if errors.Is(err, io.EOF) {
			// Reached end of log without finding the event
			return types.TruncatedHash{}, nil, ErrNotFound
		} else if err != nil {
			return types.TruncatedHash{}, nil, fmt.Errorf("failed to read next log: %w", err)
		}
		if evtBlockNum == blockNum && evtLogIdx == logIdx {
			db.log.Trace("Found initiatingEvent", "blockNum", evtBlockNum, "logIdx", evtLogIdx, "hash", evtHash)
			return evtHash, i, nil
		}
		if evtBlockNum > blockNum || (evtBlockNum == blockNum && evtLogIdx > logIdx) {
			// Progressed past the requested log without finding it.
			return types.TruncatedHash{}, nil, ErrNotFound
		}
	}
}
// newIterator creates an iterator starting from the search checkpoint at
// startCheckpointEntry. Normally iteration begins two entries later (past the
// checkpoint and its canonical hash); if the checkpoint landed between an
// initiating event and its executing link/check, the start is stepped back to
// the initiating event so the whole group is read together.
func (db *DB) newIterator(startCheckpointEntry entrydb.EntryIdx) (*iterator, error) {
	checkpoint, err := db.readSearchCheckpoint(startCheckpointEntry)
	if err != nil {
		return nil, fmt.Errorf("failed to read search checkpoint entry %v: %w", startCheckpointEntry, err)
	}
	// +2 skips the checkpoint and the canonical hash entry that follows it.
	startIdx := startCheckpointEntry + 2
	firstEntry, err := db.store.Read(startIdx)
	if errors.Is(err, io.EOF) {
		// There should always be an entry after a checkpoint and canonical hash so an EOF here is data corruption
		return nil, fmt.Errorf("%w: no entry after checkpoint and canonical hash at %v", ErrDataCorruption, startCheckpointEntry)
	} else if err != nil {
		return nil, fmt.Errorf("failed to read first entry to iterate %v: %w", startCheckpointEntry+2, err)
	}
	startLogCtx := logContext{
		blockNum: checkpoint.blockNum,
		logIdx:   checkpoint.logIdx,
	}
	// Handle starting from a checkpoint after initiating-event but before its executing-link or executing-check
	if firstEntry[0] == typeExecutingLink || firstEntry[0] == typeExecutingCheck {
		if firstEntry[0] == typeExecutingLink {
			// The start checkpoint was between the initiating event and the executing link
			// Step back to read the initiating event. The checkpoint block data will be for the initiating event
			startIdx = startCheckpointEntry - 1
		} else {
			// The start checkpoint was between the executing link and the executing check
			// Step back to read the initiating event. The checkpoint block data will be for the initiating event
			startIdx = startCheckpointEntry - 2
		}
		initEntry, err := db.store.Read(startIdx)
		if err != nil {
			return nil, fmt.Errorf("failed to read prior initiating event: %w", err)
		}
		initEvt, err := newInitiatingEventFromEntry(initEntry)
		if err != nil {
			return nil, fmt.Errorf("invalid initiating event at idx %v: %w", startIdx, err)
		}
		startLogCtx = initEvt.preContext(startLogCtx)
	}
	i := &iterator{
		db: db,
		// startIdx was computed above: just past the checkpoint pair, or
		// stepped back to the initiating event when the checkpoint split a group.
		nextEntryIdx: startIdx,
		current:      startLogCtx,
	}
	return i, nil
}
// searchCheckpoint performs a binary search of the searchCheckpoint entries to find the closest one at or before
// the requested log (checkpoints are at every searchCheckpointFrequency-th entry).
// Returns the entry index of the searchCheckpoint to begin reading from, or io.EOF
// when no checkpoint exists at or before the requested position.
func (db *DB) searchCheckpoint(blockNum uint64, logIdx uint32) (entrydb.EntryIdx, error) {
	n := (db.lastEntryIdx() / searchCheckpointFrequency) + 1
	// Define x[-1] < target and x[n] >= target.
	// Invariant: x[i-1] < target, x[j] >= target.
	i, j := entrydb.EntryIdx(0), n
	for i < j {
		h := entrydb.EntryIdx(uint64(i+j) >> 1) // avoid overflow when computing h
		checkpoint, err := db.readSearchCheckpoint(h * searchCheckpointFrequency)
		if err != nil {
			return 0, fmt.Errorf("failed to read entry %v: %w", h, err)
		}
		// i ≤ h < j
		if checkpoint.blockNum < blockNum || (checkpoint.blockNum == blockNum && checkpoint.logIdx < logIdx) {
			i = h + 1 // preserves x[i-1] < target
		} else {
			j = h // preserves x[j] >= target
		}
	}
	if i < n {
		checkpoint, err := db.readSearchCheckpoint(i * searchCheckpointFrequency)
		if err != nil {
			return 0, fmt.Errorf("failed to read entry %v: %w", i, err)
		}
		if checkpoint.blockNum == blockNum && checkpoint.logIdx == logIdx {
			// Found entry at requested block number and log index
			return i * searchCheckpointFrequency, nil
		}
	}
	if i == 0 {
		// There are no checkpoints before the requested blocks
		return 0, io.EOF
	}
	// Not found, need to start reading from the entry prior
	return (i - 1) * searchCheckpointFrequency, nil
}
// AddLog records a log at the given block/log position, inserting search
// checkpoint + canonical hash pairs whenever an entry would land on a
// checkpoint boundary, and executing link/check entries when the log carries
// an executing message. All entries are appended in one call so a failed
// append records nothing. Returns ErrLogOutOfOrder if the position does not
// directly follow the last recorded log.
func (db *DB) AddLog(logHash types.TruncatedHash, block eth.BlockID, timestamp uint64, logIdx uint32, execMsg *types.ExecutingMessage) error {
	db.rwLock.Lock()
	defer db.rwLock.Unlock()
	// postState is the log context once this log is fully recorded.
	postState := logContext{
		blockNum: block.Number,
		logIdx:   logIdx,
	}
	if block.Number == 0 {
		return fmt.Errorf("%w: should not have logs in block 0", ErrLogOutOfOrder)
	}
	if db.lastEntryContext.blockNum > block.Number {
		return fmt.Errorf("%w: adding block %v, head block: %v", ErrLogOutOfOrder, block.Number, db.lastEntryContext.blockNum)
	}
	if db.lastEntryContext.blockNum == block.Number && db.lastEntryContext.logIdx+1 != logIdx {
		return fmt.Errorf("%w: adding log %v in block %v, but currently at log %v", ErrLogOutOfOrder, logIdx, block.Number, db.lastEntryContext.logIdx)
	}
	if db.lastEntryContext.blockNum < block.Number && logIdx != 0 {
		return fmt.Errorf("%w: adding log %v as first log in block %v", ErrLogOutOfOrder, logIdx, block.Number)
	}
	var entriesToAdd []entrydb.Entry
	newContext := db.lastEntryContext
	lastEntryIdx := db.lastEntryIdx()
	// addEntry buffers an entry and tracks the index it will occupy.
	addEntry := func(entry entrydb.Entry) {
		entriesToAdd = append(entriesToAdd, entry)
		lastEntryIdx++
	}
	// maybeAddCheckpoint emits a checkpoint + canonical hash pair when the
	// next entry would fall on a checkpoint boundary.
	maybeAddCheckpoint := func() {
		if (lastEntryIdx+1)%searchCheckpointFrequency == 0 {
			addEntry(newSearchCheckpoint(block.Number, logIdx, timestamp).encode())
			addEntry(newCanonicalHash(types.TruncateHash(block.Hash)).encode())
			newContext = postState
		}
	}
	maybeAddCheckpoint()
	evt, err := newInitiatingEvent(newContext, postState.blockNum, postState.logIdx, logHash, execMsg != nil)
	if err != nil {
		return fmt.Errorf("failed to create initiating event: %w", err)
	}
	addEntry(evt.encode())
	if execMsg != nil {
		maybeAddCheckpoint()
		link, err := newExecutingLink(*execMsg)
		if err != nil {
			return fmt.Errorf("failed to create executing link: %w", err)
		}
		addEntry(link.encode())
		maybeAddCheckpoint()
		addEntry(newExecutingCheck(execMsg.Hash).encode())
	}
	if err := db.store.Append(entriesToAdd...); err != nil {
		return fmt.Errorf("failed to append entries: %w", err)
	}
	db.lastEntryContext = postState
	db.updateEntryCountMetric()
	return nil
}
// Rewind the database to remove any blocks after headBlockNum.
// The block at headBlockNum itself is not removed.
// After truncating, init() is re-run to re-derive the last entry context.
func (db *DB) Rewind(headBlockNum uint64) error {
	db.rwLock.Lock()
	defer db.rwLock.Unlock()
	if headBlockNum >= db.lastEntryContext.blockNum {
		// Nothing to do
		return nil
	}
	// Find the last checkpoint before the block to remove
	idx, err := db.searchCheckpoint(headBlockNum+1, 0)
	if errors.Is(err, io.EOF) {
		// Requested a block prior to the first checkpoint
		// Delete everything without scanning forward
		idx = -1
	} else if err != nil {
		return fmt.Errorf("failed to find checkpoint prior to block %v: %w", headBlockNum, err)
	} else {
		// Scan forward from the checkpoint to find the first entry about a block after headBlockNum
		i, err := db.newIterator(idx)
		if err != nil {
			return fmt.Errorf("failed to create iterator when searching for rewind point: %w", err)
		}
		// If we don't find any useful logs after the checkpoint, we should delete the checkpoint itself
		// So move our delete marker back to include it as a starting point
		idx--
		for {
			blockNum, _, _, err := i.NextLog()
			if errors.Is(err, io.EOF) {
				// Reached end of file, we need to keep everything
				return nil
			} else if err != nil {
				return fmt.Errorf("failed to find rewind point: %w", err)
			}
			if blockNum > headBlockNum {
				// Found the first entry we don't need, so stop searching and delete everything after idx
				break
			}
			// Otherwise we need all of the entries the iterator just read
			idx = i.nextEntryIdx - 1
		}
	}
	// Truncate to contain idx+1 entries, since indices are 0 based, this deletes everything after idx
	if err := db.store.Truncate(idx); err != nil {
		return fmt.Errorf("failed to truncate to block %v: %w", headBlockNum, err)
	}
	// Use db.init() to find the log context for the new latest log entry
	if err := db.init(); err != nil {
		return fmt.Errorf("failed to find new last entry context: %w", err)
	}
	return nil
}
// readSearchCheckpoint reads and decodes the search checkpoint entry at entryIdx.
func (db *DB) readSearchCheckpoint(entryIdx entrydb.EntryIdx) (searchCheckpoint, error) {
	entry, err := db.store.Read(entryIdx)
	if err != nil {
		return searchCheckpoint{}, fmt.Errorf("failed to read entry %v: %w", entryIdx, err)
	}
	return newSearchCheckpointFromEntry(entry)
}
// readCanonicalHash reads and decodes the canonical hash entry at entryIdx.
func (db *DB) readCanonicalHash(entryIdx entrydb.EntryIdx) (canonicalHash, error) {
	entry, err := db.store.Read(entryIdx)
	if err != nil {
		return canonicalHash{}, fmt.Errorf("failed to read entry %v: %w", entryIdx, err)
	}
	return newCanonicalHashFromEntry(entry)
}
// Close closes the underlying entry store.
func (db *DB) Close() error {
	return db.store.Close()
}
package logs
import (
"bytes"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"testing"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
// createTruncatedHash builds a deterministic truncated hash from i for test fixtures.
func createTruncatedHash(i int) types.TruncatedHash {
	return types.TruncateHash(createHash(i))
}

// createHash builds a deterministic hash whose bytes are all byte(i).
func createHash(i int) common.Hash {
	data := bytes.Repeat([]byte{byte(i)}, common.HashLength)
	return common.BytesToHash(data)
}
// TestErrorOpeningDatabase checks that opening a DB in a nonexistent
// directory surfaces the underlying os.ErrNotExist.
func TestErrorOpeningDatabase(t *testing.T) {
	dir := t.TempDir()
	_, err := NewFromFile(testlog.Logger(t, log.LvlInfo), &stubMetrics{}, filepath.Join(dir, "missing-dir", "file.db"))
	require.ErrorIs(t, err, os.ErrNotExist)
}
// runDBTest runs setup then assert against a fresh database twice: once on
// the same instance ("New") and once after closing and reopening the backing
// file ("Existing"), verifying on-disk invariants at each reopen.
func runDBTest(t *testing.T, setup func(t *testing.T, db *DB, m *stubMetrics), assert func(t *testing.T, db *DB, m *stubMetrics)) {
	createDb := func(t *testing.T, dir string) (*DB, *stubMetrics, string) {
		logger := testlog.Logger(t, log.LvlInfo)
		path := filepath.Join(dir, "test.db")
		m := &stubMetrics{}
		db, err := NewFromFile(logger, m, path)
		require.NoError(t, err, "Failed to create database")
		t.Cleanup(func() {
			// The test body may have closed the db already; tolerate that.
			err := db.Close()
			if err != nil {
				require.ErrorIs(t, err, fs.ErrClosed)
			}
		})
		return db, m, path
	}
	t.Run("New", func(t *testing.T) {
		db, m, _ := createDb(t, t.TempDir())
		setup(t, db, m)
		assert(t, db, m)
	})
	t.Run("Existing", func(t *testing.T) {
		dir := t.TempDir()
		db, m, path := createDb(t, dir)
		setup(t, db, m)
		// Close and recreate the database
		require.NoError(t, db.Close())
		checkDBInvariants(t, path, m)
		db2, m, path := createDb(t, dir)
		assert(t, db2, m)
		checkDBInvariants(t, path, m)
	})
}
// TestEmptyDbDoesNotFindEntry checks that lookups against an empty database
// report not-contained rather than erroring.
func TestEmptyDbDoesNotFindEntry(t *testing.T) {
	runDBTest(t,
		func(t *testing.T, db *DB, m *stubMetrics) {},
		func(t *testing.T, db *DB, m *stubMetrics) {
			requireNotContains(t, db, 0, 0, createHash(1))
			requireNotContains(t, db, 0, 0, common.Hash{})
		})
}
func TestAddLog(t *testing.T) {
t.Run("BlockZero", func(t *testing.T) {
// There are no logs in the genesis block so recording an entry for block 0 should be rejected.
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 0}, 5000, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("FirstEntry", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
requireContains(t, db, 15, 0, createHash(1))
})
})
t.Run("MultipleEntriesFromSameBlock", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2, nil)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
require.EqualValues(t, 5, m.entryCount, "should not output new searchCheckpoint for every log")
requireContains(t, db, 15, 0, createHash(1))
requireContains(t, db, 15, 1, createHash(2))
requireContains(t, db, 15, 2, createHash(3))
})
})
t.Run("MultipleEntriesFromMultipleBlocks", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(16), Number: 16}, 5002, 0, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(4), eth.BlockID{Hash: createHash(16), Number: 16}, 5002, 1, nil)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
require.EqualValues(t, 6, m.entryCount, "should not output new searchCheckpoint for every block")
requireContains(t, db, 15, 0, createHash(1))
requireContains(t, db, 15, 1, createHash(2))
requireContains(t, db, 16, 0, createHash(3))
requireContains(t, db, 16, 1, createHash(4))
})
})
t.Run("ErrorWhenBeforeCurrentBlock", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4998, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenBeforeCurrentBlockButAfterLastCheckpoint", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(13), Number: 13}, 5000, 0, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4998, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenBeforeCurrentLogEvent", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil))
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenBeforeCurrentLogEventButAfterLastCheckpoint", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2, nil)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 1, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenAtCurrentLogEvent", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil))
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 1, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenAtCurrentLogEventButAfterLastCheckpoint", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2, nil))
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 2, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenSkippingLogEvent", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 2, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenFirstLogIsNotLogIdxZero", func(t *testing.T) {
runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) {},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 5, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenFirstLogOfNewBlockIsNotLogIdxZero", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4996, 0, nil))
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 1, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("MultipleSearchCheckpoints", func(t *testing.T) {
block1 := eth.BlockID{Hash: createHash(11), Number: 11}
block2 := eth.BlockID{Hash: createHash(12), Number: 12}
block3 := eth.BlockID{Hash: createHash(15), Number: 15}
block4 := eth.BlockID{Hash: createHash(16), Number: 16}
// First checkpoint is at entry idx 0
// Block 1 logs don't reach the second checkpoint
block1LogCount := searchCheckpointFrequency - 10
// Block 2 logs extend to just after the third checkpoint
block2LogCount := searchCheckpointFrequency + 20
// Block 3 logs extend to immediately before the fourth checkpoint
block3LogCount := searchCheckpointFrequency - 16
block4LogCount := 2
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
for i := 0; i < block1LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block1, 3000, uint32(i), nil)
require.NoErrorf(t, err, "failed to add log %v of block 1", i)
}
for i := 0; i < block2LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block2, 3002, uint32(i), nil)
require.NoErrorf(t, err, "failed to add log %v of block 2", i)
}
for i := 0; i < block3LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block3, 3004, uint32(i), nil)
require.NoErrorf(t, err, "failed to add log %v of block 3", i)
}
// Verify that we're right before the fourth checkpoint will be written.
// entryCount is the number of entries, so given 0 based indexing is the index of the next entry
// the first checkpoint is at entry 0, the second at entry searchCheckpointFrequency etc
// so the fourth is at entry 3*searchCheckpointFrequency
require.EqualValues(t, 3*searchCheckpointFrequency, m.entryCount)
for i := 0; i < block4LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block4, 3006, uint32(i), nil)
require.NoErrorf(t, err, "failed to add log %v of block 4", i)
}
},
func(t *testing.T, db *DB, m *stubMetrics) {
// Check that we wrote additional search checkpoints
expectedCheckpointCount := 4
expectedEntryCount := block1LogCount + block2LogCount + block3LogCount + block4LogCount + (2 * expectedCheckpointCount)
require.EqualValues(t, expectedEntryCount, m.entryCount)
// Check we can find all the logs.
for i := 0; i < block1LogCount; i++ {
requireContains(t, db, block1.Number, uint32(i), createHash(i))
}
// Block 2 logs extend to just after the third checkpoint
for i := 0; i < block2LogCount; i++ {
requireContains(t, db, block2.Number, uint32(i), createHash(i))
}
// Block 3 logs extend to immediately before the fourth checkpoint
for i := 0; i < block3LogCount; i++ {
requireContains(t, db, block3.Number, uint32(i), createHash(i))
}
// Block 4 logs start immediately after the fourth checkpoint
for i := 0; i < block4LogCount; i++ {
requireContains(t, db, block4.Number, uint32(i), createHash(i))
}
})
})
}
// TestAddDependentLog verifies storing logs that carry an executing message.
// A log with an exec message is written as multiple entries (initiating event,
// executing link, executing check — see TestRecoverOnCreate), so each subtest
// positions the write relative to a search-checkpoint boundary to check that a
// checkpoint may be inserted between any pair of those entries.
func TestAddDependentLog(t *testing.T) {
	execMsg := types.ExecutingMessage{
		Chain:     3,
		BlockNum:  42894,
		LogIdx:    42,
		Timestamp: 8742482,
		Hash:      types.TruncateHash(createHash(8844)),
	}
	t.Run("FirstEntry", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, &execMsg)
				require.NoError(t, err)
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 15, 0, createHash(1), execMsg)
			})
	})
	t.Run("CheckpointBetweenInitEventAndExecLink", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				// Fill to one entry short of the checkpoint boundary so the
				// checkpoint lands between the init event and the exec link.
				for i := uint32(0); m.entryCount < searchCheckpointFrequency-1; i++ {
					require.NoError(t, db.AddLog(createTruncatedHash(9), eth.BlockID{Hash: createHash(9), Number: 1}, 500, i, nil))
				}
				err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, &execMsg)
				require.NoError(t, err)
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 15, 0, createHash(1), execMsg)
			})
	})
	t.Run("CheckpointBetweenInitEventAndExecLinkNotIncrementingBlock", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				// Same checkpoint position, but the new log stays in block 1 so
				// the checkpoint does not coincide with a block boundary.
				for i := uint32(0); m.entryCount < searchCheckpointFrequency-1; i++ {
					require.NoError(t, db.AddLog(createTruncatedHash(9), eth.BlockID{Hash: createHash(9), Number: 1}, 500, i, nil))
				}
				err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 1}, 5000, 253, &execMsg)
				require.NoError(t, err)
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 1, 253, createHash(1), execMsg)
			})
	})
	t.Run("CheckpointBetweenExecLinkAndExecCheck", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				// Two entries short of the boundary: the checkpoint falls between
				// the executing link and the executing check.
				for i := uint32(0); m.entryCount < searchCheckpointFrequency-2; i++ {
					require.NoError(t, db.AddLog(createTruncatedHash(9), eth.BlockID{Hash: createHash(9), Number: 1}, 500, i, nil))
				}
				err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, &execMsg)
				require.NoError(t, err)
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 15, 0, createHash(1), execMsg)
			})
	})
	t.Run("CheckpointBetweenExecLinkAndExecCheckNotIncrementingBlock", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				// As above, but without crossing into a new block.
				for i := uint32(0); m.entryCount < searchCheckpointFrequency-2; i++ {
					require.NoError(t, db.AddLog(createTruncatedHash(9), eth.BlockID{Hash: createHash(9), Number: 1}, 500, i, nil))
				}
				err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 1}, 5000, 252, &execMsg)
				require.NoError(t, err)
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 1, 252, createHash(1), execMsg)
			})
	})
}
// TestContains verifies that Contains finds exactly the logs that were added,
// and rejects lookups outside the recorded block/log-index range.
func TestContains(t *testing.T) {
	block50 := eth.BlockID{Hash: createHash(50), Number: 50}
	block52 := eth.BlockID{Hash: createHash(52), Number: 52}
	runDBTest(t,
		func(t *testing.T, db *DB, m *stubMetrics) {
			// Seed block 50 with three logs and block 52 with two.
			for logIdx, hashNum := range []int{1, 3, 2} {
				require.NoError(t, db.AddLog(createTruncatedHash(hashNum), block50, 500, uint32(logIdx), nil))
			}
			for logIdx, hashNum := range []int{1, 3} {
				require.NoError(t, db.AddLog(createTruncatedHash(hashNum), block52, 500, uint32(logIdx), nil))
			}
		},
		func(t *testing.T, db *DB, m *stubMetrics) {
			// Every seeded log must be retrievable.
			requireContains(t, db, 50, 0, createHash(1))
			requireContains(t, db, 50, 1, createHash(3))
			requireContains(t, db, 50, 2, createHash(2))
			requireContains(t, db, 52, 0, createHash(1))
			requireContains(t, db, 52, 1, createHash(3))
			// Block number below the first recorded block.
			requireNotContains(t, db, 49, 0, createHash(1))
			// Block number in the gap between recorded blocks.
			requireNotContains(t, db, 51, 0, createHash(1))
			// Requests past the end of the database.
			requireNotContains(t, db, 52, 2, createHash(3))
			requireNotContains(t, db, 53, 0, createHash(3))
			// Log index beyond the logs recorded for the block.
			requireNotContains(t, db, 50, 3, createHash(2))
			// Position holds a log, but with a different hash.
			requireWrongHash(t, db, 50, 0, createHash(5), types.ExecutingMessage{})
		})
}
// TestExecutes verifies that Executes returns the executing message recorded
// with a log (or the zero value when none was recorded), and reports errors
// for positions outside the recorded range.
func TestExecutes(t *testing.T) {
	execMsg1 := types.ExecutingMessage{
		Chain:     33,
		BlockNum:  22,
		LogIdx:    99,
		Timestamp: 948294,
		Hash:      createTruncatedHash(332299),
	}
	execMsg2 := types.ExecutingMessage{
		Chain:     44,
		BlockNum:  55,
		LogIdx:    66,
		Timestamp: 77777,
		Hash:      createTruncatedHash(445566),
	}
	execMsg3 := types.ExecutingMessage{
		Chain:     77,
		BlockNum:  88,
		LogIdx:    89,
		Timestamp: 6578567,
		Hash:      createTruncatedHash(778889),
	}
	block50 := eth.BlockID{Hash: createHash(50), Number: 50}
	block52 := eth.BlockID{Hash: createHash(52), Number: 52}
	runDBTest(t,
		func(t *testing.T, db *DB, m *stubMetrics) {
			// Mix plain logs with logs carrying an executing message.
			require.NoError(t, db.AddLog(createTruncatedHash(1), block50, 500, 0, nil))
			require.NoError(t, db.AddLog(createTruncatedHash(3), block50, 500, 1, &execMsg1))
			require.NoError(t, db.AddLog(createTruncatedHash(2), block50, 500, 2, nil))
			require.NoError(t, db.AddLog(createTruncatedHash(1), block52, 500, 0, &execMsg2))
			require.NoError(t, db.AddLog(createTruncatedHash(3), block52, 500, 1, &execMsg3))
		},
		func(t *testing.T, db *DB, m *stubMetrics) {
			// Logs stored without a message report the zero-value ExecutingMessage.
			requireExecutingMessage(t, db, 50, 0, types.ExecutingMessage{})
			requireExecutingMessage(t, db, 50, 1, execMsg1)
			requireExecutingMessage(t, db, 50, 2, types.ExecutingMessage{})
			requireExecutingMessage(t, db, 52, 0, execMsg2)
			requireExecutingMessage(t, db, 52, 1, execMsg3)
			// Out-of-range lookups fail the same way as for Contains:
			// below the first block, in a gap, past the end, or past the
			// last log index of a block.
			requireNotContains(t, db, 49, 0, createHash(1))
			requireNotContains(t, db, 51, 0, createHash(1))
			requireNotContains(t, db, 52, 2, createHash(3))
			requireNotContains(t, db, 53, 0, createHash(3))
			requireNotContains(t, db, 50, 3, createHash(2))
		})
}
// TestGetBlockInfo exercises ClosestBlockInfo, which returns the block
// recorded in the nearest search checkpoint at or before the requested
// block number, or io.EOF when no checkpoint qualifies.
func TestGetBlockInfo(t *testing.T) {
	t.Run("ReturnsEOFWhenEmpty", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {},
			func(t *testing.T, db *DB, m *stubMetrics) {
				_, _, err := db.ClosestBlockInfo(10)
				require.ErrorIs(t, err, io.EOF)
			})
	})
	t.Run("ReturnsEOFWhenRequestedBlockBeforeFirstSearchCheckpoint", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(11), Number: 11}, 500, 0, nil)
				require.NoError(t, err)
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				// The only checkpoint records block 11, so block 10 precedes all data.
				_, _, err := db.ClosestBlockInfo(10)
				require.ErrorIs(t, err, io.EOF)
			})
	})
	t.Run("ReturnFirstBlockInfo", func(t *testing.T) {
		block := eth.BlockID{Hash: createHash(11), Number: 11}
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				err := db.AddLog(createTruncatedHash(1), block, 500, 0, nil)
				require.NoError(t, err)
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				// Any search at or after the single checkpointed block returns it.
				requireClosestBlockInfo(t, db, 11, block.Number, block.Hash)
				requireClosestBlockInfo(t, db, 12, block.Number, block.Hash)
				requireClosestBlockInfo(t, db, 200, block.Number, block.Hash)
			})
	})
	t.Run("ReturnClosestCheckpointBlockInfo", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				// One log per block, enough blocks to write past a second checkpoint.
				for i := 1; i < searchCheckpointFrequency+3; i++ {
					block := eth.BlockID{Hash: createHash(i), Number: uint64(i)}
					err := db.AddLog(createTruncatedHash(i), block, uint64(i)*2, 0, nil)
					require.NoError(t, err)
				}
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				// Expect block from the first checkpoint
				requireClosestBlockInfo(t, db, 1, 1, createHash(1))
				requireClosestBlockInfo(t, db, 10, 1, createHash(1))
				requireClosestBlockInfo(t, db, searchCheckpointFrequency-3, 1, createHash(1))
				// Expect block from the second checkpoint
				// 2 entries used for initial checkpoint but we start at block 1
				secondCheckpointBlockNum := searchCheckpointFrequency - 1
				requireClosestBlockInfo(t, db, uint64(secondCheckpointBlockNum), uint64(secondCheckpointBlockNum), createHash(secondCheckpointBlockNum))
				requireClosestBlockInfo(t, db, uint64(secondCheckpointBlockNum)+1, uint64(secondCheckpointBlockNum), createHash(secondCheckpointBlockNum))
				requireClosestBlockInfo(t, db, uint64(secondCheckpointBlockNum)+2, uint64(secondCheckpointBlockNum), createHash(secondCheckpointBlockNum))
			})
	})
}
// requireClosestBlockInfo asserts that ClosestBlockInfo(searchFor) succeeds
// and returns the expected block number and (truncated) hash.
func requireClosestBlockInfo(t *testing.T, db *DB, searchFor uint64, expectedBlockNum uint64, expectedHash common.Hash) {
	num, hash, err := db.ClosestBlockInfo(searchFor)
	require.NoError(t, err)
	require.Equal(t, expectedBlockNum, num)
	require.Equal(t, types.TruncateHash(expectedHash), hash)
}
// requireContains asserts the log (blockNum, logIdx, logHash) is present in
// the database and that the lookup read a bounded, non-zero number of entries
// (i.e. at most one checkpoint span). At most one optional execMsg may be
// given; it is verified against the stored entry via requireExecutingMessage.
func requireContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash, execMsg ...types.ExecutingMessage) {
	require.LessOrEqual(t, len(execMsg), 1, "cannot have multiple executing messages for a single log")
	metrics, ok := db.m.(*stubMetrics)
	require.True(t, ok, "Did not get the expected metrics type")
	found, err := db.Contains(blockNum, logIdx, types.TruncateHash(logHash))
	require.NoErrorf(t, err, "Error searching for log %v in block %v", logIdx, blockNum)
	require.Truef(t, found, "Did not find log %v in block %v with hash %v", logIdx, blockNum, logHash)
	require.LessOrEqual(t, metrics.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
	require.NotZero(t, metrics.entriesReadForSearch, "Must read at least some entries to find the log")
	expected := types.ExecutingMessage{}
	if len(execMsg) > 0 {
		expected = execMsg[0]
	}
	requireExecutingMessage(t, db, blockNum, logIdx, expected)
}
// requireNotContains asserts that no log matches (blockNum, logIdx, logHash)
// and that Executes reports ErrNotFound for the same position, with each
// lookup staying within one checkpoint span of entry reads.
func requireNotContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash) {
	metrics, ok := db.m.(*stubMetrics)
	require.True(t, ok, "Did not get the expected metrics type")
	found, err := db.Contains(blockNum, logIdx, types.TruncateHash(logHash))
	require.NoErrorf(t, err, "Error searching for log %v in block %v", logIdx, blockNum)
	require.Falsef(t, found, "Found unexpected log %v in block %v with hash %v", logIdx, blockNum, logHash)
	require.LessOrEqual(t, metrics.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
	_, err = db.Executes(blockNum, logIdx)
	require.ErrorIs(t, err, ErrNotFound, "Found unexpected log when getting executing message")
	require.LessOrEqual(t, metrics.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
}
// requireExecutingMessage asserts that the executing message stored for the
// log at (blockNum, logIdx) equals execMsg, and that the lookup read a
// bounded, non-zero number of entries.
func requireExecutingMessage(t *testing.T, db *DB, blockNum uint64, logIdx uint32, execMsg types.ExecutingMessage) {
	metrics, ok := db.m.(*stubMetrics)
	require.True(t, ok, "Did not get the expected metrics type")
	actual, err := db.Executes(blockNum, logIdx)
	require.NoError(t, err, "Error when searching for executing message")
	require.Equal(t, execMsg, actual, "Should return matching executing message")
	require.LessOrEqual(t, metrics.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
	require.NotZero(t, metrics.entriesReadForSearch, "Must read at least some entries to find the log")
}
// requireWrongHash asserts that Contains rejects a lookup at
// (blockNum, logIdx) with a non-matching hash, while the executing message
// stored at that position is still retrievable and equals execMsg.
//
// Fix: the execMsg parameter was previously accepted but never used — the
// result of db.Executes was discarded, so a mismatched stored message went
// undetected. It is now compared against the expected value.
func requireWrongHash(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash, execMsg types.ExecutingMessage) {
	m, ok := db.m.(*stubMetrics)
	require.True(t, ok, "Did not get the expected metrics type")
	result, err := db.Contains(blockNum, logIdx, types.TruncateHash(logHash))
	require.NoErrorf(t, err, "Error searching for log %v in block %v", logIdx, blockNum)
	require.Falsef(t, result, "Found unexpected log %v in block %v with hash %v", logIdx, blockNum, logHash)
	// The executing-message lookup does not depend on the queried hash, so it
	// should still succeed and return the message recorded for this position.
	actualExecMsg, err := db.Executes(blockNum, logIdx)
	require.NoError(t, err, "Error when searching for executing message")
	require.Equal(t, execMsg, actualExecMsg, "Should return matching executing message")
	require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
}
// TestRecoverOnCreate verifies that opening a database over an existing entry
// store truncates any trailing partial write: the store must end on a complete
// log record (or be empty), otherwise the incomplete tail is dropped back to
// the last complete record. Every scenario runs twice — once on an empty store
// and once with a complete valid event sequence as a prefix — to confirm that
// truncation never removes preceding valid data.
func TestRecoverOnCreate(t *testing.T) {
	createDb := func(t *testing.T, store *stubEntryStore) (*DB, *stubMetrics, error) {
		logger := testlog.Logger(t, log.LvlInfo)
		m := &stubMetrics{}
		db, err := NewFromEntryStore(logger, m, store)
		return db, m, err
	}
	// A complete record: search checkpoint + canonical hash + init event.
	validInitEvent, err := newInitiatingEvent(logContext{blockNum: 1, logIdx: 0}, 1, 0, createTruncatedHash(1), false)
	require.NoError(t, err)
	validEventSequence := []entrydb.Entry{
		newSearchCheckpoint(1, 0, 100).encode(),
		newCanonicalHash(createTruncatedHash(344)).encode(),
		validInitEvent.encode(),
	}
	var emptyEventSequence []entrydb.Entry
	for _, prefixEvents := range [][]entrydb.Entry{emptyEventSequence, validEventSequence} {
		prefixEvents := prefixEvents // capture the loop variable (pre-Go 1.22 semantics)
		storeWithEvents := func(evts ...entrydb.Entry) *stubEntryStore {
			store := &stubEntryStore{}
			store.entries = append(store.entries, prefixEvents...)
			store.entries = append(store.entries, evts...)
			return store
		}
		t.Run(fmt.Sprintf("PrefixEvents-%v", len(prefixEvents)), func(t *testing.T) {
			t.Run("NoTruncateWhenLastEntryIsLogWithNoExecMessage", func(t *testing.T) {
				// A log without an exec message is complete after its init event.
				initEvent, err := newInitiatingEvent(logContext{blockNum: 3, logIdx: 0}, 3, 0, createTruncatedHash(1), false)
				require.NoError(t, err)
				store := storeWithEvents(
					newSearchCheckpoint(3, 0, 100).encode(),
					newCanonicalHash(createTruncatedHash(344)).encode(),
					initEvent.encode(),
				)
				db, m, err := createDb(t, store)
				require.NoError(t, err)
				require.EqualValues(t, len(prefixEvents)+3, m.entryCount)
				requireContains(t, db, 3, 0, createHash(1))
			})
			t.Run("NoTruncateWhenLastEntryIsExecutingCheck", func(t *testing.T) {
				// A log with an exec message is complete after its executing check.
				initEvent, err := newInitiatingEvent(logContext{blockNum: 3, logIdx: 0}, 3, 0, createTruncatedHash(1), true)
				execMsg := types.ExecutingMessage{
					Chain:     4,
					BlockNum:  10,
					LogIdx:    4,
					Timestamp: 1288,
					Hash:      createTruncatedHash(4),
				}
				require.NoError(t, err)
				linkEvt, err := newExecutingLink(execMsg)
				require.NoError(t, err)
				store := storeWithEvents(
					newSearchCheckpoint(3, 0, 100).encode(),
					newCanonicalHash(createTruncatedHash(344)).encode(),
					initEvent.encode(),
					linkEvt.encode(),
					newExecutingCheck(execMsg.Hash).encode(),
				)
				db, m, err := createDb(t, store)
				require.NoError(t, err)
				require.EqualValues(t, len(prefixEvents)+5, m.entryCount)
				requireContains(t, db, 3, 0, createHash(1), execMsg)
			})
			t.Run("TruncateWhenLastEntrySearchCheckpoint", func(t *testing.T) {
				// A checkpoint with no following record is incomplete — dropped.
				store := storeWithEvents(newSearchCheckpoint(3, 0, 100).encode())
				_, m, err := createDb(t, store)
				require.NoError(t, err)
				require.EqualValues(t, len(prefixEvents), m.entryCount)
			})
			t.Run("TruncateWhenLastEntryCanonicalHash", func(t *testing.T) {
				// Checkpoint + canonical hash without an init event is incomplete.
				store := storeWithEvents(
					newSearchCheckpoint(3, 0, 100).encode(),
					newCanonicalHash(createTruncatedHash(344)).encode(),
				)
				_, m, err := createDb(t, store)
				require.NoError(t, err)
				require.EqualValues(t, len(prefixEvents), m.entryCount)
			})
			t.Run("TruncateWhenLastEntryInitEventWithExecMsg", func(t *testing.T) {
				// An init event that declares an exec message but is missing the
				// link and check entries is incomplete.
				initEvent, err := newInitiatingEvent(logContext{blockNum: 3, logIdx: 0}, 3, 0, createTruncatedHash(1), true)
				require.NoError(t, err)
				store := storeWithEvents(
					newSearchCheckpoint(3, 0, 100).encode(),
					newCanonicalHash(createTruncatedHash(344)).encode(),
					initEvent.encode(),
				)
				_, m, err := createDb(t, store)
				require.NoError(t, err)
				require.EqualValues(t, len(prefixEvents), m.entryCount)
			})
			t.Run("TruncateWhenLastEntryInitEventWithExecLink", func(t *testing.T) {
				// Init event + executing link without the executing check is incomplete.
				initEvent, err := newInitiatingEvent(logContext{blockNum: 3, logIdx: 0}, 3, 0, createTruncatedHash(1), true)
				require.NoError(t, err)
				execMsg := types.ExecutingMessage{
					Chain:     4,
					BlockNum:  10,
					LogIdx:    4,
					Timestamp: 1288,
					Hash:      createTruncatedHash(4),
				}
				require.NoError(t, err)
				linkEvt, err := newExecutingLink(execMsg)
				require.NoError(t, err)
				store := storeWithEvents(
					newSearchCheckpoint(3, 0, 100).encode(),
					newCanonicalHash(createTruncatedHash(344)).encode(),
					initEvent.encode(),
					linkEvt.encode(),
				)
				_, m, err := createDb(t, store)
				require.NoError(t, err)
				require.EqualValues(t, len(prefixEvents), m.entryCount)
			})
		})
	}
}
// TestRewind verifies db.Rewind(headBlockNum), which discards all logs from
// blocks after headBlockNum while keeping everything at or before it, and
// checks that subsequent AddLog calls respect the rewound head.
func TestRewind(t *testing.T) {
	t.Run("WhenEmpty", func(t *testing.T) {
		// Rewinding an empty database is a no-op regardless of target.
		runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) {},
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.Rewind(100))
				require.NoError(t, db.Rewind(0))
			})
	})
	t.Run("AfterLastBlock", func(t *testing.T) {
		// Rewinding past the last recorded block keeps everything.
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(4), eth.BlockID{Hash: createHash(74), Number: 74}, 700, 0, nil))
				require.NoError(t, db.Rewind(75))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 50, 0, createHash(1))
				requireContains(t, db, 50, 1, createHash(2))
				requireContains(t, db, 51, 0, createHash(3))
				requireContains(t, db, 74, 0, createHash(4))
			})
	})
	t.Run("BeforeFirstBlock", func(t *testing.T) {
		// Rewinding before the first recorded block empties the database.
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
				require.NoError(t, db.Rewind(25))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireNotContains(t, db, 50, 0, createHash(1))
				requireNotContains(t, db, 50, 0, createHash(1))
				require.Zero(t, m.entryCount)
			})
	})
	t.Run("AtFirstBlock", func(t *testing.T) {
		// Rewinding to the first block keeps its logs and drops later blocks.
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 1, nil))
				require.NoError(t, db.Rewind(50))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 50, 0, createHash(1))
				requireContains(t, db, 50, 1, createHash(2))
				requireNotContains(t, db, 51, 0, createHash(1))
				requireNotContains(t, db, 51, 1, createHash(2))
			})
	})
	t.Run("AtSecondCheckpoint", func(t *testing.T) {
		// Rewind must also remove a search checkpoint written for a dropped block.
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				for i := uint32(0); m.entryCount < searchCheckpointFrequency; i++ {
					require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, i, nil))
				}
				require.EqualValues(t, searchCheckpointFrequency, m.entryCount)
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0, nil))
				require.EqualValues(t, searchCheckpointFrequency+3, m.entryCount, "Should have inserted new checkpoint and extra log")
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 1, nil))
				require.NoError(t, db.Rewind(50))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.EqualValues(t, searchCheckpointFrequency, m.entryCount, "Should have deleted second checkpoint")
				requireContains(t, db, 50, 0, createHash(1))
				requireContains(t, db, 50, 1, createHash(1))
				requireNotContains(t, db, 51, 0, createHash(1))
				requireNotContains(t, db, 51, 1, createHash(2))
			})
	})
	t.Run("BetweenLogEntries", func(t *testing.T) {
		// Rewind target falls in a block-number gap: keep blocks below it.
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil))
				require.NoError(t, db.Rewind(55))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 50, 0, createHash(1))
				requireContains(t, db, 50, 1, createHash(2))
				requireNotContains(t, db, 60, 0, createHash(1))
				requireNotContains(t, db, 60, 1, createHash(2))
			})
	})
	t.Run("AtExistingLogEntry", func(t *testing.T) {
		// Rewind target is a recorded block: its logs are kept, later blocks dropped.
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 1, nil))
				require.NoError(t, db.Rewind(60))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 59, 0, createHash(1))
				requireContains(t, db, 59, 1, createHash(2))
				requireContains(t, db, 60, 0, createHash(1))
				requireContains(t, db, 60, 1, createHash(2))
				requireNotContains(t, db, 61, 0, createHash(1))
				requireNotContains(t, db, 61, 1, createHash(2))
			})
	})
	t.Run("AtLastEntry", func(t *testing.T) {
		// Rewinding to the last recorded block keeps everything.
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(70), Number: 70}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(70), Number: 70}, 502, 1, nil))
				require.NoError(t, db.Rewind(70))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 50, 0, createHash(1))
				requireContains(t, db, 50, 1, createHash(2))
				requireContains(t, db, 60, 0, createHash(1))
				requireContains(t, db, 60, 1, createHash(2))
				requireContains(t, db, 70, 0, createHash(1))
				requireContains(t, db, 70, 1, createHash(2))
			})
	})
	t.Run("ReaddDeletedBlocks", func(t *testing.T) {
		// After rewinding to block 60, blocks at or before 60 cannot be re-added,
		// but the deleted block 61 can be written again.
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 0, nil))
				require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 1, nil))
				require.NoError(t, db.Rewind(60))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				err := db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1, nil)
				require.ErrorIs(t, err, ErrLogOutOfOrder, "Cannot add block before rewound head")
				err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil)
				require.ErrorIs(t, err, ErrLogOutOfOrder, "Cannot add block that was rewound to")
				// NOTE(review): this uses Hash createHash(60) with Number 61 —
				// likely a typo for createHash(61); the mismatch doesn't affect
				// the ordering check being tested, but confirm intent.
				err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 61}, 502, 0, nil)
				require.NoError(t, err, "Can re-add deleted block")
			})
	})
}
// stubMetrics is a Metrics implementation that records the most recently
// reported values so tests can assert on them directly.
type stubMetrics struct {
	entryCount           int64 // latest total DB entry count reported
	entriesReadForSearch int64 // entries read during the most recent search
}
// RecordDBEntryCount captures the reported total DB entry count.
func (s *stubMetrics) RecordDBEntryCount(count int64) {
	s.entryCount = count
}
// RecordDBSearchEntriesRead captures the number of entries read by a search.
func (s *stubMetrics) RecordDBSearchEntriesRead(count int64) {
	s.entriesReadForSearch = count
}

// Compile-time check that stubMetrics satisfies the Metrics interface.
var _ Metrics = (*stubMetrics)(nil)
// stubEntryStore is an in-memory EntryStore backed by a plain slice, used to
// drive recovery tests with hand-crafted entry sequences.
type stubEntryStore struct {
	entries []entrydb.Entry
}
// Size returns the number of entries currently stored.
func (s *stubEntryStore) Size() int64 {
	return int64(len(s.entries))
}
// LastEntryIdx returns the index of the last stored entry, or -1 when empty.
func (s *stubEntryStore) LastEntryIdx() entrydb.EntryIdx {
	return entrydb.EntryIdx(s.Size() - 1)
}
// Read returns the entry at idx, or io.EOF when idx is outside the stored
// range. Fix: a negative idx previously passed the upper-bound-only check and
// panicked on the slice index; it now reports io.EOF like any other
// out-of-range index.
func (s *stubEntryStore) Read(idx entrydb.EntryIdx) (entrydb.Entry, error) {
	if idx < 0 || idx >= entrydb.EntryIdx(len(s.entries)) {
		return entrydb.Entry{}, io.EOF
	}
	return s.entries[idx], nil
}
// Append adds the given entries to the end of the store. It never fails.
func (s *stubEntryStore) Append(entries ...entrydb.Entry) error {
	s.entries = append(s.entries, entries...)
	return nil
}
// Truncate discards all entries after idx so that LastEntryIdx() == idx
// afterwards, clamping out-of-range targets.
//
// Fix: the previous form `s.entries[:min(s.Size()-1, int64(idx+1))]` was
// off-by-one when idx equalled the current last index — a logical no-op would
// drop the final entry — and panicked for idx < -1 (negative slice bound).
// Truncating at or past the end is now a no-op and a negative idx empties the
// store.
func (s *stubEntryStore) Truncate(idx entrydb.EntryIdx) error {
	keep := int64(idx) + 1
	if keep < 0 {
		keep = 0
	}
	if keep > s.Size() {
		keep = s.Size()
	}
	s.entries = s.entries[:keep]
	return nil
}
// Close is a no-op for the in-memory store.
func (s *stubEntryStore) Close() error {
	return nil
}

// Compile-time check that stubEntryStore satisfies the EntryStore interface.
var _ EntryStore = (*stubEntryStore)(nil)
......@@ -23,3 +23,10 @@ func prepChainDir(chainID types.ChainID, datadir string) (string, error) {
}
return dir, nil
}
func prepDataDir(datadir string) error {
if err := os.MkdirAll(datadir, 0755); err != nil {
return fmt.Errorf("failed to create data directory %v: %w", datadir, err)
}
return nil
}
......@@ -23,9 +23,10 @@ type Metrics interface {
caching.Metrics
}
type LogDB interface {
type Storage interface {
LogStorage
DatabaseRewinder
LatestBlockNum(chainID types.ChainID) uint64
}
// ChainMonitor monitors a source L2 chain, retrieving the data required to populate the database and perform
......@@ -35,7 +36,7 @@ type ChainMonitor struct {
headMonitor *HeadMonitor
}
func NewChainMonitor(ctx context.Context, logger log.Logger, m Metrics, chainID types.ChainID, rpc string, client client.RPC, store LogDB, block uint64) (*ChainMonitor, error) {
func NewChainMonitor(ctx context.Context, logger log.Logger, m Metrics, chainID types.ChainID, rpc string, client client.RPC, store Storage) (*ChainMonitor, error) {
logger = logger.New("chainID", chainID)
cl, err := newClient(ctx, logger, m, rpc, client, pollInterval, trustRpc, rpcKind)
if err != nil {
......@@ -43,12 +44,12 @@ func NewChainMonitor(ctx context.Context, logger log.Logger, m Metrics, chainID
}
startingHead := eth.L1BlockRef{
Number: block,
Number: store.LatestBlockNum(chainID),
}
processLogs := newLogProcessor(store)
processLogs := newLogProcessor(chainID, store)
fetchReceipts := newLogFetcher(cl, processLogs)
unsafeBlockProcessor := NewChainProcessor(logger, cl, startingHead, fetchReceipts, store)
unsafeBlockProcessor := NewChainProcessor(logger, cl, chainID, startingHead, fetchReceipts, store)
unsafeProcessors := []HeadProcessor{unsafeBlockProcessor}
callback := newHeadUpdateProcessor(logger, unsafeProcessors, nil, nil)
......
......@@ -4,6 +4,7 @@ import (
"context"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/log"
)
......@@ -16,7 +17,7 @@ type BlockProcessor interface {
}
type DatabaseRewinder interface {
Rewind(headBlockNum uint64) error
Rewind(chain types.ChainID, headBlockNum uint64) error
}
type BlockProcessorFn func(ctx context.Context, block eth.L1BlockRef) error
......@@ -30,15 +31,17 @@ func (fn BlockProcessorFn) ProcessBlock(ctx context.Context, block eth.L1BlockRe
type ChainProcessor struct {
log log.Logger
client BlockByNumberSource
chain types.ChainID
lastBlock eth.L1BlockRef
processor BlockProcessor
rewinder DatabaseRewinder
}
func NewChainProcessor(log log.Logger, client BlockByNumberSource, startingHead eth.L1BlockRef, processor BlockProcessor, rewinder DatabaseRewinder) *ChainProcessor {
func NewChainProcessor(log log.Logger, client BlockByNumberSource, chain types.ChainID, startingHead eth.L1BlockRef, processor BlockProcessor, rewinder DatabaseRewinder) *ChainProcessor {
return &ChainProcessor{
log: log,
client: client,
chain: chain,
lastBlock: startingHead,
processor: processor,
rewinder: rewinder,
......@@ -68,7 +71,7 @@ func (s *ChainProcessor) processBlock(ctx context.Context, block eth.L1BlockRef)
if err := s.processor.ProcessBlock(ctx, block); err != nil {
s.log.Error("Failed to process block", "block", block, "err", err)
// Try to rewind the database to the previous block to remove any logs from this block that were written
if err := s.rewinder.Rewind(s.lastBlock.Number); err != nil {
if err := s.rewinder.Rewind(s.chain, s.lastBlock.Number); err != nil {
// If any logs were written, our next attempt to write will fail and we'll retry this rewind.
// If no logs were written successfully then the rewind wouldn't have done anything anyway.
s.log.Error("Failed to rewind after error processing block", "block", block, "err", err)
......
......@@ -3,22 +3,26 @@ package source
import (
"context"
"errors"
"fmt"
"testing"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
var processorChainID = types.ChainIDFromUInt64(4)
func TestUnsafeBlocksStage(t *testing.T) {
t.Run("IgnoreEventsAtOrPriorToStartingHead", func(t *testing.T) {
ctx := context.Background()
logger := testlog.Logger(t, log.LvlInfo)
client := &stubBlockByNumberSource{}
processor := &stubBlockProcessor{}
stage := NewChainProcessor(logger, client, eth.L1BlockRef{Number: 100}, processor, &stubRewinder{})
stage := NewChainProcessor(logger, client, processorChainID, eth.L1BlockRef{Number: 100}, processor, &stubRewinder{})
stage.OnNewHead(ctx, eth.L1BlockRef{Number: 100})
stage.OnNewHead(ctx, eth.L1BlockRef{Number: 99})
......@@ -35,7 +39,7 @@ func TestUnsafeBlocksStage(t *testing.T) {
block2 := eth.L1BlockRef{Number: 102}
block3 := eth.L1BlockRef{Number: 103}
processor := &stubBlockProcessor{}
stage := NewChainProcessor(logger, client, block0, processor, &stubRewinder{})
stage := NewChainProcessor(logger, client, processorChainID, block0, processor, &stubRewinder{})
stage.OnNewHead(ctx, block1)
require.Equal(t, []eth.L1BlockRef{block1}, processor.processed)
stage.OnNewHead(ctx, block2)
......@@ -53,7 +57,7 @@ func TestUnsafeBlocksStage(t *testing.T) {
block0 := eth.L1BlockRef{Number: 100}
block1 := eth.L1BlockRef{Number: 101}
processor := &stubBlockProcessor{}
stage := NewChainProcessor(logger, client, block0, processor, &stubRewinder{})
stage := NewChainProcessor(logger, client, processorChainID, block0, processor, &stubRewinder{})
stage.OnNewHead(ctx, block1)
require.NotEmpty(t, processor.processed)
require.Equal(t, []eth.L1BlockRef{block1}, processor.processed)
......@@ -72,7 +76,7 @@ func TestUnsafeBlocksStage(t *testing.T) {
block0 := eth.L1BlockRef{Number: 100}
block3 := eth.L1BlockRef{Number: 103}
processor := &stubBlockProcessor{}
stage := NewChainProcessor(logger, client, block0, processor, &stubRewinder{})
stage := NewChainProcessor(logger, client, processorChainID, block0, processor, &stubRewinder{})
stage.OnNewHead(ctx, block3)
require.Equal(t, []eth.L1BlockRef{makeBlockRef(101), makeBlockRef(102), block3}, processor.processed)
......@@ -88,7 +92,7 @@ func TestUnsafeBlocksStage(t *testing.T) {
block3 := eth.L1BlockRef{Number: 103}
processor := &stubBlockProcessor{}
rewinder := &stubRewinder{}
stage := NewChainProcessor(logger, client, block0, processor, rewinder)
stage := NewChainProcessor(logger, client, processorChainID, block0, processor, rewinder)
stage.OnNewHead(ctx, block3)
require.Empty(t, processor.processed, "should not update any blocks because backfill failed")
......@@ -107,7 +111,7 @@ func TestUnsafeBlocksStage(t *testing.T) {
block3 := eth.L1BlockRef{Number: 103}
processor := &stubBlockProcessor{err: errors.New("boom")}
rewinder := &stubRewinder{}
stage := NewChainProcessor(logger, client, block0, processor, rewinder)
stage := NewChainProcessor(logger, client, processorChainID, block0, processor, rewinder)
stage.OnNewHead(ctx, block3)
require.Equal(t, []eth.L1BlockRef{makeBlockRef(101)}, processor.processed, "Attempted to process block 101")
......@@ -127,7 +131,7 @@ func TestUnsafeBlocksStage(t *testing.T) {
block1 := eth.L1BlockRef{Number: 101}
processor := &stubBlockProcessor{err: errors.New("boom")}
rewinder := &stubRewinder{}
stage := NewChainProcessor(logger, client, block0, processor, rewinder)
stage := NewChainProcessor(logger, client, processorChainID, block0, processor, rewinder)
// No skipped blocks
stage.OnNewHead(ctx, block1)
......@@ -173,7 +177,10 @@ type stubRewinder struct {
rewindCalled bool
}
func (s *stubRewinder) Rewind(headBlockNum uint64) error {
func (s *stubRewinder) Rewind(chainID types.ChainID, headBlockNum uint64) error {
if chainID != processorChainID {
return fmt.Errorf("chainID mismatch, expected %v but was %v", processorChainID, chainID)
}
s.rewoundTo = headBlockNum
s.rewindCalled = true
return nil
......
......@@ -6,28 +6,30 @@ import (
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
supTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/common"
ethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
)
type LogStorage interface {
AddLog(logHash types.TruncatedHash, block eth.BlockID, timestamp uint64, logIdx uint32, execMsg *types.ExecutingMessage) error
AddLog(chain supTypes.ChainID, logHash types.TruncatedHash, block eth.BlockID, timestamp uint64, logIdx uint32, execMsg *types.ExecutingMessage) error
}
// logProcessor extracts the logs from each block's receipts and records them
// in the backing LogStorage, tagged with its configured chain ID.
// NOTE: constructed positionally elsewhere (e.g. &logProcessor{logStore} in the
// pre-change code) — keep field order stable.
type logProcessor struct {
	chain    supTypes.ChainID // chain ID stamped on every AddLog call
	logStore LogStorage       // destination store for extracted log entries
}
func newLogProcessor(logStore LogStorage) *logProcessor {
return &logProcessor{logStore}
func newLogProcessor(chain supTypes.ChainID, logStore LogStorage) *logProcessor {
return &logProcessor{chain: chain, logStore: logStore}
}
func (p *logProcessor) ProcessLogs(_ context.Context, block eth.L1BlockRef, rcpts ethTypes.Receipts) error {
for _, rcpt := range rcpts {
for _, l := range rcpt.Logs {
logHash := logToHash(l)
err := p.logStore.AddLog(logHash, block.ID(), block.Time, uint32(l.Index), nil)
err := p.logStore.AddLog(p.chain, logHash, block.ID(), block.Time, uint32(l.Index), nil)
if err != nil {
return fmt.Errorf("failed to add log %d from block %v: %w", l.Index, block.ID(), err)
}
......
......@@ -2,21 +2,25 @@ package source
import (
"context"
"fmt"
"testing"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/types"
supTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
"github.com/ethereum/go-ethereum/common"
ethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/require"
)
var logProcessorChainID = supTypes.ChainIDFromUInt64(4)
func TestLogProcessor(t *testing.T) {
ctx := context.Background()
block1 := eth.L1BlockRef{Number: 100, Hash: common.Hash{0x11}, Time: 1111}
t.Run("NoOutputWhenLogsAreEmpty", func(t *testing.T) {
store := &stubLogStorage{}
processor := newLogProcessor(store)
processor := newLogProcessor(logProcessorChainID, store)
err := processor.ProcessLogs(ctx, block1, ethTypes.Receipts{})
require.NoError(t, err)
......@@ -50,7 +54,7 @@ func TestLogProcessor(t *testing.T) {
},
}
store := &stubLogStorage{}
processor := newLogProcessor(store)
processor := newLogProcessor(logProcessorChainID, store)
err := processor.ProcessLogs(ctx, block1, rcpts)
require.NoError(t, err)
......@@ -141,7 +145,10 @@ type stubLogStorage struct {
logs []storedLog
}
func (s *stubLogStorage) AddLog(logHash types.TruncatedHash, block eth.BlockID, timestamp uint64, logIdx uint32, execMsg *types.ExecutingMessage) error {
func (s *stubLogStorage) AddLog(chainID supTypes.ChainID, logHash types.TruncatedHash, block eth.BlockID, timestamp uint64, logIdx uint32, execMsg *types.ExecutingMessage) error {
if logProcessorChainID != chainID {
return fmt.Errorf("chain id mismatch, expected %v but got %v", logProcessorChainID, chainID)
}
s.logs = append(s.logs, storedLog{
block: block,
timestamp: timestamp,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment