Commit f3da9a53 authored by Adrian Sutton's avatar Adrian Sutton Committed by GitHub

Record executing messages in log db and handle write failures (#10973)

* op-supervisor: Support recording executing message info

* op-supervisor: Reduce permissions

* op-supervisor: Implement recovery for entry db

* op-supervisor: Track size in entrydb instead of last entry idx

* op-supervisor: Trim log entries back to the last valid ending point

* op-supervisor: Remove the manual recover operations since recovery at startup is automatic

* op-supervisor: Add support for writing multiple entries in a single write

* op-supervisor: Write all entries for a log in one append call.

Only update in-memory state after the write succeeds.

* op-supervisor: Handle partial writes

* op-supervisor: Extract logic to reverse an init event

* op-supervisor: Use errors.New

* op-supervisor: Combine the two AddLog variants.

* op-supervisor: Remove place holder tests.

* op-supervisor: Separate Executes and Contains queries

* op-supervisor: Only read executing message when it is actually used.
parent 1e0ea43e
...@@ -9,7 +9,6 @@ import ( ...@@ -9,7 +9,6 @@ import (
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
...@@ -17,7 +16,7 @@ const ( ...@@ -17,7 +16,7 @@ const (
searchCheckpointFrequency = 256 searchCheckpointFrequency = 256
eventFlagIncrementLogIdx = byte(1) eventFlagIncrementLogIdx = byte(1)
//eventFlagHasExecutingMessage = byte(1) << 1 eventFlagHasExecutingMessage = byte(1) << 1
) )
const ( const (
...@@ -28,13 +27,6 @@ const ( ...@@ -28,13 +27,6 @@ const (
typeExecutingCheck typeExecutingCheck
) )
var (
ErrLogOutOfOrder = errors.New("log out of order")
ErrDataCorruption = errors.New("data corruption")
)
type TruncatedHash [20]byte
type Metrics interface { type Metrics interface {
RecordEntryCount(count int64) RecordEntryCount(count int64)
RecordSearchEntriesRead(count int64) RecordSearchEntriesRead(count int64)
...@@ -45,7 +37,7 @@ type logContext struct { ...@@ -45,7 +37,7 @@ type logContext struct {
logIdx uint32 logIdx uint32
} }
type entryStore interface { type EntryStore interface {
Size() int64 Size() int64
Read(idx int64) (entrydb.Entry, error) Read(idx int64) (entrydb.Entry, error)
Append(entries ...entrydb.Entry) error Append(entries ...entrydb.Entry) error
...@@ -85,17 +77,21 @@ type entryStore interface { ...@@ -85,17 +77,21 @@ type entryStore interface {
type DB struct { type DB struct {
log log.Logger log log.Logger
m Metrics m Metrics
store entryStore store EntryStore
rwLock sync.RWMutex rwLock sync.RWMutex
lastEntryContext logContext lastEntryContext logContext
} }
func NewFromFile(logger log.Logger, m Metrics, path string) (*DB, error) { func NewFromFile(logger log.Logger, m Metrics, path string) (*DB, error) {
store, err := entrydb.NewEntryDB(path) store, err := entrydb.NewEntryDB(logger, path)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open DB: %w", err) return nil, fmt.Errorf("failed to open DB: %w", err)
} }
return NewFromEntryStore(logger, m, store)
}
func NewFromEntryStore(logger log.Logger, m Metrics, store EntryStore) (*DB, error) {
db := &DB{ db := &DB{
log: logger, log: logger,
m: m, m: m,
...@@ -112,11 +108,15 @@ func (db *DB) lastEntryIdx() int64 { ...@@ -112,11 +108,15 @@ func (db *DB) lastEntryIdx() int64 {
} }
func (db *DB) init() error { func (db *DB) init() error {
db.updateEntryCountMetric() defer db.updateEntryCountMetric() // Always update the entry count metric after init completes
if err := db.trimInvalidTrailingEntries(); err != nil {
return fmt.Errorf("failed to trim invalid trailing entries: %w", err)
}
if db.lastEntryIdx() < 0 { if db.lastEntryIdx() < 0 {
// Database is empty so no context to load // Database is empty so no context to load
return nil return nil
} }
lastCheckpoint := (db.lastEntryIdx() / searchCheckpointFrequency) * searchCheckpointFrequency lastCheckpoint := (db.lastEntryIdx() / searchCheckpointFrequency) * searchCheckpointFrequency
i, err := db.newIterator(lastCheckpoint) i, err := db.newIterator(lastCheckpoint)
if err != nil { if err != nil {
...@@ -135,6 +135,36 @@ func (db *DB) init() error { ...@@ -135,6 +135,36 @@ func (db *DB) init() error {
return nil return nil
} }
func (db *DB) trimInvalidTrailingEntries() error {
i := db.lastEntryIdx()
for ; i >= 0; i-- {
entry, err := db.store.Read(i)
if err != nil {
return fmt.Errorf("failed to read %v to check for trailing entries: %w", i, err)
}
if entry[0] == typeExecutingCheck {
// executing check is a valid final entry
break
}
if entry[0] == typeInitiatingEvent {
evt, err := newInitiatingEventFromEntry(entry)
if err != nil {
// Entry is invalid, keep walking backwards
continue
}
if !evt.hasExecMsg {
// init event with no exec msg is a valid final entry
break
}
}
}
if i < db.lastEntryIdx() {
db.log.Warn("Truncating unexpected trailing entries", "prev", db.lastEntryIdx(), "new", i)
return db.store.Truncate(i)
}
return nil
}
func (db *DB) updateEntryCountMetric() { func (db *DB) updateEntryCountMetric() {
db.m.RecordEntryCount(db.lastEntryIdx() + 1) db.m.RecordEntryCount(db.lastEntryIdx() + 1)
} }
...@@ -162,21 +192,55 @@ func (db *DB) ClosestBlockInfo(blockNum uint64) (uint64, TruncatedHash, error) { ...@@ -162,21 +192,55 @@ func (db *DB) ClosestBlockInfo(blockNum uint64) (uint64, TruncatedHash, error) {
// Contains return true iff the specified logHash is recorded in the specified blockNum and logIdx. // Contains return true iff the specified logHash is recorded in the specified blockNum and logIdx.
// logIdx is the index of the log in the array of all logs the block. // logIdx is the index of the log in the array of all logs the block.
// This can be used to check the validity of cross-chain interop events.
func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash TruncatedHash) (bool, error) { func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash TruncatedHash) (bool, error) {
db.rwLock.RLock() db.rwLock.RLock()
defer db.rwLock.RUnlock() defer db.rwLock.RUnlock()
db.log.Trace("Checking for log", "blockNum", blockNum, "logIdx", logIdx, "hash", logHash) db.log.Trace("Checking for log", "blockNum", blockNum, "logIdx", logIdx, "hash", logHash)
evtHash, _, err := db.findLogInfo(blockNum, logIdx)
if errors.Is(err, ErrNotFound) {
// Did not find a log at blockNum and logIdx
return false, nil
} else if err != nil {
return false, err
}
db.log.Trace("Found initiatingEvent", "blockNum", blockNum, "logIdx", logIdx, "hash", evtHash)
// Found the requested block and log index, check if the hash matches
return evtHash == logHash, nil
}
// Executes checks if the log identified by the specific block number and log index, has an ExecutingMessage associated
// with it that needs to be checked as part of interop validation.
// logIdx is the index of the log in the array of all logs the block.
// Returns the ExecutingMessage if it exists, or ExecutingMessage{} if the log is found but has no ExecutingMessage.
// Returns ErrNotFound if the specified log does not exist in the database.
func (db *DB) Executes(blockNum uint64, logIdx uint32) (ExecutingMessage, error) {
db.rwLock.RLock()
defer db.rwLock.RUnlock()
_, iter, err := db.findLogInfo(blockNum, logIdx)
if err != nil {
return ExecutingMessage{}, err
}
execMsg, err := iter.ExecMessage()
if err != nil {
return ExecutingMessage{}, fmt.Errorf("failed to read executing message: %w", err)
}
return execMsg, nil
}
func (db *DB) findLogInfo(blockNum uint64, logIdx uint32) (TruncatedHash, *iterator, error) {
entryIdx, err := db.searchCheckpoint(blockNum, logIdx) entryIdx, err := db.searchCheckpoint(blockNum, logIdx)
if errors.Is(err, io.EOF) { if errors.Is(err, io.EOF) {
// Did not find a checkpoint to start reading from so the log cannot be present. // Did not find a checkpoint to start reading from so the log cannot be present.
return false, nil return TruncatedHash{}, nil, ErrNotFound
} else if err != nil { } else if err != nil {
return false, err return TruncatedHash{}, nil, err
} }
i, err := db.newIterator(entryIdx) i, err := db.newIterator(entryIdx)
if err != nil { if err != nil {
return false, fmt.Errorf("failed to create iterator: %w", err) return TruncatedHash{}, nil, fmt.Errorf("failed to create iterator: %w", err)
} }
db.log.Trace("Starting search", "entry", entryIdx, "blockNum", i.current.blockNum, "logIdx", i.current.logIdx) db.log.Trace("Starting search", "entry", entryIdx, "blockNum", i.current.blockNum, "logIdx", i.current.logIdx)
defer func() { defer func() {
...@@ -186,37 +250,64 @@ func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash TruncatedHash) (b ...@@ -186,37 +250,64 @@ func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash TruncatedHash) (b
evtBlockNum, evtLogIdx, evtHash, err := i.NextLog() evtBlockNum, evtLogIdx, evtHash, err := i.NextLog()
if errors.Is(err, io.EOF) { if errors.Is(err, io.EOF) {
// Reached end of log without finding the event // Reached end of log without finding the event
return false, nil return TruncatedHash{}, nil, ErrNotFound
} else if err != nil { } else if err != nil {
return false, fmt.Errorf("failed to read next log: %w", err) return TruncatedHash{}, nil, fmt.Errorf("failed to read next log: %w", err)
} }
if evtBlockNum == blockNum && evtLogIdx == logIdx { if evtBlockNum == blockNum && evtLogIdx == logIdx {
db.log.Trace("Found initiatingEvent", "blockNum", evtBlockNum, "logIdx", evtLogIdx, "hash", evtHash) db.log.Trace("Found initiatingEvent", "blockNum", evtBlockNum, "logIdx", evtLogIdx, "hash", evtHash)
// Found the requested block and log index, check if the hash matches return evtHash, i, nil
return evtHash == logHash, nil
} }
if evtBlockNum > blockNum || (evtBlockNum == blockNum && evtLogIdx > logIdx) { if evtBlockNum > blockNum || (evtBlockNum == blockNum && evtLogIdx > logIdx) {
// Progressed past the requested log without finding it. // Progressed past the requested log without finding it.
return false, nil return TruncatedHash{}, nil, ErrNotFound
} }
} }
} }
func (db *DB) newIterator(startCheckpointEntry int64) (*iterator, error) { func (db *DB) newIterator(startCheckpointEntry int64) (*iterator, error) {
// TODO(optimism#10857): Handle starting from a checkpoint after initiating-event but before its executing-link checkpoint, err := db.readSearchCheckpoint(startCheckpointEntry)
// Will need to read the entry prior to the checkpoint to get the initiating event info
current, err := db.readSearchCheckpoint(startCheckpointEntry)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read search checkpoint entry %v: %w", startCheckpointEntry, err) return nil, fmt.Errorf("failed to read search checkpoint entry %v: %w", startCheckpointEntry, err)
} }
startIdx := startCheckpointEntry + 2
firstEntry, err := db.store.Read(startIdx)
if errors.Is(err, io.EOF) {
// There should always be an entry after a checkpoint and canonical hash so an EOF here is data corruption
return nil, fmt.Errorf("%w: no entry after checkpoint and canonical hash at %v", ErrDataCorruption, startCheckpointEntry)
} else if err != nil {
return nil, fmt.Errorf("failed to read first entry to iterate %v: %w", startCheckpointEntry+2, err)
}
startLogCtx := logContext{
blockNum: checkpoint.blockNum,
logIdx: checkpoint.logIdx,
}
// Handle starting from a checkpoint after initiating-event but before its executing-link or executing-check
if firstEntry[0] == typeExecutingLink || firstEntry[0] == typeExecutingCheck {
if firstEntry[0] == typeExecutingLink {
// The start checkpoint was between the initiating event and the executing link
// Step back to read the initiating event. The checkpoint block data will be for the initiating event
startIdx = startCheckpointEntry - 1
} else {
// The start checkpoint was between the executing link and the executing check
// Step back to read the initiating event. The checkpoint block data will be for the initiating event
startIdx = startCheckpointEntry - 2
}
initEntry, err := db.store.Read(startIdx)
if err != nil {
return nil, fmt.Errorf("failed to read prior initiating event: %w", err)
}
initEvt, err := newInitiatingEventFromEntry(initEntry)
if err != nil {
return nil, fmt.Errorf("invalid initiating event at idx %v: %w", startIdx, err)
}
startLogCtx = initEvt.preContext(startLogCtx)
}
i := &iterator{ i := &iterator{
db: db, db: db,
// +2 to skip the initial search checkpoint and the canonical hash event after it // +2 to skip the initial search checkpoint and the canonical hash event after it
nextEntryIdx: startCheckpointEntry + 2, nextEntryIdx: startIdx,
current: logContext{ current: startLogCtx,
blockNum: current.blockNum,
logIdx: current.logIdx,
},
} }
return i, nil return i, nil
} }
...@@ -260,7 +351,7 @@ func (db *DB) searchCheckpoint(blockNum uint64, logIdx uint32) (int64, error) { ...@@ -260,7 +351,7 @@ func (db *DB) searchCheckpoint(blockNum uint64, logIdx uint32) (int64, error) {
return (i - 1) * searchCheckpointFrequency, nil return (i - 1) * searchCheckpointFrequency, nil
} }
func (db *DB) AddLog(logHash TruncatedHash, block eth.BlockID, timestamp uint64, logIdx uint32) error { func (db *DB) AddLog(logHash TruncatedHash, block eth.BlockID, timestamp uint64, logIdx uint32, execMsg *ExecutingMessage) error {
db.rwLock.Lock() db.rwLock.Lock()
defer db.rwLock.Unlock() defer db.rwLock.Unlock()
postState := logContext{ postState := logContext{
...@@ -279,15 +370,42 @@ func (db *DB) AddLog(logHash TruncatedHash, block eth.BlockID, timestamp uint64, ...@@ -279,15 +370,42 @@ func (db *DB) AddLog(logHash TruncatedHash, block eth.BlockID, timestamp uint64,
if db.lastEntryContext.blockNum < block.Number && logIdx != 0 { if db.lastEntryContext.blockNum < block.Number && logIdx != 0 {
return fmt.Errorf("%w: adding log %v as first log in block %v", ErrLogOutOfOrder, logIdx, block.Number) return fmt.Errorf("%w: adding log %v as first log in block %v", ErrLogOutOfOrder, logIdx, block.Number)
} }
if (db.lastEntryIdx()+1)%searchCheckpointFrequency == 0 { var entriesToAdd []entrydb.Entry
if err := db.writeSearchCheckpoint(block.Number, logIdx, timestamp, block.Hash); err != nil { newContext := db.lastEntryContext
return fmt.Errorf("failed to write search checkpoint: %w", err) lastEntryIdx := db.lastEntryIdx()
addEntry := func(entry entrydb.Entry) {
entriesToAdd = append(entriesToAdd, entry)
lastEntryIdx++
}
maybeAddCheckpoint := func() {
if (lastEntryIdx+1)%searchCheckpointFrequency == 0 {
addEntry(newSearchCheckpoint(block.Number, logIdx, timestamp).encode())
addEntry(newCanonicalHash(TruncateHash(block.Hash)).encode())
newContext = postState
} }
db.lastEntryContext = postState
} }
maybeAddCheckpoint()
evt, err := newInitiatingEvent(newContext, postState.blockNum, postState.logIdx, logHash, execMsg != nil)
if err != nil {
return fmt.Errorf("failed to create initiating event: %w", err)
}
addEntry(evt.encode())
if err := db.writeInitiatingEvent(postState, logHash); err != nil { if execMsg != nil {
return err maybeAddCheckpoint()
link, err := newExecutingLink(*execMsg)
if err != nil {
return fmt.Errorf("failed to create executing link: %w", err)
}
addEntry(link.encode())
maybeAddCheckpoint()
addEntry(newExecutingCheck(execMsg.Hash).encode())
}
if err := db.store.Append(entriesToAdd...); err != nil {
return fmt.Errorf("failed to append entries: %w", err)
} }
db.lastEntryContext = postState db.lastEntryContext = postState
db.updateEntryCountMetric() db.updateEntryCountMetric()
...@@ -347,17 +465,6 @@ func (db *DB) Rewind(headBlockNum uint64) error { ...@@ -347,17 +465,6 @@ func (db *DB) Rewind(headBlockNum uint64) error {
return nil return nil
} }
// writeSearchCheckpoint appends search checkpoint and canonical hash entry to the log
// type 0: "search checkpoint" <type><uint64 block number: 8 bytes><uint32 event index offset: 4 bytes><uint64 timestamp: 8 bytes> = 20 bytes
// type 1: "canonical hash" <type><parent blockhash truncated: 20 bytes> = 21 bytes
func (db *DB) writeSearchCheckpoint(blockNum uint64, logIdx uint32, timestamp uint64, blockHash common.Hash) error {
entry := newSearchCheckpoint(blockNum, logIdx, timestamp).encode()
if err := db.store.Append(entry); err != nil {
return err
}
return db.writeCanonicalHash(blockHash)
}
func (db *DB) readSearchCheckpoint(entryIdx int64) (searchCheckpoint, error) { func (db *DB) readSearchCheckpoint(entryIdx int64) (searchCheckpoint, error) {
data, err := db.store.Read(entryIdx) data, err := db.store.Read(entryIdx)
if err != nil { if err != nil {
...@@ -366,39 +473,14 @@ func (db *DB) readSearchCheckpoint(entryIdx int64) (searchCheckpoint, error) { ...@@ -366,39 +473,14 @@ func (db *DB) readSearchCheckpoint(entryIdx int64) (searchCheckpoint, error) {
return newSearchCheckpointFromEntry(data) return newSearchCheckpointFromEntry(data)
} }
// writeCanonicalHash appends a canonical hash entry to the log
// type 1: "canonical hash" <type><parent blockhash truncated: 20 bytes> = 21 bytes
func (db *DB) writeCanonicalHash(blockHash common.Hash) error {
return db.store.Append(newCanonicalHash(TruncateHash(blockHash)).encode())
}
func (db *DB) readCanonicalHash(entryIdx int64) (canonicalHash, error) { func (db *DB) readCanonicalHash(entryIdx int64) (canonicalHash, error) {
data, err := db.store.Read(entryIdx) data, err := db.store.Read(entryIdx)
if err != nil { if err != nil {
return canonicalHash{}, fmt.Errorf("failed to read entry %v: %w", entryIdx, err) return canonicalHash{}, fmt.Errorf("failed to read entry %v: %w", entryIdx, err)
} }
if data[0] != typeCanonicalHash {
return canonicalHash{}, fmt.Errorf("%w: expected canonical hash at entry %v but was type %v", ErrDataCorruption, entryIdx, data[0])
}
return newCanonicalHashFromEntry(data) return newCanonicalHashFromEntry(data)
} }
// writeInitiatingEvent appends an initiating event to the log
// type 2: "initiating event" <type><blocknum diff: 1 byte><event flags: 1 byte><event-hash: 20 bytes> = 23 bytes
func (db *DB) writeInitiatingEvent(postState logContext, logHash TruncatedHash) error {
evt, err := newInitiatingEvent(db.lastEntryContext, postState.blockNum, postState.logIdx, logHash)
if err != nil {
return err
}
return db.store.Append(evt.encode())
}
func TruncateHash(hash common.Hash) TruncatedHash {
var truncated TruncatedHash
copy(truncated[:], hash[0:20])
return truncated
}
func (db *DB) Close() error { func (db *DB) Close() error {
return db.store.Close() return db.store.Close()
} }
package db package db
import ( import (
"errors"
"fmt" "fmt"
"io" "io"
"os" "os"
...@@ -42,6 +43,11 @@ func checkDBInvariants(t *testing.T, dbPath string, m *stubMetrics) { ...@@ -42,6 +43,11 @@ func checkDBInvariants(t *testing.T, dbPath string, m *stubMetrics) {
invariantCanonicalHashAfterEverySearchCheckpoint, invariantCanonicalHashAfterEverySearchCheckpoint,
invariantSearchCheckpointBeforeEveryCanonicalHash, invariantSearchCheckpointBeforeEveryCanonicalHash,
invariantIncrementLogIdxIfNotImmediatelyAfterCanonicalHash, invariantIncrementLogIdxIfNotImmediatelyAfterCanonicalHash,
invariantExecLinkAfterInitEventWithFlagSet,
invariantExecLinkOnlyAfterInitiatingEventWithFlagSet,
invariantExecCheckAfterExecLink,
invariantExecCheckOnlyAfterExecLink,
invariantValidLastEntry,
} }
for i, entry := range entries { for i, entry := range entries {
for _, invariant := range entryInvariants { for _, invariant := range entryInvariants {
...@@ -146,3 +152,109 @@ func invariantIncrementLogIdxIfNotImmediatelyAfterCanonicalHash(entryIdx int, en ...@@ -146,3 +152,109 @@ func invariantIncrementLogIdxIfNotImmediatelyAfterCanonicalHash(entryIdx int, en
} }
return nil return nil
} }
func invariantExecLinkAfterInitEventWithFlagSet(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entry[0] != typeInitiatingEvent {
return nil
}
hasExecMessage := entry[2]&eventFlagHasExecutingMessage != 0
if !hasExecMessage {
return nil
}
linkIdx := entryIdx + 1
if linkIdx%searchCheckpointFrequency == 0 {
linkIdx += 2 // Skip over the search checkpoint and canonical hash events
}
if len(entries) <= linkIdx {
return fmt.Errorf("expected executing link after initiating event with exec msg flag set at entry %v but there were no more events", entryIdx)
}
if entries[linkIdx][0] != typeExecutingLink {
return fmt.Errorf("expected executing link at idx %v after initiating event with exec msg flag set at entry %v but got type %v", linkIdx, entryIdx, entries[linkIdx][0])
}
return nil
}
func invariantExecLinkOnlyAfterInitiatingEventWithFlagSet(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entry[0] != typeExecutingLink {
return nil
}
if entryIdx == 0 {
return errors.New("found executing link as first entry")
}
initIdx := entryIdx - 1
if initIdx%searchCheckpointFrequency == 1 {
initIdx -= 2 // Skip the canonical hash and search checkpoint entries
}
if initIdx < 0 {
return fmt.Errorf("found executing link without a preceding initiating event at entry %v", entryIdx)
}
initEntry := entries[initIdx]
if initEntry[0] != typeInitiatingEvent {
return fmt.Errorf("expected initiating event at entry %v prior to executing link at %v but got %x", initIdx, entryIdx, initEntry[0])
}
flags := initEntry[2]
if flags&eventFlagHasExecutingMessage == 0 {
return fmt.Errorf("initiating event at %v prior to executing link at %v does not have flag set to indicate needing a executing event: %x", initIdx, entryIdx, initEntry)
}
return nil
}
func invariantExecCheckAfterExecLink(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entry[0] != typeExecutingLink {
return nil
}
checkIdx := entryIdx + 1
if checkIdx%searchCheckpointFrequency == 0 {
checkIdx += 2 // Skip the search checkpoint and canonical hash entries
}
if checkIdx >= len(entries) {
return fmt.Errorf("expected executing link at %v to be followed by executing check at %v but ran out of entries", entryIdx, checkIdx)
}
checkEntry := entries[checkIdx]
if checkEntry[0] != typeExecutingCheck {
return fmt.Errorf("expected executing link at %v to be followed by executing check at %v but got type %v", entryIdx, checkIdx, checkEntry[0])
}
return nil
}
func invariantExecCheckOnlyAfterExecLink(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entry[0] != typeExecutingCheck {
return nil
}
if entryIdx == 0 {
return errors.New("found executing check as first entry")
}
linkIdx := entryIdx - 1
if linkIdx%searchCheckpointFrequency == 1 {
linkIdx -= 2 // Skip the canonical hash and search checkpoint entries
}
if linkIdx < 0 {
return fmt.Errorf("found executing link without a preceding initiating event at entry %v", entryIdx)
}
linkEntry := entries[linkIdx]
if linkEntry[0] != typeExecutingLink {
return fmt.Errorf("expected executing link at entry %v prior to executing check at %v but got %x", linkIdx, entryIdx, linkEntry[0])
}
return nil
}
// invariantValidLastEntry checks that the last entry is either a executing check or initiating event with no exec message
func invariantValidLastEntry(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entryIdx+1 < len(entries) {
return nil
}
if entry[0] == typeExecutingCheck {
return nil
}
if entry[0] != typeInitiatingEvent {
return fmt.Errorf("invalid final event type: %v", entry[0])
}
evt, err := newInitiatingEventFromEntry(entry)
if err != nil {
return fmt.Errorf("final event was invalid: %w", err)
}
if evt.hasExecMsg {
return errors.New("ends with init event that should have exec msg but no exec msg follows")
}
return nil
}
...@@ -2,6 +2,7 @@ package db ...@@ -2,6 +2,7 @@ package db
import ( import (
"bytes" "bytes"
"fmt"
"io" "io"
"io/fs" "io/fs"
"os" "os"
...@@ -10,6 +11,7 @@ import ( ...@@ -10,6 +11,7 @@ import (
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -32,7 +34,7 @@ func TestErrorOpeningDatabase(t *testing.T) { ...@@ -32,7 +34,7 @@ func TestErrorOpeningDatabase(t *testing.T) {
func runDBTest(t *testing.T, setup func(t *testing.T, db *DB, m *stubMetrics), assert func(t *testing.T, db *DB, m *stubMetrics)) { func runDBTest(t *testing.T, setup func(t *testing.T, db *DB, m *stubMetrics), assert func(t *testing.T, db *DB, m *stubMetrics)) {
createDb := func(t *testing.T, dir string) (*DB, *stubMetrics, string) { createDb := func(t *testing.T, dir string) (*DB, *stubMetrics, string) {
logger := testlog.Logger(t, log.LvlTrace) logger := testlog.Logger(t, log.LvlInfo)
path := filepath.Join(dir, "test.db") path := filepath.Join(dir, "test.db")
m := &stubMetrics{} m := &stubMetrics{}
db, err := NewFromFile(logger, m, path) db, err := NewFromFile(logger, m, path)
...@@ -81,7 +83,7 @@ func TestAddLog(t *testing.T) { ...@@ -81,7 +83,7 @@ func TestAddLog(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {}, func(t *testing.T, db *DB, m *stubMetrics) {},
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 0}, 5000, 0) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 0}, 5000, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder) require.ErrorIs(t, err, ErrLogOutOfOrder)
}) })
}) })
...@@ -89,7 +91,7 @@ func TestAddLog(t *testing.T) { ...@@ -89,7 +91,7 @@ func TestAddLog(t *testing.T) {
t.Run("FirstEntry", func(t *testing.T) { t.Run("FirstEntry", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err) require.NoError(t, err)
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
...@@ -100,11 +102,11 @@ func TestAddLog(t *testing.T) { ...@@ -100,11 +102,11 @@ func TestAddLog(t *testing.T) {
t.Run("MultipleEntriesFromSameBlock", func(t *testing.T) { t.Run("MultipleEntriesFromSameBlock", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err) require.NoError(t, err)
err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1) err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil)
require.NoError(t, err) require.NoError(t, err)
err = db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2) err = db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2, nil)
require.NoError(t, err) require.NoError(t, err)
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
...@@ -118,13 +120,13 @@ func TestAddLog(t *testing.T) { ...@@ -118,13 +120,13 @@ func TestAddLog(t *testing.T) {
t.Run("MultipleEntriesFromMultipleBlocks", func(t *testing.T) { t.Run("MultipleEntriesFromMultipleBlocks", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err) require.NoError(t, err)
err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1) err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil)
require.NoError(t, err) require.NoError(t, err)
err = db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(16), Number: 16}, 5002, 0) err = db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(16), Number: 16}, 5002, 0, nil)
require.NoError(t, err) require.NoError(t, err)
err = db.AddLog(createTruncatedHash(4), eth.BlockID{Hash: createHash(16), Number: 16}, 5002, 1) err = db.AddLog(createTruncatedHash(4), eth.BlockID{Hash: createHash(16), Number: 16}, 5002, 1, nil)
require.NoError(t, err) require.NoError(t, err)
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
...@@ -139,11 +141,11 @@ func TestAddLog(t *testing.T) { ...@@ -139,11 +141,11 @@ func TestAddLog(t *testing.T) {
t.Run("ErrorWhenBeforeCurrentBlock", func(t *testing.T) { t.Run("ErrorWhenBeforeCurrentBlock", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err) require.NoError(t, err)
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4998, 0) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4998, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder) require.ErrorIs(t, err, ErrLogOutOfOrder)
}) })
}) })
...@@ -151,13 +153,13 @@ func TestAddLog(t *testing.T) { ...@@ -151,13 +153,13 @@ func TestAddLog(t *testing.T) {
t.Run("ErrorWhenBeforeCurrentBlockButAfterLastCheckpoint", func(t *testing.T) { t.Run("ErrorWhenBeforeCurrentBlockButAfterLastCheckpoint", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(13), Number: 13}, 5000, 0) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(13), Number: 13}, 5000, 0, nil)
require.NoError(t, err) require.NoError(t, err)
err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0) err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err) require.NoError(t, err)
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4998, 0) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4998, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder) require.ErrorIs(t, err, ErrLogOutOfOrder)
}) })
}) })
...@@ -165,11 +167,11 @@ func TestAddLog(t *testing.T) { ...@@ -165,11 +167,11 @@ func TestAddLog(t *testing.T) {
t.Run("ErrorWhenBeforeCurrentLogEvent", func(t *testing.T) { t.Run("ErrorWhenBeforeCurrentLogEvent", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil))
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 0) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 0, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder) require.ErrorIs(t, err, ErrLogOutOfOrder)
}) })
}) })
...@@ -177,15 +179,15 @@ func TestAddLog(t *testing.T) { ...@@ -177,15 +179,15 @@ func TestAddLog(t *testing.T) {
t.Run("ErrorWhenBeforeCurrentLogEventButAfterLastCheckpoint", func(t *testing.T) { t.Run("ErrorWhenBeforeCurrentLogEventButAfterLastCheckpoint", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err) require.NoError(t, err)
err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1) err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil)
require.NoError(t, err) require.NoError(t, err)
err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2) err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2, nil)
require.NoError(t, err) require.NoError(t, err)
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 1) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 1, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder) require.ErrorIs(t, err, ErrLogOutOfOrder)
}) })
}) })
...@@ -193,11 +195,11 @@ func TestAddLog(t *testing.T) { ...@@ -193,11 +195,11 @@ func TestAddLog(t *testing.T) {
t.Run("ErrorWhenAtCurrentLogEvent", func(t *testing.T) { t.Run("ErrorWhenAtCurrentLogEvent", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil))
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 1) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 1, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder) require.ErrorIs(t, err, ErrLogOutOfOrder)
}) })
}) })
...@@ -205,12 +207,12 @@ func TestAddLog(t *testing.T) { ...@@ -205,12 +207,12 @@ func TestAddLog(t *testing.T) {
t.Run("ErrorWhenAtCurrentLogEventButAfterLastCheckpoint", func(t *testing.T) { t.Run("ErrorWhenAtCurrentLogEventButAfterLastCheckpoint", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2, nil))
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 2) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 2, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder) require.ErrorIs(t, err, ErrLogOutOfOrder)
}) })
}) })
...@@ -218,11 +220,11 @@ func TestAddLog(t *testing.T) { ...@@ -218,11 +220,11 @@ func TestAddLog(t *testing.T) {
t.Run("ErrorWhenSkippingLogEvent", func(t *testing.T) { t.Run("ErrorWhenSkippingLogEvent", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, nil)
require.NoError(t, err) require.NoError(t, err)
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 2) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 2, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder) require.ErrorIs(t, err, ErrLogOutOfOrder)
}) })
}) })
...@@ -230,7 +232,7 @@ func TestAddLog(t *testing.T) { ...@@ -230,7 +232,7 @@ func TestAddLog(t *testing.T) {
t.Run("ErrorWhenFirstLogIsNotLogIdxZero", func(t *testing.T) { t.Run("ErrorWhenFirstLogIsNotLogIdxZero", func(t *testing.T) {
runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) {}, runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) {},
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 5) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 5, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder) require.ErrorIs(t, err, ErrLogOutOfOrder)
}) })
}) })
...@@ -238,10 +240,10 @@ func TestAddLog(t *testing.T) { ...@@ -238,10 +240,10 @@ func TestAddLog(t *testing.T) {
t.Run("ErrorWhenFirstLogOfNewBlockIsNotLogIdxZero", func(t *testing.T) { t.Run("ErrorWhenFirstLogOfNewBlockIsNotLogIdxZero", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4996, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4996, 0, nil))
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 1) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 1, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder) require.ErrorIs(t, err, ErrLogOutOfOrder)
}) })
}) })
...@@ -262,15 +264,15 @@ func TestAddLog(t *testing.T) { ...@@ -262,15 +264,15 @@ func TestAddLog(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
for i := 0; i < block1LogCount; i++ { for i := 0; i < block1LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block1, 3000, uint32(i)) err := db.AddLog(createTruncatedHash(i), block1, 3000, uint32(i), nil)
require.NoErrorf(t, err, "failed to add log %v of block 1", i) require.NoErrorf(t, err, "failed to add log %v of block 1", i)
} }
for i := 0; i < block2LogCount; i++ { for i := 0; i < block2LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block2, 3002, uint32(i)) err := db.AddLog(createTruncatedHash(i), block2, 3002, uint32(i), nil)
require.NoErrorf(t, err, "failed to add log %v of block 2", i) require.NoErrorf(t, err, "failed to add log %v of block 2", i)
} }
for i := 0; i < block3LogCount; i++ { for i := 0; i < block3LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block3, 3004, uint32(i)) err := db.AddLog(createTruncatedHash(i), block3, 3004, uint32(i), nil)
require.NoErrorf(t, err, "failed to add log %v of block 3", i) require.NoErrorf(t, err, "failed to add log %v of block 3", i)
} }
// Verify that we're right before the fourth checkpoint will be written. // Verify that we're right before the fourth checkpoint will be written.
...@@ -279,7 +281,7 @@ func TestAddLog(t *testing.T) { ...@@ -279,7 +281,7 @@ func TestAddLog(t *testing.T) {
// so the fourth is at entry 3*searchCheckpointFrequency // so the fourth is at entry 3*searchCheckpointFrequency
require.EqualValues(t, 3*searchCheckpointFrequency, m.entryCount) require.EqualValues(t, 3*searchCheckpointFrequency, m.entryCount)
for i := 0; i < block4LogCount; i++ { for i := 0; i < block4LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block4, 3006, uint32(i)) err := db.AddLog(createTruncatedHash(i), block4, 3006, uint32(i), nil)
require.NoErrorf(t, err, "failed to add log %v of block 4", i) require.NoErrorf(t, err, "failed to add log %v of block 4", i)
} }
}, },
...@@ -308,14 +310,91 @@ func TestAddLog(t *testing.T) { ...@@ -308,14 +310,91 @@ func TestAddLog(t *testing.T) {
}) })
} }
// TestAddDependentLog verifies that logs recorded with an executing message
// round-trip correctly, including the awkward cases where a search checkpoint
// must be inserted between the initiating event and its executing link, or
// between the executing link and the executing check.
func TestAddDependentLog(t *testing.T) {
	execMsg := ExecutingMessage{
		Chain:     3,
		BlockNum:  42894,
		LogIdx:    42,
		Timestamp: 8742482,
		Hash:      TruncateHash(createHash(8844)),
	}
	t.Run("FirstEntry", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, &execMsg))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 15, 0, createHash(1), execMsg)
			})
	})

	t.Run("CheckpointBetweenInitEventAndExecLink", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				// Pad the database so the next init event lands right before a checkpoint.
				for logIdx := uint32(0); m.entryCount < searchCheckpointFrequency-1; logIdx++ {
					require.NoError(t, db.AddLog(createTruncatedHash(9), eth.BlockID{Hash: createHash(9), Number: 1}, 500, logIdx, nil))
				}
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, &execMsg))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 15, 0, createHash(1), execMsg)
			})
	})

	t.Run("CheckpointBetweenInitEventAndExecLinkNotIncrementingBlock", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				// Same as above, but the dependent log continues the padding block
				// rather than starting a new one.
				for logIdx := uint32(0); m.entryCount < searchCheckpointFrequency-1; logIdx++ {
					require.NoError(t, db.AddLog(createTruncatedHash(9), eth.BlockID{Hash: createHash(9), Number: 1}, 500, logIdx, nil))
				}
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 1}, 5000, 253, &execMsg))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 1, 253, createHash(1), execMsg)
			})
	})

	t.Run("CheckpointBetweenExecLinkAndExecCheck", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				// Pad so the checkpoint falls after the executing link instead.
				for logIdx := uint32(0); m.entryCount < searchCheckpointFrequency-2; logIdx++ {
					require.NoError(t, db.AddLog(createTruncatedHash(9), eth.BlockID{Hash: createHash(9), Number: 1}, 500, logIdx, nil))
				}
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0, &execMsg))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 15, 0, createHash(1), execMsg)
			})
	})

	t.Run("CheckpointBetweenExecLinkAndExecCheckNotIncrementingBlock", func(t *testing.T) {
		runDBTest(t,
			func(t *testing.T, db *DB, m *stubMetrics) {
				// Checkpoint after the executing link, with the log continuing the
				// padding block rather than starting a new one.
				for logIdx := uint32(0); m.entryCount < searchCheckpointFrequency-2; logIdx++ {
					require.NoError(t, db.AddLog(createTruncatedHash(9), eth.BlockID{Hash: createHash(9), Number: 1}, 500, logIdx, nil))
				}
				require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 1}, 5000, 252, &execMsg))
			},
			func(t *testing.T, db *DB, m *stubMetrics) {
				requireContains(t, db, 1, 252, createHash(1), execMsg)
			})
	})
}
func TestContains(t *testing.T) { func TestContains(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1)) require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 2)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 2, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(52), Number: 52}, 500, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(52), Number: 52}, 500, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(52), Number: 52}, 500, 1)) require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(52), Number: 52}, 500, 1, nil))
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
// Should find added logs // Should find added logs
...@@ -339,7 +418,60 @@ func TestContains(t *testing.T) { ...@@ -339,7 +418,60 @@ func TestContains(t *testing.T) {
requireNotContains(t, db, 50, 3, createHash(2)) requireNotContains(t, db, 50, 3, createHash(2))
// Should not find log when hash doesn't match log at block number and index // Should not find log when hash doesn't match log at block number and index
requireNotContains(t, db, 50, 0, createHash(5)) requireWrongHash(t, db, 50, 0, createHash(5), ExecutingMessage{})
})
}
func TestExecutes(t *testing.T) {
execMsg1 := ExecutingMessage{
Chain: 33,
BlockNum: 22,
LogIdx: 99,
Timestamp: 948294,
Hash: createTruncatedHash(332299),
}
execMsg2 := ExecutingMessage{
Chain: 44,
BlockNum: 55,
LogIdx: 66,
Timestamp: 77777,
Hash: createTruncatedHash(445566),
}
execMsg3 := ExecutingMessage{
Chain: 77,
BlockNum: 88,
LogIdx: 89,
Timestamp: 6578567,
Hash: createTruncatedHash(778889),
}
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, &execMsg1))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 2, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(52), Number: 52}, 500, 0, &execMsg2))
require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(52), Number: 52}, 500, 1, &execMsg3))
},
func(t *testing.T, db *DB, m *stubMetrics) {
// Should find added logs
requireExecutingMessage(t, db, 50, 0, ExecutingMessage{})
requireExecutingMessage(t, db, 50, 1, execMsg1)
requireExecutingMessage(t, db, 50, 2, ExecutingMessage{})
requireExecutingMessage(t, db, 52, 0, execMsg2)
requireExecutingMessage(t, db, 52, 1, execMsg3)
// Should not find log when block number too low
requireNotContains(t, db, 49, 0, createHash(1))
// Should not find log when block number too high
requireNotContains(t, db, 51, 0, createHash(1))
// Should not find log when requested log after end of database
requireNotContains(t, db, 52, 2, createHash(3))
requireNotContains(t, db, 53, 0, createHash(3))
// Should not find log when log index too high
requireNotContains(t, db, 50, 3, createHash(2))
}) })
} }
...@@ -356,7 +488,7 @@ func TestGetBlockInfo(t *testing.T) { ...@@ -356,7 +488,7 @@ func TestGetBlockInfo(t *testing.T) {
t.Run("ReturnsEOFWhenRequestedBlockBeforeFirstSearchCheckpoint", func(t *testing.T) { t.Run("ReturnsEOFWhenRequestedBlockBeforeFirstSearchCheckpoint", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(11), Number: 11}, 500, 0) err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(11), Number: 11}, 500, 0, nil)
require.NoError(t, err) require.NoError(t, err)
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
...@@ -369,7 +501,7 @@ func TestGetBlockInfo(t *testing.T) { ...@@ -369,7 +501,7 @@ func TestGetBlockInfo(t *testing.T) {
block := eth.BlockID{Hash: createHash(11), Number: 11} block := eth.BlockID{Hash: createHash(11), Number: 11}
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), block, 500, 0) err := db.AddLog(createTruncatedHash(1), block, 500, 0, nil)
require.NoError(t, err) require.NoError(t, err)
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
...@@ -384,7 +516,7 @@ func TestGetBlockInfo(t *testing.T) { ...@@ -384,7 +516,7 @@ func TestGetBlockInfo(t *testing.T) {
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
for i := 1; i < searchCheckpointFrequency+3; i++ { for i := 1; i < searchCheckpointFrequency+3; i++ {
block := eth.BlockID{Hash: createHash(i), Number: uint64(i)} block := eth.BlockID{Hash: createHash(i), Number: uint64(i)}
err := db.AddLog(createTruncatedHash(i), block, uint64(i)*2, 0) err := db.AddLog(createTruncatedHash(i), block, uint64(i)*2, 0, nil)
require.NoError(t, err) require.NoError(t, err)
} }
}, },
...@@ -411,7 +543,8 @@ func requireClosestBlockInfo(t *testing.T, db *DB, searchFor uint64, expectedBlo ...@@ -411,7 +543,8 @@ func requireClosestBlockInfo(t *testing.T, db *DB, searchFor uint64, expectedBlo
require.Equal(t, TruncateHash(expectedHash), hash) require.Equal(t, TruncateHash(expectedHash), hash)
} }
func requireContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash) { func requireContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash, execMsg ...ExecutingMessage) {
require.LessOrEqual(t, len(execMsg), 1, "cannot have multiple executing messages for a single log")
m, ok := db.m.(*stubMetrics) m, ok := db.m.(*stubMetrics)
require.True(t, ok, "Did not get the expected metrics type") require.True(t, ok, "Did not get the expected metrics type")
result, err := db.Contains(blockNum, logIdx, TruncateHash(logHash)) result, err := db.Contains(blockNum, logIdx, TruncateHash(logHash))
...@@ -419,6 +552,12 @@ func requireContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHa ...@@ -419,6 +552,12 @@ func requireContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHa
require.Truef(t, result, "Did not find log %v in block %v with hash %v", logIdx, blockNum, logHash) require.Truef(t, result, "Did not find log %v in block %v with hash %v", logIdx, blockNum, logHash)
require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints") require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
require.NotZero(t, m.entriesReadForSearch, "Must read at least some entries to find the log") require.NotZero(t, m.entriesReadForSearch, "Must read at least some entries to find the log")
var expectedExecMsg ExecutingMessage
if len(execMsg) == 1 {
expectedExecMsg = execMsg[0]
}
requireExecutingMessage(t, db, blockNum, logIdx, expectedExecMsg)
} }
func requireNotContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash) { func requireNotContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash) {
...@@ -428,22 +567,154 @@ func requireNotContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, lo ...@@ -428,22 +567,154 @@ func requireNotContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, lo
require.NoErrorf(t, err, "Error searching for log %v in block %v", logIdx, blockNum) require.NoErrorf(t, err, "Error searching for log %v in block %v", logIdx, blockNum)
require.Falsef(t, result, "Found unexpected log %v in block %v with hash %v", logIdx, blockNum, logHash) require.Falsef(t, result, "Found unexpected log %v in block %v with hash %v", logIdx, blockNum, logHash)
require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints") require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
}
func TestShouldRollBackInMemoryChangesOnWriteFailure(t *testing.T) { _, err = db.Executes(blockNum, logIdx)
t.Skip("TODO(optimism#10857)") require.ErrorIs(t, err, ErrNotFound, "Found unexpected log when getting executing message")
require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
} }
func TestShouldRecoverWhenSearchCheckpointWrittenButNotCanonicalHash(t *testing.T) { func requireExecutingMessage(t *testing.T, db *DB, blockNum uint64, logIdx uint32, execMsg ExecutingMessage) {
t.Skip("TODO(optimism#10857)") m, ok := db.m.(*stubMetrics)
require.True(t, ok, "Did not get the expected metrics type")
actualExecMsg, err := db.Executes(blockNum, logIdx)
require.NoError(t, err, "Error when searching for executing message")
require.Equal(t, execMsg, actualExecMsg, "Should return matching executing message")
require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
require.NotZero(t, m.entriesReadForSearch, "Must read at least some entries to find the log")
} }
func TestShouldRecoverWhenPartialEntryWritten(t *testing.T) { func requireWrongHash(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash, execMsg ExecutingMessage) {
t.Skip("TODO(optimism#10857)") m, ok := db.m.(*stubMetrics)
require.True(t, ok, "Did not get the expected metrics type")
result, err := db.Contains(blockNum, logIdx, TruncateHash(logHash))
require.NoErrorf(t, err, "Error searching for log %v in block %v", logIdx, blockNum)
require.Falsef(t, result, "Found unexpected log %v in block %v with hash %v", logIdx, blockNum, logHash)
_, err = db.Executes(blockNum, logIdx)
require.NoError(t, err, "Error when searching for executing message")
require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
} }
func TestShouldRecoverWhenInitiatingEventWrittenButNotExecutingLink(t *testing.T) { func TestRecoverOnCreate(t *testing.T) {
t.Skip("TODO(optimism#10857)") createDb := func(t *testing.T, store *stubEntryStore) (*DB, *stubMetrics, error) {
logger := testlog.Logger(t, log.LvlInfo)
m := &stubMetrics{}
db, err := NewFromEntryStore(logger, m, store)
return db, m, err
}
validInitEvent, err := newInitiatingEvent(logContext{blockNum: 1, logIdx: 0}, 1, 0, createTruncatedHash(1), false)
require.NoError(t, err)
validEventSequence := []entrydb.Entry{
newSearchCheckpoint(1, 0, 100).encode(),
newCanonicalHash(createTruncatedHash(344)).encode(),
validInitEvent.encode(),
}
var emptyEventSequence []entrydb.Entry
for _, prefixEvents := range [][]entrydb.Entry{emptyEventSequence, validEventSequence} {
prefixEvents := prefixEvents
storeWithEvents := func(evts ...entrydb.Entry) *stubEntryStore {
store := &stubEntryStore{}
store.entries = append(store.entries, prefixEvents...)
store.entries = append(store.entries, evts...)
return store
}
t.Run(fmt.Sprintf("PrefixEvents-%v", len(prefixEvents)), func(t *testing.T) {
t.Run("NoTruncateWhenLastEntryIsLogWithNoExecMessage", func(t *testing.T) {
initEvent, err := newInitiatingEvent(logContext{blockNum: 3, logIdx: 0}, 3, 0, createTruncatedHash(1), false)
require.NoError(t, err)
store := storeWithEvents(
newSearchCheckpoint(3, 0, 100).encode(),
newCanonicalHash(createTruncatedHash(344)).encode(),
initEvent.encode(),
)
db, m, err := createDb(t, store)
require.NoError(t, err)
require.EqualValues(t, len(prefixEvents)+3, m.entryCount)
requireContains(t, db, 3, 0, createHash(1))
})
t.Run("NoTruncateWhenLastEntryIsExecutingCheck", func(t *testing.T) {
initEvent, err := newInitiatingEvent(logContext{blockNum: 3, logIdx: 0}, 3, 0, createTruncatedHash(1), true)
execMsg := ExecutingMessage{
Chain: 4,
BlockNum: 10,
LogIdx: 4,
Timestamp: 1288,
Hash: createTruncatedHash(4),
}
require.NoError(t, err)
linkEvt, err := newExecutingLink(execMsg)
require.NoError(t, err)
store := storeWithEvents(
newSearchCheckpoint(3, 0, 100).encode(),
newCanonicalHash(createTruncatedHash(344)).encode(),
initEvent.encode(),
linkEvt.encode(),
newExecutingCheck(execMsg.Hash).encode(),
)
db, m, err := createDb(t, store)
require.NoError(t, err)
require.EqualValues(t, len(prefixEvents)+5, m.entryCount)
requireContains(t, db, 3, 0, createHash(1), execMsg)
})
t.Run("TruncateWhenLastEntrySearchCheckpoint", func(t *testing.T) {
store := storeWithEvents(newSearchCheckpoint(3, 0, 100).encode())
_, m, err := createDb(t, store)
require.NoError(t, err)
require.EqualValues(t, len(prefixEvents), m.entryCount)
})
t.Run("TruncateWhenLastEntryCanonicalHash", func(t *testing.T) {
store := storeWithEvents(
newSearchCheckpoint(3, 0, 100).encode(),
newCanonicalHash(createTruncatedHash(344)).encode(),
)
_, m, err := createDb(t, store)
require.NoError(t, err)
require.EqualValues(t, len(prefixEvents), m.entryCount)
})
t.Run("TruncateWhenLastEntryInitEventWithExecMsg", func(t *testing.T) {
initEvent, err := newInitiatingEvent(logContext{blockNum: 3, logIdx: 0}, 3, 0, createTruncatedHash(1), true)
require.NoError(t, err)
store := storeWithEvents(
newSearchCheckpoint(3, 0, 100).encode(),
newCanonicalHash(createTruncatedHash(344)).encode(),
initEvent.encode(),
)
_, m, err := createDb(t, store)
require.NoError(t, err)
require.EqualValues(t, len(prefixEvents), m.entryCount)
})
t.Run("TruncateWhenLastEntryInitEventWithExecLink", func(t *testing.T) {
initEvent, err := newInitiatingEvent(logContext{blockNum: 3, logIdx: 0}, 3, 0, createTruncatedHash(1), true)
require.NoError(t, err)
execMsg := ExecutingMessage{
Chain: 4,
BlockNum: 10,
LogIdx: 4,
Timestamp: 1288,
Hash: createTruncatedHash(4),
}
require.NoError(t, err)
linkEvt, err := newExecutingLink(execMsg)
require.NoError(t, err)
store := storeWithEvents(
newSearchCheckpoint(3, 0, 100).encode(),
newCanonicalHash(createTruncatedHash(344)).encode(),
initEvent.encode(),
linkEvt.encode(),
)
_, m, err := createDb(t, store)
require.NoError(t, err)
require.EqualValues(t, len(prefixEvents), m.entryCount)
})
})
}
} }
func TestRewind(t *testing.T) { func TestRewind(t *testing.T) {
...@@ -458,10 +729,10 @@ func TestRewind(t *testing.T) { ...@@ -458,10 +729,10 @@ func TestRewind(t *testing.T) {
t.Run("AfterLastBlock", func(t *testing.T) { t.Run("AfterLastBlock", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0)) require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(4), eth.BlockID{Hash: createHash(74), Number: 74}, 700, 0)) require.NoError(t, db.AddLog(createTruncatedHash(4), eth.BlockID{Hash: createHash(74), Number: 74}, 700, 0, nil))
require.NoError(t, db.Rewind(75)) require.NoError(t, db.Rewind(75))
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
...@@ -475,8 +746,8 @@ func TestRewind(t *testing.T) { ...@@ -475,8 +746,8 @@ func TestRewind(t *testing.T) {
t.Run("BeforeFirstBlock", func(t *testing.T) { t.Run("BeforeFirstBlock", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
require.NoError(t, db.Rewind(25)) require.NoError(t, db.Rewind(25))
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
...@@ -489,10 +760,10 @@ func TestRewind(t *testing.T) { ...@@ -489,10 +760,10 @@ func TestRewind(t *testing.T) {
t.Run("AtFirstBlock", func(t *testing.T) { t.Run("AtFirstBlock", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 1, nil))
require.NoError(t, db.Rewind(50)) require.NoError(t, db.Rewind(50))
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
...@@ -507,12 +778,12 @@ func TestRewind(t *testing.T) { ...@@ -507,12 +778,12 @@ func TestRewind(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
for i := uint32(0); m.entryCount < searchCheckpointFrequency; i++ { for i := uint32(0); m.entryCount < searchCheckpointFrequency; i++ {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, i)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, i, nil))
} }
require.EqualValues(t, searchCheckpointFrequency, m.entryCount) require.EqualValues(t, searchCheckpointFrequency, m.entryCount)
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0, nil))
require.EqualValues(t, searchCheckpointFrequency+3, m.entryCount, "Should have inserted new checkpoint and extra log") require.EqualValues(t, searchCheckpointFrequency+3, m.entryCount, "Should have inserted new checkpoint and extra log")
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 1, nil))
require.NoError(t, db.Rewind(50)) require.NoError(t, db.Rewind(50))
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
...@@ -527,10 +798,10 @@ func TestRewind(t *testing.T) { ...@@ -527,10 +798,10 @@ func TestRewind(t *testing.T) {
t.Run("BetweenLogEntries", func(t *testing.T) { t.Run("BetweenLogEntries", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil))
require.NoError(t, db.Rewind(55)) require.NoError(t, db.Rewind(55))
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
...@@ -544,12 +815,12 @@ func TestRewind(t *testing.T) { ...@@ -544,12 +815,12 @@ func TestRewind(t *testing.T) {
t.Run("AtExistingLogEntry", func(t *testing.T) { t.Run("AtExistingLogEntry", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 1, nil))
require.NoError(t, db.Rewind(60)) require.NoError(t, db.Rewind(60))
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
...@@ -565,12 +836,12 @@ func TestRewind(t *testing.T) { ...@@ -565,12 +836,12 @@ func TestRewind(t *testing.T) {
t.Run("AtLastEntry", func(t *testing.T) { t.Run("AtLastEntry", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(70), Number: 70}, 502, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(70), Number: 70}, 502, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(70), Number: 70}, 502, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(70), Number: 70}, 502, 1, nil))
require.NoError(t, db.Rewind(70)) require.NoError(t, db.Rewind(70))
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
...@@ -586,20 +857,20 @@ func TestRewind(t *testing.T) { ...@@ -586,20 +857,20 @@ func TestRewind(t *testing.T) {
t.Run("ReaddDeletedBlocks", func(t *testing.T) { t.Run("ReaddDeletedBlocks", func(t *testing.T) {
runDBTest(t, runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 0)) require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 0, nil))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 1)) require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 1, nil))
require.NoError(t, db.Rewind(60)) require.NoError(t, db.Rewind(60))
}, },
func(t *testing.T, db *DB, m *stubMetrics) { func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1) err := db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder, "Cannot add block before rewound head") require.ErrorIs(t, err, ErrLogOutOfOrder, "Cannot add block before rewound head")
err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1) err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1, nil)
require.ErrorIs(t, err, ErrLogOutOfOrder, "Cannot add block that was rewound to") require.ErrorIs(t, err, ErrLogOutOfOrder, "Cannot add block that was rewound to")
err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 61}, 502, 0) err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 61}, 502, 0, nil)
require.NoError(t, err, "Can re-add deleted block") require.NoError(t, err, "Can re-add deleted block")
}) })
}) })
...@@ -619,3 +890,34 @@ func (s *stubMetrics) RecordSearchEntriesRead(count int64) { ...@@ -619,3 +890,34 @@ func (s *stubMetrics) RecordSearchEntriesRead(count int64) {
} }
var _ Metrics = (*stubMetrics)(nil) var _ Metrics = (*stubMetrics)(nil)
// stubEntryStore is a minimal in-memory EntryStore implementation used by the
// DB tests, avoiding any file I/O.
type stubEntryStore struct {
	// entries is the full, ordered list of stored entries.
	entries []entrydb.Entry
}
// Size returns the number of entries currently held by the store.
func (s *stubEntryStore) Size() int64 {
	return int64(len(s.entries))
}
// Read returns the entry at idx, or io.EOF when idx is at or past the end of
// the store.
func (s *stubEntryStore) Read(idx int64) (entrydb.Entry, error) {
	if idx >= int64(len(s.entries)) {
		return entrydb.Entry{}, io.EOF
	}
	return s.entries[idx], nil
}
// Append adds the given entries to the end of the store. It never fails.
func (s *stubEntryStore) Append(entries ...entrydb.Entry) error {
	s.entries = append(s.entries, entries...)
	return nil
}
// Truncate drops every entry after idx, so that idx is the last retained
// entry. Truncate(-1) empties the store; truncating at or beyond the current
// end keeps all entries, matching EntryDB.Truncate semantics.
func (s *stubEntryStore) Truncate(idx int64) error {
	// Clamp to the current size so truncating past the end is a no-op.
	// The previous bound of s.Size()-1 wrongly dropped the final entry
	// whenever idx referenced the last element (e.g. Truncate(Size()-1)).
	s.entries = s.entries[:min(s.Size(), idx+1)]
	return nil
}
// Close is a no-op for the in-memory store.
func (s *stubEntryStore) Close() error {
	return nil
}

// Compile-time check that stubEntryStore satisfies the EntryStore interface.
var _ EntryStore = (*stubEntryStore)(nil)
...@@ -71,6 +71,7 @@ func (c canonicalHash) encode() entrydb.Entry { ...@@ -71,6 +71,7 @@ func (c canonicalHash) encode() entrydb.Entry {
type initiatingEvent struct { type initiatingEvent struct {
blockDiff uint8 blockDiff uint8
incrementLogIdx bool incrementLogIdx bool
hasExecMsg bool
logHash TruncatedHash logHash TruncatedHash
} }
...@@ -83,11 +84,12 @@ func newInitiatingEventFromEntry(data entrydb.Entry) (initiatingEvent, error) { ...@@ -83,11 +84,12 @@ func newInitiatingEventFromEntry(data entrydb.Entry) (initiatingEvent, error) {
return initiatingEvent{ return initiatingEvent{
blockDiff: blockNumDiff, blockDiff: blockNumDiff,
incrementLogIdx: flags&eventFlagIncrementLogIdx != 0, incrementLogIdx: flags&eventFlagIncrementLogIdx != 0,
hasExecMsg: flags&eventFlagHasExecutingMessage != 0,
logHash: TruncatedHash(data[3:23]), logHash: TruncatedHash(data[3:23]),
}, nil }, nil
} }
func newInitiatingEvent(pre logContext, blockNum uint64, logIdx uint32, logHash TruncatedHash) (initiatingEvent, error) { func newInitiatingEvent(pre logContext, blockNum uint64, logIdx uint32, logHash TruncatedHash, hasExecMsg bool) (initiatingEvent, error) {
blockDiff := blockNum - pre.blockNum blockDiff := blockNum - pre.blockNum
if blockDiff > math.MaxUint8 { if blockDiff > math.MaxUint8 {
// TODO(optimism#10857): Need to find a way to support this. // TODO(optimism#10857): Need to find a way to support this.
...@@ -106,6 +108,7 @@ func newInitiatingEvent(pre logContext, blockNum uint64, logIdx uint32, logHash ...@@ -106,6 +108,7 @@ func newInitiatingEvent(pre logContext, blockNum uint64, logIdx uint32, logHash
return initiatingEvent{ return initiatingEvent{
blockDiff: uint8(blockDiff), blockDiff: uint8(blockDiff),
incrementLogIdx: logDiff > 0, incrementLogIdx: logDiff > 0,
hasExecMsg: hasExecMsg,
logHash: logHash, logHash: logHash,
}, nil }, nil
} }
...@@ -121,6 +124,9 @@ func (i initiatingEvent) encode() entrydb.Entry { ...@@ -121,6 +124,9 @@ func (i initiatingEvent) encode() entrydb.Entry {
// Set flag to indicate log idx needs to be incremented (ie we're not directly after a checkpoint) // Set flag to indicate log idx needs to be incremented (ie we're not directly after a checkpoint)
flags = flags | eventFlagIncrementLogIdx flags = flags | eventFlagIncrementLogIdx
} }
if i.hasExecMsg {
flags = flags | eventFlagHasExecutingMessage
}
data[2] = flags data[2] = flags
copy(data[3:23], i.logHash[:]) copy(data[3:23], i.logHash[:])
return data return data
...@@ -139,3 +145,106 @@ func (i initiatingEvent) postContext(pre logContext) logContext { ...@@ -139,3 +145,106 @@ func (i initiatingEvent) postContext(pre logContext) logContext {
} }
return post return post
} }
// preContext is the inverse of postContext: given the logContext produced by
// applying this init event, it reconstructs the logContext that must have been
// the input.
func (i initiatingEvent) preContext(post logContext) logContext {
	prior := post
	prior.blockNum -= uint64(i.blockDiff)
	if i.incrementLogIdx {
		prior.logIdx -= 1
	}
	return prior
}
// executingLink identifies the initiating message that an executing event
// consumes: the chain it was emitted on and its position within that chain's
// log history.
type executingLink struct {
	chain    uint32
	blockNum uint64
	// logIdx is limited to 24 bits by the on-disk encoding (see encode).
	logIdx    uint32
	timestamp uint64
}
// newExecutingLink creates an executingLink from msg, validating that the log
// index fits in the 24-bit field used by the on-disk encoding.
func newExecutingLink(msg ExecutingMessage) (executingLink, error) {
	// encode stores logIdx in 3 bytes, so the maximum representable value is
	// 1<<24 - 1. The previous check (> 1<<24) allowed exactly 1<<24 through,
	// which would have been silently truncated on encode.
	if msg.LogIdx >= 1<<24 {
		return executingLink{}, fmt.Errorf("log idx is too large (%v)", msg.LogIdx)
	}
	return executingLink{
		chain:     msg.Chain,
		blockNum:  msg.BlockNum,
		logIdx:    msg.LogIdx,
		timestamp: msg.Timestamp,
	}, nil
}
// newExecutingLinkFromEntry decodes an executing link entry, rejecting entries
// of any other type.
func newExecutingLinkFromEntry(data entrydb.Entry) (executingLink, error) {
	if data[0] != typeExecutingLink {
		return executingLink{}, fmt.Errorf("%w: attempting to decode executing link but was type %v", ErrDataCorruption, data[0])
	}
	link := executingLink{
		chain:     binary.LittleEndian.Uint32(data[1:5]),
		blockNum:  binary.LittleEndian.Uint64(data[5:13]),
		timestamp: binary.LittleEndian.Uint64(data[16:24]),
	}
	// logIdx occupies 3 little-endian bytes, which has no binary.* helper.
	link.logIdx = uint32(data[13]) | uint32(data[14])<<8 | uint32(data[15])<<16
	return link, nil
}
// encode creates an executing link entry
// type 3: "executing link" <type><chain: 4 bytes><blocknum: 8 bytes><event index: 3 bytes><uint64 timestamp: 8 bytes> = 24 bytes
func (e executingLink) encode() entrydb.Entry {
	var out entrydb.Entry
	out[0] = typeExecutingLink
	binary.LittleEndian.PutUint32(out[1:5], e.chain)
	binary.LittleEndian.PutUint64(out[5:13], e.blockNum)
	// logIdx is written as 3 little-endian bytes by hand.
	out[13], out[14], out[15] = byte(e.logIdx), byte(e.logIdx>>8), byte(e.logIdx>>16)
	binary.LittleEndian.PutUint64(out[16:24], e.timestamp)
	return out
}
// executingCheck records the truncated hash of the initiating message that an
// executing event expects to find.
type executingCheck struct {
	hash TruncatedHash
}

// newExecutingCheck creates an executingCheck for the given truncated hash.
func newExecutingCheck(hash TruncatedHash) executingCheck {
	return executingCheck{hash: hash}
}
// newExecutingCheckFromEntry decodes an executing check entry, rejecting
// entries of any other type.
func newExecutingCheckFromEntry(entry entrydb.Entry) (executingCheck, error) {
	if entry[0] != typeExecutingCheck {
		return executingCheck{}, fmt.Errorf("%w: attempting to decode executing check but was type %v", ErrDataCorruption, entry[0])
	}
	var truncated TruncatedHash
	copy(truncated[:], entry[1:21])
	return newExecutingCheck(truncated), nil
}
// encode creates an executing check entry
// type 4: "executing check" <type><event-hash: 20 bytes> = 21 bytes
func (e executingCheck) encode() entrydb.Entry {
	var out entrydb.Entry
	out[0] = typeExecutingCheck
	copy(out[1:21], e.hash[:])
	return out
}
// newExecutingMessageFromEntries reassembles an ExecutingMessage from its pair
// of on-disk entries: the link (chain, position and timestamp) and the check
// (expected message hash).
func newExecutingMessageFromEntries(linkEntry entrydb.Entry, checkEntry entrydb.Entry) (ExecutingMessage, error) {
	link, linkErr := newExecutingLinkFromEntry(linkEntry)
	if linkErr != nil {
		return ExecutingMessage{}, fmt.Errorf("invalid executing link: %w", linkErr)
	}
	check, checkErr := newExecutingCheckFromEntry(checkEntry)
	if checkErr != nil {
		return ExecutingMessage{}, fmt.Errorf("invalid executing check: %w", checkErr)
	}
	msg := ExecutingMessage{
		Chain:     link.chain,
		BlockNum:  link.blockNum,
		LogIdx:    link.logIdx,
		Timestamp: link.timestamp,
		Hash:      check.hash,
	}
	return msg, nil
}
...@@ -5,6 +5,8 @@ import ( ...@@ -5,6 +5,8 @@ import (
"fmt" "fmt"
"io" "io"
"os" "os"
"github.com/ethereum/go-ethereum/log"
) )
const ( const (
...@@ -24,11 +26,19 @@ type dataAccess interface { ...@@ -24,11 +26,19 @@ type dataAccess interface {
type EntryDB struct { type EntryDB struct {
data dataAccess data dataAccess
lastEntryIdx int64 size int64
cleanupFailedWrite bool
} }
func NewEntryDB(path string) (*EntryDB, error) { // NewEntryDB creates an EntryDB. A new file will be created if the specified path does not exist,
file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0o666) // but parent directories will not be created.
// If the file exists it will be used as the existing data.
// Returns ErrRecoveryRequired if the existing file is not a valid entry db. A EntryDB is still returned but all
// operations will return ErrRecoveryRequired until the Recover method is called.
func NewEntryDB(logger log.Logger, path string) (*EntryDB, error) {
logger.Info("Opening entry database", "path", path)
file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0o644)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open database at %v: %w", path, err) return nil, fmt.Errorf("failed to open database at %v: %w", path, err)
} }
...@@ -36,18 +46,29 @@ func NewEntryDB(path string) (*EntryDB, error) { ...@@ -36,18 +46,29 @@ func NewEntryDB(path string) (*EntryDB, error) {
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to stat database at %v: %w", path, err) return nil, fmt.Errorf("failed to stat database at %v: %w", path, err)
} }
lastEntryIdx := info.Size()/EntrySize - 1 size := info.Size() / EntrySize
return &EntryDB{ db := &EntryDB{
data: file, data: file,
lastEntryIdx: lastEntryIdx, size: size,
}, nil }
if size*EntrySize != info.Size() {
logger.Warn("File size (%v) is nut a multiple of entry size %v. Truncating to last complete entry", size, EntrySize)
if err := db.recover(); err != nil {
return nil, fmt.Errorf("failed to recover database at %v: %w", path, err)
}
}
return db, nil
} }
func (e *EntryDB) Size() int64 { func (e *EntryDB) Size() int64 {
return e.lastEntryIdx + 1 return e.size
} }
// Read an entry from the database by index. Returns io.EOF iff idx is after the last entry.
func (e *EntryDB) Read(idx int64) (Entry, error) { func (e *EntryDB) Read(idx int64) (Entry, error) {
if idx >= e.size {
return Entry{}, io.EOF
}
var out Entry var out Entry
read, err := e.data.ReadAt(out[:], idx*EntrySize) read, err := e.data.ReadAt(out[:], idx*EntrySize)
// Ignore io.EOF if we read the entire last entry as ReadAt may return io.EOF or nil when it reads the last byte // Ignore io.EOF if we read the entire last entry as ReadAt may return io.EOF or nil when it reads the last byte
...@@ -57,25 +78,55 @@ func (e *EntryDB) Read(idx int64) (Entry, error) { ...@@ -57,25 +78,55 @@ func (e *EntryDB) Read(idx int64) (Entry, error) {
return out, nil return out, nil
} }
// Append entries to the database.
// The entries are combined in memory and passed to a single Write invocation.
// If the write fails, it will attempt to truncate any partially written data.
// Subsequent writes to this instance will fail until partially written data is truncated.
func (e *EntryDB) Append(entries ...Entry) error { func (e *EntryDB) Append(entries ...Entry) error {
if e.cleanupFailedWrite {
// Try to rollback partially written data from a previous Append
if truncateErr := e.Truncate(e.size - 1); truncateErr != nil {
return fmt.Errorf("failed to recover from previous write error: %w", truncateErr)
}
}
data := make([]byte, 0, len(entries)*EntrySize)
for _, entry := range entries { for _, entry := range entries {
if _, err := e.data.Write(entry[:]); err != nil { data = append(data, entry[:]...)
// TODO(optimism#10857): When a write fails, need to revert any in memory changes and truncate back to the }
// pre-write state. Likely need to batch writes for multiple entries into a single write akin to transactions if n, err := e.data.Write(data); err != nil {
// to avoid leaving hanging entries without the entry that should follow them. if n == 0 {
// Didn't write any data, so no recovery required
return err return err
} }
e.lastEntryIdx++ // Try to rollback the partially written data
if truncateErr := e.Truncate(e.size - 1); truncateErr != nil {
// Failed to rollback, set a flag to attempt the clean up on the next write
e.cleanupFailedWrite = true
return errors.Join(err, fmt.Errorf("failed to remove partially written data: %w", truncateErr))
}
// Successfully rolled back the changes, still report the failed write
return err
} }
e.size += int64(len(entries))
return nil return nil
} }
// Truncate the database so that the last retained entry is idx. Any entries after idx are deleted.
func (e *EntryDB) Truncate(idx int64) error { func (e *EntryDB) Truncate(idx int64) error {
if err := e.data.Truncate((idx + 1) * EntrySize); err != nil { if err := e.data.Truncate((idx + 1) * EntrySize); err != nil {
return fmt.Errorf("failed to truncate to entry %v: %w", idx, err) return fmt.Errorf("failed to truncate to entry %v: %w", idx, err)
} }
// Update the lastEntryIdx cache and then use db.init() to find the log context for the new latest log entry // Update the lastEntryIdx cache
e.lastEntryIdx = idx e.size = idx + 1
e.cleanupFailedWrite = false
return nil
}
// recover an invalid database by truncating back to the last complete event.
func (e *EntryDB) recover() error {
if err := e.data.Truncate((e.size) * EntrySize); err != nil {
return fmt.Errorf("failed to truncate trailing partial entries: %w", err)
}
return nil return nil
} }
......
...@@ -2,20 +2,30 @@ package entrydb ...@@ -2,20 +2,30 @@ package entrydb
import ( import (
"bytes" "bytes"
"errors"
"io" "io"
"os"
"path/filepath" "path/filepath"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestReadWrite(t *testing.T) { func TestReadWrite(t *testing.T) {
t.Run("BasicReadWrite", func(t *testing.T) { t.Run("BasicReadWrite", func(t *testing.T) {
db := createEntryDB(t) db := createEntryDB(t)
require.EqualValues(t, 0, db.Size())
require.NoError(t, db.Append(createEntry(1))) require.NoError(t, db.Append(createEntry(1)))
require.EqualValues(t, 1, db.Size())
require.NoError(t, db.Append(createEntry(2))) require.NoError(t, db.Append(createEntry(2)))
require.EqualValues(t, 2, db.Size())
require.NoError(t, db.Append(createEntry(3))) require.NoError(t, db.Append(createEntry(3)))
require.EqualValues(t, 3, db.Size())
require.NoError(t, db.Append(createEntry(4))) require.NoError(t, db.Append(createEntry(4)))
require.EqualValues(t, 4, db.Size())
requireRead(t, db, 0, createEntry(1)) requireRead(t, db, 0, createEntry(1))
requireRead(t, db, 1, createEntry(2)) requireRead(t, db, 1, createEntry(2))
requireRead(t, db, 2, createEntry(3)) requireRead(t, db, 2, createEntry(3))
...@@ -33,11 +43,8 @@ func TestReadWrite(t *testing.T) { ...@@ -33,11 +43,8 @@ func TestReadWrite(t *testing.T) {
t.Run("WriteMultiple", func(t *testing.T) { t.Run("WriteMultiple", func(t *testing.T) {
db := createEntryDB(t) db := createEntryDB(t)
require.NoError(t, db.Append( require.NoError(t, db.Append(createEntry(1), createEntry(2), createEntry(3)))
createEntry(1), require.EqualValues(t, 3, db.Size())
createEntry(2),
createEntry(3),
))
requireRead(t, db, 0, createEntry(1)) requireRead(t, db, 0, createEntry(1))
requireRead(t, db, 1, createEntry(2)) requireRead(t, db, 1, createEntry(2))
requireRead(t, db, 2, createEntry(3)) requireRead(t, db, 2, createEntry(3))
...@@ -45,14 +52,17 @@ func TestReadWrite(t *testing.T) { ...@@ -45,14 +52,17 @@ func TestReadWrite(t *testing.T) {
} }
func TestTruncate(t *testing.T) { func TestTruncate(t *testing.T) {
t.Run("Partial", func(t *testing.T) {
db := createEntryDB(t) db := createEntryDB(t)
require.NoError(t, db.Append(createEntry(1))) require.NoError(t, db.Append(createEntry(1)))
require.NoError(t, db.Append(createEntry(2))) require.NoError(t, db.Append(createEntry(2)))
require.NoError(t, db.Append(createEntry(3))) require.NoError(t, db.Append(createEntry(3)))
require.NoError(t, db.Append(createEntry(4))) require.NoError(t, db.Append(createEntry(4)))
require.NoError(t, db.Append(createEntry(5))) require.NoError(t, db.Append(createEntry(5)))
require.EqualValues(t, 5, db.Size())
require.NoError(t, db.Truncate(3)) require.NoError(t, db.Truncate(3))
require.EqualValues(t, 4, db.Size()) // 0, 1, 2 and 3 are preserved
requireRead(t, db, 0, createEntry(1)) requireRead(t, db, 0, createEntry(1))
requireRead(t, db, 1, createEntry(2)) requireRead(t, db, 1, createEntry(2))
requireRead(t, db, 2, createEntry(3)) requireRead(t, db, 2, createEntry(3))
...@@ -62,6 +72,109 @@ func TestTruncate(t *testing.T) { ...@@ -62,6 +72,109 @@ func TestTruncate(t *testing.T) {
require.ErrorIs(t, err, io.EOF) require.ErrorIs(t, err, io.EOF)
_, err = db.Read(5) _, err = db.Read(5)
require.ErrorIs(t, err, io.EOF) require.ErrorIs(t, err, io.EOF)
})
t.Run("Complete", func(t *testing.T) {
db := createEntryDB(t)
require.NoError(t, db.Append(createEntry(1)))
require.NoError(t, db.Append(createEntry(2)))
require.NoError(t, db.Append(createEntry(3)))
require.EqualValues(t, 3, db.Size())
require.NoError(t, db.Truncate(-1))
require.EqualValues(t, 0, db.Size()) // All items are removed
_, err := db.Read(0)
require.ErrorIs(t, err, io.EOF)
})
t.Run("AppendAfterTruncate", func(t *testing.T) {
db := createEntryDB(t)
require.NoError(t, db.Append(createEntry(1)))
require.NoError(t, db.Append(createEntry(2)))
require.NoError(t, db.Append(createEntry(3)))
require.EqualValues(t, 3, db.Size())
require.NoError(t, db.Truncate(1))
require.EqualValues(t, 2, db.Size())
newEntry := createEntry(4)
require.NoError(t, db.Append(newEntry))
entry, err := db.Read(2)
require.NoError(t, err)
require.Equal(t, newEntry, entry)
})
}
// TestTruncateTrailingPartialEntries verifies that opening a database file
// with a few stray bytes after the last complete entry repairs the file:
// the partial tail is truncated away and only whole entries remain.
func TestTruncateTrailingPartialEntries(t *testing.T) {
	logger := testlog.Logger(t, log.LvlInfo)
	path := filepath.Join(t.TempDir(), "entries.db")

	first := createEntry(1)
	second := createEntry(2)
	// Two complete entries followed by 4 bytes of partial-entry garbage.
	corrupt := make([]byte, len(first)+len(second)+4)
	copy(corrupt, first[:])
	copy(corrupt[EntrySize:], second[:])
	corrupt[len(corrupt)-1] = 3 // Some invalid trailing data
	require.NoError(t, os.WriteFile(path, corrupt, 0o644))

	db, err := NewEntryDB(logger, path)
	require.NoError(t, err)
	defer db.Close()

	// Should automatically truncate the file to remove trailing partial entries
	require.EqualValues(t, 2, db.Size())
	info, err := os.Stat(path)
	require.NoError(t, err)
	require.EqualValues(t, 2*EntrySize, info.Size())
}
// TestWriteErrors exercises EntryDB's handling of failed appends via the
// in-memory stubDataAccess, which can inject write and truncate errors.
func TestWriteErrors(t *testing.T) {
expectedErr := errors.New("some error")
// A write that fails mid-entry must be rolled back: size unchanged and
// the partially written bytes truncated away.
t.Run("TruncatePartiallyWrittenData", func(t *testing.T) {
db, stubData := createEntryDBWithStubData()
stubData.writeErr = expectedErr
stubData.writeErrAfterBytes = 3
err := db.Append(createEntry(1), createEntry(2))
require.ErrorIs(t, err, expectedErr)
require.EqualValues(t, 0, db.Size(), "should not consider entries written")
require.Len(t, stubData.data, 0, "should truncate written bytes")
})
// A write that fails before any bytes land needs no rollback.
t.Run("FailBeforeDataWritten", func(t *testing.T) {
db, stubData := createEntryDBWithStubData()
stubData.writeErr = expectedErr
stubData.writeErrAfterBytes = 0
err := db.Append(createEntry(1), createEntry(2))
require.ErrorIs(t, err, expectedErr)
require.EqualValues(t, 0, db.Size(), "should not consider entries written")
require.Len(t, stubData.data, 0, "no data written")
})
// If the rollback truncate also fails, the partial bytes stay on disk but
// reads must not surface them, and the truncate is retried (and must
// succeed) before the next append.
t.Run("PartialWriteAndTruncateFails", func(t *testing.T) {
db, stubData := createEntryDBWithStubData()
stubData.writeErr = expectedErr
// Fail partway through the second entry so one full entry plus a
// fragment is written.
stubData.writeErrAfterBytes = EntrySize + 2
stubData.truncateErr = errors.New("boom")
err := db.Append(createEntry(1), createEntry(2))
require.ErrorIs(t, err, expectedErr)
require.EqualValues(t, 0, db.Size(), "should not consider entries written")
require.Len(t, stubData.data, stubData.writeErrAfterBytes, "rollback failed")
_, err = db.Read(0)
require.ErrorIs(t, err, io.EOF, "should not have first entry")
_, err = db.Read(1)
require.ErrorIs(t, err, io.EOF, "should not have second entry")
// Should retry truncate on next write
stubData.writeErr = nil
stubData.truncateErr = nil
err = db.Append(createEntry(3))
require.NoError(t, err)
// After the retried rollback, the new entry occupies idx 0.
actual, err := db.Read(0)
require.NoError(t, err)
require.Equal(t, createEntry(3), actual)
})
} }
func requireRead(t *testing.T, db *EntryDB, idx int64, expected Entry) { func requireRead(t *testing.T, db *EntryDB, idx int64, expected Entry) {
...@@ -75,10 +188,51 @@ func createEntry(i byte) Entry { ...@@ -75,10 +188,51 @@ func createEntry(i byte) Entry {
} }
func createEntryDB(t *testing.T) *EntryDB { func createEntryDB(t *testing.T) *EntryDB {
db, err := NewEntryDB(filepath.Join(t.TempDir(), "entries.db")) logger := testlog.Logger(t, log.LvlInfo)
db, err := NewEntryDB(logger, filepath.Join(t.TempDir(), "entries.db"))
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { t.Cleanup(func() {
require.NoError(t, db.Close()) require.NoError(t, db.Close())
}) })
return db return db
} }
// createEntryDBWithStubData builds an EntryDB backed by an in-memory stub,
// letting tests inject write/truncate failures deterministically. It returns
// both the DB and the stub so tests can prime and inspect the stub directly.
func createEntryDBWithStubData() (*EntryDB, *stubDataAccess) {
	stub := new(stubDataAccess)
	return &EntryDB{data: stub, size: 0}, stub
}
// stubDataAccess is an in-memory dataAccess implementation that can be primed
// to fail writes and truncations, so tests can exercise EntryDB's
// error-handling paths without touching the filesystem.
type stubDataAccess struct {
	data               []byte // current simulated file contents
	writeErr           error  // if non-nil, Write fails after writeErrAfterBytes bytes
	writeErrAfterBytes int    // bytes "successfully" written before the injected failure
	truncateErr        error  // if non-nil, Truncate fails without modifying data
}

// ReadAt reads into p starting at off, with bytes.Reader semantics
// (io.EOF for reads at or past the end of the data).
func (s *stubDataAccess) ReadAt(p []byte, off int64) (n int, err error) {
	return bytes.NewReader(s.data).ReadAt(p, off)
}

// Write appends p to the in-memory data. When writeErr is set it simulates a
// partial write: only the first writeErrAfterBytes bytes are recorded and
// writeErr is returned. The partial count is clamped to len(p) so a failure
// point larger than the write cannot panic with a slice bounds error.
func (s *stubDataAccess) Write(p []byte) (n int, err error) {
	if s.writeErr != nil {
		n = s.writeErrAfterBytes
		if n > len(p) {
			n = len(p)
		}
		s.data = append(s.data, p[:n]...)
		return n, s.writeErr
	}
	s.data = append(s.data, p...)
	return len(p), nil
}

// Close is a no-op for the in-memory stub.
func (s *stubDataAccess) Close() error {
	return nil
}

// Truncate shrinks the stored data to size bytes, or fails with the injected
// truncateErr while leaving data untouched.
func (s *stubDataAccess) Truncate(size int64) error {
	if s.truncateErr != nil {
		return s.truncateErr
	}
	s.data = s.data[:size]
	return nil
}
var _ dataAccess = (*stubDataAccess)(nil)
package db package db
import ( import (
"errors"
"fmt" "fmt"
"io" "io"
) )
...@@ -10,6 +11,7 @@ type iterator struct { ...@@ -10,6 +11,7 @@ type iterator struct {
nextEntryIdx int64 nextEntryIdx int64
current logContext current logContext
hasExecMsg bool
entriesRead int64 entriesRead int64
} }
...@@ -24,6 +26,7 @@ func (i *iterator) NextLog() (blockNum uint64, logIdx uint32, evtHash TruncatedH ...@@ -24,6 +26,7 @@ func (i *iterator) NextLog() (blockNum uint64, logIdx uint32, evtHash TruncatedH
} }
i.nextEntryIdx++ i.nextEntryIdx++
i.entriesRead++ i.entriesRead++
i.hasExecMsg = false
switch entry[0] { switch entry[0] {
case typeSearchCheckpoint: case typeSearchCheckpoint:
current, err := newSearchCheckpointFromEntry(entry) current, err := newSearchCheckpointFromEntry(entry)
...@@ -33,8 +36,6 @@ func (i *iterator) NextLog() (blockNum uint64, logIdx uint32, evtHash TruncatedH ...@@ -33,8 +36,6 @@ func (i *iterator) NextLog() (blockNum uint64, logIdx uint32, evtHash TruncatedH
} }
i.current.blockNum = current.blockNum i.current.blockNum = current.blockNum
i.current.logIdx = current.logIdx i.current.logIdx = current.logIdx
case typeCanonicalHash:
// Skip
case typeInitiatingEvent: case typeInitiatingEvent:
evt, err := newInitiatingEventFromEntry(entry) evt, err := newInitiatingEventFromEntry(entry)
if err != nil { if err != nil {
...@@ -45,11 +46,11 @@ func (i *iterator) NextLog() (blockNum uint64, logIdx uint32, evtHash TruncatedH ...@@ -45,11 +46,11 @@ func (i *iterator) NextLog() (blockNum uint64, logIdx uint32, evtHash TruncatedH
blockNum = i.current.blockNum blockNum = i.current.blockNum
logIdx = i.current.logIdx logIdx = i.current.logIdx
evtHash = evt.logHash evtHash = evt.logHash
i.hasExecMsg = evt.hasExecMsg
return return
case typeExecutingCheck: case typeCanonicalHash: // Skip
// TODO(optimism#10857): Handle this properly case typeExecutingCheck: // Skip
case typeExecutingLink: case typeExecutingLink: // Skip
// TODO(optimism#10857): Handle this properly
default: default:
outErr = fmt.Errorf("unknown entry type at idx %v %v", entryIdx, entry[0]) outErr = fmt.Errorf("unknown entry type at idx %v %v", entryIdx, entry[0])
return return
...@@ -58,3 +59,41 @@ func (i *iterator) NextLog() (blockNum uint64, logIdx uint32, evtHash TruncatedH ...@@ -58,3 +59,41 @@ func (i *iterator) NextLog() (blockNum uint64, logIdx uint32, evtHash TruncatedH
outErr = io.EOF outErr = io.EOF
return return
} }
// ExecMessage returns the executing message recorded for the most recently
// returned initiating event, or the zero ExecutingMessage when that event
// did not carry one.
func (i *iterator) ExecMessage() (ExecutingMessage, error) {
	if !i.hasExecMsg {
		return ExecutingMessage{}, nil
	}
	// The initiating event is the entry just consumed; its executing message
	// entries follow immediately after it in the log.
	initIdx := i.nextEntryIdx - 1
	msg, err := i.readExecMessage(initIdx)
	if err != nil {
		return ExecutingMessage{}, fmt.Errorf("failed to read exec message for initiating event at %v: %w", initIdx, err)
	}
	return msg, nil
}
// readExecMessage loads the executing link and executing check entries that
// follow the initiating event at initEntryIdx and combines them into an
// ExecutingMessage.
func (i *iterator) readExecMessage(initEntryIdx int64) (ExecutingMessage, error) {
	// Every searchCheckpointFrequency-th entry is a search checkpoint
	// followed by a canonical hash, so the event stream resumes two
	// entries later when an index lands on that boundary.
	skipCheckpoints := func(idx int64) int64 {
		if idx%searchCheckpointFrequency == 0 {
			return idx + 2
		}
		return idx
	}

	linkIdx := skipCheckpoints(initEntryIdx + 1)
	linkEntry, err := i.db.store.Read(linkIdx)
	if errors.Is(err, io.EOF) {
		return ExecutingMessage{}, fmt.Errorf("%w: missing expected executing link event at idx %v", ErrDataCorruption, linkIdx)
	} else if err != nil {
		return ExecutingMessage{}, fmt.Errorf("failed to read executing link event at idx %v: %w", linkIdx, err)
	}

	checkIdx := skipCheckpoints(linkIdx + 1)
	checkEntry, err := i.db.store.Read(checkIdx)
	if errors.Is(err, io.EOF) {
		return ExecutingMessage{}, fmt.Errorf("%w: missing expected executing check event at idx %v", ErrDataCorruption, checkIdx)
	} else if err != nil {
		return ExecutingMessage{}, fmt.Errorf("failed to read executing check event at idx %v: %w", checkIdx, err)
	}
	return newExecutingMessageFromEntries(linkEntry, checkEntry)
}
package db
import (
"errors"
"github.com/ethereum/go-ethereum/common"
)
// Sentinel errors returned by log DB operations; compare with errors.Is.
var (
// ErrLogOutOfOrder indicates a log did not follow the expected order.
ErrLogOutOfOrder = errors.New("log out of order")
// ErrDataCorruption indicates the stored entries are inconsistent.
ErrDataCorruption = errors.New("data corruption")
// ErrNotFound indicates the requested item does not exist.
ErrNotFound = errors.New("not found")
)
// TruncatedHash holds the leading 20 bytes of a 32-byte hash.
type TruncatedHash [20]byte

// TruncateHash shortens hash to its first 20 bytes.
func TruncateHash(hash common.Hash) TruncatedHash {
	var short TruncatedHash
	copy(short[:], hash[:len(short)])
	return short
}
// ExecutingMessage describes the message an executing event refers to.
// NOTE(review): field semantics are inferred from names and usage in
// newExecutingMessageFromEntries — confirm against the writer side.
type ExecutingMessage struct {
Chain uint32 // presumably the chain the initiating message was sent on — verify
BlockNum uint64 // block number of the initiating message
LogIdx uint32 // log index of the initiating message within its block
Timestamp uint64 // presumably the timestamp of the initiating block — verify
Hash TruncatedHash // truncated hash of the initiating log
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment