Commit 1cf5239c authored by Adrian Sutton, committed by GitHub

op-supervisor: Add log db (#10902)

* op-supervisor: Introduce thread-unsafe log database

* op-supervisor: Add simple r/w locking

* op-supervisor: Add comment

* op-supervisor: Start switching to multi-entry database format

* op-supervisor: Improve test to cover the case where a new block starts at a search checkpoint boundary (other than at the start of the file)

* op-supervisor: Use a flag to indicate when log index should increment rather than a 1 byte increment amount.

* op-supervisor: Comment out unused stuff to make lint happy.

* op-supervisor: Load correct block number and log idx on init

* op-supervisor: Refactor state to only hold context that can always be kept up to date.

* op-supervisor: Support rewinding

* op-supervisor: Remove TODO that probably won't be done there

* op-supervisor: Require first log in block to have logIdx 0

* op-supervisor: Remove completed TODO.

* op-supervisor: Improve testing for logs not existing

* op-supervisor: Fix typo

* op-supervisor: Tidy up TODOs and pending tests.

* op-supervisor: Add invariant assertions for db data

* op-supervisor: Lock db in ClosestBlockInfo

* op-supervisor: Label alerts

* op-supervisor: Use a TruncatedHash for logs everywhere and make it a fixed size array.

* op-supervisor: Separate serialization of initiating events

* op-supervisor: Separate serialization of other event types and enforce type code.

* op-supervisor: Introduce entry type

* op-supervisor: Split out an entry database

* op-supervisor: Introduce structs for entry types

* op-supervisor: Use a struct for CanonicalHash too
parent c54b656b
package db
import (
"errors"
"fmt"
"io"
"math"
"sync"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
const (
searchCheckpointFrequency = 256
eventFlagIncrementLogIdx = byte(1)
//eventFlagHasExecutingMessage = byte(1) << 1
)
const (
typeSearchCheckpoint byte = iota
typeCanonicalHash
typeInitiatingEvent
typeExecutingLink
typeExecutingCheck
)
var (
ErrLogOutOfOrder = errors.New("log out of order")
ErrDataCorruption = errors.New("data corruption")
)
type TruncatedHash [20]byte
type Metrics interface {
RecordEntryCount(count int64)
RecordSearchEntriesRead(count int64)
}
type logContext struct {
blockNum uint64
logIdx uint32
}
type entryStore interface {
Size() int64
Read(idx int64) (entrydb.Entry, error)
Append(entries ...entrydb.Entry) error
Truncate(idx int64) error
Close() error
}
// DB implements an append only database for log data and cross-chain dependencies.
//
// To keep the append-only format, reduce data size, and support reorg detection and registering of executing-messages:
//
// Use a fixed 24 bytes per entry.
//
// Data is an append-only log that can be binary-searched for any necessary event data.
//
// Rules:
// if entry_index % 256 == 0: must be type 0. For easy binary search.
// type 1 always adjacent to type 0
// type 2 "diff" values are offsets from type 0 values (always within 256 entries range)
// type 3 always after type 2
// type 4 always after type 3
//
// Types (<type> = 1 byte):
// type 0: "search checkpoint" <type><uint64 block number: 8 bytes><uint32 event index offset: 4 bytes><uint64 timestamp: 8 bytes> = 20 bytes
// type 1: "canonical hash" <type><parent blockhash truncated: 20 bytes> = 21 bytes
// type 2: "initiating event" <type><blocknum diff: 1 byte><event flags: 1 byte><event-hash: 20 bytes> = 23 bytes
// type 3: "executing link" <type><chain: 4 bytes><blocknum: 8 bytes><event index: 3 bytes><uint64 timestamp: 8 bytes> = 24 bytes
// type 4: "executing check" <type><event-hash: 20 bytes> = 21 bytes
// other types: future compat. E.g. for linking to L1, registering block-headers as a kind of initiating-event, tracking safe-head progression, etc.
//
// Right-pad each entry that is not 24 bytes.
//
// event-flags: each bit represents a boolean value, currently only two are defined
// * event-flags & 0x01 - true if the log index should increment. Should only be false when the event is immediately after a search checkpoint and canonical hash
// * event-flags & 0x02 - true if the initiating event has an executing link that should follow. Allows detecting when the executing link failed to write.
// event-hash: H(origin, timestamp, payloadhash); enough to check identifier matches & payload matches.
type DB struct {
log log.Logger
m Metrics
store entryStore
rwLock sync.RWMutex
lastEntryContext logContext
}
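// Illustrative entry layout for an empty database after recording two logs in block 15 and one log
// in block 16 (hypothetical data; hashes abbreviated):
//
//	entry 0: search checkpoint {blockNum: 15, logIdx: 0, timestamp: 5000}
//	entry 1: canonical hash    {hash: TruncateHash(block 15 hash)}
//	entry 2: initiating event  {blockDiff: 0, flags: 0x00, hash: H(block 15, log 0)}
//	entry 3: initiating event  {blockDiff: 0, flags: 0x01 (increment logIdx), hash: H(block 15, log 1)}
//	entry 4: initiating event  {blockDiff: 1, flags: 0x00, hash: H(block 16, log 0)}
//
// The next search checkpoint and canonical hash pair is only written once the entry index reaches
// searchCheckpointFrequency (256).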
func NewFromFile(logger log.Logger, m Metrics, path string) (*DB, error) {
store, err := entrydb.NewEntryDB(path)
if err != nil {
return nil, fmt.Errorf("failed to open DB: %w", err)
}
db := &DB{
log: logger,
m: m,
store: store,
}
if err := db.init(); err != nil {
return nil, fmt.Errorf("failed to init database: %w", err)
}
return db, nil
}
func (db *DB) lastEntryIdx() int64 {
return db.store.Size() - 1
}
func (db *DB) init() error {
db.updateEntryCountMetric()
if db.lastEntryIdx() < 0 {
// Database is empty so no context to load
return nil
}
lastCheckpoint := (db.lastEntryIdx() / searchCheckpointFrequency) * searchCheckpointFrequency
i, err := db.newIterator(lastCheckpoint)
if err != nil {
return fmt.Errorf("failed to create iterator at last search checkpoint: %w", err)
}
// Read all entries until the end of the file
for {
_, _, _, err := i.NextLog()
if errors.Is(err, io.EOF) {
break
} else if err != nil {
return fmt.Errorf("failed to init from existing entries: %w", err)
}
}
db.lastEntryContext = i.current
return nil
}
func (db *DB) updateEntryCountMetric() {
db.m.RecordEntryCount(db.lastEntryIdx() + 1)
}
// ClosestBlockInfo returns the block number and hash of the highest recorded block at or before blockNum.
// Since block data is only recorded in search checkpoints, this may return an earlier block even if log data is
// recorded for the requested block.
func (db *DB) ClosestBlockInfo(blockNum uint64) (uint64, TruncatedHash, error) {
db.rwLock.RLock()
defer db.rwLock.RUnlock()
checkpointIdx, err := db.searchCheckpoint(blockNum, math.MaxUint32)
if err != nil {
return 0, TruncatedHash{}, fmt.Errorf("no checkpoint at or before block %v found: %w", blockNum, err)
}
checkpoint, err := db.readSearchCheckpoint(checkpointIdx)
if err != nil {
return 0, TruncatedHash{}, fmt.Errorf("failed to reach checkpoint: %w", err)
}
entry, err := db.readCanonicalHash(checkpointIdx + 1)
if err != nil {
return 0, TruncatedHash{}, fmt.Errorf("failed to read canonical hash: %w", err)
}
return checkpoint.blockNum, entry.hash, nil
}
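// For example (hypothetical data): if logs have been recorded for blocks 11 through 20 but the only
// search checkpoints written so far cover blocks 11 and 17, ClosestBlockInfo(19) returns block 17's
// number and hash rather than block 19's.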
// Contains returns true iff the specified logHash is recorded in the specified blockNum and logIdx.
// logIdx is the index of the log in the array of all logs in the block.
func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash TruncatedHash) (bool, error) {
db.rwLock.RLock()
defer db.rwLock.RUnlock()
db.log.Trace("Checking for log", "blockNum", blockNum, "logIdx", logIdx, "hash", logHash)
entryIdx, err := db.searchCheckpoint(blockNum, logIdx)
if errors.Is(err, io.EOF) {
// Did not find a checkpoint to start reading from so the log cannot be present.
return false, nil
} else if err != nil {
return false, err
}
i, err := db.newIterator(entryIdx)
if err != nil {
return false, fmt.Errorf("failed to create iterator: %w", err)
}
db.log.Trace("Starting search", "entry", entryIdx, "blockNum", i.current.blockNum, "logIdx", i.current.logIdx)
defer func() {
db.m.RecordSearchEntriesRead(i.entriesRead)
}()
for {
evtBlockNum, evtLogIdx, evtHash, err := i.NextLog()
if errors.Is(err, io.EOF) {
// Reached end of log without finding the event
return false, nil
} else if err != nil {
return false, fmt.Errorf("failed to read next log: %w", err)
}
if evtBlockNum == blockNum && evtLogIdx == logIdx {
db.log.Trace("Found initiatingEvent", "blockNum", evtBlockNum, "logIdx", evtLogIdx, "hash", evtHash)
// Found the requested block and log index, check if the hash matches
return evtHash == logHash, nil
}
if evtBlockNum > blockNum || (evtBlockNum == blockNum && evtLogIdx > logIdx) {
// Progressed past the requested log without finding it.
return false, nil
}
}
}
func (db *DB) newIterator(startCheckpointEntry int64) (*iterator, error) {
// TODO(optimism#10857): Handle starting from a checkpoint after initiating-event but before its executing-link
// Will need to read the entry prior to the checkpoint to get the initiating event info
current, err := db.readSearchCheckpoint(startCheckpointEntry)
if err != nil {
return nil, fmt.Errorf("failed to read search checkpoint entry %v: %w", startCheckpointEntry, err)
}
i := &iterator{
db: db,
// +2 to skip the initial search checkpoint and the canonical hash event after it
nextEntryIdx: startCheckpointEntry + 2,
current: logContext{
blockNum: current.blockNum,
logIdx: current.logIdx,
},
}
return i, nil
}
// searchCheckpoint performs a binary search of the searchCheckpoint entries to find the closest one at or before
// the requested log.
// Returns the index of the searchCheckpoint to begin reading from or an error
func (db *DB) searchCheckpoint(blockNum uint64, logIdx uint32) (int64, error) {
n := (db.lastEntryIdx() / searchCheckpointFrequency) + 1
// Define x[-1] < target and x[n] >= target.
// Invariant: x[i-1] < target, x[j] >= target.
i, j := int64(0), n
for i < j {
h := int64(uint64(i+j) >> 1) // avoid overflow when computing h
checkpoint, err := db.readSearchCheckpoint(h * searchCheckpointFrequency)
if err != nil {
return 0, fmt.Errorf("failed to read entry %v: %w", h, err)
}
// i ≤ h < j
if checkpoint.blockNum < blockNum || (checkpoint.blockNum == blockNum && checkpoint.logIdx < logIdx) {
i = h + 1 // preserves x[i-1] < target
} else {
j = h // preserves x[j] >= target
}
}
if i < n {
checkpoint, err := db.readSearchCheckpoint(i * searchCheckpointFrequency)
if err != nil {
return 0, fmt.Errorf("failed to read entry %v: %w", i, err)
}
if checkpoint.blockNum == blockNum && checkpoint.logIdx == logIdx {
// Found entry at requested block number and log index
return i * searchCheckpointFrequency, nil
}
}
if i == 0 {
// There are no checkpoints before the requested block
return 0, io.EOF
}
// Not found, need to start reading from the entry prior
return (i - 1) * searchCheckpointFrequency, nil
}
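// Illustration (hypothetical data): with checkpoints at entries 0, 256 and 512 recording blocks 10, 300
// and 500 respectively (each recorded with logIdx 0), searchCheckpoint(400, 0) returns 256 (the block 300
// checkpoint), searchCheckpoint(10, 0) returns 0, and searchCheckpoint(5, 0) returns io.EOF because there
// is no checkpoint at or before block 5.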
func (db *DB) AddLog(logHash TruncatedHash, block eth.BlockID, timestamp uint64, logIdx uint32) error {
db.rwLock.Lock()
defer db.rwLock.Unlock()
postState := logContext{
blockNum: block.Number,
logIdx: logIdx,
}
if block.Number == 0 {
return fmt.Errorf("%w: should not have logs in block 0", ErrLogOutOfOrder)
}
if db.lastEntryContext.blockNum > block.Number {
return fmt.Errorf("%w: adding block %v, head block: %v", ErrLogOutOfOrder, block.Number, db.lastEntryContext.blockNum)
}
if db.lastEntryContext.blockNum == block.Number && db.lastEntryContext.logIdx+1 != logIdx {
return fmt.Errorf("%w: adding log %v in block %v, but currently at log %v", ErrLogOutOfOrder, logIdx, block.Number, db.lastEntryContext.logIdx)
}
if db.lastEntryContext.blockNum < block.Number && logIdx != 0 {
return fmt.Errorf("%w: adding log %v as first log in block %v", ErrLogOutOfOrder, logIdx, block.Number)
}
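// When the next entry index lands on a checkpoint boundary, this AddLog call writes three entries:
// the search checkpoint, its canonical hash, and then the initiating event below. Otherwise only the
// initiating event is written.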
if (db.lastEntryIdx()+1)%searchCheckpointFrequency == 0 {
if err := db.writeSearchCheckpoint(block.Number, logIdx, timestamp, block.Hash); err != nil {
return fmt.Errorf("failed to write search checkpoint: %w", err)
}
db.lastEntryContext = postState
}
if err := db.writeInitiatingEvent(postState, logHash); err != nil {
return err
}
db.lastEntryContext = postState
db.updateEntryCountMetric()
return nil
}
// Rewind the database to remove any blocks after headBlockNum
// The block at headBlockNum itself is not removed.
func (db *DB) Rewind(headBlockNum uint64) error {
db.rwLock.Lock()
defer db.rwLock.Unlock()
if headBlockNum >= db.lastEntryContext.blockNum {
// Nothing to do
return nil
}
// Find the last checkpoint before the block to remove
idx, err := db.searchCheckpoint(headBlockNum+1, 0)
if errors.Is(err, io.EOF) {
// Requested a block prior to the first checkpoint
// Delete everything without scanning forward
idx = -1
} else if err != nil {
return fmt.Errorf("failed to find checkpoint prior to block %v: %w", headBlockNum, err)
} else {
// Scan forward from the checkpoint to find the first entry about a block after headBlockNum
i, err := db.newIterator(idx)
if err != nil {
return fmt.Errorf("failed to create iterator when searching for rewind point: %w", err)
}
// If we don't find any useful logs after the checkpoint, we should delete the checkpoint itself
// So move our delete marker back to include it as a starting point
idx--
for {
blockNum, _, _, err := i.NextLog()
if errors.Is(err, io.EOF) {
// Reached end of file, we need to keep everything
return nil
} else if err != nil {
return fmt.Errorf("failed to find rewind point: %w", err)
}
if blockNum > headBlockNum {
// Found the first entry we don't need, so stop searching and delete everything after idx
break
}
// Otherwise we need all of the entries the iterator just read
idx = i.nextEntryIdx - 1
}
}
// Truncate to contain idx+1 entries; since indices are 0-based, this deletes everything after idx
if err := db.store.Truncate(idx); err != nil {
return fmt.Errorf("failed to truncate to block %v: %w", headBlockNum, err)
}
// Use db.init() to find the log context for the new latest log entry
if err := db.init(); err != nil {
return fmt.Errorf("failed to find new last entry context: %w", err)
}
return nil
}
// writeSearchCheckpoint appends search checkpoint and canonical hash entry to the log
// type 0: "search checkpoint" <type><uint64 block number: 8 bytes><uint32 event index offset: 4 bytes><uint64 timestamp: 8 bytes> = 20 bytes
// type 1: "canonical hash" <type><parent blockhash truncated: 20 bytes> = 21 bytes
func (db *DB) writeSearchCheckpoint(blockNum uint64, logIdx uint32, timestamp uint64, blockHash common.Hash) error {
entry := newSearchCheckpoint(blockNum, logIdx, timestamp).encode()
if err := db.store.Append(entry); err != nil {
return err
}
return db.writeCanonicalHash(blockHash)
}
func (db *DB) readSearchCheckpoint(entryIdx int64) (searchCheckpoint, error) {
data, err := db.store.Read(entryIdx)
if err != nil {
return searchCheckpoint{}, fmt.Errorf("failed to read entry %v: %w", entryIdx, err)
}
return newSearchCheckpointFromEntry(data)
}
// writeCanonicalHash appends a canonical hash entry to the log
// type 1: "canonical hash" <type><parent blockhash truncated: 20 bytes> = 21 bytes
func (db *DB) writeCanonicalHash(blockHash common.Hash) error {
return db.store.Append(newCanonicalHash(TruncateHash(blockHash)).encode())
}
func (db *DB) readCanonicalHash(entryIdx int64) (canonicalHash, error) {
data, err := db.store.Read(entryIdx)
if err != nil {
return canonicalHash{}, fmt.Errorf("failed to read entry %v: %w", entryIdx, err)
}
if data[0] != typeCanonicalHash {
return canonicalHash{}, fmt.Errorf("%w: expected canonical hash at entry %v but was type %v", ErrDataCorruption, entryIdx, data[0])
}
return newCanonicalHashFromEntry(data)
}
// writeInitiatingEvent appends an initiating event to the log
// type 2: "initiating event" <type><blocknum diff: 1 byte><event flags: 1 byte><event-hash: 20 bytes> = 23 bytes
func (db *DB) writeInitiatingEvent(postState logContext, logHash TruncatedHash) error {
evt, err := newInitiatingEvent(db.lastEntryContext, postState.blockNum, postState.logIdx, logHash)
if err != nil {
return err
}
return db.store.Append(evt.encode())
}
func TruncateHash(hash common.Hash) TruncatedHash {
var truncated TruncatedHash
copy(truncated[:], hash[0:20])
return truncated
}
func (db *DB) Close() error {
return db.store.Close()
}
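A minimal usage sketch of the DB API above, assuming the package is importable from its repository path; the file path, logger and no-op metrics implementation are placeholders, not part of this package:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-service/eth"
	logdb "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db"
)

// noopMetrics satisfies logdb.Metrics without recording anything.
type noopMetrics struct{}

func (noopMetrics) RecordEntryCount(int64)        {}
func (noopMetrics) RecordSearchEntriesRead(int64) {}

func main() {
	db, err := logdb.NewFromFile(log.Root(), noopMetrics{}, "/tmp/logs.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Record the first two logs of block 15.
	block := eth.BlockID{Hash: common.Hash{0xaa}, Number: 15}
	if err := db.AddLog(logdb.TruncateHash(common.Hash{0x01}), block, 5000, 0); err != nil {
		panic(err)
	}
	if err := db.AddLog(logdb.TruncateHash(common.Hash{0x02}), block, 5000, 1); err != nil {
		panic(err)
	}

	// Query one of them back.
	found, err := db.Contains(15, 1, logdb.TruncateHash(common.Hash{0x02}))
	fmt.Println(found, err) // true <nil>
}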
package db
import (
"fmt"
"io"
"os"
"testing"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/stretchr/testify/require"
)
type statInvariant func(stat os.FileInfo, m *stubMetrics) error
type entryInvariant func(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error
// checkDBInvariants reads the database log directly and asserts a set of invariants on the data.
func checkDBInvariants(t *testing.T, dbPath string, m *stubMetrics) {
stat, err := os.Stat(dbPath)
require.NoError(t, err)
statInvariants := []statInvariant{
invariantFileSizeMultipleOfEntrySize,
invariantFileSizeMatchesEntryCountMetric,
}
for _, invariant := range statInvariants {
require.NoError(t, invariant(stat, m))
}
// Read all entries as binary blobs
file, err := os.OpenFile(dbPath, os.O_RDONLY, 0o644)
require.NoError(t, err)
entries := make([]entrydb.Entry, stat.Size()/entrydb.EntrySize)
for i := range entries {
n, err := io.ReadFull(file, entries[i][:])
require.NoErrorf(t, err, "failed to read entry %v", i)
require.EqualValuesf(t, entrydb.EntrySize, n, "read wrong length for entry %v", i)
}
entryInvariants := []entryInvariant{
invariantSearchCheckpointOnlyAtFrequency,
invariantSearchCheckpointAtEverySearchCheckpointFrequency,
invariantCanonicalHashAfterEverySearchCheckpoint,
invariantSearchCheckpointBeforeEveryCanonicalHash,
invariantIncrementLogIdxIfNotImmediatelyAfterCanonicalHash,
}
for i, entry := range entries {
for _, invariant := range entryInvariants {
err := invariant(i, entry, entries, m)
if err != nil {
require.NoErrorf(t, err, "Invariant breached: \n%v", fmtEntries(entries))
}
}
}
}
func fmtEntries(entries []entrydb.Entry) string {
out := ""
for i, entry := range entries {
out += fmt.Sprintf("%v: %x\n", i, entry)
}
return out
}
func invariantFileSizeMultipleOfEntrySize(stat os.FileInfo, _ *stubMetrics) error {
size := stat.Size()
if size%entrydb.EntrySize != 0 {
return fmt.Errorf("expected file size to be a multiple of entry size (%v) but was %v", entrydb.EntrySize, size)
}
return nil
}
func invariantFileSizeMatchesEntryCountMetric(stat os.FileInfo, m *stubMetrics) error {
size := stat.Size()
if m.entryCount*entrydb.EntrySize != size {
return fmt.Errorf("expected file size to be entryCount (%v) * entrySize (%v) = %v but was %v", m.entryCount, entrydb.EntrySize, m.entryCount*entrydb.EntrySize, size)
}
return nil
}
func invariantSearchCheckpointOnlyAtFrequency(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entry[0] != typeSearchCheckpoint {
return nil
}
if entryIdx%searchCheckpointFrequency != 0 {
return fmt.Errorf("should only have search checkpoints every %v entries but found at entry %v", searchCheckpointFrequency, entryIdx)
}
return nil
}
func invariantSearchCheckpointAtEverySearchCheckpointFrequency(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entryIdx%searchCheckpointFrequency == 0 && entry[0] != typeSearchCheckpoint {
return fmt.Errorf("should have search checkpoints every %v entries but entry %v was %x", searchCheckpointFrequency, entryIdx, entry)
}
return nil
}
func invariantCanonicalHashAfterEverySearchCheckpoint(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entry[0] != typeSearchCheckpoint {
return nil
}
if entryIdx+1 >= len(entries) {
return fmt.Errorf("expected canonical hash after search checkpoint at entry %v but no further entries found", entryIdx)
}
nextEntry := entries[entryIdx+1]
if nextEntry[0] != typeCanonicalHash {
return fmt.Errorf("expected canonical hash after search checkpoint at entry %v but got %x", entryIdx, nextEntry)
}
return nil
}
// invariantSearchCheckpointBeforeEveryCanonicalHash ensures we don't have extra canonical-hash entries
func invariantSearchCheckpointBeforeEveryCanonicalHash(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entry[0] != typeCanonicalHash {
return nil
}
if entryIdx == 0 {
return fmt.Errorf("expected search checkpoint before canonical hash at entry %v but no previous entries present", entryIdx)
}
prevEntry := entries[entryIdx-1]
if prevEntry[0] != typeSearchCheckpoint {
return fmt.Errorf("expected search checkpoint before canonical hash at entry %v but got %x", entryIdx, prevEntry)
}
return nil
}
func invariantIncrementLogIdxIfNotImmediatelyAfterCanonicalHash(entryIdx int, entry entrydb.Entry, entries []entrydb.Entry, m *stubMetrics) error {
if entry[0] != typeInitiatingEvent {
return nil
}
if entryIdx == 0 {
return fmt.Errorf("found initiating event at index %v before any search checkpoint", entryIdx)
}
blockDiff := entry[1]
flags := entry[2]
incrementsLogIdx := flags&eventFlagIncrementLogIdx != 0
prevEntry := entries[entryIdx-1]
prevEntryIsCanonicalHash := prevEntry[0] == typeCanonicalHash
if incrementsLogIdx && prevEntryIsCanonicalHash {
return fmt.Errorf("initiating event at index %v increments logIdx despite being immediately after canonical hash (prev entry %x)", entryIdx, prevEntry)
}
if incrementsLogIdx && blockDiff > 0 {
return fmt.Errorf("initiating event at index %v increments logIdx despite starting a new block", entryIdx)
}
if !incrementsLogIdx && !prevEntryIsCanonicalHash && blockDiff == 0 {
return fmt.Errorf("initiating event at index %v does not increment logIdx when block unchanged and not after canonical hash (prev entry %x)", entryIdx, prevEntry)
}
return nil
}
package db
import (
"bytes"
"io"
"io/fs"
"os"
"path/filepath"
"testing"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
func createTruncatedHash(i int) TruncatedHash {
return TruncateHash(createHash(i))
}
func createHash(i int) common.Hash {
data := bytes.Repeat([]byte{byte(i)}, common.HashLength)
return common.BytesToHash(data)
}
func TestErrorOpeningDatabase(t *testing.T) {
dir := t.TempDir()
_, err := NewFromFile(testlog.Logger(t, log.LvlInfo), &stubMetrics{}, filepath.Join(dir, "missing-dir", "file.db"))
require.ErrorIs(t, err, os.ErrNotExist)
}
func runDBTest(t *testing.T, setup func(t *testing.T, db *DB, m *stubMetrics), assert func(t *testing.T, db *DB, m *stubMetrics)) {
createDb := func(t *testing.T, dir string) (*DB, *stubMetrics, string) {
logger := testlog.Logger(t, log.LvlTrace)
path := filepath.Join(dir, "test.db")
m := &stubMetrics{}
db, err := NewFromFile(logger, m, path)
require.NoError(t, err, "Failed to create database")
t.Cleanup(func() {
err := db.Close()
if err != nil {
require.ErrorIs(t, err, fs.ErrClosed)
}
})
return db, m, path
}
t.Run("New", func(t *testing.T) {
db, m, _ := createDb(t, t.TempDir())
setup(t, db, m)
assert(t, db, m)
})
t.Run("Existing", func(t *testing.T) {
dir := t.TempDir()
db, m, path := createDb(t, dir)
setup(t, db, m)
// Close and recreate the database
require.NoError(t, db.Close())
checkDBInvariants(t, path, m)
db2, m, path := createDb(t, dir)
assert(t, db2, m)
checkDBInvariants(t, path, m)
})
}
func TestEmptyDbDoesNotFindEntry(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {},
func(t *testing.T, db *DB, m *stubMetrics) {
requireNotContains(t, db, 0, 0, createHash(1))
requireNotContains(t, db, 0, 0, common.Hash{})
})
}
func TestAddLog(t *testing.T) {
t.Run("BlockZero", func(t *testing.T) {
// There are no logs in the genesis block so recording an entry for block 0 should be rejected.
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 0}, 5000, 0)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("FirstEntry", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
requireContains(t, db, 15, 0, createHash(1))
})
})
t.Run("MultipleEntriesFromSameBlock", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
require.EqualValues(t, 5, m.entryCount, "should not output new searchCheckpoint for every log")
requireContains(t, db, 15, 0, createHash(1))
requireContains(t, db, 15, 1, createHash(2))
requireContains(t, db, 15, 2, createHash(3))
})
})
t.Run("MultipleEntriesFromMultipleBlocks", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(16), Number: 16}, 5002, 0)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(4), eth.BlockID{Hash: createHash(16), Number: 16}, 5002, 1)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
require.EqualValues(t, 6, m.entryCount, "should not output new searchCheckpoint for every block")
requireContains(t, db, 15, 0, createHash(1))
requireContains(t, db, 15, 1, createHash(2))
requireContains(t, db, 16, 0, createHash(3))
requireContains(t, db, 16, 1, createHash(4))
})
})
t.Run("ErrorWhenBeforeCurrentBlock", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4998, 0)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenBeforeCurrentBlockButAfterLastCheckpoint", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(13), Number: 13}, 5000, 0)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4998, 0)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenBeforeCurrentLogEvent", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1))
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 0)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenBeforeCurrentLogEventButAfterLastCheckpoint", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1)
require.NoError(t, err)
err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 1)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenAtCurrentLogEvent", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1))
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 1)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenAtCurrentLogEventButAfterLastCheckpoint", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 1))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 2))
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 15}, 4998, 2)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenSkippingLogEvent", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 5000, 0)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 2)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenFirstLogIsNotLogIdxZero", func(t *testing.T) {
runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) {},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 5)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("ErrorWhenFirstLogOfNewBlockIsNotLogIdxZero", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(14), Number: 14}, 4996, 0))
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(15), Number: 15}, 4998, 1)
require.ErrorIs(t, err, ErrLogOutOfOrder)
})
})
t.Run("MultipleSearchCheckpoints", func(t *testing.T) {
block1 := eth.BlockID{Hash: createHash(11), Number: 11}
block2 := eth.BlockID{Hash: createHash(12), Number: 12}
block3 := eth.BlockID{Hash: createHash(15), Number: 15}
block4 := eth.BlockID{Hash: createHash(16), Number: 16}
// First checkpoint is at entry idx 0
// Block 1 logs don't reach the second checkpoint
block1LogCount := searchCheckpointFrequency - 10
// Block 2 logs extend to just after the third checkpoint
block2LogCount := searchCheckpointFrequency + 20
// Block 3 logs extend to immediately before the fourth checkpoint
block3LogCount := searchCheckpointFrequency - 16
block4LogCount := 2
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
for i := 0; i < block1LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block1, 3000, uint32(i))
require.NoErrorf(t, err, "failed to add log %v of block 1", i)
}
for i := 0; i < block2LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block2, 3002, uint32(i))
require.NoErrorf(t, err, "failed to add log %v of block 2", i)
}
for i := 0; i < block3LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block3, 3004, uint32(i))
require.NoErrorf(t, err, "failed to add log %v of block 3", i)
}
// Verify that we're right before the fourth checkpoint will be written.
// entryCount is the number of entries, so with 0-based indexing it is also the index of the next entry to be written
// the first checkpoint is at entry 0, the second at entry searchCheckpointFrequency etc
// so the fourth is at entry 3*searchCheckpointFrequency
require.EqualValues(t, 3*searchCheckpointFrequency, m.entryCount)
for i := 0; i < block4LogCount; i++ {
err := db.AddLog(createTruncatedHash(i), block4, 3006, uint32(i))
require.NoErrorf(t, err, "failed to add log %v of block 4", i)
}
},
func(t *testing.T, db *DB, m *stubMetrics) {
// Check that we wrote additional search checkpoints
expectedCheckpointCount := 4
expectedEntryCount := block1LogCount + block2LogCount + block3LogCount + block4LogCount + (2 * expectedCheckpointCount)
require.EqualValues(t, expectedEntryCount, m.entryCount)
// Check we can find all the logs.
for i := 0; i < block1LogCount; i++ {
requireContains(t, db, block1.Number, uint32(i), createHash(i))
}
// Block 2 logs extend to just after the third checkpoint
for i := 0; i < block2LogCount; i++ {
requireContains(t, db, block2.Number, uint32(i), createHash(i))
}
// Block 3 logs extend to immediately before the fourth checkpoint
for i := 0; i < block3LogCount; i++ {
requireContains(t, db, block3.Number, uint32(i), createHash(i))
}
// Block 4 logs start immediately after the fourth checkpoint
for i := 0; i < block4LogCount; i++ {
requireContains(t, db, block4.Number, uint32(i), createHash(i))
}
})
})
}
func TestContains(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0))
require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 2))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(52), Number: 52}, 500, 0))
require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(52), Number: 52}, 500, 1))
},
func(t *testing.T, db *DB, m *stubMetrics) {
// Should find added logs
requireContains(t, db, 50, 0, createHash(1))
requireContains(t, db, 50, 1, createHash(3))
requireContains(t, db, 50, 2, createHash(2))
requireContains(t, db, 52, 0, createHash(1))
requireContains(t, db, 52, 1, createHash(3))
// Should not find log when block number too low
requireNotContains(t, db, 49, 0, createHash(1))
// Should not find log when block number too high
requireNotContains(t, db, 51, 0, createHash(1))
// Should not find log when requested log after end of database
requireNotContains(t, db, 52, 2, createHash(3))
requireNotContains(t, db, 53, 0, createHash(3))
// Should not find log when log index too high
requireNotContains(t, db, 50, 3, createHash(2))
// Should not find log when hash doesn't match log at block number and index
requireNotContains(t, db, 50, 0, createHash(5))
})
}
func TestGetBlockInfo(t *testing.T) {
t.Run("ReturnsEOFWhenEmpty", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {},
func(t *testing.T, db *DB, m *stubMetrics) {
_, _, err := db.ClosestBlockInfo(10)
require.ErrorIs(t, err, io.EOF)
})
})
t.Run("ReturnsEOFWhenRequestedBlockBeforeFirstSearchCheckpoint", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(11), Number: 11}, 500, 0)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
_, _, err := db.ClosestBlockInfo(10)
require.ErrorIs(t, err, io.EOF)
})
})
t.Run("ReturnFirstBlockInfo", func(t *testing.T) {
block := eth.BlockID{Hash: createHash(11), Number: 11}
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(1), block, 500, 0)
require.NoError(t, err)
},
func(t *testing.T, db *DB, m *stubMetrics) {
requireClosestBlockInfo(t, db, 11, block.Number, block.Hash)
requireClosestBlockInfo(t, db, 12, block.Number, block.Hash)
requireClosestBlockInfo(t, db, 200, block.Number, block.Hash)
})
})
t.Run("ReturnClosestCheckpointBlockInfo", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
for i := 1; i < searchCheckpointFrequency+3; i++ {
block := eth.BlockID{Hash: createHash(i), Number: uint64(i)}
err := db.AddLog(createTruncatedHash(i), block, uint64(i)*2, 0)
require.NoError(t, err)
}
},
func(t *testing.T, db *DB, m *stubMetrics) {
// Expect block from the first checkpoint
requireClosestBlockInfo(t, db, 1, 1, createHash(1))
requireClosestBlockInfo(t, db, 10, 1, createHash(1))
requireClosestBlockInfo(t, db, searchCheckpointFrequency-3, 1, createHash(1))
// Expect block from the second checkpoint
// The initial checkpoint and canonical hash consume 2 entries, but block numbers start at 1, so the second checkpoint is written while adding block searchCheckpointFrequency-1
secondCheckpointBlockNum := searchCheckpointFrequency - 1
requireClosestBlockInfo(t, db, uint64(secondCheckpointBlockNum), uint64(secondCheckpointBlockNum), createHash(secondCheckpointBlockNum))
requireClosestBlockInfo(t, db, uint64(secondCheckpointBlockNum)+1, uint64(secondCheckpointBlockNum), createHash(secondCheckpointBlockNum))
requireClosestBlockInfo(t, db, uint64(secondCheckpointBlockNum)+2, uint64(secondCheckpointBlockNum), createHash(secondCheckpointBlockNum))
})
})
}
func requireClosestBlockInfo(t *testing.T, db *DB, searchFor uint64, expectedBlockNum uint64, expectedHash common.Hash) {
blockNum, hash, err := db.ClosestBlockInfo(searchFor)
require.NoError(t, err)
require.Equal(t, expectedBlockNum, blockNum)
require.Equal(t, TruncateHash(expectedHash), hash)
}
func requireContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash) {
m, ok := db.m.(*stubMetrics)
require.True(t, ok, "Did not get the expected metrics type")
result, err := db.Contains(blockNum, logIdx, TruncateHash(logHash))
require.NoErrorf(t, err, "Error searching for log %v in block %v", logIdx, blockNum)
require.Truef(t, result, "Did not find log %v in block %v with hash %v", logIdx, blockNum, logHash)
require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
require.NotZero(t, m.entriesReadForSearch, "Must read at least some entries to find the log")
}
func requireNotContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash) {
m, ok := db.m.(*stubMetrics)
require.True(t, ok, "Did not get the expected metrics type")
result, err := db.Contains(blockNum, logIdx, TruncateHash(logHash))
require.NoErrorf(t, err, "Error searching for log %v in block %v", logIdx, blockNum)
require.Falsef(t, result, "Found unexpected log %v in block %v with hash %v", logIdx, blockNum, logHash)
require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency), "Should not need to read more than between two checkpoints")
}
func TestShouldRollBackInMemoryChangesOnWriteFailure(t *testing.T) {
t.Skip("TODO(optimism#10857)")
}
func TestShouldRecoverWhenSearchCheckpointWrittenButNotCanonicalHash(t *testing.T) {
t.Skip("TODO(optimism#10857)")
}
func TestShouldRecoverWhenPartialEntryWritten(t *testing.T) {
t.Skip("TODO(optimism#10857)")
}
func TestShouldRecoverWhenInitiatingEventWrittenButNotExecutingLink(t *testing.T) {
t.Skip("TODO(optimism#10857)")
}
func TestRewind(t *testing.T) {
t.Run("WhenEmpty", func(t *testing.T) {
runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) {},
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.Rewind(100))
require.NoError(t, db.Rewind(0))
})
})
t.Run("AfterLastBlock", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1))
require.NoError(t, db.AddLog(createTruncatedHash(3), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0))
require.NoError(t, db.AddLog(createTruncatedHash(4), eth.BlockID{Hash: createHash(74), Number: 74}, 700, 0))
require.NoError(t, db.Rewind(75))
},
func(t *testing.T, db *DB, m *stubMetrics) {
requireContains(t, db, 50, 0, createHash(1))
requireContains(t, db, 50, 1, createHash(2))
requireContains(t, db, 51, 0, createHash(3))
requireContains(t, db, 74, 0, createHash(4))
})
})
t.Run("BeforeFirstBlock", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1))
require.NoError(t, db.Rewind(25))
},
func(t *testing.T, db *DB, m *stubMetrics) {
requireNotContains(t, db, 50, 0, createHash(1))
requireNotContains(t, db, 50, 0, createHash(1))
require.Zero(t, m.entryCount)
})
})
t.Run("AtFirstBlock", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 1))
require.NoError(t, db.Rewind(50))
},
func(t *testing.T, db *DB, m *stubMetrics) {
requireContains(t, db, 50, 0, createHash(1))
requireContains(t, db, 50, 1, createHash(2))
requireNotContains(t, db, 51, 0, createHash(1))
requireNotContains(t, db, 51, 1, createHash(2))
})
})
t.Run("AtSecondCheckpoint", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
for i := uint32(0); m.entryCount < searchCheckpointFrequency; i++ {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, i))
}
require.EqualValues(t, searchCheckpointFrequency, m.entryCount)
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 0))
require.EqualValues(t, searchCheckpointFrequency+3, m.entryCount, "Should have inserted new checkpoint and extra log")
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(51), Number: 51}, 502, 1))
require.NoError(t, db.Rewind(50))
},
func(t *testing.T, db *DB, m *stubMetrics) {
require.EqualValues(t, searchCheckpointFrequency, m.entryCount, "Should have deleted second checkpoint")
requireContains(t, db, 50, 0, createHash(1))
requireContains(t, db, 50, 1, createHash(1))
requireNotContains(t, db, 51, 0, createHash(1))
requireNotContains(t, db, 51, 1, createHash(2))
})
})
t.Run("BetweenLogEntries", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1))
require.NoError(t, db.Rewind(55))
},
func(t *testing.T, db *DB, m *stubMetrics) {
requireContains(t, db, 50, 0, createHash(1))
requireContains(t, db, 50, 1, createHash(2))
requireNotContains(t, db, 60, 0, createHash(1))
requireNotContains(t, db, 60, 1, createHash(2))
})
})
t.Run("AtExistingLogEntry", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 1))
require.NoError(t, db.Rewind(60))
},
func(t *testing.T, db *DB, m *stubMetrics) {
requireContains(t, db, 59, 0, createHash(1))
requireContains(t, db, 59, 1, createHash(2))
requireContains(t, db, 60, 0, createHash(1))
requireContains(t, db, 60, 1, createHash(2))
requireNotContains(t, db, 61, 0, createHash(1))
requireNotContains(t, db, 61, 1, createHash(2))
})
})
t.Run("AtLastEntry", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(50), Number: 50}, 500, 1))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(70), Number: 70}, 502, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(70), Number: 70}, 502, 1))
require.NoError(t, db.Rewind(70))
},
func(t *testing.T, db *DB, m *stubMetrics) {
requireContains(t, db, 50, 0, createHash(1))
requireContains(t, db, 50, 1, createHash(2))
requireContains(t, db, 60, 0, createHash(1))
requireContains(t, db, 60, 1, createHash(2))
requireContains(t, db, 70, 0, createHash(1))
requireContains(t, db, 70, 1, createHash(2))
})
})
t.Run("ReaddDeletedBlocks", func(t *testing.T) {
runDBTest(t,
func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1))
require.NoError(t, db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 0))
require.NoError(t, db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(61), Number: 61}, 502, 1))
require.NoError(t, db.Rewind(60))
},
func(t *testing.T, db *DB, m *stubMetrics) {
err := db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(59), Number: 59}, 500, 1)
require.ErrorIs(t, err, ErrLogOutOfOrder, "Cannot add block before rewound head")
err = db.AddLog(createTruncatedHash(2), eth.BlockID{Hash: createHash(60), Number: 60}, 502, 1)
require.ErrorIs(t, err, ErrLogOutOfOrder, "Cannot add block that was rewound to")
err = db.AddLog(createTruncatedHash(1), eth.BlockID{Hash: createHash(60), Number: 61}, 502, 0)
require.NoError(t, err, "Can re-add deleted block")
})
})
}
type stubMetrics struct {
entryCount int64
entriesReadForSearch int64
}
func (s *stubMetrics) RecordEntryCount(count int64) {
s.entryCount = count
}
func (s *stubMetrics) RecordSearchEntriesRead(count int64) {
s.entriesReadForSearch = count
}
var _ Metrics = (*stubMetrics)(nil)
package db
import (
"encoding/binary"
"fmt"
"math"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
)
type searchCheckpoint struct {
blockNum uint64
logIdx uint32
timestamp uint64
}
func newSearchCheckpoint(blockNum uint64, logIdx uint32, timestamp uint64) searchCheckpoint {
return searchCheckpoint{
blockNum: blockNum,
logIdx: logIdx,
timestamp: timestamp,
}
}
func newSearchCheckpointFromEntry(data entrydb.Entry) (searchCheckpoint, error) {
if data[0] != typeSearchCheckpoint {
return searchCheckpoint{}, fmt.Errorf("%w: attempting to decode search checkpoint but was type %v", ErrDataCorruption, data[0])
}
return searchCheckpoint{
blockNum: binary.LittleEndian.Uint64(data[1:9]),
logIdx: binary.LittleEndian.Uint32(data[9:13]),
timestamp: binary.LittleEndian.Uint64(data[13:21]),
}, nil
}
// encode creates a search checkpoint entry
// type 0: "search checkpoint" <type><uint64 block number: 8 bytes><uint32 event index offset: 4 bytes><uint64 timestamp: 8 bytes> = 20 bytes
func (s searchCheckpoint) encode() entrydb.Entry {
var data entrydb.Entry
data[0] = typeSearchCheckpoint
binary.LittleEndian.PutUint64(data[1:9], s.blockNum)
binary.LittleEndian.PutUint32(data[9:13], s.logIdx)
binary.LittleEndian.PutUint64(data[13:21], s.timestamp)
return data
}
type canonicalHash struct {
hash TruncatedHash
}
func newCanonicalHash(hash TruncatedHash) canonicalHash {
return canonicalHash{hash: hash}
}
func newCanonicalHashFromEntry(data entrydb.Entry) (canonicalHash, error) {
if data[0] != typeCanonicalHash {
return canonicalHash{}, fmt.Errorf("%w: attempting to decode canonical hash but was type %v", ErrDataCorruption, data[0])
}
var truncated TruncatedHash
copy(truncated[:], data[1:21])
return newCanonicalHash(truncated), nil
}
func (c canonicalHash) encode() entrydb.Entry {
var entry entrydb.Entry
entry[0] = typeCanonicalHash
copy(entry[1:21], c.hash[:])
return entry
}
type initiatingEvent struct {
blockDiff uint8
incrementLogIdx bool
logHash TruncatedHash
}
func newInitiatingEventFromEntry(data entrydb.Entry) (initiatingEvent, error) {
if data[0] != typeInitiatingEvent {
return initiatingEvent{}, fmt.Errorf("%w: attempting to decode initiating event but was type %v", ErrDataCorruption, data[0])
}
blockNumDiff := data[1]
flags := data[2]
return initiatingEvent{
blockDiff: blockNumDiff,
incrementLogIdx: flags&eventFlagIncrementLogIdx != 0,
logHash: TruncatedHash(data[3:23]),
}, nil
}
func newInitiatingEvent(pre logContext, blockNum uint64, logIdx uint32, logHash TruncatedHash) (initiatingEvent, error) {
blockDiff := blockNum - pre.blockNum
if blockDiff > math.MaxUint8 {
// TODO(optimism#10857): Need to find a way to support this.
return initiatingEvent{}, fmt.Errorf("too many block skipped between %v and %v", pre.blockNum, blockNum)
}
currLogIdx := pre.logIdx
if blockDiff > 0 {
currLogIdx = 0
}
logDiff := logIdx - currLogIdx
if logDiff > 1 {
return initiatingEvent{}, fmt.Errorf("skipped logs between %v and %v", currLogIdx, logIdx)
}
return initiatingEvent{
blockDiff: uint8(blockDiff),
incrementLogIdx: logDiff > 0,
logHash: logHash,
}, nil
}
// encode creates an initiating event entry
// type 2: "initiating event" <type><blocknum diff: 1 byte><event flags: 1 byte><event-hash: 20 bytes> = 23 bytes
func (i initiatingEvent) encode() entrydb.Entry {
var data entrydb.Entry
data[0] = typeInitiatingEvent
data[1] = i.blockDiff
flags := byte(0)
if i.incrementLogIdx {
// Set flag to indicate log idx needs to be incremented (ie we're not directly after a checkpoint)
flags = flags | eventFlagIncrementLogIdx
}
data[2] = flags
copy(data[3:23], i.logHash[:])
return data
}
func (i initiatingEvent) postContext(pre logContext) logContext {
post := logContext{
blockNum: pre.blockNum + uint64(i.blockDiff),
logIdx: pre.logIdx,
}
if i.blockDiff > 0 {
post.logIdx = 0
}
if i.incrementLogIdx {
post.logIdx++
}
return post
}
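// Example (illustrative): starting from logContext{blockNum: 15, logIdx: 1}, the next log of the same
// block (logIdx 2) encodes as {blockDiff: 0, incrementLogIdx: true} and postContext yields {15, 2};
// the first log of block 16 (logIdx 0) encodes as {blockDiff: 1, incrementLogIdx: false} and
// postContext yields {16, 0}.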
package entrydb
import (
"errors"
"fmt"
"io"
"os"
)
const (
EntrySize = 24
)
type Entry [EntrySize]byte
// dataAccess defines a minimal API required to manipulate the actual stored data.
// It is a subset of the os.File API but could (theoretically) be satisfied by an in-memory implementation for testing.
type dataAccess interface {
io.ReaderAt
io.Writer
io.Closer
Truncate(size int64) error
}
type EntryDB struct {
data dataAccess
lastEntryIdx int64
}
func NewEntryDB(path string) (*EntryDB, error) {
file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0o666)
if err != nil {
return nil, fmt.Errorf("failed to open database at %v: %w", path, err)
}
info, err := file.Stat()
if err != nil {
return nil, fmt.Errorf("failed to stat database at %v: %w", path, err)
}
lastEntryIdx := info.Size()/EntrySize - 1
return &EntryDB{
data: file,
lastEntryIdx: lastEntryIdx,
}, nil
}
func (e *EntryDB) Size() int64 {
return e.lastEntryIdx + 1
}
func (e *EntryDB) Read(idx int64) (Entry, error) {
var out Entry
read, err := e.data.ReadAt(out[:], idx*EntrySize)
// Ignore io.EOF if we read the entire last entry as ReadAt may return io.EOF or nil when it reads the last byte
if err != nil && !(errors.Is(err, io.EOF) && read == EntrySize) {
return Entry{}, fmt.Errorf("failed to read entry %v: %w", idx, err)
}
return out, nil
}
func (e *EntryDB) Append(entries ...Entry) error {
for _, entry := range entries {
if _, err := e.data.Write(entry[:]); err != nil {
// TODO(optimism#10857): When a write fails, need to revert any in memory changes and truncate back to the
// pre-write state. Likely need to batch writes for multiple entries into a single write akin to transactions
// to avoid leaving hanging entries without the entry that should follow them.
return err
}
e.lastEntryIdx++
}
return nil
}
func (e *EntryDB) Truncate(idx int64) error {
if err := e.data.Truncate((idx + 1) * EntrySize); err != nil {
return fmt.Errorf("failed to truncate to entry %v: %w", idx, err)
}
// Update the lastEntryIdx cache; callers such as DB.Rewind re-derive their own log context (via db.init) after truncating
e.lastEntryIdx = idx
return nil
}
func (e *EntryDB) Close() error {
return e.data.Close()
}
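// inMemoryAccess is a minimal sketch of an in-memory dataAccess implementation of the kind the
// dataAccess comment above alludes to for testing. It is illustrative only; the name and behaviour
// are assumptions, not something the tests below rely on.
type inMemoryAccess struct {
	buf []byte
}

func (m *inMemoryAccess) ReadAt(p []byte, off int64) (int, error) {
	if off >= int64(len(m.buf)) {
		return 0, io.EOF
	}
	n := copy(p, m.buf[off:])
	if n < len(p) {
		return n, io.EOF
	}
	return n, nil
}

func (m *inMemoryAccess) Write(p []byte) (int, error) {
	m.buf = append(m.buf, p...)
	return len(p), nil
}

// Truncate discards data beyond size; extending the buffer is not needed for this sketch.
func (m *inMemoryAccess) Truncate(size int64) error {
	if size < int64(len(m.buf)) {
		m.buf = m.buf[:size]
	}
	return nil
}

func (m *inMemoryAccess) Close() error {
	return nil
}

var _ dataAccess = (*inMemoryAccess)(nil)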
package entrydb
import (
"bytes"
"io"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
)
func TestReadWrite(t *testing.T) {
t.Run("BasicReadWrite", func(t *testing.T) {
db := createEntryDB(t)
require.NoError(t, db.Append(createEntry(1)))
require.NoError(t, db.Append(createEntry(2)))
require.NoError(t, db.Append(createEntry(3)))
require.NoError(t, db.Append(createEntry(4)))
requireRead(t, db, 0, createEntry(1))
requireRead(t, db, 1, createEntry(2))
requireRead(t, db, 2, createEntry(3))
requireRead(t, db, 3, createEntry(4))
// Check we can read out of order
requireRead(t, db, 1, createEntry(2))
})
t.Run("ReadPastEndOfFileReturnsEOF", func(t *testing.T) {
db := createEntryDB(t)
_, err := db.Read(0)
require.ErrorIs(t, err, io.EOF)
})
t.Run("WriteMultiple", func(t *testing.T) {
db := createEntryDB(t)
require.NoError(t, db.Append(
createEntry(1),
createEntry(2),
createEntry(3),
))
requireRead(t, db, 0, createEntry(1))
requireRead(t, db, 1, createEntry(2))
requireRead(t, db, 2, createEntry(3))
})
}
func TestTruncate(t *testing.T) {
db := createEntryDB(t)
require.NoError(t, db.Append(createEntry(1)))
require.NoError(t, db.Append(createEntry(2)))
require.NoError(t, db.Append(createEntry(3)))
require.NoError(t, db.Append(createEntry(4)))
require.NoError(t, db.Append(createEntry(5)))
require.NoError(t, db.Truncate(3))
requireRead(t, db, 0, createEntry(1))
requireRead(t, db, 1, createEntry(2))
requireRead(t, db, 2, createEntry(3))
// 4 and 5 have been removed
_, err := db.Read(4)
require.ErrorIs(t, err, io.EOF)
_, err = db.Read(5)
require.ErrorIs(t, err, io.EOF)
}
func requireRead(t *testing.T, db *EntryDB, idx int64, expected Entry) {
actual, err := db.Read(idx)
require.NoError(t, err)
require.Equal(t, expected, actual)
}
func createEntry(i byte) Entry {
return Entry(bytes.Repeat([]byte{i}, EntrySize))
}
func createEntryDB(t *testing.T) *EntryDB {
db, err := NewEntryDB(filepath.Join(t.TempDir(), "entries.db"))
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, db.Close())
})
return db
}
package db
import (
"fmt"
"io"
)
type iterator struct {
db *DB
nextEntryIdx int64
current logContext
entriesRead int64
}
func (i *iterator) NextLog() (blockNum uint64, logIdx uint32, evtHash TruncatedHash, outErr error) {
for i.nextEntryIdx <= i.db.lastEntryIdx() {
entryIdx := i.nextEntryIdx
entry, err := i.db.store.Read(entryIdx)
if err != nil {
outErr = fmt.Errorf("failed to read entry %v: %w", i, err)
return
}
i.nextEntryIdx++
i.entriesRead++
switch entry[0] {
case typeSearchCheckpoint:
current, err := newSearchCheckpointFromEntry(entry)
if err != nil {
outErr = fmt.Errorf("failed to parse search checkpoint at idx %v: %w", entryIdx, err)
return
}
i.current.blockNum = current.blockNum
i.current.logIdx = current.logIdx
case typeCanonicalHash:
// Skip
case typeInitiatingEvent:
evt, err := newInitiatingEventFromEntry(entry)
if err != nil {
outErr = fmt.Errorf("failed to parse initiating event at idx %v: %w", entryIdx, err)
return
}
i.current = evt.postContext(i.current)
blockNum = i.current.blockNum
logIdx = i.current.logIdx
evtHash = evt.logHash
return
case typeExecutingCheck:
// TODO(optimism#10857): Handle this properly
case typeExecutingLink:
// TODO(optimism#10857): Handle this properly
default:
outErr = fmt.Errorf("unknown entry type at idx %v %v", entryIdx, entry[0])
return
}
}
outErr = io.EOF
return
}