Commit a71c4926 authored by protolambda's avatar protolambda Committed by GitHub

op-supervisor: DB improvements for cross-safe updates (#12622)

Co-authored-by: axelKingsley <axel.kingsley@gmail.com>
Co-authored-by: Tyler Smith <mail@tcry.pt>
parent 21527c63
...@@ -17,7 +17,6 @@ import ( ...@@ -17,7 +17,6 @@ import (
"github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-supervisor/config" "github.com/ethereum-optimism/optimism/op-supervisor/config"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/processors" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/processors"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/frontend" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/frontend"
...@@ -71,7 +70,7 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg ...@@ -71,7 +70,7 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, m Metrics, cfg
chains := depSet.Chains() chains := depSet.Chains()
// create initial per-chain resources // create initial per-chain resources
chainsDBs := db.NewChainsDB(logger) chainsDBs := db.NewChainsDB(logger, depSet)
chainProcessors := make(map[types.ChainID]*processors.ChainProcessor, len(chains)) chainProcessors := make(map[types.ChainID]*processors.ChainProcessor, len(chains))
chainMetrics := make(map[types.ChainID]*chainMetrics, len(chains)) chainMetrics := make(map[types.ChainID]*chainMetrics, len(chains))
...@@ -167,7 +166,7 @@ func (su *SupervisorBackend) attachRPC(ctx context.Context, rpc string) error { ...@@ -167,7 +166,7 @@ func (su *SupervisorBackend) attachRPC(ctx context.Context, rpc string) error {
return err return err
} }
if !su.depSet.HasChain(chainID) { if !su.depSet.HasChain(chainID) {
return fmt.Errorf("chain %s is not part of the interop dependency set: %w", chainID, db.ErrUnknownChain) return fmt.Errorf("chain %s is not part of the interop dependency set: %w", chainID, types.ErrUnknownChain)
} }
cm, ok := su.chainMetrics[chainID] cm, ok := su.chainMetrics[chainID]
if !ok { if !ok {
...@@ -273,10 +272,10 @@ func (su *SupervisorBackend) CheckMessage(identifier types.Identifier, payloadHa ...@@ -273,10 +272,10 @@ func (su *SupervisorBackend) CheckMessage(identifier types.Identifier, payloadHa
blockNum := identifier.BlockNumber blockNum := identifier.BlockNumber
logIdx := identifier.LogIndex logIdx := identifier.LogIndex
_, err := su.chainDBs.Check(chainID, blockNum, uint32(logIdx), payloadHash) _, err := su.chainDBs.Check(chainID, blockNum, uint32(logIdx), payloadHash)
if errors.Is(err, entrydb.ErrFuture) { if errors.Is(err, types.ErrFuture) {
return types.LocalUnsafe, nil return types.LocalUnsafe, nil
} }
if errors.Is(err, entrydb.ErrConflict) { if errors.Is(err, types.ErrConflict) {
return types.Invalid, nil return types.Invalid, nil
} }
if err != nil { if err != nil {
...@@ -378,7 +377,7 @@ func (su *SupervisorBackend) UpdateLocalUnsafe(chainID types.ChainID, head eth.B ...@@ -378,7 +377,7 @@ func (su *SupervisorBackend) UpdateLocalUnsafe(chainID types.ChainID, head eth.B
defer su.mu.RUnlock() defer su.mu.RUnlock()
ch, ok := su.chainProcessors[chainID] ch, ok := su.chainProcessors[chainID]
if !ok { if !ok {
return db.ErrUnknownChain return types.ErrUnknownChain
} }
return ch.OnNewHead(head) return ch.OnNewHead(head)
} }
......
...@@ -21,7 +21,6 @@ import ( ...@@ -21,7 +21,6 @@ import (
"github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/ethereum-optimism/optimism/op-supervisor/config" "github.com/ethereum-optimism/optimism/op-supervisor/config"
"github.com/ethereum-optimism/optimism/op-supervisor/metrics" "github.com/ethereum-optimism/optimism/op-supervisor/metrics"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
) )
...@@ -89,7 +88,7 @@ func TestBackendLifetime(t *testing.T) { ...@@ -89,7 +88,7 @@ func TestBackendLifetime(t *testing.T) {
t.Log("started!") t.Log("started!")
_, err = b.UnsafeView(context.Background(), chainA, types.ReferenceView{}) _, err = b.UnsafeView(context.Background(), chainA, types.ReferenceView{})
require.ErrorIs(t, err, entrydb.ErrFuture, "no data yet, need local-unsafe") require.ErrorIs(t, err, types.ErrFuture, "no data yet, need local-unsafe")
src.ExpectL1BlockRefByNumber(0, blockX, nil) src.ExpectL1BlockRefByNumber(0, blockX, nil)
src.ExpectFetchReceipts(blockX.Hash, &testutils.MockBlockInfo{ src.ExpectFetchReceipts(blockX.Hash, &testutils.MockBlockInfo{
...@@ -118,7 +117,7 @@ func TestBackendLifetime(t *testing.T) { ...@@ -118,7 +117,7 @@ func TestBackendLifetime(t *testing.T) {
b.chainProcessors[chainA].ProcessToHead() b.chainProcessors[chainA].ProcessToHead()
_, err = b.UnsafeView(context.Background(), chainA, types.ReferenceView{}) _, err = b.UnsafeView(context.Background(), chainA, types.ReferenceView{})
require.ErrorIs(t, err, entrydb.ErrFuture, "still no data yet, need cross-unsafe") require.ErrorIs(t, err, types.ErrFuture, "still no data yet, need cross-unsafe")
err = b.chainDBs.UpdateCrossUnsafe(chainA, types.BlockSeal{ err = b.chainDBs.UpdateCrossUnsafe(chainA, types.BlockSeal{
Hash: blockX.Hash, Hash: blockX.Hash,
......
...@@ -12,13 +12,10 @@ import ( ...@@ -12,13 +12,10 @@ import (
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/fromda" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/fromda"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
) )
var (
ErrUnknownChain = errors.New("unknown chain")
)
type LogStorage interface { type LogStorage interface {
io.Closer io.Closer
...@@ -46,13 +43,22 @@ type LogStorage interface { ...@@ -46,13 +43,22 @@ type LogStorage interface {
// The block-seal of the blockNum block, that the log was included in, is returned. // The block-seal of the blockNum block, that the log was included in, is returned.
// This seal may be fully zeroed, without error, if the block isn't fully known yet. // This seal may be fully zeroed, without error, if the block isn't fully known yet.
Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (includedIn types.BlockSeal, err error) Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (includedIn types.BlockSeal, err error)
// OpenBlock accumulates the ExecutingMessage events for a block and returns them
OpenBlock(blockNum uint64) (ref eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error)
} }
type LocalDerivedFromStorage interface { type LocalDerivedFromStorage interface {
First() (derivedFrom types.BlockSeal, derived types.BlockSeal, err error)
Latest() (derivedFrom types.BlockSeal, derived types.BlockSeal, err error) Latest() (derivedFrom types.BlockSeal, derived types.BlockSeal, err error)
AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error
LastDerivedAt(derivedFrom eth.BlockID) (derived types.BlockSeal, err error) LastDerivedAt(derivedFrom eth.BlockID) (derived types.BlockSeal, err error)
DerivedFrom(derived eth.BlockID) (derivedFrom types.BlockSeal, err error) DerivedFrom(derived eth.BlockID) (derivedFrom types.BlockSeal, err error)
FirstAfter(derivedFrom, derived eth.BlockID) (nextDerivedFrom, nextDerived types.BlockSeal, err error)
NextDerivedFrom(derivedFrom eth.BlockID) (nextDerivedFrom types.BlockSeal, err error)
NextDerived(derived eth.BlockID) (derivedFrom types.BlockSeal, nextDerived types.BlockSeal, err error)
PreviousDerivedFrom(derivedFrom eth.BlockID) (prevDerivedFrom types.BlockSeal, err error)
PreviousDerived(derived eth.BlockID) (prevDerived types.BlockSeal, err error)
} }
var _ LocalDerivedFromStorage = (*fromda.DB)(nil) var _ LocalDerivedFromStorage = (*fromda.DB)(nil)
...@@ -65,7 +71,7 @@ type CrossDerivedFromStorage interface { ...@@ -65,7 +71,7 @@ type CrossDerivedFromStorage interface {
var _ LogStorage = (*logs.DB)(nil) var _ LogStorage = (*logs.DB)(nil)
// ChainsDB is a database that stores logs and derived-from data for multiple chains. // ChainsDB is a database that stores logs and derived-from data for multiple chains.
// it implements the ChainsStorage interface. // it implements the LogStorage interface, as well as several DB interfaces needed by the cross package.
type ChainsDB struct { type ChainsDB struct {
// RW mutex: // RW mutex:
// Read = chains can be read / mutated. // Read = chains can be read / mutated.
...@@ -90,16 +96,21 @@ type ChainsDB struct { ...@@ -90,16 +96,21 @@ type ChainsDB struct {
// an error until it has this L1 finality to work with. // an error until it has this L1 finality to work with.
finalizedL1 eth.L1BlockRef finalizedL1 eth.L1BlockRef
// depSet is the dependency set, used to determine what may be tracked,
// what is missing, and to provide it to DB users.
depSet depset.DependencySet
logger log.Logger logger log.Logger
} }
func NewChainsDB(l log.Logger) *ChainsDB { func NewChainsDB(l log.Logger, depSet depset.DependencySet) *ChainsDB {
return &ChainsDB{ return &ChainsDB{
logDBs: make(map[types.ChainID]LogStorage), logDBs: make(map[types.ChainID]LogStorage),
logger: l, logger: l,
localDBs: make(map[types.ChainID]LocalDerivedFromStorage), localDBs: make(map[types.ChainID]LocalDerivedFromStorage),
crossDBs: make(map[types.ChainID]CrossDerivedFromStorage), crossDBs: make(map[types.ChainID]CrossDerivedFromStorage),
crossUnsafe: make(map[types.ChainID]types.BlockSeal), crossUnsafe: make(map[types.ChainID]types.BlockSeal),
depSet: depSet,
} }
} }
...@@ -168,6 +179,10 @@ func (db *ChainsDB) ResumeFromLastSealedBlock() error { ...@@ -168,6 +179,10 @@ func (db *ChainsDB) ResumeFromLastSealedBlock() error {
return nil return nil
} }
// DependencySet returns the dependency set this DB was constructed with.
// It is used to determine what chains may be tracked, what is missing,
// and to provide that information to DB users.
func (db *ChainsDB) DependencySet() depset.DependencySet {
	return db.depSet
}
func (db *ChainsDB) Close() error { func (db *ChainsDB) Close() error {
db.mu.Lock() db.mu.Lock()
defer db.mu.Unlock() defer db.mu.Unlock()
......
...@@ -5,7 +5,6 @@ import ( ...@@ -5,7 +5,6 @@ import (
"fmt" "fmt"
"io" "io"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
) )
...@@ -57,10 +56,10 @@ func (d LinkEntry) String() string { ...@@ -57,10 +56,10 @@ func (d LinkEntry) String() string {
func (d *LinkEntry) decode(e Entry) error { func (d *LinkEntry) decode(e Entry) error {
if e.Type() != DerivedFromV0 { if e.Type() != DerivedFromV0 {
return fmt.Errorf("%w: unexpected entry type: %s", entrydb.ErrDataCorruption, e.Type()) return fmt.Errorf("%w: unexpected entry type: %s", types.ErrDataCorruption, e.Type())
} }
if [3]byte(e[1:4]) != ([3]byte{}) { if [3]byte(e[1:4]) != ([3]byte{}) {
return fmt.Errorf("%w: expected empty data, to pad entry size to round number: %x", entrydb.ErrDataCorruption, e[1:4]) return fmt.Errorf("%w: expected empty data, to pad entry size to round number: %x", types.ErrDataCorruption, e[1:4])
} }
offset := 4 offset := 4
d.derivedFrom.Number = binary.BigEndian.Uint64(e[offset : offset+8]) d.derivedFrom.Number = binary.BigEndian.Uint64(e[offset : offset+8])
......
...@@ -4,7 +4,6 @@ import ( ...@@ -4,7 +4,6 @@ import (
"fmt" "fmt"
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
) )
...@@ -60,19 +59,19 @@ func (db *DB) AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error { ...@@ -60,19 +59,19 @@ func (db *DB) AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error {
// I.e. we encountered an empty L1 block, and the same L2 block continues to be the last block that was derived from it. // I.e. we encountered an empty L1 block, and the same L2 block continues to be the last block that was derived from it.
if lastDerived.Hash != derived.Hash { if lastDerived.Hash != derived.Hash {
return fmt.Errorf("derived block %s conflicts with known derived block %s at same height: %w", return fmt.Errorf("derived block %s conflicts with known derived block %s at same height: %w",
derived, lastDerived, entrydb.ErrConflict) derived, lastDerived, types.ErrConflict)
} }
} else if lastDerived.Number+1 == derived.Number { } else if lastDerived.Number+1 == derived.Number {
if lastDerived.Hash != derived.ParentHash { if lastDerived.Hash != derived.ParentHash {
return fmt.Errorf("derived block %s (parent %s) does not build on %s: %w", return fmt.Errorf("derived block %s (parent %s) does not build on %s: %w",
derived, derived.ParentHash, lastDerived, entrydb.ErrConflict) derived, derived.ParentHash, lastDerived, types.ErrConflict)
} }
} else if lastDerived.Number+1 < derived.Number { } else if lastDerived.Number+1 < derived.Number {
return fmt.Errorf("derived block %s (parent: %s) is too new, expected to build on top of %s: %w", return fmt.Errorf("derived block %s (parent: %s) is too new, expected to build on top of %s: %w",
derived, derived.ParentHash, lastDerived, entrydb.ErrOutOfOrder) derived, derived.ParentHash, lastDerived, types.ErrOutOfOrder)
} else { } else {
return fmt.Errorf("derived block %s is older than current derived block %s: %w", return fmt.Errorf("derived block %s is older than current derived block %s: %w",
derived, lastDerived, entrydb.ErrOutOfOrder) derived, lastDerived, types.ErrOutOfOrder)
} }
// Check derived-from relation: multiple L2 blocks may be derived from the same L1 block. But everything in sequence. // Check derived-from relation: multiple L2 blocks may be derived from the same L1 block. But everything in sequence.
...@@ -80,22 +79,22 @@ func (db *DB) AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error { ...@@ -80,22 +79,22 @@ func (db *DB) AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error {
// Same block height? Then it must be the same block. // Same block height? Then it must be the same block.
if lastDerivedFrom.Hash != derivedFrom.Hash { if lastDerivedFrom.Hash != derivedFrom.Hash {
return fmt.Errorf("cannot add block %s as derived from %s, expected to be derived from %s at this block height: %w", return fmt.Errorf("cannot add block %s as derived from %s, expected to be derived from %s at this block height: %w",
derived, derivedFrom, lastDerivedFrom, entrydb.ErrConflict) derived, derivedFrom, lastDerivedFrom, types.ErrConflict)
} }
} else if lastDerivedFrom.Number+1 == derivedFrom.Number { } else if lastDerivedFrom.Number+1 == derivedFrom.Number {
// parent hash check // parent hash check
if lastDerivedFrom.Hash != derivedFrom.ParentHash { if lastDerivedFrom.Hash != derivedFrom.ParentHash {
return fmt.Errorf("cannot add block %s as derived from %s (parent %s) derived on top of %s: %w", return fmt.Errorf("cannot add block %s as derived from %s (parent %s) derived on top of %s: %w",
derived, derivedFrom, derivedFrom.ParentHash, lastDerivedFrom, entrydb.ErrConflict) derived, derivedFrom, derivedFrom.ParentHash, lastDerivedFrom, types.ErrConflict)
} }
} else if lastDerivedFrom.Number+1 < derivedFrom.Number { } else if lastDerivedFrom.Number+1 < derivedFrom.Number {
// adding block that is derived from something too far into the future // adding block that is derived from something too far into the future
return fmt.Errorf("cannot add block %s as derived from %s, still deriving from %s: %w", return fmt.Errorf("cannot add block %s as derived from %s, still deriving from %s: %w",
derived, derivedFrom, lastDerivedFrom, entrydb.ErrOutOfOrder) derived, derivedFrom, lastDerivedFrom, types.ErrOutOfOrder)
} else { } else {
// adding block that is derived from something too old // adding block that is derived from something too old
return fmt.Errorf("cannot add block %s as derived from %s, deriving already at %s: %w", return fmt.Errorf("cannot add block %s as derived from %s, deriving already at %s: %w",
derived, derivedFrom, lastDerivedFrom, entrydb.ErrOutOfOrder) derived, derivedFrom, lastDerivedFrom, types.ErrOutOfOrder)
} }
link := LinkEntry{ link := LinkEntry{
......
...@@ -7,7 +7,6 @@ import ( ...@@ -7,7 +7,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
) )
...@@ -42,14 +41,14 @@ func TestBadUpdates(t *testing.T) { ...@@ -42,14 +41,14 @@ func TestBadUpdates(t *testing.T) {
{ {
name: "add on old derivedFrom", name: "add on old derivedFrom",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
require.ErrorIs(t, db.AddDerived(toRef(bDerivedFrom, aDerivedFrom.Hash), toRef(dDerived, cDerived.Hash)), entrydb.ErrOutOfOrder) require.ErrorIs(t, db.AddDerived(toRef(bDerivedFrom, aDerivedFrom.Hash), toRef(dDerived, cDerived.Hash)), types.ErrOutOfOrder)
}, },
assertFn: noChange, assertFn: noChange,
}, },
{ {
name: "repeat parent derivedFrom", name: "repeat parent derivedFrom",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
require.ErrorIs(t, db.AddDerived(toRef(cDerivedFrom, bDerivedFrom.Hash), toRef(dDerived, cDerived.Hash)), entrydb.ErrOutOfOrder) require.ErrorIs(t, db.AddDerived(toRef(cDerivedFrom, bDerivedFrom.Hash), toRef(dDerived, cDerived.Hash)), types.ErrOutOfOrder)
}, },
assertFn: noChange, assertFn: noChange,
}, },
...@@ -60,14 +59,14 @@ func TestBadUpdates(t *testing.T) { ...@@ -60,14 +59,14 @@ func TestBadUpdates(t *testing.T) {
Hash: common.Hash{0xba, 0xd}, Hash: common.Hash{0xba, 0xd},
Number: dDerivedFrom.Number, Number: dDerivedFrom.Number,
Timestamp: dDerivedFrom.Timestamp, Timestamp: dDerivedFrom.Timestamp,
}, cDerivedFrom.Hash), toRef(eDerived, dDerived.Hash)), entrydb.ErrConflict) }, cDerivedFrom.Hash), toRef(eDerived, dDerived.Hash)), types.ErrConflict)
}, },
assertFn: noChange, assertFn: noChange,
}, },
{ {
name: "DerivedFrom with conflicting parent root, same L1 height, new L2: accepted, L1 parent-hash is used only on L1 increments.", name: "DerivedFrom with conflicting parent root, same L1 height, new L2: accepted, L1 parent-hash is used only on L1 increments.",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddDerived(toRef(dDerivedFrom, common.Hash{0x42}), toRef(eDerived, dDerived.Hash)), entrydb.ErrConflict) require.NoError(t, db.AddDerived(toRef(dDerivedFrom, common.Hash{0x42}), toRef(eDerived, dDerived.Hash)), types.ErrConflict)
}, },
assertFn: func(t *testing.T, db *DB, m *stubMetrics) { assertFn: func(t *testing.T, db *DB, m *stubMetrics) {
derivedFrom, derived, err := db.Latest() derivedFrom, derived, err := db.Latest()
...@@ -79,28 +78,28 @@ func TestBadUpdates(t *testing.T) { ...@@ -79,28 +78,28 @@ func TestBadUpdates(t *testing.T) {
{ {
name: "Conflicting derivedFrom parent root, new L1 height, same L2", name: "Conflicting derivedFrom parent root, new L1 height, same L2",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
require.ErrorIs(t, db.AddDerived(toRef(eDerivedFrom, common.Hash{0x42}), toRef(dDerived, cDerived.Hash)), entrydb.ErrConflict) require.ErrorIs(t, db.AddDerived(toRef(eDerivedFrom, common.Hash{0x42}), toRef(dDerived, cDerived.Hash)), types.ErrConflict)
}, },
assertFn: noChange, assertFn: noChange,
}, },
{ {
name: "add on too new derivedFrom (even if parent-hash looks correct)", name: "add on too new derivedFrom (even if parent-hash looks correct)",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
require.ErrorIs(t, db.AddDerived(toRef(fDerivedFrom, dDerivedFrom.Hash), toRef(eDerived, dDerived.Hash)), entrydb.ErrOutOfOrder) require.ErrorIs(t, db.AddDerived(toRef(fDerivedFrom, dDerivedFrom.Hash), toRef(eDerived, dDerived.Hash)), types.ErrOutOfOrder)
}, },
assertFn: noChange, assertFn: noChange,
}, },
{ {
name: "add on old derivedFrom (even if parent-hash looks correct)", name: "add on old derivedFrom (even if parent-hash looks correct)",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
require.ErrorIs(t, db.AddDerived(toRef(cDerivedFrom, bDerivedFrom.Hash), toRef(cDerived, dDerived.Hash)), entrydb.ErrOutOfOrder) require.ErrorIs(t, db.AddDerived(toRef(cDerivedFrom, bDerivedFrom.Hash), toRef(cDerived, dDerived.Hash)), types.ErrOutOfOrder)
}, },
assertFn: noChange, assertFn: noChange,
}, },
{ {
name: "add on even older derivedFrom", name: "add on even older derivedFrom",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
require.ErrorIs(t, db.AddDerived(toRef(bDerivedFrom, aDerivedFrom.Hash), toRef(dDerived, cDerived.Hash)), entrydb.ErrOutOfOrder) require.ErrorIs(t, db.AddDerived(toRef(bDerivedFrom, aDerivedFrom.Hash), toRef(dDerived, cDerived.Hash)), types.ErrOutOfOrder)
}, },
assertFn: noChange, assertFn: noChange,
}, },
...@@ -111,14 +110,14 @@ func TestBadUpdates(t *testing.T) { ...@@ -111,14 +110,14 @@ func TestBadUpdates(t *testing.T) {
Hash: common.Hash{0x42}, Hash: common.Hash{0x42},
Number: dDerived.Number, Number: dDerived.Number,
Timestamp: dDerived.Timestamp, Timestamp: dDerived.Timestamp,
}, cDerived.Hash)), entrydb.ErrConflict) }, cDerived.Hash)), types.ErrConflict)
}, },
assertFn: noChange, assertFn: noChange,
}, },
{ {
name: "add derived with conflicting parent hash, new L1 height, same L2 height: accepted, L2 parent-hash is only checked on L2 increments.", name: "add derived with conflicting parent hash, new L1 height, same L2 height: accepted, L2 parent-hash is only checked on L2 increments.",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
require.NoError(t, db.AddDerived(toRef(eDerivedFrom, dDerivedFrom.Hash), toRef(dDerived, common.Hash{0x42})), entrydb.ErrConflict) require.NoError(t, db.AddDerived(toRef(eDerivedFrom, dDerivedFrom.Hash), toRef(dDerived, common.Hash{0x42})), types.ErrConflict)
}, },
assertFn: func(t *testing.T, db *DB, m *stubMetrics) { assertFn: func(t *testing.T, db *DB, m *stubMetrics) {
derivedFrom, derived, err := db.Latest() derivedFrom, derived, err := db.Latest()
...@@ -130,28 +129,28 @@ func TestBadUpdates(t *testing.T) { ...@@ -130,28 +129,28 @@ func TestBadUpdates(t *testing.T) {
{ {
name: "add derived with conflicting parent hash, same L1 height, new L2 height", name: "add derived with conflicting parent hash, same L1 height, new L2 height",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
require.ErrorIs(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(eDerived, common.Hash{0x42})), entrydb.ErrConflict) require.ErrorIs(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(eDerived, common.Hash{0x42})), types.ErrConflict)
}, },
assertFn: noChange, assertFn: noChange,
}, },
{ {
name: "add on too new derived (even if parent-hash looks correct)", name: "add on too new derived (even if parent-hash looks correct)",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
require.ErrorIs(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(fDerived, dDerived.Hash)), entrydb.ErrOutOfOrder) require.ErrorIs(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(fDerived, dDerived.Hash)), types.ErrOutOfOrder)
}, },
assertFn: noChange, assertFn: noChange,
}, },
{ {
name: "add on old derived (even if parent-hash looks correct)", name: "add on old derived (even if parent-hash looks correct)",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
require.ErrorIs(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(cDerived, bDerived.Hash)), entrydb.ErrOutOfOrder) require.ErrorIs(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(cDerived, bDerived.Hash)), types.ErrOutOfOrder)
}, },
assertFn: noChange, assertFn: noChange,
}, },
{ {
name: "add on even older derived", name: "add on even older derived",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
require.ErrorIs(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(bDerived, aDerived.Hash)), entrydb.ErrOutOfOrder) require.ErrorIs(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(bDerived, aDerived.Hash)), types.ErrOutOfOrder)
}, },
assertFn: noChange, assertFn: noChange,
}, },
...@@ -159,7 +158,7 @@ func TestBadUpdates(t *testing.T) { ...@@ -159,7 +158,7 @@ func TestBadUpdates(t *testing.T) {
name: "repeat self, silent no-op", name: "repeat self, silent no-op",
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) {
pre := m.DBDerivedEntryCount pre := m.DBDerivedEntryCount
require.NoError(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(dDerived, cDerived.Hash)), entrydb.ErrOutOfOrder) require.NoError(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(dDerived, cDerived.Hash)), types.ErrOutOfOrder)
require.Equal(t, pre, m.DBDerivedEntryCount) require.Equal(t, pre, m.DBDerivedEntryCount)
}, },
assertFn: noChange, assertFn: noChange,
......
...@@ -139,8 +139,8 @@ func (db *DB) FindSealedBlock(number uint64) (seal types.BlockSeal, err error) { ...@@ -139,8 +139,8 @@ func (db *DB) FindSealedBlock(number uint64) (seal types.BlockSeal, err error) {
db.rwLock.RLock() db.rwLock.RLock()
defer db.rwLock.RUnlock() defer db.rwLock.RUnlock()
iter, err := db.newIteratorAt(number, 0) iter, err := db.newIteratorAt(number, 0)
if errors.Is(err, entrydb.ErrFuture) { if errors.Is(err, types.ErrFuture) {
return types.BlockSeal{}, fmt.Errorf("block %d is not known yet: %w", number, entrydb.ErrFuture) return types.BlockSeal{}, fmt.Errorf("block %d is not known yet: %w", number, types.ErrFuture)
} else if err != nil { } else if err != nil {
return types.BlockSeal{}, fmt.Errorf("failed to find sealed block %d: %w", number, err) return types.BlockSeal{}, fmt.Errorf("failed to find sealed block %d: %w", number, err)
} }
...@@ -162,6 +162,89 @@ func (db *DB) FindSealedBlock(number uint64) (seal types.BlockSeal, err error) { ...@@ -162,6 +162,89 @@ func (db *DB) FindSealedBlock(number uint64) (seal types.BlockSeal, err error) {
}, nil }, nil
} }
// StartingBlock returns the first block seal in the DB, if any.
// If the DB is empty, the error from advancing the iterator to the
// first block is returned.
func (db *DB) StartingBlock() (seal types.BlockSeal, err error) {
	db.rwLock.RLock()
	defer db.rwLock.RUnlock()
	iter := db.newIterator(0)
	// Advance to the first sealed block; an empty DB surfaces as an error here.
	if err := iter.NextBlock(); err != nil {
		return types.BlockSeal{}, err
	}
	h, n, _ := iter.SealedBlock()
	t, _ := iter.SealedTimestamp()
	// Return nil explicitly: the original returned the named `err`, which is
	// always nil on this path (the iterator error above was a shadowed local).
	return types.BlockSeal{
		Hash:      h,
		Number:    n,
		Timestamp: t,
	}, nil
}
// OpenBlock returns the Executing Messages for the block at the given number.
// It returns identification of the block (including its parent-hash, when known),
// the number of logs sealed into the block, and the executing messages by log index.
// For block 0 there is no parent and no logs; logCount is 0 and execMsgs is nil.
func (db *DB) OpenBlock(blockNum uint64) (ref eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, retErr error) {
	db.rwLock.RLock()
	defer db.rwLock.RUnlock()

	if blockNum == 0 {
		// Read the first block seal directly instead of calling StartingBlock:
		// that method takes the read-lock we already hold, and recursively
		// read-locking a sync.RWMutex can deadlock if a writer queues up
		// between the two RLock calls (see sync.RWMutex documentation).
		iter := db.newIterator(0)
		if err := iter.NextBlock(); err != nil {
			retErr = err
			return
		}
		h, n, _ := iter.SealedBlock()
		t, _ := iter.SealedTimestamp()
		ref = eth.BlockRef{
			Hash:       h,
			Number:     n,
			ParentHash: common.Hash{}, // block 0 has no parent
			Time:       t,
		}
		logCount = 0
		execMsgs = nil
		return
	}

	// start at the first log (if any) after the block-seal of the parent block
	blockIter, err := db.newIteratorAt(blockNum-1, 0)
	if err != nil {
		retErr = err
		return
	}
	// register the parent block
	parentHash, _, ok := blockIter.SealedBlock()
	if ok {
		ref.ParentHash = parentHash
	}
	// walk to the end of the block, and remember what we see in the block.
	logCount = 0
	execMsgs = make(map[uint32]*types.ExecutingMessage)
	retErr = blockIter.TraverseConditional(func(state IteratorState) error {
		_, logIndex, ok := state.InitMessage()
		if ok {
			// log indices are contiguous, so the count is last-seen index + 1
			logCount = logIndex + 1
		}
		if m := state.ExecMessage(); m != nil {
			execMsgs[logIndex] = m
		}
		h, n, ok := state.SealedBlock()
		if !ok {
			return nil
		}
		if n == blockNum {
			// found the seal of the block we opened; stop traversal cleanly
			ref.Number = n
			ref.Hash = h
			ref.Time, _ = state.SealedTimestamp()
			return types.ErrStop
		}
		if n > blockNum {
			return fmt.Errorf("expected to run into block %d, but did not find it, found %d: %w", blockNum, n, types.ErrDataCorruption)
		}
		return nil
	})
	// ErrStop is the expected terminal signal, not a failure.
	if errors.Is(retErr, types.ErrStop) {
		retErr = nil
	}
	return
}
// LatestSealedBlockNum returns the block number of the block that was last sealed, // LatestSealedBlockNum returns the block number of the block that was last sealed,
// or ok=false if there is no sealed block (i.e. empty DB) // or ok=false if there is no sealed block (i.e. empty DB)
func (db *DB) LatestSealedBlockNum() (n uint64, ok bool) { func (db *DB) LatestSealedBlockNum() (n uint64, ok bool) {
...@@ -213,21 +296,21 @@ func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (typ ...@@ -213,21 +296,21 @@ func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (typ
return nil return nil
} }
if n == blockNum { if n == blockNum {
return entrydb.ErrStop return types.ErrStop
} }
if n > blockNum { if n > blockNum {
return entrydb.ErrDataCorruption return types.ErrDataCorruption
} }
return nil return nil
}) })
if err == nil { if err == nil {
panic("expected iterator to stop with error") panic("expected iterator to stop with error")
} }
if errors.Is(err, entrydb.ErrFuture) { if errors.Is(err, types.ErrFuture) {
// Log is known, but as part of an unsealed block. // Log is known, but as part of an unsealed block.
return types.BlockSeal{}, nil return types.BlockSeal{}, nil
} }
if errors.Is(err, entrydb.ErrStop) { if errors.Is(err, types.ErrStop) {
h, n, _ := iter.SealedBlock() h, n, _ := iter.SealedBlock()
timestamp, _ := iter.SealedTimestamp() timestamp, _ := iter.SealedTimestamp()
return types.BlockSeal{ return types.BlockSeal{
...@@ -241,12 +324,12 @@ func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (typ ...@@ -241,12 +324,12 @@ func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (typ
func (db *DB) findLogInfo(blockNum uint64, logIdx uint32) (common.Hash, Iterator, error) { func (db *DB) findLogInfo(blockNum uint64, logIdx uint32) (common.Hash, Iterator, error) {
if blockNum == 0 { if blockNum == 0 {
return common.Hash{}, nil, entrydb.ErrConflict // no logs in block 0 return common.Hash{}, nil, types.ErrConflict // no logs in block 0
} }
// blockNum-1, such that we find a log that came after the parent num-1 was sealed. // blockNum-1, such that we find a log that came after the parent num-1 was sealed.
// logIdx, such that all entries before logIdx can be skipped, but logIdx itself is still readable. // logIdx, such that all entries before logIdx can be skipped, but logIdx itself is still readable.
iter, err := db.newIteratorAt(blockNum-1, logIdx) iter, err := db.newIteratorAt(blockNum-1, logIdx)
if errors.Is(err, entrydb.ErrFuture) { if errors.Is(err, types.ErrFuture) {
db.log.Trace("Could not find log yet", "blockNum", blockNum, "logIdx", logIdx) db.log.Trace("Could not find log yet", "blockNum", blockNum, "logIdx", logIdx)
return common.Hash{}, nil, err return common.Hash{}, nil, err
} else if err != nil { } else if err != nil {
...@@ -261,7 +344,7 @@ func (db *DB) findLogInfo(blockNum uint64, logIdx uint32) (common.Hash, Iterator ...@@ -261,7 +344,7 @@ func (db *DB) findLogInfo(blockNum uint64, logIdx uint32) (common.Hash, Iterator
} else if x < blockNum-1 { } else if x < blockNum-1 {
panic(fmt.Errorf("bug in newIteratorAt, expected to have found parent block %d but got %d", blockNum-1, x)) panic(fmt.Errorf("bug in newIteratorAt, expected to have found parent block %d but got %d", blockNum-1, x))
} else if x > blockNum-1 { } else if x > blockNum-1 {
return common.Hash{}, nil, fmt.Errorf("log does not exist, found next block already: %w", entrydb.ErrConflict) return common.Hash{}, nil, fmt.Errorf("log does not exist, found next block already: %w", types.ErrConflict)
} }
logHash, x, ok := iter.InitMessage() logHash, x, ok := iter.InitMessage()
if !ok { if !ok {
...@@ -282,7 +365,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error) ...@@ -282,7 +365,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error)
searchCheckpointIndex, err := db.searchCheckpoint(blockNum, logIndex) searchCheckpointIndex, err := db.searchCheckpoint(blockNum, logIndex)
if errors.Is(err, io.EOF) { if errors.Is(err, io.EOF) {
// Did not find a checkpoint to start reading from so the log cannot be present. // Did not find a checkpoint to start reading from so the log cannot be present.
return nil, entrydb.ErrFuture return nil, types.ErrFuture
} else if err != nil { } else if err != nil {
return nil, err return nil, err
} }
...@@ -298,9 +381,9 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error) ...@@ -298,9 +381,9 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error)
if _, n, ok := iter.SealedBlock(); ok && n == blockNum { // we may already have it exactly if _, n, ok := iter.SealedBlock(); ok && n == blockNum { // we may already have it exactly
break break
} }
if err := iter.NextBlock(); errors.Is(err, entrydb.ErrFuture) { if err := iter.NextBlock(); errors.Is(err, types.ErrFuture) {
db.log.Trace("ran out of data, could not find block", "nextIndex", iter.NextIndex(), "target", blockNum) db.log.Trace("ran out of data, could not find block", "nextIndex", iter.NextIndex(), "target", blockNum)
return nil, entrydb.ErrFuture return nil, types.ErrFuture
} else if err != nil { } else if err != nil {
db.log.Error("failed to read next block", "nextIndex", iter.NextIndex(), "target", blockNum, "err", err) db.log.Error("failed to read next block", "nextIndex", iter.NextIndex(), "target", blockNum, "err", err)
return nil, err return nil, err
...@@ -314,7 +397,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error) ...@@ -314,7 +397,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error)
continue continue
} }
if num != blockNum { // block does not contain if num != blockNum { // block does not contain
return nil, fmt.Errorf("looking for %d, but already at %d: %w", blockNum, num, entrydb.ErrConflict) return nil, fmt.Errorf("looking for %d, but already at %d: %w", blockNum, num, types.ErrConflict)
} }
break break
} }
...@@ -323,7 +406,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error) ...@@ -323,7 +406,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error)
// so two logs before quiting (and not 3 to then quit after). // so two logs before quiting (and not 3 to then quit after).
for iter.current.logsSince < logIndex { for iter.current.logsSince < logIndex {
if err := iter.NextInitMsg(); err == io.EOF { if err := iter.NextInitMsg(); err == io.EOF {
return nil, entrydb.ErrFuture return nil, types.ErrFuture
} else if err != nil { } else if err != nil {
return nil, err return nil, err
} }
...@@ -333,7 +416,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error) ...@@ -333,7 +416,7 @@ func (db *DB) newIteratorAt(blockNum uint64, logIndex uint32) (*iterator, error)
} }
if num > blockNum { if num > blockNum {
// we overshot, the block did not contain as many seen log events as requested // we overshot, the block did not contain as many seen log events as requested
return nil, entrydb.ErrConflict return nil, types.ErrConflict
} }
_, idx, ok := iter.InitMessage() _, idx, ok := iter.InitMessage()
if !ok { if !ok {
...@@ -367,7 +450,7 @@ func (db *DB) newIterator(index entrydb.EntryIdx) *iterator { ...@@ -367,7 +450,7 @@ func (db *DB) newIterator(index entrydb.EntryIdx) *iterator {
// Returns the index of the searchCheckpoint to begin reading from or an error. // Returns the index of the searchCheckpoint to begin reading from or an error.
func (db *DB) searchCheckpoint(sealedBlockNum uint64, logsSince uint32) (entrydb.EntryIdx, error) { func (db *DB) searchCheckpoint(sealedBlockNum uint64, logsSince uint32) (entrydb.EntryIdx, error) {
if db.lastEntryContext.nextEntryIndex == 0 { if db.lastEntryContext.nextEntryIndex == 0 {
return 0, entrydb.ErrFuture // empty DB, everything is in the future return 0, types.ErrFuture // empty DB, everything is in the future
} }
n := (db.lastEntryIdx() / searchCheckpointFrequency) + 1 n := (db.lastEntryIdx() / searchCheckpointFrequency) + 1
// Define: x is the array of known checkpoints // Define: x is the array of known checkpoints
...@@ -404,7 +487,7 @@ func (db *DB) searchCheckpoint(sealedBlockNum uint64, logsSince uint32) (entrydb ...@@ -404,7 +487,7 @@ func (db *DB) searchCheckpoint(sealedBlockNum uint64, logsSince uint32) (entrydb
if checkpoint.blockNum > sealedBlockNum || if checkpoint.blockNum > sealedBlockNum ||
(checkpoint.blockNum == sealedBlockNum && checkpoint.logsSince > logsSince) { (checkpoint.blockNum == sealedBlockNum && checkpoint.logsSince > logsSince) {
return 0, fmt.Errorf("missing data, earliest search checkpoint is %d with %d logs, cannot find something before or at %d with %d logs: %w", return 0, fmt.Errorf("missing data, earliest search checkpoint is %d with %d logs, cannot find something before or at %d with %d logs: %w",
checkpoint.blockNum, checkpoint.logsSince, sealedBlockNum, logsSince, entrydb.ErrSkipped) checkpoint.blockNum, checkpoint.logsSince, sealedBlockNum, logsSince, types.ErrSkipped)
} }
return result, nil return result, nil
} }
......
...@@ -6,7 +6,6 @@ import ( ...@@ -6,7 +6,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/entrydb"
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types"
) )
...@@ -29,7 +28,7 @@ func newSearchCheckpoint(blockNum uint64, logsSince uint32, timestamp uint64) se ...@@ -29,7 +28,7 @@ func newSearchCheckpoint(blockNum uint64, logsSince uint32, timestamp uint64) se
func newSearchCheckpointFromEntry(data Entry) (searchCheckpoint, error) { func newSearchCheckpointFromEntry(data Entry) (searchCheckpoint, error) {
if data.Type() != TypeSearchCheckpoint { if data.Type() != TypeSearchCheckpoint {
return searchCheckpoint{}, fmt.Errorf("%w: attempting to decode search checkpoint but was type %s", entrydb.ErrDataCorruption, data.Type()) return searchCheckpoint{}, fmt.Errorf("%w: attempting to decode search checkpoint but was type %s", types.ErrDataCorruption, data.Type())
} }
return searchCheckpoint{ return searchCheckpoint{
blockNum: binary.LittleEndian.Uint64(data[1:9]), blockNum: binary.LittleEndian.Uint64(data[1:9]),
...@@ -59,7 +58,7 @@ func newCanonicalHash(hash common.Hash) canonicalHash { ...@@ -59,7 +58,7 @@ func newCanonicalHash(hash common.Hash) canonicalHash {
func newCanonicalHashFromEntry(data Entry) (canonicalHash, error) { func newCanonicalHashFromEntry(data Entry) (canonicalHash, error) {
if data.Type() != TypeCanonicalHash { if data.Type() != TypeCanonicalHash {
return canonicalHash{}, fmt.Errorf("%w: attempting to decode canonical hash but was type %s", entrydb.ErrDataCorruption, data.Type()) return canonicalHash{}, fmt.Errorf("%w: attempting to decode canonical hash but was type %s", types.ErrDataCorruption, data.Type())
} }
return newCanonicalHash(common.Hash(data[1:33])), nil return newCanonicalHash(common.Hash(data[1:33])), nil
} }
...@@ -78,7 +77,7 @@ type initiatingEvent struct { ...@@ -78,7 +77,7 @@ type initiatingEvent struct {
func newInitiatingEventFromEntry(data Entry) (initiatingEvent, error) { func newInitiatingEventFromEntry(data Entry) (initiatingEvent, error) {
if data.Type() != TypeInitiatingEvent { if data.Type() != TypeInitiatingEvent {
return initiatingEvent{}, fmt.Errorf("%w: attempting to decode initiating event but was type %s", entrydb.ErrDataCorruption, data.Type()) return initiatingEvent{}, fmt.Errorf("%w: attempting to decode initiating event but was type %s", types.ErrDataCorruption, data.Type())
} }
flags := data[1] flags := data[1]
return initiatingEvent{ return initiatingEvent{
...@@ -109,7 +108,7 @@ func (i initiatingEvent) encode() Entry { ...@@ -109,7 +108,7 @@ func (i initiatingEvent) encode() Entry {
} }
type executingLink struct { type executingLink struct {
chain uint32 chain uint32 // chain index, not a chain ID
blockNum uint64 blockNum uint64
logIdx uint32 logIdx uint32
timestamp uint64 timestamp uint64
...@@ -120,7 +119,7 @@ func newExecutingLink(msg types.ExecutingMessage) (executingLink, error) { ...@@ -120,7 +119,7 @@ func newExecutingLink(msg types.ExecutingMessage) (executingLink, error) {
return executingLink{}, fmt.Errorf("log idx is too large (%v)", msg.LogIdx) return executingLink{}, fmt.Errorf("log idx is too large (%v)", msg.LogIdx)
} }
return executingLink{ return executingLink{
chain: msg.Chain, chain: uint32(msg.Chain),
blockNum: msg.BlockNum, blockNum: msg.BlockNum,
logIdx: msg.LogIdx, logIdx: msg.LogIdx,
timestamp: msg.Timestamp, timestamp: msg.Timestamp,
...@@ -129,7 +128,7 @@ func newExecutingLink(msg types.ExecutingMessage) (executingLink, error) { ...@@ -129,7 +128,7 @@ func newExecutingLink(msg types.ExecutingMessage) (executingLink, error) {
func newExecutingLinkFromEntry(data Entry) (executingLink, error) { func newExecutingLinkFromEntry(data Entry) (executingLink, error) {
if data.Type() != TypeExecutingLink { if data.Type() != TypeExecutingLink {
return executingLink{}, fmt.Errorf("%w: attempting to decode executing link but was type %s", entrydb.ErrDataCorruption, data.Type()) return executingLink{}, fmt.Errorf("%w: attempting to decode executing link but was type %s", types.ErrDataCorruption, data.Type())
} }
timestamp := binary.LittleEndian.Uint64(data[16:24]) timestamp := binary.LittleEndian.Uint64(data[16:24])
return executingLink{ return executingLink{
...@@ -166,7 +165,7 @@ func newExecutingCheck(hash common.Hash) executingCheck { ...@@ -166,7 +165,7 @@ func newExecutingCheck(hash common.Hash) executingCheck {
func newExecutingCheckFromEntry(data Entry) (executingCheck, error) { func newExecutingCheckFromEntry(data Entry) (executingCheck, error) {
if data.Type() != TypeExecutingCheck { if data.Type() != TypeExecutingCheck {
return executingCheck{}, fmt.Errorf("%w: attempting to decode executing check but was type %s", entrydb.ErrDataCorruption, data.Type()) return executingCheck{}, fmt.Errorf("%w: attempting to decode executing check but was type %s", types.ErrDataCorruption, data.Type())
} }
return newExecutingCheck(common.Hash(data[1:33])), nil return newExecutingCheck(common.Hash(data[1:33])), nil
} }
......
...@@ -41,7 +41,7 @@ type traverseConditionalFn func(state IteratorState) error ...@@ -41,7 +41,7 @@ type traverseConditionalFn func(state IteratorState) error
func (i *iterator) End() error { func (i *iterator) End() error {
for { for {
_, err := i.next() _, err := i.next()
if errors.Is(err, entrydb.ErrFuture) { if errors.Is(err, types.ErrFuture) {
return nil return nil
} else if err != nil { } else if err != nil {
return err return err
...@@ -49,7 +49,7 @@ func (i *iterator) End() error { ...@@ -49,7 +49,7 @@ func (i *iterator) End() error {
} }
} }
// NextInitMsg returns the next initiating message in the iterator. // NextInitMsg advances the iterator until it reads the next Initiating Message into the current state.
// It scans forward until it finds and fully reads an initiating event, skipping any blocks. // It scans forward until it finds and fully reads an initiating event, skipping any blocks.
func (i *iterator) NextInitMsg() error { func (i *iterator) NextInitMsg() error {
seenLog := false seenLog := false
...@@ -73,9 +73,8 @@ func (i *iterator) NextInitMsg() error { ...@@ -73,9 +73,8 @@ func (i *iterator) NextInitMsg() error {
} }
} }
// NextExecMsg returns the next executing message in the iterator. // NextExecMsg advances the iterator until it reads the next Executing Message into the current state.
// It scans forward until it finds and fully reads an initiating event, skipping any blocks. // It scans forward until it finds and fully reads an initiating event, skipping any blocks.
// This does not stay at the executing message of the current initiating message, if there is any.
func (i *iterator) NextExecMsg() error { func (i *iterator) NextExecMsg() error {
for { for {
err := i.NextInitMsg() err := i.NextInitMsg()
...@@ -88,7 +87,7 @@ func (i *iterator) NextExecMsg() error { ...@@ -88,7 +87,7 @@ func (i *iterator) NextExecMsg() error {
} }
} }
// NextBlock returns the next block in the iterator. // NextBlock advances the iterator until it reads the next block into the current state.
// It scans forward until it finds and fully reads a block, skipping any events. // It scans forward until it finds and fully reads a block, skipping any events.
func (i *iterator) NextBlock() error { func (i *iterator) NextBlock() error {
seenBlock := false seenBlock := false
...@@ -134,7 +133,7 @@ func (i *iterator) next() (EntryType, error) { ...@@ -134,7 +133,7 @@ func (i *iterator) next() (EntryType, error) {
entry, err := i.db.store.Read(index) entry, err := i.db.store.Read(index)
if err != nil { if err != nil {
if errors.Is(err, io.EOF) { if errors.Is(err, io.EOF) {
return 0, entrydb.ErrFuture return 0, types.ErrFuture
} }
return 0, fmt.Errorf("failed to read entry %d: %w", index, err) return 0, fmt.Errorf("failed to read entry %d: %w", index, err)
} }
......
...@@ -200,7 +200,7 @@ func (l *logContext) processEntry(entry Entry) error { ...@@ -200,7 +200,7 @@ func (l *logContext) processEntry(entry Entry) error {
return err return err
} }
l.execMsg = &types.ExecutingMessage{ l.execMsg = &types.ExecutingMessage{
Chain: link.chain, Chain: types.ChainIndex(link.chain), // TODO(#11105): translate chain ID to chain index
BlockNum: link.blockNum, BlockNum: link.blockNum,
LogIdx: link.logIdx, LogIdx: link.logIdx,
Timestamp: link.timestamp, Timestamp: link.timestamp,
...@@ -352,13 +352,13 @@ func (l *logContext) SealBlock(parent common.Hash, upd eth.BlockID, timestamp ui ...@@ -352,13 +352,13 @@ func (l *logContext) SealBlock(parent common.Hash, upd eth.BlockID, timestamp ui
return err return err
} }
if l.blockHash != parent { if l.blockHash != parent {
return fmt.Errorf("%w: cannot apply block %s (parent %s) on top of %s", entrydb.ErrConflict, upd, parent, l.blockHash) return fmt.Errorf("%w: cannot apply block %s (parent %s) on top of %s", types.ErrConflict, upd, parent, l.blockHash)
} }
if l.blockHash != (common.Hash{}) && l.blockNum+1 != upd.Number { if l.blockHash != (common.Hash{}) && l.blockNum+1 != upd.Number {
return fmt.Errorf("%w: cannot apply block %d on top of %d", entrydb.ErrConflict, upd.Number, l.blockNum) return fmt.Errorf("%w: cannot apply block %d on top of %d", types.ErrConflict, upd.Number, l.blockNum)
} }
if l.timestamp > timestamp { if l.timestamp > timestamp {
return fmt.Errorf("%w: block timestamp %d must be equal or larger than current timestamp %d", entrydb.ErrConflict, timestamp, l.timestamp) return fmt.Errorf("%w: block timestamp %d must be equal or larger than current timestamp %d", types.ErrConflict, timestamp, l.timestamp)
} }
} }
l.blockHash = upd.Hash l.blockHash = upd.Hash
...@@ -375,28 +375,28 @@ func (l *logContext) SealBlock(parent common.Hash, upd eth.BlockID, timestamp ui ...@@ -375,28 +375,28 @@ func (l *logContext) SealBlock(parent common.Hash, upd eth.BlockID, timestamp ui
// The parent-block that the log comes after must be applied with ApplyBlock first. // The parent-block that the log comes after must be applied with ApplyBlock first.
func (l *logContext) ApplyLog(parentBlock eth.BlockID, logIdx uint32, logHash common.Hash, execMsg *types.ExecutingMessage) error { func (l *logContext) ApplyLog(parentBlock eth.BlockID, logIdx uint32, logHash common.Hash, execMsg *types.ExecutingMessage) error {
if parentBlock == (eth.BlockID{}) { if parentBlock == (eth.BlockID{}) {
return fmt.Errorf("genesis does not have logs: %w", entrydb.ErrOutOfOrder) return fmt.Errorf("genesis does not have logs: %w", types.ErrOutOfOrder)
} }
if err := l.inferFull(); err != nil { // ensure we can start applying if err := l.inferFull(); err != nil { // ensure we can start applying
return err return err
} }
if !l.hasCompleteBlock() { if !l.hasCompleteBlock() {
if l.blockNum == 0 { if l.blockNum == 0 {
return fmt.Errorf("%w: should not have logs in block 0", entrydb.ErrOutOfOrder) return fmt.Errorf("%w: should not have logs in block 0", types.ErrOutOfOrder)
} else { } else {
return errors.New("cannot append log before last known block is sealed") return errors.New("cannot append log before last known block is sealed")
} }
} }
// check parent block // check parent block
if l.blockHash != parentBlock.Hash { if l.blockHash != parentBlock.Hash {
return fmt.Errorf("%w: log builds on top of block %s, but have block %s", entrydb.ErrOutOfOrder, parentBlock, l.blockHash) return fmt.Errorf("%w: log builds on top of block %s, but have block %s", types.ErrOutOfOrder, parentBlock, l.blockHash)
} }
if l.blockNum != parentBlock.Number { if l.blockNum != parentBlock.Number {
return fmt.Errorf("%w: log builds on top of block %d, but have block %d", entrydb.ErrOutOfOrder, parentBlock.Number, l.blockNum) return fmt.Errorf("%w: log builds on top of block %d, but have block %d", types.ErrOutOfOrder, parentBlock.Number, l.blockNum)
} }
// check if log fits on top. The length so far == the index of the next log. // check if log fits on top. The length so far == the index of the next log.
if logIdx != l.logsSince { if logIdx != l.logsSince {
return fmt.Errorf("%w: expected event index %d, cannot append %d", entrydb.ErrOutOfOrder, l.logsSince, logIdx) return fmt.Errorf("%w: expected event index %d, cannot append %d", types.ErrOutOfOrder, l.logsSince, logIdx)
} }
l.logHash = logHash l.logHash = logHash
l.execMsg = execMsg l.execMsg = execMsg
......
...@@ -20,7 +20,7 @@ func (db *ChainsDB) AddLog( ...@@ -20,7 +20,7 @@ func (db *ChainsDB) AddLog(
logDB, ok := db.logDBs[chain] logDB, ok := db.logDBs[chain]
if !ok { if !ok {
return fmt.Errorf("cannot AddLog: %w: %v", ErrUnknownChain, chain) return fmt.Errorf("cannot AddLog: %w: %v", types.ErrUnknownChain, chain)
} }
return logDB.AddLog(logHash, parentBlock, logIdx, execMsg) return logDB.AddLog(logHash, parentBlock, logIdx, execMsg)
} }
...@@ -31,8 +31,9 @@ func (db *ChainsDB) SealBlock(chain types.ChainID, block eth.BlockRef) error { ...@@ -31,8 +31,9 @@ func (db *ChainsDB) SealBlock(chain types.ChainID, block eth.BlockRef) error {
logDB, ok := db.logDBs[chain] logDB, ok := db.logDBs[chain]
if !ok { if !ok {
return fmt.Errorf("cannot SealBlock: %w: %v", ErrUnknownChain, chain) return fmt.Errorf("cannot SealBlock: %w: %v", types.ErrUnknownChain, chain)
} }
db.logger.Debug("Updating local unsafe", "chain", chain, "block", block)
err := logDB.SealBlock(block.ParentHash, block.ID(), block.Time) err := logDB.SealBlock(block.ParentHash, block.ID(), block.Time)
if err != nil { if err != nil {
return fmt.Errorf("failed to seal block %v: %w", block, err) return fmt.Errorf("failed to seal block %v: %w", block, err)
...@@ -46,7 +47,7 @@ func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error { ...@@ -46,7 +47,7 @@ func (db *ChainsDB) Rewind(chain types.ChainID, headBlockNum uint64) error {
logDB, ok := db.logDBs[chain] logDB, ok := db.logDBs[chain]
if !ok { if !ok {
return fmt.Errorf("cannot Rewind: %w: %s", ErrUnknownChain, chain) return fmt.Errorf("cannot Rewind: %w: %s", types.ErrUnknownChain, chain)
} }
return logDB.Rewind(headBlockNum) return logDB.Rewind(headBlockNum)
} }
...@@ -57,8 +58,9 @@ func (db *ChainsDB) UpdateLocalSafe(chain types.ChainID, derivedFrom eth.BlockRe ...@@ -57,8 +58,9 @@ func (db *ChainsDB) UpdateLocalSafe(chain types.ChainID, derivedFrom eth.BlockRe
localDB, ok := db.localDBs[chain] localDB, ok := db.localDBs[chain]
if !ok { if !ok {
return fmt.Errorf("cannot UpdateLocalSafe: %w: %v", ErrUnknownChain, chain) return fmt.Errorf("cannot UpdateLocalSafe: %w: %v", types.ErrUnknownChain, chain)
} }
db.logger.Debug("Updating local safe", "chain", chain, "derivedFrom", derivedFrom, "lastDerived", lastDerived)
return localDB.AddDerived(derivedFrom, lastDerived) return localDB.AddDerived(derivedFrom, lastDerived)
} }
...@@ -67,8 +69,9 @@ func (db *ChainsDB) UpdateCrossUnsafe(chain types.ChainID, crossUnsafe types.Blo ...@@ -67,8 +69,9 @@ func (db *ChainsDB) UpdateCrossUnsafe(chain types.ChainID, crossUnsafe types.Blo
defer db.mu.RUnlock() defer db.mu.RUnlock()
if _, ok := db.crossUnsafe[chain]; !ok { if _, ok := db.crossUnsafe[chain]; !ok {
return fmt.Errorf("cannot UpdateCrossUnsafe: %w: %s", ErrUnknownChain, chain) return fmt.Errorf("cannot UpdateCrossUnsafe: %w: %s", types.ErrUnknownChain, chain)
} }
db.logger.Debug("Updating cross unsafe", "chain", chain, "crossUnsafe", crossUnsafe)
db.crossUnsafe[chain] = crossUnsafe db.crossUnsafe[chain] = crossUnsafe
return nil return nil
} }
...@@ -79,8 +82,9 @@ func (db *ChainsDB) UpdateCrossSafe(chain types.ChainID, l1View eth.BlockRef, la ...@@ -79,8 +82,9 @@ func (db *ChainsDB) UpdateCrossSafe(chain types.ChainID, l1View eth.BlockRef, la
crossDB, ok := db.crossDBs[chain] crossDB, ok := db.crossDBs[chain]
if !ok { if !ok {
return fmt.Errorf("cannot UpdateCrossSafe: %w: %s", ErrUnknownChain, chain) return fmt.Errorf("cannot UpdateCrossSafe: %w: %s", types.ErrUnknownChain, chain)
} }
db.logger.Debug("Updating cross safe", "chain", chain, "l1View", l1View, "lastCrossDerived", lastCrossDerived)
return crossDB.AddDerived(l1View, lastCrossDerived) return crossDB.AddDerived(l1View, lastCrossDerived)
} }
...@@ -91,6 +95,7 @@ func (db *ChainsDB) UpdateFinalizedL1(finalized eth.BlockRef) error { ...@@ -91,6 +95,7 @@ func (db *ChainsDB) UpdateFinalizedL1(finalized eth.BlockRef) error {
if db.finalizedL1.Number > finalized.Number { if db.finalizedL1.Number > finalized.Number {
return fmt.Errorf("cannot rewind finalized L1 head from %s to %s", db.finalizedL1, finalized) return fmt.Errorf("cannot rewind finalized L1 head from %s to %s", db.finalizedL1, finalized)
} }
db.logger.Debug("Updating finalized L1", "finalizedL1", finalized)
db.finalizedL1 = finalized db.finalizedL1 = finalized
return nil return nil
} }
...@@ -76,7 +76,7 @@ func (i *CrossL2Inbox) DecodeExecutingMessageLog(l *ethTypes.Log) (types.Executi ...@@ -76,7 +76,7 @@ func (i *CrossL2Inbox) DecodeExecutingMessageLog(l *ethTypes.Log) (types.Executi
} }
hash := payloadHashToLogHash(msgHash, identifier.Origin) hash := payloadHashToLogHash(msgHash, identifier.Origin)
return types.ExecutingMessage{ return types.ExecutingMessage{
Chain: chainID, Chain: types.ChainIndex(chainID), // TODO(#11105): translate chain ID to chain index
Hash: hash, Hash: hash,
BlockNum: identifier.BlockNumber.Uint64(), BlockNum: identifier.BlockNumber.Uint64(),
LogIdx: uint32(identifier.LogIndex.Uint64()), LogIdx: uint32(identifier.LogIndex.Uint64()),
......
...@@ -20,14 +20,14 @@ func TestDecodeExecutingMessageEvent(t *testing.T) { ...@@ -20,14 +20,14 @@ func TestDecodeExecutingMessageEvent(t *testing.T) {
payload := bytes.Repeat([]byte{0xaa, 0xbb}, 50) payload := bytes.Repeat([]byte{0xaa, 0xbb}, 50)
payloadHash := crypto.Keccak256Hash(payload) payloadHash := crypto.Keccak256Hash(payload)
expected := types.ExecutingMessage{ expected := types.ExecutingMessage{
Chain: 42424, Chain: 42424, // TODO(#11105): translate chain ID to chain index
BlockNum: 12345, BlockNum: 12345,
LogIdx: 98, LogIdx: 98,
Timestamp: 9578295, Timestamp: 9578295,
} }
contractIdent := contractIdentifier{ contractIdent := contractIdentifier{
Origin: common.Address{0xbb, 0xcc}, Origin: common.Address{0xbb, 0xcc},
ChainId: new(big.Int).SetUint64(uint64(expected.Chain)), ChainId: new(big.Int).SetUint64(uint64(expected.Chain)), // TODO(#11105): translate chain ID to chain index
BlockNumber: new(big.Int).SetUint64(expected.BlockNum), BlockNumber: new(big.Int).SetUint64(expected.BlockNum),
Timestamp: new(big.Int).SetUint64(expected.Timestamp), Timestamp: new(big.Int).SetUint64(expected.Timestamp),
LogIndex: new(big.Int).SetUint64(uint64(expected.LogIdx)), LogIndex: new(big.Int).SetUint64(uint64(expected.LogIdx)),
......
package entrydb package types
import "errors" import "errors"
...@@ -17,4 +17,11 @@ var ( ...@@ -17,4 +17,11 @@ var (
ErrConflict = errors.New("conflicting data") ErrConflict = errors.New("conflicting data")
// ErrStop can be used in iterators to indicate iteration has to stop // ErrStop can be used in iterators to indicate iteration has to stop
ErrStop = errors.New("iter stop") ErrStop = errors.New("iter stop")
// ErrOutOfScope is when data is accessed, but access is not allowed, because of a limited scope.
// E.g. when limiting scope to L2 blocks derived from a specific subset of the L1 chain.
ErrOutOfScope = errors.New("out of scope")
// ErrUnknownChain is when a chain is unknown, not in the dependency set.
ErrUnknownChain = errors.New("unknown chain")
// ErrNoRPCSource happens when a sub-service needs an RPC data source, but is not configured with one.
ErrNoRPCSource = errors.New("no RPC client configured")
) )
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"math" "math"
"math/big" "math/big"
"strconv"
"github.com/holiman/uint256" "github.com/holiman/uint256"
...@@ -15,8 +16,28 @@ import ( ...@@ -15,8 +16,28 @@ import (
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
) )
// ChainIndex represents the lifetime of a chain in a dependency set.
type ChainIndex uint32
func (ci ChainIndex) String() string {
return strconv.FormatUint(uint64(ci), 10)
}
func (ci ChainIndex) MarshalText() ([]byte, error) {
return []byte(ci.String()), nil
}
func (ci *ChainIndex) UnmarshalText(data []byte) error {
v, err := strconv.ParseUint(string(data), 10, 32)
if err != nil {
return err
}
*ci = ChainIndex(v)
return nil
}
type ExecutingMessage struct { type ExecutingMessage struct {
Chain uint32 // same as ChainID for now, but will be indirect, i.e. translated to full ID, later Chain ChainIndex // same as ChainID for now, but will be indirect, i.e. translated to full ID, later
BlockNum uint64 BlockNum uint64
LogIdx uint32 LogIdx uint32
Timestamp uint64 Timestamp uint64
...@@ -24,7 +45,7 @@ type ExecutingMessage struct { ...@@ -24,7 +45,7 @@ type ExecutingMessage struct {
} }
func (s *ExecutingMessage) String() string { func (s *ExecutingMessage) String() string {
return fmt.Sprintf("ExecMsg(chain: %d, block: %d, log: %d, time: %d, logHash: %s)", return fmt.Sprintf("ExecMsg(chainIndex: %s, block: %d, log: %d, time: %d, logHash: %s)",
s.Chain, s.BlockNum, s.LogIdx, s.Timestamp, s.Hash) s.Chain, s.BlockNum, s.LogIdx, s.Timestamp, s.Hash)
} }
...@@ -208,3 +229,25 @@ func (s BlockSeal) String() string { ...@@ -208,3 +229,25 @@ func (s BlockSeal) String() string {
func (s BlockSeal) ID() eth.BlockID { func (s BlockSeal) ID() eth.BlockID {
return eth.BlockID{Hash: s.Hash, Number: s.Number} return eth.BlockID{Hash: s.Hash, Number: s.Number}
} }
func (s BlockSeal) WithParent(parent eth.BlockID) eth.BlockRef {
// prevent parent attachment if the parent is not the previous block,
// and the block is not the genesis block
if s.Number != parent.Number+1 && s.Number != 0 {
panic(fmt.Errorf("invalid parent block %s to combine with %s", parent, s))
}
return eth.BlockRef{
Hash: s.Hash,
Number: s.Number,
ParentHash: parent.Hash,
Time: s.Timestamp,
}
}
func BlockSealFromRef(ref eth.BlockRef) BlockSeal {
return BlockSeal{
Hash: ref.Hash,
Number: ref.Number,
Timestamp: ref.Time,
}
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment