Commit 6063f24d authored by Hamdi Allam

update l1 processor to index output proposals. l2 processor only indexes checkpointed blocks

parent 69955784
@@ -3,6 +3,7 @@ package processor
 import (
 	"context"
 	"errors"
+	"math/big"
 	"reflect"

 	"github.com/ethereum-optimism/optimism/indexer/database"
@@ -58,7 +59,7 @@ func NewL2Processor(ethClient node.EthClient, db *database.DB, l2Contracts L2Contracts) (*L2Processor, error) {
 	l2ProcessLog := log.New("processor", "l2")
 	l2ProcessLog.Info("initializing processor")

-	latestHeader, err := db.Blocks.FinalizedL2BlockHeader()
+	latestHeader, err := db.Blocks.LatestL2BlockHeader()
 	if err != nil {
 		return nil, err
 	}
@@ -80,25 +81,52 @@ func NewL2Processor(ethClient node.EthClient, db *database.DB, l2Contracts L2Contracts) (*L2Processor, error) {
 	l2Processor := &L2Processor{
 		processor: processor{
-			fetcher:    node.NewFetcher(ethClient, fromL2Header),
+			headerTraversal: node.NewBufferedHeaderTraversal(ethClient, fromL2Header),
 			db:         db,
 			processFn:  l2ProcessFn(l2ProcessLog, ethClient, l2Contracts),
 			processLog: l2ProcessLog,
 		},
 	}

 	return l2Processor, nil
 }

-func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2Contracts) func(db *database.DB, headers []*types.Header) error {
+func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2Contracts) ProcessFn {
 	rawEthClient := ethclient.NewClient(ethClient.RawRpcClient())

 	contractAddrs := l2Contracts.toSlice()
 	processLog.Info("processor configured with contracts", "contracts", l2Contracts)
-	return func(db *database.DB, headers []*types.Header) error {
+	return func(db *database.DB, headers []*types.Header) (*types.Header, error) {
 		numHeaders := len(headers)

-		/** Index All L2 Blocks **/
+		latestOutput, err := db.Blocks.LatestOutputProposed()
+		if err != nil {
+			return nil, err
+		} else if latestOutput == nil {
+			processLog.Warn("no checkpointed outputs found. waiting...")
+			return nil, errors.New("no checkpointed l2 outputs")
+		}
+
+		// check if any of these blocks have been published to L1
+		latestOutputHeight := latestOutput.L2BlockNumber.Int
+		if headers[0].Number.Cmp(latestOutputHeight) > 0 {
+			processLog.Warn("entire batch exceeds the latest output", "latest_output_block_number", latestOutputHeight)
+			return nil, errors.New("entire batch exceeds latest output")
+		}
+
+		// check if we need to partially process this batch
+		if headers[numHeaders-1].Number.Cmp(latestOutputHeight) > 0 {
+			processLog.Info("reducing batch", "latest_output_block_number", latestOutputHeight)
+			// reduce the batch size
+			lastHeaderIndex := new(big.Int).Sub(latestOutputHeight, headers[0].Number).Uint64()
+			// update markers (including `lastHeaderIndex`)
+			headers = headers[:lastHeaderIndex+1]
+			numHeaders = len(headers)
+		}
+
+		/** Index all L2 blocks **/

 		l2Headers := make([]*database.L2BlockHeader, len(headers))
 		l2HeaderMap := make(map[common.Hash]*types.Header)
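
The truncation arithmetic above is worth spelling out: the batch is trimmed so that its last header is exactly the latest checkpointed output height. A minimal standalone sketch of the same computation, using hypothetical block numbers (100 and 105 are made up for illustration):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Hypothetical batch covering L2 blocks 100..109.
	batchStart := big.NewInt(100)
	batchSize := uint64(10)

	// Hypothetical latest checkpointed output at L2 block 105.
	latestOutputHeight := big.NewInt(105)

	// Same computation as the diff: 105 - 100 = 5, and the +1 below makes
	// the slice bound inclusive of block 105.
	lastHeaderIndex := new(big.Int).Sub(latestOutputHeight, batchStart).Uint64()
	kept := lastHeaderIndex + 1

	fmt.Printf("keeping %d of %d headers (blocks 100..105)\n", kept, batchSize)
}
```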
@@ -121,7 +149,7 @@ func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2Contracts) ProcessFn {
 		logFilter := ethereum.FilterQuery{FromBlock: headers[0].Number, ToBlock: headers[numHeaders-1].Number, Addresses: contractAddrs}
 		logs, err := rawEthClient.FilterLogs(context.Background(), logFilter)
 		if err != nil {
-			return err
+			return nil, err
 		}

 		numLogs := len(logs)
@@ -129,9 +157,8 @@ func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2Contracts) ProcessFn {
 		for i, log := range logs {
 			header, ok := l2HeaderMap[log.BlockHash]
 			if !ok {
-				// Log the individual headers in the batch?
-				processLog.Crit("contract event found with associated header not in the batch", "header", header, "log_index", log.Index)
-				return errors.New("parsed log with a block hash not in this batch")
+				processLog.Error("contract event found with associated header not in the batch", "header", header, "log_index", log.Index)
+				return nil, errors.New("parsed log with a block hash not in this batch")
 			}

 			l2ContractEvents[i] = &database.L2ContractEvent{
@@ -148,20 +175,21 @@ func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2Contracts) ProcessFn {
 		/** Update Database **/

+		processLog.Info("saving l2 blocks", "size", numHeaders)
 		err = db.Blocks.StoreL2BlockHeaders(l2Headers)
 		if err != nil {
-			return err
+			return nil, err
 		}

 		if numLogs > 0 {
-			processLog.Info("detected new contract logs", "size", numLogs)
+			processLog.Info("detected contract logs", "size", numLogs)
 			err = db.ContractEvents.StoreL2ContractEvents(l2ContractEvents)
 			if err != nil {
-				return err
+				return nil, err
			}
 		}

 		// a-ok!
-		return nil
+		return headers[numHeaders-1], nil
 	}
 }
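
With the closure now returning `(*types.Header, error)`, every processor must report how far into the batch it got so the loop can advance the traversal correctly. A hypothetical minimal ProcessFn illustrating just that contract (the name `noopProcessFn` and its body are illustrative, not part of this commit):

```go
package processor

import (
	"errors"

	"github.com/ethereum-optimism/optimism/indexer/database"
	"github.com/ethereum/go-ethereum/core/types"
)

// noopProcessFn is a hypothetical ProcessFn that indexes nothing but honors
// the partial-processing contract: report the last header actually handled,
// or nil plus an error to roll the whole batch back.
func noopProcessFn(db *database.DB, headers []*types.Header) (*types.Header, error) {
	if len(headers) == 0 {
		return nil, errors.New("empty batch")
	}

	// A real implementation would write to db here; any returned error
	// causes the wrapping transaction to commit nothing.
	return headers[len(headers)-1], nil
}
```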
@@ -12,53 +12,63 @@ import (
 const defaultLoopInterval = 5 * time.Second

-// processFn is the the function used to process unindexed headers. In
-// the event of a failure, all database operations are not committed
-type processFn func(*database.DB, []*types.Header) error
+// ProcessFn is the entrypoint for processing a batch of headers. To support
+// partial batch processing, the function must return the last processed header
+// in the batch. In the event of failure, database operations are rolled back
+type ProcessFn func(*database.DB, []*types.Header) (*types.Header, error)

 type processor struct {
-	fetcher    *node.Fetcher
+	headerTraversal *node.BufferedHeaderTraversal
 	db         *database.DB
-	processFn  processFn
+	processFn  ProcessFn
 	processLog log.Logger
 }

 // Start kicks off the processing loop
 func (p processor) Start() {
 	pollTicker := time.NewTicker(defaultLoopInterval)
+	defer pollTicker.Stop()

 	p.processLog.Info("starting processor...")
-	// Make this loop stoppable
 	for range pollTicker.C {
-		p.processLog.Info("checking for new headers...")
-		headers, err := p.fetcher.NextFinalizedHeaders()
+		headers, err := p.headerTraversal.NextFinalizedHeaders(500)
 		if err != nil {
-			p.processLog.Error("unable to query for headers", "err", err)
+			p.processLog.Error("error querying for headers", "err", err)
 			continue
-		}
-
-		if len(headers) == 0 {
-			p.processLog.Info("no new headers. indexer must be at head...")
+		} else if len(headers) == 0 {
+			// Logged as an error since this loop should be operating at a longer interval than the provider
+			p.processLog.Error("no new headers. processor unexpectedly at head...")
 			continue
 		}

-		batchLog := p.processLog.New("startHeight", headers[0].Number, "endHeight", headers[len(headers)-1].Number)
-		batchLog.Info("indexing batch of headers")
+		batchLog := p.processLog.New("batch_start_block_number", headers[0].Number, "batch_end_block_number", headers[len(headers)-1].Number)
+		batchLog.Info("processing batch")

-		// wrap operations within a single transaction
-		err = p.db.Transaction(func(db *database.DB) error {
-			return p.processFn(db, headers)
-		})
-
-		// TODO(DX-79) if processFn failed, the next poll should retry starting from this same batch of headers
+		var lastProcessedHeader *types.Header
+		err = p.db.Transaction(func(db *database.DB) error {
+			lastProcessedHeader, err = p.processFn(db, headers)
+			if err != nil {
+				return err
+			}
+
+			err = p.headerTraversal.Advance(lastProcessedHeader)
+			if err != nil {
+				batchLog.Error("unable to advance processor", "last_processed_block_number", lastProcessedHeader.Number)
+				return err
+			}
+
+			return nil
+		})
+
 		if err != nil {
-			batchLog.Info("unable to index batch", "err", err)
+			batchLog.Warn("error processing batch. no operations committed", "err", err)
+			panic(err)
 		} else {
-			batchLog.Info("done indexing batch")
+			if lastProcessedHeader.Number.Cmp(headers[len(headers)-1].Number) == 0 {
+				batchLog.Info("fully committed batch")
+			} else {
+				batchLog.Info("partially committed batch", "last_processed_block_number", lastProcessedHeader.Number)
+			}
 		}
 	}
 }
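
The loop above leans on `node.BufferedHeaderTraversal`, whose implementation is not part of this diff. A plausible sketch of the contract the processor assumes, written as a hypothetical interface (names and semantics inferred purely from the call sites, not from the actual package):

```go
package node

import "github.com/ethereum/go-ethereum/core/types"

// HeaderTraversal is an illustrative stand-in for BufferedHeaderTraversal,
// capturing only the two operations the processor loop calls.
type HeaderTraversal interface {
	// NextFinalizedHeaders returns up to maxSize finalized headers beyond
	// the last advanced position, without moving that position.
	NextFinalizedHeaders(maxSize uint64) ([]*types.Header, error)

	// Advance records header as the last processed header. Because a batch
	// may be only partially processed, header can fall before the end of
	// the most recently returned batch; the remainder is served again on
	// the next NextFinalizedHeaders call.
	Advance(header *types.Header) error
}
```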