package etl

import (
	"context"
	"errors"
	"time"

	"github.com/ethereum-optimism/optimism/indexer/config"
	"github.com/ethereum-optimism/optimism/indexer/database"
	"github.com/ethereum-optimism/optimism/indexer/node"
	"github.com/ethereum-optimism/optimism/op-service/retry"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
)
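
// L2ETL extends the generic ETL to index L2 state. Alongside every L2 block
// header, logs emitted by the configured L2 system contracts are persisted
// to the database.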
type L2ETL struct {
	ETL

	db *database.DB
}
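
// NewL2ETL returns a new L2ETL that resumes from the latest indexed L2 block
// header, or starts from L2 genesis if no state has been indexed yet.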
func NewL2ETL(cfg Config, log log.Logger, db *database.DB, metrics Metricer, client node.EthClient, contracts config.L2Contracts) (*L2ETL, error) {
	log = log.New("etl", "l2")

	zeroAddr := common.Address{}
	l2Contracts := []common.Address{}
	if err := contracts.ForEach(func(name string, addr common.Address) error {
		// Since we don't have backfill support yet, we want to make sure all expected
		// contracts are specified to ensure consistent behavior. Once backfill support
		// is ready, we can relax this requirement.
		if addr == zeroAddr {
			log.Error("address not configured", "name", name)
			return errors.New("all L2Contracts must be configured")
		}

		log.Info("configured contract", "name", name, "addr", addr)
		l2Contracts = append(l2Contracts, addr)
		return nil
	}); err != nil {
		return nil, err
	}

	latestHeader, err := db.Blocks.L2LatestBlockHeader()
	if err != nil {
		return nil, err
	}

	var fromHeader *types.Header
	if latestHeader != nil {
		log.Info("detected last indexed block", "number", latestHeader.Number, "hash", latestHeader.Hash)
		fromHeader = latestHeader.RLPHeader.Header()
	} else {
		log.Info("no indexed state, starting from genesis")
	}

	etlBatches := make(chan ETLBatch)
	etl := ETL{
		loopInterval:     time.Duration(cfg.LoopIntervalMsec) * time.Millisecond,
		headerBufferSize: uint64(cfg.HeaderBufferSize),
		log:              log,
		metrics:          metrics,
		headerTraversal:  node.NewHeaderTraversal(client, fromHeader, cfg.ConfirmationDepth),
		contracts:        l2Contracts,
		etlBatches:       etlBatches,
		EthClient:        client,
	}

	return &L2ETL{ETL: etl, db: db}, nil
}
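
// Start runs the embedded ETL in the background and persists each emitted
// batch of headers and logs, retrying failed writes with an exponential
// backoff before erroring out.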
func (l2Etl *L2ETL) Start(ctx context.Context) error {
	errCh := make(chan error, 1)
	go func() {
		errCh <- l2Etl.ETL.Start(ctx)
	}()

	for {
		select {
		case err := <-errCh:
			return err

		// Index incoming batches (all L2 blocks)
		case batch := <-l2Etl.etlBatches:
			l2BlockHeaders := make([]database.L2BlockHeader, len(batch.Headers))
			for i := range batch.Headers {
				l2BlockHeaders[i] = database.L2BlockHeader{BlockHeader: database.BlockHeaderFromHeader(&batch.Headers[i])}
			}

			l2ContractEvents := make([]database.L2ContractEvent, len(batch.Logs))
			for i := range batch.Logs {
				timestamp := batch.HeaderMap[batch.Logs[i].BlockHash].Time
				l2ContractEvents[i] = database.L2ContractEvent{ContractEvent: database.ContractEventFromLog(&batch.Logs[i], timestamp)}
			}

			// Continually try to persist this batch. If it fails after 10 attempts, we simply error out
			retryStrategy := &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250}
			if _, err := retry.Do[interface{}](ctx, 10, retryStrategy, func() (interface{}, error) {
				if err := l2Etl.db.Transaction(func(tx *database.DB) error {
					if err := tx.Blocks.StoreL2BlockHeaders(l2BlockHeaders); err != nil {
						return err
					}
					if len(l2ContractEvents) > 0 {
						if err := tx.ContractEvents.StoreL2ContractEvents(l2ContractEvents); err != nil {
							return err
						}
					}
					return nil
				}); err != nil {
					batch.Logger.Error("unable to persist batch", "err", err)
					return nil, err
				}

				l2Etl.ETL.metrics.RecordIndexedHeaders(len(l2BlockHeaders))
				l2Etl.ETL.metrics.RecordIndexedLatestHeight(l2BlockHeaders[len(l2BlockHeaders)-1].Number)
				if len(l2ContractEvents) > 0 {
					l2Etl.ETL.metrics.RecordIndexedLogs(len(l2ContractEvents))
				}

				// a-ok!
				return nil, nil
			}); err != nil {
				return err
			}

			batch.Logger.Info("indexed batch")
		}
	}
}