Commit b47ee0a9 authored by mergify[bot]'s avatar mergify[bot] Committed by GitHub

Merge branch 'develop' into aj/retry-fetch

parents 300e244e 95fe7e47
......@@ -86,6 +86,9 @@ nuke: clean devnet-clean
.PHONY: nuke
devnet-up:
@if [ ! -e op-program/bin ]; then \
make cannon-prestate; \
fi
$(shell ./ops/scripts/newer-file.sh .devnet/allocs-l1.json ./packages/contracts-bedrock)
if [ $(.SHELLSTATUS) -ne 0 ]; then \
make devnet-allocs; \
......
......@@ -33,3 +33,28 @@ TODO add indexer to the optimism devnet compose file (previously removed for bre
`docker-compose.dev.yml` is git ignored. Fill in your own docker-compose file here.
## Architecture
![Architectural Diagram](./assets/architecture.png)
The indexer application is composed of two separate services that operate together:
**Indexer API** - A lightweight API service that supports paginated lookups for bridge events.
**Indexer Service** - A polling-based service that continuously reads and persists OP Stack chain data (i.e., block metadata, system contract events, synchronized bridge events) from an L1 and L2 chain.
### Indexer API
TBD
### Indexer Service
![Service Component Diagram](./assets/indexer-service.png)
The indexer service is responsible for polling and processing real-time batches of L1 and L2 chain data. The indexer service is currently composed of the following key components:
- **Poller Routines** - Individually poll the L1/L2 chain for new blocks and OP Stack system contract events.
- **Insertion Routines** - Await new batches from the poller routines and insert them into the database upon retrieval.
- **Bridge Routine** - Polls the database directly for new L1 blocks and bridge events. Upon retrieval, the bridge routine will:
* Process and persist new bridge events
* Synchronize L1 proven/finalized withdrawals with their L2 initialization counterparts
### Database
The indexer service currently supports a Postgres database for storing L1/L2 OP Stack chain data. The most up-to-date database schemas can be found in the `./migrations` directory.
**NOTE:** The indexer service implementation currently does not natively support database migrations. Because of this, the database must be manually updated to ensure forward compatibility with the latest indexer service implementation.
\ No newline at end of file
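Since migrations must be applied by hand, here is a minimal sketch of doing so from Go, assuming the files in `./migrations` are plain SQL that apply cleanly in lexicographic order (the helper below is hypothetical, not part of the indexer):

```go
package main

import (
	"database/sql"
	"fmt"
	"os"
	"path/filepath"
	"sort"

	_ "github.com/lib/pq" // Postgres driver
)

// applyMigrations executes every .sql file in dir in lexicographic order.
func applyMigrations(db *sql.DB, dir string) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	var files []string
	for _, e := range entries {
		if filepath.Ext(e.Name()) == ".sql" {
			files = append(files, e.Name())
		}
	}
	sort.Strings(files)
	for _, name := range files {
		stmt, err := os.ReadFile(filepath.Join(dir, name))
		if err != nil {
			return err
		}
		if _, err := db.Exec(string(stmt)); err != nil {
			return fmt.Errorf("migration %s failed: %w", name, err)
		}
	}
	return nil
}

func main() {
	db, err := sql.Open("postgres", os.Getenv("DATABASE_URL"))
	if err != nil {
		panic(err)
	}
	defer db.Close()
	if err := applyMigrations(db, "./migrations"); err != nil {
		panic(err)
	}
}
```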
......@@ -2,7 +2,6 @@ package config
import (
"fmt"
"math/big"
"os"
"reflect"
......@@ -65,10 +64,15 @@ func (c *L1Contracts) AsSlice() ([]common.Address, error) {
// ChainConfig configures the chain being indexed
type ChainConfig struct {
// Configure known chains with the l2 chain id
Preset int
L1Contracts L1Contracts `toml:"l1-contracts"`
// L1StartingHeight is the block height to start indexing from
L1StartingHeight uint `toml:"l1-starting-height"`
Preset int
L1Contracts L1Contracts `toml:"l1-contracts"`
L1StartingHeight uint `toml:"l1-starting-height"`
// These configuration options will be removed once
// native reorg handling is implemented
L1ConfirmationDepth uint `toml:"l1-confirmation-depth"`
L2ConfirmationDepth uint `toml:"l2-confirmation-depth"`
L1PollingInterval uint `toml:"l1-polling-interval"`
L2PollingInterval uint `toml:"l2-polling-interval"`
......@@ -77,11 +81,6 @@ type ChainConfig struct {
L2HeaderBufferSize uint `toml:"l2-header-buffer-size"`
}
// L1StartHeight returns the block height to start indexing from
func (cc *ChainConfig) L1StartHeight() *big.Int {
return big.NewInt(int64(cc.L1StartingHeight))
}
// RPCsConfig configures the RPC urls
type RPCsConfig struct {
L1RPC string `toml:"l1-rpc"`
......
......@@ -2,6 +2,7 @@ package database
import (
"errors"
"fmt"
"gorm.io/gorm"
......@@ -133,21 +134,31 @@ func (db *bridgeTransfersDB) L1BridgeDepositsByAddress(address common.Address, c
limit = defaultLimit
}
cursorClause := ""
if cursor != "" {
sourceHash := common.HexToHash(cursor)
txDeposit := new(L1TransactionDeposit)
result := db.gorm.Model(&L1TransactionDeposit{}).Where(&L1TransactionDeposit{SourceHash: sourceHash}).Take(txDeposit)
if result.Error != nil || errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, fmt.Errorf("unable to find transaction with supplied cursor source hash %s: %w", sourceHash, result.Error)
}
cursorClause = fmt.Sprintf("l1_transaction_deposits.timestamp <= %d", txDeposit.Tx.Timestamp)
}
// TODO join with l1_bridged_tokens and l2_bridged_tokens
ethAddressString := predeploys.LegacyERC20ETHAddr.String()
// Coalesce l1 transaction deposits that are simply ETH sends
ethTransactionDeposits := db.gorm.Model(&L1TransactionDeposit{})
ethTransactionDeposits = ethTransactionDeposits.Where(Transaction{FromAddress: address}).Where(`data = '0x' AND amount > 0`)
ethTransactionDeposits = ethTransactionDeposits.Where(Transaction{FromAddress: address}).Where("data = '0x' AND amount > 0")
ethTransactionDeposits = ethTransactionDeposits.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = initiated_l1_event_guid")
ethTransactionDeposits = ethTransactionDeposits.Select(`
from_address, to_address, amount, data, source_hash AS transaction_source_hash,
l2_transaction_hash, l1_contract_events.transaction_hash AS l1_transaction_hash,
l1_transaction_deposits.timestamp, NULL AS cross_domain_message_hash, ? AS local_token_address, ? AS remote_token_address`, ethAddressString, ethAddressString)
if cursor != "" {
// Probably need to fix this and compare timestamps
ethTransactionDeposits = ethTransactionDeposits.Where("source_hash < ?", cursor)
ethTransactionDeposits = ethTransactionDeposits.Order("timestamp DESC").Limit(limit + 1)
if cursorClause != "" {
ethTransactionDeposits = ethTransactionDeposits.Where(cursorClause)
}
depositsQuery := db.gorm.Model(&L1BridgeDeposit{})
......@@ -157,10 +168,9 @@ l1_transaction_deposits.timestamp, NULL AS cross_domain_message_hash, ? AS local
l1_bridge_deposits.from_address, l1_bridge_deposits.to_address, l1_bridge_deposits.amount, l1_bridge_deposits.data, transaction_source_hash,
l2_transaction_hash, l1_contract_events.transaction_hash AS l1_transaction_hash,
l1_bridge_deposits.timestamp, cross_domain_message_hash, local_token_address, remote_token_address`)
if cursor != "" {
// Probably need to fix this and compare timestamps
depositsQuery = depositsQuery.Where("source_hash < ?", cursor)
depositsQuery = depositsQuery.Order("timestamp DESC").Limit(limit + 1)
if cursorClause != "" {
depositsQuery = depositsQuery.Where(cursorClause)
}
query := db.gorm.Table("(?) AS deposits", depositsQuery)
......@@ -179,16 +189,11 @@ l1_bridge_deposits.timestamp, cross_domain_message_hash, local_token_address, re
hasNextPage := false
if len(deposits) > limit {
hasNextPage = true
nextCursor = deposits[limit].L1BridgeDeposit.TransactionSourceHash.String()
deposits = deposits[:limit]
nextCursor = deposits[limit].L1TransactionHash.String()
}
response := &L1BridgeDepositsResponse{
Deposits: deposits,
Cursor: nextCursor,
HasNextPage: hasNextPage,
}
response := &L1BridgeDepositsResponse{Deposits: deposits, Cursor: nextCursor, HasNextPage: hasNextPage}
return response, nil
}
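The deposit hunks above (and the withdrawal hunks below) implement keyset pagination: the opaque cursor is first resolved to its row, that row's timestamp bounds the page (timestamps remain comparable across the UNION of the two sub-queries, unlike hashes), and querying `limit + 1` rows reveals whether a further page exists. A condensed sketch of the pattern, using a hypothetical `Row` model rather than the real deposit/withdrawal types:

```go
package pagination

import (
	"fmt"

	"gorm.io/gorm"
)

// Row stands in for the deposit/withdrawal models above.
type Row struct {
	Hash      string
	Timestamp uint64
}

func paginate(db *gorm.DB, cursor string, limit int) ([]Row, string, bool, error) {
	q := db.Model(&Row{}).Order("timestamp DESC").Limit(limit + 1)
	if cursor != "" {
		// Resolve the cursor hash to a concrete row so paging can compare timestamps.
		var anchor Row
		if err := db.Where(&Row{Hash: cursor}).Take(&anchor).Error; err != nil {
			return nil, "", false, fmt.Errorf("unable to find row for cursor %s: %w", cursor, err)
		}
		q = q.Where("timestamp <= ?", anchor.Timestamp)
	}
	var rows []Row
	if err := q.Find(&rows).Error; err != nil {
		return nil, "", false, err
	}
	// The extra row signals another page and supplies the next cursor.
	nextCursor, hasNextPage := "", false
	if len(rows) > limit {
		hasNextPage = true
		nextCursor = rows[limit].Hash
		rows = rows[:limit]
	}
	return rows, nextCursor, hasNextPage, nil
}
```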
......@@ -242,6 +247,17 @@ func (db *bridgeTransfersDB) L2BridgeWithdrawalsByAddress(address common.Address
limit = defaultLimit
}
cursorClause := ""
if cursor != "" {
withdrawalHash := common.HexToHash(cursor)
var txWithdrawal L2TransactionWithdrawal
result := db.gorm.Model(&L2TransactionWithdrawal{}).Where(&L2TransactionWithdrawal{WithdrawalHash: withdrawalHash}).Take(&txWithdrawal)
if result.Error != nil || errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, fmt.Errorf("unable to find transaction with supplied cursor withdrawal hash %s: %w", withdrawalHash, result.Error)
}
cursorClause = fmt.Sprintf("l2_transaction_withdrawals.timestamp <= %d", txWithdrawal.Tx.Timestamp)
}
// TODO join with l1_bridged_tokens and l2_bridged_tokens
ethAddressString := predeploys.LegacyERC20ETHAddr.String()
......@@ -255,10 +271,9 @@ func (db *bridgeTransfersDB) L2BridgeWithdrawalsByAddress(address common.Address
from_address, to_address, amount, data, withdrawal_hash AS transaction_withdrawal_hash,
l2_contract_events.transaction_hash AS l2_transaction_hash, proven_l1_events.transaction_hash AS proven_l1_transaction_hash, finalized_l1_events.transaction_hash AS finalized_l1_transaction_hash,
l2_transaction_withdrawals.timestamp, NULL AS cross_domain_message_hash, ? AS local_token_address, ? AS remote_token_address`, ethAddressString, ethAddressString)
if cursor != "" {
// Probably need to fix this and compare timestamps
ethTransactionWithdrawals = ethTransactionWithdrawals.Where("withdrawal_hash < ?", cursor)
ethTransactionWithdrawals = ethTransactionWithdrawals.Order("timestamp DESC").Limit(limit + 1)
if cursorClause != "" {
ethTransactionWithdrawals = ethTransactionWithdrawals.Where(cursorClause)
}
withdrawalsQuery := db.gorm.Model(&L2BridgeWithdrawal{})
......@@ -270,17 +285,16 @@ l2_transaction_withdrawals.timestamp, NULL AS cross_domain_message_hash, ? AS lo
l2_bridge_withdrawals.from_address, l2_bridge_withdrawals.to_address, l2_bridge_withdrawals.amount, l2_bridge_withdrawals.data, transaction_withdrawal_hash,
l2_contract_events.transaction_hash AS l2_transaction_hash, proven_l1_events.transaction_hash AS proven_l1_transaction_hash, finalized_l1_events.transaction_hash AS finalized_l1_transaction_hash,
l2_bridge_withdrawals.timestamp, cross_domain_message_hash, local_token_address, remote_token_address`)
if cursor != "" {
// Probably need to fix this and compare timestamps
withdrawalsQuery = withdrawalsQuery.Where("withdrawal_hash < ?", cursor)
withdrawalsQuery = withdrawalsQuery.Order("timestamp DESC").Limit(limit + 1)
if cursorClause != "" {
withdrawalsQuery = withdrawalsQuery.Where(cursorClause)
}
query := db.gorm.Table("(?) AS withdrawals", withdrawalsQuery)
query = query.Joins("UNION (?)", ethTransactionWithdrawals)
query = query.Select("*").Order("timestamp DESC").Limit(limit + 1)
withdrawals := []L2BridgeWithdrawalWithTransactionHashes{}
result := query.Scan(&withdrawals)
result := query.Find(&withdrawals)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......@@ -292,21 +306,10 @@ l2_bridge_withdrawals.timestamp, cross_domain_message_hash, local_token_address,
hasNextPage := false
if len(withdrawals) > limit {
hasNextPage = true
nextCursor = withdrawals[limit].L2BridgeWithdrawal.TransactionWithdrawalHash.String()
withdrawals = withdrawals[:limit]
nextCursor = withdrawals[limit].L2TransactionHash.String()
}
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
response := &L2BridgeWithdrawalsResponse{
Withdrawals: withdrawals,
Cursor: nextCursor,
HasNextPage: hasNextPage,
}
response := &L2BridgeWithdrawalsResponse{Withdrawals: withdrawals, Cursor: nextCursor, HasNextPage: hasNextPage}
return response, nil
}
......@@ -43,9 +43,9 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
dbUser := os.Getenv("DB_USER")
dbName := setupTestDatabase(t)
// Replace the handler of the global logger with the testlog
logger := testlog.Logger(t, log.LvlInfo)
log.Root().SetHandler(logger.GetHandler())
// Discard the Global Logger as each component
// has its own configured logger
log.Root().SetHandler(log.DiscardHandler())
// Rollup System Configuration and Start
opCfg := op_e2e.DefaultSystemConfig(t)
......@@ -71,8 +71,10 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
L2RPC: opSys.EthInstances["sequencer"].HTTPEndpoint(),
},
Chain: config.ChainConfig{
L1PollingInterval: 1000,
L2PollingInterval: 1000,
L1PollingInterval: uint(opCfg.DeployConfig.L1BlockTime) * 1000,
L1ConfirmationDepth: 0,
L2PollingInterval: uint(opCfg.DeployConfig.L2BlockTime) * 1000,
L2ConfirmationDepth: 0,
L1Contracts: config.L1Contracts{
OptimismPortalProxy: opCfg.L1Deployments.OptimismPortalProxy,
L2OutputOracleProxy: opCfg.L1Deployments.L2OutputOracleProxy,
......@@ -90,7 +92,8 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
require.NoError(t, err)
t.Cleanup(func() { db.Close() })
indexer, err := indexer.NewIndexer(logger, db, indexerCfg.Chain, indexerCfg.RPCs, indexerCfg.Metrics)
indexerLog := testlog.Logger(t, log.LvlInfo).New("role", "indexer")
indexer, err := indexer.NewIndexer(indexerLog, db, indexerCfg.Chain, indexerCfg.RPCs, indexerCfg.Metrics)
require.NoError(t, err)
indexerCtx, indexerStop := context.WithCancel(context.Background())
......
......@@ -16,7 +16,9 @@ import (
type Config struct {
LoopIntervalMsec uint
HeaderBufferSize uint
StartHeight *big.Int
StartHeight *big.Int
ConfirmationDepth *big.Int
}
type ETL struct {
......
......@@ -3,6 +3,7 @@ package etl
import (
"context"
"fmt"
"sync"
"time"
"github.com/ethereum-optimism/optimism/indexer/config"
......@@ -16,7 +17,9 @@ import (
type L1ETL struct {
ETL
db *database.DB
db *database.DB
mu *sync.Mutex
listeners []chan interface{}
}
// NewL1ETL creates a new L1ETL instance that will start indexing from different starting points
......@@ -62,13 +65,13 @@ func NewL1ETL(cfg Config, log log.Logger, db *database.DB, metrics Metricer, cli
log: log,
metrics: metrics,
headerTraversal: node.NewHeaderTraversal(client, fromHeader),
headerTraversal: node.NewHeaderTraversal(client, fromHeader, cfg.ConfirmationDepth),
ethClient: client,
contracts: cSlice,
etlBatches: etlBatches,
}
return &L1ETL{ETL: etl, db: db}, nil
return &L1ETL{ETL: etl, db: db, mu: new(sync.Mutex)}, nil
}
func (l1Etl *L1ETL) Start(ctx context.Context) error {
......@@ -129,6 +132,29 @@ func (l1Etl *L1ETL) Start(ctx context.Context) error {
}
batch.Logger.Info("indexed batch")
// Notify Listeners
l1Etl.mu.Lock()
for i := range l1Etl.listeners {
select {
case l1Etl.listeners[i] <- struct{}{}:
default:
// do nothing if the listener hasn't picked
// up the previous notif
}
}
l1Etl.mu.Unlock()
}
}
}
// Notify returns a channel that'll receive a value every time new data has
// been persisted by the L1ETL
func (l1Etl *L1ETL) Notify() <-chan interface{} {
receiver := make(chan interface{})
l1Etl.mu.Lock()
defer l1Etl.mu.Unlock()
l1Etl.listeners = append(l1Etl.listeners, receiver)
return receiver
}
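A consumer simply selects on the returned channel; a minimal sketch of the wiring (the helper name is hypothetical, but this mirrors how the bridge processor later in this commit uses `Notify`):

```go
package etl

import "context"

// runOnL1Updates invokes onUpdate each time the L1 ETL signals that new
// data has been persisted, until the context is cancelled.
func runOnL1Updates(ctx context.Context, l1Etl *L1ETL, onUpdate func()) {
	updates := l1Etl.Notify()
	for {
		select {
		case <-ctx.Done():
			return
		case <-updates:
			onUpdate()
		}
	}
}
```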
......@@ -49,7 +49,7 @@ func NewL2ETL(cfg Config, log log.Logger, db *database.DB, metrics Metricer, cli
log: log,
metrics: metrics,
headerTraversal: node.NewHeaderTraversal(client, fromHeader),
headerTraversal: node.NewHeaderTraversal(client, fromHeader, cfg.ConfirmationDepth),
ethClient: client,
contracts: l2Contracts,
etlBatches: etlBatches,
......
......@@ -3,6 +3,7 @@ package indexer
import (
"context"
"fmt"
"math/big"
"runtime/debug"
"sync"
......@@ -26,9 +27,8 @@ type Indexer struct {
metricsConfig config.MetricsConfig
metricsRegistry *prometheus.Registry
L1ETL *etl.L1ETL
L2ETL *etl.L2ETL
L1ETL *etl.L1ETL
L2ETL *etl.L2ETL
BridgeProcessor *processors.BridgeProcessor
}
......@@ -41,7 +41,12 @@ func NewIndexer(logger log.Logger, db *database.DB, chainConfig config.ChainConf
if err != nil {
return nil, err
}
l1Cfg := etl.Config{LoopIntervalMsec: chainConfig.L1PollingInterval, HeaderBufferSize: chainConfig.L1HeaderBufferSize, StartHeight: chainConfig.L1StartHeight()}
l1Cfg := etl.Config{
LoopIntervalMsec: chainConfig.L1PollingInterval,
HeaderBufferSize: chainConfig.L1HeaderBufferSize,
ConfirmationDepth: big.NewInt(int64(chainConfig.L1ConfirmationDepth)),
StartHeight: big.NewInt(int64(chainConfig.L1StartingHeight)),
}
l1Etl, err := etl.NewL1ETL(l1Cfg, logger, db, etl.NewMetrics(metricsRegistry, "l1"), l1EthClient, chainConfig.L1Contracts)
if err != nil {
return nil, err
......@@ -52,14 +57,18 @@ func NewIndexer(logger log.Logger, db *database.DB, chainConfig config.ChainConf
if err != nil {
return nil, err
}
l2Cfg := etl.Config{LoopIntervalMsec: chainConfig.L2PollingInterval, HeaderBufferSize: chainConfig.L2HeaderBufferSize}
l2Cfg := etl.Config{
LoopIntervalMsec: chainConfig.L2PollingInterval,
HeaderBufferSize: chainConfig.L2HeaderBufferSize,
ConfirmationDepth: big.NewInt(int64(chainConfig.L2ConfirmationDepth)),
}
l2Etl, err := etl.NewL2ETL(l2Cfg, logger, db, etl.NewMetrics(metricsRegistry, "l2"), l2EthClient)
if err != nil {
return nil, err
}
// Bridge
bridgeProcessor, err := processors.NewBridgeProcessor(logger, db, chainConfig)
bridgeProcessor, err := processors.NewBridgeProcessor(logger, db, l1Etl, chainConfig)
if err != nil {
return nil, err
}
......
# Chain configures L1 chain addresses
# Can configure them manually or use a preset L2 chain ID for known chains including OP Mainnet, OP Goerli, Base, Base Goerli, Zora, and Zora Goerli
[chain]
# OP Goerli
preset = 420
# L1 Config
l1-polling-interval = 0
l1-header-buffer-size = 0
l1-confirmation-depth = 0
l1-starting-height = 0
# L2 Config
l2-polling-interval = 0
l2-header-buffer-size = 0
l2-confirmation-depth = 0
# OP Goerli
preset = 420
l1-starting-height = 0
[rpcs]
l1-rpc = "${INDEXER_RPC_URL_L1}"
......
......@@ -25,8 +25,6 @@ const (
)
type EthClient interface {
FinalizedBlockHeight() (*big.Int, error)
BlockHeaderByNumber(*big.Int) (*types.Header, error)
BlockHeaderByHash(common.Hash) (*types.Header, error)
BlockHeadersByRange(*big.Int, *big.Int) ([]types.Header, error)
......@@ -52,24 +50,6 @@ func DialEthClient(rpcUrl string, metrics Metricer) (EthClient, error) {
return client, nil
}
// FinalizedBlockHeight retrieves the latest block height in a finalized state
func (c *client) FinalizedBlockHeight() (*big.Int, error) {
ctxwt, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout)
defer cancel()
// **NOTE** Local devnet is having issues with the "finalized" block tag. Temp switch
// to "latest" to iterate faster locally but this needs to be updated
var header *types.Header
err := c.rpc.CallContext(ctxwt, &header, "eth_getBlockByNumber", "latest", false)
if err != nil {
return nil, err
} else if header == nil {
return nil, ethereum.NotFound
}
return header.Number, nil
}
// BlockHeaderByHash retrieves the block header attributed to the supplied hash
func (c *client) BlockHeaderByHash(hash common.Hash) (*types.Header, error) {
ctxwt, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout)
......
......@@ -14,15 +14,16 @@ var (
)
type HeaderTraversal struct {
ethClient EthClient
lastHeader *types.Header
ethClient EthClient
lastHeader *types.Header
blockConfirmationDepth *big.Int
}
// NewHeaderTraversal instantiates a new instance of HeaderTraversal against the supplied rpc client.
// The HeaderTraversal will start fetching blocks starting from the supplied header unless
// nil, indicating genesis.
func NewHeaderTraversal(ethClient EthClient, fromHeader *types.Header) *HeaderTraversal {
return &HeaderTraversal{ethClient: ethClient, lastHeader: fromHeader}
// The HeaderTraversal will start fetching blocks starting from the supplied header unless nil, indicating genesis.
func NewHeaderTraversal(ethClient EthClient, fromHeader *types.Header, confDepth *big.Int) *HeaderTraversal {
return &HeaderTraversal{ethClient: ethClient, lastHeader: fromHeader, blockConfirmationDepth: confDepth}
}
// LastHeader returns the last header that was fetched by the HeaderTraversal
......@@ -34,13 +35,19 @@ func (f *HeaderTraversal) LastHeader() *types.Header {
// NextFinalizedHeaders retrieves the next set of headers that have been
// marked as finalized by the connected client, bounded by the supplied size
func (f *HeaderTraversal) NextFinalizedHeaders(maxSize uint64) ([]types.Header, error) {
finalizedBlockHeight, err := f.ethClient.FinalizedBlockHeight()
latestBlockHeader, err := f.ethClient.BlockHeaderByNumber(nil)
if err != nil {
return nil, fmt.Errorf("unable to query latest finalized height: %w", err)
return nil, fmt.Errorf("unable to query latest block: %w", err)
}
endHeight := new(big.Int).Sub(latestBlockHeader.Number, f.blockConfirmationDepth)
if endHeight.Sign() < 0 {
// No blocks with the provided confirmation depth available
return nil, nil
}
if f.lastHeader != nil {
cmp := f.lastHeader.Number.Cmp(finalizedBlockHeight)
cmp := f.lastHeader.Number.Cmp(endHeight)
if cmp == 0 {
return nil, nil
} else if cmp > 0 {
......@@ -53,7 +60,7 @@ func (f *HeaderTraversal) NextFinalizedHeaders(maxSize uint64) ([]types.Header,
nextHeight = new(big.Int).Add(f.lastHeader.Number, bigOne)
}
endHeight := clampBigInt(nextHeight, finalizedBlockHeight, maxSize)
endHeight = clampBigInt(nextHeight, endHeight, maxSize)
headers, err := f.ethClient.BlockHeadersByRange(nextHeight, endHeight)
if err != nil {
return nil, fmt.Errorf("error querying blocks by range: %w", err)
......
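A worked example of the new bounds, assuming `clampBigInt(start, end, size)` caps the returned end height so that at most `size` blocks are spanned from `start` (consistent with its use above):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Latest block is 100 and the confirmation depth is 5, so traversal
	// never advances past height 95.
	latest := big.NewInt(100)
	depth := big.NewInt(5)
	endHeight := new(big.Int).Sub(latest, depth) // 95

	// The last fetched header was 89 and maxSize is 4, so the next request
	// is bounded by both the confirmation depth and the batch size.
	nextHeight := big.NewInt(90)
	maxSize := int64(4)
	upper := new(big.Int).Add(nextHeight, big.NewInt(maxSize-1)) // 93
	if upper.Cmp(endHeight) > 0 {
		upper = endHeight
	}
	fmt.Printf("BlockHeadersByRange(%v, %v)\n", nextHeight, upper) // [90, 93]
}
```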
......@@ -37,10 +37,10 @@ func TestHeaderTraversalNextFinalizedHeadersNoOp(t *testing.T) {
// start from block 10 as the latest fetched block
lastHeader := &types.Header{Number: big.NewInt(10)}
headerTraversal := NewHeaderTraversal(client, lastHeader)
headerTraversal := NewHeaderTraversal(client, lastHeader, bigZero)
// no new headers when matched with head
client.On("FinalizedBlockHeight").Return(big.NewInt(10), nil)
client.On("BlockHeaderByNumber", (*big.Int)(nil)).Return(lastHeader, nil)
headers, err := headerTraversal.NextFinalizedHeaders(100)
require.NoError(t, err)
require.Empty(t, headers)
......@@ -50,11 +50,11 @@ func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
client := new(MockEthClient)
// start from genesis
headerTraversal := NewHeaderTraversal(client, nil)
headerTraversal := NewHeaderTraversal(client, nil, bigZero)
// blocks [0..4]
headers := makeHeaders(5, nil)
client.On("FinalizedBlockHeight").Return(big.NewInt(4), nil).Times(1) // Times so that we can override next
client.On("BlockHeaderByNumber", (*big.Int)(nil)).Return(&headers[4], nil).Times(1) // Times so that we can override next
client.On("BlockHeadersByRange", mock.MatchedBy(BigIntMatcher(0)), mock.MatchedBy(BigIntMatcher(4))).Return(headers, nil)
headers, err := headerTraversal.NextFinalizedHeaders(5)
require.NoError(t, err)
......@@ -62,7 +62,7 @@ func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
// blocks [5..9]
headers = makeHeaders(5, &headers[len(headers)-1])
client.On("FinalizedBlockHeight").Return(big.NewInt(9), nil)
client.On("BlockHeaderByNumber", (*big.Int)(nil)).Return(&headers[4], nil)
client.On("BlockHeadersByRange", mock.MatchedBy(BigIntMatcher(5)), mock.MatchedBy(BigIntMatcher(9))).Return(headers, nil)
headers, err = headerTraversal.NextFinalizedHeaders(5)
require.NoError(t, err)
......@@ -73,10 +73,10 @@ func TestHeaderTraversalNextFinalizedHeadersMaxSize(t *testing.T) {
client := new(MockEthClient)
// start from genesis
headerTraversal := NewHeaderTraversal(client, nil)
headerTraversal := NewHeaderTraversal(client, nil, bigZero)
// 100 "available" headers
client.On("FinalizedBlockHeight").Return(big.NewInt(100), nil)
client.On("BlockHeaderByNumber", (*big.Int)(nil)).Return(&types.Header{Number: big.NewInt(100)}, nil)
// clamped by the supplied size
headers := makeHeaders(5, nil)
......@@ -97,11 +97,11 @@ func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
client := new(MockEthClient)
// start from genesis
headerTraversal := NewHeaderTraversal(client, nil)
headerTraversal := NewHeaderTraversal(client, nil, bigZero)
// blocks [0..4]
headers := makeHeaders(5, nil)
client.On("FinalizedBlockHeight").Return(big.NewInt(4), nil).Times(1) // Times so that we can override next
client.On("BlockHeaderByNumber", (*big.Int)(nil)).Return(&headers[4], nil).Times(1) // Times so that we can override next
client.On("BlockHeadersByRange", mock.MatchedBy(BigIntMatcher(0)), mock.MatchedBy(BigIntMatcher(4))).Return(headers, nil)
headers, err := headerTraversal.NextFinalizedHeaders(5)
require.NoError(t, err)
......@@ -109,7 +109,7 @@ func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
// blocks [5..9]. Next batch is not chained correctly (starts again from genesis)
headers = makeHeaders(5, nil)
client.On("FinalizedBlockHeight").Return(big.NewInt(9), nil)
client.On("BlockHeaderByNumber", (*big.Int)(nil)).Return(&types.Header{Number: big.NewInt(9)}, nil)
client.On("BlockHeadersByRange", mock.MatchedBy(BigIntMatcher(5)), mock.MatchedBy(BigIntMatcher(9))).Return(headers, nil)
headers, err = headerTraversal.NextFinalizedHeaders(5)
require.Nil(t, headers)
......
......@@ -20,9 +20,9 @@ func (m *MockEthClient) BlockHeaderByNumber(number *big.Int) (*types.Header, err
return args.Get(0).(*types.Header), args.Error(1)
}
func (m *MockEthClient) FinalizedBlockHeight() (*big.Int, error) {
args := m.Called()
return args.Get(0).(*big.Int), args.Error(1)
func (m *MockEthClient) BlockHeaderByHash(hash common.Hash) (*types.Header, error) {
args := m.Called(hash)
return args.Get(0).(*types.Header), args.Error(1)
}
func (m *MockEthClient) BlockHeadersByRange(from, to *big.Int) ([]types.Header, error) {
......@@ -30,11 +30,6 @@ func (m *MockEthClient) BlockHeadersByRange(from, to *big.Int) ([]types.Header,
return args.Get(0).([]types.Header), args.Error(1)
}
func (m *MockEthClient) BlockHeaderByHash(hash common.Hash) (*types.Header, error) {
args := m.Called(hash)
return args.Get(0).(*types.Header), args.Error(1)
}
func (m *MockEthClient) StorageHash(address common.Address, blockNumber *big.Int) (common.Hash, error) {
args := m.Called(address, blockNumber)
return args.Get(0).(common.Hash), args.Error(1)
......
......@@ -4,10 +4,10 @@ import (
"context"
"errors"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/etl"
"github.com/ethereum-optimism/optimism/indexer/processors/bridge"
"github.com/ethereum/go-ethereum/core/types"
......@@ -17,6 +17,7 @@ import (
type BridgeProcessor struct {
log log.Logger
db *database.DB
l1Etl *etl.L1ETL
chainConfig config.ChainConfig
// NOTE: We'll need this processor to handle for reorgs events.
......@@ -25,7 +26,7 @@ type BridgeProcessor struct {
LatestL2Header *types.Header
}
func NewBridgeProcessor(log log.Logger, db *database.DB, chainConfig config.ChainConfig) (*BridgeProcessor, error) {
func NewBridgeProcessor(log log.Logger, db *database.DB, l1Etl *etl.L1ETL, chainConfig config.ChainConfig) (*BridgeProcessor, error) {
log = log.New("processor", "bridge")
latestL1Header, err := bridge.L1LatestBridgeEventHeader(db, chainConfig)
......@@ -56,17 +57,12 @@ func NewBridgeProcessor(log log.Logger, db *database.DB, chainConfig config.Chai
log.Info("detected the latest indexed state", "l1_block_number", latestL1Header.Number, "l2_block_number", latestL2Header.Number)
}
return &BridgeProcessor{log, db, chainConfig, latestL1Header, latestL2Header}, nil
return &BridgeProcessor{log, db, l1Etl, chainConfig, latestL1Header, latestL2Header}, nil
}
func (b *BridgeProcessor) Start(ctx context.Context) error {
done := ctx.Done()
// NOTE: This should run on the same interval as the L1 ETL, as finding the
// latest epoch is constrained to how much L1 data we've indexed.
pollTicker := time.NewTicker(5 * time.Second)
defer pollTicker.Stop()
// In order to ensure all seen bridge finalization events correspond with seen
// bridge initiated events, we establish a shared marker between L1 and L2 when
// processing events.
......@@ -75,9 +71,7 @@ func (b *BridgeProcessor) Start(ctx context.Context) error {
// sequencing epoch and corresponding L1 origin that has also been indexed
// serves as this shared marker.
// TODOs:
// 1. Fix Logging. Should be clear if we're looking at L1 or L2 side of things
l1EtlUpdates := b.l1Etl.Notify()
b.log.Info("starting bridge processor...")
for {
select {
......@@ -85,18 +79,18 @@ func (b *BridgeProcessor) Start(ctx context.Context) error {
b.log.Info("stopping bridge processor")
return nil
case <-pollTicker.C:
case <-l1EtlUpdates:
latestEpoch, err := b.db.Blocks.LatestEpoch()
if err != nil {
return err
}
if latestEpoch == nil {
if b.LatestL1Header != nil {
// Once we have some satte `latestEpoch` should never return nil.
b.log.Error("started with indexed bridge state, but no blocks epochs returned", "latest_bridge_l1_block_number", b.LatestL1Header.Number)
// Once we have some state `latestEpoch` should never return nil.
b.log.Error("started with indexed bridge state, but no latest epoch returned", "latest_bridge_l1_block_number", b.LatestL1Header.Number)
return errors.New("started with indexed bridge state, but no blocks epochs returned")
} else {
b.log.Warn("no indexed block state. waiting...")
b.log.Warn("no indexed epochs. waiting...")
continue
}
}
......@@ -116,7 +110,7 @@ func (b *BridgeProcessor) Start(ctx context.Context) error {
}
batchLog := b.log.New("epoch_start_number", fromL1Height, "epoch_end_number", toL1Height)
batchLog.Info("scanning bridge events")
batchLog.Info("scanning for new bridge events")
err = b.db.Transaction(func(tx *database.DB) error {
l1BridgeLog := b.log.New("from_l1_block_number", fromL1Height, "to_l1_block_number", toL1Height)
l2BridgeLog := b.log.New("from_l2_block_number", fromL2Height, "to_l2_block_number", toL2Height)
......
......@@ -141,9 +141,8 @@ func createContractInput(input abi.Argument, inputs []ContractInput) ([]Contract
return nil, err
}
// TODO: could probably do better than string comparison?
internalType := input.Type.String()
if inputType == "tuple" {
if input.Type.T == abi.TupleTy {
internalType = input.Type.TupleRawName
}
......
......@@ -20,3 +20,72 @@ to see a list of available options.
`op-challenger` is configurable via command line flags and environment variables. The help menu
shows the available config options and can be accessed by running `./op-challenger --help`.
## Scripts
The [scripts](scripts) directory contains a collection of scripts to assist with manually creating and playing games.
These are not intended to be used in production, only to support manual testing and to aid in understanding how
dispute games work. They also serve as examples of how to use `cast` to manually interact with the dispute game
contracts.
### Dependencies
These scripts assume that the following tools are installed and available on the current `PATH`:
* `cast` (https://book.getfoundry.sh/cast/)
* `jq` (https://jqlang.github.io/jq/)
* `bash`
### [create_game.sh](scripts/create_game.sh)
```shell
./scripts/create_game.sh <RPC_URL> <GAME_FACTORY_ADDRESS> <ROOT_CLAIM> <SIGNER_ARGS>...
```
Starts a new fault dispute game that disputes the latest output proposal in the L2 output oracle.
* `RPC_URL` - the RPC endpoint of the L1 node to use (e.g. `http://localhost:8545`).
* `GAME_FACTORY_ADDRESS` - the address of the dispute game factory contract on L1.
* `ROOT_CLAIM` a hex encoded 32 byte hash to use as the root claim for the created game.
* `SIGNER_ARGS` the remaining args are passed as arguments to `cast` when sending transactions.
These arguments must specify a way for `cast` to sign the transactions.
See `cast send --help` for supported options.
Creating a dispute game requires sending two transactions. The first transaction creates a
checkpoint in the `BlockOracle` that records the L1 block that will be used as the L1 head
when generating the cannon execution trace. The second transaction then creates the actual
dispute game, specifying the disputed L2 block number and previously checkpointed L1 head block.
### [move.sh](scripts/move.sh)
```shell
./scripts/move.sh <RPC_URL> <GAME_ADDRESS> (attack|defend) <PARENT_INDEX> <CLAIM> <SIGNER_ARGS>...
```
Performs a move to either attack or defend the latest claim in the specified game.
* `RPC_URL` - the RPC endpoint of the L1 node to use (e.g. `http://localhost:8545`).
* `GAME_ADDRESS` - the address of the dispute game to perform the move in.
* `(attack|defend)` - the type of move to make.
* `attack` indicates that the state hash in your local cannon trace differs from the state
hash included in the latest claim.
* `defend` indicates that the state hash in your local cannon trace matches the state hash
included in the latest claim.
* `PARENT_INDEX` - the index of the parent claim that will be countered by this new claim.
The special value of `latest` will counter the latest claim added to the game.
* `CLAIM` - the state hash to include in the counter-claim you are posting.
* `SIGNER_ARGS` the remaining args are passed as arguments to `cast` when sending transactions.
These arguments must specify a way for `cast` to sign the transactions.
See `cast send --help` for supported options.
### [list_claims.sh](scripts/list_claims.sh)
```shell
./scripts/list_claims.sh <RPC> <GAME_ADDR>
```
Prints the list of current claims in a dispute game.
* `RPC` - the RPC endpoint of the L1 node to use (e.g. `http://localhost:8545`).
* `GAME_ADDR` - the address of the dispute game to list the claims in.
......@@ -254,6 +254,28 @@ func TestCannonSnapshotFreq(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeCannon, "--cannon-snapshot-freq=1234"))
require.Equal(t, uint(1234), cfg.CannonSnapshotFreq)
})
t.Run("Invalid", func(t *testing.T) {
verifyArgsInvalid(t, "invalid value \"abc\" for flag -cannon-snapshot-freq",
addRequiredArgs(config.TraceTypeCannon, "--cannon-snapshot-freq=abc"))
})
}
func TestCannonInfoFreq(t *testing.T) {
t.Run("UsesDefault", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeCannon))
require.Equal(t, config.DefaultCannonInfoFreq, cfg.CannonInfoFreq)
})
t.Run("Valid", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(config.TraceTypeCannon, "--cannon-info-freq=1234"))
require.Equal(t, uint(1234), cfg.CannonInfoFreq)
})
t.Run("Invalid", func(t *testing.T) {
verifyArgsInvalid(t, "invalid value \"abc\" for flag -cannon-info-freq",
addRequiredArgs(config.TraceTypeCannon, "--cannon-info-freq=abc"))
})
}
func TestGameWindow(t *testing.T) {
......
......@@ -26,6 +26,7 @@ var (
ErrMissingL1EthRPC = errors.New("missing l1 eth rpc url")
ErrMissingGameFactoryAddress = errors.New("missing game factory address")
ErrMissingCannonSnapshotFreq = errors.New("missing cannon snapshot freq")
ErrMissingCannonInfoFreq = errors.New("missing cannon info freq")
ErrMissingCannonRollupConfig = errors.New("missing cannon network or rollup config path")
ErrMissingCannonL2Genesis = errors.New("missing cannon network or l2 genesis path")
ErrCannonNetworkAndRollupConfig = errors.New("only specify one of network or rollup config path")
......@@ -78,6 +79,7 @@ func ValidTraceType(value TraceType) bool {
const (
DefaultCannonSnapshotFreq = uint(1_000_000_000)
DefaultCannonInfoFreq = uint(10_000_000)
// DefaultGameWindow is the default maximum time duration in the past
// that the challenger will look for games to progress.
// The default value is 11 days, which is a 4 day resolution buffer
......@@ -111,6 +113,7 @@ type Config struct {
CannonL2GenesisPath string
CannonL2 string // L2 RPC Url
CannonSnapshotFreq uint // Frequency of snapshots to create when executing cannon (in VM instructions)
CannonInfoFreq uint // Frequency of cannon progress log messages (in VM instructions)
TxMgrConfig txmgr.CLIConfig
MetricsConfig opmetrics.CLIConfig
......@@ -140,6 +143,7 @@ func NewConfig(
Datadir: datadir,
CannonSnapshotFreq: DefaultCannonSnapshotFreq,
CannonInfoFreq: DefaultCannonInfoFreq,
GameWindow: DefaultGameWindow,
}
}
......@@ -194,6 +198,9 @@ func (c Config) Check() error {
if c.CannonSnapshotFreq == 0 {
return ErrMissingCannonSnapshotFreq
}
if c.CannonInfoFreq == 0 {
return ErrMissingCannonInfoFreq
}
}
if c.TraceType == TraceTypeAlphabet && c.AlphabetTrace == "" {
return ErrMissingAlphabetTrace
......
......@@ -132,6 +132,14 @@ func TestCannonSnapshotFreq(t *testing.T) {
})
}
func TestCannonInfoFreq(t *testing.T) {
t.Run("MustNotBeZero", func(t *testing.T) {
cfg := validConfig(TraceTypeCannon)
cfg.CannonInfoFreq = 0
require.ErrorIs(t, cfg.Check(), ErrMissingCannonInfoFreq)
})
}
func TestCannonNetworkOrRollupConfigRequired(t *testing.T) {
cfg := validConfig(TraceTypeCannon)
cfg.CannonNetwork = ""
......
......@@ -116,6 +116,12 @@ var (
EnvVars: prefixEnvVars("CANNON_SNAPSHOT_FREQ"),
Value: config.DefaultCannonSnapshotFreq,
}
CannonInfoFreqFlag = &cli.UintFlag{
Name: "cannon-info-freq",
Usage: "Frequency of cannon info log messages to generate in VM steps (cannon trace type only)",
EnvVars: prefixEnvVars("CANNON_INFO_FREQ"),
Value: config.DefaultCannonInfoFreq,
}
GameWindowFlag = &cli.DurationFlag{
Name: "game-window",
Usage: "The time window which the challenger will look for games to progress.",
......@@ -146,6 +152,7 @@ var optionalFlags = []cli.Flag{
CannonPreStateFlag,
CannonL2Flag,
CannonSnapshotFreqFlag,
CannonInfoFreqFlag,
GameWindowFlag,
}
......@@ -250,6 +257,7 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) {
Datadir: ctx.String(DatadirFlag.Name),
CannonL2: ctx.String(CannonL2Flag.Name),
CannonSnapshotFreq: ctx.Uint(CannonSnapshotFreqFlag.Name),
CannonInfoFreq: ctx.Uint(CannonInfoFreqFlag.Name),
AgreeWithProposedOutput: ctx.Bool(AgreeWithProposedOutputFlag.Name),
TxMgrConfig: txMgrConfig,
MetricsConfig: metricsConfig,
......
......@@ -40,6 +40,7 @@ type Executor struct {
l2Genesis string
absolutePreState string
snapshotFreq uint
infoFreq uint
selectSnapshot snapshotSelect
cmdExecutor cmdExecutor
}
......@@ -57,6 +58,7 @@ func NewExecutor(logger log.Logger, cfg *config.Config, inputs LocalGameInputs)
l2Genesis: cfg.CannonL2GenesisPath,
absolutePreState: cfg.CannonAbsolutePreState,
snapshotFreq: cfg.CannonSnapshotFreq,
infoFreq: cfg.CannonInfoFreq,
selectSnapshot: findStartingSnapshot,
cmdExecutor: runCmd,
}
......@@ -76,6 +78,7 @@ func (e *Executor) GenerateProof(ctx context.Context, dir string, i uint64) erro
"--input", start,
"--output", lastGeneratedState,
"--meta", "",
"--info-at", "%" + strconv.FormatUint(uint64(e.infoFreq), 10),
"--proof-at", "=" + strconv.FormatUint(i, 10),
"--proof-fmt", filepath.Join(proofDir, "%d.json.gz"),
"--snapshot-at", "%" + strconv.FormatUint(uint64(e.snapshotFreq), 10),
......
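As the surrounding flags and the test below suggest, cannon's scheduling arguments take a `%` prefix for "act every N VM steps" and an `=` prefix for "act at exactly step N". An illustration with the values used in the test (this snippet only restates the arguments the executor builds):

```go
package main

func main() {
	// With CannonInfoFreq = 900, CannonSnapshotFreq = 500, and a proof
	// requested at instruction 150000000, the executor passes cannon:
	args := []string{
		"--info-at", "%900",        // progress log every 900 VM instructions
		"--snapshot-at", "%500",    // snapshot every 500 VM instructions
		"--proof-at", "=150000000", // proof at exactly this instruction
	}
	_ = args
}
```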
......@@ -29,6 +29,7 @@ func TestGenerateProof(t *testing.T) {
cfg.CannonServer = "./bin/op-program"
cfg.CannonL2 = "http://localhost:9999"
cfg.CannonSnapshotFreq = 500
cfg.CannonInfoFreq = 900
inputs := LocalGameInputs{
L1Head: common.Hash{0x11},
......@@ -81,6 +82,7 @@ func TestGenerateProof(t *testing.T) {
require.Equal(t, "=150000000", args["--proof-at"])
require.Equal(t, "=150000001", args["--stop-at"])
require.Equal(t, "%500", args["--snapshot-at"])
require.Equal(t, "%900", args["--info-at"])
// Slight quirk of how we pair off args
// The server binary winds up as the key and the first arg --server as the value which has no value
// Then everything else pairs off correctly again
......
......@@ -15,7 +15,7 @@ make cannon-prestate
make devnet-up
DEVNET_SPONSOR="ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
DISPUTE_GAME_PROXY=$(jq -r .DisputeGameFactoryProxy $MONOREPO_DIR/.devnet/addresses.json)
DISPUTE_GAME_FACTORY=$(jq -r .DisputeGameFactoryProxy $MONOREPO_DIR/.devnet/addresses.json)
echo "----------------------------------------------------------------"
echo " Dispute Game Factory at $DISPUTE_GAME_PROXY"
......@@ -59,43 +59,8 @@ do
sleep 2
done
# Fetch the latest block number
L2_BLOCK_NUMBER=$(cast call $L2_OUTPUT_ORACLE_PROXY "latestBlockNumber()")
echo "Using the latest L2OO block number: $L2_BLOCK_NUMBER"
# We will use the l2 block number of 1 for the dispute game.
# We need to check that the block oracle contains the corresponding l1 block number.
echo "Checkpointing the block oracle..."
L1_CHECKPOINT=$(cast send --private-key $DEVNET_SPONSOR $BLOCK_ORACLE_PROXY "checkpoint()" --json | jq -r .blockNumber | cast to-dec)
((L1_CHECKPOINT=L1_CHECKPOINT-1))
echo "L1 Checkpoint: $L1_CHECKPOINT"
INDEX=$(cast call $L2_OUTPUT_ORACLE_PROXY "getL2OutputIndexAfter(uint256)" $L2_BLOCK_NUMBER | cast to-dec)
((PRIOR_INDEX=INDEX-1))
echo "Getting the l2 output at index $PRIOR_INDEX"
cast call $L2_OUTPUT_ORACLE_PROXY "getL2Output(uint256)" $PRIOR_INDEX
echo "Getting the l2 output at index $INDEX"
cast call $L2_OUTPUT_ORACLE_PROXY "getL2Output(uint256)" $INDEX
# (Alphabet) Fault game type = 255
GAME_TYPE=255
# Root claim commits to the entire trace.
# Alphabet game claim construction: keccak256(abi.encode(trace_index, trace[trace_index]))
ROOT_CLAIM=$(cast keccak $(cast abi-encode "f(uint256,uint256)" 15 122))
# Fault dispute game extra data is calculated as follows.
# abi.encode(uint256(l2_block_number), uint256(l1 checkpoint))
EXTRA_DATA=$(cast abi-encode "f(uint256,uint256)" $L2_BLOCK_NUMBER $L1_CHECKPOINT)
echo "Initializing the game"
FAULT_GAME_ADDRESS=$(cast call --private-key $MALLORY_KEY $DISPUTE_GAME_PROXY "create(uint8,bytes32,bytes)" $GAME_TYPE $ROOT_CLAIM $EXTRA_DATA)
echo "Creating game at address $FAULT_GAME_ADDRESS"
cast send --private-key $MALLORY_KEY $DISPUTE_GAME_PROXY "create(uint8,bytes32,bytes)" $GAME_TYPE $ROOT_CLAIM $EXTRA_DATA
FORMATTED_ADDRESS=$(cast parse-bytes32-address $FAULT_GAME_ADDRESS)
echo "Formatted Address: $FORMATTED_ADDRESS"
echo $FORMATTED_ADDRESS > $CHALLENGER_DIR/.fault-game-address
GAME_TYPE=255 ${SOURCE_DIR}/../create_game.sh http://localhost:8545 "${DISPUTE_GAME_FACTORY}" "${ROOT_CLAIM}" --private-key "${DEVNET_SPONSOR}"
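For reference, the alphabet root claim built above with `cast` (`keccak256(abi.encode(15, 122))`) can be reproduced in Go with go-ethereum; ABI-encoding two `uint256` values is simply their 32-byte big-endian concatenation:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	traceIndex := big.NewInt(15) // values taken from the devnet script above
	claimValue := big.NewInt(122)
	encoded := append(
		common.LeftPadBytes(traceIndex.Bytes(), 32),
		common.LeftPadBytes(claimValue.Bytes(), 32)...,
	)
	fmt.Printf("root claim: %s\n", crypto.Keccak256Hash(encoded))
}
```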
#!/usr/bin/env bash
set -euo pipefail
SOURCE_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
CHALLENGER_DIR=$(echo ${SOURCE_DIR%/*})
MONOREPO_DIR=$(echo ${CHALLENGER_DIR%/*})
# ./create_game.sh <rpc-addr> <dispute-game-factory-addr> <cast signing args>
RPC=${1:?Must specify RPC address}
FACTORY_ADDR=${2:?Must specify factory address}
ROOT_CLAIM=${3:?Must specify root claim}
SIGNER_ARGS="${@:4}"
# Default to Cannon Fault game type
GAME_TYPE=${GAME_TYPE:-0}
# Get the fault dispute game implementation addr
GAME_IMPL_ADDR=$(cast call --rpc-url "${RPC}" "${FACTORY_ADDR}" 'gameImpls(uint8) returns(address)' "${GAME_TYPE}")
echo "Fault dispute game impl: ${GAME_IMPL_ADDR}"
# Get the L2 output oracle address
L2OO_ADDR=$(cast call --rpc-url "${RPC}" "${GAME_IMPL_ADDR}" 'L2_OUTPUT_ORACLE() returns(address)')
echo "L2OO: ${L2OO_ADDR}"
# Get the block oracle address
BLOCK_ORACLE_ADDR=$(cast call --rpc-url "${RPC}" "${GAME_IMPL_ADDR}" 'BLOCK_ORACLE() returns(address)')
echo "Block Oracle: ${BLOCK_ORACLE_ADDR}"
# Get the L2 block number of the latest output proposal. This is the proposal that will be disputed by the created game.
L2_BLOCK_NUM=$(cast call --rpc-url "${RPC}" "${L2OO_ADDR}" 'latestBlockNumber() public view returns (uint256)')
echo "L2 Block Number: ${L2_BLOCK_NUM}"
# Create a checkpoint in the block oracle to commit to the current L1 head.
# This defines the L1 head that will be used in the dispute game.
echo "Checkpointing the block oracle..."
L1_CHECKPOINT=$(cast send --rpc-url "${RPC}" ${SIGNER_ARGS} "${BLOCK_ORACLE_ADDR}" "checkpoint()" --json | jq -r '.logs[0].topics[1]' | cast to-dec)
echo "L1 Checkpoint: $L1_CHECKPOINT"
# Fault dispute game extra data is calculated as follows.
# abi.encode(uint256(l2_block_number), uint256(l1 checkpoint))
EXTRA_DATA=$(cast abi-encode "f(uint256,uint256)" "${L2_BLOCK_NUM}" "${L1_CHECKPOINT}")
echo "Initializing the game"
FAULT_GAME_DATA=$(cast send --rpc-url "${RPC}" ${SIGNER_ARGS} "${FACTORY_ADDR}" "create(uint8,bytes32,bytes) returns(address)" "${GAME_TYPE}" "${ROOT_CLAIM}" "${EXTRA_DATA}" --json)
# Extract the address of the newly created game from the receipt logs.
FAULT_GAME_ADDRESS=$(echo "${FAULT_GAME_DATA}" | jq -r '.logs[0].topics[1]' | cast parse-bytes32-address)
echo "Fault game address: ${FAULT_GAME_ADDRESS}"
echo "${FAULT_GAME_ADDRESS}" > $CHALLENGER_DIR/.fault-game-address
#!/usr/bin/env bash
set -euo pipefail
RPC=${1:?Must specify RPC address}
GAME_ADDR=${2:?Must specify fault dispute game address}
COUNT=$(cast call --rpc-url "${RPC}" "${GAME_ADDR}" 'claimDataLen() returns(uint256)')
echo "Claim count: ${COUNT}"
((COUNT=COUNT-1))
for i in $(seq 0 "${COUNT}")
do
CLAIM=$(cast call --rpc-url "${RPC}" "${GAME_ADDR}" 'claimData(uint256) returns(uint32 parentIndex, bool countered, bytes32 claim, uint128 position, uint128 clock)' "${i}")
SAVEIFS=$IFS # Save current IFS (Internal Field Separator)
IFS=$'\n' # Change IFS to newline char
CLAIM=($CLAIM) # split the string into an array by the same name
IFS=$SAVEIFS # Restore original IFS
echo "${i} Parent: ${CLAIM[0]} Countered: ${CLAIM[1]} Claim: ${CLAIM[2]} Position: ${CLAIM[3]} Clock ${CLAIM[4]}"
done
#!/bin/bash
set -euo pipefail
RPC=${1:?Must specify RPC URL}
GAME_ADDR=${2:?Must specify game address}
ACTION=${3:?Must specify attack or defend}
PARENT_INDEX=${4:?Must specify parent index. Use latest to counter the latest claim added to the game.}
CLAIM=${5:?Must specify claim hash}
SIGNER_ARGS="${@:6}"
if [[ "${ACTION}" != "attack" && "${ACTION}" != "defend" ]]
then
echo "Action must be either attack or defend"
exit 1
fi
if [[ "${PARENT_INDEX}" == "latest" ]]
then
# Fetch the index of the most recent claim made.
PARENT_INDEX=$(cast call --rpc-url "${RPC}" "${GAME_ADDR}" 'claimDataLen() returns(uint256)')
((PARENT_INDEX=PARENT_INDEX-1))
fi
# Perform the move.
cast send --rpc-url "${RPC}" ${SIGNER_ARGS} "${GAME_ADDR}" "$ACTION(uint256,bytes32)" "${PARENT_INDEX}" "${CLAIM}"
......@@ -2,23 +2,8 @@
set -euo pipefail
if [ $# -eq 0 ]
then
echo "Missing Fault Dispute Game address argument"
fi
echo ""
echo "Visualize the fault dispute game at https://dispute.clab.by/game?addr=$1"
echo ""
DISPUTE_GAME_PROXY=$(jq .DisputeGameFactoryProxy .devnet/addresses.json)
DISPUTE_GAME_PROXY=$(echo $DISPUTE_GAME_PROXY | tr -d '"')
echo "----------------------------------------------------------------"
echo " Dispute Game Factory at $DISPUTE_GAME_PROXY"
echo "----------------------------------------------------------------"
FAULT_GAME_ADDRESS=$1
RPC="${1:?Must specify RPC address}"
FAULT_GAME_ADDRESS="${2:?Must specify game address}"
DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
DIR=$(echo ${DIR%/*/*})
......@@ -26,6 +11,6 @@ cd $DIR/packages/contracts-bedrock
forge script scripts/FaultDisputeGameViz.s.sol \
--sig "remote(address)" $FAULT_GAME_ADDRESS \
--fork-url http://localhost:8545
--fork-url "$RPC"
mv dispute_game.svg "$DIR"
......@@ -13,7 +13,7 @@ test: pre-test test-ws
test-external-%: pre-test
make -C ./external_$*/
$(go_test) $(go_test_flags) --externalL2 ./external_$*/shim
$(go_test) $(go_test_flags) --externalL2 ./external_$*/
test-ws: pre-test
$(go_test) $(go_test_flags) ./...
......
package config
import (
"encoding/json"
"errors"
"flag"
"fmt"
"os"
......@@ -9,6 +11,7 @@ import (
"time"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-e2e/external"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/state"
)
......@@ -26,21 +29,18 @@ var (
L1Deployments *genesis.L1Deployments
// DeployConfig represents the deploy config used by the system.
DeployConfig *genesis.DeployConfig
// ExternalL2Nodes is the shim to use if external ethereum client testing is
// ExternalL2Shim is the shim to use if external ethereum client testing is
// enabled
ExternalL2Nodes string
ExternalL2Shim string
// ExternalL2TestParms is additional metadata for executing external L2
// tests.
ExternalL2TestParms external.TestParms
// EthNodeVerbosity is the level of verbosity to output
EthNodeVerbosity int
)
// Init testing to enable test flags
var _ = func() bool {
testing.Init()
return true
}()
func init() {
var l1AllocsPath, l1DeploymentsPath, deployConfigPath string
var l1AllocsPath, l1DeploymentsPath, deployConfigPath, externalL2 string
cwd, err := os.Getwd()
if err != nil {
......@@ -58,8 +58,9 @@ func init() {
flag.StringVar(&l1AllocsPath, "l1-allocs", defaultL1AllocsPath, "")
flag.StringVar(&l1DeploymentsPath, "l1-deployments", defaultL1DeploymentsPath, "")
flag.StringVar(&deployConfigPath, "deploy-config", defaultDeployConfigPath, "")
flag.StringVar(&ExternalL2Nodes, "externalL2", "", "Enable tests with external L2")
flag.StringVar(&externalL2, "externalL2", "", "Enable tests with external L2")
flag.IntVar(&EthNodeVerbosity, "ethLogVerbosity", 3, "The level of verbosity to use for the eth node logs")
testing.Init() // Register test flags before parsing
flag.Parse()
if err := allExist(l1AllocsPath, l1DeploymentsPath, deployConfigPath); err != nil {
......@@ -92,6 +93,40 @@ func init() {
if L1Deployments != nil {
DeployConfig.SetDeployments(L1Deployments)
}
if externalL2 != "" {
if err := initExternalL2(externalL2); err != nil {
panic(fmt.Errorf("could not initialize external L2: %w", err))
}
}
}
func initExternalL2(externalL2 string) error {
var err error
ExternalL2Shim, err = filepath.Abs(filepath.Join(externalL2, "shim"))
if err != nil {
return fmt.Errorf("could not compute abs of externalL2Nodes shim: %w", err)
}
_, err = os.Stat(ExternalL2Shim)
if err != nil {
return fmt.Errorf("failed to stat externalL2Nodes path: %w", err)
}
file, err := os.Open(filepath.Join(externalL2, "test_parms.json"))
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("could not open external L2 test parms: %w", err)
}
defer file.Close()
if err := json.NewDecoder(file).Decode(&ExternalL2TestParms); err != nil {
return fmt.Errorf("could not decode external L2 test parms: %w", err)
}
return nil
}
func allExist(filenames ...string) error {
......
package external
import (
"bytes"
"encoding/json"
"os"
"strings"
"testing"
)
type Config struct {
......@@ -40,3 +43,26 @@ type Endpoints struct {
HTTPAuthEndpoint string `json:"http_auth_endpoint"`
WSAuthEndpoint string `json:"ws_auth_endpoint"`
}
type TestParms struct {
// SkipTests is a map from test name to skip message. The skip message may
// be arbitrary, but the test name should match the skipped test (either
// base, or a sub-test) exactly. Precisely, the skip name must match rune for
// rune starting with the first rune. If the skip name does not match all
// runes, the first mismatched rune must be a '/'.
SkipTests map[string]string `json:"skip_tests"`
}
func (tp TestParms) SkipIfNecessary(t *testing.T) {
if len(tp.SkipTests) == 0 {
return
}
var base bytes.Buffer
for _, name := range strings.Split(t.Name(), "/") {
base.WriteString(name)
if msg, ok := tp.SkipTests[base.String()]; ok {
t.Skip(msg)
}
base.WriteRune('/')
}
}
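A hedged illustration of the matching behavior: the test name is rebuilt segment by segment, so a skip entry for a base test also covers every one of its sub-tests (the test body below is hypothetical):

```go
package external_test

import (
	"testing"

	"github.com/ethereum-optimism/optimism/op-e2e/external"
)

func TestPendingGasLimit(t *testing.T) {
	// With this entry, the skip fires when t.Name() is "TestPendingGasLimit"
	// or any sub-test such as "TestPendingGasLimit/case1", since the
	// accumulated name is checked after each '/'-separated segment.
	parms := external.TestParms{SkipTests: map[string]string{
		"TestPendingGasLimit": "requires directly modifying go structures",
	}}
	parms.SkipIfNecessary(t)
}
```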
......@@ -41,6 +41,16 @@ process and looks for the lines indicating that the HTTP server and Auth HTTP
server have started up. It then reads the ports which were allocated (because
the requested ports were passed in as ephemeral via the CLI arguments).
## Skipping tests
Ideally, all tests would be structured such that they can execute either with an
in-process op-geth or with an extra-process ethereum client, but this is not
always the case. You may optionally create a `test_parms.json` file in the
`external_<your-client>` directory (as there is in the `external_geth`
directory) which specifies a map of tests to skip, along with accompanying skip
text. See the `op-e2e/external/config.go` file for more details.
## Generalization
This shim is included to help document and demonstrate the usage of the
......
{
"skip_tests":{
"TestPendingGasLimit":"This test requires directly modifying go structures and cannot be implemented with flags"
}
}
......@@ -78,6 +78,8 @@ func newTxMgrConfig(l1Addr string, privKey *ecdsa.PrivateKey) txmgr.CLIConfig {
}
func DefaultSystemConfig(t *testing.T) SystemConfig {
config.ExternalL2TestParms.SkipIfNecessary(t)
secrets, err := e2eutils.DefaultMnemonicConfig.Secrets()
require.NoError(t, err)
deployConfig := config.DeployConfig.Copy()
......@@ -139,7 +141,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
GethOptions: map[string][]GethOption{},
P2PTopology: nil, // no P2P connectivity by default
NonFinalizedProposals: false,
ExternalL2Nodes: config.ExternalL2Nodes,
ExternalL2Shim: config.ExternalL2Shim,
BatcherTargetL1TxSizeBytes: 100_000,
}
}
......@@ -175,7 +177,7 @@ type SystemConfig struct {
ProposerLogger log.Logger
BatcherLogger log.Logger
ExternalL2Nodes string
ExternalL2Shim string
// map of outbound connections to other nodes. Node names prefixed with "~" are unconnected but linked.
// A nil map disables P2P completely.
......@@ -438,7 +440,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
for name := range cfg.Nodes {
var ethClient EthInstance
if cfg.ExternalL2Nodes == "" {
if cfg.ExternalL2Shim == "" {
node, backend, err := initL2Geth(name, big.NewInt(int64(cfg.DeployConfig.L2ChainID)), l2Genesis, cfg.JWTFilePath, cfg.GethOptions[name]...)
if err != nil {
return nil, err
......@@ -459,7 +461,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
}
ethClient = (&ExternalRunner{
Name: name,
BinPath: cfg.ExternalL2Nodes,
BinPath: cfg.ExternalL2Shim,
Genesis: l2Genesis,
JWTPath: cfg.JWTFilePath,
}).Run(t)
......
......@@ -5,7 +5,6 @@ import (
"fmt"
"math/big"
"os"
"path/filepath"
"runtime"
"testing"
"time"
......@@ -45,23 +44,8 @@ import (
)
func TestMain(m *testing.M) {
if config.ExternalL2Nodes != "" {
fmt.Println("Running tests with external L2 process adapter at ", config.ExternalL2Nodes)
shimPath, err := filepath.Abs(config.ExternalL2Nodes)
if err != nil {
fmt.Printf("Could not compute abs of externalL2Nodes shim: %s\n", err)
os.Exit(2)
}
// We convert the passed in path to an absolute path, as it simplifies
// the path handling logic for the rest of the testing
config.ExternalL2Nodes = shimPath
_, err = os.Stat(config.ExternalL2Nodes)
if err != nil {
fmt.Printf("Failed to stat externalL2Nodes path: %s\n", err)
os.Exit(3)
}
if config.ExternalL2Shim != "" {
fmt.Println("Running tests with external L2 process adapter at ", config.ExternalL2Shim)
// As these are integration tests which launch many other processes, the
// default parallelism makes the tests flaky. This change aims to
// reduce the flakiness of these tests.
......@@ -273,13 +257,6 @@ func TestPendingGasLimit(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
if cfg.ExternalL2Nodes != "" {
// Some eth clients such as Erigon don't currently build blocks until
// they receive the engine call which includes the gas limit. After we
// provide a mechanism for external clients to advertise test support we
// should enable for those which support it.
t.Skip()
}
// configure the L2 gas limit to be high, and the pending gas limits to be lower for resource saving.
cfg.DeployConfig.L2GenesisBlockGasLimit = 30_000_000
......
......@@ -17,8 +17,8 @@ const diskPermission = 0666
// DiskKV is a disk-backed key-value store, every key-value pair is a hex-encoded .txt file, with the value as content.
// DiskKV is safe for concurrent use with a single DiskKV instance.
// DiskKV is not safe for concurrent use between different DiskKV instances of the same disk directory:
// a Put needs to be completed before another DiskKV Get retrieves the values.
// DiskKV is safe for concurrent use between different DiskKV instances of the same disk directory as long as the
// file system supports atomic renames.
type DiskKV struct {
sync.RWMutex
path string
......@@ -37,19 +37,22 @@ func (d *DiskKV) pathKey(k common.Hash) string {
func (d *DiskKV) Put(k common.Hash, v []byte) error {
d.Lock()
defer d.Unlock()
f, err := os.OpenFile(d.pathKey(k), os.O_WRONLY|os.O_CREATE|os.O_EXCL|os.O_TRUNC, diskPermission)
f, err := os.CreateTemp(d.path, k.String()+".txt.*")
if err != nil {
if errors.Is(err, os.ErrExist) {
return ErrAlreadyExists
}
return fmt.Errorf("failed to open new pre-image file %s: %w", k, err)
return fmt.Errorf("failed to open temp file for pre-image %s: %w", k, err)
}
defer os.Remove(f.Name()) // Clean up the temp file if it doesn't actually get moved into place
if _, err := f.Write([]byte(hex.EncodeToString(v))); err != nil {
_ = f.Close()
return fmt.Errorf("failed to write pre-image %s to disk: %w", k, err)
}
if err := f.Close(); err != nil {
return fmt.Errorf("failed to close pre-image %s file: %w", k, err)
return fmt.Errorf("failed to close temp pre-image %s file: %w", k, err)
}
targetFile := d.pathKey(k)
if err := os.Rename(f.Name(), targetFile); err != nil {
return fmt.Errorf("failed to move temp dir %v to final destination %v: %w", f.Name(), targetFile, err)
}
return nil
}
......
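The temp-file-plus-rename idiom above is what makes concurrent writers of the same key safe: each writer produces a complete file privately, and the final rename atomically replaces the destination (on filesystems with atomic rename, as the updated comment notes). A standalone sketch of the same idiom:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// atomicWrite writes data to dir/name via a temp file and a rename, so a
// reader never observes a partially written file.
func atomicWrite(dir, name string, data []byte) error {
	f, err := os.CreateTemp(dir, name+".*")
	if err != nil {
		return err
	}
	defer os.Remove(f.Name()) // clean up the temp file if the rename never happens
	if _, err := f.Write(data); err != nil {
		_ = f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	return os.Rename(f.Name(), filepath.Join(dir, name))
}

func main() {
	fmt.Println(atomicWrite(os.TempDir(), "preimage-2a.txt", []byte("0x2a")))
}
```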
......@@ -9,13 +9,9 @@ import (
// ErrNotFound is returned when a pre-image cannot be found in the KV store.
var ErrNotFound = errors.New("not found")
// ErrAlreadyExists is returned when a pre-image already exists in the KV store.
var ErrAlreadyExists = errors.New("already exists")
// KV is a Key-Value store interface for pre-image data.
type KV interface {
// Put puts the pre-image value v in the key-value store with key k.
// It returns ErrAlreadyExists when the key already exists.
// KV store implementations may return additional errors specific to the KV storage.
Put(k common.Hash, v []byte) error
......
......@@ -45,9 +45,9 @@ func kvTest(t *testing.T, kv KV) {
require.Equal(t, []byte{4, 2}, dat, "pre-image must match")
})
t.Run("not overwriting pre-image", func(t *testing.T) {
t.Run("allowing multiple writes for same pre-image", func(t *testing.T) {
t.Parallel()
require.NoError(t, kv.Put(common.Hash{0xdd}, []byte{4, 2}))
require.ErrorIs(t, kv.Put(common.Hash{0xdd}, []byte{4, 2}), ErrAlreadyExists)
require.NoError(t, kv.Put(common.Hash{0xdd}, []byte{4, 2}))
})
}
......@@ -23,9 +23,6 @@ func NewMemKV() *MemKV {
func (m *MemKV) Put(k common.Hash, v []byte) error {
m.Lock()
defer m.Unlock()
if _, ok := m.m[k]; ok {
return ErrAlreadyExists
}
m.m[k] = v
return nil
}
......
......@@ -159,10 +159,7 @@ func (p *Prefetcher) storeTrieNodes(values []hexutil.Bytes) error {
_, nodes := mpt.WriteTrie(values)
for _, node := range nodes {
key := preimage.Keccak256Key(crypto.Keccak256Hash(node)).PreimageKey()
if err := p.kvStore.Put(key, node); errors.Is(err, kvstore.ErrAlreadyExists) {
// It's not uncommon for different tries to contain common nodes (esp for receipts)
continue
} else if err != nil {
if err := p.kvStore.Put(key, node); err != nil {
return fmt.Errorf("failed to store node: %w", err)
}
}
......