Commit ef6bfc7f authored by mergify[bot], committed by GitHub

Merge branch 'develop' into inphi/ci-builder

parents 27d6f449 84a57d43
---
'@eth-optimism/contracts-periphery': patch
'@eth-optimism/contracts-bedrock': patch
'@eth-optimism/fault-detector': patch
'@eth-optimism/core-utils': patch
'@eth-optimism/endpoint-monitor': patch
'@eth-optimism/sdk': patch
---
fix typo
......@@ -689,6 +689,10 @@ jobs:
name: Install pnpm package manager
command: |
npm i pnpm --global
- run:
name: Install node_modules
command: |
pnpm install
- run:
name: Lint check
command: |
......@@ -1063,7 +1067,7 @@ jobs:
sim:
type: string
machine:
image: ubuntu-2204:2022.07.1
image: ubuntu-2204:2022.10.2
docker_layer_caching: true
resource_class: large
steps:
......
......@@ -7,7 +7,6 @@ ignore:
- "**/*.t.sol"
- "op-bindings/bindings/*.go"
- "packages/contracts-bedrock/contracts/vendor/WETH9.sol"
- "packages/contracts-bedrock/contracts/echidna"
- "packages/contracts-bedrock/contracts/cannon" # tested through Go tests
coverage:
status:
......
......@@ -3,9 +3,9 @@ package database
import (
"context"
"errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/google/uuid"
"gorm.io/gorm"
)
......@@ -27,11 +27,6 @@ type L1BlockHeader struct {
type L2BlockHeader struct {
BlockHeader
// Marked when the proposed output is finalized on L1.
// All bedrock blocks will have `LegacyStateBatchIndex == NULL`
L1BlockHash *common.Hash `gorm:"serializer:json"`
LegacyStateBatchIndex *uint64
}
type LegacyStateBatch struct {
......@@ -39,25 +34,33 @@ type LegacyStateBatch struct {
// violating the primary key constraint.
Index uint64 `gorm:"primaryKey;default:0"`
Root common.Hash `gorm:"serializer:json"`
Size uint64
PrevTotal uint64
L1BlockHash common.Hash `gorm:"serializer:json"`
Root common.Hash `gorm:"serializer:json"`
Size uint64
PrevTotal uint64
L1ContractEventGUID uuid.UUID
}
type OutputProposal struct {
OutputRoot common.Hash `gorm:"primaryKey;serializer:json"`
L2BlockNumber U256
L1ContractEventGUID uuid.UUID
}
type BlocksView interface {
FinalizedL1BlockHeader() (*L1BlockHeader, error)
FinalizedL2BlockHeader() (*L2BlockHeader, error)
LatestL1BlockHeader() (*L1BlockHeader, error)
LatestCheckpointedOutput() (*OutputProposal, error)
LatestL2BlockHeader() (*L2BlockHeader, error)
}
type BlocksDB interface {
BlocksView
StoreL1BlockHeaders([]*L1BlockHeader) error
StoreLegacyStateBatch(*LegacyStateBatch) error
StoreL2BlockHeaders([]*L2BlockHeader) error
MarkFinalizedL1RootForL2Block(common.Hash, common.Hash) error
StoreLegacyStateBatches([]*LegacyStateBatch) error
StoreOutputProposals([]*OutputProposal) error
}
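A minimal usage sketch of the reshaped interface, assuming the caller already holds a `BlocksDB` implementation; the function name below is illustrative and not part of this diff.
// Sketch only: persist checkpoint data and read back the latest recorded output.
func recordCheckpoints(db BlocksDB, batches []*LegacyStateBatch, outputs []*OutputProposal) error {
	if err := db.StoreLegacyStateBatches(batches); err != nil {
		return err
	}
	if err := db.StoreOutputProposals(outputs); err != nil {
		return err
	}
	// LatestCheckpointedOutput returns (nil, nil) when nothing has been indexed yet.
	latest, err := db.LatestCheckpointedOutput()
	if err != nil || latest == nil {
		return err
	}
	return nil
}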
/**
......@@ -79,39 +82,33 @@ func (db *blocksDB) StoreL1BlockHeaders(headers []*L1BlockHeader) error {
return result.Error
}
func (db *blocksDB) StoreLegacyStateBatch(stateBatch *LegacyStateBatch) error {
result := db.gorm.Create(stateBatch)
if result.Error != nil {
return result.Error
}
func (db *blocksDB) StoreLegacyStateBatches(stateBatches []*LegacyStateBatch) error {
result := db.gorm.Create(stateBatches)
return result.Error
}
// Mark this state batch index & l1 block hash for all applicable l2 blocks
l2Headers := make([]*L2BlockHeader, stateBatch.Size)
func (db *blocksDB) StoreOutputProposals(outputs []*OutputProposal) error {
result := db.gorm.Create(outputs)
return result.Error
}
// [start, end] range is inclusive. Since `PrevTotal` is the index of the prior batch, no
// need to subtract one when adding the size
startHeight := U256{Int: big.NewInt(int64(stateBatch.PrevTotal + 1))}
endHeight := U256{Int: big.NewInt(int64(stateBatch.PrevTotal + stateBatch.Size))}
result = db.gorm.Where("number BETWEEN ? AND ?", &startHeight, &endHeight).Find(&l2Headers)
func (db *blocksDB) LatestL1BlockHeader() (*L1BlockHeader, error) {
var l1Header L1BlockHeader
result := db.gorm.Order("number DESC").Take(&l1Header)
if result.Error != nil {
return result.Error
} else if result.RowsAffected != int64(stateBatch.Size) {
return errors.New("state batch size exceeds number of indexed l2 blocks")
}
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
for _, header := range l2Headers {
header.LegacyStateBatchIndex = &stateBatch.Index
header.L1BlockHash = &stateBatch.L1BlockHash
return nil, result.Error
}
result = db.gorm.Save(&l2Headers)
return result.Error
return &l1Header, nil
}
// FinalizedL1BlockHeader returns the latest L1 block header stored in the database, nil otherwise
func (db *blocksDB) FinalizedL1BlockHeader() (*L1BlockHeader, error) {
var l1Header L1BlockHeader
result := db.gorm.Order("number DESC").Take(&l1Header)
func (db *blocksDB) LatestCheckpointedOutput() (*OutputProposal, error) {
var outputProposal OutputProposal
result := db.gorm.Order("l2_block_number DESC").Take(&outputProposal)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......@@ -120,7 +117,7 @@ func (db *blocksDB) FinalizedL1BlockHeader() (*L1BlockHeader, error) {
return nil, result.Error
}
return &l1Header, nil
return &outputProposal, nil
}
// L2
......@@ -130,8 +127,7 @@ func (db *blocksDB) StoreL2BlockHeaders(headers []*L2BlockHeader) error {
return result.Error
}
// FinalizedL2BlockHeader returns the latest L2 block header stored in the database, nil otherwise
func (db *blocksDB) FinalizedL2BlockHeader() (*L2BlockHeader, error) {
func (db *blocksDB) LatestL2BlockHeader() (*L2BlockHeader, error) {
var l2Header L2BlockHeader
result := db.gorm.Order("number DESC").Take(&l2Header)
if result.Error != nil {
......@@ -145,19 +141,3 @@ func (db *blocksDB) FinalizedL2BlockHeader() (*L2BlockHeader, error) {
result.Logger.Info(context.Background(), "number ", l2Header.Number)
return &l2Header, nil
}
// MarkFinalizedL1RootForL2Block updates the stored L2 block header with the L1 block
// that contains the output proposal for the L2 root.
func (db *blocksDB) MarkFinalizedL1RootForL2Block(l2Root, l1Root common.Hash) error {
var l2Header L2BlockHeader
l2Header.Hash = l2Root // set the primary key
result := db.gorm.First(&l2Header)
if result.Error != nil {
return result.Error
}
l2Header.L1BlockHash = &l1Root
result = db.gorm.Save(&l2Header)
return result.Error
}
......@@ -13,28 +13,11 @@ CREATE TABLE IF NOT EXISTS l1_block_headers (
timestamp INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS legacy_state_batches (
index INTEGER NOT NULL PRIMARY KEY,
root VARCHAR NOT NULL,
size INTEGER NOT NULL,
prev_total INTEGER NOT NULL,
-- Finalization information. Unlike `l2_block_headers` the NOT NULL
-- constraint is added since the l1 block hash will be known
-- when reading the output event
l1_block_hash VARCHAR NOT NULL REFERENCES l1_block_headers(hash)
);
CREATE TABLE IF NOT EXISTS l2_block_headers (
-- Block header
hash VARCHAR NOT NULL PRIMARY KEY,
parent_hash VARCHAR NOT NULL,
number UINT256,
timestamp INTEGER NOT NULL,
-- Finalization information
l1_block_hash VARCHAR REFERENCES l1_block_headers(hash),
legacy_state_batch_index INTEGER REFERENCES legacy_state_batches(index)
hash VARCHAR NOT NULL PRIMARY KEY,
parent_hash VARCHAR NOT NULL,
number UINT256,
timestamp INTEGER NOT NULL
);
/**
......@@ -59,6 +42,24 @@ CREATE TABLE IF NOT EXISTS l2_contract_events (
timestamp INTEGER NOT NULL
);
-- Tables that index finalization markers for L2 blocks.
CREATE TABLE IF NOT EXISTS legacy_state_batches (
index INTEGER NOT NULL PRIMARY KEY,
root VARCHAR NOT NULL,
size INTEGER NOT NULL,
prev_total INTEGER NOT NULL,
l1_contract_event_guid VARCHAR REFERENCES l1_contract_events(guid)
);
CREATE TABLE IF NOT EXISTS output_proposals (
output_root VARCHAR NOT NULL PRIMARY KEY,
l2_block_number UINT256,
l1_contract_event_guid VARCHAR REFERENCES l1_contract_events(guid)
);
/**
* BRIDGING DATA
*/
......@@ -71,6 +72,7 @@ CREATE TABLE IF NOT EXISTS deposits (
-- Deposit information (do we need indexes on from/to?)
from_address VARCHAR NOT NULL,
to_address VARCHAR NOT NULL,
l1_token_address VARCHAR NOT NULL,
l2_token_address VARCHAR NOT NULL,
......
......@@ -91,7 +91,7 @@ func (c *client) BlockHeaderByHash(hash common.Hash) (*types.Header, error) {
// are placed on the range such as blocks in the "latest", "safe" or "finalized" states. If the specified
// range is too large, `endHeight > latest`, the resulting list is truncated to the available headers
func (c *client) BlockHeadersByRange(startHeight, endHeight *big.Int) ([]*types.Header, error) {
count := new(big.Int).Sub(endHeight, startHeight).Uint64()
count := new(big.Int).Sub(endHeight, startHeight).Uint64() + 1
batchElems := make([]rpc.BatchElem, count)
for i := uint64(0); i < count; i++ {
height := new(big.Int).Add(startHeight, new(big.Int).SetUint64(i))
......
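Worked example of the inclusive-range fix above: for startHeight = 5 and endHeight = 9 the batch must cover blocks 5 through 9, so
count := new(big.Int).Sub(big.NewInt(9), big.NewInt(5)).Uint64() + 1 // == 5 batch elements
Without the `+ 1`, the final header in the requested range would be silently dropped.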
......@@ -7,35 +7,38 @@ import (
"github.com/ethereum/go-ethereum/core/types"
)
// Max number of headers that can be returned by the Fetcher at once.
const maxHeaderBatchSize = 50
var ErrFetcherAndProviderMismatchedState = errors.New("the fetcher and provider have diverged in finalized state")
var (
ErrHeaderTraversalAheadOfProvider = errors.New("the HeaderTraversal's internal state is ahead of the provider")
ErrHeaderTraversalAndProviderMismatchedState = errors.New("the HeaderTraversal and provider have diverged in state")
)
type Fetcher struct {
type HeaderTraversal struct {
ethClient EthClient
lastHeader *types.Header
}
// NewFetcher instantiates a new instance of Fetcher against the supplied rpc client.
// The Fetcher will start fetching blocks starting from the supplied header unless
// NewHeaderTraversal instantiates a new instance of HeaderTraversal against the supplied rpc client.
// The HeaderTraversal will start fetching blocks starting from the supplied header unless
// nil, indicating genesis.
func NewFetcher(ethClient EthClient, fromHeader *types.Header) *Fetcher {
return &Fetcher{ethClient: ethClient, lastHeader: fromHeader}
func NewHeaderTraversal(ethClient EthClient, fromHeader *types.Header) *HeaderTraversal {
return &HeaderTraversal{ethClient: ethClient, lastHeader: fromHeader}
}
// NextConfirmedHeaders retrieves the next set of headers that have been
// marked as finalized by the connected client
func (f *Fetcher) NextFinalizedHeaders() ([]*types.Header, error) {
// NextFinalizedHeaders retrieves the next set of headers that have been
// marked as finalized by the connected client, bounded by the supplied size
func (f *HeaderTraversal) NextFinalizedHeaders(maxSize uint64) ([]*types.Header, error) {
finalizedBlockHeight, err := f.ethClient.FinalizedBlockHeight()
if err != nil {
return nil, err
}
if f.lastHeader != nil && f.lastHeader.Number.Cmp(finalizedBlockHeight) >= 0 {
// Warn if our fetcher is ahead of the provider. The fetcher should always
// be behind or at head with the provider.
return nil, nil
if f.lastHeader != nil {
cmp := f.lastHeader.Number.Cmp(finalizedBlockHeight)
if cmp == 0 {
return nil, nil
} else if cmp > 0 {
return nil, ErrHeaderTraversalAheadOfProvider
}
}
nextHeight := bigZero
......@@ -43,7 +46,7 @@ func (f *Fetcher) NextFinalizedHeaders() ([]*types.Header, error) {
nextHeight = new(big.Int).Add(f.lastHeader.Number, bigOne)
}
endHeight := clampBigInt(nextHeight, finalizedBlockHeight, maxHeaderBatchSize)
endHeight := clampBigInt(nextHeight, finalizedBlockHeight, maxSize)
headers, err := f.ethClient.BlockHeadersByRange(nextHeight, endHeight)
if err != nil {
return nil, err
......@@ -55,7 +58,7 @@ func (f *Fetcher) NextFinalizedHeaders() ([]*types.Header, error) {
} else if f.lastHeader != nil && headers[0].ParentHash != f.lastHeader.Hash() {
// The indexer's state is in an irrecoverable state relative to the provider. This
// should never happen since the indexer is dealing with only finalized blocks.
return nil, ErrFetcherAndProviderMismatchedState
return nil, ErrHeaderTraversalAndProviderMismatchedState
}
f.lastHeader = headers[numHeaders-1]
......
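The `clampBigInt` helper is not shown in this diff; judging from the tests below (a request for 5 headers against 100 available finalized blocks yields the range [0, 4]), its assumed behavior can be sketched as follows, using the `math/big` import already present in the surrounding file.
// Sketch only: cap the inclusive range [start, end] at `size` elements.
func clampBigInt(start, end *big.Int, size uint64) *big.Int {
	count := new(big.Int).Sub(end, start).Uint64() + 1
	if count <= size {
		return end
	}
	// start + size - 1 is the largest height that keeps the range within `size` headers
	return new(big.Int).Add(start, new(big.Int).SetUint64(size-1))
}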
......@@ -33,31 +33,31 @@ func makeHeaders(numHeaders uint64, prevHeader *types.Header) []*types.Header {
return headers
}
func TestFetcherNextFinalizedHeadersNoOp(t *testing.T) {
func TestHeaderTraversalNextFinalizedHeadersNoOp(t *testing.T) {
client := new(MockEthClient)
// start from block 0 as the latest fetched block
lastHeader := &types.Header{Number: bigZero}
fetcher := NewFetcher(client, lastHeader)
// start from block 10 as the latest fetched block
lastHeader := &types.Header{Number: big.NewInt(10)}
headerTraversal := NewHeaderTraversal(client, lastHeader)
// no new headers when matched with head
client.On("FinalizedBlockHeight").Return(big.NewInt(0), nil)
headers, err := fetcher.NextFinalizedHeaders()
client.On("FinalizedBlockHeight").Return(big.NewInt(10), nil)
headers, err := headerTraversal.NextFinalizedHeaders(100)
assert.NoError(t, err)
assert.Empty(t, headers)
}
func TestFetcherNextFinalizedHeadersCursored(t *testing.T) {
func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
client := new(MockEthClient)
// start from genesis
fetcher := NewFetcher(client, nil)
headerTraversal := NewHeaderTraversal(client, nil)
// blocks [0..4]
headers := makeHeaders(5, nil)
client.On("FinalizedBlockHeight").Return(big.NewInt(4), nil).Times(1) // Times so that we can override next
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil)
headers, err := fetcher.NextFinalizedHeaders()
headers, err := headerTraversal.NextFinalizedHeaders(5)
assert.NoError(t, err)
assert.Len(t, headers, 5)
......@@ -65,46 +65,46 @@ func TestFetcherNextFinalizedHeadersCursored(t *testing.T) {
headers = makeHeaders(5, headers[len(headers)-1])
client.On("FinalizedBlockHeight").Return(big.NewInt(9), nil)
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(9))).Return(headers, nil)
headers, err = fetcher.NextFinalizedHeaders()
headers, err = headerTraversal.NextFinalizedHeaders(5)
assert.NoError(t, err)
assert.Len(t, headers, 5)
}
func TestFetcherNextFinalizedHeadersMaxHeaderBatch(t *testing.T) {
func TestHeaderTraversalNextFinalizedHeadersMaxSize(t *testing.T) {
client := new(MockEthClient)
// start from genesis
fetcher := NewFetcher(client, nil)
headerTraversal := NewHeaderTraversal(client, nil)
// blocks [0..maxBatchSize-1], size == maxBatchSize
headers := makeHeaders(maxHeaderBatchSize, nil)
client.On("FinalizedBlockHeight").Return(big.NewInt(maxHeaderBatchSize), nil)
// 100 "available" headers
client.On("FinalizedBlockHeight").Return(big.NewInt(100), nil)
// clamped by the max batch size
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(maxHeaderBatchSize-1))).Return(headers, nil)
headers, err := fetcher.NextFinalizedHeaders()
// clamped by the supplied size
headers := makeHeaders(5, nil)
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil)
headers, err := headerTraversal.NextFinalizedHeaders(5)
assert.NoError(t, err)
assert.Len(t, headers, maxHeaderBatchSize)
assert.Len(t, headers, 5)
// blocks [maxBatchSize..maxBatchSize]
headers = makeHeaders(1, headers[len(headers)-1])
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(maxHeaderBatchSize)), mock.MatchedBy(bigIntMatcher(maxHeaderBatchSize))).Return(headers, nil)
headers, err = fetcher.NextFinalizedHeaders()
// clamped by the supplied size. FinalizedHeight == 100
headers = makeHeaders(10, headers[len(headers)-1])
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(14))).Return(headers, nil)
headers, err = headerTraversal.NextFinalizedHeaders(10)
assert.NoError(t, err)
assert.Len(t, headers, 1)
assert.Len(t, headers, 10)
}
func TestFetcherMismatchedProviderStateError(t *testing.T) {
func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
client := new(MockEthClient)
// start from genesis
fetcher := NewFetcher(client, nil)
headerTraversal := NewHeaderTraversal(client, nil)
// blocks [0..4]
headers := makeHeaders(5, nil)
client.On("FinalizedBlockHeight").Return(big.NewInt(4), nil).Times(1) // Times so that we can override next
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil)
headers, err := fetcher.NextFinalizedHeaders()
headers, err := headerTraversal.NextFinalizedHeaders(5)
assert.NoError(t, err)
assert.Len(t, headers, 5)
......@@ -112,7 +112,7 @@ func TestFetcherMismatchedProviderStateError(t *testing.T) {
headers = makeHeaders(5, nil)
client.On("FinalizedBlockHeight").Return(big.NewInt(9), nil)
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(9))).Return(headers, nil)
headers, err = fetcher.NextFinalizedHeaders()
headers, err = headerTraversal.NextFinalizedHeaders(5)
assert.Nil(t, headers)
assert.Equal(t, ErrFetcherAndProviderMismatchedState, err)
assert.Equal(t, ErrHeaderTraversalAndProviderMismatchedState, err)
}
......@@ -2,14 +2,20 @@ package processor
import (
"context"
"encoding/hex"
"errors"
"math/big"
"reflect"
"github.com/google/uuid"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/node"
"github.com/google/uuid"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
legacy_bindings "github.com/ethereum-optimism/optimism/op-bindings/legacy-bindings"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
......@@ -30,6 +36,11 @@ type L1Contracts struct {
// Remove afterwards?
}
type checkpointAbi struct {
l2OutputOracle *abi.ABI
legacyStateCommitmentChain *abi.ABI
}
func (c L1Contracts) toSlice() []common.Address {
fields := reflect.VisibleFields(reflect.TypeOf(c))
v := reflect.ValueOf(c)
......@@ -50,7 +61,19 @@ func NewL1Processor(ethClient node.EthClient, db *database.DB, l1Contracts L1Con
l1ProcessLog := log.New("processor", "l1")
l1ProcessLog.Info("initializing processor")
latestHeader, err := db.Blocks.FinalizedL1BlockHeader()
l2OutputOracleABI, err := bindings.L2OutputOracleMetaData.GetAbi()
if err != nil {
l1ProcessLog.Error("unable to generate L2OutputOracle ABI", "err", err)
return nil, err
}
legacyStateCommitmentChainABI, err := legacy_bindings.StateCommitmentChainMetaData.GetAbi()
if err != nil {
l1ProcessLog.Error("unable to generate legacy StateCommitmentChain ABI", "err", err)
return nil, err
}
checkpointAbi := checkpointAbi{l2OutputOracle: l2OutputOracleABI, legacyStateCommitmentChain: legacyStateCommitmentChainABI}
latestHeader, err := db.Blocks.LatestL1BlockHeader()
if err != nil {
return nil, err
}
......@@ -66,34 +89,37 @@ func NewL1Processor(ethClient node.EthClient, db *database.DB, l1Contracts L1Con
fromL1Header = l1Header
} else {
// we shouldn't start from genesis with l1. Need a "genesis" height to be defined here
// we shouldn't start from genesis with l1. Need a "genesis" L1 height provided for the rollup
l1ProcessLog.Info("no indexed state, starting from genesis")
fromL1Header = nil
}
l1Processor := &L1Processor{
processor: processor{
fetcher: node.NewFetcher(ethClient, fromL1Header),
db: db,
processFn: l1ProcessFn(l1ProcessLog, ethClient, l1Contracts),
processLog: l1ProcessLog,
headerTraversal: node.NewHeaderTraversal(ethClient, fromL1Header),
db: db,
processFn: l1ProcessFn(l1ProcessLog, ethClient, l1Contracts, checkpointAbi),
processLog: l1ProcessLog,
},
}
return l1Processor, nil
}
func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1Contracts) func(db *database.DB, headers []*types.Header) error {
func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1Contracts, checkpointAbi checkpointAbi) ProcessFn {
rawEthClient := ethclient.NewClient(ethClient.RawRpcClient())
contractAddrs := l1Contracts.toSlice()
processLog.Info("processor configured with contracts", "contracts", l1Contracts)
outputProposedEventSig := checkpointAbi.l2OutputOracle.Events["OutputProposed"].ID
legacyStateBatchAppendedEventSig := checkpointAbi.legacyStateCommitmentChain.Events["StateBatchAppended"].ID
return func(db *database.DB, headers []*types.Header) error {
numHeaders := len(headers)
l1HeaderMap := make(map[common.Hash]*types.Header)
headerMap := make(map[common.Hash]*types.Header)
for _, header := range headers {
l1HeaderMap[header.Hash()] = header
headerMap[header.Hash()] = header
}
/** Watch for Contract Events **/
......@@ -104,18 +130,21 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1
return err
}
// L2 checkpoints posted on L1
outputProposals := []*database.OutputProposal{}
legacyStateBatches := []*database.LegacyStateBatch{}
numLogs := len(logs)
l1ContractEvents := make([]*database.L1ContractEvent, numLogs)
l1HeadersOfInterest := make(map[common.Hash]bool)
for i, log := range logs {
header, ok := l1HeaderMap[log.BlockHash]
header, ok := headerMap[log.BlockHash]
if !ok {
processLog.Crit("contract event found with associated header not in the batch", "header", log.BlockHash, "log_index", log.Index)
processLog.Error("contract event found with associated header not in the batch", "header", log.BlockHash, "log_index", log.Index)
return errors.New("parsed log with a block hash not in this batch")
}
l1HeadersOfInterest[log.BlockHash] = true
l1ContractEvents[i] = &database.L1ContractEvent{
contractEvent := &database.L1ContractEvent{
ContractEvent: database.ContractEvent{
GUID: uuid.New(),
BlockHash: log.BlockHash,
......@@ -125,21 +154,54 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1
Timestamp: header.Time,
},
}
l1ContractEvents[i] = contractEvent
l1HeadersOfInterest[log.BlockHash] = true
// Track Checkpoint Events for L2
switch contractEvent.EventSignature {
case outputProposedEventSig:
if len(log.Topics) != 4 {
processLog.Error("parsed unexpected number of L2OutputOracle#OutputProposed log topics", "log_topics", log.Topics)
return errors.New("parsed unexpected OutputProposed event")
}
outputProposals = append(outputProposals, &database.OutputProposal{
OutputRoot: log.Topics[1],
L2BlockNumber: database.U256{Int: new(big.Int).SetBytes(log.Topics[3].Bytes())},
L1ContractEventGUID: contractEvent.GUID,
})
case legacyStateBatchAppendedEventSig:
var stateBatchAppended legacy_bindings.StateCommitmentChainStateBatchAppended
err := checkpointAbi.legacyStateCommitmentChain.UnpackIntoInterface(&stateBatchAppended, "StateBatchAppended", log.Data)
if err != nil || len(log.Topics) != 2 {
processLog.Error("unexpected StateCommitmentChain#StateBatchAppended log data or log topics", "log_topics", log.Topics, "log_data", hex.EncodeToString(log.Data), "err", err)
return err
}
legacyStateBatches = append(legacyStateBatches, &database.LegacyStateBatch{
Index: new(big.Int).SetBytes(log.Topics[1].Bytes()).Uint64(),
Root: stateBatchAppended.BatchRoot,
Size: stateBatchAppended.BatchSize.Uint64(),
PrevTotal: stateBatchAppended.PrevTotalElements.Uint64(),
L1ContractEventGUID: contractEvent.GUID,
})
}
}
/** Index L1 Blocks that have an optimism event **/
/** Aggregate applicable L1 Blocks **/
// we iterate on the original array to maintain ordering. probably can find a more efficient
// way to iterate over the `l1HeadersOfInterest` map while maintaining ordering
indexedL1Header := []*database.L1BlockHeader{}
l1Headers := []*database.L1BlockHeader{}
for _, header := range headers {
blockHash := header.Hash()
_, hasLogs := l1HeadersOfInterest[blockHash]
if !hasLogs {
if _, hasLogs := l1HeadersOfInterest[blockHash]; !hasLogs {
continue
}
indexedL1Header = append(indexedL1Header, &database.L1BlockHeader{
l1Headers = append(l1Headers, &database.L1BlockHeader{
BlockHeader: database.BlockHeader{
Hash: blockHash,
ParentHash: header.ParentHash,
......@@ -151,22 +213,41 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1
/** Update Database **/
numIndexedL1Headers := len(indexedL1Header)
if numIndexedL1Headers > 0 {
processLog.Info("saved l1 blocks of interest within batch", "num", numIndexedL1Headers, "batchSize", numHeaders)
err = db.Blocks.StoreL1BlockHeaders(indexedL1Header)
if err != nil {
return err
}
numL1Headers := len(l1Headers)
if numL1Headers == 0 {
processLog.Info("no l1 blocks of interest")
return nil
}
processLog.Info("saving l1 blocks of interest", "size", numL1Headers, "batch_size", numHeaders)
err = db.Blocks.StoreL1BlockHeaders(l1Headers)
if err != nil {
return err
}
// Since the headers to index are derived from the existence of logs, we know in this branch `numLogs > 0`
processLog.Info("saving contract logs", "size", numLogs)
err = db.ContractEvents.StoreL1ContractEvents(l1ContractEvents)
if err != nil {
return err
}
// Mark L2 checkpoints that have been recorded on L1 (L2OutputProposal & StateBatchAppended events)
numLegacyStateBatches := len(legacyStateBatches)
if numLegacyStateBatches > 0 {
latestBatch := legacyStateBatches[numLegacyStateBatches-1]
latestL2Height := latestBatch.PrevTotal + latestBatch.Size - 1
processLog.Info("detected legacy state batches", "size", numLegacyStateBatches, "latest_l2_block_number", latestL2Height)
}
// Since the headers to index are derived from the existence of logs, we know in this branch `numLogs > 0`
processLog.Info("saving contract logs", "size", numLogs)
err = db.ContractEvents.StoreL1ContractEvents(l1ContractEvents)
numOutputProposals := len(outputProposals)
if numOutputProposals > 0 {
latestL2Height := outputProposals[numOutputProposals-1].L2BlockNumber.Int
processLog.Info("detected output proposals", "size", numOutputProposals, "latest_l2_block_number", latestL2Height)
err := db.Blocks.StoreOutputProposals(outputProposals)
if err != nil {
return err
}
} else {
processLog.Info("no l1 blocks of interest within batch")
}
// a-ok!
......
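For context on the checkpoint matching in `l1ProcessFn`: the signatures compared against `contractEvent.EventSignature` are the keccak-256 hashes of the canonical event declarations, and the three indexed parameters of `OutputProposed` occupy topics 1 through 3 (which is why exactly four topics are required). An illustrative helper mirroring the topic layout documented in the challenger package later in this diff; the function name is not part of the codebase, and the usual go-ethereum `crypto`, `common`, and `types` imports are assumed.
// Illustrative only: decode the indexed fields of an OutputProposed log.
// Topic layout: [0] event signature, [1] outputRoot, [2] l2OutputIndex, [3] l2BlockNumber.
func outputProposedTopics(log types.Log) (common.Hash, *big.Int, bool) {
	sig := crypto.Keccak256Hash([]byte("OutputProposed(bytes32,uint256,uint256,uint256)"))
	if len(log.Topics) != 4 || log.Topics[0] != sig {
		return common.Hash{}, nil, false
	}
	outputRoot := log.Topics[1]
	l2BlockNumber := new(big.Int).SetBytes(log.Topics[3].Bytes())
	return outputRoot, l2BlockNumber, true
}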
......@@ -58,7 +58,7 @@ func NewL2Processor(ethClient node.EthClient, db *database.DB, l2Contracts L2Con
l2ProcessLog := log.New("processor", "l2")
l2ProcessLog.Info("initializing processor")
latestHeader, err := db.Blocks.FinalizedL2BlockHeader()
latestHeader, err := db.Blocks.LatestL2BlockHeader()
if err != nil {
return nil, err
}
......@@ -80,17 +80,17 @@ func NewL2Processor(ethClient node.EthClient, db *database.DB, l2Contracts L2Con
l2Processor := &L2Processor{
processor: processor{
fetcher: node.NewFetcher(ethClient, fromL2Header),
db: db,
processFn: l2ProcessFn(l2ProcessLog, ethClient, l2Contracts),
processLog: l2ProcessLog,
headerTraversal: node.NewHeaderTraversal(ethClient, fromL2Header),
db: db,
processFn: l2ProcessFn(l2ProcessLog, ethClient, l2Contracts),
processLog: l2ProcessLog,
},
}
return l2Processor, nil
}
func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2Contracts) func(db *database.DB, headers []*types.Header) error {
func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2Contracts) ProcessFn {
rawEthClient := ethclient.NewClient(ethClient.RawRpcClient())
contractAddrs := l2Contracts.toSlice()
......@@ -98,7 +98,7 @@ func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2
return func(db *database.DB, headers []*types.Header) error {
numHeaders := len(headers)
/** Index All L2 Blocks **/
/** Index all L2 blocks **/
l2Headers := make([]*database.L2BlockHeader, len(headers))
l2HeaderMap := make(map[common.Hash]*types.Header)
......@@ -129,8 +129,7 @@ func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2
for i, log := range logs {
header, ok := l2HeaderMap[log.BlockHash]
if !ok {
// Log the individual headers in the batch?
processLog.Crit("contract event found with associated header not in the batch", "header", header, "log_index", log.Index)
processLog.Error("contract event found with associated header not in the batch", "header", header, "log_index", log.Index)
return errors.New("parsed log with a block hash not in this batch")
}
......@@ -148,13 +147,14 @@ func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2
/** Update Database **/
processLog.Info("saving l2 blocks", "size", numHeaders)
err = db.Blocks.StoreL2BlockHeaders(l2Headers)
if err != nil {
return err
}
if numLogs > 0 {
processLog.Info("detected new contract logs", "size", numLogs)
processLog.Info("detected contract logs", "size", numLogs)
err = db.ContractEvents.StoreL2ContractEvents(l2ContractEvents)
if err != nil {
return err
......
......@@ -10,55 +10,61 @@ import (
"github.com/ethereum/go-ethereum/log"
)
const defaultLoopInterval = 5 * time.Second
const (
defaultLoopInterval = 5 * time.Second
defaultHeaderBufferSize = 500
)
// processFn is the function used to process unindexed headers. In
// the event of a failure, no database operations are committed
type processFn func(*database.DB, []*types.Header) error
// ProcessFn is the entrypoint for processing a batch of headers.
// In the event of failure, database operations are rolled back
type ProcessFn func(*database.DB, []*types.Header) error
type processor struct {
fetcher *node.Fetcher
headerTraversal *node.HeaderTraversal
db *database.DB
processFn processFn
processFn ProcessFn
processLog log.Logger
}
// Start kicks off the processing loop
func (p processor) Start() {
pollTicker := time.NewTicker(defaultLoopInterval)
defer pollTicker.Stop()
p.processLog.Info("starting processor...")
// Make this loop stoppable
var unprocessedHeaders []*types.Header
for range pollTicker.C {
p.processLog.Info("checking for new headers...")
headers, err := p.fetcher.NextFinalizedHeaders()
if err != nil {
p.processLog.Error("unable to query for headers", "err", err)
continue
}
if len(unprocessedHeaders) == 0 {
newHeaders, err := p.headerTraversal.NextFinalizedHeaders(defaultHeaderBufferSize)
if err != nil {
p.processLog.Error("error querying for headers", "err", err)
continue
} else if len(newHeaders) == 0 {
// Logged as an error since this loop should be operating at a longer interval than the provider
p.processLog.Error("no new headers. processor unexpectedly at head...")
continue
}
if len(headers) == 0 {
p.processLog.Info("no new headers. indexer must be at head...")
continue
unprocessedHeaders = newHeaders
} else {
p.processLog.Info("retrying previous batch")
}
batchLog := p.processLog.New("startHeight", headers[0].Number, "endHeight", headers[len(headers)-1].Number)
batchLog.Info("indexing batch of headers")
// wrap operations within a single transaction
err = p.db.Transaction(func(db *database.DB) error {
return p.processFn(db, headers)
firstHeader := unprocessedHeaders[0]
lastHeader := unprocessedHeaders[len(unprocessedHeaders)-1]
batchLog := p.processLog.New("batch_start_block_number", firstHeader.Number, "batch_end_block_number", lastHeader.Number)
batchLog.Info("processing batch")
err := p.db.Transaction(func(db *database.DB) error {
return p.processFn(db, unprocessedHeaders)
})
// TODO(DX-79) if processFn failed, the next poll should retry starting from this same batch of headers
if err != nil {
batchLog.Info("unable to index batch", "err", err)
panic(err)
batchLog.Warn("error processing batch. no operations committed", "err", err)
} else {
batchLog.Info("done indexing batch")
batchLog.Info("fully committed batch")
unprocessedHeaders = nil
}
}
}
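A minimal `ProcessFn` sketch following the shape of `l2ProcessFn` above; the variable name is illustrative and the field names follow the BlockHeader struct used elsewhere in this diff. Since `Start` wraps each call in `db.Transaction`, returning an error discards the whole batch and the same headers are retried on the next tick.
// Sketch only: a ProcessFn that simply persists every header handed to it.
var storeAllHeaders ProcessFn = func(db *database.DB, headers []*types.Header) error {
	l2Headers := make([]*database.L2BlockHeader, len(headers))
	for i, header := range headers {
		l2Headers[i] = &database.L2BlockHeader{
			BlockHeader: database.BlockHeader{
				Hash:       header.Hash(),
				ParentHash: header.ParentHash,
				Number:     database.U256{Int: header.Number},
				Timestamp:  header.Time,
			},
		}
	}
	// Runs inside the per-batch transaction opened by processor.Start.
	return db.Blocks.StoreL2BlockHeaders(l2Headers)
}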
......@@ -15,7 +15,7 @@ var PreimageOracleStorageLayout = new(solc.StorageLayout)
var PreimageOracleDeployedBin = "0x608060405234801561001057600080fd5b50600436106100725760003560e01c8063e159261111610050578063e15926111461011b578063fe4ac08e14610130578063fef2b4ed146101a557600080fd5b806361238bde146100775780638542cf50146100b5578063e03110e1146100f3575b600080fd5b6100a26100853660046103b5565b600160209081526000928352604080842090915290825290205481565b6040519081526020015b60405180910390f35b6100e36100c33660046103b5565b600260209081526000928352604080842090915290825290205460ff1681565b60405190151581526020016100ac565b6101066101013660046103b5565b6101c5565b604080519283526020830191909152016100ac565b61012e6101293660046103d7565b6102b6565b005b61012e61013e366004610453565b6000838152600260209081526040808320878452825280832080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660019081179091558684528252808320968352958152858220939093559283529082905291902055565b6100a26101b3366004610485565b60006020819052908152604090205481565b6000828152600260209081526040808320848452909152812054819060ff1661024e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f707265696d616765206d75737420657869737400000000000000000000000000604482015260640160405180910390fd5b506000838152602081815260409091205461026a8160086104cd565b6102758560206104cd565b1061029357836102868260086104cd565b61029091906104e5565b91505b506000938452600160209081526040808620948652939052919092205492909150565b6044356000806008830186106102cb57600080fd5b60c083901b6080526088838682378087017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80151908490207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f02000000000000000000000000000000000000000000000000000000000000001760008181526002602090815260408083208b8452825280832080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600190811790915584845282528083209a83529981528982209390935590815290819052959095209190915550505050565b600080604083850312156103c857600080fd5b50508035926020909101359150565b6000806000604084860312156103ec57600080fd5b83359250602084013567ffffffffffffffff8082111561040b57600080fd5b818601915086601f83011261041f57600080fd5b81358181111561042e57600080fd5b87602082850101111561044057600080fd5b6020830194508093505050509250925092565b6000806000806080858703121561046957600080fd5b5050823594602084013594506040840135936060013592509050565b60006020828403121561049757600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600082198211156104e0576104e061049e565b500190565b6000828210156104f7576104f761049e565b50039056fea164736f6c634300080f000a"
var PreimageOracleDeployedSourceMap = "57:2945:58:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;143:68;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;413:25:234;;;401:2;386:18;143:68:58;;;;;;;;217:66;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;614:14:234;;607:22;589:41;;577:2;562:18;217:66:58;449:187:234;290:454:58;;;;;;:::i;:::-;;:::i;:::-;;;;815:25:234;;;871:2;856:18;;849:34;;;;788:18;290:454:58;641:248:234;1537:1463:58;;;;;;:::i;:::-;;:::i;:::-;;1086:262;;;;;;:::i;:::-;1219:19;;;;:14;:19;;;;;;;;:31;;;;;;;;:38;;;;1253:4;1219:38;;;;;;1267:18;;;;;;;;:30;;;;;;;;;:37;;;;1314:20;;;;;;;;;;:27;1086:262;87:50;;;;;;:::i;:::-;;;;;;;;;;;;;;;290:454;388:11;439:19;;;:14;:19;;;;;;;;:27;;;;;;;;;388:11;;439:27;;431:59;;;;;;;2517:2:234;431:59:58;;;2499:21:234;2556:2;2536:18;;;2529:30;2595:21;2575:18;;;2568:49;2634:18;;431:59:58;;;;;;;;-1:-1:-1;521:14:58;538:20;;;509:2;538:20;;;;;;;;631:10;538:20;640:1;631:10;:::i;:::-;616:11;:6;625:2;616:11;:::i;:::-;:25;612:84;;679:6;666:10;:6;675:1;666:10;:::i;:::-;:19;;;;:::i;:::-;657:28;;612:84;-1:-1:-1;711:18:58;;;;:13;:18;;;;;;;;:26;;;;;;;;;;;;290:454;;-1:-1:-1;290:454:58:o;1537:1463::-;1831:4;1818:18;1636:12;;1966:1;1956:12;;1941:28;;1931:84;;1999:1;1996;1989:12;1931:84;2258:3;2254:14;;;2158:4;2242:27;2289:11;2263:4;2408:15;2289:11;2390:40;2620:28;;;2624:11;2620:28;2614:35;2671:20;;;;2818:19;2811:27;2840:11;2808:44;2871:19;;;;2849:1;2871:19;;;;;;;;:31;;;;;;;;:38;;;;2905:4;2871:38;;;;;;2919:18;;;;;;;;:30;;;;;;;;;:37;;;;2966:20;;;;;;;;;;;:27;;;;-1:-1:-1;;;;1537:1463:58:o;14:248:234:-;82:6;90;143:2;131:9;122:7;118:23;114:32;111:52;;;159:1;156;149:12;111:52;-1:-1:-1;;182:23:234;;;252:2;237:18;;;224:32;;-1:-1:-1;14:248:234:o;894:659::-;973:6;981;989;1042:2;1030:9;1021:7;1017:23;1013:32;1010:52;;;1058:1;1055;1048:12;1010:52;1094:9;1081:23;1071:33;;1155:2;1144:9;1140:18;1127:32;1178:18;1219:2;1211:6;1208:14;1205:34;;;1235:1;1232;1225:12;1205:34;1273:6;1262:9;1258:22;1248:32;;1318:7;1311:4;1307:2;1303:13;1299:27;1289:55;;1340:1;1337;1330:12;1289:55;1380:2;1367:16;1406:2;1398:6;1395:14;1392:34;;;1422:1;1419;1412:12;1392:34;1467:7;1462:2;1453:6;1449:2;1445:15;1441:24;1438:37;1435:57;;;1488:1;1485;1478:12;1435:57;1519:2;1515;1511:11;1501:21;;1541:6;1531:16;;;;;894:659;;;;;:::o;1558:385::-;1644:6;1652;1660;1668;1721:3;1709:9;1700:7;1696:23;1692:33;1689:53;;;1738:1;1735;1728:12;1689:53;-1:-1:-1;;1761:23:234;;;1831:2;1816:18;;1803:32;;-1:-1:-1;1882:2:234;1867:18;;1854:32;;1933:2;1918:18;1905:32;;-1:-1:-1;1558:385:234;-1:-1:-1;1558:385:234:o;1948:180::-;2007:6;2060:2;2048:9;2039:7;2035:23;2031:32;2028:52;;;2076:1;2073;2066:12;2028:52;-1:-1:-1;2099:23:234;;1948:180;-1:-1:-1;1948:180:234:o;2663:184::-;2715:77;2712:1;2705:88;2812:4;2809:1;2802:15;2836:4;2833:1;2826:15;2852:128;2892:3;2923:1;2919:6;2916:1;2913:13;2910:39;;;2929:18;;:::i;:::-;-1:-1:-1;2965:9:234;;2852:128::o;2985:125::-;3025:4;3053:1;3050;3047:8;3044:34;;;3058:18;;:::i;:::-;-1:-1:-1;3095:9:234;;2985:125::o"
var PreimageOracleDeployedSourceMap = "57:2945:58:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;143:68;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;413:25:228;;;401:2;386:18;143:68:58;;;;;;;;217:66;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;614:14:228;;607:22;589:41;;577:2;562:18;217:66:58;449:187:228;290:454:58;;;;;;:::i;:::-;;:::i;:::-;;;;815:25:228;;;871:2;856:18;;849:34;;;;788:18;290:454:58;641:248:228;1537:1463:58;;;;;;:::i;:::-;;:::i;:::-;;1086:262;;;;;;:::i;:::-;1219:19;;;;:14;:19;;;;;;;;:31;;;;;;;;:38;;;;1253:4;1219:38;;;;;;1267:18;;;;;;;;:30;;;;;;;;;:37;;;;1314:20;;;;;;;;;;:27;1086:262;87:50;;;;;;:::i;:::-;;;;;;;;;;;;;;;290:454;388:11;439:19;;;:14;:19;;;;;;;;:27;;;;;;;;;388:11;;439:27;;431:59;;;;;;;2517:2:228;431:59:58;;;2499:21:228;2556:2;2536:18;;;2529:30;2595:21;2575:18;;;2568:49;2634:18;;431:59:58;;;;;;;;-1:-1:-1;521:14:58;538:20;;;509:2;538:20;;;;;;;;631:10;538:20;640:1;631:10;:::i;:::-;616:11;:6;625:2;616:11;:::i;:::-;:25;612:84;;679:6;666:10;:6;675:1;666:10;:::i;:::-;:19;;;;:::i;:::-;657:28;;612:84;-1:-1:-1;711:18:58;;;;:13;:18;;;;;;;;:26;;;;;;;;;;;;290:454;;-1:-1:-1;290:454:58:o;1537:1463::-;1831:4;1818:18;1636:12;;1966:1;1956:12;;1941:28;;1931:84;;1999:1;1996;1989:12;1931:84;2258:3;2254:14;;;2158:4;2242:27;2289:11;2263:4;2408:15;2289:11;2390:40;2620:28;;;2624:11;2620:28;2614:35;2671:20;;;;2818:19;2811:27;2840:11;2808:44;2871:19;;;;2849:1;2871:19;;;;;;;;:31;;;;;;;;:38;;;;2905:4;2871:38;;;;;;2919:18;;;;;;;;:30;;;;;;;;;:37;;;;2966:20;;;;;;;;;;;:27;;;;-1:-1:-1;;;;1537:1463:58:o;14:248:228:-;82:6;90;143:2;131:9;122:7;118:23;114:32;111:52;;;159:1;156;149:12;111:52;-1:-1:-1;;182:23:228;;;252:2;237:18;;;224:32;;-1:-1:-1;14:248:228:o;894:659::-;973:6;981;989;1042:2;1030:9;1021:7;1017:23;1013:32;1010:52;;;1058:1;1055;1048:12;1010:52;1094:9;1081:23;1071:33;;1155:2;1144:9;1140:18;1127:32;1178:18;1219:2;1211:6;1208:14;1205:34;;;1235:1;1232;1225:12;1205:34;1273:6;1262:9;1258:22;1248:32;;1318:7;1311:4;1307:2;1303:13;1299:27;1289:55;;1340:1;1337;1330:12;1289:55;1380:2;1367:16;1406:2;1398:6;1395:14;1392:34;;;1422:1;1419;1412:12;1392:34;1467:7;1462:2;1453:6;1449:2;1445:15;1441:24;1438:37;1435:57;;;1488:1;1485;1478:12;1435:57;1519:2;1515;1511:11;1501:21;;1541:6;1531:16;;;;;894:659;;;;;:::o;1558:385::-;1644:6;1652;1660;1668;1721:3;1709:9;1700:7;1696:23;1692:33;1689:53;;;1738:1;1735;1728:12;1689:53;-1:-1:-1;;1761:23:228;;;1831:2;1816:18;;1803:32;;-1:-1:-1;1882:2:228;1867:18;;1854:32;;1933:2;1918:18;1905:32;;-1:-1:-1;1558:385:228;-1:-1:-1;1558:385:228:o;1948:180::-;2007:6;2060:2;2048:9;2039:7;2035:23;2031:32;2028:52;;;2076:1;2073;2066:12;2028:52;-1:-1:-1;2099:23:228;;1948:180;-1:-1:-1;1948:180:228:o;2663:184::-;2715:77;2712:1;2705:88;2812:4;2809:1;2802:15;2836:4;2833:1;2826:15;2852:128;2892:3;2923:1;2919:6;2916:1;2913:13;2910:39;;;2929:18;;:::i;:::-;-1:-1:-1;2965:9:228;;2852:128::o;2985:125::-;3025:4;3053:1;3050;3047:8;3044:34;;;3058:18;;:::i;:::-;-1:-1:-1;3095:9:228;;2985:125::o"
func init() {
if err := json.Unmarshal([]byte(PreimageOracleStorageLayoutJSON), PreimageOracleStorageLayout); err != nil {
......
package op_challenger
import (
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum/go-ethereum/log"
)
// Main is the programmatic entry-point for running op-challenger
func Main(logger log.Logger, cfg *config.Config) error {
logger.Info("Fault game started")
return nil
}
package challenger
import (
"context"
_ "net/http/pprof"
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
ethclient "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/metrics"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-node/eth"
opclient "github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
)
type OutputAPI interface {
OutputAtBlock(ctx context.Context, blockNum uint64) (*eth.OutputResponse, error)
}
// Challenger contests invalid L2OutputOracle outputs
type Challenger struct {
txMgr txmgr.TxManager
wg sync.WaitGroup
done chan struct{}
log log.Logger
metr metrics.Metricer
ctx context.Context
cancel context.CancelFunc
l1Client *ethclient.Client
rollupClient OutputAPI
// l2 Output Oracle contract
l2ooContract *bindings.L2OutputOracleCaller
l2ooContractAddr common.Address
l2ooABI *abi.ABI
// dispute game factory contract
dgfContract *bindings.DisputeGameFactoryCaller
dgfContractAddr common.Address
dgfABI *abi.ABI
networkTimeout time.Duration
}
// From returns the address of the account used to send transactions.
func (c *Challenger) From() common.Address {
return c.txMgr.From()
}
// Client returns the client for the settlement layer.
func (c *Challenger) Client() *ethclient.Client {
return c.l1Client
}
func (c *Challenger) NewOracleSubscription() (*Subscription, error) {
query, err := BuildOutputLogFilter(c.l2ooABI)
if err != nil {
return nil, err
}
return NewSubscription(query, c.Client(), c.log), nil
}
// NewFactorySubscription creates a new [Subscription] listening to the DisputeGameFactory contract.
func (c *Challenger) NewFactorySubscription() (*Subscription, error) {
query, err := BuildDisputeGameLogFilter(c.dgfABI)
if err != nil {
return nil, err
}
return NewSubscription(query, c.Client(), c.log), nil
}
// NewChallenger creates a new Challenger
func NewChallenger(cfg config.Config, l log.Logger, m metrics.Metricer) (*Challenger, error) {
ctx, cancel := context.WithCancel(context.Background())
txManager, err := txmgr.NewSimpleTxManager("challenger", l, m, *cfg.TxMgrConfig)
if err != nil {
cancel()
return nil, err
}
// Connect to L1 and L2 providers. Perform these last since they are the most expensive.
l1Client, err := opclient.DialEthClientWithTimeout(ctx, cfg.L1EthRpc, opclient.DefaultDialTimeout)
if err != nil {
cancel()
return nil, err
}
rollupClient, err := opclient.DialRollupClientWithTimeout(ctx, cfg.RollupRpc, opclient.DefaultDialTimeout)
if err != nil {
cancel()
return nil, err
}
l2ooContract, err := bindings.NewL2OutputOracleCaller(cfg.L2OOAddress, l1Client)
if err != nil {
cancel()
return nil, err
}
dgfContract, err := bindings.NewDisputeGameFactoryCaller(cfg.DGFAddress, l1Client)
if err != nil {
cancel()
return nil, err
}
cCtx, cCancel := context.WithTimeout(ctx, cfg.NetworkTimeout)
defer cCancel()
version, err := l2ooContract.Version(&bind.CallOpts{Context: cCtx})
if err != nil {
cancel()
return nil, err
}
l.Info("Connected to L2OutputOracle", "address", cfg.L2OOAddress, "version", version)
parsedL2oo, err := bindings.L2OutputOracleMetaData.GetAbi()
if err != nil {
cancel()
return nil, err
}
parsedDgf, err := bindings.DisputeGameFactoryMetaData.GetAbi()
if err != nil {
cancel()
return nil, err
}
return &Challenger{
txMgr: txManager,
done: make(chan struct{}),
log: l,
metr: m,
ctx: ctx,
cancel: cancel,
rollupClient: rollupClient,
l1Client: l1Client,
l2ooContract: l2ooContract,
l2ooContractAddr: cfg.L2OOAddress,
l2ooABI: parsedL2oo,
dgfContract: dgfContract,
dgfContractAddr: cfg.DGFAddress,
dgfABI: parsedDgf,
networkTimeout: cfg.NetworkTimeout,
}, nil
}
// Start runs the challenger in a goroutine.
func (c *Challenger) Start() error {
c.log.Error("challenger not implemented.")
return nil
}
// Stop closes the challenger and waits for spawned goroutines to exit.
func (c *Challenger) Stop() {
c.cancel()
close(c.done)
c.wg.Wait()
}
package challenger
import (
"errors"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
)
var ErrMissingFactoryEvent = errors.New("missing factory event")
// BuildDisputeGameLogFilter creates a filter query for the DisputeGameFactory contract.
//
// The `DisputeGameCreated` event is encoded as:
// 0: address indexed disputeProxy,
// 1: GameType indexed gameType,
// 2: Claim indexed rootClaim,
func BuildDisputeGameLogFilter(contract *abi.ABI) (ethereum.FilterQuery, error) {
event := contract.Events["DisputeGameCreated"]
if event.ID == (common.Hash{}) {
return ethereum.FilterQuery{}, ErrMissingFactoryEvent
}
query := ethereum.FilterQuery{
Topics: [][]common.Hash{
{event.ID},
},
}
return query, nil
}
package challenger
import (
"testing"
"github.com/stretchr/testify/require"
eth "github.com/ethereum/go-ethereum"
abi "github.com/ethereum/go-ethereum/accounts/abi"
common "github.com/ethereum/go-ethereum/common"
)
// TestBuildDisputeGameLogFilter_Succeeds tests that the DisputeGame
// Log Filter is built correctly.
func TestBuildDisputeGameLogFilter_Succeeds(t *testing.T) {
event := abi.Event{
ID: [32]byte{0x01},
}
filterQuery := eth.FilterQuery{
Topics: [][]common.Hash{
{event.ID},
},
}
dgfABI := abi.ABI{
Events: map[string]abi.Event{
"DisputeGameCreated": event,
},
}
query, err := BuildDisputeGameLogFilter(&dgfABI)
require.Equal(t, filterQuery, query)
require.NoError(t, err)
}
// TestBuildDisputeGameLogFilter_Fails tests that the DisputeGame
// Log Filter fails when the event definition is missing.
func TestBuildDisputeGameLogFilter_Fails(t *testing.T) {
dgfABI := abi.ABI{
Events: map[string]abi.Event{},
}
_, err := BuildDisputeGameLogFilter(&dgfABI)
require.ErrorIs(t, ErrMissingFactoryEvent, err)
}
package challenger
import (
"context"
"sync"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/backoff"
)
// logStore manages log subscriptions.
type logStore struct {
// The log filter query
query ethereum.FilterQuery
// core sync mutex for log store
// this locks the entire log store
mu sync.Mutex
logList []types.Log
logMap map[common.Hash][]types.Log
// Log subscription
subscription *Subscription
// Client to query for logs
client ethereum.LogFilterer
// Logger
log log.Logger
}
// NewLogStore creates a new log store.
func NewLogStore(query ethereum.FilterQuery, client ethereum.LogFilterer, log log.Logger) *logStore {
return &logStore{
query: query,
mu: sync.Mutex{},
logList: make([]types.Log, 0),
logMap: make(map[common.Hash][]types.Log),
subscription: NewSubscription(query, client, log),
client: client,
log: log,
}
}
// Subscribed returns true if the subscription has started.
func (l *logStore) Subscribed() bool {
return l.subscription.Started()
}
// Query returns the log filter query.
func (l *logStore) Query() ethereum.FilterQuery {
return l.query
}
// Client returns the log filter client.
func (l *logStore) Client() ethereum.LogFilterer {
return l.client
}
// GetLogs returns all logs in the log store.
func (l *logStore) GetLogs() []types.Log {
l.mu.Lock()
defer l.mu.Unlock()
logs := make([]types.Log, len(l.logList))
copy(logs, l.logList)
return logs
}
// GetLogByBlockHash returns all logs in the log store for a given block hash.
func (l *logStore) GetLogByBlockHash(blockHash common.Hash) []types.Log {
l.mu.Lock()
defer l.mu.Unlock()
logs := make([]types.Log, len(l.logMap[blockHash]))
copy(logs, l.logMap[blockHash])
return logs
}
// Subscribe starts the subscription.
// This function spawns a new goroutine.
func (l *logStore) Subscribe(ctx context.Context) error {
err := l.subscription.Subscribe()
if err != nil {
l.log.Error("failed to subscribe", "err", err)
return err
}
go l.dispatchLogs(ctx)
return nil
}
// Quit stops all log store asynchronous tasks.
func (l *logStore) Quit() {
l.subscription.Quit()
}
// buildBackoffStrategy builds a [backoff.Strategy].
func (l *logStore) buildBackoffStrategy() backoff.Strategy {
return &backoff.ExponentialStrategy{
Min: 1000,
Max: 20_000,
MaxJitter: 250,
}
}
// resubscribe attempts to re-establish the log store internal
// subscription with a backoff strategy.
func (l *logStore) resubscribe(ctx context.Context) error {
l.log.Info("log store resubscribing with backoff")
backoffStrategy := l.buildBackoffStrategy()
return backoff.DoCtx(ctx, 10, backoffStrategy, func() error {
if l.subscription == nil {
l.log.Error("subscription zeroed out")
return nil
}
err := l.subscription.Subscribe()
if err == nil {
l.log.Info("subscription reconnected", "id", l.subscription.ID())
}
return err
})
}
// insertLog inserts a log into the log store.
func (l *logStore) insertLog(log types.Log) {
l.mu.Lock()
l.logList = append(l.logList, log)
l.logMap[log.BlockHash] = append(l.logMap[log.BlockHash], log)
l.mu.Unlock()
}
// dispatchLogs dispatches logs to the log store.
// This function is intended to be run as a goroutine.
func (l *logStore) dispatchLogs(ctx context.Context) {
for {
select {
case err := <-l.subscription.sub.Err():
l.log.Error("log subscription error", "err", err)
for {
err = l.resubscribe(ctx)
if err == nil {
break
}
}
case log := <-l.subscription.logs:
l.insertLog(log)
case <-l.subscription.quit:
l.log.Info("received quit signal from subscription", "id", l.subscription.ID())
return
}
}
}
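A usage sketch of the log store, mirroring the tests that follow; the function name and the empty block hash are placeholders.
// Sketch only: subscribe to a filter query and read logs back by block hash.
func watchLogs(ctx context.Context, client ethereum.LogFilterer, query ethereum.FilterQuery, logger log.Logger) (*logStore, error) {
	store := NewLogStore(query, client, logger)
	if err := store.Subscribe(ctx); err != nil {
		return nil, err
	}
	// Logs arrive asynchronously via dispatchLogs and can be read back at any time.
	_ = store.GetLogByBlockHash(common.Hash{})
	return store, nil
}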
package challenger
import (
"context"
"errors"
"testing"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/stretchr/testify/require"
)
type mockLogStoreClient struct {
sub mockSubscription
logs chan<- types.Log
subcount int
}
func newMockLogStoreClient() *mockLogStoreClient {
return &mockLogStoreClient{
sub: mockSubscription{
errorChan: make(chan error),
},
}
}
func (m *mockLogStoreClient) FilterLogs(context.Context, ethereum.FilterQuery) ([]types.Log, error) {
panic("this should not be called by the Subscription.Subscribe method")
}
func (m *mockLogStoreClient) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, logs chan<- types.Log) (ethereum.Subscription, error) {
m.subcount = m.subcount + 1
m.logs = logs
return m.sub, nil
}
var (
ErrTestError = errors.New("test error")
)
// errLogStoreClient implements the [ethereum.LogFilterer] interface for testing.
type errLogStoreClient struct{}
func (m errLogStoreClient) FilterLogs(context.Context, ethereum.FilterQuery) ([]types.Log, error) {
panic("this should not be called by the Subscription.Subscribe method")
}
func (m errLogStoreClient) SubscribeFilterLogs(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error) {
return nil, ErrTestError
}
type mockSubscription struct {
errorChan chan error
}
func (m mockSubscription) Err() <-chan error {
return m.errorChan
}
func (m mockSubscription) Unsubscribe() {}
func newLogStore(t *testing.T) (*logStore, *mockLogStoreClient) {
query := ethereum.FilterQuery{}
client := newMockLogStoreClient()
log := testlog.Logger(t, log.LvlError)
return NewLogStore(query, client, log), client
}
func newErrorLogStore(t *testing.T, client *errLogStoreClient) (*logStore, *errLogStoreClient) {
query := ethereum.FilterQuery{}
log := testlog.Logger(t, log.LvlError)
return NewLogStore(query, client, log), client
}
func TestLogStore_NewLogStore_NotSubscribed(t *testing.T) {
logStore, _ := newLogStore(t)
require.False(t, logStore.Subscribed())
}
func TestLogStore_NewLogStore_EmptyLogs(t *testing.T) {
logStore, _ := newLogStore(t)
require.Empty(t, logStore.GetLogs())
require.Empty(t, logStore.GetLogByBlockHash(common.Hash{}))
}
func TestLogStore_Subscribe_EstablishesSubscription(t *testing.T) {
logStore, client := newLogStore(t)
defer logStore.Quit()
require.Equal(t, 0, client.subcount)
require.False(t, logStore.Subscribed())
require.NoError(t, logStore.Subscribe(context.Background()))
require.True(t, logStore.Subscribed())
require.Equal(t, 1, client.subcount)
}
func TestLogStore_Subscribe_ReceivesLogs(t *testing.T) {
logStore, client := newLogStore(t)
defer logStore.Quit()
require.NoError(t, logStore.Subscribe(context.Background()))
mockLog := types.Log{
BlockHash: common.HexToHash("0x1"),
}
client.logs <- mockLog
timeout, tCancel := context.WithTimeout(context.Background(), 30*time.Second)
defer tCancel()
err := e2eutils.WaitFor(timeout, 500*time.Millisecond, func() (bool, error) {
result := logStore.GetLogByBlockHash(mockLog.BlockHash)
return result[0].BlockHash == mockLog.BlockHash, nil
})
require.NoError(t, err)
}
func TestLogStore_Subscribe_SubscriptionErrors(t *testing.T) {
logStore, client := newLogStore(t)
defer logStore.Quit()
require.NoError(t, logStore.Subscribe(context.Background()))
client.sub.errorChan <- ErrTestError
timeout, tCancel := context.WithTimeout(context.Background(), 30*time.Second)
defer tCancel()
err := e2eutils.WaitFor(timeout, 500*time.Millisecond, func() (bool, error) {
subcount := client.subcount == 2
started := logStore.subscription.Started()
return subcount && started, nil
})
require.NoError(t, err)
}
func TestLogStore_Subscribe_NoClient_Panics(t *testing.T) {
require.Panics(t, func() {
logStore, _ := newErrorLogStore(t, nil)
_ = logStore.Subscribe(context.Background())
})
}
func TestLogStore_Subscribe_ErrorSubscribing(t *testing.T) {
logStore, _ := newErrorLogStore(t, &errLogStoreClient{})
require.False(t, logStore.Subscribed())
require.EqualError(t, logStore.Subscribe(context.Background()), ErrTestError.Error())
}
func TestLogStore_Quit_ResetsSubscription(t *testing.T) {
logStore, _ := newLogStore(t)
require.False(t, logStore.Subscribed())
require.NoError(t, logStore.Subscribe(context.Background()))
require.True(t, logStore.Subscribed())
logStore.Quit()
require.False(t, logStore.Subscribed())
}
func TestLogStore_Quit_NoSubscription_Panics(t *testing.T) {
require.Panics(t, func() {
logStore, _ := newErrorLogStore(t, nil)
logStore.Quit()
})
}
package challenger
import (
"errors"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
)
var ErrMissingEvent = errors.New("missing event")
// BuildOutputLogFilter creates a filter query for the L2OutputOracle contract.
//
// The `OutputProposed` event is encoded as:
// 0: bytes32 indexed outputRoot,
// 1: uint256 indexed l2OutputIndex,
// 2: uint256 indexed l2BlockNumber,
// 3: uint256 l1Timestamp
func BuildOutputLogFilter(l2ooABI *abi.ABI) (ethereum.FilterQuery, error) {
// Get the L2OutputOracle contract `OutputProposed` event
event := l2ooABI.Events["OutputProposed"]
// Sanity check that the `OutputProposed` event is defined
if event.ID == (common.Hash{}) {
return ethereum.FilterQuery{}, ErrMissingEvent
}
query := ethereum.FilterQuery{
Topics: [][]common.Hash{
{event.ID},
},
}
return query, nil
}
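A usage sketch for the filter above; besides feeding a `Subscription` (as `NewOracleSubscription` does), the query can also be used for one-shot historical lookups against any `ethereum.LogFilterer`. The function name is illustrative and a `core/types` import is assumed in addition to those in this file.
// Sketch only: build the OutputProposed filter and fetch matching historical logs.
func fetchOutputLogs(ctx context.Context, client ethereum.LogFilterer, l2ooABI *abi.ABI) ([]types.Log, error) {
	query, err := BuildOutputLogFilter(l2ooABI)
	if err != nil {
		return nil, err
	}
	// Only Topics is populated; callers may narrow the query further
	// (FromBlock, ToBlock, Addresses) before filtering.
	return client.FilterLogs(ctx, query)
}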
package challenger
import (
"testing"
"github.com/stretchr/testify/require"
eth "github.com/ethereum/go-ethereum"
abi "github.com/ethereum/go-ethereum/accounts/abi"
common "github.com/ethereum/go-ethereum/common"
)
// TestBuildOutputLogFilter_Succeeds tests that the Output
// Log Filter is built correctly.
func TestBuildOutputLogFilter_Succeeds(t *testing.T) {
// Create a mock event id
event := abi.Event{
ID: [32]byte{0x01},
}
filterQuery := eth.FilterQuery{
Topics: [][]common.Hash{
{event.ID},
},
}
// Mock the ABI
l2ooABI := abi.ABI{
Events: map[string]abi.Event{
"OutputProposed": event,
},
}
// Build the filter
query, err := BuildOutputLogFilter(&l2ooABI)
require.Equal(t, filterQuery, query)
require.NoError(t, err)
}
// TestBuildOutputLogFilter_Fails tests that the Output
// Log Filter fails when the event definition is missing.
func TestBuildOutputLogFilter_Fails(t *testing.T) {
// Mock the ABI
l2ooABI := abi.ABI{
Events: map[string]abi.Event{},
}
// Build the filter
_, err := BuildOutputLogFilter(&l2ooABI)
require.Error(t, err)
require.ErrorIs(t, err, ErrMissingEvent)
}
package challenger
import (
"context"
"errors"
"math/big"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-node/eth"
)
var (
// supportedL2OutputVersion is the version of the L2 output that the challenger supports.
supportedL2OutputVersion = eth.Bytes32{}
// ErrInvalidBlockNumber is returned when the block number of the output does not match the expected block number.
ErrInvalidBlockNumber = errors.New("invalid block number")
// ErrUnsupportedL2OOVersion is returned when the output version is not supported.
ErrUnsupportedL2OOVersion = errors.New("unsupported l2oo version")
// ErrInvalidOutputLogTopic is returned when the output log topic is invalid.
ErrInvalidOutputLogTopic = errors.New("invalid output log topic")
// ErrInvalidOutputTopicLength is returned when the output log topic length is invalid.
ErrInvalidOutputTopicLength = errors.New("invalid output log topic length")
)
// ParseOutputLog parses a log from the L2OutputOracle contract.
func (c *Challenger) ParseOutputLog(log *types.Log) (*bindings.TypesOutputProposal, error) {
// Check the length of log topics
if len(log.Topics) != 4 {
return nil, ErrInvalidOutputTopicLength
}
// Validate the first topic is the output log topic
if log.Topics[0] != c.l2ooABI.Events["OutputProposed"].ID {
return nil, ErrInvalidOutputLogTopic
}
l2BlockNumber := new(big.Int).SetBytes(log.Topics[3][:])
expected := log.Topics[1]
return &bindings.TypesOutputProposal{
L2BlockNumber: l2BlockNumber,
OutputRoot: eth.Bytes32(expected),
}, nil
}
// ValidateOutput checks that a given output matches the one served by a trusted rollup node RPC.
// It returns whether the output is correct, the output root fetched from the node, and an error, if any.
func (c *Challenger) ValidateOutput(ctx context.Context, proposal bindings.TypesOutputProposal) (bool, eth.Bytes32, error) {
// Fetch the output from the rollup node
ctx, cancel := context.WithTimeout(ctx, c.networkTimeout)
defer cancel()
output, err := c.rollupClient.OutputAtBlock(ctx, proposal.L2BlockNumber.Uint64())
if err != nil {
c.log.Error("Failed to fetch output", "blockNum", proposal.L2BlockNumber, "err", err)
return false, eth.Bytes32{}, err
}
// Compare the output root to the expected output root
equalRoots, err := c.compareOutputRoots(output, proposal)
if err != nil {
return false, eth.Bytes32{}, err
}
return equalRoots, output.OutputRoot, nil
}
// compareOutputRoots compares the output root fetched from the rollup node against the expected output proposal, checking the output version and block number first.
func (c *Challenger) compareOutputRoots(received *eth.OutputResponse, expected bindings.TypesOutputProposal) (bool, error) {
if received.Version != supportedL2OutputVersion {
c.log.Error("Unsupported l2 output version", "version", received.Version)
return false, ErrUnsupportedL2OOVersion
}
if received.BlockRef.Number != expected.L2BlockNumber.Uint64() {
c.log.Error("Invalid blockNumber", "expected", expected.L2BlockNumber, "actual", received.BlockRef.Number)
return false, ErrInvalidBlockNumber
}
return received.OutputRoot == expected.OutputRoot, nil
}
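// Illustrative sketch, not part of this change: a hypothetical helper named
// handleOutputLog showing how ParseOutputLog and ValidateOutput fit together once an
// OutputProposed log arrives. It is written as if it lived in package challenger so it
// can use the challenger's own logger.
package challenger

import (
	"context"

	"github.com/ethereum/go-ethereum/core/types"
)

func (c *Challenger) handleOutputLog(ctx context.Context, l types.Log) error {
	proposal, err := c.ParseOutputLog(&l)
	if err != nil {
		// Logs that are not well-formed OutputProposed events are skipped, not fatal.
		c.log.Warn("Ignoring unparsable output log", "err", err)
		return nil
	}
	valid, fetched, err := c.ValidateOutput(ctx, *proposal)
	if err != nil {
		return err
	}
	if !valid {
		c.log.Warn("Output root mismatch",
			"l2BlockNumber", proposal.L2BlockNumber,
			"proposed", proposal.OutputRoot,
			"fetched", fetched)
	}
	return nil
}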
package challenger
import (
"context"
"errors"
"math/big"
"testing"
"time"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-challenger/metrics"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/testlog"
)
func TestChallenger_OutputProposed_Signature(t *testing.T) {
computed := crypto.Keccak256Hash([]byte("OutputProposed(bytes32,uint256,uint256,uint256)"))
challenger := newTestChallenger(t, eth.OutputResponse{}, true)
expected := challenger.l2ooABI.Events["OutputProposed"].ID
require.Equal(t, expected, computed)
}
func TestParseOutputLog_Succeeds(t *testing.T) {
challenger := newTestChallenger(t, eth.OutputResponse{}, true)
expectedBlockNumber := big.NewInt(0x04)
expectedOutputRoot := [32]byte{0x02}
logTopic := challenger.l2ooABI.Events["OutputProposed"].ID
log := types.Log{
Topics: []common.Hash{logTopic, common.Hash(expectedOutputRoot), {0x03}, common.BigToHash(expectedBlockNumber)},
}
outputProposal, err := challenger.ParseOutputLog(&log)
require.NoError(t, err)
require.Equal(t, expectedBlockNumber, outputProposal.L2BlockNumber)
require.Equal(t, expectedOutputRoot, outputProposal.OutputRoot)
}
func TestParseOutputLog_WrongLogTopic_Errors(t *testing.T) {
challenger := newTestChallenger(t, eth.OutputResponse{}, true)
_, err := challenger.ParseOutputLog(&types.Log{
Topics: []common.Hash{{0x01}, {0x02}, {0x03}, {0x04}},
})
require.ErrorIs(t, err, ErrInvalidOutputLogTopic)
}
func TestParseOutputLog_WrongTopicLength_Errors(t *testing.T) {
challenger := newTestChallenger(t, eth.OutputResponse{}, true)
logTopic := challenger.l2ooABI.Events["OutputProposed"].ID
_, err := challenger.ParseOutputLog(&types.Log{
Topics: []common.Hash{logTopic, {0x02}, {0x03}},
})
require.ErrorIs(t, err, ErrInvalidOutputTopicLength)
}
func TestChallenger_ValidateOutput_RollupClientErrors(t *testing.T) {
output := eth.OutputResponse{
Version: supportedL2OutputVersion,
OutputRoot: eth.Bytes32{},
BlockRef: eth.L2BlockRef{},
}
challenger := newTestChallenger(t, output, true)
checked := bindings.TypesOutputProposal{
L2BlockNumber: big.NewInt(0),
OutputRoot: output.OutputRoot,
}
valid, received, err := challenger.ValidateOutput(context.Background(), checked)
require.False(t, valid)
require.Equal(t, eth.Bytes32{}, received)
require.ErrorIs(t, err, mockOutputApiError)
}
func TestChallenger_ValidateOutput_ErrorsWithWrongVersion(t *testing.T) {
output := eth.OutputResponse{
Version: eth.Bytes32{0x01},
OutputRoot: eth.Bytes32{0x01},
BlockRef: eth.L2BlockRef{},
}
challenger := newTestChallenger(t, output, false)
checked := bindings.TypesOutputProposal{
L2BlockNumber: big.NewInt(0),
OutputRoot: output.OutputRoot,
}
valid, received, err := challenger.ValidateOutput(context.Background(), checked)
require.False(t, valid)
require.Equal(t, eth.Bytes32{}, received)
require.ErrorIs(t, err, ErrUnsupportedL2OOVersion)
}
func TestChallenger_ValidateOutput_ErrorsInvalidBlockNumber(t *testing.T) {
output := eth.OutputResponse{
Version: supportedL2OutputVersion,
OutputRoot: eth.Bytes32{0x01},
BlockRef: eth.L2BlockRef{},
}
challenger := newTestChallenger(t, output, false)
checked := bindings.TypesOutputProposal{
L2BlockNumber: big.NewInt(1),
OutputRoot: output.OutputRoot,
}
valid, received, err := challenger.ValidateOutput(context.Background(), checked)
require.False(t, valid)
require.Equal(t, eth.Bytes32{}, received)
require.ErrorIs(t, err, ErrInvalidBlockNumber)
}
func TestOutput_ValidateOutput(t *testing.T) {
output := eth.OutputResponse{
Version: eth.Bytes32{},
OutputRoot: eth.Bytes32{},
BlockRef: eth.L2BlockRef{},
}
challenger := newTestChallenger(t, output, false)
checked := bindings.TypesOutputProposal{
L2BlockNumber: big.NewInt(0),
OutputRoot: output.OutputRoot,
}
valid, expected, err := challenger.ValidateOutput(context.Background(), checked)
require.Equal(t, expected, output.OutputRoot)
require.True(t, valid)
require.NoError(t, err)
}
func TestChallenger_CompareOutputRoots_ErrorsWithDifferentRoots(t *testing.T) {
output := eth.OutputResponse{
Version: eth.Bytes32{0xFF, 0xFF, 0xFF, 0xFF},
OutputRoot: eth.Bytes32{},
BlockRef: eth.L2BlockRef{},
}
challenger := newTestChallenger(t, output, false)
checked := bindings.TypesOutputProposal{
L2BlockNumber: big.NewInt(0),
OutputRoot: output.OutputRoot,
}
valid, err := challenger.compareOutputRoots(&output, checked)
require.False(t, valid)
require.ErrorIs(t, err, ErrUnsupportedL2OOVersion)
}
func TestChallenger_CompareOutputRoots_ErrInvalidBlockNumber(t *testing.T) {
output := eth.OutputResponse{
Version: supportedL2OutputVersion,
OutputRoot: eth.Bytes32{},
BlockRef: eth.L2BlockRef{},
}
challenger := newTestChallenger(t, output, false)
checked := bindings.TypesOutputProposal{
L2BlockNumber: big.NewInt(1),
OutputRoot: output.OutputRoot,
}
valid, err := challenger.compareOutputRoots(&output, checked)
require.False(t, valid)
require.ErrorIs(t, err, ErrInvalidBlockNumber)
}
func TestChallenger_CompareOutputRoots_Succeeds(t *testing.T) {
output := eth.OutputResponse{
Version: supportedL2OutputVersion,
OutputRoot: eth.Bytes32{},
BlockRef: eth.L2BlockRef{},
}
challenger := newTestChallenger(t, output, false)
checked := bindings.TypesOutputProposal{
L2BlockNumber: big.NewInt(0),
OutputRoot: output.OutputRoot,
}
valid, err := challenger.compareOutputRoots(&output, checked)
require.True(t, valid)
require.NoError(t, err)
checked = bindings.TypesOutputProposal{
L2BlockNumber: big.NewInt(0),
OutputRoot: eth.Bytes32{0x01},
}
valid, err = challenger.compareOutputRoots(&output, checked)
require.False(t, valid)
require.NoError(t, err)
}
func newTestChallenger(t *testing.T, output eth.OutputResponse, errors bool) *Challenger {
outputApi := newMockOutputApi(output, errors)
log := testlog.Logger(t, log.LvlError)
metr := metrics.NewMetrics("test")
parsedL2oo, err := bindings.L2OutputOracleMetaData.GetAbi()
require.NoError(t, err)
challenger := Challenger{
rollupClient: outputApi,
log: log,
metr: metr,
networkTimeout: time.Duration(5) * time.Second,
l2ooABI: parsedL2oo,
}
return &challenger
}
var mockOutputApiError = errors.New("mock output api error")
type mockOutputApi struct {
mock.Mock
expected eth.OutputResponse
errors bool
}
func newMockOutputApi(output eth.OutputResponse, errors bool) *mockOutputApi {
return &mockOutputApi{
expected: output,
errors: errors,
}
}
func (m *mockOutputApi) OutputAtBlock(ctx context.Context, blockNumber uint64) (*eth.OutputResponse, error) {
if m.errors {
return nil, mockOutputApiError
}
return &m.expected, nil
}
package challenger
import (
"context"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// SubscriptionId is a unique subscription ID.
type SubscriptionId uint64
// Increment returns the next subscription ID.
func (s *SubscriptionId) Increment() SubscriptionId {
*s++
return *s
}
// Subscription wraps an [ethereum.Subscription] so it can be re-opened with the same query and client.
type Subscription struct {
// The subscription ID
id SubscriptionId
// The current subscription
sub ethereum.Subscription
// If the subscription is started
started bool
// The query used to create the subscription
query ethereum.FilterQuery
// The log channel
logs chan types.Log
// The quit channel
quit chan struct{}
// Filter client used to open the log subscription
client ethereum.LogFilterer
// Logger
log log.Logger
}
// NewSubscription creates a new subscription.
func NewSubscription(query ethereum.FilterQuery, client ethereum.LogFilterer, log log.Logger) *Subscription {
return &Subscription{
id: SubscriptionId(0),
sub: nil,
started: false,
query: query,
logs: make(chan types.Log),
quit: make(chan struct{}),
client: client,
log: log,
}
}
// ID returns the subscription ID.
func (s *Subscription) ID() SubscriptionId {
return s.id
}
// Started returns true if the subscription has started.
func (s *Subscription) Started() bool {
return s.started
}
// Logs returns the log channel.
func (s *Subscription) Logs() <-chan types.Log {
return s.logs
}
// Subscribe opens the log subscription using the configured client and filter query.
func (s *Subscription) Subscribe() error {
	s.log.Info("Subscribing to logs", "query", s.query.Topics, "id", s.id)
sub, err := s.client.SubscribeFilterLogs(context.Background(), s.query, s.logs)
if err != nil {
s.log.Error("failed to subscribe to logs", "err", err)
return err
}
s.sub = sub
s.started = true
return nil
}
// Quit closes the subscription.
func (s *Subscription) Quit() {
s.log.Info("Quitting subscription", "id", s.id)
s.sub.Unsubscribe()
s.quit <- struct{}{}
s.started = false
s.log.Info("Quit subscription", "id", s.id)
}
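// Illustrative sketch, not part of this change: because a Subscription keeps its filter
// query and client, a consumer can re-open it in place after the underlying
// ethereum.Subscription reports an error. The helper name resubscribeOnError is
// hypothetical, and the code assumes it lives in package challenger so it can read the
// unexported fields.
package challenger

import "github.com/ethereum/go-ethereum/core/types"

func resubscribeOnError(s *Subscription, handle func(types.Log)) error {
	if !s.Started() {
		if err := s.Subscribe(); err != nil {
			return err
		}
	}
	for {
		select {
		case l := <-s.Logs():
			handle(l)
		case <-s.quit:
			// Quit() was called by the owner of the subscription; stop consuming.
			return nil
		case err, ok := <-s.sub.Err():
			if !ok {
				// The Err channel closes on Unsubscribe (e.g. during Quit); wait for quit.
				continue
			}
			s.log.Warn("Subscription errored, resubscribing", "id", s.ID(), "err", err)
			if err := s.Subscribe(); err != nil {
				return err
			}
		}
	}
}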
package challenger
import (
"context"
"errors"
"math"
"testing"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/stretchr/testify/require"
)
type mockLogFilterClient struct{}
func (m mockLogFilterClient) FilterLogs(context.Context, ethereum.FilterQuery) ([]types.Log, error) {
panic("this should not be called by the Subscription.Subscribe method")
}
func (m mockLogFilterClient) SubscribeFilterLogs(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error) {
return nil, nil
}
func newSubscription(t *testing.T, client *mockLogFilterClient) (*Subscription, *mockLogFilterClient) {
query := ethereum.FilterQuery{}
log := testlog.Logger(t, log.LvlError)
return NewSubscription(query, client, log), client
}
func FuzzSubscriptionId_Increment(f *testing.F) {
maxUint64 := uint64(math.MaxUint64)
f.Fuzz(func(t *testing.T, id uint64) {
if id >= maxUint64 {
t.Skip("skipping due to overflow")
} else {
subId := SubscriptionId(id)
require.Equal(t, subId.Increment(), SubscriptionId(id+1))
}
})
}
func TestSubscription_Subscribe_NilClient_Panics(t *testing.T) {
defer func() {
if recover() == nil {
t.Error("expected nil client to panic")
}
}()
subscription, _ := newSubscription(t, nil)
require.NoError(t, subscription.Subscribe())
}
func TestSubscription_Subscribe(t *testing.T) {
subscription, _ := newSubscription(t, &mockLogFilterClient{})
require.NoError(t, subscription.Subscribe())
require.True(t, subscription.Started())
}
var ErrSubscriptionFailed = errors.New("failed to subscribe to logs")
type errLogFilterClient struct{}
func (m errLogFilterClient) FilterLogs(context.Context, ethereum.FilterQuery) ([]types.Log, error) {
panic("this should not be called by the Subscription.Subscribe method")
}
func (m errLogFilterClient) SubscribeFilterLogs(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error) {
return nil, ErrSubscriptionFailed
}
func TestSubscription_Subscribe_SubscriptionErrors(t *testing.T) {
query := ethereum.FilterQuery{}
log := testlog.Logger(t, log.LvlError)
subscription := Subscription{
query: query,
client: errLogFilterClient{},
log: log,
}
require.EqualError(t, subscription.Subscribe(), ErrSubscriptionFailed.Error())
}
package main
import (
"context"
"fmt"
_ "net/http/pprof"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/metrics"
"github.com/ethereum-optimism/optimism/op-service/opio"
"github.com/ethereum-optimism/optimism/op-challenger/challenger"
"github.com/ethereum-optimism/optimism/op-service/pprof"
"github.com/ethereum-optimism/optimism/op-service/rpc"
)
// Main is the entrypoint into the Challenger. This method executes the
// service and blocks until the service exits.
func Main(logger log.Logger, version string, cfg *config.Config) error {
if err := cfg.Check(); err != nil {
return fmt.Errorf("invalid config: %w", err)
}
m := metrics.NewMetrics("default")
logger.Info("Initializing Challenger")
service, err := challenger.NewChallenger(*cfg, logger, m)
if err != nil {
logger.Error("Unable to create the Challenger", "error", err)
return err
}
logger.Info("Starting Challenger")
ctx, cancel := context.WithCancel(context.Background())
if err := service.Start(); err != nil {
cancel()
logger.Error("Unable to start Challenger", "error", err)
return err
}
defer service.Stop()
logger.Info("Challenger started")
pprofConfig := cfg.PprofConfig
if pprofConfig.Enabled {
logger.Info("starting pprof", "addr", pprofConfig.ListenAddr, "port", pprofConfig.ListenPort)
go func() {
if err := pprof.ListenAndServe(ctx, pprofConfig.ListenAddr, pprofConfig.ListenPort); err != nil {
logger.Error("error starting pprof", "err", err)
}
}()
}
metricsCfg := cfg.MetricsConfig
if metricsCfg.Enabled {
log.Info("starting metrics server", "addr", metricsCfg.ListenAddr, "port", metricsCfg.ListenPort)
go func() {
if err := m.Serve(ctx, metricsCfg.ListenAddr, metricsCfg.ListenPort); err != nil {
logger.Error("error starting metrics server", err)
}
}()
m.StartBalanceMetrics(ctx, logger, service.Client(), service.From())
}
rpcCfg := cfg.RPCConfig
server := rpc.NewServer(rpcCfg.ListenAddr, rpcCfg.ListenPort, version, rpc.WithLogger(logger))
if err := server.Start(); err != nil {
cancel()
return fmt.Errorf("error starting RPC server: %w", err)
}
m.RecordInfo(version)
m.RecordUp()
opio.BlockOnInterrupts()
cancel()
return nil
}
package main
import (
"fmt"
"os"
log "github.com/ethereum/go-ethereum/log"
cli "github.com/urfave/cli/v2"
watch "github.com/ethereum-optimism/optimism/op-challenger/cmd/watch"
config "github.com/ethereum-optimism/optimism/op-challenger/config"
flags "github.com/ethereum-optimism/optimism/op-challenger/flags"
version "github.com/ethereum-optimism/optimism/op-challenger/version"
op_challenger "github.com/ethereum-optimism/optimism/op-challenger"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/flags"
"github.com/ethereum-optimism/optimism/op-challenger/version"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
)
......@@ -36,46 +36,43 @@ var VersionWithMeta = func() string {
func main() {
args := os.Args
if err := run(args, Main); err != nil {
if err := run(args, op_challenger.Main); err != nil {
log.Crit("Application failed", "err", err)
}
}
type ConfigAction func(log log.Logger, version string, config *config.Config) error
type ConfigAction func(log log.Logger, config *config.Config) error
// run parses the supplied args to create a config.Config instance, sets up logging,
// and then calls the supplied ConfigAction.
// This allows testing the translation from CLI arguments to a Config.
func run(args []string, action ConfigAction) error {
// Set up logger with a default INFO level in case we fail to parse flags,
// otherwise the final critical log won't show what the parsing error was.
oplog.SetupDefaults()
app := cli.NewApp()
app.Version = VersionWithMeta
app.Flags = flags.Flags
app.Name = "op-challenger"
app.Usage = "Challenge Invalid L2OutputOracle Outputs"
app.Description = "A modular op-stack challenge agent for dispute games written in golang."
app.Usage = "Challenge outputs"
app.Description = "Ensures that on chain outputs are correct."
app.Action = func(ctx *cli.Context) error {
logger, err := config.LoggerFromCLI(ctx)
logger, err := setupLogging(ctx)
if err != nil {
return err
}
logger.Info("Starting challenger", "version", VersionWithMeta)
logger.Info("Starting op-challenger", "version", VersionWithMeta)
cfg, err := config.NewConfigFromCLI(ctx)
if err != nil {
return err
}
return action(logger, VersionWithMeta, cfg)
}
app.Commands = []*cli.Command{
{
Name: "watch",
Subcommands: watch.Subcommands,
},
return action(logger, cfg)
}
return app.Run(args)
}
func setupLogging(ctx *cli.Context) (log.Logger, error) {
logCfg := oplog.ReadCLIConfig(ctx)
if err := logCfg.Check(); err != nil {
return nil, fmt.Errorf("log config error: %w", err)
}
logger := oplog.NewLogger(logCfg)
return logger, nil
}
package main
import (
"testing"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
var (
l1EthRpc = "http://example.com:8545"
gameAddressValue = "0xaa00000000000000000000000000000000000000"
alphabetTrace = "abcdefghijz"
)
func TestLogLevel(t *testing.T) {
t.Run("RejectInvalid", func(t *testing.T) {
verifyArgsInvalid(t, "unknown level: foo", addRequiredArgs("--log.level=foo"))
})
for _, lvl := range []string{"trace", "debug", "info", "error", "crit"} {
lvl := lvl
t.Run("AcceptValid_"+lvl, func(t *testing.T) {
logger, _, err := runWithArgs(addRequiredArgs("--log.level", lvl))
require.NoError(t, err)
require.NotNil(t, logger)
})
}
}
func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs())
defaultCfg := config.NewConfig(l1EthRpc, common.HexToAddress(gameAddressValue), alphabetTrace)
require.Equal(t, defaultCfg, cfg)
}
func TestDefaultConfigIsValid(t *testing.T) {
cfg := config.NewConfig(l1EthRpc, common.HexToAddress(gameAddressValue), alphabetTrace)
require.NoError(t, cfg.Check())
}
func TestL1ETHRPCAddress(t *testing.T) {
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag l1-eth-rpc is required", addRequiredArgsExcept("--l1-eth-rpc"))
})
t.Run("Valid", func(t *testing.T) {
url := "http://example.com:8888"
cfg := configForArgs(t, addRequiredArgsExcept("--l1-eth-rpc", "--l1-eth-rpc="+url))
require.Equal(t, url, cfg.L1EthRpc)
require.Equal(t, url, cfg.TxMgrConfig.L1RPCURL)
})
}
func TestAlphabetTrace(t *testing.T) {
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag alphabet is required", addRequiredArgsExcept("--alphabet"))
})
t.Run("Valid", func(t *testing.T) {
value := "abcde"
cfg := configForArgs(t, addRequiredArgsExcept("--alphabet", "--alphabet="+value))
require.Equal(t, value, cfg.AlphabetTrace)
})
}
func TestGameAddress(t *testing.T) {
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag game-address is required", addRequiredArgsExcept("--game-address"))
})
t.Run("Valid", func(t *testing.T) {
addr := common.Address{0xbb, 0xcc, 0xdd}
cfg := configForArgs(t, addRequiredArgsExcept("--game-address", "--game-address="+addr.Hex()))
require.Equal(t, addr, cfg.GameAddress)
})
t.Run("Invalid", func(t *testing.T) {
verifyArgsInvalid(t, "invalid address: foo", addRequiredArgsExcept("--game-address", "--game-address=foo"))
})
}
func TestTxManagerFlagsSupported(t *testing.T) {
// Not a comprehensive list of flags, just enough to sanity check the txmgr.CLIFlags were defined
cfg := configForArgs(t, addRequiredArgs("--"+txmgr.NumConfirmationsFlagName, "7"))
require.Equal(t, uint64(7), cfg.TxMgrConfig.NumConfirmations)
}
func verifyArgsInvalid(t *testing.T, messageContains string, cliArgs []string) {
_, _, err := runWithArgs(cliArgs)
require.ErrorContains(t, err, messageContains)
}
func configForArgs(t *testing.T, cliArgs []string) config.Config {
_, cfg, err := runWithArgs(cliArgs)
require.NoError(t, err)
return cfg
}
func runWithArgs(cliArgs []string) (log.Logger, config.Config, error) {
cfg := new(config.Config)
var logger log.Logger
	fullArgs := append([]string{"op-challenger"}, cliArgs...)
err := run(fullArgs, func(log log.Logger, config *config.Config) error {
logger = log
cfg = config
return nil
})
return logger, *cfg, err
}
func addRequiredArgs(args ...string) []string {
req := requiredArgs()
combined := toArgList(req)
return append(combined, args...)
}
func addRequiredArgsExcept(name string, optionalArgs ...string) []string {
req := requiredArgs()
delete(req, name)
return append(toArgList(req), optionalArgs...)
}
func requiredArgs() map[string]string {
return map[string]string{
"--l1-eth-rpc": l1EthRpc,
"--game-address": gameAddressValue,
"--alphabet": alphabetTrace,
}
}
func toArgList(req map[string]string) []string {
var combined []string
for name, value := range req {
combined = append(combined, name)
combined = append(combined, value)
}
return combined
}
package watch
import (
"github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-challenger/config"
)
var Subcommands = cli.Commands{
{
Name: "oracle",
Usage: "Watches the L2OutputOracle for new output proposals",
Action: func(ctx *cli.Context) error {
logger, err := config.LoggerFromCLI(ctx)
if err != nil {
return err
}
logger.Info("Listening for new output proposals")
cfg, err := config.NewConfigFromCLI(ctx)
if err != nil {
return err
}
return Oracle(logger, cfg)
},
},
{
Name: "factory",
Usage: "Watches the DisputeGameFactory for new dispute games",
Action: func(ctx *cli.Context) error {
logger, err := config.LoggerFromCLI(ctx)
if err != nil {
return err
}
logger.Info("Listening for new dispute games")
cfg, err := config.NewConfigFromCLI(ctx)
if err != nil {
return err
}
return Factory(logger, cfg)
},
},
}
package watch
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-challenger/challenger"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/metrics"
)
// Factory listens to the DisputeGameFactory for newly created dispute games.
func Factory(logger log.Logger, cfg *config.Config) error {
if err := cfg.Check(); err != nil {
return fmt.Errorf("invalid config: %w", err)
}
m := metrics.NewMetrics("default")
service, err := challenger.NewChallenger(*cfg, logger, m)
if err != nil {
logger.Error("Unable to create the Challenger", "error", err)
return err
}
logger.Info("Listening for DisputeGameCreated events from the DisputeGameFactory contract", "dgf", cfg.DGFAddress.String())
subscription, err := service.NewFactorySubscription()
if err != nil {
logger.Error("Unable to create the subscription", "error", err)
return err
}
err = subscription.Subscribe()
if err != nil {
logger.Error("Unable to subscribe to the DisputeGameFactory contract", "error", err)
return err
}
defer subscription.Quit()
interruptChannel := make(chan os.Signal, 1)
signal.Notify(interruptChannel, []os.Signal{
os.Interrupt,
os.Kill,
syscall.SIGTERM,
syscall.SIGQUIT,
}...)
for {
select {
case log := <-subscription.Logs():
logger.Info("Received log", "log", log)
case <-interruptChannel:
logger.Info("Received interrupt signal, exiting...")
}
}
}
package watch
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-challenger/challenger"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/metrics"
)
// Oracle listens to the L2OutputOracle for newly proposed outputs.
func Oracle(logger log.Logger, cfg *config.Config) error {
if err := cfg.Check(); err != nil {
return fmt.Errorf("invalid config: %w", err)
}
m := metrics.NewMetrics("default")
service, err := challenger.NewChallenger(*cfg, logger, m)
if err != nil {
logger.Error("Unable to create the Challenger", "error", err)
return err
}
logger.Info("Listening for OutputProposed events from the L2OutputOracle contract", "l2oo", cfg.L2OOAddress.String())
subscription, err := service.NewOracleSubscription()
if err != nil {
logger.Error("Unable to create the subscription", "error", err)
return err
}
err = subscription.Subscribe()
if err != nil {
logger.Error("Unable to subscribe to the L2OutputOracle contract", "error", err)
return err
}
defer subscription.Quit()
interruptChannel := make(chan os.Signal, 1)
signal.Notify(interruptChannel, []os.Signal{
os.Interrupt,
os.Kill,
syscall.SIGTERM,
syscall.SIGQUIT,
}...)
for {
select {
case log := <-subscription.Logs():
logger.Info("Received log", "log", log)
case <-interruptChannel:
logger.Info("Received interrupt signal, exiting...")
}
}
}
......@@ -2,106 +2,53 @@ package config
import (
"errors"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/urfave/cli/v2"
flags "github.com/ethereum-optimism/optimism/op-challenger/flags"
"github.com/ethereum-optimism/optimism/op-challenger/flags"
opservice "github.com/ethereum-optimism/optimism/op-service"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
txmgr "github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
)
var (
ErrMissingL1EthRPC = errors.New("missing l1 eth rpc url")
ErrMissingRollupRpc = errors.New("missing rollup rpc url")
ErrMissingL2OOAddress = errors.New("missing l2 output oracle contract address")
ErrMissingDGFAddress = errors.New("missing dispute game factory contract address")
ErrInvalidNetworkTimeout = errors.New("invalid network timeout")
ErrMissingTxMgrConfig = errors.New("missing tx manager config")
ErrMissingRPCConfig = errors.New("missing rpc config")
ErrMissingLogConfig = errors.New("missing log config")
ErrMissingMetricsConfig = errors.New("missing metrics config")
ErrMissingPprofConfig = errors.New("missing pprof config")
ErrMissingL1EthRPC = errors.New("missing l1 eth rpc url")
ErrMissingGameAddress = errors.New("missing game address")
ErrMissingAlphabetTrace = errors.New("missing alphabet trace")
)
// Config is a well-typed config that is parsed from the CLI params.
// This also contains config options for auxiliary services.
// It is used to initialize the challenger.
type Config struct {
// L1EthRpc is the HTTP provider URL for L1.
L1EthRpc string
// RollupRpc is the HTTP provider URL for the rollup node.
RollupRpc string
// L2OOAddress is the L2OutputOracle contract address.
L2OOAddress common.Address
// DGFAddress is the DisputeGameFactory contract address.
DGFAddress common.Address
// NetworkTimeout is the timeout for network requests.
NetworkTimeout time.Duration
	L1EthRpc      string         // L1 RPC URL
	GameAddress   common.Address // Address of the fault dispute game contract
	AlphabetTrace string         // Alphabet trace string for the AlphabetTraceProvider
TxMgrConfig *txmgr.CLIConfig
RPCConfig *oprpc.CLIConfig
LogConfig *oplog.CLIConfig
MetricsConfig *opmetrics.CLIConfig
TxMgrConfig txmgr.CLIConfig
}
PprofConfig *oppprof.CLIConfig
func NewConfig(l1EthRpc string,
GameAddress common.Address,
AlphabetTrace string,
) Config {
return Config{
L1EthRpc: l1EthRpc,
GameAddress: GameAddress,
AlphabetTrace: AlphabetTrace,
TxMgrConfig: txmgr.NewCLIConfig(l1EthRpc),
}
}
func (c Config) Check() error {
if c.L1EthRpc == "" {
return ErrMissingL1EthRPC
}
if c.RollupRpc == "" {
return ErrMissingRollupRpc
}
if c.L2OOAddress == (common.Address{}) {
return ErrMissingL2OOAddress
}
if c.DGFAddress == (common.Address{}) {
return ErrMissingDGFAddress
}
if c.NetworkTimeout == 0 {
return ErrInvalidNetworkTimeout
}
if c.TxMgrConfig == nil {
return ErrMissingTxMgrConfig
}
if c.RPCConfig == nil {
return ErrMissingRPCConfig
}
if c.LogConfig == nil {
return ErrMissingLogConfig
}
if c.MetricsConfig == nil {
return ErrMissingMetricsConfig
if c.GameAddress == (common.Address{}) {
return ErrMissingGameAddress
}
if c.PprofConfig == nil {
return ErrMissingPprofConfig
}
if err := c.RPCConfig.Check(); err != nil {
return err
}
if err := c.LogConfig.Check(); err != nil {
return err
}
if err := c.MetricsConfig.Check(); err != nil {
return err
}
if err := c.PprofConfig.Check(); err != nil {
return err
if c.AlphabetTrace == "" {
return ErrMissingAlphabetTrace
}
if err := c.TxMgrConfig.Check(); err != nil {
return err
......@@ -109,72 +56,23 @@ func (c Config) Check() error {
return nil
}
// NewConfig creates a Config with all optional values set to the CLI default value
func NewConfig(
L1EthRpc string,
RollupRpc string,
L2OOAddress common.Address,
DGFAddress common.Address,
NetworkTimeout time.Duration,
TxMgrConfig *txmgr.CLIConfig,
RPCConfig *oprpc.CLIConfig,
LogConfig *oplog.CLIConfig,
MetricsConfig *opmetrics.CLIConfig,
PprofConfig *oppprof.CLIConfig,
) *Config {
return &Config{
L1EthRpc: L1EthRpc,
RollupRpc: RollupRpc,
L2OOAddress: L2OOAddress,
DGFAddress: DGFAddress,
NetworkTimeout: NetworkTimeout,
TxMgrConfig: TxMgrConfig,
RPCConfig: RPCConfig,
LogConfig: LogConfig,
MetricsConfig: MetricsConfig,
PprofConfig: PprofConfig,
}
}
// NewConfigFromCLI parses the Config from the provided flags or environment variables.
func NewConfigFromCLI(ctx *cli.Context) (*Config, error) {
if err := flags.CheckRequired(ctx); err != nil {
return nil, err
}
l1EthRpc := ctx.String(flags.L1EthRpcFlag.Name)
if l1EthRpc == "" {
return nil, ErrMissingL1EthRPC
}
rollupRpc := ctx.String(flags.RollupRpcFlag.Name)
if rollupRpc == "" {
return nil, ErrMissingRollupRpc
}
l2ooAddress, err := opservice.ParseAddress(ctx.String(flags.L2OOAddressFlag.Name))
if err != nil {
return nil, ErrMissingL2OOAddress
}
dgfAddress, err := opservice.ParseAddress(ctx.String(flags.DGFAddressFlag.Name))
if err != nil {
return nil, ErrMissingDGFAddress
return nil, err
}
txMgrConfig := txmgr.ReadCLIConfig(ctx)
rpcConfig := oprpc.ReadCLIConfig(ctx)
logConfig := oplog.ReadCLIConfig(ctx)
metricsConfig := opmetrics.ReadCLIConfig(ctx)
pprofConfig := oppprof.ReadCLIConfig(ctx)
return &Config{
// Required Flags
L1EthRpc: l1EthRpc,
RollupRpc: rollupRpc,
L2OOAddress: l2ooAddress,
DGFAddress: dgfAddress,
TxMgrConfig: &txMgrConfig,
// Optional Flags
RPCConfig: &rpcConfig,
LogConfig: &logConfig,
MetricsConfig: &metricsConfig,
PprofConfig: &pprofConfig,
L1EthRpc: ctx.String(flags.L1EthRpcFlag.Name),
GameAddress: dgfAddress,
AlphabetTrace: ctx.String(flags.AlphabetFlag.Name),
TxMgrConfig: txMgrConfig,
}, nil
}
......@@ -2,70 +2,20 @@ package config
import (
"testing"
"time"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
txmgr "github.com/ethereum-optimism/optimism/op-service/txmgr"
client "github.com/ethereum-optimism/optimism/op-signer/client"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
var (
validL1EthRpc = "http://localhost:8545"
validRollupRpc = "http://localhost:8546"
validL2OOAddress = common.HexToAddress("0x7bdd3b028C4796eF0EAf07d11394d0d9d8c24139")
validDGFAddress = common.HexToAddress("0x7bdd3b028C4796eF0EAf07d11394d0d9d8c24139")
validNetworkTimeout = time.Duration(5) * time.Second
validL1EthRpc = "http://localhost:8545"
validGameAddress = common.HexToAddress("0x7bdd3b028C4796eF0EAf07d11394d0d9d8c24139")
validAlphabetTrace = "abcdefgh"
)
var validTxMgrConfig = txmgr.CLIConfig{
L1RPCURL: validL1EthRpc,
NumConfirmations: 10,
NetworkTimeout: validNetworkTimeout,
ResubmissionTimeout: time.Duration(5) * time.Second,
ReceiptQueryInterval: time.Duration(5) * time.Second,
TxNotInMempoolTimeout: time.Duration(5) * time.Second,
SafeAbortNonceTooLowCount: 10,
SignerCLIConfig: client.CLIConfig{
Endpoint: "http://localhost:8547",
// First address for the default hardhat mnemonic
Address: "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266",
},
}
var validRPCConfig = oprpc.CLIConfig{
ListenAddr: "localhost:8547",
ListenPort: 8547,
}
var validLogConfig = oplog.DefaultCLIConfig()
var validMetricsConfig = opmetrics.CLIConfig{
Enabled: false,
}
var validPprofConfig = oppprof.CLIConfig{
Enabled: false,
}
func validConfig() *Config {
cfg := NewConfig(
validL1EthRpc,
validRollupRpc,
validL2OOAddress,
validDGFAddress,
validNetworkTimeout,
&validTxMgrConfig,
&validRPCConfig,
&validLogConfig,
&validMetricsConfig,
&validPprofConfig,
)
func validConfig() Config {
cfg := NewConfig(validL1EthRpc, validGameAddress, validAlphabetTrace)
return cfg
}
......@@ -76,16 +26,9 @@ func TestValidConfigIsValid(t *testing.T) {
}
func TestTxMgrConfig(t *testing.T) {
t.Run("Required", func(t *testing.T) {
config := validConfig()
config.TxMgrConfig = nil
err := config.Check()
require.ErrorIs(t, err, ErrMissingTxMgrConfig)
})
t.Run("Invalid", func(t *testing.T) {
config := validConfig()
config.TxMgrConfig = &txmgr.CLIConfig{}
config.TxMgrConfig = txmgr.CLIConfig{}
err := config.Check()
require.Equal(t, err.Error(), "must provide a L1 RPC url")
})
......@@ -98,30 +41,16 @@ func TestL1EthRpcRequired(t *testing.T) {
require.ErrorIs(t, err, ErrMissingL1EthRPC)
}
func TestRollupRpcRequired(t *testing.T) {
config := validConfig()
config.RollupRpc = ""
err := config.Check()
require.ErrorIs(t, err, ErrMissingRollupRpc)
}
func TestL2OOAddressRequired(t *testing.T) {
config := validConfig()
config.L2OOAddress = common.Address{}
err := config.Check()
require.ErrorIs(t, err, ErrMissingL2OOAddress)
}
func TestDGFAddressRequired(t *testing.T) {
func TestGameAddressRequired(t *testing.T) {
config := validConfig()
config.DGFAddress = common.Address{}
config.GameAddress = common.Address{}
err := config.Check()
require.ErrorIs(t, err, ErrMissingDGFAddress)
require.ErrorIs(t, err, ErrMissingGameAddress)
}
func TestNetworkTimeoutRequired(t *testing.T) {
func TestAlphabetTraceRequired(t *testing.T) {
config := validConfig()
config.NetworkTimeout = 0
config.AlphabetTrace = ""
err := config.Check()
require.ErrorIs(t, err, ErrInvalidNetworkTimeout)
require.ErrorIs(t, err, ErrMissingAlphabetTrace)
}
package config
import (
"fmt"
log "github.com/ethereum/go-ethereum/log"
cli "github.com/urfave/cli/v2"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
)
// LoggerFromCLI creates a [log.Logger] from the
// supplied [cli.Context].
func LoggerFromCLI(ctx *cli.Context) (log.Logger, error) {
logCfg := oplog.ReadCLIConfig(ctx)
if err := logCfg.Check(); err != nil {
return nil, fmt.Errorf("log config error: %w", err)
}
logger := oplog.NewLogger(logCfg)
return logger, nil
}
......@@ -18,27 +18,35 @@ type AlphabetProvider struct {
func NewAlphabetProvider(state string, depth uint64) *AlphabetProvider {
return &AlphabetProvider{
state: strings.Split(state, ""),
maxLen: (1 << depth),
maxLen: uint64(1 << depth),
}
}
// Get returns the claim value at the given index in the trace.
func (ap *AlphabetProvider) Get(i uint64) (common.Hash, error) {
// GetPreimage returns the preimage for the given hash.
func (ap *AlphabetProvider) GetPreimage(i uint64) ([]byte, error) {
// The index cannot be larger than the maximum index as computed by the depth.
if i >= ap.maxLen {
return common.Hash{}, ErrIndexTooLarge
return []byte{}, ErrIndexTooLarge
}
// If the index is beyond the provided state, we repeat the final element out to the maximum length.
if i >= uint64(len(ap.state)) {
return ap.Get(uint64(len(ap.state)) - 1)
return ap.GetPreimage(uint64(len(ap.state)) - 1)
}
return buildAlphabetClaimBytes(i, ap.state[i]), nil
}
// Get returns the claim value at the given index in the trace.
func (ap *AlphabetProvider) Get(i uint64) (common.Hash, error) {
claimBytes, err := ap.GetPreimage(i)
if err != nil {
return common.Hash{}, err
}
return ap.ComputeAlphabetClaim(i), nil
return common.BytesToHash(claimBytes), nil
}
// ComputeAlphabetClaim computes the claim for the given index in the trace.
func (ap *AlphabetProvider) ComputeAlphabetClaim(i uint64) common.Hash {
concatenated := append(IndexToBytes(i), []byte(ap.state[i])...)
return common.BytesToHash(concatenated)
// buildAlphabetClaimBytes constructs the claim bytes for the index and state item.
func buildAlphabetClaimBytes(i uint64, letter string) []byte {
return append(IndexToBytes(i), []byte(letter)...)
}
// IndexToBytes converts an index to a big-endian byte slice
......
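// Illustrative sketch, not part of this change: what the alphabet trace encodes. Each
// position i is IndexToBytes(i) concatenated with the i-th letter of the state, and
// positions past the end of the state repeat the final letter. The package name (fault)
// and the helper name printAlphabetTrace are assumptions for illustration only.
package fault

import "fmt"

func printAlphabetTrace() error {
	ap := NewAlphabetProvider("abc", 2) // depth 2 => at most 4 positions
	for i := uint64(0); i < 4; i++ {
		claim, err := ap.Get(i)
		if err != nil {
			return err
		}
		// Positions 0..2 cover "a", "b" and "c"; position 3 repeats "c".
		fmt.Printf("trace[%d] = %s\n", i, claim.Hex())
	}
	return nil
}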
......@@ -50,17 +50,26 @@ func FuzzIndexToBytes(f *testing.F) {
})
}
// TestComputeAlphabetClaim tests the ComputeAlphabetClaim function.
func TestComputeAlphabetClaim(t *testing.T) {
// TestGetPreimage_Succeeds tests the GetPreimage function
// returns the correct pre-image for a index.
func TestGetPreimage_Succeeds(t *testing.T) {
ap := NewAlphabetProvider("abc", 2)
claim := ap.ComputeAlphabetClaim(0)
concatenated := append(IndexToBytes(0), []byte("a")...)
expected := common.BytesToHash(concatenated)
require.Equal(t, expected, claim)
expected := append(IndexToBytes(uint64(0)), []byte("a")...)
retrieved, err := ap.GetPreimage(uint64(0))
require.NoError(t, err)
require.Equal(t, expected, retrieved)
}
// TestGetPreimage_TooLargeIndex_Fails tests the GetPreimage
// function errors if the index is too large.
func TestGetPreimage_TooLargeIndex_Fails(t *testing.T) {
ap := NewAlphabetProvider("abc", 2)
_, err := ap.GetPreimage(4)
require.ErrorIs(t, err, ErrIndexTooLarge)
}
// TestGet tests the Get function.
func TestGet(t *testing.T) {
// TestGet_Succeeds tests the Get function.
func TestGet_Succeeds(t *testing.T) {
ap := NewAlphabetProvider("abc", 2)
claim, err := ap.Get(0)
require.NoError(t, err)
......
......@@ -34,6 +34,10 @@ func (o *Orchestrator) Respond(_ context.Context, response Claim) error {
return nil
}
func (o *Orchestrator) Step(ctx context.Context, stepData StepCallData) error {
return nil
}
func (o *Orchestrator) Start() {
for i := 0; i < len(o.agents); i++ {
go runAgent(&o.agents[i], o.outputChs[i])
......
......@@ -74,17 +74,19 @@ func (r *faultResponder) BuildTx(ctx context.Context, response Claim) ([]byte, e
// Respond takes a [Claim] and executes the response action.
func (r *faultResponder) Respond(ctx context.Context, response Claim) error {
// Build the transaction data.
txData, err := r.BuildTx(ctx, response)
if err != nil {
return err
}
return r.sendTxAndWait(ctx, txData)
}
// Send the transaction through the [txmgr].
// sendTxAndWait sends a transaction through the [txmgr] and waits for a receipt.
// This sets the tx GasLimit to 0, performing gas estimation online through the [txmgr].
func (r *faultResponder) sendTxAndWait(ctx context.Context, txData []byte) error {
receipt, err := r.txMgr.Send(ctx, txmgr.TxCandidate{
To: &r.fdgAddr,
TxData: txData,
// Setting GasLimit to 0 performs gas estimation online through the [txmgr].
To: &r.fdgAddr,
TxData: txData,
GasLimit: 0,
})
if err != nil {
......@@ -95,6 +97,26 @@ func (r *faultResponder) Respond(ctx context.Context, response Claim) error {
} else {
r.log.Info("responder tx successfully published", "tx_hash", receipt.TxHash)
}
return nil
}
// buildStepTxData creates the transaction data for the step function.
func (r *faultResponder) buildStepTxData(stepData StepCallData) ([]byte, error) {
return r.fdgAbi.Pack(
"step",
big.NewInt(int64(stepData.StateIndex)),
big.NewInt(int64(stepData.ClaimIndex)),
stepData.IsAttack,
stepData.StateData,
stepData.Proof,
)
}
// Step accepts step data and executes the step on the fault dispute game contract.
func (r *faultResponder) Step(ctx context.Context, stepData StepCallData) error {
txData, err := r.buildStepTxData(stepData)
if err != nil {
return err
}
return r.sendTxAndWait(ctx, txData)
}
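// Illustrative sketch, not part of this change: invoking Step through the Responder
// interface. The package name (fault), the helper name submitStep and the zero
// StateIndex are assumptions; the state data and proof would be produced by the trace
// provider and are passed in here untouched.
package fault

import "context"

func submitStep(ctx context.Context, r Responder, claimIndex uint64, attack bool, stateData, proof []byte) error {
	return r.Step(ctx, StepCallData{
		StateIndex: 0, // placeholder index of the claimed state being stepped against
		ClaimIndex: claimIndex,
		IsAttack:   attack,
		StateData:  stateData,
		Proof:      proof,
	})
}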
......@@ -40,6 +40,10 @@ func (m *mockTxManager) Send(ctx context.Context, candidate txmgr.TxCandidate) (
), nil
}
func (m *mockTxManager) BlockNumber(ctx context.Context) (uint64, error) {
panic("not implemented")
}
func (m *mockTxManager) From() common.Address {
return m.from
}
......
......@@ -25,19 +25,58 @@ func NewSolver(gameDepth int, traceProvider TraceProvider) *Solver {
func (s *Solver) NextMove(claim Claim) (*Claim, error) {
// Special case of the root claim
if claim.IsRoot() {
agree, err := s.agreeWithClaim(claim.ClaimData)
if err != nil {
return nil, err
}
// Attack the root claim if we do not agree with it
if !agree {
return s.attack(claim)
} else {
return nil, nil
}
return s.handleRoot(claim)
}
return s.handleMiddle(claim)
}
type StepData struct {
LeafClaim Claim
StateClaim Claim
IsAttack bool
}
// AttemptStep determines what step should occur for a given leaf claim.
// An error will be returned if the claim is not at the max depth.
func (s *Solver) AttemptStep(claim Claim, state Game) (StepData, error) {
if claim.Depth() != s.gameDepth {
return StepData{}, errors.New("cannot step on non-leaf claims")
}
claimCorrect, err := s.agreeWithClaim(claim.ClaimData)
if err != nil {
return StepData{}, err
}
var selectorFn func(Claim) (Claim, error)
if claimCorrect {
selectorFn = state.PostStateClaim
} else {
selectorFn = state.PreStateClaim
}
stateClaim, err := selectorFn(claim)
if err != nil {
return StepData{}, err
}
return StepData{
LeafClaim: claim,
StateClaim: stateClaim,
IsAttack: claimCorrect,
}, nil
}
func (s *Solver) handleRoot(claim Claim) (*Claim, error) {
agree, err := s.agreeWithClaim(claim.ClaimData)
if err != nil {
return nil, err
}
// Attack the root claim if we do not agree with it
if !agree {
return s.attack(claim)
} else {
return nil, nil
}
}
func (s *Solver) handleMiddle(claim Claim) (*Claim, error) {
parentCorrect, err := s.agreeWithClaim(claim.Parent)
if err != nil {
return nil, err
......
......@@ -78,3 +78,23 @@ func TestSolver_NextMove_Opponent(t *testing.T) {
require.Equal(t, test.response, res.ClaimData)
}
}
func TestAttemptStep(t *testing.T) {
maxDepth := 3
canonicalProvider := NewAlphabetProvider("abcdefgh", uint64(maxDepth))
solver := NewSolver(maxDepth, canonicalProvider)
root, top, middle, bottom := createTestClaims()
g := NewGameState(root, testMaxDepth)
require.NoError(t, g.Put(top))
require.NoError(t, g.Put(middle))
require.NoError(t, g.Put(bottom))
step, err := solver.AttemptStep(bottom, g)
require.NoError(t, err)
require.Equal(t, bottom, step.LeafClaim)
require.Equal(t, middle, step.StateClaim)
require.True(t, step.IsAttack)
_, err = solver.AttemptStep(middle, g)
require.Error(t, err)
}
......@@ -12,11 +12,21 @@ var (
ErrIndexTooLarge = errors.New("index is larger than the maximum index")
)
// StepCallData encapsulates the data needed to perform a step.
type StepCallData struct {
StateIndex uint64
ClaimIndex uint64
IsAttack bool
StateData []byte
Proof []byte
}
// TraceProvider is a generic way to get a claim value at a specific
// step in the trace.
// The [AlphabetProvider] is a minimal implementation of this interface.
type TraceProvider interface {
Get(i uint64) (common.Hash, error)
GetPreimage(i uint64) ([]byte, error)
}
// ClaimData is the core of a claim. It must be unique inside a specific game.
......@@ -60,4 +70,5 @@ func (c *Claim) DefendsParent() bool {
// For full op-challenger this means executing the transaction on chain.
type Responder interface {
Respond(ctx context.Context, response Claim) error
Step(ctx context.Context, stepData StepCallData) error
}
......@@ -7,9 +7,6 @@ import (
opservice "github.com/ethereum-optimism/optimism/op-service"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
txmgr "github.com/ethereum-optimism/optimism/op-service/txmgr"
)
......@@ -26,39 +23,31 @@ var (
Usage: "HTTP provider URL for L1.",
EnvVars: prefixEnvVars("L1_ETH_RPC"),
}
RollupRpcFlag = &cli.StringFlag{
Name: "rollup-rpc",
Usage: "HTTP provider URL for the rollup node.",
EnvVars: prefixEnvVars("ROLLUP_RPC"),
}
L2OOAddressFlag = &cli.StringFlag{
Name: "l2oo-address",
Usage: "Address of the L2OutputOracle contract.",
EnvVars: prefixEnvVars("L2OO_ADDRESS"),
}
DGFAddressFlag = &cli.StringFlag{
Name: "dgf-address",
Usage: "Address of the DisputeGameFactory contract.",
EnvVars: prefixEnvVars("DGF_ADDRESS"),
Name: "game-address",
Usage: "Address of the Fault Game contract.",
EnvVars: prefixEnvVars("GAME_ADDRESS"),
}
AlphabetFlag = &cli.StringFlag{
Name: "alphabet",
Usage: "Alphabet Trace (temporary)",
EnvVars: prefixEnvVars("ALPHABET"),
}
// Optional Flags
)
// requiredFlags are checked by [CheckRequired]
var requiredFlags = []cli.Flag{
L1EthRpcFlag,
RollupRpcFlag,
L2OOAddressFlag,
DGFAddressFlag,
AlphabetFlag,
}
// optionalFlags is a list of unchecked cli flags
var optionalFlags = []cli.Flag{}
func init() {
optionalFlags = append(optionalFlags, oprpc.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, oplog.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, opmetrics.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, oppprof.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, txmgr.CLIFlags(envVarPrefix)...)
Flags = append(requiredFlags, optionalFlags...)
......
package metrics
import (
"context"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics"
)
const Namespace = "op_challenger"
type Metricer interface {
RecordInfo(version string)
RecordUp()
// Records all L1 and L2 block events
opmetrics.RefMetricer
// Record Tx metrics
txmetrics.TxMetricer
RecordValidOutput(l2ref eth.L2BlockRef)
RecordInvalidOutput(l2ref eth.L2BlockRef)
RecordOutputChallenged(l2ref eth.L2BlockRef)
}
type Metrics struct {
ns string
registry *prometheus.Registry
factory opmetrics.Factory
opmetrics.RefMetrics
txmetrics.TxMetrics
info prometheus.GaugeVec
up prometheus.Gauge
}
var _ Metricer = (*Metrics)(nil)
func NewMetrics(procName string) *Metrics {
if procName == "" {
procName = "default"
}
ns := Namespace + "_" + procName
registry := opmetrics.NewRegistry()
factory := opmetrics.With(registry)
return &Metrics{
ns: ns,
registry: registry,
factory: factory,
RefMetrics: opmetrics.MakeRefMetrics(ns, factory),
TxMetrics: txmetrics.MakeTxMetrics(ns, factory),
info: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "info",
Help: "Pseudo-metric tracking version and config info",
}, []string{
"version",
}),
up: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "up",
Help: "1 if the op-proposer has finished starting up",
}),
}
}
func (m *Metrics) Serve(ctx context.Context, host string, port int) error {
return opmetrics.ListenAndServe(ctx, m.registry, host, port)
}
func (m *Metrics) StartBalanceMetrics(ctx context.Context,
l log.Logger, client *ethclient.Client, account common.Address) {
opmetrics.LaunchBalanceMetrics(ctx, l, m.registry, m.ns, client, account)
}
// RecordInfo sets a pseudo-metric that contains versioning and
// config info for the op-challenger.
func (m *Metrics) RecordInfo(version string) {
m.info.WithLabelValues(version).Set(1)
}
// RecordUp sets the up metric to 1.
func (m *Metrics) RecordUp() {
prometheus.MustRegister()
m.up.Set(1)
}
const (
ValidOutput = "valid_output"
InvalidOutput = "invalid_output"
OutputChallenged = "output_challenged"
)
// RecordValidOutput should be called when a valid output is found
func (m *Metrics) RecordValidOutput(l2ref eth.L2BlockRef) {
m.RecordL2Ref(ValidOutput, l2ref)
}
// RecordInvalidOutput should be called when an invalid output is found
func (m *Metrics) RecordInvalidOutput(l2ref eth.L2BlockRef) {
m.RecordL2Ref(InvalidOutput, l2ref)
}
// RecordOutputChallenged should be called when an output is challenged
func (m *Metrics) RecordOutputChallenged(l2ref eth.L2BlockRef) {
m.RecordL2Ref(OutputChallenged, l2ref)
}
func (m *Metrics) Document() []opmetrics.DocumentedMetric {
return m.factory.Document()
}
package metrics
import (
"github.com/ethereum-optimism/optimism/op-node/eth"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics"
)
type noopMetrics struct {
opmetrics.NoopRefMetrics
txmetrics.NoopTxMetrics
}
var NoopMetrics Metricer = new(noopMetrics)
func (*noopMetrics) RecordInfo(version string) {}
func (*noopMetrics) RecordUp() {}
func (*noopMetrics) RecordValidOutput(l2ref eth.L2BlockRef) {}
func (*noopMetrics) RecordInvalidOutput(l2ref eth.L2BlockRef) {}
func (*noopMetrics) RecordOutputChallenged(l2ref eth.L2BlockRef) {}
package types
import (
"fmt"
"github.com/ethereum-optimism/optimism/op-service/enum"
)
// GameType is the type of dispute game
type GameType uint8
// DefaultGameType returns the default dispute game type.
func DefaultGameType() GameType {
return AttestationDisputeGameType
}
// String returns the string value of a dispute game type.
func (g GameType) String() string {
return DisputeGameTypes[g]
}
const (
// AttestationDisputeGameType is the uint8 enum value for the attestation dispute game
AttestationDisputeGameType GameType = iota
// FaultDisputeGameType is the uint8 enum value for the fault dispute game
FaultDisputeGameType
// ValidityDisputeGameType is the uint8 enum value for the validity dispute game
ValidityDisputeGameType
)
// DisputeGameTypes is a list of dispute game types.
var DisputeGameTypes = []string{"attestation", "fault", "validity"}
// Valid returns true if the game type is within the valid range.
func (g GameType) Valid() bool {
return g >= AttestationDisputeGameType && g <= ValidityDisputeGameType
}
// DisputeGameType is a custom flag type for dispute game type.
type DisputeGameType struct {
Enum []enum.Stringered
selected GameType
}
// NewDisputeGameType returns a new dispute game type.
func NewDisputeGameType() *DisputeGameType {
return &DisputeGameType{
Enum: enum.StringeredList(DisputeGameTypes),
selected: DefaultGameType(),
}
}
// Set sets the dispute game type.
func (d *DisputeGameType) Set(value string) error {
for i, enum := range d.Enum {
if enum.String() == value {
d.selected = GameType(i)
return nil
}
}
return fmt.Errorf("allowed values are %s", enum.EnumString(d.Enum))
}
// String returns the selected dispute game type.
func (d DisputeGameType) String() string {
return d.selected.String()
}
// Type maps the [DisputeGameType] string value to a [GameType] enum value.
func (d DisputeGameType) Type() GameType {
return d.selected
}
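// Illustrative sketch, not part of this change: a *DisputeGameType satisfies the
// urfave/cli Generic interface (Set/String), so it can back a --game-type flag directly.
// The flag name, the import path for this types package and the main wiring below are
// assumptions for illustration only.
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"

	"github.com/ethereum-optimism/optimism/op-challenger/types"
)

func main() {
	gameType := types.NewDisputeGameType()
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		&cli.GenericFlag{
			Name:  "game-type",
			Usage: "Dispute game type: attestation, fault or validity",
			Value: gameType,
		},
	}
	app.Action = func(_ *cli.Context) error {
		// Defaults to the attestation game unless --game-type was supplied.
		fmt.Println("selected game type:", gameType.Type())
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}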
package types
import (
"testing"
"github.com/stretchr/testify/require"
)
var (
disputeGames = []struct {
name string
gameType GameType
}{
{"attestation", AttestationDisputeGameType},
{"fault", FaultDisputeGameType},
{"validity", ValidityDisputeGameType},
}
)
// TestDefaultGameType tests that DefaultGameType returns the attestation dispute game type.
func TestDefaultGameType(t *testing.T) {
defaultGameType := disputeGames[0].gameType
require.Equal(t, defaultGameType, DefaultGameType())
}
// TestGameType_Valid tests the Valid function with valid inputs.
func TestGameType_Valid(t *testing.T) {
for _, game := range disputeGames {
require.True(t, game.gameType.Valid())
}
}
// TestGameType_Invalid tests the Valid function with an invalid input.
func TestGameType_Invalid(t *testing.T) {
invalid := disputeGames[len(disputeGames)-1].gameType + 1
require.False(t, GameType(invalid).Valid())
}
// FuzzGameType_Invalid checks that out-of-range game types are reported as
// invalid, and in-range game types as valid, by the [Valid] function.
func FuzzGameType_Invalid(f *testing.F) {
maxCount := len(DisputeGameTypes)
f.Fuzz(func(t *testing.T, number uint8) {
if number >= uint8(maxCount) {
require.False(t, GameType(number).Valid())
} else {
require.True(t, GameType(number).Valid())
}
})
}
// TestGameType_Default tests the default value of the DisputeGameType.
func TestGameType_Default(t *testing.T) {
d := NewDisputeGameType()
require.Equal(t, DefaultGameType(), d.selected)
require.Equal(t, DefaultGameType(), d.Type())
}
// TestGameType_String tests the Set and String function on the DisputeGameType.
func TestGameType_String(t *testing.T) {
for _, dg := range disputeGames {
t.Run(dg.name, func(t *testing.T) {
d := NewDisputeGameType()
require.Equal(t, dg.name, dg.gameType.String())
require.NoError(t, d.Set(dg.name))
require.Equal(t, dg.name, d.String())
require.Equal(t, dg.gameType, d.selected)
})
}
}
// TestGameType_Type tests the Type function on the DisputeGameType.
func TestGameType_Type(t *testing.T) {
for _, dg := range disputeGames {
t.Run(dg.name, func(t *testing.T) {
d := NewDisputeGameType()
require.Equal(t, dg.name, dg.gameType.String())
require.NoError(t, d.Set(dg.name))
require.Equal(t, dg.gameType, d.Type())
require.Equal(t, dg.gameType, d.selected)
})
}
}
......@@ -44,6 +44,9 @@ type fakeTxMgr struct {
func (f fakeTxMgr) From() common.Address {
return f.from
}
func (f fakeTxMgr) BlockNumber(_ context.Context) (uint64, error) {
panic("unimplemented")
}
func (f fakeTxMgr) Send(_ context.Context, _ txmgr.TxCandidate) (*types.Receipt, error) {
panic("unimplemented")
}
......
......@@ -39,7 +39,7 @@ type ChannelBank struct {
fetcher L1Fetcher
}
var _ ResetableStage = (*ChannelBank)(nil)
var _ ResettableStage = (*ChannelBank)(nil)
// NewChannelBank creates a ChannelBank, which should be Reset(origin) before use.
func NewChannelBank(log log.Logger, cfg *rollup.Config, prev NextFrameProvider, fetcher L1Fetcher) *ChannelBank {
......@@ -74,7 +74,7 @@ func (cb *ChannelBank) prune() {
}
}
// IngestData adds new L1 data to the channel bank.
// IngestFrame adds new L1 data to the channel bank.
// Read() should be called repeatedly first, until everything has been read, before adding new data.
func (cb *ChannelBank) IngestFrame(f Frame) {
origin := cb.Origin()
......
......@@ -10,11 +10,10 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth"
)
// Channel In Reader reads a batch from the channel
// ChannelInReader reads a batch from the channel
// This does decompression and limits the max RLP size
// This is a pure function from the channel, but each channel (or channel fragment)
// must be tagged with an L1 inclusion block to be passed to the batch queue.
type ChannelInReader struct {
log log.Logger
......@@ -25,7 +24,7 @@ type ChannelInReader struct {
metrics Metrics
}
var _ ResetableStage = (*ChannelInReader)(nil)
var _ ResettableStage = (*ChannelInReader)(nil)
// NewChannelInReader creates a ChannelInReader, which should be Reset(origin) before use.
func NewChannelInReader(log log.Logger, prev *ChannelBank, metrics Metrics) *ChannelInReader {
......
......@@ -682,7 +682,7 @@ func (eq *EngineQueue) resetBuildingState() {
eq.buildingSafe = false
}
// ResetStep Walks the L2 chain backwards until it finds an L2 block whose L1 origin is canonical.
// Reset walks the L2 chain backwards until it finds an L2 block whose L1 origin is canonical.
// The unsafe head is set to the head of the L2 chain, unless the existing safe head is not canonical.
func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.SystemConfig) error {
result, err := sync.FindL2Heads(ctx, eq.cfg, eq.l1Fetcher, eq.engine, eq.log)
......
......@@ -28,7 +28,7 @@ type L1Retrieval struct {
datas DataIter
}
var _ ResetableStage = (*L1Retrieval)(nil)
var _ ResettableStage = (*L1Retrieval)(nil)
func NewL1Retrieval(log log.Logger, dataSrc DataAvailabilitySource, prev NextBlockProvider) *L1Retrieval {
return &L1Retrieval{
......@@ -69,7 +69,7 @@ func (l1r *L1Retrieval) NextData(ctx context.Context) ([]byte, error) {
}
}
// ResetStep re-initializes the L1 Retrieval stage to block of it's `next` progress.
// Reset re-initializes the L1 Retrieval stage to the block of its `next` progress.
// Note that we open up `l1r.datas` here because it is required to maintain the
// internal invariants that later propagate up the derivation pipeline.
func (l1r *L1Retrieval) Reset(ctx context.Context, base eth.L1BlockRef, sysCfg eth.SystemConfig) error {
......
......@@ -31,7 +31,7 @@ type L1Traversal struct {
cfg *rollup.Config
}
var _ ResetableStage = (*L1Traversal)(nil)
var _ ResettableStage = (*L1Traversal)(nil)
func NewL1Traversal(log log.Logger, cfg *rollup.Config, l1Blocks L1BlockRefByNumberFetcher) *L1Traversal {
return &L1Traversal{
......
......@@ -23,7 +23,7 @@ type L2BlockRefSource interface {
Transactions() types.Transactions
}
// PayloadToBlockRef extracts the essential L2BlockRef information from an L2
// L2BlockToBlockRef extracts the essential L2BlockRef information from an L2
// block ref source, falling back to genesis information if necessary.
func L2BlockToBlockRef(block L2BlockRefSource, genesis *rollup.Genesis) (eth.L2BlockRef, error) {
hash, number := block.Hash(), block.NumberU64()
......
......@@ -34,7 +34,7 @@ type ResettableEngineControl interface {
Reset()
}
type ResetableStage interface {
type ResettableStage interface {
// Reset resets a pull stage. `base` refers to the L1 Block Reference to reset to, with corresponding configuration.
Reset(ctx context.Context, base eth.L1BlockRef, baseCfg eth.SystemConfig) error
}
......@@ -65,7 +65,7 @@ type DerivationPipeline struct {
// Index of the stage that is currently being reset.
// >= len(stages) if no additional resetting is required
resetting int
stages []ResetableStage
stages []ResettableStage
// Special stages to keep track of
traversal *L1Traversal
......@@ -94,7 +94,7 @@ func NewDerivationPipeline(log log.Logger, cfg *rollup.Config, l1Fetcher L1Fetch
// Reset from engine queue then up from L1 Traversal. The stages do not talk to each other during
// the reset, but after the engine queue, this is the order in which the stages could talk to each other.
// Note: The engine queue stage is the only reset that can fail.
stages := []ResetableStage{eng, l1Traversal, l1Src, frameQueue, bank, chInReader, batchQueue, attributesQueue}
stages := []ResettableStage{eng, l1Traversal, l1Src, frameQueue, bank, chInReader, batchQueue, attributesQueue}
return &DerivationPipeline{
log: log,
......
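The reset described in the comment above walks the `stages` slice in order, with only the engine-queue reset able to fail. The real pipeline advances one stage per step via the `resetting` index; the sketch below collapses that walk into a single loop for clarity, and the `base`/`baseCfg` arguments are placeholders rather than the exact op-node plumbing:

```go
// resetStages is a sketch of the reset walk, not the op-node implementation:
// reset each stage in order, stopping early if one fails (in practice only the
// engine-queue reset can return an error).
func resetStages(ctx context.Context, stages []ResettableStage, base eth.L1BlockRef, baseCfg eth.SystemConfig) error {
	for i, stage := range stages {
		if err := stage.Reset(ctx, base, baseCfg); err != nil {
			return fmt.Errorf("reset of stage %d failed: %w", i, err)
		}
	}
	return nil
}
```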
......@@ -286,7 +286,7 @@ func (c *Config) Description(l2Chains map[string]string) string {
return banner
}
// Description outputs a banner describing the important parts of rollup configuration in a log format.
// LogDescription outputs a banner describing the important parts of rollup configuration in a log format.
// Optionally provide a mapping of L2 chain IDs to network names to label the L2 chain with if not unknown.
// The config should be config.Check()-ed before creating a description.
func (c *Config) LogDescription(log log.Logger, l2Chains map[string]string) {
......
......@@ -61,6 +61,11 @@ func (d *Driver) Step(ctx context.Context) error {
}
d.logger.Debug("Data is lacking")
return nil
} else if errors.Is(err, derive.ErrTemporary) {
// While most temporary errors are due to failing requests for external data, which can't happen here,
// they may also be returned by other events, such as channels timing out, so they need to be handled
d.logger.Warn("Temporary error in derivation", "err", err)
return nil
} else if err != nil {
return fmt.Errorf("pipeline err: %w", err)
}
......
......@@ -23,7 +23,7 @@ func TestDerivationComplete(t *testing.T) {
func TestTemporaryError(t *testing.T) {
driver := createDriver(t, fmt.Errorf("whoopsie: %w", derive.ErrTemporary))
err := driver.Step(context.Background())
require.ErrorIs(t, err, derive.ErrTemporary)
require.NoError(t, err, "should allow derivation to continue after temporary error")
}
func TestNotEnoughDataError(t *testing.T) {
......
......@@ -259,6 +259,7 @@ func (l *L2OutputSubmitter) FetchNextOutputInfo(ctx context.Context) (*eth.Outpu
l.log.Error("proposer unable to get sync status", "err", err)
return nil, false, err
}
// Use either the finalized or safe head depending on the config. Finalized head is default & safer.
var currentBlockNumber *big.Int
if l.allowNonFinalized {
......@@ -268,14 +269,14 @@ func (l *L2OutputSubmitter) FetchNextOutputInfo(ctx context.Context) (*eth.Outpu
}
// Ensure that we do not submit a block in the future
if currentBlockNumber.Cmp(nextCheckpointBlock) < 0 {
l.log.Info("proposer submission interval has not elapsed", "currentBlockNumber", currentBlockNumber, "nextBlockNumber", nextCheckpointBlock)
l.log.Debug("proposer submission interval has not elapsed", "currentBlockNumber", currentBlockNumber, "nextBlockNumber", nextCheckpointBlock)
return nil, false, nil
}
return l.fetchOuput(ctx, nextCheckpointBlock)
return l.fetchOutput(ctx, nextCheckpointBlock)
}
func (l *L2OutputSubmitter) fetchOuput(ctx context.Context, block *big.Int) (*eth.OutputResponse, bool, error) {
func (l *L2OutputSubmitter) fetchOutput(ctx context.Context, block *big.Int) (*eth.OutputResponse, bool, error) {
ctx, cancel := context.WithTimeout(ctx, l.networkTimeout)
defer cancel()
output, err := l.rollupClient.OutputAtBlock(ctx, block.Uint64())
......@@ -319,8 +320,41 @@ func proposeL2OutputTxData(abi *abi.ABI, output *eth.OutputResponse) ([]byte, er
new(big.Int).SetUint64(output.Status.CurrentL1.Number))
}
// waitForL1Head waits until l1head advances beyond blockNum. This is used to make sure the
// proposal tx won't immediately fail when checking the l1 blockhash. Note that EstimateGas uses
// the "latest" state to execute the transaction by default, meaning that inside the call the head
// block is considered "pending" instead of committed. In the case where l1blocknum == l1head,
// blockhash(l1blocknum) will produce a value of 0 within EstimateGas, and the call will fail when
// the contract checks that l1blockhash matches blockhash(l1blocknum).
func (l *L2OutputSubmitter) waitForL1Head(ctx context.Context, blockNum uint64) error {
ticker := time.NewTicker(l.pollInterval)
defer ticker.Stop()
l1head, err := l.txMgr.BlockNumber(ctx)
if err != nil {
return err
}
for l1head <= blockNum {
l.log.Debug("waiting for l1 head > l1blocknum1+1", "l1head", l1head, "l1blocknum", blockNum)
select {
case <-ticker.C:
l1head, err = l.txMgr.BlockNumber(ctx)
if err != nil {
return err
}
break
case <-l.done:
return fmt.Errorf("L2OutputSubmitter is done()")
}
}
return nil
}
// sendTransaction creates & sends transactions through the underlying transaction manager.
func (l *L2OutputSubmitter) sendTransaction(ctx context.Context, output *eth.OutputResponse) error {
err := l.waitForL1Head(ctx, output.Status.HeadL1.Number+1)
if err != nil {
return err
}
data, err := l.ProposeL2OutputTxData(output)
if err != nil {
return err
......@@ -336,7 +370,10 @@ func (l *L2OutputSubmitter) sendTransaction(ctx context.Context, output *eth.Out
if receipt.Status == types.ReceiptStatusFailed {
l.log.Error("proposer tx successfully published but reverted", "tx_hash", receipt.TxHash)
} else {
l.log.Info("proposer tx successfully published", "tx_hash", receipt.TxHash)
l.log.Info("proposer tx successfully published",
"tx_hash", receipt.TxHash,
"l1blocknum", output.Status.CurrentL1.Number,
"l1blockhash", output.Status.CurrentL1.Hash)
}
return nil
}
......@@ -359,10 +396,13 @@ func (l *L2OutputSubmitter) loop() {
if !shouldPropose {
break
}
cCtx, cancel := context.WithTimeout(ctx, 10*time.Minute)
if err := l.sendTransaction(cCtx, output); err != nil {
l.log.Error("Failed to send proposal transaction", "err", err)
l.log.Error("Failed to send proposal transaction",
"err", err,
"l1blocknum", output.Status.CurrentL1.Number,
"l1blockhash", output.Status.CurrentL1.Hash,
"l1head", output.Status.HeadL1.Number)
cancel()
break
}
......
......@@ -23,6 +23,12 @@ func CLIFlags(envPrefix string) []cli.Flag {
return CLIFlagsWithFlagPrefix(envPrefix, "")
}
var (
defaultTLSCaCert = "tls/ca.crt"
defaultTLSCert = "tls/tls.crt"
defaultTLSKey = "tls/tls.key"
)
// CLIFlagsWithFlagPrefix returns flags with env var and cli flag prefixes
// Should be used for client TLS configs when different from server on the same process
func CLIFlagsWithFlagPrefix(envPrefix string, flagPrefix string) []cli.Flag {
......@@ -36,19 +42,19 @@ func CLIFlagsWithFlagPrefix(envPrefix string, flagPrefix string) []cli.Flag {
&cli.StringFlag{
Name: prefixFunc(TLSCaCertFlagName),
Usage: "tls ca cert path",
Value: "tls/ca.crt",
Value: defaultTLSCaCert,
EnvVars: prefixEnvVars("TLS_CA"),
},
&cli.StringFlag{
Name: prefixFunc(TLSCertFlagName),
Usage: "tls cert path",
Value: "tls/tls.crt",
Value: defaultTLSCert,
EnvVars: prefixEnvVars("TLS_CERT"),
},
&cli.StringFlag{
Name: prefixFunc(TLSKeyFlagName),
Usage: "tls key",
Value: "tls/tls.key",
Value: defaultTLSKey,
EnvVars: prefixEnvVars("TLS_KEY"),
},
}
......@@ -60,6 +66,14 @@ type CLIConfig struct {
TLSKey string
}
func NewCLIConfig() CLIConfig {
return CLIConfig{
TLSCaCert: defaultTLSCaCert,
TLSCert: defaultTLSCert,
TLSKey: defaultTLSKey,
}
}
func (c CLIConfig) Check() error {
if c.TLSEnabled() && (c.TLSCaCert == "" || c.TLSCert == "" || c.TLSKey == "") {
return errors.New("all tls flags must be set if at least one is set")
......
package tls
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
)
func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
cfg := configForArgs()
defaultCfg := NewCLIConfig()
require.Equal(t, defaultCfg, cfg)
}
func TestDefaultConfigIsValid(t *testing.T) {
err := NewCLIConfig().Check()
require.NoError(t, err)
}
func TestInvalidConfig(t *testing.T) {
tests := []struct {
name string
configChange func(config *CLIConfig)
}{
{"MissingCaCert", func(config *CLIConfig) {
config.TLSCaCert = ""
}},
{"MissingCert", func(config *CLIConfig) {
config.TLSCert = ""
}},
{"MissingKey", func(config *CLIConfig) {
config.TLSKey = ""
}},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
cfg := NewCLIConfig()
test.configChange(&cfg)
err := cfg.Check()
require.ErrorContains(t, err, "all tls flags must be set if at least one is set")
})
}
}
func configForArgs(args ...string) CLIConfig {
app := cli.NewApp()
app.Flags = CLIFlagsWithFlagPrefix("TEST_", "test")
app.Name = "test"
var config CLIConfig
app.Action = func(ctx *cli.Context) error {
config = ReadCLIConfigWithPrefix(ctx, "test")
return nil
}
_ = app.Run(args)
return config
}
......@@ -48,6 +48,16 @@ var (
}
)
var (
defaultNumConfirmations = uint64(10)
defaultSafeAbortNonceTooLowCount = uint64(3)
defaultResubmissionTimeout = 48 * time.Second
defaultNetworkTimeout = 2 * time.Second
defaultTxSendTimeout = 0 * time.Second
defaultTxNotInMempoolTimeout = 2 * time.Minute
defaultReceiptQueryInterval = 12 * time.Second
)
func CLIFlags(envPrefix string) []cli.Flag {
prefixEnvVars := func(name string) []string {
return opservice.PrefixEnvVar(envPrefix, name)
......@@ -71,43 +81,43 @@ func CLIFlags(envPrefix string) []cli.Flag {
&cli.Uint64Flag{
Name: NumConfirmationsFlagName,
Usage: "Number of confirmations which we will wait after sending a transaction",
Value: 10,
Value: defaultNumConfirmations,
EnvVars: prefixEnvVars("NUM_CONFIRMATIONS"),
},
&cli.Uint64Flag{
Name: SafeAbortNonceTooLowCountFlagName,
Usage: "Number of ErrNonceTooLow observations required to give up on a tx at a particular nonce without receiving confirmation",
Value: 3,
Value: defaultSafeAbortNonceTooLowCount,
EnvVars: prefixEnvVars("SAFE_ABORT_NONCE_TOO_LOW_COUNT"),
},
&cli.DurationFlag{
Name: ResubmissionTimeoutFlagName,
Usage: "Duration we will wait before resubmitting a transaction to L1",
Value: 48 * time.Second,
Value: defaultResubmissionTimeout,
EnvVars: prefixEnvVars("RESUBMISSION_TIMEOUT"),
},
&cli.DurationFlag{
Name: NetworkTimeoutFlagName,
Usage: "Timeout for all network operations",
Value: 2 * time.Second,
Value: defaultNetworkTimeout,
EnvVars: prefixEnvVars("NETWORK_TIMEOUT"),
},
&cli.DurationFlag{
Name: TxSendTimeoutFlagName,
Usage: "Timeout for sending transactions. If 0 it is disabled.",
Value: 0,
Value: defaultTxSendTimeout,
EnvVars: prefixEnvVars("TXMGR_TX_SEND_TIMEOUT"),
},
&cli.DurationFlag{
Name: TxNotInMempoolTimeoutFlagName,
Usage: "Timeout for aborting a tx send if the tx does not make it to the mempool.",
Value: 2 * time.Minute,
Value: defaultTxNotInMempoolTimeout,
EnvVars: prefixEnvVars("TXMGR_TX_NOT_IN_MEMPOOL_TIMEOUT"),
},
&cli.DurationFlag{
Name: ReceiptQueryIntervalFlagName,
Usage: "Frequency to poll for receipts",
Value: 12 * time.Second,
Value: defaultReceiptQueryInterval,
EnvVars: prefixEnvVars("TXMGR_RECEIPT_QUERY_INTERVAL"),
},
}, client.CLIFlags(envPrefix)...)
......@@ -130,6 +140,20 @@ type CLIConfig struct {
TxNotInMempoolTimeout time.Duration
}
func NewCLIConfig(l1RPCURL string) CLIConfig {
return CLIConfig{
L1RPCURL: l1RPCURL,
NumConfirmations: defaultNumConfirmations,
SafeAbortNonceTooLowCount: defaultSafeAbortNonceTooLowCount,
ResubmissionTimeout: defaultResubmissionTimeout,
NetworkTimeout: defaultNetworkTimeout,
TxSendTimeout: defaultTxSendTimeout,
TxNotInMempoolTimeout: defaultTxNotInMempoolTimeout,
ReceiptQueryInterval: defaultReceiptQueryInterval,
SignerCLIConfig: client.NewCLIConfig(),
}
}
func (m CLIConfig) Check() error {
if m.L1RPCURL == "" {
return errors.New("must provide a L1 RPC url")
......
package txmgr
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
)
var (
l1EthRpcValue = "http://localhost:9546"
)
func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
cfg := configForArgs()
defaultCfg := NewCLIConfig(l1EthRpcValue)
require.Equal(t, defaultCfg, cfg)
}
func TestDefaultConfigIsValid(t *testing.T) {
cfg := NewCLIConfig(l1EthRpcValue)
require.NoError(t, cfg.Check())
}
func configForArgs(args ...string) CLIConfig {
app := cli.NewApp()
// txmgr expects the --l1-eth-rpc option to be declared externally
flags := append(CLIFlags("TEST_"), &cli.StringFlag{
Name: L1RPCFlagName,
Value: l1EthRpcValue,
})
app.Flags = flags
app.Name = "test"
var config CLIConfig
app.Action = func(ctx *cli.Context) error {
config = ReadCLIConfig(ctx)
return nil
}
_ = app.Run(args)
return config
}
......@@ -19,6 +19,30 @@ type TxManager struct {
mock.Mock
}
// BlockNumber provides a mock function with given fields: ctx
func (_m *TxManager) BlockNumber(ctx context.Context) (uint64, error) {
ret := _m.Called(ctx)
var r0 uint64
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok {
return rf(ctx)
}
if rf, ok := ret.Get(0).(func(context.Context) uint64); ok {
r0 = rf(ctx)
} else {
r0 = ret.Get(0).(uint64)
}
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = rf(ctx)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
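The generated mock above follows the usual mockery/testify shape, so tests can program the new `BlockNumber` expectation like any other. A hedged usage example follows; the surrounding test scaffolding and import paths are assumptions, not part of this diff:

```go
import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestTxManagerMockBlockNumber(t *testing.T) {
	m := new(TxManager) // the generated mock type shown above
	m.On("BlockNumber", mock.Anything).Return(uint64(123), nil)

	n, err := m.BlockNumber(context.Background())
	require.NoError(t, err)
	require.Equal(t, uint64(123), n)
	m.AssertExpectations(t)
}
```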
// From provides a mock function with given fields:
func (_m *TxManager) From() common.Address {
ret := _m.Called()
......
......@@ -48,6 +48,9 @@ type TxManager interface {
// From returns the sending address associated with the instance of the transaction manager.
// It is static for a single instance of a TxManager.
From() common.Address
// BlockNumber returns the most recent block number from the underlying network.
BlockNumber(ctx context.Context) (uint64, error)
}
// ETHBackend is the set of methods that the transaction manager uses to resubmit gas & determine
......@@ -116,6 +119,10 @@ func (m *SimpleTxManager) From() common.Address {
return m.cfg.From
}
func (m *SimpleTxManager) BlockNumber(ctx context.Context) (uint64, error) {
return m.backend.BlockNumber(ctx)
}
// TxCandidate is a transaction candidate that can be submitted to ask the
// [TxManager] to construct a transaction with gas price bounds.
type TxCandidate struct {
......@@ -353,7 +360,8 @@ func (m *SimpleTxManager) publishAndWaitForTx(ctx context.Context, tx *types.Tra
// Poll for the transaction to be ready & then send the result to receiptChan
receipt, err := m.waitMined(ctx, tx, sendState)
if err != nil {
log.Warn("Transaction receipt not found", "err", err)
// this will happen if the tx was successfully replaced by a tx with bumped fees
log.Info("Transaction receipt not found", "err", err)
return
}
select {
......@@ -475,6 +483,10 @@ func (m *SimpleTxManager) increaseGasPrice(ctx context.Context, tx *types.Transa
Data: rawTx.Data,
})
if err != nil {
// If this is a transaction resubmission, we sometimes see this outcome because the
// original tx can get included in a block just before the above call. In this case the
// error is due to the tx reverting with message "block number must be equal to next
// expected block number"
m.l.Warn("failed to re-estimate gas", "err", err, "gaslimit", tx.Gas())
return nil, err
}
......
......@@ -38,6 +38,12 @@ type CLIConfig struct {
TLSConfig optls.CLIConfig
}
func NewCLIConfig() CLIConfig {
return CLIConfig{
TLSConfig: optls.NewCLIConfig(),
}
}
func (c CLIConfig) Check() error {
if err := c.TLSConfig.Check(); err != nil {
return err
......
package client
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
)
func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
cfg := configForArgs()
defaultCfg := NewCLIConfig()
require.Equal(t, defaultCfg, cfg)
}
func TestDefaultConfigIsValid(t *testing.T) {
err := NewCLIConfig().Check()
require.NoError(t, err)
}
func TestInvalidConfig(t *testing.T) {
tests := []struct {
name string
expected string
configChange func(config *CLIConfig)
}{
{
name: "MissingEndpoint",
expected: "signer endpoint and address must both be set or not set",
configChange: func(config *CLIConfig) {
config.Address = "0x1234"
},
},
{
name: "MissingAddress",
expected: "signer endpoint and address must both be set or not set",
configChange: func(config *CLIConfig) {
config.Endpoint = "http://localhost"
},
},
{
name: "InvalidTLSConfig",
expected: "all tls flags must be set if at least one is set",
configChange: func(config *CLIConfig) {
config.TLSConfig.TLSKey = ""
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
cfg := NewCLIConfig()
test.configChange(&cfg)
err := cfg.Check()
require.ErrorContains(t, err, test.expected)
})
}
}
func configForArgs(args ...string) CLIConfig {
app := cli.NewApp()
app.Flags = CLIFlags("TEST_")
app.Name = "test"
var config CLIConfig
app.Action = func(ctx *cli.Context) error {
config = ReadCLIConfig(ctx)
return nil
}
_ = app.Run(args)
return config
}
......@@ -32,12 +32,12 @@ DisputeGameFactory_SetImplementation_Test:test_setImplementation_notOwner_revert
DisputeGameFactory_SetImplementation_Test:test_setImplementation_succeeds() (gas: 44243)
DisputeGameFactory_TransferOwnership_Test:test_transferOwnership_notOwner_reverts() (gas: 15950)
DisputeGameFactory_TransferOwnership_Test:test_transferOwnership_succeeds() (gas: 18642)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot2:test_resolvesCorrectly_succeeds() (gas: 502174)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot3:test_resolvesCorrectly_succeeds() (gas: 504053)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot:test_resolvesCorrectly_succeeds() (gas: 491517)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot2:test_resolvesCorrectly_succeeds() (gas: 500937)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot3:test_resolvesCorrectly_succeeds() (gas: 502816)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot:test_resolvesCorrectly_succeeds() (gas: 490280)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot1:test_resolvesCorrectly_succeeds() (gas: 491839)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot2:test_resolvesCorrectly_succeeds() (gas: 495751)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot3:test_resolvesCorrectly_succeeds() (gas: 495049)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot1:test_resolvesCorrectly_succeeds() (gas: 490600)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot2:test_resolvesCorrectly_succeeds() (gas: 494512)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot3:test_resolvesCorrectly_succeeds() (gas: 493810)
FaultDisputeGame_Test:test_extraData_succeeds() (gas: 17426)
FaultDisputeGame_Test:test_gameData_succeeds() (gas: 17917)
FaultDisputeGame_Test:test_gameStart_succeeds() (gas: 10315)
......@@ -51,12 +51,12 @@ FaultDisputeGame_Test:test_move_gameDepthExceeded_reverts() (gas: 408100)
FaultDisputeGame_Test:test_move_gameNotInProgress_reverts() (gas: 10968)
FaultDisputeGame_Test:test_move_nonExistentParent_reverts() (gas: 24655)
FaultDisputeGame_Test:test_move_simpleAttack_succeeds() (gas: 107344)
FaultDisputeGame_Test:test_resolve_challengeContested_succeeds() (gas: 224789)
FaultDisputeGame_Test:test_resolve_challengeContested_succeeds() (gas: 224784)
FaultDisputeGame_Test:test_resolve_notInProgress_reverts() (gas: 9657)
FaultDisputeGame_Test:test_resolve_rootContested_succeeds() (gas: 109754)
FaultDisputeGame_Test:test_resolve_rootContested_succeeds() (gas: 109749)
FaultDisputeGame_Test:test_resolve_rootUncontestedClockNotExpired_succeeds() (gas: 21422)
FaultDisputeGame_Test:test_resolve_rootUncontested_succeeds() (gas: 27256)
FaultDisputeGame_Test:test_resolve_teamDeathmatch_succeeds() (gas: 395447)
FaultDisputeGame_Test:test_resolve_rootUncontested_succeeds() (gas: 27251)
FaultDisputeGame_Test:test_resolve_teamDeathmatch_succeeds() (gas: 395442)
FaultDisputeGame_Test:test_rootClaim_succeeds() (gas: 8181)
FeeVault_Test:test_constructor_succeeds() (gas: 18185)
GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 352113)
......
......@@ -80,17 +80,6 @@ pnpm build
pnpm test
```
#### Running Echidna tests
You must have [Echidna](https://github.com/crytic/echidna) installed.
Contracts targeted for Echidna testing are located in `./contracts/echidna`.
Each target contract is tested with a separate pnpm command, for example:
```shell
pnpm echidna:aliasing
```
### Deployment
The smart contracts are deployed using `foundry` with a `hardhat-deploy` compatibility layer. When the contracts are deployed,
......
......@@ -20,32 +20,33 @@ interface IFaultDisputeGame is IDisputeGame {
/// @notice Emitted when a new claim is added to the DAG by `claimant`
/// @param parentIndex The index within the `claimData` array of the parent claim
/// @param pivot The claim being added
/// @param claim The claim being added
/// @param claimant The address of the claimant
event Move(uint256 indexed parentIndex, Claim indexed pivot, address indexed claimant);
event Move(uint256 indexed parentIndex, Claim indexed claim, address indexed claimant);
/// @notice Attack a disagreed upon `Claim`.
/// @param _parentIndex Index of the `Claim` to attack in `claimData`.
/// @param _pivot The `Claim` at the relative attack position.
function attack(uint256 _parentIndex, Claim _pivot) external payable;
/// @param _claim The `Claim` at the relative attack position.
function attack(uint256 _parentIndex, Claim _claim) external payable;
/// @notice Defend an agreed upon `Claim`.
/// @param _parentIndex Index of the claim to defend in `claimData`.
/// @param _pivot The `Claim` at the relative defense position.
function defend(uint256 _parentIndex, Claim _pivot) external payable;
/// @param _claim The `Claim` at the relative defense position.
function defend(uint256 _parentIndex, Claim _claim) external payable;
/// @notice Perform the final step via an on-chain fault proof processor
/// @dev This function should point to a fault proof processor in order to execute
/// a step in the fault proof program on-chain. The interface of the fault proof
/// processor contract should be generic enough such that we can use different
/// fault proof VMs (MIPS, RiscV5, etc.)
/// @param _stateIndex The index of the pre/post state of the step within `claimData`.
/// @param _claimIndex The index of the challenged claim within `claimData`.
/// @param _isAttack Whether or not the step is an attack or a defense.
/// @param _stateData The stateData of the step is the preimage of the claim @ `prestateIndex`
/// @param _stateData The stateData of the step is the preimage of the claim at the given
/// prestate, which is at `_stateIndex` if the move is an attack and `_claimIndex` if
/// the move is a defense. If the step is an attack on the first instruction, it is
/// the absolute prestate of the fault proof VM.
/// @param _proof Proof to access memory leaf nodes in the VM.
function step(
uint256 _stateIndex,
uint256 _claimIndex,
bool _isAttack,
bytes calldata _stateData,
......
......@@ -124,6 +124,27 @@ library LibPosition {
}
}
/// @notice Gets the position of the highest ancestor of `_position` that commits to the same
/// trace index.
/// @param _position The position to get the highest ancestor of.
/// @return ancestor_ The highest ancestor of `position` that commits to the same trace index.
function traceAncestor(Position _position) internal pure returns (Position ancestor_) {
// Create a field with only the lowest unset bit of `_position` set.
Position lsb;
assembly {
lsb := and(not(_position), add(_position, 1))
}
// Find the index of the lowest unset bit within the field.
uint256 msb = depth(lsb);
// The highest ancestor that commits to the same trace index is the original position
// shifted right by the index of the lowest unset bit.
assembly {
let a := shr(msb, _position)
// Bound the ancestor to the minimum gindex, 1.
ancestor_ := or(a, iszero(a))
}
}
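For readers less comfortable with the inline assembly, the same bit manipulation can be expressed directly. Below is an equivalent sketch in Go over a 64-bit generalized index (the real library operates on wider positions, so treat this as illustrative only):

```go
import "math/bits"

// traceAncestor mirrors the assembly above: isolate the lowest unset bit of
// pos, shift pos right by that bit's index, and bound the result to gindex 1.
func traceAncestor(pos uint64) uint64 {
	lsb := ^pos & (pos + 1)            // field with only the lowest unset bit of pos set
	shift := bits.TrailingZeros64(lsb) // index of that bit (the "depth" of lsb)
	a := pos >> shift
	if a == 0 {
		a = 1 // bound the ancestor to the minimum generalized index, 1
	}
	return a
}
```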
/// @notice Get the move position of `_position`, which is the left child of:
/// 1. `_position + 1` if `_isAttack` is true.
/// 1. `_position` if `_isAttack` is false.
......
pragma solidity 0.8.15;
import { AddressAliasHelper } from "../vendor/AddressAliasHelper.sol";
contract EchidnaFuzzAddressAliasing {
bool internal failedRoundtrip;
/**
* @notice Takes an address to be aliased with AddressAliasHelper and then unaliased
* and updates the test contract's state indicating if the round trip aliasing
* failed.
*/
function testRoundTrip(address addr) public {
// Alias our address
address aliasedAddr = AddressAliasHelper.applyL1ToL2Alias(addr);
// Unalias our address
address undoneAliasAddr = AddressAliasHelper.undoL1ToL2Alias(aliasedAddr);
// If our round trip aliasing did not return the original result, set our state.
if (addr != undoneAliasAddr) {
failedRoundtrip = true;
}
}
/**
* @custom:invariant Address aliases are always able to be undone.
*
* Asserts that an address that has been aliased with `applyL1ToL2Alias` can always
* be unaliased with `undoL1ToL2Alias`.
*/
function echidna_round_trip_aliasing() public view returns (bool) {
// ASSERTION: The round trip aliasing done in testRoundTrip(...) should never fail.
return !failedRoundtrip;
}
}
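The invariant being fuzzed is that aliasing is a bijection on the 160-bit address space: the alias is applied by adding a fixed offset modulo 2^160 and undone by subtracting it again. A sketch of that round trip in Go, using `math/big` for the wrap-around arithmetic (the offset constant is the one used by `AddressAliasHelper`; this is an illustration of the invariant, not the contract code):

```go
import "math/big"

var (
	// Offset applied by AddressAliasHelper; addresses wrap around modulo 2^160.
	aliasOffset, _ = new(big.Int).SetString("1111000000000000000000000000000000001111", 16)
	addressModulus = new(big.Int).Lsh(big.NewInt(1), 160)
)

// applyL1ToL2Alias adds the offset mod 2^160.
func applyL1ToL2Alias(l1 *big.Int) *big.Int {
	return new(big.Int).Mod(new(big.Int).Add(l1, aliasOffset), addressModulus)
}

// undoL1ToL2Alias subtracts the offset mod 2^160; big.Int.Mod yields a
// non-negative result, mirroring the unchecked wrap-around in Solidity.
func undoL1ToL2Alias(l2 *big.Int) *big.Int {
	return new(big.Int).Mod(new(big.Int).Sub(l2, aliasOffset), addressModulus)
}
```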
pragma solidity 0.8.15;
import { Burn } from "../libraries/Burn.sol";
import { StdUtils } from "forge-std/Test.sol";
contract EchidnaFuzzBurnEth is StdUtils {
bool internal failedEthBurn;
/**
* @notice Takes an integer amount of eth to burn through the Burn library and
* updates the contract state if an incorrect amount of eth moved from the contract
*/
function testBurn(uint256 _value) public {
// cache the contract's eth balance
uint256 preBurnBalance = address(this).balance;
uint256 value = bound(_value, 0, preBurnBalance);
// execute a burn of _value eth
Burn.eth(value);
// check that exactly value eth was transferred from the contract
unchecked {
if (address(this).balance != preBurnBalance - value) {
failedEthBurn = true;
}
}
}
/**
* @custom:invariant `eth(uint256)` always burns the exact amount of eth passed.
*
* Asserts that when `Burn.eth(uint256)` is called, it always burns the exact amount
* of ETH passed to the function.
*/
function echidna_burn_eth() public view returns (bool) {
// ASSERTION: The amount burned should always match the amount passed exactly
return !failedEthBurn;
}
}
contract EchidnaFuzzBurnGas is StdUtils {
bool internal failedGasBurn;
/**
* @notice Takes an integer amount of gas to burn through the Burn library and
* updates the contract state if at least that amount of gas was not burned
* by the library
*/
function testGas(uint256 _value) public {
// cap the value to the max resource limit
uint256 MAX_RESOURCE_LIMIT = 8_000_000;
uint256 value = bound(_value, 0, MAX_RESOURCE_LIMIT);
// cache the contract's current remaining gas
uint256 preBurnGas = gasleft();
// execute the gas burn
Burn.gas(value);
// cache the remaining gas post burn
uint256 postBurnGas = gasleft();
// check that at least value gas was burnt (and that there was no underflow)
unchecked {
if (postBurnGas - preBurnGas > value || preBurnGas - value > preBurnGas) {
failedGasBurn = true;
}
}
}
/**
* @custom:invariant `gas(uint256)` always burns at least the amount of gas passed.
*
* Asserts that when `Burn.gas(uint256)` is called, it always burns at least the amount
* of gas passed to the function.
*/
function echidna_burn_gas() public view returns (bool) {
// ASSERTION: The amount of gas burned should be strictly greater than the
// amount passed as _value (minimum _value plus whatever minor overhead is
// added after the call)
return !failedGasBurn;
}
}
pragma solidity 0.8.15;
import { Encoding } from "../libraries/Encoding.sol";
contract EchidnaFuzzEncoding {
bool internal failedRoundtripAToB;
bool internal failedRoundtripBToA;
/**
* @notice Takes a pair of integers to be encoded into a versioned nonce with the
* Encoding library and then decoded and updates the test contract's state
* indicating if the round trip encoding failed.
*/
function testRoundTripAToB(uint240 _nonce, uint16 _version) public {
// Encode the nonce and version
uint256 encodedVersionedNonce = Encoding.encodeVersionedNonce(_nonce, _version);
// Decode the nonce and version
uint240 decodedNonce;
uint16 decodedVersion;
(decodedNonce, decodedVersion) = Encoding.decodeVersionedNonce(encodedVersionedNonce);
// If our round trip encoding did not return the original result, set our state.
if ((decodedNonce != _nonce) || (decodedVersion != _version)) {
failedRoundtripAToB = true;
}
}
/**
* @notice Takes an integer representing a packed version and nonce and attempts
* to decode them using the Encoding library before re-encoding and updates
* the test contract's state indicating if the round trip encoding failed.
*/
function testRoundTripBToA(uint256 _versionedNonce) public {
// Decode the nonce and version
uint240 decodedNonce;
uint16 decodedVersion;
(decodedNonce, decodedVersion) = Encoding.decodeVersionedNonce(_versionedNonce);
// Encode the nonce and version
uint256 encodedVersionedNonce = Encoding.encodeVersionedNonce(decodedNonce, decodedVersion);
// If our round trip encoding did not return the original result, set our state.
if (encodedVersionedNonce != _versionedNonce) {
failedRoundtripBToA = true;
}
}
/**
* @custom:invariant `testRoundTripAToB` never fails.
*
* Asserts that a raw versioned nonce can be encoded / decoded to reach the same raw value.
*/
function echidna_round_trip_encoding_AToB() public view returns (bool) {
// ASSERTION: The round trip encoding done in testRoundTripAToB(...) should never fail.
return !failedRoundtripAToB;
}
/**
* @custom:invariant `testRoundTripBToA` never fails.
*
* Asserts that an encoded versioned nonce can always be decoded / re-encoded to reach
* the same encoded value.
*/
function echidna_round_trip_encoding_BToA() public view returns (bool) {
// ASSERTION: The round trip encoding done in testRoundTripBToA should never
// fail.
return !failedRoundtripBToA;
}
}
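The round trips above hinge on the nonce/version packing being lossless. Assuming the layout used by the Encoding library (the 16-bit version in the top bits, the 240-bit nonce in the remainder), the packing can be sketched in Go as follows; this illustrates the invariant and is not the contract code:

```go
import "math/big"

// nonceMask selects the low 240 bits of a packed versioned nonce.
var nonceMask = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 240), big.NewInt(1))

// encodeVersionedNonce packs version into the top 16 bits and nonce into the rest.
func encodeVersionedNonce(nonce *big.Int, version uint16) *big.Int {
	packed := new(big.Int).Lsh(new(big.Int).SetUint64(uint64(version)), 240)
	return packed.Or(packed, new(big.Int).And(nonce, nonceMask))
}

// decodeVersionedNonce reverses the packing; decode-then-encode is the identity
// on any 256-bit input, which is exactly what testRoundTripBToA checks.
func decodeVersionedNonce(packed *big.Int) (*big.Int, uint16) {
	version := uint16(new(big.Int).Rsh(packed, 240).Uint64())
	nonce := new(big.Int).And(packed, nonceMask)
	return nonce, version
}
```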
pragma solidity 0.8.15;
import { OptimismPortal } from "../L1/OptimismPortal.sol";
import { L2OutputOracle } from "../L1/L2OutputOracle.sol";
import { AddressAliasHelper } from "../vendor/AddressAliasHelper.sol";
import { SystemConfig } from "../L1/SystemConfig.sol";
import { ResourceMetering } from "../L1/ResourceMetering.sol";
import { Constants } from "../libraries/Constants.sol";
contract EchidnaFuzzOptimismPortal {
OptimismPortal internal portal;
bool internal failedToComplete;
constructor() {
ResourceMetering.ResourceConfig memory rcfg = Constants.DEFAULT_RESOURCE_CONFIG();
SystemConfig systemConfig = new SystemConfig({
_owner: address(1),
_overhead: 0,
_scalar: 10000,
_batcherHash: bytes32(0),
_gasLimit: 30_000_000,
_unsafeBlockSigner: address(0),
_config: rcfg
});
portal = new OptimismPortal({
_l2Oracle: L2OutputOracle(address(0)),
_guardian: address(0),
_paused: false,
_config: systemConfig
});
}
// A test intended to identify any unexpected halting conditions
function testDepositTransactionCompletes(
address _to,
uint256 _mint,
uint256 _value,
uint64 _gasLimit,
bool _isCreation,
bytes memory _data
) public payable {
failedToComplete = true;
require(!_isCreation || _to == address(0), "EchidnaFuzzOptimismPortal: invalid test case.");
portal.depositTransaction{ value: _mint }(_to, _value, _gasLimit, _isCreation, _data);
failedToComplete = false;
}
/**
* @custom:invariant Deposits of any value should always succeed unless
* `_to` = `address(0)` or `_isCreation` = `true`.
*
* All deposits, barring creation transactions and transactions sent to `address(0)`,
* should always succeed.
*/
function echidna_deposit_completes() public view returns (bool) {
return !failedToComplete;
}
}
cryticArgs: ["--hardhat-ignore-compile"]
format: text
# Set the timeout to 3 minutes to keep CI from getting too long.
# The tool also adds 2 or 3 minutes before/after the actual timeout window.
timeout: 180
# Prevent calls to the (non-existent) fallback function of EchidnaFuzzResourceMetering
filterFunctions: [EchidnaFuzzResourceMetering.*fallback*()]