Commit 802a9c76 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into jg/step_solver

parents e231d7cf a8e6061d
@@ -7,7 +7,6 @@ ignore:
   - "**/*.t.sol"
   - "op-bindings/bindings/*.go"
   - "packages/contracts-bedrock/contracts/vendor/WETH9.sol"
-  - "packages/contracts-bedrock/contracts/echidna"
   - "packages/contracts-bedrock/contracts/cannon" # tested through Go tests
 coverage:
   status:
......
@@ -3,9 +3,9 @@ package database
 import (
 	"context"
 	"errors"
-	"math/big"

 	"github.com/ethereum/go-ethereum/common"
+	"github.com/google/uuid"
 	"gorm.io/gorm"
 )
@@ -27,11 +27,6 @@ type L1BlockHeader struct {
 type L2BlockHeader struct {
 	BlockHeader

-	// Marked when the proposed output is finalized on L1.
-	// All bedrock blocks will have `LegacyStateBatchIndex ^== NULL`
-	L1BlockHash           *common.Hash `gorm:"serializer:json"`
-	LegacyStateBatchIndex *uint64
 }
 type LegacyStateBatch struct {
@@ -39,25 +34,33 @@ type LegacyStateBatch struct {
 	// violating the primary key constraint.
 	Index uint64 `gorm:"primaryKey;default:0"`

 	Root      common.Hash `gorm:"serializer:json"`
 	Size      uint64
 	PrevTotal uint64
-	L1BlockHash common.Hash `gorm:"serializer:json"`
+	L1ContractEventGUID uuid.UUID
+}
+
+type OutputProposal struct {
+	OutputRoot    common.Hash `gorm:"primaryKey;serializer:json"`
+	L2BlockNumber U256
+
+	L1ContractEventGUID uuid.UUID
 }

 type BlocksView interface {
-	FinalizedL1BlockHeader() (*L1BlockHeader, error)
-	FinalizedL2BlockHeader() (*L2BlockHeader, error)
+	LatestL1BlockHeader() (*L1BlockHeader, error)
+	LatestCheckpointedOutput() (*OutputProposal, error)
+	LatestL2BlockHeader() (*L2BlockHeader, error)
 }

 type BlocksDB interface {
 	BlocksView

 	StoreL1BlockHeaders([]*L1BlockHeader) error
-	StoreLegacyStateBatch(*LegacyStateBatch) error
 	StoreL2BlockHeaders([]*L2BlockHeader) error
-	MarkFinalizedL1RootForL2Block(common.Hash, common.Hash) error
+	StoreLegacyStateBatches([]*LegacyStateBatch) error
+	StoreOutputProposals([]*OutputProposal) error
 }

 /**
@@ -79,39 +82,33 @@ func (db *blocksDB) StoreL1BlockHeaders(headers []*L1BlockHeader) error {
 	return result.Error
 }

-func (db *blocksDB) StoreLegacyStateBatch(stateBatch *LegacyStateBatch) error {
-	result := db.gorm.Create(stateBatch)
-	if result.Error != nil {
-		return result.Error
-	}
-
-	// Mark this state batch index & l1 block hash for all applicable l2 blocks
-	l2Headers := make([]*L2BlockHeader, stateBatch.Size)
-
-	// [start, end] range is inclusive. Since `PrevTotal` is the index of the prior batch, no
-	// need to subtract one when adding the size
-	startHeight := U256{Int: big.NewInt(int64(stateBatch.PrevTotal + 1))}
-	endHeight := U256{Int: big.NewInt(int64(stateBatch.PrevTotal + stateBatch.Size))}
-	result = db.gorm.Where("number BETWEEN ? AND ?", &startHeight, &endHeight).Find(&l2Headers)
-	if result.Error != nil {
-		return result.Error
-	} else if result.RowsAffected != int64(stateBatch.Size) {
-		return errors.New("state batch size exceeds number of indexed l2 blocks")
-	}
-
-	for _, header := range l2Headers {
-		header.LegacyStateBatchIndex = &stateBatch.Index
-		header.L1BlockHash = &stateBatch.L1BlockHash
-	}
-
-	result = db.gorm.Save(&l2Headers)
-	return result.Error
-}
-
-// FinalizedL1BlockHeader returns the latest L1 block header stored in the database, nil otherwise
-func (db *blocksDB) FinalizedL1BlockHeader() (*L1BlockHeader, error) {
-	var l1Header L1BlockHeader
-	result := db.gorm.Order("number DESC").Take(&l1Header)
+func (db *blocksDB) StoreLegacyStateBatches(stateBatches []*LegacyStateBatch) error {
+	result := db.gorm.Create(stateBatches)
+	return result.Error
+}
+
+func (db *blocksDB) StoreOutputProposals(outputs []*OutputProposal) error {
+	result := db.gorm.Create(outputs)
+	return result.Error
+}
+
+func (db *blocksDB) LatestL1BlockHeader() (*L1BlockHeader, error) {
+	var l1Header L1BlockHeader
+	result := db.gorm.Order("number DESC").Take(&l1Header)
+	if result.Error != nil {
+		if errors.Is(result.Error, gorm.ErrRecordNotFound) {
+			return nil, nil
+		}
+
+		return nil, result.Error
+	}
+
+	return &l1Header, nil
+}
+
+func (db *blocksDB) LatestCheckpointedOutput() (*OutputProposal, error) {
+	var outputProposal OutputProposal
+	result := db.gorm.Order("l2_block_number DESC").Take(&outputProposal)
 	if result.Error != nil {
 		if errors.Is(result.Error, gorm.ErrRecordNotFound) {
 			return nil, nil
@@ -120,7 +117,7 @@ func (db *blocksDB) FinalizedL1BlockHeader() (*L1BlockHeader, error) {
 		return nil, result.Error
 	}

-	return &l1Header, nil
+	return &outputProposal, nil
 }
 // L2

@@ -130,8 +127,7 @@ func (db *blocksDB) StoreL2BlockHeaders(headers []*L2BlockHeader) error {
 	return result.Error
 }

-// FinalizedL2BlockHeader returns the latest L2 block header stored in the database, nil otherwise
-func (db *blocksDB) FinalizedL2BlockHeader() (*L2BlockHeader, error) {
+func (db *blocksDB) LatestL2BlockHeader() (*L2BlockHeader, error) {
 	var l2Header L2BlockHeader
 	result := db.gorm.Order("number DESC").Take(&l2Header)
 	if result.Error != nil {
@@ -145,19 +141,3 @@ func (db *blocksDB) FinalizedL2BlockHeader() (*L2BlockHeader, error) {
 	result.Logger.Info(context.Background(), "number ", l2Header.Number)
 	return &l2Header, nil
 }
-
-// MarkFinalizedL1RootForL2Block updates the stored L2 block header with the L1 block
-// that contains the output proposal for the L2 root.
-func (db *blocksDB) MarkFinalizedL1RootForL2Block(l2Root, l1Root common.Hash) error {
-	var l2Header L2BlockHeader
-	l2Header.Hash = l2Root // set the primary key
-
-	result := db.gorm.First(&l2Header)
-	if result.Error != nil {
-		return result.Error
-	}
-
-	l2Header.L1BlockHash = &l1Root
-	result = db.gorm.Save(&l2Header)
-	return result.Error
-}
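All three Latest* readers above share one gorm idiom: order by the cursor column descending, `Take` a single row, and translate `gorm.ErrRecordNotFound` into a `(nil, nil)` result so callers can tell an empty table apart from a real failure. A standalone sketch of that pattern; the `Row` model and column name are illustrative, not the indexer's:

```go
package example

import (
	"errors"

	"gorm.io/gorm"
)

type Row struct {
	Number uint64
}

// latestRow returns the highest-numbered row, or nil if the table is empty.
// It mirrors the Order("number DESC").Take(...) pattern used above.
func latestRow(db *gorm.DB) (*Row, error) {
	var row Row
	result := db.Order("number DESC").Take(&row)
	if result.Error != nil {
		if errors.Is(result.Error, gorm.ErrRecordNotFound) {
			return nil, nil // an empty table is not an error
		}
		return nil, result.Error
	}
	return &row, nil
}
```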
@@ -13,28 +13,11 @@ CREATE TABLE IF NOT EXISTS l1_block_headers (
     timestamp INTEGER NOT NULL
 );

-CREATE TABLE IF NOT EXISTS legacy_state_batches (
-    index INTEGER NOT NULL PRIMARY KEY,
-    root VARCHAR NOT NULL,
-    size INTEGER NOT NULL,
-    prev_total INTEGER NOT NULL,
-
-    -- Finalization information. Unlike `l2_block_headers` the NOT NULL
-    -- constraint is added since the l1 block hash will be known
-    -- when reading the output event
-    l1_block_hash VARCHAR NOT NULL REFERENCES l1_block_headers(hash)
-);
-
 CREATE TABLE IF NOT EXISTS l2_block_headers (
-    -- Block header
     hash VARCHAR NOT NULL PRIMARY KEY,
     parent_hash VARCHAR NOT NULL,
     number UINT256,
-    timestamp INTEGER NOT NULL,
-
-    -- Finalization information
-    l1_block_hash VARCHAR REFERENCES l1_block_headers(hash),
-    legacy_state_batch_index INTEGER REFERENCES legacy_state_batches(index)
+    timestamp INTEGER NOT NULL
 );

 /**
@@ -59,6 +42,24 @@ CREATE TABLE IF NOT EXISTS l2_contract_events (
     timestamp INTEGER NOT NULL
 );

+-- Tables that index finalization markers for L2 blocks.
+
+CREATE TABLE IF NOT EXISTS legacy_state_batches (
+    index INTEGER NOT NULL PRIMARY KEY,
+    root VARCHAR NOT NULL,
+    size INTEGER NOT NULL,
+    prev_total INTEGER NOT NULL,
+
+    l1_contract_event_guid VARCHAR REFERENCES l1_contract_events(guid)
+);
+
+CREATE TABLE IF NOT EXISTS output_proposals (
+    output_root VARCHAR NOT NULL PRIMARY KEY,
+    l2_block_number UINT256,
+
+    l1_contract_event_guid VARCHAR REFERENCES l1_contract_events(guid)
+);
+
 /**
  * BRIDGING DATA
  */
@@ -71,6 +72,7 @@ CREATE TABLE IF NOT EXISTS deposits (
     -- Deposit information (do we need indexes on from/to?)
     from_address VARCHAR NOT NULL,
     to_address VARCHAR NOT NULL,
     l1_token_address VARCHAR NOT NULL,
     l2_token_address VARCHAR NOT NULL,
......
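Nothing in this migration wires the tables to the Go models by hand; the mapping rests on gorm's default naming strategy, which snake_cases and pluralizes struct names into table names and snake_cases fields into columns (e.g. `L2BlockNumber` becomes `l2_block_number`, the column `LatestCheckpointedOutput` sorts on). A small check of the table-name half of that assumption:

```go
package main

import (
	"fmt"

	"gorm.io/gorm/schema"
)

// gorm's zero-value NamingStrategy is what the indexer relies on implicitly:
// struct names map to snake_case, pluralized table names, matching the
// CREATE TABLE statements above.
func main() {
	ns := schema.NamingStrategy{}
	fmt.Println(ns.TableName("OutputProposal"))   // output_proposals
	fmt.Println(ns.TableName("LegacyStateBatch")) // legacy_state_batches
}
```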
@@ -91,7 +91,7 @@ func (c *client) BlockHeaderByHash(hash common.Hash) (*types.Header, error) {
 // are placed on the range such as blocks in the "latest", "safe" or "finalized" states. If the specified
 // range is too large, `endHeight > latest`, the resulting list is truncated to the available headers
 func (c *client) BlockHeadersByRange(startHeight, endHeight *big.Int) ([]*types.Header, error) {
-	count := new(big.Int).Sub(endHeight, startHeight).Uint64()
+	count := new(big.Int).Sub(endHeight, startHeight).Uint64() + 1
 	batchElems := make([]rpc.BatchElem, count)
 	for i := uint64(0); i < count; i++ {
 		height := new(big.Int).Add(startHeight, new(big.Int).SetUint64(i))
......
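The `+ 1` is the substantive fix in this hunk: `BlockHeadersByRange` treats `[startHeight, endHeight]` as inclusive, so the number of batched requests is `end - start + 1`, not `end - start`. A toy illustration of the corrected count:

```go
package main

import "fmt"

// inclusiveCount reproduces the arithmetic fixed above: a closed range
// [start, end] contains end - start + 1 block heights.
func inclusiveCount(start, end uint64) uint64 {
	return end - start + 1
}

func main() {
	// Blocks [5, 9] contain 5 headers: 5, 6, 7, 8, 9.
	fmt.Println(inclusiveCount(5, 9)) // 5
}
```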
@@ -7,35 +7,38 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 )

-// Max number of headers that's bee returned by the Fetcher at once.
-const maxHeaderBatchSize = 50
-
-var ErrFetcherAndProviderMismatchedState = errors.New("the fetcher and provider have diverged in finalized state")
+var (
+	ErrHeaderTraversalAheadOfProvider            = errors.New("the HeaderTraversal's internal state is ahead of the provider")
+	ErrHeaderTraversalAndProviderMismatchedState = errors.New("the HeaderTraversal and provider have diverged in state")
+)

-type Fetcher struct {
+type HeaderTraversal struct {
 	ethClient  EthClient
 	lastHeader *types.Header
 }

-// NewFetcher instantiates a new instance of Fetcher against the supplied rpc client.
-// The Fetcher will start fetching blocks starting from the supplied header unless
+// NewHeaderTraversal instantiates a new instance of HeaderTraversal against the supplied rpc client.
+// The HeaderTraversal will start fetching blocks starting from the supplied header unless
 // nil, indicating genesis.
-func NewFetcher(ethClient EthClient, fromHeader *types.Header) *Fetcher {
-	return &Fetcher{ethClient: ethClient, lastHeader: fromHeader}
+func NewHeaderTraversal(ethClient EthClient, fromHeader *types.Header) *HeaderTraversal {
+	return &HeaderTraversal{ethClient: ethClient, lastHeader: fromHeader}
 }

-// NextConfirmedHeaders retrives the next set of headers that have been
-// marked as finalized by the connected client
-func (f *Fetcher) NextFinalizedHeaders() ([]*types.Header, error) {
+// NextFinalizedHeaders retrieves the next set of headers that have been
+// marked as finalized by the connected client, bounded by the supplied size
+func (f *HeaderTraversal) NextFinalizedHeaders(maxSize uint64) ([]*types.Header, error) {
 	finalizedBlockHeight, err := f.ethClient.FinalizedBlockHeight()
 	if err != nil {
 		return nil, err
 	}

-	if f.lastHeader != nil && f.lastHeader.Number.Cmp(finalizedBlockHeight) >= 0 {
-		// Warn if our fetcher is ahead of the provider. The fetcher should always
-		// be behind or at head with the provider.
-		return nil, nil
+	if f.lastHeader != nil {
+		cmp := f.lastHeader.Number.Cmp(finalizedBlockHeight)
+		if cmp == 0 {
+			return nil, nil
+		} else if cmp > 0 {
+			return nil, ErrHeaderTraversalAheadOfProvider
+		}
 	}

 	nextHeight := bigZero
@@ -43,7 +46,7 @@ func (f *Fetcher) NextFinalizedHeaders() ([]*types.Header, error) {
 		nextHeight = new(big.Int).Add(f.lastHeader.Number, bigOne)
 	}

-	endHeight := clampBigInt(nextHeight, finalizedBlockHeight, maxHeaderBatchSize)
+	endHeight := clampBigInt(nextHeight, finalizedBlockHeight, maxSize)
 	headers, err := f.ethClient.BlockHeadersByRange(nextHeight, endHeight)
 	if err != nil {
 		return nil, err
@@ -55,7 +58,7 @@ func (f *Fetcher) NextFinalizedHeaders() ([]*types.Header, error) {
 	} else if f.lastHeader != nil && headers[0].ParentHash != f.lastHeader.Hash() {
 		// The indexer's state is in an irrecoverable state relative to the provider. This
 		// should never happen since the indexer is dealing with only finalized blocks.
-		return nil, ErrFetcherAndProviderMismatchedState
+		return nil, ErrHeaderTraversalAndProviderMismatchedState
 	}

 	f.lastHeader = headers[numHeaders-1]
......
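A minimal sketch of how a caller drives the renamed `HeaderTraversal`, assuming some `node.EthClient` implementation; the `process` callback and the poll interval are illustrative, not part of this diff:

```go
package example

import (
	"log"
	"time"

	"github.com/ethereum-optimism/optimism/indexer/node"
	"github.com/ethereum/go-ethereum/core/types"
)

// pollFinalized drives a HeaderTraversal from genesis (nil starting header),
// pulling up to 50 finalized headers per iteration. `process` stands in for
// whatever consumes each batch.
func pollFinalized(client node.EthClient, process func([]*types.Header)) {
	traversal := node.NewHeaderTraversal(client, nil)
	for {
		headers, err := traversal.NextFinalizedHeaders(50)
		if err != nil {
			// ErrHeaderTraversalAheadOfProvider, the mismatched-state
			// error, or a plain RPC failure
			log.Printf("traversal error: %v", err)
			return
		}
		if len(headers) == 0 {
			// caught up with the finalized head; poll again later
			time.Sleep(5 * time.Second)
			continue
		}
		process(headers)
	}
}
```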
@@ -33,31 +33,31 @@ func makeHeaders(numHeaders uint64, prevHeader *types.Header) []*types.Header {
 	return headers
 }

-func TestFetcherNextFinalizedHeadersNoOp(t *testing.T) {
+func TestHeaderTraversalNextFinalizedHeadersNoOp(t *testing.T) {
 	client := new(MockEthClient)

-	// start from block 0 as the latest fetched block
-	lastHeader := &types.Header{Number: bigZero}
-	fetcher := NewFetcher(client, lastHeader)
+	// start from block 10 as the latest fetched block
+	lastHeader := &types.Header{Number: big.NewInt(10)}
+	headerTraversal := NewHeaderTraversal(client, lastHeader)

 	// no new headers when matched with head
-	client.On("FinalizedBlockHeight").Return(big.NewInt(0), nil)
-	headers, err := fetcher.NextFinalizedHeaders()
+	client.On("FinalizedBlockHeight").Return(big.NewInt(10), nil)
+	headers, err := headerTraversal.NextFinalizedHeaders(100)
 	assert.NoError(t, err)
 	assert.Empty(t, headers)
 }

-func TestFetcherNextFinalizedHeadersCursored(t *testing.T) {
+func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
 	client := new(MockEthClient)

 	// start from genesis
-	fetcher := NewFetcher(client, nil)
+	headerTraversal := NewHeaderTraversal(client, nil)

 	// blocks [0..4]
 	headers := makeHeaders(5, nil)
 	client.On("FinalizedBlockHeight").Return(big.NewInt(4), nil).Times(1) // Times so that we can override next
 	client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil)
-	headers, err := fetcher.NextFinalizedHeaders()
+	headers, err := headerTraversal.NextFinalizedHeaders(5)
 	assert.NoError(t, err)
 	assert.Len(t, headers, 5)
@@ -65,46 +65,46 @@ func TestFetcherNextFinalizedHeadersCursored(t *testing.T) {
 	headers = makeHeaders(5, headers[len(headers)-1])
 	client.On("FinalizedBlockHeight").Return(big.NewInt(9), nil)
 	client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(9))).Return(headers, nil)
-	headers, err = fetcher.NextFinalizedHeaders()
+	headers, err = headerTraversal.NextFinalizedHeaders(5)
 	assert.NoError(t, err)
 	assert.Len(t, headers, 5)
 }

-func TestFetcherNextFinalizedHeadersMaxHeaderBatch(t *testing.T) {
+func TestHeaderTraversalNextFinalizedHeadersMaxSize(t *testing.T) {
 	client := new(MockEthClient)

 	// start from genesis
-	fetcher := NewFetcher(client, nil)
+	headerTraversal := NewHeaderTraversal(client, nil)

-	// blocks [0..maxBatchSize] size == maxBatchSize = 1
-	headers := makeHeaders(maxHeaderBatchSize, nil)
-	client.On("FinalizedBlockHeight").Return(big.NewInt(maxHeaderBatchSize), nil)
+	// 100 "available" headers
+	client.On("FinalizedBlockHeight").Return(big.NewInt(100), nil)

-	// clamped by the max batch size
-	client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(maxHeaderBatchSize-1))).Return(headers, nil)
-	headers, err := fetcher.NextFinalizedHeaders()
+	// clamped by the supplied size
+	headers := makeHeaders(5, nil)
+	client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil)
+	headers, err := headerTraversal.NextFinalizedHeaders(5)
 	assert.NoError(t, err)
-	assert.Len(t, headers, maxHeaderBatchSize)
+	assert.Len(t, headers, 5)

-	// blocks [maxBatchSize..maxBatchSize]
-	headers = makeHeaders(1, headers[len(headers)-1])
-	client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(maxHeaderBatchSize)), mock.MatchedBy(bigIntMatcher(maxHeaderBatchSize))).Return(headers, nil)
-	headers, err = fetcher.NextFinalizedHeaders()
+	// clamped by the supplied size. FinalizedHeight == 100
+	headers = makeHeaders(10, headers[len(headers)-1])
+	client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(14))).Return(headers, nil)
+	headers, err = headerTraversal.NextFinalizedHeaders(10)
 	assert.NoError(t, err)
-	assert.Len(t, headers, 1)
+	assert.Len(t, headers, 10)
 }

-func TestFetcherMismatchedProviderStateError(t *testing.T) {
+func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
 	client := new(MockEthClient)

 	// start from genesis
-	fetcher := NewFetcher(client, nil)
+	headerTraversal := NewHeaderTraversal(client, nil)

 	// blocks [0..4]
 	headers := makeHeaders(5, nil)
 	client.On("FinalizedBlockHeight").Return(big.NewInt(4), nil).Times(1) // Times so that we can override next
 	client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil)
-	headers, err := fetcher.NextFinalizedHeaders()
+	headers, err := headerTraversal.NextFinalizedHeaders(5)
 	assert.NoError(t, err)
 	assert.Len(t, headers, 5)
@@ -112,7 +112,7 @@ func TestFetcherMismatchedProviderStateError(t *testing.T) {
 	headers = makeHeaders(5, nil)
 	client.On("FinalizedBlockHeight").Return(big.NewInt(9), nil)
 	client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(9))).Return(headers, nil)
-	headers, err = fetcher.NextFinalizedHeaders()
+	headers, err = headerTraversal.NextFinalizedHeaders(5)
 	assert.Nil(t, headers)
-	assert.Equal(t, ErrFetcherAndProviderMismatchedState, err)
+	assert.Equal(t, ErrHeaderTraversalAndProviderMismatchedState, err)
 }
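These tests lean on a `bigIntMatcher` helper that this diff doesn't show. Given how it is wrapped in testify's `mock.MatchedBy`, a plausible definition is a predicate factory over `*big.Int`:

```go
package node

import "math/big"

// A plausible sketch of the bigIntMatcher helper passed to mock.MatchedBy
// above (the real helper lives elsewhere in the package): it builds a
// predicate that reports whether a *big.Int argument equals the expected
// value.
func bigIntMatcher(num int64) func(*big.Int) bool {
	return func(bi *big.Int) bool { return bi.Int64() == num }
}
```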
@@ -2,14 +2,20 @@ package processor

 import (
 	"context"
+	"encoding/hex"
 	"errors"
+	"math/big"
 	"reflect"

+	"github.com/google/uuid"
+
 	"github.com/ethereum-optimism/optimism/indexer/database"
 	"github.com/ethereum-optimism/optimism/indexer/node"
-	"github.com/google/uuid"
+	"github.com/ethereum-optimism/optimism/op-bindings/bindings"
+	legacy_bindings "github.com/ethereum-optimism/optimism/op-bindings/legacy-bindings"

 	"github.com/ethereum/go-ethereum"
+	"github.com/ethereum/go-ethereum/accounts/abi"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethclient"
@@ -30,6 +36,11 @@ type L1Contracts struct {
 	// Remove afterwards?
 }

+type checkpointAbi struct {
+	l2OutputOracle             *abi.ABI
+	legacyStateCommitmentChain *abi.ABI
+}
+
 func (c L1Contracts) toSlice() []common.Address {
 	fields := reflect.VisibleFields(reflect.TypeOf(c))
 	v := reflect.ValueOf(c)
@@ -50,7 +61,19 @@ func NewL1Processor(ethClient node.EthClient, db *database.DB, l1Contracts L1Contracts) (*L1Processor, error) {
 	l1ProcessLog := log.New("processor", "l1")
 	l1ProcessLog.Info("initializing processor")

-	latestHeader, err := db.Blocks.FinalizedL1BlockHeader()
+	l2OutputOracleABI, err := bindings.L2OutputOracleMetaData.GetAbi()
+	if err != nil {
+		l1ProcessLog.Error("unable to generate L2OutputOracle ABI", "err", err)
+		return nil, err
+	}
+
+	legacyStateCommitmentChainABI, err := legacy_bindings.StateCommitmentChainMetaData.GetAbi()
+	if err != nil {
+		l1ProcessLog.Error("unable to generate legacy StateCommitmentChain ABI", "err", err)
+		return nil, err
+	}
+
+	checkpointAbi := checkpointAbi{l2OutputOracle: l2OutputOracleABI, legacyStateCommitmentChain: legacyStateCommitmentChainABI}
+
+	latestHeader, err := db.Blocks.LatestL1BlockHeader()
 	if err != nil {
 		return nil, err
 	}
@@ -66,34 +89,37 @@ func NewL1Processor(ethClient node.EthClient, db *database.DB, l1Contracts L1Contracts) (*L1Processor, error) {
 		fromL1Header = l1Header
 	} else {
-		// we shouldn't start from genesis with l1. Need a "genesis" height to be defined here
+		// we shouldn't start from genesis with l1. Need a "genesis" L1 height provided for the rollup
 		l1ProcessLog.Info("no indexed state, starting from genesis")
 		fromL1Header = nil
 	}

 	l1Processor := &L1Processor{
 		processor: processor{
-			fetcher:    node.NewFetcher(ethClient, fromL1Header),
+			headerTraversal: node.NewHeaderTraversal(ethClient, fromL1Header),
 			db:         db,
-			processFn:  l1ProcessFn(l1ProcessLog, ethClient, l1Contracts),
+			processFn:  l1ProcessFn(l1ProcessLog, ethClient, l1Contracts, checkpointAbi),
 			processLog: l1ProcessLog,
 		},
 	}

 	return l1Processor, nil
 }
-func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1Contracts) func(db *database.DB, headers []*types.Header) error {
+func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1Contracts, checkpointAbi checkpointAbi) ProcessFn {
 	rawEthClient := ethclient.NewClient(ethClient.RawRpcClient())

 	contractAddrs := l1Contracts.toSlice()
 	processLog.Info("processor configured with contracts", "contracts", l1Contracts)
+
+	outputProposedEventSig := checkpointAbi.l2OutputOracle.Events["OutputProposed"].ID
+	legacyStateBatchAppendedEventSig := checkpointAbi.legacyStateCommitmentChain.Events["StateBatchAppended"].ID
+
 	return func(db *database.DB, headers []*types.Header) error {
 		numHeaders := len(headers)

-		l1HeaderMap := make(map[common.Hash]*types.Header)
+		headerMap := make(map[common.Hash]*types.Header)
 		for _, header := range headers {
-			l1HeaderMap[header.Hash()] = header
+			headerMap[header.Hash()] = header
 		}

 		/** Watch for Contract Events **/
@@ -104,18 +130,21 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1Contracts) func(db *database.DB, headers []*types.Header) error {
 			return err
 		}

+		// L2 checkpoints posted on L1
+		outputProposals := []*database.OutputProposal{}
+		legacyStateBatches := []*database.LegacyStateBatch{}
+
 		numLogs := len(logs)
 		l1ContractEvents := make([]*database.L1ContractEvent, numLogs)
 		l1HeadersOfInterest := make(map[common.Hash]bool)
 		for i, log := range logs {
-			header, ok := l1HeaderMap[log.BlockHash]
+			header, ok := headerMap[log.BlockHash]
 			if !ok {
-				processLog.Crit("contract event found with associated header not in the batch", "header", log.BlockHash, "log_index", log.Index)
+				processLog.Error("contract event found with associated header not in the batch", "header", log.BlockHash, "log_index", log.Index)
 				return errors.New("parsed log with a block hash not in this batch")
 			}

-			l1HeadersOfInterest[log.BlockHash] = true
-			l1ContractEvents[i] = &database.L1ContractEvent{
+			contractEvent := &database.L1ContractEvent{
 				ContractEvent: database.ContractEvent{
 					GUID:      uuid.New(),
 					BlockHash: log.BlockHash,
@@ -125,21 +154,54 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1Contracts) func(db *database.DB, headers []*types.Header) error {
 					Timestamp: header.Time,
 				},
 			}
+
+			l1ContractEvents[i] = contractEvent
+			l1HeadersOfInterest[log.BlockHash] = true
+
+			// Track Checkpoint Events for L2
+			switch contractEvent.EventSignature {
+			case outputProposedEventSig:
+				if len(log.Topics) != 4 {
+					processLog.Error("parsed unexpected number of L2OutputOracle#OutputProposed log topics", "log_topics", log.Topics)
+					return errors.New("parsed unexpected OutputProposed event")
+				}
+
+				outputProposals = append(outputProposals, &database.OutputProposal{
+					OutputRoot:          log.Topics[1],
+					L2BlockNumber:       database.U256{Int: new(big.Int).SetBytes(log.Topics[2].Bytes())},
+					L1ContractEventGUID: contractEvent.GUID,
+				})
+
+			case legacyStateBatchAppendedEventSig:
+				var stateBatchAppended legacy_bindings.StateCommitmentChainStateBatchAppended
+				err := checkpointAbi.legacyStateCommitmentChain.UnpackIntoInterface(&stateBatchAppended, "StateBatchAppended", log.Data)
+				if err != nil || len(log.Topics) != 2 {
+					processLog.Error("unexpected StateCommitmentChain#StateBatchAppended log data or log topics", "log_topics", log.Topics, "log_data", hex.EncodeToString(log.Data), "err", err)
+					return err
+				}
+
+				legacyStateBatches = append(legacyStateBatches, &database.LegacyStateBatch{
+					Index:               new(big.Int).SetBytes(log.Topics[1].Bytes()).Uint64(),
+					Root:                stateBatchAppended.BatchRoot,
+					Size:                stateBatchAppended.BatchSize.Uint64(),
+					PrevTotal:           stateBatchAppended.PrevTotalElements.Uint64(),
+					L1ContractEventGUID: contractEvent.GUID,
+				})
+			}
 		}
-		/** Index L1 Blocks that have an optimism event **/
+		/** Aggregate applicable L1 Blocks **/

 		// we iterate on the original array to maintain ordering. probably can find a more efficient
 		// way to iterate over the `l1HeadersOfInterest` map while maintaining ordering
-		indexedL1Header := []*database.L1BlockHeader{}
+		l1Headers := []*database.L1BlockHeader{}
 		for _, header := range headers {
 			blockHash := header.Hash()
-			_, hasLogs := l1HeadersOfInterest[blockHash]
-			if !hasLogs {
+			if _, hasLogs := l1HeadersOfInterest[blockHash]; !hasLogs {
 				continue
 			}

-			indexedL1Header = append(indexedL1Header, &database.L1BlockHeader{
+			l1Headers = append(l1Headers, &database.L1BlockHeader{
 				BlockHeader: database.BlockHeader{
 					Hash:       blockHash,
 					ParentHash: header.ParentHash,
@@ -151,22 +213,41 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1Contracts) func(db *database.DB, headers []*types.Header) error {
 		/** Update Database **/

-		numIndexedL1Headers := len(indexedL1Header)
-		if numIndexedL1Headers > 0 {
-			processLog.Info("saved l1 blocks of interest within batch", "num", numIndexedL1Headers, "batchSize", numHeaders)
-			err = db.Blocks.StoreL1BlockHeaders(indexedL1Header)
-			if err != nil {
-				return err
-			}
-
-			// Since the headers to index are derived from the existence of logs, we know in this branch `numLogs > 0`
-			processLog.Info("saving contract logs", "size", numLogs)
-			err = db.ContractEvents.StoreL1ContractEvents(l1ContractEvents)
-			if err != nil {
-				return err
-			}
-		} else {
-			processLog.Info("no l1 blocks of interest within batch")
-		}
+		numL1Headers := len(l1Headers)
+		if numL1Headers == 0 {
+			processLog.Info("no l1 blocks of interest")
+			return nil
+		}
+
+		processLog.Info("saving l1 blocks of interest", "size", numL1Headers, "batch_size", numHeaders)
+		err = db.Blocks.StoreL1BlockHeaders(l1Headers)
+		if err != nil {
+			return err
+		}
+
+		// Since the headers to index are derived from the existence of logs, we know in this branch `numLogs > 0`
+		processLog.Info("saving contract logs", "size", numLogs)
+		err = db.ContractEvents.StoreL1ContractEvents(l1ContractEvents)
+		if err != nil {
+			return err
+		}
+
+		// Mark L2 checkpoints that have been recorded on L1 (L2OutputProposal & StateBatchAppended events)
+		numLegacyStateBatches := len(legacyStateBatches)
+		if numLegacyStateBatches > 0 {
+			latestBatch := legacyStateBatches[numLegacyStateBatches-1]
+			latestL2Height := latestBatch.PrevTotal + latestBatch.Size - 1
+			processLog.Info("detected legacy state batches", "size", numLegacyStateBatches, "latest_l2_block_number", latestL2Height)
+		}
+
+		numOutputProposals := len(outputProposals)
+		if numOutputProposals > 0 {
+			latestL2Height := outputProposals[numOutputProposals-1].L2BlockNumber.Int
+			processLog.Info("detected output proposals", "size", numOutputProposals, "latest_l2_block_number", latestL2Height)
+			err := db.Blocks.StoreOutputProposals(outputProposals)
+			if err != nil {
+				return err
+			}
+		}

 		// a-ok!
......
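The two checkpoint signatures above are pulled from parsed ABIs rather than hard-coded hashes: `abi.ABI.Events[name].ID` is the keccak256 hash of the event's canonical signature, i.e. the value that shows up in `log.Topics[0]`. A self-contained sketch with a hypothetical one-event ABI:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// A hypothetical single-event ABI, for illustration only.
	const sampleABI = `[{"type":"event","name":"OutputProposed","inputs":[{"name":"outputRoot","type":"bytes32","indexed":true}]}]`

	parsed, err := abi.JSON(strings.NewReader(sampleABI))
	if err != nil {
		panic(err)
	}

	// keccak256("OutputProposed(bytes32)"): the value that appears as
	// log.Topics[0] on any log emitted with this event signature.
	fmt.Println(parsed.Events["OutputProposed"].ID)
}
```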
@@ -58,7 +58,7 @@ func NewL2Processor(ethClient node.EthClient, db *database.DB, l2Contracts L2Contracts) (*L2Processor, error) {
 	l2ProcessLog := log.New("processor", "l2")
 	l2ProcessLog.Info("initializing processor")

-	latestHeader, err := db.Blocks.FinalizedL2BlockHeader()
+	latestHeader, err := db.Blocks.LatestL2BlockHeader()
 	if err != nil {
 		return nil, err
 	}
@@ -80,17 +80,17 @@ func NewL2Processor(ethClient node.EthClient, db *database.DB, l2Contracts L2Contracts) (*L2Processor, error) {
 	l2Processor := &L2Processor{
 		processor: processor{
-			fetcher:    node.NewFetcher(ethClient, fromL2Header),
+			headerTraversal: node.NewHeaderTraversal(ethClient, fromL2Header),
 			db:         db,
 			processFn:  l2ProcessFn(l2ProcessLog, ethClient, l2Contracts),
 			processLog: l2ProcessLog,
 		},
 	}

 	return l2Processor, nil
 }
-func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2Contracts) func(db *database.DB, headers []*types.Header) error {
+func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2Contracts) ProcessFn {
 	rawEthClient := ethclient.NewClient(ethClient.RawRpcClient())

 	contractAddrs := l2Contracts.toSlice()
@@ -98,7 +98,7 @@ func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2Contracts) ProcessFn {
 	return func(db *database.DB, headers []*types.Header) error {
 		numHeaders := len(headers)

-		/** Index All L2 Blocks **/
+		/** Index all L2 blocks **/

 		l2Headers := make([]*database.L2BlockHeader, len(headers))
 		l2HeaderMap := make(map[common.Hash]*types.Header)
@@ -129,8 +129,7 @@ func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2Contracts) ProcessFn {
 		for i, log := range logs {
 			header, ok := l2HeaderMap[log.BlockHash]
 			if !ok {
-				// Log the individual headers in the batch?
-				processLog.Crit("contract event found with associated header not in the batch", "header", header, "log_index", log.Index)
+				processLog.Error("contract event found with associated header not in the batch", "header", header, "log_index", log.Index)
 				return errors.New("parsed log with a block hash not in this batch")
 			}
@@ -148,13 +147,14 @@ func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2Contracts) ProcessFn {
 		/** Update Database **/

+		processLog.Info("saving l2 blocks", "size", numHeaders)
 		err = db.Blocks.StoreL2BlockHeaders(l2Headers)
 		if err != nil {
 			return err
 		}

 		if numLogs > 0 {
-			processLog.Info("detected new contract logs", "size", numLogs)
+			processLog.Info("detected contract logs", "size", numLogs)
 			err = db.ContractEvents.StoreL2ContractEvents(l2ContractEvents)
 			if err != nil {
 				return err
......
@@ -10,55 +10,61 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 )

-const defaultLoopInterval = 5 * time.Second
+const (
+	defaultLoopInterval     = 5 * time.Second
+	defaultHeaderBufferSize = 500
+)

-// processFn is the the function used to process unindexed headers. In
-// the event of a failure, all database operations are not committed
-type processFn func(*database.DB, []*types.Header) error
+// ProcessFn is the entrypoint for processing a batch of headers.
+// In the event of failure, database operations are rolled back
+type ProcessFn func(*database.DB, []*types.Header) error

 type processor struct {
-	fetcher *node.Fetcher
+	headerTraversal *node.HeaderTraversal

 	db         *database.DB
-	processFn  processFn
+	processFn  ProcessFn
 	processLog log.Logger
 }

 // Start kicks off the processing loop
 func (p processor) Start() {
 	pollTicker := time.NewTicker(defaultLoopInterval)
+	defer pollTicker.Stop()

 	p.processLog.Info("starting processor...")

-	// Make this loop stoppable
+	var unprocessedHeaders []*types.Header
 	for range pollTicker.C {
-		p.processLog.Info("checking for new headers...")
-
-		headers, err := p.fetcher.NextFinalizedHeaders()
-		if err != nil {
-			p.processLog.Error("unable to query for headers", "err", err)
-			continue
-		}
-
-		if len(headers) == 0 {
-			p.processLog.Info("no new headers. indexer must be at head...")
-			continue
-		}
-
-		batchLog := p.processLog.New("startHeight", headers[0].Number, "endHeight", headers[len(headers)-1].Number)
-		batchLog.Info("indexing batch of headers")
-
-		// wrap operations within a single transaction
-		err = p.db.Transaction(func(db *database.DB) error {
-			return p.processFn(db, headers)
-		})
-
-		// TODO(DX-79) if processFn failed, the next poll should retry starting from this same batch of headers
-		if err != nil {
-			batchLog.Info("unable to index batch", "err", err)
-			panic(err)
-		} else {
-			batchLog.Info("done indexing batch")
-		}
+		if len(unprocessedHeaders) == 0 {
+			newHeaders, err := p.headerTraversal.NextFinalizedHeaders(defaultHeaderBufferSize)
+			if err != nil {
+				p.processLog.Error("error querying for headers", "err", err)
+				continue
+			} else if len(newHeaders) == 0 {
+				// Logged as an error since this loop should be operating at a longer interval than the provider
+				p.processLog.Error("no new headers. processor unexpectedly at head...")
+				continue
+			}
+
+			unprocessedHeaders = newHeaders
+		} else {
+			p.processLog.Info("retrying previous batch")
+		}
+
+		firstHeader := unprocessedHeaders[0]
+		lastHeader := unprocessedHeaders[len(unprocessedHeaders)-1]
+		batchLog := p.processLog.New("batch_start_block_number", firstHeader.Number, "batch_end_block_number", lastHeader.Number)
+		batchLog.Info("processing batch")
+		err := p.db.Transaction(func(db *database.DB) error {
+			return p.processFn(db, unprocessedHeaders)
+		})
+
+		if err != nil {
+			batchLog.Warn("error processing batch. no operations committed", "err", err)
+		} else {
+			batchLog.Info("fully committed batch")
+			unprocessedHeaders = nil
+		}
 	}
 }
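The retry behavior above only works because `processFn` runs inside `db.Transaction`: an error return rolls every write back, so a failed batch can be replayed wholesale on the next tick. A minimal sketch of those semantics with plain gorm, assuming the indexer's `database.DB` wrapper forwards to it:

```go
package example

import "gorm.io/gorm"

type Header struct{ ID uint }
type Event struct{ ID uint }

// saveBatch sketches the all-or-nothing semantics the processor loop relies
// on: if the closure returns an error, gorm rolls back every write made
// through tx; a nil return commits them atomically.
func saveBatch(db *gorm.DB, headers []Header, events []Event) error {
	return db.Transaction(func(tx *gorm.DB) error {
		if err := tx.Create(&headers).Error; err != nil {
			return err // rollback: nothing is persisted
		}
		if err := tx.Create(&events).Error; err != nil {
			return err // the header insert above is rolled back too
		}
		return nil // commit both writes
	})
}
```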
@@ -15,7 +15,7 @@ var PreimageOracleStorageLayout = new(solc.StorageLayout)

 var PreimageOracleDeployedBin = "0x608060405234801561001057600080fd5b50600436106100725760003560e01c8063e159261111610050578063e15926111461011b578063fe4ac08e14610130578063fef2b4ed146101a557600080fd5b806361238bde146100775780638542cf50146100b5578063e03110e1146100f3575b600080fd5b6100a26100853660046103b5565b600160209081526000928352604080842090915290825290205481565b6040519081526020015b60405180910390f35b6100e36100c33660046103b5565b600260209081526000928352604080842090915290825290205460ff1681565b60405190151581526020016100ac565b6101066101013660046103b5565b6101c5565b604080519283526020830191909152016100ac565b61012e6101293660046103d7565b6102b6565b005b61012e61013e366004610453565b6000838152600260209081526040808320878452825280832080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660019081179091558684528252808320968352958152858220939093559283529082905291902055565b6100a26101b3366004610485565b60006020819052908152604090205481565b6000828152600260209081526040808320848452909152812054819060ff1661024e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f707265696d616765206d75737420657869737400000000000000000000000000604482015260640160405180910390fd5b506000838152602081815260409091205461026a8160086104cd565b6102758560206104cd565b1061029357836102868260086104cd565b61029091906104e5565b91505b506000938452600160209081526040808620948652939052919092205492909150565b6044356000806008830186106102cb57600080fd5b60c083901b6080526088838682378087017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80151908490207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f02000000000000000000000000000000000000000000000000000000000000001760008181526002602090815260408083208b8452825280832080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600190811790915584845282528083209a83529981528982209390935590815290819052959095209190915550505050565b600080604083850312156103c857600080fd5b50508035926020909101359150565b6000806000604084860312156103ec57600080fd5b83359250602084013567ffffffffffffffff8082111561040b57600080fd5b818601915086601f83011261041f57600080fd5b81358181111561042e57600080fd5b87602082850101111561044057600080fd5b6020830194508093505050509250925092565b6000806000806080858703121561046957600080fd5b5050823594602084013594506040840135936060013592509050565b60006020828403121561049757600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600082198211156104e0576104e061049e565b500190565b6000828210156104f7576104f761049e565b50039056fea164736f6c634300080f000a"
-var PreimageOracleDeployedSourceMap = "57:2945:58:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;143:68;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;413:25:234;;;401:2;386:18;143:68:58;;;;;;;;217:66;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;614:14:234;;607:22;589:41;;577:2;562:18;217:66:58;449:187:234;290:454:58;;;;;;:::i;:::-;;:::i;:::-;;;;815:25:234;;;871:2;856:18;;849:34;;;;788:18;290:454:58;641:248:234;1537:1463:58;;;;;;:::i;:::-;;:::i;:::-;;1086:262;;;;;;:::i;:::-;1219:19;;;;:14;:19;;;;;;;;:31;;;;;;;;:38;;;;1253:4;1219:38;;;;;;1267:18;;;;;;;;:30;;;;;;;;;:37;;;;1314:20;;;;;;;;;;:27;1086:262;87:50;;;;;;:::i;:::-;;;;;;;;;;;;;;;290:454;388:11;439:19;;;:14;:19;;;;;;;;:27;;;;;;;;;388:11;;439:27;;431:59;;;;;;;2517:2:234;431:59:58;;;2499:21:234;2556:2;2536:18;;;2529:30;2595:21;2575:18;;;2568:49;2634:18;;431:59:58;;;;;;;;-1:-1:-1;521:14:58;538:20;;;509:2;538:20;;;;;;;;631:10;538:20;640:1;631:10;:::i;:::-;616:11;:6;625:2;616:11;:::i;:::-;:25;612:84;;679:6;666:10;:6;675:1;666:10;:::i;:::-;:19;;;;:::i;:::-;657:28;;612:84;-1:-1:-1;711:18:58;;;;:13;:18;;;;;;;;:26;;;;;;;;;;;;290:454;;-1:-1:-1;290:454:58:o;1537:1463::-;1831:4;1818:18;1636:12;;1966:1;1956:12;;1941:28;;1931:84;;1999:1;1996;1989:12;1931:84;2258:3;2254:14;;;2158:4;2242:27;2289:11;2263:4;2408:15;2289:11;2390:40;2620:28;;;2624:11;2620:28;2614:35;2671:20;;;;2818:19;2811:27;2840:11;2808:44;2871:19;;;;2849:1;2871:19;;;;;;;;:31;;;;;;;;:38;;;;2905:4;2871:38;;;;;;2919:18;;;;;;;;:30;;;;;;;;;:37;;;;2966:20;;;;;;;;;;;:27;;;;-1:-1:-1;;;;1537:1463:58:o;14:248:234:-;82:6;90;143:2;131:9;122:7;118:23;114:32;111:52;;;159:1;156;149:12;111:52;-1:-1:-1;;182:23:234;;;252:2;237:18;;;224:32;;-1:-1:-1;14:248:234:o;894:659::-;973:6;981;989;1042:2;1030:9;1021:7;1017:23;1013:32;1010:52;;;1058:1;1055;1048:12;1010:52;1094:9;1081:23;1071:33;;1155:2;1144:9;1140:18;1127:32;1178:18;1219:2;1211:6;1208:14;1205:34;;;1235:1;1232;1225:12;1205:34;1273:6;1262:9;1258:22;1248:32;;1318:7;1311:4;1307:2;1303:13;1299:27;1289:55;;1340:1;1337;1330:12;1289:55;1380:2;1367:16;1406:2;1398:6;1395:14;1392:34;;;1422:1;1419;1412:12;1392:34;1467:7;1462:2;1453:6;1449:2;1445:15;1441:24;1438:37;1435:57;;;1488:1;1485;1478:12;1435:57;1519:2;1515;1511:11;1501:21;;1541:6;1531:16;;;;;894:659;;;;;:::o;1558:385::-;1644:6;1652;1660;1668;1721:3;1709:9;1700:7;1696:23;1692:33;1689:53;;;1738:1;1735;1728:12;1689:53;-1:-1:-1;;1761:23:234;;;1831:2;1816:18;;1803:32;;-1:-1:-1;1882:2:234;1867:18;;1854:32;;1933:2;1918:18;1905:32;;-1:-1:-1;1558:385:234;-1:-1:-1;1558:385:234:o;1948:180::-;2007:6;2060:2;2048:9;2039:7;2035:23;2031:32;2028:52;;;2076:1;2073;2066:12;2028:52;-1:-1:-1;2099:23:234;;1948:180;-1:-1:-1;1948:180:234:o;2663:184::-;2715:77;2712:1;2705:88;2812:4;2809:1;2802:15;2836:4;2833:1;2826:15;2852:128;2892:3;2923:1;2919:6;2916:1;2913:13;2910:39;;;2929:18;;:::i;:::-;-1:-1:-1;2965:9:234;;2852:128::o;2985:125::-;3025:4;3053:1;3050;3047:8;3044:34;;;3058:18;;:::i;:::-;-1:-1:-1;3095:9:234;;2985:125::o"
+var PreimageOracleDeployedSourceMap = "57:2945:58:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;143:68;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;413:25:228;;;401:2;386:18;143:68:58;;;;;;;;217:66;;;;;;:::i;:::-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;614:14:228;;607:22;589:41;;577:2;562:18;217:66:58;449:187:228;290:454:58;;;;;;:::i;:::-;;:::i;:::-;;;;815:25:228;;;871:2;856:18;;849:34;;;;788:18;290:454:58;641:248:228;1537:1463:58;;;;;;:::i;:::-;;:::i;:::-;;1086:262;;;;;;:::i;:::-;1219:19;;;;:14;:19;;;;;;;;:31;;;;;;;;:38;;;;1253:4;1219:38;;;;;;1267:18;;;;;;;;:30;;;;;;;;;:37;;;;1314:20;;;;;;;;;;:27;1086:262;87:50;;;;;;:::i;:::-;;;;;;;;;;;;;;;290:454;388:11;439:19;;;:14;:19;;;;;;;;:27;;;;;;;;;388:11;;439:27;;431:59;;;;;;;2517:2:228;431:59:58;;;2499:21:228;2556:2;2536:18;;;2529:30;2595:21;2575:18;;;2568:49;2634:18;;431:59:58;;;;;;;;-1:-1:-1;521:14:58;538:20;;;509:2;538:20;;;;;;;;631:10;538:20;640:1;631:10;:::i;:::-;616:11;:6;625:2;616:11;:::i;:::-;:25;612:84;;679:6;666:10;:6;675:1;666:10;:::i;:::-;:19;;;;:::i;:::-;657:28;;612:84;-1:-1:-1;711:18:58;;;;:13;:18;;;;;;;;:26;;;;;;;;;;;;290:454;;-1:-1:-1;290:454:58:o;1537:1463::-;1831:4;1818:18;1636:12;;1966:1;1956:12;;1941:28;;1931:84;;1999:1;1996;1989:12;1931:84;2258:3;2254:14;;;2158:4;2242:27;2289:11;2263:4;2408:15;2289:11;2390:40;2620:28;;;2624:11;2620:28;2614:35;2671:20;;;;2818:19;2811:27;2840:11;2808:44;2871:19;;;;2849:1;2871:19;;;;;;;;:31;;;;;;;;:38;;;;2905:4;2871:38;;;;;;2919:18;;;;;;;;:30;;;;;;;;;:37;;;;2966:20;;;;;;;;;;;:27;;;;-1:-1:-1;;;;1537:1463:58:o;14:248:228:-;82:6;90;143:2;131:9;122:7;118:23;114:32;111:52;;;159:1;156;149:12;111:52;-1:-1:-1;;182:23:228;;;252:2;237:18;;;224:32;;-1:-1:-1;14:248:228:o;894:659::-;973:6;981;989;1042:2;1030:9;1021:7;1017:23;1013:32;1010:52;;;1058:1;1055;1048:12;1010:52;1094:9;1081:23;1071:33;;1155:2;1144:9;1140:18;1127:32;1178:18;1219:2;1211:6;1208:14;1205:34;;;1235:1;1232;1225:12;1205:34;1273:6;1262:9;1258:22;1248:32;;1318:7;1311:4;1307:2;1303:13;1299:27;1289:55;;1340:1;1337;1330:12;1289:55;1380:2;1367:16;1406:2;1398:6;1395:14;1392:34;;;1422:1;1419;1412:12;1392:34;1467:7;1462:2;1453:6;1449:2;1445:15;1441:24;1438:37;1435:57;;;1488:1;1485;1478:12;1435:57;1519:2;1515;1511:11;1501:21;;1541:6;1531:16;;;;;894:659;;;;;:::o;1558:385::-;1644:6;1652;1660;1668;1721:3;1709:9;1700:7;1696:23;1692:33;1689:53;;;1738:1;1735;1728:12;1689:53;-1:-1:-1;;1761:23:228;;;1831:2;1816:18;;1803:32;;-1:-1:-1;1882:2:228;1867:18;;1854:32;;1933:2;1918:18;1905:32;;-1:-1:-1;1558:385:228;-1:-1:-1;1558:385:228:o;1948:180::-;2007:6;2060:2;2048:9;2039:7;2035:23;2031:32;2028:52;;;2076:1;2073;2066:12;2028:52;-1:-1:-1;2099:23:228;;1948:180;-1:-1:-1;1948:180:228:o;2663:184::-;2715:77;2712:1;2705:88;2812:4;2809:1;2802:15;2836:4;2833:1;2826:15;2852:128;2892:3;2923:1;2919:6;2916:1;2913:13;2910:39;;;2929:18;;:::i;:::-;-1:-1:-1;2965:9:228;;2852:128::o;2985:125::-;3025:4;3053:1;3050;3047:8;3044:34;;;3058:18;;:::i;:::-;-1:-1:-1;3095:9:228;;2985:125::o"

 func init() {
 	if err := json.Unmarshal([]byte(PreimageOracleStorageLayoutJSON), PreimageOracleStorageLayout); err != nil {
......
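Only the source-map string changes in this generated file: the trailing `:234` file indices become `:228`. A tiny parser makes the entry format legible; the renumbering is consistent with the compilation's source list shrinking after the echidna contracts were deleted (an inference from this commit, not something the file states).

```go
package main

import "fmt"

// parseEntry decodes one ";"-separated Solidity source-map entry of the
// form "s:l:f" (byte offset, length, source-file index).
func parseEntry(entry string) (offset, length, file int) {
	fmt.Sscanf(entry, "%d:%d:%d", &offset, &length, &file)
	return
}

func main() {
	fmt.Println(parseEntry("413:25:234")) // 413 25 234
}
```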
@@ -80,17 +80,6 @@ pnpm build
 pnpm test
 ```

-#### Running Echidna tests
-
-You must have [Echidna](https://github.com/crytic/echidna) installed.
-
-Contracts targetted for Echidna testing are located in `./contracts/echidna`
-Each target contract is tested with a separate pnpm command, for example:
-
-```shell
-pnpm echidna:aliasing
-```
-
 ### Deployment

 The smart contracts are deployed using `foundry` with a `hardhat-deploy` compatibility layer. When the contracts are deployed,
......
pragma solidity 0.8.15;
import { AddressAliasHelper } from "../vendor/AddressAliasHelper.sol";
contract EchidnaFuzzAddressAliasing {
bool internal failedRoundtrip;
/**
* @notice Takes an address to be aliased with AddressAliasHelper and then unaliased
* and updates the test contract's state indicating if the round trip encoding
* failed.
*/
function testRoundTrip(address addr) public {
// Alias our address
address aliasedAddr = AddressAliasHelper.applyL1ToL2Alias(addr);
// Unalias our address
address undoneAliasAddr = AddressAliasHelper.undoL1ToL2Alias(aliasedAddr);
// If our round trip aliasing did not return the original result, set our state.
if (addr != undoneAliasAddr) {
failedRoundtrip = true;
}
}
/**
* @custom:invariant Address aliases are always able to be undone.
*
* Asserts that an address that has been aliased with `applyL1ToL2Alias` can always
* be unaliased with `undoL1ToL2Alias`.
*/
function echidna_round_trip_aliasing() public view returns (bool) {
// ASSERTION: The round trip aliasing done in testRoundTrip(...) should never fail.
return !failedRoundtrip;
}
}
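For reference, the round trip this deleted fuzz target exercised is plain modular arithmetic: AddressAliasHelper offsets an L1 address by 0x1111000000000000000000000000000000001111 modulo 2^160, and undoes it by subtracting the same constant. A Go sketch of the same round trip, mirroring the library rather than quoting it:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

var (
	// The well-known L1 -> L2 aliasing offset used by AddressAliasHelper.
	offset, _ = new(big.Int).SetString("1111000000000000000000000000000000001111", 16)
	// Addresses are 160-bit values, so all arithmetic is mod 2^160.
	addressMod = new(big.Int).Lsh(big.NewInt(1), 160)
)

func applyL1ToL2Alias(addr common.Address) common.Address {
	sum := new(big.Int).Add(addr.Big(), offset)
	return common.BigToAddress(sum.Mod(sum, addressMod))
}

func undoL1ToL2Alias(addr common.Address) common.Address {
	diff := new(big.Int).Sub(addr.Big(), offset)
	return common.BigToAddress(diff.Mod(diff, addressMod))
}

func main() {
	addr := common.HexToAddress("0xdeadbeef00000000000000000000000000000000")
	// The round-trip invariant the deleted contract asserted:
	fmt.Println(undoL1ToL2Alias(applyL1ToL2Alias(addr)) == addr) // true
}
```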
pragma solidity 0.8.15;
import { Burn } from "../libraries/Burn.sol";
import { StdUtils } from "forge-std/Test.sol";
contract EchidnaFuzzBurnEth is StdUtils {
bool internal failedEthBurn;
/**
* @notice Takes an integer amount of eth to burn through the Burn library and
* updates the contract state if an incorrect amount of eth moved from the contract
*/
function testBurn(uint256 _value) public {
// cache the contract's eth balance
uint256 preBurnBalance = address(this).balance;
uint256 value = bound(_value, 0, preBurnBalance);
// execute a burn of _value eth
Burn.eth(value);
// check that exactly value eth was transferred from the contract
unchecked {
if (address(this).balance != preBurnBalance - value) {
failedEthBurn = true;
}
}
}
/**
* @custom:invariant `eth(uint256)` always burns the exact amount of eth passed.
*
* Asserts that when `Burn.eth(uint256)` is called, it always burns the exact amount
* of ETH passed to the function.
*/
function echidna_burn_eth() public view returns (bool) {
// ASSERTION: The amount burned should always match the amount passed exactly
return !failedEthBurn;
}
}
contract EchidnaFuzzBurnGas is StdUtils {
bool internal failedGasBurn;
/**
* @notice Takes an integer amount of gas to burn through the Burn library and
* updates the contract state if at least that amount of gas was not burned
* by the library
*/
function testGas(uint256 _value) public {
// cap the value to the max resource limit
uint256 MAX_RESOURCE_LIMIT = 8_000_000;
uint256 value = bound(_value, 0, MAX_RESOURCE_LIMIT);
// cache the contract's current remaining gas
uint256 preBurnGas = gasleft();
// execute the gas burn
Burn.gas(value);
// cache the remaining gas post burn
uint256 postBurnGas = gasleft();
// check that at least value gas was burnt (and that there was no underflow)
unchecked {
if (postBurnGas - preBurnGas > value || preBurnGas - value > preBurnGas) {
failedGasBurn = true;
}
}
}
/**
* @custom:invariant `gas(uint256)` always burns at least the amount of gas passed.
*
* Asserts that when `Burn.gas(uint256)` is called, it always burns at least the amount
* of gas passed to the function.
*/
function echidna_burn_gas() public view returns (bool) {
// ASSERTION: The amount of gas burned should be strictly greater than
// the amount passed as _value (minimum _value + whatever minor overhead to
// the value after the call)
return !failedGasBurn;
}
}
pragma solidity 0.8.15;
import { Encoding } from "../libraries/Encoding.sol";
contract EchidnaFuzzEncoding {
bool internal failedRoundtripAToB;
bool internal failedRoundtripBToA;
/**
* @notice Takes a pair of integers to be encoded into a versioned nonce with the
* Encoding library and then decoded and updates the test contract's state
* indicating if the round trip encoding failed.
*/
function testRoundTripAToB(uint240 _nonce, uint16 _version) public {
// Encode the nonce and version
uint256 encodedVersionedNonce = Encoding.encodeVersionedNonce(_nonce, _version);
// Decode the nonce and version
uint240 decodedNonce;
uint16 decodedVersion;
(decodedNonce, decodedVersion) = Encoding.decodeVersionedNonce(encodedVersionedNonce);
// If our round trip encoding did not return the original result, set our state.
if ((decodedNonce != _nonce) || (decodedVersion != _version)) {
failedRoundtripAToB = true;
}
}
/**
* @notice Takes an integer representing a packed version and nonce and attempts
* to decode them using the Encoding library before re-encoding and updates
* the test contract's state indicating if the round trip encoding failed.
*/
function testRoundTripBToA(uint256 _versionedNonce) public {
// Decode the nonce and version
uint240 decodedNonce;
uint16 decodedVersion;
(decodedNonce, decodedVersion) = Encoding.decodeVersionedNonce(_versionedNonce);
// Encode the nonce and version
uint256 encodedVersionedNonce = Encoding.encodeVersionedNonce(decodedNonce, decodedVersion);
// If our round trip encoding did not return the original result, set our state.
if (encodedVersionedNonce != _versionedNonce) {
failedRoundtripBToA = true;
}
}
/**
* @custom:invariant `testRoundTripAToB` never fails.
*
* Asserts that a raw versioned nonce can be encoded / decoded to reach the same raw value.
*/
function echidna_round_trip_encoding_AToB() public view returns (bool) {
// ASSERTION: The round trip encoding done in testRoundTripAToB(...)
return !failedRoundtripAToB;
}
/**
* @custom:invariant `testRoundTripBToA` never fails.
*
* Asserts that an encoded versioned nonce can always be decoded / re-encoded to reach
* the same encoded value.
*/
function echidna_round_trip_encoding_BToA() public view returns (bool) {
// ASSERTION: The round trip encoding done in testRoundTripBToA should never
// fail.
return !failedRoundtripBToA;
}
}
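The round trips above rely on `encodeVersionedNonce` packing the 16-bit version into the top bits of a single word. A minimal sketch of the assumed layout (the production library implements the same packing in assembly):

// Minimal sketch of the assumed versioned-nonce layout.
library EncodingSketch {
    // Packs a 240-bit nonce and a 16-bit version into one uint256, with
    // the version occupying the top 16 bits.
    function encodeVersionedNonce(uint240 _nonce, uint16 _version) internal pure returns (uint256) {
        return (uint256(_version) << 240) | uint256(_nonce);
    }

    // Reverses the packing: the low 240 bits are the nonce, the top 16
    // bits are the version.
    function decodeVersionedNonce(uint256 _nonce) internal pure returns (uint240, uint16) {
        return (uint240(_nonce), uint16(_nonce >> 240));
    }
}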
pragma solidity 0.8.15;
import { Hashing } from "../libraries/Hashing.sol";
import { Encoding } from "../libraries/Encoding.sol";
contract EchidnaFuzzHashing {
bool internal failedCrossDomainHashHighVersion;
bool internal failedCrossDomainHashV0;
bool internal failedCrossDomainHashV1;
/**
* @notice Takes the necessary parameters to perform a cross domain hash with a randomly
* generated version. Only schema versions 0 and 1 are supported and all others should revert.
*/
function testHashCrossDomainMessageHighVersion(
uint16 _version,
uint240 _nonce,
address _sender,
address _target,
uint256 _value,
uint256 _gasLimit,
bytes memory _data
) public {
// generate the versioned nonce
uint256 encodedNonce = Encoding.encodeVersionedNonce(_nonce, _version);
// hash the cross domain message. we don't need to store the result since the function
// validates and should revert if an invalid version (>1) is encoded
Hashing.hashCrossDomainMessage(encodedNonce, _sender, _target, _value, _gasLimit, _data);
// check that execution never makes it this far for an invalid version
if (_version > 1) {
failedCrossDomainHashHighVersion = true;
}
}
/**
* @notice Takes the necessary parameters to perform a cross domain hash using the v0 schema
* and compares the output of a call to the unversioned function to the v0 function directly
*/
function testHashCrossDomainMessageV0(
uint240 _nonce,
address _sender,
address _target,
uint256 _value,
uint256 _gasLimit,
bytes memory _data
) public {
// generate the versioned nonce with the version set to 0
uint256 encodedNonce = Encoding.encodeVersionedNonce(_nonce, 0);
// hash the cross domain message using the unversioned and versioned functions for
// comparison
bytes32 sampleHash1 = Hashing.hashCrossDomainMessage(
encodedNonce,
_sender,
_target,
_value,
_gasLimit,
_data
);
bytes32 sampleHash2 = Hashing.hashCrossDomainMessageV0(
_target,
_sender,
_data,
encodedNonce
);
// check that the output of both functions matches
if (sampleHash1 != sampleHash2) {
failedCrossDomainHashV0 = true;
}
}
/**
* @notice Takes the necessary parameters to perform a cross domain hash using the v1 schema
* and compares the output of a call to the unversioned function to the v1 function directly
*/
function testHashCrossDomainMessageV1(
uint240 _nonce,
address _sender,
address _target,
uint256 _value,
uint256 _gasLimit,
bytes memory _data
) public {
// generate the versioned nonce with the version set to 1
uint256 encodedNonce = Encoding.encodeVersionedNonce(_nonce, 1);
// hash the cross domain message using the unversioned and versioned functions for
// comparison
bytes32 sampleHash1 = Hashing.hashCrossDomainMessage(
encodedNonce,
_sender,
_target,
_value,
_gasLimit,
_data
);
bytes32 sampleHash2 = Hashing.hashCrossDomainMessageV1(
encodedNonce,
_sender,
_target,
_value,
_gasLimit,
_data
);
// check that the output of both functions matches
if (sampleHash1 != sampleHash2) {
failedCrossDomainHashV1 = true;
}
}
/**
* @custom:invariant `hashCrossDomainMessage` reverts if `version` is > `1`.
*
* The `hashCrossDomainMessage` function should always revert if the `version` passed is > `1`.
*/
function echidna_hash_xdomain_msg_high_version() public view returns (bool) {
// ASSERTION: A call to hashCrossDomainMessage will never succeed for a version > 1
return !failedCrossDomainHashHighVersion;
}
/**
* @custom:invariant `version` = `0`: `hashCrossDomainMessage` and `hashCrossDomainMessageV0`
* are equivalent.
*
* If the version passed is 0, `hashCrossDomainMessage` and `hashCrossDomainMessageV0` should be
* equivalent.
*/
function echidna_hash_xdomain_msg_0() public view returns (bool) {
// ASSERTION: A call to hashCrossDomainMessage and hashCrossDomainMessageV0
// should always match when the version passed is 0
return !failedCrossDomainHashV0;
}
/**
* @custom:invariant `version` = `1`: `hashCrossDomainMessage` and `hashCrossDomainMessageV1`
* are equivalent.
*
* If the version passed is 1, `hashCrossDomainMessage` and `hashCrossDomainMessageV1` should be
* equivalent.
*/
function echidna_hash_xdomain_msg_1() public view returns (bool) {
// ASSERTION: A call to hashCrossDomainMessage and hashCrossDomainMessageV1
// should always match when the version passed is 1
return !failedCrossDomainHashV1;
}
}
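All three hashing properties follow from the dispatching shape assumed for `Hashing.hashCrossDomainMessage`: decode the version from the nonce, route to the matching versioned hasher, and revert on anything else. A sketch of that assumed shape, reusing the `Hashing` and `Encoding` imports above:

// Assumed shape of the dispatcher under test (a sketch, not the real source).
library HashingSketch {
    function hashCrossDomainMessage(
        uint256 _nonce,
        address _sender,
        address _target,
        uint256 _value,
        uint256 _gasLimit,
        bytes memory _data
    ) internal pure returns (bytes32) {
        // Route on the version packed into the top bits of the nonce.
        (, uint16 version) = Encoding.decodeVersionedNonce(_nonce);
        if (version == 0) {
            return Hashing.hashCrossDomainMessageV0(_target, _sender, _data, _nonce);
        } else if (version == 1) {
            return Hashing.hashCrossDomainMessageV1(_nonce, _sender, _target, _value, _gasLimit, _data);
        }
        // Any version > 1 reverts, which testHashCrossDomainMessageHighVersion relies on.
        revert("HashingSketch: unknown cross domain message version");
    }
}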
pragma solidity 0.8.15;
import { OptimismPortal } from "../L1/OptimismPortal.sol";
import { L2OutputOracle } from "../L1/L2OutputOracle.sol";
import { AddressAliasHelper } from "../vendor/AddressAliasHelper.sol";
import { SystemConfig } from "../L1/SystemConfig.sol";
import { ResourceMetering } from "../L1/ResourceMetering.sol";
import { Constants } from "../libraries/Constants.sol";
contract EchidnaFuzzOptimismPortal {
OptimismPortal internal portal;
bool internal failedToComplete;
constructor() {
ResourceMetering.ResourceConfig memory rcfg = Constants.DEFAULT_RESOURCE_CONFIG();
SystemConfig systemConfig = new SystemConfig({
_owner: address(1),
_overhead: 0,
_scalar: 10000,
_batcherHash: bytes32(0),
_gasLimit: 30_000_000,
_unsafeBlockSigner: address(0),
_config: rcfg
});
portal = new OptimismPortal({
_l2Oracle: L2OutputOracle(address(0)),
_guardian: address(0),
_paused: false,
_config: systemConfig
});
}
// A test intended to identify any unexpected halting conditions
function testDepositTransactionCompletes(
address _to,
uint256 _mint,
uint256 _value,
uint64 _gasLimit,
bool _isCreation,
bytes memory _data
) public payable {
failedToComplete = true;
require(!_isCreation || _to == address(0), "EchidnaFuzzOptimismPortal: invalid test case.");
portal.depositTransaction{ value: _mint }(_to, _value, _gasLimit, _isCreation, _data);
failedToComplete = false;
}
/**
* @custom:invariant Deposits of any value should always succeed unless
* `_to` = `address(0)` or `_isCreation` = `true`.
*
* All deposits, barring creation transactions and transactions sent to `address(0)`,
* should always succeed.
*/
function echidna_deposit_completes() public view returns (bool) {
return !failedToComplete;
}
}
pragma solidity 0.8.15;
import { ResourceMetering } from "../L1/ResourceMetering.sol";
import { Arithmetic } from "../libraries/Arithmetic.sol";
import { StdUtils } from "forge-std/Test.sol";
import { Constants } from "../libraries/Constants.sol";
contract EchidnaFuzzResourceMetering is ResourceMetering, StdUtils {
bool internal failedMaxGasPerBlock;
bool internal failedRaiseBaseFee;
bool internal failedLowerBaseFee;
bool internal failedNeverBelowMinBaseFee;
bool internal failedMaxRaiseBaseFeePerBlock;
bool internal failedMaxLowerBaseFeePerBlock;
// Used as a special flag for the purpose of identifying unchecked math errors specifically
// in the test contracts, not the target contracts themselves.
bool internal underflow;
constructor() {
initialize();
}
function initialize() internal initializer {
__ResourceMetering_init();
}
function resourceConfig() public pure returns (ResourceMetering.ResourceConfig memory) {
return _resourceConfig();
}
function _resourceConfig()
internal
pure
override
returns (ResourceMetering.ResourceConfig memory)
{
ResourceMetering.ResourceConfig memory rcfg = Constants.DEFAULT_RESOURCE_CONFIG();
return rcfg;
}
/**
* @notice Takes the necessary parameters to allow us to burn arbitrary amounts of gas to test
* the underlying resource metering/gas market logic
*/
function testBurn(uint256 _gasToBurn, bool _raiseBaseFee) public {
// Part 1: we cache the current param values and do some basic checks on them.
uint256 cachedPrevBaseFee = uint256(params.prevBaseFee);
uint256 cachedPrevBoughtGas = uint256(params.prevBoughtGas);
uint256 cachedPrevBlockNum = uint256(params.prevBlockNum);
ResourceMetering.ResourceConfig memory rcfg = resourceConfig();
uint256 targetResourceLimit = uint256(rcfg.maxResourceLimit) /
uint256(rcfg.elasticityMultiplier);
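// EIP-1559-style market: the per-block gas target is the hard cap
// (maxResourceLimit) divided by the elasticity multiplier, mirroring how
// Ethereum's gas target relates to its block gas limit.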
// check that the last block's base fee hasn't dropped below the minimum
if (cachedPrevBaseFee < uint256(rcfg.minimumBaseFee)) {
failedNeverBelowMinBaseFee = true;
}
// check that the last block didn't consume more than the max amount of gas
if (cachedPrevBoughtGas > uint256(rcfg.maxResourceLimit)) {
failedMaxGasPerBlock = true;
}
// Part 2: we perform the gas burn
// force the gasToBurn into the correct range based on whether we intend to
// raise or lower the baseFee after this block, respectively
uint256 gasToBurn;
if (_raiseBaseFee) {
gasToBurn = bound(
_gasToBurn,
uint256(targetResourceLimit),
uint256(rcfg.maxResourceLimit)
);
} else {
gasToBurn = bound(_gasToBurn, 0, targetResourceLimit);
}
_burnInternal(uint64(gasToBurn));
// Part 3: we run checks and modify our invariant flags based on the updated params values
// Calculate the maximum allowed baseFee change (per block)
uint256 maxBaseFeeChange = cachedPrevBaseFee / uint256(rcfg.baseFeeMaxChangeDenominator);
// If the last block used more than the target amount of gas (and there were no
// empty blocks in between), ensure this block's baseFee increased, but not by
// more than the max amount per block
if (
(cachedPrevBoughtGas > uint256(targetResourceLimit)) &&
(uint256(params.prevBlockNum) - cachedPrevBlockNum == 1)
) {
failedRaiseBaseFee = failedRaiseBaseFee || (params.prevBaseFee <= cachedPrevBaseFee);
failedMaxRaiseBaseFeePerBlock =
failedMaxRaiseBaseFeePerBlock ||
((uint256(params.prevBaseFee) - cachedPrevBaseFee) > maxBaseFeeChange);
}
// If the last block used less than the target amount of gas (or was empty),
// ensure that this block's baseFee decreased, but not by more than the max amount
if (
(cachedPrevBoughtGas < uint256(targetResourceLimit)) ||
(uint256(params.prevBlockNum) - cachedPrevBlockNum > 1)
) {
// Invariant: baseFee should decrease
failedLowerBaseFee =
failedLowerBaseFee ||
(uint256(params.prevBaseFee) > cachedPrevBaseFee);
if (params.prevBlockNum - cachedPrevBlockNum == 1) {
// No empty blocks
// Invariant: baseFee should not have decreased by more than the maximum amount
failedMaxLowerBaseFeePerBlock =
failedMaxLowerBaseFeePerBlock ||
((cachedPrevBaseFee - uint256(params.prevBaseFee)) > maxBaseFeeChange);
} else if (params.prevBlockNum - cachedPrevBlockNum > 1) {
// We have at least one empty block
// Update the maxBaseFeeChange to account for multiple blocks having passed
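// Assuming Arithmetic.cdexp(coef, denom, exp) computes
// coef * (1 - 1/denom) ** exp, the decayed base fee after
// n = prevBlockNum - cachedPrevBlockNum empty blocks is
// clamp(prevBaseFee * (1 - 1/denom) ** n, minimumBaseFee, maximumBaseFee),
// so the largest allowed total decrease is prevBaseFee minus that value.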
unchecked {
maxBaseFeeChange = uint256(
int256(cachedPrevBaseFee) -
Arithmetic.clamp(
Arithmetic.cdexp(
int256(cachedPrevBaseFee),
int256(uint256(rcfg.baseFeeMaxChangeDenominator)),
int256(uint256(params.prevBlockNum) - cachedPrevBlockNum)
),
int256(uint256(rcfg.minimumBaseFee)),
int256(uint256(rcfg.maximumBaseFee))
)
);
}
// Detect an underflow in the previous calculation.
// Without the unchecked block above, an underflow would revert, and echidna
// would silently discard the reverting call rather than flag it here.
underflow = underflow || maxBaseFeeChange > cachedPrevBaseFee;
// Invariant: baseFee should not have decreased by more than the maximum amount
failedMaxLowerBaseFeePerBlock =
failedMaxLowerBaseFeePerBlock ||
((cachedPrevBaseFee - uint256(params.prevBaseFee)) > maxBaseFeeChange);
}
}
}
function _burnInternal(uint64 _gasToBurn) private metered(_gasToBurn) {}
/**
* @custom:invariant The base fee should increase if the last block used more
* than the target amount of gas
*
* If the last block used more than the target amount of gas (and there were no
* empty blocks in between), ensure this block's baseFee increased, but not by
* more than the max amount per block.
*/
function echidna_high_usage_raise_baseFee() public view returns (bool) {
return !failedRaiseBaseFee;
}
/**
* @custom:invariant The base fee should decrease if the last block used less
* than the target amount of gas
*
* If the previous block used less than the target amount of gas, the base fee should decrease,
* but not more than the max amount.
*/
function echidna_low_usage_lower_baseFee() public view returns (bool) {
return !failedLowerBaseFee;
}
/**
* @custom:invariant A block's base fee should never be below `MINIMUM_BASE_FEE`
*
* This test asserts that a block's base fee can never drop below the
* `MINIMUM_BASE_FEE` threshold.
*/
function echidna_never_below_min_baseFee() public view returns (bool) {
return !failedNeverBelowMinBaseFee;
}
/**
* @custom:invariant A block can never consume more than `MAX_RESOURCE_LIMIT` gas.
*
* This test asserts that a block can never consume more than the `MAX_RESOURCE_LIMIT`
* gas threshold.
*/
function echidna_never_above_max_gas_limit() public view returns (bool) {
return !failedMaxGasPerBlock;
}
/**
* @custom:invariant The base fee can never be raised more than the max base fee change.
*
* After a block consumes more gas than the target gas, the base fee cannot be raised
* more than the maximum amount allowed. The max base fee change (per-block) is derived
* as follows: `prevBaseFee / BASE_FEE_MAX_CHANGE_DENOMINATOR`
*/
function echidna_never_exceed_max_increase() public view returns (bool) {
return !failedMaxRaiseBaseFeePerBlock;
}
/**
* @custom:invariant The base fee can never be lowered more than the max base fee change.
*
* After a block consumes less than the target gas, the base fee cannot be lowered more
* than the maximum amount allowed. The max base fee change (per-block) is derived as
* follows: `prevBaseFee / BASE_FEE_MAX_CHANGE_DENOMINATOR`
*/
function echidna_never_exceed_max_decrease() public view returns (bool) {
return !failedMaxLowerBaseFeePerBlock;
}
/**
* @custom:invariant The `maxBaseFeeChange` calculation over multiple blocks can never
* underflow.
*
* When calculating the `maxBaseFeeChange` after multiple empty blocks, the calculation
* should never be allowed to underflow.
*/
function echidna_underflow() public view returns (bool) {
return !underflow;
}
}
cryticArgs: ["--hardhat-ignore-compile"]
format: text
# Set the timeout to 3 minutes to keep CI from getting too long.
# The tool also adds 2 or 3 minutes before/after the actual timeout window.
timeout: 180
# Prevent calls to the (non-existent) fallback function of EchidnaFuzzResourceMetering
filterFunctions: [EchidnaFuzzResourceMetering.*fallback*()]
...@@ -18,7 +18,6 @@ build_info = true
build_info_path = 'artifacts/build-info'
ffi = true
fuzz_runs = 16
no_match_contract = 'EchidnaFuzz'
# PNPM symlinks all node_modules from the monorepo root
allow_paths = ["../../node_modules", "./**"]
...@@ -32,6 +31,3 @@ fs_permissions = [
[profile.ci]
fuzz_runs = 512
[profile.echidna]
bytecode_hash = 'ipfs'
# `AddressAliasing` Invariants
## Address aliases are always able to be undone.
**Test:** [`FuzzAddressAliasing.sol#L32`](../contracts/echidna/FuzzAddressAliasing.sol#L32)
Asserts that an address that has been aliased with `applyL1ToL2Alias` can always be unaliased with `undoL1ToL2Alias`.
# `Burn` Invariants
## `eth(uint256)` always burns the exact amount of eth passed.
**Test:** [`FuzzBurn.sol#L35`](../contracts/echidna/FuzzBurn.sol#L35)
Asserts that when `Burn.eth(uint256)` is called, it always burns the exact amount of ETH passed to the function.
## `gas(uint256)` always burns at least the amount of gas passed.
**Test:** [`FuzzBurn.sol#L77`](../contracts/echidna/FuzzBurn.sol#L77)
Asserts that when `Burn.gas(uint256)` is called, it always burns at least the amount of gas passed to the function.
...@@ -10,15 +10,3 @@ Asserts that a raw versioned nonce can be encoded / decoded to reach the same raw value.
**Test:** [`Encoding.t.sol#L87`](../contracts/test/invariants/Encoding.t.sol#L87)
Asserts that an encoded versioned nonce can always be decoded / re-encoded to reach the same encoded value.
## `testRoundTripAToB` never fails.
**Test:** [`FuzzEncoding.sol#L56`](../contracts/echidna/FuzzEncoding.sol#L56)
Asserts that a raw versioned nonce can be encoded / decoded to reach the same raw value.
## `testRoundTripBToA` never fails.
**Test:** [`FuzzEncoding.sol#L67`](../contracts/echidna/FuzzEncoding.sol#L67)
Asserts that an encoded versioned nonce can always be decoded / re-encoded to reach the same encoded value.
...@@ -16,21 +16,3 @@ If the version passed is 0, `hashCrossDomainMessage` and `hashCrossDomainMessageV0` should be equivalent.
**Test:** [`Hashing.t.sol#L166`](../contracts/test/invariants/Hashing.t.sol#L166)
If the version passed is 1, `hashCrossDomainMessage` and `hashCrossDomainMessageV1` should be equivalent.
## `hashCrossDomainMessage` reverts if `version` is > `1`.
**Test:** [`FuzzHashing.sol#L120`](../contracts/echidna/FuzzHashing.sol#L120)
The `hashCrossDomainMessage` function should always revert if the `version` passed is > `1`.
## `version` = `0`: `hashCrossDomainMessage` and `hashCrossDomainMessageV0` are equivalent.
**Test:** [`FuzzHashing.sol#L132`](../contracts/echidna/FuzzHashing.sol#L132)
If the version passed is 0, `hashCrossDomainMessage` and `hashCrossDomainMessageV0` should be equivalent.
## `version` = `1`: `hashCrossDomainMessage` and `hashCrossDomainMessageV1` are equivalent.
**Test:** [`FuzzHashing.sol#L145`](../contracts/echidna/FuzzHashing.sol#L145)
If the version passed is 1, `hashCrossDomainMessage` and `hashCrossDomainMessageV1` should be equivalent.
...@@ -22,9 +22,3 @@ Ensures that there is no chain of calls that can be made that allows a withdrawal
**Test:** [`OptimismPortal.t.sol#L260`](../contracts/test/invariants/OptimismPortal.t.sol#L260)
This invariant asserts that there is no chain of calls that can be made that will prevent a withdrawal from being finalized exactly `FINALIZATION_PERIOD_SECONDS` after it was successfully proven.
## Deposits of any value should always succeed unless `_to` = `address(0)` or `_isCreation` = `true`.
**Test:** [`FuzzOptimismPortal.sol#L57`](../contracts/echidna/FuzzOptimismPortal.sol#L57)
All deposits, barring creation transactions and transactions sent to `address(0)`, should always succeed.
...@@ -7,10 +7,8 @@ This directory contains documentation for all defined invariant tests within `contracts-bedrock`.
## Table of Contents
- [AddressAliasHelper](./AddressAliasHelper.md)
- [AddressAliasing](./AddressAliasing.md)
- [Burn.Eth](./Burn.Eth.md)
- [Burn.Gas](./Burn.Gas.md)
- [Burn](./Burn.md)
- [CrossDomainMessenger](./CrossDomainMessenger.md)
- [Encoding](./Encoding.md)
- [Hashing](./Hashing.md)
...@@ -47,20 +45,3 @@ function invariant_<shortDescription>() external {
    // ...
}
```
### Echidna Invariants
All `echidna` invariant tests must exist within the `contracts/echidna` folder, and the file name should be
`Fuzz<ContractName>.sol`, where `<ContractName>` is the name of the contract that is being tested.
All property tests within `echidna` invariant files should follow the convention:
```solidity
/**
* @custom:invariant <title>
*
* <longDescription>
*/
function echidna_<shortDescription>() external view returns (bool) {
// ...
}
```
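For example, the round-trip property removed from `FuzzEncoding.sol` elsewhere in this diff instantiates the template (the harness declares it `public` rather than `external`, which Echidna also accepts):

```solidity
/**
 * @custom:invariant `testRoundTripAToB` never fails.
 *
 * Asserts that a raw versioned nonce can be encoded / decoded to reach the same raw value.
 */
function echidna_round_trip_encoding_AToB() public view returns (bool) {
    return !failedRoundtripAToB;
}
```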
...@@ -40,45 +40,3 @@ After a block consumes less than the target gas, the base fee cannot be lowered more than the maximum amount allowed.
**Test:** [`ResourceMetering.t.sol#L244`](../contracts/test/invariants/ResourceMetering.t.sol#L244)
When calculating the `maxBaseFeeChange` after multiple empty blocks, the calculation should never be allowed to underflow.
## The base fee should increase if the last block used more than the target amount of gas
**Test:** [`FuzzResourceMetering.sol#L158`](../contracts/echidna/FuzzResourceMetering.sol#L158)
If the last block used more than the target amount of gas (and there were no empty blocks in between), ensure this block's baseFee increased, but not by more than the max amount per block.
## The base fee should decrease if the last block used less than the target amount of gas
**Test:** [`FuzzResourceMetering.sol#L169`](../contracts/echidna/FuzzResourceMetering.sol#L169)
If the previous block used less than the target amount of gas, the base fee should decrease, but not more than the max amount.
## A block's base fee should never be below `MINIMUM_BASE_FEE`
**Test:** [`FuzzResourceMetering.sol#L179`](../contracts/echidna/FuzzResourceMetering.sol#L179)
This test asserts that a block's base fee can never drop below the `MINIMUM_BASE_FEE` threshold.
## A block can never consume more than `MAX_RESOURCE_LIMIT` gas.
**Test:** [`FuzzResourceMetering.sol#L189`](../contracts/echidna/FuzzResourceMetering.sol#L189)
This test asserts that a block can never consume more than the `MAX_RESOURCE_LIMIT` gas threshold.
## The base fee can never be raised more than the max base fee change.
**Test:** [`FuzzResourceMetering.sol#L200`](../contracts/echidna/FuzzResourceMetering.sol#L200)
After a block consumes more gas than the target gas, the base fee cannot be raised more than the maximum amount allowed. The max base fee change (per-block) is derived as follows: `prevBaseFee / BASE_FEE_MAX_CHANGE_DENOMINATOR`
## The base fee can never be lowered more than the max base fee change.
**Test:** [`FuzzResourceMetering.sol#L211`](../contracts/echidna/FuzzResourceMetering.sol#L211)
After a block consumes less than the target gas, the base fee cannot be lowered more than the maximum amount allowed. The max base fee change (per-block) is derived as follows: `prevBaseFee / BASE_FEE_MAX_CHANGE_DENOMINATOR`
## The `maxBaseFeeChange` calculation over multiple blocks can never underflow.
**Test:** [`FuzzResourceMetering.sol#L222`](../contracts/echidna/FuzzResourceMetering.sol#L222)
When calculating the `maxBaseFeeChange` after multiple empty blocks, the calculation should never be allowed to underflow.
...@@ -10,7 +10,6 @@
],
"scripts": {
"bindings": "cd ../../op-bindings && make",
"build:with-metadata": "FOUNDRY_PROFILE=echidna pnpm build:forge",
"build": "npx nx build:contracts", "build": "npx nx build:contracts",
"prebuild:contracts": "./scripts/verify-foundry-install.sh", "prebuild:contracts": "./scripts/verify-foundry-install.sh",
"build:contracts": "pnpm build:forge", "build:contracts": "pnpm build:forge",
...@@ -35,14 +34,7 @@ ...@@ -35,14 +34,7 @@
"lint:ts:fix": "eslint --fix .", "lint:ts:fix": "eslint --fix .",
"lint:contracts:fix": "pnpm solhint --fix 'contracts/**/!(DisputeTypes|RLPReader).sol' && pnpm prettier --write 'contracts/**/!(DisputeTypes|RLPReader).sol'", "lint:contracts:fix": "pnpm solhint --fix 'contracts/**/!(DisputeTypes|RLPReader).sol' && pnpm prettier --write 'contracts/**/!(DisputeTypes|RLPReader).sol'",
"lint:fix": "pnpm lint:contracts:fix && pnpm lint:ts:fix", "lint:fix": "pnpm lint:contracts:fix && pnpm lint:ts:fix",
"lint": "pnpm lint:fix && pnpm lint:check", "lint": "pnpm lint:fix && pnpm lint:check"
"echidna:aliasing": "echidna-test --contract EchidnaFuzzAddressAliasing --config ./echidna.yaml .",
"echidna:burn:gas": "echidna-test --contract EchidnaFuzzBurnGas --config ./echidna.yaml .",
"echidna:burn:eth": "echidna-test --contract EchidnaFuzzBurnEth --config ./echidna.yaml .",
"echidna:encoding": "echidna-test --contract EchidnaFuzzEncoding --config ./echidna.yaml .",
"echidna:portal": "echidna-test --contract EchidnaFuzzOptimismPortal --config ./echidna.yaml .",
"echidna:hashing": "echidna-test --contract EchidnaFuzzHashing --config ./echidna.yaml .",
"echidna:metering": "echidna-test --contract EchidnaFuzzResourceMetering --config ./echidna.yaml ."
},
"dependencies": {
"@eth-optimism/core-utils": "^0.12.1",
...
...@@ -8,9 +8,7 @@ const BASE_INVARIANTS_DIR = path.join(
'test',
'invariants'
)
const BASE_ECHIDNA_DIR = path.join(__dirname, '..', 'contracts', 'echidna')
const BASE_DOCS_DIR = path.join(__dirname, '..', 'invariant-docs')
const BASE_ECHIDNA_GH_URL = '../contracts/echidna/'
const BASE_INVARIANT_GH_URL = '../contracts/test/invariants/'
const NATSPEC_INV = '@custom:invariant'
const BLOCK_COMMENT_PREFIX_REGEX = /\*(\/)?/
...@@ -20,7 +18,6 @@ const BLOCK_COMMENT_HEADER_REGEX = /\*\s(.)+/
type Contract = {
name: string
fileName: string
isEchidna: boolean
docs: InvariantDoc[]
}
...@@ -52,11 +49,8 @@ const docGen = (dir: string): void => {
const lines = fileContents.split('\n').map((line: string) => line.trim())
// Create an object to store all invariant test docs for the current contract
const isEchidna = fileName.startsWith('Fuzz') const name = fileName.replace('.t.sol', '')
const name = isEchidna const contract: Contract = { name, fileName, docs: [] }
? fileName.replace('Fuzz', '').replace('.sol', '')
: fileName.replace('.t.sol', '')
const contract: Contract = { name, fileName, isEchidna, docs: [] }
let currentDoc: InvariantDoc
...@@ -179,20 +173,12 @@ const renderContractDoc = (contract: Contract, header: boolean): string => {
const docs = contract.docs
.map((doc: InvariantDoc) => {
const line = `${contract.fileName}#L${doc.lineNo}`
return `## ${doc.header}\n**Test:** [\`${line}\`](${getGithubBase( return `## ${doc.header}\n**Test:** [\`${line}\`](${BASE_INVARIANT_GH_URL}${line})\n\n${doc.desc}`
contract
)}${line})\n\n${doc.desc}`
})
.join('\n\n')
return `${_header}\n${docs}`
}
/**
* Get the base URL for the test contract
*/
const getGithubBase = ({ isEchidna }: Contract): string =>
isEchidna ? BASE_ECHIDNA_GH_URL : BASE_INVARIANT_GH_URL
// Generate the docs
// Forge
...@@ -202,9 +188,5 @@ docGen(BASE_INVARIANTS_DIR)
// New line
console.log()
// Echidna
console.log('Generating docs for echidna invariants...')
docGen(BASE_ECHIDNA_DIR)
// Generate an updated table of contents
tocGen()
...@@ -10,6 +10,6 @@
"hardhat_ignore_compile": false,
"disable_color": false,
"exclude_dependencies": true,
"filter_paths": "contracts/test,contracts/vendor,contracts/echidna,node_modules,contracts/cannon/MIPS.sol", "filter_paths": "contracts/test,contracts/vendor,node_modules,contracts/cannon/MIPS.sol",
"foundry_out_directory": "artifacts" "foundry_out_directory": "artifacts"
} }