Commit 9f1dcbc7 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into rollup-config-warning

parents f42f65a4 417612ac
......@@ -140,7 +140,6 @@ jobs:
- "packages/common-ts/node_modules"
- "packages/contracts-bedrock/node_modules"
- "packages/core-utils/node_modules"
- "packages/fault-detector/node_modules"
- "packages/replica-healthcheck/node_modules"
- "packages/sdk/node_modules"
- "packages/contracts-ts/node_modules"
......@@ -1363,13 +1362,6 @@ workflows:
dependencies: "(common-ts|contracts-bedrock|core-utils|sdk)"
requires:
- pnpm-monorepo
- js-lint-test:
name: fault-detector-tests
coverage_flag: fault-detector-tests
package_name: fault-detector
dependencies: "(common-ts|core-utils|sdk)"
requires:
- pnpm-monorepo
- js-lint-test:
name: contracts-ts-tests
coverage_flag: contracts-ts-tests
......@@ -1616,14 +1608,6 @@ workflows:
docker_target: wd-mon
context:
- oplabs-gcr
- docker-publish:
name: fault-detector-docker-publish
docker_file: ./ops/docker/Dockerfile.packages
docker_name: fault-detector
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
docker_target: fault-detector
context:
- oplabs-gcr
- hive-test:
name: hive-test-rpc
version: <<pipeline.git.revision>>
......@@ -1661,7 +1645,7 @@ workflows:
type: approval
filters:
tags:
only: /^(fault-detector|proxyd|indexer|ci-builder|op-[a-z0-9\-]*)\/v.*/
only: /^(proxyd|indexer|ci-builder|op-[a-z0-9\-]*)\/v.*/
branches:
ignore: /.*/
- docker-release:
......@@ -1728,23 +1712,6 @@ workflows:
- oplabs-gcr-release
requires:
- hold
- docker-release:
name: fault-detector-docker-release
filters:
tags:
only: /^fault-detector\/v.*/
branches:
ignore: /.*/
docker_file: ./ops/docker/Dockerfile.packages
docker_name: fault-detector
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
docker_target: fault-detector
docker_context: .
platforms: "linux/amd64,linux/arm64"
context:
- oplabs-gcr-release
requires:
- hold
- docker-build:
name: op-migrate-docker-release
filters:
......
......@@ -3,8 +3,7 @@
/packages/contracts @ethereum-optimism/contract-reviewers
/packages/contracts-bedrock @ethereum-optimism/contract-reviewers
/packages/core-utils @ethereum-optimism/legacy-reviewers
/packages/chain-mon @smartcontracts
/packages/fault-detector @ethereum-optimism/devxpod
/packages/chain-mon @ethereum-optimism/devxpod
/packages/replica-healthcheck @ethereum-optimism/legacy-reviewers
/packages/sdk @ethereum-optimism/devxpod
/packages/atst @ethereum-optimism/devxpod
......
......@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
# map the step outputs to job outputs
outputs:
fault-detector: ${{ steps.packages.outputs.fault-detector }}
fault-mon: ${{ steps.packages.outputs.fault-mon }}
balance-mon: ${{ steps.packages.outputs.balance-mon }}
drippie-mon: ${{ steps.packages.outputs.drippie-mon }}
wd-mon: ${{ steps.packages.outputs.wd-mon }}
......@@ -43,10 +43,10 @@ jobs:
env:
CUSTOM_IMAGE_NAME: ${{ github.event.inputs.customImageName }}
fault-detector:
name: Publish Fault Detector Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
fault-mon:
name: Publish fault-mon Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.fault-detector != ''
if: needs.canary-publish.outputs.fault-mon != ''
runs-on: ubuntu-latest
steps:
......@@ -66,9 +66,9 @@ jobs:
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: fault-detector
target: fault-mon
push: true
tags: ethereumoptimism/fault-detector:${{ needs.canary-publish.outputs.canary-docker-tag }}
tags: ethereumoptimism/fault-mon:${{ needs.canary-publish.outputs.canary-docker-tag }}
balance-mon:
name: Publish Balance Monitor Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
......
......@@ -16,7 +16,7 @@ jobs:
if: github.repository == 'ethereum-optimism/optimism'
# map the step outputs to job outputs
outputs:
fault-detector: ${{ steps.packages.outputs.fault-detector }}
fault-mon: ${{ steps.packages.outputs.fault-mon }}
balance-mon: ${{ steps.packages.outputs.drippie-mon }}
drippie-mon: ${{ steps.packages.outputs.drippie-mon }}
wd-mon: ${{ steps.packages.outputs.wd-mon }}
......@@ -100,10 +100,10 @@ jobs:
GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
GITVERSION=${{ steps.build_args.outputs.GITVERSION }}
fault-detector:
name: Publish Fault Detector Version ${{ needs.release.outputs.fault-detector }}
fault-mon:
name: Publish fault-mon Version ${{ needs.release.outputs.fault-mon }}
needs: release
if: needs.release.outputs.fault-detector != ''
if: needs.release.outputs.fault-mon != ''
runs-on: ubuntu-latest
steps:
......@@ -123,9 +123,9 @@ jobs:
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: fault-detector
target: fault-mon
push: true
tags: ethereumoptimism/fault-detector:${{ needs.release.outputs.fault-detector }},ethereumoptimism/fault-detector:latest
tags: ethereumoptimism/fault-mon:${{ needs.release.outputs.fault-mon }},ethereumoptimism/fault-mon:latest
wd-mon:
name: Publish Withdrawal Monitor Version ${{ needs.release.outputs.wd-mon }}
......
......@@ -26,6 +26,8 @@ on:
- op-proposer
- op-ufm
- proxyd
- indexer
- ci-builder
prerelease:
description: Increment major/minor/patch as prerelease?
required: false
......
......@@ -19,10 +19,6 @@
{
"directory": "packages/chain-mon",
"changeProcessCWD": true
},
{
"directory": "packages/fault-detector",
"changeProcessCWD": true
}
],
"eslint.nodePath": "./node_modules/eslint/bin/",
......
......@@ -54,7 +54,6 @@ Refer to the Directory Structure section below to understand which packages are
│ ├── <a href="./packages/contracts-bedrock">contracts-bedrock</a>: Bedrock smart contracts.
│ ├── <a href="./packages/core-utils">core-utils</a>: Low-level utilities that make building Optimism easier
│ ├── <a href="./packages/chain-mon">chain-mon</a>: Chain monitoring services
│ ├── <a href="./packages/fault-detector">fault-detector</a>: Service for detecting Sequencer faults
│ ├── <a href="./packages/replica-healthcheck">replica-healthcheck</a>: Service for monitoring the health of a replica node
│ └── <a href="./packages/sdk">sdk</a>: provides a set of tools for interacting with Optimism
├── <a href="./op-bindings">op-bindings</a>: Go bindings for Bedrock smart contracts.
......@@ -80,7 +79,6 @@ Refer to the Directory Structure section below to understand which packages are
│ ├── <a href="./packages/common-ts">common-ts</a>: Common tools for building apps in TypeScript
│ ├── <a href="./packages/core-utils">core-utils</a>: Low-level utilities that make building Optimism easier
│ ├── <a href="./packages/chain-mon">chain-mon</a>: Chain monitoring services
│ ├── <a href="./packages/fault-detector">fault-detector</a>: Service for detecting Sequencer faults
│ ├── <a href="./packages/replica-healthcheck">replica-healthcheck</a>: Service for monitoring the health of a replica node
│ └── <a href="./packages/sdk">sdk</a>: provides a set of tools for interacting with Optimism
├── <a href="./indexer">indexer</a>: indexes and syncs transactions
......
......@@ -36,6 +36,5 @@ flag_management:
- name: core-utils-tests
- name: dtl-tests
- name: chain-mon-tests
- name: fault-detector-tests
- name: replica-healthcheck-tests
- name: sdk-tests
......@@ -29,6 +29,6 @@ Dive deep into the specifications for the Bedrock release in the [specs folder o
- [`op-batcher`](https://github.com/ethereum-optimism/optimism/tree/develop/op-batcher)
- [`op-proposer`](https://github.com/ethereum-optimism/optimism/tree/develop/op-proposer)
- [`contracts-bedrock`](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts-bedrock)
- [`fault-detector`](https://github.com/ethereum-optimism/optimism/tree/develop/packages/fault-detector)
- [`fault-mon`](https://github.com/ethereum-optimism/optimism/tree/develop/packages/chain-mon/src/fault-mon)
- [`sdk`](https://github.com/ethereum-optimism/optimism/tree/develop/packages/sdk)
- [`chain-mon`](https://github.com/ethereum-optimism/optimism/tree/develop/packages/chain-mon)
......@@ -23,6 +23,7 @@ require (
github.com/ipfs/go-datastore v0.6.0
github.com/ipfs/go-ds-leveldb v0.5.0
github.com/jackc/pgtype v1.14.0
github.com/jackc/pgx/v5 v5.3.1
github.com/lib/pq v1.10.9
github.com/libp2p/go-libp2p v0.25.1
github.com/libp2p/go-libp2p-pubsub v0.9.3
......@@ -105,7 +106,6 @@ require (
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
......
......@@ -21,8 +21,8 @@ const (
)
// DepositsByAddress mocks returning deposits by an address
func (mbv *MockBridgeView) DepositsByAddress(address common.Address) ([]*database.DepositWithTransactionHash, error) {
return []*database.DepositWithTransactionHash{
func (mbv *MockBridgeView) DepositsByAddress(address common.Address) ([]*database.DepositWithTransactionHashes, error) {
return []*database.DepositWithTransactionHashes{
{
Deposit: database.Deposit{
GUID: uuid.MustParse(guid1),
......
package cli
import (
"context"
"fmt"
"os"
"github.com/ethereum-optimism/optimism/indexer"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/opio"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
)
......@@ -20,16 +24,26 @@ type Cli struct {
func runIndexer(ctx *cli.Context) error {
configPath := ctx.String(ConfigFlag.Name)
conf, err := config.LoadConfig(configPath)
cfg, err := config.LoadConfig(configPath)
if err != nil {
return err
}
fmt.Println(conf)
// setup logger
cfg.Logger = log.NewLogger(log.ReadCLIConfig(ctx))
indexer, err := indexer.NewIndexer(cfg)
if err != nil {
log.Crit("Failed to load config", "message", err)
return err
}
// finish me
return nil
indexerCtx, indexerCancel := context.WithCancel(context.Background())
go func() {
opio.BlockOnInterrupts()
indexerCancel()
}()
return indexer.Run(indexerCtx)
}
func runApi(ctx *cli.Context) error {
......@@ -39,8 +53,9 @@ func runApi(ctx *cli.Context) error {
fmt.Println(conf)
if err != nil {
log.Crit("Failed to load config", "message", err)
panic(err)
}
// finish me
return nil
}
......@@ -71,17 +86,7 @@ func (c *Cli) Run(args []string) error {
}
func NewCli(GitVersion string, GitCommit string, GitDate string) *Cli {
log.Root().SetHandler(
log.LvlFilterHandler(
log.LvlInfo,
log.StreamHandler(os.Stdout, log.TerminalFormat(true)),
),
)
flags := []cli.Flag{
ConfigFlag,
}
flags := append([]cli.Flag{ConfigFlag}, log.CLIFlags("INDEXER")...)
app := &cli.App{
Version: fmt.Sprintf("%s-%s", GitVersion, params.VersionWithCommit(GitCommit, GitDate)),
Description: "An indexer of all optimism events with a serving api layer",
......
......@@ -4,6 +4,8 @@ import (
"os"
"github.com/BurntSushi/toml"
"github.com/ethereum/go-ethereum/log"
)
// Config represents the `indexer.toml` file used to configure the indexer
......@@ -13,6 +15,7 @@ type Config struct {
DB DBConfig
API APIConfig
Metrics MetricsConfig
Logger log.Logger `toml:"-"`
}
// ChainConfig configures the chain being indexed
......@@ -31,6 +34,7 @@ type RPCsConfig struct {
type DBConfig struct {
Host string
Port int
Name string
User string
Password string
}
......
......@@ -3,6 +3,7 @@ package database
import (
"context"
"errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
......@@ -53,15 +54,20 @@ type LegacyStateBatch struct {
type OutputProposal struct {
OutputRoot common.Hash `gorm:"primaryKey;serializer:json"`
L2OutputIndex U256
L2BlockNumber U256
L1ContractEventGUID uuid.UUID
}
type BlocksView interface {
L1BlockHeader(*big.Int) (*L1BlockHeader, error)
LatestL1BlockHeader() (*L1BlockHeader, error)
LatestCheckpointedOutput() (*OutputProposal, error)
OutputProposal(index *big.Int) (*OutputProposal, error)
L2BlockHeader(*big.Int) (*L2BlockHeader, error)
LatestL2BlockHeader() (*L2BlockHeader, error)
}
......@@ -104,6 +110,20 @@ func (db *blocksDB) StoreOutputProposals(outputs []*OutputProposal) error {
return result.Error
}
func (db *blocksDB) L1BlockHeader(height *big.Int) (*L1BlockHeader, error) {
var l1Header L1BlockHeader
result := db.gorm.Where(&BlockHeader{Number: U256{Int: height}}).Take(&l1Header)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l1Header, nil
}
func (db *blocksDB) LatestL1BlockHeader() (*L1BlockHeader, error) {
var l1Header L1BlockHeader
result := db.gorm.Order("number DESC").Take(&l1Header)
......@@ -120,7 +140,21 @@ func (db *blocksDB) LatestL1BlockHeader() (*L1BlockHeader, error) {
func (db *blocksDB) LatestCheckpointedOutput() (*OutputProposal, error) {
var outputProposal OutputProposal
result := db.gorm.Order("l2_block_number DESC").Take(&outputProposal)
result := db.gorm.Order("l2_output_index DESC").Take(&outputProposal)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &outputProposal, nil
}
func (db *blocksDB) OutputProposal(index *big.Int) (*OutputProposal, error) {
var outputProposal OutputProposal
result := db.gorm.Where(&OutputProposal{L2OutputIndex: U256{Int: index}}).Take(&outputProposal)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......@@ -139,6 +173,20 @@ func (db *blocksDB) StoreL2BlockHeaders(headers []*L2BlockHeader) error {
return result.Error
}
func (db *blocksDB) L2BlockHeader(height *big.Int) (*L2BlockHeader, error) {
var l2Header L2BlockHeader
result := db.gorm.Where(&BlockHeader{Number: U256{Int: height}}).Take(&l2Header)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l2Header, nil
}
func (db *blocksDB) LatestL2BlockHeader() (*L2BlockHeader, error) {
var l2Header L2BlockHeader
result := db.gorm.Order("number DESC").Take(&l2Header)
......
......@@ -48,9 +48,11 @@ type Deposit struct {
TokenPair TokenPair `gorm:"embedded"`
}
type DepositWithTransactionHash struct {
type DepositWithTransactionHashes struct {
Deposit Deposit `gorm:"embedded"`
L1TransactionHash common.Hash `gorm:"serializer:json"`
FinalizedL2TransactionHash common.Hash `gorm:"serializer:json"`
}
type Withdrawal struct {
......@@ -77,12 +79,12 @@ type WithdrawalWithTransactionHashes struct {
Withdrawal Withdrawal `gorm:"embedded"`
L2TransactionHash common.Hash `gorm:"serializer:json"`
ProvenL1TransactionHash *common.Hash `gorm:"serializer:json"`
FinalizedL1TransactionHash *common.Hash `gorm:"serializer:json"`
ProvenL1TransactionHash common.Hash `gorm:"serializer:json"`
FinalizedL1TransactionHash common.Hash `gorm:"serializer:json"`
}
type BridgeView interface {
DepositsByAddress(address common.Address) ([]*DepositWithTransactionHash, error)
DepositsByAddress(address common.Address) ([]*DepositWithTransactionHashes, error)
DepositByMessageNonce(*big.Int) (*Deposit, error)
LatestDepositMessageNonce() (*big.Int, error)
......@@ -122,14 +124,16 @@ func (db *bridgeDB) StoreDeposits(deposits []*Deposit) error {
return result.Error
}
func (db *bridgeDB) DepositsByAddress(address common.Address) ([]*DepositWithTransactionHash, error) {
depositsQuery := db.gorm.Table("deposits").Select("deposits.*, l1_contract_events.transaction_hash AS l1_transaction_hash")
eventsJoinQuery := depositsQuery.Joins("LEFT JOIN l1_contract_events ON deposits.initiated_l1_event_guid = l1_contract_events.guid")
func (db *bridgeDB) DepositsByAddress(address common.Address) ([]*DepositWithTransactionHashes, error) {
depositsQuery := db.gorm.Table("deposits").Select("deposits.*, l1_contract_events.transaction_hash AS l1_transaction_hash, l2_contract_events.transaction_hash AS finalized_l2_transaction_hash")
initiatedJoinQuery := depositsQuery.Joins("LEFT JOIN l1_contract_events ON deposits.initiated_l1_event_guid = l1_contract_events.guid")
finalizedJoinQuery := initiatedJoinQuery.Joins("LEFT JOIN l2_contract_events ON deposits.finalized_l2_event_guid = l2_contract_events.guid")
// add in cursoring options
filteredQuery := eventsJoinQuery.Where(&Transaction{FromAddress: address}).Order("deposits.timestamp DESC").Limit(100)
filteredQuery := finalizedJoinQuery.Where(&Transaction{FromAddress: address}).Order("deposits.timestamp DESC").Limit(100)
deposits := make([]*DepositWithTransactionHash, 100)
deposits := make([]*DepositWithTransactionHashes, 100)
result := filteredQuery.Scan(&deposits)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
......@@ -144,7 +148,7 @@ func (db *bridgeDB) DepositsByAddress(address common.Address) ([]*DepositWithTra
func (db *bridgeDB) DepositByMessageNonce(nonce *big.Int) (*Deposit, error) {
var deposit Deposit
result := db.gorm.First(&deposit, "sent_message_nonce = ?", U256{Int: nonce})
result := db.gorm.Where(&Deposit{SentMessageNonce: U256{Int: nonce}}).Take(&deposit)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......@@ -172,7 +176,7 @@ func (db *bridgeDB) LatestDepositMessageNonce() (*big.Int, error) {
func (db *bridgeDB) MarkFinalizedDepositEvent(guid, finalizationEventGUID uuid.UUID) error {
var deposit Deposit
result := db.gorm.First(&deposit, "guid = ?", guid)
result := db.gorm.Where(&Deposit{GUID: guid}).Take(&deposit)
if result.Error != nil {
return result.Error
}
......@@ -191,7 +195,7 @@ func (db *bridgeDB) StoreWithdrawals(withdrawals []*Withdrawal) error {
func (db *bridgeDB) MarkProvenWithdrawalEvent(guid, provenL1EventGuid uuid.UUID) error {
var withdrawal Withdrawal
result := db.gorm.First(&withdrawal, "guid = ?", guid)
result := db.gorm.Where(&Withdrawal{GUID: guid}).Take(&withdrawal)
if result.Error != nil {
return result.Error
}
......@@ -203,7 +207,7 @@ func (db *bridgeDB) MarkProvenWithdrawalEvent(guid, provenL1EventGuid uuid.UUID)
func (db *bridgeDB) MarkFinalizedWithdrawalEvent(guid, finalizedL1EventGuid uuid.UUID) error {
var withdrawal Withdrawal
result := db.gorm.First(&withdrawal, "guid = ?", guid)
result := db.gorm.Where(&Withdrawal{GUID: guid}).Take(&withdrawal)
if result.Error != nil {
return result.Error
}
......@@ -242,7 +246,7 @@ func (db *bridgeDB) WithdrawalsByAddress(address common.Address) ([]*WithdrawalW
func (db *bridgeDB) WithdrawalByMessageNonce(nonce *big.Int) (*Withdrawal, error) {
var withdrawal Withdrawal
result := db.gorm.First(&withdrawal, "sent_message_nonce = ?", U256{Int: nonce})
result := db.gorm.Where(&Withdrawal{SentMessageNonce: U256{Int: nonce}}).Take(&withdrawal)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......@@ -256,7 +260,7 @@ func (db *bridgeDB) WithdrawalByMessageNonce(nonce *big.Int) (*Withdrawal, error
func (db *bridgeDB) WithdrawalByHash(hash common.Hash) (*Withdrawal, error) {
var withdrawal Withdrawal
result := db.gorm.First(&withdrawal, "withdrawal_hash = ?", hash.String())
result := db.gorm.Where(&Withdrawal{WithdrawalHash: hash}).Take(&withdrawal)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......
package database
import (
"errors"
"gorm.io/gorm"
"github.com/ethereum/go-ethereum/common"
......@@ -46,6 +48,11 @@ type L2ContractEvent struct {
}
type ContractEventsView interface {
L1ContractEvent(uuid.UUID) (*L1ContractEvent, error)
L1ContractEventByTxLogIndex(common.Hash, uint64) (*L1ContractEvent, error)
L2ContractEvent(uuid.UUID) (*L2ContractEvent, error)
L2ContractEventByTxLogIndex(common.Hash, uint64) (*L2ContractEvent, error)
}
type ContractEventsDB interface {
......@@ -74,9 +81,65 @@ func (db *contractEventsDB) StoreL1ContractEvents(events []*L1ContractEvent) err
return result.Error
}
func (db *contractEventsDB) L1ContractEvent(uuid uuid.UUID) (*L1ContractEvent, error) {
var l1ContractEvent L1ContractEvent
result := db.gorm.Where(&ContractEvent{GUID: uuid}).Take(&l1ContractEvent)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l1ContractEvent, nil
}
func (db *contractEventsDB) L1ContractEventByTxLogIndex(txHash common.Hash, logIndex uint64) (*L1ContractEvent, error) {
var l1ContractEvent L1ContractEvent
result := db.gorm.Where(&ContractEvent{TransactionHash: txHash, LogIndex: logIndex}).Take(&l1ContractEvent)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l1ContractEvent, nil
}
// L2
func (db *contractEventsDB) StoreL2ContractEvents(events []*L2ContractEvent) error {
result := db.gorm.Create(&events)
return result.Error
}
func (db *contractEventsDB) L2ContractEvent(uuid uuid.UUID) (*L2ContractEvent, error) {
var l2ContractEvent L2ContractEvent
result := db.gorm.Where(&ContractEvent{GUID: uuid}).Take(&l2ContractEvent)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l2ContractEvent, nil
}
func (db *contractEventsDB) L2ContractEventByTxLogIndex(txHash common.Hash, logIndex uint64) (*L2ContractEvent, error) {
var l2ContractEvent L2ContractEvent
result := db.gorm.Where(&ContractEvent{TransactionHash: txHash, LogIndex: logIndex}).Take(&l2ContractEvent)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l2ContractEvent, nil
}
......@@ -4,6 +4,7 @@ package database
import (
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
)
type DB struct {
......@@ -19,6 +20,10 @@ func NewDB(dsn string) (*DB, error) {
// The indexer will explicitly manage the transaction
// flow when processing blocks
SkipDefaultTransaction: true,
// We may choose to create an adapter such that the
// logger emits to the geth logger when in DEBUG mode
Logger: logger.Default.LogMode(logger.Silent),
})
if err != nil {
......@@ -43,6 +48,15 @@ func (db *DB) Transaction(fn func(db *DB) error) error {
})
}
func (db *DB) Close() error {
sql, err := db.gorm.DB()
if err != nil {
return err
}
return sql.Close()
}
func dbFromGormTx(tx *gorm.DB) *DB {
return &DB{
gorm: tx,
......
package e2e_tests
import (
"context"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum-optimism/optimism/indexer/processor"
"github.com/ethereum-optimism/optimism/op-service/client/utils"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/require"
)
func TestE2EBlockHeaders(t *testing.T) {
testSuite := createE2ETestSuite(t)
l1Client := testSuite.OpSys.Clients["l1"]
l2Client := testSuite.OpSys.Clients["sequencer"]
l2OutputOracle, err := bindings.NewL2OutputOracleCaller(predeploys.DevL2OutputOracleAddr, l1Client)
require.NoError(t, err)
// a minute for total setup to finish
setupCtx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
// wait for at least 10 L2 blocks to be created & posted on L1
require.NoError(t, utils.WaitFor(setupCtx, time.Second, func() (bool, error) {
l2Height, err := l2OutputOracle.LatestBlockNumber(&bind.CallOpts{Context: setupCtx})
return l2Height != nil && l2Height.Uint64() >= 9, err
}))
// ensure the processors are caught up to this state
l1Height, err := l1Client.BlockNumber(setupCtx)
require.NoError(t, err)
require.NoError(t, utils.WaitFor(setupCtx, time.Second, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
return (l1Header != nil && l1Header.Number.Uint64() >= l1Height) && (l2Header != nil && l2Header.Number.Uint64() >= 9), nil
}))
t.Run("indexes L2 blocks", func(t *testing.T) {
latestL2Header, err := testSuite.DB.Blocks.LatestL2BlockHeader()
require.NoError(t, err)
require.NotNil(t, latestL2Header)
require.True(t, latestL2Header.Number.Int.Uint64() >= 9)
for i := int64(0); i < 10; i++ {
height := big.NewInt(i)
indexedHeader, err := testSuite.DB.Blocks.L2BlockHeader(height)
require.NoError(t, err)
require.NotNil(t, indexedHeader)
header, err := l2Client.HeaderByNumber(context.Background(), height)
require.NoError(t, err)
require.NotNil(t, indexedHeader)
require.Equal(t, header.Number.Int64(), indexedHeader.Number.Int.Int64())
require.Equal(t, header.Hash(), indexedHeader.Hash)
require.Equal(t, header.ParentHash, indexedHeader.ParentHash)
require.Equal(t, header.Time, indexedHeader.Timestamp)
}
})
t.Run("indexes L2 checkpoints", func(t *testing.T) {
latestOutput, err := testSuite.DB.Blocks.LatestCheckpointedOutput()
require.NoError(t, err)
require.NotNil(t, latestOutput)
require.GreaterOrEqual(t, latestOutput.L2BlockNumber.Int.Uint64(), uint64(9))
l2EthClient, err := node.DialEthClient(testSuite.OpSys.Nodes["sequencer"].HTTPEndpoint())
require.NoError(t, err)
submissionInterval := testSuite.OpCfg.DeployConfig.L2OutputOracleSubmissionInterval
numOutputs := latestOutput.L2BlockNumber.Int.Uint64() / submissionInterval
for i := int64(0); i < int64(numOutputs); i++ {
blockNumber := big.NewInt((i + 1) * int64(submissionInterval))
output, err := testSuite.DB.Blocks.OutputProposal(big.NewInt(i))
require.NoError(t, err)
require.NotNil(t, output)
require.Equal(t, i, output.L2OutputIndex.Int.Int64())
require.Equal(t, blockNumber, output.L2BlockNumber.Int)
require.NotEmpty(t, output.L1ContractEventGUID)
// we may as well check the integrity of the output root
l2Block, err := l2Client.BlockByNumber(context.Background(), blockNumber)
require.NoError(t, err)
messagePasserStorageHash, err := l2EthClient.StorageHash(predeploys.L2ToL1MessagePasserAddr, blockNumber)
require.NoError(t, err)
// construct and check output root
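// (per the Bedrock spec, outputRoot = keccak256(version || stateRoot || messagePasserStorageRoot || blockHash),
// with version 0 encoded as 32 zero bytes, hence the 128-byte preimage below)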
outputRootPreImage := [128]byte{} // 4 words (first 32 are zero for version 0)
copy(outputRootPreImage[32:64], l2Block.Root().Bytes()) // state root
copy(outputRootPreImage[64:96], messagePasserStorageHash.Bytes()) // message passer storage root
copy(outputRootPreImage[96:128], l2Block.Hash().Bytes()) // block hash
require.Equal(t, crypto.Keccak256Hash(outputRootPreImage[:]), output.OutputRoot)
}
})
t.Run("indexes L1 logs and associated blocks", func(t *testing.T) {
testCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
devContracts := processor.DevL1Contracts().ToSlice()
logFilter := ethereum.FilterQuery{FromBlock: big.NewInt(0), ToBlock: big.NewInt(int64(l1Height)), Addresses: devContracts}
logs, err := l1Client.FilterLogs(testCtx, logFilter) // []types.Log
require.NoError(t, err)
for _, log := range logs {
contractEvent, err := testSuite.DB.ContractEvents.L1ContractEventByTxLogIndex(log.TxHash, uint64(log.Index))
require.NoError(t, err)
require.Equal(t, log.Topics[0], contractEvent.EventSignature)
require.Equal(t, log.BlockHash, contractEvent.BlockHash)
require.Equal(t, log.TxHash, contractEvent.TransactionHash)
require.Equal(t, log.Index, uint(contractEvent.LogIndex))
// ensure the block is also indexed
block, err := l1Client.BlockByNumber(testCtx, big.NewInt(int64(log.BlockNumber)))
require.NoError(t, err)
require.Equal(t, block.Time(), contractEvent.Timestamp)
l1BlockHeader, err := testSuite.DB.Blocks.L1BlockHeader(block.Number())
require.NoError(t, err)
require.Equal(t, block.Hash(), l1BlockHeader.Hash)
require.Equal(t, block.ParentHash(), l1BlockHeader.ParentHash)
require.Equal(t, block.Number(), l1BlockHeader.Number.Int)
require.Equal(t, block.Time(), l1BlockHeader.Timestamp)
}
})
}
package e2e_tests
import (
"context"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/indexer/processor"
"github.com/ethereum-optimism/optimism/op-service/client/utils"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
)
func TestE2EBridge(t *testing.T) {
testSuite := createE2ETestSuite(t)
l1Client := testSuite.OpSys.Clients["l1"]
l2Client := testSuite.OpSys.Clients["sequencer"]
l1StandardBridge, err := bindings.NewL1StandardBridge(predeploys.DevL1StandardBridgeAddr, l1Client)
require.NoError(t, err)
l2StandardBridge, err := bindings.NewL2StandardBridge(predeploys.L2StandardBridgeAddr, l2Client)
require.NoError(t, err)
// pre-emptively conduct a deposit & withdrawal to speed up the test
setupCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
aliceAddr := testSuite.OpCfg.Secrets.Addresses().Alice
l1Opts, err := bind.NewKeyedTransactorWithChainID(testSuite.OpCfg.Secrets.Alice, testSuite.OpCfg.L1ChainIDBig())
require.NoError(t, err)
l2Opts, err := bind.NewKeyedTransactorWithChainID(testSuite.OpCfg.Secrets.Alice, testSuite.OpCfg.L2ChainIDBig())
require.NoError(t, err)
l1Opts.Value = big.NewInt(params.Ether)
l2Opts.Value = big.NewInt(params.Ether)
depositTx, err := l1StandardBridge.DepositETH(l1Opts, 200_000, []byte{byte(1)})
require.NoError(t, err)
withdrawTx, err := l2StandardBridge.Withdraw(l2Opts, processor.EthAddress, big.NewInt(params.Ether), 200_000, []byte{byte(1)})
require.NoError(t, err)
depositReceipt, err := utils.WaitReceiptOK(setupCtx, l1Client, depositTx.Hash())
require.NoError(t, err)
withdrawalReceipt, err := utils.WaitReceiptOK(setupCtx, l2Client, withdrawTx.Hash())
require.NoError(t, err)
t.Run("indexes ETH deposits", func(t *testing.T) {
testCtx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
// Pause the L2Processor so that we can test for finalization separately. A pause is
// required since deposit inclusion is a part of the L2 block derivation process
testSuite.Indexer.L2Processor.PauseForTest()
// (1) Test Deposit Initiation
// wait for processor catchup
require.NoError(t, utils.WaitFor(testCtx, 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= depositReceipt.BlockNumber.Uint64(), nil
}))
aliceDeposits, err := testSuite.DB.Bridge.DepositsByAddress(aliceAddr)
require.NoError(t, err)
require.Len(t, aliceDeposits, 1)
require.Equal(t, depositTx.Hash(), aliceDeposits[0].L1TransactionHash)
require.Empty(t, aliceDeposits[0].FinalizedL2TransactionHash)
deposit := aliceDeposits[0].Deposit
require.Nil(t, deposit.FinalizedL2EventGUID)
require.Equal(t, processor.EthAddress, deposit.TokenPair.L1TokenAddress)
require.Equal(t, processor.EthAddress, deposit.TokenPair.L2TokenAddress)
require.Equal(t, big.NewInt(params.Ether), deposit.Tx.Amount.Int)
require.Equal(t, aliceAddr, deposit.Tx.FromAddress)
require.Equal(t, aliceAddr, deposit.Tx.ToAddress)
require.Equal(t, byte(1), deposit.Tx.Data[0])
// (2) Test Deposit Finalization
testSuite.Indexer.L2Processor.ResumeForTest()
// the finalization hash can be deterministically derived from the TransactionDeposited log
var depositTxHash common.Hash
for _, log := range depositReceipt.Logs {
if log.Topics[0] == derive.DepositEventABIHash {
deposit, err := derive.UnmarshalDepositLogEvent(log)
require.NoError(t, err)
depositTxHash = types.NewTx(deposit).Hash()
break
}
}
// wait for the l2 processor to catch this deposit in the derivation process
_, err = utils.WaitReceiptOK(testCtx, l2Client, depositTxHash)
require.NoError(t, err)
l2Height, err := l2Client.BlockNumber(testCtx)
require.NoError(t, err)
require.NoError(t, utils.WaitFor(testCtx, 500*time.Millisecond, func() (bool, error) {
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
return l2Header != nil && l2Header.Number.Uint64() >= l2Height, nil
}))
aliceDeposits, err = testSuite.DB.Bridge.DepositsByAddress(aliceAddr)
require.NoError(t, err)
require.Equal(t, depositTxHash, aliceDeposits[0].FinalizedL2TransactionHash)
require.NotNil(t, aliceDeposits[0].Deposit.FinalizedL2EventGUID)
})
t.Run("indexes ETH withdrawals", func(t *testing.T) {
testCtx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
// (1) Test Withdrawal Initiation
// wait for processor catchup
require.NoError(t, utils.WaitFor(testCtx, 500*time.Millisecond, func() (bool, error) {
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
return l2Header != nil && l2Header.Number.Uint64() >= withdrawalReceipt.BlockNumber.Uint64(), nil
}))
aliceWithdrawals, err := testSuite.DB.Bridge.WithdrawalsByAddress(aliceAddr)
require.NoError(t, err)
require.Len(t, aliceWithdrawals, 1)
require.Equal(t, withdrawTx.Hash(), aliceWithdrawals[0].L2TransactionHash)
require.Empty(t, aliceWithdrawals[0].ProvenL1TransactionHash)
require.Empty(t, aliceWithdrawals[0].FinalizedL1TransactionHash)
withdrawal := aliceWithdrawals[0].Withdrawal
require.Nil(t, withdrawal.ProvenL1EventGUID)
require.Nil(t, withdrawal.FinalizedL1EventGUID)
require.Equal(t, processor.EthAddress, withdrawal.TokenPair.L1TokenAddress)
require.Equal(t, processor.EthAddress, withdrawal.TokenPair.L2TokenAddress)
require.Equal(t, big.NewInt(params.Ether), withdrawal.Tx.Amount.Int)
require.Equal(t, aliceAddr, withdrawal.Tx.FromAddress)
require.Equal(t, aliceAddr, withdrawal.Tx.ToAddress)
require.Equal(t, byte(1), withdrawal.Tx.Data[0])
// (2) Test Withdrawal Proven
// prove & wait for processor catchup
withdrawParams, proveReceipt := op_e2e.ProveWithdrawal(t, *testSuite.OpCfg, l1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawalReceipt)
require.NoError(t, utils.WaitFor(testCtx, 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= proveReceipt.BlockNumber.Uint64(), nil
}))
aliceWithdrawals, err = testSuite.DB.Bridge.WithdrawalsByAddress(aliceAddr)
require.NoError(t, err)
require.Empty(t, aliceWithdrawals[0].FinalizedL1TransactionHash)
require.Equal(t, proveReceipt.TxHash, aliceWithdrawals[0].ProvenL1TransactionHash)
// (3) Test Withdrawal Finalization
// finalize & wait for processor catchup
finalizeReceipt := op_e2e.FinalizeWithdrawal(t, *testSuite.OpCfg, l1Client, testSuite.OpCfg.Secrets.Alice, withdrawalReceipt, withdrawParams)
require.NoError(t, utils.WaitFor(testCtx, 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil
}))
aliceWithdrawals, err = testSuite.DB.Bridge.WithdrawalsByAddress(aliceAddr)
require.NoError(t, err)
require.Equal(t, finalizeReceipt.TxHash, aliceWithdrawals[0].FinalizedL1TransactionHash)
})
}
package e2e_tests
import (
"context"
"database/sql"
"fmt"
"io/fs"
"os"
"path/filepath"
"testing"
"time"
"github.com/ethereum-optimism/optimism/indexer"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/log"
_ "github.com/jackc/pgx/v5/stdlib"
"github.com/stretchr/testify/require"
)
type E2ETestSuite struct {
t *testing.T
// Indexer
DB *database.DB
Indexer *indexer.Indexer
// Rollup
OpCfg *op_e2e.SystemConfig
OpSys *op_e2e.System
}
func createE2ETestSuite(t *testing.T) E2ETestSuite {
dbUser := os.Getenv("DB_USER")
dbName := setupTestDatabase(t)
// Replace the handler of the global logger with the testlog
logger := testlog.Logger(t, log.LvlInfo)
log.Root().SetHandler(logger.GetHandler())
// Rollup System Configuration and Start
opCfg := op_e2e.DefaultSystemConfig(t)
opCfg.DeployConfig.FinalizationPeriodSeconds = 2
opSys, err := opCfg.Start()
require.NoError(t, err)
// Indexer Configuration and Start
indexerCfg := config.Config{
DB: config.DBConfig{
Host: "127.0.0.1",
Port: 5432,
Name: dbName,
User: dbUser,
},
RPCs: config.RPCsConfig{
L1RPC: opSys.Nodes["l1"].HTTPEndpoint(),
L2RPC: opSys.Nodes["sequencer"].HTTPEndpoint(),
},
Logger: logger,
}
db, err := database.NewDB(fmt.Sprintf("postgres://%s@localhost:5432/%s?sslmode=disable", dbUser, dbName))
require.NoError(t, err)
indexer, err := indexer.NewIndexer(indexerCfg)
require.NoError(t, err)
indexerCtx, indexerStop := context.WithCancel(context.Background())
go func() {
err := indexer.Run(indexerCtx)
require.NoError(t, err)
indexer.Cleanup()
}()
t.Cleanup(func() {
indexerStop()
// wait a second for the stop signal to be received
time.Sleep(1 * time.Second)
indexer.Cleanup()
db.Close()
opSys.Close()
})
return E2ETestSuite{
t: t,
DB: db,
Indexer: indexer,
OpCfg: &opCfg,
OpSys: opSys,
}
}
func setupTestDatabase(t *testing.T) string {
user := os.Getenv("DB_USER")
pg, err := sql.Open("pgx", fmt.Sprintf("postgres://%s@localhost:5432?sslmode=disable", user))
require.NoError(t, err)
require.NoError(t, pg.Ping())
// create database
dbName := fmt.Sprintf("indexer_test_%d", time.Now().UnixNano())
_, err = pg.Exec("CREATE DATABASE " + dbName)
require.NoError(t, err)
t.Cleanup(func() {
_, err := pg.Exec("DROP DATABASE " + dbName)
require.NoError(t, err)
pg.Close()
})
// set up the schema; migration files are walked in lexical order
t.Logf("created database %s", dbName)
db, err := sql.Open("pgx", fmt.Sprintf("postgres://%s@localhost:5432/%s?sslmode=disable", user, dbName))
require.NoError(t, err)
require.NoError(t, db.Ping())
defer db.Close()
t.Logf("running schema migrations...")
require.NoError(t, filepath.Walk("../migrations", func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err
} else if info.IsDir() {
return nil
}
t.Logf("running schema migration: %s", path)
data, err := os.ReadFile(path)
if err != nil {
return err
}
_, err = db.Exec(string(data))
return err
}))
t.Logf("schema loaded")
return dbName
}
package indexer
import (
"context"
"fmt"
"os"
"sync"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/flags"
"github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum-optimism/optimism/indexer/processor"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli"
)
// Main is the entrypoint into the indexer service. This method returns
// a closure that executes the service and blocks until the service exits. The
// use of a closure allows the parameters bound to the top-level main package,
// e.g. GitVersion, to be captured and used once the function is executed.
func Main(gitVersion string) func(ctx *cli.Context) error {
return func(ctx *cli.Context) error {
log.Info("initializing indexer")
indexer, err := NewIndexer(ctx)
if err != nil {
log.Error("unable to initialize indexer", "err", err)
return err
}
log.Info("starting indexer")
if err := indexer.Start(); err != nil {
log.Error("unable to start indexer", "err", err)
}
defer indexer.Stop()
log.Info("indexer started")
// Never terminate
<-(chan struct{})(nil)
return nil
}
}
// Indexer is a service that configures the necessary resources for
// running the Sync and BlockHandler sub-services.
// Indexer contains the necessary resources for
// indexing the configured L1 and L2 chains
type Indexer struct {
db *database.DB
log log.Logger
l1Processor *processor.L1Processor
l2Processor *processor.L2Processor
L1Processor *processor.L1Processor
L2Processor *processor.L2Processor
}
// NewIndexer initializes the Indexer, gathering any resources
// that will be needed by the TxIndexer and StateIndexer
// sub-services.
func NewIndexer(ctx *cli.Context) (*Indexer, error) {
// TODO https://linear.app/optimism/issue/DX-55/api-implement-rest-api-with-mocked-data
// do json format too
// TODO https://linear.app/optimism/issue/DX-55/api-implement-rest-api-with-mocked-data
logLevel, err := log.LvlFromString(ctx.GlobalString(flags.LogLevelFlag.Name))
if err != nil {
return nil, err
// NewIndexer initializes an instance of the Indexer
func NewIndexer(cfg config.Config) (*Indexer, error) {
dsn := fmt.Sprintf("host=%s port=%d dbname=%s sslmode=disable", cfg.DB.Host, cfg.DB.Port, cfg.DB.Name)
if cfg.DB.User != "" {
dsn += fmt.Sprintf(" user=%s", cfg.DB.User)
}
if cfg.DB.Password != "" {
dsn += fmt.Sprintf(" password=%s", cfg.DB.Password)
}
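// For illustration (assumed values): Host "127.0.0.1", Port 5432, Name "indexer" and User "op"
// would yield the DSN "host=127.0.0.1 port=5432 dbname=indexer sslmode=disable user=op".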
logHandler := log.StreamHandler(os.Stdout, log.TerminalFormat(true))
log.Root().SetHandler(log.LvlFilterHandler(logLevel, logHandler))
dsn := fmt.Sprintf("database=%s", ctx.GlobalString(flags.DBNameFlag.Name))
db, err := database.NewDB(dsn)
if err != nil {
return nil, err
}
// L1 Processor (hardhat devnet contracts). Make this configurable
l1Contracts := processor.L1Contracts{
OptimismPortal: common.HexToAddress("0x6900000000000000000000000000000000000000"),
L2OutputOracle: common.HexToAddress("0x6900000000000000000000000000000000000001"),
L1CrossDomainMessenger: common.HexToAddress("0x6900000000000000000000000000000000000002"),
L1StandardBridge: common.HexToAddress("0x6900000000000000000000000000000000000003"),
L1ERC721Bridge: common.HexToAddress("0x6900000000000000000000000000000000000004"),
}
l1EthClient, err := node.NewEthClient(ctx.GlobalString(flags.L1EthRPCFlag.Name))
l1Contracts := processor.DevL1Contracts()
l1EthClient, err := node.DialEthClient(cfg.RPCs.L1RPC)
if err != nil {
return nil, err
}
l1Processor, err := processor.NewL1Processor(l1EthClient, db, l1Contracts)
l1Processor, err := processor.NewL1Processor(cfg.Logger, l1EthClient, db, l1Contracts)
if err != nil {
return nil, err
}
// L2Processor
l2Contracts := processor.L2ContractPredeploys() // Make this configurable
l2EthClient, err := node.NewEthClient(ctx.GlobalString(flags.L2EthRPCFlag.Name))
// L2Processor (predeploys). Although most likely the right setting, make this configurable?
l2Contracts := processor.L2ContractPredeploys()
l2EthClient, err := node.DialEthClient(cfg.RPCs.L2RPC)
if err != nil {
return nil, err
}
l2Processor, err := processor.NewL2Processor(l2EthClient, db, l2Contracts)
l2Processor, err := processor.NewL2Processor(cfg.Logger, l2EthClient, db, l2Contracts)
if err != nil {
return nil, err
}
indexer := &Indexer{
db: db,
l1Processor: l1Processor,
l2Processor: l2Processor,
log: cfg.Logger,
L1Processor: l1Processor,
L2Processor: l2Processor,
}
return indexer, nil
}
// Serve spins up a REST API server at the given hostname and port.
func (b *Indexer) Serve() error {
return nil
}
// Start starts the indexing service on L1 and L2 chains
func (i *Indexer) Run(ctx context.Context) error {
var wg sync.WaitGroup
errCh := make(chan error)
// Start starts the indexing service on L1 and L2 chains and also
// starts the REST server.
func (b *Indexer) Start() error {
go b.l1Processor.Start()
go b.l2Processor.Start()
// If either processor errors out, we stop
processorCtx, cancel := context.WithCancel(ctx)
run := func(start func(ctx context.Context) error) {
wg.Add(1)
defer wg.Done()
return nil
err := start(processorCtx)
if err != nil {
i.log.Error("halting indexer on error", "err", err)
cancel()
errCh <- err
}
}
// Kick off the processors
go run(i.L1Processor.Start)
go run(i.L2Processor.Start)
err := <-errCh
// ensure both processors have halted before returning
wg.Wait()
return err
}
// Stop stops the indexing service on L1 and L2 chains.
func (b *Indexer) Stop() {
// Cleanup releases any resources that might be currently held by the indexer
func (i *Indexer) Cleanup() {
i.db.Close()
}
......@@ -56,6 +56,8 @@ CREATE TABLE IF NOT EXISTS legacy_state_batches (
CREATE TABLE IF NOT EXISTS output_proposals (
output_root VARCHAR NOT NULL PRIMARY KEY,
l2_output_index UINT256,
l2_block_number UINT256,
l1_contract_event_guid VARCHAR REFERENCES l1_contract_events(guid)
......
......@@ -3,6 +3,7 @@ package node
import (
"context"
"errors"
"fmt"
"math/big"
"time"
......@@ -29,6 +30,8 @@ type EthClient interface {
BlockHeadersByRange(*big.Int, *big.Int) ([]*types.Header, error)
BlockHeaderByHash(common.Hash) (*types.Header, error)
StorageHash(common.Address, *big.Int) (common.Hash, error)
RawRpcClient() *rpc.Client
}
......@@ -36,7 +39,7 @@ type client struct {
rpcClient *rpc.Client
}
func NewEthClient(rpcUrl string) (EthClient, error) {
func DialEthClient(rpcUrl string) (EthClient, error) {
ctxwt, cancel := context.WithTimeout(context.Background(), defaultDialTimeout)
defer cancel()
......@@ -49,6 +52,10 @@ func NewEthClient(rpcUrl string) (EthClient, error) {
return client, nil
}
func NewEthClient(rpcClient *rpc.Client) EthClient {
return &client{rpcClient}
}
func (c *client) RawRpcClient() *rpc.Client {
return c.rpcClient
}
......@@ -136,15 +143,33 @@ func (c *client) BlockHeadersByRange(startHeight, endHeight *big.Int) ([]*types.
return headers, nil
}
// StorageHash returns the sha3 of the storage root for the specified account
func (c *client) StorageHash(address common.Address, blockNumber *big.Int) (common.Hash, error) {
ctxwt, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout)
defer cancel()
proof := struct{ StorageHash common.Hash }{}
err := c.rpcClient.CallContext(ctxwt, &proof, "eth_getProof", address, nil, toBlockNumArg(blockNumber))
if err != nil {
return common.Hash{}, err
}
return proof.StorageHash, nil
}
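// Note: eth_getProof is called with no storage keys (nil), so no per-slot proofs are requested;
// only the account's storageHash field of the response is decoded into the anonymous struct above.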
func toBlockNumArg(number *big.Int) string {
if number == nil {
return "latest"
} else if number.Sign() >= 0 {
return hexutil.EncodeBig(number)
}
pending := big.NewInt(-1)
if number.Cmp(pending) == 0 {
return "pending"
// It's negative.
if number.IsInt64() {
tag, _ := rpc.BlockNumber(number.Int64()).MarshalText()
return string(tag)
}
return hexutil.EncodeBig(number)
// It's negative and large, which is invalid.
return fmt.Sprintf("<invalid %d>", number)
}
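// Assuming go-ethereum's rpc.BlockNumber constants, small negative heights marshal to the named
// tags, e.g. -1 -> "pending", -2 -> "latest", -3 -> "finalized", -4 -> "safe".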
......@@ -31,6 +31,11 @@ func (m *MockEthClient) BlockHeaderByHash(hash common.Hash) (*types.Header, erro
return args.Get(0).(*types.Header), args.Error(1)
}
func (m *MockEthClient) StorageHash(address common.Address, blockNumber *big.Int) (common.Hash, error) {
args := m.Called(address, blockNumber)
return args.Get(0).(common.Hash), args.Error(1)
}
func (m *MockEthClient) RawRpcClient() *rpc.Client {
args := m.Called()
return args.Get(0).(*rpc.Client)
......
......@@ -4,8 +4,8 @@ import (
"math/big"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/core/types"
)
......@@ -43,8 +43,8 @@ func TestHeaderTraversalNextFinalizedHeadersNoOp(t *testing.T) {
// no new headers when matched with head
client.On("FinalizedBlockHeight").Return(big.NewInt(10), nil)
headers, err := headerTraversal.NextFinalizedHeaders(100)
assert.NoError(t, err)
assert.Empty(t, headers)
require.NoError(t, err)
require.Empty(t, headers)
}
func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
......@@ -58,16 +58,16 @@ func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
client.On("FinalizedBlockHeight").Return(big.NewInt(4), nil).Times(1) // Times so that we can override next
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil)
headers, err := headerTraversal.NextFinalizedHeaders(5)
assert.NoError(t, err)
assert.Len(t, headers, 5)
require.NoError(t, err)
require.Len(t, headers, 5)
// blocks [5..9]
headers = makeHeaders(5, headers[len(headers)-1])
client.On("FinalizedBlockHeight").Return(big.NewInt(9), nil)
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(9))).Return(headers, nil)
headers, err = headerTraversal.NextFinalizedHeaders(5)
assert.NoError(t, err)
assert.Len(t, headers, 5)
require.NoError(t, err)
require.Len(t, headers, 5)
}
func TestHeaderTraversalNextFinalizedHeadersMaxSize(t *testing.T) {
......@@ -83,15 +83,15 @@ func TestHeaderTraversalNextFinalizedHeadersMaxSize(t *testing.T) {
headers := makeHeaders(5, nil)
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil)
headers, err := headerTraversal.NextFinalizedHeaders(5)
assert.NoError(t, err)
assert.Len(t, headers, 5)
require.NoError(t, err)
require.Len(t, headers, 5)
// clamped by the supplied size. FinalizedHeight == 100
headers = makeHeaders(10, headers[len(headers)-1])
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(14))).Return(headers, nil)
headers, err = headerTraversal.NextFinalizedHeaders(10)
assert.NoError(t, err)
assert.Len(t, headers, 10)
require.NoError(t, err)
require.Len(t, headers, 10)
}
func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
......@@ -105,14 +105,14 @@ func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
client.On("FinalizedBlockHeight").Return(big.NewInt(4), nil).Times(1) // Times so that we can override next
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil)
headers, err := headerTraversal.NextFinalizedHeaders(5)
assert.NoError(t, err)
assert.Len(t, headers, 5)
require.NoError(t, err)
require.Len(t, headers, 5)
// blocks [5..9]. Next batch is not chained correctly (starts again from genesis)
headers = makeHeaders(5, nil)
client.On("FinalizedBlockHeight").Return(big.NewInt(9), nil)
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(9))).Return(headers, nil)
headers, err = headerTraversal.NextFinalizedHeaders(5)
assert.Nil(t, headers)
assert.Equal(t, ErrHeaderTraversalAndProviderMismatchedState, err)
require.Nil(t, headers)
require.Equal(t, ErrHeaderTraversalAndProviderMismatchedState, err)
}
......@@ -2,9 +2,7 @@ package processor
import (
"context"
"encoding/hex"
"errors"
"math/big"
"reflect"
"github.com/google/uuid"
......@@ -36,12 +34,17 @@ type L1Contracts struct {
// Remove afterwards?
}
type checkpointAbi struct {
l2OutputOracle *abi.ABI
legacyStateCommitmentChain *abi.ABI
func DevL1Contracts() L1Contracts {
return L1Contracts{
OptimismPortal: common.HexToAddress("0x6900000000000000000000000000000000000000"),
L2OutputOracle: common.HexToAddress("0x6900000000000000000000000000000000000001"),
L1CrossDomainMessenger: common.HexToAddress("0x6900000000000000000000000000000000000002"),
L1StandardBridge: common.HexToAddress("0x6900000000000000000000000000000000000003"),
L1ERC721Bridge: common.HexToAddress("0x6900000000000000000000000000000000000004"),
}
}
func (c L1Contracts) toSlice() []common.Address {
func (c L1Contracts) ToSlice() []common.Address {
fields := reflect.VisibleFields(reflect.TypeOf(c))
v := reflect.ValueOf(c)
......@@ -53,12 +56,17 @@ func (c L1Contracts) toSlice() []common.Address {
return contracts
}
type checkpointAbi struct {
l2OutputOracle *abi.ABI
legacyStateCommitmentChain *abi.ABI
}
type L1Processor struct {
processor
}
func NewL1Processor(ethClient node.EthClient, db *database.DB, l1Contracts L1Contracts) (*L1Processor, error) {
l1ProcessLog := log.New("processor", "l1")
func NewL1Processor(logger log.Logger, ethClient node.EthClient, db *database.DB, l1Contracts L1Contracts) (*L1Processor, error) {
l1ProcessLog := logger.New("processor", "l1")
l1ProcessLog.Info("initializing processor")
l2OutputOracleABI, err := bindings.L2OutputOracleMetaData.GetAbi()
......@@ -109,14 +117,16 @@ func NewL1Processor(ethClient node.EthClient, db *database.DB, l1Contracts L1Con
func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1Contracts, checkpointAbi checkpointAbi) ProcessFn {
rawEthClient := ethclient.NewClient(ethClient.RawRpcClient())
contractAddrs := l1Contracts.toSlice()
contractAddrs := l1Contracts.ToSlice()
processLog.Info("processor configured with contracts", "contracts", l1Contracts)
outputProposedEventSig := checkpointAbi.l2OutputOracle.Events["OutputProposed"].ID
legacyStateBatchAppendedEventSig := checkpointAbi.legacyStateCommitmentChain.Events["StateBatchAppended"].ID
outputProposedEventName := "OutputProposed"
outputProposedEventSig := checkpointAbi.l2OutputOracle.Events[outputProposedEventName].ID
legacyStateBatchAppendedEventName := "StateBatchAppended"
legacyStateBatchAppendedEventSig := checkpointAbi.legacyStateCommitmentChain.Events[legacyStateBatchAppendedEventName].ID
return func(db *database.DB, headers []*types.Header) error {
numHeaders := len(headers)
headerMap := make(map[common.Hash]*types.Header)
for _, header := range headers {
headerMap[header.Hash()] = header
......@@ -124,7 +134,7 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1
/** Watch for all Optimism Contract Events **/
logFilter := ethereum.FilterQuery{FromBlock: headers[0].Number, ToBlock: headers[numHeaders-1].Number, Addresses: contractAddrs}
logFilter := ethereum.FilterQuery{FromBlock: headers[0].Number, ToBlock: headers[len(headers)-1].Number, Addresses: contractAddrs}
logs, err := rawEthClient.FilterLogs(context.Background(), logFilter) // []types.Log
if err != nil {
return err
......@@ -138,41 +148,43 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1
l1ContractEvents := make([]*database.L1ContractEvent, len(logs))
processedContractEvents := NewProcessedContractEvents()
for i, log := range logs {
for i := range logs {
log := &logs[i]
header, ok := headerMap[log.BlockHash]
if !ok {
processLog.Error("contract event found with associated header not in the batch", "header", log.BlockHash, "log_index", log.Index)
return errors.New("parsed log with a block hash not in this batch")
}
contractEvent := processedContractEvents.AddLog(&logs[i], header.Time)
contractEvent := processedContractEvents.AddLog(log, header.Time)
l1HeadersOfInterest[log.BlockHash] = true
l1ContractEvents[i] = &database.L1ContractEvent{ContractEvent: *contractEvent}
// Track Checkpoint Events for L2
switch contractEvent.EventSignature {
case outputProposedEventSig:
if len(log.Topics) != 4 {
processLog.Error("parsed unexpected number of L2OutputOracle#OutputProposed log topics", "log_topics", log.Topics)
return errors.New("parsed unexpected OutputProposed event")
var outputProposed bindings.L2OutputOracleOutputProposed
err := UnpackLog(&outputProposed, log, outputProposedEventName, checkpointAbi.l2OutputOracle)
if err != nil {
return err
}
outputProposals = append(outputProposals, &database.OutputProposal{
OutputRoot: log.Topics[1],
L2BlockNumber: database.U256{Int: new(big.Int).SetBytes(log.Topics[2].Bytes())},
OutputRoot: outputProposed.OutputRoot,
L2OutputIndex: database.U256{Int: outputProposed.L2OutputIndex},
L2BlockNumber: database.U256{Int: outputProposed.L2BlockNumber},
L1ContractEventGUID: contractEvent.GUID,
})
case legacyStateBatchAppendedEventSig:
var stateBatchAppended legacy_bindings.StateCommitmentChainStateBatchAppended
err := checkpointAbi.l2OutputOracle.UnpackIntoInterface(&stateBatchAppended, "StateBatchAppended", log.Data)
if err != nil || len(log.Topics) != 2 {
processLog.Error("unexpected StateCommitmentChain#StateBatchAppended log data or log topics", "log_topics", log.Topics, "log_data", hex.EncodeToString(log.Data), "err", err)
err := UnpackLog(&stateBatchAppended, log, legacyStateBatchAppendedEventName, checkpointAbi.legacyStateCommitmentChain)
if err != nil {
return err
}
legacyStateBatches = append(legacyStateBatches, &database.LegacyStateBatch{
Index: new(big.Int).SetBytes(log.Topics[1].Bytes()).Uint64(),
Index: stateBatchAppended.BatchIndex.Uint64(),
Root: stateBatchAppended.BatchRoot,
Size: stateBatchAppended.BatchSize.Uint64(),
PrevTotal: stateBatchAppended.PrevTotalElements.Uint64(),
......@@ -199,7 +211,7 @@ func l1ProcessFn(processLog log.Logger, ethClient node.EthClient, l1Contracts L1
numIndexedL1Headers := len(indexedL1Headers)
if numIndexedL1Headers > 0 {
processLog.Info("saving l1 blocks with optimism logs", "size", numIndexedL1Headers, "batch_size", numHeaders)
processLog.Info("saving l1 blocks with optimism logs", "size", numIndexedL1Headers, "batch_size", len(headers))
err = db.Blocks.StoreL1BlockHeaders(indexedL1Headers)
if err != nil {
return err
......@@ -296,15 +308,16 @@ func l1BridgeProcessContractEvents(processLog log.Logger, db *database.DB, ethCl
// Check if the L2Processor is behind or really has missed an event. We can compare against the
// OptimismPortal#ProvenWithdrawal on-chain mapping relative to the latest indexed L2 height
if withdrawal == nil {
bridgeAddress := l1Contracts.L1StandardBridge
portalAddress := l1Contracts.OptimismPortal
if provenWithdrawalEvent.From != bridgeAddress || provenWithdrawalEvent.To != bridgeAddress {
// This needs to be updated to read from config as well as correctly identify if the CrossDomainMessenger message is a standard
// bridge message. This will be easier to do once we index passed messages separately, which will include the right To/From fields
if provenWithdrawalEvent.From != common.HexToAddress("0x4200000000000000000000000000000000000007") || provenWithdrawalEvent.To != l1Contracts.L1CrossDomainMessenger {
// non-bridge withdrawal
continue
}
// Query for the proven withdrawal on-chain
provenWithdrawal, err := OptimismPortalQueryProvenWithdrawal(rawEthClient, portalAddress, withdrawalHash)
provenWithdrawal, err := OptimismPortalQueryProvenWithdrawal(rawEthClient, l1Contracts.OptimismPortal, withdrawalHash)
if err != nil {
return err
}
......@@ -349,8 +362,8 @@ func l1BridgeProcessContractEvents(processLog log.Logger, db *database.DB, ethCl
return err
}
// Since we have to prove the event on-chain first, we don't need to check if the processor is
// behind. we're definitely in an error state if we cannot find the withdrawal when parsing this even
// Since we have to prove the event on-chain first, we don't need to check if the processor is behind
// We're definitely in an error state if we cannot find the withdrawal when parsing this event
if withdrawal == nil {
processLog.Crit("missing indexed withdrawal for this finalization event")
return errors.New("missing withdrawal message")
......
......@@ -39,7 +39,7 @@ func L2ContractPredeploys() L2Contracts {
}
}
func (c L2Contracts) toSlice() []common.Address {
func (c L2Contracts) ToSlice() []common.Address {
fields := reflect.VisibleFields(reflect.TypeOf(c))
v := reflect.ValueOf(c)
......@@ -55,8 +55,8 @@ type L2Processor struct {
processor
}
func NewL2Processor(ethClient node.EthClient, db *database.DB, l2Contracts L2Contracts) (*L2Processor, error) {
l2ProcessLog := log.New("processor", "l2")
func NewL2Processor(logger log.Logger, ethClient node.EthClient, db *database.DB, l2Contracts L2Contracts) (*L2Processor, error) {
l2ProcessLog := logger.New("processor", "l2")
l2ProcessLog.Info("initializing processor")
latestHeader, err := db.Blocks.LatestL2BlockHeader()
......@@ -94,7 +94,7 @@ func NewL2Processor(ethClient node.EthClient, db *database.DB, l2Contracts L2Con
func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2Contracts) ProcessFn {
rawEthClient := ethclient.NewClient(ethClient.RawRpcClient())
contractAddrs := l2Contracts.toSlice()
contractAddrs := l2Contracts.ToSlice()
processLog.Info("processor configured with contracts", "contracts", l2Contracts)
return func(db *database.DB, headers []*types.Header) error {
numHeaders := len(headers)
......@@ -127,14 +127,15 @@ func l2ProcessFn(processLog log.Logger, ethClient node.EthClient, l2Contracts L2
l2ContractEvents := make([]*database.L2ContractEvent, len(logs))
processedContractEvents := NewProcessedContractEvents()
for i, log := range logs {
for i := range logs {
log := &logs[i]
header, ok := l2HeaderMap[log.BlockHash]
if !ok {
processLog.Error("contract event found with associated header not in the batch", "header", header, "log_index", log.Index)
return errors.New("parsed log with a block hash not in this batch")
}
contractEvent := processedContractEvents.AddLog(&logs[i], header.Time)
contractEvent := processedContractEvents.AddLog(log, header.Time)
l2ContractEvents[i] = &database.L2ContractEvent{ContractEvent: *contractEvent}
}
......
......@@ -30,11 +30,20 @@ func OptimismPortalWithdrawalProvenEvents(events *ProcessedContractEvents) ([]Op
return nil, err
}
processedWithdrawalProvenEvents := events.eventsBySignature[optimismPortalAbi.Events["WithdrawalProven"].ID]
eventName := "WithdrawalProven"
processedWithdrawalProvenEvents := events.eventsBySignature[optimismPortalAbi.Events[eventName].ID]
provenEvents := make([]OptimismPortalWithdrawalProvenEvent, len(processedWithdrawalProvenEvents))
for i, provenEvent := range processedWithdrawalProvenEvents {
log := events.eventLog[provenEvent.GUID]
var withdrawalProven bindings.OptimismPortalWithdrawalProven
err := UnpackLog(&withdrawalProven, log, eventName, optimismPortalAbi)
if err != nil {
return nil, err
}
provenEvents[i] = OptimismPortalWithdrawalProvenEvent{nil, provenEvent}
provenEvents[i] = OptimismPortalWithdrawalProvenEvent{&withdrawalProven, provenEvent}
}
return provenEvents, nil
......
package processor
import (
"context"
"time"
"github.com/ethereum-optimism/optimism/indexer/database"
......@@ -25,17 +26,33 @@ type processor struct {
db *database.DB
processFn ProcessFn
processLog log.Logger
paused bool
latestProcessedHeader *types.Header
}
// Start kicks off the processing loop
func (p processor) Start() {
// Start kicks off the processing loop. This is a blocking operation that runs
// until the processor encounters an error, aborting the loop,
// or the supplied context is cancelled.
func (p *processor) Start(ctx context.Context) error {
done := ctx.Done()
pollTicker := time.NewTicker(defaultLoopInterval)
defer pollTicker.Stop()
p.processLog.Info("starting processor...")
var unprocessedHeaders []*types.Header
for range pollTicker.C {
for {
select {
case <-done:
p.processLog.Info("stopping processor")
return nil
case <-pollTicker.C:
if p.paused {
p.processLog.Warn("processor is paused...")
continue
}
if len(unprocessedHeaders) == 0 {
newHeaders, err := p.headerTraversal.NextFinalizedHeaders(defaultHeaderBufferSize)
if err != nil {
......@@ -60,11 +77,30 @@ func (p processor) Start() {
return p.processFn(db, unprocessedHeaders)
})
// Eventually, we want to halt the processor on any error rather than rely
// on this loop for retry functionality.
if err != nil {
batchLog.Warn("error processing batch. no operations committed", "err", err)
} else {
batchLog.Info("fully committed batch")
unprocessedHeaders = nil
p.latestProcessedHeader = lastHeader
}
}
}
}
func (p processor) LatestProcessedHeader() *types.Header {
return p.latestProcessedHeader
}
// Useful ONLY for tests!
func (p *processor) PauseForTest() {
p.paused = true
}
func (p *processor) ResumeForTest() {
p.paused = false
}
......@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"errors"
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/indexer/database"
......@@ -14,7 +15,7 @@ import (
)
var (
ethAddress = common.HexToAddress("0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000")
EthAddress = common.HexToAddress("0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000")
)
type StandardBridgeInitiatedEvent struct {
......@@ -131,7 +132,7 @@ func _standardBridgeInitiatedEvents[BridgeEvent bindings.L1StandardBridgeETHBrid
// represent eth bridge as an erc20
erc20BridgeData = &bindings.L1StandardBridgeERC20BridgeInitiated{
// Represent ETH using the hardcoded address
LocalToken: ethAddress, RemoteToken: ethAddress,
LocalToken: EthAddress, RemoteToken: EthAddress,
// Bridge data
From: ethBridgeData.From, To: ethBridgeData.To, Amount: ethBridgeData.Amount, ExtraData: ethBridgeData.ExtraData,
}
......@@ -170,8 +171,14 @@ func _standardBridgeFinalizedEvents[BridgeEvent bindings.L1StandardBridgeETHBrid
return nil, err
}
optimismPortalAbi, err := bindings.OptimismPortalMetaData.GetAbi()
if err != nil {
return nil, err
}
relayedMessageEventAbi := l1CrossDomainMessengerABI.Events["RelayedMessage"]
relayMessageMethodAbi := l1CrossDomainMessengerABI.Methods["relayMessage"]
finalizeWithdrawalTransactionMethodAbi := optimismPortalAbi.Methods["finalizeWithdrawalTransaction"]
var bridgeData BridgeEvent
var eventName string
......@@ -201,27 +208,52 @@ func _standardBridgeFinalizedEvents[BridgeEvent bindings.L1StandardBridgeETHBrid
return nil, errors.New("unexpected bridge event ordering")
}
// There's no way to extract the nonce on the relayed message event. we can extract
// the nonce by unpacking the transaction input for the `relayMessage` transaction
// There's no way to extract the nonce from the relayed message event. We can extract the nonce
// by unpacking the transaction input for the `relayMessage` transaction. Since bedrock has the OptimismPortal
// on L1 as an intermediary for finalization, we have to check both scenarios
tx, isPending, err := rawEthClient.TransactionByHash(context.Background(), relayedMsgLog.TxHash)
if err != nil || isPending {
return nil, errors.New("unable to query relayMessage tx for bridge finalization event")
}
txData := tx.Data()
if !bytes.Equal(txData[:4], relayMessageMethodAbi.ID) {
return nil, errors.New("bridge finalization event does not match relayMessage tx invocation")
// If this is a finalization step with the optimism portal, the calldata for the relayMessage invocation can be
// extracted from the withdrawal transaction.
// NOTE: the L2CrossDomainMessenger nonce may not match the L2ToL1MessagePasser nonce, hence the additional
// layer of decoding vs reading the nonce of the withdrawal transaction. Both nonces have a similar but
// different lifecycle that might not match (i.e. the L2ToL1MessagePasser can be invoked directly)
var relayMsgCallData []byte
switch {
case bytes.Equal(tx.Data()[:4], relayMessageMethodAbi.ID):
relayMsgCallData = tx.Data()[4:]
case bytes.Equal(tx.Data()[:4], finalizeWithdrawalTransactionMethodAbi.ID):
data, err := finalizeWithdrawalTransactionMethodAbi.Inputs.Unpack(tx.Data()[4:])
if err != nil {
return nil, err
}
finalizeWithdrawTransactionInput := new(struct {
Tx bindings.TypesWithdrawalTransaction
})
err = finalizeWithdrawalTransactionMethodAbi.Inputs.Copy(finalizeWithdrawTransactionInput, data)
if err != nil {
return nil, fmt.Errorf("unable extract withdrawal tx input from finalizeWithdrawalTransaction calldata: %w", err)
} else if !bytes.Equal(finalizeWithdrawTransactionInput.Tx.Data[:4], relayMessageMethodAbi.ID) {
return nil, errors.New("finalizeWithdrawalTransaction calldata does not match relayMessage invocation")
}
relayMsgCallData = finalizeWithdrawTransactionInput.Tx.Data[4:]
default:
return nil, errors.New("bridge finalization event does not correlate with a relayMessage tx invocation")
}
inputsMap := make(map[string]interface{})
err = relayMessageMethodAbi.Inputs.UnpackIntoMap(inputsMap, txData[4:])
err = relayMessageMethodAbi.Inputs.UnpackIntoMap(inputsMap, relayMsgCallData)
if err != nil {
return nil, err
}
nonce, ok := inputsMap["_nonce"].(*big.Int)
if !ok {
return nil, errors.New("unable to extract `_nonce` parameter from relayMessage transaction")
return nil, errors.New("unable to extract `_nonce` parameter from relayMessage calldata")
}
var erc20BridgeData *bindings.L1StandardBridgeERC20BridgeFinalized
......@@ -230,7 +262,7 @@ func _standardBridgeFinalizedEvents[BridgeEvent bindings.L1StandardBridgeETHBrid
ethBridgeData := any(bridgeData).(bindings.L1StandardBridgeETHBridgeFinalized)
erc20BridgeData = &bindings.L1StandardBridgeERC20BridgeFinalized{
// Represent ETH using the hardcoded address
LocalToken: ethAddress, RemoteToken: ethAddress,
LocalToken: EthAddress, RemoteToken: EthAddress,
// Bridge data
From: ethBridgeData.From, To: ethBridgeData.To, Amount: ethBridgeData.Amount, ExtraData: ethBridgeData.ExtraData,
}
......
......@@ -28,6 +28,7 @@
"L1BlockNumber",
"DisputeGameFactory",
"FaultDisputeGame",
"AlphabetVM",
"StandardBridge",
"CrossDomainMessenger",
"MIPS",
......
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package bindings
import (
"encoding/json"
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const AlphabetVMStorageLayoutJSON = "{\"storage\":null,\"types\":{}}"
var AlphabetVMStorageLayout = new(solc.StorageLayout)
var AlphabetVMDeployedBin = "0x608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f8e0cb9614610030575b600080fd5b61004361003e366004610157565b610055565b60405190815260200160405180910390f35b60008060007f0000000000000000000000000000000000000000000000000000000000000000878760405161008b9291906101c3565b6040518091039020036100af57600091506100a8868801886101d3565b90506100ce565b6100bb868801886101ec565b9092509050816100ca8161023d565b9250505b816100da826001610275565b6040805160208101939093528201526060016040516020818303038152906040528051906020012092505050949350505050565b60008083601f84011261012057600080fd5b50813567ffffffffffffffff81111561013857600080fd5b60208301915083602082850101111561015057600080fd5b9250929050565b6000806000806040858703121561016d57600080fd5b843567ffffffffffffffff8082111561018557600080fd5b6101918883890161010e565b909650945060208701359150808211156101aa57600080fd5b506101b78782880161010e565b95989497509550505050565b8183823760009101908152919050565b6000602082840312156101e557600080fd5b5035919050565b600080604083850312156101ff57600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361026e5761026e61020e565b5060010190565b600082198211156102885761028861020e565b50019056fea164736f6c634300080f000a"
func init() {
if err := json.Unmarshal([]byte(AlphabetVMStorageLayoutJSON), AlphabetVMStorageLayout); err != nil {
panic(err)
}
layouts["AlphabetVM"] = AlphabetVMStorageLayout
deployedBytecodes["AlphabetVM"] = AlphabetVMDeployedBin
}
package op_challenger
import (
"context"
"fmt"
"time"
......@@ -14,7 +15,7 @@ import (
)
// Main is the programmatic entry-point for running op-challenger
func Main(logger log.Logger, cfg *config.Config) error {
func Main(ctx context.Context, logger log.Logger, cfg *config.Config) error {
client, err := ethclient.Dial(cfg.L1EthRpc)
if err != nil {
return fmt.Errorf("failed to dial L1: %w", err)
......@@ -48,8 +49,19 @@ func Main(logger log.Logger, cfg *config.Config) error {
for {
logger.Info("Performing action")
_ = agent.Act()
_ = agent.Act(ctx)
status, _ := caller.GetGameStatus(ctx)
if status != 0 {
caller.LogGameStatus()
return nil
} else {
caller.LogGameInfo()
time.Sleep(300 * time.Millisecond)
}
select {
case <-time.After(300 * time.Millisecond):
// Continue
case <-ctx.Done():
return ctx.Err()
}
}
}
package main
import (
"context"
"fmt"
"os"
......@@ -41,7 +42,7 @@ func main() {
}
}
type ConfigAction func(log log.Logger, config *config.Config) error
type ConfigAction func(ctx context.Context, log log.Logger, config *config.Config) error
func run(args []string, action ConfigAction) error {
oplog.SetupDefaults()
......@@ -63,7 +64,7 @@ func run(args []string, action ConfigAction) error {
if err != nil {
return err
}
return action(logger, cfg)
return action(ctx.Context, logger, cfg)
}
return app.Run(args)
}
......
package main
import (
"context"
"fmt"
"testing"
......@@ -137,7 +138,7 @@ func runWithArgs(cliArgs []string) (log.Logger, config.Config, error) {
cfg := new(config.Config)
var logger log.Logger
fullArgs := append([]string{"op-challenger"}, cliArgs...)
err := run(fullArgs, func(log log.Logger, config *config.Config) error {
err := run(fullArgs, func(ctx context.Context, log log.Logger, config *config.Config) error {
logger = log
cfg = config
return nil
......
......@@ -29,8 +29,11 @@ func NewAgent(loader Loader, maxDepth int, trace TraceProvider, responder Respon
}
// Act iterates the game & performs all of the next actions.
func (a *Agent) Act() error {
game, err := a.newGameFromContracts(context.Background())
func (a *Agent) Act(ctx context.Context) error {
if a.tryResolve(ctx) {
return nil
}
game, err := a.newGameFromContracts(ctx)
if err != nil {
a.log.Error("Failed to create new game", "err", err)
return err
......@@ -50,6 +53,19 @@ func (a *Agent) Act() error {
return nil
}
// tryResolve resolves the game if it is in a terminal state
// and returns true if the game resolves successfully.
func (a *Agent) tryResolve(ctx context.Context) bool {
	if !a.responder.CanResolve(ctx) {
		return false
	}
	if err := a.responder.Resolve(ctx); err != nil {
		a.log.Error("failed to resolve the game", "err", err)
		return false
	}
	return true
}
// newGameFromContracts initializes a new game state from the state in the contract
func (a *Agent) newGameFromContracts(ctx context.Context) (Game, error) {
claims, err := a.loader.FetchClaims(ctx)
......
......@@ -76,6 +76,8 @@ func (c *Claim) DefendsParent() bool {
// Responder takes a response action & executes.
// For full op-challenger this means executing the transaction on chain.
type Responder interface {
CanResolve(ctx context.Context) bool
Resolve(ctx context.Context) error
Respond(ctx context.Context, response Claim) error
Step(ctx context.Context, stepData StepCallData) error
}
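// A minimal sketch, assuming it lives in this same package: a hypothetical
// stubResponder satisfying the extended Responder interface, e.g. for exercising
// the Agent in tests without sending transactions. Claim and StepCallData are the
// package's existing types; the struct and field names here are illustrative only.
type stubResponder struct {
	resolvable bool
	resolved   bool
}

func (s *stubResponder) CanResolve(ctx context.Context) bool { return s.resolvable }

func (s *stubResponder) Resolve(ctx context.Context) error {
	s.resolved = true
	return nil
}

func (s *stubResponder) Respond(ctx context.Context, response Claim) error { return nil }

func (s *stubResponder) Step(ctx context.Context, stepData StepCallData) error { return nil }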
package challenger
import (
"context"
"errors"
"testing"
"time"
op_challenger "github.com/ethereum-optimism/optimism/op-challenger"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
type Helper struct {
log log.Logger
cancel func()
errors chan error
}
type Option func(config2 *config.Config)
func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name string, options ...Option) *Helper {
log := testlog.Logger(t, log.LvlInfo).New("role", name)
log.Info("Creating challenger", "l1", l1Endpoint)
txmgrCfg := txmgr.NewCLIConfig(l1Endpoint)
txmgrCfg.NumConfirmations = 1
txmgrCfg.ReceiptQueryInterval = 1 * time.Second
cfg := &config.Config{
L1EthRpc: l1Endpoint,
AlphabetTrace: "",
AgreeWithProposedOutput: true,
TxMgrConfig: txmgrCfg,
}
for _, option := range options {
option(cfg)
}
require.NotEmpty(t, cfg.TxMgrConfig.PrivateKey, "Missing private key for TxMgrConfig")
errCh := make(chan error, 1)
ctx, cancel := context.WithCancel(ctx)
go func() {
defer close(errCh)
errCh <- op_challenger.Main(ctx, log, cfg)
}()
return &Helper{
log: log,
cancel: cancel,
errors: errCh,
}
}
func (h *Helper) Close() error {
h.cancel()
select {
case <-time.After(1 * time.Minute):
return errors.New("timed out while stopping challenger")
case err := <-h.errors:
if !errors.Is(err, context.Canceled) {
return err
}
return nil
}
}
package disputegame
import (
"context"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-chain-ops/deployer"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/stretchr/testify/require"
)
// deployDisputeGameContracts deploys the DisputeGameFactory, AlphabetVM and FaultDisputeGame contracts
// It configures the alphabet fault game as game type 0 (faultGameType)
// If/when the dispute game factory becomes a predeployed contract this can be removed and just use the
// predeployed version
func deployDisputeGameContracts(require *require.Assertions, ctx context.Context, client *ethclient.Client, opts *bind.TransactOpts, gameDuration uint64) *bindings.DisputeGameFactory {
ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
defer cancel()
// Deploy the proxy
_, tx, proxy, err := bindings.DeployProxy(opts, client, deployer.TestAddress)
require.NoError(err)
proxyAddr, err := bind.WaitDeployed(ctx, client, tx)
require.NoError(err)
// Deploy the dispute game factory implementation
_, tx, _, err = bindings.DeployDisputeGameFactory(opts, client)
require.NoError(err)
factoryAddr, err := bind.WaitDeployed(ctx, client, tx)
require.NoError(err)
// Point the proxy at the implementation and create bindings going via the proxy
disputeGameFactoryAbi, err := bindings.DisputeGameFactoryMetaData.GetAbi()
require.NoError(err)
data, err := disputeGameFactoryAbi.Pack("initialize", deployer.TestAddress)
require.NoError(err)
_, err = proxy.UpgradeToAndCall(opts, factoryAddr, data)
require.NoError(err)
factory, err := bindings.NewDisputeGameFactory(proxyAddr, client)
require.NoError(err)
// Now setup the fault dispute game type
// Start by deploying the AlphabetVM
_, tx, _, err = bindings.DeployAlphabetVM(opts, client, alphabetVMAbsolutePrestate)
require.NoError(err)
alphaVMAddr, err := bind.WaitDeployed(ctx, client, tx)
require.NoError(err)
// Deploy the fault dispute game implementation
_, tx, _, err = bindings.DeployFaultDisputeGame(opts, client, alphabetVMAbsolutePrestate, big.NewInt(alphabetGameDepth), gameDuration, alphaVMAddr)
require.NoError(err)
faultDisputeGameAddr, err := bind.WaitDeployed(ctx, client, tx)
require.NoError(err)
// Set the fault game type implementation
_, err = factory.SetImplementation(opts, faultGameType, faultDisputeGameAddr)
require.NoError(err)
return factory
}
package disputegame
import (
"context"
"fmt"
"math"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-chain-ops/deployer"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/fault"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger"
"github.com/ethereum-optimism/optimism/op-service/client/utils"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/holiman/uint256"
"github.com/stretchr/testify/require"
)
const faultGameType uint8 = 0
const alphabetGameDepth = 4
type Status uint8
const (
StatusInProgress Status = iota
StatusChallengerWins
StatusDefenderWins
)
var alphaExtraData = common.Hex2Bytes("1000000000000000000000000000000000000000000000000000000000000000")
var alphabetVMAbsolutePrestate = uint256.NewInt(96).Bytes32()
type FactoryHelper struct {
t *testing.T
require *require.Assertions
client *ethclient.Client
opts *bind.TransactOpts
factory *bindings.DisputeGameFactory
}
func NewFactoryHelper(t *testing.T, ctx context.Context, client *ethclient.Client, gameDuration uint64) *FactoryHelper {
require := require.New(t)
chainID, err := client.ChainID(ctx)
require.NoError(err)
opts, err := bind.NewKeyedTransactorWithChainID(deployer.TestKey, chainID)
require.NoError(err)
factory := deployDisputeGameContracts(require, ctx, client, opts, gameDuration)
return &FactoryHelper{
t: t,
require: require,
client: client,
opts: opts,
factory: factory,
}
}
func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet string) *FaultGameHelper {
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
trace := fault.NewAlphabetProvider(claimedAlphabet, 4)
rootClaim, err := trace.Get(uint64(math.Pow(2, alphabetGameDepth)) - 1)
h.require.NoError(err)
tx, err := h.factory.Create(h.opts, faultGameType, rootClaim, alphaExtraData)
h.require.NoError(err)
rcpt, err := utils.WaitReceiptOK(ctx, h.client, tx.Hash())
h.require.NoError(err)
h.require.Len(rcpt.Logs, 1, "should have emitted a single DisputeGameCreated event")
createdEvent, err := h.factory.ParseDisputeGameCreated(*rcpt.Logs[0])
h.require.NoError(err)
game, err := bindings.NewFaultDisputeGame(createdEvent.DisputeProxy, h.client)
h.require.NoError(err)
return &FaultGameHelper{
t: h.t,
require: h.require,
client: h.client,
opts: h.opts,
game: game,
addr: createdEvent.DisputeProxy,
claimedAlphabet: claimedAlphabet,
}
}
type FaultGameHelper struct {
t *testing.T
require *require.Assertions
client *ethclient.Client
opts *bind.TransactOpts
game *bindings.FaultDisputeGame
addr common.Address
claimedAlphabet string
}
func (g *FaultGameHelper) StartChallenger(ctx context.Context, l1Endpoint string, name string, options ...challenger.Option) *challenger.Helper {
opts := []challenger.Option{
func(c *config.Config) {
c.GameAddress = g.addr
c.GameDepth = alphabetGameDepth
// By default the challenger agrees with the root claim (thus disagrees with the proposed output)
// This can be overridden by passing in options
c.AlphabetTrace = g.claimedAlphabet
c.AgreeWithProposedOutput = false
},
}
opts = append(opts, options...)
return challenger.NewChallenger(g.t, ctx, l1Endpoint, name, opts...)
}
func (g *FaultGameHelper) WaitForClaimCount(ctx context.Context, count int64) {
ctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
defer cancel()
err := utils.WaitFor(ctx, 1*time.Second, func() (bool, error) {
actual, err := g.game.ClaimDataLen(&bind.CallOpts{Context: ctx})
if err != nil {
return false, err
}
g.t.Log("Waiting for claim count", "current", actual, "expected", count, "game", g.addr)
return actual.Cmp(big.NewInt(count)) == 0, nil
})
g.require.NoError(err)
}
func (g *FaultGameHelper) Resolve(ctx context.Context) {
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
tx, err := g.game.Resolve(g.opts)
g.require.NoError(err)
_, err = utils.WaitReceiptOK(ctx, g.client, tx.Hash())
g.require.NoError(err)
}
func (g *FaultGameHelper) WaitForGameStatus(ctx context.Context, expected Status) {
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
err := utils.WaitFor(ctx, 1*time.Second, func() (bool, error) {
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
status, err := g.game.Status(&bind.CallOpts{Context: ctx})
if err != nil {
return false, fmt.Errorf("game status unavailable: %w", err)
}
return expected == Status(status), nil
})
g.require.NoError(err, "wait for game status")
}
......@@ -5,37 +5,48 @@ import (
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/disputegame"
"github.com/ethereum-optimism/optimism/op-service/client/utils"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/stretchr/testify/require"
)
func TestTimeTravel(t *testing.T) {
func TestResolveDisputeGame(t *testing.T) {
InitParallel(t)
ctx := context.Background()
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L1BlockTime = 1
delete(cfg.Nodes, "verifier")
delete(cfg.Nodes, "sequencer")
cfg.SupportL1TimeTravel = true
sys, err := cfg.Start()
require.Nil(t, err, "Error starting up system")
defer sys.Close()
l1Client := sys.Clients["l1"]
preTravel, err := l1Client.BlockByNumber(context.Background(), nil)
require.NoError(t, err)
sys.TimeTravelClock.AdvanceTime(24 * time.Hour)
// Check that the L1 chain reaches the new time reasonably quickly (ie without taking a week)
// It should be able to jump straight to the new time with just a single block
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
defer cancel()
err = utils.WaitFor(ctx, time.Second, func() (bool, error) {
postTravel, err := l1Client.BlockByNumber(context.Background(), nil)
if err != nil {
return false, err
}
diff := time.Duration(postTravel.Time()-preTravel.Time()) * time.Second
return diff.Hours() > 23, nil
gameDuration := 24 * time.Hour
disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, l1Client, uint64(gameDuration.Seconds()))
game := disputeGameFactory.StartAlphabetGame(ctx, "zyxwvut")
require.NotNil(t, game)
game.WaitForGameStatus(ctx, disputegame.StatusInProgress)
honest := game.StartChallenger(ctx, sys.NodeEndpoint("l1"), "honestAlice", func(c *config.Config) {
c.AgreeWithProposedOutput = true // Agree with the proposed output, so disagree with the root claim
c.AlphabetTrace = "abcdefg"
c.TxMgrConfig.PrivateKey = hexutil.Encode(e2eutils.EncodePrivKey(cfg.Secrets.Alice))
})
require.NoError(t, err)
defer honest.Close()
game.WaitForClaimCount(ctx, 2)
sys.TimeTravelClock.AdvanceTime(gameDuration)
require.NoError(t, utils.WaitNextBlock(ctx, l1Client))
// Challenger should resolve the game now that the clocks have expired.
game.WaitForGameStatus(ctx, disputegame.StatusChallengerWins)
require.NoError(t, honest.Close())
}
......@@ -616,6 +616,10 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
}
}
// Don't start batch submitter and proposer if there's no sequencer.
if sys.RollupNodes["sequencer"] == nil {
return sys, nil
}
// L2Output Submitter
sys.L2OutputSubmitter, err = l2os.NewL2OutputSubmitterFromCLIConfig(l2os.CLIConfig{
L1EthRpc: sys.Nodes["l1"].WSEndpoint(),
......
......@@ -59,6 +59,14 @@ func WaitBlock(ctx context.Context, client *ethclient.Client, n uint64) error {
return nil
}
func WaitNextBlock(ctx context.Context, client *ethclient.Client) error {
current, err := client.BlockNumber(ctx)
if err != nil {
return fmt.Errorf("get starting block number: %w", err)
}
return WaitBlock(ctx, client, current+1)
}
func WaitFor(ctx context.Context, rate time.Duration, cb func() (bool, error)) error {
tick := time.NewTicker(rate)
defer tick.Stop()
......
......@@ -75,11 +75,6 @@ COPY ./packages ./packages
RUN pnpm build
FROM base as fault-detector
WORKDIR /opt/optimism/packages/fault-detector
COPY ./ops/scripts/detector.sh .
CMD ["pnpm", "run", "start"]
FROM base as replica-healthcheck
WORKDIR /opt/optimism/packages/replica-healthcheck
ENTRYPOINT ["pnpm", "run", "start"]
......@@ -92,7 +87,6 @@ FROM base as drippie-mon
WORKDIR /opt/optimism/packages/chain-mon
ENTRYPOINT ["pnpm", "run", "start:drippie-mon"]
FROM base as wd-mon
WORKDIR /opt/optimism/packages/chain-mon
ENTRYPOINT ["pnpm", "run", "start:wd-mon"]
......@@ -100,3 +94,7 @@ ENTRYPOINT ["pnpm", "run", "start:wd-mon"]
FROM base as wallet-mon
WORKDIR /opt/optimism/packages/chain-mon
ENTRYPOINT ["pnpm", "run", "start:wallet-mon"]
FROM base as fault-mon
WORKDIR /opt/optimism/packages/chain-mon
ENTRYPOINT ["pnpm", "run", "start:fault-mon"]
......@@ -6,7 +6,7 @@ DOCKER_REPO=$1
GIT_TAG=$2
GIT_SHA=$3
IMAGE_NAME=$(echo "$GIT_TAG" | grep -Eow '^(ci-builder|fault-detector|proxyd|indexer|op-[a-z0-9\-]*)' || true)
IMAGE_NAME=$(echo "$GIT_TAG" | grep -Eow '^(ci-builder|proxyd|indexer|op-[a-z0-9\-]*)' || true)
if [ -z "$IMAGE_NAME" ]; then
echo "image name could not be parsed from git tag '$GIT_TAG'"
exit 1
......
#!/bin/bash
set -e
RETRIES=${RETRIES:-60}
# waits for l2geth to be up
curl \
--fail \
--show-error \
--silent \
--output /dev/null \
--retry-connrefused \
--retry $RETRIES \
--retry-delay 1 \
$FAULT_DETECTOR__L2_RPC_PROVIDER
# go
exec pnpm start
......@@ -15,7 +15,6 @@ MIN_VERSIONS = {
'op-proposer': '0.10.14',
'proxyd': '3.16.0',
'indexer': '0.5.0',
'fault-detector': '0.6.3',
'ci-builder': '0.6.0'
}
......
......@@ -39,3 +39,31 @@ TWO_STEP_MONITOR__L2_RPC_PROVIDER=
# The block number to start monitoring from
TWO_STEP_MONITOR__START_BLOCK_NUMBER=
###############################################################################
# ↓ fault-mon ↓ #
###############################################################################
# --l1rpcprovider Provider for interacting with L1 (env: FAULT_DETECTOR__L1_RPC_PROVIDER)
FAULT_DETECTOR__L1_RPC_PROVIDER=
# --l2rpcprovider Provider for interacting with L2 (env: FAULT_DETECTOR__L2_RPC_PROVIDER)
FAULT_DETECTOR__L2_RPC_PROVIDER=
# --bedrock Whether or not the service is running against a Bedrock chain (env: FAULT_DETECTOR__BEDROCK)
FAULT_DETECTOR__BEDROCK=true
# Optional Params
# --startbatchindex Batch index to start checking from. For bedrock chains, this is the L2 height to start from (env: FAULT_DETECTOR__START_BATCH_INDEX)
# FAULT_DETECTOR__START_BATCH_INDEX=
# --optimismportaladdress    [Custom Bedrock Chains] Deployed OptimismPortal contract address. Used to retrieve necessary info for output verification (env: FAULT_DETECTOR__OPTIMISM_PORTAL_ADDRESS)
# FAULT_DETECTOR__OPTIMISM_PORTAL_ADDRESS=
# --statecommitmentchainaddress [Custom Legacy Chains] Deployed StateCommitmentChain contract address. Used to fetch necessary info for output verification. (env: FAULT_DETECTOR__STATE_COMMITMENT_CHAIN_ADDRESS)
# FAULT_DETECTOR__STATE_COMMITMENT_CHAIN_ADDRESS=
# --loopintervalms Loop interval in milliseconds, only applies if service is set to loop (env: FAULT_DETECTOR__LOOP_INTERVAL_MS)
# FAULT_DETECTOR__LOOP_INTERVAL_MS=
# --port Port for the app server (env: FAULT_DETECTOR__PORT)
# FAULT_DETECTOR__PORT=
# --hostname Hostname for the app server (env: FAULT_DETECTOR__HOSTNAME)
# FAULT_DETECTOR__HOSTNAME=
# --loglevel Log level (env: FAULT_DETECTOR__LOG_LEVEL)
# FAULT_DETECTOR__LOG_LEVEL=
......@@ -13,7 +13,9 @@
"start:wallet-mon": "ts-node ./src/wallet-mon/service.ts",
"start:drippie-mon": "ts-node ./src/drippie-mon/service.ts",
"start:wd-mon": "ts-node ./src/wd-mon/service.ts",
"test:coverage": "echo 'No tests defined.'",
"start:fault-mon": "ts-node ./src/fault-mon/service.ts",
"test": "hardhat test",
"test:coverage": "nyc hardhat test && nyc merge .nyc_output coverage.json",
"build": "tsc -p ./tsconfig.json",
"clean": "rimraf dist/ ./tsconfig.tsbuildinfo",
"lint": "pnpm lint:fix && pnpm lint:check",
......@@ -40,12 +42,16 @@
"@eth-optimism/core-utils": "0.12.2",
"@eth-optimism/sdk": "3.1.0",
"ethers": "^5.7.0",
"dotenv": "^16.1.4",
"@types/dateformat": "^5.0.0",
"chai-as-promised": "^7.1.1",
"dateformat": "^4.5.1"
},
"devDependencies": {
"@ethersproject/abstract-provider": "^5.7.0",
"@nomiclabs/hardhat-ethers": "^2.0.6",
"@nomiclabs/hardhat-waffle": "^2.0.3",
"hardhat": "^2.9.6",
"ts-node": "^10.9.1"
}
}
# @eth-optimism/fault-detector
# @eth-optimism/fault-mon
[![codecov](https://codecov.io/gh/ethereum-optimism/optimism/branch/develop/graph/badge.svg?token=0VTG7PG7YR&flag=fault-detector-tests)](https://codecov.io/gh/ethereum-optimism/optimism)
The `fault-detector` is a simple service for detecting discrepancies between your local view of the Optimism network and the L2 output proposals published to Ethereum.
The `fault-mon` is a simple service for detecting discrepancies between your local view of the Optimism network and the L2 output proposals published to Ethereum.
## Installation
......@@ -34,7 +34,7 @@ pnpm start
## What this service does
The `fault-detector` detects differences between the transaction results generated by your local Optimism node and the transaction results actually published to Ethereum.
The `fault-mon` detects differences between the transaction results generated by your local Optimism node and the transaction results actually published to Ethereum.
Currently, transaction results take the form of [the root of the Optimism state trie](https://medium.com/@eiki1212/ethereum-state-trie-architecture-explained-a30237009d4e).
The state root of the block is published to the [`L2OutputOracle`](https://github.com/ethereum-optimism/optimism/blob/39b7262cc3ffd78cd314341b8512b2683c1d9af7/packages/contracts-bedrock/contracts/L1/L2OutputOracle.sol) contract on Ethereum.
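For bedrock chains the published value is an output root rather than the raw state root: a version-0 output root commits to the block's state root, the `L2ToL1MessagePasser` storage root, and the block hash. The Go sketch below is illustrative only; the `computeOutputRoot` helper and its placeholder inputs are assumptions, not this package's API. It shows how such a root can be derived locally so it can be compared against a proposal read from the `L2OutputOracle`.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// computeOutputRoot derives a version-0 bedrock output root:
// keccak256(version ++ stateRoot ++ messagePasserStorageRoot ++ latestBlockhash).
func computeOutputRoot(stateRoot, messagePasserStorageRoot, blockHash common.Hash) common.Hash {
	var version [32]byte // version 0 is 32 zero bytes
	return crypto.Keccak256Hash(version[:], stateRoot[:], messagePasserStorageRoot[:], blockHash[:])
}

func main() {
	// Placeholder inputs; in practice these come from the local L2 node
	// (block state root, L2ToL1MessagePasser storage root via eth_getProof, block hash).
	local := computeOutputRoot(common.Hash{}, common.Hash{}, common.Hash{})
	fmt.Println("locally derived output root:", local)
}
```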
......
......@@ -24,7 +24,7 @@ import { Provider } from '@ethersproject/abstract-provider'
import { Contract, ethers } from 'ethers'
import dateformat from 'dateformat'
import { version } from '../package.json'
import { version } from '../../package.json'
import {
findFirstUnfinalizedStateBatchIndex,
findOutputForIndex,
......
......@@ -2,3 +2,4 @@ export * from './balance-mon/service'
export * from './drippie-mon/service'
export * from './wd-mon/service'
export * from './wallet-mon/service'
export * from './fault-mon/index'
......@@ -6,7 +6,10 @@ import Artifact__L2OutputOracle from '@eth-optimism/contracts-bedrock/forge-arti
import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers'
import { expect } from './setup'
import { findOutputForIndex, findFirstUnfinalizedStateBatchIndex } from '../src'
import {
findOutputForIndex,
findFirstUnfinalizedStateBatchIndex,
} from '../../src/fault-mon'
describe('helpers', () => {
const deployConfig = {
......
......@@ -19,3 +19,4 @@ deployments/hardhat
.envrc
!.envrc.example
deployments/getting-started
deployments/*/.deploy
......@@ -46,5 +46,5 @@
"l2GenesisRegolithTimeOffset": "0x0",
"faultGameAbsolutePrestate": 96,
"faultGameMaxDepth": 4,
"faultGameMaxDuration": 604800
"faultGameMaxDuration": 120
}
......@@ -44,14 +44,18 @@ contract DeployConfig is Script {
uint256 public gasPriceOracleScalar;
uint256 public eip1559Denominator;
uint256 public eip1559Elasticity;
uint256 public l2GenesisRegolithTimeOffset;
uint256 public faultGameAbsolutePrestate;
uint256 public faultGameMaxDepth;
uint256 public faultGameMaxDuration;
constructor(string memory _path) {
console.log("DeployConfig: reading file %s", _path);
_json = vm.readFile(_path);
try vm.readFile(_path) returns (string memory data) {
_json = data;
} catch {
console.log("Warning: unable to read config. Do not deploy unless you are not using config.");
return;
}
finalSystemOwner = stdJson.readAddress(_json, "$.finalSystemOwner");
controller = stdJson.readAddress(_json, "$.controller");
......@@ -84,7 +88,6 @@ contract DeployConfig is Script {
gasPriceOracleScalar = stdJson.readUint(_json, "$.gasPriceOracleScalar");
eip1559Denominator = stdJson.readUint(_json, "$.eip1559Denominator");
eip1559Elasticity = stdJson.readUint(_json, "$.eip1559Elasticity");
l2GenesisRegolithTimeOffset = stdJson.readUint(_json, "$.l2GenesisRegolithTimeOffset");
if (block.chainid == 900) {
faultGameAbsolutePrestate = stdJson.readUint(_json, "$.faultGameAbsolutePrestate");
......
......@@ -97,6 +97,7 @@ abstract contract Deployer is Script {
function sync() public {
Deployment[] memory deployments = _getTempDeployments();
console.log("Syncing %s deployments", deployments.length);
console.log("Using deployment artifact %s", deployPath);
for (uint256 i; i < deployments.length; i++) {
address addr = deployments[i].addr;
......
......@@ -52,9 +52,24 @@ abstract contract SafeBuilder is EnhancedScript, GlobalConstants {
// Core Logic //
////////////////////////////////////////////////////////////////
function run() public returns (bool) {
address safe;
address proxyAdmin;
if (block.chainid == OP_GOERLI) {
safe = 0xE534ccA2753aCFbcDBCeB2291F596fc60495257e;
proxyAdmin = 0x4200000000000000000000000000000000000018;
}
return run(safe, proxyAdmin);
}
/// @notice The entrypoint to this script.
function run(address _safe, address _proxyAdmin) public returns (bool) {
require(_safe != address(0), "Safe address undefined");
require(_proxyAdmin != address(0), "ProxyAdminAddress undefined");
console.log("Using Safe: %s", _safe);
console.log("Using ProxyAdmin: %s", _proxyAdmin);
vm.startBroadcast();
bool success = _run(_safe, _proxyAdmin);
if (success) _postCheck();
......
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import { console2 as console } from "forge-std/console2.sol";
import { SafeBuilder } from "../universal/SafeBuilder.sol";
import { IGnosisSafe, Enum } from "../interfaces/IGnosisSafe.sol";
import { IMulticall3 } from "forge-std/interfaces/IMulticall3.sol";
import { Predeploys } from "../../contracts/libraries/Predeploys.sol";
import { ProxyAdmin } from "../../contracts/universal/ProxyAdmin.sol";
import { Deployer } from "../Deployer.sol";
/// @title EASUpgrader
/// @notice Upgrades the EAS predeploys.
contract EASUpgrader is SafeBuilder, Deployer {
/// @notice The proxy admin predeploy on L2.
ProxyAdmin immutable PROXY_ADMIN = ProxyAdmin(Predeploys.PROXY_ADMIN);
/// @notice Represents the EAS contracts predeploys
struct ContractSet {
address EAS;
address SchemaRegistry;
}
/// @notice A mapping of chainid to a ContractSet of implementations.
mapping(uint256 => ContractSet) internal implementations;
/// @notice A mapping of chainid to ContractSet of proxy addresses.
mapping(uint256 => ContractSet) internal proxies;
/// @notice The expected versions for the contracts to be upgraded to.
string constant internal EAS_Version = "1.0.0";
string constant internal SchemaRegistry_Version = "1.0.0";
/// @notice Place the contract addresses in storage so they can be used when building calldata.
function setUp() public override {
super.setUp();
implementations[OP_GOERLI] = ContractSet({
EAS: getAddress("EAS"),
SchemaRegistry: getAddress("SchemaRegistry")
});
proxies[OP_GOERLI] = ContractSet({
EAS: Predeploys.EAS,
SchemaRegistry: Predeploys.SCHEMA_REGISTRY
});
}
/// @notice Returns the name of the script.
function name() public override pure returns (string memory) {
return "EASUpgrader";
}
/// @notice Follow up assertions to ensure that the script ran to completion.
function _postCheck() internal override view {
ContractSet memory prox = getProxies();
require(_versionHash(prox.EAS) == keccak256(bytes(EAS_Version)), "EAS");
require(_versionHash(prox.SchemaRegistry) == keccak256(bytes(SchemaRegistry_Version)), "SchemaRegistry");
// Check that the codehashes of all implementations match the proxies set implementations.
ContractSet memory impl = getImplementations();
require(PROXY_ADMIN.getProxyImplementation(prox.EAS).codehash == impl.EAS.codehash);
require(PROXY_ADMIN.getProxyImplementation(prox.SchemaRegistry).codehash == impl.SchemaRegistry.codehash);
}
/// @notice Test coverage of the logic. Should only run on goerli but other chains
/// could be added.
function test_script_succeeds() skipWhenNotForking external {
address _safe;
address _proxyAdmin;
if (block.chainid == OP_GOERLI) {
_safe = 0xE534ccA2753aCFbcDBCeB2291F596fc60495257e;
_proxyAdmin = 0x4200000000000000000000000000000000000018;
}
require(_safe != address(0) && _proxyAdmin != address(0));
address[] memory owners = IGnosisSafe(payable(_safe)).getOwners();
for (uint256 i; i < owners.length; i++) {
address owner = owners[i];
vm.startBroadcast(owner);
bool success = _run(_safe, _proxyAdmin);
vm.stopBroadcast();
if (success) {
console.log("tx success");
break;
}
}
_postCheck();
}
/// @notice Builds the calldata that the multisig needs to make for the upgrade to happen.
/// A total of 2 calls are made to the proxy admin to upgrade the implementations
/// of the predeploys.
function buildCalldata(address _proxyAdmin) internal override view returns (bytes memory) {
IMulticall3.Call3[] memory calls = new IMulticall3.Call3[](2);
ContractSet memory impl = getImplementations();
ContractSet memory prox = getProxies();
// Upgrade EAS
calls[0] = IMulticall3.Call3({
target: _proxyAdmin,
allowFailure: false,
callData: abi.encodeCall(
ProxyAdmin.upgrade,
(payable(prox.EAS), impl.EAS)
)
});
// Upgrade SchemaRegistry
calls[1] = IMulticall3.Call3({
target: _proxyAdmin,
allowFailure: false,
callData: abi.encodeCall(
ProxyAdmin.upgrade,
(payable(prox.SchemaRegistry), impl.SchemaRegistry)
)
});
return abi.encodeCall(IMulticall3.aggregate3, (calls));
}
/// @notice Returns the ContractSet that represents the implementations for a given network.
function getImplementations() internal view returns (ContractSet memory) {
ContractSet memory set = implementations[block.chainid];
require(set.EAS != address(0), "no implementations for this network");
return set;
}
/// @notice Returns the ContractSet that represents the proxies for a given network.
function getProxies() internal view returns (ContractSet memory) {
ContractSet memory set = proxies[block.chainid];
require(set.EAS != address(0), "no proxies for this network");
return set;
}
}
ignores: [
"@babel/eslint-parser",
"@typescript-eslint/parser",
"eslint-plugin-import",
"eslint-plugin-unicorn",
"eslint-plugin-jsdoc",
"eslint-plugin-prefer-arrow",
"eslint-plugin-react",
"@typescript-eslint/eslint-plugin",
"eslint-config-prettier",
"eslint-plugin-prettier",
"chai"
]
# URL for an L1 RPC provider, used to query L2 output proposals
FAULT_DETECTOR__L1_RPC_PROVIDER=
# URL for an L2 RPC provider, used to query canonical L2 state
FAULT_DETECTOR__L2_RPC_PROVIDER=
module.exports = {
extends: '../../.eslintrc.js',
}
module.exports = {
...require('../../.prettierrc.js'),
};
# @eth-optimism/fault-detector
## 1.1.0
### Minor Changes
- [#6095](https://github.com/ethereum-optimism/optimism/pull/6095) [`5884c763a`](https://github.com/ethereum-optimism/optimism/commit/5884c763aa5e7687c61c906cb65069052d9c5d9e) Thanks [@zchn](https://github.com/zchn)! - Remove pre-bedrock support from fault detector.
### Patch Changes
- [#6201](https://github.com/ethereum-optimism/optimism/pull/6201) [`6fb399f27`](https://github.com/ethereum-optimism/optimism/commit/6fb399f27333ef709516765dde5b3d12501803cc) Thanks [@tynes](https://github.com/tynes)! - Bump contracts-bedrock version
- [#6164](https://github.com/ethereum-optimism/optimism/pull/6164) [`c11039060`](https://github.com/ethereum-optimism/optimism/commit/c11039060bc037a88916c2cba602687b6d69ad1a) Thanks [@pengin7384](https://github.com/pengin7384)! - fix typo
- Updated dependencies [[`a666c4f20`](https://github.com/ethereum-optimism/optimism/commit/a666c4f2082253abbb68c0678e5a0a1ed0c00f4b), [`ff577455f`](https://github.com/ethereum-optimism/optimism/commit/ff577455f196b5f5b8a889339b845561ca6c538a), [`89ca741a6`](https://github.com/ethereum-optimism/optimism/commit/89ca741a63c5e07f9d691bb6f7a89f7718fc49ca), [`c11039060`](https://github.com/ethereum-optimism/optimism/commit/c11039060bc037a88916c2cba602687b6d69ad1a), [`72d184854`](https://github.com/ethereum-optimism/optimism/commit/72d184854ebad8b2025641f126ed76573b1f0ac3), [`77da6edc6`](https://github.com/ethereum-optimism/optimism/commit/77da6edc643e0b5e39f7b6bb41c3c7ead418a876), [`3f13fd0bb`](https://github.com/ethereum-optimism/optimism/commit/3f13fd0bbea051a4550f1df6def1a53a616aa6f6), [`639163253`](https://github.com/ethereum-optimism/optimism/commit/639163253a5e2128f1c21c446b68d358d38cbd30)]:
- @eth-optimism/sdk@3.1.0
- @eth-optimism/contracts-bedrock@0.16.0
- @eth-optimism/core-utils@0.12.2
- @eth-optimism/common-ts@0.8.3
## 1.0.0
### Major Changes
- 119754c2f: Make optimism/sdk default to bedrock mode
### Patch Changes
- 16ccbee24: Fix false error to warning
- 685addec2: Add better source maps and developer support
- Updated dependencies [8d7dcc70c]
- Updated dependencies [119754c2f]
- Updated dependencies [d6388be4a]
- @eth-optimism/core-utils@0.12.1
- @eth-optimism/sdk@3.0.0
- @eth-optimism/common-ts@0.8.2
## 0.6.4
### Patch Changes
- Updated dependencies [a1b7ff9e3]
- Updated dependencies [8133872ed]
- Updated dependencies [afc2ab8c9]
- Updated dependencies [5063a69fb]
- Updated dependencies [aa854bdd8]
- @eth-optimism/sdk@2.1.0
- @eth-optimism/contracts@0.6.0
## 0.6.3
### Patch Changes
- dbe5eb308: Empty patch release to re-release packages that failed to be released by a bug in the release process.
- Updated dependencies [be3315689]
- @eth-optimism/sdk@2.0.2
## 0.6.2
### Patch Changes
- f9b579d55: Fixes a bug that would cause the fault detector to error out if no outputs had been proposed yet.
- Updated dependencies [fecd42d67]
- Updated dependencies [66cafc00a]
- @eth-optimism/common-ts@0.8.1
- @eth-optimism/sdk@2.0.1
## 0.6.1
### Patch Changes
- Updated dependencies [cb19e2f9c]
- @eth-optimism/sdk@2.0.0
## 0.6.0
### Minor Changes
- b004d1ad4: Updates the fault detector to support Bedrock networks.
### Patch Changes
- Updated dependencies [0e179781b]
- Updated dependencies [5372c9f5b]
- Updated dependencies [4ae94b412]
- @eth-optimism/common-ts@0.8.0
- @eth-optimism/sdk@1.10.2
## 0.5.0
### Minor Changes
- 9b2891852: Refactors BaseServiceV2 slightly, merges standard options with regular options
### Patch Changes
- ab8ec365c: Updates BaseServiceV2 so that options are secret by default. Services will have to explicitly mark options as "public" for those options to be logged and included in the metadata metric.
- c6c9c7dbf: Fault detector will now wait for providers to be connected
- Updated dependencies [e23f60f63]
- Updated dependencies [ab8ec365c]
- Updated dependencies [ba8b94a60]
- Updated dependencies [9b2891852]
- Updated dependencies [d1f9098f9]
- Updated dependencies [c6c9c7dbf]
- Updated dependencies [ffcee1013]
- Updated dependencies [eceb0de1d]
- @eth-optimism/common-ts@0.7.0
- @eth-optimism/sdk@1.9.0
- @eth-optimism/contracts@0.5.40
## 0.4.0
### Minor Changes
- ab5c1b897: Includes a new event caching mechanism for running the fault detector against Geth.
### Patch Changes
- 1d3c749a2: Bumps the version of ts-node used
- Updated dependencies [1d3c749a2]
- Updated dependencies [767585b07]
- Updated dependencies [c975c9620]
- Updated dependencies [1d3c749a2]
- Updated dependencies [136ea1785]
- @eth-optimism/contracts@0.5.39
- @eth-optimism/sdk@1.8.0
- @eth-optimism/core-utils@0.12.0
- @eth-optimism/common-ts@0.6.8
## 0.3.2
### Patch Changes
- 97b5f578c: Fixes how versions are imported for BaseServiceV2 services
- @eth-optimism/sdk@1.6.11
## 0.3.1
### Patch Changes
- Updated dependencies [1e76cdb86]
- @eth-optimism/core-utils@0.11.0
- @eth-optimism/common-ts@0.6.7
- @eth-optimism/contracts@0.5.38
- @eth-optimism/sdk@1.6.10
## 0.3.0
### Minor Changes
- 4a5e1832: Updates metrics to use better labels.
### Patch Changes
- Updated dependencies [e2faaa8b]
- @eth-optimism/sdk@1.6.5
## 0.2.7
### Patch Changes
- 7215f4ce: Bump ethers to 5.7.0 globally
- 17999a54: Adds a fault status API to the Fault Detector.
- 2f058b84: Fixes a small bug in the fault detector that would cause errors for testnets where the fault proof window is extremely short.
- Updated dependencies [7215f4ce]
- Updated dependencies [206f6033]
- Updated dependencies [d7679ca4]
- @eth-optimism/common-ts@0.6.5
- @eth-optimism/contracts@0.5.36
- @eth-optimism/core-utils@0.10.1
- @eth-optimism/sdk@1.6.4
## 0.2.6
### Patch Changes
- Updated dependencies [b27d0fa7]
- Updated dependencies [dbfea116]
- Updated dependencies [299157e7]
- @eth-optimism/sdk@1.6.1
- @eth-optimism/core-utils@0.10.0
- @eth-optimism/contracts@0.5.34
- @eth-optimism/common-ts@0.6.4
## 0.2.5
### Patch Changes
- 98206b7e: Properly handle connection failures for L2 node
## 0.2.4
### Patch Changes
- 89d01f2e: Update dev deps
- Updated dependencies [6e3449ba]
- Updated dependencies [f9fee446]
- @eth-optimism/contracts@0.5.30
- @eth-optimism/core-utils@0.9.1
- @eth-optimism/sdk@1.2.1
- @eth-optimism/common-ts@0.6.1
## 0.2.3
### Patch Changes
- 977493bc: Update SDK version and usage to account for new constructor
- 2296cf81: Fix bug where FD would try to sync beyond local tip
- Updated dependencies [977493bc]
- Updated dependencies [700dcbb0]
- Updated dependencies [3d1cb720]
- @eth-optimism/sdk@1.2.0
- @eth-optimism/core-utils@0.9.0
- @eth-optimism/common-ts@0.6.0
- @eth-optimism/contracts@0.5.29
## 0.2.2
### Patch Changes
- Updated dependencies [cb71fcde]
- Updated dependencies [10e41522]
- @eth-optimism/common-ts@0.5.0
## 0.2.1
### Patch Changes
- 29ff7462: Revert es target back to 2017
- Updated dependencies [27234f68]
- Updated dependencies [c201f3f1]
- Updated dependencies [29ff7462]
- Updated dependencies [52b26878]
- @eth-optimism/contracts@0.5.28
- @eth-optimism/common-ts@0.4.0
- @eth-optimism/core-utils@0.8.7
- @eth-optimism/sdk@1.1.9
## 0.2.0
### Minor Changes
- 84a8934c: BaseServiceV2 exposes service name and version as standard synthetic metric
### Patch Changes
- 37dfe4f6: Smarter starting height for fault-detector
- 6fe58eb2: Fix order in which a metric was bumped then emitted to fix off by one issue
- Updated dependencies [d9e39931]
- Updated dependencies [84a8934c]
- @eth-optimism/common-ts@0.3.0
## 0.1.1
### Patch Changes
- d18ae135: Updates all ethers versions in response to BN.js bug
- Updated dependencies [f16383f2]
- Updated dependencies [d18ae135]
- @eth-optimism/common-ts@0.2.8
- @eth-optimism/core-utils@0.8.5
- @eth-optimism/sdk@1.1.6
## 0.1.0
### Minor Changes
- 2177c8ef: Releases the first public version of the fault detector
### Patch Changes
- @eth-optimism/sdk@1.1.4
(The MIT License)
Copyright 2020-2021 Optimism
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# --l1rpcprovider Provider for interacting with L1 (env: FAULT_DETECTOR__L1_RPC_PROVIDER)
FAULT_DETECTOR__L1_RPC_PROVIDER=
# --l2rpcprovider Provider for interacting with L2 (env: FAULT_DETECTOR__L2_RPC_PROVIDER)
FAULT_DETECTOR__L2_RPC_PROVIDER=
# --bedrock Whether or not the service is running against a Bedrock chain (env: FAULT_DETECTOR__BEDROCK)
BEDROCK=true
# Optional Params
# --startbatchindex Batch index to start checking from. For bedrock chains, this is the L2 height to start from (env: FAULT_DETECTOR__START_BATCH_INDEX)
# FAULT_DETECTOR__START_BATCH_INDEX=
# --optimismportaladdress [Custom Bedrock Chains] Deployed OptimismPortal contract address. Used to retrieve necessary info for ouput verification (env: FAULT_DETECTOR__OPTIMISM_PORTAL_ADDRESS)
# FAULT_DETECTOR__OPTIMISM_PORTAL_ADDRESS=
# --statecommitmentchainaddress [Custom Legacy Chains] Deployed StateCommitmentChain contract address. Used to fetch necessary info for output verification. (env: FAULT_DETECTOR__STATE_COMMITMENT_CHAIN_ADDRESS)
# FAULT_DETECTOR__STATE_COMMITMENT_CHAIN_ADDRESS=
# --loopintervalms Loop interval in milliseconds, only applies if service is set to loop (env: FAULT_DETECTOR__LOOP_INTERVAL_MS)
# FAULT_DETECTOR__LOOP_INTERVAL_MS=
# --port Port for the app server (env: FAULT_DETECTOR__PORT)
# FAULT_DETECTOR__PORT=
# --hostname Hostname for the app server (env: FAULT_DETECTOR__HOSTNAME)
# FAULT_DETECTOR__HOSTNAME=
# --loglevel Log level (env: FAULT_DETECTOR__LOG_LEVEL)
# FAULT_DETECTOR__LOG_LEVEL=
{
"private": true,
"name": "@eth-optimism/fault-detector",
"version": "1.1.0",
"description": "[Optimism] Service for detecting faulty L2 output proposals",
"main": "dist/index",
"types": "dist/index",
"files": [
"dist/*"
],
"scripts": {
"start": "node --enable-source-maps dist/src/service.js",
"dev": "tsx watch ./src/service.ts",
"test": "hardhat test",
"test:coverage": "nyc hardhat test && nyc merge .nyc_output coverage.json",
"build": "tsc -p tsconfig.json",
"preview": "pnpm build && pnpm start",
"clean": "rimraf dist/ ./tsconfig.tsbuildinfo",
"lint": "pnpm lint:fix && pnpm lint:check",
"pre-commit": "lint-staged",
"lint:fix": "pnpm lint:check --fix",
"lint:check": "eslint . --max-warnings=0"
},
"keywords": [
"optimism",
"ethereum",
"fault",
"detector"
],
"homepage": "https://github.com/ethereum-optimism/optimism/tree/develop/packages/fault-detector#readme",
"license": "MIT",
"author": "Optimism PBC",
"repository": {
"type": "git",
"url": "https://github.com/ethereum-optimism/optimism.git"
},
"devDependencies": {
"@defi-wonderland/smock": "^2.0.7",
"@nomiclabs/hardhat-ethers": "^2.0.6",
"@nomiclabs/hardhat-waffle": "^2.0.3",
"@types/chai": "^4.3.1",
"@types/dateformat": "^5.0.0",
"chai-as-promised": "^7.1.1",
"dateformat": "^4.5.1",
"dotenv": "^16.1.4",
"ethereum-waffle": "^3.4.4",
"ethers": "^5.7.0",
"hardhat": "^2.9.6",
"lodash": "^4.17.21",
"tsx": "^3.12.7"
},
"dependencies": {
"@eth-optimism/common-ts": "^0.8.3",
"@eth-optimism/contracts-bedrock": "0.16.0",
"@eth-optimism/core-utils": "^0.12.2",
"@eth-optimism/sdk": "^3.1.0",
"@ethersproject/abstract-provider": "^5.7.0"
}
}
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"outDir": "./dist",
"sourceMap": true
},
"include": [
"package.json",
"src/**/*"
]
}