Commit dfd06d3a authored by mergify[bot], committed by GitHub

Merge branch 'develop' into aj/fpa-docs

parents 702eb6ba 49c42ab5
......@@ -4,42 +4,114 @@ import (
"context"
"fmt"
"net/http"
"runtime/debug"
"sync"
"github.com/ethereum-optimism/optimism/indexer/api/routes"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-service/httputil"
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum/go-ethereum/log"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/prometheus/client_golang/prometheus"
)
const ethereumAddressRegex = `^0x[a-fA-F0-9]{40}$`
type Api struct {
log log.Logger
Router *chi.Mux
log log.Logger
Router *chi.Mux
serverConfig config.ServerConfig
metricsConfig config.ServerConfig
metricsRegistry *prometheus.Registry
}
func NewApi(logger log.Logger, bv database.BridgeTransfersView) *Api {
r := chi.NewRouter()
h := routes.NewRoutes(logger, bv, r)
const (
MetricsNamespace = "op_indexer"
)
func chiMetricsMiddleware(rec metrics.HTTPRecorder) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return metrics.NewHTTPRecordingMiddleware(rec, next)
}
}
func NewApi(logger log.Logger, bv database.BridgeTransfersView, serverConfig config.ServerConfig, metricsConfig config.ServerConfig) *Api {
apiRouter := chi.NewRouter()
h := routes.NewRoutes(logger, bv, apiRouter)
mr := metrics.NewRegistry()
promRecorder := metrics.NewPromHTTPRecorder(mr, MetricsNamespace)
apiRouter.Use(chiMetricsMiddleware(promRecorder))
apiRouter.Use(middleware.Recoverer)
apiRouter.Use(middleware.Heartbeat("/healthz"))
apiRouter.Get(fmt.Sprintf("/api/v0/deposits/{address:%s}", ethereumAddressRegex), h.L1DepositsHandler)
apiRouter.Get(fmt.Sprintf("/api/v0/withdrawals/{address:%s}", ethereumAddressRegex), h.L2WithdrawalsHandler)
return &Api{log: logger, Router: apiRouter, metricsRegistry: mr, serverConfig: serverConfig, metricsConfig: metricsConfig}
}
func (a *Api) Start(ctx context.Context) error {
var wg sync.WaitGroup
errCh := make(chan error, 2)
processCtx, processCancel := context.WithCancel(ctx)
runProcess := func(start func(ctx context.Context) error) {
wg.Add(1)
go func() {
defer func() {
if err := recover(); err != nil {
a.log.Error("halting api on panic", "err", err)
debug.PrintStack()
errCh <- fmt.Errorf("panic: %v", err)
}
processCancel()
wg.Done()
}()
r.Use(middleware.Heartbeat("/healthz"))
errCh <- start(processCtx)
}()
}
runProcess(a.startServer)
runProcess(a.startMetricsServer)
wg.Wait()
err := <-errCh
if err != nil {
a.log.Error("api stopped", "err", err)
} else {
a.log.Info("api stopped")
}
r.Get(fmt.Sprintf("/api/v0/deposits/{address:%s}", ethereumAddressRegex), h.L1DepositsHandler)
r.Get(fmt.Sprintf("/api/v0/withdrawals/{address:%s}", ethereumAddressRegex), h.L2WithdrawalsHandler)
return &Api{log: logger, Router: r}
return err
}
func (a *Api) Listen(ctx context.Context, port int) error {
a.log.Info("api server listening...", "port", port)
server := http.Server{Addr: fmt.Sprintf(":%d", port), Handler: a.Router}
func (a *Api) startServer(ctx context.Context) error {
a.log.Info("api server listening...", "port", a.serverConfig.Port)
server := http.Server{Addr: fmt.Sprintf(":%d", a.serverConfig.Port), Handler: a.Router}
err := httputil.ListenAndServeContext(ctx, &server)
if err != nil {
a.log.Error("api server stopped", "err", err)
} else {
a.log.Info("api server stopped")
}
return err
}
func (a *Api) startMetricsServer(ctx context.Context) error {
a.log.Info("starting metrics server...", "port", a.metricsConfig.Port)
err := metrics.ListenAndServe(ctx, a.metricsRegistry, a.metricsConfig.Host, a.metricsConfig.Port)
if err != nil {
a.log.Error("metrics server stopped", "err", err)
} else {
a.log.Info("metrics server stopped")
}
return err
}
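A minimal usage sketch of the new lifecycle (mirroring the CLI wiring further below in runApi); `logger`, `db`, `cfg`, and `ctx` are placeholders from the surrounding application setup:
// Sketch only: the first error or panic from either the API server or the
// metrics server cancels the shared context inside Start and brings both down.
apiService := api.NewApi(logger, db.BridgeTransfers, cfg.HTTPServer, cfg.MetricsServer)
if err := apiService.Start(ctx); err != nil {
	logger.Error("api stopped with error", "err", err)
}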
......@@ -6,6 +6,7 @@ import (
"net/http/httptest"
"testing"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/common"
......@@ -18,6 +19,15 @@ type MockBridgeTransfersView struct{}
var mockAddress = "0x4204204204204204204204204204204204204204"
var apiConfig = config.ServerConfig{
Host: "localhost",
Port: 8080,
}
var metricsConfig = config.ServerConfig{
Host: "localhost",
Port: 7300,
}
var (
deposit = database.L1BridgeDeposit{
TransactionSourceHash: common.HexToHash("abc"),
......@@ -77,7 +87,7 @@ func (mbv *MockBridgeTransfersView) L2BridgeWithdrawalsByAddress(address common.
}
func TestHealthz(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
api := NewApi(logger, &MockBridgeTransfersView{})
api := NewApi(logger, &MockBridgeTransfersView{}, apiConfig, metricsConfig)
request, err := http.NewRequest("GET", "/healthz", nil)
assert.Nil(t, err)
......@@ -89,7 +99,7 @@ func TestHealthz(t *testing.T) {
func TestL1BridgeDepositsHandler(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
api := NewApi(logger, &MockBridgeTransfersView{})
api := NewApi(logger, &MockBridgeTransfersView{}, apiConfig, metricsConfig)
request, err := http.NewRequest("GET", fmt.Sprintf("/api/v0/deposits/%s", mockAddress), nil)
assert.Nil(t, err)
......@@ -101,7 +111,7 @@ func TestL1BridgeDepositsHandler(t *testing.T) {
func TestL2BridgeWithdrawalsByAddressHandler(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
api := NewApi(logger, &MockBridgeTransfersView{})
api := NewApi(logger, &MockBridgeTransfersView{}, apiConfig, metricsConfig)
request, err := http.NewRequest("GET", fmt.Sprintf("/api/v0/withdrawals/%s", mockAddress), nil)
assert.Nil(t, err)
......
......@@ -62,8 +62,8 @@ func runApi(ctx *cli.Context) error {
}
defer db.Close()
api := api.NewApi(log, db.BridgeTransfers)
return api.Listen(ctx.Context, cfg.HTTPServer.Port)
api := api.NewApi(log, db.BridgeTransfers, cfg.HTTPServer, cfg.MetricsServer)
return api.Start(ctx.Context)
}
func runAll(ctx *cli.Context) error {
......
......@@ -233,7 +233,7 @@ func (db *blocksDB) LatestEpoch() (*Epoch, error) {
// L2 for a faster query. Per the protocol, the L2 block that starts a new epoch
// will have a matching timestamp with the L1 origin.
query := db.gorm.Table("l1_block_headers").Order("l1_block_headers.timestamp DESC")
query = query.Joins("INNER JOIN l2_block_headers ON l1_block_headers.timestamp = l2_block_headers.timestamp")
query = query.Joins("INNER JOIN l2_block_headers ON l2_block_headers.timestamp = l1_block_headers.timestamp")
query = query.Select("*")
var epoch Epoch
......
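For readers less used to GORM's builder, this is roughly the SQL the reordered join above produces (an approximate sketch; it assumes the surrounding code finishes with a single-row Take, which adds the final LIMIT 1):
// Approximate generated SQL, for illustration only.
const latestEpochSQL = `
SELECT *
FROM l1_block_headers
INNER JOIN l2_block_headers ON l2_block_headers.timestamp = l1_block_headers.timestamp
ORDER BY l1_block_headers.timestamp DESC
LIMIT 1`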
......@@ -47,7 +47,10 @@ type L2TransactionWithdrawal struct {
type BridgeTransactionsView interface {
L1TransactionDeposit(common.Hash) (*L1TransactionDeposit, error)
L1LatestBlockHeader() (*L1BlockHeader, error)
L2TransactionWithdrawal(common.Hash) (*L2TransactionWithdrawal, error)
L2LatestBlockHeader() (*L2BlockHeader, error)
}
type BridgeTransactionsDB interface {
......@@ -94,6 +97,37 @@ func (db *bridgeTransactionsDB) L1TransactionDeposit(sourceHash common.Hash) (*L
return &deposit, nil
}
func (db *bridgeTransactionsDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
// Markers for an indexed bridge event
// L1: Latest Transaction Deposit, Latest Proven/Finalized Withdrawal
l1DepositQuery := db.gorm.Table("l1_transaction_deposits").Order("l1_transaction_deposits.timestamp DESC").Limit(1)
l1DepositQuery = l1DepositQuery.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = l1_transaction_deposits.initiated_l1_event_guid")
l1DepositQuery = l1DepositQuery.Select("l1_contract_events.*")
l1ProvenQuery := db.gorm.Table("l2_transaction_withdrawals")
l1ProvenQuery = l1ProvenQuery.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = l2_transaction_withdrawals.proven_l1_event_guid")
l1ProvenQuery = l1ProvenQuery.Order("l1_contract_events.timestamp DESC").Select("l1_contract_events.*").Limit(1)
l1FinalizedQuery := db.gorm.Table("l2_transaction_withdrawals")
l1FinalizedQuery = l1FinalizedQuery.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = l2_transaction_withdrawals.finalized_l1_event_guid")
l1FinalizedQuery = l1FinalizedQuery.Order("l1_contract_events.timestamp DESC").Select("l1_contract_events.*").Limit(1)
l1Query := db.gorm.Table("((?) UNION (?) UNION (?)) AS latest_bridge_events", l1DepositQuery.Limit(1), l1ProvenQuery, l1FinalizedQuery)
l1Query = l1Query.Joins("INNER JOIN l1_block_headers ON l1_block_headers.hash = latest_bridge_events.block_hash")
l1Query = l1Query.Order("l1_block_headers.number DESC").Select("l1_block_headers.*")
var l1Header L1BlockHeader
result := l1Query.Take(&l1Header)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l1Header, nil
}
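Approximately the SQL generated by the union of the three marker subqueries above (an illustrative sketch, not exact GORM output; quoting and binding omitted):
// Approximate generated SQL, for illustration only.
const l1LatestBlockHeaderSQL = `
SELECT l1_block_headers.*
FROM (
  (SELECT l1_contract_events.*
     FROM l1_transaction_deposits
     INNER JOIN l1_contract_events ON l1_contract_events.guid = l1_transaction_deposits.initiated_l1_event_guid
     ORDER BY l1_transaction_deposits.timestamp DESC LIMIT 1)
  UNION
  (SELECT l1_contract_events.*
     FROM l2_transaction_withdrawals
     INNER JOIN l1_contract_events ON l1_contract_events.guid = l2_transaction_withdrawals.proven_l1_event_guid
     ORDER BY l1_contract_events.timestamp DESC LIMIT 1)
  UNION
  (SELECT l1_contract_events.*
     FROM l2_transaction_withdrawals
     INNER JOIN l1_contract_events ON l1_contract_events.guid = l2_transaction_withdrawals.finalized_l1_event_guid
     ORDER BY l1_contract_events.timestamp DESC LIMIT 1)
) AS latest_bridge_events
INNER JOIN l1_block_headers ON l1_block_headers.hash = latest_bridge_events.block_hash
ORDER BY l1_block_headers.number DESC
LIMIT 1`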
/**
* Transactions withdrawn from L2
*/
......@@ -149,3 +183,25 @@ func (db *bridgeTransactionsDB) MarkL2TransactionWithdrawalFinalizedEvent(withdr
result := db.gorm.Save(&withdrawal)
return result.Error
}
func (db *bridgeTransactionsDB) L2LatestBlockHeader() (*L2BlockHeader, error) {
// L2: Inclusion of the latest deposit
l1DepositQuery := db.gorm.Table("l1_transaction_deposits").Order("l1_transaction_deposits.timestamp DESC")
l1DepositQuery = l1DepositQuery.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = l1_transaction_deposits.initiated_l1_event_guid")
l1DepositQuery = l1DepositQuery.Select("l1_contract_events.*")
l2Query := db.gorm.Table("(?) AS l1_deposit_events", l1DepositQuery)
l2Query = l2Query.Joins("INNER JOIN l2_block_headers ON l2_block_headers.timestamp = l1_deposit_events.timestamp")
l2Query = l2Query.Select("l2_block_headers.*")
var l2Header L2BlockHeader
result := l2Query.Take(&l2Header)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l2Header, nil
}
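And the analogous sketch for the L2 side: the latest L2 header whose timestamp matches the L1 event that initiated the most recent deposit (again approximate):
// Approximate generated SQL, for illustration only.
const l2LatestBlockHeaderSQL = `
SELECT l2_block_headers.*
FROM (SELECT l1_contract_events.*
        FROM l1_transaction_deposits
        INNER JOIN l1_contract_events ON l1_contract_events.guid = l1_transaction_deposits.initiated_l1_event_guid
        ORDER BY l1_transaction_deposits.timestamp DESC) AS l1_deposit_events
INNER JOIN l2_block_headers ON l2_block_headers.timestamp = l1_deposit_events.timestamp
LIMIT 1`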
......@@ -44,7 +44,7 @@ func NewL1ETL(cfg Config, log log.Logger, db *database.DB, metrics Metricer, cli
fromHeader = latestHeader.RLPHeader.Header()
} else if cfg.StartHeight.BitLen() > 0 {
log.Info("no indexed state in storage, starting from supplied L1 height", "height", cfg.StartHeight.String())
log.Info("no indexed state starting from supplied L1 height", "height", cfg.StartHeight.String())
header, err := client.BlockHeaderByNumber(cfg.StartHeight)
if err != nil {
return nil, fmt.Errorf("could not fetch starting block header: %w", err)
......@@ -53,7 +53,7 @@ func NewL1ETL(cfg Config, log log.Logger, db *database.DB, metrics Metricer, cli
fromHeader = header
} else {
log.Info("no indexed state in storage, starting from L1 genesis")
log.Info("no indexed state, starting from genesis")
}
// NOTE - The use of un-buffered channel here assumes that downstream consumers
......
......@@ -22,11 +22,15 @@ l1-rpc = "${INDEXER_RPC_URL_L1}"
l2-rpc = "${INDEXER_RPC_URL_L2}"
[db]
host = "postgres"
port = 5432
user = "db_username"
password = "db_password"
name = "db_name"
host = "$INDEXER_DB_HOST"
# this port may be problematic once we deploy
# the DATABASE_URL for previous services looked like this and didn't include a port:
# DATABASE_URL="postgresql://${INDEXER_DB_USER}:${INDEXER_DB_PASS}@localhost/${INDEXER_DB_NAME}?host=${INDEXER_DB_HOST}"
# If it is not problematic, delete these comments
port = $INDEXER_DB_PORT
user = "$INDEXER_DB_USER"
password = "$INDEXER_DB_PASS"
name = "$INDEXER_DB_NAME"
[http]
host = "127.0.0.1"
......
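The [db] section above leans on environment placeholders. A hedged sketch of how such $VAR references are commonly expanded before TOML decoding; whether the indexer's config loader does exactly this, and which TOML package it uses (BurntSushi/toml here), are assumptions rather than something this diff confirms:
package main

import (
	"log"
	"os"

	"github.com/BurntSushi/toml" // example decoder; an assumption, not confirmed by this diff
)

// DBConfig mirrors the [db] section of the file above.
type DBConfig struct {
	Host     string `toml:"host"`
	Port     int    `toml:"port"`
	User     string `toml:"user"`
	Password string `toml:"password"`
	Name     string `toml:"name"`
}

func main() {
	raw, err := os.ReadFile("indexer.toml")
	if err != nil {
		log.Fatal(err)
	}
	// Substitute $VAR / ${VAR} references with environment values before parsing.
	expanded := os.ExpandEnv(string(raw))

	var cfg struct {
		DB DBConfig `toml:"db"`
	}
	if _, err := toml.Decode(expanded, &cfg); err != nil {
		log.Fatal(err)
	}
	log.Printf("db host=%s port=%d", cfg.DB.Host, cfg.DB.Port)
}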
......@@ -29,35 +29,32 @@ type BridgeProcessor struct {
func NewBridgeProcessor(log log.Logger, db *database.DB, l1Etl *etl.L1ETL, chainConfig config.ChainConfig) (*BridgeProcessor, error) {
log = log.New("processor", "bridge")
latestL1Header, err := bridge.L1LatestBridgeEventHeader(db, chainConfig)
latestL1Header, err := db.BridgeTransactions.L1LatestBlockHeader()
if err != nil {
return nil, err
}
latestL2Header, err := bridge.L2LatestBridgeEventHeader(db)
latestL2Header, err := db.BridgeTransactions.L2LatestBlockHeader()
if err != nil {
return nil, err
}
// Since the bridge processor indexes events based on epochs, there's
// no scenario in which we have indexed L2 data with no L1 data.
//
// NOTE: Technically there is an exception if our bridging contracts are
// used to bridge natively from L2 and an op-chain happens to launch where
// only L2 native bridge events have occurred. This is a rare situation now
// and it's worth the assertion as an integrity check. We can revisit this
// as more chains launch with primarily L2-native activity.
if latestL1Header == nil && latestL2Header != nil {
log.Error("detected indexed L2 bridge activity with no indexed L1 state", "l2_block_number", latestL2Header.Number)
return nil, errors.New("detected indexed L2 bridge activity with no indexed L1 state")
}
var l1Header, l2Header *types.Header
if latestL1Header == nil && latestL2Header == nil {
log.Info("no indexed state, starting from genesis")
log.Info("no indexed state, starting from rollup genesis")
} else {
log.Info("detected the latest indexed state", "l1_block_number", latestL1Header.Number, "l2_block_number", latestL2Header.Number)
l1Height, l2Height := big.NewInt(0), big.NewInt(0)
if latestL1Header != nil {
l1Height = latestL1Header.Number
l1Header = latestL1Header.RLPHeader.Header()
}
if latestL2Header != nil {
l2Height = latestL2Header.Number
l2Header = latestL2Header.RLPHeader.Header()
}
log.Info("detected latest indexed state", "l1_block_number", l1Height, "l2_block_number", l2Height)
}
return &BridgeProcessor{log, db, l1Etl, chainConfig, latestL1Header, latestL2Header}, nil
return &BridgeProcessor{log, db, l1Etl, chainConfig, l1Header, l2Header}, nil
}
func (b *BridgeProcessor) Start(ctx context.Context) error {
......@@ -83,29 +80,40 @@ func (b *BridgeProcessor) Start(ctx context.Context) error {
latestEpoch, err := b.db.Blocks.LatestEpoch()
if err != nil {
return err
}
if latestEpoch == nil {
if b.LatestL1Header != nil {
// Once we have some state `latestEpoch` should never return nil.
b.log.Error("started with indexed bridge state, but no latest epoch returned", "latest_bridge_l1_block_number", b.LatestL1Header.Number)
return errors.New("started with indexed bridge state, but no blocks epochs returned")
} else {
b.log.Warn("no indexed epochs. waiting...")
continue
} else if latestEpoch == nil {
if b.LatestL1Header != nil || b.LatestL2Header != nil {
// Once we have some indexed state `latestEpoch` can never return nil
b.log.Error("bridge events indexed, but no indexed epoch returned", "latest_bridge_l1_block_number", b.LatestL1Header.Number)
return errors.New("bridge events indexed, but no indexed epoch returned")
}
b.log.Warn("no indexed epochs available. waiting...")
continue
}
// Integrity Checks
if b.LatestL1Header != nil && latestEpoch.L1BlockHeader.Hash == b.LatestL1Header.Hash() {
// Marked as a warning since the bridge should always be processing at least 1 new epoch
b.log.Warn("all available epochs indexed by the bridge", "latest_epoch_number", b.LatestL1Header.Number)
b.log.Warn("all available epochs indexed", "latest_bridge_l1_block_number", b.LatestL1Header.Number)
continue
}
if b.LatestL1Header != nil && latestEpoch.L1BlockHeader.Number.Cmp(b.LatestL1Header.Number) <= 0 {
b.log.Error("non-increasing l1 block height observed", "latest_bridge_l1_block_number", b.LatestL1Header.Number, "latest_epoch_number", latestEpoch.L1BlockHeader.Number)
return errors.New("non-increasing l1 block heght observed")
}
if b.LatestL2Header != nil && latestEpoch.L2BlockHeader.Number.Cmp(b.LatestL2Header.Number) <= 0 {
b.log.Error("non-increasing l2 block height observed", "latest_bridge_l2_block_number", b.LatestL2Header.Number, "latest_epoch_number", latestEpoch.L2BlockHeader.Number)
return errors.New("non-increasing l2 block heght observed")
}
// Process Bridge Events
toL1Height, toL2Height := latestEpoch.L1BlockHeader.Number, latestEpoch.L2BlockHeader.Number
fromL1Height, fromL2Height := big.NewInt(0), big.NewInt(0)
if b.LatestL1Header != nil {
// `NewBridgeProcessor` ensures that LatestL2Header must not be nil if LatestL1Header is set
fromL1Height = new(big.Int).Add(b.LatestL1Header.Number, big.NewInt(1))
}
if b.LatestL2Header != nil {
fromL2Height = new(big.Int).Add(b.LatestL2Header.Number, big.NewInt(1))
}
......@@ -139,7 +147,7 @@ func (b *BridgeProcessor) Start(ctx context.Context) error {
// Try again on a subsequent interval
batchLog.Error("unable to index new bridge events", "err", err)
} else {
batchLog.Info("done indexing new bridge events", "latest_l1_block_number", toL1Height, "latest_l2_block_number", toL2Height)
batchLog.Info("done indexing bridge events", "latest_l1_block_number", toL1Height, "latest_l2_block_number", toL2Height)
b.LatestL1Header = latestEpoch.L1BlockHeader.RLPHeader.Header()
b.LatestL2Header = latestEpoch.L2BlockHeader.RLPHeader.Header()
}
......
......@@ -8,7 +8,6 @@ import (
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/processors/contracts"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
......@@ -233,80 +232,3 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, chainConfig
// a-ok!
return nil
}
// L1LatestBridgeEventHeader returns the latest header for which an on-chain event
// has been observed on L1 -- both initiated L1 events and finalization markers for L2 withdrawals.
func L1LatestBridgeEventHeader(db *database.DB, chainConfig config.ChainConfig) (*types.Header, error) {
portalAbi, err := bindings.OptimismPortalMetaData.GetAbi()
if err != nil {
return nil, err
}
depositEventID := portalAbi.Events["TransactionDeposited"].ID
provenEventID := portalAbi.Events["WithdrawalProven"].ID
finalizedEventID := portalAbi.Events["WithdrawalFinalized"].ID
// (1) Initiated L1 Events
// Since all initiated bridge events eventually reach the OptimismPortal to
// conduct the deposit, we can simply look for the last deposited transaction
// event on L1.
var latestDepositHeader *types.Header
contractEventFilter := database.ContractEvent{ContractAddress: chainConfig.L1Contracts.OptimismPortalProxy, EventSignature: depositEventID}
depositEvent, err := db.ContractEvents.L1LatestContractEventWithFilter(contractEventFilter)
if err != nil {
return nil, err
}
if depositEvent != nil {
l1BlockHeader, err := db.Blocks.L1BlockHeader(depositEvent.BlockHash)
if err != nil {
return nil, err
}
if l1BlockHeader != nil {
latestDepositHeader = l1BlockHeader.RLPHeader.Header()
}
}
// (2) Finalization markers for L2
// Like initiated L1 events, all withdrawals must flow through the OptimismPortal
// contract. We must look for both proven and finalized withdrawal events.
var latestWithdrawHeader *types.Header
contractEventFilter.EventSignature = finalizedEventID
withdrawEvent, err := db.ContractEvents.L1LatestContractEventWithFilter(contractEventFilter)
if err != nil {
return nil, err
}
if withdrawEvent != nil {
// Check if we have a later detected proven event
contractEventFilter.EventSignature = provenEventID
provenEvent, err := db.ContractEvents.L1LatestContractEventWithFilter(contractEventFilter)
if err != nil {
return nil, err
}
if provenEvent != nil && provenEvent.Timestamp > withdrawEvent.Timestamp {
withdrawEvent = provenEvent
}
l1BlockHeader, err := db.Blocks.L1BlockHeader(withdrawEvent.BlockHash)
if err != nil {
return nil, err
}
latestWithdrawHeader = l1BlockHeader.RLPHeader.Header()
}
if latestDepositHeader == nil {
// If no deposits have been seen yet, there can have been no withdrawals either
if latestWithdrawHeader != nil {
return nil, errors.New("detected an indexed withdrawal without any deposits")
}
return nil, nil
} else if latestWithdrawHeader == nil {
return latestDepositHeader, nil
} else {
// both deposits & withdrawals have occurred
if latestDepositHeader.Time > latestWithdrawHeader.Time {
return latestDepositHeader, nil
} else {
return latestWithdrawHeader, nil
}
}
}
......@@ -7,10 +7,8 @@ import (
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/processors/contracts"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
......@@ -182,71 +180,3 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, fromHeight
// a-ok!
return nil
}
// L2LatestBridgeEventHeader returns the latest header for which an on-chain event
// has been observed on L2 -- both initiated L2 events and finalization markers from L1.
func L2LatestBridgeEventHeader(db *database.DB) (*types.Header, error) {
l2ToL1MessagePasserAbi, err := bindings.L2ToL1MessagePasserMetaData.GetAbi()
if err != nil {
return nil, err
}
crossDomainMessengerAbi, err := bindings.CrossDomainMessengerMetaData.GetAbi()
if err != nil {
return nil, err
}
messagePassedID := l2ToL1MessagePasserAbi.Events["MessagePassed"].ID
relayedEventID := crossDomainMessengerAbi.Events["RelayedMessage"].ID
// (1) Initiated L2 Events
// Since all initiated bridge events eventually reach the L2ToL1MessagePasser to
// initiate the withdrawal, we can simply look for the last message passed from
// this contract.
var latestWithdrawHeader *types.Header
contractEventFilter := database.ContractEvent{ContractAddress: predeploys.L2ToL1MessagePasserAddr, EventSignature: messagePassedID}
withdrawEvent, err := db.ContractEvents.L2LatestContractEventWithFilter(contractEventFilter)
if err != nil {
return nil, err
}
if withdrawEvent != nil {
l2BlockHeader, err := db.Blocks.L2BlockHeader(withdrawEvent.BlockHash)
if err != nil {
return nil, err
}
if l2BlockHeader != nil {
latestWithdrawHeader = l2BlockHeader.RLPHeader.Header()
}
}
// (2) Finalization markers for L1
// Since deposited transactions from L1 are a part of the block derivation process,
// there are no native finalization markers for OptimismPortal#TransactionDeposited.
// The lowest layer to check for here is the CrossDomainMessenger#RelayedMessage event.
// This also covers the StandardBridge, which is simply an extension of the messenger.
var latestRelayedMessageHeader *types.Header
contractEventFilter = database.ContractEvent{ContractAddress: predeploys.L2CrossDomainMessengerAddr, EventSignature: relayedEventID}
relayedEvent, err := db.ContractEvents.L2LatestContractEventWithFilter(contractEventFilter)
if err != nil {
return nil, err
}
if relayedEvent != nil {
l2BlockHeader, err := db.Blocks.L2BlockHeader(relayedEvent.BlockHash)
if err != nil {
return nil, err
}
if l2BlockHeader != nil {
latestRelayedMessageHeader = l2BlockHeader.RLPHeader.Header()
}
}
// No causal relationship between withdrawal and relayed messages
if latestWithdrawHeader == nil || latestRelayedMessageHeader == nil {
return nil, nil
} else {
if latestWithdrawHeader.Time > latestRelayedMessageHeader.Time {
return latestWithdrawHeader, nil
} else {
return latestRelayedMessageHeader, nil
}
}
}
......@@ -4,35 +4,34 @@ generator client {
datasource db {
provider = "postgresql"
url = env("DATABASE_URL")
url = "postgresql://db_username:db_password@localhost:5434/db_name"
}
model l1_bridged_tokens {
address String @id @db.VarChar
bridge_address String @db.VarChar
l2_token_address String @db.VarChar
name String @db.VarChar
symbol String @db.VarChar
decimals Int
l2_bridged_tokens l2_bridged_tokens[]
address String @id @db.VarChar
bridge_address String @db.VarChar
name String @db.VarChar
symbol String @db.VarChar
decimals Int
l2_bridged_tokens l2_bridged_tokens[]
}
model l2_bridged_tokens {
address String @id @db.VarChar
bridge_address String @db.VarChar
l1_token_address String? @db.VarChar
name String @db.VarChar
symbol String @db.VarChar
decimals Int
l1_bridged_tokens l1_bridged_tokens? @relation(fields: [l1_token_address], references: [address], onDelete: NoAction, onUpdate: NoAction)
address String @id @db.VarChar
bridge_address String @db.VarChar
l1_token_address String? @db.VarChar
name String @db.VarChar
symbol String @db.VarChar
decimals Int
l1_bridged_tokens l1_bridged_tokens? @relation(fields: [l1_token_address], references: [address], onDelete: Cascade, onUpdate: NoAction)
}
/// This table contains check constraints and requires additional setup for migrations. Visit https://pris.ly/d/check-constraints for more info.
model l1_block_headers {
hash String @id @db.VarChar
parent_hash String @db.VarChar
number Decimal @db.Decimal
timestamp Int
parent_hash String @unique @db.VarChar
number Decimal @unique @db.Decimal
timestamp Int @unique
rlp_bytes String @db.VarChar
l1_contract_events l1_contract_events[]
}
......@@ -40,7 +39,7 @@ model l1_block_headers {
/// This table contains check constraints and requires additional setup for migrations. Visit https://pris.ly/d/check-constraints for more info.
model l1_bridge_deposits {
transaction_source_hash String @id @db.VarChar
cross_domain_message_hash String? @unique @db.VarChar
cross_domain_message_hash String @unique @db.VarChar
from_address String @db.VarChar
to_address String @db.VarChar
local_token_address String @db.VarChar
......@@ -48,8 +47,8 @@ model l1_bridge_deposits {
amount Decimal @db.Decimal
data String @db.VarChar
timestamp Int
l1_bridge_messages l1_bridge_messages? @relation(fields: [cross_domain_message_hash], references: [message_hash], onDelete: NoAction, onUpdate: NoAction)
l1_transaction_deposits l1_transaction_deposits @relation(fields: [transaction_source_hash], references: [source_hash], onDelete: NoAction, onUpdate: NoAction)
l1_bridge_messages l1_bridge_messages @relation(fields: [cross_domain_message_hash], references: [message_hash], onDelete: Cascade, onUpdate: NoAction)
l1_transaction_deposits l1_transaction_deposits @relation(fields: [transaction_source_hash], references: [source_hash], onDelete: Cascade, onUpdate: NoAction)
}
/// This table contains check constraints and requires additional setup for migrations. Visit https://pris.ly/d/check-constraints for more info.
......@@ -66,36 +65,36 @@ model l1_bridge_messages {
data String @db.VarChar
timestamp Int
l1_bridge_deposits l1_bridge_deposits?
l2_contract_events l2_contract_events? @relation(fields: [relayed_message_event_guid], references: [guid], onDelete: NoAction, onUpdate: NoAction)
l1_contract_events l1_contract_events @relation(fields: [sent_message_event_guid], references: [guid], onDelete: NoAction, onUpdate: NoAction)
l1_transaction_deposits l1_transaction_deposits @relation(fields: [transaction_source_hash], references: [source_hash], onDelete: NoAction, onUpdate: NoAction)
l2_contract_events l2_contract_events? @relation(fields: [relayed_message_event_guid], references: [guid], onDelete: Cascade, onUpdate: NoAction)
l1_contract_events l1_contract_events @relation(fields: [sent_message_event_guid], references: [guid], onDelete: Cascade, onUpdate: NoAction)
l1_transaction_deposits l1_transaction_deposits @relation(fields: [transaction_source_hash], references: [source_hash], onDelete: Cascade, onUpdate: NoAction)
}
/// This table contains check constraints and requires additional setup for migrations. Visit https://pris.ly/d/check-constraints for more info.
model l1_contract_events {
guid String @id @db.VarChar
block_hash String @db.VarChar
contract_address String @db.VarChar
transaction_hash String @db.VarChar
guid String @id @db.VarChar
block_hash String @db.VarChar
contract_address String @db.VarChar
transaction_hash String @db.VarChar
log_index Int
event_signature String @db.VarChar
event_signature String @db.VarChar
timestamp Int
rlp_bytes String @db.VarChar
rlp_bytes String @db.VarChar
l1_bridge_messages l1_bridge_messages?
l1_block_headers l1_block_headers @relation(fields: [block_hash], references: [hash], onDelete: NoAction, onUpdate: NoAction)
l1_transaction_deposits l1_transaction_deposits[]
l1_block_headers l1_block_headers @relation(fields: [block_hash], references: [hash], onDelete: Cascade, onUpdate: NoAction)
l1_transaction_deposits l1_transaction_deposits?
l2_bridge_messages l2_bridge_messages?
l2_transaction_withdrawals_l2_transaction_withdrawals_finalized_l1_event_guidTol1_contract_events l2_transaction_withdrawals[] @relation("l2_transaction_withdrawals_finalized_l1_event_guidTol1_contract_events")
l2_transaction_withdrawals_l2_transaction_withdrawals_proven_l1_event_guidTol1_contract_events l2_transaction_withdrawals[] @relation("l2_transaction_withdrawals_proven_l1_event_guidTol1_contract_events")
legacy_state_batches legacy_state_batches[]
output_proposals output_proposals[]
l2_transaction_withdrawals_l2_transaction_withdrawals_finalized_l1_event_guidTol1_contract_events l2_transaction_withdrawals? @relation("l2_transaction_withdrawals_finalized_l1_event_guidTol1_contract_events")
l2_transaction_withdrawals_l2_transaction_withdrawals_proven_l1_event_guidTol1_contract_events l2_transaction_withdrawals? @relation("l2_transaction_withdrawals_proven_l1_event_guidTol1_contract_events")
legacy_state_batches legacy_state_batches?
output_proposals output_proposals?
}
/// This table contains check constraints and requires additional setup for migrations. Visit https://pris.ly/d/check-constraints for more info.
model l1_transaction_deposits {
source_hash String @id @db.VarChar
l2_transaction_hash String @db.VarChar
initiated_l1_event_guid String @db.VarChar
l2_transaction_hash String @unique @db.VarChar
initiated_l1_event_guid String @unique @db.VarChar
from_address String @db.VarChar
to_address String @db.VarChar
amount Decimal @db.Decimal
......@@ -104,15 +103,15 @@ model l1_transaction_deposits {
timestamp Int
l1_bridge_deposits l1_bridge_deposits?
l1_bridge_messages l1_bridge_messages?
l1_contract_events l1_contract_events @relation(fields: [initiated_l1_event_guid], references: [guid], onDelete: NoAction, onUpdate: NoAction)
l1_contract_events l1_contract_events @relation(fields: [initiated_l1_event_guid], references: [guid], onDelete: Cascade, onUpdate: NoAction)
}
/// This table contains check constraints and requires additional setup for migrations. Visit https://pris.ly/d/check-constraints for more info.
model l2_block_headers {
hash String @id @db.VarChar
parent_hash String @db.VarChar
number Decimal @db.Decimal
timestamp Int
parent_hash String @unique @db.VarChar
number Decimal @unique @db.Decimal
timestamp Int @unique
rlp_bytes String @db.VarChar
l2_contract_events l2_contract_events[]
}
......@@ -130,16 +129,16 @@ model l2_bridge_messages {
gas_limit Decimal @db.Decimal
data String @db.VarChar
timestamp Int
l1_contract_events l1_contract_events? @relation(fields: [relayed_message_event_guid], references: [guid], onDelete: NoAction, onUpdate: NoAction)
l2_contract_events l2_contract_events @relation(fields: [sent_message_event_guid], references: [guid], onDelete: NoAction, onUpdate: NoAction)
l2_transaction_withdrawals l2_transaction_withdrawals @relation(fields: [transaction_withdrawal_hash], references: [withdrawal_hash], onDelete: NoAction, onUpdate: NoAction)
l1_contract_events l1_contract_events? @relation(fields: [relayed_message_event_guid], references: [guid], onDelete: Cascade, onUpdate: NoAction)
l2_contract_events l2_contract_events @relation(fields: [sent_message_event_guid], references: [guid], onDelete: Cascade, onUpdate: NoAction)
l2_transaction_withdrawals l2_transaction_withdrawals @relation(fields: [transaction_withdrawal_hash], references: [withdrawal_hash], onDelete: Cascade, onUpdate: NoAction)
l2_bridge_withdrawals l2_bridge_withdrawals?
}
/// This table contains check constraints and requires additional setup for migrations. Visit https://pris.ly/d/check-constraints for more info.
model l2_bridge_withdrawals {
transaction_withdrawal_hash String @id @db.VarChar
cross_domain_message_hash String? @unique @db.VarChar
cross_domain_message_hash String @unique @db.VarChar
from_address String @db.VarChar
to_address String @db.VarChar
local_token_address String @db.VarChar
......@@ -147,34 +146,34 @@ model l2_bridge_withdrawals {
amount Decimal @db.Decimal
data String @db.VarChar
timestamp Int
l2_bridge_messages l2_bridge_messages? @relation(fields: [cross_domain_message_hash], references: [message_hash], onDelete: NoAction, onUpdate: NoAction)
l2_transaction_withdrawals l2_transaction_withdrawals @relation(fields: [transaction_withdrawal_hash], references: [withdrawal_hash], onDelete: NoAction, onUpdate: NoAction)
l2_bridge_messages l2_bridge_messages @relation(fields: [cross_domain_message_hash], references: [message_hash], onDelete: Cascade, onUpdate: NoAction)
l2_transaction_withdrawals l2_transaction_withdrawals @relation(fields: [transaction_withdrawal_hash], references: [withdrawal_hash], onDelete: Cascade, onUpdate: NoAction)
}
/// This table contains check constraints and requires additional setup for migrations. Visit https://pris.ly/d/check-constraints for more info.
model l2_contract_events {
guid String @id @db.VarChar
block_hash String @db.VarChar
contract_address String @db.VarChar
transaction_hash String @db.VarChar
guid String @id @db.VarChar
block_hash String @db.VarChar
contract_address String @db.VarChar
transaction_hash String @db.VarChar
log_index Int
event_signature String @db.VarChar
event_signature String @db.VarChar
timestamp Int
rlp_bytes String @db.VarChar
rlp_bytes String @db.VarChar
l1_bridge_messages l1_bridge_messages?
l2_bridge_messages l2_bridge_messages?
l2_block_headers l2_block_headers @relation(fields: [block_hash], references: [hash], onDelete: NoAction, onUpdate: NoAction)
l2_transaction_withdrawals l2_transaction_withdrawals[]
l2_block_headers l2_block_headers @relation(fields: [block_hash], references: [hash], onDelete: Cascade, onUpdate: NoAction)
l2_transaction_withdrawals l2_transaction_withdrawals?
}
/// This table contains check constraints and requires additional setup for migrations. Visit https://pris.ly/d/check-constraints for more info.
model l2_transaction_withdrawals {
withdrawal_hash String @id @db.VarChar
initiated_l2_event_guid String @db.VarChar
proven_l1_event_guid String? @db.VarChar
finalized_l1_event_guid String? @db.VarChar
nonce Decimal @unique @db.Decimal
initiated_l2_event_guid String @unique @db.VarChar
proven_l1_event_guid String? @unique @db.VarChar
finalized_l1_event_guid String? @unique @db.VarChar
succeeded Boolean?
nonce Decimal? @unique @db.Decimal
from_address String @db.VarChar
to_address String @db.VarChar
amount Decimal @db.Decimal
......@@ -183,24 +182,24 @@ model l2_transaction_withdrawals {
timestamp Int
l2_bridge_messages l2_bridge_messages?
l2_bridge_withdrawals l2_bridge_withdrawals?
l1_contract_events_l2_transaction_withdrawals_finalized_l1_event_guidTol1_contract_events l1_contract_events? @relation("l2_transaction_withdrawals_finalized_l1_event_guidTol1_contract_events", fields: [finalized_l1_event_guid], references: [guid], onDelete: NoAction, onUpdate: NoAction)
l2_contract_events l2_contract_events @relation(fields: [initiated_l2_event_guid], references: [guid], onDelete: NoAction, onUpdate: NoAction)
l1_contract_events_l2_transaction_withdrawals_proven_l1_event_guidTol1_contract_events l1_contract_events? @relation("l2_transaction_withdrawals_proven_l1_event_guidTol1_contract_events", fields: [proven_l1_event_guid], references: [guid], onDelete: NoAction, onUpdate: NoAction)
l1_contract_events_l2_transaction_withdrawals_finalized_l1_event_guidTol1_contract_events l1_contract_events? @relation("l2_transaction_withdrawals_finalized_l1_event_guidTol1_contract_events", fields: [finalized_l1_event_guid], references: [guid], onDelete: Cascade, onUpdate: NoAction)
l2_contract_events l2_contract_events @relation(fields: [initiated_l2_event_guid], references: [guid], onDelete: Cascade, onUpdate: NoAction)
l1_contract_events_l2_transaction_withdrawals_proven_l1_event_guidTol1_contract_events l1_contract_events? @relation("l2_transaction_withdrawals_proven_l1_event_guidTol1_contract_events", fields: [proven_l1_event_guid], references: [guid], onDelete: Cascade, onUpdate: NoAction)
}
model legacy_state_batches {
index Int @id
root String @db.VarChar
size Int
prev_total Int
l1_contract_event_guid String? @db.VarChar
l1_contract_events l1_contract_events? @relation(fields: [l1_contract_event_guid], references: [guid], onDelete: NoAction, onUpdate: NoAction)
index Int @id
root String @unique @db.VarChar
size Int
prev_total Int
state_batch_appended_guid String @unique @db.VarChar
l1_contract_events l1_contract_events @relation(fields: [state_batch_appended_guid], references: [guid], onDelete: Cascade, onUpdate: NoAction)
}
model output_proposals {
output_root String @id @db.VarChar
l2_output_index Decimal @db.Decimal
l2_block_number Decimal @db.Decimal
l1_contract_event_guid String? @db.VarChar
l1_contract_events l1_contract_events? @relation(fields: [l1_contract_event_guid], references: [guid], onDelete: NoAction, onUpdate: NoAction)
output_root String @id @db.VarChar
l2_output_index Decimal @unique @db.Decimal
l2_block_number Decimal @unique @db.Decimal
output_proposed_guid String @unique @db.VarChar
l1_contract_events l1_contract_events @relation(fields: [output_proposed_guid], references: [guid], onDelete: Cascade, onUpdate: NoAction)
}
......@@ -8,6 +8,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
......@@ -40,14 +41,14 @@ func TestERC20BridgeDeposits(t *testing.T) {
// Deploy WETH9
weth9Address, tx, WETH9, err := bindings.DeployWETH9(opts, l1Client)
require.NoError(t, err)
_, err = waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
_, err = geth.WaitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.NoError(t, err, "Waiting for deposit tx on L1")
// Get some WETH
opts.Value = big.NewInt(params.Ether)
tx, err = WETH9.Fallback(opts, []byte{})
require.NoError(t, err)
_, err = waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
_, err = geth.WaitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.NoError(t, err)
opts.Value = nil
wethBalance, err := WETH9.BalanceOf(&bind.CallOpts{}, opts.From)
......@@ -61,7 +62,7 @@ func TestERC20BridgeDeposits(t *testing.T) {
require.NoError(t, err)
tx, err = optimismMintableTokenFactory.CreateOptimismMintableERC20(l2Opts, weth9Address, "L2-WETH", "L2-WETH")
require.NoError(t, err)
_, err = waitForTransaction(tx.Hash(), l2Client, 3*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
_, err = geth.WaitForTransaction(tx.Hash(), l2Client, 3*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.NoError(t, err)
// Get the deployment event to have access to the L2 WETH9 address
......@@ -76,7 +77,7 @@ func TestERC20BridgeDeposits(t *testing.T) {
// Approve WETH9 with the bridge
tx, err = WETH9.Approve(opts, cfg.L1Deployments.L1StandardBridgeProxy, new(big.Int).SetUint64(math.MaxUint64))
require.NoError(t, err)
_, err = waitForTransaction(tx.Hash(), l1Client, 6*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
_, err = geth.WaitForTransaction(tx.Hash(), l1Client, 6*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.NoError(t, err)
// Bridge the WETH9
......@@ -84,7 +85,7 @@ func TestERC20BridgeDeposits(t *testing.T) {
require.NoError(t, err)
tx, err = l1StandardBridge.BridgeERC20(opts, weth9Address, event.LocalToken, big.NewInt(100), 100000, []byte{})
require.NoError(t, err)
depositReceipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
depositReceipt, err := geth.WaitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.NoError(t, err)
t.Log("Deposit through L1StandardBridge", "gas used", depositReceipt.GasUsed)
......@@ -103,7 +104,7 @@ func TestERC20BridgeDeposits(t *testing.T) {
depositTx, err := derive.UnmarshalDepositLogEvent(&depositEvent.Raw)
require.NoError(t, err)
_, err = waitForTransaction(types.NewTx(depositTx).Hash(), l2Client, 3*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
_, err = geth.WaitForTransaction(types.NewTx(depositTx).Hash(), l2Client, 3*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.NoError(t, err)
// Ensure that the deposit went through
......
......@@ -16,6 +16,7 @@ import (
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/cannon"
"github.com/ethereum-optimism/optimism/op-challenger/metrics"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/l2oo"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/rollup"
......@@ -65,7 +66,7 @@ type FactoryHelper struct {
factoryAddr common.Address
factory *bindings.DisputeGameFactory
blockOracle *bindings.BlockOracle
l2oo *bindings.L2OutputOracleCaller
l2ooHelper *l2oo.L2OOHelper
}
func NewFactoryHelper(t *testing.T, ctx context.Context, deployments *genesis.L1Deployments, client *ethclient.Client) *FactoryHelper {
......@@ -81,8 +82,6 @@ func NewFactoryHelper(t *testing.T, ctx context.Context, deployments *genesis.L1
require.NoError(err)
blockOracle, err := bindings.NewBlockOracle(deployments.BlockOracle, client)
require.NoError(err)
l2oo, err := bindings.NewL2OutputOracleCaller(deployments.L2OutputOracleProxy, client)
require.NoError(err, "Error creating l2oo caller")
return &FactoryHelper{
t: t,
......@@ -92,7 +91,7 @@ func NewFactoryHelper(t *testing.T, ctx context.Context, deployments *genesis.L1
factory: factory,
factoryAddr: factoryAddr,
blockOracle: blockOracle,
l2oo: l2oo,
l2ooHelper: l2oo.NewL2OOHelperReadOnly(t, deployments, client),
}
}
......@@ -150,12 +149,8 @@ func (h *FactoryHelper) StartCannonGameWithCorrectRoot(ctx context.Context, roll
challengerOpts = append(challengerOpts, options...)
cfg := challenger.NewChallengerConfig(h.t, l1Endpoint, challengerOpts...)
opts := &bind.CallOpts{Context: ctx}
outputIdx, err := h.l2oo.GetL2OutputIndexAfter(opts, new(big.Int).SetUint64(l2BlockNumber))
h.require.NoError(err, "Fetch challenged output index")
challengedOutput, err := h.l2oo.GetL2Output(opts, outputIdx)
h.require.NoError(err, "Fetch challenged output")
agreedOutput, err := h.l2oo.GetL2Output(opts, new(big.Int).Sub(outputIdx, common.Big1))
h.require.NoError(err, "Fetch agreed output")
challengedOutput := h.l2ooHelper.GetL2OutputAfter(ctx, l2BlockNumber)
agreedOutput := h.l2ooHelper.GetL2OutputBefore(ctx, l2BlockNumber)
l1BlockInfo, err := h.blockOracle.Load(opts, l1Head)
h.require.NoError(err, "Fetch L1 block info")
......@@ -246,26 +241,8 @@ func (h *FactoryHelper) prepareCannonGame(ctx context.Context) (uint64, *big.Int
func (h *FactoryHelper) waitForProposals(ctx context.Context) uint64 {
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
opts := &bind.CallOpts{Context: ctx}
latestOutputIndex, err := wait.AndGet(
ctx,
time.Second,
func() (*big.Int, error) {
index, err := h.l2oo.LatestOutputIndex(opts)
if err != nil {
h.t.Logf("Could not get latest output index: %v", err.Error())
return nil, nil
}
h.t.Logf("Latest output index: %v", index)
return index, nil
},
func(index *big.Int) bool {
return index != nil && index.Cmp(big.NewInt(1)) >= 0
})
h.require.NoError(err, "Did not get two output roots")
output, err := h.l2oo.GetL2Output(opts, latestOutputIndex)
h.require.NoErrorf(err, "Could not get latst output root index: %v", latestOutputIndex)
return output.L2BlockNumber.Uint64()
latestOutputIdx := h.l2ooHelper.WaitForProposals(ctx, 2)
return h.l2ooHelper.GetL2Output(ctx, latestOutputIdx).L2BlockNumber.Uint64()
}
// checkpointL1Block stores the current L1 block in the oracle
......
package op_e2e
package geth
import (
"time"
......
package op_e2e
package geth
import (
"context"
"crypto/ecdsa"
"errors"
"fmt"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/clock"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node"
......@@ -30,101 +21,9 @@ import (
_ "github.com/ethereum/go-ethereum/eth/tracers/native"
)
var (
// errTimeout represents a timeout
errTimeout = errors.New("timeout")
)
func waitForL1OriginOnL2(l1BlockNum uint64, client *ethclient.Client, timeout time.Duration) (*types.Block, error) {
timeoutCh := time.After(timeout)
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
headChan := make(chan *types.Header, 100)
headSub, err := client.SubscribeNewHead(ctx, headChan)
if err != nil {
return nil, err
}
defer headSub.Unsubscribe()
for {
select {
case head := <-headChan:
block, err := client.BlockByNumber(ctx, head.Number)
if err != nil {
return nil, err
}
l1Info, err := derive.L1InfoDepositTxData(block.Transactions()[0].Data())
if err != nil {
return nil, err
}
if l1Info.Number >= l1BlockNum {
return block, nil
}
case err := <-headSub.Err():
return nil, fmt.Errorf("error in head subscription: %w", err)
case <-timeoutCh:
return nil, errTimeout
}
}
}
func waitForTransaction(hash common.Hash, client *ethclient.Client, timeout time.Duration) (*types.Receipt, error) {
timeoutCh := time.After(timeout)
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
for {
receipt, err := client.TransactionReceipt(ctx, hash)
if receipt != nil && err == nil {
return receipt, nil
} else if err != nil && !errors.Is(err, ethereum.NotFound) {
return nil, err
}
select {
case <-timeoutCh:
tip, err := client.BlockByNumber(context.Background(), nil)
if err != nil {
return nil, err
}
return nil, fmt.Errorf("receipt for transaction %s not found. tip block number is %d: %w", hash.Hex(), tip.NumberU64(), errTimeout)
case <-ticker.C:
}
}
}
func waitForBlock(number *big.Int, client *ethclient.Client, timeout time.Duration) (*types.Block, error) {
timeoutCh := time.After(timeout)
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
headChan := make(chan *types.Header, 100)
headSub, err := client.SubscribeNewHead(ctx, headChan)
if err != nil {
return nil, err
}
defer headSub.Unsubscribe()
for {
select {
case head := <-headChan:
if head.Number.Cmp(number) >= 0 {
return client.BlockByNumber(ctx, number)
}
case err := <-headSub.Err():
return nil, fmt.Errorf("error in head subscription: %w", err)
case <-timeoutCh:
return nil, errTimeout
}
}
}
func initL1Geth(cfg *SystemConfig, genesis *core.Genesis, c clock.Clock, opts ...GethOption) (*node.Node, *eth.Ethereum, error) {
func InitL1(chainID uint64, blockTime uint64, genesis *core.Genesis, c clock.Clock, opts ...GethOption) (*node.Node, *eth.Ethereum, error) {
ethConfig := &ethconfig.Config{
NetworkId: cfg.DeployConfig.L1ChainID,
NetworkId: chainID,
Genesis: genesis,
}
nodeConfig := &node.Config{
......@@ -137,7 +36,7 @@ func initL1Geth(cfg *SystemConfig, genesis *core.Genesis, c clock.Clock, opts ..
HTTPModules: []string{"debug", "admin", "eth", "txpool", "net", "rpc", "web3", "personal", "engine"},
}
l1Node, l1Eth, err := createGethNode(false, nodeConfig, ethConfig, []*ecdsa.PrivateKey{cfg.Secrets.CliqueSigner}, opts...)
l1Node, l1Eth, err := createGethNode(false, nodeConfig, ethConfig, opts...)
if err != nil {
return nil, nil, err
}
......@@ -149,7 +48,7 @@ func initL1Geth(cfg *SystemConfig, genesis *core.Genesis, c clock.Clock, opts ..
clock: c,
eth: l1Eth,
log: log.Root(), // geth logger is global anyway. Would be nice to replace with a local logger though.
blockTime: cfg.DeployConfig.L1BlockTime,
blockTime: blockTime,
// for testing purposes we make it really fast, otherwise we don't see it finalize in short tests
finalizedDistance: 8,
safeDistance: 4,
......@@ -176,8 +75,8 @@ func defaultNodeConfig(name string, jwtPath string) *node.Config {
type GethOption func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error
// init a geth node.
func initL2Geth(name string, l2ChainID *big.Int, genesis *core.Genesis, jwtPath string, opts ...GethOption) (*node.Node, *eth.Ethereum, error) {
// InitL2 inits an L2 geth node.
func InitL2(name string, l2ChainID *big.Int, genesis *core.Genesis, jwtPath string, opts ...GethOption) (*node.Node, *eth.Ethereum, error) {
ethConfig := &ethconfig.Config{
NetworkId: l2ChainID.Uint64(),
Genesis: genesis,
......@@ -192,14 +91,14 @@ func initL2Geth(name string, l2ChainID *big.Int, genesis *core.Genesis, jwtPath
},
}
nodeConfig := defaultNodeConfig(fmt.Sprintf("l2-geth-%v", name), jwtPath)
return createGethNode(true, nodeConfig, ethConfig, nil, opts...)
return createGethNode(true, nodeConfig, ethConfig, opts...)
}
// createGethNode creates an in-memory geth node based on the configuration.
// The private keys are added to the keystore and are unlocked.
// If the node is l2, catalyst is enabled.
// The node should be started and then closed when done.
func createGethNode(l2 bool, nodeCfg *node.Config, ethCfg *ethconfig.Config, privateKeys []*ecdsa.PrivateKey, opts ...GethOption) (*node.Node, *eth.Ethereum, error) {
func createGethNode(l2 bool, nodeCfg *node.Config, ethCfg *ethconfig.Config, opts ...GethOption) (*node.Node, *eth.Ethereum, error) {
for i, opt := range opts {
if err := opt(ethCfg, nodeCfg); err != nil {
return nil, nil, fmt.Errorf("failed to apply geth option %d: %w", i, err)
......@@ -212,28 +111,6 @@ func createGethNode(l2 bool, nodeCfg *node.Config, ethCfg *ethconfig.Config, pri
return nil, nil, err
}
if !l2 {
keydir := n.KeyStoreDir()
scryptN := 2
scryptP := 1
n.AccountManager().AddBackend(keystore.NewKeyStore(keydir, scryptN, scryptP))
ks := n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
password := "foobar"
for _, pk := range privateKeys {
act, err := ks.ImportECDSA(pk, password)
if err != nil {
n.Close()
return nil, nil, err
}
err = ks.Unlock(act, password)
if err != nil {
n.Close()
return nil, nil, err
}
}
}
backend, err := eth.New(n, ethCfg)
if err != nil {
n.Close()
......
package geth
import (
"context"
"errors"
"fmt"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
)
var (
// errTimeout represents a timeout
errTimeout = errors.New("timeout")
)
func WaitForL1OriginOnL2(l1BlockNum uint64, client *ethclient.Client, timeout time.Duration) (*types.Block, error) {
timeoutCh := time.After(timeout)
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
headChan := make(chan *types.Header, 100)
headSub, err := client.SubscribeNewHead(ctx, headChan)
if err != nil {
return nil, err
}
defer headSub.Unsubscribe()
for {
select {
case head := <-headChan:
block, err := client.BlockByNumber(ctx, head.Number)
if err != nil {
return nil, err
}
l1Info, err := derive.L1InfoDepositTxData(block.Transactions()[0].Data())
if err != nil {
return nil, err
}
if l1Info.Number >= l1BlockNum {
return block, nil
}
case err := <-headSub.Err():
return nil, fmt.Errorf("error in head subscription: %w", err)
case <-timeoutCh:
return nil, errTimeout
}
}
}
func WaitForTransaction(hash common.Hash, client *ethclient.Client, timeout time.Duration) (*types.Receipt, error) {
timeoutCh := time.After(timeout)
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
for {
receipt, err := client.TransactionReceipt(ctx, hash)
if receipt != nil && err == nil {
return receipt, nil
} else if err != nil && !errors.Is(err, ethereum.NotFound) {
return nil, err
}
select {
case <-timeoutCh:
tip, err := client.BlockByNumber(context.Background(), nil)
if err != nil {
return nil, err
}
return nil, fmt.Errorf("receipt for transaction %s not found. tip block number is %d: %w", hash.Hex(), tip.NumberU64(), errTimeout)
case <-ticker.C:
}
}
}
func WaitForBlock(number *big.Int, client *ethclient.Client, timeout time.Duration) (*types.Block, error) {
timeoutCh := time.After(timeout)
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
headChan := make(chan *types.Header, 100)
headSub, err := client.SubscribeNewHead(ctx, headChan)
if err != nil {
return nil, err
}
defer headSub.Unsubscribe()
for {
select {
case head := <-headChan:
if head.Number.Cmp(number) >= 0 {
return client.BlockByNumber(ctx, number)
}
case err := <-headSub.Err():
return nil, fmt.Errorf("error in head subscription: %w", err)
case <-timeoutCh:
return nil, errTimeout
}
}
}
package l2oo
import (
"context"
"crypto/ecdsa"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/stretchr/testify/require"
)
type L2OOHelper struct {
t *testing.T
require *require.Assertions
client *ethclient.Client
l2oo *bindings.L2OutputOracle
// Nil when read-only
transactOpts *bind.TransactOpts
rollupCfg *rollup.Config
}
func NewL2OOHelperReadOnly(t *testing.T, deployments *genesis.L1Deployments, client *ethclient.Client) *L2OOHelper {
require := require.New(t)
l2oo, err := bindings.NewL2OutputOracle(deployments.L2OutputOracleProxy, client)
require.NoError(err, "Error creating l2oo bindings")
return &L2OOHelper{
t: t,
require: require,
client: client,
l2oo: l2oo,
}
}
func NewL2OOHelper(t *testing.T, deployments *genesis.L1Deployments, client *ethclient.Client, proposerKey *ecdsa.PrivateKey, rollupCfg *rollup.Config) *L2OOHelper {
h := NewL2OOHelperReadOnly(t, deployments, client)
chainID, err := client.ChainID(context.Background())
h.require.NoError(err, "Failed to get chain ID")
transactOpts, err := bind.NewKeyedTransactorWithChainID(proposerKey, chainID)
h.require.NoError(err)
h.transactOpts = transactOpts
h.rollupCfg = rollupCfg
return h
}
// WaitForProposals waits until there are at least the specified number of proposals in the output oracle
// Returns the index of the latest output proposal
func (h *L2OOHelper) WaitForProposals(ctx context.Context, req int64) uint64 {
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
opts := &bind.CallOpts{Context: ctx}
latestOutputIndex, err := wait.AndGet(
ctx,
time.Second,
func() (*big.Int, error) {
index, err := h.l2oo.LatestOutputIndex(opts)
if err != nil {
h.t.Logf("Could not get latest output index: %v", err.Error())
return nil, nil
}
h.t.Logf("Latest output index: %v", index)
return index, nil
},
func(index *big.Int) bool {
return index != nil && index.Cmp(big.NewInt(req-1)) >= 0
})
h.require.NoErrorf(err, "Did not get %v output roots", req)
return latestOutputIndex.Uint64()
}
func (h *L2OOHelper) GetL2Output(ctx context.Context, idx uint64) bindings.TypesOutputProposal {
output, err := h.l2oo.GetL2Output(&bind.CallOpts{Context: ctx}, new(big.Int).SetUint64(idx))
h.require.NoErrorf(err, "Failed to get output root at index: %v", idx)
return output
}
func (h *L2OOHelper) GetL2OutputAfter(ctx context.Context, l2BlockNum uint64) bindings.TypesOutputProposal {
opts := &bind.CallOpts{Context: ctx}
outputIdx, err := h.l2oo.GetL2OutputIndexAfter(opts, new(big.Int).SetUint64(l2BlockNum))
h.require.NoError(err, "Fetch challenged output index")
output, err := h.l2oo.GetL2Output(opts, outputIdx)
h.require.NoError(err, "Fetch challenged output")
return output
}
func (h *L2OOHelper) GetL2OutputBefore(ctx context.Context, l2BlockNum uint64) bindings.TypesOutputProposal {
opts := &bind.CallOpts{Context: ctx}
latestBlockNum, err := h.l2oo.LatestBlockNumber(opts)
h.require.NoError(err, "Failed to get latest output root block number")
var outputIdx *big.Int
if latestBlockNum.Uint64() < l2BlockNum {
outputIdx, err = h.l2oo.LatestOutputIndex(opts)
h.require.NoError(err, "Failed to get latest output index")
} else {
outputIdx, err = h.l2oo.GetL2OutputIndexAfter(opts, new(big.Int).SetUint64(l2BlockNum))
h.require.NoErrorf(err, "Failed to get output index after block %v", l2BlockNum)
h.require.NotZerof(outputIdx.Uint64(), "No l2 output before block %v", l2BlockNum)
outputIdx = new(big.Int).Sub(outputIdx, common.Big1)
}
return h.GetL2Output(ctx, outputIdx.Uint64())
}
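// PublishNextOutput proposes the given output root for the oracle's next block number, waiting first for an L1 block whose timestamp permits the proposal and then for the proposal transaction to be confirmed. It requires a helper constructed with a proposer key.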
func (h *L2OOHelper) PublishNextOutput(ctx context.Context, outputRoot common.Hash) {
h.require.NotNil(h.transactOpts, "Can't publish outputs from a read only L2OOHelper")
nextBlockNum, err := h.l2oo.NextBlockNumber(&bind.CallOpts{Context: ctx})
h.require.NoError(err, "Should get next block number")
genesis := h.rollupCfg.Genesis
targetTimestamp := genesis.L2Time + ((nextBlockNum.Uint64() - genesis.L2.Number) * h.rollupCfg.BlockTime)
timedCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
h.require.NoErrorf(
wait.ForBlockWithTimestamp(timedCtx, h.client, targetTimestamp),
"Wait for L1 block with timestamp >= %v", targetTimestamp)
tx, err := h.l2oo.ProposeL2Output(h.transactOpts, outputRoot, nextBlockNum, [32]byte{}, common.Big0)
h.require.NoErrorf(err, "Failed to propose output root for l2 block number %v", nextBlockNum)
_, err = wait.ForReceiptOK(ctx, h.client, tx.Hash())
h.require.NoErrorf(err, "Proposal for l2 block %v failed", nextBlockNum)
}
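As a usage sketch (not part of this change), a test that already has the L1 deployments and an L1 client from the e2e harness could read the most recent proposal through the read-only helper; the function below is hypothetical:
// readLatestProposal is a hypothetical example of using the read-only helper; it is not part of this commit.
func readLatestProposal(t *testing.T, deployments *genesis.L1Deployments, l1Client *ethclient.Client) bindings.TypesOutputProposal {
ctx := context.Background()
// A read-only helper has no proposer key, so it can query but not publish outputs.
l2oo := NewL2OOHelperReadOnly(t, deployments, l1Client)
// Block until at least one proposal exists, then fetch it by index.
latestIdx := l2oo.WaitForProposals(ctx, 1)
return l2oo.GetL2Output(ctx, latestIdx)
}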
......@@ -85,6 +85,19 @@ func ForBlock(ctx context.Context, client *ethclient.Client, n uint64) error {
return nil
}
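// ForBlockWithTimestamp polls the chain head once per second until its timestamp is at least target, or until ctx is done.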
func ForBlockWithTimestamp(ctx context.Context, client *ethclient.Client, target uint64) error {
_, err := AndGet(ctx, time.Second, func() (uint64, error) {
head, err := client.BlockByNumber(ctx, nil)
if err != nil {
return 0, err
}
return head.Time(), nil
}, func(actual uint64) bool {
return actual >= target
})
return err
}
func ForNextBlock(ctx context.Context, client *ethclient.Client) error {
current, err := client.BlockNumber(ctx)
if err != nil {
......
......@@ -6,6 +6,7 @@ import (
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/disputegame"
l2oo2 "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/l2oo"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
......@@ -355,6 +356,83 @@ func TestCannonDefendStep(t *testing.T) {
game.LogGameData(ctx)
}
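// TestCannonProposedOutputRootInvalid checks that a game disputing an invalid proposed output root cannot be won by a malicious player: the honest challenger counters every claim and the game resolves in favour of the defender of the correct root claim.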
func TestCannonProposedOutputRootInvalid(t *testing.T) {
InitParallel(t)
ctx := context.Background()
sys, l1Client, game, correctTrace := setupDisputeGameForInvalidOutputRoot(t, common.Hash{0xab})
t.Cleanup(sys.Close)
maxDepth := game.MaxDepth(ctx)
// Now maliciously play the game and it should be impossible to win
for claimCount := int64(1); claimCount < maxDepth; {
// Attack everything but oddly using the correct hash.
correctTrace.Attack(ctx, claimCount-1)
claimCount++
game.LogGameData(ctx)
game.WaitForClaimCount(ctx, claimCount)
game.LogGameData(ctx)
// Wait for the challenger to counter
claimCount++
game.WaitForClaimCount(ctx, claimCount)
}
game.LogGameData(ctx)
// Wait for the challenger to call step and counter our invalid claim
game.WaitForClaimAtMaxDepth(ctx, false)
// It's on us to call step if we want to win, but that shouldn't be possible
// Need to add support for this to the helper
// Time travel past when the game will be resolvable.
sys.TimeTravelClock.AdvanceTime(game.GameDuration(ctx))
require.NoError(t, wait.ForNextBlock(ctx, l1Client))
game.WaitForGameStatus(ctx, disputegame.StatusDefenderWins)
game.LogGameData(ctx)
}
// setupDisputeGameForInvalidOutputRoot sets up an L2 chain with at least one valid output root followed by an invalid output root.
// A cannon dispute game is started to dispute the invalid output root with the correct root claim provided.
// An honest challenger is run to defend the root claim (i.e. to disagree with the invalid output root).
func setupDisputeGameForInvalidOutputRoot(t *testing.T, outputRoot common.Hash) (*System, *ethclient.Client, *disputegame.CannonGameHelper, *disputegame.HonestHelper) {
ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t)
l2oo := l2oo2.NewL2OOHelper(t, sys.cfg.L1Deployments, l1Client, sys.cfg.Secrets.Proposer, sys.RollupConfig)
// Wait for one valid output root to be submitted
l2oo.WaitForProposals(ctx, 1)
// Stop the honest output submitter so we can publish invalid outputs
sys.L2OutputSubmitter.Stop()
sys.L2OutputSubmitter = nil
// Submit an invalid output root
l2oo.PublishNextOutput(ctx, outputRoot)
l1Endpoint := sys.NodeEndpoint("l1")
l2Endpoint := sys.NodeEndpoint("sequencer")
// Dispute the new output root by creating a new game with the correct cannon trace.
disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys.cfg.L1Deployments, l1Client)
game, correctTrace := disputeGameFactory.StartCannonGameWithCorrectRoot(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint,
challenger.WithPrivKey(sys.cfg.Secrets.Mallory),
)
require.NotNil(t, game)
// Start the honest challenger
game.StartChallenger(ctx, sys.RollupConfig, sys.L2GenesisCfg, l1Endpoint, l2Endpoint, "Defender",
// Disagree with the proposed output, so agree with the (correct) root claim
challenger.WithAgreeProposedOutput(false),
challenger.WithPrivKey(sys.cfg.Secrets.Mallory),
)
return sys, l1Client, game, correctTrace
}
func TestCannonChallengeWithCorrectRoot(t *testing.T) {
t.Skip("Not currently handling this case as the correct approach will change when output root bisection is added")
InitParallel(t)
......
......@@ -11,7 +11,7 @@ import (
func TestTxGossip(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
gethOpts := []GethOption{
gethOpts := []geth.GethOption{
geth.WithP2P(),
}
cfg.GethOptions["sequencer"] = gethOpts
......
......@@ -11,6 +11,7 @@ import (
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-e2e/config"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
......@@ -72,7 +73,7 @@ func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, e
SystemConfig: e2eutils.SystemConfigFromDeployConfig(cfg.DeployConfig),
}
node, _, err := initL2Geth("l2", big.NewInt(int64(cfg.DeployConfig.L2ChainID)), l2Genesis, cfg.JWTFilePath)
node, _, err := geth.InitL2("l2", big.NewInt(int64(cfg.DeployConfig.L2ChainID)), l2Genesis, cfg.JWTFilePath)
require.Nil(t, err)
require.Nil(t, node.Start())
......
......@@ -14,6 +14,7 @@ import (
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
"github.com/ethereum-optimism/optimism/op-node/p2p/store"
"github.com/ethereum-optimism/optimism/op-service/clock"
ds "github.com/ipfs/go-datastore"
......@@ -138,7 +139,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
"batcher": testlog.Logger(t, log.LvlInfo).New("role", "batcher"),
"proposer": testlog.Logger(t, log.LvlCrit).New("role", "proposer"),
},
GethOptions: map[string][]GethOption{},
GethOptions: map[string][]geth.GethOption{},
P2PTopology: nil, // no P2P connectivity by default
NonFinalizedProposals: false,
ExternalL2Shim: config.ExternalL2Shim,
......@@ -173,7 +174,7 @@ type SystemConfig struct {
Premine map[common.Address]*big.Int
Nodes map[string]*rollupNode.Config // Per-node config. Don't use to populate rollup.Config
Loggers map[string]log.Logger
GethOptions map[string][]GethOption
GethOptions map[string][]geth.GethOption
ProposerLogger log.Logger
BatcherLogger log.Logger
......@@ -424,7 +425,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
sys.RollupConfig = &defaultConfig
// Initialize nodes
l1Node, l1Backend, err := initL1Geth(&cfg, l1Genesis, c, cfg.GethOptions["l1"]...)
l1Node, l1Backend, err := geth.InitL1(cfg.DeployConfig.L1ChainID, cfg.DeployConfig.L1BlockTime, l1Genesis, c, cfg.GethOptions["l1"]...)
if err != nil {
return nil, err
}
......@@ -441,7 +442,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
for name := range cfg.Nodes {
var ethClient EthInstance
if cfg.ExternalL2Shim == "" {
node, backend, err := initL2Geth(name, big.NewInt(int64(cfg.DeployConfig.L2ChainID)), l2Genesis, cfg.JWTFilePath, cfg.GethOptions[name]...)
node, backend, err := geth.InitL2(name, big.NewInt(int64(cfg.DeployConfig.L2ChainID)), l2Genesis, cfg.JWTFilePath, cfg.GethOptions[name]...)
if err != nil {
return nil, err
}
......@@ -505,7 +506,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
sys.Clients[name] = client
}
_, err = waitForBlock(big.NewInt(2), l1Client, 6*time.Second*time.Duration(cfg.DeployConfig.L1BlockTime))
_, err = geth.WaitForBlock(big.NewInt(2), l1Client, 6*time.Second*time.Duration(cfg.DeployConfig.L1BlockTime))
if err != nil {
return nil, fmt.Errorf("waiting for blocks: %w", err)
}
......
......@@ -6,6 +6,7 @@ import (
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
......@@ -99,7 +100,7 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) {
t.Log("Wait for sequencer to catch up with last submitted batch")
l1HeadNum, err := l1Client.BlockNumber(ctx)
require.NoError(t, err)
_, err = waitForL1OriginOnL2(l1HeadNum, l2Seq, 30*time.Second)
_, err = geth.WaitForL1OriginOnL2(l1HeadNum, l2Seq, 30*time.Second)
require.NoError(t, err)
// Get the current safe head now that the batcher is stopped
......
......@@ -9,6 +9,7 @@ import (
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
......@@ -87,7 +88,7 @@ func TestL2OutputSubmitter(t *testing.T) {
// for that block and subsequently reorgs to match what the verifier derives when running the
// reconciliation process.
l2Verif := sys.Clients["verifier"]
_, err = waitForBlock(big.NewInt(6), l2Verif, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
_, err = geth.WaitForBlock(big.NewInt(6), l2Verif, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.Nil(t, err)
// Wait for batch submitter to update L2 output oracle.
......@@ -259,14 +260,14 @@ func TestPendingGasLimit(t *testing.T) {
// configure the L2 gas limit to be high, and the pending gas limits to be lower for resource saving.
cfg.DeployConfig.L2GenesisBlockGasLimit = 30_000_000
cfg.GethOptions["sequencer"] = []GethOption{
cfg.GethOptions["sequencer"] = []geth.GethOption{
func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error {
ethCfg.Miner.GasCeil = 10_000_000
ethCfg.Miner.RollupComputePendingBlock = true
return nil
},
}
cfg.GethOptions["verifier"] = []GethOption{
cfg.GethOptions["verifier"] = []geth.GethOption{
func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error {
ethCfg.Miner.GasCeil = 9_000_000
ethCfg.Miner.RollupComputePendingBlock = true
......@@ -369,7 +370,7 @@ func TestMissingBatchE2E(t *testing.T) {
})
// Wait until the block it was first included in shows up in the safe chain on the verifier
_, err = waitForBlock(receipt.BlockNumber, l2Verif, time.Duration((sys.RollupConfig.SeqWindowSize+4)*cfg.DeployConfig.L1BlockTime)*time.Second)
_, err = geth.WaitForBlock(receipt.BlockNumber, l2Verif, time.Duration((sys.RollupConfig.SeqWindowSize+4)*cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for block on verifier")
// Assert that the transaction is not found on the verifier
......@@ -700,7 +701,7 @@ func TestSystemP2PAltSync(t *testing.T) {
},
}
configureL1(syncNodeCfg, sys.EthInstances["l1"])
syncerL2Engine, _, err := initL2Geth("syncer", big.NewInt(int64(cfg.DeployConfig.L2ChainID)), sys.L2GenesisCfg, cfg.JWTFilePath)
syncerL2Engine, _, err := geth.InitL2("syncer", big.NewInt(int64(cfg.DeployConfig.L2ChainID)), sys.L2GenesisCfg, cfg.JWTFilePath)
require.NoError(t, err)
require.NoError(t, syncerL2Engine.Start())
......@@ -722,7 +723,7 @@ func TestSystemP2PAltSync(t *testing.T) {
l2Verif := ethclient.NewClient(rpc)
// It may take a while to sync, but eventually we should see the sequenced data show up
receiptVerif, err := waitForTransaction(receiptSeq.TxHash, l2Verif, 100*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
receiptVerif, err := geth.WaitForTransaction(receiptSeq.TxHash, l2Verif, 100*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on verifier")
require.Equal(t, receiptSeq, receiptVerif)
......@@ -852,9 +853,9 @@ func TestL1InfoContract(t *testing.T) {
endVerifBlockNumber := big.NewInt(4)
endSeqBlockNumber := big.NewInt(6)
endVerifBlock, err := waitForBlock(endVerifBlockNumber, l2Verif, time.Minute)
endVerifBlock, err := geth.WaitForBlock(endVerifBlockNumber, l2Verif, time.Minute)
require.Nil(t, err)
endSeqBlock, err := waitForBlock(endSeqBlockNumber, l2Seq, time.Minute)
endSeqBlock, err := geth.WaitForBlock(endSeqBlockNumber, l2Seq, time.Minute)
require.Nil(t, err)
seqL1Info, err := bindings.NewL1Block(cfg.L1InfoPredeployAddress, l2Seq)
......@@ -1251,7 +1252,7 @@ func TestStopStartBatcher(t *testing.T) {
// wait until the block the tx was first included in shows up in the safe chain on the verifier
safeBlockInclusionDuration := time.Duration(6*cfg.DeployConfig.L1BlockTime) * time.Second
_, err = waitForBlock(receipt.BlockNumber, l2Verif, safeBlockInclusionDuration)
_, err = geth.WaitForBlock(receipt.BlockNumber, l2Verif, safeBlockInclusionDuration)
require.Nil(t, err, "Waiting for block on verifier")
// ensure the safe chain advances
......@@ -1288,7 +1289,7 @@ func TestStopStartBatcher(t *testing.T) {
receipt = sendTx()
// wait until the block the tx was first included in shows up in the safe chain on the verifier
_, err = waitForBlock(receipt.BlockNumber, l2Verif, safeBlockInclusionDuration)
_, err = geth.WaitForBlock(receipt.BlockNumber, l2Verif, safeBlockInclusionDuration)
require.Nil(t, err, "Waiting for block on verifier")
// ensure that the safe chain advances after restarting the batcher
......@@ -1310,7 +1311,7 @@ func TestBatcherMultiTx(t *testing.T) {
l1Client := sys.Clients["l1"]
l2Seq := sys.Clients["sequencer"]
_, err = waitForBlock(big.NewInt(10), l2Seq, time.Duration(cfg.DeployConfig.L2BlockTime*15)*time.Second)
_, err = geth.WaitForBlock(big.NewInt(10), l2Seq, time.Duration(cfg.DeployConfig.L2BlockTime*15)*time.Second)
require.Nil(t, err, "Waiting for L2 blocks")
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
......@@ -1327,7 +1328,7 @@ func TestBatcherMultiTx(t *testing.T) {
// additional L1 blocks may be created before the batcher starts,
// so we wait for extra blocks.
for i := int64(0); i < 10; i++ {
block, err := waitForBlock(big.NewInt(int64(l1Number)+i), l1Client, time.Duration(cfg.DeployConfig.L1BlockTime*5)*time.Second)
block, err := geth.WaitForBlock(big.NewInt(int64(l1Number)+i), l1Client, time.Duration(cfg.DeployConfig.L1BlockTime*5)*time.Second)
require.Nil(t, err, "Waiting for l1 blocks")
totalTxCount += len(block.Transactions())
......
......@@ -13,6 +13,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/testutils/fuzzerutils"
"github.com/ethereum-optimism/optimism/op-node/withdrawals"
......@@ -70,11 +71,11 @@ func TestGasPriceOracleFeeUpdates(t *testing.T) {
cancel()
require.Nil(t, err, "sending overhead update tx")
receipt, err := waitForTransaction(tx.Hash(), l1Client, txTimeoutDuration)
receipt, err := geth.WaitForTransaction(tx.Hash(), l1Client, txTimeoutDuration)
require.Nil(t, err, "waiting for sysconfig set gas config update tx")
require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful, "transaction failed")
_, err = waitForL1OriginOnL2(receipt.BlockNumber.Uint64(), l2Seq, txTimeoutDuration)
_, err = geth.WaitForL1OriginOnL2(receipt.BlockNumber.Uint64(), l2Seq, txTimeoutDuration)
require.NoError(t, err, "waiting for L2 block to include the sysconfig update")
gpoOverhead, err := gpoContract.Overhead(&bind.CallOpts{})
......@@ -97,11 +98,11 @@ func TestGasPriceOracleFeeUpdates(t *testing.T) {
cancel()
require.Nil(t, err, "sending overhead update tx")
receipt, err = waitForTransaction(tx.Hash(), l1Client, txTimeoutDuration)
receipt, err = geth.WaitForTransaction(tx.Hash(), l1Client, txTimeoutDuration)
require.Nil(t, err, "waiting for sysconfig set gas config update tx")
require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful, "transaction failed")
_, err = waitForL1OriginOnL2(receipt.BlockNumber.Uint64(), l2Seq, txTimeoutDuration)
_, err = geth.WaitForL1OriginOnL2(receipt.BlockNumber.Uint64(), l2Seq, txTimeoutDuration)
require.NoError(t, err, "waiting for L2 block to include the sysconfig update")
gpoOverhead, err = gpoContract.Overhead(&bind.CallOpts{})
......@@ -504,7 +505,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
require.Nil(t, err, "sending initiate withdraw tx")
t.Logf("Waiting for tx %s to be in sequencer", tx.Hash().Hex())
receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, txTimeoutDuration)
receiptSeq, err := geth.WaitForTransaction(tx.Hash(), l2Seq, txTimeoutDuration)
require.Nil(t, err, "withdrawal initiated on L2 sequencer")
require.Equal(t, receiptSeq.Status, types.ReceiptStatusSuccessful, "transaction failed")
......@@ -513,7 +514,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
t.Logf("Waiting for tx %s to be in verifier. Verifier tip is %s:%d. Included in sequencer in block %s:%d", tx.Hash().Hex(), verifierTip.Hash().Hex(), verifierTip.NumberU64(), receiptSeq.BlockHash.Hex(), receiptSeq.BlockNumber)
// Wait for the transaction to appear in L2 verifier
receipt, err := waitForTransaction(tx.Hash(), l2Verif, txTimeoutDuration)
receipt, err := geth.WaitForTransaction(tx.Hash(), l2Verif, txTimeoutDuration)
require.Nilf(t, err, "withdrawal tx %s not found in verifier. included in block %s:%d", tx.Hash().Hex(), receiptSeq.BlockHash.Hex(), receiptSeq.BlockNumber)
require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful, "transaction failed")
......@@ -638,7 +639,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
} else {
require.NoError(t, err)
receipt, err = waitForTransaction(tx.Hash(), l1Client, txTimeoutDuration)
receipt, err = geth.WaitForTransaction(tx.Hash(), l1Client, txTimeoutDuration)
require.Nil(t, err, "finalize withdrawal")
require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status)
......@@ -656,7 +657,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
transactor.ExpectedL1Nonce++
// Ensure that our withdrawal was proved successfully
proveReceipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
proveReceipt, err := geth.WaitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "prove withdrawal")
require.Equal(t, types.ReceiptStatusSuccessful, proveReceipt.Status)
......
......@@ -8,6 +8,7 @@ import (
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
......@@ -39,14 +40,14 @@ func SendDepositTx(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l
require.Nil(t, err, "with deposit tx")
// Wait for transaction on L1
l1Receipt, err := waitForTransaction(tx.Hash(), l1Client, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
l1Receipt, err := geth.WaitForTransaction(tx.Hash(), l1Client, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for deposit tx on L1")
// Wait for transaction to be included on L2
reconstructedDep, err := derive.UnmarshalDepositLogEvent(l1Receipt.Logs[0])
require.NoError(t, err, "Could not reconstruct L2 Deposit")
tx = types.NewTx(reconstructedDep)
l2Receipt, err := waitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
l2Receipt, err := geth.WaitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.NoError(t, err)
require.Equal(t, l2Opts.ExpectedStatus, l2Receipt.Status, "l2 transaction status")
return l2Receipt
......@@ -95,13 +96,13 @@ func SendL2Tx(t *testing.T, cfg SystemConfig, l2Client *ethclient.Client, privKe
err := l2Client.SendTransaction(ctx, tx)
require.NoError(t, err, "Sending L2 tx")
receipt, err := waitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
receipt, err := geth.WaitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.NoError(t, err, "Waiting for L2 tx")
require.Equal(t, opts.ExpectedStatus, receipt.Status, "TX should have expected status")
for i, client := range opts.VerifyClients {
t.Logf("Waiting for tx %v on verification client %d", tx.Hash(), i)
receiptVerif, err := waitForTransaction(tx.Hash(), client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
receiptVerif, err := geth.WaitForTransaction(tx.Hash(), client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.NoErrorf(t, err, "Waiting for L2 tx on verification client %d", i)
require.Equalf(t, receipt, receiptVerif, "Receipts should be the same on sequencer and verification client %d", i)
}
......
......@@ -10,6 +10,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-e2e/config"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/withdrawals"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
......@@ -36,13 +37,13 @@ func SendWithdrawal(t *testing.T, cfg SystemConfig, l2Client *ethclient.Client,
tx, err := l2withdrawer.InitiateWithdrawal(l2opts, l2opts.From, big.NewInt(int64(opts.Gas)), opts.Data)
require.Nil(t, err, "sending initiate withdraw tx")
receipt, err := waitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
receipt, err := geth.WaitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "withdrawal initiated on L2 sequencer")
require.Equal(t, opts.ExpectedStatus, receipt.Status, "transaction had incorrect status")
for i, client := range opts.VerifyClients {
t.Logf("Waiting for tx %v on verification client %d", tx.Hash(), i)
receiptVerif, err := waitForTransaction(tx.Hash(), client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
receiptVerif, err := geth.WaitForTransaction(tx.Hash(), client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.Nilf(t, err, "Waiting for L2 tx on verification client %d", i)
require.Equalf(t, receipt, receiptVerif, "Receipts should be the same on sequencer and verification client %d", i)
}
......@@ -134,7 +135,7 @@ func ProveWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client,
require.Nil(t, err)
// Ensure that our withdrawal was proved successfully
proveReceipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
proveReceipt, err := geth.WaitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "prove withdrawal")
require.Equal(t, types.ReceiptStatusSuccessful, proveReceipt.Status)
return params, proveReceipt
......@@ -167,7 +168,7 @@ func FinalizeWithdrawal(t *testing.T, cfg SystemConfig, l1Client *ethclient.Clie
require.Nil(t, err)
// Ensure that our withdrawal was finalized successfully
finalizeReceipt, err := waitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
finalizeReceipt, err := geth.WaitForTransaction(tx.Hash(), l1Client, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "finalize withdrawal")
require.Equal(t, types.ReceiptStatusSuccessful, finalizeReceipt.Status)
return finalizeReceipt
......
......@@ -17,7 +17,7 @@ importers:
devDependencies:
'@babel/eslint-parser':
specifier: ^7.18.2
version: 7.22.10(@babel/core@7.22.10)(eslint@8.47.0)
version: 7.22.15(@babel/core@7.22.10)(eslint@8.47.0)
'@changesets/changelog-github':
specifier: ^0.4.8
version: 0.4.8
......@@ -604,13 +604,6 @@ packages:
'@babel/highlight': 7.22.10
chalk: 2.4.2
/@babel/code-frame@7.22.5:
resolution: {integrity: sha512-Xmwn266vad+6DAqEB2A6V/CcZVp62BbwVmcOJc2RPuwih1kw02TjQvWVWlcKGbBPd+8/0V5DEkOcizRGYsspYQ==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/highlight': 7.22.10
dev: true
/@babel/compat-data@7.22.9:
resolution: {integrity: sha512-5UamI7xkUcJ3i9qVDS+KFDEK8/7oJ55/sJMB1Ge7IEapr7KfdfV/HErR+koZwOfd+SgtFKOKRhRakdg++DcJpQ==}
engines: {node: '>=6.9.0'}
......@@ -639,31 +632,8 @@ packages:
- supports-color
dev: true
/@babel/core@7.22.9:
resolution: {integrity: sha512-G2EgeufBcYw27U4hhoIwFcgc1XU7TlXJ3mv04oOv1WCuo900U/anZSPzEqNjwdjgffkk2Gs0AN0dW1CKVLcG7w==}
engines: {node: '>=6.9.0'}
dependencies:
'@ampproject/remapping': 2.2.1
'@babel/code-frame': 7.22.10
'@babel/generator': 7.22.9
'@babel/helper-compilation-targets': 7.22.9(@babel/core@7.22.9)
'@babel/helper-module-transforms': 7.22.9(@babel/core@7.22.9)
'@babel/helpers': 7.22.6
'@babel/parser': 7.22.7
'@babel/template': 7.22.5
'@babel/traverse': 7.22.8
'@babel/types': 7.22.5
convert-source-map: 1.9.0
debug: 4.3.4(supports-color@8.1.1)
gensync: 1.0.0-beta.2
json5: 2.2.3
semver: 6.3.1
transitivePeerDependencies:
- supports-color
dev: true
/@babel/eslint-parser@7.22.10(@babel/core@7.22.10)(eslint@8.47.0):
resolution: {integrity: sha512-0J8DNPRXQRLeR9rPaUMM3fA+RbixjnVLe/MRMYCkp3hzgsSuxCHQ8NN8xQG1wIHKJ4a1DTROTvFJdW+B5/eOsg==}
/@babel/eslint-parser@7.22.15(@babel/core@7.22.10)(eslint@8.47.0):
resolution: {integrity: sha512-yc8OOBIQk1EcRrpizuARSQS0TWAcOMpEJ1aafhNznaeYkeL+OhqnDObGFylB8ka8VFF/sZc+S4RzHyO+3LjQxg==}
engines: {node: ^10.13.0 || ^12.13.0 || >=14.0.0}
peerDependencies:
'@babel/core': ^7.11.0
......@@ -686,26 +656,6 @@ packages:
jsesc: 2.5.2
dev: true
/@babel/generator@7.22.5:
resolution: {integrity: sha512-+lcUbnTRhd0jOewtFSedLyiPsD5tswKkbgcezOqqWFUVNEwoUTlpPOBmvhG7OXWLR4jMdv0czPGH5XbflnD1EA==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/types': 7.22.5
'@jridgewell/gen-mapping': 0.3.3
'@jridgewell/trace-mapping': 0.3.19
jsesc: 2.5.2
dev: true
/@babel/generator@7.22.9:
resolution: {integrity: sha512-KtLMbmicyuK2Ak/FTCJVbDnkN1SlT8/kceFTiuDiiRUUSMnHMidxSCdG4ndkTOHHpoomWe/4xkvHkEOncwjYIw==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/types': 7.22.5
'@jridgewell/gen-mapping': 0.3.3
'@jridgewell/trace-mapping': 0.3.19
jsesc: 2.5.2
dev: true
/@babel/helper-compilation-targets@7.22.10:
resolution: {integrity: sha512-JMSwHD4J7SLod0idLq5PKgI+6g/hLD/iuWBq08ZX49xE14VpVEojJ5rHWptpirV2j020MvypRLAXAO50igCJ5Q==}
engines: {node: '>=6.9.0'}
......@@ -717,65 +667,31 @@ packages:
semver: 6.3.1
dev: true
/@babel/helper-compilation-targets@7.22.9(@babel/core@7.22.9):
resolution: {integrity: sha512-7qYrNM6HjpnPHJbopxmb8hSPoZ0gsX8IvUS32JGVoy+pU9e5N0nLr1VjJoR6kA4d9dmGLxNYOjeB8sUDal2WMw==}
engines: {node: '>=6.9.0'}
peerDependencies:
'@babel/core': ^7.0.0
dependencies:
'@babel/compat-data': 7.22.9
'@babel/core': 7.22.9
'@babel/helper-validator-option': 7.22.5
browserslist: 4.21.9
lru-cache: 5.1.1
semver: 6.3.1
dev: true
/@babel/helper-environment-visitor@7.18.2:
resolution: {integrity: sha512-14GQKWkX9oJzPiQQ7/J36FTXcD4kSp8egKjO9nINlSKiHITRA9q/R74qu8S9xlc/b/yjsJItQUeeh3xnGN0voQ==}
engines: {node: '>=6.9.0'}
dev: true
/@babel/helper-environment-visitor@7.22.5:
resolution: {integrity: sha512-XGmhECfVA/5sAt+H+xpSg0mfrHq6FzNr9Oxh7PSEBBRUb/mL7Kz3NICXb194rCqAEdxkhPT1a88teizAFyvk8Q==}
engines: {node: '>=6.9.0'}
dev: true
/@babel/helper-function-name@7.17.9:
resolution: {integrity: sha512-7cRisGlVtiVqZ0MW0/yFB4atgpGLWEHUVYnb448hZK4x+vih0YO5UoS11XIYtZYqHd0dIPMdUSv8q5K4LdMnIg==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/template': 7.22.5
'@babel/types': 7.22.5
dev: true
/@babel/helper-function-name@7.22.5:
resolution: {integrity: sha512-wtHSq6jMRE3uF2otvfuD3DIvVhOsSNshQl0Qrd7qC9oQJzHvOL4qQXlQn2916+CXGywIjpGuIkoyZRRxHPiNQQ==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/template': 7.22.5
'@babel/types': 7.22.5
dev: true
/@babel/helper-hoist-variables@7.16.7:
resolution: {integrity: sha512-m04d/0Op34H5v7pbZw6pSKP7weA6lsMvfiIAMeIvkY/R4xQtBSMFEigu9QTZ2qB/9l22vsxtM8a+Q8CzD255fg==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/types': 7.22.5
'@babel/types': 7.22.10
dev: true
/@babel/helper-hoist-variables@7.22.5:
resolution: {integrity: sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/types': 7.22.5
'@babel/types': 7.22.10
dev: true
/@babel/helper-module-imports@7.22.5:
resolution: {integrity: sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/types': 7.22.5
'@babel/types': 7.22.10
dev: true
/@babel/helper-module-transforms@7.22.9(@babel/core@7.22.10):
......@@ -792,39 +708,18 @@ packages:
'@babel/helper-validator-identifier': 7.22.5
dev: true
/@babel/helper-module-transforms@7.22.9(@babel/core@7.22.9):
resolution: {integrity: sha512-t+WA2Xn5K+rTeGtC8jCsdAH52bjggG5TKRuRrAGNM/mjIbO4GxvlLMFOEz9wXY5I2XQ60PMFsAG2WIcG82dQMQ==}
engines: {node: '>=6.9.0'}
peerDependencies:
'@babel/core': ^7.0.0
dependencies:
'@babel/core': 7.22.9
'@babel/helper-environment-visitor': 7.22.5
'@babel/helper-module-imports': 7.22.5
'@babel/helper-simple-access': 7.22.5
'@babel/helper-split-export-declaration': 7.22.6
'@babel/helper-validator-identifier': 7.22.5
dev: true
/@babel/helper-simple-access@7.22.5:
resolution: {integrity: sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/types': 7.22.5
dev: true
/@babel/helper-split-export-declaration@7.16.7:
resolution: {integrity: sha512-xbWoy/PFoxSWazIToT9Sif+jJTlrMcndIsaOKvTA6u7QEo7ilkRZpjew18/W3c7nm8fXdUDXh02VXTbZ0pGDNw==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/types': 7.22.5
'@babel/types': 7.22.10
dev: true
/@babel/helper-split-export-declaration@7.22.6:
resolution: {integrity: sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/types': 7.22.5
'@babel/types': 7.22.10
dev: true
/@babel/helper-string-parser@7.22.5:
......@@ -857,17 +752,6 @@ packages:
- supports-color
dev: true
/@babel/helpers@7.22.6:
resolution: {integrity: sha512-YjDs6y/fVOYFV8hAf1rxd1QvR9wJe1pDBZ2AREKq/SDayfPzgk0PBnVuTCE5X1acEpMMNOVUqoe+OwiZGJ+OaA==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/template': 7.22.5
'@babel/traverse': 7.22.8
'@babel/types': 7.22.5
transitivePeerDependencies:
- supports-color
dev: true
/@babel/highlight@7.22.10:
resolution: {integrity: sha512-78aUtVcT7MUscr0K5mIEnkwxPE0MaxkR5RxRwuHaQ+JuU5AmTPhY+do2mdzVTnIJJpyBglql2pehuBIWHug+WQ==}
engines: {node: '>=6.9.0'}
......@@ -881,7 +765,7 @@ packages:
engines: {node: '>=6.0.0'}
hasBin: true
dependencies:
'@babel/types': 7.22.5
'@babel/types': 7.22.10
dev: true
/@babel/parser@7.22.10:
......@@ -892,22 +776,6 @@ packages:
'@babel/types': 7.22.10
dev: true
/@babel/parser@7.22.5:
resolution: {integrity: sha512-DFZMC9LJUG9PLOclRC32G63UXwzqS2koQC8dkx+PLdmt1xSePYpbT/NbsrJy8Q/muXz7o/h/d4A7Fuyixm559Q==}
engines: {node: '>=6.0.0'}
hasBin: true
dependencies:
'@babel/types': 7.22.5
dev: true
/@babel/parser@7.22.7:
resolution: {integrity: sha512-7NF8pOkHP5o2vpmGgNGcfAeCvOYhGLyA3Z4eBQkT1RJlWu47n63bCs93QfJ2hIAFCil7L5P2IWhs1oToVgrL0Q==}
engines: {node: '>=6.0.0'}
hasBin: true
dependencies:
'@babel/types': 7.22.5
dev: true
/@babel/runtime@7.20.7:
resolution: {integrity: sha512-UF0tvkUtxwAgZ5W/KrkHf0Rn0fdnLDU9ScxBrEVNUprE/MzirjK4MJUX1/BVDv00Sv8cljtukVK1aky++X1SjQ==}
engines: {node: '>=6.9.0'}
......@@ -926,31 +794,13 @@ packages:
engines: {node: '>=6.9.0'}
dependencies:
'@babel/code-frame': 7.22.10
'@babel/parser': 7.22.7
'@babel/types': 7.22.5
'@babel/parser': 7.22.10
'@babel/types': 7.22.10
dev: true
/@babel/traverse@7.18.2:
resolution: {integrity: sha512-9eNwoeovJ6KH9zcCNnENY7DMFwTU9JdGCFtqNLfUAqtUHRCOsTOqWoffosP8vKmNYeSBUv3yVJXjfd8ucwOjUA==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/code-frame': 7.22.5
'@babel/generator': 7.22.5
'@babel/helper-environment-visitor': 7.18.2
'@babel/helper-function-name': 7.17.9
'@babel/helper-hoist-variables': 7.16.7
'@babel/helper-split-export-declaration': 7.16.7
'@babel/parser': 7.22.5
'@babel/types': 7.22.5
debug: 4.3.4(supports-color@8.1.1)
globals: 11.12.0
transitivePeerDependencies:
- supports-color
dev: true
/@babel/traverse@7.22.10:
resolution: {integrity: sha512-Q/urqV4pRByiNNpb/f5OSv28ZlGJiFiiTh+GAHktbIrkPhPbl90+uW6SmpoLyZqutrg9AEaEf3Q/ZBRHBXgxig==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/code-frame': 7.22.10
'@babel/generator': 7.22.10
......@@ -966,18 +816,18 @@ packages:
- supports-color
dev: true
/@babel/traverse@7.22.8:
resolution: {integrity: sha512-y6LPR+wpM2I3qJrsheCTwhIinzkETbplIgPBbwvqPKc+uljeA5gP+3nP8irdYt1mjQaDnlIcG+dw8OjAco4GXw==}
/@babel/traverse@7.22.10:
resolution: {integrity: sha512-Q/urqV4pRByiNNpb/f5OSv28ZlGJiFiiTh+GAHktbIrkPhPbl90+uW6SmpoLyZqutrg9AEaEf3Q/ZBRHBXgxig==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/code-frame': 7.22.10
'@babel/generator': 7.22.9
'@babel/generator': 7.22.10
'@babel/helper-environment-visitor': 7.22.5
'@babel/helper-function-name': 7.22.5
'@babel/helper-hoist-variables': 7.22.5
'@babel/helper-split-export-declaration': 7.22.6
'@babel/parser': 7.22.7
'@babel/types': 7.22.5
'@babel/parser': 7.22.10
'@babel/types': 7.22.10
debug: 4.3.4(supports-color@8.1.1)
globals: 11.12.0
transitivePeerDependencies:
......@@ -993,15 +843,6 @@ packages:
to-fast-properties: 2.0.0
dev: true
/@babel/types@7.22.5:
resolution: {integrity: sha512-zo3MIHGOkPOfoRXitsgHLjEXmlDaD/5KU1Uzuc9GNiZPhSqVxVRtxuPaSBZDsYZ9qV88AjtMtWW7ww98loJ9KA==}
engines: {node: '>=6.9.0'}
dependencies:
'@babel/helper-string-parser': 7.22.5
'@babel/helper-validator-identifier': 7.22.5
to-fast-properties: 2.0.0
dev: true
/@changesets/apply-release-plan@6.1.3:
resolution: {integrity: sha512-ECDNeoc3nfeAe1jqJb5aFQX7CqzQhD2klXRez2JDb/aVpGUbX673HgKrnrgJRuQR/9f2TtLoYIzrGB9qwD77mg==}
dependencies:
......@@ -4578,7 +4419,7 @@ packages:
/@vue/compiler-core@3.2.36:
resolution: {integrity: sha512-bbyZM5hvBicv0PW3KUfVi+x3ylHnfKG7DOn5wM+f2OztTzTjLEyBb/5yrarIYpmnGitVGbjZqDbODyW4iK8hqw==}
dependencies:
'@babel/parser': 7.22.5
'@babel/parser': 7.22.10
'@vue/shared': 3.2.36
estree-walker: 2.0.2
source-map: 0.6.1
......@@ -4594,7 +4435,7 @@ packages:
/@vue/compiler-sfc@3.2.36:
resolution: {integrity: sha512-AvGb4bTj4W8uQ4BqaSxo7UwTEqX5utdRSMyHy58OragWlt8nEACQ9mIeQh3K4di4/SX+41+pJrLIY01lHAOFOA==}
dependencies:
'@babel/parser': 7.22.5
'@babel/parser': 7.22.10
'@vue/compiler-core': 3.2.36
'@vue/compiler-dom': 3.2.36
'@vue/compiler-ssr': 3.2.36
......@@ -4616,7 +4457,7 @@ packages:
/@vue/reactivity-transform@3.2.36:
resolution: {integrity: sha512-Jk5o2BhpODC9XTA7o4EL8hSJ4JyrFWErLtClG3NH8wDS7ri9jBDWxI7/549T7JY9uilKsaNM+4pJASLj5dtRwA==}
dependencies:
'@babel/parser': 7.22.5
'@babel/parser': 7.22.10
'@vue/compiler-core': 3.2.36
'@vue/shared': 3.2.36
estree-walker: 2.0.2
......@@ -6105,17 +5946,6 @@ packages:
update-browserslist-db: 1.0.11(browserslist@4.21.10)
dev: true
/browserslist@4.21.9:
resolution: {integrity: sha512-M0MFoZzbUrRU4KNfCrDLnvyE7gub+peetoTid3TBIqtunaDJyXlwhakT+/VkvSXcfIzFfK/nkCs4nmyTmxdNSg==}
engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7}
hasBin: true
dependencies:
caniuse-lite: 1.0.30001517
electron-to-chromium: 1.4.468
node-releases: 2.0.13
update-browserslist-db: 1.0.11(browserslist@4.21.9)
dev: true
/bs58@4.0.1:
resolution: {integrity: sha512-Ok3Wdf5vOIlBrgCvTq96gBkJw+JUEzdBgyaza5HLtPm7yTHkjRy8+JzNyHF7BHa0bNWOQIp3m5YF0nnFcOIKLw==}
dependencies:
......@@ -6302,10 +6132,6 @@ packages:
engines: {node: '>=10'}
dev: true
/caniuse-lite@1.0.30001517:
resolution: {integrity: sha512-Vdhm5S11DaFVLlyiKu4hiUTkpZu+y1KA/rZZqVQfOD5YdDT/eQKlkt7NaE0WGOFgX32diqt9MiP9CAiFeRklaA==}
dev: true
/caniuse-lite@1.0.30001520:
resolution: {integrity: sha512-tahF5O9EiiTzwTUqAeFjIZbn4Dnqxzz7ktrgGlMYNLH43Ul26IgTMH/zvL3DG0lZxBYnlT04axvInszUsZULdA==}
dev: true
......@@ -7455,10 +7281,6 @@ packages:
jake: 10.8.7
dev: true
/electron-to-chromium@1.4.468:
resolution: {integrity: sha512-6M1qyhaJOt7rQtNti1lBA0GwclPH+oKCmsra/hkcWs5INLxfXXD/dtdnaKUYQu/pjOBP/8Osoe4mAcNvvzoFag==}
dev: true
/electron-to-chromium@1.4.491:
resolution: {integrity: sha512-ZzPqGKghdVzlQJ+qpfE+r6EB321zed7e5JsvHIlMM4zPFF8okXUkF5Of7h7F3l3cltPL0rG7YVmlp5Qro7RQLA==}
dev: true
......@@ -10394,7 +10216,7 @@ packages:
resolution: {integrity: sha512-BXgQl9kf4WTCPCCpmFGoJkz/+uhvm7h7PFKUYxh7qarQd3ER33vHG//qaE8eN25l07YqZPpHXU9I09l/RD5aGQ==}
engines: {node: '>=8'}
dependencies:
'@babel/core': 7.22.9
'@babel/core': 7.22.10
'@istanbuljs/schema': 0.1.3
istanbul-lib-coverage: 3.2.0
semver: 6.3.1
......@@ -15823,17 +15645,6 @@ packages:
picocolors: 1.0.0
dev: true
/update-browserslist-db@1.0.11(browserslist@4.21.9):
resolution: {integrity: sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==}
hasBin: true
peerDependencies:
browserslist: '>= 4.21.0'
dependencies:
browserslist: 4.21.9
escalade: 3.1.1
picocolors: 1.0.0
dev: true
/update-section@0.3.3:
resolution: {integrity: sha512-BpRZMZpgXLuTiKeiu7kK0nIPwGdyrqrs6EDSaXtjD/aQ2T+qVo9a5hRC3HN3iJjCMxNT/VxoLGQ7E/OzE5ucnw==}
dev: true
......