Commit 9690fdd5 authored by Matthew Slipper, committed by GitHub

Merge pull request #2504 from ethereum-optimism/develop

Develop -> Master
parents 0b4fd1ea d9f058ce
---
'@eth-optimism/proxyd': patch
---
proxyd: Log sanitized RPC requests
---
'@eth-optimism/indexer': minor
---
Add airdrops API
---
'@eth-optimism/proxyd': patch
---
proxyd: Reduced RPC request logging
---
'@eth-optimism/teleportr': patch
---
Use L2 gas price in driver
---
'@eth-optimism/l2geth-exporter': patch
---
Added SCC collection
---
'@eth-optimism/proxyd': patch
---
proxyd: Limit the number of concurrent RPCs to backends
---
'@eth-optimism/indexer': patch
---
fix context reuse
---
'@eth-optimism/l2geth': patch
---
rollup: fix log.Crit usage
---
'@eth-optimism/l2geth': patch
---
l2geth: Record rollup transaction metrics
---
'@eth-optimism/replica-healthcheck': patch
---
Fixes a bug that would cause the service to stop properly checking blocks when the target client consistently leads the reference client
---
'@eth-optimism/integration-tests': patch
---
Add tests for system addrs on verifiers/replicas
---
'@eth-optimism/sdk': patch
---
Fixes a bug where the wrong Overrides type was being used for gas estimation functions
...@@ -9,6 +9,7 @@ pull_request_rules: ...@@ -9,6 +9,7 @@ pull_request_rules:
- "#review-threads-unresolved=0" - "#review-threads-unresolved=0"
- "#approved-reviews-by>=2" - "#approved-reviews-by>=2"
- "#changes-requested-reviews-by=0" - "#changes-requested-reviews-by=0"
- "label!=do-not-merge"
- or: - or:
- and: - and:
- "label!=SR-Risk" - "label!=SR-Risk"
...@@ -100,7 +101,7 @@ pull_request_rules: ...@@ -100,7 +101,7 @@ pull_request_rules:
- name: Nag changesets - name: Nag changesets
conditions: conditions:
- and: - and:
- 'files~=\.(ts|go|js|mod|sum)$' - 'files~=\.((?<!\.spec\.)ts|go|js|mod|sum)$'
- '-files~=^\.changeset/(.*)\.md' - '-files~=^\.changeset/(.*)\.md'
actions: actions:
comment: comment:
......
package db
type Airdrop struct {
Address string `json:"address"`
VoterAmount string `json:"voterAmount"`
MultisigSignerAmount string `json:"multisigSignerAmount"`
GitcoinAmount string `json:"gitcoinAmount"`
ActiveBridgedAmount string `json:"activeBridgedAmount"`
OpUserAmount string `json:"opUserAmount"`
OpRepeatUserAmount string `json:"opRepeatUserAmount"`
BonusAmount string `json:"bonusAmount"`
TotalAmount string `json:"totalAmount"`
}
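All of the amount fields are decimal strings rather than numeric types, presumably so that wei-scale allocations cannot overflow integer columns; the airdrops schema below enforces this with digits-only CHECK constraints on the corresponding VARCHAR columns.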
...@@ -3,6 +3,8 @@ package db ...@@ -3,6 +3,8 @@ package db
import ( import (
"database/sql" "database/sql"
"errors" "errors"
"fmt"
"strings"
l2common "github.com/ethereum-optimism/optimism/l2geth/common" l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -17,6 +19,31 @@ type Database struct { ...@@ -17,6 +19,31 @@ type Database struct {
config string config string
} }
// NewDatabase returns the database for the given connection string.
func NewDatabase(config string) (*Database, error) {
db, err := sql.Open("postgres", config)
if err != nil {
return nil, err
}
err = db.Ping()
if err != nil {
return nil, err
}
for _, migration := range schema {
_, err = db.Exec(migration)
if err != nil {
return nil, err
}
}
return &Database{
db: db,
config: config,
}, nil
}
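A minimal usage sketch for the constructor, assuming a local Postgres instance and the lib/pq driver; the connection string and package layout here are placeholders, not part of this change:

package main

import (
	"log"

	"github.com/ethereum-optimism/optimism/go/indexer/db"
	_ "github.com/lib/pq" // assumed Postgres driver; registers itself with database/sql
)

func main() {
	// Placeholder DSN; adjust host, user, and dbname for your environment.
	database, err := db.NewDatabase("host=localhost port=5432 user=indexer dbname=indexer sslmode=disable")
	if err != nil {
		log.Fatalf("failed to open indexer database: %v", err)
	}
	defer database.Close()

	// The migrations in the schema slice have already been applied by NewDatabase at this point.
	log.Println("indexer database ready")
}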
// Close closes the database. // Close closes the database.
// NOTE: "It is rarely necessary to close a DB." // NOTE: "It is rarely necessary to close a DB."
// See: https://pkg.go.dev/database/sql#Open // See: https://pkg.go.dev/database/sql#Open
...@@ -633,27 +660,38 @@ func (d *Database) GetIndexedL1BlockByHash(hash common.Hash) (*IndexedL1Block, e ...@@ -633,27 +660,38 @@ func (d *Database) GetIndexedL1BlockByHash(hash common.Hash) (*IndexedL1Block, e
return block, nil return block, nil
} }
const getAirdropQuery = `
SELECT
	address, voter_amount, multisig_signer_amount, gitcoin_amount,
	active_bridged_amount, op_user_amount, op_repeat_user_amount,
	bonus_amount, total_amount
FROM airdrops
WHERE address = $1
`

func (d *Database) GetAirdrop(address common.Address) (*Airdrop, error) {
	row := d.db.QueryRow(getAirdropQuery, strings.ToLower(address.String()))
	if row.Err() != nil {
		return nil, fmt.Errorf("error getting airdrop: %v", row.Err())
	}

	airdrop := new(Airdrop)
	err := row.Scan(
		&airdrop.Address,
		&airdrop.VoterAmount,
		&airdrop.MultisigSignerAmount,
		&airdrop.GitcoinAmount,
		&airdrop.ActiveBridgedAmount,
		&airdrop.OpUserAmount,
		&airdrop.OpRepeatUserAmount,
		&airdrop.BonusAmount,
		&airdrop.TotalAmount,
	)
	if errors.Is(err, sql.ErrNoRows) {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("error scanning airdrop: %v", err)
	}
	return airdrop, nil
}
...@@ -107,6 +107,21 @@ CREATE UNIQUE INDEX IF NOT EXISTS l1_blocks_number ON l1_blocks(number); ...@@ -107,6 +107,21 @@ CREATE UNIQUE INDEX IF NOT EXISTS l1_blocks_number ON l1_blocks(number);
CREATE UNIQUE INDEX IF NOT EXISTS l2_blocks_number ON l2_blocks(number); CREATE UNIQUE INDEX IF NOT EXISTS l2_blocks_number ON l2_blocks(number);
` `
const createAirdropsTable = `
CREATE TABLE IF NOT EXISTS airdrops (
address VARCHAR(42) PRIMARY KEY,
  voter_amount VARCHAR NOT NULL DEFAULT '0' CHECK(voter_amount ~ '^\d+$'),
  multisig_signer_amount VARCHAR NOT NULL DEFAULT '0' CHECK(multisig_signer_amount ~ '^\d+$'),
  gitcoin_amount VARCHAR NOT NULL DEFAULT '0' CHECK(gitcoin_amount ~ '^\d+$'),
  active_bridged_amount VARCHAR NOT NULL DEFAULT '0' CHECK(active_bridged_amount ~ '^\d+$'),
  op_user_amount VARCHAR NOT NULL DEFAULT '0' CHECK(op_user_amount ~ '^\d+$'),
  op_repeat_user_amount VARCHAR NOT NULL DEFAULT '0' CHECK(op_repeat_user_amount ~ '^\d+$'),
  op_og_amount VARCHAR NOT NULL DEFAULT '0' CHECK(op_og_amount ~ '^\d+$'),
  bonus_amount VARCHAR NOT NULL DEFAULT '0' CHECK(bonus_amount ~ '^\d+$'),
  total_amount VARCHAR NOT NULL CHECK(total_amount ~ '^\d+$')
)
`
var schema = []string{ var schema = []string{
createL1BlocksTable, createL1BlocksTable,
createL2BlocksTable, createL2BlocksTable,
...@@ -118,4 +133,5 @@ var schema = []string{ ...@@ -118,4 +133,5 @@ var schema = []string{
createDepositsTable, createDepositsTable,
createWithdrawalsTable, createWithdrawalsTable,
createL1L2NumberIndex, createL1L2NumberIndex,
createAirdropsTable,
} }
...@@ -9,6 +9,8 @@ import ( ...@@ -9,6 +9,8 @@ import (
"strconv" "strconv"
"time" "time"
"github.com/ethereum-optimism/optimism/go/indexer/services"
l2rpc "github.com/ethereum-optimism/optimism/l2geth/rpc" l2rpc "github.com/ethereum-optimism/optimism/l2geth/rpc"
"github.com/ethereum-optimism/optimism/go/indexer/metrics" "github.com/ethereum-optimism/optimism/go/indexer/metrics"
...@@ -83,8 +85,10 @@ type Indexer struct { ...@@ -83,8 +85,10 @@ type Indexer struct {
l1IndexingService *l1.Service l1IndexingService *l1.Service
l2IndexingService *l2.Service l2IndexingService *l2.Service
airdropService *services.Airdrop
router *mux.Router router *mux.Router
metrics *metrics.Metrics
} }
// NewIndexer initializes the Indexer, gathering any resources // NewIndexer initializes the Indexer, gathering any resources
...@@ -201,7 +205,9 @@ func NewIndexer(cfg Config, gitVersion string) (*Indexer, error) { ...@@ -201,7 +205,9 @@ func NewIndexer(cfg Config, gitVersion string) (*Indexer, error) {
l2Client: l2Client, l2Client: l2Client,
l1IndexingService: l1IndexingService, l1IndexingService: l1IndexingService,
l2IndexingService: l2IndexingService, l2IndexingService: l2IndexingService,
airdropService: services.NewAirdrop(db, m),
router: mux.NewRouter(), router: mux.NewRouter(),
metrics: m,
}, nil }, nil
} }
...@@ -216,6 +222,7 @@ func (b *Indexer) Serve() error { ...@@ -216,6 +222,7 @@ func (b *Indexer) Serve() error {
b.router.HandleFunc("/v1/deposits/0x{address:[a-fA-F0-9]{40}}", b.l1IndexingService.GetDeposits).Methods("GET") b.router.HandleFunc("/v1/deposits/0x{address:[a-fA-F0-9]{40}}", b.l1IndexingService.GetDeposits).Methods("GET")
b.router.HandleFunc("/v1/withdrawal/0x{hash:[a-fA-F0-9]{64}}", b.l2IndexingService.GetWithdrawalBatch).Methods("GET") b.router.HandleFunc("/v1/withdrawal/0x{hash:[a-fA-F0-9]{64}}", b.l2IndexingService.GetWithdrawalBatch).Methods("GET")
b.router.HandleFunc("/v1/withdrawals/0x{address:[a-fA-F0-9]{40}}", b.l2IndexingService.GetWithdrawals).Methods("GET") b.router.HandleFunc("/v1/withdrawals/0x{address:[a-fA-F0-9]{40}}", b.l2IndexingService.GetWithdrawals).Methods("GET")
b.router.HandleFunc("/v1/airdrops/0x{address:[a-fA-F0-9]{40}}", b.airdropService.GetAirdrop)
b.router.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { b.router.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200) w.WriteHeader(200)
_, err := w.Write([]byte("OK")) _, err := w.Write([]byte("OK"))
...@@ -224,7 +231,7 @@ func (b *Indexer) Serve() error { ...@@ -224,7 +231,7 @@ func (b *Indexer) Serve() error {
} }
}) })
middleware := server.LoggingMiddleware(log.New("service", "server")) middleware := server.LoggingMiddleware(b.metrics, log.New("service", "server"))
port := strconv.FormatUint(b.cfg.RESTPort, 10) port := strconv.FormatUint(b.cfg.RESTPort, 10)
addr := fmt.Sprintf("%s:%s", b.cfg.RESTHostname, port) addr := fmt.Sprintf("%s:%s", b.cfg.RESTHostname, port)
......
...@@ -3,6 +3,8 @@ package metrics ...@@ -3,6 +3,8 @@ package metrics
import ( import (
"fmt" "fmt"
"net/http" "net/http"
"strconv"
"time"
l2common "github.com/ethereum-optimism/optimism/l2geth/common" l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -32,6 +34,12 @@ type Metrics struct { ...@@ -32,6 +34,12 @@ type Metrics struct {
CachedTokensCount *prometheus.CounterVec CachedTokensCount *prometheus.CounterVec
HTTPRequestsCount prometheus.Counter
HTTPResponsesCount *prometheus.CounterVec
HTTPRequestDurationSecs prometheus.Summary
tokenAddrs map[string]string tokenAddrs map[string]string
} }
...@@ -110,6 +118,27 @@ func NewMetrics(monitoredTokens map[string]string) *Metrics { ...@@ -110,6 +118,27 @@ func NewMetrics(monitoredTokens map[string]string) *Metrics {
"chain", "chain",
}), }),
HTTPRequestsCount: promauto.NewCounter(prometheus.CounterOpts{
Name: "http_requests_count",
Help: "How many HTTP requests this instance has seen",
Namespace: metricsNamespace,
}),
HTTPResponsesCount: promauto.NewCounterVec(prometheus.CounterOpts{
Name: "http_responses_count",
Help: "How many HTTP responses this instance has served",
Namespace: metricsNamespace,
}, []string{
"status_code",
}),
HTTPRequestDurationSecs: promauto.NewSummary(prometheus.SummaryOpts{
Name: "http_request_duration_secs",
Help: "How long each HTTP request took",
Namespace: metricsNamespace,
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
}),
tokenAddrs: mts, tokenAddrs: mts,
} }
} }
...@@ -176,6 +205,15 @@ func (m *Metrics) IncL2CachedTokensCount() { ...@@ -176,6 +205,15 @@ func (m *Metrics) IncL2CachedTokensCount() {
m.CachedTokensCount.WithLabelValues("l2").Inc() m.CachedTokensCount.WithLabelValues("l2").Inc()
} }
func (m *Metrics) RecordHTTPRequest() {
m.HTTPRequestsCount.Inc()
}
func (m *Metrics) RecordHTTPResponse(code int, dur time.Duration) {
m.HTTPResponsesCount.WithLabelValues(strconv.Itoa(code)).Inc()
m.HTTPRequestDurationSecs.Observe(float64(dur) / float64(time.Second))
}
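The summary's Objectives map asks the Prometheus client to track the 0.5, 0.9, 0.95, and 0.99 quantiles with the listed absolute error tolerances, and RecordHTTPResponse converts the duration to fractional seconds before observing it so the value matches the metric's _secs suffix.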
func (m *Metrics) Serve(hostname string, port uint64) (*http.Server, error) { func (m *Metrics) Serve(hostname string, port uint64) (*http.Server, error) {
mux := http.NewServeMux() mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.Handler()) mux.Handle("/metrics", promhttp.Handler())
......
...@@ -6,6 +6,8 @@ import ( ...@@ -6,6 +6,8 @@ import (
"runtime/debug" "runtime/debug"
"time" "time"
"github.com/ethereum-optimism/optimism/go/indexer/metrics"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
...@@ -50,7 +52,7 @@ func (rw *responseWriter) WriteHeader(code int) { ...@@ -50,7 +52,7 @@ func (rw *responseWriter) WriteHeader(code int) {
} }
// LoggingMiddleware logs the incoming HTTP request & its duration. // LoggingMiddleware logs the incoming HTTP request & its duration.
func LoggingMiddleware(logger log.Logger) func(http.Handler) http.Handler { func LoggingMiddleware(metrics *metrics.Metrics, logger log.Logger) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler { return func(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) { fn := func(w http.ResponseWriter, r *http.Request) {
defer func() { defer func() {
...@@ -64,16 +66,19 @@ func LoggingMiddleware(logger log.Logger) func(http.Handler) http.Handler { ...@@ -64,16 +66,19 @@ func LoggingMiddleware(logger log.Logger) func(http.Handler) http.Handler {
} }
}() }()
metrics.RecordHTTPRequest()
start := time.Now() start := time.Now()
wrapped := wrapResponseWriter(w) wrapped := wrapResponseWriter(w)
next.ServeHTTP(wrapped, r) next.ServeHTTP(wrapped, r)
dur := time.Since(start)
logger.Info( logger.Info(
"served request", "served request",
"status", wrapped.status, "status", wrapped.status,
"method", r.Method, "method", r.Method,
"path", r.URL.EscapedPath(), "path", r.URL.EscapedPath(),
"duration", time.Since(start), "duration", dur,
) )
metrics.RecordHTTPResponse(wrapped.status, dur)
} }
return http.HandlerFunc(fn) return http.HandlerFunc(fn)
......
package services
import (
"net/http"
"github.com/ethereum-optimism/optimism/go/indexer/db"
"github.com/ethereum-optimism/optimism/go/indexer/metrics"
"github.com/ethereum-optimism/optimism/go/indexer/server"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/gorilla/mux"
)
var airdropLogger = log.New("service", "airdrop")
type Airdrop struct {
db *db.Database
metrics *metrics.Metrics
}
func NewAirdrop(db *db.Database, metrics *metrics.Metrics) *Airdrop {
return &Airdrop{
db: db,
metrics: metrics,
}
}
func (a *Airdrop) GetAirdrop(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
address := vars["address"]
airdrop, err := a.db.GetAirdrop(common.HexToAddress(address))
if err != nil {
airdropLogger.Error("db error getting airdrop", "err", err)
server.RespondWithError(w, http.StatusInternalServerError, "database error")
return
}
if airdrop == nil {
server.RespondWithError(w, http.StatusNotFound, "airdrop not found")
return
}
server.RespondWithJSON(w, http.StatusOK, airdrop)
}
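A minimal sketch of calling the new endpoint, assuming the indexer's REST server is reachable on localhost:8080 (the host, port, and zero address are placeholders, not part of this change):

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Placeholder base URL and address; the path matches the
	// /v1/airdrops/0x{address} route registered in Indexer.Serve.
	url := "http://localhost:8080/v1/airdrops/0x0000000000000000000000000000000000000000"
	resp, err := http.Get(url)
	if err != nil {
		log.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("reading response failed: %v", err)
	}

	// Expect a 200 with a JSON-encoded Airdrop, a 404 for unknown addresses,
	// or a 500 on database errors.
	fmt.Println(resp.StatusCode, string(body))
}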
...@@ -24,10 +24,9 @@ func (e *EthBridge) Address() common.Address { ...@@ -24,10 +24,9 @@ func (e *EthBridge) Address() common.Address {
func (e *EthBridge) GetDepositsByBlockRange(start, end uint64) (DepositsMap, error) { func (e *EthBridge) GetDepositsByBlockRange(start, end uint64) (DepositsMap, error) {
depositsByBlockhash := make(DepositsMap) depositsByBlockhash := make(DepositsMap)
iter, err := FilterETHDepositInitiatedWithRetry(e.filterer, &bind.FilterOpts{ iter, err := FilterETHDepositInitiatedWithRetry(e.ctx, e.filterer, &bind.FilterOpts{
Start: start, Start: start,
End: &end, End: &end,
Context: e.ctx,
}) })
if err != nil { if err != nil {
logger.Error("Error fetching filter", "err", err) logger.Error("Error fetching filter", "err", err)
......
...@@ -15,54 +15,48 @@ var clientRetryInterval = 5 * time.Second ...@@ -15,54 +15,48 @@ var clientRetryInterval = 5 * time.Second
// FilterStateBatchAppendedWithRetry retries the given func until it succeeds, // FilterStateBatchAppendedWithRetry retries the given func until it succeeds,
// waiting for clientRetryInterval duration after every call. // waiting for clientRetryInterval duration after every call.
func FilterStateBatchAppendedWithRetry(filterer *scc.StateCommitmentChainFilterer, opts *bind.FilterOpts) (*scc.StateCommitmentChainStateBatchAppendedIterator, error) { func FilterStateBatchAppendedWithRetry(ctx context.Context, filterer *scc.StateCommitmentChainFilterer, opts *bind.FilterOpts) (*scc.StateCommitmentChainStateBatchAppendedIterator, error) {
for { for {
ctxt, cancel := context.WithTimeout(opts.Context, DefaultConnectionTimeout) ctxt, cancel := context.WithTimeout(ctx, DefaultConnectionTimeout)
opts.Context = ctxt opts.Context = ctxt
res, err := filterer.FilterStateBatchAppended(opts, nil) res, err := filterer.FilterStateBatchAppended(opts, nil)
switch err { cancel()
case nil: if err == nil {
cancel() return res, nil
return res, err
default:
logger.Error("Error fetching filter", "err", err)
} }
logger.Error("Error fetching filter", "err", err)
time.Sleep(clientRetryInterval) time.Sleep(clientRetryInterval)
} }
} }
// FilterETHDepositInitiatedWithRetry retries the given func until it succeeds, // FilterETHDepositInitiatedWithRetry retries the given func until it succeeds,
// waiting for clientRetryInterval duration after every call. // waiting for clientRetryInterval duration after every call.
func FilterETHDepositInitiatedWithRetry(filterer *l1bridge.L1StandardBridgeFilterer, opts *bind.FilterOpts) (*l1bridge.L1StandardBridgeETHDepositInitiatedIterator, error) { func FilterETHDepositInitiatedWithRetry(ctx context.Context, filterer *l1bridge.L1StandardBridgeFilterer, opts *bind.FilterOpts) (*l1bridge.L1StandardBridgeETHDepositInitiatedIterator, error) {
for { for {
ctxt, cancel := context.WithTimeout(opts.Context, DefaultConnectionTimeout) ctxt, cancel := context.WithTimeout(ctx, DefaultConnectionTimeout)
opts.Context = ctxt opts.Context = ctxt
res, err := filterer.FilterETHDepositInitiated(opts, nil, nil) res, err := filterer.FilterETHDepositInitiated(opts, nil, nil)
switch err { cancel()
case nil: if err == nil {
cancel() return res, nil
return res, err
default:
logger.Error("Error fetching filter", "err", err)
} }
logger.Error("Error fetching filter", "err", err)
time.Sleep(clientRetryInterval) time.Sleep(clientRetryInterval)
} }
} }
// FilterERC20DepositInitiatedWithRetry retries the given func until it succeeds, // FilterERC20DepositInitiatedWithRetry retries the given func until it succeeds,
// waiting for clientRetryInterval duration after every call. // waiting for clientRetryInterval duration after every call.
func FilterERC20DepositInitiatedWithRetry(filterer *l1bridge.L1StandardBridgeFilterer, opts *bind.FilterOpts) (*l1bridge.L1StandardBridgeERC20DepositInitiatedIterator, error) { func FilterERC20DepositInitiatedWithRetry(ctx context.Context, filterer *l1bridge.L1StandardBridgeFilterer, opts *bind.FilterOpts) (*l1bridge.L1StandardBridgeERC20DepositInitiatedIterator, error) {
for { for {
ctxt, cancel := context.WithTimeout(opts.Context, DefaultConnectionTimeout) ctxt, cancel := context.WithTimeout(ctx, DefaultConnectionTimeout)
opts.Context = ctxt opts.Context = ctxt
res, err := filterer.FilterERC20DepositInitiated(opts, nil, nil, nil) res, err := filterer.FilterERC20DepositInitiated(opts, nil, nil, nil)
switch err { cancel()
case nil: if err == nil {
cancel() return res, nil
return res, err
default:
logger.Error("Error fetching filter", "err", err)
} }
logger.Error("Error fetching filter", "err", err)
time.Sleep(clientRetryInterval) time.Sleep(clientRetryInterval)
} }
} }
...@@ -24,10 +24,9 @@ func (s *StandardBridge) Address() common.Address { ...@@ -24,10 +24,9 @@ func (s *StandardBridge) Address() common.Address {
func (s *StandardBridge) GetDepositsByBlockRange(start, end uint64) (DepositsMap, error) { func (s *StandardBridge) GetDepositsByBlockRange(start, end uint64) (DepositsMap, error) {
depositsByBlockhash := make(DepositsMap) depositsByBlockhash := make(DepositsMap)
iter, err := FilterERC20DepositInitiatedWithRetry(s.filterer, &bind.FilterOpts{ iter, err := FilterERC20DepositInitiatedWithRetry(s.ctx, s.filterer, &bind.FilterOpts{
Start: start, Start: start,
End: &end, End: &end,
Context: s.ctx,
}) })
if err != nil { if err != nil {
logger.Error("Error fetching filter", "err", err) logger.Error("Error fetching filter", "err", err)
......
...@@ -44,10 +44,9 @@ func QueryERC20(address common.Address, client *ethclient.Client) (*db.Token, er ...@@ -44,10 +44,9 @@ func QueryERC20(address common.Address, client *ethclient.Client) (*db.Token, er
func QueryStateBatches(filterer *scc.StateCommitmentChainFilterer, startHeight, endHeight uint64, ctx context.Context) (map[common.Hash][]db.StateBatch, error) { func QueryStateBatches(filterer *scc.StateCommitmentChainFilterer, startHeight, endHeight uint64, ctx context.Context) (map[common.Hash][]db.StateBatch, error) {
batches := make(map[common.Hash][]db.StateBatch) batches := make(map[common.Hash][]db.StateBatch)
iter, err := bridge.FilterStateBatchAppendedWithRetry(filterer, &bind.FilterOpts{ iter, err := bridge.FilterStateBatchAppendedWithRetry(ctx, filterer, &bind.FilterOpts{
Start: startHeight, Start: startHeight,
End: &endHeight, End: &endHeight,
Context: ctx,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
......
...@@ -14,18 +14,16 @@ var clientRetryInterval = 5 * time.Second ...@@ -14,18 +14,16 @@ var clientRetryInterval = 5 * time.Second
// FilterWithdrawalInitiatedWithRetry retries the given func until it succeeds, // FilterWithdrawalInitiatedWithRetry retries the given func until it succeeds,
// waiting for clientRetryInterval duration after every call. // waiting for clientRetryInterval duration after every call.
func FilterWithdrawalInitiatedWithRetry(filterer *l2bridge.L2StandardBridgeFilterer, opts *bind.FilterOpts) (*l2bridge.L2StandardBridgeWithdrawalInitiatedIterator, error) { func FilterWithdrawalInitiatedWithRetry(ctx context.Context, filterer *l2bridge.L2StandardBridgeFilterer, opts *bind.FilterOpts) (*l2bridge.L2StandardBridgeWithdrawalInitiatedIterator, error) {
for { for {
ctxt, cancel := context.WithTimeout(opts.Context, DefaultConnectionTimeout) ctxt, cancel := context.WithTimeout(ctx, DefaultConnectionTimeout)
opts.Context = ctxt opts.Context = ctxt
res, err := filterer.FilterWithdrawalInitiated(opts, nil, nil, nil) res, err := filterer.FilterWithdrawalInitiated(opts, nil, nil, nil)
switch err { cancel()
case nil: if err == nil {
cancel() return res, nil
return res, err
default:
logger.Error("Error fetching filter", "err", err)
} }
logger.Error("Error fetching filter", "err", err)
time.Sleep(clientRetryInterval) time.Sleep(clientRetryInterval)
} }
} }
...@@ -25,10 +25,9 @@ func (s *StandardBridge) Address() common.Address { ...@@ -25,10 +25,9 @@ func (s *StandardBridge) Address() common.Address {
func (s *StandardBridge) GetWithdrawalsByBlockRange(start, end uint64) (WithdrawalsMap, error) { func (s *StandardBridge) GetWithdrawalsByBlockRange(start, end uint64) (WithdrawalsMap, error) {
withdrawalsByBlockhash := make(map[common.Hash][]db.Withdrawal) withdrawalsByBlockhash := make(map[common.Hash][]db.Withdrawal)
iter, err := FilterWithdrawalInitiatedWithRetry(s.filterer, &bind.FilterOpts{ iter, err := FilterWithdrawalInitiatedWithRetry(s.ctx, s.filterer, &bind.FilterOpts{
Start: start, Start: start,
End: &end, End: &end,
Context: s.ctx,
}) })
if err != nil { if err != nil {
logger.Error("Error fetching filter", "err", err) logger.Error("Error fetching filter", "err", err)
......
...@@ -25,10 +25,10 @@ lint: ...@@ -25,10 +25,10 @@ lint:
golangci-lint run ./... golangci-lint run ./...
binding: binding:
$(eval temp := $(shell mktemp)) $(eval tempCTC := $(shell mktemp))
cat ../../packages/contracts/deployments/mainnet/CanonicalTransactionChain.json \ cat ../../packages/contracts/deployments/mainnet/CanonicalTransactionChain.json \
| jq -r .bytecode > $(temp) | jq -r .bytecode > $(tempCTC)
cat ../../packages/contracts/deployments/mainnet/CanonicalTransactionChain.json \ cat ../../packages/contracts/deployments/mainnet/CanonicalTransactionChain.json \
| jq .abi \ | jq .abi \
...@@ -36,6 +36,21 @@ binding: ...@@ -36,6 +36,21 @@ binding:
--abi - \ --abi - \
--out bindings/CanonicalTransactionChain.go \ --out bindings/CanonicalTransactionChain.go \
--type CanonicalTransactionChain \ --type CanonicalTransactionChain \
--bin $(temp) --bin $(tempCTC)
rm $(temp) rm $(tempCTC)
$(eval tempSCC := $(shell mktemp))
cat ../../packages/contracts/deployments/mainnet/StateCommitmentChain.json \
| jq -r .bytecode > $(tempSCC)
cat ../../packages/contracts/deployments/mainnet/StateCommitmentChain.json \
| jq .abi \
| abigen --pkg bindings \
--abi - \
--out bindings/StateCommitmentChain.go \
--type StateCommitmentChain \
--bin $(tempSCC)
rm $(tempSCC)
\ No newline at end of file
...@@ -6,21 +6,22 @@ import ( ...@@ -6,21 +6,22 @@ import (
//Define the metrics we wish to expose //Define the metrics we wish to expose
var ( var (
ctcTotalElements = prometheus.NewGaugeVec( addressTotalElements = prometheus.NewGaugeVec(
prometheus.GaugeOpts{ prometheus.GaugeOpts{
Name: "l2geth_ctc_total_elements", Name: "l2geth_total_elements",
Help: "CTC GetTotalElements value."}, Help: "GetTotalElements value."},
[]string{"state"}, []string{"state", "address"},
) )
ctcTotalElementsCallSuccess = prometheus.NewGauge( addressTotalElementsCallStatus = prometheus.NewCounterVec(
prometheus.GaugeOpts{ prometheus.CounterOpts{
Name: "l2geth_ctc_total_elements_call_success", Name: "l2geth_total_elements_call_status",
Help: "CTC GetTotalElements call success."}, Help: "GetTotalElements call status."},
[]string{"status", "address"},
) )
) )
func init() { func init() {
//Register metrics with prometheus //Register metrics with prometheus
prometheus.MustRegister(ctcTotalElements) prometheus.MustRegister(addressTotalElements)
prometheus.MustRegister(ctcTotalElementsCallSuccess) prometheus.MustRegister(addressTotalElementsCallStatus)
} }
...@@ -10,12 +10,36 @@ import ( ...@@ -10,12 +10,36 @@ import (
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
) )
// CTC interacts with the OVM CTC contract // CTC interacts with the OVM Canonical Transaction Chain contract
type CTC struct { type CTC struct {
Address common.Address Address common.Address
Client *ethclient.Client Client *ethclient.Client
} }
// SCC interacts with the OVM State Commitment Chain contract
type SCC struct {
Address common.Address
Client *ethclient.Client
}
func (scc *SCC) GetTotalElements(ctx context.Context) (*big.Int, error) {
	contract, err := bindings.NewStateCommitmentChainCaller(scc.Address, scc.Client)
	if err != nil {
		return nil, err
	}
	totalElements, err := contract.GetTotalElements(&bind.CallOpts{
		Context: ctx,
	})
	if err != nil {
		return nil, err
	}
	return totalElements, nil
}
func (ctc *CTC) GetTotalElements(ctx context.Context) (*big.Int, error) { func (ctc *CTC) GetTotalElements(ctx context.Context) (*big.Int, error) {
contract, err := bindings.NewCanonicalTransactionChainCaller(ctc.Address, ctc.Client) contract, err := bindings.NewCanonicalTransactionChainCaller(ctc.Address, ctc.Client)
......
...@@ -31,11 +31,16 @@ func main() { ...@@ -31,11 +31,16 @@ func main() {
log.Error("L1_URL environmental variable is required") log.Error("L1_URL environmental variable is required")
os.Exit(1) os.Exit(1)
} }
ctcAddress := os.Getenv("CTC_ADDRESS") ctcAddress := os.Getenv("OVM_CTC_ADDRESS")
if ctcAddress == "" { if ctcAddress == "" {
log.Error("CTC_ADDRESS environmental variable is required") log.Error("CTC_ADDRESS environmental variable is required")
os.Exit(1) os.Exit(1)
} }
sccAddress := os.Getenv("OVM_SCC_ADDRESS")
if sccAddress == "" {
log.Error("OVM_SCC_ADDRESS environmental variable is required")
os.Exit(1)
}
client, err := ethclient.Dial(l1Url) client, err := ethclient.Dial(l1Url)
if err != nil { if err != nil {
log.Error("Problem connecting to L1: %s", err) log.Error("Problem connecting to L1: %s", err)
...@@ -51,7 +56,8 @@ func main() { ...@@ -51,7 +56,8 @@ func main() {
</body> </body>
</html>`)) </html>`))
}) })
go getCTCTotalElements(ctcAddress, client) go getCTCTotalElements(ctcAddress, "ctc", client)
go getSCCTotalElements(sccAddress, "scc", client)
log.Info("Program starting", "listenAddress", listenAddress, "GETH_URL", l1Url, "CTC_ADDRESS", ctcAddress) log.Info("Program starting", "listenAddress", listenAddress, "GETH_URL", l1Url, "CTC_ADDRESS", ctcAddress)
if err := http.ListenAndServe(listenAddress, nil); err != nil { if err := http.ListenAndServe(listenAddress, nil); err != nil {
...@@ -60,7 +66,35 @@ func main() { ...@@ -60,7 +66,35 @@ func main() {
} }
func getCTCTotalElements(address string, client *ethclient.Client) { func getSCCTotalElements(address string, addressLabel string, client *ethclient.Client) {
scc := l1contracts.SCC{
Address: common.HexToAddress(address),
Client: client,
}
ticker := time.NewTicker(30 * time.Second)
defer ticker.Stop()
for {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(l1TimeoutSeconds))
totalElements, err := scc.GetTotalElements(ctx)
if err != nil {
addressTotalElementsCallStatus.WithLabelValues("error", addressLabel).Inc()
log.Error("Error calling GetTotalElements", "address", addressLabel, "error", err)
cancel()
continue
}
addressTotalElementsCallStatus.WithLabelValues("success", addressLabel).Inc()
totalElementsFloat, _ := new(big.Float).SetInt(totalElements).Float64()
addressTotalElements.WithLabelValues("latest", addressLabel).Set(totalElementsFloat)
log.Info(addressLabel, "TotalElements", totalElementsFloat)
cancel()
<-ticker.C
}
}
func getCTCTotalElements(address string, addressLabel string, client *ethclient.Client) {
ctc := l1contracts.CTC{ ctc := l1contracts.CTC{
Address: common.HexToAddress(address), Address: common.HexToAddress(address),
Client: client, Client: client,
...@@ -72,16 +106,16 @@ func getCTCTotalElements(address string, client *ethclient.Client) { ...@@ -72,16 +106,16 @@ func getCTCTotalElements(address string, client *ethclient.Client) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(l1TimeoutSeconds)) ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(l1TimeoutSeconds))
totalElements, err := ctc.GetTotalElements(ctx) totalElements, err := ctc.GetTotalElements(ctx)
if err != nil { if err != nil {
ctcTotalElementsCallSuccess.Set(0) addressTotalElementsCallStatus.WithLabelValues("error", addressLabel).Inc()
log.Error("Error calling GetTotalElements", "error", err) log.Error("Error calling GetTotalElements", "address", addressLabel, "error", err)
cancel() cancel()
continue continue
} }
ctcTotalElementsCallSuccess.Set(1) addressTotalElementsCallStatus.WithLabelValues("success", addressLabel).Inc()
totalElementsFloat, _ := new(big.Float).SetInt(totalElements).Float64() totalElementsFloat, _ := new(big.Float).SetInt(totalElements).Float64()
ctcTotalElements.WithLabelValues( addressTotalElements.WithLabelValues("latest", addressLabel).Set(totalElementsFloat)
"latest").Set(totalElementsFloat)
log.Info("ctc updated", "ctcTotalElements", totalElementsFloat) log.Info(addressLabel, "TotalElements", totalElementsFloat)
cancel() cancel()
<-ticker.C <-ticker.C
......
...@@ -19,6 +19,7 @@ import ( ...@@ -19,6 +19,7 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/gorilla/websocket" "github.com/gorilla/websocket"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"golang.org/x/sync/semaphore"
) )
const ( const (
...@@ -88,7 +89,7 @@ type Backend struct { ...@@ -88,7 +89,7 @@ type Backend struct {
authUsername string authUsername string
authPassword string authPassword string
rateLimiter RateLimiter rateLimiter RateLimiter
client *http.Client client *LimitedHTTPClient
dialer *websocket.Dialer dialer *websocket.Dialer
maxRetries int maxRetries int
maxResponseSize int64 maxResponseSize int64
...@@ -170,6 +171,7 @@ func NewBackend( ...@@ -170,6 +171,7 @@ func NewBackend(
rpcURL string, rpcURL string,
wsURL string, wsURL string,
rateLimiter RateLimiter, rateLimiter RateLimiter,
rpcSemaphore *semaphore.Weighted,
opts ...BackendOpt, opts ...BackendOpt,
) *Backend { ) *Backend {
backend := &Backend{ backend := &Backend{
...@@ -178,8 +180,10 @@ func NewBackend( ...@@ -178,8 +180,10 @@ func NewBackend(
wsURL: wsURL, wsURL: wsURL,
rateLimiter: rateLimiter, rateLimiter: rateLimiter,
maxResponseSize: math.MaxInt64, maxResponseSize: math.MaxInt64,
client: &http.Client{ client: &LimitedHTTPClient{
Timeout: 5 * time.Second, Client: http.Client{Timeout: 5 * time.Second},
sem: rpcSemaphore,
backendName: name,
}, },
dialer: &websocket.Dialer{}, dialer: &websocket.Dialer{},
} }
...@@ -358,7 +362,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReq *RPCReq) (*RPCRes, error ...@@ -358,7 +362,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReq *RPCReq) (*RPCRes, error
httpReq.Header.Set("content-type", "application/json") httpReq.Header.Set("content-type", "application/json")
httpReq.Header.Set("X-Forwarded-For", xForwardedFor) httpReq.Header.Set("X-Forwarded-For", xForwardedFor)
httpRes, err := b.client.Do(httpReq) httpRes, err := b.client.DoLimited(httpReq)
if err != nil { if err != nil {
return nil, wrapErr(err, "error in backend request") return nil, wrapErr(err, "error in backend request")
} }
...@@ -693,3 +697,18 @@ func sleepContext(ctx context.Context, duration time.Duration) { ...@@ -693,3 +697,18 @@ func sleepContext(ctx context.Context, duration time.Duration) {
case <-time.After(duration): case <-time.After(duration):
} }
} }
type LimitedHTTPClient struct {
http.Client
sem *semaphore.Weighted
backendName string
}
func (c *LimitedHTTPClient) DoLimited(req *http.Request) (*http.Response, error) {
if err := c.sem.Acquire(req.Context(), 1); err != nil {
tooManyRequestErrorsTotal.WithLabelValues(c.backendName).Inc()
return nil, wrapErr(err, "too many requests")
}
defer c.sem.Release(1)
return c.Do(req)
}
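DoLimited blocks on the weighted semaphore rather than failing fast: Acquire only returns an error once the request's context is cancelled or its deadline passes while waiting, and that is the sole path that increments too_many_request_errors_total and surfaces the wrapped "too many requests" error. Requests beyond the configured limit therefore queue for a backend slot instead of being rejected immediately.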
...@@ -7,11 +7,12 @@ import ( ...@@ -7,11 +7,12 @@ import (
) )
type ServerConfig struct { type ServerConfig struct {
RPCHost string `toml:"rpc_host"` RPCHost string `toml:"rpc_host"`
RPCPort int `toml:"rpc_port"` RPCPort int `toml:"rpc_port"`
WSHost string `toml:"ws_host"` WSHost string `toml:"ws_host"`
WSPort int `toml:"ws_port"` WSPort int `toml:"ws_port"`
MaxBodySizeBytes int64 `toml:"max_body_size_bytes"` MaxBodySizeBytes int64 `toml:"max_body_size_bytes"`
MaxConcurrentRPCs int64 `toml:"max_concurrent_rpcs"`
// TimeoutSeconds specifies the maximum time spent serving an HTTP request. Note that isn't used for websocket connections // TimeoutSeconds specifies the maximum time spent serving an HTTP request. Note that isn't used for websocket connections
TimeoutSeconds int `toml:"timeout_seconds"` TimeoutSeconds int `toml:"timeout_seconds"`
......
...@@ -18,6 +18,7 @@ ws_host = "0.0.0.0" ...@@ -18,6 +18,7 @@ ws_host = "0.0.0.0"
ws_port = 8085 ws_port = 8085
# Maximum client body size, in bytes, that the server will accept. # Maximum client body size, in bytes, that the server will accept.
max_body_size_bytes = 10485760 max_body_size_bytes = 10485760
max_concurrent_rpcs = 1000
[redis] [redis]
# URL to a Redis instance. # URL to a Redis instance.
......
...@@ -17,5 +17,6 @@ require ( ...@@ -17,5 +17,6 @@ require (
github.com/rs/cors v1.8.0 github.com/rs/cors v1.8.0
github.com/stretchr/testify v1.7.0 github.com/stretchr/testify v1.7.0
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
) )
package integration_tests
import (
"net/http"
"net/http/httptest"
"os"
"sync"
"testing"
"time"
"github.com/ethereum-optimism/optimism/go/proxyd"
"github.com/stretchr/testify/require"
)
func TestMaxConcurrentRPCs(t *testing.T) {
var (
mu sync.Mutex
concurrentRPCs int
maxConcurrentRPCs int
)
handler := func(w http.ResponseWriter, r *http.Request) {
mu.Lock()
concurrentRPCs++
if maxConcurrentRPCs < concurrentRPCs {
maxConcurrentRPCs = concurrentRPCs
}
mu.Unlock()
time.Sleep(time.Second * 2)
SingleResponseHandler(200, goodResponse)(w, r)
mu.Lock()
concurrentRPCs--
mu.Unlock()
}
// We don't use the MockBackend because it serializes requests to the handler
slowBackend := httptest.NewServer(http.HandlerFunc(handler))
defer slowBackend.Close()
require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", slowBackend.URL))
config := ReadConfig("max_rpc_conns")
client := NewProxydClient("http://127.0.0.1:8545")
shutdown, err := proxyd.Start(config)
require.NoError(t, err)
defer shutdown()
type resWithCodeErr struct {
res []byte
code int
err error
}
resCh := make(chan *resWithCodeErr)
for i := 0; i < 3; i++ {
go func() {
res, code, err := client.SendRPC("eth_chainId", nil)
resCh <- &resWithCodeErr{
res: res,
code: code,
err: err,
}
}()
}
res1 := <-resCh
res2 := <-resCh
res3 := <-resCh
require.NoError(t, res1.err)
require.NoError(t, res2.err)
require.NoError(t, res3.err)
require.Equal(t, 200, res1.code)
require.Equal(t, 200, res2.code)
require.Equal(t, 200, res3.code)
RequireEqualJSON(t, []byte(goodResponse), res1.res)
RequireEqualJSON(t, []byte(goodResponse), res2.res)
RequireEqualJSON(t, []byte(goodResponse), res3.res)
require.EqualValues(t, 2, maxConcurrentRPCs)
}
[server]
rpc_port = 8545
max_concurrent_rpcs = 2
[backend]
# this should cover blocked requests due to max_concurrent_rpcs
response_timeout_seconds = 12
[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backend_groups]
[backend_groups.main]
backends = ["good"]
[rpc_method_mappings]
eth_chainId = "main"
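The test and this config work together: with max_concurrent_rpcs = 2 and a backend handler that sleeps for two seconds, the third of the three parallel eth_chainId calls has to wait on the semaphore, so the handler never observes more than two requests in flight, which is what the final require.EqualValues assertion checks. The 12-second response_timeout_seconds leaves enough headroom for that queuing delay, so all three calls still return 200.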
...@@ -212,6 +212,14 @@ var ( ...@@ -212,6 +212,14 @@ var (
Help: "Histogram of Redis command durations, in milliseconds.", Help: "Histogram of Redis command durations, in milliseconds.",
Buckets: MillisecondDurationBuckets, Buckets: MillisecondDurationBuckets,
}, []string{"command"}) }, []string{"command"})
tooManyRequestErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "too_many_request_errors_total",
Help: "Count of request timeouts due to too many concurrent RPCs.",
}, []string{
"backend_name",
})
) )
func RecordRedisError(source string) { func RecordRedisError(source string) {
......
...@@ -10,9 +10,11 @@ import ( ...@@ -10,9 +10,11 @@ import (
"strconv" "strconv"
"time" "time"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
"golang.org/x/sync/semaphore"
) )
func Start(config *Config) (func(), error) { func Start(config *Config) (func(), error) {
...@@ -53,6 +55,12 @@ func Start(config *Config) (func(), error) { ...@@ -53,6 +55,12 @@ func Start(config *Config) (func(), error) {
} }
} }
maxConcurrentRPCs := config.Server.MaxConcurrentRPCs
if maxConcurrentRPCs == 0 {
maxConcurrentRPCs = math.MaxInt64
}
rpcRequestSemaphore := semaphore.NewWeighted(maxConcurrentRPCs)
backendNames := make([]string, 0) backendNames := make([]string, 0)
backendsByName := make(map[string]*Backend) backendsByName := make(map[string]*Backend)
for name, cfg := range config.Backends { for name, cfg := range config.Backends {
...@@ -111,7 +119,7 @@ func Start(config *Config) (func(), error) { ...@@ -111,7 +119,7 @@ func Start(config *Config) (func(), error) {
opts = append(opts, WithStrippedTrailingXFF()) opts = append(opts, WithStrippedTrailingXFF())
} }
opts = append(opts, WithProxydIP(os.Getenv("PROXYD_IP"))) opts = append(opts, WithProxydIP(os.Getenv("PROXYD_IP")))
back := NewBackend(name, rpcURL, wsURL, lim, opts...) back := NewBackend(name, rpcURL, wsURL, lim, rpcRequestSemaphore, opts...)
backendNames = append(backendNames, name) backendNames = append(backendNames, name)
backendsByName[name] = back backendsByName[name] = back
log.Info("configured backend", "name", name, "rpc_url", rpcURL, "ws_url", wsURL) log.Info("configured backend", "name", name, "rpc_url", rpcURL, "ws_url", wsURL)
......
...@@ -27,6 +27,7 @@ const ( ...@@ -27,6 +27,7 @@ const (
MaxBatchRPCCalls = 100 MaxBatchRPCCalls = 100
cacheStatusHdr = "X-Proxyd-Cache-Status" cacheStatusHdr = "X-Proxyd-Cache-Status"
defaultServerTimeout = time.Second * 10 defaultServerTimeout = time.Second * 10
maxLogLength = 2000
) )
type Server struct { type Server struct {
...@@ -150,6 +151,12 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) { ...@@ -150,6 +151,12 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
} }
RecordRequestPayloadSize(ctx, len(body)) RecordRequestPayloadSize(ctx, len(body))
log.Info("Raw RPC request",
"body", truncate(string(body)),
"req_id", GetReqID(ctx),
"auth", GetAuthCtx(ctx),
)
if IsBatch(body) { if IsBatch(body) {
reqs, err := ParseBatchRPCReq(body) reqs, err := ParseBatchRPCReq(body)
if err != nil { if err != nil {
...@@ -457,3 +464,11 @@ func (n *NoopRPCCache) GetRPC(context.Context, *RPCReq) (*RPCRes, error) { ...@@ -457,3 +464,11 @@ func (n *NoopRPCCache) GetRPC(context.Context, *RPCReq) (*RPCRes, error) {
func (n *NoopRPCCache) PutRPC(context.Context, *RPCReq, *RPCRes) error { func (n *NoopRPCCache) PutRPC(context.Context, *RPCReq, *RPCRes) error {
return nil return nil
} }
func truncate(str string) string {
if len(str) > maxLogLength {
return str[:maxLogLength] + "..."
} else {
return str
}
}
...@@ -291,7 +291,7 @@ func (d *Driver) UpdateGasPrice( ...@@ -291,7 +291,7 @@ func (d *Driver) UpdateGasPrice(
tx *types.Transaction, tx *types.Transaction,
) (*types.Transaction, error) { ) (*types.Transaction, error) {
gasPrice, err := d.cfg.L1Client.SuggestGasPrice(ctx) gasPrice, err := d.cfg.L2Client.SuggestGasPrice(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
......
#!/bin/sh
set -exu
GETH_DATA_DIR=/geth
GETH_CHAINDATA_DIR=$GETH_DATA_DIR/geth/chaindata
GETH_KEYSTORE_DIR=$GETH_DATA_DIR/keystore
if [ ! -d "$GETH_KEYSTORE_DIR" ]; then
echo "$GETH_KEYSTORE_DIR missing, running account import"
echo -n "$BLOCK_SIGNER_PRIVATE_KEY_PASSWORD" > "$GETH_DATA_DIR"/password
echo -n "$BLOCK_SIGNER_PRIVATE_KEY" > "$GETH_DATA_DIR"/block-signer-key
geth account import \
--datadir="$GETH_DATA_DIR" \
--password="$GETH_DATA_DIR"/password \
"$GETH_DATA_DIR"/block-signer-key
echo "get account import complete"
fi
if [ ! -d "$GETH_CHAINDATA_DIR" ]; then
echo "$GETH_CHAINDATA_DIR missing, running init"
geth init --datadir="$GETH_DATA_DIR" "$L2GETH_GENESIS_URL" "$L2GETH_GENESIS_HASH"
echo "geth init complete"
else
echo "$GETH_CHAINDATA_DIR exists, checking for hardfork."
echo "Chain config:"
geth dump-chain-cfg --datadir="$GETH_DATA_DIR"
if geth dump-chain-cfg --datadir="$GETH_DATA_DIR" | grep -q "\"berlinBlock\": $L2GETH_BERLIN_ACTIVATION_HEIGHT"; then
echo "Hardfork already activated."
else
echo "Hardfork not activated, running init."
geth init --datadir="$GETH_DATA_DIR" "$L2GETH_GENESIS_URL" "$L2GETH_GENESIS_HASH"
echo "geth hardfork activation complete"
fi
fi
\ No newline at end of file
DATA_TRANSPORT_LAYER__ADDRESS_MANAGER=0x07917E3D28aa7C61baAAcf8e486a77007E1Daa89
DATA_TRANSPORT_LAYER__CONFIRMATIONS=12
DATA_TRANSPORT_LAYER__DANGEROUSLY_CATCH_ALL_ERRORS=true
DATA_TRANSPORT_LAYER__DB_PATH=/db
DATA_TRANSPORT_LAYER__DEFAULT_BACKEND=l2
DATA_TRANSPORT_LAYER__ENABLE_METRICS=true
DATA_TRANSPORT_LAYER__ETH_NETWORK_NAME=goerli
DATA_TRANSPORT_LAYER__L1_GAS_PRICE_BACKEND=l2
DATA_TRANSPORT_LAYER__L1_START_HEIGHT=6680000
DATA_TRANSPORT_LAYER__L2_CHAIN_ID=421
DATA_TRANSPORT_LAYER__LOGS_PER_POLLING_INTERVAL=2000
DATA_TRANSPORT_LAYER__NODE_ENV=production
DATA_TRANSPORT_LAYER__POLLING_INTERVAL=500
DATA_TRANSPORT_LAYER__SENTRY_TRACE_RATE=0.05
DATA_TRANSPORT_LAYER__SERVER_HOSTNAME=0.0.0.0
DATA_TRANSPORT_LAYER__SERVER_PORT=7878
DATA_TRANSPORT_LAYER__SYNC_FROM_L1=false
DATA_TRANSPORT_LAYER__SYNC_FROM_L2=true
DATA_TRANSPORT_LAYER__TRANSACTIONS_PER_POLLING_INTERVAL=1000
DEPLOYER_HTTP=
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
configMapGenerator:
- name: data-transport-layer
envs:
- ./data-transport-layer.env
- name: l2geth-replica
envs:
- ./l2geth-replica.env
- name: replica-healthcheck
envs:
- ./replica-healthcheck.env
- name: geth-scripts
files:
- ./check-for-chaindata.sh
\ No newline at end of file
CHAIN_ID=421
DATADIR=/geth
NETWORK_ID=421
NO_DISCOVER=true
NO_USB=true
GASPRICE=0
GCMODE=archive
BLOCK_SIGNER_ADDRESS=0x00000398232E2064F896018496b4b44b3D62751F
BLOCK_SIGNER_PRIVATE_KEY=6587ae678cf4fc9a33000cdbf9f35226b71dcc6a4684a31203241f9bcfd55d27
BLOCK_SIGNER_PRIVATE_KEY_PASSWORD=pwd
ETH1_CTC_DEPLOYMENT_HEIGHT=0
ETH1_SYNC_SERVICE_ENABLE=true
L2GETH_GENESIS_URL=https://storage.googleapis.com/optimism/goerli-nightly/genesis.json
L2GETH_GENESIS_HASH=0x20091f0016a82537cda17009b87fb67cb9e3398eb4f7d2a99a47ffab62460cb1
ROLLUP_BACKEND=l2
ROLLUP_CLIENT_HTTP=http://data-transport-layer:7878
ROLLUP_DISABLE_TRANSFERS=false
ROLLUP_ENABLE_L2_GAS_POLLING=false
ROLLUP_MAX_CALLDATA_SIZE=40000
ROLLUP_POLL_INTERVAL_FLAG=3s
ROLLUP_SYNC_SERVICE_ENABLE=true
ROLLUP_TIMESTAMP_REFRESH=3m
ROLLUP_VERIFIER_ENABLE=true
RPC_ADDR=0.0.0.0
RPC_API=eth,rollup,net,web3,debug
RPC_CORS_DOMAIN=*
RPC_ENABLE=true
RPC_PORT=8545
RPC_VHOSTS=*
TARGET_GAS_LIMIT=15000000
USING_OVM=true
WS_ADDR=0.0.0.0
WS_API=eth,rollup,net,web3,debug
WS_ORIGINS=*
WS=true
REPLICA_HEALTHCHECK__ETH_NETWORK=goerli-nightly
REPLICA_HEALTHCHECK__L2GETH_IMAGE_TAG=0.4.9
REPLICA_HEALTHCHECK__ETH_REPLICA_RPC_PROVIDER=http://l2geth-replica:8545
\ No newline at end of file
apiVersion: apps/v1
kind: Deployment
metadata:
name: replica-healthcheck-v1
spec:
replicas: 1
template:
spec:
containers:
- name: replica-healthcheck
image: ethereumoptimism/replica-healthcheck-v1
ports:
- containerPort: 7300
name: metrics
resources:
limits:
memory: 256Mi
cpu: 512m
requests:
memory: 128Mi
cpu: 256m
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
commonLabels:
app: replica-healthcheck-v1
\ No newline at end of file
...@@ -2,4 +2,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 ...@@ -2,4 +2,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization kind: Kustomization
resources: resources:
- ./data-transport-layer.yaml - ./data-transport-layer.yaml
- ./replica-healthcheck.yaml - ./replica-healthcheck.yaml
\ No newline at end of file - ./replica-healthcheck-v1.yaml
\ No newline at end of file
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: replica-healthcheck-v1
spec:
selector:
matchLabels:
app: replica-healthcheck-v1
provider: internal
podMetricsEndpoints:
- port: metrics
podTargetLabels:
- network
- provider
- sync_source
\ No newline at end of file
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: goerli-nightly-replica
commonLabels:
network: goerli-nightly
provider: internal
bases:
- ../../../envs/goerli-nightly
- ../../../scripts
resources:
- ../../bases/data-transport-layer
- ../../bases/l2geth-replica
- ../../bases/servicemonitors
- ../../bases/replica-healthcheck-v1
- ./volumes.yaml
images:
- name: ethereumoptimism/data-transport-layer
newName: ethereumoptimism/data-transport-layer
newTag: latest
- name: ethereumoptimism/l2geth
newName: ethereumoptimism/l2geth
newTag: latest
- name: ethereumoptimism/replica-healthcheck-v1
newName: ethereumoptimism/replica-healthcheck
newTag: latest
patchesStrategicMerge:
- ./patches/dtl.yaml
- ./patches/l2geth.yaml
- ./patches/replica-healthcheck.yaml
patches:
- path: ./patches/l2geth-volume.yaml
target:
group: apps
version: v1
kind: StatefulSet
name: l2geth-replica
- path: ./patches/dtl-volume.yaml
target:
group: apps
version: v1
kind: StatefulSet
name: data-transport-layer
\ No newline at end of file
---
- op: replace
path: /spec/template/spec/volumes/0
value:
name: data-transport-layer
persistentVolumeClaim:
claimName: data-transport-layer-data
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: data-transport-layer
spec:
template:
spec:
initContainers:
- name: wait-for-l1
env:
- name: L1_NODE_WEB3_URL
value: http://failover-proxyd.default:8080
containers:
- name: data-transport-layer
resources:
limits:
cpu: "2"
memory: 4Gi
requests:
cpu: "1"
memory: 1Gi
env:
- name: DATA_TRANSPORT_LAYER__L1_RPC_ENDPOINT
value: http://failover-proxyd.default:8080
- name: DATA_TRANSPORT_LAYER__L2_RPC_ENDPOINT
value: http://sequencer.default:8545
- name: L1_NODE_WEB3_URL
value: http://failover-proxyd.default:8080
\ No newline at end of file
- op: replace
path: /spec/template/spec/volumes/0
value:
name: l2geth-replica-data
persistentVolumeClaim:
claimName: l2geth-replica-data
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: l2geth-replica
spec:
template:
spec:
containers:
- name: l2geth-replica
env:
- name: IPC_DISABLE
value: "false"
resources:
limits:
cpu: "4"
memory: 12Gi
requests:
cpu: "2"
memory: 8Gi
apiVersion: apps/v1
kind: Deployment
metadata:
name: replica-healthcheck-v1
spec:
replicas: 1
template:
spec:
containers:
- name: replica-healthcheck
env:
- name: HEALTHCHECK__REFERENCE_RPC_PROVIDER
value: http://sequencer.default:8545
- name: HEALTHCHECK__TARGET_RPC_PROVIDER
value: http://l2geth-replica:8545
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: l2geth-replica-data
spec:
storageClassName: premium-rwo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data-transport-layer-data
spec:
storageClassName: premium-rwo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
\ No newline at end of file
...@@ -14,6 +14,7 @@ resources: ...@@ -14,6 +14,7 @@ resources:
- ../../bases/l2geth-replica - ../../bases/l2geth-replica
- ../../bases/servicemonitors - ../../bases/servicemonitors
- ../../bases/replica-healthcheck - ../../bases/replica-healthcheck
- ../../bases/replica-healthcheck-v1
- ./volumes.yaml - ./volumes.yaml
- ./ingress.yaml - ./ingress.yaml
...@@ -27,6 +28,9 @@ images: ...@@ -27,6 +28,9 @@ images:
- name: ethereumoptimism/replica-healthcheck - name: ethereumoptimism/replica-healthcheck
newName: ethereumoptimism/replica-healthcheck newName: ethereumoptimism/replica-healthcheck
newTag: 0.3.3 newTag: 0.3.3
- name: ethereumoptimism/replica-healthcheck-v1
newName: ethereumoptimism/replica-healthcheck
newTag: 1.0.4
patchesStrategicMerge: patchesStrategicMerge:
- ./patches/dtl.yaml - ./patches/dtl.yaml
...@@ -39,4 +43,9 @@ patches: ...@@ -39,4 +43,9 @@ patches:
group: apps group: apps
version: v1 version: v1
kind: StatefulSet kind: StatefulSet
name: l2geth-replica name: l2geth-replica
\ No newline at end of file
configMapGenerator:
- name: replica-healthcheck-v1
envs:
- ./replica-healthcheck-v1.env
HEALTHCHECK__REFERENCE_RPC_PROVIDER=https://mainnet.optimism.io
HEALTHCHECK__TARGET_RPC_PROVIDER=http://l2geth-replica:8545
\ No newline at end of file
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: mainnet-replica-0-5-17
commonLabels:
network: mainnet
provider: internal
bases:
- ../../../envs/mainnet-gen5-berlin/
- ../../../scripts
resources:
- ../../bases/data-transport-layer
- ../../bases/l2geth-replica
- ../../bases/servicemonitors
- ../../bases/replica-healthcheck
- ../../bases/replica-healthcheck-v1
- ./volumes.yaml
images:
- name: ethereumoptimism/data-transport-layer
newName: ethereumoptimism/data-transport-layer
newTag: 0.5.25
- name: ethereumoptimism/l2geth
newName: ethereumoptimism/l2geth
newTag: 0.5.17
- name: ethereumoptimism/replica-healthcheck
newName: ethereumoptimism/replica-healthcheck
newTag: 0.3.3
- name: ethereumoptimism/replica-healthcheck-v1
newName: ethereumoptimism/replica-healthcheck
newTag: 1.0.4
patchesStrategicMerge:
- ./patches/dtl.yaml
- ./patches/l2geth.yaml
- ./patches/replica-healthcheck.yaml
patches:
- path: ./patches/l2geth-volume.yaml
target:
group: apps
version: v1
kind: StatefulSet
name: l2geth-replica
configMapGenerator:
- name: replica-healthcheck-v1
envs:
- ./replica-healthcheck-v1.env
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: data-transport-layer
spec:
template:
spec:
initContainers:
- name: wait-for-l1
env:
- name: L1_NODE_WEB3_URL
valueFrom:
secretKeyRef:
name: replica-secrets
key: l1-rpc-endpoint
containers:
- name: data-transport-layer
resources:
limits:
cpu: "2"
memory: 4Gi
requests:
cpu: "1"
memory: 1Gi
env:
- name: DATA_TRANSPORT_LAYER__L1_RPC_ENDPOINT
valueFrom:
secretKeyRef:
name: replica-secrets
key: l1-rpc-endpoint
- name: DATA_TRANSPORT_LAYER__L2_RPC_ENDPOINT
valueFrom:
secretKeyRef:
name: replica-secrets
key: l2-rpc-endpoint
- name: L1_NODE_WEB3_URL
valueFrom:
secretKeyRef:
name: replica-secrets
key: l1-rpc-endpoint
\ No newline at end of file
- op: replace
path: /spec/template/spec/volumes/0
value:
name: l2geth-replica-data
persistentVolumeClaim:
claimName: l2geth-replica-data
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: l2geth-replica
spec:
template:
spec:
containers:
- name: l2geth-replica
command:
- geth
- --config=/l2geth-config/l2geth.toml
- --datadir=$(DATADIR)
- --password=$(DATADIR)/password
- --allow-insecure-unlock
- --unlock=$(BLOCK_SIGNER_ADDRESS)
- --mine
- --miner.etherbase=$(BLOCK_SIGNER_ADDRESS)
- --metrics
- --metrics.influxdb
- --metrics.influxdb.endpoint=http://influxdb.monitoring:8086
- --metrics.influxdb.database=$(NAMESPACE)
resources:
limits:
cpu: "8"
memory: 24Gi
requests:
cpu: "4"
memory: 12Gi
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: data-transport-layer
spec:
template:
spec:
initContainers:
- name: wait-for-l1
env:
- name: L1_NODE_WEB3_URL
valueFrom:
secretKeyRef:
name: replica-secrets
key: l1-rpc-endpoint
containers:
- name: data-transport-layer
resources:
limits:
cpu: "2"
memory: 4Gi
requests:
cpu: "1"
memory: 1Gi
env:
- name: DATA_TRANSPORT_LAYER__L1_RPC_ENDPOINT
valueFrom:
secretKeyRef:
name: replica-secrets
key: l1-rpc-endpoint
- name: DATA_TRANSPORT_LAYER__L2_RPC_ENDPOINT
valueFrom:
secretKeyRef:
name: replica-secrets
key: l2-rpc-endpoint
- name: L1_NODE_WEB3_URL
valueFrom:
secretKeyRef:
name: replica-secrets
key: l1-rpc-endpoint
\ No newline at end of file
apiVersion: apps/v1
kind: Deployment
metadata:
name: replica-healthcheck
spec:
template:
spec:
containers:
- name: replica-healthcheck
env:
- name: REPLICA_HEALTHCHECK__ETH_NETWORK_RPC_PROVIDER
value: https://mainnet.optimism.io
HEALTHCHECK__REFERENCE_RPC_PROVIDER=https://mainnet.optimism.io
HEALTHCHECK__TARGET_RPC_PROVIDER=http://l2geth-replica:8545
\ No newline at end of file
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: l2geth-replica-data
spec:
storageClassName: premium-rwo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data-transport-layer-data
spec:
storageClassName: premium-rwo
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
\ No newline at end of file
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
import { BigNumber, Contract, ContractFactory, utils, Wallet } from 'ethers' import { BigNumber, Contract, ContractFactory, utils, Wallet } from 'ethers'
import { ethers } from 'hardhat' import { ethers } from 'hardhat'
import { futurePredeploys } from '@eth-optimism/contracts' import { futurePredeploys } from '@eth-optimism/contracts'
import { sleep } from '@eth-optimism/core-utils'
/* Imports: Internal */ /* Imports: Internal */
import { expect } from './shared/setup' import { expect } from './shared/setup'
...@@ -102,4 +103,42 @@ describe('System addresses', () => { ...@@ -102,4 +103,42 @@ describe('System addresses', () => {
expect(receipt.contractAddress).not.to.eq(null) expect(receipt.contractAddress).not.to.eq(null)
} }
}) })
const testReplica = async (otherProvider) => {
const seqBlock = await env.l2Provider.getBlock('latest')
while (true) {
const verHeight = await otherProvider.getBlockNumber()
if (verHeight >= seqBlock.number) {
break
}
await sleep(200)
}
const verBlock = await otherProvider.getBlock(seqBlock.number)
expect(verBlock).to.deep.eq(seqBlock)
for (const addr of SYSTEM_ADDRESSES) {
const seqCode = await env.l2Provider.getCode(addr)
const verCode = await otherProvider.getCode(addr)
expect(seqCode).to.eq(verCode)
}
}
it('should be properly handled on verifiers', async function () {
if (!envConfig.RUN_VERIFIER_TESTS) {
this.skip()
return
}
await testReplica(env.verifierProvider)
})
it('should be properly handled on replicas', async function () {
if (!envConfig.RUN_REPLICA_TESTS) {
this.skip()
return
}
await testReplica(env.replicaProvider)
})
}) })
...@@ -34,6 +34,7 @@ import ( ...@@ -34,6 +34,7 @@ import (
"github.com/ethereum-optimism/optimism/l2geth/core/types" "github.com/ethereum-optimism/optimism/l2geth/core/types"
"github.com/ethereum-optimism/optimism/l2geth/event" "github.com/ethereum-optimism/optimism/l2geth/event"
"github.com/ethereum-optimism/optimism/l2geth/log" "github.com/ethereum-optimism/optimism/l2geth/log"
"github.com/ethereum-optimism/optimism/l2geth/metrics"
"github.com/ethereum-optimism/optimism/l2geth/params" "github.com/ethereum-optimism/optimism/l2geth/params"
) )
...@@ -85,6 +86,13 @@ var ( ...@@ -85,6 +86,13 @@ var (
// l2geth, rather the actual execution error should be returned to the // l2geth, rather the actual execution error should be returned to the
// user. // user.
ErrCannotCommitTxn = errors.New("Cannot commit transaction in miner") ErrCannotCommitTxn = errors.New("Cannot commit transaction in miner")
// rollup apply transaction metrics
accountReadTimer = metrics.NewRegisteredTimer("rollup/tx/account/reads", nil)
accountUpdateTimer = metrics.NewRegisteredTimer("rollup/tx/account/updates", nil)
storageReadTimer = metrics.NewRegisteredTimer("rollup/tx/storage/reads", nil)
storageUpdateTimer = metrics.NewRegisteredTimer("rollup/tx/storage/updates", nil)
txExecutionTimer = metrics.NewRegisteredTimer("rollup/tx/execution", nil)
) )
// environment is the worker's current environment and holds all of the current state information. // environment is the worker's current environment and holds all of the current state information.
...@@ -771,6 +779,7 @@ func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Addres ...@@ -771,6 +779,7 @@ func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Addres
} }
snap := w.current.state.Snapshot() snap := w.current.state.Snapshot()
start := time.Now()
receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig()) receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig())
if err != nil { if err != nil {
w.current.state.RevertToSnapshot(snap) w.current.state.RevertToSnapshot(snap)
...@@ -779,6 +788,8 @@ func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Addres ...@@ -779,6 +788,8 @@ func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Addres
w.current.txs = append(w.current.txs, tx) w.current.txs = append(w.current.txs, tx)
w.current.receipts = append(w.current.receipts, receipt) w.current.receipts = append(w.current.receipts, receipt)
updateTransactionStateMetrics(start, w.current.state)
return receipt.Logs, nil return receipt.Logs, nil
} }
...@@ -1143,3 +1154,13 @@ func (w *worker) postSideBlock(event core.ChainSideEvent) { ...@@ -1143,3 +1154,13 @@ func (w *worker) postSideBlock(event core.ChainSideEvent) {
case <-w.exitCh: case <-w.exitCh:
} }
} }
// updateTransactionStateMetrics records the rollup transaction timers declared
// above using the durations tracked by the StateDB.
func updateTransactionStateMetrics(start time.Time, state *state.StateDB) {
	accountReadTimer.Update(state.AccountReads)
	storageReadTimer.Update(state.StorageReads)
	accountUpdateTimer.Update(state.AccountUpdates)
	storageUpdateTimer.Update(state.StorageUpdates)
	// Subtract trie hashing time so the execution timer reflects transaction
	// execution itself rather than state hashing.
	triehash := state.AccountHashes + state.StorageHashes
	txExecutionTimer.Update(time.Since(start) - triehash)
}
...@@ -247,10 +247,10 @@ func (s *SyncService) Start() error { ...@@ -247,10 +247,10 @@ func (s *SyncService) Start() error {
} else { } else {
go func() { go func() {
if err := s.syncTransactionsToTip(); err != nil { if err := s.syncTransactionsToTip(); err != nil {
log.Crit("Sequencer cannot sync transactions to tip: %w", err) log.Crit("Sequencer cannot sync transactions to tip", "err", err)
} }
if err := s.syncQueueToTip(); err != nil { if err := s.syncQueueToTip(); err != nil {
log.Crit("Sequencer cannot sync queue to tip: %w", err) log.Crit("Sequencer cannot sync queue to tip", "err", err)
} }
s.setSyncStatus(false) s.setSyncStatus(false)
go s.SequencerLoop() go s.SequencerLoop()
......
...@@ -95,6 +95,9 @@ contract L2CrossDomainMessenger is IL2CrossDomainMessenger { ...@@ -95,6 +95,9 @@ contract L2CrossDomainMessenger is IL2CrossDomainMessenger {
bytes memory _message, bytes memory _message,
uint256 _messageNonce uint256 _messageNonce
) public { ) public {
// Since it is impossible to deploy a contract to an address on L2 which matches
// the alias of the L1CrossDomainMessenger, this check can only pass when it is called in
// the first call frame of a deposit transaction. Thus reentrancy is prevented here.
require( require(
AddressAliasHelper.undoL1ToL2Alias(msg.sender) == l1CrossDomainMessenger, AddressAliasHelper.undoL1ToL2Alias(msg.sender) == l1CrossDomainMessenger,
"Provided message could not be verified." "Provided message could not be verified."
......
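For context on the check above: when an L1 contract triggers an L2 deposit, its address is offset by a fixed constant before it becomes the L2 sender, and `undoL1ToL2Alias` reverses that offset. A rough sketch of the arithmetic in test code, assuming the standard AddressAliasHelper offset constant:

```ts
import { BigNumber } from 'ethers'

// Assumed alias offset (matches AddressAliasHelper's constant); all address
// arithmetic is performed modulo 2^160.
const OFFSET = BigNumber.from('0x1111000000000000000000000000000000001111')
const UINT160 = BigNumber.from(2).pow(160)

const toAddress = (n: BigNumber): string =>
  '0x' + n.toHexString().slice(2).padStart(40, '0')

// L1 -> L2: the aliased sender seen by contracts on L2.
const applyL1ToL2Alias = (l1Address: string): string =>
  toAddress(BigNumber.from(l1Address).add(OFFSET).mod(UINT160))

// L2 -> L1: recover the original L1 address, as the messenger check does.
const undoL1ToL2Alias = (l2Address: string): string =>
  toAddress(BigNumber.from(l2Address).add(UINT160).sub(OFFSET).mod(UINT160))
```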
/* External Imports */
import { ethers } from 'hardhat' import { ethers } from 'hardhat'
import { Signer, ContractFactory, Contract } from 'ethers' import { Contract } from 'ethers'
import { MockContract, smock } from '@defi-wonderland/smock' import { MockContract, smock } from '@defi-wonderland/smock'
import { expectApprox } from '@eth-optimism/core-utils' import { expectApprox } from '@eth-optimism/core-utils'
import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers'
/* Internal Imports */
import { import {
makeAddressManager, deploy,
L2_GAS_DISCOUNT_DIVISOR, L2_GAS_DISCOUNT_DIVISOR,
ENQUEUE_GAS_COST, ENQUEUE_GAS_COST,
NON_ZERO_ADDRESS, NON_ZERO_ADDRESS,
...@@ -22,44 +21,34 @@ const INITIAL_TOTAL_L1_SUPPLY = 5000 ...@@ -22,44 +21,34 @@ const INITIAL_TOTAL_L1_SUPPLY = 5000
const FINALIZATION_GAS = 1_200_000 const FINALIZATION_GAS = 1_200_000
describe('[GAS BENCHMARK] Depositing via the standard bridge [ @skip-on-coverage ]', () => { describe('[GAS BENCHMARK] Depositing via the standard bridge [ @skip-on-coverage ]', () => {
let sequencer: Signer let sequencer: SignerWithAddress
let alice: Signer let alice: SignerWithAddress
before(async () => { before(async () => {
;[sequencer, alice] = await ethers.getSigners() ;[sequencer, alice] = await ethers.getSigners()
}) })
let AddressManager: Contract let AddressManager: Contract
before('Deploy address manager and register sequencer', async () => { let CanonicalTransactionChain: Contract
AddressManager = await makeAddressManager() before(async () => {
AddressManager = await deploy('Lib_AddressManager')
CanonicalTransactionChain = await deploy('CanonicalTransactionChain', {
args: [
AddressManager.address,
MAX_GAS_LIMIT,
L2_GAS_DISCOUNT_DIVISOR,
ENQUEUE_GAS_COST,
],
})
const batches = await deploy('ChainStorageContainer', {
args: [AddressManager.address, 'CanonicalTransactionChain'],
})
await AddressManager.setAddress( await AddressManager.setAddress(
'OVM_Sequencer', 'OVM_Sequencer',
await sequencer.getAddress() await sequencer.getAddress()
) )
})
let CanonicalTransactionChain: Contract
let Factory__ChainStorageContainer: ContractFactory
before('Init CTC and Storage Container contracts.', async () => {
CanonicalTransactionChain = await (
await ethers.getContractFactory('CanonicalTransactionChain')
).deploy(
AddressManager.address,
MAX_GAS_LIMIT,
L2_GAS_DISCOUNT_DIVISOR,
ENQUEUE_GAS_COST
)
Factory__ChainStorageContainer = await ethers.getContractFactory(
'ChainStorageContainer'
)
const batches = await Factory__ChainStorageContainer.deploy(
AddressManager.address,
'CanonicalTransactionChain'
)
await Factory__ChainStorageContainer.deploy(
AddressManager.address,
'CanonicalTransactionChain'
)
await AddressManager.setAddress( await AddressManager.setAddress(
'ChainStorageContainer-CTC-batches', 'ChainStorageContainer-CTC-batches',
...@@ -72,50 +61,43 @@ describe('[GAS BENCHMARK] Depositing via the standard bridge [ @skip-on-coverage ...@@ -72,50 +61,43 @@ describe('[GAS BENCHMARK] Depositing via the standard bridge [ @skip-on-coverage
) )
}) })
// 3 Messenger
let L1CrossDomainMessenger: Contract let L1CrossDomainMessenger: Contract
before('Deploy Messenger proxy and implementation', async () => { before(async () => {
// Deploy the implementation contract first const xDomainMessengerImpl = await deploy('L1CrossDomainMessenger')
const xDomainMessengerImpl = await (
await ethers.getContractFactory('L1CrossDomainMessenger')
).deploy()
await AddressManager.setAddress( await AddressManager.setAddress(
'L1CrossDomainMessenger', 'L1CrossDomainMessenger',
xDomainMessengerImpl.address xDomainMessengerImpl.address
) )
// Deploy and initialize the Proxy Messenger
const proxy = await ( const proxy = await deploy('Lib_ResolvedDelegateProxy', {
await ethers.getContractFactory('Lib_ResolvedDelegateProxy') args: [AddressManager.address, 'L1CrossDomainMessenger'],
).deploy(AddressManager.address, 'L1CrossDomainMessenger') })
L1CrossDomainMessenger = xDomainMessengerImpl.attach(proxy.address) L1CrossDomainMessenger = xDomainMessengerImpl.attach(proxy.address)
await L1CrossDomainMessenger.initialize(AddressManager.address) await L1CrossDomainMessenger.initialize(AddressManager.address)
}) })
// 4 Bridge
let L1ERC20: MockContract<Contract> let L1ERC20: MockContract<Contract>
let L1StandardBridge: Contract let L1StandardBridge: Contract
before('Deploy the bridge and setup the token', async () => { before('Deploy the bridge and setup the token', async () => {
// Deploy the Bridge L1StandardBridge = await deploy('L1StandardBridge')
L1StandardBridge = await (
await ethers.getContractFactory('L1StandardBridge')
).deploy()
await L1StandardBridge.initialize( await L1StandardBridge.initialize(
L1CrossDomainMessenger.address, L1CrossDomainMessenger.address,
NON_ZERO_ADDRESS NON_ZERO_ADDRESS
) )
L1ERC20 = await ( L1ERC20 = await (await smock.mock('ERC20')).deploy('L1ERC20', 'ERC')
await smock.mock('@openzeppelin/contracts/token/ERC20/ERC20.sol:ERC20')
).deploy('L1ERC20', 'ERC')
const aliceAddress = await alice.getAddress()
await L1ERC20.setVariable('_totalSupply', INITIAL_TOTAL_L1_SUPPLY) await L1ERC20.setVariable('_totalSupply', INITIAL_TOTAL_L1_SUPPLY)
await L1ERC20.setVariable('_balances', { await L1ERC20.setVariable('_balances', {
[aliceAddress]: INITIAL_TOTAL_L1_SUPPLY, [alice.address]: INITIAL_TOTAL_L1_SUPPLY,
}) })
}) })
describe('[GAS BENCHMARK] L1 to L2 Deposit costs [ @skip-on-coverage ]', async () => { describe('[GAS BENCHMARK] L1 to L2 Deposit costs [ @skip-on-coverage ]', async () => {
const depositAmount = 1_000 const depositAmount = 1_000
before(async () => { before(async () => {
// Load a transaction into the queue first to 'dirty' the buffer's length slot // Load a transaction into the queue first to 'dirty' the buffer's length slot
await CanonicalTransactionChain.enqueue( await CanonicalTransactionChain.enqueue(
...@@ -124,6 +106,7 @@ describe('[GAS BENCHMARK] Depositing via the standard bridge [ @skip-on-coverage ...@@ -124,6 +106,7 @@ describe('[GAS BENCHMARK] Depositing via the standard bridge [ @skip-on-coverage
'0x1234' '0x1234'
) )
}) })
it('cost to deposit ETH', async () => { it('cost to deposit ETH', async () => {
// Alice calls deposit on the bridge and the L1 bridge calls transferFrom on the token. // Alice calls deposit on the bridge and the L1 bridge calls transferFrom on the token.
const res = await L1StandardBridge.connect(alice).depositETH( const res = await L1StandardBridge.connect(alice).depositETH(
...@@ -137,12 +120,14 @@ describe('[GAS BENCHMARK] Depositing via the standard bridge [ @skip-on-coverage ...@@ -137,12 +120,14 @@ describe('[GAS BENCHMARK] Depositing via the standard bridge [ @skip-on-coverage
const receipt = await res.wait() const receipt = await res.wait()
const gasUsed = receipt.gasUsed.toNumber() const gasUsed = receipt.gasUsed.toNumber()
console.log(' - Gas used:', gasUsed) console.log(' - Gas used:', gasUsed)
expectApprox(gasUsed, 132_481, { expectApprox(gasUsed, 132_481, {
absoluteUpperDeviation: 500, absoluteUpperDeviation: 500,
// Assert a lower bound of 1% reduction on gas cost. If your tests are breaking because your // Assert a lower bound of 1% reduction on gas cost. If your tests are breaking because your
// contracts are too efficient, consider updating the target value! // contracts are too efficient, consider updating the target value!
percentLowerDeviation: 1, percentLowerDeviation: 1,
}) })
// Sanity check that the message was enqueued. // Sanity check that the message was enqueued.
expect(await CanonicalTransactionChain.getQueueLength()).to.equal(2) expect(await CanonicalTransactionChain.getQueueLength()).to.equal(2)
}) })
...@@ -152,6 +137,7 @@ describe('[GAS BENCHMARK] Depositing via the standard bridge [ @skip-on-coverage ...@@ -152,6 +137,7 @@ describe('[GAS BENCHMARK] Depositing via the standard bridge [ @skip-on-coverage
L1StandardBridge.address, L1StandardBridge.address,
depositAmount depositAmount
) )
// Alice calls deposit on the bridge and the L1 bridge calls transferFrom on the token. // Alice calls deposit on the bridge and the L1 bridge calls transferFrom on the token.
const res = await L1StandardBridge.connect(alice).depositERC20( const res = await L1StandardBridge.connect(alice).depositERC20(
L1ERC20.address, L1ERC20.address,
...@@ -160,9 +146,11 @@ describe('[GAS BENCHMARK] Depositing via the standard bridge [ @skip-on-coverage ...@@ -160,9 +146,11 @@ describe('[GAS BENCHMARK] Depositing via the standard bridge [ @skip-on-coverage
FINALIZATION_GAS, FINALIZATION_GAS,
NON_NULL_BYTES32 NON_NULL_BYTES32
) )
const receipt = await res.wait() const receipt = await res.wait()
const gasUsed = receipt.gasUsed.toNumber() const gasUsed = receipt.gasUsed.toNumber()
console.log(' - Gas used:', gasUsed) console.log(' - Gas used:', gasUsed)
expectApprox(gasUsed, 192_822, { expectApprox(gasUsed, 192_822, {
absoluteUpperDeviation: 500, absoluteUpperDeviation: 500,
// Assert a lower bound of 1% reduction on gas cost. If your tests are breaking because your // Assert a lower bound of 1% reduction on gas cost. If your tests are breaking because your
......
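The bridge benchmark above, and the transaction chain benchmark that follows, now deploy contracts through a shared `deploy` test helper instead of building a `ContractFactory` for every contract. The helper itself is not part of this excerpt; judging from its call sites (a contract name plus optional `args` and `signer`), a minimal sketch assuming hardhat-ethers could look like this:

```ts
import { ethers } from 'hardhat'
import { Contract, Signer } from 'ethers'

// Sketch only: deploy a named contract, optionally from a specific signer and
// with constructor arguments, mirroring how the tests above call deploy().
export const deploy = async (
  name: string,
  opts: { args?: any[]; signer?: Signer } = {}
): Promise<Contract> => {
  const factory = await ethers.getContractFactory(name, opts.signer)
  return factory.deploy(...(opts.args ?? []))
}
```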
/* External Imports */ /* External Imports */
import { ethers } from 'hardhat' import { ethers } from 'hardhat'
import { Signer, ContractFactory, Contract } from 'ethers' import { Signer, Contract } from 'ethers'
import { smock, FakeContract } from '@defi-wonderland/smock' import { smock, FakeContract } from '@defi-wonderland/smock'
import { import {
AppendSequencerBatchParams, AppendSequencerBatchParams,
...@@ -9,11 +9,10 @@ import { ...@@ -9,11 +9,10 @@ import {
expectApprox, expectApprox,
} from '@eth-optimism/core-utils' } from '@eth-optimism/core-utils'
import { TransactionResponse } from '@ethersproject/abstract-provider' import { TransactionResponse } from '@ethersproject/abstract-provider'
import { keccak256 } from 'ethers/lib/utils'
/* Internal Imports */ /* Internal Imports */
import { import {
makeAddressManager, deploy,
setProxyTarget, setProxyTarget,
L2_GAS_DISCOUNT_DIVISOR, L2_GAS_DISCOUNT_DIVISOR,
ENQUEUE_GAS_COST, ENQUEUE_GAS_COST,
...@@ -31,11 +30,11 @@ const appendSequencerBatch = async ( ...@@ -31,11 +30,11 @@ const appendSequencerBatch = async (
CanonicalTransactionChain: Contract, CanonicalTransactionChain: Contract,
batch: AppendSequencerBatchParams batch: AppendSequencerBatchParams
): Promise<TransactionResponse> => { ): Promise<TransactionResponse> => {
const methodId = keccak256(Buffer.from('appendSequencerBatch()')).slice(2, 10)
const calldata = encodeAppendSequencerBatch(batch)
return CanonicalTransactionChain.signer.sendTransaction({ return CanonicalTransactionChain.signer.sendTransaction({
to: CanonicalTransactionChain.address, to: CanonicalTransactionChain.address,
data: '0x' + methodId + calldata, data:
ethers.utils.id('appendSequencerBatch()').slice(0, 10) +
encodeAppendSequencerBatch(batch),
}) })
} }
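The rewritten helper derives the 4-byte selector with `ethers.utils.id`, which is the keccak-256 hash of the UTF-8 signature string, so its first ten characters ('0x' plus four bytes) are exactly the selector the removed `keccak256(Buffer.from(...))` expression produced. A small illustration:

```ts
import { ethers } from 'ethers'

// ethers.utils.id(s) returns keccak256(toUtf8Bytes(s)) as a 0x-prefixed hex
// string, so slicing the first ten characters yields the function selector.
const selector = ethers.utils.id('appendSequencerBatch()').slice(0, 10)
console.log(selector) // the 4-byte selector prepended to the encoded batch
```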
...@@ -47,15 +46,17 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () = ...@@ -47,15 +46,17 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () =
let AddressManager: Contract let AddressManager: Contract
let Fake__StateCommitmentChain: FakeContract let Fake__StateCommitmentChain: FakeContract
before(async () => { let CanonicalTransactionChain: Contract
AddressManager = await makeAddressManager() beforeEach(async () => {
AddressManager = await deploy('Lib_AddressManager')
await AddressManager.setAddress( await AddressManager.setAddress(
'OVM_Sequencer', 'OVM_Sequencer',
await sequencer.getAddress() await sequencer.getAddress()
) )
Fake__StateCommitmentChain = await smock.fake<Contract>( Fake__StateCommitmentChain = await smock.fake<Contract>(
await ethers.getContractFactory('StateCommitmentChain') 'StateCommitmentChain'
) )
await setProxyTarget( await setProxyTarget(
...@@ -63,37 +64,20 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () = ...@@ -63,37 +64,20 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () =
'StateCommitmentChain', 'StateCommitmentChain',
Fake__StateCommitmentChain Fake__StateCommitmentChain
) )
})
let Factory__CanonicalTransactionChain: ContractFactory
let Factory__ChainStorageContainer: ContractFactory
before(async () => {
Factory__CanonicalTransactionChain = await ethers.getContractFactory(
'CanonicalTransactionChain'
)
Factory__ChainStorageContainer = await ethers.getContractFactory(
'ChainStorageContainer'
)
})
let CanonicalTransactionChain: Contract CanonicalTransactionChain = await deploy('CanonicalTransactionChain', {
beforeEach(async () => { signer: sequencer,
CanonicalTransactionChain = await Factory__CanonicalTransactionChain.deploy( args: [
AddressManager.address, AddressManager.address,
MAX_GAS_LIMIT, MAX_GAS_LIMIT,
L2_GAS_DISCOUNT_DIVISOR, L2_GAS_DISCOUNT_DIVISOR,
ENQUEUE_GAS_COST ENQUEUE_GAS_COST,
) ],
})
const batches = await Factory__ChainStorageContainer.deploy( const batches = await deploy('ChainStorageContainer', {
AddressManager.address, args: [AddressManager.address, 'CanonicalTransactionChain'],
'CanonicalTransactionChain' })
)
await Factory__ChainStorageContainer.deploy(
AddressManager.address,
'CanonicalTransactionChain'
)
await AddressManager.setAddress( await AddressManager.setAddress(
'ChainStorageContainer-CTC-batches', 'ChainStorageContainer-CTC-batches',
...@@ -107,12 +91,7 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () = ...@@ -107,12 +91,7 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () =
}) })
describe('appendSequencerBatch [ @skip-on-coverage ]', () => { describe('appendSequencerBatch [ @skip-on-coverage ]', () => {
beforeEach(() => {
CanonicalTransactionChain = CanonicalTransactionChain.connect(sequencer)
})
it('200 transactions in a single context', async () => { it('200 transactions in a single context', async () => {
console.log(`Benchmark: 200 transactions in a single context.`)
const timestamp = (await getEthTime(ethers.provider)) - 100 const timestamp = (await getEthTime(ethers.provider)) - 100
const blockNumber = await getNextBlockNumber(ethers.provider) const blockNumber = await getNextBlockNumber(ethers.provider)
...@@ -143,13 +122,12 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () = ...@@ -143,13 +122,12 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () =
const receipt = await res.wait() const receipt = await res.wait()
const gasUsed = receipt.gasUsed.toNumber() const gasUsed = receipt.gasUsed.toNumber()
console.log('Benchmark complete.')
console.log('Fixed calldata cost:', fixedCalldataCost) console.log('Fixed calldata cost:', fixedCalldataCost)
console.log( console.log(
'Non-calldata overhead gas cost per transaction:', 'Non-calldata overhead gas cost per transaction:',
(gasUsed - fixedCalldataCost) / numTxs (gasUsed - fixedCalldataCost) / numTxs
) )
expectApprox(gasUsed, 1_402_638, { expectApprox(gasUsed, 1_402_638, {
absoluteUpperDeviation: 1000, absoluteUpperDeviation: 1000,
// Assert a lower bound of 1% reduction on gas cost. If your tests are breaking because your // Assert a lower bound of 1% reduction on gas cost. If your tests are breaking because your
...@@ -159,7 +137,6 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () = ...@@ -159,7 +137,6 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () =
}).timeout(10_000_000) }).timeout(10_000_000)
it('200 transactions in 200 contexts', async () => { it('200 transactions in 200 contexts', async () => {
console.log(`Benchmark: 200 transactions in 200 contexts.`)
const timestamp = (await getEthTime(ethers.provider)) - 100 const timestamp = (await getEthTime(ethers.provider)) - 100
const blockNumber = await getNextBlockNumber(ethers.provider) const blockNumber = await getNextBlockNumber(ethers.provider)
...@@ -190,13 +167,12 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () = ...@@ -190,13 +167,12 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () =
const receipt = await res.wait() const receipt = await res.wait()
const gasUsed = receipt.gasUsed.toNumber() const gasUsed = receipt.gasUsed.toNumber()
console.log('Benchmark complete.')
console.log('Fixed calldata cost:', fixedCalldataCost) console.log('Fixed calldata cost:', fixedCalldataCost)
console.log( console.log(
'Non-calldata overhead gas cost per transaction:', 'Non-calldata overhead gas cost per transaction:',
(gasUsed - fixedCalldataCost) / numTxs (gasUsed - fixedCalldataCost) / numTxs
) )
expectApprox(gasUsed, 1_619_781, { expectApprox(gasUsed, 1_619_781, {
absoluteUpperDeviation: 1000, absoluteUpperDeviation: 1000,
// Assert a lower bound of 1% reduction on gas cost. If your tests are breaking because your // Assert a lower bound of 1% reduction on gas cost. If your tests are breaking because your
...@@ -248,12 +224,12 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () = ...@@ -248,12 +224,12 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () =
const gasUsed = receipt.gasUsed.toNumber() const gasUsed = receipt.gasUsed.toNumber()
console.log('Benchmark complete.') console.log('Benchmark complete.')
console.log('Fixed calldata cost:', fixedCalldataCost) console.log('Fixed calldata cost:', fixedCalldataCost)
console.log( console.log(
'Non-calldata overhead gas cost per transaction:', 'Non-calldata overhead gas cost per transaction:',
(gasUsed - fixedCalldataCost) / numTxs (gasUsed - fixedCalldataCost) / numTxs
) )
expectApprox(gasUsed, 891_158, { expectApprox(gasUsed, 891_158, {
absoluteUpperDeviation: 1000, absoluteUpperDeviation: 1000,
// Assert a lower bound of 1% reduction on gas cost. If your tests are breaking because your // Assert a lower bound of 1% reduction on gas cost. If your tests are breaking because your
...@@ -264,13 +240,12 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () = ...@@ -264,13 +240,12 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () =
}) })
describe('enqueue [ @skip-on-coverage ]', () => { describe('enqueue [ @skip-on-coverage ]', () => {
let enqueueL2GasPrepaid const data = '0x' + '12'.repeat(1234)
let data
let enqueueL2GasPrepaid: number
beforeEach(async () => { beforeEach(async () => {
CanonicalTransactionChain = CanonicalTransactionChain.connect(sequencer)
enqueueL2GasPrepaid = enqueueL2GasPrepaid =
await CanonicalTransactionChain.enqueueL2GasPrepaid() await CanonicalTransactionChain.enqueueL2GasPrepaid()
data = '0x' + '12'.repeat(1234)
}) })
it('cost to enqueue a transaction above the prepaid threshold', async () => { it('cost to enqueue a transaction above the prepaid threshold', async () => {
...@@ -281,11 +256,10 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () = ...@@ -281,11 +256,10 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () =
l2GasLimit, l2GasLimit,
data data
) )
const receipt = await res.wait() const receipt = await res.wait()
const gasUsed = receipt.gasUsed.toNumber() const gasUsed = receipt.gasUsed.toNumber()
console.log('Benchmark complete.')
expectApprox(gasUsed, 196_687, { expectApprox(gasUsed, 196_687, {
absoluteUpperDeviation: 500, absoluteUpperDeviation: 500,
// Assert a lower bound of 1% reduction on gas cost. If your tests are breaking because your // Assert a lower bound of 1% reduction on gas cost. If your tests are breaking because your
...@@ -302,11 +276,10 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () = ...@@ -302,11 +276,10 @@ describe('[GAS BENCHMARK] CanonicalTransactionChain [ @skip-on-coverage ]', () =
l2GasLimit, l2GasLimit,
data data
) )
const receipt = await res.wait() const receipt = await res.wait()
const gasUsed = receipt.gasUsed.toNumber() const gasUsed = receipt.gasUsed.toNumber()
console.log('Benchmark complete.')
expectApprox(gasUsed, 134_100, { expectApprox(gasUsed, 134_100, {
absoluteUpperDeviation: 500, absoluteUpperDeviation: 500,
// Assert a lower bound of 1% reduction on gas cost. If your tests are breaking because your // Assert a lower bound of 1% reduction on gas cost. If your tests are breaking because your
......
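Every benchmark above pins its gas usage with `expectApprox` from `@eth-optimism/core-utils`: the measured value may exceed the target by at most `absoluteUpperDeviation` gas and may fall below it by at most `percentLowerDeviation` percent. A simplified sketch of that check (not the library's actual implementation):

```ts
import { expect } from 'chai'

// Simplified stand-in for the expectApprox checks used in these benchmarks.
const expectApproxSketch = (
  actual: number,
  target: number,
  opts: { absoluteUpperDeviation?: number; percentLowerDeviation?: number }
): void => {
  if (opts.absoluteUpperDeviation !== undefined) {
    expect(actual).to.be.lte(target + opts.absoluteUpperDeviation)
  }
  if (opts.percentLowerDeviation !== undefined) {
    expect(actual).to.be.gte(target * (1 - opts.percentLowerDeviation / 100))
  }
}
```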
/* External Imports */
import { ethers } from 'hardhat' import { ethers } from 'hardhat'
import { Contract, Signer, ContractFactory } from 'ethers' import { Contract } from 'ethers'
import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers'
/* Internal Imports */
import { expect } from '../../../setup' import { expect } from '../../../setup'
import { makeAddressManager, NON_NULL_BYTES32 } from '../../../helpers' import { deploy, NON_NULL_BYTES32 } from '../../../helpers'
describe('ChainStorageContainer', () => { describe('ChainStorageContainer', () => {
let sequencer: Signer let signer1: SignerWithAddress
let otherSigner: Signer let signer2: SignerWithAddress
let signer: Signer
let signerAddress: string
let AddressManager: Contract
let Factory__ChainStorageContainer: ContractFactory
before(async () => { before(async () => {
;[sequencer, otherSigner, signer] = await ethers.getSigners() ;[signer1, signer2] = await ethers.getSigners()
signerAddress = await otherSigner.getAddress()
AddressManager = await makeAddressManager()
await AddressManager.setAddress(
'OVM_Sequencer',
await sequencer.getAddress()
)
Factory__ChainStorageContainer = await ethers.getContractFactory(
'ChainStorageContainer'
)
}) })
let AddressManager: Contract
let ChainStorageContainer: Contract let ChainStorageContainer: Contract
beforeEach(async () => { beforeEach(async () => {
ChainStorageContainer = await Factory__ChainStorageContainer.connect( AddressManager = await deploy('Lib_AddressManager')
otherSigner ChainStorageContainer = await deploy('ChainStorageContainer', {
).deploy(AddressManager.address, signerAddress) signer: signer1,
args: [AddressManager.address, signer1.address],
await AddressManager.setAddress( })
'ChainStorageContainer',
ChainStorageContainer.address
)
await AddressManager.setAddress(signerAddress, signerAddress) // ChainStorageContainer uses name resolution to check the owner address.
await AddressManager.setAddress(signer1.address, signer1.address)
}) })
describe('push', () => { describe('push', () => {
for (const len of [1, 2, 4, 8, 32]) { for (const len of [1, 2, 4, 8, 32]) {
it(`it should be able to add ${len} element(s) to the array`, async () => { it(`it should be able to add ${len} element(s) to the array`, async () => {
for (let i = 0; i < len; i++) { for (let i = 0; i < len; i++) {
await expect( await expect(ChainStorageContainer['push(bytes32)'](NON_NULL_BYTES32))
ChainStorageContainer.connect(otherSigner)['push(bytes32)']( .to.not.be.reverted
NON_NULL_BYTES32
)
).to.not.be.reverted
} }
}) })
} }
...@@ -60,9 +39,7 @@ describe('ChainStorageContainer', () => { ...@@ -60,9 +39,7 @@ describe('ChainStorageContainer', () => {
describe('setGlobalMetadata', () => { describe('setGlobalMetadata', () => {
it('should modify the extra data', async () => { it('should modify the extra data', async () => {
const globalMetaData = `0x${'11'.repeat(27)}` const globalMetaData = `0x${'11'.repeat(27)}`
await ChainStorageContainer.connect(otherSigner).setGlobalMetadata( await ChainStorageContainer.setGlobalMetadata(globalMetaData)
globalMetaData
)
expect(await ChainStorageContainer.getGlobalMetadata()).to.equal( expect(await ChainStorageContainer.getGlobalMetadata()).to.equal(
globalMetaData globalMetaData
...@@ -73,15 +50,13 @@ describe('ChainStorageContainer', () => { ...@@ -73,15 +50,13 @@ describe('ChainStorageContainer', () => {
describe('deleteElementsAfterInclusive', () => { describe('deleteElementsAfterInclusive', () => {
it('should revert when the array is empty', async () => { it('should revert when the array is empty', async () => {
await expect( await expect(
ChainStorageContainer.connect(otherSigner)[ ChainStorageContainer['deleteElementsAfterInclusive(uint256)'](0)
'deleteElementsAfterInclusive(uint256)'
](0)
).to.be.reverted ).to.be.reverted
}) })
it('should revert when called by non-owner', async () => { it('should revert when called by non-owner', async () => {
await expect( await expect(
ChainStorageContainer.connect(signer)[ ChainStorageContainer.connect(signer2)[
'deleteElementsAfterInclusive(uint256)' 'deleteElementsAfterInclusive(uint256)'
](0) ](0)
).to.be.revertedWith( ).to.be.revertedWith(
...@@ -96,18 +71,14 @@ describe('ChainStorageContainer', () => { ...@@ -96,18 +71,14 @@ describe('ChainStorageContainer', () => {
for (let i = 0; i < len; i++) { for (let i = 0; i < len; i++) {
const value = NON_NULL_BYTES32 const value = NON_NULL_BYTES32
values.push(value) values.push(value)
await ChainStorageContainer.connect(otherSigner)['push(bytes32)']( await ChainStorageContainer['push(bytes32)'](value)
value
)
} }
}) })
for (let i = len - 1; i > 0; i -= Math.max(1, len / 4)) { for (let i = len - 1; i > 0; i -= Math.max(1, len / 4)) {
it(`should be able to delete everything after and including the ${i}th/st/rd/whatever element`, async () => { it(`should be able to delete everything after and including the ${i}th/st/rd/whatever element`, async () => {
await expect( await expect(
ChainStorageContainer.connect(otherSigner)[ ChainStorageContainer['deleteElementsAfterInclusive(uint256)'](i)
'deleteElementsAfterInclusive(uint256)'
](i)
).to.not.be.reverted ).to.not.be.reverted
expect(await ChainStorageContainer.length()).to.equal(i) expect(await ChainStorageContainer.length()).to.equal(i)
......
/* External Imports */
import { ethers } from 'hardhat' import { ethers } from 'hardhat'
import { Signer, Contract } from 'ethers' import { Contract } from 'ethers'
import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers'
/* Internal Imports */
import { expect } from '../../../setup' import { expect } from '../../../setup'
import { makeAddressManager } from '../../../helpers' import { deploy } from '../../../helpers'
describe('BondManager', () => { describe('BondManager', () => {
let sequencer: Signer let sequencer: SignerWithAddress
let nonSequencer: Signer let nonSequencer: SignerWithAddress
before(async () => { before(async () => {
;[sequencer, nonSequencer] = await ethers.getSigners() ;[sequencer, nonSequencer] = await ethers.getSigners()
}) })
let AddressManager: Contract let AddressManager: Contract
before(async () => {
AddressManager = await makeAddressManager()
})
let BondManager: Contract let BondManager: Contract
before(async () => { beforeEach(async () => {
BondManager = await ( AddressManager = await deploy('Lib_AddressManager')
await ethers.getContractFactory('BondManager')
).deploy(AddressManager.address) BondManager = await deploy('BondManager', {
args: [AddressManager.address],
})
AddressManager.setAddress('OVM_Proposer', await sequencer.getAddress()) AddressManager.setAddress('OVM_Proposer', sequencer.address)
}) })
describe('isCollateralized', () => { describe('isCollateralized', () => {
it('should return true for OVM_Proposer', async () => { it('should return true for OVM_Proposer', async () => {
expect( expect(await BondManager.isCollateralized(sequencer.address)).to.equal(
await BondManager.isCollateralized(await sequencer.getAddress()) true
).to.equal(true) )
}) })
it('should return false for non-sequencer', async () => { it('should return false for non-sequencer', async () => {
expect( expect(await BondManager.isCollateralized(nonSequencer.address)).to.equal(
await BondManager.isCollateralized(await nonSequencer.getAddress()) false
).to.equal(false) )
}) })
}) })
}) })
...@@ -28,7 +28,7 @@ describe.skip('OVM_L2ToL1MessagePasser', () => { ...@@ -28,7 +28,7 @@ describe.skip('OVM_L2ToL1MessagePasser', () => {
let Fake__OVM_ExecutionManager: FakeContract let Fake__OVM_ExecutionManager: FakeContract
before(async () => { before(async () => {
Fake__OVM_ExecutionManager = await smock.fake<Contract>( Fake__OVM_ExecutionManager = await smock.fake<Contract>(
await ethers.getContractFactory('OVM_ExecutionManager') 'OVM_ExecutionManager'
) )
}) })
......
...@@ -39,9 +39,7 @@ describe('Lib_MerkleTree', () => { ...@@ -39,9 +39,7 @@ describe('Lib_MerkleTree', () => {
await ethers.getContractFactory('TestLib_MerkleTree') await ethers.getContractFactory('TestLib_MerkleTree')
).deploy() ).deploy()
Fake__LibMerkleTree = await smock.fake( Fake__LibMerkleTree = await smock.fake('TestLib_MerkleTree')
await ethers.getContractFactory('TestLib_MerkleTree')
)
}) })
describe('getMerkleRoot', () => { describe('getMerkleRoot', () => {
......
import { toRpcHexString } from '@eth-optimism/core-utils'
import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers'
import { BigNumber } from 'ethers'
import hre from 'hardhat'
export const impersonate = async (
address: string,
balance?: string | number | BigNumber
): Promise<SignerWithAddress> => {
await hre.network.provider.request({
method: 'hardhat_impersonateAccount',
params: [address],
})
if (balance !== undefined) {
await hre.network.provider.request({
method: 'hardhat_setBalance',
params: [address, toRpcHexString(BigNumber.from(balance))],
})
}
return hre.ethers.getSigner(address)
}
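A hypothetical use of the new `impersonate` helper in a hardhat test; the import path and addresses below are placeholders rather than part of this change:

```ts
import { ethers } from 'hardhat'
import { impersonate } from '../helpers' // hypothetical path to the helper

it('sends a transaction from an impersonated account', async () => {
  // Placeholder address; any externally-owned or contract address works.
  const signer = await impersonate(
    '0x0000000000000000000000000000000000001234',
    ethers.utils.parseEther('1') // fund the account so it can pay for gas
  )
  await signer.sendTransaction({ to: ethers.constants.AddressZero })
})
```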
export * from './eth-time' export * from './eth-time'
export * from './deploy' export * from './deploy'
export * from './impersonation'