Commit b256b358 authored by Andreas Bigger's avatar Andreas Bigger

fix: upstream sync

parents 9ea1b53b e535dd27
---
'@eth-optimism/l2geth': patch
---
Use default gas cap of 25 million
...@@ -2,4 +2,4 @@ ...@@ -2,4 +2,4 @@
'@eth-optimism/common-ts': patch '@eth-optimism/common-ts': patch
--- ---
Fix unknown option error in base service v2 Fixes a minor bug where the provider name was incorrectly logged when using waitForProvider
---
'@eth-optimism/contracts-periphery': patch
---
Update the attestation station impl to 1.1.0
---
'@eth-optimism/contracts-bedrock': patch
---
Added a test for large deposit gaps
...@@ -30,10 +30,10 @@ ...@@ -30,10 +30,10 @@
"devDependencies": { "devDependencies": {
"@babel/eslint-parser": "^7.5.4", "@babel/eslint-parser": "^7.5.4",
"@eth-optimism/contracts": "^0.5.40", "@eth-optimism/contracts": "^0.5.40",
"@eth-optimism/contracts-bedrock": "0.11.2", "@eth-optimism/contracts-bedrock": "0.11.3",
"@eth-optimism/contracts-periphery": "^1.0.6", "@eth-optimism/contracts-periphery": "^1.0.7",
"@eth-optimism/core-utils": "0.12.0", "@eth-optimism/core-utils": "0.12.0",
"@eth-optimism/sdk": "1.10.0", "@eth-optimism/sdk": "1.10.1",
"@ethersproject/abstract-provider": "^5.7.0", "@ethersproject/abstract-provider": "^5.7.0",
"@ethersproject/providers": "^5.7.0", "@ethersproject/providers": "^5.7.0",
"@ethersproject/transactions": "^5.7.0", "@ethersproject/transactions": "^5.7.0",
......
# Changelog # Changelog
## 0.5.32
### Patch Changes
- ea817097b: Use default gas cap of 25 million
## 0.5.31 ## 0.5.31
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/l2geth", "name": "@eth-optimism/l2geth",
"version": "0.5.31", "version": "0.5.32",
"private": true, "private": true,
"devDependencies": {} "devDependencies": {}
} }
...@@ -9,7 +9,6 @@ import ( ...@@ -9,7 +9,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
...@@ -29,7 +28,7 @@ var ( ...@@ -29,7 +28,7 @@ var (
} }
) )
func MigrateLegacyETH(ldb ethdb.Database, db *state.StateDB, addresses []common.Address, chainID int, noCheck bool) error { func MigrateLegacyETH(db *state.StateDB, addresses []common.Address, chainID int, noCheck bool) error {
// Chain params to use for integrity checking. // Chain params to use for integrity checking.
params := migration.ParamsByChainID[chainID] params := migration.ParamsByChainID[chainID]
if params == nil { if params == nil {
......
...@@ -8,7 +8,6 @@ import ( ...@@ -8,7 +8,6 @@ import (
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis/migration" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis/migration"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
...@@ -31,6 +30,8 @@ func PreCheckBalances(ldb ethdb.Database, db *state.StateDB, addresses []common. ...@@ -31,6 +30,8 @@ func PreCheckBalances(ldb ethdb.Database, db *state.StateDB, addresses []common.
slotsInp := make(map[common.Hash]int) slotsInp := make(map[common.Hash]int)
// For each known address, compute its balance key and add it to the list of addresses. // For each known address, compute its balance key and add it to the list of addresses.
// Mint events are instrumented as regular ETH events in the witness data, so we no longer
// need to iterate over mint events during the migration.
for _, addr := range addresses { for _, addr := range addresses {
addrs = append(addrs, addr) addrs = append(addrs, addr)
slotsInp[CalcOVMETHStorageKey(addr)] = 1 slotsInp[CalcOVMETHStorageKey(addr)] = 1
...@@ -48,28 +49,11 @@ func PreCheckBalances(ldb ethdb.Database, db *state.StateDB, addresses []common. ...@@ -48,28 +49,11 @@ func PreCheckBalances(ldb ethdb.Database, db *state.StateDB, addresses []common.
addrs = append(addrs, sequencerEntrypointAddr) addrs = append(addrs, sequencerEntrypointAddr)
slotsInp[CalcOVMETHStorageKey(sequencerEntrypointAddr)] = 1 slotsInp[CalcOVMETHStorageKey(sequencerEntrypointAddr)] = 1
// Also extract addresses/slots from Mint events. Our instrumentation currently only looks at
// direct balance changes inside of Geth, but Mint events mutate the ERC20 storage directly and
// therefore aren't picked up by our instrumentation. Instead of updating the instrumentation,
// we can simply iterate over every Mint event and add the address to the list of addresses.
log.Info("Reading mint events from DB")
headBlock := rawdb.ReadHeadBlock(ldb)
logProgress := ProgressLogger(100, "read mint events")
err := IterateMintEvents(ldb, headBlock.NumberU64(), func(address common.Address, headNum uint64) error {
addrs = append(addrs, address)
slotsInp[CalcOVMETHStorageKey(address)] = 1
logProgress("headnum", headNum)
return nil
})
if err != nil {
return nil, wrapErr(err, "error reading mint events")
}
// Build a mapping of every storage slot in the LegacyERC20ETH contract, except the list of // Build a mapping of every storage slot in the LegacyERC20ETH contract, except the list of
// slots that we know we can ignore (totalSupply, name, symbol). // slots that we know we can ignore (totalSupply, name, symbol).
var count int var count int
slotsAct := make(map[common.Hash]common.Hash) slotsAct := make(map[common.Hash]common.Hash)
err = db.ForEachStorage(predeploys.LegacyERC20ETHAddr, func(key, value common.Hash) bool { err := db.ForEachStorage(predeploys.LegacyERC20ETHAddr, func(key, value common.Hash) bool {
// We can safely ignore specific slots (totalSupply, name, symbol). // We can safely ignore specific slots (totalSupply, name, symbol).
if ignoredSlots[key] { if ignoredSlots[key] {
return true return true
......
package ether package ether
import ( import (
"fmt"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
func wrapErr(err error, msg string, ctx ...any) error {
return fmt.Errorf("%s: %w", fmt.Sprintf(msg, ctx...), err)
}
func ProgressLogger(n int, msg string) func(...any) { func ProgressLogger(n int, msg string) func(...any) {
var i int var i int
......
...@@ -193,7 +193,7 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m ...@@ -193,7 +193,7 @@ func MigrateDB(ldb ethdb.Database, config *DeployConfig, l1Block *types.Block, m
// Finally we migrate the balances held inside the LegacyERC20ETH contract into the state trie. // Finally we migrate the balances held inside the LegacyERC20ETH contract into the state trie.
// Note that we do NOT delete the balances from the LegacyERC20ETH contract. // Note that we do NOT delete the balances from the LegacyERC20ETH contract.
log.Info("Starting to migrate ERC20 ETH") log.Info("Starting to migrate ERC20 ETH")
err = ether.MigrateLegacyETH(ldb, db, addrs, int(config.L1ChainID), noCheck) err = ether.MigrateLegacyETH(db, addrs, int(config.L1ChainID), noCheck)
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot migrate legacy eth: %w", err) return nil, fmt.Errorf("cannot migrate legacy eth: %w", err)
} }
......
// Package heartbeat provides a service for sending heartbeats to a server.
package heartbeat package heartbeat
import ( import (
...@@ -22,6 +23,8 @@ type Payload struct { ...@@ -22,6 +23,8 @@ type Payload struct {
ChainID uint64 `json:"chainID"` ChainID uint64 `json:"chainID"`
} }
// Beat sends a heartbeat to the server at the given URL. It will send a heartbeat immediately, and then every SendInterval.
// Beat spawns a goroutine that will send heartbeats until the context is canceled.
func Beat( func Beat(
ctx context.Context, ctx context.Context,
log log.Logger, log log.Logger,
......
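For context, the behavior described in the new Beat doc comment (send one heartbeat immediately, then one every SendInterval, until the context is canceled) reduces to a ticker loop. The sketch below illustrates only that pattern; `heartbeatInterval` and `sendHeartbeat` are assumptions for the illustration, not the actual op-node heartbeat API, whose full signature is not shown in this hunk.

```go
// Illustration of the send-immediately-then-tick pattern described above.
// heartbeatInterval and sendHeartbeat are placeholders, not the real API.
package heartbeatsketch

import (
	"context"
	"log"
	"time"
)

const heartbeatInterval = 10 * time.Minute // assumed value for the sketch

func sendHeartbeat() error {
	// Placeholder for posting the heartbeat payload to the configured URL.
	return nil
}

// beatLoop sends one heartbeat right away, then one per tick, and stops
// when the context is canceled.
func beatLoop(ctx context.Context) {
	tick := time.NewTicker(heartbeatInterval)
	defer tick.Stop()
	for {
		if err := sendHeartbeat(); err != nil {
			log.Println("failed to send heartbeat:", err)
		}
		select {
		case <-tick.C:
		case <-ctx.Done():
			return
		}
	}
}
```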
// Package metrics provides a set of metrics for the op-node.
package metrics package metrics
import ( import (
...@@ -63,6 +64,7 @@ type Metricer interface { ...@@ -63,6 +64,7 @@ type Metricer interface {
Document() []metrics.DocumentedMetric Document() []metrics.DocumentedMetric
} }
// Metrics tracks all the metrics for the op-node.
type Metrics struct { type Metrics struct {
Info *prometheus.GaugeVec Info *prometheus.GaugeVec
Up prometheus.Gauge Up prometheus.Gauge
...@@ -118,6 +120,7 @@ type Metrics struct { ...@@ -118,6 +120,7 @@ type Metrics struct {
var _ Metricer = (*Metrics)(nil) var _ Metricer = (*Metrics)(nil)
// NewMetrics creates a new [Metrics] instance with the given process name.
func NewMetrics(procName string) *Metrics { func NewMetrics(procName string) *Metrics {
if procName == "" { if procName == "" {
procName = "default" procName = "default"
......
...@@ -140,6 +140,8 @@ func BuildGlobalGossipParams(cfg *rollup.Config) pubsub.GossipSubParams { ...@@ -140,6 +140,8 @@ func BuildGlobalGossipParams(cfg *rollup.Config) pubsub.GossipSubParams {
return params return params
} }
// NewGossipSub configures a new pubsub instance with the specified parameters.
// PubSub uses a GossipSubRouter as its router under the hood.
func NewGossipSub(p2pCtx context.Context, h host.Host, cfg *rollup.Config, gossipConf GossipSetupConfigurables, m GossipMetricer) (*pubsub.PubSub, error) { func NewGossipSub(p2pCtx context.Context, h host.Host, cfg *rollup.Config, gossipConf GossipSetupConfigurables, m GossipMetricer) (*pubsub.PubSub, error) {
denyList, err := pubsub.NewTimeCachedBlacklist(30 * time.Second) denyList, err := pubsub.NewTimeCachedBlacklist(30 * time.Second)
if err != nil { if err != nil {
......
...@@ -23,6 +23,7 @@ import ( ...@@ -23,6 +23,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
) )
// NodeP2P is a p2p node, which can be used to gossip messages.
type NodeP2P struct { type NodeP2P struct {
host host.Host // p2p host (optional, may be nil) host host.Host // p2p host (optional, may be nil)
gater ConnectionGater // p2p gater, to ban/unban peers with, may be nil even with p2p enabled gater ConnectionGater // p2p gater, to ban/unban peers with, may be nil even with p2p enabled
...@@ -34,6 +35,8 @@ type NodeP2P struct { ...@@ -34,6 +35,8 @@ type NodeP2P struct {
gsOut GossipOut // p2p gossip application interface for publishing gsOut GossipOut // p2p gossip application interface for publishing
} }
// NewNodeP2P creates a new p2p node, and returns a reference to it. If the p2p is disabled, it returns nil.
// If metrics are configured, a bandwidth monitor will be spawned in a goroutine.
func NewNodeP2P(resourcesCtx context.Context, rollupCfg *rollup.Config, log log.Logger, setup SetupP2P, gossipIn GossipIn, runCfg GossipRuntimeConfig, metrics metrics.Metricer) (*NodeP2P, error) { func NewNodeP2P(resourcesCtx context.Context, rollupCfg *rollup.Config, log log.Logger, setup SetupP2P, gossipIn GossipIn, runCfg GossipRuntimeConfig, metrics metrics.Metricer) (*NodeP2P, error) {
if setup == nil { if setup == nil {
return nil, errors.New("p2p node cannot be created without setup") return nil, errors.New("p2p node cannot be created without setup")
......
...@@ -12,9 +12,9 @@ import ( ...@@ -12,9 +12,9 @@ import (
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
) )
// IterativeBatchCall is an util to create a job to fetch many RPC requests in batches, // IterativeBatchCall batches many RPC requests with safe and easy parallelization.
// and enable the caller to parallelize easily and safely, handle and re-try errors, // Request errors are handled and re-tried, and the batch size is configurable.
// and pick a batch size all by simply calling Fetch again and again until it returns io.EOF. // Executing IterativeBatchCall is as simple as calling Fetch repeatedly until it returns io.EOF.
type IterativeBatchCall[K any, V any] struct { type IterativeBatchCall[K any, V any] struct {
completed uint32 // tracks how far to completing all requests we are completed uint32 // tracks how far to completing all requests we are
resetLock sync.RWMutex // ensures we do not concurrently read (incl. fetch) / reset resetLock sync.RWMutex // ensures we do not concurrently read (incl. fetch) / reset
...@@ -77,7 +77,7 @@ func (ibc *IterativeBatchCall[K, V]) Reset() { ...@@ -77,7 +77,7 @@ func (ibc *IterativeBatchCall[K, V]) Reset() {
} }
// Fetch fetches more of the data, and returns io.EOF when all data has been fetched. // Fetch fetches more of the data, and returns io.EOF when all data has been fetched.
// This method is safe to call concurrently: it will parallelize the fetching work. // This method is safe to call concurrently; it will parallelize the fetching work.
// If no work is available, but the fetching is not done yet, // If no work is available, but the fetching is not done yet,
// then Fetch will block until the next thing can be fetched, or until the context expires. // then Fetch will block until the next thing can be fetched, or until the context expires.
func (ibc *IterativeBatchCall[K, V]) Fetch(ctx context.Context) error { func (ibc *IterativeBatchCall[K, V]) Fetch(ctx context.Context) error {
......
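The reworded doc comment above says a caller drives IterativeBatchCall by calling Fetch repeatedly until it returns io.EOF. Below is a minimal sketch of that driver loop, assumed to live in the same package as IterativeBatchCall (with "context" and "io" imported); the constructor and the per-request K/V types are not shown in this diff.

```go
// drainBatchCall runs Fetch until the batch call reports io.EOF.
func drainBatchCall[K any, V any](ctx context.Context, ibc *IterativeBatchCall[K, V]) error {
	for {
		err := ibc.Fetch(ctx)
		if err == io.EOF {
			return nil // every request in the batch has completed
		}
		if err != nil {
			// Context expiry or an RPC error surfaced by the batch call.
			return err
		}
	}
}
```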
// Package sources exports a number of clients used to access ethereum chain data.
//
// There are a number of these exported clients used by the op-node:
// [L1Client] wraps an RPC client to retrieve L1 ethereum data.
// [L2Client] wraps an RPC client to retrieve L2 ethereum data.
// [RollupClient] wraps an RPC client to retrieve rollup data.
// [EngineClient] extends the [L2Client] providing engine API bindings.
//
// Internally, the listed clients wrap an [EthClient] which itself wraps a specified RPC client.
package sources package sources
import ( import (
...@@ -127,8 +136,8 @@ func (s *EthClient) OnReceiptsMethodErr(m ReceiptsFetchingMethod, err error) { ...@@ -127,8 +136,8 @@ func (s *EthClient) OnReceiptsMethodErr(m ReceiptsFetchingMethod, err error) {
} }
} }
// NewEthClient wraps a RPC with bindings to fetch ethereum data, // NewEthClient returns an [EthClient], wrapping an RPC with bindings to fetch ethereum data with added error logging,
// while logging errors, parallel-requests constraint, tracking metrics (optional), and caching. // metric tracking, and caching. The [EthClient] uses a [LimitRPC] wrapper to limit the number of concurrent RPC requests.
func NewEthClient(client client.RPC, log log.Logger, metrics caching.Metrics, config *EthClientConfig) (*EthClient, error) { func NewEthClient(client client.RPC, log log.Logger, metrics caching.Metrics, config *EthClientConfig) (*EthClient, error) {
if err := config.Check(); err != nil { if err := config.Check(); err != nil {
return nil, fmt.Errorf("bad config, cannot create L1 source: %w", err) return nil, fmt.Errorf("bad config, cannot create L1 source: %w", err)
......
...@@ -74,6 +74,8 @@ func (s *L1Client) L1ChainID(ctx context.Context) (*big.Int, error) { ...@@ -74,6 +74,8 @@ func (s *L1Client) L1ChainID(ctx context.Context) (*big.Int, error) {
return s.ChainID(ctx) return s.ChainID(ctx)
} }
// L1BlockRefByLabel returns the [eth.L1BlockRef] for the given block label.
// Note that we cannot cache a block reference by label because labels are not guaranteed to be unique.
func (s *L1Client) L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L1BlockRef, error) { func (s *L1Client) L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L1BlockRef, error) {
info, err := s.InfoByLabel(ctx, label) info, err := s.InfoByLabel(ctx, label)
if err != nil { if err != nil {
...@@ -89,6 +91,8 @@ func (s *L1Client) L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel) ...@@ -89,6 +91,8 @@ func (s *L1Client) L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel)
return ref, nil return ref, nil
} }
// L1BlockRefByNumber returns an [eth.L1BlockRef] for the given block number.
// Note that we cannot cache a block reference by number because L1 re-orgs can invalidate the cached block reference.
func (s *L1Client) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1BlockRef, error) { func (s *L1Client) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1BlockRef, error) {
info, err := s.InfoByNumber(ctx, num) info, err := s.InfoByNumber(ctx, num)
if err != nil { if err != nil {
...@@ -99,6 +103,8 @@ func (s *L1Client) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1Bl ...@@ -99,6 +103,8 @@ func (s *L1Client) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1Bl
return ref, nil return ref, nil
} }
// L1BlockRefByHash returns the [eth.L1BlockRef] for the given block hash.
// We cache the block reference by hash as it is safe to assume a hash collision will not occur.
func (s *L1Client) L1BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L1BlockRef, error) { func (s *L1Client) L1BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L1BlockRef, error) {
if v, ok := s.l1BlockRefsCache.Get(hash); ok { if v, ok := s.l1BlockRefsCache.Get(hash); ok {
return v.(eth.L1BlockRef), nil return v.(eth.L1BlockRef), nil
......
...@@ -71,6 +71,9 @@ type L2Client struct { ...@@ -71,6 +71,9 @@ type L2Client struct {
systemConfigsCache *caching.LRUCache systemConfigsCache *caching.LRUCache
} }
// NewL2Client constructs a new L2Client instance. The L2Client is a thin wrapper around the EthClient with added functions
// for fetching and caching eth.L2BlockRef values. This includes fetching an L2BlockRef by block number, label, or hash.
// See: [L2BlockRefByLabel], [L2BlockRefByNumber], [L2BlockRefByHash]
func NewL2Client(client client.RPC, log log.Logger, metrics caching.Metrics, config *L2ClientConfig) (*L2Client, error) { func NewL2Client(client client.RPC, log log.Logger, metrics caching.Metrics, config *L2ClientConfig) (*L2Client, error) {
ethClient, err := NewEthClient(client, log, metrics, &config.EthClientConfig) ethClient, err := NewEthClient(client, log, metrics, &config.EthClientConfig)
if err != nil { if err != nil {
...@@ -91,6 +94,7 @@ func (s *L2Client) L2ChainID(ctx context.Context) (*big.Int, error) { ...@@ -91,6 +94,7 @@ func (s *L2Client) L2ChainID(ctx context.Context) (*big.Int, error) {
} }
// L2BlockRefByLabel returns the L2 block reference for the given label. // L2BlockRefByLabel returns the L2 block reference for the given label.
// L2BlockRefByLabel returns the [eth.L2BlockRef] for the given block label.
func (s *L2Client) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error) { func (s *L2Client) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error) {
payload, err := s.PayloadByLabel(ctx, label) payload, err := s.PayloadByLabel(ctx, label)
if err != nil { if err != nil {
...@@ -110,7 +114,7 @@ func (s *L2Client) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) ...@@ -110,7 +114,7 @@ func (s *L2Client) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel)
return ref, nil return ref, nil
} }
// L2BlockRefByNumber returns the L2 block reference for the given block number. // L2BlockRefByNumber returns the [eth.L2BlockRef] for the given block number.
func (s *L2Client) L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error) { func (s *L2Client) L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error) {
payload, err := s.PayloadByNumber(ctx, num) payload, err := s.PayloadByNumber(ctx, num)
if err != nil { if err != nil {
...@@ -125,7 +129,7 @@ func (s *L2Client) L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2Bl ...@@ -125,7 +129,7 @@ func (s *L2Client) L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2Bl
return ref, nil return ref, nil
} }
// L2BlockRefByHash returns the L2 block reference for the given block hash. // L2BlockRefByHash returns the [eth.L2BlockRef] for the given block hash.
// The returned BlockRef may not be in the canonical chain. // The returned BlockRef may not be in the canonical chain.
func (s *L2Client) L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L2BlockRef, error) { func (s *L2Client) L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L2BlockRef, error) {
if ref, ok := s.l2BlockRefsCache.Get(hash); ok { if ref, ok := s.l2BlockRefsCache.Get(hash); ok {
...@@ -145,8 +149,8 @@ func (s *L2Client) L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth. ...@@ -145,8 +149,8 @@ func (s *L2Client) L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.
return ref, nil return ref, nil
} }
// SystemConfigByL2Hash returns the system config (matching the config updates up to and including the L1 origin) for the given L2 block hash. // SystemConfigByL2Hash returns the [eth.SystemConfig] (matching the config updates up to and including the L1 origin) for the given L2 block hash.
// The returned SystemConfig may not be in the canonical chain when the hash is not canonical. // The returned [eth.SystemConfig] may not be in the canonical chain when the hash is not canonical.
func (s *L2Client) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (eth.SystemConfig, error) { func (s *L2Client) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (eth.SystemConfig, error) {
if ref, ok := s.systemConfigsCache.Get(hash); ok { if ref, ok := s.systemConfigsCache.Get(hash); ok {
return ref.(eth.SystemConfig), nil return ref.(eth.SystemConfig), nil
......
# @eth-optimism/actor-tests # @eth-optimism/actor-tests
## 0.0.18
### Patch Changes
- Updated dependencies [4964be480]
- @eth-optimism/contracts-bedrock@0.11.3
- @eth-optimism/sdk@1.10.1
## 0.0.17 ## 0.0.17
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/actor-tests", "name": "@eth-optimism/actor-tests",
"version": "0.0.17", "version": "0.0.18",
"description": "A library and suite of tests to stress test Optimism Bedrock.", "description": "A library and suite of tests to stress test Optimism Bedrock.",
"license": "MIT", "license": "MIT",
"author": "", "author": "",
...@@ -18,9 +18,9 @@ ...@@ -18,9 +18,9 @@
"test:coverage": "yarn test" "test:coverage": "yarn test"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/contracts-bedrock": "0.11.2", "@eth-optimism/contracts-bedrock": "0.11.3",
"@eth-optimism/core-utils": "^0.12.0", "@eth-optimism/core-utils": "^0.12.0",
"@eth-optimism/sdk": "^1.9.1", "@eth-optimism/sdk": "^1.10.1",
"@types/chai": "^4.2.18", "@types/chai": "^4.2.18",
"@types/chai-as-promised": "^7.1.4", "@types/chai-as-promised": "^7.1.4",
"async-mutex": "^0.3.2", "async-mutex": "^0.3.2",
......
# @eth-optimism/common-ts # @eth-optimism/common-ts
## 0.7.1
### Patch Changes
- f04e5db2d: Fix unknown option error in base service v2
## 0.7.0 ## 0.7.0
### Minor Changes ### Minor Changes
......
{ {
"name": "@eth-optimism/common-ts", "name": "@eth-optimism/common-ts",
"version": "0.7.0", "version": "0.7.1",
"description": "[Optimism] Advanced typescript tooling used by various services", "description": "[Optimism] Advanced typescript tooling used by various services",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
......
...@@ -20,20 +20,17 @@ export const waitForProvider = async ( ...@@ -20,20 +20,17 @@ export const waitForProvider = async (
name?: string name?: string
} }
) => { ) => {
opts?.logger?.info(`waiting for ${opts?.name || 'target'} provider...`) const name = opts?.name || 'target'
opts?.logger?.info(`waiting for ${name} provider...`)
let connected = false let connected = false
while (!connected) { while (!connected) {
try { try {
await provider.getBlockNumber() await provider.getBlockNumber()
connected = true connected = true
} catch (e) { } catch (e) {
opts?.logger?.info(`${provider} provider not connected, retrying...`) opts?.logger?.info(`${name} provider not connected, retrying...`)
// Don't spam requests
await sleep(opts?.intervalMs || 15000) await sleep(opts?.intervalMs || 15000)
} }
} }
opts?.logger?.info(`${name} provider connected`)
opts?.logger?.info(`${opts?.name || 'target'} provider connected`)
} }
# @eth-optimism/contracts-bedrock # @eth-optimism/contracts-bedrock
## 0.11.3
### Patch Changes
- 4964be480: Added a test for large deposit gaps
## 0.11.2 ## 0.11.2
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/contracts-bedrock", "name": "@eth-optimism/contracts-bedrock",
"version": "0.11.2", "version": "0.11.3",
"description": "Contracts for Optimism Specs", "description": "Contracts for Optimism Specs",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
......
# @eth-optimism/contracts-periphery # @eth-optimism/contracts-periphery
## 1.0.7
### Patch Changes
- 4c64a5811: Update the attestation station impl to 1.1.0
## 1.0.6 ## 1.0.6
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/contracts-periphery", "name": "@eth-optimism/contracts-periphery",
"version": "1.0.6", "version": "1.0.7",
"description": "[Optimism] External (out-of-protocol) L1 and L2 smart contracts for Optimism", "description": "[Optimism] External (out-of-protocol) L1 and L2 smart contracts for Optimism",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -55,7 +55,7 @@ ...@@ -55,7 +55,7 @@
}, },
"devDependencies": { "devDependencies": {
"@defi-wonderland/smock": "^2.0.7", "@defi-wonderland/smock": "^2.0.7",
"@eth-optimism/contracts-bedrock": "0.11.2", "@eth-optimism/contracts-bedrock": "0.11.3",
"@eth-optimism/core-utils": "^0.12.0", "@eth-optimism/core-utils": "^0.12.0",
"@eth-optimism/hardhat-deploy-config": "^0.2.5", "@eth-optimism/hardhat-deploy-config": "^0.2.5",
"@ethersproject/hardware-wallets": "^5.7.0", "@ethersproject/hardware-wallets": "^5.7.0",
......
# data transport layer # data transport layer
## 0.5.52
### Patch Changes
- Updated dependencies [f04e5db2d]
- @eth-optimism/common-ts@0.7.1
## 0.5.51 ## 0.5.51
### Patch Changes ### Patch Changes
......
{ {
"private": true, "private": true,
"name": "@eth-optimism/data-transport-layer", "name": "@eth-optimism/data-transport-layer",
"version": "0.5.51", "version": "0.5.52",
"description": "[Optimism] Service for shuttling data from L1 into L2", "description": "[Optimism] Service for shuttling data from L1 into L2",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -36,7 +36,7 @@ ...@@ -36,7 +36,7 @@
"url": "https://github.com/ethereum-optimism/optimism.git" "url": "https://github.com/ethereum-optimism/optimism.git"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/common-ts": "0.7.0", "@eth-optimism/common-ts": "0.7.1",
"@eth-optimism/contracts": "0.5.40", "@eth-optimism/contracts": "0.5.40",
"@eth-optimism/core-utils": "0.12.0", "@eth-optimism/core-utils": "0.12.0",
"@ethersproject/providers": "^5.7.0", "@ethersproject/providers": "^5.7.0",
......
# @eth-optimism/drippie-mon # @eth-optimism/drippie-mon
## 0.4.3
### Patch Changes
- Updated dependencies [f04e5db2d]
- Updated dependencies [4c64a5811]
- @eth-optimism/common-ts@0.7.1
- @eth-optimism/contracts-periphery@1.0.7
- @eth-optimism/sdk@1.10.1
## 0.4.2 ## 0.4.2
### Patch Changes ### Patch Changes
......
{ {
"private": true, "private": true,
"name": "@eth-optimism/drippie-mon", "name": "@eth-optimism/drippie-mon",
"version": "0.4.2", "version": "0.4.3",
"description": "[Optimism] Service for monitoring Drippie instances", "description": "[Optimism] Service for monitoring Drippie instances",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -32,10 +32,10 @@ ...@@ -32,10 +32,10 @@
"url": "https://github.com/ethereum-optimism/optimism.git" "url": "https://github.com/ethereum-optimism/optimism.git"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/common-ts": "0.7.0", "@eth-optimism/common-ts": "0.7.1",
"@eth-optimism/contracts-periphery": "1.0.6", "@eth-optimism/contracts-periphery": "1.0.7",
"@eth-optimism/core-utils": "0.12.0", "@eth-optimism/core-utils": "0.12.0",
"@eth-optimism/sdk": "1.10.0", "@eth-optimism/sdk": "1.10.1",
"ethers": "^5.7.0" "ethers": "^5.7.0"
}, },
"devDependencies": { "devDependencies": {
......
# @eth-optimism/message-relayer # @eth-optimism/message-relayer
## 0.5.27
### Patch Changes
- Updated dependencies [f04e5db2d]
- @eth-optimism/common-ts@0.7.1
- @eth-optimism/sdk@1.10.1
## 0.5.26 ## 0.5.26
### Patch Changes ### Patch Changes
......
{ {
"private": true, "private": true,
"name": "@eth-optimism/message-relayer", "name": "@eth-optimism/message-relayer",
"version": "0.5.26", "version": "0.5.27",
"description": "[Optimism] Service for automatically relaying L2 to L1 transactions", "description": "[Optimism] Service for automatically relaying L2 to L1 transactions",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -31,9 +31,9 @@ ...@@ -31,9 +31,9 @@
"url": "https://github.com/ethereum-optimism/optimism.git" "url": "https://github.com/ethereum-optimism/optimism.git"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/common-ts": "0.7.0", "@eth-optimism/common-ts": "0.7.1",
"@eth-optimism/core-utils": "0.12.0", "@eth-optimism/core-utils": "0.12.0",
"@eth-optimism/sdk": "1.10.0", "@eth-optimism/sdk": "1.10.1",
"ethers": "^5.7.0" "ethers": "^5.7.0"
}, },
"devDependencies": { "devDependencies": {
......
# @eth-optimism/replica-healthcheck # @eth-optimism/replica-healthcheck
## 1.2.1
### Patch Changes
- Updated dependencies [f04e5db2d]
- @eth-optimism/common-ts@0.7.1
## 1.2.0 ## 1.2.0
### Minor Changes ### Minor Changes
......
{ {
"private": true, "private": true,
"name": "@eth-optimism/replica-healthcheck", "name": "@eth-optimism/replica-healthcheck",
"version": "1.2.0", "version": "1.2.1",
"description": "[Optimism] Service for monitoring the health of replica nodes", "description": "[Optimism] Service for monitoring the health of replica nodes",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
"url": "https://github.com/ethereum-optimism/optimism.git" "url": "https://github.com/ethereum-optimism/optimism.git"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/common-ts": "0.7.0", "@eth-optimism/common-ts": "0.7.1",
"@eth-optimism/core-utils": "0.12.0", "@eth-optimism/core-utils": "0.12.0",
"@ethersproject/abstract-provider": "^5.7.0" "@ethersproject/abstract-provider": "^5.7.0"
}, },
......
# @eth-optimism/sdk # @eth-optimism/sdk
## 1.10.1
### Patch Changes
- Updated dependencies [4964be480]
- @eth-optimism/contracts-bedrock@0.11.3
## 1.10.0 ## 1.10.0
### Minor Changes ### Minor Changes
......
{ {
"name": "@eth-optimism/sdk", "name": "@eth-optimism/sdk",
"version": "1.10.0", "version": "1.10.1",
"description": "[Optimism] Tools for working with Optimism", "description": "[Optimism] Tools for working with Optimism",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -50,7 +50,7 @@ ...@@ -50,7 +50,7 @@
"dependencies": { "dependencies": {
"@eth-optimism/contracts": "0.5.40", "@eth-optimism/contracts": "0.5.40",
"@eth-optimism/core-utils": "0.12.0", "@eth-optimism/core-utils": "0.12.0",
"@eth-optimism/contracts-bedrock": "0.11.2", "@eth-optimism/contracts-bedrock": "0.11.3",
"lodash": "^4.17.21", "lodash": "^4.17.21",
"merkletreejs": "^0.2.27", "merkletreejs": "^0.2.27",
"rlp": "^2.2.7" "rlp": "^2.2.7"
......
# @eth-optimism/proxyd # @eth-optimism/proxyd
## 3.13.0
### Minor Changes
- 6de891d3b: Add sender-based rate limiter
## 3.12.0 ## 3.12.0
### Minor Changes ### Minor Changes
......
...@@ -78,13 +78,26 @@ var ( ...@@ -78,13 +78,26 @@ var (
Message: "over rate limit", Message: "over rate limit",
HTTPErrorCode: 429, HTTPErrorCode: 429,
} }
ErrOverSenderRateLimit = &RPCErr{
Code: JSONRPCErrorInternal - 17,
Message: "sender is over rate limit",
HTTPErrorCode: 429,
}
ErrBackendUnexpectedJSONRPC = errors.New("backend returned an unexpected JSON-RPC response") ErrBackendUnexpectedJSONRPC = errors.New("backend returned an unexpected JSON-RPC response")
) )
func ErrInvalidRequest(msg string) *RPCErr { func ErrInvalidRequest(msg string) *RPCErr {
return &RPCErr{ return &RPCErr{
Code: -32601, Code: -32600,
Message: msg,
HTTPErrorCode: 400,
}
}
func ErrInvalidParams(msg string) *RPCErr {
return &RPCErr{
Code: -32602,
Message: msg, Message: msg,
HTTPErrorCode: 400, HTTPErrorCode: 400,
} }
......
...@@ -104,21 +104,30 @@ type BatchConfig struct { ...@@ -104,21 +104,30 @@ type BatchConfig struct {
ErrorMessage string `toml:"error_message"` ErrorMessage string `toml:"error_message"`
} }
// SenderRateLimitConfig configures the sender-based rate limiter
// for eth_sendRawTransaction requests.
type SenderRateLimitConfig struct {
Enabled bool
Interval TOMLDuration
Limit int
}
type Config struct { type Config struct {
WSBackendGroup string `toml:"ws_backend_group"` WSBackendGroup string `toml:"ws_backend_group"`
Server ServerConfig `toml:"server"` Server ServerConfig `toml:"server"`
Cache CacheConfig `toml:"cache"` Cache CacheConfig `toml:"cache"`
Redis RedisConfig `toml:"redis"` Redis RedisConfig `toml:"redis"`
Metrics MetricsConfig `toml:"metrics"` Metrics MetricsConfig `toml:"metrics"`
RateLimit RateLimitConfig `toml:"rate_limit"` RateLimit RateLimitConfig `toml:"rate_limit"`
BackendOptions BackendOptions `toml:"backend"` BackendOptions BackendOptions `toml:"backend"`
Backends BackendsConfig `toml:"backends"` Backends BackendsConfig `toml:"backends"`
BatchConfig BatchConfig `toml:"batch"` BatchConfig BatchConfig `toml:"batch"`
Authentication map[string]string `toml:"authentication"` Authentication map[string]string `toml:"authentication"`
BackendGroups BackendGroupsConfig `toml:"backend_groups"` BackendGroups BackendGroupsConfig `toml:"backend_groups"`
RPCMethodMappings map[string]string `toml:"rpc_method_mappings"` RPCMethodMappings map[string]string `toml:"rpc_method_mappings"`
WSMethodWhitelist []string `toml:"ws_method_whitelist"` WSMethodWhitelist []string `toml:"ws_method_whitelist"`
WhitelistErrorMessage string `toml:"whitelist_error_message"` WhitelistErrorMessage string `toml:"whitelist_error_message"`
SenderRateLimit SenderRateLimitConfig `toml:"sender_rate_limit"`
} }
func ReadFromEnvOrConfig(value string) (string, error) { func ReadFromEnvOrConfig(value string) (string, error) {
......
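The new SenderRateLimit field corresponds to the `[sender_rate_limit]` TOML block added to the integration-test config further down. As a hedged sketch, a service embedding proxyd could also populate it programmatically before calling proxyd.Start; the values below simply mirror the validation added in Start (Limit > 0, Interval >= 1s), and the surrounding config loading is omitted.

```go
// Sketch: enabling the sender-based rate limiter in code rather than TOML.
package proxydconfigsketch

import (
	"time"

	"github.com/ethereum-optimism/optimism/proxyd"
)

func enableSenderRateLimit(cfg *proxyd.Config) {
	cfg.SenderRateLimit = proxyd.SenderRateLimitConfig{
		Enabled:  true,
		Interval: proxyd.TOMLDuration(time.Second), // Start rejects intervals shorter than 1s
		Limit:    1,                                // Start rejects limits <= 0
	}
}
```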
package integration_tests
import (
"bufio"
"fmt"
"math"
"os"
"strings"
"testing"
"time"
"github.com/ethereum-optimism/optimism/proxyd"
"github.com/stretchr/testify/require"
)
const txHex1 = "0x02f8b28201a406849502f931849502f931830147f9948f3ddd0fbf3e78ca1d6c" +
"d17379ed88e261249b5280b84447e7ef2400000000000000000000000089c8b1" +
"b2774201bac50f627403eac1b732459cf7000000000000000000000000000000" +
"0000000000000000056bc75e2d63100000c080a0473c95566026c312c9664cd6" +
"1145d2f3e759d49209fe96011ac012884ec5b017a0763b58f6fa6096e6ba28ee" +
"08bfac58f58fb3b8bcef5af98578bdeaddf40bde42"
const txHex2 = "0xf8aa82afd2830f4240830493e094464959ad46e64046b891f562cff202a465d5" +
"22f380b844d5bade070000000000000000000000004200000000000000000000" +
"0000000000000000060000000000000000000000000000000000000000000000" +
"0000000025ef43fc0038a05d8ea9837ea81469bda4dadbe852fdd37fcfbcd666" +
"5641a35e4726fbc04364e7a0107e20bb34aea53c695a551204a11d42fe465055" +
"510ee240e8f884fb70289be6"
const dummyRes = `{"id": 123, "jsonrpc": "2.0", "result": "dummy"}`
const limRes = `{"error":{"code":-32017,"message":"sender is over rate limit"},"id":1,"jsonrpc":"2.0"}`
func TestSenderRateLimitValidation(t *testing.T) {
goodBackend := NewMockBackend(SingleResponseHandler(200, dummyRes))
defer goodBackend.Close()
require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
config := ReadConfig("sender_rate_limit")
// Don't perform rate limiting in this test since we're only testing
// validation.
config.SenderRateLimit.Limit = math.MaxInt
client := NewProxydClient("http://127.0.0.1:8545")
shutdown, err := proxyd.Start(config)
require.NoError(t, err)
defer shutdown()
f, err := os.Open("testdata/testdata.txt")
require.NoError(t, err)
defer f.Close()
scanner := bufio.NewScanner(f)
scanner.Scan() // skip header
for scanner.Scan() {
record := strings.Split(scanner.Text(), "|")
name, body, expResponseBody := record[0], record[1], record[2]
require.NoError(t, err)
t.Run(name, func(t *testing.T) {
res, _, err := client.SendRequest([]byte(body))
require.NoError(t, err)
RequireEqualJSON(t, []byte(expResponseBody), res)
})
}
}
func TestSenderRateLimitLimiting(t *testing.T) {
goodBackend := NewMockBackend(SingleResponseHandler(200, dummyRes))
defer goodBackend.Close()
require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
config := ReadConfig("sender_rate_limit")
client := NewProxydClient("http://127.0.0.1:8545")
shutdown, err := proxyd.Start(config)
require.NoError(t, err)
defer shutdown()
// Two separate requests from the same sender
// should be rate limited.
res1, code1, err := client.SendRequest(makeSendRawTransaction(txHex1))
require.NoError(t, err)
res2, code2, err := client.SendRequest(makeSendRawTransaction(txHex1))
require.NoError(t, err)
RequireEqualJSON(t, []byte(dummyRes), res1)
require.Equal(t, 200, code1)
RequireEqualJSON(t, []byte(limRes), res2)
require.Equal(t, 429, code2)
// Clear the limiter.
time.Sleep(1100 * time.Millisecond)
// Two separate requests from different senders
// should not be rate limited.
res1, code1, err = client.SendRequest(makeSendRawTransaction(txHex1))
require.NoError(t, err)
res2, code2, err = client.SendRequest(makeSendRawTransaction(txHex2))
require.NoError(t, err)
RequireEqualJSON(t, []byte(dummyRes), res1)
require.Equal(t, 200, code1)
RequireEqualJSON(t, []byte(dummyRes), res2)
require.Equal(t, 200, code2)
// Clear the limiter.
time.Sleep(1100 * time.Millisecond)
// A batch request should rate limit within the batch itself.
batch := []byte(fmt.Sprintf(
`[%s, %s, %s]`,
makeSendRawTransaction(txHex1),
makeSendRawTransaction(txHex1),
makeSendRawTransaction(txHex2),
))
res, code, err := client.SendRequest(batch)
require.NoError(t, err)
require.Equal(t, 200, code)
RequireEqualJSON(t, []byte(fmt.Sprintf(
`[%s, %s, %s]`,
dummyRes,
limRes,
dummyRes,
)), res)
}
func makeSendRawTransaction(dataHex string) []byte {
return []byte(`{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["` + dataHex + `"],"id":1}`)
}
[server]
rpc_port = 8545
[backend]
response_timeout_seconds = 1
[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backend_groups]
[backend_groups.main]
backends = ["good"]
[rpc_method_mappings]
eth_chainId = "main"
eth_sendRawTransaction = "main"
[sender_rate_limit]
enabled = true
interval = "1s"
limit = 1
\ No newline at end of file
name|body|responseBody
not json|not json|{"jsonrpc":"2.0","error":{"code":-32700,"message":"parse error"},"id":null}
not json-rpc|{"foo":"bar"}|{"jsonrpc":"2.0","error":{"code":-32600,"message":"invalid JSON-RPC version"},"id":null}
missing fields json-rpc|{"jsonrpc":"2.0"}|{"jsonrpc":"2.0","error":{"code":-32600,"message":"no method specified"},"id":null}
bad method json-rpc|{"jsonrpc":"2.0","method":"eth_notSendRawTransaction","id":1}|{"jsonrpc":"2.0","error":{"code":-32001,"message":"rpc method is not whitelisted"},"id":1}
no transaction data|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":[],"id":1}|{"jsonrpc":"2.0","error":{"code":-32602,"message":"missing value for required argument 0"},"id":1}
invalid transaction data|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0xf6806872fcc650ad4e77e0629206426cd183d751e9ddcc8d5e77"],"id":1}|{"jsonrpc":"2.0","error":{"code":-32602,"message":"rlp: value size exceeds available input length"},"id":1}
invalid transaction data|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x1234"],"id":1}|{"jsonrpc":"2.0","error":{"code":-32602,"message":"transaction type not supported"},"id":1}
valid transaction data - simple send|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f8748201a415843b9aca31843b9aca3182520894f80267194936da1e98db10bce06f3147d580a62e880de0b6b3a764000080c001a0b50ee053102360ff5fedf0933b912b7e140c90fe57fa07a0cebe70dbd72339dda072974cb7bfe5c3dc54dde110e2b049408ccab8a879949c3b4d42a3a7555a618b"],"id":1}|{"id": 123, "jsonrpc": "2.0", "result": "dummy"}
valid transaction data - contract call|{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f8b28201a406849502f931849502f931830147f9948f3ddd0fbf3e78ca1d6cd17379ed88e261249b5280b84447e7ef2400000000000000000000000089c8b1b2774201bac50f627403eac1b732459cf70000000000000000000000000000000000000000000000056bc75e2d63100000c080a0473c95566026c312c9664cd61145d2f3e759d49209fe96011ac012884ec5b017a0763b58f6fa6096e6ba28ee08bfac58f58fb3b8bcef5af98578bdeaddf40bde42"],"id":1}|{"id": 123, "jsonrpc": "2.0", "result": "dummy"}
batch with mixed results|[{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0x02f8748201a415843b9aca31843b9aca3182520894f80267194936da1e98db10bce06f3147d580a62e880de0b6b3a764000080c001a0b50ee053102360ff5fedf0933b912b7e140c90fe57fa07a0cebe70dbd72339dda072974cb7bfe5c3dc54dde110e2b049408ccab8a879949c3b4d42a3a7555a618b"],"id":1},{"bad":"json"},{"jsonrpc":"2.0","method":"eth_fooTheBar","params":[],"id":123}]|[{"id": 123, "jsonrpc": "2.0", "result": "dummy"},{"jsonrpc":"2.0","error":{"code":-32600,"message":"invalid JSON-RPC version"},"id":null},{"jsonrpc":"2.0","error":{"code":-32001,"message":"rpc method is not whitelisted"},"id":123}]
\ No newline at end of file
...@@ -12,10 +12,10 @@ import ( ...@@ -12,10 +12,10 @@ import (
const ( const (
notWhitelistedResponse = `{"jsonrpc":"2.0","error":{"code":-32001,"message":"rpc method is not whitelisted custom message"},"id":999}` notWhitelistedResponse = `{"jsonrpc":"2.0","error":{"code":-32001,"message":"rpc method is not whitelisted custom message"},"id":999}`
parseErrResponse = `{"jsonrpc":"2.0","error":{"code":-32700,"message":"parse error"},"id":null}` parseErrResponse = `{"jsonrpc":"2.0","error":{"code":-32700,"message":"parse error"},"id":null}`
invalidJSONRPCVersionResponse = `{"error":{"code":-32601,"message":"invalid JSON-RPC version"},"id":null,"jsonrpc":"2.0"}` invalidJSONRPCVersionResponse = `{"error":{"code":-32600,"message":"invalid JSON-RPC version"},"id":null,"jsonrpc":"2.0"}`
invalidIDResponse = `{"error":{"code":-32601,"message":"invalid ID"},"id":null,"jsonrpc":"2.0"}` invalidIDResponse = `{"error":{"code":-32600,"message":"invalid ID"},"id":null,"jsonrpc":"2.0"}`
invalidMethodResponse = `{"error":{"code":-32601,"message":"no method specified"},"id":null,"jsonrpc":"2.0"}` invalidMethodResponse = `{"error":{"code":-32600,"message":"no method specified"},"id":null,"jsonrpc":"2.0"}`
invalidBatchLenResponse = `{"error":{"code":-32601,"message":"must specify at least one batch call"},"id":null,"jsonrpc":"2.0"}` invalidBatchLenResponse = `{"error":{"code":-32600,"message":"must specify at least one batch call"},"id":null,"jsonrpc":"2.0"}`
) )
func TestSingleRPCValidation(t *testing.T) { func TestSingleRPCValidation(t *testing.T) {
......
{ {
"name": "@eth-optimism/proxyd", "name": "@eth-optimism/proxyd",
"version": "3.12.0", "version": "3.13.0",
"private": true, "private": true,
"dependencies": {} "dependencies": {}
} }
...@@ -78,6 +78,15 @@ func Start(config *Config) (func(), error) { ...@@ -78,6 +78,15 @@ func Start(config *Config) (func(), error) {
ErrTooManyBatchRequests.Message = config.BatchConfig.ErrorMessage ErrTooManyBatchRequests.Message = config.BatchConfig.ErrorMessage
} }
if config.SenderRateLimit.Enabled {
if config.SenderRateLimit.Limit <= 0 {
return nil, errors.New("limit in sender_rate_limit must be > 0")
}
if time.Duration(config.SenderRateLimit.Interval) < time.Second {
return nil, errors.New("interval in sender_rate_limit must be >= 1s")
}
}
maxConcurrentRPCs := config.Server.MaxConcurrentRPCs maxConcurrentRPCs := config.Server.MaxConcurrentRPCs
if maxConcurrentRPCs == 0 { if maxConcurrentRPCs == 0 {
maxConcurrentRPCs = math.MaxInt64 maxConcurrentRPCs = math.MaxInt64
...@@ -244,6 +253,7 @@ func Start(config *Config) (func(), error) { ...@@ -244,6 +253,7 @@ func Start(config *Config) (func(), error) {
config.Server.MaxUpstreamBatchSize, config.Server.MaxUpstreamBatchSize,
rpcCache, rpcCache,
config.RateLimit, config.RateLimit,
config.SenderRateLimit,
config.Server.EnableRequestLog, config.Server.EnableRequestLog,
config.Server.MaxRequestBodyLogLen, config.Server.MaxRequestBodyLogLen,
config.BatchConfig.MaxSize, config.BatchConfig.MaxSize,
......
...@@ -14,6 +14,9 @@ import ( ...@@ -14,6 +14,9 @@ import (
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/go-redis/redis/v8" "github.com/go-redis/redis/v8"
"github.com/gorilla/mux" "github.com/gorilla/mux"
...@@ -50,6 +53,7 @@ type Server struct { ...@@ -50,6 +53,7 @@ type Server struct {
upgrader *websocket.Upgrader upgrader *websocket.Upgrader
mainLim FrontendRateLimiter mainLim FrontendRateLimiter
overrideLims map[string]FrontendRateLimiter overrideLims map[string]FrontendRateLimiter
senderLim FrontendRateLimiter
limExemptOrigins []*regexp.Regexp limExemptOrigins []*regexp.Regexp
limExemptUserAgents []*regexp.Regexp limExemptUserAgents []*regexp.Regexp
rpcServer *http.Server rpcServer *http.Server
...@@ -71,6 +75,7 @@ func NewServer( ...@@ -71,6 +75,7 @@ func NewServer(
maxUpstreamBatchSize int, maxUpstreamBatchSize int,
cache RPCCache, cache RPCCache,
rateLimitConfig RateLimitConfig, rateLimitConfig RateLimitConfig,
senderRateLimitConfig SenderRateLimitConfig,
enableRequestLog bool, enableRequestLog bool,
maxRequestBodyLogLen int, maxRequestBodyLogLen int,
maxBatchSize int, maxBatchSize int,
...@@ -135,6 +140,10 @@ func NewServer( ...@@ -135,6 +140,10 @@ func NewServer(
return nil, err return nil, err
} }
} }
var senderLim FrontendRateLimiter
if senderRateLimitConfig.Enabled {
senderLim = limiterFactory(time.Duration(senderRateLimitConfig.Interval), senderRateLimitConfig.Limit, "senders")
}
return &Server{ return &Server{
backendGroups: backendGroups, backendGroups: backendGroups,
...@@ -154,6 +163,7 @@ func NewServer( ...@@ -154,6 +163,7 @@ func NewServer(
}, },
mainLim: mainLim, mainLim: mainLim,
overrideLims: overrideLims, overrideLims: overrideLims,
senderLim: senderLim,
limExemptOrigins: limExemptOrigins, limExemptOrigins: limExemptOrigins,
limExemptUserAgents: limExemptUserAgents, limExemptUserAgents: limExemptUserAgents,
}, nil }, nil
...@@ -409,6 +419,17 @@ func (s *Server) handleBatchRPC(ctx context.Context, reqs []json.RawMessage, isL ...@@ -409,6 +419,17 @@ func (s *Server) handleBatchRPC(ctx context.Context, reqs []json.RawMessage, isL
continue continue
} }
// Apply a sender-based rate limit if it is enabled. Note that sender-based rate
// limits apply regardless of origin or user-agent. As such, they don't use the
// isLimited method.
if parsedReq.Method == "eth_sendRawTransaction" && s.senderLim != nil {
if err := s.rateLimitSender(ctx, parsedReq); err != nil {
RecordRPCError(ctx, BackendProxyd, parsedReq.Method, err)
responses[i] = NewRPCErrorRes(parsedReq.ID, err)
continue
}
}
id := string(parsedReq.ID) id := string(parsedReq.ID)
// If this is a duplicate Request ID, move the Request to a new batchGroup // If this is a duplicate Request ID, move the Request to a new batchGroup
ids[id]++ ids[id]++
...@@ -575,6 +596,54 @@ func (s *Server) isUnlimitedUserAgent(origin string) bool { ...@@ -575,6 +596,54 @@ func (s *Server) isUnlimitedUserAgent(origin string) bool {
return false return false
} }
func (s *Server) rateLimitSender(ctx context.Context, req *RPCReq) error {
var params []string
if err := json.Unmarshal(req.Params, &params); err != nil {
log.Debug("error unmarshaling raw transaction params", "err", err, "req_Id", GetReqID(ctx))
return ErrParseErr
}
if len(params) != 1 {
log.Debug("raw transaction request has invalid number of params", "req_id", GetReqID(ctx))
// The error below is identical to the one Geth responds with.
return ErrInvalidParams("missing value for required argument 0")
}
var data hexutil.Bytes
if err := data.UnmarshalText([]byte(params[0])); err != nil {
log.Debug("error decoding raw tx data", "err", err, "req_id", GetReqID(ctx))
// Geth returns the raw error from UnmarshalText.
return ErrInvalidParams(err.Error())
}
// Inflates a types.Transaction object from the transaction's raw bytes.
tx := new(types.Transaction)
if err := tx.UnmarshalBinary(data); err != nil {
log.Debug("could not unmarshal transaction", "err", err, "req_id", GetReqID(ctx))
return ErrInvalidParams(err.Error())
}
// Convert the transaction into a Message object so that we can get the
// sender. This method performs an ecrecover, which can be expensive.
msg, err := tx.AsMessage(types.LatestSignerForChainID(tx.ChainId()), nil)
if err != nil {
log.Debug("could not get message from transaction", "err", err, "req_id", GetReqID(ctx))
return ErrInvalidParams(err.Error())
}
ok, err := s.senderLim.Take(ctx, msg.From().Hex())
if err != nil {
log.Error("error taking from sender limiter", "err", err, "req_id", GetReqID(ctx))
return ErrInternal
}
if !ok {
log.Debug("sender rate limit exceeded", "sender", msg.From(), "req_id", GetReqID(ctx))
return ErrOverSenderRateLimit
}
return nil
}
func setCacheHeader(w http.ResponseWriter, cached bool) { func setCacheHeader(w http.ResponseWriter, cached bool) {
if cached { if cached {
w.Header().Set(cacheStatusHdr, "HIT") w.Header().Set(cacheStatusHdr, "HIT")
......