Commit 592d0ed2 authored by Matthew Slipper, committed by GitHub

Merge pull request #2452 from ethereum-optimism/develop

Develop -> Master
parents 60c92b60 94876e28
---
"@eth-optimism/batch-submitter-service": patch
---
feat: bss less strict min-tx-size
---
'@eth-optimism/proxyd': patch
---
proxyd: Record redis cache operation latency
---
'@eth-optimism/data-transport-layer': patch
---
Use Basic Authentication in L1TransportServer
---
'@eth-optimism/indexer': patch
---
Don't spam the backend
---
'@eth-optimism/proxyd': patch
---
proxyd: Request-scoped context for fast batch RPC short-circuiting
---
"@eth-optimism/indexer": patch
---
fix (indexer): update l2 bridge addresses
......@@ -5,22 +5,22 @@ queue_rules:
pull_request_rules:
- name: Automatic merge on approval
conditions:
- and:
- "#review-threads-unresolved=0"
- "#approved-reviews-by>=2"
- "#changes-requested-reviews-by=0"
- or:
- and:
- "label!=SR-Risk"
- "label!=C-Protocol-Critical"
- and:
- "label=SR-Risk"
- "approved-reviews-by=maurelian"
- and:
- "label=C-Protocol-Critical"
- or:
- "approved-reviews-by=tynes"
- "approved-reviews-by=smartcontracts"
actions:
queue:
name: default
......@@ -97,3 +97,12 @@ pull_request_rules:
label:
remove:
- on-merge-train
- name: Nag changesets
conditions:
- and:
- 'files~=\.(ts|go|js|mod|sum)$'
- '-files~=^\.changeset/(.*)\.md'
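# i.e. the PR touches implementation files (.ts/.go/.js/.mod/.sum)
# but adds no .changeset/*.md entry.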
actions:
comment:
message: |
This PR changes implementation code, but doesn't include a changeset. Did you forget to add one?
......@@ -167,8 +167,9 @@ func (d *Driver) CraftBatchTx(
"nonce", nonce, "type", d.cfg.BatchType.String())
var (
batchElements []BatchElement
totalTxSize uint64
batchElements []BatchElement
totalTxSize uint64
hasLargeNextTx bool
)
for i := new(big.Int).Set(start); i.Cmp(end) < 0; i.Add(i, bigOne) {
block, err := d.cfg.L2Client.BlockByNumber(ctx, i)
......@@ -187,6 +188,12 @@ func (d *Driver) CraftBatchTx(
// size also adheres to this constraint.
txLen := batchElement.Tx.Size()
if totalTxSize+uint64(TxLenSize+txLen) > d.cfg.MaxTxSize {
// Adding this transaction would cause the batch to exceed the
// maximum size, so we stop here. We also record whether the batch,
// without this transaction, falls short of our minimum size
// constraint. This is used below to decide whether to ignore the
// minimum size check, since in that case violating it is
// unavoidable.
hasLargeNextTx = totalTxSize < d.cfg.MinTxSize
break
}
totalTxSize += uint64(TxLenSize + txLen)
......@@ -214,6 +221,11 @@ func (d *Driver) CraftBatchTx(
appendSequencerBatchID := d.ctcABI.Methods[appendSequencerBatchMethodName].ID
plaintextCalldata := append(appendSequencerBatchID, plaintextBatchArguments...)
log.Info(name+" testing batch size",
"plaintext_size", len(plaintextCalldata),
"min_tx_size", d.cfg.MinTxSize,
"max_tx_size", d.cfg.MaxTxSize)
// Continue pruning until plaintext calldata size is less than
// configured max.
plaintextCalldataSize := uint64(len(plaintextCalldata))
......@@ -222,16 +234,33 @@ func (d *Driver) CraftBatchTx(
newBatchElementsLen := (oldLen * 9) / 10
batchElements = batchElements[:newBatchElementsLen]
log.Info(name+" pruned batch",
"plaintext_size", plaintextCalldataSize,
"max_tx_size", d.cfg.MaxTxSize,
"old_num_txs", oldLen,
"new_num_txs", newBatchElementsLen)
pruneCount++
continue
} else if plaintextCalldataSize < d.cfg.MinTxSize {
}
// There are two specific cases in which we choose to ignore the minimum
// L1 tx size. These cases are permitted since they arise from
// situations where the difference between the configured MinTxSize and
// MaxTxSize is less than the maximum L2 tx size permitted by the
// mempool.
//
// This configuration is useful for ensuring that batch submission
// remains profitable; we permit batches smaller than our desired
// minimum only when it is impossible to construct a batch within the
// given parameters.
//
// The two cases are:
// 1. When the next element is larger than the difference between the
// min and the max, causing the batch to be too small without the
// element, and too large with it.
// 2. When pruning a batch that exceeds the max size below, and then
// becomes too small as a result. This is avoided by only applying
// the min size check when the pruneCount is zero.
ignoreMinTxSize := pruneCount > 0 || hasLargeNextTx
if !ignoreMinTxSize && plaintextCalldataSize < d.cfg.MinTxSize {
log.Info(name+" batch tx size below minimum",
"plaintext_size", plaintextCalldataSize,
"min_tx_size", d.cfg.MinTxSize,
"num_txs", len(batchElements))
return nil, nil
}
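// A worked example of case 1, with hypothetical sizes: if MinTxSize is
// 32 KB and MaxTxSize is 96 KB, a batch holding 30 KB of calldata whose
// next L2 tx weighs 80 KB can neither include the tx (too large) nor
// meet the minimum without it, so hasLargeNextTx waives the minimum.
// Case 2 is analogous: pruning an oversized batch (pruneCount > 0) may
// legitimately drop it below the minimum.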
......@@ -249,7 +278,10 @@ func (d *Driver) CraftBatchTx(
calldata = append(appendSequencerBatchID, batchArguments...)
}
log.Info(name+" batch constructed", "num_txs", len(batchElements), "length", len(calldata))
log.Info(name+" batch constructed",
"num_txs", len(batchElements),
"final_size", len(calldata),
"batch_type", d.cfg.BatchType)
opts, err := bind.NewKeyedTransactorWithChainID(
d.cfg.PrivKey, d.cfg.ChainID,
......
......@@ -9,6 +9,8 @@ import (
"strconv"
"time"
l2rpc "github.com/ethereum-optimism/optimism/l2geth/rpc"
"github.com/ethereum-optimism/optimism/go/indexer/metrics"
"github.com/ethereum-optimism/optimism/go/indexer/server"
"github.com/rs/cors"
......@@ -128,7 +130,7 @@ func NewIndexer(cfg Config, gitVersion string) (*Indexer, error) {
return nil, err
}
l2Client, err := dialL2EthClientWithTimeout(ctx, cfg.L2EthRpc)
l2Client, l2RPC, err := dialL2EthClientWithTimeout(ctx, cfg.L2EthRpc)
if err != nil {
return nil, err
}
......@@ -180,6 +182,7 @@ func NewIndexer(cfg Config, gitVersion string) (*Indexer, error) {
l2IndexingService, err := l2.NewService(l2.ServiceConfig{
Context: ctx,
Metrics: m,
L2RPC: l2RPC,
L2Client: l2Client,
DB: db,
ConfDepth: cfg.ConfDepth,
......@@ -277,12 +280,17 @@ func dialL1EthClientWithTimeout(ctx context.Context, url string) (
// provided URL. If the dial doesn't complete within defaultDialTimeout seconds,
// this method will return an error.
func dialL2EthClientWithTimeout(ctx context.Context, url string) (
*l2ethclient.Client, error) {
*l2ethclient.Client, *l2rpc.Client, error) {
ctxt, cancel := context.WithTimeout(ctx, defaultDialTimeout)
defer cancel()
return l2ethclient.DialContext(ctxt, url)
rpc, err := l2rpc.DialContext(ctxt, url)
if err != nil {
return nil, nil, err
}
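// The ethclient below wraps the same underlying RPC connection, so both
// returned clients share a single dial and lifecycle.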
return l2ethclient.NewClient(rpc), rpc, nil
}
// traceRateToFloat64 converts a time.Duration into a valid float64 for the
......
......@@ -5,10 +5,10 @@ import (
"encoding/json"
"errors"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum-optimism/optimism/go/indexer/services/util"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
......@@ -17,9 +17,9 @@ import (
)
const (
DefaultConnectionTimeout = 20 * time.Second
DefaultConnectionTimeout = 30 * time.Second
DefaultConfDepth uint64 = 20
DefaultMaxBatchSize uint64 = 100
DefaultMaxBatchSize = 100
)
type NewHeader struct {
......@@ -128,24 +128,34 @@ type ConfirmedHeaderSelector struct {
cfg HeaderSelectorConfig
}
func toBlockNumArg(number *big.Int) string {
if number == nil {
return "latest"
func HeadersByRange(ctx context.Context, client *rpc.Client, startHeight uint64, count int) ([]*NewHeader, error) {
height := startHeight
batchElems := make([]rpc.BatchElem, count)
for i := 0; i < count; i++ {
batchElems[i] = rpc.BatchElem{
Method: "eth_getBlockByNumber",
Args: []interface{}{
util.ToBlockNumArg(new(big.Int).SetUint64(height + uint64(i))),
false,
},
Result: new(NewHeader),
Error: nil,
}
}
pending := big.NewInt(-1)
if number.Cmp(pending) == 0 {
return "pending"
if err := client.BatchCallContext(ctx, batchElems); err != nil {
return nil, err
}
return hexutil.EncodeBig(number)
}
func HeaderByNumber(ctx context.Context, client *rpc.Client, height *big.Int) (*NewHeader, error) {
var head *NewHeader
err := client.CallContext(ctx, &head, "eth_getBlockByNumber", toBlockNumArg(height), false)
if err == nil && head == nil {
err = ethereum.NotFound
out := make([]*NewHeader, count)
for i := 0; i < len(batchElems); i++ {
if batchElems[i].Error != nil {
return nil, batchElems[i].Error
}
out[i] = batchElems[i].Result.(*NewHeader)
}
return head, err
return out, nil
}
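// HeadersByRange usage, as a minimal sketch (hypothetical height; assumes
// ctx and a dialed *rpc.Client):
//
//	headers, err := HeadersByRange(ctx, client, 4_000_000, 100)
//
// One BatchCallContext round trip here replaces 100 individual
// eth_getBlockByNumber requests.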
func (f *ConfirmedHeaderSelector) NewHead(
......@@ -153,7 +163,7 @@ func (f *ConfirmedHeaderSelector) NewHead(
lowest uint64,
header *types.Header,
client *rpc.Client,
) []*NewHeader {
) ([]*NewHeader, error) {
number := header.Number.Uint64()
blockHash := header.Hash
......@@ -161,14 +171,14 @@ func (f *ConfirmedHeaderSelector) NewHead(
logger.Info("New block", "block", number, "hash", blockHash)
if number < f.cfg.ConfDepth {
return nil
return nil, nil
}
endHeight := number - f.cfg.ConfDepth + 1
minNextHeight := lowest + f.cfg.ConfDepth
if minNextHeight > number {
log.Info("Fork block ", "block", number, "hash", blockHash)
return nil
return nil, nil
}
startHeight := lowest + 1
......@@ -177,34 +187,35 @@ func (f *ConfirmedHeaderSelector) NewHead(
endHeight = startHeight + f.cfg.MaxBatchSize - 1
}
nHeaders := endHeight - startHeight + 1
nHeaders := int(endHeight - startHeight + 1)
if nHeaders > 1 {
logger.Info("Loading block batch ",
logger.Info("Loading blocks",
"startHeight", startHeight, "endHeight", endHeight)
}
headers := make([]*NewHeader, nHeaders)
var wg sync.WaitGroup
for i := uint64(0); i < nHeaders; i++ {
wg.Add(1)
go func(ii uint64) {
defer wg.Done()
headers := make([]*NewHeader, 0)
height := startHeight
left := nHeaders - len(headers)
for left > 0 {
count := DefaultMaxBatchSize
if count > left {
count = left
}
ctxt, cancel := context.WithTimeout(ctx, DefaultConnectionTimeout)
defer cancel()
logger.Info("Loading block batch",
"height", height, "count", count)
height := startHeight + ii
bigHeight := new(big.Int).SetUint64(height)
header, err := HeaderByNumber(ctxt, client, bigHeight)
if err != nil {
log.Error("Unable to load block ", "block", height, "err", err)
return
}
ctxt, cancel := context.WithTimeout(ctx, DefaultConnectionTimeout)
fetched, err := HeadersByRange(ctxt, client, height, count)
cancel()
if err != nil {
return nil, err
}
headers[ii] = header
}(i)
headers = append(headers, fetched...)
left = nHeaders - len(headers)
height += uint64(count)
}
wg.Wait()
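// The chunked, sequential HeadersByRange loop above replaces the previous
// goroutine-per-height fan-out: headers now arrive in order, and each RPC
// round trip requests at most DefaultMaxBatchSize blocks.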
logger.Debug("Verifying block range ",
"startHeight", startHeight, "endHeight", endHeight)
......@@ -233,7 +244,7 @@ func (f *ConfirmedHeaderSelector) NewHead(
"block", header.Number.Uint64(), "hash", header.Hash)
}
return headers
return headers, nil
}
func NewConfirmedHeaderSelector(cfg HeaderSelectorConfig) (*ConfirmedHeaderSelector,
......
......@@ -215,7 +215,10 @@ func (s *Service) Update(newHeader *types.Header) error {
lowest = *highestConfirmed
}
headers := s.headerSelector.NewHead(s.ctx, lowest.Number, newHeader, s.cfg.RawL1Client)
headers, err := s.headerSelector.NewHead(s.ctx, lowest.Number, newHeader, s.cfg.RawL1Client)
if err != nil {
return err
}
if len(headers) == 0 {
return errNoNewBlocks
}
......
......@@ -33,14 +33,14 @@ var defaultBridgeCfgs = []*implConfig{
var customBridgeCfgs = map[uint64][]*implConfig{
// Mainnet
10: {
{"BitBTC", StandardBridgeImpl, "0xaBA2c5F108F7E820C049D5Af70B16ac266c8f128"},
//{"DAI", "DAIBridge", "0x10E6593CDda8c58a1d0f14C5164B376352a55f2F"},
{"BitBTC", StandardBridgeImpl, "0x158F513096923fF2d3aab2BcF4478536de6725e2"},
//{"DAI", "DAIBridge", "0x467194771dAe2967Aef3ECbEDD3Bf9a310C76C65"},
},
// Kovan
69: {
{"BitBTC", StandardBridgeImpl, "0x0b651A42F32069d62d5ECf4f2a7e5Bd3E9438746"},
{"USX", StandardBridgeImpl, "0x40E862341b2416345F02c41Ac70df08525150dC7"},
//{"DAI", " DAIBridge", "0xb415e822C4983ecD6B1c1596e8a5f976cf6CD9e3"},
{"BitBTC", StandardBridgeImpl, "0x0CFb46528a7002a7D8877a5F7a69b9AaF1A9058e"},
{"USX", StandardBridgeImpl, "0xB4d37826b14Cd3CB7257A2A5094507d701fe715f"},
//{"DAI", " DAIBridge", "0x467194771dAe2967Aef3ECbEDD3Bf9a310C76C65"},
},
}
......
......@@ -4,18 +4,20 @@ import (
"context"
"errors"
"math/big"
"sync"
"time"
"github.com/ethereum-optimism/optimism/go/indexer/services/util"
"github.com/ethereum-optimism/optimism/l2geth/rpc"
"github.com/ethereum-optimism/optimism/l2geth/core/types"
l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient"
"github.com/ethereum-optimism/optimism/l2geth/log"
l2rpc "github.com/ethereum-optimism/optimism/l2geth/rpc"
)
const (
DefaultConnectionTimeout = 20 * time.Second
DefaultConfDepth uint64 = 20
DefaultMaxBatchSize uint64 = 100
DefaultMaxBatchSize = 50
)
type HeaderSelectorConfig struct {
......@@ -27,12 +29,42 @@ type ConfirmedHeaderSelector struct {
cfg HeaderSelectorConfig
}
func HeadersByRange(ctx context.Context, client *l2rpc.Client, startHeight uint64, count int) ([]*types.Header, error) {
height := startHeight
batchElems := make([]rpc.BatchElem, count)
for i := 0; i < count; i++ {
batchElems[i] = rpc.BatchElem{
Method: "eth_getBlockByNumber",
Args: []interface{}{
util.ToBlockNumArg(new(big.Int).SetUint64(height + uint64(i))),
false,
},
Result: new(types.Header),
Error: nil,
}
}
if err := client.BatchCallContext(ctx, batchElems); err != nil {
return nil, err
}
out := make([]*types.Header, count)
for i := 0; i < len(batchElems); i++ {
if batchElems[i].Error != nil {
return nil, batchElems[i].Error
}
out[i] = batchElems[i].Result.(*types.Header)
}
return out, nil
}
func (f *ConfirmedHeaderSelector) NewHead(
ctx context.Context,
lowest uint64,
header *types.Header,
client *l2ethclient.Client,
) []*types.Header {
client *l2rpc.Client,
) ([]*types.Header, error) {
number := header.Number.Uint64()
blockHash := header.Hash()
......@@ -40,14 +72,14 @@ func (f *ConfirmedHeaderSelector) NewHead(
logger.Info("New block", "block", number, "hash", blockHash)
if number < f.cfg.ConfDepth {
return nil
return nil, nil
}
endHeight := number - f.cfg.ConfDepth + 1
minNextHeight := lowest + f.cfg.ConfDepth
if minNextHeight > number {
log.Info("Fork block=%d hash=%s", number, blockHash)
return nil
return nil, nil
}
startHeight := lowest + 1
......@@ -56,34 +88,35 @@ func (f *ConfirmedHeaderSelector) NewHead(
endHeight = startHeight + f.cfg.MaxBatchSize - 1
}
nHeaders := endHeight - startHeight + 1
nHeaders := int(endHeight - startHeight + 1)
if nHeaders > 1 {
logger.Info("Loading block batch ",
logger.Info("Loading blocks",
"startHeight", startHeight, "endHeight", endHeight)
}
headers := make([]*types.Header, nHeaders)
var wg sync.WaitGroup
for i := uint64(0); i < nHeaders; i++ {
wg.Add(1)
go func(ii uint64) {
defer wg.Done()
ctxt, cancel := context.WithTimeout(ctx, DefaultConnectionTimeout)
defer cancel()
height := startHeight + ii
bigHeight := new(big.Int).SetUint64(height)
header, err := client.HeaderByNumber(ctxt, bigHeight)
if err != nil {
log.Error("Unable to load block ", "block", height, "err", err)
return
}
headers := make([]*types.Header, 0)
height := startHeight
left := nHeaders - len(headers)
for left > 0 {
count := DefaultMaxBatchSize
if count > left {
count = left
}
logger.Info("Loading block batch",
"height", height, "count", count)
ctxt, cancel := context.WithTimeout(ctx, DefaultConnectionTimeout)
fetched, err := HeadersByRange(ctxt, client, height, count)
cancel()
if err != nil {
return nil, err
}
headers[ii] = header
}(i)
headers = append(headers, fetched...)
left = nHeaders - len(headers)
height += uint64(count)
}
wg.Wait()
logger.Debug("Verifying block range ",
"startHeight", startHeight, "endHeight", endHeight)
......@@ -112,7 +145,7 @@ func (f *ConfirmedHeaderSelector) NewHead(
"block", header.Number.Uint64(), "hash", header.Hash())
}
return headers
return headers, nil
}
func NewConfirmedHeaderSelector(cfg HeaderSelectorConfig) (*ConfirmedHeaderSelector,
......
......@@ -10,6 +10,8 @@ import (
"sync"
"time"
l2rpc "github.com/ethereum-optimism/optimism/l2geth/rpc"
"github.com/ethereum-optimism/optimism/go/indexer/metrics"
"github.com/ethereum-optimism/optimism/go/indexer/server"
"github.com/prometheus/client_golang/prometheus"
......@@ -58,6 +60,7 @@ func HeaderByNumberWithRetry(ctx context.Context,
type ServiceConfig struct {
Context context.Context
Metrics *metrics.Metrics
L2RPC *l2rpc.Client
L2Client *l2ethclient.Client
ChainID *big.Int
ConfDepth uint64
......@@ -190,7 +193,10 @@ func (s *Service) Update(newHeader *types.Header) error {
lowest = *highestConfirmed
}
headers := s.headerSelector.NewHead(s.ctx, lowest.Number, newHeader, s.cfg.L2Client)
headers, err := s.headerSelector.NewHead(s.ctx, lowest.Number, newHeader, s.cfg.L2RPC)
if err != nil {
return err
}
if len(headers) == 0 {
return errNoNewBlocks
}
......
package util
import (
"math/big"
"github.com/ethereum/go-ethereum/common/hexutil"
)
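// ToBlockNumArg converts a block number into its JSON-RPC string form:
// nil -> "latest", -1 -> "pending", and any other value a hex quantity
// (e.g. 16 -> "0x10").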
func ToBlockNumArg(number *big.Int) string {
if number == nil {
return "latest"
}
pending := big.NewInt(-1)
if number.Cmp(pending) == 0 {
return "pending"
}
return hexutil.EncodeBig(number)
}
......@@ -66,6 +66,11 @@ var (
Code: JSONRPCErrorInternal - 14,
Message: "too many RPC calls in batch request",
}
ErrGatewayTimeout = &RPCErr{
Code: JSONRPCErrorInternal - 15,
Message: "gateway timeout",
HTTPErrorCode: 504,
}
)
func ErrInvalidRequest(msg string) *RPCErr {
......@@ -217,7 +222,7 @@ func (b *Backend) Forward(ctx context.Context, req *RPCReq) (*RPCRes, error) {
)
respTimer.ObserveDuration()
RecordRPCError(ctx, b.Name, req.Method, err)
time.Sleep(calcBackoff(i))
sleepContext(ctx, calcBackoff(i))
continue
}
respTimer.ObserveDuration()
......@@ -331,7 +336,7 @@ func (b *Backend) setOffline() {
func (b *Backend) doForward(ctx context.Context, rpcReq *RPCReq) (*RPCRes, error) {
body := mustMarshalJSON(rpcReq)
httpReq, err := http.NewRequest("POST", b.rpcURL, bytes.NewReader(body))
httpReq, err := http.NewRequestWithContext(ctx, "POST", b.rpcURL, bytes.NewReader(body))
if err != nil {
return nil, wrapErr(err, "error creating backend request")
}
......@@ -681,3 +686,10 @@ func formatWSError(err error) []byte {
}
return m
}
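// sleepContext is a context-aware alternative to time.Sleep: it returns as
// soon as the context is done, so the retry backoff in Forward aborts
// immediately once the caller's request is canceled or times out.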
func sleepContext(ctx context.Context, duration time.Duration) {
select {
case <-ctx.Done():
case <-time.After(duration):
}
}
......@@ -59,7 +59,10 @@ func newRedisCache(url string) (*redisCache, error) {
}
func (c *redisCache) Get(ctx context.Context, key string) (string, error) {
start := time.Now()
val, err := c.rdb.Get(ctx, key).Result()
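// Record the operation latency whether or not the call succeeded.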
redisCacheDurationSumm.WithLabelValues("GET").Observe(float64(time.Since(start).Milliseconds()))
if err == redis.Nil {
return "", nil
} else if err != nil {
......@@ -70,7 +73,10 @@ func (c *redisCache) Get(ctx context.Context, key string) (string, error) {
}
func (c *redisCache) Put(ctx context.Context, key string, value string) error {
start := time.Now()
err := c.rdb.SetEX(ctx, key, value, redisTTL).Err()
redisCacheDurationSumm.WithLabelValues("SETEX").Observe(float64(time.Since(start).Milliseconds()))
if err != nil {
RecordRedisError("CacheSet")
}
......
......@@ -12,6 +12,9 @@ type ServerConfig struct {
WSHost string `toml:"ws_host"`
WSPort int `toml:"ws_port"`
MaxBodySizeBytes int64 `toml:"max_body_size_bytes"`
// TimeoutSeconds specifies the maximum time spent serving an HTTP request. Note that this is not applied to websocket connections.
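// When zero, the server falls back to defaultServerTimeout (10 seconds).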
TimeoutSeconds int `toml:"timeout_seconds"`
}
type CacheConfig struct {
......
package integration_tests
import (
"net/http"
"os"
"testing"
"time"
"github.com/ethereum-optimism/optimism/go/proxyd"
"github.com/stretchr/testify/require"
)
const (
batchTimeoutResponse = `{"error":{"code":-32015,"message":"gateway timeout"},"id":null,"jsonrpc":"2.0"}`
)
func TestBatchTimeout(t *testing.T) {
slowBackend := NewMockBackend(nil)
defer slowBackend.Close()
require.NoError(t, os.Setenv("SLOW_BACKEND_RPC_URL", slowBackend.URL()))
config := ReadConfig("batch_timeout")
client := NewProxydClient("http://127.0.0.1:8545")
shutdown, err := proxyd.Start(config)
require.NoError(t, err)
defer shutdown()
slowBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Sleep for double the configured server.timeout_seconds (1s in this test's config) to reliably trigger the timeout and prevent flakes.
time.Sleep(time.Second * 2)
SingleResponseHandler(200, goodResponse)(w, r)
}))
res, statusCode, err := client.SendBatchRPC(
NewRPCReq("1", "eth_chainId", nil),
NewRPCReq("1", "eth_chainId", nil),
)
require.NoError(t, err)
require.Equal(t, 504, statusCode)
RequireEqualJSON(t, []byte(batchTimeoutResponse), res)
require.Equal(t, 1, len(slowBackend.Requests()))
}
[server]
rpc_port = 8545
timeout_seconds = 1
[backend]
response_timeout_seconds = 1
max_retries = 3
[backends]
[backends.slow]
rpc_url = "$SLOW_BACKEND_RPC_URL"
ws_url = "$SLOW_BACKEND_RPC_URL"
[backend_groups]
[backend_groups.main]
backends = ["slow"]
[rpc_method_mappings]
eth_chainId = "main"
......@@ -22,6 +22,7 @@ const (
)
var PayloadSizeBuckets = []float64{10, 50, 100, 500, 1000, 5000, 10000, 100000, 1000000}
var MillisecondDurationBuckets = []float64{1, 10, 50, 100, 500, 1000, 5000, 10000, 100000}
var (
rpcRequestsTotal = promauto.NewCounter(prometheus.CounterOpts{
......@@ -192,12 +193,25 @@ var (
"key",
})
batchRPCShortCircuitsTotal = promauto.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "batch_rpc_short_circuits_total",
Help: "Count of total batch RPC short-circuits.",
})
rpcSpecialErrors = []string{
"nonce too low",
"gas price too high",
"gas price too low",
"invalid parameters",
}
redisCacheDurationSumm = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: MetricsNamespace,
Name: "redis_cache_duration_milliseconds",
Help: "Histogram of Redis command durations, in milliseconds.",
Buckets: MillisecondDurationBuckets,
}, []string{"command"})
)
func RecordRedisError(source string) {
......
......@@ -211,6 +211,7 @@ func Start(config *Config) (func(), error) {
config.RPCMethodMappings,
config.Server.MaxBodySizeBytes,
resolvedAuth,
secondsToDuration(config.Server.TimeoutSeconds),
rpcCache,
)
......
......@@ -26,6 +26,7 @@ const (
ContextKeyXForwardedFor = "x_forwarded_for"
MaxBatchRPCCalls = 100
cacheStatusHdr = "X-Proxyd-Cache-Status"
defaultServerTimeout = time.Second * 10
)
type Server struct {
......@@ -35,6 +36,7 @@ type Server struct {
rpcMethodMappings map[string]string
maxBodySize int64
authenticatedPaths map[string]string
timeout time.Duration
upgrader *websocket.Upgrader
rpcServer *http.Server
wsServer *http.Server
......@@ -48,6 +50,7 @@ func NewServer(
rpcMethodMappings map[string]string,
maxBodySize int64,
authenticatedPaths map[string]string,
timeout time.Duration,
cache RPCCache,
) *Server {
if cache == nil {
......@@ -58,6 +61,10 @@ func NewServer(
maxBodySize = math.MaxInt64
}
if timeout == 0 {
timeout = defaultServerTimeout
}
return &Server{
backendGroups: backendGroups,
wsBackendGroup: wsBackendGroup,
......@@ -65,6 +72,7 @@ func NewServer(
rpcMethodMappings: rpcMethodMappings,
maxBodySize: maxBodySize,
authenticatedPaths: authenticatedPaths,
timeout: timeout,
cache: cache,
upgrader: &websocket.Upgrader{
HandshakeTimeout: 5 * time.Second,
......@@ -123,6 +131,9 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
if ctx == nil {
return
}
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, s.timeout)
defer cancel()
log.Info(
"received RPC request",
......@@ -162,6 +173,19 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
batchRes := make([]*RPCRes, len(reqs))
var batchContainsCached bool
for i := 0; i < len(reqs); i++ {
if ctx.Err() == context.DeadlineExceeded {
log.Info(
"short-circuiting batch RPC",
"req_id", GetReqID(ctx),
"auth", GetAuthCtx(ctx),
"index", i,
"batch_size", len(reqs),
)
batchRPCShortCircuitsTotal.Inc()
writeRPCError(ctx, w, nil, ErrGatewayTimeout)
return
}
req, err := ParseRPCReq(reqs[i])
if err != nil {
log.Info("error parsing RPC call", "source", "rpc", "err", err)
......
import { DeployConfig } from '../src/deploy-config'
const config: DeployConfig = {
network: 'goerli-nightly',
l1BlockTimeSeconds: 15,
l2BlockGasLimit: 15_000_000,
l2ChainId: 421,
ctcL2GasDiscountDivisor: 32,
ctcEnqueueGasCost: 60_000,
sccFaultProofWindowSeconds: 604800,
sccSequencerPublishWindowSeconds: 12592000,
ovmSequencerAddress: '0xba517B809d22D5e27F607c03dEBDe09d5Ad27049',
ovmProposerAddress: '0x69fe6dE5b2Cd205FfC6D0c9F26df681b262A91dd',
ovmBlockSignerAddress: '0x00000398232E2064F896018496b4b44b3D62751F',
ovmFeeWalletAddress: '0xba517B809d22D5e27F607c03dEBDe09d5Ad27049',
ovmAddressManagerOwner: '0x4F3F400c20448D33ECc12E7d289F49dA7fC51736',
ovmGasPriceOracleOwner: '0xc8910a1957d276cE5634B978d908B5ef9fB0e05B',
}
export default config
(Two collapsed diffs and one oversized source diff omitted here.)
......@@ -2,11 +2,6 @@ import { HardhatUserConfig } from 'hardhat/types'
import 'solidity-coverage'
import * as dotenv from 'dotenv'
import {
DEFAULT_ACCOUNTS_HARDHAT,
RUN_OVM_TEST_GAS,
} from './test/helpers/constants'
// Hardhat plugins
import '@nomiclabs/hardhat-ethers'
import '@nomiclabs/hardhat-waffle'
......@@ -28,8 +23,6 @@ const deploy = process.env.DEPLOY_DIRECTORY || 'deploy'
const config: HardhatUserConfig = {
networks: {
hardhat: {
accounts: DEFAULT_ACCOUNTS_HARDHAT,
blockGasLimit: RUN_OVM_TEST_GAS * 2,
live: false,
saveDeployments: false,
tags: ['local'],
......
import { ethers } from 'ethers'
/**
* Gets the hardhat artifact for the given contract name.
* Will throw an error if the contract artifact is not found.
*
* @param name Contract name.
* @returns The artifact for the given contract name.
*/
export const getContractDefinition = (name: string): any => {
// We import this using `require` because hardhat tries to build this file when compiling
// the contracts, but we need the contracts to be compiled before the contract-artifacts.ts
......@@ -13,11 +20,24 @@ export const getContractDefinition = (name: string): any => {
return artifact
}
/**
* Gets an ethers Interface instance for the given contract name.
*
* @param name Contract name.
* @returns The interface for the given contract name.
*/
export const getContractInterface = (name: string): ethers.utils.Interface => {
const definition = getContractDefinition(name)
return new ethers.utils.Interface(definition.abi)
}
/**
* Gets an ethers ContractFactory instance for the given contract name.
*
* @param name Contract name.
* @param signer The signer for the ContractFactory to use.
* @returns The contract factory for the given contract name.
*/
export const getContractFactory = (
name: string,
signer?: ethers.Signer
......
......@@ -168,7 +168,7 @@ describe('CanonicalTransactionChain', () => {
await expect(
CanonicalTransactionChain.enqueue(target, gasLimit, data, {
gasLimit: 40000000,
gasLimit: 30_000_000,
})
).to.be.revertedWith(
'Transaction data size exceeds maximum for rollup transaction.'
......
/* External Imports */
import { defaultAccounts } from 'ethereum-waffle'
export const DEFAULT_ACCOUNTS_HARDHAT = defaultAccounts.map((account) => {
return {
balance: account.balance,
privateKey: account.secretKey,
}
})
export const RUN_OVM_TEST_GAS = 20_000_000
export const L2_GAS_DISCOUNT_DIVISOR = 32
export const ENQUEUE_GAS_COST = 60_000
export const NON_NULL_BYTES32 =
'0x1111111111111111111111111111111111111111111111111111111111111111'
export const NON_ZERO_ADDRESS = '0x1111111111111111111111111111111111111111'
......@@ -93,12 +93,22 @@ export class L1TransportServer extends BaseService<L1TransportServerOptions> {
this.state.l1RpcProvider =
typeof this.options.l1RpcProvider === 'string'
? new JsonRpcProvider(this.options.l1RpcProvider)
? new JsonRpcProvider({
url: this.options.l1RpcProvider,
user: this.options.l1RpcProviderUser,
password: this.options.l1RpcProviderPassword,
headers: { 'User-Agent': 'data-transport-layer' },
})
: this.options.l1RpcProvider
this.state.l2RpcProvider =
typeof this.options.l2RpcProvider === 'string'
? new JsonRpcProvider(this.options.l2RpcProvider)
? new JsonRpcProvider({
url: this.options.l2RpcProvider,
user: this.options.l2RpcProviderUser,
password: this.options.l2RpcProviderPassword,
headers: { 'User-Agent': 'data-transport-layer' },
})
: this.options.l2RpcProvider
this._initializeApp()
......