Commit 3aeb6bdd authored by Jacob Elias, committed by GitHub

feat: remove proxyd from monorepo, since it has been moved to ethereum-optimism/infra (#10831)

parent 5cd02140
@@ -1555,10 +1555,6 @@ workflows:
dependencies: "contracts-bedrock"
requires:
- pnpm-monorepo
- go-lint-test-build:
name: proxyd-tests
binary_name: proxyd
working_directory: proxyd
- semgrep-scan
- go-mod-download
- fuzz-golang:
@@ -1774,7 +1770,7 @@ workflows:
type: approval
filters:
tags:
only: /^(da-server|proxyd|chain-mon|ci-builder(-rust)?|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/
only: /^(da-server|chain-mon|ci-builder(-rust)?|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/
branches:
ignore: /.*/
- docker-build:
@@ -1952,21 +1948,6 @@ workflows:
- oplabs-gcr-release
requires:
- hold
- docker-build:
name: proxyd-docker-release
filters:
tags:
only: /^proxyd\/v.*/
branches:
ignore: /.*/
docker_name: proxyd
docker_tags: <<pipeline.git.revision>>
publish: true
release: true
context:
- oplabs-gcr-release
requires:
- hold
- docker-build:
name: chain-mon-docker-release
filters:
......
@@ -228,14 +228,6 @@ pull_request_rules:
request_reviews:
users:
- roninjin10
- name: Add A-proxyd label
conditions:
- 'files~=^proxyd/'
- '#label<5'
actions:
label:
add:
- A-proxyd
- name: Add M-docs label
conditions:
- 'files~=^(docs|specs)\/'
......
@@ -30,7 +30,6 @@ on:
- op-dispute-mon
- op-ufm
- da-server
- proxyd
- op-contracts
- op-conductor
prerelease:
......
@@ -199,19 +199,6 @@ target "cannon" {
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/cannon:${tag}"]
}
target "proxyd" {
dockerfile = "./proxyd/Dockerfile"
context = "./"
args = {
// proxyd dockerfile has no _ in the args
GITCOMMIT = "${GIT_COMMIT}"
GITDATE = "${GIT_DATE}"
GITVERSION = "${GIT_VERSION}"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/proxyd:${tag}"]
}
target "chain-mon" {
dockerfile = "./ops/docker/Dockerfile.packages"
context = "."
......
@@ -55,58 +55,58 @@ ARG TARGETOS TARGETARCH
FROM --platform=$BUILDPLATFORM builder as cannon-builder
ARG CANNON_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd cannon && make cannon \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$CANNON_VERSION"
FROM --platform=$BUILDPLATFORM builder as op-program-builder
ARG OP_PROGRAM_VERSION=v0.0.0
# note: we only build the host, that's all the user needs. No Go MIPS cross-build in docker
RUN --mount=type=cache,target=/root/.cache/go-build cd op-program && make op-program-host \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_PROGRAM_VERSION"
FROM --platform=$BUILDPLATFORM builder as op-heartbeat-builder
ARG OP_HEARTBEAT_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-heartbeat && make op-heartbeat \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_HEARTBEAT_VERSION"
FROM --platform=$BUILDPLATFORM builder as op-wheel-builder
ARG OP_WHEEL_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-wheel && make op-wheel \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_WHEEL_VERSION"
FROM --platform=$BUILDPLATFORM builder as op-node-builder
ARG OP_NODE_VERSION=v0.0.0
RUN echo TARGETOS=$TARGETOS TARGETARCH=$TARGETARCH BUILDPLATFORM=$BUILDPLATFORM TARGETPLATFORM=$TARGETPLATFORM OP_NODE_VERSION=$OP_NODE_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build cd op-node && make op-node \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_NODE_VERSION"
FROM --platform=$BUILDPLATFORM builder as op-challenger-builder
ARG OP_CHALLENGER_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-challenger && make op-challenger \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_CHALLENGER_VERSION"
FROM --platform=$BUILDPLATFORM builder as op-dispute-mon-builder
ARG OP_DISPUTE_MON_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-dispute-mon && make op-dispute-mon \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_DISPUTE_MON_VERSION"
FROM --platform=$BUILDPLATFORM builder as op-batcher-builder
ARG OP_BATCHER_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-batcher && make op-batcher \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_BATCHER_VERSION"
FROM --platform=$BUILDPLATFORM builder as op-proposer-builder
ARG OP_PROPOSER_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-proposer && make op-proposer \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_PROPOSER_VERSION"
FROM --platform=$BUILDPLATFORM builder as op-conductor-builder
ARG OP_CONDUCTOR_VERSION=v0.0.0
RUN --mount=type=cache,target=/root/.cache/go-build cd op-conductor && make op-conductor \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_CONDUCTOR_VERSION"
FROM --platform=$BUILDPLATFORM builder as da-server-builder
RUN --mount=type=cache,target=/root/.cache/go-build cd op-plasma && make da-server \
GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE
FROM --platform=$TARGETPLATFORM $TARGET_BASE_IMAGE as cannon-target
COPY --from=cannon-builder /app/cannon/bin/cannon /usr/local/bin/
......
@@ -6,7 +6,7 @@ DOCKER_REPO=$1
GIT_TAG=$2
GIT_SHA=$3
IMAGE_NAME=$(echo "$GIT_TAG" | grep -Eow '^(ci-builder(-rust)?|chain-mon|proxyd|da-server|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)' || true)
IMAGE_NAME=$(echo "$GIT_TAG" | grep -Eow '^(ci-builder(-rust)?|chain-mon|da-server|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)' || true)
if [ -z "$IMAGE_NAME" ]; then
echo "image name could not be parsed from git tag '$GIT_TAG'"
exit 1
......
@@ -18,7 +18,6 @@ MIN_VERSIONS = {
'op-program': '0.0.0',
'op-dispute-mon': '0.0.0',
'op-proposer': '0.10.14',
'proxyd': '3.16.0',
'op-heartbeat': '0.1.0',
'op-contracts': '1.0.0',
'op-conductor': '0.0.0',
......
@@ -12,7 +12,6 @@ SERVICES = [
'op-dispute-mon',
'op-proposer',
'da-server',
'proxyd',
'op-heartbeat',
'op-contracts',
'test',
......
# @eth-optimism/proxyd
## 3.14.1
### Patch Changes
- 5602deec7: chore(deps): bump github.com/prometheus/client_golang from 1.11.0 to 1.11.1 in /proxyd
- 6b3cf2070: Remove useless logging
## 3.14.0
### Minor Changes
- 9cc39bcfa: Add support for global method override rate limit
- 30db32862: Include nonce in sender rate limit
### Patch Changes
- b9bb1a98a: proxyd: Add req_id to log
## 3.13.0
### Minor Changes
- 6de891d3b: Add sender-based rate limiter
## 3.12.0
### Minor Changes
- e9f2c701: Allow disabling backend rate limiter
- ca45a85e: Support pattern matching in exempt origins/user agents
- f4faa44c: adds server.log_level config
## 3.11.0
### Minor Changes
- b3c5eeec: Fixed JSON-RPC 2.0 specification compliance by adding the optional data field on an RPCError
- 01ae6625: Adds new Redis rate limiter
## 3.10.2
### Patch Changes
- 6bb35fd8: Add customizable whitelist error
- 7121648c: Batch metrics and max batch size
## 3.10.1
### Patch Changes
- b82a8f48: Add logging for origin and remote IP
- 1bf9559c: Carry over custom limit message in batches
## 3.10.0
### Minor Changes
- 157ccc84: Support per-method rate limiting
## 3.9.1
### Patch Changes
- dc4f6a06: Add logging/metrics
## 3.9.0
### Minor Changes
- b6f4bfcf: Add frontend rate limiting
### Patch Changes
- 406a4fce: Unwrap single RPC batches
- 915f3b28: Parameterize full RPC request logging
## 3.8.9
### Patch Changes
- 063c55cf: Use canned response for eth_accounts
## 3.8.8
### Patch Changes
- 58dc7adc: Improve robustness against unexpected JSON-RPC from upstream
- 552cd641: Fix concurrent write panic in WS
## 3.8.7
### Patch Changes
- 6f458607: Bump go-ethereum to 1.10.17
## 3.8.6
### Patch Changes
- d79d40c4: proxyd: Proxy requests using batch JSON-RPC
## 3.8.5
### Patch Changes
- 2a062b11: proxyd: Log sanitized RPC requests
- d9f058ce: proxyd: Reduced RPC request logging
- a4bfd9e7: proxyd: Limit the number of concurrent RPCs to backends
## 3.8.4
### Patch Changes
- 08329ba2: proxyd: Record redis cache operation latency
- ae112021: proxyd: Request-scoped context for fast batch RPC short-circuiting
## 3.8.3
### Patch Changes
- 160f4c3d: Update docker image to use golang 1.18.0
## 3.8.2
### Patch Changes
- ae18cea1: Don't hit Redis when the out of service interval is zero
## 3.8.1
### Patch Changes
- acf7dbd5: Update to go-ethereum v1.10.16
## 3.8.0
### Minor Changes
- 527448bb: Handle nil responses better
## 3.7.0
### Minor Changes
- 3c2926b1: Add debug cache status header to proxyd responses
## 3.6.0
### Minor Changes
- 096c5f20: proxyd: Allow cached RPCs to be evicted by redis
- 71d64834: Add caching for block-dependent RPCs
- fd2e1523: proxyd: Cache block-dependent RPCs
- 1760613c: Add integration tests and batching
## 3.5.0
### Minor Changes
- 025a3c0d: Add request/response payload size metrics to proxyd
- daf8db0b: cache immutable RPC responses in proxyd
- 8aa89bf3: Add X-Forwarded-For header when proxying RPCs on proxyd
## 3.4.1
### Patch Changes
- 415164e1: Force proxyd build
## 3.4.0
### Minor Changes
- 4b56ed84: Various proxyd fixes
## 3.3.0
### Minor Changes
- 7b7ffd2e: Allows string RPC ids on proxyd
## 3.2.0
### Minor Changes
- 73484138: Adds ability to specify env vars in config
## 3.1.2
### Patch Changes
- 1b79aa62: Release proxyd
## 3.1.1
### Patch Changes
- b8802054: Trigger release of proxyd
- 34fcb277: Bump proxyd to test release build workflow
## 3.1.0
### Minor Changes
- da6138fd: Updated metrics, support local rate limiter
### Patch Changes
- 6c7f483b: Add support for additional SSL certificates in Docker container
## 3.0.0
### Major Changes
- abe231bf: Make endpoints match Geth, better logging
## 2.0.0
### Major Changes
- 6c50098b: Update metrics, support WS
- f827dbda: Brings back the ability to selectively route RPC methods to backend groups
### Minor Changes
- 8cc824e5: Updates proxyd to include additional error metrics.
- 9ba4c5e0: Update metrics, support authenticated endpoints
- 78d0f3f0: Put special errors in a dedicated metric, pass along the content-type header
### Patch Changes
- 6e6a55b1: Canary release
## 1.0.2
### Patch Changes
- b9d2fbee: Trigger releases
## 1.0.1
### Patch Changes
- 893623c9: Trigger patch releases for dockerhub
## 1.0.0
### Major Changes
- 28aabc41: Initial release of RPC proxy daemon
FROM golang:1.21.3-alpine3.18 as builder
ARG GITCOMMIT=docker
ARG GITDATE=docker
ARG GITVERSION=docker
RUN apk add make jq git gcc musl-dev linux-headers
COPY ./proxyd /app
WORKDIR /app
RUN make proxyd
FROM alpine:3.18
RUN apk add bind-tools jq curl bash git redis
COPY ./proxyd/entrypoint.sh /bin/entrypoint.sh
RUN apk update && \
apk add ca-certificates && \
chmod +x /bin/entrypoint.sh
EXPOSE 8080
VOLUME /etc/proxyd
COPY --from=builder /app/bin/proxyd /bin/proxyd
ENTRYPOINT ["/bin/entrypoint.sh"]
CMD ["/bin/proxyd", "/etc/proxyd/proxyd.toml"]
# Ignore everything but proxyd; proxyd defines all its dependencies in its go.mod
*
!/proxyd
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
LDFLAGSSTRING +=-X main.GitDate=$(GITDATE)
LDFLAGSSTRING +=-X main.GitVersion=$(GITVERSION)
LDFLAGS := -ldflags "$(LDFLAGSSTRING)"
proxyd:
go build -v $(LDFLAGS) -o ./bin/proxyd ./cmd/proxyd
.PHONY: proxyd
fmt:
go mod tidy
gofmt -w .
.PHONY: fmt
test:
go test -v ./...
.PHONY: test
lint:
go vet ./...
.PHONY: lint
test-fallback:
go test -v ./... -test.run ^TestFallback$
.PHONY: test-fallback
# rpc-proxy
This tool implements `proxyd`, an RPC request router and proxy. It does the following things:
1. Whitelists RPC methods.
2. Routes RPC methods to groups of backend services.
3. Automatically retries failed backend requests.
4. Tracks backend consensus (`latest`, `safe`, `finalized` blocks), peer count, and sync state.
5. Rewrites requests and responses to enforce consensus.
6. Load-balances requests across backend services.
7. Caches immutable responses from backends.
8. Provides metrics to measure request latency, error rates, and the like.
## Usage
Run `make proxyd` to build the binary. No additional dependencies are necessary.
To configure `proxyd` for use, you'll need to create a configuration file to define your proxy backends and routing rules. Check out [example.config.toml](./example.config.toml) for how to do this alongside a full list of all options with commentary.
Once you have a config file, start the daemon via `proxyd <path-to-config>.toml`.
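Because the daemon is a thin wrapper around the `proxyd` Go package, it can also be started programmatically, as the integration tests in this repo do. A minimal sketch (assuming a `proxyd.toml` in the working directory):

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/BurntSushi/toml"
	"github.com/ethereum-optimism/optimism/proxyd"
)

func main() {
	// Decode the TOML config file, as cmd/proxyd does.
	config := new(proxyd.Config)
	if _, err := toml.DecodeFile("proxyd.toml", config); err != nil {
		log.Fatalf("error reading config file: %v", err)
	}

	// Start returns a shutdown callback for graceful teardown.
	_, shutdown, err := proxyd.Start(config)
	if err != nil {
		log.Fatalf("error starting proxyd: %v", err)
	}

	// Serve until interrupted.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
	<-sig
	shutdown()
}
```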
## Consensus awareness
Starting with v4.0.0, `proxyd` is aware of the consensus state of its backends. This helps minimize chain reorgs experienced by clients.
To enable this behavior, you must set the `consensus_aware` value to `true` in the backend group.
When consensus awareness is enabled, `proxyd` will poll the backends for their states and resolve a consensus group based on:
* the common ancestor `latest` block, i.e. if a backend is experiencing a fork, the fork won't be visible to the clients
* the lowest `safe` block
* the lowest `finalized` block
* peer count
* sync state
The backend group then acts as a round-robin load balancer distributing traffic equally across healthy backends in the consensus group, increasing the availability of the proxy.
A backend is considered healthy if it meets the following criteria:
* not banned
* avg 1-min moving window error rate ≤ configurable threshold
* avg 1-min moving window latency ≤ configurable threshold
* peer count ≥ configurable threshold
* `latest` block lag ≤ configurable threshold
* last state update ≤ configurable threshold
* not currently syncing
When a backend is experiencing inconsistent consensus, high error rates, or high latency,
it will be banned for a configurable amount of time (default 5 minutes)
and won't receive any traffic during this period.
## Tag rewrite
When consensus awareness is enabled, `proxyd` will enforce the consensus state transparently for all clients.
For example, if a client requests the `eth_getBlockByNumber` method with the `latest` tag,
`proxyd` will rewrite the request to use the resolved latest block from the consensus group
and forward it to the backend.
The following request methods are rewritten:
* `eth_getLogs`
* `eth_newFilter`
* `eth_getBalance`
* `eth_getCode`
* `eth_getTransactionCount`
* `eth_call`
* `eth_getStorageAt`
* `eth_getBlockTransactionCountByNumber`
* `eth_getUncleCountByBlockNumber`
* `eth_getBlockByNumber`
* `eth_getTransactionByBlockNumberAndIndex`
* `eth_getUncleByBlockNumberAndIndex`
* `debug_getRawReceipts`
The `eth_blockNumber` response is overridden with the current consensus block.
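To illustrate the idea, here is a simplified, hypothetical sketch of such a rewrite (not the actual rewriter, which handles many more parameter shapes):

```go
package proxyd

import (
	"encoding/json"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// rewriteLatestTag is a hypothetical, simplified rewrite: the "latest"
// tag in the first parameter is replaced with the block number resolved
// by the consensus group before the request is forwarded upstream.
func rewriteLatestTag(params json.RawMessage, consensusLatest hexutil.Uint64) (json.RawMessage, error) {
	var p []interface{}
	if err := json.Unmarshal(params, &p); err != nil {
		return nil, err
	}
	if len(p) > 0 && p[0] == "latest" {
		p[0] = consensusLatest.String()
	}
	return json.Marshal(p)
}
```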
## Cacheable methods
The cache uses Redis and can be enabled for the following immutable methods:
* `eth_chainId`
* `net_version`
* `eth_getBlockTransactionCountByHash`
* `eth_getUncleCountByBlockHash`
* `eth_getBlockByHash`
* `eth_getTransactionByBlockHashAndIndex`
* `eth_getUncleByBlockHashAndIndex`
* `debug_getRawReceipts` (block hash only)
## Meta method `consensus_getReceipts`
To support backends with different specifications in the same backend group,
proxyd exposes a convenience method to fetch receipts, abstracting away
which specific backend will serve the request.
Each backend specifies its preferred method to fetch receipts with the `consensus_receipts_target` config,
which `consensus_getReceipts` will be translated to.
This method takes a `blockNumberOrHash` (i.e. `tag|qty|hash`)
and returns the receipts for all transactions in the block.
Request example
```json
{
"jsonrpc":"2.0",
"id": 1,
"params": ["0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"]
}
```
It currently supports translation to the following targets:
* `debug_getRawReceipts(blockOrHash)` (default)
* `alchemy_getTransactionReceipts(blockOrHash)`
* `parity_getBlockReceipts(blockOrHash)`
* `eth_getBlockReceipts(blockOrHash)`
The selected target is returned in the response, in a wrapped result.
Response example
```json
{
"jsonrpc": "2.0",
"id": 1,
"result": {
"method": "debug_getRawReceipts",
"result": {
// the actual raw result from backend
}
}
}
```
See [op-node receipt fetcher](https://github.com/ethereum-optimism/optimism/blob/186e46a47647a51a658e699e9ff047d39444c2de/op-node/sources/receipts.go#L186-L253).
## Metrics
See `metrics.go` for a list of all available metrics.
The metrics port is configurable via the `metrics.port` and `metrics.host` keys in the config.
## Adding Backend SSL Certificates in Docker
The Docker image runs on Alpine Linux. If you get SSL errors when connecting to a backend within Docker, you may need to add additional certificates to Alpine's certificate store. To do this, bind mount the certificate bundle into a file in `/usr/local/share/ca-certificates`. The `entrypoint.sh` script will then update the store with whatever is in the `ca-certificates` directory prior to starting `proxyd`.
# ⚠️ Important
This project has been moved to [ethereum-optimism/infra](https://github.com/ethereum-optimism/infra).
package proxyd
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestStripXFF(t *testing.T) {
tests := []struct {
in, out string
}{
{"1.2.3, 4.5.6, 7.8.9", "1.2.3"},
{"1.2.3,4.5.6", "1.2.3"},
{" 1.2.3 , 4.5.6 ", "1.2.3"},
}
for _, test := range tests {
actual := stripXFF(test.in)
assert.Equal(t, test.out, actual)
}
}
package proxyd
import (
"context"
"encoding/json"
"strings"
"time"
"github.com/ethereum/go-ethereum/rpc"
"github.com/redis/go-redis/v9"
"github.com/golang/snappy"
lru "github.com/hashicorp/golang-lru"
)
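// Cache is a minimal string key/value store; the implementations below
// are an in-memory LRU and a Redis-backed cache.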
type Cache interface {
Get(ctx context.Context, key string) (string, error)
Put(ctx context.Context, key string, value string) error
}
const (
// assuming an average RPCRes size of 3 KB
memoryCacheLimit = 4096
)
type cache struct {
lru *lru.Cache
}
func newMemoryCache() *cache {
rep, _ := lru.New(memoryCacheLimit)
return &cache{rep}
}
func (c *cache) Get(ctx context.Context, key string) (string, error) {
if val, ok := c.lru.Get(key); ok {
return val.(string), nil
}
return "", nil
}
func (c *cache) Put(ctx context.Context, key string, value string) error {
c.lru.Add(key, value)
return nil
}
type redisCache struct {
rdb *redis.Client
prefix string
ttl time.Duration
}
func newRedisCache(rdb *redis.Client, prefix string, ttl time.Duration) *redisCache {
return &redisCache{rdb, prefix, ttl}
}
func (c *redisCache) namespaced(key string) string {
if c.prefix == "" {
return key
}
return strings.Join([]string{c.prefix, key}, ":")
}
func (c *redisCache) Get(ctx context.Context, key string) (string, error) {
start := time.Now()
val, err := c.rdb.Get(ctx, c.namespaced(key)).Result()
redisCacheDurationSumm.WithLabelValues("GET").Observe(float64(time.Since(start).Milliseconds()))
if err == redis.Nil {
return "", nil
} else if err != nil {
RecordRedisError("CacheGet")
return "", err
}
return val, nil
}
func (c *redisCache) Put(ctx context.Context, key string, value string) error {
start := time.Now()
err := c.rdb.SetEx(ctx, c.namespaced(key), value, c.ttl).Err()
redisCacheDurationSumm.WithLabelValues("SETEX").Observe(float64(time.Since(start).Milliseconds()))
if err != nil {
RecordRedisError("CacheSet")
}
return err
}
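// cacheWithCompression wraps another Cache, snappy-compressing values on
// Put and decompressing them on Get.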
type cacheWithCompression struct {
cache Cache
}
func newCacheWithCompression(cache Cache) *cacheWithCompression {
return &cacheWithCompression{cache}
}
func (c *cacheWithCompression) Get(ctx context.Context, key string) (string, error) {
encodedVal, err := c.cache.Get(ctx, key)
if err != nil {
return "", err
}
if encodedVal == "" {
return "", nil
}
val, err := snappy.Decode(nil, []byte(encodedVal))
if err != nil {
return "", err
}
return string(val), nil
}
func (c *cacheWithCompression) Put(ctx context.Context, key string, value string) error {
encodedVal := snappy.Encode(nil, []byte(value))
return c.cache.Put(ctx, key, string(encodedVal))
}
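// RPCCache caches whole JSON-RPC responses, delegating per-method
// cacheability decisions to the RPCMethodHandler registered for each method.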
type RPCCache interface {
GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error)
PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error
}
type rpcCache struct {
cache Cache
handlers map[string]RPCMethodHandler
}
func newRPCCache(cache Cache) RPCCache {
staticHandler := &StaticMethodHandler{cache: cache}
debugGetRawReceiptsHandler := &StaticMethodHandler{cache: cache,
filterGet: func(req *RPCReq) bool {
// cache only if the request is for a block hash
var p []rpc.BlockNumberOrHash
err := json.Unmarshal(req.Params, &p)
if err != nil {
return false
}
if len(p) != 1 {
return false
}
return p[0].BlockHash != nil
},
filterPut: func(req *RPCReq, res *RPCRes) bool {
// don't cache if response contains 0 receipts
rawReceipts, ok := res.Result.([]interface{})
if !ok {
return false
}
return len(rawReceipts) > 0
},
}
handlers := map[string]RPCMethodHandler{
"eth_chainId": staticHandler,
"net_version": staticHandler,
"eth_getBlockTransactionCountByHash": staticHandler,
"eth_getUncleCountByBlockHash": staticHandler,
"eth_getBlockByHash": staticHandler,
"eth_getTransactionByBlockHashAndIndex": staticHandler,
"eth_getUncleByBlockHashAndIndex": staticHandler,
"debug_getRawReceipts": debugGetRawReceiptsHandler,
}
return &rpcCache{
cache: cache,
handlers: handlers,
}
}
func (c *rpcCache) GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) {
handler := c.handlers[req.Method]
if handler == nil {
return nil, nil
}
res, err := handler.GetRPCMethod(ctx, req)
if err != nil {
RecordCacheError(req.Method)
return nil, err
}
if res == nil {
RecordCacheMiss(req.Method)
} else {
RecordCacheHit(req.Method)
}
return res, nil
}
func (c *rpcCache) PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error {
handler := c.handlers[req.Method]
if handler == nil {
return nil
}
return handler.PutRPCMethod(ctx, req, res)
}
package proxyd
import (
"context"
"strconv"
"testing"
"github.com/stretchr/testify/require"
)
func TestRPCCacheImmutableRPCs(t *testing.T) {
ctx := context.Background()
cache := newRPCCache(newMemoryCache())
ID := []byte(strconv.Itoa(1))
rpcs := []struct {
req *RPCReq
res *RPCRes
name string
}{
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_chainId",
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: "0xff",
ID: ID,
},
name: "eth_chainId",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "net_version",
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: "9999",
ID: ID,
},
name: "net_version",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockTransactionCountByHash",
Params: mustMarshalJSON([]string{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"}),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `{"eth_getBlockTransactionCountByHash":"!"}`,
ID: ID,
},
name: "eth_getBlockTransactionCountByHash",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getUncleCountByBlockHash",
Params: mustMarshalJSON([]string{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"}),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `{"eth_getUncleCountByBlockHash":"!"}`,
ID: ID,
},
name: "eth_getUncleCountByBlockHash",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockByHash",
Params: mustMarshalJSON([]string{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", "false"}),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `{"eth_getBlockByHash":"!"}`,
ID: ID,
},
name: "eth_getBlockByHash",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getUncleByBlockHashAndIndex",
Params: mustMarshalJSON([]string{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238", "0x90"}),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: `{"eth_getUncleByBlockHashAndIndex":"!"}`,
ID: ID,
},
name: "eth_getUncleByBlockHashAndIndex",
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "debug_getRawReceipts",
Params: mustMarshalJSON([]string{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"}),
ID: ID,
},
res: &RPCRes{
JSONRPC: "2.0",
Result: []interface{}{"a"},
ID: ID,
},
name: "debug_getRawReceipts",
},
}
for _, rpc := range rpcs {
t.Run(rpc.name, func(t *testing.T) {
err := cache.PutRPC(ctx, rpc.req, rpc.res)
require.NoError(t, err)
cachedRes, err := cache.GetRPC(ctx, rpc.req)
require.NoError(t, err)
require.Equal(t, rpc.res, cachedRes)
})
}
}
func TestRPCCacheUnsupportedMethod(t *testing.T) {
ctx := context.Background()
cache := newRPCCache(newMemoryCache())
ID := []byte(strconv.Itoa(1))
rpcs := []struct {
req *RPCReq
name string
}{
{
name: "eth_syncing",
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_syncing",
ID: ID,
},
},
{
name: "eth_blockNumber",
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_blockNumber",
ID: ID,
},
},
{
name: "eth_getBlockByNumber",
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockByNumber",
ID: ID,
},
},
{
name: "eth_getBlockRange",
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_getBlockRange",
ID: ID,
},
},
{
name: "eth_gasPrice",
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_gasPrice",
ID: ID,
},
},
{
name: "eth_call",
req: &RPCReq{
JSONRPC: "2.0",
Method: "eth_call",
ID: ID,
},
},
{
req: &RPCReq{
JSONRPC: "2.0",
Method: "debug_getRawReceipts",
Params: mustMarshalJSON([]string{"0x100"}),
ID: ID,
},
name: "debug_getRawReceipts",
},
}
for _, rpc := range rpcs {
t.Run(rpc.name, func(t *testing.T) {
fakeval := mustMarshalJSON([]string{rpc.name})
err := cache.PutRPC(ctx, rpc.req, &RPCRes{Result: fakeval})
require.NoError(t, err)
cachedRes, err := cache.GetRPC(ctx, rpc.req)
require.NoError(t, err)
require.Nil(t, cachedRes)
})
}
}
package main
import (
"fmt"
"net"
"net/http"
"net/http/pprof"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"github.com/BurntSushi/toml"
"golang.org/x/exp/slog"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/proxyd"
)
var (
GitVersion = ""
GitCommit = ""
GitDate = ""
)
func main() {
// Set up logger with a default INFO level in case we fail to parse flags.
// Otherwise the final critical log won't show what the parsing error was.
proxyd.SetLogLevel(slog.LevelInfo)
log.Info("starting proxyd", "version", GitVersion, "commit", GitCommit, "date", GitDate)
if len(os.Args) < 2 {
log.Crit("must specify a config file on the command line")
}
config := new(proxyd.Config)
if _, err := toml.DecodeFile(os.Args[1], config); err != nil {
log.Crit("error reading config file", "err", err)
}
// update log level from config
logLevel, err := LevelFromString(config.Server.LogLevel)
if err != nil {
logLevel = log.LevelInfo
if config.Server.LogLevel != "" {
log.Warn("invalid server.log_level set: " + config.Server.LogLevel)
}
}
proxyd.SetLogLevel(logLevel)
if config.Server.EnablePprof {
log.Info("starting pprof", "addr", "0.0.0.0", "port", "6060")
pprofSrv := StartPProf("0.0.0.0", 6060)
log.Info("started pprof server", "addr", pprofSrv.Addr)
defer func() {
if err := pprofSrv.Close(); err != nil {
log.Error("failed to stop pprof server", "err", err)
}
}()
}
_, shutdown, err := proxyd.Start(config)
if err != nil {
log.Crit("error starting proxyd", "err", err)
}
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
recvSig := <-sig
log.Info("caught signal, shutting down", "signal", recvSig)
shutdown()
}
// LevelFromString returns the appropriate Level from a string name.
// Useful for parsing command line args and configuration files.
// It also converts strings to lowercase.
// Note: copied from op-service/log to avoid monorepo dependency
func LevelFromString(lvlString string) (slog.Level, error) {
lvlString = strings.ToLower(lvlString) // ignore case
switch lvlString {
case "trace", "trce":
return log.LevelTrace, nil
case "debug", "dbug":
return log.LevelDebug, nil
case "info":
return log.LevelInfo, nil
case "warn":
return log.LevelWarn, nil
case "error", "eror":
return log.LevelError, nil
case "crit":
return log.LevelCrit, nil
default:
return log.LevelDebug, fmt.Errorf("unknown level: %v", lvlString)
}
}
func StartPProf(hostname string, port int) *http.Server {
mux := http.NewServeMux()
// have to do below to support multiple servers, since the
// pprof import only uses DefaultServeMux
mux.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
mux.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
addr := net.JoinHostPort(hostname, strconv.Itoa(port))
srv := &http.Server{
Handler: mux,
Addr: addr,
}
go srv.ListenAndServe()
return srv
}
package proxyd
import (
"fmt"
"math/big"
"os"
"strings"
"time"
)
type ServerConfig struct {
RPCHost string `toml:"rpc_host"`
RPCPort int `toml:"rpc_port"`
WSHost string `toml:"ws_host"`
WSPort int `toml:"ws_port"`
MaxBodySizeBytes int64 `toml:"max_body_size_bytes"`
MaxConcurrentRPCs int64 `toml:"max_concurrent_rpcs"`
LogLevel string `toml:"log_level"`
// TimeoutSeconds specifies the maximum time spent serving an HTTP request. Note that this isn't used for websocket connections
TimeoutSeconds int `toml:"timeout_seconds"`
MaxUpstreamBatchSize int `toml:"max_upstream_batch_size"`
EnableRequestLog bool `toml:"enable_request_log"`
MaxRequestBodyLogLen int `toml:"max_request_body_log_len"`
EnablePprof bool `toml:"enable_pprof"`
EnableXServedByHeader bool `toml:"enable_served_by_header"`
AllowAllOrigins bool `toml:"allow_all_origins"`
}
type CacheConfig struct {
Enabled bool `toml:"enabled"`
TTL TOMLDuration `toml:"ttl"`
}
type RedisConfig struct {
URL string `toml:"url"`
Namespace string `toml:"namespace"`
}
type MetricsConfig struct {
Enabled bool `toml:"enabled"`
Host string `toml:"host"`
Port int `toml:"port"`
}
type RateLimitConfig struct {
UseRedis bool `toml:"use_redis"`
BaseRate int `toml:"base_rate"`
BaseInterval TOMLDuration `toml:"base_interval"`
ExemptOrigins []string `toml:"exempt_origins"`
ExemptUserAgents []string `toml:"exempt_user_agents"`
ErrorMessage string `toml:"error_message"`
MethodOverrides map[string]*RateLimitMethodOverride `toml:"method_overrides"`
IPHeaderOverride string `toml:"ip_header_override"`
}
type RateLimitMethodOverride struct {
Limit int `toml:"limit"`
Interval TOMLDuration `toml:"interval"`
Global bool `toml:"global"`
}
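// TOMLDuration is a time.Duration that unmarshals from a TOML string in
// time.ParseDuration syntax, e.g. "30s" or "5m".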
type TOMLDuration time.Duration
func (t *TOMLDuration) UnmarshalText(b []byte) error {
d, err := time.ParseDuration(string(b))
if err != nil {
return err
}
*t = TOMLDuration(d)
return nil
}
type BackendOptions struct {
ResponseTimeoutSeconds int `toml:"response_timeout_seconds"`
MaxResponseSizeBytes int64 `toml:"max_response_size_bytes"`
MaxRetries int `toml:"max_retries"`
OutOfServiceSeconds int `toml:"out_of_service_seconds"`
MaxDegradedLatencyThreshold TOMLDuration `toml:"max_degraded_latency_threshold"`
MaxLatencyThreshold TOMLDuration `toml:"max_latency_threshold"`
MaxErrorRateThreshold float64 `toml:"max_error_rate_threshold"`
}
type BackendConfig struct {
Username string `toml:"username"`
Password string `toml:"password"`
RPCURL string `toml:"rpc_url"`
WSURL string `toml:"ws_url"`
WSPort int `toml:"ws_port"`
MaxRPS int `toml:"max_rps"`
MaxWSConns int `toml:"max_ws_conns"`
CAFile string `toml:"ca_file"`
ClientCertFile string `toml:"client_cert_file"`
ClientKeyFile string `toml:"client_key_file"`
StripTrailingXFF bool `toml:"strip_trailing_xff"`
Headers map[string]string `toml:"headers"`
Weight int `toml:"weight"`
ConsensusSkipPeerCountCheck bool `toml:"consensus_skip_peer_count"`
ConsensusForcedCandidate bool `toml:"consensus_forced_candidate"`
ConsensusReceiptsTarget string `toml:"consensus_receipts_target"`
}
type BackendsConfig map[string]*BackendConfig
type BackendGroupConfig struct {
Backends []string `toml:"backends"`
WeightedRouting bool `toml:"weighted_routing"`
ConsensusAware bool `toml:"consensus_aware"`
ConsensusAsyncHandler string `toml:"consensus_handler"`
ConsensusPollerInterval TOMLDuration `toml:"consensus_poller_interval"`
ConsensusBanPeriod TOMLDuration `toml:"consensus_ban_period"`
ConsensusMaxUpdateThreshold TOMLDuration `toml:"consensus_max_update_threshold"`
ConsensusMaxBlockLag uint64 `toml:"consensus_max_block_lag"`
ConsensusMaxBlockRange uint64 `toml:"consensus_max_block_range"`
ConsensusMinPeerCount int `toml:"consensus_min_peer_count"`
ConsensusHA bool `toml:"consensus_ha"`
ConsensusHAHeartbeatInterval TOMLDuration `toml:"consensus_ha_heartbeat_interval"`
ConsensusHALockPeriod TOMLDuration `toml:"consensus_ha_lock_period"`
ConsensusHARedis RedisConfig `toml:"consensus_ha_redis"`
Fallbacks []string `toml:"fallbacks"`
}
type BackendGroupsConfig map[string]*BackendGroupConfig
type MethodMappingsConfig map[string]string
type BatchConfig struct {
MaxSize int `toml:"max_size"`
ErrorMessage string `toml:"error_message"`
}
// SenderRateLimitConfig configures the sender-based rate limiter
// for eth_sendRawTransaction requests.
// To enable pre-eip155 transactions, add '0' to allowed_chain_ids.
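//
// A hypothetical TOML sketch (keys follow the struct fields below; the
// chain IDs are illustrative):
//
//	[sender_rate_limit]
//	enabled = true
//	interval = "10s"
//	limit = 5
//	allowed_chain_ids = [0, 10]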
type SenderRateLimitConfig struct {
Enabled bool
Interval TOMLDuration
Limit int
AllowedChainIds []*big.Int `toml:"allowed_chain_ids"`
}
type Config struct {
WSBackendGroup string `toml:"ws_backend_group"`
Server ServerConfig `toml:"server"`
Cache CacheConfig `toml:"cache"`
Redis RedisConfig `toml:"redis"`
Metrics MetricsConfig `toml:"metrics"`
RateLimit RateLimitConfig `toml:"rate_limit"`
BackendOptions BackendOptions `toml:"backend"`
Backends BackendsConfig `toml:"backends"`
BatchConfig BatchConfig `toml:"batch"`
Authentication map[string]string `toml:"authentication"`
BackendGroups BackendGroupsConfig `toml:"backend_groups"`
RPCMethodMappings map[string]string `toml:"rpc_method_mappings"`
WSMethodWhitelist []string `toml:"ws_method_whitelist"`
WhitelistErrorMessage string `toml:"whitelist_error_message"`
SenderRateLimit SenderRateLimitConfig `toml:"sender_rate_limit"`
}
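// ReadFromEnvOrConfig resolves a config value: values prefixed with "$"
// are read from the named environment variable, a leading "\" escapes a
// literal value, and anything else is returned as-is.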
func ReadFromEnvOrConfig(value string) (string, error) {
if strings.HasPrefix(value, "$") {
envValue := os.Getenv(strings.TrimPrefix(value, "$"))
if envValue == "" {
return "", fmt.Errorf("config env var %s not found", value)
}
return envValue, nil
}
if strings.HasPrefix(value, "\\") {
return strings.TrimPrefix(value, "\\"), nil
}
return value, nil
}
#!/bin/sh
echo "Updating CA certificates."
update-ca-certificates
echo "Running CMD."
exec "$@"
package proxyd
import "fmt"
func wrapErr(err error, msg string) error {
return fmt.Errorf("%s %w", msg, err)
}
# List of WS methods to whitelist.
ws_method_whitelist = [
"eth_subscribe",
"eth_call",
"eth_chainId"
]
# Enable WS on this backend group. There can only be one WS-enabled backend group.
ws_backend_group = "main"
[server]
# Host for the proxyd RPC server to listen on.
rpc_host = "0.0.0.0"
# Port for the above.
rpc_port = 8080
# Host for the proxyd WS server to listen on.
ws_host = "0.0.0.0"
# Port for the above.
# Set the ws_port to 0 to disable WS.
ws_port = 8085
# Maximum client body size, in bytes, that the server will accept.
max_body_size_bytes = 10485760
max_concurrent_rpcs = 1000
# Server log level
log_level = "info"
[redis]
# URL to a Redis instance.
url = "redis://localhost:6379"
[metrics]
# Whether or not to enable Prometheus metrics.
enabled = true
# Host for the Prometheus metrics endpoint to listen on.
host = "0.0.0.0"
# Port for the above.
port = 9761
[backend]
# How long proxyd should wait for a backend response before timing out.
response_timeout_seconds = 5
# Maximum response size, in bytes, that proxyd will accept from a backend.
max_response_size_bytes = 5242880
# Maximum number of times proxyd will try a backend before giving up.
max_retries = 3
# Number of seconds to wait before trying an unhealthy backend again.
out_of_service_seconds = 600
# Maximum latency accepted to serve requests, default 10s
max_latency_threshold = "30s"
# Maximum latency accepted to serve requests before degraded, default 5s
max_degraded_latency_threshold = "10s"
# Maximum error rate accepted to serve requests, default 0.5 (i.e. 50%)
max_error_rate_threshold = 0.3
[backends]
# A map of backends by name.
[backends.infura]
# The URL to contact the backend at. Will be read from the environment
# if an environment variable prefixed with $ is provided.
rpc_url = ""
# The WS URL to contact the backend at. Will be read from the environment
# if an environment variable prefixed with $ is provided.
ws_url = ""
username = ""
# An HTTP Basic password to authenticate with the backend. Will be read from
# the environment if an environment variable prefixed with $ is provided.
password = ""
max_rps = 3
max_ws_conns = 1
# Path to a custom root CA.
ca_file = ""
# Path to a custom client cert file.
client_cert_file = ""
# Path to a custom client key file.
client_key_file = ""
# Allows backends to skip peer count checking, default false
# consensus_skip_peer_count = true
# Specifies the target method to get receipts, default "debug_getRawReceipts"
# See https://github.com/ethereum-optimism/optimism/blob/186e46a47647a51a658e699e9ff047d39444c2de/op-node/sources/receipts.go#L186-L253
consensus_receipts_target = "eth_getBlockReceipts"
[backends.alchemy]
rpc_url = ""
ws_url = ""
username = ""
password = ""
max_rps = 3
max_ws_conns = 1
consensus_receipts_target = "alchemy_getTransactionReceipts"
[backend_groups]
[backend_groups.main]
backends = ["infura"]
# Enable consensus awareness for backend group, making it act as a load balancer, default false
# consensus_aware = true
# Period in which the backend won't serve requests if banned, default 5m
# consensus_ban_period = "1m"
# Maximum delay for updating the backend, default 30s
# consensus_max_update_threshold = "20s"
# Maximum block lag, default 8
# consensus_max_block_lag = 16
# Maximum block range (for eth_getLogs method), no default
# consensus_max_block_range = 20000
# Minimum peer count, default 3
# consensus_min_peer_count = 4
[backend_groups.alchemy]
backends = ["alchemy"]
# If the authentication group below is in the config,
# proxyd will only accept authenticated requests.
[authentication]
# Mapping of auth key to alias. The alias is used to provide a human-
# readable name for the auth key in monitoring. The auth key will be
# read from the environment if an environment variable prefixed with $
# is provided. Note that you will need to quote the environment variable
# in order for it to be valid TOML, e.g. "$FOO_AUTH_KEY" = "foo_alias".
secret = "test"
# Mapping of methods to backend groups.
[rpc_method_mappings]
eth_call = "main"
eth_chainId = "main"
eth_blockNumber = "alchemy"
package proxyd
import (
"context"
"fmt"
"sync"
"time"
"github.com/redis/go-redis/v9"
)
type FrontendRateLimiter interface {
// Take consumes a key, and a maximum number of requests
// per time interval. It returns a boolean denoting if
// the limit could be taken, or an error if a failure
// occurred in the backing rate limit implementation.
//
// No error will be returned if the limit could not be taken
// as a result of the requestor being over the limit.
Take(ctx context.Context, key string) (bool, error)
}
// limitedKeys tracks rate limit keys and their used counts for a single
// truncated timestamp (generation), guarded by a mutex.
type limitedKeys struct {
truncTS int64
keys map[string]int
mtx sync.Mutex
}
func newLimitedKeys(t int64) *limitedKeys {
return &limitedKeys{
truncTS: t,
keys: make(map[string]int),
}
}
func (l *limitedKeys) Take(key string, max int) bool {
l.mtx.Lock()
defer l.mtx.Unlock()
val, ok := l.keys[key]
if !ok {
l.keys[key] = 0
val = 0
}
l.keys[key] = val + 1
return val < max
}
// MemoryFrontendRateLimiter is a rate limiter that stores
// all rate limiting information in local memory. It works
// by storing a limitedKeys struct that references the
// truncated timestamp at which the struct was created. If
// the current truncated timestamp doesn't match what's
// referenced, the limit is reset. Otherwise, values in
// a map are incremented to represent the limit.
type MemoryFrontendRateLimiter struct {
currGeneration *limitedKeys
dur time.Duration
max int
mtx sync.Mutex
}
func NewMemoryFrontendRateLimit(dur time.Duration, max int) FrontendRateLimiter {
return &MemoryFrontendRateLimiter{
dur: dur,
max: max,
}
}
func (m *MemoryFrontendRateLimiter) Take(ctx context.Context, key string) (bool, error) {
m.mtx.Lock()
// Create truncated timestamp
truncTS := truncateNow(m.dur)
// If there is no current rate limit map or the rate limit map references
// a different timestamp, reset limits.
if m.currGeneration == nil || m.currGeneration.truncTS != truncTS {
m.currGeneration = newLimitedKeys(truncTS)
}
// Pull out the limiter so we can unlock before incrementing the limit.
limiter := m.currGeneration
m.mtx.Unlock()
return limiter.Take(key, m.max), nil
}
// RedisFrontendRateLimiter is a rate limiter that stores data in Redis.
// It uses the basic rate limiter pattern described on the Redis best
// practices website: https://redis.com/redis-best-practices/basic-rate-limiting/.
type RedisFrontendRateLimiter struct {
r *redis.Client
dur time.Duration
max int
prefix string
}
func NewRedisFrontendRateLimiter(r *redis.Client, dur time.Duration, max int, prefix string) FrontendRateLimiter {
return &RedisFrontendRateLimiter{
r: r,
dur: dur,
max: max,
prefix: prefix,
}
}
func (r *RedisFrontendRateLimiter) Take(ctx context.Context, key string) (bool, error) {
var incr *redis.IntCmd
truncTS := truncateNow(r.dur)
fullKey := fmt.Sprintf("rate_limit:%s:%s:%d", r.prefix, key, truncTS)
_, err := r.r.Pipelined(ctx, func(pipe redis.Pipeliner) error {
incr = pipe.Incr(ctx, fullKey)
pipe.PExpire(ctx, fullKey, r.dur-time.Millisecond)
return nil
})
if err != nil {
frontendRateLimitTakeErrors.Inc()
return false, err
}
return incr.Val()-1 < int64(r.max), nil
}
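// noopFrontendRateLimiter always allows the request through.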
type noopFrontendRateLimiter struct{}
var NoopFrontendRateLimiter = &noopFrontendRateLimiter{}
func (n *noopFrontendRateLimiter) Take(ctx context.Context, key string) (bool, error) {
return true, nil
}
// truncateNow truncates the current timestamp
// to the specified duration.
func truncateNow(dur time.Duration) int64 {
return time.Now().Truncate(dur).Unix()
}
package proxyd
import (
"context"
"fmt"
"testing"
"time"
"github.com/alicebob/miniredis"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
)
func TestFrontendRateLimiter(t *testing.T) {
redisServer, err := miniredis.Run()
require.NoError(t, err)
defer redisServer.Close()
redisClient := redis.NewClient(&redis.Options{
Addr: fmt.Sprintf("127.0.0.1:%s", redisServer.Port()),
})
max := 2
lims := []struct {
name string
frl FrontendRateLimiter
}{
{"memory", NewMemoryFrontendRateLimit(2*time.Second, max)},
{"redis", NewRedisFrontendRateLimiter(redisClient, 2*time.Second, max, "")},
}
for _, cfg := range lims {
frl := cfg.frl
ctx := context.Background()
t.Run(cfg.name, func(t *testing.T) {
for i := 0; i < 4; i++ {
ok, err := frl.Take(ctx, "foo")
require.NoError(t, err)
require.Equal(t, i < max, ok)
ok, err = frl.Take(ctx, "bar")
require.NoError(t, err)
require.Equal(t, i < max, ok)
}
time.Sleep(2 * time.Second)
for i := 0; i < 4; i++ {
ok, _ := frl.Take(ctx, "foo")
require.Equal(t, i < max, ok)
ok, _ = frl.Take(ctx, "bar")
require.Equal(t, i < max, ok)
}
})
}
}
module github.com/ethereum-optimism/optimism/proxyd
go 1.21
require (
github.com/BurntSushi/toml v1.3.2
github.com/alicebob/miniredis v2.5.0+incompatible
github.com/emirpasic/gods v1.18.1
github.com/ethereum/go-ethereum v1.13.15
github.com/go-redsync/redsync/v4 v4.10.0
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.5.0
github.com/hashicorp/golang-lru v1.0.2
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.17.0
github.com/redis/go-redis/v9 v9.2.1
github.com/rs/cors v1.10.1
github.com/stretchr/testify v1.8.4
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/xaionaro-go/weightedshuffle v0.0.0-20211213010739-6a74fbc7d24a
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa
golang.org/x/sync v0.5.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/DataDog/zstd v1.5.5 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.10.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cockroachdb/errors v1.11.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v0.0.0-20231020221949-babd592d2360 // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect
github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set/v2 v2.3.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/ethereum/c-kzg-4844 v0.4.0 // indirect
github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect
github.com/getsentry/sentry-go v0.25.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/gomodule/redigo v1.8.9 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/klauspost/compress v1.17.1 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rogpeppe/go-internal v1.11.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/supranational/blst v0.3.11 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/yuin/gopher-lua v1.1.0 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
golang.org/x/crypto v0.17.0 // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/sys v0.16.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/tools v0.15.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)
package integration_tests
import (
"net/http"
"os"
"testing"
"time"
"github.com/ethereum-optimism/optimism/proxyd"
"github.com/stretchr/testify/require"
)
const (
batchTimeoutResponse = `{"error":{"code":-32015,"message":"gateway timeout"},"id":null,"jsonrpc":"2.0"}`
)
func TestBatchTimeout(t *testing.T) {
slowBackend := NewMockBackend(nil)
defer slowBackend.Close()
require.NoError(t, os.Setenv("SLOW_BACKEND_RPC_URL", slowBackend.URL()))
config := ReadConfig("batch_timeout")
client := NewProxydClient("http://127.0.0.1:8545")
_, shutdown, err := proxyd.Start(config)
require.NoError(t, err)
defer shutdown()
slowBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// check the config. The sleep duration should be at least double the server.timeout_seconds config to prevent flakes
time.Sleep(time.Second * 2)
BatchedResponseHandler(200, goodResponse)(w, r)
}))
res, statusCode, err := client.SendBatchRPC(
NewRPCReq("1", "eth_chainId", nil),
NewRPCReq("1", "eth_chainId", nil),
)
require.NoError(t, err)
require.Equal(t, 504, statusCode)
RequireEqualJSON(t, []byte(batchTimeoutResponse), res)
require.Equal(t, 1, len(slowBackend.Requests()))
}
package integration_tests
import (
"net/http"
"os"
"testing"
"github.com/ethereum-optimism/optimism/proxyd"
"github.com/stretchr/testify/require"
)
func TestBatching(t *testing.T) {
config := ReadConfig("batching")
chainIDResponse1 := `{"jsonrpc": "2.0", "result": "hello1", "id": 1}`
chainIDResponse2 := `{"jsonrpc": "2.0", "result": "hello2", "id": 2}`
chainIDResponse3 := `{"jsonrpc": "2.0", "result": "hello3", "id": 3}`
netVersionResponse1 := `{"jsonrpc": "2.0", "result": "1.0", "id": 1}`
callResponse1 := `{"jsonrpc": "2.0", "result": "ekans1", "id": 1}`
ethAccountsResponse2 := `{"jsonrpc": "2.0", "result": [], "id": 2}`
backendResTooLargeResponse1 := `{"error":{"code":-32020,"message":"backend response too large"},"id":1,"jsonrpc":"2.0"}`
backendResTooLargeResponse2 := `{"error":{"code":-32020,"message":"backend response too large"},"id":2,"jsonrpc":"2.0"}`
type mockResult struct {
method string
id string
result interface{}
}
chainIDMock1 := mockResult{"eth_chainId", "1", "hello1"}
chainIDMock2 := mockResult{"eth_chainId", "2", "hello2"}
chainIDMock3 := mockResult{"eth_chainId", "3", "hello3"}
netVersionMock1 := mockResult{"net_version", "1", "1.0"}
callMock1 := mockResult{"eth_call", "1", "ekans1"}
tests := []struct {
name string
handler http.Handler
mocks []mockResult
reqs []*proxyd.RPCReq
expectedRes string
maxUpstreamBatchSize int
numExpectedForwards int
maxResponseSizeBytes int64
}{
{
name: "backend returns batches out of order",
mocks: []mockResult{chainIDMock1, chainIDMock2, chainIDMock3},
reqs: []*proxyd.RPCReq{
NewRPCReq("1", "eth_chainId", nil),
NewRPCReq("2", "eth_chainId", nil),
NewRPCReq("3", "eth_chainId", nil),
},
expectedRes: asArray(chainIDResponse1, chainIDResponse2, chainIDResponse3),
maxUpstreamBatchSize: 2,
numExpectedForwards: 2,
},
{
// infura behavior
name: "backend returns single RPC response object as error",
handler: SingleResponseHandler(500, `{"jsonrpc":"2.0","error":{"code":-32001,"message":"internal server error"},"id":1}`),
reqs: []*proxyd.RPCReq{
NewRPCReq("1", "eth_chainId", nil),
NewRPCReq("2", "eth_chainId", nil),
},
expectedRes: asArray(
`{"error":{"code":-32011,"message":"no backends available for method"},"id":1,"jsonrpc":"2.0"}`,
`{"error":{"code":-32011,"message":"no backends available for method"},"id":2,"jsonrpc":"2.0"}`,
),
maxUpstreamBatchSize: 10,
numExpectedForwards: 1,
},
{
name: "backend returns single RPC response object for minibatches",
handler: SingleResponseHandler(500, `{"jsonrpc":"2.0","error":{"code":-32001,"message":"internal server error"},"id":1}`),
reqs: []*proxyd.RPCReq{
NewRPCReq("1", "eth_chainId", nil),
NewRPCReq("2", "eth_chainId", nil),
},
expectedRes: asArray(
`{"error":{"code":-32011,"message":"no backends available for method"},"id":1,"jsonrpc":"2.0"}`,
`{"error":{"code":-32011,"message":"no backends available for method"},"id":2,"jsonrpc":"2.0"}`,
),
maxUpstreamBatchSize: 1,
numExpectedForwards: 2,
},
{
name: "duplicate request ids are on distinct batches",
mocks: []mockResult{
netVersionMock1,
chainIDMock2,
chainIDMock1,
callMock1,
},
reqs: []*proxyd.RPCReq{
NewRPCReq("1", "net_version", nil),
NewRPCReq("2", "eth_chainId", nil),
NewRPCReq("1", "eth_chainId", nil),
NewRPCReq("1", "eth_call", nil),
},
expectedRes: asArray(netVersionResponse1, chainIDResponse2, chainIDResponse1, callResponse1),
maxUpstreamBatchSize: 2,
numExpectedForwards: 3,
},
{
name: "over max size",
mocks: []mockResult{},
reqs: []*proxyd.RPCReq{
NewRPCReq("1", "net_version", nil),
NewRPCReq("2", "eth_chainId", nil),
NewRPCReq("3", "eth_chainId", nil),
NewRPCReq("4", "eth_call", nil),
NewRPCReq("5", "eth_call", nil),
NewRPCReq("6", "eth_call", nil),
},
expectedRes: "{\"error\":{\"code\":-32014,\"message\":\"over batch size custom message\"},\"id\":null,\"jsonrpc\":\"2.0\"}",
maxUpstreamBatchSize: 2,
numExpectedForwards: 0,
},
{
name: "eth_accounts does not get forwarded",
mocks: []mockResult{
callMock1,
},
reqs: []*proxyd.RPCReq{
NewRPCReq("1", "eth_call", nil),
NewRPCReq("2", "eth_accounts", nil),
},
expectedRes: asArray(callResponse1, ethAccountsResponse2),
maxUpstreamBatchSize: 2,
numExpectedForwards: 1,
},
{
name: "large upstream response gets dropped",
mocks: []mockResult{chainIDMock1, chainIDMock2},
reqs: []*proxyd.RPCReq{
NewRPCReq("1", "eth_chainId", nil),
NewRPCReq("2", "eth_chainId", nil),
},
expectedRes: asArray(backendResTooLargeResponse1, backendResTooLargeResponse2),
maxUpstreamBatchSize: 2,
numExpectedForwards: 1,
maxResponseSizeBytes: 1,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
config.Server.MaxUpstreamBatchSize = tt.maxUpstreamBatchSize
config.BackendOptions.MaxResponseSizeBytes = tt.maxResponseSizeBytes
handler := tt.handler
if handler == nil {
router := NewBatchRPCResponseRouter()
for _, mock := range tt.mocks {
router.SetRoute(mock.method, mock.id, mock.result)
}
handler = router
}
goodBackend := NewMockBackend(handler)
defer goodBackend.Close()
require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
client := NewProxydClient("http://127.0.0.1:8545")
_, shutdown, err := proxyd.Start(config)
require.NoError(t, err)
defer shutdown()
res, statusCode, err := client.SendBatchRPC(tt.reqs...)
require.NoError(t, err)
require.Equal(t, http.StatusOK, statusCode)
RequireEqualJSON(t, []byte(tt.expectedRes), res)
if tt.numExpectedForwards != 0 {
require.Equal(t, tt.numExpectedForwards, len(goodBackend.Requests()))
}
if handler, ok := handler.(*BatchRPCResponseRouter); ok {
for i, mock := range tt.mocks {
require.Equal(t, 1, handler.GetNumCalls(mock.method, mock.id), i)
}
}
})
}
}
package integration_tests
import (
"bytes"
"fmt"
"os"
"testing"
"time"
"github.com/alicebob/miniredis"
"github.com/ethereum-optimism/optimism/proxyd"
"github.com/stretchr/testify/require"
)
func TestCaching(t *testing.T) {
	redis, err := miniredis.Run()
	require.NoError(t, err)
	defer redis.Close()

	hdlr := NewBatchRPCResponseRouter()
	/* cacheable */
	hdlr.SetRoute("eth_chainId", "999", "0x420")
	hdlr.SetRoute("net_version", "999", "0x1234")
	hdlr.SetRoute("eth_getBlockTransactionCountByHash", "999", "eth_getBlockTransactionCountByHash")
	hdlr.SetRoute("eth_getBlockByHash", "999", "eth_getBlockByHash")
	hdlr.SetRoute("eth_getTransactionByHash", "999", "eth_getTransactionByHash")
	hdlr.SetRoute("eth_getTransactionByBlockHashAndIndex", "999", "eth_getTransactionByBlockHashAndIndex")
	hdlr.SetRoute("eth_getUncleByBlockHashAndIndex", "999", "eth_getUncleByBlockHashAndIndex")
	hdlr.SetRoute("eth_getTransactionReceipt", "999", "eth_getTransactionReceipt")
	hdlr.SetRoute("debug_getRawReceipts", "999", "debug_getRawReceipts")
	/* not cacheable */
	hdlr.SetRoute("eth_getBlockByNumber", "999", "eth_getBlockByNumber")
	hdlr.SetRoute("eth_blockNumber", "999", "eth_blockNumber")
	hdlr.SetRoute("eth_call", "999", "eth_call")

	backend := NewMockBackend(hdlr)
	defer backend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))
	require.NoError(t, os.Setenv("REDIS_URL", fmt.Sprintf("redis://127.0.0.1:%s", redis.Port())))

	config := ReadConfig("caching")
	client := NewProxydClient("http://127.0.0.1:8545")
	_, shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	// allow time for the block number fetcher to fire
	time.Sleep(1500 * time.Millisecond)

	// Each method is invoked twice below; backendCalls is the number of
	// requests the mock backend is expected to receive afterwards. 1 means
	// the second call was served from the cache, 2 means both calls were
	// forwarded upstream.
	tests := []struct {
		method       string
		params       []interface{}
		response     string
		backendCalls int
	}{
		/* cacheable */
		{
			"eth_chainId",
			nil,
			"{\"jsonrpc\": \"2.0\", \"result\": \"0x420\", \"id\": 999}",
			1,
		},
		{
			"net_version",
			nil,
			"{\"jsonrpc\": \"2.0\", \"result\": \"0x1234\", \"id\": 999}",
			1,
		},
		{
			"eth_getBlockTransactionCountByHash",
			[]interface{}{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_getBlockTransactionCountByHash\", \"id\": 999}",
			1,
		},
		{
			"eth_getBlockByHash",
			[]interface{}{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", "false"},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_getBlockByHash\", \"id\": 999}",
			1,
		},
		{
			"eth_getTransactionByBlockHashAndIndex",
			[]interface{}{"0xe670ec64341771606e55d6b4ca35a1a6b75ee3d5145a99d05921026d1527331", "0x55"},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_getTransactionByBlockHashAndIndex\", \"id\": 999}",
			1,
		},
		{
			"eth_getUncleByBlockHashAndIndex",
			[]interface{}{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238", "0x90"},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_getUncleByBlockHashAndIndex\", \"id\": 999}",
			1,
		},
		/* not cacheable */
		{
			"eth_getBlockByNumber",
			[]interface{}{
				"0x1",
				true,
			},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_getBlockByNumber\", \"id\": 999}",
			2,
		},
		{
			"eth_getTransactionReceipt",
			[]interface{}{"0x85d995eba9763907fdf35cd2034144dd9d53ce32cbec21349d4b12823c6860c5"},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_getTransactionReceipt\", \"id\": 999}",
			2,
		},
		{
			"eth_getTransactionByHash",
			[]interface{}{"0x88df016429689c079f3b2f6ad39fa052532c56795b733da78a91ebe6a713944b"},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_getTransactionByHash\", \"id\": 999}",
			2,
		},
		{
			"eth_call",
			[]interface{}{
				struct {
					To string `json:"to"`
				}{
					"0x1234",
				},
				"0x60",
			},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_call\", \"id\": 999}",
			2,
		},
		{
			"eth_blockNumber",
			nil,
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_blockNumber\", \"id\": 999}",
			2,
		},
		{
			"eth_call",
			[]interface{}{
				struct {
					To string `json:"to"`
				}{
					"0x1234",
				},
				"latest",
			},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_call\", \"id\": 999}",
			2,
		},
		{
			"eth_call",
			[]interface{}{
				struct {
					To string `json:"to"`
				}{
					"0x1234",
				},
				"pending",
			},
			"{\"jsonrpc\": \"2.0\", \"result\": \"eth_call\", \"id\": 999}",
			2,
		},
	}

	for _, tt := range tests {
		t.Run(tt.method, func(t *testing.T) {
			resRaw, _, err := client.SendRPC(tt.method, tt.params)
			require.NoError(t, err)
			resCache, _, err := client.SendRPC(tt.method, tt.params)
			require.NoError(t, err)
			RequireEqualJSON(t, []byte(tt.response), resCache)
			RequireEqualJSON(t, resRaw, resCache)
			require.Equal(t, tt.backendCalls, countRequests(backend, tt.method))
			backend.Reset()
		})
	}

	t.Run("nil responses should not be cached", func(t *testing.T) {
		hdlr.SetRoute("eth_getBlockByHash", "999", nil)
		resRaw, _, err := client.SendRPC("eth_getBlockByHash", []interface{}{"0x123"})
		require.NoError(t, err)
		resCache, _, err := client.SendRPC("eth_getBlockByHash", []interface{}{"0x123"})
		require.NoError(t, err)
		RequireEqualJSON(t, []byte("{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":null}"), resRaw)
		RequireEqualJSON(t, resRaw, resCache)
		require.Equal(t, 2, countRequests(backend, "eth_getBlockByHash"))
	})

	t.Run("debug_getRawReceipts with 0 receipts should not be cached", func(t *testing.T) {
		backend.Reset()
		hdlr.SetRoute("debug_getRawReceipts", "999", []string{})
		resRaw, _, err := client.SendRPC("debug_getRawReceipts", []interface{}{"0x88420081ab9c6d50dc57af36b541c6b8a7b3e9c0d837b0414512c4c5883560ff"})
		require.NoError(t, err)
		resCache, _, err := client.SendRPC("debug_getRawReceipts", []interface{}{"0x88420081ab9c6d50dc57af36b541c6b8a7b3e9c0d837b0414512c4c5883560ff"})
		require.NoError(t, err)
		RequireEqualJSON(t, []byte("{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":[]}"), resRaw)
		RequireEqualJSON(t, resRaw, resCache)
		require.Equal(t, 2, countRequests(backend, "debug_getRawReceipts"))
	})

	t.Run("debug_getRawReceipts with more than 0 receipts should be cached", func(t *testing.T) {
		backend.Reset()
		hdlr.SetRoute("debug_getRawReceipts", "999", []string{"a"})
		resRaw, _, err := client.SendRPC("debug_getRawReceipts", []interface{}{"0x88420081ab9c6d50dc57af36b541c6b8a7b3e9c0d837b0414512c4c5883560bb"})
		require.NoError(t, err)
		resCache, _, err := client.SendRPC("debug_getRawReceipts", []interface{}{"0x88420081ab9c6d50dc57af36b541c6b8a7b3e9c0d837b0414512c4c5883560bb"})
		require.NoError(t, err)
		RequireEqualJSON(t, []byte("{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":[\"a\"]}"), resRaw)
		RequireEqualJSON(t, resRaw, resCache)
		require.Equal(t, 1, countRequests(backend, "debug_getRawReceipts"))
	})
}
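
// TestBatchCaching verifies that responses cached by one batch request are
// served from the cache in subsequent batches.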
func TestBatchCaching(t *testing.T) {
	redis, err := miniredis.Run()
	require.NoError(t, err)
	defer redis.Close()

	hdlr := NewBatchRPCResponseRouter()
	hdlr.SetRoute("eth_chainId", "1", "0x420")
	hdlr.SetRoute("net_version", "1", "0x1234")
	hdlr.SetRoute("eth_call", "1", "dummy_call")
	hdlr.SetRoute("eth_getBlockByHash", "1", "eth_getBlockByHash")

	backend := NewMockBackend(hdlr)
	defer backend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))
	require.NoError(t, os.Setenv("REDIS_URL", fmt.Sprintf("redis://127.0.0.1:%s", redis.Port())))

	config := ReadConfig("caching")
	client := NewProxydClient("http://127.0.0.1:8545")
	_, shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	// allow time for the block number fetcher to fire
	time.Sleep(1500 * time.Millisecond)

	goodChainIdResponse := "{\"jsonrpc\": \"2.0\", \"result\": \"0x420\", \"id\": 1}"
	goodNetVersionResponse := "{\"jsonrpc\": \"2.0\", \"result\": \"0x1234\", \"id\": 1}"
	goodEthCallResponse := "{\"jsonrpc\": \"2.0\", \"result\": \"dummy_call\", \"id\": 1}"
	goodEthGetBlockByHash := "{\"jsonrpc\": \"2.0\", \"result\": \"eth_getBlockByHash\", \"id\": 1}"

	// First batch: nothing is cached yet, so every request hits the backend.
	res, _, err := client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("1", "net_version", nil),
		NewRPCReq("1", "eth_getBlockByHash", []interface{}{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", "false"}),
	)
	require.NoError(t, err)
	RequireEqualJSON(t, []byte(asArray(goodChainIdResponse, goodNetVersionResponse, goodEthGetBlockByHash)), res)
	require.Equal(t, 1, countRequests(backend, "eth_chainId"))
	require.Equal(t, 1, countRequests(backend, "net_version"))
	require.Equal(t, 1, countRequests(backend, "eth_getBlockByHash"))

	backend.Reset()

	// Second batch: the cacheable methods are served from the cache, so only
	// the uncacheable eth_call (against "pending") reaches the backend.
	res, _, err = client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("1", "eth_call", []interface{}{`{"to":"0x1234"}`, "pending"}),
		NewRPCReq("1", "net_version", nil),
		NewRPCReq("1", "eth_getBlockByHash", []interface{}{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", "false"}),
	)
	require.NoError(t, err)
	RequireEqualJSON(t, []byte(asArray(goodChainIdResponse, goodEthCallResponse, goodNetVersionResponse, goodEthGetBlockByHash)), res)
	require.Equal(t, 0, countRequests(backend, "eth_chainId"))
	require.Equal(t, 0, countRequests(backend, "net_version"))
	require.Equal(t, 0, countRequests(backend, "eth_getBlockByHash"))
	require.Equal(t, 1, countRequests(backend, "eth_call"))
}
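
// countRequests returns how many of the requests received by the mock
// backend mention the given method name in their body.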
func countRequests(backend *MockBackend, name string) int {
	var count int
	for _, req := range backend.Requests() {
		if bytes.Contains(req.Body, []byte(name)) {
			count++
		}
	}
	return count
}

package integration_tests

import (
	"fmt"
	"io"
	"os"
	"strings"
	"testing"

	"github.com/ethereum-optimism/optimism/proxyd"
	"github.com/ethereum/go-ethereum/log"
	"github.com/stretchr/testify/require"
)
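
// TestInitProxyd boots proxyd against a mock backend and asserts that the
// expected startup and shutdown log lines appear on stdout.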
func TestInitProxyd(t *testing.T) {
	goodBackend := NewMockBackend(BatchedResponseHandler(200, goodResponse))
	defer goodBackend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
	config := ReadConfig("smoke")

	// Redirect stdout to a pipe so the log output can be inspected.
	sysStdOut := os.Stdout
	r, w, err := os.Pipe()
	require.NoError(t, err)
	os.Stdout = w

	proxyd.SetLogLevel(log.LevelInfo)
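	// Deferred calls run last-in-first-out: shutdown() below is registered
	// after this closure, so it runs first and its "shutting down proxyd"
	// log line reaches the pipe before the output is read back here.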
	defer func() {
		w.Close()
		out, _ := io.ReadAll(r)
		require.True(t, strings.Contains(string(out), "started proxyd"))
		require.True(t, strings.Contains(string(out), "shutting down proxyd"))
		fmt.Println(string(out))
		os.Stdout = sysStdOut
	}()

	_, shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	t.Run("initialization", func(t *testing.T) {
		client := NewProxydClient("http://127.0.0.1:8545")
		res, code, err := client.SendRPC(ethChainID, nil)
		require.NoError(t, err)
		require.Equal(t, 200, code)
		require.NotNil(t, res)
	})
}