Commit bf4d0c33 authored by Mark Tyneway, committed by GitHub

Merge pull request #2071 from ethereum-optimism/develop

Develop -> Master PR
parents 6f8e4325 c59c3d38
---
'@eth-optimism/proxyd': minor
---
proxyd: Allow cached RPCs to be evicted by redis
---
'@eth-optimism/core-utils': patch
---
Improved docstrings for BCFG typings
---
'@eth-optimism/integration-tests': minor
---
Updates to work with a live network
---
'@eth-optimism/batch-submitter-service': patch
---
Adds confirmation depth awareness to txmgr
---
'@eth-optimism/l2geth': patch
---
Add a better error message for when the sequencer url is not configured when proxying user requests to the sequencer for `eth_sendRawTransaction` when running as a verifier/replica
---
'@eth-optimism/proxyd': minor
---
Add caching for block-dependent RPCs
---
'@eth-optimism/proxyd': minor
---
proxyd: Cache block-dependent RPCs
---
'@eth-optimism/l2geth': patch
---
Fix nonce issue
---
'@eth-optimism/integration-tests': patch
---
Use hardhat-ethers for importing factories in integration tests
---
'@eth-optimism/l2geth': patch
---
Add reinitialize-by-url command, add dump chain state command
---
'@eth-optimism/core-utils': patch
---
Cleans up the internal file and folder structure for the typings exported by core-utils
---
'@eth-optimism/integration-tests': patch
---
Split OVMMulticall.sol into Multicall.sol & OVMContext.sol
---
'@eth-optimism/batch-submitter-service': minor
---
Add multi-tx support, clear pending txs on startup
---
'@eth-optimism/l2geth': patch
---
Fix blocknumber monotonicity logging bug
---
'@eth-optimism/proxyd': minor
---
Add integration tests and batching
......@@ -141,6 +141,32 @@ jobs:
kubectl rollout restart statefulset nightly-dtl --namespace nightly
kubectl rollout restart deployment nightly-gas-oracle --namespace nightly
kubectl rollout restart deployment edge-proxyd --namespace nightly
run-itests-nightly:
docker:
- image: cimg/base:2021.04
steps:
- setup_remote_docker:
version: 19.03.13
- run:
name: Run integration tests
command: |
docker run \
--env PRIVATE_KEY=$NIGHTLY_ITESTS_PRIVKEY \
--env L1_URL=https://nightly-l1.optimism-stacks.net \
--env L2_URL=https://nightly-l2.optimism-stacks.net \
--env ADDRESS_MANAGER=0x22D4E211ef8704f2ca2d6dfdB32125E2530ACE3e \
--env L2_CHAINID=69 \
--env MOCHA_BAIL=true \
--env MOCHA_TIMEOUT=300000 \
--env L1_GAS_PRICE=onchain \
--env L2_GAS_PRICE=onchain \
--env RUN_DEBUG_TRACE_TESTS=false \
--env RUN_REPLICA_TESTS=false \
--env RUN_STRESS_TESTS=false \
--env OVMCONTEXT_SPEC_NUM_TXS=1 \
--env DTL_ENQUEUE_CONFIRMATIONS=12 \
"$STACKMAN_REPO/integration-tests:nightly" \
yarn test:integration:live
notify:
docker:
- image: cimg/base:2021.04
......@@ -152,6 +178,18 @@ jobs:
workflows:
nightly-itests:
triggers:
- schedule:
cron: "0 1 * * *"
filters:
branches:
only:
- develop
jobs:
- run-itests-nightly:
context:
- optimism
nightly:
triggers:
- schedule:
......
......@@ -101,7 +101,17 @@ module.exports = {
'id-match': 'off',
'import/no-extraneous-dependencies': ['error'],
'import/no-internal-modules': 'off',
'import/order': 'off',
'import/order': [
"error",
{
groups: [
'builtin',
'external',
'internal',
],
'newlines-between': 'always',
},
],
indent: 'off',
'jsdoc/check-alignment': 'error',
'jsdoc/check-indentation': 'error',
......
name: proxyd unit tests
on:
push:
branches:
- 'master'
- 'develop'
pull_request:
paths:
- 'go/proxyd/**'
workflow_dispatch:
defaults:
run:
working-directory: ./go/proxyd
jobs:
tests:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.15.x
- name: Checkout code
uses: actions/checkout@v2
- name: Build
run: make proxyd
- name: Lint
run: make lint
- name: Test
run: make test
......@@ -29,6 +29,7 @@ jobs:
rpc-proxy : ${{ steps.packages.outputs.rpc-proxy }}
op-exporter : ${{ steps.packages.outputs.op-exporter }}
l2geth-exporter : ${{ steps.packages.outputs.l2geth-exporter }}
batch-submitter-service : ${{ steps.packages.outputs.batch-submitter-service }}
steps:
- name: Check out source code
......@@ -506,3 +507,29 @@ jobs:
file: ./ops/docker/Dockerfile.rpc-proxy
push: true
tags: ethereumoptimism/rpc-proxy:${{ needs.canary-publish.outputs.rpc-proxy }}
batch-submitter-service:
name: Publish batch-submitter-service Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.batch-submitter-service != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.batch-submitter-service
push: true
tags: ethereumoptimism/batch-submitter-service:${{ needs.canary-publish.outputs.batch-submitter-service }}
......@@ -25,6 +25,7 @@ jobs:
hardhat-node: ${{ steps.packages.outputs.hardhat-node }}
op-exporter : ${{ steps.packages.outputs.op-exporter }}
l2geth-exporter : ${{ steps.packages.outputs.l2geth-exporter }}
batch-submitter-service : ${{ steps.packages.outputs.batch-submitter-service }}
steps:
- name: Checkout Repo
......@@ -502,3 +503,29 @@ jobs:
push: true
tags: ethereumoptimism/replica-healthcheck:${{ needs.builder.outputs.replica-healthcheck }},ethereumoptimism/replica-healthcheck:latest
build-args: BUILDER_TAG=${{ needs.builder.outputs.builder }}
batch-submitter-service:
name: Publish batch-submitter-service Version ${{ needs.release.outputs.batch-submitter-service }}
needs: release
if: needs.release.outputs.batch-submitter-service != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.batch-submitter-service
push: true
tags: ethereumoptimism/batch-submitter-service:${{ needs.release.outputs.batch-submitter-service }},ethereumoptimism/batch-submitter-service:latest
......@@ -164,6 +164,7 @@ func NewBatchSubmitter(cfg Config, gitVersion string) (*BatchSubmitter, error) {
GasRetryIncrement: utils.GasPriceFromGwei(cfg.GasRetryIncrement),
ResubmissionTimeout: cfg.ResubmissionTimeout,
ReceiptQueryInterval: time.Second,
NumConfirmations: cfg.NumConfirmations,
}
var batchTxService *Service
......
......@@ -32,12 +32,13 @@ func init() {
}
var (
testPrivKey *ecdsa.PrivateKey
testWalletAddr common.Address
testChainID *big.Int // 1
testNonce = uint64(2)
testGasPrice *big.Int // 3
testGasLimit = uint64(4)
testPrivKey *ecdsa.PrivateKey
testWalletAddr common.Address
testChainID = big.NewInt(1)
testNonce = uint64(2)
testGasPrice = big.NewInt(3)
testGasLimit = uint64(4)
testBlockNumber = uint64(5)
)
// TestCraftClearingTx asserts that CraftClearingTx produces the expected
......@@ -102,11 +103,20 @@ func TestSignClearingTxEstimateGasFail(t *testing.T) {
}
type clearPendingTxHarness struct {
l1Client drivers.L1Client
l1Client *mock.L1Client
txMgr txmgr.TxManager
}
func newClearPendingTxHarness(l1ClientConfig mock.L1ClientConfig) *clearPendingTxHarness {
func newClearPendingTxHarnessWithNumConfs(
l1ClientConfig mock.L1ClientConfig,
numConfirmations uint64,
) *clearPendingTxHarness {
if l1ClientConfig.BlockNumber == nil {
l1ClientConfig.BlockNumber = func(_ context.Context) (uint64, error) {
return testBlockNumber, nil
}
}
if l1ClientConfig.NonceAt == nil {
l1ClientConfig.NonceAt = func(_ context.Context, _ common.Address, _ *big.Int) (uint64, error) {
return testNonce, nil
......@@ -125,6 +135,7 @@ func newClearPendingTxHarness(l1ClientConfig mock.L1ClientConfig) *clearPendingT
GasRetryIncrement: utils.GasPriceFromGwei(5),
ResubmissionTimeout: time.Second,
ReceiptQueryInterval: 50 * time.Millisecond,
NumConfirmations: numConfirmations,
}, l1Client)
return &clearPendingTxHarness{
......@@ -133,6 +144,10 @@ func newClearPendingTxHarness(l1ClientConfig mock.L1ClientConfig) *clearPendingT
}
}
func newClearPendingTxHarness(l1ClientConfig mock.L1ClientConfig) *clearPendingTxHarness {
return newClearPendingTxHarnessWithNumConfs(l1ClientConfig, 1)
}
// TestClearPendingTxClearingTxConfirms asserts the happy path where our
// clearing transaction confirms unobstructed.
func TestClearPendingTxClearingTxConfirms(t *testing.T) {
......@@ -142,7 +157,8 @@ func TestClearPendingTxClearingTxConfirms(t *testing.T) {
},
TransactionReceipt: func(_ context.Context, txHash common.Hash) (*types.Receipt, error) {
return &types.Receipt{
TxHash: txHash,
TxHash: txHash,
BlockNumber: big.NewInt(int64(testBlockNumber)),
}, nil
},
})
......@@ -190,3 +206,42 @@ func TestClearPendingTxTimeout(t *testing.T) {
)
require.Equal(t, txmgr.ErrPublishTimeout, err)
}
// TestClearPendingTxMultipleConfs tests that we wait the appropriate number of
// confirmations for the clearing transaction to confirm.
func TestClearPendingTxMultipleConfs(t *testing.T) {
const numConfs = 2
// Instantly confirm transaction.
h := newClearPendingTxHarnessWithNumConfs(mock.L1ClientConfig{
SendTransaction: func(_ context.Context, _ *types.Transaction) error {
return nil
},
TransactionReceipt: func(_ context.Context, txHash common.Hash) (*types.Receipt, error) {
return &types.Receipt{
TxHash: txHash,
BlockNumber: big.NewInt(int64(testBlockNumber)),
}, nil
},
}, numConfs)
// The txmgr should timeout waiting for the txn to confirm.
err := drivers.ClearPendingTx(
"test", context.Background(), h.txMgr, h.l1Client, testWalletAddr,
testPrivKey, testChainID,
)
require.Equal(t, txmgr.ErrPublishTimeout, err)
// Now set the chain height to the earliest the transaction will be
// considered sufficiently confirmed.
h.l1Client.SetBlockNumberFunc(func(_ context.Context) (uint64, error) {
return testBlockNumber + numConfs - 1, nil
})
// Publishing should succeed.
err = drivers.ClearPendingTx(
"test", context.Background(), h.txMgr, h.l1Client, testWalletAddr,
testPrivKey, testChainID,
)
require.Nil(t, err)
}
......@@ -13,6 +13,9 @@ import (
// L1ClientConfig houses the internal methods that are executed by the mock
// L1Client. Any members left as nil will panic on execution.
type L1ClientConfig struct {
// BlockNumber returns the most recent block number.
BlockNumber func(context.Context) (uint64, error)
// EstimateGas tries to estimate the gas needed to execute a specific
// transaction based on the current pending state of the backend blockchain.
// There is no guarantee that this is the true gas limit requirement as
......@@ -50,6 +53,14 @@ func NewL1Client(cfg L1ClientConfig) *L1Client {
}
}
// BlockNumber returns the most recent block number.
func (c *L1Client) BlockNumber(ctx context.Context) (uint64, error) {
c.mu.RLock()
defer c.mu.RUnlock()
return c.cfg.BlockNumber(ctx)
}
// EstimateGas executes the mock EstimateGas method.
func (c *L1Client) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) {
c.mu.RLock()
......@@ -82,6 +93,16 @@ func (c *L1Client) TransactionReceipt(ctx context.Context, txHash common.Hash) (
return c.cfg.TransactionReceipt(ctx, txHash)
}
// SetBlockNumberFunc overwrites the mock BlockNumber method.
func (c *L1Client) SetBlockNumberFunc(
f func(context.Context) (uint64, error)) {
c.mu.Lock()
defer c.mu.Unlock()
c.cfg.BlockNumber = f
}
// SetEstimateGasFunc overwrites the mock EstimateGas method.
func (c *L1Client) SetEstimateGasFunc(
f func(context.Context, ethereum.CallMsg) (uint64, error)) {
......
......@@ -52,6 +52,10 @@ type Config struct {
// query the backend to check for confirmations after a tx at a
// specific gas price has been published.
ReceiptQueryInterval time.Duration
// NumConfirmations specifies how many blocks are needed to consider a
// transaction confirmed.
NumConfirmations uint64
}
// TxManager is an interface that allows callers to reliably publish txs,
......@@ -71,6 +75,9 @@ type TxManager interface {
//
// NOTE: This is a subset of bind.DeployBackend.
type ReceiptSource interface {
// BlockNumber returns the most recent block number.
BlockNumber(ctx context.Context) (uint64, error)
// TransactionReceipt queries the backend for a receipt associated with
// txHash. If lookup does not fail, but the transaction is not found,
// nil should be returned for both values.
......@@ -90,6 +97,10 @@ type SimpleTxManager struct {
func NewSimpleTxManager(
name string, cfg Config, backend ReceiptSource) *SimpleTxManager {
if cfg.NumConfirmations == 0 {
panic("txmgr: NumConfirmations cannot be zero")
}
return &SimpleTxManager{
name: name,
cfg: cfg,
......@@ -148,6 +159,7 @@ func (m *SimpleTxManager) Send(
// back to the main event loop if found.
receipt, err := WaitMined(
ctxc, m.backend, tx, m.cfg.ReceiptQueryInterval,
m.cfg.NumConfirmations,
)
if err != nil {
log.Debug(name+" send tx failed", "hash", txHash,
......@@ -220,6 +232,7 @@ func WaitMined(
backend ReceiptSource,
tx *types.Transaction,
queryInterval time.Duration,
numConfirmations uint64,
) (*types.Receipt, error) {
queryTicker := time.NewTicker(queryInterval)
......@@ -229,14 +242,42 @@ func WaitMined(
for {
receipt, err := backend.TransactionReceipt(ctx, txHash)
if receipt != nil {
return receipt, nil
}
switch {
case receipt != nil:
txHeight := receipt.BlockNumber.Uint64()
tipHeight, err := backend.BlockNumber(ctx)
if err != nil {
log.Error("Unable to fetch block number", "err", err)
break
}
if err != nil {
log.Trace("Transaction mined, checking confirmations",
"txHash", txHash, "txHeight", txHeight,
"tipHeight", tipHeight,
"numConfirmations", numConfirmations)
// The transaction is considered confirmed when
// txHeight+numConfirmations-1 <= tipHeight. Note that the -1 is
// needed to account for the fact that confirmations have an
// inherent off-by-one, i.e. when using 1 confirmation the
// transaction should be confirmed when txHeight is equal to
// tipHeight. The equation is rewritten in this form to avoid
// underflows.
if txHeight+numConfirmations <= tipHeight+1 {
log.Info("Transaction confirmed", "txHash", txHash)
return receipt, nil
}
// Safe to subtract since we know the LHS above is greater.
confsRemaining := (txHeight + numConfirmations) - (tipHeight + 1)
log.Info("Transaction not yet confirmed", "txHash", txHash,
"confsRemaining", confsRemaining)
case err != nil:
log.Trace("Receipt retrievel failed", "hash", txHash,
"err", err)
} else {
default:
log.Trace("Transaction not yet mined", "hash", txHash)
}
......
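Note on the confirmation check added above: it rewrites txHeight+numConfirmations-1 <= tipHeight as txHeight+numConfirmations <= tipHeight+1 so the unsigned arithmetic cannot underflow. A minimal standalone sketch of the same predicate with a worked example — the helper name is hypothetical and not part of this diff:

package main

import "fmt"

// isConfirmed reports whether a tx mined at txHeight has at least
// numConfirmations confirmations when the chain tip is at tipHeight.
// The additive form avoids uint64 underflow when tipHeight < txHeight.
func isConfirmed(txHeight, tipHeight, numConfirmations uint64) bool {
	return txHeight+numConfirmations <= tipHeight+1
}

func main() {
	fmt.Println(isConfirmed(5, 5, 1)) // true: one confirmation in the mining block itself
	fmt.Println(isConfirmed(5, 5, 2)) // false: the tip must advance one more block
	fmt.Println(isConfirmed(5, 6, 2)) // true: matches testBlockNumber+numConfs-1 in the tests above
}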
......@@ -95,13 +95,23 @@ func newTestHarnessWithConfig(cfg txmgr.Config) *testHarness {
// newTestHarness initializes a testHarness with a default configuration that is
// suitable for most tests.
func newTestHarness() *testHarness {
return newTestHarnessWithConfig(txmgr.Config{
return newTestHarnessWithConfig(configWithNumConfs(1))
}
func configWithNumConfs(numConfirmations uint64) txmgr.Config {
return txmgr.Config{
MinGasPrice: new(big.Int).SetUint64(5),
MaxGasPrice: new(big.Int).SetUint64(50),
GasRetryIncrement: new(big.Int).SetUint64(5),
ResubmissionTimeout: time.Second,
ReceiptQueryInterval: 50 * time.Millisecond,
})
NumConfirmations: numConfirmations,
}
}
type minedTxInfo struct {
gasPrice *big.Int
blockNumber uint64
}
// mockBackend implements txmgr.ReceiptSource that tracks mined transactions
......@@ -109,25 +119,42 @@ func newTestHarness() *testHarness {
type mockBackend struct {
mu sync.RWMutex
// txHashMinedWithGasPrice tracks the hash of a mined transaction to its
// gas price.
txHashMinedWithGasPrice map[common.Hash]*big.Int
// blockHeight tracks the current height of the chain.
blockHeight uint64
// minedTxs maps the hash of a mined transaction to its details.
minedTxs map[common.Hash]minedTxInfo
}
// newMockBackend initializes a new mockBackend.
func newMockBackend() *mockBackend {
return &mockBackend{
txHashMinedWithGasPrice: make(map[common.Hash]*big.Int),
minedTxs: make(map[common.Hash]minedTxInfo),
}
}
// mine records a (txHash, gasPrice) as confirmed. Subsequent calls to
// TransactionReceipt with a matching txHash will result in a non-nil receipt.
func (b *mockBackend) mine(txHash common.Hash, gasPrice *big.Int) {
// If a nil txHash is supplied this has the effect of mining an empty block.
func (b *mockBackend) mine(txHash *common.Hash, gasPrice *big.Int) {
b.mu.Lock()
defer b.mu.Unlock()
b.txHashMinedWithGasPrice[txHash] = gasPrice
b.blockHeight++
if txHash != nil {
b.minedTxs[*txHash] = minedTxInfo{
gasPrice: gasPrice,
blockNumber: b.blockHeight,
}
}
}
// BlockNumber returns the most recent block number.
func (b *mockBackend) BlockNumber(ctx context.Context) (uint64, error) {
b.mu.RLock()
defer b.mu.RUnlock()
return b.blockHeight, nil
}
// TransactionReceipt queries the mockBackend for a mined txHash. If none is
......@@ -142,7 +169,7 @@ func (b *mockBackend) TransactionReceipt(
b.mu.RLock()
defer b.mu.RUnlock()
gasPrice, ok := b.txHashMinedWithGasPrice[txHash]
txInfo, ok := b.minedTxs[txHash]
if !ok {
return nil, nil
}
......@@ -150,8 +177,9 @@ func (b *mockBackend) TransactionReceipt(
// Return the gas price for the transaction in the GasUsed field so that
// we can assert the proper tx confirmed in our tests.
return &types.Receipt{
TxHash: txHash,
GasUsed: gasPrice.Uint64(),
TxHash: txHash,
GasUsed: txInfo.gasPrice.Uint64(),
BlockNumber: big.NewInt(int64(txInfo.blockNumber)),
}, nil
}
......@@ -168,7 +196,8 @@ func TestTxMgrConfirmAtMinGasPrice(t *testing.T) {
tx := types.NewTx(&types.LegacyTx{
GasPrice: gasPrice,
})
h.backend.mine(tx.Hash(), gasPrice)
txHash := tx.Hash()
h.backend.mine(&txHash, gasPrice)
return tx, nil
}
......@@ -220,7 +249,8 @@ func TestTxMgrConfirmsAtMaxGasPrice(t *testing.T) {
GasPrice: gasPrice,
})
if gasPrice.Cmp(h.cfg.MaxGasPrice) == 0 {
h.backend.mine(tx.Hash(), gasPrice)
txHash := tx.Hash()
h.backend.mine(&txHash, gasPrice)
}
return tx, nil
}
......@@ -252,7 +282,8 @@ func TestTxMgrConfirmsAtMaxGasPriceDelayed(t *testing.T) {
// should still return an error beforehand.
if gasPrice.Cmp(h.cfg.MaxGasPrice) == 0 {
time.AfterFunc(2*time.Second, func() {
h.backend.mine(tx.Hash(), gasPrice)
txHash := tx.Hash()
h.backend.mine(&txHash, gasPrice)
})
}
return tx, nil
......@@ -308,7 +339,8 @@ func TestTxMgrOnlyOnePublicationSucceeds(t *testing.T) {
tx := types.NewTx(&types.LegacyTx{
GasPrice: gasPrice,
})
h.backend.mine(tx.Hash(), gasPrice)
txHash := tx.Hash()
h.backend.mine(&txHash, gasPrice)
return tx, nil
}
......@@ -338,7 +370,8 @@ func TestTxMgrConfirmsMinGasPriceAfterBumping(t *testing.T) {
// Delay mining the tx with the min gas price.
if gasPrice.Cmp(h.cfg.MinGasPrice) == 0 {
time.AfterFunc(5*time.Second, func() {
h.backend.mine(tx.Hash(), gasPrice)
txHash := tx.Hash()
h.backend.mine(&txHash, gasPrice)
})
}
return tx, nil
......@@ -361,10 +394,10 @@ func TestWaitMinedReturnsReceiptOnFirstSuccess(t *testing.T) {
// Create a tx and mine it immediately using the default backend.
tx := types.NewTx(&types.LegacyTx{})
txHash := tx.Hash()
h.backend.mine(txHash, new(big.Int))
h.backend.mine(&txHash, new(big.Int))
ctx := context.Background()
receipt, err := txmgr.WaitMined(ctx, h.backend, tx, 50*time.Millisecond)
receipt, err := txmgr.WaitMined(ctx, h.backend, tx, 50*time.Millisecond, 1)
require.Nil(t, err)
require.NotNil(t, receipt)
require.Equal(t, receipt.TxHash, txHash)
......@@ -383,16 +416,73 @@ func TestWaitMinedCanBeCanceled(t *testing.T) {
// Create an unmined tx.
tx := types.NewTx(&types.LegacyTx{})
receipt, err := txmgr.WaitMined(ctx, h.backend, tx, 50*time.Millisecond)
receipt, err := txmgr.WaitMined(ctx, h.backend, tx, 50*time.Millisecond, 1)
require.Equal(t, err, context.DeadlineExceeded)
require.Nil(t, receipt)
}
// TestWaitMinedMultipleConfs asserts that WaitMined will properly wait for more
// than one confirmation.
func TestWaitMinedMultipleConfs(t *testing.T) {
t.Parallel()
const numConfs = 2
h := newTestHarnessWithConfig(configWithNumConfs(numConfs))
ctxt, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
// Create an unmined tx.
tx := types.NewTx(&types.LegacyTx{})
txHash := tx.Hash()
h.backend.mine(&txHash, new(big.Int))
receipt, err := txmgr.WaitMined(ctxt, h.backend, tx, 50*time.Millisecond, numConfs)
require.Equal(t, err, context.DeadlineExceeded)
require.Nil(t, receipt)
ctxt, cancel = context.WithTimeout(context.Background(), time.Second)
defer cancel()
// Mine an empty block, tx should now be confirmed.
h.backend.mine(nil, nil)
receipt, err = txmgr.WaitMined(ctxt, h.backend, tx, 50*time.Millisecond, numConfs)
require.Nil(t, err)
require.NotNil(t, receipt)
require.Equal(t, txHash, receipt.TxHash)
}
// TestManagerPanicOnZeroConfs ensures that the NewSimpleTxManager will panic
// when attempting to configure with NumConfirmations set to zero.
func TestManagerPanicOnZeroConfs(t *testing.T) {
t.Parallel()
defer func() {
if r := recover(); r == nil {
t.Fatal("NewSimpleTxManager should panic when using zero conf")
}
}()
_ = newTestHarnessWithConfig(configWithNumConfs(0))
}
// failingBackend implements txmgr.ReceiptSource, returning a failure on the
// first call but a success on the second call. This allows us to test that the
// inner loop of WaitMined properly handles this case.
type failingBackend struct {
returnSuccess bool
returnSuccessBlockNumber bool
returnSuccessReceipt bool
}
// BlockNumber for the failingBackend returns errRpcFailure on the first
// invocation and a fixed block height on subsequent calls.
func (b *failingBackend) BlockNumber(ctx context.Context) (uint64, error) {
if !b.returnSuccessBlockNumber {
b.returnSuccessBlockNumber = true
return 0, errRpcFailure
}
return 1, nil
}
// TransactionReceipt for the failingBackend returns errRpcFailure on the first
......@@ -400,13 +490,14 @@ type failingBackend struct {
func (b *failingBackend) TransactionReceipt(
ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
if !b.returnSuccess {
b.returnSuccess = true
if !b.returnSuccessReceipt {
b.returnSuccessReceipt = true
return nil, errRpcFailure
}
return &types.Receipt{
TxHash: txHash,
TxHash: txHash,
BlockNumber: big.NewInt(1),
}, nil
}
......@@ -424,7 +515,7 @@ func TestWaitMinedReturnsReceiptAfterFailure(t *testing.T) {
txHash := tx.Hash()
ctx := context.Background()
receipt, err := txmgr.WaitMined(ctx, &borkedBackend, tx, 50*time.Millisecond)
receipt, err := txmgr.WaitMined(ctx, &borkedBackend, tx, 50*time.Millisecond, 1)
require.Nil(t, err)
require.NotNil(t, receipt)
require.Equal(t, receipt.TxHash, txHash)
......
......@@ -11,3 +11,11 @@ fmt:
go mod tidy
gofmt -w .
.PHONY: fmt
test:
go test -race -v ./...
.PHONY: test
lint:
go vet ./...
.PHONY: lint
\ No newline at end of file
......@@ -62,6 +62,10 @@ var (
Message: "backend returned an invalid response",
HTTPErrorCode: 500,
}
ErrTooManyBatchRequests = &RPCErr{
Code: JSONRPCErrorInternal - 14,
Message: "too many RPC calls in batch request",
}
)
func ErrInvalidRequest(msg string) *RPCErr {
......@@ -631,7 +635,7 @@ func (w *WSProxier) close() {
}
func (w *WSProxier) prepareClientMsg(msg []byte) (*RPCReq, error) {
req, err := ParseRPCReq(bytes.NewReader(msg))
req, err := ParseRPCReq(msg)
if err != nil {
return nil, err
}
......
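For context on the new ErrTooManyBatchRequests above: a JSON-RPC batch is a single HTTP POST whose body is an array of request objects, and proxyd now rejects batches above a configured size. Assuming JSONRPCErrorInternal is -32000 (consistent with the -32011 "no backends available" error seen in the integration tests below), the resulting code is -32014 and a plausible error body would be:

{"error":{"code":-32014,"message":"too many RPC calls in batch request"},"id":null,"jsonrpc":"2.0"}

A batch itself can be exercised with the test client added later in this PR, e.g.:

// Two calls in one HTTP POST, using helpers defined in this PR's tests.
res, code, err := client.SendBatchRPC(
	NewRPCReq("1", "eth_chainId", nil),
	NewRPCReq("2", "net_version", nil),
)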
......@@ -2,7 +2,7 @@ package proxyd
import (
"context"
"encoding/json"
"time"
"github.com/go-redis/redis/v8"
"github.com/golang/snappy"
......@@ -14,10 +14,11 @@ type Cache interface {
Put(ctx context.Context, key string, value string) error
}
// assuming an average RPCRes size of 3 KB
const (
memoryCacheLimit = 4096
numBlockConfirmations = 50
// assuming an average RPCRes size of 3 KB
memoryCacheLimit = 4096
// Set a large TTL to avoid expirations. However, a TTL must be set for volatile-lru to take effect.
redisTTL = 30 * 7 * 24 * time.Hour
)
type cache struct {
......@@ -62,17 +63,50 @@ func (c *redisCache) Get(ctx context.Context, key string) (string, error) {
if err == redis.Nil {
return "", nil
} else if err != nil {
RecordRedisError("CacheGet")
return "", err
}
return val, nil
}
func (c *redisCache) Put(ctx context.Context, key string, value string) error {
err := c.rdb.Set(ctx, key, value, 0).Err()
err := c.rdb.SetEX(ctx, key, value, redisTTL).Err()
if err != nil {
RecordRedisError("CacheSet")
}
return err
}
type cacheWithCompression struct {
cache Cache
}
func newCacheWithCompression(cache Cache) *cacheWithCompression {
return &cacheWithCompression{cache}
}
func (c *cacheWithCompression) Get(ctx context.Context, key string) (string, error) {
encodedVal, err := c.cache.Get(ctx, key)
if err != nil {
return "", err
}
if encodedVal == "" {
return "", nil
}
val, err := snappy.Decode(nil, []byte(encodedVal))
if err != nil {
return "", err
}
return string(val), nil
}
func (c *cacheWithCompression) Put(ctx context.Context, key string, value string) error {
encodedVal := snappy.Encode(nil, []byte(value))
return c.cache.Put(ctx, key, string(encodedVal))
}
type GetLatestBlockNumFn func(ctx context.Context) (uint64, error)
type GetLatestGasPriceFn func(ctx context.Context) (uint64, error)
type RPCCache interface {
GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error)
......@@ -80,19 +114,24 @@ type RPCCache interface {
}
type rpcCache struct {
cache Cache
getLatestBlockNumFn GetLatestBlockNumFn
handlers map[string]RPCMethodHandler
cache Cache
handlers map[string]RPCMethodHandler
}
func newRPCCache(cache Cache, getLatestBlockNumFn GetLatestBlockNumFn) RPCCache {
func newRPCCache(cache Cache, getLatestBlockNumFn GetLatestBlockNumFn, getLatestGasPriceFn GetLatestGasPriceFn, numBlockConfirmations int) RPCCache {
handlers := map[string]RPCMethodHandler{
"eth_chainId": &StaticRPCMethodHandler{"eth_chainId"},
"net_version": &StaticRPCMethodHandler{"net_version"},
"eth_getBlockByNumber": &EthGetBlockByNumberMethod{getLatestBlockNumFn},
"eth_getBlockRange": &EthGetBlockRangeMethod{getLatestBlockNumFn},
"eth_chainId": &StaticMethodHandler{},
"net_version": &StaticMethodHandler{},
"eth_getBlockByNumber": &EthGetBlockByNumberMethodHandler{cache, getLatestBlockNumFn, numBlockConfirmations},
"eth_getBlockRange": &EthGetBlockRangeMethodHandler{cache, getLatestBlockNumFn, numBlockConfirmations},
"eth_blockNumber": &EthBlockNumberMethodHandler{getLatestBlockNumFn},
"eth_gasPrice": &EthGasPriceMethodHandler{getLatestGasPriceFn},
"eth_call": &EthCallMethodHandler{cache, getLatestBlockNumFn, numBlockConfirmations},
}
return &rpcCache{
cache: cache,
handlers: handlers,
}
return &rpcCache{cache: cache, getLatestBlockNumFn: getLatestBlockNumFn, handlers: handlers}
}
func (c *rpcCache) GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) {
......@@ -100,34 +139,15 @@ func (c *rpcCache) GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) {
if handler == nil {
return nil, nil
}
cacheable, err := handler.IsCacheable(req)
if err != nil {
return nil, err
}
if !cacheable {
return nil, nil
}
key := handler.CacheKey(req)
encodedVal, err := c.cache.Get(ctx, key)
if err != nil {
return nil, err
}
if encodedVal == "" {
return nil, nil
}
val, err := snappy.Decode(nil, []byte(encodedVal))
if err != nil {
return nil, err
}
res := new(RPCRes)
err = json.Unmarshal(val, res)
if err != nil {
return nil, err
res, err := handler.GetRPCMethod(ctx, req)
if res == nil {
RecordCacheMiss(req.Method)
} else {
RecordCacheHit(req.Method)
}
res.ID = req.ID
return res, nil
return res, err
}
func (c *rpcCache) PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error {
......@@ -135,23 +155,5 @@ func (c *rpcCache) PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error {
if handler == nil {
return nil
}
cacheable, err := handler.IsCacheable(req)
if err != nil {
return err
}
if !cacheable {
return nil
}
requiresConfirmations, err := handler.RequiresUnconfirmedBlocks(ctx, req)
if err != nil {
return err
}
if requiresConfirmations {
return nil
}
key := handler.CacheKey(req)
val := mustMarshalJSON(res)
encodedVal := snappy.Encode(nil, val)
return c.cache.Put(ctx, key, string(encodedVal))
return handler.PutRPCMethod(ctx, req, res)
}
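Context for the redisCache change above: SetEX stores every entry with the large redisTTL so each cache key carries an expiration, which is what lets Redis's volatile-lru policy evict entries under memory pressure (volatile-lru only considers keys that have a TTL set). A minimal sketch of the server settings this relies on; the maxmemory value is illustrative, not part of this PR:

# redis.conf (illustrative values)
maxmemory 1gb
maxmemory-policy volatile-lru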
......@@ -2,6 +2,8 @@ package main
import (
"os"
"os/signal"
"syscall"
"github.com/BurntSushi/toml"
"github.com/ethereum-optimism/optimism/go/proxyd"
......@@ -35,7 +37,14 @@ func main() {
log.Crit("error reading config file", "err", err)
}
if err := proxyd.Start(config); err != nil {
shutdown, err := proxyd.Start(config)
if err != nil {
log.Crit("error starting proxyd", "err", err)
}
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
recvSig := <-sig
log.Info("caught signal, shutting down", "signal", recvSig)
shutdown()
}
......@@ -15,8 +15,9 @@ type ServerConfig struct {
}
type CacheConfig struct {
Enabled bool `toml:"enabled"`
BlockSyncRPCURL string `toml:"block_sync_rpc_url"`
Enabled bool `toml:"enabled"`
BlockSyncRPCURL string `toml:"block_sync_rpc_url"`
NumBlockConfirmations int `toml:"num_block_confirmations"`
}
type RedisConfig struct {
......@@ -61,11 +62,11 @@ type MethodMappingsConfig map[string]string
type Config struct {
WSBackendGroup string `toml:"ws_backend_group"`
Server *ServerConfig `toml:"server"`
Cache *CacheConfig `toml:"cache"`
Redis *RedisConfig `toml:"redis"`
Metrics *MetricsConfig `toml:"metrics"`
BackendOptions *BackendOptions `toml:"backend"`
Server ServerConfig `toml:"server"`
Cache CacheConfig `toml:"cache"`
Redis RedisConfig `toml:"redis"`
Metrics MetricsConfig `toml:"metrics"`
BackendOptions BackendOptions `toml:"backend"`
Backends BackendsConfig `toml:"backends"`
Authentication map[string]string `toml:"authentication"`
BackendGroups BackendGroupsConfig `toml:"backend_groups"`
......
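A note on the pointer-to-value change above: with value-typed sections, a TOML file that omits a table decodes to that section's zero value rather than a nil pointer, so config readers need no nil checks. A minimal sketch, assuming BurntSushi/toml's usual behavior of leaving absent tables zero-valued:

var config proxyd.Config
if _, err := toml.DecodeFile("proxyd.toml", &config); err != nil {
	log.Crit("error reading config file", "err", err)
}
// Even if the file has no [cache] table, config.Cache is a zero-valued
// CacheConfig rather than a nil pointer, so this cannot panic:
if config.Cache.Enabled {
	// wire up the RPC cache here
}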
......@@ -4,13 +4,18 @@ go 1.16
require (
github.com/BurntSushi/toml v0.4.1
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
github.com/alicebob/miniredis v2.5.0+incompatible
github.com/ethereum/go-ethereum v1.10.11
github.com/go-redis/redis/v8 v8.11.4
github.com/golang/snappy v0.0.4
github.com/gomodule/redigo v1.8.8 // indirect
github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.4.2
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/prometheus/client_golang v1.11.0
github.com/rs/cors v1.8.0
github.com/stretchr/testify v1.7.0
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)
......@@ -48,6 +48,10 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI=
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
......@@ -185,6 +189,8 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/gomodule/redigo v1.8.8 h1:f6cXq6RRfiyrOJEV7p3JhLDlmawGBVBBP1MggY8Mo4E=
github.com/gomodule/redigo v1.8.8/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
......@@ -427,6 +433,8 @@ github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw=
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
......@@ -520,6 +528,7 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
......@@ -679,8 +688,9 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
......
package integration_tests
import (
"bytes"
"fmt"
"github.com/alicebob/miniredis"
"github.com/ethereum-optimism/optimism/go/proxyd"
"github.com/stretchr/testify/require"
"os"
"testing"
"time"
)
func TestCaching(t *testing.T) {
redis, err := miniredis.Run()
require.NoError(t, err)
defer redis.Close()
hdlr := NewRPCResponseHandler(map[string]string{
"eth_chainId": "0x420",
"net_version": "0x1234",
"eth_blockNumber": "0x64",
"eth_getBlockByNumber": "dummy_block",
"eth_call": "dummy_call",
})
backend := NewMockBackend(hdlr)
defer backend.Close()
require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))
require.NoError(t, os.Setenv("REDIS_URL", fmt.Sprintf("redis://127.0.0.1:%s", redis.Port())))
config := ReadConfig("caching")
client := NewProxydClient("http://127.0.0.1:8545")
shutdown, err := proxyd.Start(config)
require.NoError(t, err)
defer shutdown()
// allow time for the block number fetcher to fire
time.Sleep(1500 * time.Millisecond)
tests := []struct {
method string
params []interface{}
response string
backendCalls int
}{
{
"eth_chainId",
nil,
"{\"jsonrpc\": \"2.0\", \"result\": \"0x420\", \"id\": 999}",
1,
},
{
"net_version",
nil,
"{\"jsonrpc\": \"2.0\", \"result\": \"0x1234\", \"id\": 999}",
1,
},
{
"eth_getBlockByNumber",
[]interface{}{
"0x1",
true,
},
"{\"jsonrpc\": \"2.0\", \"result\": \"dummy_block\", \"id\": 999}",
1,
},
{
"eth_call",
[]interface{}{
struct {
To string `json:"to"`
}{
"0x1234",
},
"0x60",
},
"{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":\"dummy_call\"}",
1,
},
{
"eth_blockNumber",
nil,
"{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":\"0x64\"}",
0,
},
{
"eth_call",
[]interface{}{
struct {
To string `json:"to"`
}{
"0x1234",
},
"latest",
},
"{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":\"dummy_call\"}",
2,
},
{
"eth_call",
[]interface{}{
struct {
To string `json:"to"`
}{
"0x1234",
},
"pending",
},
"{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":\"dummy_call\"}",
2,
},
}
for _, tt := range tests {
t.Run(tt.method, func(t *testing.T) {
resRaw, _, err := client.SendRPC(tt.method, tt.params)
require.NoError(t, err)
resCache, _, err := client.SendRPC(tt.method, tt.params)
require.NoError(t, err)
RequireEqualJSON(t, []byte(tt.response), resCache)
RequireEqualJSON(t, resRaw, resCache)
require.Equal(t, tt.backendCalls, countRequests(backend, tt.method))
backend.Reset()
})
}
hdlr.SetResponse("eth_blockNumber", "0x100")
time.Sleep(1500 * time.Millisecond)
resRaw, _, err := client.SendRPC("eth_blockNumber", nil)
RequireEqualJSON(t, []byte("{\"id\":999,\"jsonrpc\":\"2.0\",\"result\":\"0x100\"}"), resRaw)
}
func countRequests(backend *MockBackend, name string) int {
var count int
for _, req := range backend.Requests() {
if bytes.Contains(req.Body, []byte(name)) {
count++
}
}
return count
}
package integration_tests
import (
"fmt"
"github.com/ethereum-optimism/optimism/go/proxyd"
"github.com/stretchr/testify/require"
"net/http"
"os"
"sync/atomic"
"testing"
"time"
)
const (
goodResponse = `{"jsonrpc": "2.0", "result": "hello", "id": 999}`
noBackendsResponse = `{"error":{"code":-32011,"message":"no backends available for method"},"id":999,"jsonrpc":"2.0"}`
)
func TestFailover(t *testing.T) {
goodBackend := NewMockBackend(SingleResponseHandler(200, goodResponse))
defer goodBackend.Close()
badBackend := NewMockBackend(nil)
defer badBackend.Close()
require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL()))
config := ReadConfig("failover")
client := NewProxydClient("http://127.0.0.1:8545")
shutdown, err := proxyd.Start(config)
require.NoError(t, err)
defer shutdown()
tests := []struct {
name string
handler http.Handler
}{
{
"backend responds 200 with non-JSON response",
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
w.Write([]byte("this data is not JSON!"))
}),
},
{
"backend responds with no body",
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
}),
},
}
codes := []int{
300,
301,
302,
401,
403,
429,
500,
503,
}
for _, code := range codes {
tests = append(tests, struct {
name string
handler http.Handler
}{
fmt.Sprintf("backend %d", code),
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(code)
}),
})
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
badBackend.SetHandler(tt.handler)
res, statusCode, err := client.SendRPC("eth_chainId", nil)
require.NoError(t, err)
require.Equal(t, 200, statusCode)
RequireEqualJSON(t, []byte(goodResponse), res)
require.Equal(t, 1, len(badBackend.Requests()))
require.Equal(t, 1, len(goodBackend.Requests()))
badBackend.Reset()
goodBackend.Reset()
})
}
t.Run("backend times out and falls back to another", func(t *testing.T) {
badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(2 * time.Second)
w.Write([]byte("{}"))
}))
res, statusCode, err := client.SendRPC("eth_chainId", nil)
require.NoError(t, err)
require.Equal(t, 200, statusCode)
RequireEqualJSON(t, []byte(goodResponse), res)
require.Equal(t, 1, len(badBackend.Requests()))
require.Equal(t, 1, len(goodBackend.Requests()))
goodBackend.Reset()
badBackend.Reset()
})
t.Run("works with a batch request", func(t *testing.T) {
badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(500)
}))
res, statusCode, err := client.SendBatchRPC(
NewRPCReq("1", "eth_chainId", nil),
NewRPCReq("1", "eth_chainId", nil),
)
require.NoError(t, err)
require.Equal(t, 200, statusCode)
RequireEqualJSON(t, []byte(asArray(goodResponse, goodResponse)), res)
require.Equal(t, 2, len(badBackend.Requests()))
require.Equal(t, 2, len(goodBackend.Requests()))
})
}
func TestRetries(t *testing.T) {
backend := NewMockBackend(SingleResponseHandler(200, goodResponse))
defer backend.Close()
require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))
config := ReadConfig("retries")
client := NewProxydClient("http://127.0.0.1:8545")
shutdown, err := proxyd.Start(config)
require.NoError(t, err)
defer shutdown()
attempts := int32(0)
backend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
incremented := atomic.AddInt32(&attempts, 1)
if incremented != 2 {
w.WriteHeader(500)
return
}
w.Write([]byte(goodResponse))
}))
// test case where request eventually succeeds
res, statusCode, err := client.SendRPC("eth_chainId", nil)
require.NoError(t, err)
require.Equal(t, 200, statusCode)
RequireEqualJSON(t, []byte(goodResponse), res)
require.Equal(t, 2, len(backend.Requests()))
// test case where it does not
backend.Reset()
attempts = -10
res, statusCode, err = client.SendRPC("eth_chainId", nil)
require.NoError(t, err)
require.Equal(t, 503, statusCode)
RequireEqualJSON(t, []byte(noBackendsResponse), res)
require.Equal(t, 4, len(backend.Requests()))
}
func TestOutOfServiceInterval(t *testing.T) {
goodBackend := NewMockBackend(SingleResponseHandler(200, goodResponse))
defer goodBackend.Close()
badBackend := NewMockBackend(nil)
defer badBackend.Close()
require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL()))
config := ReadConfig("out_of_service_interval")
client := NewProxydClient("http://127.0.0.1:8545")
shutdown, err := proxyd.Start(config)
require.NoError(t, err)
defer shutdown()
okHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(goodResponse))
})
badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(503)
}))
goodBackend.SetHandler(okHandler)
res, statusCode, err := client.SendRPC("eth_chainId", nil)
require.NoError(t, err)
require.Equal(t, 200, statusCode)
RequireEqualJSON(t, []byte(goodResponse), res)
require.Equal(t, 2, len(badBackend.Requests()))
require.Equal(t, 1, len(goodBackend.Requests()))
res, statusCode, err = client.SendRPC("eth_chainId", nil)
require.NoError(t, err)
require.Equal(t, 200, statusCode)
RequireEqualJSON(t, []byte(goodResponse), res)
require.Equal(t, 2, len(badBackend.Requests()))
require.Equal(t, 2, len(goodBackend.Requests()))
res, statusCode, err = client.SendBatchRPC(
NewRPCReq("1", "eth_chainId", nil),
NewRPCReq("1", "eth_chainId", nil),
)
require.Equal(t, 2, len(badBackend.Requests()))
require.Equal(t, 4, len(goodBackend.Requests()))
time.Sleep(time.Second)
badBackend.SetHandler(okHandler)
res, statusCode, err = client.SendRPC("eth_chainId", nil)
require.NoError(t, err)
require.Equal(t, 200, statusCode)
RequireEqualJSON(t, []byte(goodResponse), res)
require.Equal(t, 3, len(badBackend.Requests()))
require.Equal(t, 4, len(goodBackend.Requests()))
}
package integration_tests
import (
"bytes"
"context"
"encoding/json"
"github.com/ethereum-optimism/optimism/go/proxyd"
"io/ioutil"
"net/http"
"net/http/httptest"
"sync"
)
type RecordedRequest struct {
Method string
Headers http.Header
Body []byte
}
type MockBackend struct {
handler http.Handler
server *httptest.Server
mtx sync.RWMutex
requests []*RecordedRequest
}
func SingleResponseHandler(code int, response string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(code)
w.Write([]byte(response))
}
}
type RPCResponseHandler struct {
mtx sync.RWMutex
rpcResponses map[string]string
}
func NewRPCResponseHandler(rpcResponses map[string]string) *RPCResponseHandler {
return &RPCResponseHandler{
rpcResponses: rpcResponses,
}
}
func (h *RPCResponseHandler) SetResponse(method, response string) {
h.mtx.Lock()
defer h.mtx.Unlock()
h.rpcResponses[method] = response
}
func (h *RPCResponseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(r.Body)
if err != nil {
panic(err)
}
req, err := proxyd.ParseRPCReq(body)
if err != nil {
panic(err)
}
h.mtx.RLock()
res := h.rpcResponses[req.Method]
h.mtx.RUnlock()
if res == "" {
w.WriteHeader(400)
return
}
out := &proxyd.RPCRes{
JSONRPC: proxyd.JSONRPCVersion,
Result: res,
ID: req.ID,
}
enc := json.NewEncoder(w)
if err := enc.Encode(out); err != nil {
panic(err)
}
}
func NewMockBackend(handler http.Handler) *MockBackend {
mb := &MockBackend{
handler: handler,
}
mb.server = httptest.NewServer(http.HandlerFunc(mb.wrappedHandler))
return mb
}
func (m *MockBackend) URL() string {
return m.server.URL
}
func (m *MockBackend) Close() {
m.server.Close()
}
func (m *MockBackend) SetHandler(handler http.Handler) {
m.mtx.Lock()
m.handler = handler
m.mtx.Unlock()
}
func (m *MockBackend) Reset() {
m.mtx.Lock()
m.requests = nil
m.mtx.Unlock()
}
func (m *MockBackend) Requests() []*RecordedRequest {
m.mtx.RLock()
defer m.mtx.RUnlock()
out := make([]*RecordedRequest, len(m.requests))
for i := 0; i < len(m.requests); i++ {
out[i] = m.requests[i]
}
return out
}
func (m *MockBackend) wrappedHandler(w http.ResponseWriter, r *http.Request) {
m.mtx.Lock()
body, err := ioutil.ReadAll(r.Body)
if err != nil {
panic(err)
}
clone := r.Clone(context.Background())
clone.Body = ioutil.NopCloser(bytes.NewReader(body))
m.requests = append(m.requests, &RecordedRequest{
Method: r.Method,
Headers: r.Header.Clone(),
Body: body,
})
m.handler.ServeHTTP(w, clone)
m.mtx.Unlock()
}
package integration_tests
import (
"github.com/ethereum-optimism/optimism/go/proxyd"
"github.com/stretchr/testify/require"
"os"
"testing"
)
type resWithCode struct {
code int
res []byte
}
func TestMaxRPSLimit(t *testing.T) {
goodBackend := NewMockBackend(SingleResponseHandler(200, goodResponse))
defer goodBackend.Close()
require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
config := ReadConfig("rate_limit")
client := NewProxydClient("http://127.0.0.1:8545")
shutdown, err := proxyd.Start(config)
require.NoError(t, err)
defer shutdown()
resCh := make(chan *resWithCode)
for i := 0; i < 3; i++ {
go func() {
res, code, err := client.SendRPC("eth_chainId", nil)
require.NoError(t, err)
resCh <- &resWithCode{
code: code,
res: res,
}
}()
}
codes := make(map[int]int)
var limitedRes []byte
for i := 0; i < 3; i++ {
res := <-resCh
code := res.code
if codes[code] == 0 {
codes[code] = 1
} else {
codes[code] += 1
}
// 503 because there's only one backend available
if code == 503 {
limitedRes = res.res
}
}
require.Equal(t, 2, codes[200])
require.Equal(t, 1, codes[503])
RequireEqualJSON(t, []byte(noBackendsResponse), limitedRes)
}
[server]
rpc_port = 8545
[backend]
response_timeout_seconds = 1
[redis]
url = "$REDIS_URL"
[cache]
enabled = true
block_sync_rpc_url = "$GOOD_BACKEND_RPC_URL"
[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backend_groups]
[backend_groups.main]
backends = ["good"]
[rpc_method_mappings]
eth_chainId = "main"
net_version = "main"
eth_getBlockByNumber = "main"
eth_blockNumber = "main"
eth_call = "main"
[server]
rpc_port = 8545
[backend]
response_timeout_seconds = 1
[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backends.bad]
rpc_url = "$BAD_BACKEND_RPC_URL"
ws_url = "$BAD_BACKEND_RPC_URL"
[backend_groups]
[backend_groups.main]
backends = ["bad", "good"]
[rpc_method_mappings]
eth_chainId = "main"
\ No newline at end of file
[server]
rpc_port = 8545
[backend]
response_timeout_seconds = 1
max_retries = 1
out_of_service_seconds = 1
[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backends.bad]
rpc_url = "$BAD_BACKEND_RPC_URL"
ws_url = "$BAD_BACKEND_RPC_URL"
[backend_groups]
[backend_groups.main]
backends = ["bad", "good"]
[rpc_method_mappings]
eth_chainId = "main"
\ No newline at end of file
[server]
rpc_port = 8545
[backend]
response_timeout_seconds = 1
[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
max_rps = 2
[backend_groups]
[backend_groups.main]
backends = ["good"]
[rpc_method_mappings]
eth_chainId = "main"
\ No newline at end of file
[server]
rpc_port = 8545
[backend]
response_timeout_seconds = 1
max_retries = 3
[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backend_groups]
[backend_groups.main]
backends = ["good"]
[rpc_method_mappings]
eth_chainId = "main"
\ No newline at end of file
[server]
rpc_port = 8545
[backend]
response_timeout_seconds = 1
[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backend_groups]
[backend_groups.main]
backends = ["good"]
[rpc_method_mappings]
eth_chainId = "main"
\ No newline at end of file
package integration_tests
import (
"bytes"
"encoding/json"
"fmt"
"github.com/BurntSushi/toml"
"github.com/ethereum-optimism/optimism/go/proxyd"
"github.com/stretchr/testify/require"
"io/ioutil"
"net/http"
"testing"
)
type ProxydClient struct {
url string
}
func NewProxydClient(url string) *ProxydClient {
return &ProxydClient{url: url}
}
func (p *ProxydClient) SendRPC(method string, params []interface{}) ([]byte, int, error) {
rpcReq := NewRPCReq("999", method, params)
body, err := json.Marshal(rpcReq)
if err != nil {
panic(err)
}
return p.SendRequest(body)
}
func (p *ProxydClient) SendBatchRPC(reqs ...*proxyd.RPCReq) ([]byte, int, error) {
body, err := json.Marshal(reqs)
if err != nil {
panic(err)
}
return p.SendRequest(body)
}
func (p *ProxydClient) SendRequest(body []byte) ([]byte, int, error) {
res, err := http.Post(p.url, "application/json", bytes.NewReader(body))
if err != nil {
return nil, -1, err
}
defer res.Body.Close()
code := res.StatusCode
resBody, err := ioutil.ReadAll(res.Body)
if err != nil {
panic(err)
}
return resBody, code, nil
}
func RequireEqualJSON(t *testing.T, expected []byte, actual []byte) {
expJSON := canonicalizeJSON(t, expected)
actJSON := canonicalizeJSON(t, actual)
require.Equal(t, string(expJSON), string(actJSON))
}
func canonicalizeJSON(t *testing.T, in []byte) []byte {
var any interface{}
if in[0] == '[' {
any = make([]interface{}, 0)
} else {
any = make(map[string]interface{})
}
err := json.Unmarshal(in, &any)
require.NoError(t, err)
out, err := json.Marshal(any)
require.NoError(t, err)
return out
}
func ReadConfig(name string) *proxyd.Config {
config := new(proxyd.Config)
_, err := toml.DecodeFile(fmt.Sprintf("testdata/%s.toml", name), config)
if err != nil {
panic(err)
}
return config
}
func NewRPCReq(id string, method string, params []interface{}) *proxyd.RPCReq {
jsonParams, err := json.Marshal(params)
if err != nil {
panic(err)
}
return &proxyd.RPCReq{
JSONRPC: proxyd.JSONRPCVersion,
Method: method,
Params: jsonParams,
ID: []byte(id),
}
}
package integration_tests
import (
"github.com/ethereum-optimism/optimism/go/proxyd"
"github.com/stretchr/testify/require"
"os"
"strings"
"testing"
)
const (
notWhitelistedResponse = `{"jsonrpc":"2.0","error":{"code":-32001,"message":"rpc method is not whitelisted"},"id":999}`
parseErrResponse = `{"jsonrpc":"2.0","error":{"code":-32700,"message":"parse error"},"id":null}`
invalidJSONRPCVersionResponse = `{"error":{"code":-32601,"message":"invalid JSON-RPC version"},"id":null,"jsonrpc":"2.0"}`
invalidIDResponse = `{"error":{"code":-32601,"message":"invalid ID"},"id":null,"jsonrpc":"2.0"}`
invalidMethodResponse = `{"error":{"code":-32601,"message":"no method specified"},"id":null,"jsonrpc":"2.0"}`
invalidBatchLenResponse = `{"error":{"code":-32601,"message":"must specify at least one batch call"},"id":null,"jsonrpc":"2.0"}`
)
func TestSingleRPCValidation(t *testing.T) {
goodBackend := NewMockBackend(SingleResponseHandler(200, goodResponse))
defer goodBackend.Close()
require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
config := ReadConfig("whitelist")
client := NewProxydClient("http://127.0.0.1:8545")
shutdown, err := proxyd.Start(config)
require.NoError(t, err)
defer shutdown()
tests := []struct {
name string
body string
res string
code int
}{
{
"body not JSON",
"this ain't an RPC call",
parseErrResponse,
400,
},
{
"body not RPC",
"{\"not\": \"rpc\"}",
invalidJSONRPCVersionResponse,
400,
},
{
"body missing RPC ID",
"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23]}",
invalidIDResponse,
400,
},
{
"body has array ID",
"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": []}",
invalidIDResponse,
400,
},
{
"body has object ID",
"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": {}}",
invalidIDResponse,
400,
},
{
"bad method",
"{\"jsonrpc\": \"2.0\", \"method\": 7, \"params\": [42, 23], \"id\": 1}",
parseErrResponse,
400,
},
{
"bad JSON-RPC",
"{\"jsonrpc\": \"1.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 1}",
invalidJSONRPCVersionResponse,
400,
},
{
"omitted method",
"{\"jsonrpc\": \"2.0\", \"params\": [42, 23], \"id\": 1}",
invalidMethodResponse,
400,
},
{
"not whitelisted method",
"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}",
notWhitelistedResponse,
403,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
res, code, err := client.SendRequest([]byte(tt.body))
require.NoError(t, err)
RequireEqualJSON(t, []byte(tt.res), res)
require.Equal(t, tt.code, code)
require.Equal(t, 0, len(goodBackend.Requests()))
})
}
}
func TestBatchRPCValidation(t *testing.T) {
goodBackend := NewMockBackend(SingleResponseHandler(200, goodResponse))
defer goodBackend.Close()
require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
config := ReadConfig("whitelist")
client := NewProxydClient("http://127.0.0.1:8545")
shutdown, err := proxyd.Start(config)
require.NoError(t, err)
defer shutdown()
tests := []struct {
name string
body string
res string
code int
reqCount int
}{
{
"empty batch",
"[]",
invalidBatchLenResponse,
400,
0,
},
{
"bad json",
"[{,]",
parseErrResponse,
400,
0,
},
{
"not object in batch",
"[123]",
asArray(parseErrResponse),
200,
0,
},
{
"body not RPC",
"[{\"not\": \"rpc\"}]",
asArray(invalidJSONRPCVersionResponse),
200,
0,
},
{
"body missing RPC ID",
"[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23]}]",
asArray(invalidIDResponse),
200,
0,
},
{
"body has array ID",
"[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": []}]",
asArray(invalidIDResponse),
200,
0,
},
{
"body has object ID",
"[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": {}}]",
asArray(invalidIDResponse),
200,
0,
},
// this happens because we can't deserialize the method into a
// non-string value, and it blows up the parsing for the whole request.
{
"bad method",
"[{\"error\":{\"code\":-32600,\"message\":\"invalid request\"},\"id\":null,\"jsonrpc\":\"2.0\"}]",
asArray(invalidMethodResponse),
200,
0,
},
{
"bad JSON-RPC",
"[{\"jsonrpc\": \"1.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 1}]",
asArray(invalidJSONRPCVersionResponse),
200,
0,
},
{
"omitted method",
"[{\"jsonrpc\": \"2.0\", \"params\": [42, 23], \"id\": 1}]",
asArray(invalidMethodResponse),
200,
0,
},
{
"not whitelisted method",
"[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}]",
asArray(notWhitelistedResponse),
200,
0,
},
{
"mixed",
asArray(
"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}",
"{\"jsonrpc\": \"2.0\", \"method\": \"eth_chainId\", \"params\": [], \"id\": 123}",
"123",
),
asArray(
notWhitelistedResponse,
goodResponse,
parseErrResponse,
),
200,
1,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
res, code, err := client.SendRequest([]byte(tt.body))
require.NoError(t, err)
RequireEqualJSON(t, []byte(tt.res), res)
require.Equal(t, tt.code, code)
require.Equal(t, tt.reqCount, len(goodBackend.Requests()))
})
}
}
func asArray(in ...string) string {
return "[" + strings.Join(in, ",") + "]"
}
package proxyd
import (
"context"
"sync"
"time"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
)
const blockHeadSyncPeriod = 1 * time.Second
type LatestBlockHead struct {
url string
client *ethclient.Client
quit chan struct{}
done chan struct{}
mutex sync.RWMutex
blockNum uint64
}
func newLatestBlockHead(url string) (*LatestBlockHead, error) {
client, err := ethclient.Dial(url)
if err != nil {
return nil, err
}
return &LatestBlockHead{
url: url,
client: client,
quit: make(chan struct{}),
done: make(chan struct{}),
}, nil
}
func (h *LatestBlockHead) Start() {
go func() {
ticker := time.NewTicker(blockHeadSyncPeriod)
defer ticker.Stop()
for {
select {
case <-ticker.C:
blockNum, err := h.getBlockNum()
if err != nil {
log.Error("error retrieving latest block number", "error", err)
continue
}
log.Trace("polling block number", "blockNum", blockNum)
h.mutex.Lock()
h.blockNum = blockNum
h.mutex.Unlock()
case <-h.quit:
close(h.done)
return
}
}
}()
}
func (h *LatestBlockHead) getBlockNum() (uint64, error) {
const maxRetries = 5
var err error
for i := 0; i <= maxRetries; i++ {
var blockNum uint64
blockNum, err = h.client.BlockNumber(context.Background())
if err != nil {
backoff := calcBackoff(i)
log.Warn("http operation failed. retrying...", "error", err, "backoff", backoff)
time.Sleep(backoff)
continue
}
return blockNum, nil
}
return 0, wrapErr(err, "exceeded retries")
}
func (h *LatestBlockHead) Stop() {
close(h.quit)
<-h.done
h.client.Close()
}
func (h *LatestBlockHead) GetBlockNum() uint64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
return h.blockNum
}
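For orientation, a minimal usage sketch of the poller above, using only the methods shown in this diff; the endpoint URL is a placeholder, not a real configuration value:

func exampleLatestBlockHead() {
	// Dial the block sync node and begin polling once per second.
	head, err := newLatestBlockHead("http://127.0.0.1:8545")
	if err != nil {
		log.Error("failed to dial block sync node", "err", err)
		return
	}
	head.Start()
	defer head.Stop()

	// GetBlockNum returns the most recently polled value; it reads as
	// zero until the first successful poll completes.
	_ = head.GetBlockNum()
}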
package proxyd
import (
"context"
"time"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
)
const cacheSyncRate = 1 * time.Second
type lvcUpdateFn func(context.Context, *ethclient.Client) (string, error)
type EthLastValueCache struct {
client *ethclient.Client
cache Cache
key string
updater lvcUpdateFn
quit chan struct{}
}
func newLVC(client *ethclient.Client, cache Cache, cacheKey string, updater lvcUpdateFn) *EthLastValueCache {
return &EthLastValueCache{
client: client,
cache: cache,
key: cacheKey,
updater: updater,
quit: make(chan struct{}),
}
}
func (h *EthLastValueCache) Start() {
go func() {
ticker := time.NewTicker(cacheSyncRate)
defer ticker.Stop()
for {
select {
case <-ticker.C:
lvcPollTimeGauge.WithLabelValues(h.key).SetToCurrentTime()
value, err := h.getUpdate()
if err != nil {
log.Error("error retrieving latest value", "key", h.key, "error", err)
continue
}
log.Trace("polling latest value", "value", value)
if err := h.cache.Put(context.Background(), h.key, value); err != nil {
log.Error("error writing last value to cache", "key", h.key, "error", err)
}
case <-h.quit:
return
}
}
}()
}
func (h *EthLastValueCache) getUpdate() (string, error) {
const maxRetries = 5
var err error
for i := 0; i <= maxRetries; i++ {
var value string
value, err = h.updater(context.Background(), h.client)
if err != nil {
backoff := calcBackoff(i)
log.Warn("http operation failed. retrying...", "error", err, "backoff", backoff)
lvcErrorsTotal.WithLabelValues(h.key).Inc()
time.Sleep(backoff)
continue
}
return value, nil
}
return "", wrapErr(err, "exceeded retries")
}
func (h *EthLastValueCache) Stop() {
close(h.quit)
}
func (h *EthLastValueCache) Read(ctx context.Context) (string, error) {
return h.cache.Get(ctx, h.key)
}
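A hedged sketch of wiring the last-value cache above. The Cache interface is assumed to expose only the Get/Put signatures used in this file, the toy map-backed cache and its sync import are illustrative, and the chain-ID updater is just one possible lvcUpdateFn:

type exampleMapCache struct {
	mu sync.Mutex // assumes "sync" is imported
	m  map[string]string
}

func (c *exampleMapCache) Get(ctx context.Context, key string) (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.m[key], nil
}

func (c *exampleMapCache) Put(ctx context.Context, key string, value string) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[key] = value
	return nil
}

func exampleLVC(client *ethclient.Client) {
	cache := &exampleMapCache{m: make(map[string]string)}
	// Poll eth_chainId once per second and keep the last value cached.
	lvc := newLVC(client, cache, "lvc:example_chain_id", func(ctx context.Context, c *ethclient.Client) (string, error) {
		id, err := c.ChainID(ctx)
		if err != nil {
			return "", err
		}
		return id.String(), nil
	})
	lvc.Start()
	defer lvc.Stop()

	// Read returns "" until the first successful poll lands in the cache.
	if v, err := lvc.Read(context.Background()); err == nil {
		_ = v
	}
}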
......@@ -145,22 +145,53 @@ var (
requestPayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: MetricsNamespace,
Name: "request_payload_sizes",
Help: "Gauge of client request payload sizes.",
Help: "Histogram of client request payload sizes.",
Buckets: PayloadSizeBuckets,
}, []string{
"auth",
"method_name",
})
responsePayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: MetricsNamespace,
Name: "response_payload_sizes",
Help: "Gauge of client response payload sizes.",
Help: "Histogram of client response payload sizes.",
Buckets: PayloadSizeBuckets,
}, []string{
"auth",
})
cacheHitsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "cache_hits_total",
Help: "Number of cache hits.",
}, []string{
"method",
})
cacheMissesTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "cache_misses_total",
Help: "Number of cache misses.",
}, []string{
"method",
})
lvcErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "lvc_errors_total",
Help: "Count of lvc errors.",
}, []string{
"key",
})
lvcPollTimeGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Name: "lvc_poll_time_gauge",
Help: "Gauge of lvc poll time.",
}, []string{
"key",
})
rpcSpecialErrors = []string{
"nonce too low",
"gas price too high",
......@@ -208,10 +239,18 @@ func MaybeRecordSpecialRPCError(ctx context.Context, backendName, method string,
}
}
func RecordRequestPayloadSize(ctx context.Context, method string, payloadSize int) {
requestPayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx), method).Observe(float64(payloadSize))
func RecordRequestPayloadSize(ctx context.Context, payloadSize int) {
requestPayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx)).Observe(float64(payloadSize))
}
func RecordResponsePayloadSize(ctx context.Context, payloadSize int) {
responsePayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx)).Observe(float64(payloadSize))
}
func RecordCacheHit(method string) {
cacheHitsTotal.WithLabelValues(method).Inc()
}
func RecordCacheMiss(method string) {
cacheMissesTotal.WithLabelValues(method).Inc()
}
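The hit/miss counters above are meant to bracket each cache lookup; a small sketch with a hypothetical helper (the helper's name and the empty-string-means-miss convention are assumptions, not part of this diff):

func exampleInstrumentedLookup(ctx context.Context, cache Cache, method, key string) (string, bool) {
	value, err := cache.Get(ctx, key)
	if err != nil || value == "" {
		// Count lookups that return nothing usable as misses.
		RecordCacheMiss(method)
		return "", false
	}
	RecordCacheHit(method)
	return value, true
}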
......@@ -7,40 +7,49 @@ import (
"fmt"
"net/http"
"os"
"os/signal"
"syscall"
"strconv"
"time"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
func Start(config *Config) error {
func Start(config *Config) (func(), error) {
if len(config.Backends) == 0 {
return errors.New("must define at least one backend")
return nil, errors.New("must define at least one backend")
}
if len(config.BackendGroups) == 0 {
return errors.New("must define at least one backend group")
return nil, errors.New("must define at least one backend group")
}
if len(config.RPCMethodMappings) == 0 {
return errors.New("must define at least one RPC method mapping")
return nil, errors.New("must define at least one RPC method mapping")
}
for authKey := range config.Authentication {
if authKey == "none" {
return errors.New("cannot use none as an auth key")
return nil, errors.New("cannot use none as an auth key")
}
}
var redisURL string
if config.Redis.URL != "" {
rURL, err := ReadFromEnvOrConfig(config.Redis.URL)
if err != nil {
return nil, err
}
redisURL = rURL
}
var lim RateLimiter
var err error
if config.Redis == nil {
if redisURL == "" {
log.Warn("redis is not configured, using local rate limiter")
lim = NewLocalRateLimiter()
} else {
lim, err = NewRedisRateLimiter(config.Redis.URL)
lim, err = NewRedisRateLimiter(redisURL)
if err != nil {
return err
return nil, err
}
}
......@@ -51,17 +60,17 @@ func Start(config *Config) error {
rpcURL, err := ReadFromEnvOrConfig(cfg.RPCURL)
if err != nil {
return err
return nil, err
}
wsURL, err := ReadFromEnvOrConfig(cfg.WSURL)
if err != nil {
return err
return nil, err
}
if rpcURL == "" {
return fmt.Errorf("must define an RPC URL for backend %s", name)
return nil, fmt.Errorf("must define an RPC URL for backend %s", name)
}
if wsURL == "" {
return fmt.Errorf("must define a WS URL for backend %s", name)
return nil, fmt.Errorf("must define a WS URL for backend %s", name)
}
if config.BackendOptions.ResponseTimeoutSeconds != 0 {
......@@ -86,13 +95,13 @@ func Start(config *Config) error {
if cfg.Password != "" {
passwordVal, err := ReadFromEnvOrConfig(cfg.Password)
if err != nil {
return err
return nil, err
}
opts = append(opts, WithBasicAuth(cfg.Username, passwordVal))
}
tlsConfig, err := configureBackendTLS(cfg)
if err != nil {
return err
return nil, err
}
if tlsConfig != nil {
log.Info("using custom TLS config for backend", "name", name)
......@@ -113,7 +122,7 @@ func Start(config *Config) error {
backends := make([]*Backend, 0)
for _, bName := range bg.Backends {
if backendsByName[bName] == nil {
return fmt.Errorf("backend %s is not defined", bName)
return nil, fmt.Errorf("backend %s is not defined", bName)
}
backends = append(backends, backendsByName[bName])
}
......@@ -128,17 +137,17 @@ func Start(config *Config) error {
if config.WSBackendGroup != "" {
wsBackendGroup = backendGroups[config.WSBackendGroup]
if wsBackendGroup == nil {
return fmt.Errorf("ws backend group %s does not exist", config.WSBackendGroup)
return nil, fmt.Errorf("ws backend group %s does not exist", config.WSBackendGroup)
}
}
if wsBackendGroup == nil && config.Server.WSPort != 0 {
return fmt.Errorf("a ws port was defined, but no ws group was defined")
return nil, fmt.Errorf("a ws port was defined, but no ws group was defined")
}
for _, bg := range config.RPCMethodMappings {
if backendGroups[bg] == nil {
return fmt.Errorf("undefined backend group %s", bg)
return nil, fmt.Errorf("undefined backend group %s", bg)
}
}
......@@ -149,39 +158,50 @@ func Start(config *Config) error {
for secret, alias := range config.Authentication {
resolvedSecret, err := ReadFromEnvOrConfig(secret)
if err != nil {
return err
return nil, err
}
resolvedAuth[resolvedSecret] = alias
}
}
var rpcCache RPCCache
if config.Cache != nil && config.Cache.Enabled {
var cache Cache
if config.Redis != nil {
if cache, err = newRedisCache(config.Redis.URL); err != nil {
return err
var (
rpcCache RPCCache
blockNumLVC *EthLastValueCache
gasPriceLVC *EthLastValueCache
)
if config.Cache.Enabled {
var (
cache Cache
blockNumFn GetLatestBlockNumFn
gasPriceFn GetLatestGasPriceFn
)
if config.Cache.BlockSyncRPCURL == "" {
return nil, fmt.Errorf("block sync node required for caching")
}
blockSyncRPCURL, err := ReadFromEnvOrConfig(config.Cache.BlockSyncRPCURL)
if err != nil {
return nil, err
}
if redisURL != "" {
if cache, err = newRedisCache(redisURL); err != nil {
return nil, err
}
} else {
log.Warn("redis is not configured, using in-memory cache")
cache = newMemoryCache()
}
var getLatestBlockNumFn GetLatestBlockNumFn
if config.Cache.BlockSyncRPCURL == "" {
return fmt.Errorf("block sync node required for caching")
}
latestHead, err := newLatestBlockHead(config.Cache.BlockSyncRPCURL)
// Ideally, BlockSyncRPCURL should point at the sequencer or an HA replica that is not far behind
ethClient, err := ethclient.Dial(blockSyncRPCURL)
if err != nil {
return err
return nil, err
}
latestHead.Start()
defer latestHead.Stop()
defer ethClient.Close()
getLatestBlockNumFn = func(ctx context.Context) (uint64, error) {
return latestHead.GetBlockNum(), nil
}
rpcCache = newRPCCache(cache, getLatestBlockNumFn)
blockNumLVC, blockNumFn = makeGetLatestBlockNumFn(ethClient, cache)
gasPriceLVC, gasPriceFn = makeGetLatestGasPriceFn(ethClient, cache)
rpcCache = newRPCCache(newCacheWithCompression(cache), blockNumFn, gasPriceFn, config.Cache.NumBlockConfirmations)
}
srv := NewServer(
......@@ -194,12 +214,17 @@ func Start(config *Config) error {
rpcCache,
)
if config.Metrics != nil && config.Metrics.Enabled {
if config.Metrics.Enabled {
addr := fmt.Sprintf("%s:%d", config.Metrics.Host, config.Metrics.Port)
log.Info("starting metrics server", "addr", addr)
go http.ListenAndServe(addr, promhttp.Handler())
}
// To allow integration tests to come up cleanly, wait 10ms so the
// goroutines below have enough time to surface any error they hit
// while creating their servers.
errTimer := time.NewTimer(10 * time.Millisecond)
if config.Server.RPCPort != 0 {
go func() {
if err := srv.RPCListenAndServe(config.Server.RPCHost, config.Server.RPCPort); err != nil {
......@@ -224,15 +249,23 @@ func Start(config *Config) error {
}()
}
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
recvSig := <-sig
log.Info("caught signal, shutting down", "signal", recvSig)
srv.Shutdown()
if err := lim.FlushBackendWSConns(backendNames); err != nil {
log.Error("error flushing backend ws conns", "err", err)
}
return nil
<-errTimer.C
log.Info("started proxyd")
return func() {
log.Info("shutting down proxyd")
if blockNumLVC != nil {
blockNumLVC.Stop()
}
if gasPriceLVC != nil {
gasPriceLVC.Stop()
}
srv.Shutdown()
if err := lim.FlushBackendWSConns(backendNames); err != nil {
log.Error("error flushing backend ws conns", "err", err)
}
log.Info("goodbye")
}, nil
}
func secondsToDuration(seconds int) time.Duration {
......@@ -259,3 +292,39 @@ func configureBackendTLS(cfg *BackendConfig) (*tls.Config, error) {
return tlsConfig, nil
}
func makeUint64LastValueFn(client *ethclient.Client, cache Cache, key string, updater lvcUpdateFn) (*EthLastValueCache, func(context.Context) (uint64, error)) {
lvc := newLVC(client, cache, key, updater)
lvc.Start()
return lvc, func(ctx context.Context) (uint64, error) {
value, err := lvc.Read(ctx)
if err != nil {
return 0, err
}
if value == "" {
return 0, fmt.Errorf("%s is unavailable", key)
}
valueUint, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return 0, err
}
return valueUint, nil
}
}
func makeGetLatestBlockNumFn(client *ethclient.Client, cache Cache) (*EthLastValueCache, GetLatestBlockNumFn) {
return makeUint64LastValueFn(client, cache, "lvc:block_number", func(ctx context.Context, c *ethclient.Client) (string, error) {
blockNum, err := c.BlockNumber(ctx)
return strconv.FormatUint(blockNum, 10), err
})
}
func makeGetLatestGasPriceFn(client *ethclient.Client, cache Cache) (*EthLastValueCache, GetLatestGasPriceFn) {
return makeUint64LastValueFn(client, cache, "lvc:gas_price", func(ctx context.Context, c *ethclient.Client) (string, error) {
gasPrice, err := c.SuggestGasPrice(ctx)
if err != nil {
return "", err
}
return gasPrice.String(), nil
})
}
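For clarity, a sketch of consuming the functions built above. GetLatestBlockNumFn's shape, func(context.Context) (uint64, error), is inferred from its use earlier in this file; the caller below is illustrative:

func exampleConsumeBlockNum(ctx context.Context, getBlockNum GetLatestBlockNumFn) {
	blockNum, err := getBlockNum(ctx)
	if err != nil {
		// The LVC either has not polled successfully yet (the key is
		// reported as unavailable) or the cached string failed to parse.
		log.Warn("latest block number unavailable", "err", err)
		return
	}
	log.Info("latest block number", "blockNum", blockNum)
}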
......@@ -15,16 +15,46 @@ type RPCReq struct {
}
type RPCRes struct {
JSONRPC string
Result interface{}
Error *RPCErr
ID json.RawMessage
}
type rpcResJSON struct {
JSONRPC string `json:"jsonrpc"`
Result interface{} `json:"result,omitempty"`
Error *RPCErr `json:"error,omitempty"`
ID json.RawMessage `json:"id"`
}
type nullResultRPCRes struct {
JSONRPC string `json:"jsonrpc"`
Result interface{} `json:"result"`
ID json.RawMessage `json:"id"`
}
func (r *RPCRes) IsError() bool {
return r.Error != nil
}
func (r *RPCRes) MarshalJSON() ([]byte, error) {
if r.Result == nil && r.Error == nil {
return json.Marshal(&nullResultRPCRes{
JSONRPC: r.JSONRPC,
Result: nil,
ID: r.ID,
})
}
return json.Marshal(&rpcResJSON{
JSONRPC: r.JSONRPC,
Result: r.Result,
Error: r.Error,
ID: r.ID,
})
}
type RPCErr struct {
Code int `json:"code"`
Message string `json:"message"`
......@@ -46,30 +76,22 @@ func IsValidID(id json.RawMessage) bool {
return len(id) > 0 && id[0] != '{' && id[0] != '['
}
func ParseRPCReq(r io.Reader) (*RPCReq, error) {
body, err := ioutil.ReadAll(r)
if err != nil {
return nil, wrapErr(err, "error reading request body")
}
func ParseRPCReq(body []byte) (*RPCReq, error) {
req := new(RPCReq)
if err := json.Unmarshal(body, req); err != nil {
return nil, ErrParseErr
}
if req.JSONRPC != JSONRPCVersion {
return nil, ErrInvalidRequest("invalid JSON-RPC version")
}
if req.Method == "" {
return nil, ErrInvalidRequest("no method specified")
}
return req, nil
}
if !IsValidID(req.ID) {
return nil, ErrInvalidRequest("invalid ID")
func ParseBatchRPCReq(body []byte) ([]json.RawMessage, error) {
batch := make([]json.RawMessage, 0)
if err := json.Unmarshal(body, &batch); err != nil {
return nil, err
}
return req, nil
return batch, nil
}
func ParseRPCRes(r io.Reader) (*RPCRes, error) {
......@@ -86,6 +108,22 @@ func ParseRPCRes(r io.Reader) (*RPCRes, error) {
return res, nil
}
func ValidateRPCReq(req *RPCReq) error {
if req.JSONRPC != JSONRPCVersion {
return ErrInvalidRequest("invalid JSON-RPC version")
}
if req.Method == "" {
return ErrInvalidRequest("no method specified")
}
if !IsValidID(req.ID) {
return ErrInvalidRequest("invalid ID")
}
return nil
}
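Taken together with ParseRPCReq above, validation is now a separate pass; a minimal sketch of the intended two-phase flow, using only functions defined in this file:

func exampleParseThenValidate(body []byte) (*RPCReq, error) {
	// Phase 1: syntactic parse; malformed JSON yields ErrParseErr.
	req, err := ParseRPCReq(body)
	if err != nil {
		return nil, err
	}
	// Phase 2: semantic checks on version, method, and ID.
	if err := ValidateRPCReq(req); err != nil {
		return nil, err
	}
	return req, nil
}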
func NewRPCErrorRes(id json.RawMessage, err error) *RPCRes {
var rpcErr *RPCErr
if rr, ok := err.(*RPCErr); ok {
......@@ -103,3 +141,14 @@ func NewRPCErrorRes(id json.RawMessage, err error) *RPCRes {
ID: id,
}
}
func IsBatch(raw []byte) bool {
for _, c := range raw {
// skip insignificant whitespace (http://www.ietf.org/rfc/rfc4627.txt)
if c == 0x20 || c == 0x09 || c == 0x0a || c == 0x0d {
continue
}
return c == '['
}
return false
}
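A quick sanity sketch of IsBatch on the inputs it is meant to distinguish; the whitespace case exercises the RFC 4627 skip above:

func exampleIsBatch() {
	_ = IsBatch([]byte(`[{"jsonrpc":"2.0"}]`)) // true: first significant byte is '['
	_ = IsBatch([]byte("  \t\n[1, 2, 3]"))     // true: leading whitespace is skipped
	_ = IsBatch([]byte(`{"jsonrpc":"2.0"}`))   // false: single call
	_ = IsBatch([]byte{})                      // false: empty body
}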
package proxyd
import (
"encoding/json"
"github.com/stretchr/testify/require"
"testing"
)
func TestRPCResJSON(t *testing.T) {
tests := []struct {
name string
in *RPCRes
out string
}{
{
"string result",
&RPCRes{
JSONRPC: JSONRPCVersion,
Result: "foobar",
ID: []byte("123"),
},
`{"jsonrpc":"2.0","result":"foobar","id":123}`,
},
{
"object result",
&RPCRes{
JSONRPC: JSONRPCVersion,
Result: struct {
Str string `json:"str"`
}{
"test",
},
ID: []byte("123"),
},
`{"jsonrpc":"2.0","result":{"str":"test"},"id":123}`,
},
{
"nil result",
&RPCRes{
JSONRPC: JSONRPCVersion,
Result: nil,
ID: []byte("123"),
},
`{"jsonrpc":"2.0","result":null,"id":123}`,
},
{
"error result",
&RPCRes{
JSONRPC: JSONRPCVersion,
Error: &RPCErr{
Code: 1234,
Message: "test err",
},
ID: []byte("123"),
},
`{"jsonrpc":"2.0","error":{"code":1234,"message":"test err"},"id":123}`,
},
{
"string ID",
&RPCRes{
JSONRPC: JSONRPCVersion,
Result: "foobar",
ID: []byte("\"123\""),
},
`{"jsonrpc":"2.0","result":"foobar","id":"123"}`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
out, err := json.Marshal(tt.in)
require.NoError(t, err)
require.Equal(t, tt.out, string(out))
})
}
}
......@@ -6,6 +6,8 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"strconv"
"strings"
......@@ -22,6 +24,7 @@ const (
ContextKeyAuth = "authorization"
ContextKeyReqID = "req_id"
ContextKeyXForwardedFor = "x_forwarded_for"
MaxBatchRPCCalls = 100
)
type Server struct {
......@@ -49,6 +52,11 @@ func NewServer(
if cache == nil {
cache = &NoopRPCCache{}
}
if maxBodySize == 0 {
maxBodySize = math.MaxInt64
}
return &Server{
backendGroups: backendGroups,
wsBackendGroup: wsBackendGroup,
......@@ -122,15 +130,66 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
"user_agent", r.Header.Get("user-agent"),
)
bodyReader := &recordLenReader{Reader: io.LimitReader(r.Body, s.maxBodySize)}
req, err := ParseRPCReq(bodyReader)
body, err := ioutil.ReadAll(io.LimitReader(r.Body, s.maxBodySize))
if err != nil {
log.Info("rejected request with bad rpc request", "source", "rpc", "err", err)
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
log.Error("error reading request body", "err", err)
writeRPCError(ctx, w, nil, ErrInternal)
return
}
RecordRequestPayloadSize(ctx, len(body))
if IsBatch(body) {
reqs, err := ParseBatchRPCReq(body)
if err != nil {
log.Error("error parsing batch RPC request", "err", err)
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
writeRPCError(ctx, w, nil, ErrParseErr)
return
}
if len(reqs) > MaxBatchRPCCalls {
RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrTooManyBatchRequests)
writeRPCError(ctx, w, nil, ErrTooManyBatchRequests)
return
}
if len(reqs) == 0 {
writeRPCError(ctx, w, nil, ErrInvalidRequest("must specify at least one batch call"))
return
}
batchRes := make([]*RPCRes, len(reqs), len(reqs))
for i := 0; i < len(reqs); i++ {
req, err := ParseRPCReq(reqs[i])
if err != nil {
log.Info("error parsing RPC call", "source", "rpc", "err", err)
batchRes[i] = NewRPCErrorRes(nil, err)
continue
}
batchRes[i] = s.handleSingleRPC(ctx, req)
}
writeBatchRPCRes(ctx, w, batchRes)
return
}
req, err := ParseRPCReq(body)
if err != nil {
log.Info("error parsing RPC call", "source", "rpc", "err", err)
writeRPCError(ctx, w, nil, err)
return
}
RecordRequestPayloadSize(ctx, req.Method, bodyReader.Len)
backendRes := s.handleSingleRPC(ctx, req)
writeRPCRes(ctx, w, backendRes)
}
func (s *Server) handleSingleRPC(ctx context.Context, req *RPCReq) *RPCRes {
if err := ValidateRPCReq(req); err != nil {
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
return NewRPCErrorRes(nil, err)
}
group := s.rpcMethodMappings[req.Method]
if group == "" {
......@@ -143,16 +202,11 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
"method", req.Method,
)
RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrMethodNotWhitelisted)
writeRPCError(ctx, w, req.ID, ErrMethodNotWhitelisted)
return
return NewRPCErrorRes(req.ID, ErrMethodNotWhitelisted)
}
var backendRes *RPCRes
backendRes, err = s.cache.GetRPC(ctx, req)
if err == nil && backendRes != nil {
writeRPCRes(ctx, w, backendRes)
return
}
backendRes, err := s.cache.GetRPC(ctx, req)
if err != nil {
log.Warn(
"cache lookup error",
......@@ -160,6 +214,9 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
"err", err,
)
}
if backendRes != nil {
return backendRes
}
backendRes, err = s.backendGroups[group].Forward(ctx, req)
if err != nil {
......@@ -169,8 +226,7 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
"req_id", GetReqID(ctx),
"err", err,
)
writeRPCError(ctx, w, req.ID, err)
return
return NewRPCErrorRes(req.ID, err)
}
if backendRes.Error == nil {
......@@ -183,7 +239,7 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
}
}
writeRPCRes(ctx, w, backendRes)
return backendRes
}
func (s *Server) HandleWS(w http.ResponseWriter, r *http.Request) {
......@@ -282,6 +338,7 @@ func writeRPCRes(ctx context.Context, w http.ResponseWriter, res *RPCRes) {
statusCode = res.Error.HTTPErrorCode
}
w.Header().Set("content-type", "application/json")
w.WriteHeader(statusCode)
ww := &recordLenWriter{Writer: w}
enc := json.NewEncoder(ww)
......@@ -294,6 +351,19 @@ func writeRPCRes(ctx context.Context, w http.ResponseWriter, res *RPCRes) {
RecordResponsePayloadSize(ctx, ww.Len)
}
func writeBatchRPCRes(ctx context.Context, w http.ResponseWriter, res []*RPCRes) {
w.Header().Set("content-type", "application/json")
w.WriteHeader(200)
ww := &recordLenWriter{Writer: w}
enc := json.NewEncoder(ww)
if err := enc.Encode(res); err != nil {
log.Error("error writing batch rpc response", "err", err)
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
return
}
RecordResponsePayloadSize(ctx, ww.Len)
}
func instrumentedHdlr(h http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
respTimer := prometheus.NewTimer(httpRequestDurationSumm)
......
......@@ -4,3 +4,14 @@ L1_URL=
L2_URL=
ADDRESS_MANAGER=
L2_CHAINID=
DTL_ENQUEUE_CONFIRMATIONS=
OVMCONTEXT_SPEC_NUM_TXS=1
# Can be set to true if the withdrawal window is short enough
RUN_WITHDRAWAL_TESTS=false
RUN_DEBUG_TRACE_TESTS=false
RUN_REPLICA_TESTS=false
RUN_STRESS_TESTS=false
# Can be configured up or down as necessary
MOCHA_TIMEOUT=300000
# Set to true to make Mocha stop after the first failed test.
MOCHA_BAIL=false
\ No newline at end of file
import { utils, Wallet, BigNumber } from 'ethers'
import { expect } from 'chai'
import { setupActor, setupRun, actor, run } from './lib/convenience'
import { OptimismEnv } from '../test/shared/env'
import { Direction } from '../test/shared/watcher-utils'
import { expect } from 'chai'
interface BenchContext {
l1Wallet: Wallet
......
import { performance } from 'perf_hooks'
import { Mutex } from 'async-mutex'
import { sleep } from '../../test/shared/utils'
import {
sanitizeForMetrics,
benchDurationsSummary,
......@@ -9,7 +11,7 @@ import {
failedBenchRunsTotal,
} from './metrics'
import { ActorLogger, WorkerLogger } from './logger'
import { performance } from 'perf_hooks'
import { sleep } from '../../test/shared/utils'
// eslint-disable-next-line @typescript-eslint/no-empty-function
const asyncNoop = async () => {}
......
import fs from 'fs'
import client from 'prom-client'
import http from 'http'
import url from 'url'
import client from 'prom-client'
export const metricsRegistry = new client.Registry()
const metricName = (name: string) => {
......
import * as path from 'path'
import { Command } from 'commander'
import { defaultRuntime } from './convenience'
import { RunOpts } from './actor'
import { Command } from 'commander'
import pkg from '../../package.json'
import { serveMetrics } from './metrics'
import pkg from '../../package.json'
const program = new Command()
program.version(pkg.version)
......
import { utils, Wallet, Contract } from 'ethers'
import { expect } from 'chai'
import { actor, run, setupActor, setupRun } from './lib/convenience'
import { OptimismEnv } from '../test/shared/env'
import ERC721 from '../artifacts/contracts/NFT.sol/NFT.json'
import { expect } from 'chai'
interface Context {
wallet: Wallet
......
import { utils, Wallet, BigNumber } from 'ethers'
import { expect } from 'chai'
import { actor, setupRun, setupActor, run } from './lib/convenience'
import { OptimismEnv } from '../test/shared/env'
......
import { utils, Wallet, Contract, ContractFactory } from 'ethers'
import { utils, Wallet, Contract } from 'ethers'
import { ethers } from 'hardhat'
import { expect } from 'chai'
import { actor, setupActor, run, setupRun } from './lib/convenience'
import { OptimismEnv } from '../test/shared/env'
import StateDOS from '../artifacts/contracts/StateDOS.sol/StateDOS.json'
import { expect } from 'chai'
interface Context {
wallet: Wallet
......@@ -16,11 +17,7 @@ actor('Trie DoS accounts', () => {
setupActor(async () => {
env = await OptimismEnv.new()
const factory = new ContractFactory(
StateDOS.abi,
StateDOS.bytecode,
env.l2Wallet
)
const factory = await ethers.getContractFactory('StateDOS', env.l2Wallet)
contract = await factory.deploy()
await contract.deployed()
})
......
import { Contract, utils, Wallet } from 'ethers'
import { actor, run, setupActor, setupRun } from './lib/convenience'
import { OptimismEnv } from '../test/shared/env'
import { FeeAmount } from '@uniswap/v3-sdk'
import ERC20 from '../artifacts/contracts/ERC20.sol/ERC20.json'
import { abi as NFTABI } from '@uniswap/v3-periphery/artifacts/contracts/NonfungiblePositionManager.sol/NonfungiblePositionManager.json'
import { abi as RouterABI } from '@uniswap/v3-periphery/artifacts/contracts/SwapRouter.sol/SwapRouter.json'
import { actor, run, setupActor, setupRun } from './lib/convenience'
import { OptimismEnv } from '../test/shared/env'
import ERC20 from '../artifacts/contracts/ERC20.sol/ERC20.json'
interface Context {
contracts: { [name: string]: Contract }
wallet: Wallet
......
// SPDX-License-Identifier: MIT
pragma solidity >=0.5.0;
pragma experimental ABIEncoderV2;
// https://github.com/makerdao/multicall/blob/master/src/Multicall.sol
/// @title Multicall - Aggregate results from multiple read-only function calls
/// @author Michael Elliot <mike@makerdao.com>
/// @author Joshua Levine <joshua@makerdao.com>
/// @author Nick Johnson <arachnid@notdot.net>
contract Multicall {
struct Call {
address target;
bytes callData;
}
function aggregate(Call[] memory calls) public returns (uint256 blockNumber, bytes[] memory returnData) {
blockNumber = block.number;
returnData = new bytes[](calls.length);
for (uint256 i = 0; i < calls.length; i++) {
(bool success, bytes memory ret) = calls[i].target.call(calls[i].callData);
require(success);
returnData[i] = ret;
}
}
// Helper functions
function getEthBalance(address addr) public view returns (uint256 balance) {
balance = addr.balance;
}
function getBlockHash(uint256 blockNumber) public view returns (bytes32 blockHash) {
blockHash = blockhash(blockNumber);
}
function getLastBlockHash() public view returns (bytes32 blockHash) {
blockHash = blockhash(block.number - 1);
}
function getCurrentBlockTimestamp() public view returns (uint256 timestamp) {
timestamp = block.timestamp;
}
function getCurrentBlockDifficulty() public view returns (uint256 difficulty) {
difficulty = block.difficulty;
}
function getCurrentBlockGasLimit() public view returns (uint256 gaslimit) {
gaslimit = block.gaslimit;
}
function getCurrentBlockCoinbase() public view returns (address coinbase) {
coinbase = block.coinbase;
}
}
......@@ -22,26 +22,13 @@ pragma solidity ^0.8.9;
// Can't do this until the package is published.
//import { iOVM_L1BlockNumber } from "@eth-optimism/contracts/iOVM_L1BlockNumber";
import { iOVM_L1BlockNumber } from "./OVMContextStorage.sol";
/// @title OVMMulticall - Aggregate results from multiple read-only function calls
contract OVMMulticall {
struct Call {
address target;
bytes callData;
}
function aggregate(Call[] memory calls) public returns (uint256 blockNumber, bytes[] memory returnData) {
blockNumber = block.number;
returnData = new bytes[](calls.length);
for (uint256 i = 0; i < calls.length; i++) {
(bool success, bytes memory ret) = calls[i].target.call(calls[i].callData);
require(success);
returnData[i] = ret;
}
}
interface iOVM_L1BlockNumber {
function getL1BlockNumber() external view returns (uint256);
}
// Helper functions
/// @title OVMContext - Helper Functions
contract OVMContext {
function getCurrentBlockTimestamp() public view returns (uint256 timestamp) {
timestamp = block.timestamp;
}
......
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.9;
// Can't do this until the package is published.
//import { iOVM_L1BlockNumber } from "@eth-optimism/contracts/iOVM_L1BlockNumber";
interface iOVM_L1BlockNumber {
function getL1BlockNumber() external view returns (uint256);
}
import {OVMContext} from "./OVMContext.sol";
contract OVMContextStorage {
mapping (uint256 => uint256) public l1BlockNumbers;
mapping (uint256 => uint256) public blockNumbers;
mapping (uint256 => uint256) public timestamps;
mapping (uint256 => uint256) public difficulty;
mapping (uint256 => address) public coinbases;
contract OVMContextStorage is OVMContext {
mapping(uint256 => uint256) public l1BlockNumbers;
mapping(uint256 => uint256) public blockNumbers;
mapping(uint256 => uint256) public timestamps;
mapping(uint256 => uint256) public difficulty;
mapping(uint256 => address) public coinbases;
uint256 public index = 0;
fallback() external {
l1BlockNumbers[index] = iOVM_L1BlockNumber(
0x4200000000000000000000000000000000000013
).getL1BlockNumber();
blockNumbers[index] = block.number;
timestamps[index] = block.timestamp;
l1BlockNumbers[index] = getCurrentL1BlockNumber();
blockNumbers[index] = getCurrentBlockNumber();
timestamps[index] = getCurrentBlockTimestamp();
difficulty[index] = block.difficulty;
coinbases[index] = block.coinbase;
index++;
......
......@@ -4,7 +4,7 @@ import { HardhatUserConfig } from 'hardhat/types'
import '@nomiclabs/hardhat-ethers'
import '@nomiclabs/hardhat-waffle'
import 'hardhat-gas-reporter'
import { isLiveNetwork } from './test/shared/utils'
import { envConfig } from './test/shared/utils'
const enableGasReport = !!process.env.ENABLE_GAS_REPORT
......@@ -15,7 +15,8 @@ const config: HardhatUserConfig = {
},
},
mocha: {
timeout: isLiveNetwork() ? 300_000 : 75_000,
timeout: envConfig.MOCHA_TIMEOUT,
bail: envConfig.MOCHA_BAIL,
},
solidity: {
compilers: [
......
......@@ -41,7 +41,6 @@
"@types/chai-as-promised": "^7.1.4",
"@types/mocha": "^8.2.2",
"@types/rimraf": "^3.0.0",
"@types/shelljs": "^0.8.8",
"@typescript-eslint/eslint-plugin": "^4.26.0",
"@typescript-eslint/parser": "^4.26.0",
"@uniswap/v3-core": "1.0.0",
......@@ -52,7 +51,6 @@
"chai": "^4.3.4",
"chai-as-promised": "^7.1.1",
"commander": "^8.3.0",
"docker-compose": "^0.23.8",
"dotenv": "^10.0.0",
"envalid": "^7.1.0",
"eslint": "^7.27.0",
......@@ -71,7 +69,6 @@
"mocha": "^8.4.0",
"prom-client": "^14.0.1",
"rimraf": "^3.0.2",
"shelljs": "^0.8.4",
"typescript": "^4.3.5",
"uniswap-v3-deploy-plugin": "^0.1.0"
}
......
import { expect } from './shared/setup'
/* Imports: External */
import { Contract, ContractFactory } from 'ethers'
import { ethers } from 'hardhat'
import { applyL1ToL2Alias, awaitCondition } from '@eth-optimism/core-utils'
/* Imports: Internal */
import simpleStorageJson from '../artifacts/contracts/SimpleStorage.sol/SimpleStorage.json'
import l2ReverterJson from '../artifacts/contracts/Reverter.sol/Reverter.json'
import { expect } from './shared/setup'
import { Direction } from './shared/watcher-utils'
import { OptimismEnv } from './shared/env'
import { isMainnet } from './shared/utils'
import {
DEFAULT_TEST_GAS_L1,
DEFAULT_TEST_GAS_L2,
envConfig,
sleep,
withdrawalTest,
} from './shared/utils'
describe('Basic L1<>L2 Communication', async () => {
let Factory__L1SimpleStorage: ContractFactory
......@@ -22,61 +26,58 @@ describe('Basic L1<>L2 Communication', async () => {
before(async () => {
env = await OptimismEnv.new()
Factory__L1SimpleStorage = new ContractFactory(
simpleStorageJson.abi,
simpleStorageJson.bytecode,
Factory__L1SimpleStorage = await ethers.getContractFactory(
'SimpleStorage',
env.l1Wallet
)
Factory__L2SimpleStorage = new ContractFactory(
simpleStorageJson.abi,
simpleStorageJson.bytecode,
Factory__L2SimpleStorage = await ethers.getContractFactory(
'SimpleStorage',
env.l2Wallet
)
Factory__L2Reverter = new ContractFactory(
l2ReverterJson.abi,
l2ReverterJson.bytecode,
Factory__L2Reverter = await ethers.getContractFactory(
'Reverter',
env.l2Wallet
)
})
beforeEach(async () => {
L1SimpleStorage = await Factory__L1SimpleStorage.deploy()
await L1SimpleStorage.deployTransaction.wait()
await L1SimpleStorage.deployed()
L2SimpleStorage = await Factory__L2SimpleStorage.deploy()
await L2SimpleStorage.deployTransaction.wait()
await L2SimpleStorage.deployed()
L2Reverter = await Factory__L2Reverter.deploy()
await L2Reverter.deployTransaction.wait()
await L2Reverter.deployed()
})
describe('L2 => L1', () => {
it('should be able to perform a withdrawal from L2 -> L1', async function () {
if (await isMainnet(env)) {
console.log('Skipping withdrawals test on mainnet.')
this.skip()
return
}
const value = `0x${'77'.repeat(32)}`
// Send L2 -> L1 message.
const transaction = await env.l2Messenger.sendMessage(
L1SimpleStorage.address,
L1SimpleStorage.interface.encodeFunctionData('setValue', [value]),
5000000
)
await transaction.wait()
await env.relayXDomainMessages(transaction)
await env.waitForXDomainTransaction(transaction, Direction.L2ToL1)
withdrawalTest(
'should be able to perform a withdrawal from L2 -> L1',
async () => {
const value = `0x${'77'.repeat(32)}`
// Send L2 -> L1 message.
const transaction = await env.l2Messenger.sendMessage(
L1SimpleStorage.address,
L1SimpleStorage.interface.encodeFunctionData('setValue', [value]),
5000000,
{
gasLimit: DEFAULT_TEST_GAS_L2,
}
)
await transaction.wait()
await env.relayXDomainMessages(transaction)
await env.waitForXDomainTransaction(transaction, Direction.L2ToL1)
expect(await L1SimpleStorage.msgSender()).to.equal(
env.l1Messenger.address
)
expect(await L1SimpleStorage.xDomainSender()).to.equal(
env.l2Wallet.address
)
expect(await L1SimpleStorage.value()).to.equal(value)
expect((await L1SimpleStorage.totalCount()).toNumber()).to.equal(1)
})
expect(await L1SimpleStorage.msgSender()).to.equal(
env.l1Messenger.address
)
expect(await L1SimpleStorage.xDomainSender()).to.equal(
env.l2Wallet.address
)
expect(await L1SimpleStorage.value()).to.equal(value)
expect((await L1SimpleStorage.totalCount()).toNumber()).to.equal(1)
}
)
})
describe('L1 => L2', () => {
......@@ -87,7 +88,10 @@ describe('Basic L1<>L2 Communication', async () => {
const transaction = await env.l1Messenger.sendMessage(
L2SimpleStorage.address,
L2SimpleStorage.interface.encodeFunctionData('setValue', [value]),
5000000
5000000,
{
gasLimit: DEFAULT_TEST_GAS_L1,
}
)
await env.waitForXDomainTransaction(transaction, Direction.L1ToL2)
......@@ -105,19 +109,41 @@ describe('Basic L1<>L2 Communication', async () => {
expect((await L2SimpleStorage.totalCount()).toNumber()).to.equal(1)
})
it('should deposit from L1 -> L2 directly via enqueue', async () => {
it('should deposit from L1 -> L2 directly via enqueue', async function () {
this.timeout(
envConfig.MOCHA_TIMEOUT * 2 +
envConfig.DTL_ENQUEUE_CONFIRMATIONS * 15000
)
const value = `0x${'42'.repeat(32)}`
// Send L1 -> L2 message.
await env.ctc
const tx = await env.ctc
.connect(env.l1Wallet)
.enqueue(
L2SimpleStorage.address,
5000000,
L2SimpleStorage.interface.encodeFunctionData('setValueNotXDomain', [
value,
])
]),
{
gasLimit: DEFAULT_TEST_GAS_L1,
}
)
const receipt = await tx.wait()
const waitUntilBlock =
receipt.blockNumber + envConfig.DTL_ENQUEUE_CONFIRMATIONS
let currBlock = await env.l1Provider.getBlockNumber()
while (currBlock <= waitUntilBlock) {
const progress =
envConfig.DTL_ENQUEUE_CONFIRMATIONS - (waitUntilBlock - currBlock)
console.log(
`Waiting for ${progress}/${envConfig.DTL_ENQUEUE_CONFIRMATIONS} confirmations.`
)
await sleep(5000)
currBlock = await env.l1Provider.getBlockNumber()
}
console.log('Enqueue should be confirmed.')
await awaitCondition(
async () => {
......@@ -142,8 +168,12 @@ describe('Basic L1<>L2 Communication', async () => {
const transaction = await env.l1Messenger.sendMessage(
L2SimpleStorage.address,
L2SimpleStorage.interface.encodeFunctionData('setValue', [value]),
5000000
5000000,
{
gasLimit: DEFAULT_TEST_GAS_L1,
}
)
await transaction.wait()
const { remoteReceipt } = await env.waitForXDomainTransaction(
transaction,
......@@ -159,7 +189,10 @@ describe('Basic L1<>L2 Communication', async () => {
const transaction = await env.l1Messenger.sendMessage(
L2Reverter.address,
L2Reverter.interface.encodeFunctionData('doRevert', []),
5000000
5000000,
{
gasLimit: DEFAULT_TEST_GAS_L1,
}
)
const { remoteReceipt } = await env.waitForXDomainTransaction(
......
import { expect } from './shared/setup'
import { BigNumber, Contract, ContractFactory, utils, Wallet } from 'ethers'
import { ethers } from 'hardhat'
import * as L2Artifact from '@eth-optimism/contracts/artifacts/contracts/standards/L2StandardERC20.sol/L2StandardERC20.json'
import { expect } from './shared/setup'
import { OptimismEnv } from './shared/env'
import { isLiveNetwork, isMainnet } from './shared/utils'
import { withdrawalTest } from './shared/utils'
import { Direction } from './shared/watcher-utils'
describe('Bridged tokens', () => {
......@@ -25,14 +24,16 @@ describe('Bridged tokens', () => {
const other = Wallet.createRandom()
otherWalletL1 = other.connect(env.l1Wallet.provider)
otherWalletL2 = other.connect(env.l2Wallet.provider)
await env.l1Wallet.sendTransaction({
let tx = await env.l1Wallet.sendTransaction({
to: otherWalletL1.address,
value: utils.parseEther('0.01'),
})
await env.l2Wallet.sendTransaction({
await tx.wait()
tx = await env.l2Wallet.sendTransaction({
to: otherWalletL2.address,
value: utils.parseEther('0.01'),
})
await tx.wait()
L1Factory__ERC20 = await ethers.getContractFactory('ERC20', env.l1Wallet)
L2Factory__ERC20 = new ethers.ContractFactory(
......@@ -77,7 +78,7 @@ describe('Bridged tokens', () => {
expect(await L2__ERC20.balanceOf(env.l2Wallet.address)).to.deep.equal(
BigNumber.from(1000)
)
}).timeout(isLiveNetwork() ? 300_000 : 120_000)
})
it('should transfer tokens on L2', async () => {
const tx = await L2__ERC20.transfer(otherWalletL1.address, 500)
......@@ -90,46 +91,40 @@ describe('Bridged tokens', () => {
)
})
it('should withdraw tokens from L2 to the depositor', async function () {
if (await isMainnet(env)) {
console.log('Skipping withdrawals test on mainnet.')
this.skip()
return
withdrawalTest(
'should withdraw tokens from L2 to the depositor',
async () => {
const tx = await env.l2Bridge.withdraw(
L2__ERC20.address,
500,
2000000,
'0x'
)
await env.relayXDomainMessages(tx)
await env.waitForXDomainTransaction(tx, Direction.L2ToL1)
expect(await L1__ERC20.balanceOf(env.l1Wallet.address)).to.deep.equal(
BigNumber.from(999500)
)
expect(await L2__ERC20.balanceOf(env.l2Wallet.address)).to.deep.equal(
BigNumber.from(0)
)
}
const tx = await env.l2Bridge.withdraw(
L2__ERC20.address,
500,
2000000,
'0x'
)
await env.relayXDomainMessages(tx)
await env.waitForXDomainTransaction(tx, Direction.L2ToL1)
expect(await L1__ERC20.balanceOf(env.l1Wallet.address)).to.deep.equal(
BigNumber.from(999500)
)
expect(await L2__ERC20.balanceOf(env.l2Wallet.address)).to.deep.equal(
BigNumber.from(0)
)
}).timeout(isLiveNetwork() ? 300_000 : 120_000)
it('should withdraw tokens from L2 to the transfer recipient', async function () {
if (await isMainnet(env)) {
console.log('Skipping withdrawals test on mainnet.')
this.skip()
return
)
withdrawalTest(
'should withdraw tokens from L2 to the transfer recipient',
async () => {
const tx = await env.l2Bridge
.connect(otherWalletL2)
.withdraw(L2__ERC20.address, 500, 2000000, '0x')
await env.relayXDomainMessages(tx)
await env.waitForXDomainTransaction(tx, Direction.L2ToL1)
expect(await L1__ERC20.balanceOf(otherWalletL1.address)).to.deep.equal(
BigNumber.from(500)
)
expect(await L2__ERC20.balanceOf(otherWalletL2.address)).to.deep.equal(
BigNumber.from(0)
)
}
const tx = await env.l2Bridge
.connect(otherWalletL2)
.withdraw(L2__ERC20.address, 500, 2000000, '0x')
await env.relayXDomainMessages(tx)
await env.waitForXDomainTransaction(tx, Direction.L2ToL1)
expect(await L1__ERC20.balanceOf(otherWalletL1.address)).to.deep.equal(
BigNumber.from(500)
)
expect(await L2__ERC20.balanceOf(otherWalletL2.address)).to.deep.equal(
BigNumber.from(0)
)
}).timeout(isLiveNetwork() ? 300_000 : 120_000)
)
})
import { expect } from './shared/setup'
import { BigNumber, Contract, ContractFactory, utils, Wallet } from 'ethers'
import { ethers } from 'hardhat'
import { UniswapV3Deployer } from 'uniswap-v3-deploy-plugin/dist/deployer/UniswapV3Deployer'
import { OptimismEnv } from './shared/env'
import { FeeAmount, TICK_SPACINGS } from '@uniswap/v3-sdk'
import { abi as NFTABI } from '@uniswap/v3-periphery/artifacts/contracts/NonfungiblePositionManager.sol/NonfungiblePositionManager.json'
import { abi as RouterABI } from '@uniswap/v3-periphery/artifacts/contracts/SwapRouter.sol/SwapRouter.json'
import { OptimismEnv } from './shared/env'
import { expect } from './shared/setup'
// Below methods taken from the Uniswap test suite, see
// https://github.com/Uniswap/v3-periphery/blob/main/test/shared/ticks.ts
const getMinTick = (tickSpacing: number) =>
......
import { expect } from './shared/setup'
/* Imports: External */
import { BigNumber, utils } from 'ethers'
import { serialize } from '@ethersproject/transactions'
import { predeploys, getContractFactory } from '@eth-optimism/contracts'
/* Imports: Internal */
import { isLiveNetwork } from './shared/utils'
import { expect } from './shared/setup'
import { hardhatTest } from './shared/utils'
import { OptimismEnv } from './shared/env'
import { Direction } from './shared/watcher-utils'
const setPrices = async (env: OptimismEnv, value: number | BigNumber) => {
if (isLiveNetwork()) {
return
}
const gasPrice = await env.gasPriceOracle.setGasPrice(value)
await gasPrice.wait()
const baseFee = await env.gasPriceOracle.setL1BaseFee(value)
......@@ -28,24 +24,25 @@ describe('Fee Payment Integration Tests', async () => {
env = await OptimismEnv.new()
})
if (!isLiveNetwork()) {
it(`should return eth_gasPrice equal to OVM_GasPriceOracle.gasPrice`, async () => {
hardhatTest(
`should return eth_gasPrice equal to OVM_GasPriceOracle.gasPrice`,
async () => {
const assertGasPrice = async () => {
const gasPrice = await env.l2Wallet.getGasPrice()
const oracleGasPrice = await env.gasPriceOracle.gasPrice()
expect(gasPrice).to.deep.equal(oracleGasPrice)
}
assertGasPrice()
await assertGasPrice()
// update the gas price
const tx = await env.gasPriceOracle.setGasPrice(1000)
await tx.wait()
assertGasPrice()
})
}
await assertGasPrice()
}
)
it('Paying a nonzero but acceptable gasPrice fee', async () => {
hardhatTest('Paying a nonzero but acceptable gasPrice fee', async () => {
await setPrices(env, 1000)
const amount = utils.parseEther('0.0000001')
......@@ -97,7 +94,7 @@ describe('Fee Payment Integration Tests', async () => {
await setPrices(env, 1)
})
it('should compute correct fee', async () => {
hardhatTest('should compute correct fee', async () => {
await setPrices(env, 1000)
const preBalance = await env.l2Wallet.getBalance()
......@@ -149,39 +146,38 @@ describe('Fee Payment Integration Tests', async () => {
await expect(env.sequencerFeeVault.withdraw()).to.be.rejected
})
it('should be able to withdraw fees back to L1 once the minimum is met', async function () {
if (isLiveNetwork()) {
this.skip()
return
}
const l1FeeWallet = await env.sequencerFeeVault.l1FeeWallet()
const balanceBefore = await env.l1Wallet.provider.getBalance(l1FeeWallet)
const withdrawalAmount = await env.sequencerFeeVault.MIN_WITHDRAWAL_AMOUNT()
// Transfer the minimum required to withdraw.
const tx = await env.l2Wallet.sendTransaction({
to: env.sequencerFeeVault.address,
value: withdrawalAmount,
gasLimit: 500000,
})
await tx.wait()
hardhatTest(
'should be able to withdraw fees back to L1 once the minimum is met',
async () => {
const l1FeeWallet = await env.sequencerFeeVault.l1FeeWallet()
const balanceBefore = await env.l1Wallet.provider.getBalance(l1FeeWallet)
const withdrawalAmount =
await env.sequencerFeeVault.MIN_WITHDRAWAL_AMOUNT()
// Transfer the minimum required to withdraw.
const tx = await env.l2Wallet.sendTransaction({
to: env.sequencerFeeVault.address,
value: withdrawalAmount,
gasLimit: 500000,
})
await tx.wait()
const vaultBalance = await env.ovmEth.balanceOf(
env.sequencerFeeVault.address
)
const vaultBalance = await env.ovmEth.balanceOf(
env.sequencerFeeVault.address
)
const withdrawTx = await env.sequencerFeeVault.withdraw()
const withdrawTx = await env.sequencerFeeVault.withdraw()
// Wait for the withdrawal to be relayed to L1.
await withdrawTx.wait()
await env.relayXDomainMessages(withdrawTx)
await env.waitForXDomainTransaction(withdrawTx, Direction.L2ToL1)
// Wait for the withdrawal to be relayed to L1.
await withdrawTx.wait()
await env.relayXDomainMessages(withdrawTx)
await env.waitForXDomainTransaction(withdrawTx, Direction.L2ToL1)
// Balance difference should be equal to old L2 balance.
const balanceAfter = await env.l1Wallet.provider.getBalance(l1FeeWallet)
expect(balanceAfter.sub(balanceBefore)).to.deep.equal(
BigNumber.from(vaultBalance)
)
})
// Balance difference should be equal to old L2 balance.
const balanceAfter = await env.l1Wallet.provider.getBalance(l1FeeWallet)
expect(balanceAfter.sub(balanceBefore)).to.deep.equal(
BigNumber.from(vaultBalance)
)
}
)
})
import { expect } from './shared/setup'
import { BigNumber, Contract, ContractFactory, Wallet } from 'ethers'
import { ethers } from 'hardhat'
import { expect } from './shared/setup'
import {
fundUser,
encodeSolidityRevertMessage,
......@@ -45,7 +45,7 @@ describe('Native ETH value integration tests', () => {
const there = await wallet.sendTransaction({
to: other.address,
value,
gasPrice: await gasPriceForL2(env),
gasPrice: await gasPriceForL2(),
})
const thereReceipt = await there.wait()
const thereGas = thereReceipt.gasUsed.mul(there.gasPrice)
......@@ -63,7 +63,7 @@ describe('Native ETH value integration tests', () => {
const backAgain = await other.sendTransaction({
to: wallet.address,
value: backVal,
gasPrice: await gasPriceForL2(env),
gasPrice: await gasPriceForL2(),
})
const backReceipt = await backAgain.wait()
const backGas = backReceipt.gasUsed.mul(backAgain.gasPrice)
......@@ -169,7 +169,7 @@ describe('Native ETH value integration tests', () => {
it('should allow ETH to be sent', async () => {
const sendAmount = 15
const tx = await ValueCalls0.simpleSend(ValueCalls1.address, sendAmount, {
gasPrice: await gasPriceForL2(env),
gasPrice: await gasPriceForL2(),
})
await tx.wait()
......
import { expect } from './shared/setup'
/* Imports: External */
import { Wallet, utils, BigNumber } from 'ethers'
import { serialize } from '@ethersproject/transactions'
......@@ -7,13 +5,17 @@ import { predeploys } from '@eth-optimism/contracts'
import { expectApprox } from '@eth-optimism/core-utils'
/* Imports: Internal */
import { expect } from './shared/setup'
import { Direction } from './shared/watcher-utils'
import { isMainnet, PROXY_SEQUENCER_ENTRYPOINT_ADDRESS } from './shared/utils'
import {
DEFAULT_TEST_GAS_L1,
DEFAULT_TEST_GAS_L2,
envConfig,
PROXY_SEQUENCER_ENTRYPOINT_ADDRESS,
withdrawalTest,
} from './shared/utils'
import { OptimismEnv } from './shared/env'
const DEFAULT_TEST_GAS_L1 = 330_000
const DEFAULT_TEST_GAS_L2 = 1_300_000
// TX size enforced by CTC:
const MAX_ROLLUP_TX_SIZE = 50_000
......@@ -183,13 +185,7 @@ describe('Native ETH Integration Tests', async () => {
).to.be.reverted
})
it('withdraw', async function () {
if (await isMainnet(env)) {
console.log('Skipping withdrawals test on mainnet.')
this.skip()
return
}
withdrawalTest('withdraw', async () => {
const withdrawAmount = BigNumber.from(3)
const preBalances = await getBalances(env)
expect(
......@@ -231,13 +227,7 @@ describe('Native ETH Integration Tests', async () => {
)
})
it('withdrawTo', async function () {
if (await isMainnet(env)) {
console.log('Skipping withdrawals test on mainnet.')
this.skip()
return
}
withdrawalTest('withdrawTo', async () => {
const withdrawAmount = BigNumber.from(3)
const preBalances = await getBalances(env)
......@@ -295,73 +285,71 @@ describe('Native ETH Integration Tests', async () => {
)
})
it('deposit, transfer, withdraw', async function () {
if (await isMainnet(env)) {
console.log('Skipping withdrawals test on mainnet.')
this.skip()
return
}
// 1. deposit
const amount = utils.parseEther('1')
await env.waitForXDomainTransaction(
env.l1Bridge.depositETH(DEFAULT_TEST_GAS_L2, '0xFFFF', {
value: amount,
gasLimit: DEFAULT_TEST_GAS_L1,
}),
Direction.L1ToL2
)
// 2. transfer to another address
const other = Wallet.createRandom().connect(env.l2Wallet.provider)
const tx = await env.l2Wallet.sendTransaction({
to: other.address,
value: amount,
})
await tx.wait()
const l1BalanceBefore = await other
.connect(env.l1Wallet.provider)
.getBalance()
// 3. do withdrawal
const withdrawnAmount = utils.parseEther('0.95')
const transaction = await env.l2Bridge
.connect(other)
.withdraw(
predeploys.OVM_ETH,
withdrawnAmount,
DEFAULT_TEST_GAS_L1,
'0xFFFF'
withdrawalTest(
'deposit, transfer, withdraw',
async () => {
// 1. deposit
const amount = utils.parseEther('1')
await env.waitForXDomainTransaction(
env.l1Bridge.depositETH(DEFAULT_TEST_GAS_L2, '0xFFFF', {
value: amount,
gasLimit: DEFAULT_TEST_GAS_L1,
}),
Direction.L1ToL2
)
await transaction.wait()
await env.relayXDomainMessages(transaction)
const receipts = await env.waitForXDomainTransaction(
transaction,
Direction.L2ToL1
)
// Compute the L1 portion of the fee
const l1Fee = await env.gasPriceOracle.getL1Fee(
serialize({
nonce: transaction.nonce,
value: transaction.value,
gasPrice: transaction.gasPrice,
gasLimit: transaction.gasLimit,
to: transaction.to,
data: transaction.data,
// 2. transfer to another address
const other = Wallet.createRandom().connect(env.l2Wallet.provider)
const tx = await env.l2Wallet.sendTransaction({
to: other.address,
value: amount,
})
)
await tx.wait()
const l1BalanceBefore = await other
.connect(env.l1Wallet.provider)
.getBalance()
// 3. do withdrawal
const withdrawnAmount = utils.parseEther('0.95')
const transaction = await env.l2Bridge
.connect(other)
.withdraw(
predeploys.OVM_ETH,
withdrawnAmount,
DEFAULT_TEST_GAS_L1,
'0xFFFF'
)
await transaction.wait()
await env.relayXDomainMessages(transaction)
const receipts = await env.waitForXDomainTransaction(
transaction,
Direction.L2ToL1
)
// check that correct amount was withdrawn and that fee was charged
const l2Fee = receipts.tx.gasPrice.mul(receipts.receipt.gasUsed)
// Compute the L1 portion of the fee
const l1Fee = await env.gasPriceOracle.getL1Fee(
serialize({
nonce: transaction.nonce,
value: transaction.value,
gasPrice: transaction.gasPrice,
gasLimit: transaction.gasLimit,
to: transaction.to,
data: transaction.data,
})
)
const fee = l1Fee.add(l2Fee)
const l1BalanceAfter = await other
.connect(env.l1Wallet.provider)
.getBalance()
const l2BalanceAfter = await other.getBalance()
expect(l1BalanceAfter).to.deep.eq(l1BalanceBefore.add(withdrawnAmount))
expect(l2BalanceAfter).to.deep.eq(amount.sub(withdrawnAmount).sub(fee))
})
// check that correct amount was withdrawn and that fee was charged
const l2Fee = receipts.tx.gasPrice.mul(receipts.receipt.gasUsed)
const fee = l1Fee.add(l2Fee)
const l1BalanceAfter = await other
.connect(env.l1Wallet.provider)
.getBalance()
const l2BalanceAfter = await other.getBalance()
expect(l1BalanceAfter).to.deep.eq(l1BalanceBefore.add(withdrawnAmount))
expect(l2BalanceAfter).to.deep.eq(amount.sub(withdrawnAmount).sub(fee))
},
envConfig.MOCHA_TIMEOUT * 3
)
})
import { expect } from './shared/setup'
/* Imports: External */
import { ethers } from 'hardhat'
import { injectL2Context, expectApprox } from '@eth-optimism/core-utils'
......@@ -7,7 +5,13 @@ import { predeploys } from '@eth-optimism/contracts'
import { Contract, BigNumber } from 'ethers'
/* Imports: Internal */
import { l2Provider, l1Provider, IS_LIVE_NETWORK } from './shared/utils'
import { expect } from './shared/setup'
import {
l2Provider,
l1Provider,
envConfig,
DEFAULT_TEST_GAS_L1,
} from './shared/utils'
import { OptimismEnv } from './shared/env'
import { Direction } from './shared/watcher-utils'
......@@ -23,29 +27,25 @@ describe('OVM Context: Layer 2 EVM Context', () => {
env = await OptimismEnv.new()
})
let OVMMulticall: Contract
let Multicall: Contract
let OVMContextStorage: Contract
beforeEach(async () => {
const OVMContextStorageFactory = await ethers.getContractFactory(
'OVMContextStorage',
env.l2Wallet
)
const OVMMulticallFactory = await ethers.getContractFactory(
'OVMMulticall',
const MulticallFactory = await ethers.getContractFactory(
'Multicall',
env.l2Wallet
)
OVMContextStorage = await OVMContextStorageFactory.deploy()
await OVMContextStorage.deployTransaction.wait()
OVMMulticall = await OVMMulticallFactory.deploy()
await OVMMulticall.deployTransaction.wait()
Multicall = await MulticallFactory.deploy()
await Multicall.deployTransaction.wait()
})
let numTxs = 5
if (IS_LIVE_NETWORK) {
// Tests take way too long if we don't reduce the number of txs here.
numTxs = 1
}
const numTxs = envConfig.OVMCONTEXT_SPEC_NUM_TXS
it('enqueue: L1 contextual values are correctly set in L2', async () => {
for (let i = 0; i < numTxs; i++) {
......@@ -54,7 +54,10 @@ describe('OVM Context: Layer 2 EVM Context', () => {
const tx = await env.l1Messenger.sendMessage(
OVMContextStorage.address,
'0x',
2_000_000
2_000_000,
{
gasLimit: DEFAULT_TEST_GAS_L1,
}
)
// Wait for the transaction to be sent over to L2.
......@@ -89,7 +92,7 @@ describe('OVM Context: Layer 2 EVM Context', () => {
const coinbase = await OVMContextStorage.coinbases(i)
expect(coinbase).to.equal(predeploys.OVM_SequencerFeeVault)
}
}).timeout(150000) // this specific test takes a while because it involves L1 to L2 txs
})
it('should set correct OVM Context for `eth_call`', async () => {
for (let i = 0; i < numTxs; i++) {
......@@ -101,21 +104,23 @@ describe('OVM Context: Layer 2 EVM Context', () => {
await dummyTx.wait()
const block = await L2Provider.getBlockWithTransactions('latest')
const [, returnData] = await OVMMulticall.callStatic.aggregate(
const [, returnData] = await Multicall.callStatic.aggregate(
[
[
OVMMulticall.address,
OVMMulticall.interface.encodeFunctionData(
OVMContextStorage.address,
OVMContextStorage.interface.encodeFunctionData(
'getCurrentBlockTimestamp'
),
],
[
OVMMulticall.address,
OVMMulticall.interface.encodeFunctionData('getCurrentBlockNumber'),
OVMContextStorage.address,
OVMContextStorage.interface.encodeFunctionData(
'getCurrentBlockNumber'
),
],
[
OVMMulticall.address,
OVMMulticall.interface.encodeFunctionData(
OVMContextStorage.address,
OVMContextStorage.interface.encodeFunctionData(
'getCurrentL1BlockNumber'
),
],
......@@ -141,19 +146,23 @@ describe('OVM Context: Layer 2 EVM Context', () => {
*/
it('should return same timestamp and blocknumbers between `eth_call` and `rollup_getInfo`', async () => {
// As atomically as possible, call `rollup_getInfo` and OVMMulticall for the
// As atomically as possible, call `rollup_getInfo` and Multicall for the
// block number and timestamp. If this is not atomic, the sequencer may
// update the timestamp between the `eth_call` and the `rollup_getInfo`.
const [info, [, returnData]] = await Promise.all([
L2Provider.send('rollup_getInfo', []),
OVMMulticall.callStatic.aggregate([
Multicall.callStatic.aggregate([
[
OVMMulticall.address,
OVMMulticall.interface.encodeFunctionData('getCurrentBlockTimestamp'),
OVMContextStorage.address,
OVMContextStorage.interface.encodeFunctionData(
'getCurrentBlockTimestamp'
),
],
[
OVMMulticall.address,
OVMMulticall.interface.encodeFunctionData('getCurrentL1BlockNumber'),
OVMContextStorage.address,
OVMContextStorage.interface.encodeFunctionData(
'getCurrentL1BlockNumber'
),
],
]),
])
......
import { expect } from './shared/setup'
/* Imports: Internal */
import { ethers } from 'ethers'
import { predeploys, getContractInterface } from '@eth-optimism/contracts'
/* Imports: External */
import { expect } from './shared/setup'
import { OptimismEnv } from './shared/env'
describe('predeploys', () => {
......
-import { expect } from './shared/setup'
/* Imports: Internal */
import { providers } from 'ethers'
import { injectL2Context, applyL1ToL2Alias } from '@eth-optimism/core-utils'
/* Imports: External */
+import { expect } from './shared/setup'
import { OptimismEnv } from './shared/env'
import { Direction } from './shared/watcher-utils'
-import { isLiveNetwork } from './shared/utils'
+import { DEFAULT_TEST_GAS_L1, envConfig } from './shared/utils'
describe('Queue Ingestion', () => {
let env: OptimismEnv
......
@@ -21,7 +20,7 @@ describe('Queue Ingestion', () => {
// that are in the queue and submit them. L2 will pick up the
// sequencer batch appended event and play the transactions.
it('should order transactions correctly', async () => {
-const numTxs = 5
+const numTxs = envConfig.OVMCONTEXT_SPEC_NUM_TXS
// Enqueue some transactions by building the calldata and then sending
// the transaction to Layer 1
......
@@ -30,7 +29,10 @@ describe('Queue Ingestion', () => {
const tx = await env.l1Messenger.sendMessage(
`0x${`${i}`.repeat(40)}`,
`0x0${i}`,
-1_000_000
+1_000_000,
+{
+gasLimit: DEFAULT_TEST_GAS_L1,
+}
)
await tx.wait()
txs.push(tx)
......
@@ -62,5 +64,5 @@ describe('Queue Ingestion', () => {
)
expect(l2Tx.l1BlockNumber).to.equal(l1TxReceipt.blockNumber)
}
-}).timeout(isLiveNetwork() ? 300_000 : 100_000)
+})
})
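Both enqueue tests now pass an explicit gasLimit override because ethers' automatic gas estimation can be unreliable against a live L1. The constant presumably sits in ./shared/utils next to envConfig; its value here is an assumption for illustration:

// Assumed value — the real DEFAULT_TEST_GAS_L1 in ./shared/utils may differ.
export const DEFAULT_TEST_GAS_L1 = 4_000_000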
+import { TransactionReceipt } from '@ethersproject/abstract-provider'
import { expect } from './shared/setup'
import { OptimismEnv } from './shared/env'
import {
defaultTransactionFactory,
gasPriceForL2,
sleep,
-isLiveNetwork,
+envConfig,
} from './shared/utils'
-import { TransactionReceipt } from '@ethersproject/abstract-provider'
describe('Replica Tests', () => {
let env: OptimismEnv
-before(async () => {
+before(async function () {
+if (!envConfig.RUN_REPLICA_TESTS) {
+this.skip()
+return
+}
env = await OptimismEnv.new()
})
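Note the switch from an arrow function to a plain function in the before hook: Mocha exposes this.skip() on the hook's context, and an arrow function would capture the enclosing this instead. The same pattern works for any env-gated suite, e.g. (the RUN_SLOW_TESTS flag is hypothetical):

describe('env-gated suite', () => {
  before(function () {
    // `this` is Mocha's context only because this is not an arrow function.
    if (process.env.RUN_SLOW_TESTS !== 'true') {
      this.skip() // marks the whole suite as pending
    }
  })

  it('runs only when enabled', () => {
    // ...
  })
})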
describe('Matching blocks', () => {
-if (isLiveNetwork()) {
-console.log('Skipping replica tests on live network')
-return
-}
it('should sync a transaction', async () => {
const tx = defaultTransactionFactory()
-tx.gasPrice = await gasPriceForL2(env)
+tx.gasPrice = await gasPriceForL2()
const result = await env.l2Wallet.sendTransaction(tx)
let receipt: TransactionReceipt
......
@@ -48,7 +49,7 @@ describe('Replica Tests', () => {
const tx = {
...defaultTransactionFactory(),
nonce: await env.l2Wallet.getTransactionCount(),
-gasPrice: await gasPriceForL2(env),
+gasPrice: await gasPriceForL2(),
chainId: null, // Disables EIP155 transaction signing.
}
const signed = await env.l2Wallet.signTransaction(tx)
......
@@ -76,7 +77,7 @@ describe('Replica Tests', () => {
const tx = {
...defaultTransactionFactory(),
nonce: await env.l2Wallet.getTransactionCount(),
-gasPrice: await gasPriceForL2(env),
+gasPrice: await gasPriceForL2(),
}
const signed = await env.l2Wallet.signTransaction(tx)
const result = await env.replicaProvider.sendTransaction(signed)
......
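gasPriceForL2 loses its env parameter throughout this file, which suggests the helper now resolves the gas price on its own. A hedged sketch of what the standalone version could look like (the real implementation in ./shared/utils may differ, and L2_URL is an assumed variable name):

import { BigNumber, providers } from 'ethers'

// Hypothetical standalone helper: builds its own provider rather than
// taking the OptimismEnv handle.
export const gasPriceForL2 = async (): Promise<BigNumber> => {
  const provider = new providers.JsonRpcProvider(process.env.L2_URL)
  return provider.getGasPrice()
}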
import { DockerComposeNetwork } from './shared/docker-compose'
before(async () => {
if (!process.env.NO_NETWORK) {
await new DockerComposeNetwork().up()
}
})
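This global hook is what lets the same suite run against either target: locally it boots the docker-compose stack, while setting NO_NETWORK skips the boot for live networks where no containers are wanted. A minimal sketch of what up() might wrap, assuming it shells out to docker-compose:

import { execSync } from 'child_process'

// Hypothetical sketch — the real DockerComposeNetwork in
// ./shared/docker-compose may use different flags and compose files.
export class DockerComposeNetwork {
  async up(): Promise<void> {
    execSync('docker-compose up --detach', { stdio: 'inherit' })
  }
}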
......
@@ -19,6 +19,8 @@ import {
getL1Bridge,
getL2Bridge,
sleep,
+envConfig,
+DEFAULT_TEST_GAS_L1,
} from './utils'
import {
initWatcher,
......
@@ -83,8 +85,10 @@ export class OptimismEnv {
// fund the user if needed
const balance = await l2Wallet.getBalance()
-if (balance.lt(utils.parseEther('1'))) {
-await fundUser(watcher, l1Bridge, utils.parseEther('1').sub(balance))
+const min = envConfig.L2_WALLET_MIN_BALANCE_ETH.toString()
+const topUp = envConfig.L2_WALLET_TOP_UP_AMOUNT_ETH.toString()
+if (balance.lt(utils.parseEther(min))) {
+await fundUser(watcher, l1Bridge, utils.parseEther(topUp))
}
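Two things change in the funding logic: the threshold and amount become configurable, and the semantics shift from topping the wallet up *to* 1 ETH (the old .sub(balance) funded only the shortfall) to sending a fixed top-up whenever the balance drops below the minimum. The new keys would slot into the same assumed envConfig sketch shown earlier (defaults are illustrative only):

export const envConfig = {
  // Key names from this diff; numeric defaults are assumptions.
  L2_WALLET_MIN_BALANCE_ETH: parseFloat(process.env.L2_WALLET_MIN_BALANCE_ETH || '2'),
  L2_WALLET_TOP_UP_AMOUNT_ETH: parseFloat(process.env.L2_WALLET_TOP_UP_AMOUNT_ETH || '3'),
}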
const l1Messenger = getContractFactory('L1CrossDomainMessenger')
.connect(l1Wallet)
......
@@ -156,6 +160,7 @@ export class OptimismEnv {
tx: Promise<TransactionResponse> | TransactionResponse
): Promise<void> {
tx = await tx
+await tx.wait()
let messagePairs = []
while (true) {
......
@@ -187,7 +192,10 @@
message.sender,
message.message,
message.messageNonce,
-proof
+proof,
+{
+gasLimit: DEFAULT_TEST_GAS_L1 * 10,
+}
)
await result.wait()
break
......
......
@@ -4,7 +4,6 @@ import {
TransactionResponse,
} from '@ethersproject/providers'
import { Watcher } from '@eth-optimism/core-utils'
-import { Contract, Transaction } from 'ethers'
export const initWatcher = async (
......
......
@@ -216,6 +216,7 @@ func init() {
app.Commands = []cli.Command{
// See chaincmd.go:
initCommand,
+dumpChainCfgCommand,
importCommand,
exportCommand,
importPreimagesCommand,
......