Commit e2be492f authored by Matthew Slipper's avatar Matthew Slipper

Merge branch 'develop' into fix/eip2930

parents 9343f33e 21084129
---
'@eth-optimism/data-transport-layer': patch
---
Use a smaller filter query when searching for the L1 start height. The start height should still be configured explicitly where possible, because searching with a smaller filter can take a long time.
---
'@eth-optimism/integration-tests': minor
---
Updates to work with a live network
---
'@eth-optimism/op-exporter': patch
---
Fixes panic caused by version initialized to nil
---
'@eth-optimism/proxyd': minor
---
Add X-Forwarded-For header when proxying RPCs on proxyd
---
'@eth-optimism/gas-oracle': patch
'@eth-optimism/contracts': patch
'@eth-optimism/data-transport-layer': patch
---
String update to change the system name from OE to Optimism
---
'@eth-optimism/op-exporter': patch
---
Added version metrics
---
'@eth-optimism/batch-submitter-service': patch
---
Adds confirmation depth awareness to txmgr
---
'@eth-optimism/message-relayer': patch
---
Fix docker build
---
'@eth-optimism/batch-submitter': patch
---
Properly clear state root batch txs on startup
---
'@eth-optimism/contracts': patch
---
Update hardhat task for managing the gas oracle
---
'@eth-optimism/contracts': patch
---
Remove legacy bin/deploy.ts script
...@@ -2,4 +2,4 @@ ...@@ -2,4 +2,4 @@
'@eth-optimism/proxyd': minor '@eth-optimism/proxyd': minor
--- ---
cache immutable RPC responses in proxyd proxyd: Cache block-dependent RPCs
...@@ -2,4 +2,4 @@ ...@@ -2,4 +2,4 @@
'@eth-optimism/integration-tests': patch '@eth-optimism/integration-tests': patch
--- ---
Update timestamp assertion for new logic Use hardhat-ethers for importing factories in integration tests
...@@ -2,4 +2,4 @@ ...@@ -2,4 +2,4 @@
'@eth-optimism/l2geth': patch '@eth-optimism/l2geth': patch
--- ---
changed the default address to be address(0) in `call` Add reinitialize-by-url command, add dump chain state command
...@@ -2,4 +2,4 @@ ...@@ -2,4 +2,4 @@
'@eth-optimism/integration-tests': patch '@eth-optimism/integration-tests': patch
--- ---
Updates to support nightly actor tests Split OVMMulticall.sol into Multicall.sol & OVMContext.sol
...@@ -2,4 +2,4 @@ ...@@ -2,4 +2,4 @@
'@eth-optimism/l2geth': patch '@eth-optimism/l2geth': patch
--- ---
Implement updated timestamp logic Fix blocknumber monotonicity logging bug
...@@ -2,4 +2,4 @@ ...@@ -2,4 +2,4 @@
'@eth-optimism/proxyd': minor '@eth-optimism/proxyd': minor
--- ---
Add request/response payload size metrics to proxyd Add integration tests and batching
...@@ -141,6 +141,32 @@ jobs: ...@@ -141,6 +141,32 @@ jobs:
kubectl rollout restart statefulset nightly-dtl --namespace nightly kubectl rollout restart statefulset nightly-dtl --namespace nightly
kubectl rollout restart deployment nightly-gas-oracle --namespace nightly kubectl rollout restart deployment nightly-gas-oracle --namespace nightly
kubectl rollout restart deployment edge-proxyd --namespace nightly kubectl rollout restart deployment edge-proxyd --namespace nightly
  # Nightly integration-test job: runs the dockerized integration-test suite
  # against the live nightly L1/L2 deployments on optimism-stacks.net.
  run-itests-nightly:
    docker:
      - image: cimg/base:2021.04
    steps:
      - setup_remote_docker:
          version: 19.03.13
      - run:
          name: Run integration tests
          # Debug-trace, replica, and stress tests are disabled for the
          # nightly live-network run; gas prices are read from chain.
          command: |
            docker run \
              --env PRIVATE_KEY=$NIGHTLY_ITESTS_PRIVKEY \
              --env L1_URL=https://nightly-l1.optimism-stacks.net \
              --env L2_URL=https://nightly-l2.optimism-stacks.net \
              --env ADDRESS_MANAGER=0x22D4E211ef8704f2ca2d6dfdB32125E2530ACE3e \
              --env L2_CHAINID=69 \
              --env MOCHA_BAIL=true \
              --env MOCHA_TIMEOUT=300000 \
              --env L1_GAS_PRICE=onchain \
              --env L2_GAS_PRICE=onchain \
              --env RUN_DEBUG_TRACE_TESTS=false \
              --env RUN_REPLICA_TESTS=false \
              --env RUN_STRESS_TESTS=false \
              --env OVMCONTEXT_SPEC_NUM_TXS=1 \
              --env DTL_ENQUEUE_CONFIRMATIONS=12 \
              "$STACKMAN_REPO/integration-tests:nightly" \
              yarn test:integration:live
notify: notify:
docker: docker:
- image: cimg/base:2021.04 - image: cimg/base:2021.04
...@@ -152,6 +178,18 @@ jobs: ...@@ -152,6 +178,18 @@ jobs:
workflows: workflows:
nightly-itests:
triggers:
- schedule:
cron: "0 1 * * * "
filters:
branches:
only:
- develop
jobs:
- run-itests-nightly:
context:
- optimism
nightly: nightly:
triggers: triggers:
- schedule: - schedule:
......
name: proxyd unit tests

on:
  push:
    branches:
      - 'master'
      - 'develop'
  pull_request:
  workflow_dispatch:

defaults:
  run:
    working-directory: ./go/proxyd

jobs:
  test:
    # runs-on is required for every GitHub Actions job; the original
    # workflow omitted it, which makes the workflow file invalid.
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Build
        run: make proxyd
      - name: Lint
        run: make lint
      - name: Test
        run: make test
...@@ -138,6 +138,66 @@ jobs: ...@@ -138,6 +138,66 @@ jobs:
verbose: true verbose: true
flags: sdk flags: sdk
  # Verifies each package declares only dependencies it actually uses,
  # using `npx depcheck` per package.
  depcheck:
    name: Check for unused dependencies
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Fetch history
        run: git fetch
      - name: Setup node
        uses: actions/setup-node@v1
        with:
          node-version: '16.x'
      - name: Get yarn cache directory path
        id: yarn-cache-dir-path
        run: echo "::set-output name=dir::$(yarn cache dir)"
      - uses: actions/cache@v2
        id: yarn-cache
        with:
          path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
          key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
          restore-keys: |
            ${{ runner.os }}-yarn-
      - name: Install Dependencies
        # NOTE(review): the cache-hit guard below is commented out, so
        # dependencies are always installed regardless of cache state.
        # if: steps.yarn-cache.outputs.cache-hit != 'true'
        run: yarn install
      - name: Check packages/batch-submitter
        working-directory: ./packages/batch-submitter
        run: npx depcheck
      - name: Check packages/contracts
        working-directory: ./packages/contracts
        run: npx depcheck
      - name: Check packages/core-utils
        working-directory: ./packages/core-utils
        run: npx depcheck
      - name: Check packages/data-transport-layer
        working-directory: ./packages/data-transport-layer
        run: npx depcheck
      - name: Check packages/message-relayer
        working-directory: ./packages/message-relayer
        run: npx depcheck
      - name: Check packages/sdk
        working-directory: ./packages/sdk
        run: npx depcheck
      - name: Check integration-tests
        working-directory: ./integration-tests
        run: npx depcheck
lint: lint:
name: Linting name: Linting
runs-on: ubuntu-latest runs-on: ubuntu-latest
......
...@@ -164,6 +164,7 @@ func NewBatchSubmitter(cfg Config, gitVersion string) (*BatchSubmitter, error) { ...@@ -164,6 +164,7 @@ func NewBatchSubmitter(cfg Config, gitVersion string) (*BatchSubmitter, error) {
GasRetryIncrement: utils.GasPriceFromGwei(cfg.GasRetryIncrement), GasRetryIncrement: utils.GasPriceFromGwei(cfg.GasRetryIncrement),
ResubmissionTimeout: cfg.ResubmissionTimeout, ResubmissionTimeout: cfg.ResubmissionTimeout,
ReceiptQueryInterval: time.Second, ReceiptQueryInterval: time.Second,
NumConfirmations: cfg.NumConfirmations,
} }
var batchTxService *Service var batchTxService *Service
......
...@@ -34,10 +34,11 @@ func init() { ...@@ -34,10 +34,11 @@ func init() {
var ( var (
testPrivKey *ecdsa.PrivateKey testPrivKey *ecdsa.PrivateKey
testWalletAddr common.Address testWalletAddr common.Address
testChainID *big.Int // 1 testChainID = big.NewInt(1)
testNonce = uint64(2) testNonce = uint64(2)
testGasPrice *big.Int // 3 testGasPrice = big.NewInt(3)
testGasLimit = uint64(4) testGasLimit = uint64(4)
testBlockNumber = uint64(5)
) )
// TestCraftClearingTx asserts that CraftClearingTx produces the expected // TestCraftClearingTx asserts that CraftClearingTx produces the expected
...@@ -102,11 +103,20 @@ func TestSignClearingTxEstimateGasFail(t *testing.T) { ...@@ -102,11 +103,20 @@ func TestSignClearingTxEstimateGasFail(t *testing.T) {
} }
type clearPendingTxHarness struct { type clearPendingTxHarness struct {
l1Client drivers.L1Client l1Client *mock.L1Client
txMgr txmgr.TxManager txMgr txmgr.TxManager
} }
func newClearPendingTxHarness(l1ClientConfig mock.L1ClientConfig) *clearPendingTxHarness { func newClearPendingTxHarnessWithNumConfs(
l1ClientConfig mock.L1ClientConfig,
numConfirmations uint64,
) *clearPendingTxHarness {
if l1ClientConfig.BlockNumber == nil {
l1ClientConfig.BlockNumber = func(_ context.Context) (uint64, error) {
return testBlockNumber, nil
}
}
if l1ClientConfig.NonceAt == nil { if l1ClientConfig.NonceAt == nil {
l1ClientConfig.NonceAt = func(_ context.Context, _ common.Address, _ *big.Int) (uint64, error) { l1ClientConfig.NonceAt = func(_ context.Context, _ common.Address, _ *big.Int) (uint64, error) {
return testNonce, nil return testNonce, nil
...@@ -125,6 +135,7 @@ func newClearPendingTxHarness(l1ClientConfig mock.L1ClientConfig) *clearPendingT ...@@ -125,6 +135,7 @@ func newClearPendingTxHarness(l1ClientConfig mock.L1ClientConfig) *clearPendingT
GasRetryIncrement: utils.GasPriceFromGwei(5), GasRetryIncrement: utils.GasPriceFromGwei(5),
ResubmissionTimeout: time.Second, ResubmissionTimeout: time.Second,
ReceiptQueryInterval: 50 * time.Millisecond, ReceiptQueryInterval: 50 * time.Millisecond,
NumConfirmations: numConfirmations,
}, l1Client) }, l1Client)
return &clearPendingTxHarness{ return &clearPendingTxHarness{
...@@ -133,6 +144,10 @@ func newClearPendingTxHarness(l1ClientConfig mock.L1ClientConfig) *clearPendingT ...@@ -133,6 +144,10 @@ func newClearPendingTxHarness(l1ClientConfig mock.L1ClientConfig) *clearPendingT
} }
} }
// newClearPendingTxHarness returns a test harness using the default of a
// single confirmation, preserving the behavior tests relied on before
// confirmation-depth awareness was added.
func newClearPendingTxHarness(l1ClientConfig mock.L1ClientConfig) *clearPendingTxHarness {
	return newClearPendingTxHarnessWithNumConfs(l1ClientConfig, 1)
}
// TestClearPendingTxClearingTxConfirms asserts the happy path where our // clearing transactions confirms unobstructed.
// clearing transactions confirms unobstructed. // clearing transactions confirms unobstructed.
func TestClearPendingTxClearingTxConfirms(t *testing.T) { func TestClearPendingTxClearingTxConfirms(t *testing.T) {
...@@ -143,6 +158,7 @@ func TestClearPendingTxClearingTxConfirms(t *testing.T) { ...@@ -143,6 +158,7 @@ func TestClearPendingTxClearingTxConfirms(t *testing.T) {
TransactionReceipt: func(_ context.Context, txHash common.Hash) (*types.Receipt, error) { TransactionReceipt: func(_ context.Context, txHash common.Hash) (*types.Receipt, error) {
return &types.Receipt{ return &types.Receipt{
TxHash: txHash, TxHash: txHash,
BlockNumber: big.NewInt(int64(testBlockNumber)),
}, nil }, nil
}, },
}) })
...@@ -190,3 +206,42 @@ func TestClearPendingTxTimeout(t *testing.T) { ...@@ -190,3 +206,42 @@ func TestClearPendingTxTimeout(t *testing.T) {
) )
require.Equal(t, txmgr.ErrPublishTimeout, err) require.Equal(t, txmgr.ErrPublishTimeout, err)
} }
// TestClearPendingTxMultipleConfs tests we wait the appropriate number of
// confirmations for the clearing transaction to confirm.
func TestClearPendingTxMultipleConfs(t *testing.T) {
	const numConfs = 2

	// The mock mines the clearing tx immediately at testBlockNumber, but
	// with numConfs > 1 it is not yet sufficiently confirmed at that height.
	h := newClearPendingTxHarnessWithNumConfs(mock.L1ClientConfig{
		SendTransaction: func(_ context.Context, _ *types.Transaction) error {
			return nil
		},
		TransactionReceipt: func(_ context.Context, txHash common.Hash) (*types.Receipt, error) {
			return &types.Receipt{
				TxHash:      txHash,
				BlockNumber: big.NewInt(int64(testBlockNumber)),
			}, nil
		},
	}, numConfs)

	// With the chain tip still at the inclusion height, the txmgr should
	// time out waiting for the second confirmation.
	err := drivers.ClearPendingTx(
		"test", context.Background(), h.txMgr, h.l1Client, testWalletAddr,
		testPrivKey, testChainID,
	)
	require.Equal(t, txmgr.ErrPublishTimeout, err)

	// Now set the chain height to the earliest the transaction will be
	// considered sufficiently confirmed.
	h.l1Client.SetBlockNumberFunc(func(_ context.Context) (uint64, error) {
		return testBlockNumber + numConfs - 1, nil
	})

	// Publishing should succeed.
	err = drivers.ClearPendingTx(
		"test", context.Background(), h.txMgr, h.l1Client, testWalletAddr,
		testPrivKey, testChainID,
	)
	require.Nil(t, err)
}
...@@ -13,6 +13,9 @@ import ( ...@@ -13,6 +13,9 @@ import (
// L1ClientConfig houses the internal methods that are executed by the mock // L1ClientConfig houses the internal methods that are executed by the mock
// L1Client. Any members left as nil will panic on execution. // L1Client. Any members left as nil will panic on execution.
type L1ClientConfig struct { type L1ClientConfig struct {
// BlockNumber returns the most recent block number.
BlockNumber func(context.Context) (uint64, error)
// EstimateGas tries to estimate the gas needed to execute a specific // EstimateGas tries to estimate the gas needed to execute a specific
// transaction based on the current pending state of the backend blockchain. // transaction based on the current pending state of the backend blockchain.
// There is no guarantee that this is the true gas limit requirement as // There is no guarantee that this is the true gas limit requirement as
...@@ -50,6 +53,14 @@ func NewL1Client(cfg L1ClientConfig) *L1Client { ...@@ -50,6 +53,14 @@ func NewL1Client(cfg L1ClientConfig) *L1Client {
} }
} }
// BlockNumber returns the most recent block number.
//
// Delegates to the BlockNumber member of the mock config; per the
// L1ClientConfig contract, this panics if the member was left nil.
func (c *L1Client) BlockNumber(ctx context.Context) (uint64, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	return c.cfg.BlockNumber(ctx)
}
// EstimateGas executes the mock EstimateGas method. // EstimateGas executes the mock EstimateGas method.
func (c *L1Client) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { func (c *L1Client) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) {
c.mu.RLock() c.mu.RLock()
...@@ -82,6 +93,16 @@ func (c *L1Client) TransactionReceipt(ctx context.Context, txHash common.Hash) ( ...@@ -82,6 +93,16 @@ func (c *L1Client) TransactionReceipt(ctx context.Context, txHash common.Hash) (
return c.cfg.TransactionReceipt(ctx, txHash) return c.cfg.TransactionReceipt(ctx, txHash)
} }
// SetBlockNumberFunc overwrites the mock BlockNumber method.
//
// Takes the write lock so concurrent BlockNumber calls observe a
// consistent function value.
func (c *L1Client) SetBlockNumberFunc(
	f func(context.Context) (uint64, error)) {

	c.mu.Lock()
	defer c.mu.Unlock()

	c.cfg.BlockNumber = f
}
// SetEstimateGasFunc overrwrites the mock EstimateGas method. // SetEstimateGasFunc overrwrites the mock EstimateGas method.
func (c *L1Client) SetEstimateGasFunc( func (c *L1Client) SetEstimateGasFunc(
f func(context.Context, ethereum.CallMsg) (uint64, error)) { f func(context.Context, ethereum.CallMsg) (uint64, error)) {
......
...@@ -52,6 +52,10 @@ type Config struct { ...@@ -52,6 +52,10 @@ type Config struct {
// query the backend to check for confirmations after a tx at a // query the backend to check for confirmations after a tx at a
// specific gas price has been published. // specific gas price has been published.
ReceiptQueryInterval time.Duration ReceiptQueryInterval time.Duration
// NumConfirmations specifies how many blocks are need to consider a
// transaction confirmed.
NumConfirmations uint64
} }
// TxManager is an interface that allows callers to reliably publish txs, // TxManager is an interface that allows callers to reliably publish txs,
...@@ -71,6 +75,9 @@ type TxManager interface { ...@@ -71,6 +75,9 @@ type TxManager interface {
// //
// NOTE: This is a subset of bind.DeployBackend. // NOTE: This is a subset of bind.DeployBackend.
type ReceiptSource interface { type ReceiptSource interface {
// BlockNumber returns the most recent block number.
BlockNumber(ctx context.Context) (uint64, error)
// TransactionReceipt queries the backend for a receipt associated with // TransactionReceipt queries the backend for a receipt associated with
// txHash. If lookup does not fail, but the transaction is not found, // txHash. If lookup does not fail, but the transaction is not found,
// nil should be returned for both values. // nil should be returned for both values.
...@@ -90,6 +97,10 @@ type SimpleTxManager struct { ...@@ -90,6 +97,10 @@ type SimpleTxManager struct {
func NewSimpleTxManager( func NewSimpleTxManager(
name string, cfg Config, backend ReceiptSource) *SimpleTxManager { name string, cfg Config, backend ReceiptSource) *SimpleTxManager {
if cfg.NumConfirmations == 0 {
panic("txmgr: NumConfirmations cannot be zero")
}
return &SimpleTxManager{ return &SimpleTxManager{
name: name, name: name,
cfg: cfg, cfg: cfg,
...@@ -148,6 +159,7 @@ func (m *SimpleTxManager) Send( ...@@ -148,6 +159,7 @@ func (m *SimpleTxManager) Send(
// back to the main event loop if found. // back to the main event loop if found.
receipt, err := WaitMined( receipt, err := WaitMined(
ctxc, m.backend, tx, m.cfg.ReceiptQueryInterval, ctxc, m.backend, tx, m.cfg.ReceiptQueryInterval,
m.cfg.NumConfirmations,
) )
if err != nil { if err != nil {
log.Debug(name+" send tx failed", "hash", txHash, log.Debug(name+" send tx failed", "hash", txHash,
...@@ -220,6 +232,7 @@ func WaitMined( ...@@ -220,6 +232,7 @@ func WaitMined(
backend ReceiptSource, backend ReceiptSource,
tx *types.Transaction, tx *types.Transaction,
queryInterval time.Duration, queryInterval time.Duration,
numConfirmations uint64,
) (*types.Receipt, error) { ) (*types.Receipt, error) {
queryTicker := time.NewTicker(queryInterval) queryTicker := time.NewTicker(queryInterval)
...@@ -229,14 +242,42 @@ func WaitMined( ...@@ -229,14 +242,42 @@ func WaitMined(
for { for {
receipt, err := backend.TransactionReceipt(ctx, txHash) receipt, err := backend.TransactionReceipt(ctx, txHash)
if receipt != nil { switch {
case receipt != nil:
txHeight := receipt.BlockNumber.Uint64()
tipHeight, err := backend.BlockNumber(ctx)
if err != nil {
log.Error("Unable to fetch block number", "err", err)
break
}
log.Trace("Transaction mined, checking confirmations",
"txHash", txHash, "txHeight", txHeight,
"tipHeight", tipHeight,
"numConfirmations", numConfirmations)
// The transaction is considered confirmed when
// txHeight+numConfirmations-1 <= tipHeight. Note that the -1 is
// needed to account for the fact that confirmations have an
// inherent off-by-one, i.e. when using 1 confirmation the
// transaction should be confirmed when txHeight is equal to
// tipHeight. The equation is rewritten in this form to avoid
// underflows.
if txHeight+numConfirmations <= tipHeight+1 {
log.Info("Transaction confirmed", "txHash", txHash)
return receipt, nil return receipt, nil
} }
if err != nil { // Safe to subtract since we know the LHS above is greater.
confsRemaining := (txHeight + numConfirmations) - (tipHeight + 1)
log.Info("Transaction not yet confirmed", "txHash", txHash,
"confsRemaining", confsRemaining)
case err != nil:
log.Trace("Receipt retrievel failed", "hash", txHash, log.Trace("Receipt retrievel failed", "hash", txHash,
"err", err) "err", err)
} else {
default:
log.Trace("Transaction not yet mined", "hash", txHash) log.Trace("Transaction not yet mined", "hash", txHash)
} }
......
...@@ -95,13 +95,23 @@ func newTestHarnessWithConfig(cfg txmgr.Config) *testHarness { ...@@ -95,13 +95,23 @@ func newTestHarnessWithConfig(cfg txmgr.Config) *testHarness {
// newTestHarness initializes a testHarness with a defualt configuration that is // newTestHarness initializes a testHarness with a defualt configuration that is
// suitable for most tests. // suitable for most tests.
func newTestHarness() *testHarness { func newTestHarness() *testHarness {
return newTestHarnessWithConfig(txmgr.Config{ return newTestHarnessWithConfig(configWithNumConfs(1))
}
func configWithNumConfs(numConfirmations uint64) txmgr.Config {
return txmgr.Config{
MinGasPrice: new(big.Int).SetUint64(5), MinGasPrice: new(big.Int).SetUint64(5),
MaxGasPrice: new(big.Int).SetUint64(50), MaxGasPrice: new(big.Int).SetUint64(50),
GasRetryIncrement: new(big.Int).SetUint64(5), GasRetryIncrement: new(big.Int).SetUint64(5),
ResubmissionTimeout: time.Second, ResubmissionTimeout: time.Second,
ReceiptQueryInterval: 50 * time.Millisecond, ReceiptQueryInterval: 50 * time.Millisecond,
}) NumConfirmations: numConfirmations,
}
}
type minedTxInfo struct {
gasPrice *big.Int
blockNumber uint64
} }
// mockBackend implements txmgr.ReceiptSource that tracks mined transactions // mockBackend implements txmgr.ReceiptSource that tracks mined transactions
...@@ -109,25 +119,42 @@ func newTestHarness() *testHarness { ...@@ -109,25 +119,42 @@ func newTestHarness() *testHarness {
type mockBackend struct { type mockBackend struct {
mu sync.RWMutex mu sync.RWMutex
// txHashMinedWithGasPrice tracks the has of a mined transaction to its // blockHeight tracks the current height of the chain.
// gas price. blockHeight uint64
txHashMinedWithGasPrice map[common.Hash]*big.Int
// minedTxs maps the hash of a mined transaction to its details.
minedTxs map[common.Hash]minedTxInfo
} }
// newMockBackend initializes a new mockBackend. // newMockBackend initializes a new mockBackend.
func newMockBackend() *mockBackend { func newMockBackend() *mockBackend {
return &mockBackend{ return &mockBackend{
txHashMinedWithGasPrice: make(map[common.Hash]*big.Int), minedTxs: make(map[common.Hash]minedTxInfo),
} }
} }
// mine records a (txHash, gasPrice) as confirmed. Subsequent calls to // mine records a (txHash, gasPrice) as confirmed. Subsequent calls to
// TransactionReceipt with a matching txHash will result in a non-nil receipt. // TransactionReceipt with a matching txHash will result in a non-nil receipt.
func (b *mockBackend) mine(txHash common.Hash, gasPrice *big.Int) { // If a nil txHash is supplied this has the effect of mining an empty block.
func (b *mockBackend) mine(txHash *common.Hash, gasPrice *big.Int) {
b.mu.Lock() b.mu.Lock()
defer b.mu.Unlock() defer b.mu.Unlock()
b.txHashMinedWithGasPrice[txHash] = gasPrice b.blockHeight++
if txHash != nil {
b.minedTxs[*txHash] = minedTxInfo{
gasPrice: gasPrice,
blockNumber: b.blockHeight,
}
}
}
// BlockNumber returns the most recent block number.
func (b *mockBackend) BlockNumber(ctx context.Context) (uint64, error) {
b.mu.RLock()
defer b.mu.RUnlock()
return b.blockHeight, nil
} }
// TransactionReceipt queries the mockBackend for a mined txHash. If none is // TransactionReceipt queries the mockBackend for a mined txHash. If none is
...@@ -142,7 +169,7 @@ func (b *mockBackend) TransactionReceipt( ...@@ -142,7 +169,7 @@ func (b *mockBackend) TransactionReceipt(
b.mu.RLock() b.mu.RLock()
defer b.mu.RUnlock() defer b.mu.RUnlock()
gasPrice, ok := b.txHashMinedWithGasPrice[txHash] txInfo, ok := b.minedTxs[txHash]
if !ok { if !ok {
return nil, nil return nil, nil
} }
...@@ -151,7 +178,8 @@ func (b *mockBackend) TransactionReceipt( ...@@ -151,7 +178,8 @@ func (b *mockBackend) TransactionReceipt(
// we can assert the proper tx confirmed in our tests. // we can assert the proper tx confirmed in our tests.
return &types.Receipt{ return &types.Receipt{
TxHash: txHash, TxHash: txHash,
GasUsed: gasPrice.Uint64(), GasUsed: txInfo.gasPrice.Uint64(),
BlockNumber: big.NewInt(int64(txInfo.blockNumber)),
}, nil }, nil
} }
...@@ -168,7 +196,8 @@ func TestTxMgrConfirmAtMinGasPrice(t *testing.T) { ...@@ -168,7 +196,8 @@ func TestTxMgrConfirmAtMinGasPrice(t *testing.T) {
tx := types.NewTx(&types.LegacyTx{ tx := types.NewTx(&types.LegacyTx{
GasPrice: gasPrice, GasPrice: gasPrice,
}) })
h.backend.mine(tx.Hash(), gasPrice) txHash := tx.Hash()
h.backend.mine(&txHash, gasPrice)
return tx, nil return tx, nil
} }
...@@ -220,7 +249,8 @@ func TestTxMgrConfirmsAtMaxGasPrice(t *testing.T) { ...@@ -220,7 +249,8 @@ func TestTxMgrConfirmsAtMaxGasPrice(t *testing.T) {
GasPrice: gasPrice, GasPrice: gasPrice,
}) })
if gasPrice.Cmp(h.cfg.MaxGasPrice) == 0 { if gasPrice.Cmp(h.cfg.MaxGasPrice) == 0 {
h.backend.mine(tx.Hash(), gasPrice) txHash := tx.Hash()
h.backend.mine(&txHash, gasPrice)
} }
return tx, nil return tx, nil
} }
...@@ -252,7 +282,8 @@ func TestTxMgrConfirmsAtMaxGasPriceDelayed(t *testing.T) { ...@@ -252,7 +282,8 @@ func TestTxMgrConfirmsAtMaxGasPriceDelayed(t *testing.T) {
// should still return an error beforehand. // should still return an error beforehand.
if gasPrice.Cmp(h.cfg.MaxGasPrice) == 0 { if gasPrice.Cmp(h.cfg.MaxGasPrice) == 0 {
time.AfterFunc(2*time.Second, func() { time.AfterFunc(2*time.Second, func() {
h.backend.mine(tx.Hash(), gasPrice) txHash := tx.Hash()
h.backend.mine(&txHash, gasPrice)
}) })
} }
return tx, nil return tx, nil
...@@ -308,7 +339,8 @@ func TestTxMgrOnlyOnePublicationSucceeds(t *testing.T) { ...@@ -308,7 +339,8 @@ func TestTxMgrOnlyOnePublicationSucceeds(t *testing.T) {
tx := types.NewTx(&types.LegacyTx{ tx := types.NewTx(&types.LegacyTx{
GasPrice: gasPrice, GasPrice: gasPrice,
}) })
h.backend.mine(tx.Hash(), gasPrice) txHash := tx.Hash()
h.backend.mine(&txHash, gasPrice)
return tx, nil return tx, nil
} }
...@@ -338,7 +370,8 @@ func TestTxMgrConfirmsMinGasPriceAfterBumping(t *testing.T) { ...@@ -338,7 +370,8 @@ func TestTxMgrConfirmsMinGasPriceAfterBumping(t *testing.T) {
// Delay mining the tx with the min gas price. // Delay mining the tx with the min gas price.
if gasPrice.Cmp(h.cfg.MinGasPrice) == 0 { if gasPrice.Cmp(h.cfg.MinGasPrice) == 0 {
time.AfterFunc(5*time.Second, func() { time.AfterFunc(5*time.Second, func() {
h.backend.mine(tx.Hash(), gasPrice) txHash := tx.Hash()
h.backend.mine(&txHash, gasPrice)
}) })
} }
return tx, nil return tx, nil
...@@ -361,10 +394,10 @@ func TestWaitMinedReturnsReceiptOnFirstSuccess(t *testing.T) { ...@@ -361,10 +394,10 @@ func TestWaitMinedReturnsReceiptOnFirstSuccess(t *testing.T) {
// Create a tx and mine it immediately using the default backend. // Create a tx and mine it immediately using the default backend.
tx := types.NewTx(&types.LegacyTx{}) tx := types.NewTx(&types.LegacyTx{})
txHash := tx.Hash() txHash := tx.Hash()
h.backend.mine(txHash, new(big.Int)) h.backend.mine(&txHash, new(big.Int))
ctx := context.Background() ctx := context.Background()
receipt, err := txmgr.WaitMined(ctx, h.backend, tx, 50*time.Millisecond) receipt, err := txmgr.WaitMined(ctx, h.backend, tx, 50*time.Millisecond, 1)
require.Nil(t, err) require.Nil(t, err)
require.NotNil(t, receipt) require.NotNil(t, receipt)
require.Equal(t, receipt.TxHash, txHash) require.Equal(t, receipt.TxHash, txHash)
...@@ -383,16 +416,73 @@ func TestWaitMinedCanBeCanceled(t *testing.T) { ...@@ -383,16 +416,73 @@ func TestWaitMinedCanBeCanceled(t *testing.T) {
// Create an unimined tx. // Create an unimined tx.
tx := types.NewTx(&types.LegacyTx{}) tx := types.NewTx(&types.LegacyTx{})
receipt, err := txmgr.WaitMined(ctx, h.backend, tx, 50*time.Millisecond) receipt, err := txmgr.WaitMined(ctx, h.backend, tx, 50*time.Millisecond, 1)
require.Equal(t, err, context.DeadlineExceeded) require.Equal(t, err, context.DeadlineExceeded)
require.Nil(t, receipt) require.Nil(t, receipt)
} }
// TestWaitMinedMultipleConfs asserts that WaitMined will properly wait for
// more than one confirmation.
func TestWaitMinedMultipleConfs(t *testing.T) {
	t.Parallel()

	const numConfs = 2

	h := newTestHarnessWithConfig(configWithNumConfs(numConfs))

	ctxt, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Mine the tx. With numConfs = 2 it has only one confirmation, so
	// WaitMined should time out.
	tx := types.NewTx(&types.LegacyTx{})
	txHash := tx.Hash()
	h.backend.mine(&txHash, new(big.Int))

	receipt, err := txmgr.WaitMined(ctxt, h.backend, tx, 50*time.Millisecond, numConfs)
	require.Equal(t, err, context.DeadlineExceeded)
	require.Nil(t, receipt)

	ctxt, cancel = context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Mine an empty block, tx should now be confirmed.
	h.backend.mine(nil, nil)
	receipt, err = txmgr.WaitMined(ctxt, h.backend, tx, 50*time.Millisecond, numConfs)
	require.Nil(t, err)
	require.NotNil(t, receipt)
	require.Equal(t, txHash, receipt.TxHash)
}
// TestManagerPanicOnZeroConfs ensures that the NewSimpleTxManager will panic
// when attempting to configure with NumConfirmations set to zero.
func TestManagerPanicOnZeroConfs(t *testing.T) {
	t.Parallel()

	// The deferred recover converts the expected panic into a passing test;
	// reaching it without a panic is the failure case.
	defer func() {
		if recover() == nil {
			t.Fatal("NewSimpleTxManager should panic when using zero conf")
		}
	}()

	_ = newTestHarnessWithConfig(configWithNumConfs(0))
}
// failingBackend implements txmgr.ReceiptSource, returning a failure on the // failingBackend implements txmgr.ReceiptSource, returning a failure on the
// first call but a success on the second call. This allows us to test that the // first call but a success on the second call. This allows us to test that the
// inner loop of WaitMined properly handles this case. // inner loop of WaitMined properly handles this case.
type failingBackend struct { type failingBackend struct {
returnSuccess bool returnSuccessBlockNumber bool
returnSuccessReceipt bool
}
// BlockNumber for the failingBackend returns errRpcFailure on the first
// invocation and a fixed block height on subsequent calls.
func (b *failingBackend) BlockNumber(ctx context.Context) (uint64, error) {
if !b.returnSuccessBlockNumber {
b.returnSuccessBlockNumber = true
return 0, errRpcFailure
}
return 1, nil
} }
// TransactionReceipt for the failingBackend returns errRpcFailure on the first // TransactionReceipt for the failingBackend returns errRpcFailure on the first
...@@ -400,13 +490,14 @@ type failingBackend struct { ...@@ -400,13 +490,14 @@ type failingBackend struct {
func (b *failingBackend) TransactionReceipt( func (b *failingBackend) TransactionReceipt(
ctx context.Context, txHash common.Hash) (*types.Receipt, error) { ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
if !b.returnSuccess { if !b.returnSuccessReceipt {
b.returnSuccess = true b.returnSuccessReceipt = true
return nil, errRpcFailure return nil, errRpcFailure
} }
return &types.Receipt{ return &types.Receipt{
TxHash: txHash, TxHash: txHash,
BlockNumber: big.NewInt(1),
}, nil }, nil
} }
...@@ -424,7 +515,7 @@ func TestWaitMinedReturnsReceiptAfterFailure(t *testing.T) { ...@@ -424,7 +515,7 @@ func TestWaitMinedReturnsReceiptAfterFailure(t *testing.T) {
txHash := tx.Hash() txHash := tx.Hash()
ctx := context.Background() ctx := context.Background()
receipt, err := txmgr.WaitMined(ctx, &borkedBackend, tx, 50*time.Millisecond) receipt, err := txmgr.WaitMined(ctx, &borkedBackend, tx, 50*time.Millisecond, 1)
require.Nil(t, err) require.Nil(t, err)
require.NotNil(t, receipt) require.NotNil(t, receipt)
require.Equal(t, receipt.TxHash, txHash) require.Equal(t, receipt.TxHash, txHash)
......
# @eth-optimism/gas-oracle # @eth-optimism/gas-oracle
## 0.1.6
### Patch Changes
- b3efb8b7: String update to change the system name from OE to Optimism
## 0.1.5 ## 0.1.5
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/gas-oracle", "name": "@eth-optimism/gas-oracle",
"version": "0.1.5", "version": "0.1.6",
"private": true, "private": true,
"devDependencies": {} "devDependencies": {}
} }
# @eth-optimism/op-exporter # @eth-optimism/op-exporter
## 0.5.3
### Patch Changes
- 673bfcc4: Fixes panic caused by version initialized to nil
- c7e6bed3: Added version metrics
## 0.5.2 ## 0.5.2
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/op-exporter", "name": "@eth-optimism/op-exporter",
"version": "0.5.2", "version": "0.5.3",
"private": true, "private": true,
"devDependencies": {} "devDependencies": {}
} }
# @eth-optimism/proxyd # @eth-optimism/proxyd
## 3.5.0
### Minor Changes
- 025a3c0d: Add request/response payload size metrics to proxyd
- daf8db0b: cache immutable RPC responses in proxyd
- 8aa89bf3: Add X-Forwarded-For header when proxying RPCs on proxyd
## 3.4.1 ## 3.4.1
### Patch Changes ### Patch Changes
......
...@@ -4,7 +4,7 @@ ARG GITCOMMIT=docker ...@@ -4,7 +4,7 @@ ARG GITCOMMIT=docker
ARG GITDATE=docker ARG GITDATE=docker
ARG GITVERSION=docker ARG GITVERSION=docker
RUN apk add make jq git RUN apk add make jq git gcc musl-dev linux-headers
WORKDIR /app WORKDIR /app
COPY ./go/proxyd /app COPY ./go/proxyd /app
......
...@@ -11,3 +11,11 @@ fmt: ...@@ -11,3 +11,11 @@ fmt:
go mod tidy go mod tidy
gofmt -w . gofmt -w .
.PHONY: fmt .PHONY: fmt
# Run the unit tests with the race detector enabled.
test:
	go test -race -v ./...
.PHONY: test

# Static analysis via go vet.
lint:
	go vet ./...
# BUG FIX: this previously (re-)declared `.PHONY: test`, leaving `lint`
# undeclared as phony — a file named `lint` would silently skip the target.
.PHONY: lint
\ No newline at end of file
...@@ -62,6 +62,10 @@ var ( ...@@ -62,6 +62,10 @@ var (
Message: "backend returned an invalid response", Message: "backend returned an invalid response",
HTTPErrorCode: 500, HTTPErrorCode: 500,
} }
ErrTooManyBatchRequests = &RPCErr{
Code: JSONRPCErrorInternal - 14,
Message: "too many RPC calls in batch request",
}
) )
func ErrInvalidRequest(msg string) *RPCErr { func ErrInvalidRequest(msg string) *RPCErr {
...@@ -631,7 +635,7 @@ func (w *WSProxier) close() { ...@@ -631,7 +635,7 @@ func (w *WSProxier) close() {
} }
func (w *WSProxier) prepareClientMsg(msg []byte) (*RPCReq, error) { func (w *WSProxier) prepareClientMsg(msg []byte) (*RPCReq, error) {
req, err := ParseRPCReq(bytes.NewReader(msg)) req, err := ParseRPCReq(msg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
......
...@@ -2,7 +2,6 @@ package proxyd ...@@ -2,7 +2,6 @@ package proxyd
import ( import (
"context" "context"
"encoding/json"
"github.com/go-redis/redis/v8" "github.com/go-redis/redis/v8"
"github.com/golang/snappy" "github.com/golang/snappy"
...@@ -14,10 +13,9 @@ type Cache interface { ...@@ -14,10 +13,9 @@ type Cache interface {
Put(ctx context.Context, key string, value string) error Put(ctx context.Context, key string, value string) error
} }
// assuming an average RPCRes size of 3 KB
const ( const (
// assuming an average RPCRes size of 3 KB
memoryCacheLimit = 4096 memoryCacheLimit = 4096
numBlockConfirmations = 50
) )
type cache struct { type cache struct {
...@@ -62,6 +60,7 @@ func (c *redisCache) Get(ctx context.Context, key string) (string, error) { ...@@ -62,6 +60,7 @@ func (c *redisCache) Get(ctx context.Context, key string) (string, error) {
if err == redis.Nil { if err == redis.Nil {
return "", nil return "", nil
} else if err != nil { } else if err != nil {
RecordRedisError("CacheGet")
return "", err return "", err
} }
return val, nil return val, nil
...@@ -69,10 +68,42 @@ func (c *redisCache) Get(ctx context.Context, key string) (string, error) { ...@@ -69,10 +68,42 @@ func (c *redisCache) Get(ctx context.Context, key string) (string, error) {
func (c *redisCache) Put(ctx context.Context, key string, value string) error { func (c *redisCache) Put(ctx context.Context, key string, value string) error {
err := c.rdb.Set(ctx, key, value, 0).Err() err := c.rdb.Set(ctx, key, value, 0).Err()
if err != nil {
RecordRedisError("CacheSet")
}
return err return err
} }
type cacheWithCompression struct {
cache Cache
}
func newCacheWithCompression(cache Cache) *cacheWithCompression {
return &cacheWithCompression{cache}
}
func (c *cacheWithCompression) Get(ctx context.Context, key string) (string, error) {
encodedVal, err := c.cache.Get(ctx, key)
if err != nil {
return "", err
}
if encodedVal == "" {
return "", nil
}
val, err := snappy.Decode(nil, []byte(encodedVal))
if err != nil {
return "", err
}
return string(val), nil
}
func (c *cacheWithCompression) Put(ctx context.Context, key string, value string) error {
encodedVal := snappy.Encode(nil, []byte(value))
return c.cache.Put(ctx, key, string(encodedVal))
}
type GetLatestBlockNumFn func(ctx context.Context) (uint64, error) type GetLatestBlockNumFn func(ctx context.Context) (uint64, error)
type GetLatestGasPriceFn func(ctx context.Context) (uint64, error)
type RPCCache interface { type RPCCache interface {
GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error)
...@@ -81,18 +112,23 @@ type RPCCache interface { ...@@ -81,18 +112,23 @@ type RPCCache interface {
type rpcCache struct { type rpcCache struct {
cache Cache cache Cache
getLatestBlockNumFn GetLatestBlockNumFn
handlers map[string]RPCMethodHandler handlers map[string]RPCMethodHandler
} }
func newRPCCache(cache Cache, getLatestBlockNumFn GetLatestBlockNumFn) RPCCache { func newRPCCache(cache Cache, getLatestBlockNumFn GetLatestBlockNumFn, getLatestGasPriceFn GetLatestGasPriceFn, numBlockConfirmations int) RPCCache {
handlers := map[string]RPCMethodHandler{ handlers := map[string]RPCMethodHandler{
"eth_chainId": &StaticRPCMethodHandler{"eth_chainId"}, "eth_chainId": &StaticMethodHandler{},
"net_version": &StaticRPCMethodHandler{"net_version"}, "net_version": &StaticMethodHandler{},
"eth_getBlockByNumber": &EthGetBlockByNumberMethod{getLatestBlockNumFn}, "eth_getBlockByNumber": &EthGetBlockByNumberMethodHandler{cache, getLatestBlockNumFn, numBlockConfirmations},
"eth_getBlockRange": &EthGetBlockRangeMethod{getLatestBlockNumFn}, "eth_getBlockRange": &EthGetBlockRangeMethodHandler{cache, getLatestBlockNumFn, numBlockConfirmations},
"eth_blockNumber": &EthBlockNumberMethodHandler{getLatestBlockNumFn},
"eth_gasPrice": &EthGasPriceMethodHandler{getLatestGasPriceFn},
"eth_call": &EthCallMethodHandler{cache, getLatestBlockNumFn, numBlockConfirmations},
}
return &rpcCache{
cache: cache,
handlers: handlers,
} }
return &rpcCache{cache: cache, getLatestBlockNumFn: getLatestBlockNumFn, handlers: handlers}
} }
func (c *rpcCache) GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) { func (c *rpcCache) GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) {
...@@ -100,34 +136,15 @@ func (c *rpcCache) GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) { ...@@ -100,34 +136,15 @@ func (c *rpcCache) GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) {
if handler == nil { if handler == nil {
return nil, nil return nil, nil
} }
cacheable, err := handler.IsCacheable(req) res, err := handler.GetRPCMethod(ctx, req)
if err != nil { if res != nil {
return nil, err if res == nil {
} RecordCacheMiss(req.Method)
if !cacheable { } else {
return nil, nil RecordCacheHit(req.Method)
}
key := handler.CacheKey(req)
encodedVal, err := c.cache.Get(ctx, key)
if err != nil {
return nil, err
}
if encodedVal == "" {
return nil, nil
} }
val, err := snappy.Decode(nil, []byte(encodedVal))
if err != nil {
return nil, err
} }
return res, err
res := new(RPCRes)
err = json.Unmarshal(val, res)
if err != nil {
return nil, err
}
res.ID = req.ID
return res, nil
} }
func (c *rpcCache) PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error { func (c *rpcCache) PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error {
...@@ -135,23 +152,5 @@ func (c *rpcCache) PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error { ...@@ -135,23 +152,5 @@ func (c *rpcCache) PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error {
if handler == nil { if handler == nil {
return nil return nil
} }
cacheable, err := handler.IsCacheable(req) return handler.PutRPCMethod(ctx, req, res)
if err != nil {
return err
}
if !cacheable {
return nil
}
requiresConfirmations, err := handler.RequiresUnconfirmedBlocks(ctx, req)
if err != nil {
return err
}
if requiresConfirmations {
return nil
}
key := handler.CacheKey(req)
val := mustMarshalJSON(res)
encodedVal := snappy.Encode(nil, val)
return c.cache.Put(ctx, key, string(encodedVal))
} }
This diff is collapsed.
...@@ -2,6 +2,8 @@ package main ...@@ -2,6 +2,8 @@ package main
import ( import (
"os" "os"
"os/signal"
"syscall"
"github.com/BurntSushi/toml" "github.com/BurntSushi/toml"
"github.com/ethereum-optimism/optimism/go/proxyd" "github.com/ethereum-optimism/optimism/go/proxyd"
...@@ -35,7 +37,14 @@ func main() { ...@@ -35,7 +37,14 @@ func main() {
log.Crit("error reading config file", "err", err) log.Crit("error reading config file", "err", err)
} }
if err := proxyd.Start(config); err != nil { shutdown, err := proxyd.Start(config)
if err != nil {
log.Crit("error starting proxyd", "err", err) log.Crit("error starting proxyd", "err", err)
} }
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
recvSig := <-sig
log.Info("caught signal, shutting down", "signal", recvSig)
shutdown()
} }
...@@ -17,6 +17,7 @@ type ServerConfig struct { ...@@ -17,6 +17,7 @@ type ServerConfig struct {
type CacheConfig struct { type CacheConfig struct {
Enabled bool `toml:"enabled"` Enabled bool `toml:"enabled"`
BlockSyncRPCURL string `toml:"block_sync_rpc_url"` BlockSyncRPCURL string `toml:"block_sync_rpc_url"`
NumBlockConfirmations int `toml:"num_block_confirmations"`
} }
type RedisConfig struct { type RedisConfig struct {
...@@ -61,11 +62,11 @@ type MethodMappingsConfig map[string]string ...@@ -61,11 +62,11 @@ type MethodMappingsConfig map[string]string
type Config struct { type Config struct {
WSBackendGroup string `toml:"ws_backend_group"` WSBackendGroup string `toml:"ws_backend_group"`
Server *ServerConfig `toml:"server"` Server ServerConfig `toml:"server"`
Cache *CacheConfig `toml:"cache"` Cache CacheConfig `toml:"cache"`
Redis *RedisConfig `toml:"redis"` Redis RedisConfig `toml:"redis"`
Metrics *MetricsConfig `toml:"metrics"` Metrics MetricsConfig `toml:"metrics"`
BackendOptions *BackendOptions `toml:"backend"` BackendOptions BackendOptions `toml:"backend"`
Backends BackendsConfig `toml:"backends"` Backends BackendsConfig `toml:"backends"`
Authentication map[string]string `toml:"authentication"` Authentication map[string]string `toml:"authentication"`
BackendGroups BackendGroupsConfig `toml:"backend_groups"` BackendGroups BackendGroupsConfig `toml:"backend_groups"`
......
...@@ -4,13 +4,18 @@ go 1.16 ...@@ -4,13 +4,18 @@ go 1.16
require ( require (
github.com/BurntSushi/toml v0.4.1 github.com/BurntSushi/toml v0.4.1
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
github.com/alicebob/miniredis v2.5.0+incompatible
github.com/ethereum/go-ethereum v1.10.11 github.com/ethereum/go-ethereum v1.10.11
github.com/go-redis/redis/v8 v8.11.4 github.com/go-redis/redis/v8 v8.11.4
github.com/golang/snappy v0.0.4 github.com/golang/snappy v0.0.4
github.com/gomodule/redigo v1.8.8 // indirect
github.com/gorilla/mux v1.8.0 github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.4.2 github.com/gorilla/websocket v1.4.2
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_golang v1.11.0
github.com/rs/cors v1.8.0 github.com/rs/cors v1.8.0
github.com/stretchr/testify v1.7.0 github.com/stretchr/testify v1.7.0
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
) )
...@@ -48,6 +48,10 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy ...@@ -48,6 +48,10 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI=
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
...@@ -185,6 +189,8 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW ...@@ -185,6 +189,8 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/gomodule/redigo v1.8.8 h1:f6cXq6RRfiyrOJEV7p3JhLDlmawGBVBBP1MggY8Mo4E=
github.com/gomodule/redigo v1.8.8/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
...@@ -427,6 +433,8 @@ github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+ ...@@ -427,6 +433,8 @@ github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw=
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
...@@ -520,6 +528,7 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h ...@@ -520,6 +528,7 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
...@@ -679,8 +688,9 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= ...@@ -679,8 +688,9 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
......
package integration_tests
import (
"bytes"
"fmt"
"github.com/alicebob/miniredis"
"github.com/ethereum-optimism/optimism/go/proxyd"
"github.com/stretchr/testify/require"
"os"
"testing"
"time"
)
// TestCaching starts proxyd against a miniredis-backed cache and verifies
// that cacheable RPC methods hit the backend only once: the first call
// populates the cache and the second is served from it.
func TestCaching(t *testing.T) {
	redis, err := miniredis.Run()
	require.NoError(t, err)
	defer redis.Close()

	// Backend with a canned result for each method under test.
	backend := NewMockBackend(RPCResponseHandler(map[string]string{
		"eth_chainId":          "0x420",
		"net_version":          "0x1234",
		"eth_blockNumber":      "0x64",
		"eth_getBlockByNumber": "dummy_block",
	}))
	defer backend.Close()

	// The caching.toml config reads both URLs from the environment.
	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))
	require.NoError(t, os.Setenv("REDIS_URL", fmt.Sprintf("redis://127.0.0.1:%s", redis.Port())))
	config := ReadConfig("caching")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	// allow time for the block number fetcher to fire
	time.Sleep(1500 * time.Millisecond)

	tests := []struct {
		method   string
		params   []interface{}
		response string
	}{
		{
			"eth_chainId",
			nil,
			"{\"jsonrpc\": \"2.0\", \"result\": \"0x420\", \"id\": 999}",
		},
		{
			"net_version",
			nil,
			"{\"jsonrpc\": \"2.0\", \"result\": \"0x1234\", \"id\": 999}",
		},
		{
			"eth_getBlockByNumber",
			[]interface{}{
				"0x1",
				true,
			},
			"{\"jsonrpc\": \"2.0\", \"result\": \"dummy_block\", \"id\": 999}",
		},
	}
	for _, tt := range tests {
		t.Run(tt.method, func(t *testing.T) {
			// First call populates the cache; second must be a cache hit.
			_, _, err := client.SendRPC(tt.method, tt.params)
			require.NoError(t, err)
			res, _, err := client.SendRPC(tt.method, tt.params)
			require.NoError(t, err)
			RequireEqualJSON(t, []byte(tt.response), res)

			// Exactly one backend request per method proves the second
			// call never reached the backend.
			var count int
			for _, req := range backend.Requests() {
				if bytes.Contains(req.Body, []byte(tt.method)) {
					count++
				}
			}
			require.Equal(t, 1, count)
			backend.Reset()
		})
	}
}
package integration_tests
import (
"fmt"
"github.com/ethereum-optimism/optimism/go/proxyd"
"github.com/stretchr/testify/require"
"net/http"
"os"
"sync/atomic"
"testing"
"time"
)
const (
	// goodResponse is the canned healthy-backend reply. Its "id": 999
	// matches the request ID used by ProxydClient.SendRPC.
	goodResponse = `{"jsonrpc": "2.0", "result": "hello", "id": 999}`
	// noBackendsResponse is the error proxyd returns once every backend
	// in the group has been exhausted.
	noBackendsResponse = `{"error":{"code":-32011,"message":"no backends available for method"},"id":999,"jsonrpc":"2.0"}`
)
// TestFailover verifies that when the first backend in a group misbehaves
// (bad status codes, malformed or empty bodies, timeouts), proxyd fails over
// to the next backend and the client still receives the good response.
func TestFailover(t *testing.T) {
	goodBackend := NewMockBackend(SingleResponseHandler(200, goodResponse))
	defer goodBackend.Close()
	badBackend := NewMockBackend(nil)
	defer badBackend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
	require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL()))
	config := ReadConfig("failover")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	tests := []struct {
		name    string
		handler http.Handler
	}{
		{
			"backend responds 200 with non-JSON response",
			http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(200)
				w.Write([]byte("this data is not JSON!"))
			}),
		},
		{
			"backend responds with no body",
			http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(200)
			}),
		},
	}

	codes := []int{
		300,
		301,
		302,
		401,
		403,
		429,
		500,
		503,
	}
	for _, code := range codes {
		// Shadow the loop variable: before Go 1.22 every closure in this
		// loop shared a single `code`, so all generated handlers responded
		// with the final value (503) instead of their own status code.
		code := code
		tests = append(tests, struct {
			name    string
			handler http.Handler
		}{
			fmt.Sprintf("backend %d", code),
			http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(code)
			}),
		})
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			badBackend.SetHandler(tt.handler)
			res, statusCode, err := client.SendRPC("eth_chainId", nil)
			require.NoError(t, err)
			require.Equal(t, 200, statusCode)
			RequireEqualJSON(t, []byte(goodResponse), res)
			// The bad backend is tried once, then the good one serves it.
			require.Equal(t, 1, len(badBackend.Requests()))
			require.Equal(t, 1, len(goodBackend.Requests()))
			badBackend.Reset()
			goodBackend.Reset()
		})
	}

	t.Run("backend times out and falls back to another", func(t *testing.T) {
		// Sleep past response_timeout_seconds=1 so the request is abandoned.
		badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			time.Sleep(2 * time.Second)
			w.Write([]byte("{}"))
		}))
		res, statusCode, err := client.SendRPC("eth_chainId", nil)
		require.NoError(t, err)
		require.Equal(t, 200, statusCode)
		RequireEqualJSON(t, []byte(goodResponse), res)
		require.Equal(t, 1, len(badBackend.Requests()))
		require.Equal(t, 1, len(goodBackend.Requests()))
		goodBackend.Reset()
		badBackend.Reset()
	})

	t.Run("works with a batch request", func(t *testing.T) {
		badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(500)
		}))
		res, statusCode, err := client.SendBatchRPC(
			NewRPCReq("1", "eth_chainId", nil),
			NewRPCReq("1", "eth_chainId", nil),
		)
		require.NoError(t, err)
		require.Equal(t, 200, statusCode)
		RequireEqualJSON(t, []byte(asArray(goodResponse, goodResponse)), res)
		// Each batch entry fails over individually.
		require.Equal(t, 2, len(badBackend.Requests()))
		require.Equal(t, 2, len(goodBackend.Requests()))
	})
}
// TestRetries verifies proxyd's retry behavior against a flaky backend
// (config retries.toml, max_retries = 3): a request that succeeds within the
// retry budget returns the eventual success, while one that keeps failing is
// answered with 503 after all attempts are exhausted.
func TestRetries(t *testing.T) {
	backend := NewMockBackend(SingleResponseHandler(200, goodResponse))
	defer backend.Close()
	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", backend.URL()))
	config := ReadConfig("retries")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	// Fail every attempt except the second; atomic because the handler
	// runs on the server's goroutines.
	attempts := int32(0)
	backend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		incremented := atomic.AddInt32(&attempts, 1)
		if incremented != 2 {
			w.WriteHeader(500)
			return
		}
		w.Write([]byte(goodResponse))
	}))

	// test case where request eventually succeeds
	res, statusCode, err := client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(goodResponse), res)
	// initial attempt + one retry
	require.Equal(t, 2, len(backend.Requests()))

	// test case where it does not
	backend.Reset()
	attempts = -10 // far below 2, so every attempt returns 500
	res, statusCode, err = client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 503, statusCode)
	RequireEqualJSON(t, []byte(noBackendsResponse), res)
	// initial attempt + 3 retries
	require.Equal(t, 4, len(backend.Requests()))
}
// TestOutOfServiceInterval verifies that a failing backend is benched for
// out_of_service_seconds (1s in out_of_service_interval.toml): while benched
// it receives no traffic, and after the interval elapses it is retried.
func TestOutOfServiceInterval(t *testing.T) {
	goodBackend := NewMockBackend(SingleResponseHandler(200, goodResponse))
	defer goodBackend.Close()
	badBackend := NewMockBackend(nil)
	defer badBackend.Close()

	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
	require.NoError(t, os.Setenv("BAD_BACKEND_RPC_URL", badBackend.URL()))
	config := ReadConfig("out_of_service_interval")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	okHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(goodResponse))
	})
	badBackend.SetHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(503)
	}))
	goodBackend.SetHandler(okHandler)

	// First request: bad backend fails (initial attempt + max_retries=1
	// retry = 2 hits), then the good backend serves it. The failures bench
	// the bad backend.
	res, statusCode, err := client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(goodResponse), res)
	require.Equal(t, 2, len(badBackend.Requests()))
	require.Equal(t, 1, len(goodBackend.Requests()))

	// While benched the bad backend receives no traffic at all.
	res, statusCode, err = client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(goodResponse), res)
	require.Equal(t, 2, len(badBackend.Requests()))
	require.Equal(t, 2, len(goodBackend.Requests()))

	// Batch requests are also routed away from the benched backend.
	_, _, err = client.SendBatchRPC(
		NewRPCReq("1", "eth_chainId", nil),
		NewRPCReq("1", "eth_chainId", nil),
	)
	require.NoError(t, err) // this error was previously discarded
	require.Equal(t, 2, len(badBackend.Requests()))
	require.Equal(t, 4, len(goodBackend.Requests()))

	// After the out-of-service interval the recovered backend serves again.
	time.Sleep(time.Second)
	badBackend.SetHandler(okHandler)

	res, statusCode, err = client.SendRPC("eth_chainId", nil)
	require.NoError(t, err)
	require.Equal(t, 200, statusCode)
	RequireEqualJSON(t, []byte(goodResponse), res)
	require.Equal(t, 3, len(badBackend.Requests()))
	require.Equal(t, 4, len(goodBackend.Requests()))
}
package integration_tests
import (
"bytes"
"context"
"encoding/json"
"github.com/ethereum-optimism/optimism/go/proxyd"
"io/ioutil"
"net/http"
"net/http/httptest"
"sync"
)
// RecordedRequest captures one HTTP request seen by a MockBackend so that
// tests can assert on exactly what the proxy forwarded.
type RecordedRequest struct {
	Method  string      // HTTP method of the request
	Headers http.Header // cloned request headers
	Body    []byte      // raw request body
}
// MockBackend is an httptest-backed fake RPC backend. It records every
// request it receives and delegates the response to a swappable handler.
type MockBackend struct {
	handler  http.Handler       // current response handler; guarded by mtx
	server   *httptest.Server   // underlying test HTTP server
	mtx      sync.RWMutex       // guards handler and requests
	requests []*RecordedRequest // requests seen since the last Reset
}
func SingleResponseHandler(code int, response string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(code)
w.Write([]byte(response))
}
}
// RPCResponseHandler returns an http.HandlerFunc that parses each request as
// a single JSON-RPC call and replies with the canned result configured for
// that method. Methods with no configured result get a 400; malformed
// requests panic (acceptable in test-only code).
func RPCResponseHandler(rpcResponses map[string]string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		payload, err := ioutil.ReadAll(r.Body)
		if err != nil {
			panic(err)
		}
		req, err := proxyd.ParseRPCReq(payload)
		if err != nil {
			panic(err)
		}
		result := rpcResponses[req.Method]
		if result == "" {
			w.WriteHeader(400)
			return
		}
		resp := &proxyd.RPCRes{
			JSONRPC: proxyd.JSONRPCVersion,
			Result:  result,
			ID:      req.ID,
		}
		if err := json.NewEncoder(w).Encode(resp); err != nil {
			panic(err)
		}
	}
}
// NewMockBackend constructs a MockBackend serving on a fresh httptest server.
// Every incoming request is recorded before being passed to handler.
func NewMockBackend(handler http.Handler) *MockBackend {
	backend := &MockBackend{handler: handler}
	backend.server = httptest.NewServer(http.HandlerFunc(backend.wrappedHandler))
	return backend
}
// URL returns the base URL of the backend's test HTTP server.
func (m *MockBackend) URL() string {
	return m.server.URL
}
// Close shuts down the backend's test HTTP server.
func (m *MockBackend) Close() {
	m.server.Close()
}
// SetHandler atomically swaps the handler used to answer future requests.
func (m *MockBackend) SetHandler(handler http.Handler) {
	m.mtx.Lock()
	defer m.mtx.Unlock()
	m.handler = handler
}
// Reset discards all recorded requests.
func (m *MockBackend) Reset() {
	m.mtx.Lock()
	defer m.mtx.Unlock()
	m.requests = nil
}
// Requests returns a snapshot of all requests recorded since the last Reset.
// The returned slice is a copy, so callers may hold it across further
// backend traffic.
func (m *MockBackend) Requests() []*RecordedRequest {
	m.mtx.RLock()
	defer m.mtx.RUnlock()
	snapshot := make([]*RecordedRequest, len(m.requests))
	copy(snapshot, m.requests)
	return snapshot
}
// wrappedHandler records the incoming request (method, headers, body) and
// then delegates to the currently-configured handler.
//
// The mutex guards only the request log and the handler pointer; it is
// released before the handler runs. Previously the lock was held across
// ServeHTTP, so a slow handler (e.g. the 2s-sleep timeout handler) blocked
// all other requests to this backend, and a handler calling back into
// Requests() or SetHandler() would deadlock.
func (m *MockBackend) wrappedHandler(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		panic(err)
	}
	// Re-attach the consumed body so the delegate handler can read it too.
	clone := r.Clone(context.Background())
	clone.Body = ioutil.NopCloser(bytes.NewReader(body))

	m.mtx.Lock()
	m.requests = append(m.requests, &RecordedRequest{
		Method:  r.Method,
		Headers: r.Header.Clone(),
		Body:    body,
	})
	handler := m.handler
	m.mtx.Unlock()

	handler.ServeHTTP(w, clone)
}
package integration_tests
import (
"github.com/ethereum-optimism/optimism/go/proxyd"
"github.com/stretchr/testify/require"
"os"
"testing"
)
// resWithCode pairs an RPC response body with the HTTP status code it
// arrived with, for collecting concurrent results over a channel.
type resWithCode struct {
	code int    // HTTP status code
	res  []byte // raw response body
}
// TestMaxRPSLimit verifies per-backend rate limiting (rate_limit.toml sets
// max_rps = 2 on the only backend): of three concurrent requests, two succeed
// and one gets 503 because the rate-limited backend is the only member of its
// group, leaving no backend available for the third call.
func TestMaxRPSLimit(t *testing.T) {
	goodBackend := NewMockBackend(SingleResponseHandler(200, goodResponse))
	defer goodBackend.Close()
	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
	config := ReadConfig("rate_limit")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()

	// Fire three requests concurrently so they land in the same RPS window.
	resCh := make(chan *resWithCode)
	for i := 0; i < 3; i++ {
		go func() {
			res, code, err := client.SendRPC("eth_chainId", nil)
			require.NoError(t, err)
			resCh <- &resWithCode{
				code: code,
				res:  res,
			}
		}()
	}

	codes := make(map[int]int)
	var limitedRes []byte
	for i := 0; i < 3; i++ {
		res := <-resCh
		// Map lookups return the zero value, so a plain increment replaces
		// the previous explicit first-seen initialization.
		codes[res.code]++
		// 503 because there's only one backend available
		if res.code == 503 {
			limitedRes = res.res
		}
	}

	require.Equal(t, 2, codes[200])
	require.Equal(t, 1, codes[503])
	RequireEqualJSON(t, []byte(noBackendsResponse), limitedRes)
}
[server]
rpc_port = 8545
[backend]
response_timeout_seconds = 1
[redis]
url = "$REDIS_URL"
[cache]
enabled = true
block_sync_rpc_url = "$GOOD_BACKEND_RPC_URL"
[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backend_groups]
[backend_groups.main]
backends = ["good"]
[rpc_method_mappings]
eth_chainId = "main"
net_version = "main"
eth_getBlockByNumber = "main"
[server]
rpc_port = 8545
[backend]
response_timeout_seconds = 1
[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backends.bad]
rpc_url = "$BAD_BACKEND_RPC_URL"
ws_url = "$BAD_BACKEND_RPC_URL"
[backend_groups]
[backend_groups.main]
backends = ["bad", "good"]
[rpc_method_mappings]
eth_chainId = "main"
\ No newline at end of file
[server]
rpc_port = 8545
[backend]
response_timeout_seconds = 1
max_retries = 1
out_of_service_seconds = 1
[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backends.bad]
rpc_url = "$BAD_BACKEND_RPC_URL"
ws_url = "$BAD_BACKEND_RPC_URL"
[backend_groups]
[backend_groups.main]
backends = ["bad", "good"]
[rpc_method_mappings]
eth_chainId = "main"
\ No newline at end of file
[server]
rpc_port = 8545
[backend]
response_timeout_seconds = 1
[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
max_rps = 2
[backend_groups]
[backend_groups.main]
backends = ["good"]
[rpc_method_mappings]
eth_chainId = "main"
\ No newline at end of file
[server]
rpc_port = 8545
[backend]
response_timeout_seconds = 1
max_retries = 3
[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backend_groups]
[backend_groups.main]
backends = ["good"]
[rpc_method_mappings]
eth_chainId = "main"
\ No newline at end of file
[server]
rpc_port = 8545
[backend]
response_timeout_seconds = 1
[backends]
[backends.good]
rpc_url = "$GOOD_BACKEND_RPC_URL"
ws_url = "$GOOD_BACKEND_RPC_URL"
[backend_groups]
[backend_groups.main]
backends = ["good"]
[rpc_method_mappings]
eth_chainId = "main"
\ No newline at end of file
package integration_tests
import (
"bytes"
"encoding/json"
"fmt"
"github.com/BurntSushi/toml"
"github.com/ethereum-optimism/optimism/go/proxyd"
"github.com/stretchr/testify/require"
"io/ioutil"
"net/http"
"testing"
)
// ProxydClient is a minimal JSON-RPC-over-HTTP client pointed at the proxyd
// instance under test.
type ProxydClient struct {
	url string // base URL of the proxyd HTTP endpoint
}
// NewProxydClient returns a client that sends requests to the given base URL.
func NewProxydClient(url string) *ProxydClient {
	return &ProxydClient{url: url}
}
// SendRPC sends a single JSON-RPC call (fixed ID "999") and returns the raw
// response body, HTTP status code, and any transport error.
func (p *ProxydClient) SendRPC(method string, params []interface{}) ([]byte, int, error) {
	payload, err := json.Marshal(NewRPCReq("999", method, params))
	if err != nil {
		panic(err)
	}
	return p.SendRequest(payload)
}
// SendBatchRPC sends the given requests as one JSON-RPC batch and returns the
// raw response body, HTTP status code, and any transport error.
func (p *ProxydClient) SendBatchRPC(reqs ...*proxyd.RPCReq) ([]byte, int, error) {
	payload, err := json.Marshal(reqs)
	if err != nil {
		panic(err)
	}
	return p.SendRequest(payload)
}
func (p *ProxydClient) SendRequest(body []byte) ([]byte, int, error) {
res, err := http.Post(p.url, "application/json", bytes.NewReader(body))
if err != nil {
return nil, -1, err
}
defer res.Body.Close()
code := res.StatusCode
resBody, err := ioutil.ReadAll(res.Body)
if err != nil {
panic(err)
}
return resBody, code, nil
}
func RequireEqualJSON(t *testing.T, expected []byte, actual []byte) {
expJSON := canonicalizeJSON(t, expected)
actJSON := canonicalizeJSON(t, actual)
require.Equal(t, string(expJSON), string(actJSON))
}
// canonicalizeJSON round-trips a JSON payload through Go's encoder to
// produce a canonical form (sorted object keys, no extra whitespace)
// suitable for string comparison. Fails the test on invalid JSON.
func canonicalizeJSON(t *testing.T, in []byte) []byte {
	// Unmarshaling into a nil interface{} handles objects, arrays, and
	// scalars alike, so no first-byte sniffing or pre-allocation is needed.
	// The old `in[0] == '['` check also panicked on empty input and
	// shadowed the builtin identifier `any`.
	var decoded interface{}
	err := json.Unmarshal(in, &decoded)
	require.NoError(t, err)
	out, err := json.Marshal(decoded)
	require.NoError(t, err)
	return out
}
// ReadConfig loads the named proxyd TOML fixture from the testdata
// directory, panicking if it cannot be read or parsed.
func ReadConfig(name string) *proxyd.Config {
	path := fmt.Sprintf("testdata/%s.toml", name)
	config := new(proxyd.Config)
	if _, err := toml.DecodeFile(path, config); err != nil {
		panic(err)
	}
	return config
}
// NewRPCReq builds a proxyd.RPCReq with the given ID, method, and params.
// It panics on marshal failure since params are test-controlled.
func NewRPCReq(id string, method string, params []interface{}) *proxyd.RPCReq {
	encodedParams, err := json.Marshal(params)
	if err != nil {
		panic(err)
	}
	req := &proxyd.RPCReq{
		JSONRPC: proxyd.JSONRPCVersion,
		Method:  method,
		Params:  encodedParams,
		ID:      []byte(id),
	}
	return req
}
package integration_tests
import (
"github.com/ethereum-optimism/optimism/go/proxyd"
"github.com/stretchr/testify/require"
"os"
"strings"
"testing"
)
// Canonical JSON-RPC error payloads that proxyd itself produces (as opposed
// to errors forwarded from a backend). The tests below compare raw response
// bodies against these fixtures.
const (
	notWhitelistedResponse        = `{"jsonrpc":"2.0","error":{"code":-32001,"message":"rpc method is not whitelisted"},"id":999}`
	parseErrResponse              = `{"jsonrpc":"2.0","error":{"code":-32700,"message":"parse error"},"id":null}`
	invalidJSONRPCVersionResponse = `{"error":{"code":-32601,"message":"invalid JSON-RPC version"},"id":null,"jsonrpc":"2.0"}`
	invalidIDResponse             = `{"error":{"code":-32601,"message":"invalid ID"},"id":null,"jsonrpc":"2.0"}`
	invalidMethodResponse         = `{"error":{"code":-32601,"message":"no method specified"},"id":null,"jsonrpc":"2.0"}`
	invalidBatchLenResponse       = `{"error":{"code":-32601,"message":"must specify at least one batch call"},"id":null,"jsonrpc":"2.0"}`
)
// TestSingleRPCValidation exercises proxyd's validation of single
// (non-batch) JSON-RPC calls: malformed JSON, bad version/ID/method, and
// non-whitelisted methods. Every case must be rejected by proxyd itself,
// so the mock backend must never receive a request.
func TestSingleRPCValidation(t *testing.T) {
	goodBackend := NewMockBackend(SingleResponseHandler(200, goodResponse))
	defer goodBackend.Close()
	// The "whitelist" fixture resolves its backend URL from this env var.
	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
	config := ReadConfig("whitelist")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()
	tests := []struct {
		name string
		body string
		res  string
		code int
	}{
		{
			"body not JSON",
			"this ain't an RPC call",
			parseErrResponse,
			400,
		},
		{
			"body not RPC",
			"{\"not\": \"rpc\"}",
			invalidJSONRPCVersionResponse,
			400,
		},
		{
			"body missing RPC ID",
			"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23]}",
			invalidIDResponse,
			400,
		},
		{
			"body has array ID",
			"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": []}",
			invalidIDResponse,
			400,
		},
		{
			"body has object ID",
			"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": {}}",
			invalidIDResponse,
			400,
		},
		{
			// A non-string method fails JSON decoding of the whole request,
			// hence the parse error rather than a method error.
			"bad method",
			"{\"jsonrpc\": \"2.0\", \"method\": 7, \"params\": [42, 23], \"id\": 1}",
			parseErrResponse,
			400,
		},
		{
			"bad JSON-RPC",
			"{\"jsonrpc\": \"1.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 1}",
			invalidJSONRPCVersionResponse,
			400,
		},
		{
			"omitted method",
			"{\"jsonrpc\": \"2.0\", \"params\": [42, 23], \"id\": 1}",
			invalidMethodResponse,
			400,
		},
		{
			"not whitelisted method",
			"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}",
			notWhitelistedResponse,
			403,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			res, code, err := client.SendRequest([]byte(tt.body))
			require.NoError(t, err)
			RequireEqualJSON(t, []byte(tt.res), res)
			require.Equal(t, tt.code, code)
			// Validation failures must never be proxied to the backend.
			require.Equal(t, 0, len(goodBackend.Requests()))
		})
	}
}
// TestBatchRPCValidation exercises proxyd's validation of batch JSON-RPC
// requests. Unlike single-call validation, a structurally valid batch
// returns HTTP 200 with per-element error objects; only an unparseable or
// empty batch is rejected with a 400. reqCount asserts how many calls in
// the batch actually reached the backend.
func TestBatchRPCValidation(t *testing.T) {
	goodBackend := NewMockBackend(SingleResponseHandler(200, goodResponse))
	defer goodBackend.Close()
	// The "whitelist" fixture resolves its backend URL from this env var.
	require.NoError(t, os.Setenv("GOOD_BACKEND_RPC_URL", goodBackend.URL()))
	config := ReadConfig("whitelist")
	client := NewProxydClient("http://127.0.0.1:8545")
	shutdown, err := proxyd.Start(config)
	require.NoError(t, err)
	defer shutdown()
	tests := []struct {
		name     string
		body     string
		res      string
		code     int
		reqCount int
	}{
		{
			"empty batch",
			"[]",
			invalidBatchLenResponse,
			400,
			0,
		},
		{
			"bad json",
			"[{,]",
			parseErrResponse,
			400,
			0,
		},
		{
			"not object in batch",
			"[123]",
			asArray(parseErrResponse),
			200,
			0,
		},
		{
			"body not RPC",
			"[{\"not\": \"rpc\"}]",
			asArray(invalidJSONRPCVersionResponse),
			200,
			0,
		},
		{
			"body missing RPC ID",
			"[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23]}]",
			asArray(invalidIDResponse),
			200,
			0,
		},
		{
			"body has array ID",
			"[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": []}]",
			asArray(invalidIDResponse),
			200,
			0,
		},
		{
			"body has object ID",
			"[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": {}}]",
			asArray(invalidIDResponse),
			200,
			0,
		},
		// this happens because we can't deserialize the method into a non
		// string value, and it blows up the parsing for the whole request.
		{
			// NOTE(review): this request body looks like a response payload
			// rather than a request with a non-string method — confirm the
			// fixture matches the case name.
			"bad method",
			"[{\"error\":{\"code\":-32600,\"message\":\"invalid request\"},\"id\":null,\"jsonrpc\":\"2.0\"}]",
			asArray(invalidMethodResponse),
			200,
			0,
		},
		{
			"bad JSON-RPC",
			"[{\"jsonrpc\": \"1.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 1}]",
			asArray(invalidJSONRPCVersionResponse),
			200,
			0,
		},
		{
			"omitted method",
			"[{\"jsonrpc\": \"2.0\", \"params\": [42, 23], \"id\": 1}]",
			asArray(invalidMethodResponse),
			200,
			0,
		},
		{
			"not whitelisted method",
			"[{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}]",
			asArray(notWhitelistedResponse),
			200,
			0,
		},
		{
			// Valid, invalid, and unparseable calls mixed in one batch:
			// only the whitelisted eth_chainId call reaches the backend.
			"mixed",
			asArray(
				"{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42, 23], \"id\": 999}",
				"{\"jsonrpc\": \"2.0\", \"method\": \"eth_chainId\", \"params\": [], \"id\": 123}",
				"123",
			),
			asArray(
				notWhitelistedResponse,
				goodResponse,
				parseErrResponse,
			),
			200,
			1,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			res, code, err := client.SendRequest([]byte(tt.body))
			require.NoError(t, err)
			RequireEqualJSON(t, []byte(tt.res), res)
			require.Equal(t, tt.code, code)
			require.Equal(t, tt.reqCount, len(goodBackend.Requests()))
		})
	}
}
// asArray wraps the given raw JSON fragments in a JSON array literal,
// joining them with commas.
func asArray(in ...string) string {
	var b strings.Builder
	b.WriteString("[")
	for i, elem := range in {
		if i > 0 {
			b.WriteString(",")
		}
		b.WriteString(elem)
	}
	b.WriteString("]")
	return b.String()
}
package proxyd
import (
"context"
"sync"
"time"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
)
const blockHeadSyncPeriod = 1 * time.Second
// LatestBlockHead polls a node for its latest block number on a fixed
// interval and caches the result for cheap concurrent reads via
// GetBlockNum.
type LatestBlockHead struct {
	url    string
	client *ethclient.Client
	quit   chan struct{} // closed by Stop to terminate the polling goroutine
	done   chan struct{} // closed by the goroutine once it has exited

	mutex    sync.RWMutex
	blockNum uint64 // last observed block number; guarded by mutex
}
// newLatestBlockHead dials the given RPC URL and returns a poller in its
// initial (not yet started) state. Call Start to begin polling.
func newLatestBlockHead(url string) (*LatestBlockHead, error) {
	client, err := ethclient.Dial(url)
	if err != nil {
		return nil, err
	}
	head := &LatestBlockHead{
		url:    url,
		client: client,
		quit:   make(chan struct{}),
		done:   make(chan struct{}),
	}
	return head, nil
}
// Start launches a goroutine that refreshes the cached block number every
// blockHeadSyncPeriod until Stop is called. Fetch failures are logged and
// the previous value is retained.
func (h *LatestBlockHead) Start() {
	go func() {
		// The loop only exits via quit, so signalling done on return is
		// equivalent to closing it inside the quit case.
		defer close(h.done)
		ticker := time.NewTicker(blockHeadSyncPeriod)
		defer ticker.Stop()

		for {
			select {
			case <-h.quit:
				return
			case <-ticker.C:
				blockNum, err := h.getBlockNum()
				if err != nil {
					log.Error("error retrieving latest block number", "error", err)
					continue
				}
				log.Trace("polling block number", "blockNum", blockNum)
				h.mutex.Lock()
				h.blockNum = blockNum
				h.mutex.Unlock()
			}
		}
	}()
}
// getBlockNum fetches the latest block number from the backend, retrying
// up to maxRetries additional times with backoff. Returns the last error
// wrapped if every attempt fails.
func (h *LatestBlockHead) getBlockNum() (uint64, error) {
	const maxRetries = 5
	var err error
	for i := 0; i <= maxRetries; i++ {
		var blockNum uint64
		blockNum, err = h.client.BlockNumber(context.Background())
		if err == nil {
			return blockNum, nil
		}
		// Only back off when another attempt will actually follow; the old
		// code slept after the final failure too, delaying the error return
		// for no benefit.
		if i < maxRetries {
			backoff := calcBackoff(i)
			log.Warn("http operation failed. retrying...", "error", err, "backoff", backoff)
			time.Sleep(backoff)
		}
	}
	return 0, wrapErr(err, "exceeded retries")
}
// Stop terminates the polling goroutine, waits for it to exit, and closes
// the underlying RPC client. It must be called at most once; a second call
// would panic on the already-closed quit channel.
func (h *LatestBlockHead) Stop() {
	close(h.quit)
	<-h.done
	h.client.Close()
}
// GetBlockNum returns the most recently polled block number. It is safe
// for concurrent use; returns zero until the first successful poll.
func (h *LatestBlockHead) GetBlockNum() uint64 {
	h.mutex.RLock()
	blockNum := h.blockNum
	h.mutex.RUnlock()
	return blockNum
}
package proxyd
import (
"context"
"time"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
)
// cacheSyncRate is how often each last-value cache polls its updater.
const cacheSyncRate = 1 * time.Second

// lvcUpdateFn fetches the current value for a cache key from the given
// client, returning it in string form.
type lvcUpdateFn func(context.Context, *ethclient.Client) (string, error)
// EthLastValueCache periodically polls an Ethereum node for a single value
// (e.g. latest block number or gas price) via its updater function and
// stores the result in a Cache under a fixed key.
//
// NOTE(review): unlike LatestBlockHead there is no done channel here, so
// Stop does not wait for the polling goroutine to exit — confirm this is
// intentional.
type EthLastValueCache struct {
	client  *ethclient.Client
	cache   Cache
	key     string       // cache key the polled value is stored under
	updater lvcUpdateFn  // fetches the current value from the node
	quit    chan struct{} // closed by Stop to terminate the polling goroutine
}
// newLVC builds a last-value cache in its initial (not yet started) state.
// Call Start to begin polling.
func newLVC(client *ethclient.Client, cache Cache, cacheKey string, updater lvcUpdateFn) *EthLastValueCache {
	lvc := &EthLastValueCache{
		client:  client,
		cache:   cache,
		key:     cacheKey,
		updater: updater,
		quit:    make(chan struct{}),
	}
	return lvc
}
// Start launches a goroutine that refreshes the cached value every
// cacheSyncRate until Stop is called. Poll times are exported via the
// lvcPollTimeGauge metric; fetch and cache-write failures are logged and
// the previous cached value is retained.
func (h *EthLastValueCache) Start() {
	go func() {
		ticker := time.NewTicker(cacheSyncRate)
		defer ticker.Stop()

		for {
			select {
			case <-h.quit:
				return
			case <-ticker.C:
				lvcPollTimeGauge.WithLabelValues(h.key).SetToCurrentTime()

				value, err := h.getUpdate()
				if err != nil {
					log.Error("error retrieving latest value", "key", h.key, "error", err)
					continue
				}
				log.Trace("polling latest value", "value", value)

				if err := h.cache.Put(context.Background(), h.key, value); err != nil {
					log.Error("error writing last value to cache", "key", h.key, "error", err)
				}
			}
		}
	}()
}
// getUpdate invokes the updater, retrying up to maxRetries additional
// times with backoff. Every failure increments the lvcErrorsTotal metric;
// the last error is returned wrapped if all attempts fail.
func (h *EthLastValueCache) getUpdate() (string, error) {
	const maxRetries = 5
	var err error
	for i := 0; i <= maxRetries; i++ {
		var value string
		value, err = h.updater(context.Background(), h.client)
		if err == nil {
			return value, nil
		}
		lvcErrorsTotal.WithLabelValues(h.key).Inc()
		// Only back off when another attempt will actually follow; the old
		// code slept after the final failure too, delaying the error return
		// for no benefit. Mirrors the same fix in LatestBlockHead.getBlockNum.
		if i < maxRetries {
			backoff := calcBackoff(i)
			log.Warn("http operation failed. retrying...", "error", err, "backoff", backoff)
			time.Sleep(backoff)
		}
	}
	return "", wrapErr(err, "exceeded retries")
}
// Stop signals the polling goroutine to exit. It does not wait for the
// goroutine to finish, and must be called at most once; a second call
// would panic on the already-closed quit channel.
func (h *EthLastValueCache) Stop() {
	close(h.quit)
}
// Read returns the most recently stored value for this cache's key,
// delegating directly to the underlying Cache.
func (h *EthLastValueCache) Read(ctx context.Context) (string, error) {
	return h.cache.Get(ctx, h.key)
}
This diff is collapsed.
...@@ -145,22 +145,53 @@ var ( ...@@ -145,22 +145,53 @@ var (
requestPayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{ requestPayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: MetricsNamespace, Namespace: MetricsNamespace,
Name: "request_payload_sizes", Name: "request_payload_sizes",
Help: "Gauge of client request payload sizes.", Help: "Histogram of client request payload sizes.",
Buckets: PayloadSizeBuckets, Buckets: PayloadSizeBuckets,
}, []string{ }, []string{
"auth", "auth",
"method_name",
}) })
responsePayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{ responsePayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: MetricsNamespace, Namespace: MetricsNamespace,
Name: "response_payload_sizes", Name: "response_payload_sizes",
Help: "Gauge of client response payload sizes.", Help: "Histogram of client response payload sizes.",
Buckets: PayloadSizeBuckets, Buckets: PayloadSizeBuckets,
}, []string{ }, []string{
"auth", "auth",
}) })
cacheHitsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "cache_hits_total",
Help: "Number of cache hits.",
}, []string{
"method",
})
cacheMissesTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "cache_misses_total",
Help: "Number of cache misses.",
}, []string{
"method",
})
lvcErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "lvc_errors_total",
Help: "Count of lvc errors.",
}, []string{
"key",
})
lvcPollTimeGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Name: "lvc_poll_time_gauge",
Help: "Gauge of lvc poll time.",
}, []string{
"key",
})
rpcSpecialErrors = []string{ rpcSpecialErrors = []string{
"nonce too low", "nonce too low",
"gas price too high", "gas price too high",
...@@ -208,10 +239,18 @@ func MaybeRecordSpecialRPCError(ctx context.Context, backendName, method string, ...@@ -208,10 +239,18 @@ func MaybeRecordSpecialRPCError(ctx context.Context, backendName, method string,
} }
} }
func RecordRequestPayloadSize(ctx context.Context, method string, payloadSize int) { func RecordRequestPayloadSize(ctx context.Context, payloadSize int) {
requestPayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx), method).Observe(float64(payloadSize)) requestPayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx)).Observe(float64(payloadSize))
} }
func RecordResponsePayloadSize(ctx context.Context, payloadSize int) { func RecordResponsePayloadSize(ctx context.Context, payloadSize int) {
responsePayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx)).Observe(float64(payloadSize)) responsePayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx)).Observe(float64(payloadSize))
} }
func RecordCacheHit(method string) {
cacheHitsTotal.WithLabelValues(method).Inc()
}
func RecordCacheMiss(method string) {
cacheMissesTotal.WithLabelValues(method).Inc()
}
{ {
"name": "@eth-optimism/proxyd", "name": "@eth-optimism/proxyd",
"version": "3.4.1", "version": "3.5.0",
"private": true, "private": true,
"dependencies": {} "dependencies": {}
} }
...@@ -7,40 +7,49 @@ import ( ...@@ -7,40 +7,49 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"os" "os"
"os/signal" "strconv"
"syscall"
"time" "time"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
) )
func Start(config *Config) error { func Start(config *Config) (func(), error) {
if len(config.Backends) == 0 { if len(config.Backends) == 0 {
return errors.New("must define at least one backend") return nil, errors.New("must define at least one backend")
} }
if len(config.BackendGroups) == 0 { if len(config.BackendGroups) == 0 {
return errors.New("must define at least one backend group") return nil, errors.New("must define at least one backend group")
} }
if len(config.RPCMethodMappings) == 0 { if len(config.RPCMethodMappings) == 0 {
return errors.New("must define at least one RPC method mapping") return nil, errors.New("must define at least one RPC method mapping")
} }
for authKey := range config.Authentication { for authKey := range config.Authentication {
if authKey == "none" { if authKey == "none" {
return errors.New("cannot use none as an auth key") return nil, errors.New("cannot use none as an auth key")
} }
} }
var redisURL string
if config.Redis.URL != "" {
rURL, err := ReadFromEnvOrConfig(config.Redis.URL)
if err != nil {
return nil, err
}
redisURL = rURL
}
var lim RateLimiter var lim RateLimiter
var err error var err error
if config.Redis == nil { if redisURL == "" {
log.Warn("redis is not configured, using local rate limiter") log.Warn("redis is not configured, using local rate limiter")
lim = NewLocalRateLimiter() lim = NewLocalRateLimiter()
} else { } else {
lim, err = NewRedisRateLimiter(config.Redis.URL) lim, err = NewRedisRateLimiter(redisURL)
if err != nil { if err != nil {
return err return nil, err
} }
} }
...@@ -51,17 +60,17 @@ func Start(config *Config) error { ...@@ -51,17 +60,17 @@ func Start(config *Config) error {
rpcURL, err := ReadFromEnvOrConfig(cfg.RPCURL) rpcURL, err := ReadFromEnvOrConfig(cfg.RPCURL)
if err != nil { if err != nil {
return err return nil, err
} }
wsURL, err := ReadFromEnvOrConfig(cfg.WSURL) wsURL, err := ReadFromEnvOrConfig(cfg.WSURL)
if err != nil { if err != nil {
return err return nil, err
} }
if rpcURL == "" { if rpcURL == "" {
return fmt.Errorf("must define an RPC URL for backend %s", name) return nil, fmt.Errorf("must define an RPC URL for backend %s", name)
} }
if wsURL == "" { if wsURL == "" {
return fmt.Errorf("must define a WS URL for backend %s", name) return nil, fmt.Errorf("must define a WS URL for backend %s", name)
} }
if config.BackendOptions.ResponseTimeoutSeconds != 0 { if config.BackendOptions.ResponseTimeoutSeconds != 0 {
...@@ -86,13 +95,13 @@ func Start(config *Config) error { ...@@ -86,13 +95,13 @@ func Start(config *Config) error {
if cfg.Password != "" { if cfg.Password != "" {
passwordVal, err := ReadFromEnvOrConfig(cfg.Password) passwordVal, err := ReadFromEnvOrConfig(cfg.Password)
if err != nil { if err != nil {
return err return nil, err
} }
opts = append(opts, WithBasicAuth(cfg.Username, passwordVal)) opts = append(opts, WithBasicAuth(cfg.Username, passwordVal))
} }
tlsConfig, err := configureBackendTLS(cfg) tlsConfig, err := configureBackendTLS(cfg)
if err != nil { if err != nil {
return err return nil, err
} }
if tlsConfig != nil { if tlsConfig != nil {
log.Info("using custom TLS config for backend", "name", name) log.Info("using custom TLS config for backend", "name", name)
...@@ -113,7 +122,7 @@ func Start(config *Config) error { ...@@ -113,7 +122,7 @@ func Start(config *Config) error {
backends := make([]*Backend, 0) backends := make([]*Backend, 0)
for _, bName := range bg.Backends { for _, bName := range bg.Backends {
if backendsByName[bName] == nil { if backendsByName[bName] == nil {
return fmt.Errorf("backend %s is not defined", bName) return nil, fmt.Errorf("backend %s is not defined", bName)
} }
backends = append(backends, backendsByName[bName]) backends = append(backends, backendsByName[bName])
} }
...@@ -128,17 +137,17 @@ func Start(config *Config) error { ...@@ -128,17 +137,17 @@ func Start(config *Config) error {
if config.WSBackendGroup != "" { if config.WSBackendGroup != "" {
wsBackendGroup = backendGroups[config.WSBackendGroup] wsBackendGroup = backendGroups[config.WSBackendGroup]
if wsBackendGroup == nil { if wsBackendGroup == nil {
return fmt.Errorf("ws backend group %s does not exist", config.WSBackendGroup) return nil, fmt.Errorf("ws backend group %s does not exist", config.WSBackendGroup)
} }
} }
if wsBackendGroup == nil && config.Server.WSPort != 0 { if wsBackendGroup == nil && config.Server.WSPort != 0 {
return fmt.Errorf("a ws port was defined, but no ws group was defined") return nil, fmt.Errorf("a ws port was defined, but no ws group was defined")
} }
for _, bg := range config.RPCMethodMappings { for _, bg := range config.RPCMethodMappings {
if backendGroups[bg] == nil { if backendGroups[bg] == nil {
return fmt.Errorf("undefined backend group %s", bg) return nil, fmt.Errorf("undefined backend group %s", bg)
} }
} }
...@@ -149,39 +158,50 @@ func Start(config *Config) error { ...@@ -149,39 +158,50 @@ func Start(config *Config) error {
for secret, alias := range config.Authentication { for secret, alias := range config.Authentication {
resolvedSecret, err := ReadFromEnvOrConfig(secret) resolvedSecret, err := ReadFromEnvOrConfig(secret)
if err != nil { if err != nil {
return err return nil, err
} }
resolvedAuth[resolvedSecret] = alias resolvedAuth[resolvedSecret] = alias
} }
} }
var rpcCache RPCCache var (
if config.Cache != nil && config.Cache.Enabled { rpcCache RPCCache
var cache Cache blockNumLVC *EthLastValueCache
if config.Redis != nil { gasPriceLVC *EthLastValueCache
if cache, err = newRedisCache(config.Redis.URL); err != nil { )
return err if config.Cache.Enabled {
var (
cache Cache
blockNumFn GetLatestBlockNumFn
gasPriceFn GetLatestGasPriceFn
)
if config.Cache.BlockSyncRPCURL == "" {
return nil, fmt.Errorf("block sync node required for caching")
}
blockSyncRPCURL, err := ReadFromEnvOrConfig(config.Cache.BlockSyncRPCURL)
if err != nil {
return nil, err
}
if redisURL != "" {
if cache, err = newRedisCache(redisURL); err != nil {
return nil, err
} }
} else { } else {
log.Warn("redis is not configured, using in-memory cache") log.Warn("redis is not configured, using in-memory cache")
cache = newMemoryCache() cache = newMemoryCache()
} }
// Ideally, the BlocKSyncRPCURL should be the sequencer or a HA replica that's not far behind
var getLatestBlockNumFn GetLatestBlockNumFn ethClient, err := ethclient.Dial(blockSyncRPCURL)
if config.Cache.BlockSyncRPCURL == "" {
return fmt.Errorf("block sync node required for caching")
}
latestHead, err := newLatestBlockHead(config.Cache.BlockSyncRPCURL)
if err != nil { if err != nil {
return err return nil, err
} }
latestHead.Start() defer ethClient.Close()
defer latestHead.Stop()
getLatestBlockNumFn = func(ctx context.Context) (uint64, error) { blockNumLVC, blockNumFn = makeGetLatestBlockNumFn(ethClient, cache)
return latestHead.GetBlockNum(), nil gasPriceLVC, gasPriceFn = makeGetLatestGasPriceFn(ethClient, cache)
} rpcCache = newRPCCache(newCacheWithCompression(cache), blockNumFn, gasPriceFn, config.Cache.NumBlockConfirmations)
rpcCache = newRPCCache(cache, getLatestBlockNumFn)
} }
srv := NewServer( srv := NewServer(
...@@ -194,12 +214,17 @@ func Start(config *Config) error { ...@@ -194,12 +214,17 @@ func Start(config *Config) error {
rpcCache, rpcCache,
) )
if config.Metrics != nil && config.Metrics.Enabled { if config.Metrics.Enabled {
addr := fmt.Sprintf("%s:%d", config.Metrics.Host, config.Metrics.Port) addr := fmt.Sprintf("%s:%d", config.Metrics.Host, config.Metrics.Port)
log.Info("starting metrics server", "addr", addr) log.Info("starting metrics server", "addr", addr)
go http.ListenAndServe(addr, promhttp.Handler()) go http.ListenAndServe(addr, promhttp.Handler())
} }
// To allow integration tests to cleanly come up, wait
// 10ms to give the below goroutines enough time to
// encounter an error creating their servers
errTimer := time.NewTimer(10 * time.Millisecond)
if config.Server.RPCPort != 0 { if config.Server.RPCPort != 0 {
go func() { go func() {
if err := srv.RPCListenAndServe(config.Server.RPCHost, config.Server.RPCPort); err != nil { if err := srv.RPCListenAndServe(config.Server.RPCHost, config.Server.RPCPort); err != nil {
...@@ -224,15 +249,23 @@ func Start(config *Config) error { ...@@ -224,15 +249,23 @@ func Start(config *Config) error {
}() }()
} }
sig := make(chan os.Signal, 1) <-errTimer.C
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) log.Info("started proxyd")
recvSig := <-sig
log.Info("caught signal, shutting down", "signal", recvSig) return func() {
log.Info("shutting down proxyd")
if blockNumLVC != nil {
blockNumLVC.Stop()
}
if gasPriceLVC != nil {
gasPriceLVC.Stop()
}
srv.Shutdown() srv.Shutdown()
if err := lim.FlushBackendWSConns(backendNames); err != nil { if err := lim.FlushBackendWSConns(backendNames); err != nil {
log.Error("error flushing backend ws conns", "err", err) log.Error("error flushing backend ws conns", "err", err)
} }
return nil log.Info("goodbye")
}, nil
} }
func secondsToDuration(seconds int) time.Duration { func secondsToDuration(seconds int) time.Duration {
...@@ -259,3 +292,39 @@ func configureBackendTLS(cfg *BackendConfig) (*tls.Config, error) { ...@@ -259,3 +292,39 @@ func configureBackendTLS(cfg *BackendConfig) (*tls.Config, error) {
return tlsConfig, nil return tlsConfig, nil
} }
func makeUint64LastValueFn(client *ethclient.Client, cache Cache, key string, updater lvcUpdateFn) (*EthLastValueCache, func(context.Context) (uint64, error)) {
lvc := newLVC(client, cache, key, updater)
lvc.Start()
return lvc, func(ctx context.Context) (uint64, error) {
value, err := lvc.Read(ctx)
if err != nil {
return 0, err
}
if value == "" {
return 0, fmt.Errorf("%s is unavailable", key)
}
valueUint, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return 0, err
}
return valueUint, nil
}
}
func makeGetLatestBlockNumFn(client *ethclient.Client, cache Cache) (*EthLastValueCache, GetLatestBlockNumFn) {
return makeUint64LastValueFn(client, cache, "lvc:block_number", func(ctx context.Context, c *ethclient.Client) (string, error) {
blockNum, err := c.BlockNumber(ctx)
return strconv.FormatUint(blockNum, 10), err
})
}
func makeGetLatestGasPriceFn(client *ethclient.Client, cache Cache) (*EthLastValueCache, GetLatestGasPriceFn) {
return makeUint64LastValueFn(client, cache, "lvc:gas_price", func(ctx context.Context, c *ethclient.Client) (string, error) {
gasPrice, err := c.SuggestGasPrice(ctx)
if err != nil {
return "", err
}
return gasPrice.String(), nil
})
}
...@@ -46,30 +46,22 @@ func IsValidID(id json.RawMessage) bool { ...@@ -46,30 +46,22 @@ func IsValidID(id json.RawMessage) bool {
return len(id) > 0 && id[0] != '{' && id[0] != '[' return len(id) > 0 && id[0] != '{' && id[0] != '['
} }
func ParseRPCReq(r io.Reader) (*RPCReq, error) { func ParseRPCReq(body []byte) (*RPCReq, error) {
body, err := ioutil.ReadAll(r)
if err != nil {
return nil, wrapErr(err, "error reading request body")
}
req := new(RPCReq) req := new(RPCReq)
if err := json.Unmarshal(body, req); err != nil { if err := json.Unmarshal(body, req); err != nil {
return nil, ErrParseErr return nil, ErrParseErr
} }
if req.JSONRPC != JSONRPCVersion { return req, nil
return nil, ErrInvalidRequest("invalid JSON-RPC version") }
}
if req.Method == "" {
return nil, ErrInvalidRequest("no method specified")
}
if !IsValidID(req.ID) { func ParseBatchRPCReq(body []byte) ([]json.RawMessage, error) {
return nil, ErrInvalidRequest("invalid ID") batch := make([]json.RawMessage, 0)
if err := json.Unmarshal(body, &batch); err != nil {
return nil, err
} }
return req, nil return batch, nil
} }
func ParseRPCRes(r io.Reader) (*RPCRes, error) { func ParseRPCRes(r io.Reader) (*RPCRes, error) {
...@@ -86,6 +78,22 @@ func ParseRPCRes(r io.Reader) (*RPCRes, error) { ...@@ -86,6 +78,22 @@ func ParseRPCRes(r io.Reader) (*RPCRes, error) {
return res, nil return res, nil
} }
func ValidateRPCReq(req *RPCReq) error {
if req.JSONRPC != JSONRPCVersion {
return ErrInvalidRequest("invalid JSON-RPC version")
}
if req.Method == "" {
return ErrInvalidRequest("no method specified")
}
if !IsValidID(req.ID) {
return ErrInvalidRequest("invalid ID")
}
return nil
}
func NewRPCErrorRes(id json.RawMessage, err error) *RPCRes { func NewRPCErrorRes(id json.RawMessage, err error) *RPCRes {
var rpcErr *RPCErr var rpcErr *RPCErr
if rr, ok := err.(*RPCErr); ok { if rr, ok := err.(*RPCErr); ok {
...@@ -103,3 +111,14 @@ func NewRPCErrorRes(id json.RawMessage, err error) *RPCRes { ...@@ -103,3 +111,14 @@ func NewRPCErrorRes(id json.RawMessage, err error) *RPCRes {
ID: id, ID: id,
} }
} }
func IsBatch(raw []byte) bool {
for _, c := range raw {
// skip insignificant whitespace (http://www.ietf.org/rfc/rfc4627.txt)
if c == 0x20 || c == 0x09 || c == 0x0a || c == 0x0d {
continue
}
return c == '['
}
return false
}
...@@ -6,6 +6,8 @@ import ( ...@@ -6,6 +6,8 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"math"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
...@@ -22,6 +24,7 @@ const ( ...@@ -22,6 +24,7 @@ const (
ContextKeyAuth = "authorization" ContextKeyAuth = "authorization"
ContextKeyReqID = "req_id" ContextKeyReqID = "req_id"
ContextKeyXForwardedFor = "x_forwarded_for" ContextKeyXForwardedFor = "x_forwarded_for"
MaxBatchRPCCalls = 100
) )
type Server struct { type Server struct {
...@@ -49,6 +52,11 @@ func NewServer( ...@@ -49,6 +52,11 @@ func NewServer(
if cache == nil { if cache == nil {
cache = &NoopRPCCache{} cache = &NoopRPCCache{}
} }
if maxBodySize == 0 {
maxBodySize = math.MaxInt64
}
return &Server{ return &Server{
backendGroups: backendGroups, backendGroups: backendGroups,
wsBackendGroup: wsBackendGroup, wsBackendGroup: wsBackendGroup,
...@@ -122,15 +130,66 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) { ...@@ -122,15 +130,66 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
"user_agent", r.Header.Get("user-agent"), "user_agent", r.Header.Get("user-agent"),
) )
bodyReader := &recordLenReader{Reader: io.LimitReader(r.Body, s.maxBodySize)} body, err := ioutil.ReadAll(io.LimitReader(r.Body, s.maxBodySize))
req, err := ParseRPCReq(bodyReader) if err != nil {
log.Error("error reading request body", "err", err)
writeRPCError(ctx, w, nil, ErrInternal)
return
}
RecordRequestPayloadSize(ctx, len(body))
if IsBatch(body) {
reqs, err := ParseBatchRPCReq(body)
if err != nil { if err != nil {
log.Info("rejected request with bad rpc request", "source", "rpc", "err", err) log.Error("error parsing batch RPC request", "err", err)
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err) RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
writeRPCError(ctx, w, nil, ErrParseErr)
return
}
if len(reqs) > MaxBatchRPCCalls {
RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrTooManyBatchRequests)
writeRPCError(ctx, w, nil, ErrTooManyBatchRequests)
return
}
if len(reqs) == 0 {
writeRPCError(ctx, w, nil, ErrInvalidRequest("must specify at least one batch call"))
return
}
batchRes := make([]*RPCRes, len(reqs), len(reqs))
for i := 0; i < len(reqs); i++ {
req, err := ParseRPCReq(reqs[i])
if err != nil {
log.Info("error parsing RPC call", "source", "rpc", "err", err)
batchRes[i] = NewRPCErrorRes(nil, err)
continue
}
batchRes[i] = s.handleSingleRPC(ctx, req)
}
writeBatchRPCRes(ctx, w, batchRes)
return
}
req, err := ParseRPCReq(body)
if err != nil {
log.Info("error parsing RPC call", "source", "rpc", "err", err)
writeRPCError(ctx, w, nil, err) writeRPCError(ctx, w, nil, err)
return return
} }
RecordRequestPayloadSize(ctx, req.Method, bodyReader.Len)
backendRes := s.handleSingleRPC(ctx, req)
writeRPCRes(ctx, w, backendRes)
}
func (s *Server) handleSingleRPC(ctx context.Context, req *RPCReq) *RPCRes {
if err := ValidateRPCReq(req); err != nil {
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
return NewRPCErrorRes(nil, err)
}
group := s.rpcMethodMappings[req.Method] group := s.rpcMethodMappings[req.Method]
if group == "" { if group == "" {
...@@ -143,16 +202,11 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) { ...@@ -143,16 +202,11 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
"method", req.Method, "method", req.Method,
) )
RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrMethodNotWhitelisted) RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrMethodNotWhitelisted)
writeRPCError(ctx, w, req.ID, ErrMethodNotWhitelisted) return NewRPCErrorRes(req.ID, ErrMethodNotWhitelisted)
return
} }
var backendRes *RPCRes var backendRes *RPCRes
backendRes, err = s.cache.GetRPC(ctx, req) backendRes, err := s.cache.GetRPC(ctx, req)
if err == nil && backendRes != nil {
writeRPCRes(ctx, w, backendRes)
return
}
if err != nil { if err != nil {
log.Warn( log.Warn(
"cache lookup error", "cache lookup error",
...@@ -160,6 +214,9 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) { ...@@ -160,6 +214,9 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
"err", err, "err", err,
) )
} }
if backendRes != nil {
return backendRes
}
backendRes, err = s.backendGroups[group].Forward(ctx, req) backendRes, err = s.backendGroups[group].Forward(ctx, req)
if err != nil { if err != nil {
...@@ -169,8 +226,7 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) { ...@@ -169,8 +226,7 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
"req_id", GetReqID(ctx), "req_id", GetReqID(ctx),
"err", err, "err", err,
) )
writeRPCError(ctx, w, req.ID, err) return NewRPCErrorRes(req.ID, err)
return
} }
if backendRes.Error == nil { if backendRes.Error == nil {
...@@ -183,7 +239,7 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) { ...@@ -183,7 +239,7 @@ func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
} }
} }
writeRPCRes(ctx, w, backendRes) return backendRes
} }
func (s *Server) HandleWS(w http.ResponseWriter, r *http.Request) { func (s *Server) HandleWS(w http.ResponseWriter, r *http.Request) {
...@@ -282,6 +338,7 @@ func writeRPCRes(ctx context.Context, w http.ResponseWriter, res *RPCRes) { ...@@ -282,6 +338,7 @@ func writeRPCRes(ctx context.Context, w http.ResponseWriter, res *RPCRes) {
statusCode = res.Error.HTTPErrorCode statusCode = res.Error.HTTPErrorCode
} }
w.Header().Set("content-type", "application/json")
w.WriteHeader(statusCode) w.WriteHeader(statusCode)
ww := &recordLenWriter{Writer: w} ww := &recordLenWriter{Writer: w}
enc := json.NewEncoder(ww) enc := json.NewEncoder(ww)
...@@ -294,6 +351,19 @@ func writeRPCRes(ctx context.Context, w http.ResponseWriter, res *RPCRes) { ...@@ -294,6 +351,19 @@ func writeRPCRes(ctx context.Context, w http.ResponseWriter, res *RPCRes) {
RecordResponsePayloadSize(ctx, ww.Len) RecordResponsePayloadSize(ctx, ww.Len)
} }
func writeBatchRPCRes(ctx context.Context, w http.ResponseWriter, res []*RPCRes) {
w.Header().Set("content-type", "application/json")
w.WriteHeader(200)
ww := &recordLenWriter{Writer: w}
enc := json.NewEncoder(ww)
if err := enc.Encode(res); err != nil {
log.Error("error writing batch rpc response", "err", err)
RecordRPCError(ctx, BackendProxyd, MethodUnknown, err)
return
}
RecordResponsePayloadSize(ctx, ww.Len)
}
func instrumentedHdlr(h http.Handler) http.HandlerFunc { func instrumentedHdlr(h http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
respTimer := prometheus.NewTimer(httpRequestDurationSumm) respTimer := prometheus.NewTimer(httpRequestDurationSumm)
......
ignores: [
"@openzeppelin/contracts",
"@types/mocha",
"@types/rimraf",
"@uniswap/v3-core",
"mocha",
"typescript",
]
\ No newline at end of file
...@@ -4,3 +4,14 @@ L1_URL= ...@@ -4,3 +4,14 @@ L1_URL=
L2_URL= L2_URL=
ADDRESS_MANAGER= ADDRESS_MANAGER=
L2_CHAINID= L2_CHAINID=
DTL_ENQUEUE_CONFIRMATIONS=
OVMCONTEXT_SPEC_NUM_TXS=1
# Can be set to true below if the withdrawal window is short enough
RUN_WITHDRAWAL_TESTS=false
RUN_DEBUG_TRACE_TESTS=false
RUN_REPLICA_TESTS=false
RUN_STRESS_TESTS=false
# Can be configured up or down as necessary
MOCHA_TIMEOUT=300000
# Set to true to make Mocha stop after the first failed test.
MOCHA_BAIL=false
\ No newline at end of file
# @eth-optimism/integration-tests # @eth-optimism/integration-tests
## 0.4.2
### Patch Changes
- 5787a55b: Updates to support nightly actor tests
- dad6fd9b: Update timestamp assertion for new logic
## 0.4.1 ## 0.4.1
### Patch Changes ### Patch Changes
......
import { utils, Wallet, Contract, ContractFactory } from 'ethers' import { utils, Wallet, Contract } from 'ethers'
import { ethers } from 'hardhat'
import { actor, setupActor, run, setupRun } from './lib/convenience' import { actor, setupActor, run, setupRun } from './lib/convenience'
import { OptimismEnv } from '../test/shared/env' import { OptimismEnv } from '../test/shared/env'
import StateDOS from '../artifacts/contracts/StateDOS.sol/StateDOS.json'
import { expect } from 'chai' import { expect } from 'chai'
interface Context { interface Context {
...@@ -16,11 +16,7 @@ actor('Trie DoS accounts', () => { ...@@ -16,11 +16,7 @@ actor('Trie DoS accounts', () => {
setupActor(async () => { setupActor(async () => {
env = await OptimismEnv.new() env = await OptimismEnv.new()
const factory = new ContractFactory( const factory = await ethers.getContractFactory('StateDOS', env.l2Wallet)
StateDOS.abi,
StateDOS.bytecode,
env.l2Wallet
)
contract = await factory.deploy() contract = await factory.deploy()
await contract.deployed() await contract.deployed()
}) })
......
// SPDX-License-Identifier: MIT
pragma solidity >=0.5.0;
pragma experimental ABIEncoderV2;
// https://github.com/makerdao/multicall/blob/master/src/Multicall.sol
/// @title Multicall - Aggregate results from multiple read-only function calls
/// @author Michael Elliot <mike@makerdao.com>
/// @author Joshua Levine <joshua@makerdao.com>
/// @author Nick Johnson <arachnid@notdot.net>
contract Multicall {
struct Call {
address target;
bytes callData;
}
function aggregate(Call[] memory calls) public returns (uint256 blockNumber, bytes[] memory returnData) {
blockNumber = block.number;
returnData = new bytes[](calls.length);
for (uint256 i = 0; i < calls.length; i++) {
(bool success, bytes memory ret) = calls[i].target.call(calls[i].callData);
require(success);
returnData[i] = ret;
}
}
// Helper functions
function getEthBalance(address addr) public view returns (uint256 balance) {
balance = addr.balance;
}
function getBlockHash(uint256 blockNumber) public view returns (bytes32 blockHash) {
blockHash = blockhash(blockNumber);
}
function getLastBlockHash() public view returns (bytes32 blockHash) {
blockHash = blockhash(block.number - 1);
}
function getCurrentBlockTimestamp() public view returns (uint256 timestamp) {
timestamp = block.timestamp;
}
function getCurrentBlockDifficulty() public view returns (uint256 difficulty) {
difficulty = block.difficulty;
}
function getCurrentBlockGasLimit() public view returns (uint256 gaslimit) {
gaslimit = block.gaslimit;
}
function getCurrentBlockCoinbase() public view returns (address coinbase) {
coinbase = block.coinbase;
}
}
...@@ -22,26 +22,13 @@ pragma solidity ^0.8.9; ...@@ -22,26 +22,13 @@ pragma solidity ^0.8.9;
// Can't do this until the package is published. // Can't do this until the package is published.
//import { iOVM_L1BlockNumber } from "@eth-optimism/contracts/iOVM_L1BlockNumber"; //import { iOVM_L1BlockNumber } from "@eth-optimism/contracts/iOVM_L1BlockNumber";
import { iOVM_L1BlockNumber } from "./OVMContextStorage.sol";
/// @title OVMMulticall - Aggregate results from multiple read-only function calls interface iOVM_L1BlockNumber {
contract OVMMulticall { function getL1BlockNumber() external view returns (uint256);
struct Call { }
address target;
bytes callData;
}
function aggregate(Call[] memory calls) public returns (uint256 blockNumber, bytes[] memory returnData) {
blockNumber = block.number;
returnData = new bytes[](calls.length);
for (uint256 i = 0; i < calls.length; i++) {
(bool success, bytes memory ret) = calls[i].target.call(calls[i].callData);
require(success);
returnData[i] = ret;
}
}
// Helper functions /// @title OVMContext - Helper Functions
contract OVMContext {
function getCurrentBlockTimestamp() public view returns (uint256 timestamp) { function getCurrentBlockTimestamp() public view returns (uint256 timestamp) {
timestamp = block.timestamp; timestamp = block.timestamp;
} }
......
// SPDX-License-Identifier: MIT // SPDX-License-Identifier: MIT
pragma solidity ^0.8.9; pragma solidity ^0.8.9;
// Can't do this until the package is published. import {OVMContext} from "./OVMContext.sol";
//import { iOVM_L1BlockNumber } from "@eth-optimism/contracts/iOVM_L1BlockNumber";
interface iOVM_L1BlockNumber {
function getL1BlockNumber() external view returns (uint256);
}
contract OVMContextStorage { contract OVMContextStorage is OVMContext {
mapping (uint256 => uint256) public l1BlockNumbers; mapping(uint256 => uint256) public l1BlockNumbers;
mapping (uint256 => uint256) public blockNumbers; mapping(uint256 => uint256) public blockNumbers;
mapping (uint256 => uint256) public timestamps; mapping(uint256 => uint256) public timestamps;
mapping (uint256 => uint256) public difficulty; mapping(uint256 => uint256) public difficulty;
mapping (uint256 => address) public coinbases; mapping(uint256 => address) public coinbases;
uint256 public index = 0; uint256 public index = 0;
fallback() external { fallback() external {
l1BlockNumbers[index] = iOVM_L1BlockNumber( l1BlockNumbers[index] = getCurrentL1BlockNumber();
0x4200000000000000000000000000000000000013 blockNumbers[index] = getCurrentBlockNumber();
).getL1BlockNumber(); timestamps[index] = getCurrentBlockTimestamp();
blockNumbers[index] = block.number;
timestamps[index] = block.timestamp;
difficulty[index] = block.difficulty; difficulty[index] = block.difficulty;
coinbases[index] = block.coinbase; coinbases[index] = block.coinbase;
index++; index++;
......
...@@ -4,7 +4,7 @@ import { HardhatUserConfig } from 'hardhat/types' ...@@ -4,7 +4,7 @@ import { HardhatUserConfig } from 'hardhat/types'
import '@nomiclabs/hardhat-ethers' import '@nomiclabs/hardhat-ethers'
import '@nomiclabs/hardhat-waffle' import '@nomiclabs/hardhat-waffle'
import 'hardhat-gas-reporter' import 'hardhat-gas-reporter'
import { isLiveNetwork } from './test/shared/utils' import { envConfig } from './test/shared/utils'
const enableGasReport = !!process.env.ENABLE_GAS_REPORT const enableGasReport = !!process.env.ENABLE_GAS_REPORT
...@@ -15,7 +15,8 @@ const config: HardhatUserConfig = { ...@@ -15,7 +15,8 @@ const config: HardhatUserConfig = {
}, },
}, },
mocha: { mocha: {
timeout: isLiveNetwork() ? 300_000 : 75_000, timeout: envConfig.MOCHA_TIMEOUT,
bail: envConfig.MOCHA_BAIL,
}, },
solidity: { solidity: {
compilers: [ compilers: [
......
--file ./test/setup-docker-compose-network.js
\ No newline at end of file
{ {
"private": true, "private": true,
"name": "@eth-optimism/integration-tests", "name": "@eth-optimism/integration-tests",
"version": "0.4.1", "version": "0.4.2",
"description": "[Optimism] Integration tests", "description": "[Optimism] Integration tests",
"scripts": { "scripts": {
"lint": "yarn lint:fix && yarn lint:check", "lint": "yarn lint:fix && yarn lint:check",
...@@ -28,9 +28,9 @@ ...@@ -28,9 +28,9 @@
"url": "https://github.com/ethereum-optimism/optimism.git" "url": "https://github.com/ethereum-optimism/optimism.git"
}, },
"devDependencies": { "devDependencies": {
"@eth-optimism/contracts": "0.5.7", "@eth-optimism/contracts": "0.5.8",
"@eth-optimism/core-utils": "0.7.3", "@eth-optimism/core-utils": "0.7.3",
"@eth-optimism/message-relayer": "0.2.11", "@eth-optimism/message-relayer": "0.2.12",
"@ethersproject/abstract-provider": "^5.5.1", "@ethersproject/abstract-provider": "^5.5.1",
"@ethersproject/providers": "^5.4.5", "@ethersproject/providers": "^5.4.5",
"@ethersproject/transactions": "^5.4.0", "@ethersproject/transactions": "^5.4.0",
...@@ -41,7 +41,6 @@ ...@@ -41,7 +41,6 @@
"@types/chai-as-promised": "^7.1.4", "@types/chai-as-promised": "^7.1.4",
"@types/mocha": "^8.2.2", "@types/mocha": "^8.2.2",
"@types/rimraf": "^3.0.0", "@types/rimraf": "^3.0.0",
"@types/shelljs": "^0.8.8",
"@typescript-eslint/eslint-plugin": "^4.26.0", "@typescript-eslint/eslint-plugin": "^4.26.0",
"@typescript-eslint/parser": "^4.26.0", "@typescript-eslint/parser": "^4.26.0",
"@uniswap/v3-core": "1.0.0", "@uniswap/v3-core": "1.0.0",
...@@ -52,7 +51,6 @@ ...@@ -52,7 +51,6 @@
"chai": "^4.3.4", "chai": "^4.3.4",
"chai-as-promised": "^7.1.1", "chai-as-promised": "^7.1.1",
"commander": "^8.3.0", "commander": "^8.3.0",
"docker-compose": "^0.23.8",
"dotenv": "^10.0.0", "dotenv": "^10.0.0",
"envalid": "^7.1.0", "envalid": "^7.1.0",
"eslint": "^7.27.0", "eslint": "^7.27.0",
...@@ -71,7 +69,6 @@ ...@@ -71,7 +69,6 @@
"mocha": "^8.4.0", "mocha": "^8.4.0",
"prom-client": "^14.0.1", "prom-client": "^14.0.1",
"rimraf": "^3.0.2", "rimraf": "^3.0.2",
"shelljs": "^0.8.4",
"typescript": "^4.3.5", "typescript": "^4.3.5",
"uniswap-v3-deploy-plugin": "^0.1.0" "uniswap-v3-deploy-plugin": "^0.1.0"
} }
......
...@@ -2,14 +2,19 @@ import { expect } from './shared/setup' ...@@ -2,14 +2,19 @@ import { expect } from './shared/setup'
/* Imports: External */ /* Imports: External */
import { Contract, ContractFactory } from 'ethers' import { Contract, ContractFactory } from 'ethers'
import { ethers } from 'hardhat'
import { applyL1ToL2Alias, awaitCondition } from '@eth-optimism/core-utils' import { applyL1ToL2Alias, awaitCondition } from '@eth-optimism/core-utils'
/* Imports: Internal */ /* Imports: Internal */
import simpleStorageJson from '../artifacts/contracts/SimpleStorage.sol/SimpleStorage.json'
import l2ReverterJson from '../artifacts/contracts/Reverter.sol/Reverter.json'
import { Direction } from './shared/watcher-utils' import { Direction } from './shared/watcher-utils'
import { OptimismEnv } from './shared/env' import { OptimismEnv } from './shared/env'
import { isMainnet } from './shared/utils' import {
DEFAULT_TEST_GAS_L1,
DEFAULT_TEST_GAS_L2,
envConfig,
sleep,
withdrawalTest,
} from './shared/utils'
describe('Basic L1<>L2 Communication', async () => { describe('Basic L1<>L2 Communication', async () => {
let Factory__L1SimpleStorage: ContractFactory let Factory__L1SimpleStorage: ContractFactory
...@@ -22,47 +27,43 @@ describe('Basic L1<>L2 Communication', async () => { ...@@ -22,47 +27,43 @@ describe('Basic L1<>L2 Communication', async () => {
before(async () => { before(async () => {
env = await OptimismEnv.new() env = await OptimismEnv.new()
Factory__L1SimpleStorage = new ContractFactory( Factory__L1SimpleStorage = await ethers.getContractFactory(
simpleStorageJson.abi, 'SimpleStorage',
simpleStorageJson.bytecode,
env.l1Wallet env.l1Wallet
) )
Factory__L2SimpleStorage = new ContractFactory( Factory__L2SimpleStorage = await ethers.getContractFactory(
simpleStorageJson.abi, 'SimpleStorage',
simpleStorageJson.bytecode,
env.l2Wallet env.l2Wallet
) )
Factory__L2Reverter = new ContractFactory( Factory__L2Reverter = await ethers.getContractFactory(
l2ReverterJson.abi, 'Reverter',
l2ReverterJson.bytecode,
env.l2Wallet env.l2Wallet
) )
}) })
beforeEach(async () => { beforeEach(async () => {
L1SimpleStorage = await Factory__L1SimpleStorage.deploy() L1SimpleStorage = await Factory__L1SimpleStorage.deploy()
await L1SimpleStorage.deployTransaction.wait() await L1SimpleStorage.deployed()
L2SimpleStorage = await Factory__L2SimpleStorage.deploy() L2SimpleStorage = await Factory__L2SimpleStorage.deploy()
await L2SimpleStorage.deployTransaction.wait() await L2SimpleStorage.deployed()
L2Reverter = await Factory__L2Reverter.deploy() L2Reverter = await Factory__L2Reverter.deploy()
await L2Reverter.deployTransaction.wait() await L2Reverter.deployed()
}) })
describe('L2 => L1', () => { describe('L2 => L1', () => {
it('should be able to perform a withdrawal from L2 -> L1', async function () { withdrawalTest(
if (await isMainnet(env)) { 'should be able to perform a withdrawal from L2 -> L1',
console.log('Skipping withdrawals test on mainnet.') async () => {
this.skip()
return
}
const value = `0x${'77'.repeat(32)}` const value = `0x${'77'.repeat(32)}`
// Send L2 -> L1 message. // Send L2 -> L1 message.
const transaction = await env.l2Messenger.sendMessage( const transaction = await env.l2Messenger.sendMessage(
L1SimpleStorage.address, L1SimpleStorage.address,
L1SimpleStorage.interface.encodeFunctionData('setValue', [value]), L1SimpleStorage.interface.encodeFunctionData('setValue', [value]),
5000000 5000000,
{
gasLimit: DEFAULT_TEST_GAS_L2,
}
) )
await transaction.wait() await transaction.wait()
await env.relayXDomainMessages(transaction) await env.relayXDomainMessages(transaction)
...@@ -76,7 +77,8 @@ describe('Basic L1<>L2 Communication', async () => { ...@@ -76,7 +77,8 @@ describe('Basic L1<>L2 Communication', async () => {
) )
expect(await L1SimpleStorage.value()).to.equal(value) expect(await L1SimpleStorage.value()).to.equal(value)
expect((await L1SimpleStorage.totalCount()).toNumber()).to.equal(1) expect((await L1SimpleStorage.totalCount()).toNumber()).to.equal(1)
}) }
)
}) })
describe('L1 => L2', () => { describe('L1 => L2', () => {
...@@ -87,7 +89,10 @@ describe('Basic L1<>L2 Communication', async () => { ...@@ -87,7 +89,10 @@ describe('Basic L1<>L2 Communication', async () => {
const transaction = await env.l1Messenger.sendMessage( const transaction = await env.l1Messenger.sendMessage(
L2SimpleStorage.address, L2SimpleStorage.address,
L2SimpleStorage.interface.encodeFunctionData('setValue', [value]), L2SimpleStorage.interface.encodeFunctionData('setValue', [value]),
5000000 5000000,
{
gasLimit: DEFAULT_TEST_GAS_L1,
}
) )
await env.waitForXDomainTransaction(transaction, Direction.L1ToL2) await env.waitForXDomainTransaction(transaction, Direction.L1ToL2)
...@@ -105,19 +110,41 @@ describe('Basic L1<>L2 Communication', async () => { ...@@ -105,19 +110,41 @@ describe('Basic L1<>L2 Communication', async () => {
expect((await L2SimpleStorage.totalCount()).toNumber()).to.equal(1) expect((await L2SimpleStorage.totalCount()).toNumber()).to.equal(1)
}) })
it('should deposit from L1 -> L2 directly via enqueue', async () => { it('should deposit from L1 -> L2 directly via enqueue', async function () {
this.timeout(
envConfig.MOCHA_TIMEOUT * 2 +
envConfig.DTL_ENQUEUE_CONFIRMATIONS * 15000
)
const value = `0x${'42'.repeat(32)}` const value = `0x${'42'.repeat(32)}`
// Send L1 -> L2 message. // Send L1 -> L2 message.
await env.ctc const tx = await env.ctc
.connect(env.l1Wallet) .connect(env.l1Wallet)
.enqueue( .enqueue(
L2SimpleStorage.address, L2SimpleStorage.address,
5000000, 5000000,
L2SimpleStorage.interface.encodeFunctionData('setValueNotXDomain', [ L2SimpleStorage.interface.encodeFunctionData('setValueNotXDomain', [
value, value,
]) ]),
{
gasLimit: DEFAULT_TEST_GAS_L1,
}
) )
const receipt = await tx.wait()
const waitUntilBlock =
receipt.blockNumber + envConfig.DTL_ENQUEUE_CONFIRMATIONS
let currBlock = await env.l1Provider.getBlockNumber()
while (currBlock <= waitUntilBlock) {
const progress =
envConfig.DTL_ENQUEUE_CONFIRMATIONS - (waitUntilBlock - currBlock)
console.log(
`Waiting for ${progress}/${envConfig.DTL_ENQUEUE_CONFIRMATIONS} confirmations.`
)
await sleep(5000)
currBlock = await env.l1Provider.getBlockNumber()
}
console.log('Enqueue should be confirmed.')
await awaitCondition( await awaitCondition(
async () => { async () => {
...@@ -142,8 +169,12 @@ describe('Basic L1<>L2 Communication', async () => { ...@@ -142,8 +169,12 @@ describe('Basic L1<>L2 Communication', async () => {
const transaction = await env.l1Messenger.sendMessage( const transaction = await env.l1Messenger.sendMessage(
L2SimpleStorage.address, L2SimpleStorage.address,
L2SimpleStorage.interface.encodeFunctionData('setValue', [value]), L2SimpleStorage.interface.encodeFunctionData('setValue', [value]),
5000000 5000000,
{
gasLimit: DEFAULT_TEST_GAS_L1,
}
) )
await transaction.wait()
const { remoteReceipt } = await env.waitForXDomainTransaction( const { remoteReceipt } = await env.waitForXDomainTransaction(
transaction, transaction,
...@@ -159,7 +190,10 @@ describe('Basic L1<>L2 Communication', async () => { ...@@ -159,7 +190,10 @@ describe('Basic L1<>L2 Communication', async () => {
const transaction = await env.l1Messenger.sendMessage( const transaction = await env.l1Messenger.sendMessage(
L2Reverter.address, L2Reverter.address,
L2Reverter.interface.encodeFunctionData('doRevert', []), L2Reverter.interface.encodeFunctionData('doRevert', []),
5000000 5000000,
{
gasLimit: DEFAULT_TEST_GAS_L1,
}
) )
const { remoteReceipt } = await env.waitForXDomainTransaction( const { remoteReceipt } = await env.waitForXDomainTransaction(
......
...@@ -5,7 +5,7 @@ import { ethers } from 'hardhat' ...@@ -5,7 +5,7 @@ import { ethers } from 'hardhat'
import * as L2Artifact from '@eth-optimism/contracts/artifacts/contracts/standards/L2StandardERC20.sol/L2StandardERC20.json' import * as L2Artifact from '@eth-optimism/contracts/artifacts/contracts/standards/L2StandardERC20.sol/L2StandardERC20.json'
import { OptimismEnv } from './shared/env' import { OptimismEnv } from './shared/env'
import { isLiveNetwork, isMainnet } from './shared/utils' import { withdrawalTest } from './shared/utils'
import { Direction } from './shared/watcher-utils' import { Direction } from './shared/watcher-utils'
describe('Bridged tokens', () => { describe('Bridged tokens', () => {
...@@ -25,14 +25,16 @@ describe('Bridged tokens', () => { ...@@ -25,14 +25,16 @@ describe('Bridged tokens', () => {
const other = Wallet.createRandom() const other = Wallet.createRandom()
otherWalletL1 = other.connect(env.l1Wallet.provider) otherWalletL1 = other.connect(env.l1Wallet.provider)
otherWalletL2 = other.connect(env.l2Wallet.provider) otherWalletL2 = other.connect(env.l2Wallet.provider)
await env.l1Wallet.sendTransaction({ let tx = await env.l1Wallet.sendTransaction({
to: otherWalletL1.address, to: otherWalletL1.address,
value: utils.parseEther('0.01'), value: utils.parseEther('0.01'),
}) })
await env.l2Wallet.sendTransaction({ await tx.wait()
tx = await env.l2Wallet.sendTransaction({
to: otherWalletL2.address, to: otherWalletL2.address,
value: utils.parseEther('0.01'), value: utils.parseEther('0.01'),
}) })
await tx.wait()
L1Factory__ERC20 = await ethers.getContractFactory('ERC20', env.l1Wallet) L1Factory__ERC20 = await ethers.getContractFactory('ERC20', env.l1Wallet)
L2Factory__ERC20 = new ethers.ContractFactory( L2Factory__ERC20 = new ethers.ContractFactory(
...@@ -77,7 +79,7 @@ describe('Bridged tokens', () => { ...@@ -77,7 +79,7 @@ describe('Bridged tokens', () => {
expect(await L2__ERC20.balanceOf(env.l2Wallet.address)).to.deep.equal( expect(await L2__ERC20.balanceOf(env.l2Wallet.address)).to.deep.equal(
BigNumber.from(1000) BigNumber.from(1000)
) )
}).timeout(isLiveNetwork() ? 300_000 : 120_000) })
it('should transfer tokens on L2', async () => { it('should transfer tokens on L2', async () => {
const tx = await L2__ERC20.transfer(otherWalletL1.address, 500) const tx = await L2__ERC20.transfer(otherWalletL1.address, 500)
...@@ -90,13 +92,9 @@ describe('Bridged tokens', () => { ...@@ -90,13 +92,9 @@ describe('Bridged tokens', () => {
) )
}) })
it('should withdraw tokens from L2 to the depositor', async function () { withdrawalTest(
if (await isMainnet(env)) { 'should withdraw tokens from L2 to the depositor',
console.log('Skipping withdrawals test on mainnet.') async () => {
this.skip()
return
}
const tx = await env.l2Bridge.withdraw( const tx = await env.l2Bridge.withdraw(
L2__ERC20.address, L2__ERC20.address,
500, 500,
...@@ -111,15 +109,12 @@ describe('Bridged tokens', () => { ...@@ -111,15 +109,12 @@ describe('Bridged tokens', () => {
expect(await L2__ERC20.balanceOf(env.l2Wallet.address)).to.deep.equal( expect(await L2__ERC20.balanceOf(env.l2Wallet.address)).to.deep.equal(
BigNumber.from(0) BigNumber.from(0)
) )
}).timeout(isLiveNetwork() ? 300_000 : 120_000)
it('should withdraw tokens from L2 to the transfer recipient', async function () {
if (await isMainnet(env)) {
console.log('Skipping withdrawals test on mainnet.')
this.skip()
return
} }
)
withdrawalTest(
'should withdraw tokens from L2 to the transfer recipient',
async () => {
const tx = await env.l2Bridge const tx = await env.l2Bridge
.connect(otherWalletL2) .connect(otherWalletL2)
.withdraw(L2__ERC20.address, 500, 2000000, '0x') .withdraw(L2__ERC20.address, 500, 2000000, '0x')
...@@ -131,5 +126,6 @@ describe('Bridged tokens', () => { ...@@ -131,5 +126,6 @@ describe('Bridged tokens', () => {
expect(await L2__ERC20.balanceOf(otherWalletL2.address)).to.deep.equal( expect(await L2__ERC20.balanceOf(otherWalletL2.address)).to.deep.equal(
BigNumber.from(0) BigNumber.from(0)
) )
}).timeout(isLiveNetwork() ? 300_000 : 120_000) }
)
}) })
...@@ -6,14 +6,11 @@ import { serialize } from '@ethersproject/transactions' ...@@ -6,14 +6,11 @@ import { serialize } from '@ethersproject/transactions'
import { predeploys, getContractFactory } from '@eth-optimism/contracts' import { predeploys, getContractFactory } from '@eth-optimism/contracts'
/* Imports: Internal */ /* Imports: Internal */
import { isLiveNetwork } from './shared/utils' import { hardhatTest } from './shared/utils'
import { OptimismEnv } from './shared/env' import { OptimismEnv } from './shared/env'
import { Direction } from './shared/watcher-utils' import { Direction } from './shared/watcher-utils'
const setPrices = async (env: OptimismEnv, value: number | BigNumber) => { const setPrices = async (env: OptimismEnv, value: number | BigNumber) => {
if (isLiveNetwork()) {
return
}
const gasPrice = await env.gasPriceOracle.setGasPrice(value) const gasPrice = await env.gasPriceOracle.setGasPrice(value)
await gasPrice.wait() await gasPrice.wait()
const baseFee = await env.gasPriceOracle.setL1BaseFee(value) const baseFee = await env.gasPriceOracle.setL1BaseFee(value)
...@@ -28,24 +25,25 @@ describe('Fee Payment Integration Tests', async () => { ...@@ -28,24 +25,25 @@ describe('Fee Payment Integration Tests', async () => {
env = await OptimismEnv.new() env = await OptimismEnv.new()
}) })
if (!isLiveNetwork()) { hardhatTest(
it(`should return eth_gasPrice equal to OVM_GasPriceOracle.gasPrice`, async () => { `should return eth_gasPrice equal to OVM_GasPriceOracle.gasPrice`,
async () => {
const assertGasPrice = async () => { const assertGasPrice = async () => {
const gasPrice = await env.l2Wallet.getGasPrice() const gasPrice = await env.l2Wallet.getGasPrice()
const oracleGasPrice = await env.gasPriceOracle.gasPrice() const oracleGasPrice = await env.gasPriceOracle.gasPrice()
expect(gasPrice).to.deep.equal(oracleGasPrice) expect(gasPrice).to.deep.equal(oracleGasPrice)
} }
assertGasPrice() await assertGasPrice()
// update the gas price // update the gas price
const tx = await env.gasPriceOracle.setGasPrice(1000) const tx = await env.gasPriceOracle.setGasPrice(1000)
await tx.wait() await tx.wait()
assertGasPrice() await assertGasPrice()
})
} }
)
it('Paying a nonzero but acceptable gasPrice fee', async () => { hardhatTest('Paying a nonzero but acceptable gasPrice fee', async () => {
await setPrices(env, 1000) await setPrices(env, 1000)
const amount = utils.parseEther('0.0000001') const amount = utils.parseEther('0.0000001')
...@@ -97,7 +95,7 @@ describe('Fee Payment Integration Tests', async () => { ...@@ -97,7 +95,7 @@ describe('Fee Payment Integration Tests', async () => {
await setPrices(env, 1) await setPrices(env, 1)
}) })
it('should compute correct fee', async () => { hardhatTest('should compute correct fee', async () => {
await setPrices(env, 1000) await setPrices(env, 1000)
const preBalance = await env.l2Wallet.getBalance() const preBalance = await env.l2Wallet.getBalance()
...@@ -149,15 +147,13 @@ describe('Fee Payment Integration Tests', async () => { ...@@ -149,15 +147,13 @@ describe('Fee Payment Integration Tests', async () => {
await expect(env.sequencerFeeVault.withdraw()).to.be.rejected await expect(env.sequencerFeeVault.withdraw()).to.be.rejected
}) })
it('should be able to withdraw fees back to L1 once the minimum is met', async function () { hardhatTest(
if (isLiveNetwork()) { 'should be able to withdraw fees back to L1 once the minimum is met',
this.skip() async () => {
return
}
const l1FeeWallet = await env.sequencerFeeVault.l1FeeWallet() const l1FeeWallet = await env.sequencerFeeVault.l1FeeWallet()
const balanceBefore = await env.l1Wallet.provider.getBalance(l1FeeWallet) const balanceBefore = await env.l1Wallet.provider.getBalance(l1FeeWallet)
const withdrawalAmount = await env.sequencerFeeVault.MIN_WITHDRAWAL_AMOUNT() const withdrawalAmount =
await env.sequencerFeeVault.MIN_WITHDRAWAL_AMOUNT()
// Transfer the minimum required to withdraw. // Transfer the minimum required to withdraw.
const tx = await env.l2Wallet.sendTransaction({ const tx = await env.l2Wallet.sendTransaction({
...@@ -183,5 +179,6 @@ describe('Fee Payment Integration Tests', async () => { ...@@ -183,5 +179,6 @@ describe('Fee Payment Integration Tests', async () => {
expect(balanceAfter.sub(balanceBefore)).to.deep.equal( expect(balanceAfter.sub(balanceBefore)).to.deep.equal(
BigNumber.from(vaultBalance) BigNumber.from(vaultBalance)
) )
}) }
)
}) })
...@@ -45,7 +45,7 @@ describe('Native ETH value integration tests', () => { ...@@ -45,7 +45,7 @@ describe('Native ETH value integration tests', () => {
const there = await wallet.sendTransaction({ const there = await wallet.sendTransaction({
to: other.address, to: other.address,
value, value,
gasPrice: await gasPriceForL2(env), gasPrice: await gasPriceForL2(),
}) })
const thereReceipt = await there.wait() const thereReceipt = await there.wait()
const thereGas = thereReceipt.gasUsed.mul(there.gasPrice) const thereGas = thereReceipt.gasUsed.mul(there.gasPrice)
...@@ -63,7 +63,7 @@ describe('Native ETH value integration tests', () => { ...@@ -63,7 +63,7 @@ describe('Native ETH value integration tests', () => {
const backAgain = await other.sendTransaction({ const backAgain = await other.sendTransaction({
to: wallet.address, to: wallet.address,
value: backVal, value: backVal,
gasPrice: await gasPriceForL2(env), gasPrice: await gasPriceForL2(),
}) })
const backReceipt = await backAgain.wait() const backReceipt = await backAgain.wait()
const backGas = backReceipt.gasUsed.mul(backAgain.gasPrice) const backGas = backReceipt.gasUsed.mul(backAgain.gasPrice)
...@@ -169,7 +169,7 @@ describe('Native ETH value integration tests', () => { ...@@ -169,7 +169,7 @@ describe('Native ETH value integration tests', () => {
it('should allow ETH to be sent', async () => { it('should allow ETH to be sent', async () => {
const sendAmount = 15 const sendAmount = 15
const tx = await ValueCalls0.simpleSend(ValueCalls1.address, sendAmount, { const tx = await ValueCalls0.simpleSend(ValueCalls1.address, sendAmount, {
gasPrice: await gasPriceForL2(env), gasPrice: await gasPriceForL2(),
}) })
await tx.wait() await tx.wait()
......
...@@ -9,11 +9,15 @@ import { expectApprox } from '@eth-optimism/core-utils' ...@@ -9,11 +9,15 @@ import { expectApprox } from '@eth-optimism/core-utils'
/* Imports: Internal */ /* Imports: Internal */
import { Direction } from './shared/watcher-utils' import { Direction } from './shared/watcher-utils'
import { isMainnet, PROXY_SEQUENCER_ENTRYPOINT_ADDRESS } from './shared/utils' import {
DEFAULT_TEST_GAS_L1,
DEFAULT_TEST_GAS_L2,
envConfig,
PROXY_SEQUENCER_ENTRYPOINT_ADDRESS,
withdrawalTest,
} from './shared/utils'
import { OptimismEnv } from './shared/env' import { OptimismEnv } from './shared/env'
const DEFAULT_TEST_GAS_L1 = 330_000
const DEFAULT_TEST_GAS_L2 = 1_300_000
// TX size enforced by CTC: // TX size enforced by CTC:
const MAX_ROLLUP_TX_SIZE = 50_000 const MAX_ROLLUP_TX_SIZE = 50_000
...@@ -183,13 +187,7 @@ describe('Native ETH Integration Tests', async () => { ...@@ -183,13 +187,7 @@ describe('Native ETH Integration Tests', async () => {
).to.be.reverted ).to.be.reverted
}) })
it('withdraw', async function () { withdrawalTest('withdraw', async () => {
if (await isMainnet(env)) {
console.log('Skipping withdrawals test on mainnet.')
this.skip()
return
}
const withdrawAmount = BigNumber.from(3) const withdrawAmount = BigNumber.from(3)
const preBalances = await getBalances(env) const preBalances = await getBalances(env)
expect( expect(
...@@ -231,13 +229,7 @@ describe('Native ETH Integration Tests', async () => { ...@@ -231,13 +229,7 @@ describe('Native ETH Integration Tests', async () => {
) )
}) })
it('withdrawTo', async function () { withdrawalTest('withdrawTo', async () => {
if (await isMainnet(env)) {
console.log('Skipping withdrawals test on mainnet.')
this.skip()
return
}
const withdrawAmount = BigNumber.from(3) const withdrawAmount = BigNumber.from(3)
const preBalances = await getBalances(env) const preBalances = await getBalances(env)
...@@ -295,13 +287,9 @@ describe('Native ETH Integration Tests', async () => { ...@@ -295,13 +287,9 @@ describe('Native ETH Integration Tests', async () => {
) )
}) })
it('deposit, transfer, withdraw', async function () { withdrawalTest(
if (await isMainnet(env)) { 'deposit, transfer, withdraw',
console.log('Skipping withdrawals test on mainnet.') async () => {
this.skip()
return
}
// 1. deposit // 1. deposit
const amount = utils.parseEther('1') const amount = utils.parseEther('1')
await env.waitForXDomainTransaction( await env.waitForXDomainTransaction(
...@@ -363,5 +351,7 @@ describe('Native ETH Integration Tests', async () => { ...@@ -363,5 +351,7 @@ describe('Native ETH Integration Tests', async () => {
const l2BalanceAfter = await other.getBalance() const l2BalanceAfter = await other.getBalance()
expect(l1BalanceAfter).to.deep.eq(l1BalanceBefore.add(withdrawnAmount)) expect(l1BalanceAfter).to.deep.eq(l1BalanceBefore.add(withdrawnAmount))
expect(l2BalanceAfter).to.deep.eq(amount.sub(withdrawnAmount).sub(fee)) expect(l2BalanceAfter).to.deep.eq(amount.sub(withdrawnAmount).sub(fee))
}) },
envConfig.MOCHA_TIMEOUT * 3
)
}) })
...@@ -7,7 +7,12 @@ import { predeploys } from '@eth-optimism/contracts' ...@@ -7,7 +7,12 @@ import { predeploys } from '@eth-optimism/contracts'
import { Contract, BigNumber } from 'ethers' import { Contract, BigNumber } from 'ethers'
/* Imports: Internal */ /* Imports: Internal */
import { l2Provider, l1Provider, IS_LIVE_NETWORK } from './shared/utils' import {
l2Provider,
l1Provider,
envConfig,
DEFAULT_TEST_GAS_L1,
} from './shared/utils'
import { OptimismEnv } from './shared/env' import { OptimismEnv } from './shared/env'
import { Direction } from './shared/watcher-utils' import { Direction } from './shared/watcher-utils'
...@@ -23,29 +28,25 @@ describe('OVM Context: Layer 2 EVM Context', () => { ...@@ -23,29 +28,25 @@ describe('OVM Context: Layer 2 EVM Context', () => {
env = await OptimismEnv.new() env = await OptimismEnv.new()
}) })
let OVMMulticall: Contract let Multicall: Contract
let OVMContextStorage: Contract let OVMContextStorage: Contract
beforeEach(async () => { beforeEach(async () => {
const OVMContextStorageFactory = await ethers.getContractFactory( const OVMContextStorageFactory = await ethers.getContractFactory(
'OVMContextStorage', 'OVMContextStorage',
env.l2Wallet env.l2Wallet
) )
const OVMMulticallFactory = await ethers.getContractFactory( const MulticallFactory = await ethers.getContractFactory(
'OVMMulticall', 'Multicall',
env.l2Wallet env.l2Wallet
) )
OVMContextStorage = await OVMContextStorageFactory.deploy() OVMContextStorage = await OVMContextStorageFactory.deploy()
await OVMContextStorage.deployTransaction.wait() await OVMContextStorage.deployTransaction.wait()
OVMMulticall = await OVMMulticallFactory.deploy() Multicall = await MulticallFactory.deploy()
await OVMMulticall.deployTransaction.wait() await Multicall.deployTransaction.wait()
}) })
let numTxs = 5 const numTxs = envConfig.OVMCONTEXT_SPEC_NUM_TXS
if (IS_LIVE_NETWORK) {
// Tests take way too long if we don't reduce the number of txs here.
numTxs = 1
}
it('enqueue: L1 contextual values are correctly set in L2', async () => { it('enqueue: L1 contextual values are correctly set in L2', async () => {
for (let i = 0; i < numTxs; i++) { for (let i = 0; i < numTxs; i++) {
...@@ -54,7 +55,10 @@ describe('OVM Context: Layer 2 EVM Context', () => { ...@@ -54,7 +55,10 @@ describe('OVM Context: Layer 2 EVM Context', () => {
const tx = await env.l1Messenger.sendMessage( const tx = await env.l1Messenger.sendMessage(
OVMContextStorage.address, OVMContextStorage.address,
'0x', '0x',
2_000_000 2_000_000,
{
gasLimit: DEFAULT_TEST_GAS_L1,
}
) )
// Wait for the transaction to be sent over to L2. // Wait for the transaction to be sent over to L2.
...@@ -89,7 +93,7 @@ describe('OVM Context: Layer 2 EVM Context', () => { ...@@ -89,7 +93,7 @@ describe('OVM Context: Layer 2 EVM Context', () => {
const coinbase = await OVMContextStorage.coinbases(i) const coinbase = await OVMContextStorage.coinbases(i)
expect(coinbase).to.equal(predeploys.OVM_SequencerFeeVault) expect(coinbase).to.equal(predeploys.OVM_SequencerFeeVault)
} }
}).timeout(150000) // this specific test takes a while because it involves L1 to L2 txs })
it('should set correct OVM Context for `eth_call`', async () => { it('should set correct OVM Context for `eth_call`', async () => {
for (let i = 0; i < numTxs; i++) { for (let i = 0; i < numTxs; i++) {
...@@ -101,21 +105,23 @@ describe('OVM Context: Layer 2 EVM Context', () => { ...@@ -101,21 +105,23 @@ describe('OVM Context: Layer 2 EVM Context', () => {
await dummyTx.wait() await dummyTx.wait()
const block = await L2Provider.getBlockWithTransactions('latest') const block = await L2Provider.getBlockWithTransactions('latest')
const [, returnData] = await OVMMulticall.callStatic.aggregate( const [, returnData] = await Multicall.callStatic.aggregate(
[ [
[ [
OVMMulticall.address, OVMContextStorage.address,
OVMMulticall.interface.encodeFunctionData( OVMContextStorage.interface.encodeFunctionData(
'getCurrentBlockTimestamp' 'getCurrentBlockTimestamp'
), ),
], ],
[ [
OVMMulticall.address, OVMContextStorage.address,
OVMMulticall.interface.encodeFunctionData('getCurrentBlockNumber'), OVMContextStorage.interface.encodeFunctionData(
'getCurrentBlockNumber'
),
], ],
[ [
OVMMulticall.address, OVMContextStorage.address,
OVMMulticall.interface.encodeFunctionData( OVMContextStorage.interface.encodeFunctionData(
'getCurrentL1BlockNumber' 'getCurrentL1BlockNumber'
), ),
], ],
...@@ -141,19 +147,23 @@ describe('OVM Context: Layer 2 EVM Context', () => { ...@@ -141,19 +147,23 @@ describe('OVM Context: Layer 2 EVM Context', () => {
*/ */
it('should return same timestamp and blocknumbers between `eth_call` and `rollup_getInfo`', async () => { it('should return same timestamp and blocknumbers between `eth_call` and `rollup_getInfo`', async () => {
// As atomically as possible, call `rollup_getInfo` and OVMMulticall for the // As atomically as possible, call `rollup_getInfo` and Multicall for the
// blocknumber and timestamp. If this is not atomic, then the sequencer can // blocknumber and timestamp. If this is not atomic, then the sequencer can
    // happen to update the timestamp between the `eth_call` and the `rollup_getInfo` // happen to update the timestamp between the `eth_call` and the `rollup_getInfo`
const [info, [, returnData]] = await Promise.all([ const [info, [, returnData]] = await Promise.all([
L2Provider.send('rollup_getInfo', []), L2Provider.send('rollup_getInfo', []),
OVMMulticall.callStatic.aggregate([ Multicall.callStatic.aggregate([
[ [
OVMMulticall.address, OVMContextStorage.address,
OVMMulticall.interface.encodeFunctionData('getCurrentBlockTimestamp'), OVMContextStorage.interface.encodeFunctionData(
'getCurrentBlockTimestamp'
),
], ],
[ [
OVMMulticall.address, OVMContextStorage.address,
OVMMulticall.interface.encodeFunctionData('getCurrentL1BlockNumber'), OVMContextStorage.interface.encodeFunctionData(
'getCurrentL1BlockNumber'
),
], ],
]), ]),
]) ])
......
...@@ -7,7 +7,7 @@ import { injectL2Context, applyL1ToL2Alias } from '@eth-optimism/core-utils' ...@@ -7,7 +7,7 @@ import { injectL2Context, applyL1ToL2Alias } from '@eth-optimism/core-utils'
/* Imports: External */ /* Imports: External */
import { OptimismEnv } from './shared/env' import { OptimismEnv } from './shared/env'
import { Direction } from './shared/watcher-utils' import { Direction } from './shared/watcher-utils'
import { isLiveNetwork } from './shared/utils' import { DEFAULT_TEST_GAS_L1, envConfig } from './shared/utils'
describe('Queue Ingestion', () => { describe('Queue Ingestion', () => {
let env: OptimismEnv let env: OptimismEnv
...@@ -21,7 +21,7 @@ describe('Queue Ingestion', () => { ...@@ -21,7 +21,7 @@ describe('Queue Ingestion', () => {
// that are in the queue and submit them. L2 will pick up the // that are in the queue and submit them. L2 will pick up the
// sequencer batch appended event and play the transactions. // sequencer batch appended event and play the transactions.
it('should order transactions correctly', async () => { it('should order transactions correctly', async () => {
const numTxs = 5 const numTxs = envConfig.OVMCONTEXT_SPEC_NUM_TXS
// Enqueue some transactions by building the calldata and then sending // Enqueue some transactions by building the calldata and then sending
// the transaction to Layer 1 // the transaction to Layer 1
...@@ -30,7 +30,10 @@ describe('Queue Ingestion', () => { ...@@ -30,7 +30,10 @@ describe('Queue Ingestion', () => {
const tx = await env.l1Messenger.sendMessage( const tx = await env.l1Messenger.sendMessage(
`0x${`${i}`.repeat(40)}`, `0x${`${i}`.repeat(40)}`,
`0x0${i}`, `0x0${i}`,
1_000_000 1_000_000,
{
gasLimit: DEFAULT_TEST_GAS_L1,
}
) )
await tx.wait() await tx.wait()
txs.push(tx) txs.push(tx)
...@@ -62,5 +65,5 @@ describe('Queue Ingestion', () => { ...@@ -62,5 +65,5 @@ describe('Queue Ingestion', () => {
) )
expect(l2Tx.l1BlockNumber).to.equal(l1TxReceipt.blockNumber) expect(l2Tx.l1BlockNumber).to.equal(l1TxReceipt.blockNumber)
} }
}).timeout(isLiveNetwork() ? 300_000 : 100_000) })
}) })
...@@ -4,26 +4,26 @@ import { ...@@ -4,26 +4,26 @@ import {
defaultTransactionFactory, defaultTransactionFactory,
gasPriceForL2, gasPriceForL2,
sleep, sleep,
isLiveNetwork, envConfig,
} from './shared/utils' } from './shared/utils'
import { TransactionReceipt } from '@ethersproject/abstract-provider' import { TransactionReceipt } from '@ethersproject/abstract-provider'
describe('Replica Tests', () => { describe('Replica Tests', () => {
let env: OptimismEnv let env: OptimismEnv
before(async () => { before(async function () {
if (!envConfig.RUN_REPLICA_TESTS) {
this.skip()
return
}
env = await OptimismEnv.new() env = await OptimismEnv.new()
}) })
describe('Matching blocks', () => { describe('Matching blocks', () => {
if (isLiveNetwork()) {
console.log('Skipping replica tests on live network')
return
}
it('should sync a transaction', async () => { it('should sync a transaction', async () => {
const tx = defaultTransactionFactory() const tx = defaultTransactionFactory()
tx.gasPrice = await gasPriceForL2(env) tx.gasPrice = await gasPriceForL2()
const result = await env.l2Wallet.sendTransaction(tx) const result = await env.l2Wallet.sendTransaction(tx)
let receipt: TransactionReceipt let receipt: TransactionReceipt
...@@ -48,7 +48,7 @@ describe('Replica Tests', () => { ...@@ -48,7 +48,7 @@ describe('Replica Tests', () => {
const tx = { const tx = {
...defaultTransactionFactory(), ...defaultTransactionFactory(),
nonce: await env.l2Wallet.getTransactionCount(), nonce: await env.l2Wallet.getTransactionCount(),
gasPrice: await gasPriceForL2(env), gasPrice: await gasPriceForL2(),
chainId: null, // Disables EIP155 transaction signing. chainId: null, // Disables EIP155 transaction signing.
} }
const signed = await env.l2Wallet.signTransaction(tx) const signed = await env.l2Wallet.signTransaction(tx)
...@@ -71,5 +71,32 @@ describe('Replica Tests', () => { ...@@ -71,5 +71,32 @@ describe('Replica Tests', () => {
expect(sequencerBlock.stateRoot).to.deep.eq(replicaBlock.stateRoot) expect(sequencerBlock.stateRoot).to.deep.eq(replicaBlock.stateRoot)
expect(sequencerBlock.hash).to.deep.eq(replicaBlock.hash) expect(sequencerBlock.hash).to.deep.eq(replicaBlock.hash)
}) })
it('should forward tx to sequencer', async () => {
const tx = {
...defaultTransactionFactory(),
nonce: await env.l2Wallet.getTransactionCount(),
gasPrice: await gasPriceForL2(),
}
const signed = await env.l2Wallet.signTransaction(tx)
const result = await env.replicaProvider.sendTransaction(signed)
let receipt: TransactionReceipt
while (!receipt) {
receipt = await env.replicaProvider.getTransactionReceipt(result.hash)
await sleep(200)
}
const sequencerBlock = (await env.l2Provider.getBlock(
result.blockNumber
)) as any
const replicaBlock = (await env.replicaProvider.getBlock(
result.blockNumber
)) as any
expect(sequencerBlock.stateRoot).to.deep.eq(replicaBlock.stateRoot)
expect(sequencerBlock.hash).to.deep.eq(replicaBlock.hash)
})
}) })
}) })
...@@ -10,15 +10,16 @@ import { ...@@ -10,15 +10,16 @@ import {
defaultTransactionFactory, defaultTransactionFactory,
fundUser, fundUser,
L2_CHAINID, L2_CHAINID,
isLiveNetwork,
gasPriceForL2, gasPriceForL2,
isHardhat,
hardhatTest,
envConfig,
} from './shared/utils' } from './shared/utils'
import { OptimismEnv } from './shared/env' import { OptimismEnv } from './shared/env'
import { import {
TransactionReceipt, TransactionReceipt,
TransactionRequest, TransactionRequest,
} from '@ethersproject/providers' } from '@ethersproject/providers'
import simpleStorageJson from '../artifacts/contracts/SimpleStorage.sol/SimpleStorage.json'
describe('Basic RPC tests', () => { describe('Basic RPC tests', () => {
let env: OptimismEnv let env: OptimismEnv
...@@ -64,7 +65,7 @@ describe('Basic RPC tests', () => { ...@@ -64,7 +65,7 @@ describe('Basic RPC tests', () => {
describe('eth_sendRawTransaction', () => { describe('eth_sendRawTransaction', () => {
it('should correctly process a valid transaction', async () => { it('should correctly process a valid transaction', async () => {
const tx = defaultTransactionFactory() const tx = defaultTransactionFactory()
tx.gasPrice = await gasPriceForL2(env) tx.gasPrice = await gasPriceForL2()
const nonce = await wallet.getTransactionCount() const nonce = await wallet.getTransactionCount()
const result = await wallet.sendTransaction(tx) const result = await wallet.sendTransaction(tx)
...@@ -78,7 +79,7 @@ describe('Basic RPC tests', () => { ...@@ -78,7 +79,7 @@ describe('Basic RPC tests', () => {
it('should not accept a transaction with the wrong chain ID', async () => { it('should not accept a transaction with the wrong chain ID', async () => {
const tx = { const tx = {
...defaultTransactionFactory(), ...defaultTransactionFactory(),
gasPrice: await gasPriceForL2(env), gasPrice: await gasPriceForL2(),
chainId: (await wallet.getChainId()) + 1, chainId: (await wallet.getChainId()) + 1,
} }
...@@ -91,7 +92,7 @@ describe('Basic RPC tests', () => { ...@@ -91,7 +92,7 @@ describe('Basic RPC tests', () => {
const tx = { const tx = {
...defaultTransactionFactory(), ...defaultTransactionFactory(),
nonce: await wallet.getTransactionCount(), nonce: await wallet.getTransactionCount(),
gasPrice: await gasPriceForL2(env), gasPrice: await gasPriceForL2(),
chainId: null, // Disables EIP155 transaction signing. chainId: null, // Disables EIP155 transaction signing.
} }
const signed = await wallet.signTransaction(tx) const signed = await wallet.signTransaction(tx)
...@@ -105,7 +106,7 @@ describe('Basic RPC tests', () => { ...@@ -105,7 +106,7 @@ describe('Basic RPC tests', () => {
it('should accept a transaction with a value', async () => { it('should accept a transaction with a value', async () => {
const tx = { const tx = {
...defaultTransactionFactory(), ...defaultTransactionFactory(),
gasPrice: await gasPriceForL2(env), gasPrice: await gasPriceForL2(),
chainId: await env.l2Wallet.getChainId(), chainId: await env.l2Wallet.getChainId(),
data: '0x', data: '0x',
value: ethers.utils.parseEther('0.1'), value: ethers.utils.parseEther('0.1'),
...@@ -125,7 +126,7 @@ describe('Basic RPC tests', () => { ...@@ -125,7 +126,7 @@ describe('Basic RPC tests', () => {
const balance = await env.l2Wallet.getBalance() const balance = await env.l2Wallet.getBalance()
const tx = { const tx = {
...defaultTransactionFactory(), ...defaultTransactionFactory(),
gasPrice: await gasPriceForL2(env), gasPrice: await gasPriceForL2(),
chainId: await env.l2Wallet.getChainId(), chainId: await env.l2Wallet.getChainId(),
data: '0x', data: '0x',
value: balance.add(ethers.utils.parseEther('1')), value: balance.add(ethers.utils.parseEther('1')),
...@@ -145,13 +146,12 @@ describe('Basic RPC tests', () => { ...@@ -145,13 +146,12 @@ describe('Basic RPC tests', () => {
}) })
it('should reject a transaction with too low of a fee', async () => { it('should reject a transaction with too low of a fee', async () => {
if (isLiveNetwork()) { const isHH = await isHardhat()
console.log('Skipping too low of a fee test on live network') let gasPrice
return if (isHH) {
} gasPrice = await env.gasPriceOracle.gasPrice()
const gasPrice = await env.gasPriceOracle.gasPrice()
await env.gasPriceOracle.setGasPrice(1000) await env.gasPriceOracle.setGasPrice(1000)
}
const tx = { const tx = {
...defaultTransactionFactory(), ...defaultTransactionFactory(),
...@@ -159,18 +159,16 @@ describe('Basic RPC tests', () => { ...@@ -159,18 +159,16 @@ describe('Basic RPC tests', () => {
} }
await expect(env.l2Wallet.sendTransaction(tx)).to.be.rejectedWith( await expect(env.l2Wallet.sendTransaction(tx)).to.be.rejectedWith(
`gas price too low: 1 wei, use at least tx.gasPrice = 1000 wei` /gas price too low: 1 wei, use at least tx\.gasPrice = \d+ wei/
) )
if (isHH) {
// Reset the gas price to its original price // Reset the gas price to its original price
await env.gasPriceOracle.setGasPrice(gasPrice) await env.gasPriceOracle.setGasPrice(gasPrice)
}
}) })
it('should reject a transaction with too high of a fee', async () => { it('should reject a transaction with too high of a fee', async () => {
if (isLiveNetwork()) {
console.log('Skpping too high of a fee test on live network')
return
}
const gasPrice = await env.gasPriceOracle.gasPrice() const gasPrice = await env.gasPriceOracle.gasPrice()
const largeGasPrice = gasPrice.mul(10) const largeGasPrice = gasPrice.mul(10)
const tx = { const tx = {
...@@ -331,7 +329,7 @@ describe('Basic RPC tests', () => { ...@@ -331,7 +329,7 @@ describe('Basic RPC tests', () => {
it('includes L1 gas price and L1 gas used', async () => { it('includes L1 gas price and L1 gas used', async () => {
const tx = await env.l2Wallet.populateTransaction({ const tx = await env.l2Wallet.populateTransaction({
to: env.l2Wallet.address, to: env.l2Wallet.address,
gasPrice: isLiveNetwork() ? 10000 : 1, gasPrice: await gasPriceForL2(),
}) })
const raw = serialize({ const raw = serialize({
...@@ -366,7 +364,7 @@ describe('Basic RPC tests', () => { ...@@ -366,7 +364,7 @@ describe('Basic RPC tests', () => {
describe('eth_getTransactionByHash', () => { describe('eth_getTransactionByHash', () => {
it('should be able to get all relevant l1/l2 transaction data', async () => { it('should be able to get all relevant l1/l2 transaction data', async () => {
const tx = defaultTransactionFactory() const tx = defaultTransactionFactory()
tx.gasPrice = await gasPriceForL2(env) tx.gasPrice = await gasPriceForL2()
const result = await wallet.sendTransaction(tx) const result = await wallet.sendTransaction(tx)
await result.wait() await result.wait()
...@@ -381,7 +379,7 @@ describe('Basic RPC tests', () => { ...@@ -381,7 +379,7 @@ describe('Basic RPC tests', () => {
it('should return the block and all included transactions', async () => { it('should return the block and all included transactions', async () => {
// Send a transaction and wait for it to be mined. // Send a transaction and wait for it to be mined.
const tx = defaultTransactionFactory() const tx = defaultTransactionFactory()
tx.gasPrice = await gasPriceForL2(env) tx.gasPrice = await gasPriceForL2()
const result = await wallet.sendTransaction(tx) const result = await wallet.sendTransaction(tx)
const receipt = await result.wait() const receipt = await result.wait()
...@@ -407,11 +405,9 @@ describe('Basic RPC tests', () => { ...@@ -407,11 +405,9 @@ describe('Basic RPC tests', () => {
// Needs to be skipped on Prod networks because this test doesn't work when // Needs to be skipped on Prod networks because this test doesn't work when
// other people are sending transactions to the Sequencer at the same time // other people are sending transactions to the Sequencer at the same time
// as this test is running. // as this test is running.
it('should return the same result when new transactions are not applied', async function () { hardhatTest(
if (isLiveNetwork()) { 'should return the same result when new transactions are not applied',
this.skip() async () => {
}
// Get latest block once to start. // Get latest block once to start.
const prev = await provider.getBlockWithTransactions('latest') const prev = await provider.getBlockWithTransactions('latest')
// set wait to null to allow a deep object comparison // set wait to null to allow a deep object comparison
...@@ -432,7 +428,8 @@ describe('Basic RPC tests', () => { ...@@ -432,7 +428,8 @@ describe('Basic RPC tests', () => {
expect(latest).to.deep.equal(prev) expect(latest).to.deep.equal(prev)
await sleep(2000) await sleep(2000)
} }
}) }
)
}) })
describe('eth_getBalance', () => { describe('eth_getBalance', () => {
...@@ -497,10 +494,15 @@ describe('Basic RPC tests', () => { ...@@ -497,10 +494,15 @@ describe('Basic RPC tests', () => {
}) })
describe('debug_traceTransaction', () => { describe('debug_traceTransaction', () => {
before(async function () {
if (!envConfig.RUN_DEBUG_TRACE_TESTS) {
this.skip()
}
})
it('should match debug_traceBlock', async () => { it('should match debug_traceBlock', async () => {
const storage = new ContractFactory( const storage = await ethers.getContractFactory(
simpleStorageJson.abi, 'SimpleStorage',
simpleStorageJson.bytecode,
env.l2Wallet env.l2Wallet
) )
const tx = (await storage.deploy()).deployTransaction const tx = (await storage.deploy()).deployTransaction
......
// Test bootstrap: bring up the local docker-compose network before the suite runs.
const { DockerComposeNetwork } = require('./shared/docker-compose')

before(async () => {
  // NO_NETWORK lets the tests target an already-running (or live) network
  // instead of starting containers locally — presumably set in CI/live runs;
  // TODO confirm against the test harness configuration.
  if (!process.env.NO_NETWORK) {
    await new DockerComposeNetwork().up()
  }
})
import * as compose from 'docker-compose'
import * as shell from 'shelljs'
import * as path from 'path'
// Names of the docker-compose services the integration tests may manage.
type ServiceNames =
  | 'batch_submitter'
  | 'dtl'
  | 'l2geth'
  | 'relayer'
  | 'verifier'
  | 'replica'

// Directory containing the docker-compose project (relative to the repo layout:
// the tests run from a sibling package of ../ops).
const OPS_DIRECTORY = path.join(process.cwd(), '../ops')

// Services started by default; 'verifier' and 'replica' are opt-in.
const DEFAULT_SERVICES: ServiceNames[] = [
  'batch_submitter',
  'dtl',
  'l2geth',
  'relayer',
]
// Thin wrapper around the `docker-compose` npm package for managing the local
// Optimism service containers that the integration tests run against.
export class DockerComposeNetwork {
  constructor(private readonly services: ServiceNames[] = DEFAULT_SERVICES) {}

  // Starts the configured services (`docker-compose up` for each). Returns the
  // raw compose output so callers can inspect it.
  async up(options?: compose.IDockerComposeOptions) {
    const out = await compose.upMany(this.services, {
      cwd: OPS_DIRECTORY,
      ...options,
    })

    const { err, exitCode } = out

    // docker-compose writes its progress to stderr, so an empty `err`
    // presumably indicates the command produced no output at all, which is
    // treated as a failure alongside a non-zero exit code — TODO confirm
    // this holds for all docker-compose versions in use.
    if (!err || exitCode) {
      console.error(err)
      throw new Error(
        'Unexpected error when starting docker-compose network, dumping output'
      )
    }

    // "Creating" in the output means containers were freshly started (rather
    // than already running), so wait for the sequencer to become ready before
    // letting tests proceed.
    if (err.includes('Creating')) {
      console.info(
        '🐳 Tests required starting containers. Waiting for sequencer to ready.'
      )
      shell.exec(`${OPS_DIRECTORY}/scripts/wait-for-sequencer.sh`, {
        cwd: OPS_DIRECTORY,
      })
    }

    return out
  }

  // Returns the aggregated logs for the managed services.
  async logs() {
    return compose.logs(this.services, { cwd: OPS_DIRECTORY })
  }

  // Stops a single service without removing its container.
  async stop(service: ServiceNames) {
    return compose.stopOne(service, { cwd: OPS_DIRECTORY })
  }

  // Removes stopped service containers.
  async rm() {
    return compose.rm({ cwd: OPS_DIRECTORY })
  }
}
...@@ -19,6 +19,8 @@ import { ...@@ -19,6 +19,8 @@ import {
getL1Bridge, getL1Bridge,
getL2Bridge, getL2Bridge,
sleep, sleep,
envConfig,
DEFAULT_TEST_GAS_L1,
} from './utils' } from './utils'
import { import {
initWatcher, initWatcher,
...@@ -83,8 +85,10 @@ export class OptimismEnv { ...@@ -83,8 +85,10 @@ export class OptimismEnv {
// fund the user if needed // fund the user if needed
const balance = await l2Wallet.getBalance() const balance = await l2Wallet.getBalance()
if (balance.lt(utils.parseEther('1'))) { const min = envConfig.L2_WALLET_MIN_BALANCE_ETH.toString()
await fundUser(watcher, l1Bridge, utils.parseEther('1').sub(balance)) const topUp = envConfig.L2_WALLET_TOP_UP_AMOUNT_ETH.toString()
if (balance.lt(utils.parseEther(min))) {
await fundUser(watcher, l1Bridge, utils.parseEther(topUp))
} }
const l1Messenger = getContractFactory('L1CrossDomainMessenger') const l1Messenger = getContractFactory('L1CrossDomainMessenger')
.connect(l1Wallet) .connect(l1Wallet)
...@@ -156,6 +160,7 @@ export class OptimismEnv { ...@@ -156,6 +160,7 @@ export class OptimismEnv {
tx: Promise<TransactionResponse> | TransactionResponse tx: Promise<TransactionResponse> | TransactionResponse
): Promise<void> { ): Promise<void> {
tx = await tx tx = await tx
await tx.wait()
let messagePairs = [] let messagePairs = []
while (true) { while (true) {
...@@ -187,7 +192,10 @@ export class OptimismEnv { ...@@ -187,7 +192,10 @@ export class OptimismEnv {
message.sender, message.sender,
message.message, message.message,
message.messageNonce, message.messageNonce,
proof proof,
{
gasLimit: DEFAULT_TEST_GAS_L1 * 10,
}
) )
await result.wait() await result.wait()
break break
......
...@@ -23,7 +23,7 @@ export const fundRandomWallet = async ( ...@@ -23,7 +23,7 @@ export const fundRandomWallet = async (
const fundTx = await env.l1Wallet.sendTransaction({ const fundTx = await env.l1Wallet.sendTransaction({
gasLimit: 25_000, gasLimit: 25_000,
to: wallet.address, to: wallet.address,
gasPrice: await gasPriceForL1(env), gasPrice: await gasPriceForL1(),
value, value,
}) })
await fundTx.wait() await fundTx.wait()
...@@ -47,7 +47,7 @@ export const executeL1ToL2Transaction = async ( ...@@ -47,7 +47,7 @@ export const executeL1ToL2Transaction = async (
), ),
MESSAGE_GAS, MESSAGE_GAS,
{ {
gasPrice: await gasPriceForL1(env), gasPrice: await gasPriceForL1(),
} }
) )
) )
...@@ -71,7 +71,7 @@ export const executeL2ToL1Transaction = async ( ...@@ -71,7 +71,7 @@ export const executeL2ToL1Transaction = async (
), ),
MESSAGE_GAS, MESSAGE_GAS,
{ {
gasPrice: gasPriceForL2(env), gasPrice: gasPriceForL2(),
} }
) )
) )
...@@ -90,7 +90,7 @@ export const executeL2Transaction = async ( ...@@ -90,7 +90,7 @@ export const executeL2Transaction = async (
tx.contract tx.contract
.connect(signer) .connect(signer)
.functions[tx.functionName](...tx.functionParams, { .functions[tx.functionName](...tx.functionParams, {
gasPrice: gasPriceForL2(env), gasPrice: gasPriceForL2(),
}) })
) )
await result.wait() await result.wait()
......
This diff is collapsed.
import { expect } from './shared/setup' import { expect } from './shared/setup'
/* Imports: External */ /* Imports: External */
import { Contract, ContractFactory, Wallet, utils } from 'ethers' import { Contract, Wallet, utils } from 'ethers'
import { ethers } from 'hardhat'
/* Imports: Internal */ /* Imports: Internal */
import { OptimismEnv } from './shared/env' import { OptimismEnv } from './shared/env'
...@@ -16,13 +17,12 @@ import { ...@@ -16,13 +17,12 @@ import {
} from './shared/stress-test-helpers' } from './shared/stress-test-helpers'
/* Imports: Artifacts */ /* Imports: Artifacts */
import simpleStorageJson from '../artifacts/contracts/SimpleStorage.sol/SimpleStorage.json' import { envConfig, fundUser } from './shared/utils'
import { fundUser, isLiveNetwork, isMainnet } from './shared/utils'
// Need a big timeout to allow for all transactions to be processed. // Need a big timeout to allow for all transactions to be processed.
// For some reason I can't figure out how to set the timeout on a per-suite basis // For some reason I can't figure out how to set the timeout on a per-suite basis
// so I'm instead setting it for every test. // so I'm instead setting it for every test.
const STRESS_TEST_TIMEOUT = isLiveNetwork() ? 500_000 : 1_200_000 const STRESS_TEST_TIMEOUT = envConfig.MOCHA_TIMEOUT * 5
describe('stress tests', () => { describe('stress tests', () => {
const numTransactions = 3 const numTransactions = 3
...@@ -32,13 +32,14 @@ describe('stress tests', () => { ...@@ -32,13 +32,14 @@ describe('stress tests', () => {
const wallets: Wallet[] = [] const wallets: Wallet[] = []
before(async function () { before(async function () {
env = await OptimismEnv.new() if (!envConfig.RUN_STRESS_TESTS) {
if (await isMainnet(env)) { console.log('Skipping stress tests.')
console.log('Skipping stress tests on mainnet.')
this.skip() this.skip()
return return
} }
env = await OptimismEnv.new()
for (let i = 0; i < numTransactions; i++) { for (let i = 0; i < numTransactions; i++) {
wallets.push(Wallet.createRandom()) wallets.push(Wallet.createRandom())
} }
...@@ -60,14 +61,12 @@ describe('stress tests', () => { ...@@ -60,14 +61,12 @@ describe('stress tests', () => {
let L2SimpleStorage: Contract let L2SimpleStorage: Contract
let L1SimpleStorage: Contract let L1SimpleStorage: Contract
beforeEach(async () => { beforeEach(async () => {
const factory__L1SimpleStorage = new ContractFactory( const factory__L1SimpleStorage = await ethers.getContractFactory(
simpleStorageJson.abi, 'SimpleStorage',
simpleStorageJson.bytecode,
env.l1Wallet env.l1Wallet
) )
const factory__L2SimpleStorage = new ContractFactory( const factory__L2SimpleStorage = await ethers.getContractFactory(
simpleStorageJson.abi, 'SimpleStorage',
simpleStorageJson.bytecode,
env.l2Wallet env.l2Wallet
) )
L1SimpleStorage = await factory__L1SimpleStorage.deploy() L1SimpleStorage = await factory__L1SimpleStorage.deploy()
......
# Changelog # Changelog
## 0.5.7
### Patch Changes
- d4bf299f: Add support to fully unmarshal Receipts with Optimism fields
- 8be69ca7: Add changeset for https://github.com/ethereum-optimism/optimism/pull/2011 - replicas forward write requests to the sequencer via a configured parameter `--sequencer.clienthttp` or `SEQUENCER_CLIENT_HTTP`
- c9fd6ec2: Correctly parse fee enforcement via config to allow turning off L2 fees for development
## 0.5.6
### Patch Changes
- 3a77bbcc: Implement updated timestamp logic
- 3e3c07a3: Changed the default address to be address(0) in `call`
## 0.5.5 ## 0.5.5
### Patch Changes ### Patch Changes
......
...@@ -17,15 +17,23 @@ ...@@ -17,15 +17,23 @@
package main package main
import ( import (
"bytes"
"crypto/sha256"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"io/ioutil"
"net/http"
"os" "os"
"path/filepath" "path/filepath"
"regexp"
"runtime" "runtime"
"strconv" "strconv"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/ethereum-optimism/optimism/l2geth/common/hexutil"
"github.com/ethereum-optimism/optimism/l2geth/cmd/utils" "github.com/ethereum-optimism/optimism/l2geth/cmd/utils"
"github.com/ethereum-optimism/optimism/l2geth/common" "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum-optimism/optimism/l2geth/console" "github.com/ethereum-optimism/optimism/l2geth/console"
...@@ -45,7 +53,7 @@ var ( ...@@ -45,7 +53,7 @@ var (
Action: utils.MigrateFlags(initGenesis), Action: utils.MigrateFlags(initGenesis),
Name: "init", Name: "init",
Usage: "Bootstrap and initialize a new genesis block", Usage: "Bootstrap and initialize a new genesis block",
ArgsUsage: "<genesisPath>", ArgsUsage: "<genesisPathOrUrl> (<genesisHash>)",
Flags: []cli.Flag{ Flags: []cli.Flag{
utils.DataDirFlag, utils.DataDirFlag,
}, },
...@@ -55,7 +63,22 @@ The init command initializes a new genesis block and definition for the network. ...@@ -55,7 +63,22 @@ The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be This is a destructive action and changes the network in which you will be
participating. participating.
It expects the genesis file as argument.`, It expects either a path or an HTTP URL to the genesis file as an argument. If an
HTTP URL is specified for the genesis file, then a hex-encoded SHA256 hash of the
genesis file must be included as a second argument. The hash provided on the CLI
will be checked against the hash of the genesis file downloaded from the URL.`,
}
dumpChainCfgCommand = cli.Command{
Action: utils.MigrateFlags(dumpChainCfg),
Name: "dump-chain-cfg",
Usage: "Dumps the current chain config to standard out.",
Flags: []cli.Flag{
utils.DataDirFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
This command dumps the currently configured chain state to standard output. It
will fail if there is no genesis block configured.`,
} }
importCommand = cli.Command{ importCommand = cli.Command{
Action: utils.MigrateFlags(importChain), Action: utils.MigrateFlags(importChain),
...@@ -194,15 +217,50 @@ Use "ethereum dump 0" to dump the genesis block.`, ...@@ -194,15 +217,50 @@ Use "ethereum dump 0" to dump the genesis block.`,
// the zero'd block (i.e. genesis) or will fail hard if it can't succeed. // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
func initGenesis(ctx *cli.Context) error { func initGenesis(ctx *cli.Context) error {
// Make sure we have a valid genesis JSON // Make sure we have a valid genesis JSON
genesisPath := ctx.Args().First() genesisPathOrURL := ctx.Args().First()
if len(genesisPath) == 0 { if len(genesisPathOrURL) == 0 {
utils.Fatalf("Must supply path to genesis JSON file") utils.Fatalf("Must supply path or URL to genesis JSON file")
}
var file io.ReadCloser
if matched, _ := regexp.MatchString("^http(s)?://", genesisPathOrURL); matched {
genesisHashStr := ctx.Args().Get(1)
if genesisHashStr == "" {
utils.Fatalf("Must specify a genesis hash argument if the genesis path argument is an URL.")
}
genesisHashData, err := hexutil.Decode(genesisHashStr)
if err != nil {
utils.Fatalf("Error decoding genesis hash: %v", err)
}
log.Info("Fetching genesis file", "url", genesisPathOrURL)
genesisData, err := fetchGenesis(genesisPathOrURL)
if err != nil {
utils.Fatalf("Failed to fetch genesis file: %v", err)
}
hash := sha256.New()
hash.Write(genesisData)
actualHash := hash.Sum(nil)
if !bytes.Equal(actualHash, genesisHashData) {
utils.Fatalf(
"Genesis hashes do not match. Need: %s, got: %s",
genesisHashStr,
hexutil.Encode(actualHash),
)
} }
file, err := os.Open(genesisPath)
file = ioutil.NopCloser(bytes.NewReader(genesisData))
} else {
var err error
file, err = os.Open(genesisPathOrURL)
if err != nil { if err != nil {
utils.Fatalf("Failed to read genesis file: %v", err) utils.Fatalf("Failed to read genesis file: %v", err)
} }
defer file.Close() defer file.Close()
}
genesis := new(core.Genesis) genesis := new(core.Genesis)
if err := json.NewDecoder(file).Decode(genesis); err != nil { if err := json.NewDecoder(file).Decode(genesis); err != nil {
...@@ -227,6 +285,30 @@ func initGenesis(ctx *cli.Context) error { ...@@ -227,6 +285,30 @@ func initGenesis(ctx *cli.Context) error {
return nil return nil
} }
// dumpChainCfg dumps the chain config stored in the node's database to
// standard output as indented JSON. It fails hard (via utils.Fatalf) if the
// database cannot be opened or no genesis block has been configured.
func dumpChainCfg(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	defer stack.Close()
	db, err := stack.OpenDatabase("chaindata", 0, 0, "")
	if err != nil {
		utils.Fatalf("Failed to open database: %v", err)
	}
	// The canonical hash of block 0 identifies which genesis (and hence
	// which chain config) this database was initialized with; a zero hash
	// means the database was never initialized.
	stored := rawdb.ReadCanonicalHash(db, 0)
	if stored == (common.Hash{}) {
		utils.Fatalf("No genesis block configured.")
	}
	chainCfg := rawdb.ReadChainConfig(db, stored)
	out, err := json.MarshalIndent(chainCfg, "", " ")
	if err != nil {
		// Bug fix: previously this printed `out` (nil on error) instead of
		// the actual marshalling error.
		utils.Fatalf("Failed to marshal chain config: %v", err)
	}
	fmt.Println(string(out))
	return nil
}
func importChain(ctx *cli.Context) error { func importChain(ctx *cli.Context) error {
if len(ctx.Args()) < 1 { if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an argument.") utils.Fatalf("This command requires an argument.")
...@@ -557,3 +639,15 @@ func hashish(x string) bool { ...@@ -557,3 +639,15 @@ func hashish(x string) bool {
_, err := strconv.Atoi(x) _, err := strconv.Atoi(x)
return err != nil return err != nil
} }
// fetchGenesis downloads a genesis file from the given HTTP(S) URL and
// returns the raw response body. The request is bounded by a 10 second
// timeout so a hung server cannot block node initialization indefinitely.
func fetchGenesis(url string) ([]byte, error) {
	client := &http.Client{
		Timeout: 10 * time.Second,
	}
	resp, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Reject non-200 responses: without this check, an HTML error page
	// (404, 500, ...) would be returned as if it were the genesis payload
	// and only be rejected later by the hash comparison or JSON decode.
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("fetching genesis from %s: unexpected status %s", url, resp.Status)
	}
	return ioutil.ReadAll(resp.Body)
}
This diff is collapsed.
...@@ -163,6 +163,7 @@ var ( ...@@ -163,6 +163,7 @@ var (
utils.RollupEnforceFeesFlag, utils.RollupEnforceFeesFlag,
utils.RollupFeeThresholdDownFlag, utils.RollupFeeThresholdDownFlag,
utils.RollupFeeThresholdUpFlag, utils.RollupFeeThresholdUpFlag,
utils.SequencerClientHttpFlag,
} }
rpcFlags = []cli.Flag{ rpcFlags = []cli.Flag{
...@@ -215,6 +216,7 @@ func init() { ...@@ -215,6 +216,7 @@ func init() {
app.Commands = []cli.Command{ app.Commands = []cli.Command{
// See chaincmd.go: // See chaincmd.go:
initCommand, initCommand,
dumpChainCfgCommand,
importCommand, importCommand,
exportCommand, exportCommand,
importPreimagesCommand, importPreimagesCommand,
......
This diff is collapsed.
...@@ -77,6 +77,7 @@ var AppHelpFlagGroups = []flagGroup{ ...@@ -77,6 +77,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.RollupEnforceFeesFlag, utils.RollupEnforceFeesFlag,
utils.RollupFeeThresholdDownFlag, utils.RollupFeeThresholdDownFlag,
utils.RollupFeeThresholdUpFlag, utils.RollupFeeThresholdUpFlag,
utils.SequencerClientHttpFlag,
}, },
}, },
{ {
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment