Commit c4eb2e57 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into refcell/challenger/types

parents a48ed5b8 3a4c7dd4
...@@ -190,6 +190,6 @@ require ( ...@@ -190,6 +190,6 @@ require (
nhooyr.io/websocket v1.8.7 // indirect nhooyr.io/websocket v1.8.7 // indirect
) )
replace github.com/ethereum/go-ethereum v1.11.6 => github.com/ethereum-optimism/op-geth v1.101105.2-0.20230502202351-9cc072e922f6 replace github.com/ethereum/go-ethereum v1.11.6 => github.com/ethereum-optimism/op-geth v1.101105.2-0.20230526154603-bdab05ca786f
//replace github.com/ethereum/go-ethereum v1.11.6 => ../go-ethereum //replace github.com/ethereum/go-ethereum v1.11.6 => ../go-ethereum
...@@ -151,8 +151,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 ...@@ -151,8 +151,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
github.com/ethereum-optimism/op-geth v1.101105.2-0.20230502202351-9cc072e922f6 h1:Fh9VBmDCwjVn8amx1Dfrx+hIh16C/FDkS17EN25MGO8= github.com/ethereum-optimism/op-geth v1.101105.2-0.20230526154603-bdab05ca786f h1:+yZN8K/4AIN5f+gazMwZAeqzDG2EL2GydMrccjnEK+A=
github.com/ethereum-optimism/op-geth v1.101105.2-0.20230502202351-9cc072e922f6/go.mod h1:X9t7oeerFMU9/zMIjZKT/jbIca+O05QqtBTLjL+XVeA= github.com/ethereum-optimism/op-geth v1.101105.2-0.20230526154603-bdab05ca786f/go.mod h1:X9t7oeerFMU9/zMIjZKT/jbIca+O05QqtBTLjL+XVeA=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fjl/memsize v0.0.1 h1:+zhkb+dhUgx0/e+M8sF0QqiouvMQUiKR+QYvdxIOKcQ= github.com/fjl/memsize v0.0.1 h1:+zhkb+dhUgx0/e+M8sF0QqiouvMQUiKR+QYvdxIOKcQ=
......
docker-compose.dev.yml
.env
# @eth-optimism/indexer
## Getting started
### Run the indexer against Goerli
- Install Docker
- `cp example.env .env`
- Fill in `.env`
- Run `docker-compose up` to start the indexer against the Optimism Goerli network
### Run the indexer with go
See the flags in `flags.go` for reference on which command-line flags to pass to `go run`.
### Run the indexer against a devnet
TODO: add the indexer to the Optimism devnet compose file (previously removed for breaking CI)
### Run the indexer against a custom configuration
`docker-compose.dev.yml` is git-ignored. Fill in your own docker-compose file there.
version: '3.8'
services:
postgres:
image: postgres:latest
environment:
- POSTGRES_USER=db_username
- POSTGRES_PASSWORD=db_password
- POSTGRES_DB=db_name
- PGDATA=/data/postgres
- POSTGRES_HOST_AUTH_METHOD=trust
healthcheck:
test: [ "CMD-SHELL", "pg_isready -q -U db_username -d db_name" ]
ports:
- "5434:5432"
volumes:
- postgres_data:/data/postgres
indexer:
build:
context: ..
dockerfile: indexer/Dockerfile
healthcheck:
test: wget localhost:8080/healthz -q -O - > /dev/null 2>&1
environment:
# Note that you must index goerli with INDEXER_BEDROCK=false first, then
# reindex with INDEXER_BEDROCK=true or seed the database
- INDEXER_BEDROCK=${INDEXER_BEDROCK_GOERLI:-true}
- INDEXER_BUILD_ENV=${INDEXER_BUILD_ENV:-development}
- INDEXER_DB_PORT=${INDEXER_DB_PORT:-5432}
- INDEXER_DB_USER=${INDEXER_DB_USER:-db_username}
- INDEXER_DB_PASSWORD=${INDEXER_DB_PASSWORD:-db_password}
- INDEXER_DB_NAME=${INDEXER_DB_NAME:-db_name}
- INDEXER_DB_HOST=${INDEXER_DB_HOST:-postgres}
- INDEXER_CHAIN_ID=${INDEXER_CHAIN_ID:-5}
- INDEXER_L1_ETH_RPC=$INDEXER_L1_ETH_RPC
- INDEXER_L2_ETH_RPC=$INDEXER_L2_ETH_RPC
- INDEXER_REST_HOSTNAME=0.0.0.0
- INDEXER_REST_PORT=8080
- INDEXER_BEDROCK_L1_STANDARD_BRIDGE=0x636Af16bf2f682dD3109e60102b8E1A089FedAa8
- INDEXER_BEDROCK_OPTIMISM_PORTAL=0xB7040fd32359688346A3D1395a42114cf8E3b9b2
ports:
- 8080:8080
depends_on:
postgres:
condition: service_healthy
volumes:
postgres_data:
# Fill me in with Goerli URIs and run docker-compose up to run the indexer against Goerli
INDEXER_L1_ETH_RPC=FILL_ME_IN
INDEXER_L2_ETH_RPC=FILL_ME_IN
...@@ -212,7 +212,14 @@ func TestGPOParamsChange(gt *testing.T) { ...@@ -212,7 +212,14 @@ func TestGPOParamsChange(gt *testing.T) {
receipt := alice.LastTxReceipt(t) receipt := alice.LastTxReceipt(t)
require.Equal(t, basefee, receipt.L1GasPrice, "L1 gas price matches basefee of L1 origin") require.Equal(t, basefee, receipt.L1GasPrice, "L1 gas price matches basefee of L1 origin")
require.NotZero(t, receipt.L1GasUsed, "L2 tx uses L1 data") require.NotZero(t, receipt.L1GasUsed, "L2 tx uses L1 data")
l1Cost := types.L1Cost(receipt.L1GasUsed.Uint64(), basefee, big.NewInt(2100), big.NewInt(1000_000)) require.Equal(t,
new(big.Float).Mul(
new(big.Float).SetInt(basefee),
new(big.Float).Mul(new(big.Float).SetInt(receipt.L1GasUsed), receipt.FeeScalar),
),
new(big.Float).SetInt(receipt.L1Fee), "fee field in receipt matches gas used times scalar times basefee")
// receipt.L1GasUsed includes the overhead already, so subtract that before passing it into the L1 cost func
l1Cost := types.L1Cost(receipt.L1GasUsed.Uint64()-2100, basefee, big.NewInt(2100), big.NewInt(1000_000))
require.Equal(t, l1Cost, receipt.L1Fee, "L1 fee is computed with standard GPO params") require.Equal(t, l1Cost, receipt.L1Fee, "L1 fee is computed with standard GPO params")
require.Equal(t, "1", receipt.FeeScalar.String(), "1000_000 divided by 6 decimals = float(1)") require.Equal(t, "1", receipt.FeeScalar.String(), "1000_000 divided by 6 decimals = float(1)")
...@@ -268,7 +275,8 @@ func TestGPOParamsChange(gt *testing.T) { ...@@ -268,7 +275,8 @@ func TestGPOParamsChange(gt *testing.T) {
receipt = alice.LastTxReceipt(t) receipt = alice.LastTxReceipt(t)
require.Equal(t, basefeeGPOUpdate, receipt.L1GasPrice, "L1 gas price matches basefee of L1 origin") require.Equal(t, basefeeGPOUpdate, receipt.L1GasPrice, "L1 gas price matches basefee of L1 origin")
require.NotZero(t, receipt.L1GasUsed, "L2 tx uses L1 data") require.NotZero(t, receipt.L1GasUsed, "L2 tx uses L1 data")
l1Cost = types.L1Cost(receipt.L1GasUsed.Uint64(), basefeeGPOUpdate, big.NewInt(1000), big.NewInt(2_300_000)) // subtract overhead from L1GasUsed receipt field, types.L1Cost applies it again
l1Cost = types.L1Cost(receipt.L1GasUsed.Uint64()-1000, basefeeGPOUpdate, big.NewInt(1000), big.NewInt(2_300_000))
require.Equal(t, l1Cost, receipt.L1Fee, "L1 fee is computed with updated GPO params") require.Equal(t, l1Cost, receipt.L1Fee, "L1 fee is computed with updated GPO params")
require.Equal(t, "2.3", receipt.FeeScalar.String(), "2_300_000 divided by 6 decimals = float(2.3)") require.Equal(t, "2.3", receipt.FeeScalar.String(), "2_300_000 divided by 6 decimals = float(2.3)")
...@@ -288,7 +296,8 @@ func TestGPOParamsChange(gt *testing.T) { ...@@ -288,7 +296,8 @@ func TestGPOParamsChange(gt *testing.T) {
receipt = alice.LastTxReceipt(t) receipt = alice.LastTxReceipt(t)
require.Equal(t, basefee, receipt.L1GasPrice, "L1 gas price matches basefee of L1 origin") require.Equal(t, basefee, receipt.L1GasPrice, "L1 gas price matches basefee of L1 origin")
require.NotZero(t, receipt.L1GasUsed, "L2 tx uses L1 data") require.NotZero(t, receipt.L1GasUsed, "L2 tx uses L1 data")
l1Cost = types.L1Cost(receipt.L1GasUsed.Uint64(), basefee, big.NewInt(1000), big.NewInt(2_300_000)) // subtract overhead from L1GasUsed receipt field, types.L1Cost applies it again
l1Cost = types.L1Cost(receipt.L1GasUsed.Uint64()-1000, basefee, big.NewInt(1000), big.NewInt(2_300_000))
require.Equal(t, l1Cost, receipt.L1Fee, "L1 fee is computed with updated GPO params") require.Equal(t, l1Cost, receipt.L1Fee, "L1 fee is computed with updated GPO params")
require.Equal(t, "2.3", receipt.FeeScalar.String(), "2_300_000 divided by 6 decimals = float(2.3)") require.Equal(t, "2.3", receipt.FeeScalar.String(), "2_300_000 divided by 6 decimals = float(2.3)")
} }
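The subtraction in these updated assertions follows from how the bedrock L1 fee is computed: `receipt.L1GasUsed` already includes the fixed overhead, while `types.L1Cost` adds the overhead again before scaling. A minimal sketch of that relationship (illustration only, not code from this diff; `l1CostSketch` is a hypothetical helper written in the standard form used by op-geth's `types.L1Cost`):

```go
package sketch

import "math/big"

// l1CostSketch mirrors the standard bedrock L1 fee formula:
// fee = (rollupDataGas + overhead) * l1Basefee * scalar / 1e6.
func l1CostSketch(rollupDataGas uint64, l1Basefee, overhead, scalar *big.Int) *big.Int {
	fee := new(big.Int).SetUint64(rollupDataGas)
	fee.Add(fee, overhead) // the overhead is applied here, inside the cost function
	fee.Mul(fee, l1Basefee)
	fee.Mul(fee, scalar)
	return fee.Div(fee, big.NewInt(1_000_000)) // the scalar carries 6 decimals
}
```

Because the receipt's `L1GasUsed` field already equals the rollup data gas plus the overhead, the tests subtract the overhead (2100, then 1000 after the GPO update) before calling `types.L1Cost`, and the big.Float assertion checks the equivalent identity `L1Fee == basefee * L1GasUsed * FeeScalar`, where `FeeScalar` is the scalar divided by 10^6.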
......
...@@ -100,6 +100,126 @@ func TestInvalidDepositInFCU(t *testing.T) { ...@@ -100,6 +100,126 @@ func TestInvalidDepositInFCU(t *testing.T) {
require.Equal(t, 0, balance.Cmp(common.Big0)) require.Equal(t, 0, balance.Cmp(common.Big0))
} }
// TestGethOnlyPendingBlockIsLatest walks through an engine-API block building job,
// and asserts that the pending block is set to match the latest block at every stage,
// for stability and tx-privacy.
func TestGethOnlyPendingBlockIsLatest(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.FundDevAccounts = true
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
opGeth, err := NewOpGeth(t, ctx, &cfg)
require.NoError(t, err)
defer opGeth.Close()
checkPending := func(stage string, number uint64) {
// TODO(CLI-4044): pending-block ID change
pendingBlock, err := opGeth.L2Client.BlockByNumber(ctx, big.NewInt(-1))
require.NoError(t, err, "failed to fetch pending block at stage "+stage)
require.Equal(t, number, pendingBlock.NumberU64(), "pending block must have expected number")
latestBlock, err := opGeth.L2Client.BlockByNumber(ctx, nil)
require.NoError(t, err, "failed to fetch latest block at stage "+stage)
require.Equal(t, pendingBlock.Hash(), latestBlock.Hash(), "pending and latest do not match at stage "+stage)
}
checkPending("genesis", 0)
amount := big.NewInt(42) // send 42 wei
aliceStartBalance, err := opGeth.L2Client.PendingBalanceAt(ctx, cfg.Secrets.Addresses().Alice)
require.NoError(t, err)
require.True(t, aliceStartBalance.Cmp(big.NewInt(0)) > 0, "alice must be funded")
checkPendingBalance := func() {
pendingBalance, err := opGeth.L2Client.PendingBalanceAt(ctx, cfg.Secrets.Addresses().Alice)
require.NoError(t, err)
require.Equal(t, pendingBalance, aliceStartBalance, "pending balance must still be the same")
}
startBlock, err := opGeth.L2Client.BlockByNumber(ctx, nil)
require.NoError(t, err)
signer := types.LatestSigner(opGeth.L2ChainConfig)
tip := big.NewInt(7_000_000_000) // 7 gwei tip
tx := types.MustSignNewTx(cfg.Secrets.Alice, signer, &types.DynamicFeeTx{
ChainID: big.NewInt(int64(cfg.DeployConfig.L2ChainID)),
Nonce: 0,
GasTipCap: tip,
GasFeeCap: new(big.Int).Add(startBlock.BaseFee(), tip),
Gas: 1_000_000,
To: &cfg.Secrets.Addresses().Bob,
Value: amount,
Data: nil,
})
require.NoError(t, opGeth.L2Client.SendTransaction(ctx, tx), "send tx to make pending work different")
checkPending("prepared", 0)
rpcClient, err := opGeth.node.Attach()
require.NoError(t, err)
defer rpcClient.Close()
// Wait for tx to be in tx-pool, for it to be picked up in block building
var txPoolStatus struct {
Pending hexutil.Uint64 `json:"pending"`
}
for i := 0; i < 5; i++ {
require.NoError(t, rpcClient.CallContext(ctx, &txPoolStatus, "txpool_status"))
if txPoolStatus.Pending == 0 {
time.Sleep(time.Second)
} else {
break
}
}
require.NotZero(t, txPoolStatus.Pending, "must have pending tx in pool")
checkPending("in-pool", 0)
checkPendingBalance()
// start building a block
attrs, err := opGeth.CreatePayloadAttributes()
require.NoError(t, err)
attrs.NoTxPool = false // we want to include a tx
fc := eth.ForkchoiceState{
HeadBlockHash: opGeth.L2Head.BlockHash,
SafeBlockHash: opGeth.L2Head.BlockHash,
}
res, err := opGeth.l2Engine.ForkchoiceUpdate(ctx, &fc, attrs)
require.NoError(t, err)
checkPending("building", 0)
checkPendingBalance()
// Now we have to wait until the block-building job picks up the tx from the tx-pool.
// See the goroutine that spins up in the buildPayload() func in payload_building.go in the miner package.
// We can't observe its progress directly, and we don't want to finish block-building prematurely, so we simply have to wait.
time.Sleep(time.Second * 4) // conservatively wait 4 seconds; CI might lag during block building.
// retrieve the block
payload, err := opGeth.l2Engine.GetPayload(ctx, *res.PayloadID)
require.NoError(t, err)
checkPending("retrieved", 0)
require.Len(t, payload.Transactions, 2, "must include L1 info tx and tx from alice")
checkPendingBalance()
// process the block
status, err := opGeth.l2Engine.NewPayload(ctx, payload)
require.NoError(t, err)
require.Equal(t, eth.ExecutionValid, status.Status)
checkPending("processed", 0)
checkPendingBalance()
// make the block canonical
fc = eth.ForkchoiceState{
HeadBlockHash: payload.BlockHash,
SafeBlockHash: payload.BlockHash,
}
res, err = opGeth.l2Engine.ForkchoiceUpdate(ctx, &fc, nil)
require.NoError(t, err)
require.Equal(t, eth.ExecutionValid, res.PayloadStatus.Status)
checkPending("canonical", 1)
}
func TestPreregolith(t *testing.T) { func TestPreregolith(t *testing.T) {
InitParallel(t) InitParallel(t)
futureTimestamp := hexutil.Uint64(4) futureTimestamp := hexutil.Uint64(4)
......
...@@ -679,7 +679,7 @@ func (sys *System) newMockNetPeer() (host.Host, error) { ...@@ -679,7 +679,7 @@ func (sys *System) newMockNetPeer() (host.Host, error) {
_ = ps.AddPubKey(p, sk.GetPublic()) _ = ps.AddPubKey(p, sk.GetPublic())
ds := sync.MutexWrap(ds.NewMapDatastore()) ds := sync.MutexWrap(ds.NewMapDatastore())
eps, err := store.NewExtendedPeerstore(context.Background(), log.Root(), clock.SystemClock, ps, ds) eps, err := store.NewExtendedPeerstore(context.Background(), log.Root(), clock.SystemClock, ps, ds, 24*time.Hour)
if err != nil { if err != nil {
return nil, err return nil, err
} }
......
...@@ -241,12 +241,14 @@ func TestPendingGasLimit(t *testing.T) { ...@@ -241,12 +241,14 @@ func TestPendingGasLimit(t *testing.T) {
cfg.GethOptions["sequencer"] = []GethOption{ cfg.GethOptions["sequencer"] = []GethOption{
func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error { func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error {
ethCfg.Miner.GasCeil = 10_000_000 ethCfg.Miner.GasCeil = 10_000_000
ethCfg.Miner.RollupComputePendingBlock = true
return nil return nil
}, },
} }
cfg.GethOptions["verifier"] = []GethOption{ cfg.GethOptions["verifier"] = []GethOption{
func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error { func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error {
ethCfg.Miner.GasCeil = 9_000_000 ethCfg.Miner.GasCeil = 9_000_000
ethCfg.Miner.RollupComputePendingBlock = true
return nil return nil
}, },
} }
...@@ -1230,6 +1232,14 @@ func TestFees(t *testing.T) { ...@@ -1230,6 +1232,14 @@ func TestFees(t *testing.T) {
require.Nil(t, err) require.Nil(t, err)
require.Equal(t, l1Fee, gpoL1Fee, "l1 fee mismatch") require.Equal(t, l1Fee, gpoL1Fee, "l1 fee mismatch")
require.Equal(t, receipt.L1Fee, l1Fee, "l1 fee in receipt is correct")
require.Equal(t,
new(big.Float).Mul(
new(big.Float).SetInt(l1Header.BaseFee),
new(big.Float).Mul(new(big.Float).SetInt(receipt.L1GasUsed), receipt.FeeScalar),
),
new(big.Float).SetInt(receipt.L1Fee), "fee field in receipt matches gas used times scalar times basefee")
// Calculate total fee // Calculate total fee
baseFeeRecipientDiff.Add(baseFeeRecipientDiff, coinbaseDiff) baseFeeRecipientDiff.Add(baseFeeRecipientDiff, coinbaseDiff)
totalFee := new(big.Int).Add(baseFeeRecipientDiff, l1FeeRecipientDiff) totalFee := new(big.Int).Add(baseFeeRecipientDiff, l1FeeRecipientDiff)
...@@ -1371,3 +1381,45 @@ func latestBlock(t *testing.T, client *ethclient.Client) uint64 { ...@@ -1371,3 +1381,45 @@ func latestBlock(t *testing.T, client *ethclient.Client) uint64 {
require.Nil(t, err, "Error getting latest block") require.Nil(t, err, "Error getting latest block")
return blockAfter return blockAfter
} }
// TestPendingBlockIsLatest tests that we serve the latest block as the pending block
func TestPendingBlockIsLatest(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
sys, err := cfg.Start()
require.Nil(t, err, "Error starting up system")
defer sys.Close()
l2Seq := sys.Clients["sequencer"]
t.Run("block", func(t *testing.T) {
for i := 0; i < 10; i++ {
// TODO(CLI-4044): pending-block ID change
pending, err := l2Seq.BlockByNumber(context.Background(), big.NewInt(-1))
require.NoError(t, err)
latest, err := l2Seq.BlockByNumber(context.Background(), nil)
require.NoError(t, err)
if pending.NumberU64() == latest.NumberU64() {
require.Equal(t, pending.Hash(), latest.Hash(), "pending must exactly match latest block")
return
}
// re-try until we have the same number, as the requests are not an atomic bundle, and the sequencer may create a block.
}
t.Fatal("failed to get pending block with same number as latest block")
})
t.Run("header", func(t *testing.T) {
for i := 0; i < 10; i++ {
// TODO(CLI-4044): pending-block ID change
pending, err := l2Seq.HeaderByNumber(context.Background(), big.NewInt(-1))
require.NoError(t, err)
latest, err := l2Seq.HeaderByNumber(context.Background(), nil)
require.NoError(t, err)
if pending.Number.Uint64() == latest.Number.Uint64() {
require.Equal(t, pending.Hash(), latest.Hash(), "pending must exactly match latest header")
return
}
// re-try until we have the same number, as the requests are not an atomic bundle, and the sequencer may create a block.
}
t.Fatal("failed to get pending header with same number as latest header")
})
}
...@@ -141,7 +141,8 @@ func (conf *Config) Host(log log.Logger, reporter metrics.Reporter, metrics Host ...@@ -141,7 +141,8 @@ func (conf *Config) Host(log log.Logger, reporter metrics.Reporter, metrics Host
return nil, fmt.Errorf("failed to open peerstore: %w", err) return nil, fmt.Errorf("failed to open peerstore: %w", err)
} }
ps, err := store.NewExtendedPeerstore(context.Background(), log, clock.SystemClock, basePs, conf.Store) peerScoreParams := conf.PeerScoringParams()
ps, err := store.NewExtendedPeerstore(context.Background(), log, clock.SystemClock, basePs, conf.Store, peerScoreParams.RetainScore)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open extended peerstore: %w", err) return nil, fmt.Errorf("failed to open extended peerstore: %w", err)
} }
......
...@@ -76,7 +76,7 @@ func getNetHosts(testSuite *PeerScoresTestSuite, ctx context.Context, n int) []h ...@@ -76,7 +76,7 @@ func getNetHosts(testSuite *PeerScoresTestSuite, ctx context.Context, n int) []h
log := testlog.Logger(testSuite.T(), log.LvlError) log := testlog.Logger(testSuite.T(), log.LvlError)
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
swarm := tswarm.GenSwarm(testSuite.T()) swarm := tswarm.GenSwarm(testSuite.T())
eps, err := store.NewExtendedPeerstore(ctx, log, clock.SystemClock, swarm.Peerstore(), sync.MutexWrap(ds.NewMapDatastore())) eps, err := store.NewExtendedPeerstore(ctx, log, clock.SystemClock, swarm.Peerstore(), sync.MutexWrap(ds.NewMapDatastore()), 1*time.Hour)
netw := &customPeerstoreNetwork{swarm, eps} netw := &customPeerstoreNetwork{swarm, eps}
require.NoError(testSuite.T(), err) require.NoError(testSuite.T(), err)
h := bhost.NewBlankHost(netw) h := bhost.NewBlankHost(netw)
...@@ -99,7 +99,7 @@ func newGossipSubs(testSuite *PeerScoresTestSuite, ctx context.Context, hosts [] ...@@ -99,7 +99,7 @@ func newGossipSubs(testSuite *PeerScoresTestSuite, ctx context.Context, hosts []
dataStore := sync.MutexWrap(ds.NewMapDatastore()) dataStore := sync.MutexWrap(ds.NewMapDatastore())
peerStore, err := pstoreds.NewPeerstore(context.Background(), dataStore, pstoreds.DefaultOpts()) peerStore, err := pstoreds.NewPeerstore(context.Background(), dataStore, pstoreds.DefaultOpts())
require.NoError(testSuite.T(), err) require.NoError(testSuite.T(), err)
extPeerStore, err := store.NewExtendedPeerstore(context.Background(), logger, clock.SystemClock, peerStore, dataStore) extPeerStore, err := store.NewExtendedPeerstore(context.Background(), logger, clock.SystemClock, peerStore, dataStore, 1*time.Hour)
require.NoError(testSuite.T(), err) require.NoError(testSuite.T(), err)
scorer := NewScorer( scorer := NewScorer(
......
...@@ -4,6 +4,7 @@ import ( ...@@ -4,6 +4,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"time"
"github.com/ethereum-optimism/optimism/op-service/clock" "github.com/ethereum-optimism/optimism/op-service/clock"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
...@@ -19,12 +20,12 @@ type extendedStore struct { ...@@ -19,12 +20,12 @@ type extendedStore struct {
*ipBanBook *ipBanBook
} }
func NewExtendedPeerstore(ctx context.Context, logger log.Logger, clock clock.Clock, ps peerstore.Peerstore, store ds.Batching) (ExtendedPeerstore, error) { func NewExtendedPeerstore(ctx context.Context, logger log.Logger, clock clock.Clock, ps peerstore.Peerstore, store ds.Batching, scoreRetention time.Duration) (ExtendedPeerstore, error) {
cab, ok := peerstore.GetCertifiedAddrBook(ps) cab, ok := peerstore.GetCertifiedAddrBook(ps)
if !ok { if !ok {
return nil, errors.New("peerstore should also be a certified address book") return nil, errors.New("peerstore should also be a certified address book")
} }
sb, err := newScoreBook(ctx, logger, clock, store) sb, err := newScoreBook(ctx, logger, clock, store, scoreRetention)
if err != nil { if err != nil {
return nil, fmt.Errorf("create scorebook: %w", err) return nil, fmt.Errorf("create scorebook: %w", err)
} }
......
...@@ -102,6 +102,9 @@ func (d *recordsBook[K, V]) deleteRecord(key K) error { ...@@ -102,6 +102,9 @@ func (d *recordsBook[K, V]) deleteRecord(key K) error {
func (d *recordsBook[K, V]) getRecord(key K) (v V, err error) { func (d *recordsBook[K, V]) getRecord(key K) (v V, err error) {
if val, ok := d.cache.Get(key); ok { if val, ok := d.cache.Get(key); ok {
if d.hasExpired(val) {
return v, UnknownRecordErr
}
return val, nil return val, nil
} }
data, err := d.store.Get(d.ctx, d.dsKey(key)) data, err := d.store.Get(d.ctx, d.dsKey(key))
...@@ -114,6 +117,9 @@ func (d *recordsBook[K, V]) getRecord(key K) (v V, err error) { ...@@ -114,6 +117,9 @@ func (d *recordsBook[K, V]) getRecord(key K) (v V, err error) {
if err := v.UnmarshalBinary(data); err != nil { if err := v.UnmarshalBinary(data); err != nil {
return v, fmt.Errorf("invalid value for key %v: %w", key, err) return v, fmt.Errorf("invalid value for key %v: %w", key, err)
} }
if d.hasExpired(v) {
return v, UnknownRecordErr
}
d.cache.Add(key, v) d.cache.Add(key, v)
return v, nil return v, nil
} }
...@@ -142,9 +148,9 @@ func (d *recordsBook[K, V]) SetRecord(key K, diff recordDiff[V]) error { ...@@ -142,9 +148,9 @@ func (d *recordsBook[K, V]) SetRecord(key K, diff recordDiff[V]) error {
} }
// prune deletes entries from the store that are older than the configured prune expiration. // prune deletes entries from the store that are older than the configured prune expiration.
// Note that the expiry period is not a strict TTL. Entries that are eligible for deletion may still be present // Entries that are eligible for deletion may still be present either because the prune function hasn't yet run or
// either because the prune function hasn't yet run or because they are still preserved in the in-memory cache after // because they are still preserved in the in-memory cache after having been deleted from the database.
// having been deleted from the database. // Such expired entries are filtered out in getRecord
func (d *recordsBook[K, V]) prune() error { func (d *recordsBook[K, V]) prune() error {
results, err := d.store.Query(d.ctx, query.Query{ results, err := d.store.Query(d.ctx, query.Query{
Prefix: d.dsBaseKey.String(), Prefix: d.dsBaseKey.String(),
...@@ -168,7 +174,7 @@ func (d *recordsBook[K, V]) prune() error { ...@@ -168,7 +174,7 @@ func (d *recordsBook[K, V]) prune() error {
if err := v.UnmarshalBinary(result.Value); err != nil { if err := v.UnmarshalBinary(result.Value); err != nil {
return err return err
} }
if v.LastUpdated().Add(d.recordExpiry).Before(d.clock.Now()) { if d.hasExpired(v) {
if pending > maxPruneBatchSize { if pending > maxPruneBatchSize {
if err := batch.Commit(d.ctx); err != nil { if err := batch.Commit(d.ctx); err != nil {
return err return err
...@@ -191,6 +197,10 @@ func (d *recordsBook[K, V]) prune() error { ...@@ -191,6 +197,10 @@ func (d *recordsBook[K, V]) prune() error {
return nil return nil
} }
func (d *recordsBook[K, V]) hasExpired(v V) bool {
return v.LastUpdated().Add(d.recordExpiry).Before(d.clock.Now())
}
func (d *recordsBook[K, V]) Close() { func (d *recordsBook[K, V]) Close() {
d.cancelFn() d.cancelFn()
d.bgTasks.Wait() d.bgTasks.Wait()
......
...@@ -12,8 +12,7 @@ import ( ...@@ -12,8 +12,7 @@ import (
) )
const ( const (
scoreCacheSize = 100 scoreCacheSize = 100
scoreRecordExpiryPeriod = 24 * time.Hour
) )
var scoresBase = ds.NewKey("/peers/scores") var scoresBase = ds.NewKey("/peers/scores")
...@@ -56,8 +55,8 @@ func peerIDKey(id peer.ID) ds.Key { ...@@ -56,8 +55,8 @@ func peerIDKey(id peer.ID) ds.Key {
return ds.NewKey(base32.RawStdEncoding.EncodeToString([]byte(id))) return ds.NewKey(base32.RawStdEncoding.EncodeToString([]byte(id)))
} }
func newScoreBook(ctx context.Context, logger log.Logger, clock clock.Clock, store ds.Batching) (*scoreBook, error) { func newScoreBook(ctx context.Context, logger log.Logger, clock clock.Clock, store ds.Batching, retain time.Duration) (*scoreBook, error) {
book, err := newRecordsBook[peer.ID, *scoreRecord](ctx, logger, clock, store, scoreCacheSize, scoreRecordExpiryPeriod, scoresBase, newScoreRecord, peerIDKey) book, err := newRecordsBook[peer.ID, *scoreRecord](ctx, logger, clock, store, scoreCacheSize, retain, scoresBase, newScoreRecord, peerIDKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
......
...@@ -81,7 +81,7 @@ func TestPrune(t *testing.T) { ...@@ -81,7 +81,7 @@ func TestPrune(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LvlInfo)
store := sync.MutexWrap(ds.NewMapDatastore()) store := sync.MutexWrap(ds.NewMapDatastore())
clock := clock.NewDeterministicClock(time.UnixMilli(1000)) clock := clock.NewDeterministicClock(time.UnixMilli(1000))
book, err := newScoreBook(ctx, logger, clock, store) book, err := newScoreBook(ctx, logger, clock, store, 24*time.Hour)
require.NoError(t, err) require.NoError(t, err)
hasScoreRecorded := func(id peer.ID) bool { hasScoreRecorded := func(id peer.ID) bool {
...@@ -135,7 +135,7 @@ func TestPruneMultipleBatches(t *testing.T) { ...@@ -135,7 +135,7 @@ func TestPruneMultipleBatches(t *testing.T) {
defer cancelFunc() defer cancelFunc()
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LvlInfo)
clock := clock.NewDeterministicClock(time.UnixMilli(1000)) clock := clock.NewDeterministicClock(time.UnixMilli(1000))
book, err := newScoreBook(ctx, logger, clock, sync.MutexWrap(ds.NewMapDatastore())) book, err := newScoreBook(ctx, logger, clock, sync.MutexWrap(ds.NewMapDatastore()), 24*time.Hour)
require.NoError(t, err) require.NoError(t, err)
hasScoreRecorded := func(id peer.ID) bool { hasScoreRecorded := func(id peer.ID) bool {
...@@ -159,6 +159,31 @@ func TestPruneMultipleBatches(t *testing.T) { ...@@ -159,6 +159,31 @@ func TestPruneMultipleBatches(t *testing.T) {
} }
} }
// Check that scores that are eligible for pruning are not returned, even if they haven't yet been removed
func TestIgnoreOutdatedScores(t *testing.T) {
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
logger := testlog.Logger(t, log.LvlInfo)
clock := clock.NewDeterministicClock(time.UnixMilli(1000))
retentionPeriod := 24 * time.Hour
book, err := newScoreBook(ctx, logger, clock, sync.MutexWrap(ds.NewMapDatastore()), retentionPeriod)
require.NoError(t, err)
require.NoError(t, book.SetScore("a", &GossipScores{Total: 123.45}))
clock.AdvanceTime(retentionPeriod + 1)
// Not available from cache
scores, err := book.GetPeerScores("a")
require.NoError(t, err)
require.Equal(t, scores, PeerScores{})
book.book.cache.Purge()
// Not available from disk
scores, err = book.GetPeerScores("a")
require.NoError(t, err)
require.Equal(t, scores, PeerScores{})
}
func assertPeerScores(t *testing.T, store ExtendedPeerstore, id peer.ID, expected PeerScores) { func assertPeerScores(t *testing.T, store ExtendedPeerstore, id peer.ID, expected PeerScores) {
result, err := store.GetPeerScores(id) result, err := store.GetPeerScores(id)
require.NoError(t, err) require.NoError(t, err)
...@@ -174,8 +199,8 @@ func createPeerstoreWithBacking(t *testing.T, store *sync.MutexDatastore) Extend ...@@ -174,8 +199,8 @@ func createPeerstoreWithBacking(t *testing.T, store *sync.MutexDatastore) Extend
ps, err := pstoreds.NewPeerstore(context.Background(), store, pstoreds.DefaultOpts()) ps, err := pstoreds.NewPeerstore(context.Background(), store, pstoreds.DefaultOpts())
require.NoError(t, err, "Failed to create peerstore") require.NoError(t, err, "Failed to create peerstore")
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LvlInfo)
clock := clock.NewDeterministicClock(time.UnixMilli(100)) c := clock.NewDeterministicClock(time.UnixMilli(100))
eps, err := NewExtendedPeerstore(context.Background(), logger, clock, ps, store) eps, err := NewExtendedPeerstore(context.Background(), logger, c, ps, store, 24*time.Hour)
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { t.Cleanup(func() {
_ = eps.Close() _ = eps.Close()
......
FROM golang:1.18.0-alpine3.15 as builder FROM golang:1.20.4-alpine3.18 as builder
ARG GITCOMMIT=docker ARG GITCOMMIT=docker
ARG GITDATE=docker ARG GITDATE=docker
...@@ -12,7 +12,7 @@ WORKDIR /app ...@@ -12,7 +12,7 @@ WORKDIR /app
RUN make proxyd RUN make proxyd
FROM alpine:3.15 FROM alpine:3.18
COPY ./proxyd/entrypoint.sh /bin/entrypoint.sh COPY ./proxyd/entrypoint.sh /bin/entrypoint.sh
......
...@@ -374,7 +374,6 @@ func (b *Backend) ForwardRPC(ctx context.Context, res *RPCRes, id string, method ...@@ -374,7 +374,6 @@ func (b *Backend) ForwardRPC(ctx context.Context, res *RPCRes, id string, method
func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool) ([]*RPCRes, error) { func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool) ([]*RPCRes, error) {
// we are concerned about network error rates, so we record 1 request independently of how many are in the batch // we are concerned about network error rates, so we record 1 request independently of how many are in the batch
b.networkRequestsSlidingWindow.Incr() b.networkRequestsSlidingWindow.Incr()
RecordBackendNetworkRequestCountSlidingWindow(b, b.networkRequestsSlidingWindow.Count())
isSingleElementBatch := len(rpcReqs) == 1 isSingleElementBatch := len(rpcReqs) == 1
...@@ -391,7 +390,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool ...@@ -391,7 +390,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool
httpReq, err := http.NewRequestWithContext(ctx, "POST", b.rpcURL, bytes.NewReader(body)) httpReq, err := http.NewRequestWithContext(ctx, "POST", b.rpcURL, bytes.NewReader(body))
if err != nil { if err != nil {
b.networkErrorsSlidingWindow.Incr() b.networkErrorsSlidingWindow.Incr()
RecordBackendNetworkErrorCountSlidingWindow(b, b.networkErrorsSlidingWindow.Count()) RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate())
return nil, wrapErr(err, "error creating backend request") return nil, wrapErr(err, "error creating backend request")
} }
...@@ -413,7 +412,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool ...@@ -413,7 +412,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool
httpRes, err := b.client.DoLimited(httpReq) httpRes, err := b.client.DoLimited(httpReq)
if err != nil { if err != nil {
b.networkErrorsSlidingWindow.Incr() b.networkErrorsSlidingWindow.Incr()
RecordBackendNetworkErrorCountSlidingWindow(b, b.networkErrorsSlidingWindow.Count()) RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate())
return nil, wrapErr(err, "error in backend request") return nil, wrapErr(err, "error in backend request")
} }
...@@ -432,7 +431,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool ...@@ -432,7 +431,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool
// Alchemy returns a 400 on bad JSONs, so handle that case // Alchemy returns a 400 on bad JSONs, so handle that case
if httpRes.StatusCode != 200 && httpRes.StatusCode != 400 { if httpRes.StatusCode != 200 && httpRes.StatusCode != 400 {
b.networkErrorsSlidingWindow.Incr() b.networkErrorsSlidingWindow.Incr()
RecordBackendNetworkErrorCountSlidingWindow(b, b.networkErrorsSlidingWindow.Count()) RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate())
return nil, fmt.Errorf("response code %d", httpRes.StatusCode) return nil, fmt.Errorf("response code %d", httpRes.StatusCode)
} }
...@@ -440,7 +439,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool ...@@ -440,7 +439,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool
resB, err := io.ReadAll(io.LimitReader(httpRes.Body, b.maxResponseSize)) resB, err := io.ReadAll(io.LimitReader(httpRes.Body, b.maxResponseSize))
if err != nil { if err != nil {
b.networkErrorsSlidingWindow.Incr() b.networkErrorsSlidingWindow.Incr()
RecordBackendNetworkErrorCountSlidingWindow(b, b.networkErrorsSlidingWindow.Count()) RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate())
return nil, wrapErr(err, "error reading response body") return nil, wrapErr(err, "error reading response body")
} }
...@@ -458,18 +457,18 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool ...@@ -458,18 +457,18 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool
// Infura may return a single JSON-RPC response if, for example, the batch contains a request for an unsupported method // Infura may return a single JSON-RPC response if, for example, the batch contains a request for an unsupported method
if responseIsNotBatched(resB) { if responseIsNotBatched(resB) {
b.networkErrorsSlidingWindow.Incr() b.networkErrorsSlidingWindow.Incr()
RecordBackendNetworkErrorCountSlidingWindow(b, b.networkErrorsSlidingWindow.Count()) RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate())
return nil, ErrBackendUnexpectedJSONRPC return nil, ErrBackendUnexpectedJSONRPC
} }
b.networkErrorsSlidingWindow.Incr() b.networkErrorsSlidingWindow.Incr()
RecordBackendNetworkErrorCountSlidingWindow(b, b.networkErrorsSlidingWindow.Count()) RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate())
return nil, ErrBackendBadResponse return nil, ErrBackendBadResponse
} }
} }
if len(rpcReqs) != len(res) { if len(rpcReqs) != len(res) {
b.networkErrorsSlidingWindow.Incr() b.networkErrorsSlidingWindow.Incr()
RecordBackendNetworkErrorCountSlidingWindow(b, b.networkErrorsSlidingWindow.Count()) RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate())
return nil, ErrBackendUnexpectedJSONRPC return nil, ErrBackendUnexpectedJSONRPC
} }
...@@ -483,6 +482,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool ...@@ -483,6 +482,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool
duration := time.Since(start) duration := time.Since(start)
b.latencySlidingWindow.Add(float64(duration)) b.latencySlidingWindow.Add(float64(duration))
RecordBackendNetworkLatencyAverageSlidingWindow(b, time.Duration(b.latencySlidingWindow.Avg())) RecordBackendNetworkLatencyAverageSlidingWindow(b, time.Duration(b.latencySlidingWindow.Avg()))
RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate())
sortBatchRPCResponse(rpcReqs, res) sortBatchRPCResponse(rpcReqs, res)
return res, nil return res, nil
...@@ -490,11 +490,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool ...@@ -490,11 +490,7 @@ func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool
// IsHealthy checks if the backend is able to serve traffic, based on dynamic parameters // IsHealthy checks if the backend is able to serve traffic, based on dynamic parameters
func (b *Backend) IsHealthy() bool { func (b *Backend) IsHealthy() bool {
errorRate := float64(0) errorRate := b.ErrorRate()
// avoid division-by-zero when the window is empty
if b.networkRequestsSlidingWindow.Sum() >= 10 {
errorRate = b.networkErrorsSlidingWindow.Sum() / b.networkRequestsSlidingWindow.Sum()
}
avgLatency := time.Duration(b.latencySlidingWindow.Avg()) avgLatency := time.Duration(b.latencySlidingWindow.Avg())
if errorRate >= b.maxErrorRateThreshold { if errorRate >= b.maxErrorRateThreshold {
return false return false
...@@ -505,6 +501,16 @@ func (b *Backend) IsHealthy() bool { ...@@ -505,6 +501,16 @@ func (b *Backend) IsHealthy() bool {
return true return true
} }
// ErrorRate returns the instant error rate of the backend
func (b *Backend) ErrorRate() (errorRate float64) {
// we only really start counting the error rate after a minimum of 10 requests
// this is to avoid false positives when the backend is just starting up
if b.networkRequestsSlidingWindow.Sum() >= 10 {
errorRate = b.networkErrorsSlidingWindow.Sum() / b.networkRequestsSlidingWindow.Sum()
}
return errorRate
}
// IsDegraded checks if the backend is serving traffic in a degraded state (i.e. used as a last resort) // IsDegraded checks if the backend is serving traffic in a degraded state (i.e. used as a last resort)
func (b *Backend) IsDegraded() bool { func (b *Backend) IsDegraded() bool {
avgLatency := time.Duration(b.latencySlidingWindow.Avg()) avgLatency := time.Duration(b.latencySlidingWindow.Avg())
...@@ -556,7 +562,11 @@ func (bg *BackendGroup) Forward(ctx context.Context, rpcReqs []*RPCReq, isBatch ...@@ -556,7 +562,11 @@ func (bg *BackendGroup) Forward(ctx context.Context, rpcReqs []*RPCReq, isBatch
backends = bg.loadBalancedConsensusGroup() backends = bg.loadBalancedConsensusGroup()
// We also rewrite block tags to enforce compliance with consensus // We also rewrite block tags to enforce compliance with consensus
rctx := RewriteContext{latest: bg.Consensus.GetConsensusBlockNumber()} rctx := RewriteContext{
latest: bg.Consensus.GetLatestBlockNumber(),
finalized: bg.Consensus.GetFinalizedBlockNumber(),
safe: bg.Consensus.GetSafeBlockNumber(),
}
for i, req := range rpcReqs { for i, req := range rpcReqs {
res := RPCRes{JSONRPC: JSONRPCVersion, ID: req.ID} res := RPCRes{JSONRPC: JSONRPCVersion, ID: req.ID}
......
...@@ -13,35 +13,68 @@ import ( ...@@ -13,35 +13,68 @@ import (
// ConsensusTracker abstracts how we store and retrieve the current consensus // ConsensusTracker abstracts how we store and retrieve the current consensus
// allowing it to be stored locally in-memory or in a shared Redis cluster // allowing it to be stored locally in-memory or in a shared Redis cluster
type ConsensusTracker interface { type ConsensusTracker interface {
GetConsensusBlockNumber() hexutil.Uint64 GetLatestBlockNumber() hexutil.Uint64
SetConsensusBlockNumber(blockNumber hexutil.Uint64) SetLatestBlockNumber(blockNumber hexutil.Uint64)
GetFinalizedBlockNumber() hexutil.Uint64
SetFinalizedBlockNumber(blockNumber hexutil.Uint64)
GetSafeBlockNumber() hexutil.Uint64
SetSafeBlockNumber(blockNumber hexutil.Uint64)
} }
// InMemoryConsensusTracker stores and retrieves in memory, async-safe // InMemoryConsensusTracker stores and retrieves in memory, async-safe
type InMemoryConsensusTracker struct { type InMemoryConsensusTracker struct {
consensusBlockNumber hexutil.Uint64 latestBlockNumber hexutil.Uint64
finalizedBlockNumber hexutil.Uint64
safeBlockNumber hexutil.Uint64
mutex sync.Mutex mutex sync.Mutex
} }
func NewInMemoryConsensusTracker() ConsensusTracker { func NewInMemoryConsensusTracker() ConsensusTracker {
return &InMemoryConsensusTracker{ return &InMemoryConsensusTracker{
consensusBlockNumber: 0, mutex: sync.Mutex{},
mutex: sync.Mutex{},
} }
} }
func (ct *InMemoryConsensusTracker) GetConsensusBlockNumber() hexutil.Uint64 { func (ct *InMemoryConsensusTracker) GetLatestBlockNumber() hexutil.Uint64 {
defer ct.mutex.Unlock() defer ct.mutex.Unlock()
ct.mutex.Lock() ct.mutex.Lock()
return ct.consensusBlockNumber return ct.latestBlockNumber
} }
func (ct *InMemoryConsensusTracker) SetConsensusBlockNumber(blockNumber hexutil.Uint64) { func (ct *InMemoryConsensusTracker) SetLatestBlockNumber(blockNumber hexutil.Uint64) {
defer ct.mutex.Unlock() defer ct.mutex.Unlock()
ct.mutex.Lock() ct.mutex.Lock()
ct.consensusBlockNumber = blockNumber ct.latestBlockNumber = blockNumber
}
func (ct *InMemoryConsensusTracker) GetFinalizedBlockNumber() hexutil.Uint64 {
defer ct.mutex.Unlock()
ct.mutex.Lock()
return ct.finalizedBlockNumber
}
func (ct *InMemoryConsensusTracker) SetFinalizedBlockNumber(blockNumber hexutil.Uint64) {
defer ct.mutex.Unlock()
ct.mutex.Lock()
ct.finalizedBlockNumber = blockNumber
}
func (ct *InMemoryConsensusTracker) GetSafeBlockNumber() hexutil.Uint64 {
defer ct.mutex.Unlock()
ct.mutex.Lock()
return ct.safeBlockNumber
}
func (ct *InMemoryConsensusTracker) SetSafeBlockNumber(blockNumber hexutil.Uint64) {
defer ct.mutex.Unlock()
ct.mutex.Lock()
ct.safeBlockNumber = blockNumber
} }
// RedisConsensusTracker uses a Redis `client` to store and retrieve consensus, async-safe // RedisConsensusTracker uses a Redis `client` to store and retrieve consensus, async-safe
...@@ -59,14 +92,29 @@ func NewRedisConsensusTracker(ctx context.Context, r *redis.Client, namespace st ...@@ -59,14 +92,29 @@ func NewRedisConsensusTracker(ctx context.Context, r *redis.Client, namespace st
} }
} }
func (ct *RedisConsensusTracker) key() string { func (ct *RedisConsensusTracker) key(tag string) string {
return fmt.Sprintf("consensus_latest_block:%s", ct.backendGroup) return fmt.Sprintf("consensus:%s:%s", ct.backendGroup, tag)
} }
func (ct *RedisConsensusTracker) GetConsensusBlockNumber() hexutil.Uint64 { func (ct *RedisConsensusTracker) GetLatestBlockNumber() hexutil.Uint64 {
return hexutil.Uint64(hexutil.MustDecodeUint64(ct.client.Get(ct.ctx, ct.key()).Val())) return hexutil.Uint64(hexutil.MustDecodeUint64(ct.client.Get(ct.ctx, ct.key("latest")).Val()))
}
func (ct *RedisConsensusTracker) SetLatestBlockNumber(blockNumber hexutil.Uint64) {
ct.client.Set(ct.ctx, ct.key("latest"), blockNumber, 0)
}
func (ct *RedisConsensusTracker) GetFinalizedBlockNumber() hexutil.Uint64 {
return hexutil.Uint64(hexutil.MustDecodeUint64(ct.client.Get(ct.ctx, ct.key("finalized")).Val()))
}
func (ct *RedisConsensusTracker) SetFinalizedBlockNumber(blockNumber hexutil.Uint64) {
ct.client.Set(ct.ctx, ct.key("finalized"), blockNumber, 0)
}
func (ct *RedisConsensusTracker) GetSafeBlockNumber() hexutil.Uint64 {
return hexutil.Uint64(hexutil.MustDecodeUint64(ct.client.Get(ct.ctx, ct.key("safe")).Val()))
} }
func (ct *RedisConsensusTracker) SetConsensusBlockNumber(blockNumber hexutil.Uint64) { func (ct *RedisConsensusTracker) SetSafeBlockNumber(blockNumber hexutil.Uint64) {
ct.client.Set(ct.ctx, ct.key(), blockNumber, 0) ct.client.Set(ct.ctx, ct.key("safe"), blockNumber, 0)
} }
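With the tagged key scheme above, each tracked tag now lives under its own Redis key per backend group, instead of the single `consensus_latest_block:<group>` key. A quick sketch of the resulting layout (the group name "main" is a made-up example):

```go
package sketch

import "fmt"

// key mirrors RedisConsensusTracker.key from the diff above.
func key(backendGroup, tag string) string {
	return fmt.Sprintf("consensus:%s:%s", backendGroup, tag)
}

// For a hypothetical backend group named "main" the tracker reads and writes:
//   consensus:main:latest
//   consensus:main:safe
//   consensus:main:finalized
var exampleKeys = []string{
	key("main", "latest"),
	key("main", "safe"),
	key("main", "finalized"),
}
```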
...@@ -93,8 +93,8 @@ backends = ["infura"] ...@@ -93,8 +93,8 @@ backends = ["infura"]
# consensus_ban_period = "1m" # consensus_ban_period = "1m"
# Maximum delay for updating the backend, default 30s # Maximum delay for updating the backend, default 30s
# consensus_max_update_threshold = "20s" # consensus_max_update_threshold = "20s"
# Maximum block lag, default 50 # Maximum block lag, default 8
# consensus_max_block_lag = 10 # consensus_max_block_lag = 16
# Minimum peer count, default 3 # Minimum peer count, default 3
# consensus_min_peer_count = 4 # consensus_min_peer_count = 4
......
...@@ -18,7 +18,7 @@ consensus_aware = true ...@@ -18,7 +18,7 @@ consensus_aware = true
consensus_handler = "noop" # allow more control over the consensus poller for tests consensus_handler = "noop" # allow more control over the consensus poller for tests
consensus_ban_period = "1m" consensus_ban_period = "1m"
consensus_max_update_threshold = "2m" consensus_max_update_threshold = "2m"
consensus_max_block_lag = 50 consensus_max_block_lag = 8
consensus_min_peer_count = 4 consensus_min_peer_count = 4
[rpc_method_mappings] [rpc_method_mappings]
......
...@@ -26,40 +26,161 @@ ...@@ -26,40 +26,161 @@
"jsonrpc": "2.0", "jsonrpc": "2.0",
"id": 67, "id": 67,
"result": { "result": {
"hash": "hash1", "hash": "hash_0x101",
"number": "0x1" "number": "0x101"
} }
} }
- method: eth_getBlockByNumber - method: eth_getBlockByNumber
block: 0x1 block: 0x101
response: > response: >
{ {
"jsonrpc": "2.0", "jsonrpc": "2.0",
"id": 67, "id": 67,
"result": { "result": {
"hash": "hash1", "hash": "hash_0x101",
"number": "0x1" "number": "0x101"
} }
} }
- method: eth_getBlockByNumber - method: eth_getBlockByNumber
block: 0x2 block: 0x102
response: > response: >
{ {
"jsonrpc": "2.0", "jsonrpc": "2.0",
"id": 67, "id": 67,
"result": { "result": {
"hash": "hash2", "hash": "hash_0x102",
"number": "0x2" "number": "0x102"
} }
} }
- method: eth_getBlockByNumber - method: eth_getBlockByNumber
block: 0x3 block: 0x103
response: > response: >
{ {
"jsonrpc": "2.0", "jsonrpc": "2.0",
"id": 67, "id": 67,
"result": { "result": {
"hash": "hash3", "hash": "hash_0x103",
"number": "0x3" "number": "0x103"
}
}
- method: eth_getBlockByNumber
block: 0x10a
response: >
{
"jsonrpc": "2.0",
"id": 67,
"result": {
"hash": "hash_0x10a",
"number": "0x10a"
}
}
- method: eth_getBlockByNumber
block: 0x132
response: >
{
"jsonrpc": "2.0",
"id": 67,
"result": {
"hash": "hash_0x132",
"number": "0x132"
}
}
- method: eth_getBlockByNumber
block: 0x133
response: >
{
"jsonrpc": "2.0",
"id": 67,
"result": {
"hash": "hash_0x133",
"number": "0x133"
}
}
- method: eth_getBlockByNumber
block: 0x134
response: >
{
"jsonrpc": "2.0",
"id": 67,
"result": {
"hash": "hash_0x134",
"number": "0x134"
}
}
- method: eth_getBlockByNumber
block: 0x200
response: >
{
"jsonrpc": "2.0",
"id": 67,
"result": {
"hash": "hash_0x200",
"number": "0x200"
}
}
- method: eth_getBlockByNumber
block: 0x91
response: >
{
"jsonrpc": "2.0",
"id": 67,
"result": {
"hash": "hash_0x91",
"number": "0x91"
}
}
- method: eth_getBlockByNumber
block: safe
response: >
{
"jsonrpc": "2.0",
"id": 67,
"result": {
"hash": "hash_0xe1",
"number": "0xe1"
}
}
- method: eth_getBlockByNumber
block: 0xe1
response: >
{
"jsonrpc": "2.0",
"id": 67,
"result": {
"hash": "hash_0xe1",
"number": "0xe1"
}
}
- method: eth_getBlockByNumber
block: finalized
response: >
{
"jsonrpc": "2.0",
"id": 67,
"result": {
"hash": "hash_0xc1",
"number": "0xc1"
}
}
- method: eth_getBlockByNumber
block: 0xc1
response: >
{
"jsonrpc": "2.0",
"id": 67,
"result": {
"hash": "hash_0xc1",
"number": "0xc1"
}
}
- method: eth_getBlockByNumber
block: 0xd1
response: >
{
"jsonrpc": "2.0",
"id": 67,
"result": {
"hash": "hash_0xd1",
"number": "0xd1"
} }
} }
...@@ -246,6 +246,22 @@ var ( ...@@ -246,6 +246,22 @@ var (
"backend_group_name", "backend_group_name",
}) })
consensusSafeBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Name: "group_consensus_safe_block",
Help: "Consensus safe block",
}, []string{
"backend_group_name",
})
consensusFinalizedBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Name: "group_consensus_finalized_block",
Help: "Consensus finalized block",
}, []string{
"backend_group_name",
})
backendLatestBlockBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ backendLatestBlockBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace, Namespace: MetricsNamespace,
Name: "backend_latest_block", Name: "backend_latest_block",
...@@ -254,6 +270,30 @@ var ( ...@@ -254,6 +270,30 @@ var (
"backend_name", "backend_name",
}) })
backendSafeBlockBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Name: "backend_safe_block",
Help: "Current safe block observed per backend",
}, []string{
"backend_name",
})
backendFinalizedBlockBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Name: "backend_finalized_block",
Help: "Current finalized block observed per backend",
}, []string{
"backend_name",
})
backendUnexpectedBlockTagsBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Name: "backend_unexpected_block_tags",
Help: "Bool gauge for unexpected block tags",
}, []string{
"backend_name",
})
consensusGroupCount = promauto.NewGaugeVec(prometheus.GaugeOpts{ consensusGroupCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace, Namespace: MetricsNamespace,
Name: "group_consensus_count", Name: "group_consensus_count",
...@@ -318,18 +358,10 @@ var ( ...@@ -318,18 +358,10 @@ var (
"backend_name", "backend_name",
}) })
networkErrorCountBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ networkErrorRateBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace, Namespace: MetricsNamespace,
Name: "backend_net_error_count", Name: "backend_error_rate",
Help: "Network error count per backend", Help: "Request error rate per backend",
}, []string{
"backend_name",
})
requestCountBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Name: "backend_request_count",
Help: "Request count per backend",
}, []string{ }, []string{
"backend_name", "backend_name",
}) })
...@@ -402,6 +434,14 @@ func RecordGroupConsensusLatestBlock(group *BackendGroup, blockNumber hexutil.Ui ...@@ -402,6 +434,14 @@ func RecordGroupConsensusLatestBlock(group *BackendGroup, blockNumber hexutil.Ui
consensusLatestBlock.WithLabelValues(group.Name).Set(float64(blockNumber)) consensusLatestBlock.WithLabelValues(group.Name).Set(float64(blockNumber))
} }
func RecordGroupConsensusSafeBlock(group *BackendGroup, blockNumber hexutil.Uint64) {
consensusSafeBlock.WithLabelValues(group.Name).Set(float64(blockNumber))
}
func RecordGroupConsensusFinalizedBlock(group *BackendGroup, blockNumber hexutil.Uint64) {
consensusFinalizedBlock.WithLabelValues(group.Name).Set(float64(blockNumber))
}
func RecordGroupConsensusCount(group *BackendGroup, count int) { func RecordGroupConsensusCount(group *BackendGroup, count int) {
consensusGroupCount.WithLabelValues(group.Name).Set(float64(count)) consensusGroupCount.WithLabelValues(group.Name).Set(float64(count))
} }
...@@ -418,12 +458,20 @@ func RecordBackendLatestBlock(b *Backend, blockNumber hexutil.Uint64) { ...@@ -418,12 +458,20 @@ func RecordBackendLatestBlock(b *Backend, blockNumber hexutil.Uint64) {
backendLatestBlockBackend.WithLabelValues(b.Name).Set(float64(blockNumber)) backendLatestBlockBackend.WithLabelValues(b.Name).Set(float64(blockNumber))
} }
func RecordBackendSafeBlock(b *Backend, blockNumber hexutil.Uint64) {
backendSafeBlockBackend.WithLabelValues(b.Name).Set(float64(blockNumber))
}
func RecordBackendFinalizedBlock(b *Backend, blockNumber hexutil.Uint64) {
backendFinalizedBlockBackend.WithLabelValues(b.Name).Set(float64(blockNumber))
}
func RecordBackendUnexpectedBlockTags(b *Backend, unexpected bool) {
backendUnexpectedBlockTagsBackend.WithLabelValues(b.Name).Set(boolToFloat64(unexpected))
}
func RecordConsensusBackendBanned(b *Backend, banned bool) { func RecordConsensusBackendBanned(b *Backend, banned bool) {
v := float64(0) consensusBannedBackends.WithLabelValues(b.Name).Set(boolToFloat64(banned))
if banned {
v = float64(1)
}
consensusBannedBackends.WithLabelValues(b.Name).Set(v)
} }
func RecordConsensusBackendPeerCount(b *Backend, peerCount uint64) { func RecordConsensusBackendPeerCount(b *Backend, peerCount uint64) {
...@@ -431,11 +479,7 @@ func RecordConsensusBackendPeerCount(b *Backend, peerCount uint64) { ...@@ -431,11 +479,7 @@ func RecordConsensusBackendPeerCount(b *Backend, peerCount uint64) {
} }
func RecordConsensusBackendInSync(b *Backend, inSync bool) { func RecordConsensusBackendInSync(b *Backend, inSync bool) {
v := float64(0) consensusInSyncBackend.WithLabelValues(b.Name).Set(boolToFloat64(inSync))
if inSync {
v = float64(1)
}
consensusInSyncBackend.WithLabelValues(b.Name).Set(v)
} }
func RecordConsensusBackendUpdateDelay(b *Backend, delay time.Duration) { func RecordConsensusBackendUpdateDelay(b *Backend, delay time.Duration) {
...@@ -446,10 +490,13 @@ func RecordBackendNetworkLatencyAverageSlidingWindow(b *Backend, avgLatency time ...@@ -446,10 +490,13 @@ func RecordBackendNetworkLatencyAverageSlidingWindow(b *Backend, avgLatency time
avgLatencyBackend.WithLabelValues(b.Name).Set(float64(avgLatency.Milliseconds())) avgLatencyBackend.WithLabelValues(b.Name).Set(float64(avgLatency.Milliseconds()))
} }
func RecordBackendNetworkRequestCountSlidingWindow(b *Backend, count uint) { func RecordBackendNetworkErrorRateSlidingWindow(b *Backend, rate float64) {
requestCountBackend.WithLabelValues(b.Name).Set(float64(count)) networkErrorRateBackend.WithLabelValues(b.Name).Set(rate)
} }
func RecordBackendNetworkErrorCountSlidingWindow(b *Backend, count uint) { func boolToFloat64(b bool) float64 {
networkErrorCountBackend.WithLabelValues(b.Name).Set(float64(count)) if b {
return 1
}
return 0
} }
...@@ -9,7 +9,9 @@ import ( ...@@ -9,7 +9,9 @@ import (
) )
type RewriteContext struct { type RewriteContext struct {
latest hexutil.Uint64 latest hexutil.Uint64
safe hexutil.Uint64
finalized hexutil.Uint64
} }
type RewriteResult uint8 type RewriteResult uint8
...@@ -180,11 +182,13 @@ func rewriteTag(rctx RewriteContext, current string) (string, bool, error) { ...@@ -180,11 +182,13 @@ func rewriteTag(rctx RewriteContext, current string) (string, bool, error) {
} }
switch *bnh.BlockNumber { switch *bnh.BlockNumber {
case rpc.SafeBlockNumber, case rpc.PendingBlockNumber,
rpc.FinalizedBlockNumber,
rpc.PendingBlockNumber,
rpc.EarliestBlockNumber: rpc.EarliestBlockNumber:
return current, false, nil return current, false, nil
case rpc.FinalizedBlockNumber:
return rctx.finalized.String(), true, nil
case rpc.SafeBlockNumber:
return rctx.safe.String(), true, nil
case rpc.LatestBlockNumber: case rpc.LatestBlockNumber:
return rctx.latest.String(), true, nil return rctx.latest.String(), true, nil
default: default:
......
...@@ -326,33 +326,33 @@ func TestRewriteRequest(t *testing.T) { ...@@ -326,33 +326,33 @@ func TestRewriteRequest(t *testing.T) {
{ {
name: "eth_getBlockByNumber finalized", name: "eth_getBlockByNumber finalized",
args: args{ args: args{
rctx: RewriteContext{latest: hexutil.Uint64(100)}, rctx: RewriteContext{latest: hexutil.Uint64(100), finalized: hexutil.Uint64(55)},
req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{"finalized"})}, req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{"finalized"})},
res: nil, res: nil,
}, },
expected: RewriteNone, expected: RewriteOverrideRequest,
check: func(t *testing.T, args args) { check: func(t *testing.T, args args) {
var p []string var p []string
err := json.Unmarshal(args.req.Params, &p) err := json.Unmarshal(args.req.Params, &p)
require.Nil(t, err) require.Nil(t, err)
require.Equal(t, 1, len(p)) require.Equal(t, 1, len(p))
require.Equal(t, "finalized", p[0]) require.Equal(t, hexutil.Uint64(55).String(), p[0])
}, },
}, },
{ {
name: "eth_getBlockByNumber safe", name: "eth_getBlockByNumber safe",
args: args{ args: args{
rctx: RewriteContext{latest: hexutil.Uint64(100)}, rctx: RewriteContext{latest: hexutil.Uint64(100), safe: hexutil.Uint64(50)},
req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{"safe"})}, req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{"safe"})},
res: nil, res: nil,
}, },
expected: RewriteNone, expected: RewriteOverrideRequest,
check: func(t *testing.T, args args) { check: func(t *testing.T, args args) {
var p []string var p []string
err := json.Unmarshal(args.req.Params, &p) err := json.Unmarshal(args.req.Params, &p)
require.Nil(t, err) require.Nil(t, err)
require.Equal(t, 1, len(p)) require.Equal(t, 1, len(p))
require.Equal(t, "safe", p[0]) require.Equal(t, hexutil.Uint64(50).String(), p[0])
}, },
}, },
{ {
......
...@@ -95,7 +95,7 @@ func (mh *MockedHandler) Handler(w http.ResponseWriter, req *http.Request) { ...@@ -95,7 +95,7 @@ func (mh *MockedHandler) Handler(w http.ResponseWriter, req *http.Request) {
resBody := "" resBody := ""
if batched { if batched {
resBody = "[" + strings.Join(responses, ",") + "]" resBody = "[" + strings.Join(responses, ",") + "]"
} else { } else if len(responses) > 0 {
resBody = responses[0] resBody = responses[0]
} }
......