exchain / nebula · Commit d64569f2 (unverified)
Authored Apr 28, 2023 by felipe-op; committed via GitHub on Apr 28, 2023.

Merge pull request #5556 from ethereum-optimism/felipe/lb-round-robin
proxyd: round-robin load balancing for consensus group
Parents: 6d77414f, 63c734c9
4 changed files, with 130 additions and 3 deletions:

proxyd/backend.go (+43, -1)
proxyd/integration_tests/consensus_test.go (+79, -1)
proxyd/integration_tests/testdata/consensus.toml (+1, -1)
proxyd/integration_tests/testdata/consensus_responses.yml (+7, -0)
proxyd/backend.go

```diff
@@ -591,9 +591,17 @@ func (b *BackendGroup) Forward(ctx context.Context, rpcReqs []*RPCReq, isBatch b
 		return nil, nil
 	}
 
+	backends := b.Backends
+
+	// When `consensus_aware` is set to `true`, the backend group acts as a load balancer
+	// serving traffic from any backend that agrees in the consensus group
+	if b.Consensus != nil {
+		backends = b.loadBalancedConsensusGroup()
+	}
+
 	rpcRequestsTotal.Inc()
 
-	for _, back := range b.Backends {
+	for _, back := range backends {
 		res, err := back.Forward(ctx, rpcReqs, isBatch)
 		if errors.Is(err, ErrMethodNotWhitelisted) {
 			return nil, err
```
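For orientation: the loop above now iterates `backends` (the freshly computed, load-balanced ordering) instead of the static `b.Backends` slice, moving on to the next backend when one fails. A minimal sketch of that ordered-failover idea, using a hypothetical `tryBackend` in place of `back.Forward` (proxyd's real loop also special-cases errors such as `ErrMethodNotWhitelisted`, which abort instead of failing over):

```go
package main

import (
	"errors"
	"fmt"
)

// errUnhealthy is a stand-in for a per-backend failure.
var errUnhealthy = errors.New("backend failed")

// tryBackend is hypothetical; it stands in for back.Forward in proxyd.
func tryBackend(name string) (string, error) {
	if name == "node1" {
		return "", errUnhealthy
	}
	return "result from " + name, nil
}

func main() {
	// Backends are tried in load-balanced order; the first success wins.
	for _, name := range []string{"node1", "node2"} {
		res, err := tryBackend(name)
		if err != nil {
			fmt.Printf("%s failed, trying next backend: %v\n", name, err)
			continue
		}
		fmt.Println(res)
		return
	}
	fmt.Println("no backends available")
}
```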
The second hunk adds the new ordering helper:

```diff
@@ -670,6 +678,40 @@ func (b *BackendGroup) ProxyWS(ctx context.Context, clientConn *websocket.Conn,
 	return nil, ErrNoBackends
 }
 
+func (b *BackendGroup) loadBalancedConsensusGroup() []*Backend {
+	cg := b.Consensus.GetConsensusGroup()
+
+	backendsHealthy := make([]*Backend, 0, len(cg))
+	backendsDegraded := make([]*Backend, 0, len(cg))
+	// separate into healthy, degraded and unhealthy backends
+	for _, be := range cg {
+		// unhealthy are filtered out and not attempted
+		if !be.IsHealthy() {
+			continue
+		}
+		if be.IsDegraded() {
+			backendsDegraded = append(backendsDegraded, be)
+			continue
+		}
+		backendsHealthy = append(backendsHealthy, be)
+	}
+
+	// shuffle both slices
+	r := rand.New(rand.NewSource(time.Now().UnixNano()))
+	r.Shuffle(len(backendsHealthy), func(i, j int) {
+		backendsHealthy[i], backendsHealthy[j] = backendsHealthy[j], backendsHealthy[i]
+	})
+	r.Shuffle(len(backendsDegraded), func(i, j int) {
+		backendsDegraded[i], backendsDegraded[j] = backendsDegraded[j], backendsDegraded[i]
+	})
+
+	// healthy are put into a priority position
+	// degraded backends are used as fallback
+	backendsHealthy = append(backendsHealthy, backendsDegraded...)
+
+	return backendsHealthy
+}
+
```
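This function is the heart of the change: healthy members of the consensus group are shuffled and tried first, while degraded ones are shuffled and appended as a fallback tier. A self-contained sketch (plain strings in place of `*Backend` values, with hypothetical node names) showing the resulting distribution of first-choice backends:

```go
package main

import (
	"fmt"
	"math/rand"
)

// prioritizedOrder mimics loadBalancedConsensusGroup: shuffle the healthy
// tier, shuffle the degraded tier, then concatenate so degraded backends
// are only reached after every healthy backend has been tried.
func prioritizedOrder(r *rand.Rand, healthy, degraded []string) []string {
	h := append([]string(nil), healthy...)
	d := append([]string(nil), degraded...)
	r.Shuffle(len(h), func(i, j int) { h[i], h[j] = h[j], h[i] })
	r.Shuffle(len(d), func(i, j int) { d[i], d[j] = d[j], d[i] })
	return append(h, d...)
}

func main() {
	r := rand.New(rand.NewSource(1))
	firstChoice := map[string]int{}
	for n := 0; n < 10000; n++ {
		order := prioritizedOrder(r, []string{"node1", "node2"}, []string{"node3"})
		firstChoice[order[0]]++ // whichever backend comes first serves the request
	}
	// Expect roughly 5000/5000 between node1 and node2, and 0 for node3,
	// since a degraded backend never outranks a healthy one.
	fmt.Println(firstChoice)
}
```

Because the ordering is recomputed on every request, no round-robin cursor has to be shared between goroutines, yet the expected load still converges to a uniform split across the healthy tier.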
The hunk's remaining context lines show the beginning of `calcBackoff`:

```diff
 func calcBackoff(i int) time.Duration {
 	jitter := float64(rand.Int63n(250))
 	ms := math.Min(math.Pow(2, float64(i))*1000+jitter, 3000)
```
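From the visible lines, `calcBackoff` computes exponential backoff in milliseconds with up to 250 ms of random jitter, capped at 3000 ms. A quick sketch evaluating that formula (the final conversion of `ms` to `time.Duration` is an assumption, since the body is truncated in the diff):

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// calcBackoffSketch reproduces the visible part of calcBackoff:
// 2^i seconds of backoff plus up to 250ms of jitter, capped at 3s.
// The return expression is assumed; the original body is truncated.
func calcBackoffSketch(i int) time.Duration {
	jitter := float64(rand.Int63n(250))
	ms := math.Min(math.Pow(2, float64(i))*1000+jitter, 3000)
	return time.Duration(ms) * time.Millisecond
}

func main() {
	// Attempt 0 waits roughly 1.0-1.25s, attempt 1 roughly 2.0-2.25s,
	// and every attempt from 2 onward hits the 3s cap.
	for i := 0; i < 4; i++ {
		fmt.Printf("attempt %d: %v\n", i, calcBackoffSketch(i))
	}
}
```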
proxyd/integration_tests/consensus_test.go

```diff
@@ -3,6 +3,7 @@ package integration_tests
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"net/http"
 	"os"
 	"path"
```
```diff
@@ -47,6 +48,7 @@ func TestConsensus(t *testing.T) {
 	ctx := context.Background()
 	svr, shutdown, err := proxyd.Start(config)
 	require.NoError(t, err)
+	client := NewProxydClient("http://127.0.0.1:8545")
 	defer shutdown()
 
 	bg := svr.BackendGroups["node"]
```
```diff
@@ -76,7 +78,6 @@ func TestConsensus(t *testing.T) {
 		h2.ResetOverrides()
 		bg.Consensus.Unban()
 
-		// advance latest on node2 to 0x2
 		h1.AddOverride(&ms.MethodTemplate{
 			Method: "net_peerCount",
 			Block:  "",
```
```diff
@@ -355,6 +356,83 @@ func TestConsensus(t *testing.T) {
 		// should resolve to 0x1, the highest common ancestor
 		require.Equal(t, "0x1", bg.Consensus.GetConsensusBlockNumber().String())
 	})
+
+	t.Run("load balancing should hit both backends", func(t *testing.T) {
+		h1.ResetOverrides()
+		h2.ResetOverrides()
+		bg.Consensus.Unban()
+
+		for _, be := range bg.Backends {
+			bg.Consensus.UpdateBackend(ctx, be)
+		}
+		bg.Consensus.UpdateBackendGroupConsensus(ctx)
+		require.Equal(t, 2, len(bg.Consensus.GetConsensusGroup()))
+
+		node1.Reset()
+		node2.Reset()
+		require.Equal(t, 0, len(node1.Requests()))
+		require.Equal(t, 0, len(node2.Requests()))
+
+		// there is a random component to this test,
+		// since our round-robin implementation shuffles the ordering
+		// to achieve uniform distribution
+		// so we just make 100 requests per backend and expect the number of requests to be somewhat balanced
+		// i.e. each backend should be hit minimally by at least 50% of the requests
+		consensusGroup := bg.Consensus.GetConsensusGroup()
+		numberReqs := len(consensusGroup) * 100
+		for numberReqs > 0 {
+			_, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x1", false})
+			require.NoError(t, err)
+			require.Equal(t, 200, statusCode)
+			numberReqs--
+		}
+
+		msg := fmt.Sprintf("n1 %d, n2 %d", len(node1.Requests()), len(node2.Requests()))
+		require.GreaterOrEqual(t, len(node1.Requests()), 50, msg)
+		require.GreaterOrEqual(t, len(node2.Requests()), 50, msg)
+	})
+
+	t.Run("load balancing should not hit if node is not healthy", func(t *testing.T) {
+		h1.ResetOverrides()
+		h2.ResetOverrides()
+		bg.Consensus.Unban()
+
+		// node1 should not be serving any traffic
+		h1.AddOverride(&ms.MethodTemplate{
+			Method:   "net_peerCount",
+			Block:    "",
+			Response: buildPeerCountResponse(1),
+		})
+
+		for _, be := range bg.Backends {
+			bg.Consensus.UpdateBackend(ctx, be)
+		}
+		bg.Consensus.UpdateBackendGroupConsensus(ctx)
+		require.Equal(t, 1, len(bg.Consensus.GetConsensusGroup()))
+
+		node1.Reset()
+		node2.Reset()
+		require.Equal(t, 0, len(node1.Requests()))
+		require.Equal(t, 0, len(node2.Requests()))
+
+		numberReqs := 10
+		for numberReqs > 0 {
+			_, statusCode, err := client.SendRPC("eth_getBlockByNumber", []interface{}{"0x1", false})
+			require.NoError(t, err)
+			require.Equal(t, 200, statusCode)
+			numberReqs--
+		}
+
+		msg := fmt.Sprintf("n1 %d, n2 %d", len(node1.Requests()), len(node2.Requests()))
+		require.Equal(t, len(node1.Requests()), 0, msg)
+		require.Equal(t, len(node2.Requests()), 10, msg)
+	})
 }
 
 func backend(bg *proxyd.BackendGroup, name string) *proxyd.Backend {
```
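The balance assertion above (at least 50 of 200 requests per backend) leaves a wide statistical margin. Under the simplifying assumption that each request independently lands on one of the two backends with equal probability, the chance of either backend falling below 50 is negligible; a throwaway simulation (illustrative only, not part of the test suite) makes that concrete:

```go
package main

import (
	"fmt"
	"math/rand"
)

// Simulates the test's balance assertion: 200 requests split uniformly
// between two backends, "flaking" if either backend receives fewer than 50.
func main() {
	r := rand.New(rand.NewSource(42))
	const trials = 100000
	flakes := 0
	for t := 0; t < trials; t++ {
		n1 := 0
		for i := 0; i < 200; i++ {
			if r.Intn(2) == 0 {
				n1++
			}
		}
		if n1 < 50 || 200-n1 < 50 {
			flakes++
		}
	}
	fmt.Printf("flaky runs: %d of %d\n", flakes, trials) // expect 0
}
```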
proxyd/integration_tests/testdata/consensus.toml

```diff
 [server]
-rpc_port = 8080
+rpc_port = 8545
 
 [backend]
 response_timeout_seconds = 1
...
```
proxyd/integration_tests/testdata/consensus_responses.yml

```diff
 - method: eth_chainId
   response: >
     {
       "jsonrpc": "2.0",
       "id": 67,
       "result": "hello",
     }
+- method: net_peerCount
+  response: >
+    {
...
```