exchain / nebula · Commits

Commit 73012b0e (unverified)
Authored Jun 01, 2023 by OptimismBot, committed by GitHub on Jun 01, 2023
Merge pull request #5800 from ethereum-optimism/sync-test-race-fix
op-node: fix TestMultiPeerSync race condition
Parents: 30a5aa10, 2b558e5e
Showing 2 changed files with 69 additions and 31 deletions:
  op-node/p2p/sync.go       +1  -0
  op-node/p2p/sync_test.go  +68 -31
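Taken together, the two files address two issues in TestMultiPeerSync: sync_test.go wraps the shared payload map in a mutex-protected helper so mock peers can read it while the test goroutine mutates it, and the re-request step now waits until the failed request is marked complete before asking again; sync.go only gains a debug log on the skip path. Concurrent access to a bare map is the kind of problem the Go race detector reports, so a typical way to check the fix is something like "go test -race -run TestMultiPeerSync ./p2p" from inside the op-node module (the exact invocation is an assumption based on the file paths above, not part of the commit).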
op-node/p2p/sync.go (view file @ 73012b0e)
@@ -368,6 +368,7 @@ func (s *SyncClient) onRangeRequest(ctx context.Context, req rangeRequest) {
 		}
 
 		if _, ok := s.inFlight[num]; ok {
+			log.Debug("request still in-flight, not rescheduling sync request", "num", num)
 			continue // request still in flight
 		}
 		pr := peerRequest{num: num, complete: new(atomic.Bool)}
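The only functional change in sync.go is the added debug log on the skip path, but the bookkeeping around it is what the test fix below depends on: onRangeRequest keeps a completion flag per in-flight block number and refuses to reschedule a number that is still pending. A minimal, self-contained sketch of that pattern follows; the scheduler, schedule, and markDone names are illustrative and not part of the SyncClient API, and the sketch folds the completion check into the skip condition for brevity.

package main

import (
	"fmt"
	"sync/atomic"
)

// scheduler tracks a completion flag per requested block number, loosely
// modeled on SyncClient.inFlight above (illustrative only, single-goroutine).
type scheduler struct {
	inFlight map[uint64]*atomic.Bool
}

// schedule refuses to reschedule a number whose request has not completed yet,
// which is the "request still in-flight, not rescheduling sync request" path.
func (s *scheduler) schedule(num uint64) bool {
	if done, ok := s.inFlight[num]; ok && !done.Load() {
		fmt.Println("request still in-flight, not rescheduling sync request", "num", num)
		return false
	}
	s.inFlight[num] = new(atomic.Bool) // fresh flag; a worker flips it when done
	return true
}

// markDone is what a peer worker would call once the request succeeds or errors.
func (s *scheduler) markDone(num uint64) {
	if done, ok := s.inFlight[num]; ok {
		done.Store(true)
	}
}

func main() {
	s := &scheduler{inFlight: make(map[uint64]*atomic.Bool)}
	fmt.Println(s.schedule(25)) // true: nothing pending yet
	fmt.Println(s.schedule(25)) // false: still pending
	s.markDone(25)              // worker reports the (possibly failed) request as complete
	fmt.Println(s.schedule(25)) // true: the completed flag allows a re-request
}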
op-node/p2p/sync_test.go (view file @ 73012b0e)
@@ -3,7 +3,9 @@ package p2p
 import (
 	"context"
 	"math/big"
+	"sync"
 	"testing"
+	"time"
 
 	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/network"
@@ -29,7 +31,42 @@ func (fn mockPayloadFn) PayloadByNumber(_ context.Context, number uint64) (*eth.
 
 var _ L2Chain = mockPayloadFn(nil)
 
-func setupSyncTestData(length uint64) (*rollup.Config, map[uint64]*eth.ExecutionPayload, func(i uint64) eth.L2BlockRef) {
+type syncTestData struct {
+	sync.RWMutex
+	payloads map[uint64]*eth.ExecutionPayload
+}
+
+func (s *syncTestData) getPayload(i uint64) (payload *eth.ExecutionPayload, ok bool) {
+	s.RLock()
+	defer s.RUnlock()
+	payload, ok = s.payloads[i]
+	return payload, ok
+}
+
+func (s *syncTestData) deletePayload(i uint64) {
+	s.Lock()
+	defer s.Unlock()
+	delete(s.payloads, i)
+}
+
+func (s *syncTestData) addPayload(payload *eth.ExecutionPayload) {
+	s.Lock()
+	defer s.Unlock()
+	s.payloads[uint64(payload.BlockNumber)] = payload
+}
+
+func (s *syncTestData) getBlockRef(i uint64) eth.L2BlockRef {
+	s.RLock()
+	defer s.RUnlock()
+	return eth.L2BlockRef{
+		Hash:       s.payloads[i].BlockHash,
+		Number:     uint64(s.payloads[i].BlockNumber),
+		ParentHash: s.payloads[i].ParentHash,
+		Time:       uint64(s.payloads[i].Timestamp),
+	}
+}
+
+func setupSyncTestData(length uint64) (*rollup.Config, *syncTestData) {
 	// minimal rollup config to build mock blocks & verify their time.
 	cfg := &rollup.Config{
 		Genesis: rollup.Genesis{
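The hunk above replaces the bare map[uint64]*eth.ExecutionPayload (and the l2Ref closure) with a syncTestData wrapper whose accessors take the embedded sync.RWMutex. The point is that the mock payload servers read the map from libp2p handler goroutines while the test goroutine deletes and re-adds entries, and a bare Go map is not safe for concurrent read and write. A small standalone sketch of the same wrapper pattern, using a string value type instead of *eth.ExecutionPayload so it stays self-contained (names here are illustrative, not from the commit):

package main

import (
	"fmt"
	"sync"
)

// store guards a map with an embedded RWMutex, the same shape as the
// syncTestData helper in the diff above.
type store struct {
	sync.RWMutex
	payloads map[uint64]string
}

func (s *store) get(i uint64) (string, bool) {
	s.RLock()
	defer s.RUnlock()
	p, ok := s.payloads[i]
	return p, ok
}

func (s *store) add(i uint64, p string) {
	s.Lock()
	defer s.Unlock()
	s.payloads[i] = p
}

func (s *store) del(i uint64) {
	s.Lock()
	defer s.Unlock()
	delete(s.payloads, i)
}

func main() {
	s := &store{payloads: map[uint64]string{25: "block-25"}}

	// Concurrent reader, standing in for the mock payload server goroutines.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 1000; i++ {
			s.get(25)
		}
	}()

	// Writer, standing in for the test goroutine deleting and re-adding block 25.
	s.del(25)
	s.add(25, "block-25")

	wg.Wait()
	p, ok := s.get(25)
	fmt.Println(p, ok) // with a bare map instead, go test -race would flag this pattern
}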
@@ -57,15 +94,7 @@ func setupSyncTestData(length uint64) (*rollup.Config, map[uint64]*eth.Execution
 		payloads[i] = payload
 	}
 
-	l2Ref := func(i uint64) eth.L2BlockRef {
-		return eth.L2BlockRef{
-			Hash:       payloads[i].BlockHash,
-			Number:     uint64(payloads[i].BlockNumber),
-			ParentHash: payloads[i].ParentHash,
-			Time:       uint64(payloads[i].Timestamp),
-		}
-	}
-	return cfg, payloads, l2Ref
+	return cfg, &syncTestData{payloads: payloads}
 }
 
 func TestSinglePeerSync(t *testing.T) {
@@ -73,11 +102,11 @@ func TestSinglePeerSync(t *testing.T) {
 
 	log := testlog.Logger(t, log.LvlError)
 
-	cfg, payloads, l2Ref := setupSyncTestData(25)
+	cfg, payloads := setupSyncTestData(25)
 
 	// Serving payloads: just load them from the map, if they exist
 	servePayload := mockPayloadFn(func(n uint64) (*eth.ExecutionPayload, error) {
-		p, ok := payloads[n]
+		p, ok := payloads.getPayload(n)
 		if !ok {
 			return nil, ethereum.NotFound
 		}
@@ -116,13 +145,13 @@ func TestSinglePeerSync(t *testing.T) {
 	defer cl.Close()
 
 	// request to start syncing between 10 and 20
-	require.NoError(t, cl.RequestL2Range(ctx, l2Ref(10), l2Ref(20)))
+	require.NoError(t, cl.RequestL2Range(ctx, payloads.getBlockRef(10), payloads.getBlockRef(20)))
 
 	// and wait for the sync results to come in (in reverse order)
 	for i := uint64(19); i > 10; i-- {
 		p := <-received
 		require.Equal(t, uint64(p.BlockNumber), i, "expecting payloads in order")
-		exp, ok := payloads[uint64(p.BlockNumber)]
+		exp, ok := payloads.getPayload(uint64(p.BlockNumber))
 		require.True(t, ok, "expecting known payload")
 		require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
 	}
@@ -131,14 +160,14 @@ func TestSinglePeerSync(t *testing.T) {
 func TestMultiPeerSync(t *testing.T) {
 	t.Parallel() // Takes a while, but can run in parallel
 
-	log := testlog.Logger(t, log.LvlError)
+	log := testlog.Logger(t, log.LvlDebug)
 
-	cfg, payloads, l2Ref := setupSyncTestData(100)
+	cfg, payloads := setupSyncTestData(100)
 
 	setupPeer := func(ctx context.Context, h host.Host) (*SyncClient, chan *eth.ExecutionPayload) {
 		// Serving payloads: just load them from the map, if they exist
 		servePayload := mockPayloadFn(func(n uint64) (*eth.ExecutionPayload, error) {
-			p, ok := payloads[n]
+			p, ok := payloads.getPayload(n)
 			if !ok {
 				return nil, ethereum.NotFound
 			}
@@ -190,23 +219,25 @@ func TestMultiPeerSync(t *testing.T) {
 	clC.Start()
 	defer clC.Close()
 
-	// request to start syncing between 10 and 100
-	require.NoError(t, clA.RequestL2Range(ctx, l2Ref(10), l2Ref(90)))
+	// request to start syncing between 10 and 90
+	require.NoError(t, clA.RequestL2Range(ctx, payloads.getBlockRef(10), payloads.getBlockRef(90)))
 
 	// With such large range to request we are going to hit the rate-limits of B and C,
 	// but that means we'll balance the work between the peers.
-	p := <-recvA
-	exp, ok := payloads[uint64(p.BlockNumber)]
-	require.True(t, ok, "expecting known payload")
-	require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
+	for i := uint64(89); i > 10; i-- { // wait for all payloads
+		p := <-recvA
+		exp, ok := payloads.getPayload(uint64(p.BlockNumber))
+		require.True(t, ok, "expecting known payload")
+		require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
+	}
 
 	// now see if B can sync a range, and fill the gap with a re-request
-	bl25 := payloads[25] // temporarily remove it from the available payloads. This will create a gap
-	delete(payloads, uint64(25))
-	require.NoError(t, clB.RequestL2Range(ctx, l2Ref(20), l2Ref(30)))
+	bl25, _ := payloads.getPayload(25) // temporarily remove it from the available payloads. This will create a gap
+	payloads.deletePayload(25)
+	require.NoError(t, clB.RequestL2Range(ctx, payloads.getBlockRef(20), payloads.getBlockRef(30)))
 	for i := uint64(29); i > 25; i-- {
 		p := <-recvB
-		exp, ok := payloads[uint64(p.BlockNumber)]
+		exp, ok := payloads.getPayload(uint64(p.BlockNumber))
 		require.True(t, ok, "expecting known payload")
 		require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
 	}
@@ -215,13 +246,19 @@ func TestMultiPeerSync(t *testing.T) {
 	// client: WARN failed p2p sync request num=25 err="peer failed to serve request with code 1"
 	require.Zero(t, len(recvB), "there is a gap, should not see other payloads yet")
 	// Add back the block
-	payloads[25] = bl25
-
+	payloads.addPayload(bl25)
+	// race-condition fix: the request for 25 is expected to error, but is marked as complete in the peer-loop.
+	// But the re-request checks the status in the main loop, and it may thus look like it's still in-flight,
+	// and thus not run the new request.
+	// Wait till the failed request is recognized as marked as done, so the re-request actually runs.
+	for !clB.inFlight[25].Load() {
+		time.Sleep(time.Second)
+	}
 	// And request a range again, 25 is there now, and 21-24 should follow quickly (some may already have been fetched and wait in quarantine)
-	require.NoError(t, clB.RequestL2Range(ctx, l2Ref(20), l2Ref(26)))
+	require.NoError(t, clB.RequestL2Range(ctx, payloads.getBlockRef(20), payloads.getBlockRef(26)))
 	for i := uint64(25); i > 20; i-- {
 		p := <-recvB
-		exp, ok := payloads[uint64(p.BlockNumber)]
+		exp, ok := payloads.getPayload(uint64(p.BlockNumber))
 		require.True(t, ok, "expecting known payload")
 		require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
 	}
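The race named in the commit title is the one the comment in the last hunk describes: the failed request for block 25 is marked complete on the peer goroutine, while the re-request path checks the in-flight bookkeeping on the main loop, so an immediate second RequestL2Range can be skipped as still in-flight. The committed fix polls clB.inFlight[25].Load() with a one-second sleep and relies on the overall test timeout. A bounded variant of that wait, as a hypothetical helper that is not part of the commit:

package p2p

import (
	"testing"
	"time"
)

// waitUntil polls cond until it returns true or the deadline passes. It is a
// sketch of the test's wait-for-completion loop with an explicit timeout added;
// the committed test simply loops with time.Sleep(time.Second).
func waitUntil(t *testing.T, timeout time.Duration, cond func() bool) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for !cond() {
		if time.Now().After(deadline) {
			t.Fatalf("condition not met within %s", timeout)
		}
		time.Sleep(50 * time.Millisecond)
	}
}

// In TestMultiPeerSync this could be used as:
//
//	waitUntil(t, 30*time.Second, func() bool { return clB.inFlight[25].Load() })
//	require.NoError(t, clB.RequestL2Range(ctx, payloads.getBlockRef(20), payloads.getBlockRef(26)))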