Commit 0706f729 (unverified)
Authored May 02, 2023 by OptimismBot; committed by GitHub on May 02, 2023

Merge pull request #5573 from ethereum-optimism/inphi/fpp-empty

op-e2e: Add FPP test for empty blocks

Parents: ab8a3ba5, 574a3406
Showing 1 changed file with 138 additions and 5 deletions

op-e2e/system_fpp_test.go (+138, -5) @ 0706f729
@@ -27,6 +27,119 @@ func TestVerifyL2OutputRootDetached(t *testing.T) {
 	testVerifyL2OutputRoot(t, true)
 }
 
+func TestVerifyL2OutputRootEmptyBlock(t *testing.T) {
+	testVerifyL2OutputRootEmptyBlock(t, false)
+}
+
+func TestVerifyL2OutputRootEmptyBlockDetached(t *testing.T) {
+	testVerifyL2OutputRootEmptyBlock(t, true)
+}
+
+// TestVerifyL2OutputRootEmptyBlock asserts that the program can verify the output root of an empty block
+// induced by missing batches.
+// Setup is as follows:
+// - create initial conditions and agreed l2 state
+// - stop the batch submitter to induce empty blocks
+// - wait for the seq window to expire so we can observe empty blocks
+// - select an empty block as our claim
+// - reboot the batch submitter
+// - update the state root via a tx
+// - run program
+func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) {
+	InitParallel(t)
+	ctx := context.Background()
+
+	cfg := DefaultSystemConfig(t)
+	// We don't need a verifier - just the sequencer is enough
+	delete(cfg.Nodes, "verifier")
+	// Use a small sequencer window size to avoid test timeout while waiting for empty blocks
+	// But not too small to ensure that our claim and subsequent state change is published
+	cfg.DeployConfig.SequencerWindowSize = 16
+
+	sys, err := cfg.Start()
+	require.Nil(t, err, "Error starting up system")
+	defer sys.Close()
+
+	log := testlog.Logger(t, log.LvlInfo)
+	log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time)
+
+	l1Client := sys.Clients["l1"]
+	l2Seq := sys.Clients["sequencer"]
+	rollupRPCClient, err := rpc.DialContext(context.Background(), sys.RollupNodes["sequencer"].HTTPEndpoint())
+	require.Nil(t, err)
+	rollupClient := sources.NewRollupClient(client.NewBaseRPCClient(rollupRPCClient))
+
+	// Avoids flaky test by avoiding reorgs at epoch 0
+	t.Log("Wait for safe head to advance once for setup")
+	ss, err := l2Seq.BlockByNumber(ctx, big.NewInt(int64(rpc.SafeBlockNumber)))
+	require.NoError(t, err)
+	require.NoError(t, waitForSafeHead(ctx, ss.NumberU64()+cfg.DeployConfig.SequencerWindowSize+1, rollupClient))
+
+	t.Log("Sending transactions to setup existing state, prior to challenged period")
+	aliceKey := cfg.Secrets.Alice
+	receipt := SendL2Tx(t, cfg, l2Seq, aliceKey, func(opts *TxOpts) {
+		opts.ToAddr = &cfg.Secrets.Addresses().Bob
+		opts.Value = big.NewInt(1_000)
+	})
+	require.NoError(t, waitForSafeHead(ctx, receipt.BlockNumber.Uint64(), rollupClient))
+
+	t.Logf("Capture current L2 head as agreed starting point. l2Head=%x l2BlockNumber=%v", receipt.BlockHash, receipt.BlockNumber)
+	l2Head := receipt.BlockHash
+
+	t.Log("=====Stopping batch submitter=====")
+	err = sys.BatchSubmitter.Stop(ctx)
+	require.NoError(t, err, "could not stop batch submitter")
+
+	// Wait for the sequencer to catch up with the current L1 head so we know all submitted batches are processed
+	t.Log("Wait for sequencer to catch up with last submitted batch")
+	l1HeadNum, err := l1Client.BlockNumber(ctx)
+	require.NoError(t, err)
+	_, err = waitForL1OriginOnL2(l1HeadNum, l2Seq, 30*time.Second)
+	require.NoError(t, err)
+
+	// Get the current safe head now that the batcher is stopped
+	safeBlock, err := l2Seq.BlockByNumber(ctx, big.NewInt(int64(rpc.SafeBlockNumber)))
+	require.NoError(t, err)
+
+	// Wait for safe head to start advancing again when the sequencing window elapses, for at least three blocks
+	t.Log("Wait for safe head to advance after sequencing window elapses")
+	require.NoError(t, waitForSafeHead(ctx, safeBlock.NumberU64()+3, rollupClient))
+
+	// Use the 2nd empty block as our L2 claim block
+	t.Log("Determine L2 claim")
+	l2ClaimBlock, err := l2Seq.BlockByNumber(ctx, big.NewInt(int64(safeBlock.NumberU64()+2)))
+	require.NoError(t, err, "get L2 claim block number")
+	l2ClaimBlockNumber := l2ClaimBlock.Number().Uint64()
+	l2Output, err := rollupClient.OutputAtBlock(ctx, l2ClaimBlockNumber)
+	require.NoError(t, err, "could not get expected output")
+	l2Claim := l2Output.OutputRoot
+
+	t.Log("=====Restarting batch submitter=====")
+	err = sys.BatchSubmitter.Start()
+	require.NoError(t, err, "could not start batch submitter")
+
+	t.Log("Add a transaction to the next batch after sequence of empty blocks")
+	receipt = SendL2Tx(t, cfg, l2Seq, aliceKey, func(opts *TxOpts) {
+		opts.ToAddr = &cfg.Secrets.Addresses().Bob
+		opts.Value = big.NewInt(1_000)
+		opts.Nonce = 1
+	})
+	require.NoError(t, waitForSafeHead(ctx, receipt.BlockNumber.Uint64(), rollupClient))
+
+	t.Log("Determine L1 head that includes batch after sequence of empty blocks")
+	l1HeadBlock, err := l1Client.BlockByNumber(ctx, nil)
+	require.NoError(t, err, "get l1 head block")
+	l1Head := l1HeadBlock.Hash()
+
+	testFaultProofProgramScenario(t, ctx, sys, &FaultProofProgramTestScenario{
+		L1Head:             l1Head,
+		L2Head:             l2Head,
+		L2Claim:            common.Hash(l2Claim),
+		L2ClaimBlockNumber: l2ClaimBlockNumber,
+		Detached:           detached,
+	})
+}
+
 func testVerifyL2OutputRoot(t *testing.T, detached bool) {
 	InitParallel(t)
 	ctx := context.Background()
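The new test drives its synchronization through two file-local helpers that this diff only calls, waitForL1OriginOnL2 and waitForSafeHead. For orientation, here is a minimal sketch of what a poller like waitForL1OriginOnL2 could look like. This is an assumption, not the repo's code: the function name, the use of the rollup node's SyncStatus, and the one-second ticker are illustrative (the real helper is passed the L2 execution client l2Seq, so it presumably inspects L2 blocks directly rather than the rollup node).

package op_e2e_sketch // illustrative only; the real helper lives in op-e2e

import (
	"context"
	"time"

	"github.com/ethereum-optimism/optimism/op-node/sources"
)

// waitForL1OriginOnL2Sketch is a hypothetical stand-in for the unshown
// helper: poll the rollup node until the unsafe L2 head's L1 origin
// reaches the target L1 block number, or the timeout expires.
func waitForL1OriginOnL2Sketch(ctx context.Context, l1BlockNum uint64, rollupClient *sources.RollupClient, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		// SyncStatus reports the node's view of the chain heads, including
		// the L1 origin of the current unsafe L2 head.
		status, err := rollupClient.SyncStatus(ctx)
		if err != nil {
			return err
		}
		if status.UnsafeL2.L1Origin.Number >= l1BlockNum {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}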
@@ -96,19 +209,39 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool) {
 	require.NoError(t, err, "get l1 head block")
 	l1Head := l1HeadBlock.Hash()
 
+	testFaultProofProgramScenario(t, ctx, sys, &FaultProofProgramTestScenario{
+		L1Head:             l1Head,
+		L2Head:             l2Head,
+		L2Claim:            common.Hash(l2Claim),
+		L2ClaimBlockNumber: l2ClaimBlockNumber,
+		Detached:           detached,
+	})
+}
+
+type FaultProofProgramTestScenario struct {
+	L1Head             common.Hash
+	L2Head             common.Hash
+	L2Claim            common.Hash
+	L2ClaimBlockNumber uint64
+	Detached           bool
+}
+
+// testFaultProofProgramScenario runs the fault proof program in several contexts, given a test scenario.
+func testFaultProofProgramScenario(t *testing.T, ctx context.Context, sys *System, s *FaultProofProgramTestScenario) {
 	preimageDir := t.TempDir()
-	fppConfig := oppconf.NewConfig(sys.RollupConfig, sys.L2GenesisCfg.Config, l1Head, l2Head, common.Hash(l2Claim), l2ClaimBlockNumber)
+	fppConfig := oppconf.NewConfig(sys.RollupConfig, sys.L2GenesisCfg.Config, s.L1Head, s.L2Head, common.Hash(s.L2Claim), s.L2ClaimBlockNumber)
 	fppConfig.L1URL = sys.NodeEndpoint("l1")
 	fppConfig.L2URL = sys.NodeEndpoint("sequencer")
 	fppConfig.DataDir = preimageDir
-	if detached {
+	if s.Detached {
 		// When running in detached mode we need to compile the client executable since it will be called directly.
 		fppConfig.ExecCmd = BuildOpProgramClient(t)
 	}
 
 	// Check the FPP confirms the expected output
 	t.Log("Running fault proof in fetching mode")
-	err = opp.FaultProofProgram(ctx, log, fppConfig)
+	log := testlog.Logger(t, log.LvlInfo)
+	err := opp.FaultProofProgram(ctx, log, fppConfig)
 	require.NoError(t, err)
 
 	t.Log("Shutting down network")
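The point of the refactor in this hunk is that the tail of testVerifyL2OutputRoot, which configures and runs op-program, is extracted into testFaultProofProgramScenario, parameterized by the new FaultProofProgramTestScenario struct, so a future op-e2e test only has to assemble five fields. A hypothetical caller (not part of this diff) could look like the sketch below; the hash and block-number literals are placeholders that a real test would derive from the running system, as the empty-block test does above.

// Hypothetical reuse of the new scenario helper from another test in the
// op-e2e package. The placeholder values would make the program's claim
// check fail at runtime; a real test derives them from the live system.
func TestFaultProofProgramScenarioReuseExample(t *testing.T) {
	InitParallel(t)
	ctx := context.Background()

	cfg := DefaultSystemConfig(t)
	sys, err := cfg.Start()
	require.Nil(t, err, "Error starting up system")
	defer sys.Close()

	testFaultProofProgramScenario(t, ctx, sys, &FaultProofProgramTestScenario{
		L1Head:             common.Hash{0x01}, // placeholder L1 head hash
		L2Head:             common.Hash{0x02}, // placeholder agreed L2 head hash
		L2Claim:            common.Hash{0x03}, // placeholder claimed output root
		L2ClaimBlockNumber: 42,                // placeholder claim block number
		Detached:           false,             // run in-process rather than as a separate executable
	})
}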
@@ -131,7 +264,7 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool) {
 	t.Log("Running fault proof with invalid claim")
 	fppConfig.L2Claim = common.Hash{0xaa}
 	err = opp.FaultProofProgram(ctx, log, fppConfig)
-	if detached {
+	if s.Detached {
 		require.Error(t, err, "exit status 1")
 	} else {
 		require.ErrorIs(t, err, driver.ErrClaimNotValid)
@@ -139,7 +272,7 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool) {
 }
 
 func waitForSafeHead(ctx context.Context, safeBlockNum uint64, rollupClient *sources.RollupClient) error {
-	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
+	ctx, cancel := context.WithTimeout(ctx, 60*time.Second)
 	defer cancel()
 	for {
 		seqStatus, err := rollupClient.SyncStatus(ctx)
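The diff display cuts off inside waitForSafeHead's polling loop. Based only on the visible signature and the SyncStatus call, a loop like this plausibly finishes as sketched below; the SafeL2.Number comparison and the bare retry are guesses, not the file's actual code.

// Guessed continuation of a waitForSafeHead-style loop: return once the
// safe L2 head reported by the rollup node reaches the target height.
for {
	seqStatus, err := rollupClient.SyncStatus(ctx)
	if err != nil {
		return err
	}
	if seqStatus.SafeL2.Number >= safeBlockNum { // assumed field names
		return nil
	}
}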