exchain / nebula — Commit ea45c05e (unverified)

Authored Jan 24, 2023 by mergify[bot]; committed by GitHub on Jan 24, 2023.
Merge pull request #4688 from ethereum-optimism/clabby/e2e/garbage-batch

feat(e2e): Garbage batch test

Parents: ff01cd62, 38b4a12d
Showing 3 changed files with 438 additions and 2 deletions (+438 −2):
op-e2e/actions/garbage_channel_out.go   +263 −0
op-e2e/actions/l2_batcher.go            +93 −2
op-e2e/actions/l2_batcher_test.go       +82 −0
op-e2e/actions/garbage_channel_out.go (new file, mode 100644)
package actions

import (
	"bytes"
	"compress/gzip"
	"compress/zlib"
	"crypto/rand"
	"errors"
	"fmt"
	"io"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

type GarbageKind int64

const (
	STRIP_VERSION GarbageKind = iota
	RANDOM
	TRUNCATE_END
	DIRTY_APPEND
	INVALID_COMPRESSION
	MALFORM_RLP
)

var GarbageKinds = []GarbageKind{
	STRIP_VERSION,
	RANDOM,
	TRUNCATE_END,
	DIRTY_APPEND,
	INVALID_COMPRESSION,
	MALFORM_RLP,
}

// GarbageChannelCfg is the configuration for a `GarbageChannelOut`
type GarbageChannelCfg struct {
	useInvalidCompression bool
	malformRLP            bool
}

// Writer is the interface shared between `zlib.Writer` and `gzip.Writer`
type Writer interface {
	Close() error
	Flush() error
	Reset(io.Writer)
	Write([]byte) (int, error)
}

// ChannelOutIface is the interface implemented by ChannelOut & GarbageChannelOut
type ChannelOutIface interface {
	ID() derive.ChannelID
	Reset() error
	AddBlock(block *types.Block) error
	ReadyBytes() int
	Flush() error
	Close() error
	OutputFrame(w *bytes.Buffer, maxSize uint64) error
}

// Compile-time check for ChannelOutIface interface implementation for the ChannelOut type.
var _ ChannelOutIface = (*derive.ChannelOut)(nil)

// Compile-time check for ChannelOutIface interface implementation for the GarbageChannelOut type.
var _ ChannelOutIface = (*GarbageChannelOut)(nil)

// GarbageChannelOut is a modified `derive.ChannelOut` that can be configured to behave differently
// than the original
type GarbageChannelOut struct {
	id derive.ChannelID
	// Frame ID of the next frame to emit. Increment after emitting
	frame uint64
	// rlpLength is the uncompressed size of the channel. Must be less than MAX_RLP_BYTES_PER_CHANNEL
	rlpLength int

	// Compressor stage. Write input data to it
	compress Writer
	// post compression buffer
	buf bytes.Buffer

	closed bool

	// Garbage channel configuration
	cfg *GarbageChannelCfg
}

func (co *GarbageChannelOut) ID() derive.ChannelID {
	return co.id
}

// NewGarbageChannelOut creates a new `GarbageChannelOut` with the given configuration.
func NewGarbageChannelOut(cfg *GarbageChannelCfg) (*GarbageChannelOut, error) {
	c := &GarbageChannelOut{
		id:        derive.ChannelID{}, // TODO: use GUID here instead of fully random data
		frame:     0,
		rlpLength: 0,
		cfg:       cfg,
	}
	_, err := rand.Read(c.id[:])
	if err != nil {
		return nil, err
	}

	// Optionally use zlib or gzip compression
	var compress Writer
	if cfg.useInvalidCompression {
		compress, err = gzip.NewWriterLevel(&c.buf, gzip.BestCompression)
	} else {
		compress, err = zlib.NewWriterLevel(&c.buf, zlib.BestCompression)
	}
	if err != nil {
		return nil, err
	}
	c.compress = compress

	return c, nil
}

// TODO: reuse ChannelOut for performance
func (co *GarbageChannelOut) Reset() error {
	co.frame = 0
	co.rlpLength = 0
	co.buf.Reset()
	co.compress.Reset(&co.buf)
	co.closed = false
	_, err := rand.Read(co.id[:])
	return err
}

// AddBlock adds a block to the channel. It returns an error
// if there is a problem adding the block. The only sentinel
// error that it returns is ErrTooManyRLPBytes. If this error
// is returned, the channel should be closed and a new one
// should be made.
func (co *GarbageChannelOut) AddBlock(block *types.Block) error {
	if co.closed {
		return errors.New("already closed")
	}
	batch, err := blockToBatch(block)
	if err != nil {
		return err
	}
	// We encode to a temporary buffer to determine the encoded length to
	// ensure that the total size of all RLP elements is less than or equal to MAX_RLP_BYTES_PER_CHANNEL
	var buf bytes.Buffer
	if err := rlp.Encode(&buf, batch); err != nil {
		return err
	}
	if co.cfg.malformRLP {
		// Malform the RLP by incrementing the length prefix by 1.
		bufBytes := buf.Bytes()
		bufBytes[0] += 1
		buf.Reset()
		buf.Write(bufBytes)
	}
	if co.rlpLength+buf.Len() > derive.MaxRLPBytesPerChannel {
		return fmt.Errorf("could not add %d bytes to channel of %d bytes, max is %d. err: %w",
			buf.Len(), co.rlpLength, derive.MaxRLPBytesPerChannel, derive.ErrTooManyRLPBytes)
	}
	co.rlpLength += buf.Len()

	_, err = io.Copy(co.compress, &buf)
	return err
}

// ReadyBytes returns the number of bytes that the channel out can immediately output into a frame.
// Use `Flush` or `Close` to move data from the compression buffer into the ready buffer if more bytes
// are needed. Add blocks may add to the ready buffer, but it is not guaranteed due to the compression stage.
func (co *GarbageChannelOut) ReadyBytes() int {
	return co.buf.Len()
}

// Flush flushes the internal compression stage to the ready buffer. It enables pulling a larger & more
// complete frame. It reduces the compression efficiency.
func (co *GarbageChannelOut) Flush() error {
	return co.compress.Flush()
}

func (co *GarbageChannelOut) Close() error {
	if co.closed {
		return errors.New("already closed")
	}
	co.closed = true
	return co.compress.Close()
}
// OutputFrame writes a frame to w with a given max size.
// Use `ReadyBytes`, `Flush`, and `Close` to modify the ready buffer.
// Returns io.EOF when the channel is closed & there are no more frames.
// Returns nil if there is still more buffered data.
// Returns an error if it ran into an error during processing.
func (co *GarbageChannelOut) OutputFrame(w *bytes.Buffer, maxSize uint64) error {
	f := derive.Frame{
		ID:          co.id,
		FrameNumber: uint16(co.frame),
	}

	// Copy data from the local buffer into the frame data buffer
	// Don't go past the maxSize with the fixed frame overhead.
	// Fixed overhead: 32 + 8 + 2 + 4 + 1 = 47 bytes.
	// Add one extra byte for the version byte (for the entire L1 tx though)
	maxDataSize := maxSize - 47 - 1
	if maxDataSize > uint64(co.buf.Len()) {
		maxDataSize = uint64(co.buf.Len())
		// If we are closed & will not spill past the current frame,
		// mark it as the final frame of the channel.
		if co.closed {
			f.IsLast = true
		}
	}
	f.Data = make([]byte, maxDataSize)

	if _, err := io.ReadFull(&co.buf, f.Data); err != nil {
		return err
	}

	if err := f.MarshalBinary(w); err != nil {
		return err
	}

	co.frame += 1
	if f.IsLast {
		return io.EOF
	} else {
		return nil
	}
}
// blockToBatch transforms a block into a batch object that can easily be RLP encoded.
func blockToBatch(block *types.Block) (*derive.BatchData, error) {
	opaqueTxs := make([]hexutil.Bytes, 0, len(block.Transactions()))
	for i, tx := range block.Transactions() {
		if tx.Type() == types.DepositTxType {
			continue
		}
		otx, err := tx.MarshalBinary()
		if err != nil {
			return nil, fmt.Errorf("could not encode tx %v in block %v: %w", i, tx.Hash(), err)
		}
		opaqueTxs = append(opaqueTxs, otx)
	}
	l1InfoTx := block.Transactions()[0]
	if l1InfoTx.Type() != types.DepositTxType {
		return nil, derive.ErrNotDepositTx
	}
	l1Info, err := derive.L1InfoDepositTxData(l1InfoTx.Data())
	if err != nil {
		return nil, fmt.Errorf("could not parse the L1 Info deposit: %w", err)
	}

	return &derive.BatchData{
		BatchV1: derive.BatchV1{
			ParentHash:   block.ParentHash(),
			EpochNum:     rollup.Epoch(l1Info.Number),
			EpochHash:    l1Info.BlockHash,
			Timestamp:    block.Time(),
			Transactions: opaqueTxs,
		},
	}, nil
}
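
For orientation, here is a minimal usage sketch of the `GarbageChannelOut` API above. It is not part of this commit: the helper name `buildInvalidFrames` and the `maxFrameSize` parameter are illustrative assumptions, it is assumed to live in the same `actions` package as the file above (so the imports it needs are already present there), and the block passed in must carry an L1-info deposit as its first transaction, as `blockToBatch` requires.

// buildInvalidFrames is a hypothetical helper (not part of this commit) showing how the
// pieces above fit together: create a GarbageChannelOut, add a block, close the channel,
// and drain frames until OutputFrame returns io.EOF. With useInvalidCompression set, the
// resulting frames are gzip-compressed and should be rejected by the derivation pipeline.
// maxFrameSize must exceed the fixed 48-byte overhead accounted for in OutputFrame.
func buildInvalidFrames(block *types.Block, maxFrameSize uint64) ([][]byte, error) {
	ch, err := NewGarbageChannelOut(&GarbageChannelCfg{useInvalidCompression: true})
	if err != nil {
		return nil, err
	}
	if err := ch.AddBlock(block); err != nil {
		return nil, err
	}
	if err := ch.Close(); err != nil {
		return nil, err
	}
	var frames [][]byte
	for {
		var buf bytes.Buffer
		err := ch.OutputFrame(&buf, maxFrameSize)
		if err != nil && err != io.EOF {
			return nil, err
		}
		frames = append(frames, buf.Bytes())
		if err == io.EOF {
			return frames, nil
		}
	}
}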
op-e2e/actions/l2_batcher.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/ecdsa"
+	"crypto/rand"
 	"io"
 	"math/big"
@@ -40,6 +41,8 @@ type BatcherCfg struct {
 	MaxL1TxSize uint64
 	BatcherKey  *ecdsa.PrivateKey
+
+	GarbageCfg *GarbageChannelCfg
 }

 // L2Batcher buffers and submits L2 batches to L1.
@@ -58,7 +61,7 @@ type L2Batcher struct {
 	l1Signer types.Signer
-	l2ChannelOut *derive.ChannelOut
+	l2ChannelOut ChannelOutIface
 	l2Submitting bool // when the channel out is being submitted, and not safe to write to without resetting
 	l2BufferedBlock  eth.BlockID
 	l2SubmittedBlock eth.BlockID
@@ -122,7 +125,12 @@ func (s *L2Batcher) ActL2BatchBuffer(t Testing) {
 	}
 	// Create channel if we don't have one yet
 	if s.l2ChannelOut == nil {
-		ch, err := derive.NewChannelOut()
+		var ch ChannelOutIface
+		if s.l2BatcherCfg.GarbageCfg != nil {
+			ch, err = NewGarbageChannelOut(s.l2BatcherCfg.GarbageCfg)
+		} else {
+			ch, err = derive.NewChannelOut()
+		}
 		require.NoError(t, err, "failed to create channel")
 		s.l2ChannelOut = ch
 	}
@@ -195,6 +203,89 @@ func (s *L2Batcher) ActL2BatchSubmit(t Testing) {
 	require.NoError(t, err, "need to send tx")
 }

+// ActL2BatchSubmitGarbage constructs a malformed channel frame and submits it to the
+// batch inbox. This *should* cause the batch inbox to reject the blocks
+// encoded within the frame, even if the blocks themselves are valid.
+func (s *L2Batcher) ActL2BatchSubmitGarbage(t Testing, kind GarbageKind) {
+	// Don't run this action if there's no data to submit
+	if s.l2ChannelOut == nil {
+		t.InvalidAction("need to buffer data first, cannot batch submit with empty buffer")
+		return
+	}
+
+	// Collect the output frame
+	data := new(bytes.Buffer)
+	data.WriteByte(derive.DerivationVersion0)
+	// subtract one, to account for the version byte
+	if err := s.l2ChannelOut.OutputFrame(data, s.l2BatcherCfg.MaxL1TxSize-1); err == io.EOF {
+		s.l2ChannelOut = nil
+		s.l2Submitting = false
+	} else if err != nil {
+		s.l2Submitting = false
+		t.Fatalf("failed to output channel data to frame: %v", err)
+	}
+
+	outputFrame := data.Bytes()
+
+	// Malform the output frame
+	switch kind {
+	// Strip the derivation version byte from the output frame
+	case STRIP_VERSION:
+		outputFrame = outputFrame[1:]
+	// Replace the output frame with random bytes of length [1, 512]
+	case RANDOM:
+		i, err := rand.Int(rand.Reader, big.NewInt(512))
+		require.NoError(t, err, "error generating random bytes length")
+		buf := make([]byte, i.Int64()+1)
+		_, err = rand.Read(buf)
+		require.NoError(t, err, "error generating random bytes")
+		outputFrame = buf
+	// Remove 4 bytes from the tail end of the output frame
+	case TRUNCATE_END:
+		outputFrame = outputFrame[:len(outputFrame)-4]
+	// Append 4 garbage bytes to the end of the output frame
+	case DIRTY_APPEND:
+		outputFrame = append(outputFrame, []byte{0xBA, 0xD0, 0xC0, 0xDE}...)
+	case INVALID_COMPRESSION:
+		// Do nothing post frame encoding- the `GarbageChannelOut` used for this case is modified to
+		// use gzip compression rather than zlib, which is invalid.
+		break
+	case MALFORM_RLP:
+		// Do nothing post frame encoding- the `GarbageChannelOut` used for this case is modified to
+		// write malformed RLP each time a block is added to the channel.
+		break
+	default:
+		t.Fatalf("Unexpected garbage kind: %v", kind)
+	}
+
+	nonce, err := s.l1.PendingNonceAt(t.Ctx(), s.batcherAddr)
+	require.NoError(t, err, "need batcher nonce")
+
+	gasTipCap := big.NewInt(2 * params.GWei)
+	pendingHeader, err := s.l1.HeaderByNumber(t.Ctx(), big.NewInt(-1))
+	require.NoError(t, err, "need l1 pending header for gas price estimation")
+	gasFeeCap := new(big.Int).Add(gasTipCap, new(big.Int).Mul(pendingHeader.BaseFee, big.NewInt(2)))
+
+	rawTx := &types.DynamicFeeTx{
+		ChainID:   s.rollupCfg.L1ChainID,
+		Nonce:     nonce,
+		To:        &s.rollupCfg.BatchInboxAddress,
+		GasTipCap: gasTipCap,
+		GasFeeCap: gasFeeCap,
+		Data:      outputFrame,
+	}
+	gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true)
+	require.NoError(t, err, "need to compute intrinsic gas")
+	rawTx.Gas = gas
+
+	tx, err := types.SignNewTx(s.l2BatcherCfg.BatcherKey, s.l1Signer, rawTx)
+	require.NoError(t, err, "need to sign tx")
+
+	err = s.l1.SendTransaction(t.Ctx(), tx)
+	require.NoError(t, err, "need to send tx")
+}
+
 func (s *L2Batcher) ActBufferAll(t Testing) {
 	stat, err := s.syncStatusAPI.SyncStatus(t.Ctx())
 	require.NoError(t, err)
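
As a usage note, the new action composes with the batcher actions already in this package. Below is a hedged sketch, not part of this diff, of how a test driver might exercise every `GarbageKind`; the helper name `submitAllGarbageKinds` is an illustrative assumption, while `Testing`, `L2Batcher`, `ActBufferAll`, and `ActL2ChannelClose` are taken from the existing code.

// submitAllGarbageKinds is an illustrative driver (not in this commit): for every
// corruption kind, buffer all pending L2 blocks, close the channel, and submit one
// malformed frame to the batch inbox.
func submitAllGarbageKinds(t Testing, batcher *L2Batcher) {
	for _, kind := range GarbageKinds {
		batcher.ActBufferAll(t)
		batcher.ActL2ChannelClose(t)
		batcher.ActL2BatchSubmitGarbage(t, kind)
	}
}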
op-e2e/actions/l2_batcher_test.go
@@ -196,6 +196,88 @@ func TestL2Finalization(gt *testing.T) {
 	require.Equal(t, heightToSubmit, sequencer.SyncStatus().FinalizedL2.Number, "unknown/bad finalized L1 blocks are ignored")
 }

+// TestGarbageBatch tests the behavior of an invalid/malformed output channel frame containing
+// valid batches being submitted to the batch inbox. These batches should always be rejected
+// and the safe L2 head should remain unaltered.
+func TestGarbageBatch(gt *testing.T) {
+	t := NewDefaultTesting(gt)
+	p := defaultRollupTestParams
+	dp := e2eutils.MakeDeployParams(t, p)
+	for _, garbageKind := range GarbageKinds {
+		sd := e2eutils.Setup(t, dp, defaultAlloc)
+		log := testlog.Logger(t, log.LvlError)
+		miner, engine, sequencer := setupSequencerTest(t, sd, log)
+		_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg))
+
+		batcherCfg := &BatcherCfg{
+			MinL1TxSize: 0,
+			MaxL1TxSize: 128_000,
+			BatcherKey:  dp.Secrets.Batcher,
+		}
+
+		if garbageKind == MALFORM_RLP || garbageKind == INVALID_COMPRESSION {
+			// If the garbage kind is `INVALID_COMPRESSION` or `MALFORM_RLP`, use the `actions` package's
+			// modified `ChannelOut`.
+			batcherCfg.GarbageCfg = &GarbageChannelCfg{
+				useInvalidCompression: garbageKind == INVALID_COMPRESSION,
+				malformRLP:            garbageKind == MALFORM_RLP,
+			}
+		}
+
+		batcher := NewL2Batcher(log, sd.RollupCfg, batcherCfg, sequencer.RollupClient(), miner.EthClient(), engine.EthClient())
+
+		sequencer.ActL2PipelineFull(t)
+		verifier.ActL2PipelineFull(t)
+
+		syncAndBuildL2 := func() {
+			// Send a head signal to the sequencer and verifier
+			sequencer.ActL1HeadSignal(t)
+			verifier.ActL1HeadSignal(t)
+
+			// Run the derivation pipeline on the sequencer and verifier
+			sequencer.ActL2PipelineFull(t)
+			verifier.ActL2PipelineFull(t)
+
+			// Build the L2 chain to the L1 head
+			sequencer.ActBuildToL1Head(t)
+		}
+
+		// Build an empty block on L1 and run the derivation pipeline + build L2
+		// to the L1 head (block #1)
+		miner.ActEmptyBlock(t)
+		syncAndBuildL2()
+
+		// Ensure that the L2 safe head has an L1 Origin at genesis before any
+		// batches are submitted.
+		require.Equal(t, uint64(0), sequencer.L2Safe().L1Origin.Number)
+		require.Equal(t, uint64(1), sequencer.L2Unsafe().L1Origin.Number)
+
+		// Submit a batch containing all blocks built on L2 while catching up
+		// to the L1 head above. The output channel frame submitted to the batch
+		// inbox will be invalid- it will be malformed depending on the passed
+		// `garbageKind`.
+		batcher.ActBufferAll(t)
+		batcher.ActL2ChannelClose(t)
+		batcher.ActL2BatchSubmitGarbage(t, garbageKind)
+
+		// Include the batch on L1 in block #2
+		miner.ActL1StartBlock(12)(t)
+		miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
+		miner.ActL1EndBlock(t)
+
+		// Send a head signal + run the derivation pipeline on the sequencer
+		// and verifier.
+		syncAndBuildL2()
+
+		// Verify that the L2 blocks that were batch submitted were *not* marked
+		// as safe due to the malformed output channel frame. The safe head should
+		// still have an L1 Origin at genesis.
+		require.Equal(t, uint64(0), sequencer.L2Safe().L1Origin.Number)
+		require.Equal(t, uint64(2), sequencer.L2Unsafe().L1Origin.Number)
+	}
+}
+
 func TestExtendedTimeWithoutL1Batches(gt *testing.T) {
 	t := NewDefaultTesting(gt)
 	p := &e2eutils.TestParams{