exchain / nebula · Commits

Unverified commit 2775f256, authored Sep 21, 2021 by Karl Floersch; committed by Kelvin Fichter, Nov 10, 2021
feat: further improve batch submission efficiency
test(contracts): Add a test for block hash in logs
parent 7aec0958
Showing 2 changed files with 29 additions and 119 deletions (+29 -119)
CanonicalTransactionChain.sol (...ntracts/contracts/L1/rollup/CanonicalTransactionChain.sol): +4 -43
CanonicalTransactionChain.spec.ts (...est/contracts/L1/rollup/CanonicalTransactionChain.spec.ts): +25 -76
packages/contracts/contracts/L1/rollup/CanonicalTransactionChain.sol (view file @ 2775f256)
...
...
@@ -362,14 +362,6 @@ contract CanonicalTransactionChain is ICanonicalTransactionChain, Lib_AddressRes
IChainStorageContainer queueRef = queue();
uint40 queueLength = _getQueueLength(queueRef);
// Reserve some memory to save gas on hashing later on. This is a relatively safe estimate
// for the average transaction size that will prevent having to resize this chunk of memory
// later on. Saves gas.
bytes memory hashMemory = new bytes((msg.data.length / totalElementsToAppend) * 2);
// Initialize the array of canonical chain leaves that we will append.
bytes32[] memory leaves = new bytes32[](totalElementsToAppend);
// Each leaf index corresponds to a tx, either sequenced or enqueued.
uint32 leafIndex = 0;
...
...
@@ -388,39 +380,10 @@ contract CanonicalTransactionChain is ICanonicalTransactionChain, Lib_AddressRes
curContext = nextContext;
// Process sequencer transactions first.
for (uint32 j = 0; j < curContext.numSequencedTransactions; j++) {
uint256 txDataLength;
assembly {
txDataLength := shr(232, calldataload(nextTransactionPtr))
}
require(
txDataLength <= MAX_ROLLUP_TX_SIZE,
"Transaction data size exceeds maximum for rollup transaction."
);
leaves[leafIndex] = _getSequencerLeafHash(
curContext,
nextTransactionPtr,
txDataLength,
hashMemory
);
nextTransactionPtr += uint40(TX_DATA_HEADER_SIZE + txDataLength);
numSequencerTransactions++;
leafIndex++;
}
numSequencerTransactions += uint32(curContext.numSequencedTransactions);
// Now process any subsequent queue transactions.
for (uint32 j = 0; j < curContext.numSubsequentQueueTransactions; j++) {
require(
nextQueueIndex < queueLength,
"Not enough queued transactions to append."
);
leaves[leafIndex] = _getQueueLeafHash(nextQueueIndex);
nextQueueIndex++;
leafIndex++;
}
nextQueueIndex += uint40(curContext.numSubsequentQueueTransactions);
}
// Generate the required metadata that we need to append this batch
...
...
@@ -447,11 +410,9 @@ contract CanonicalTransactionChain is ICanonicalTransactionChain, Lib_AddressRes
blockNumber = lastElement.blockNumber;
}
// For efficiency reasons getMerkleRoot modifies the `leaves` argument in place
// while calculating the root hash therefore any arguments passed to it must not
// be used again afterwards
// Cache the previous blockhash to ensure all transaction data can be retrieved efficiently.
_appendBatch(
Lib_MerkleTree.getMerkleRoot(leaves),
blockhash(block.number-1),
totalElementsToAppend,
numQueuedTransactions,
blockTimestamp,
...
...
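Net effect of the Solidity changes above: the contract no longer reserves hash memory, builds per-transaction Merkle leaves, or computes a batch root on-chain; it only accumulates sequenced and queued transaction counts per context and caches blockhash(block.number-1) so batch data can be recovered from the submission calldata off-chain. The sketch below is a rough, hedged illustration of that count-only accounting in TypeScript; BatchContext mirrors the object shape passed to the spec's appendSequencerBatch helper in the test file that follows, and countBatchElements is a hypothetical helper, not part of this repository.

// Mirrors the context objects passed to the spec's appendSequencerBatch helper.
interface BatchContext {
  numSequencedTransactions: number
  numSubsequentQueueTransactions: number
  timestamp: number
  blockNumber: number
}

// Count-only accounting, matching the replacement lines in the hunk above:
//   numSequencerTransactions += numSequencedTransactions
//   nextQueueIndex           += numSubsequentQueueTransactions
const countBatchElements = (contexts: BatchContext[]) => {
  let numSequencerTransactions = 0
  let numQueuedTransactions = 0
  for (const ctx of contexts) {
    numSequencerTransactions += ctx.numSequencedTransactions
    numQueuedTransactions += ctx.numSubsequentQueueTransactions
  }
  // totalElementsToAppend for the batch should equal the sum of both counts.
  return { numSequencerTransactions, numQueuedTransactions }
}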
packages/contracts/test/contracts/L1/rollup/CanonicalTransactionChain.spec.ts (view file @ 2775f256)
...
...
@@ -620,86 +620,35 @@ describe('CanonicalTransactionChain', () => {
).to.be.revertedWith(
  'Function can only be called by the Sequencer.'
)
})
it('should revert when trying to input more data than the max data size', async () => {
  const MAX_ROLLUP_TX_SIZE = await CanonicalTransactionChain.MAX_ROLLUP_TX_SIZE()
  const data = '0x' + '12'.repeat(MAX_ROLLUP_TX_SIZE + 1)
it('should emit the previous blockhash in the TransactionBatchAppended event', async () => {
  const timestamp = await getEthTime(ethers.provider)
  const blockNumber = (await getNextBlockNumber(ethers.provider)) - 1
  const currentBlockHash = await (await ethers.provider.getBlock('latest')).hash
  const blockNumber = await getNextBlockNumber(ethers.provider)
  const res = await appendSequencerBatch(CanonicalTransactionChain, {
    transactions: ['0x1234'],
    contexts: [
      {
        numSequencedTransactions: 1,
        numSubsequentQueueTransactions: 0,
        timestamp,
        blockNumber,
      },
    ],
    shouldStartAtElement: 0,
    totalElementsToAppend: 1,
  })
  const receipt = await res.wait()
  await expect(
    appendSequencerBatch(CanonicalTransactionChain, {
      transactions: [data],
      contexts: [
        {
          numSequencedTransactions: 1,
          numSubsequentQueueTransactions: 0,
          timestamp,
          blockNumber,
        },
      ],
      shouldStartAtElement: 0,
      totalElementsToAppend: 1,
    })
  ).to.be.revertedWith(
    'Transaction data size exceeds maximum for rollup transaction.'
  // Because the res value is returned by a sendTransaction type, we need to manually
  // decode the logs.
  const eventArgs = ethers.utils.defaultAbiCoder.decode(
    ['uint256', 'bytes32', 'uint256', 'uint256', 'bytes'],
    receipt.logs[0].data
  )
})
describe('Sad path cases', () => {
  const target = NON_ZERO_ADDRESS
  const gasLimit = 500_000
  const data = '0x' + '12'.repeat(1234)
  describe('when the sequencer attempts to add more queue transactions than exist', () => {
    it('reverts when there are zero transactions in the queue', async () => {
      const timestamp = await getEthTime(ethers.provider)
      const blockNumber = (await getNextBlockNumber(ethers.provider)) - 1
      await expect(
        appendSequencerBatch(CanonicalTransactionChain, {
          transactions: ['0x1234'],
          contexts: [
            {
              numSequencedTransactions: 1,
              numSubsequentQueueTransactions: 1,
              timestamp,
              blockNumber,
            },
          ],
          shouldStartAtElement: 0,
          totalElementsToAppend: 1,
        })
      ).to.be.revertedWith('Not enough queued transactions to append.')
    })
    it('reverts when there are insufficient (but nonzero) transactions in the queue', async () => {
      const timestamp = await getEthTime(ethers.provider)
      const blockNumber = (await getNextBlockNumber(ethers.provider)) - 1
      const numEnqueues = 7
      for (let i = 0; i < numEnqueues; i++) {
        await CanonicalTransactionChain.enqueue(target, gasLimit, data)
      }
      await expect(
        appendSequencerBatch(CanonicalTransactionChain, {
          transactions: ['0x1234'],
          contexts: [
            {
              numSequencedTransactions: 1,
              numSubsequentQueueTransactions: numEnqueues + 1,
              timestamp,
              blockNumber,
            },
          ],
          shouldStartAtElement: 0,
          totalElementsToAppend: numEnqueues + 1,
        })
      ).to.be.revertedWith('Not enough queued transactions to append.')
    })
  })
  await expect(eventArgs[0]).to.eq(currentBlockHash)
})
for (const size of ELEMENT_TEST_SIZES) {
...
...
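Outside the test harness, the same decoding pattern the new spec uses can verify the emitted value after the fact. A minimal sketch, assuming an ethers v5 provider, assuming that receipt.logs[0] is the batch-appended log, and assuming its data follows the layout the test decodes (uint256, bytes32, uint256, uint256, bytes); checkEmittedBlockhash and the provider URL are illustrative placeholders, not part of this repository.

import { ethers } from 'ethers'

// Placeholder provider; point this at the L1 node that received the batch.
const provider = new ethers.providers.JsonRpcProvider('http://localhost:8545')

// Decode the first log of a batch-submission receipt the way the spec does,
// then compare the first decoded field to the hash of the block immediately
// before the block containing the submission (i.e. blockhash(block.number-1)).
const checkEmittedBlockhash = async (txHash: string): Promise<boolean> => {
  const receipt = await provider.getTransactionReceipt(txHash)
  const eventArgs = ethers.utils.defaultAbiCoder.decode(
    ['uint256', 'bytes32', 'uint256', 'uint256', 'bytes'],
    receipt.logs[0].data
  )
  const prevBlock = await provider.getBlock(receipt.blockNumber - 1)
  // eventArgs[0] decodes as a BigNumber; compare numerically against the hash.
  return ethers.BigNumber.from(eventArgs[0]).eq(ethers.BigNumber.from(prevBlock.hash))
}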