Commit f4e24507 authored by pcw109550's avatar pcw109550 Committed by protolambda

Rename variable and explanations span batch limit

parent 51866cdb
......@@ -19,10 +19,10 @@ func frameSize(frame Frame) uint64 {
const DerivationVersion0 = 0
// MaxSpanBatchFieldSize is the maximum amount of bytes that will be read from
// a span batch to decode span batch field. This value cannot be larger than
// MaxSpanBatchSize is the maximum amount of bytes that will be needed
// to decode every span batch field. This value cannot be larger than
// MaxRLPBytesPerChannel because single batch cannot be larger than channel size.
const MaxSpanBatchFieldSize = MaxRLPBytesPerChannel
const MaxSpanBatchSize = MaxRLPBytesPerChannel
// MaxChannelBankSize is the amount of memory space, in number of bytes,
// till the bank is pruned by removing channels,
......
......@@ -27,7 +27,7 @@ import (
// payload := block_count ++ origin_bits ++ block_tx_counts ++ txs
// txs := contract_creation_bits ++ y_parity_bits ++ tx_sigs ++ tx_tos ++ tx_datas ++ tx_nonces ++ tx_gases
var ErrTooBigSpanBatchFieldSize = errors.New("batch would cause field bytes to go over limit")
var ErrTooBigSpanBatchSize = errors.New("span batch size limit reached")
var ErrEmptySpanBatch = errors.New("span-batch must not be empty")
......@@ -59,8 +59,8 @@ func (bp *spanBatchPayload) decodeOriginBits(r *bytes.Reader) error {
originBitBufferLen++
}
// avoid out of memory before allocation
if originBitBufferLen > MaxSpanBatchFieldSize {
return ErrTooBigSpanBatchFieldSize
if originBitBufferLen > MaxSpanBatchSize {
return ErrTooBigSpanBatchSize
}
originBitBuffer := make([]byte, originBitBufferLen)
_, err := io.ReadFull(r, originBitBuffer)
......@@ -146,9 +146,9 @@ func (bp *spanBatchPayload) decodeBlockCount(r *bytes.Reader) error {
if err != nil {
return fmt.Errorf("failed to read block count: %w", err)
}
// number of L2 block in span batch cannot be greater than MaxSpanBatchFieldSize
if blockCount > MaxSpanBatchFieldSize {
return ErrTooBigSpanBatchFieldSize
// number of L2 block in span batch cannot be greater than MaxSpanBatchSize
if blockCount > MaxSpanBatchSize {
return ErrTooBigSpanBatchSize
}
if blockCount == 0 {
return ErrEmptySpanBatch
......@@ -166,10 +166,10 @@ func (bp *spanBatchPayload) decodeBlockTxCounts(r *bytes.Reader) error {
if err != nil {
return fmt.Errorf("failed to read block tx count: %w", err)
}
// number of txs in single L2 block cannot be greater than MaxSpanBatchFieldSize
// number of txs in single L2 block cannot be greater than MaxSpanBatchSize
// every tx will take at least single byte
if blockTxCount > MaxSpanBatchFieldSize {
return ErrTooBigSpanBatchFieldSize
if blockTxCount > MaxSpanBatchSize {
return ErrTooBigSpanBatchSize
}
blockTxCounts = append(blockTxCounts, blockTxCount)
}
......@@ -189,13 +189,13 @@ func (bp *spanBatchPayload) decodeTxs(r *bytes.Reader) error {
for i := 0; i < len(bp.blockTxCounts); i++ {
total, overflow := math.SafeAdd(totalBlockTxCount, bp.blockTxCounts[i])
if overflow {
return ErrTooBigSpanBatchFieldSize
return ErrTooBigSpanBatchSize
}
totalBlockTxCount = total
}
// total number of txs in span batch cannot be greater than MaxSpanBatchFieldSize
if totalBlockTxCount > MaxSpanBatchFieldSize {
return ErrTooBigSpanBatchFieldSize
// total number of txs in span batch cannot be greater than MaxSpanBatchSize
if totalBlockTxCount > MaxSpanBatchSize {
return ErrTooBigSpanBatchSize
}
bp.txs.totalBlockTxCount = totalBlockTxCount
if err := bp.txs.decode(r); err != nil {
......@@ -224,6 +224,14 @@ func (bp *spanBatchPayload) decodePayload(r *bytes.Reader) error {
// decodeBytes parses data into b from data
func (b *RawSpanBatch) decodeBytes(data []byte) error {
r := bytes.NewReader(data)
return b.decode(r)
}
// decode reads the byte encoding of SpanBatch from Reader stream
func (b *RawSpanBatch) decode(r *bytes.Reader) error {
if r.Len() > MaxSpanBatchSize {
return ErrTooBigSpanBatchSize
}
if err := b.decodePrefix(r); err != nil {
return err
}
......@@ -680,13 +688,13 @@ func ReadTxData(r *bytes.Reader) ([]byte, int, error) {
}
}
// avoid out of memory before allocation
s := rlp.NewStream(r, MaxSpanBatchFieldSize)
s := rlp.NewStream(r, MaxSpanBatchSize)
var txPayload []byte
kind, _, err := s.Kind()
switch {
case err != nil:
if errors.Is(err, rlp.ErrValueTooLarge) {
return nil, 0, ErrTooBigSpanBatchFieldSize
return nil, 0, ErrTooBigSpanBatchSize
}
return nil, 0, fmt.Errorf("failed to read tx RLP prefix: %w", err)
case kind == rlp.List:
......
......@@ -529,7 +529,7 @@ func TestSpanBatchMaxTxData(t *testing.T) {
rng := rand.New(rand.NewSource(0x177288))
invalidTx := types.NewTx(&types.DynamicFeeTx{
Data: testutils.RandomData(rng, MaxSpanBatchFieldSize+1),
Data: testutils.RandomData(rng, MaxSpanBatchSize+1),
})
txEncoded, err := invalidTx.MarshalBinary()
......@@ -538,7 +538,7 @@ func TestSpanBatchMaxTxData(t *testing.T) {
r := bytes.NewReader(txEncoded)
_, _, err = ReadTxData(r)
assert.ErrorIs(t, err, ErrTooBigSpanBatchFieldSize)
require.ErrorIs(t, err, ErrTooBigSpanBatchSize)
}
func TestSpanBatchMaxOriginBitsLength(t *testing.T) {
......@@ -547,7 +547,7 @@ func TestSpanBatchMaxOriginBitsLength(t *testing.T) {
r := bytes.NewReader([]byte{})
err := sb.decodeOriginBits(r)
assert.ErrorIs(t, err, ErrTooBigSpanBatchFieldSize)
require.ErrorIs(t, err, ErrTooBigSpanBatchSize)
}
func TestSpanBatchMaxBlockCount(t *testing.T) {
......@@ -565,7 +565,7 @@ func TestSpanBatchMaxBlockCount(t *testing.T) {
r := bytes.NewReader(result)
var sb RawSpanBatch
err = sb.decodeBlockCount(r)
require.ErrorIs(t, err, ErrTooBigSpanBatchFieldSize)
require.ErrorIs(t, err, ErrTooBigSpanBatchSize)
}
func TestSpanBatchMaxBlockTxCount(t *testing.T) {
......@@ -584,7 +584,7 @@ func TestSpanBatchMaxBlockTxCount(t *testing.T) {
var sb RawSpanBatch
sb.blockCount = rawSpanBatch.blockCount
err = sb.decodeBlockTxCounts(r)
require.ErrorIs(t, err, ErrTooBigSpanBatchFieldSize)
require.ErrorIs(t, err, ErrTooBigSpanBatchSize)
}
func TestSpanBatchTotalBlockTxCountNotOverflow(t *testing.T) {
......@@ -592,8 +592,8 @@ func TestSpanBatchTotalBlockTxCountNotOverflow(t *testing.T) {
chainID := big.NewInt(rng.Int63n(1000))
rawSpanBatch := RandomRawSpanBatch(rng, chainID)
rawSpanBatch.blockTxCounts[0] = MaxSpanBatchFieldSize - 1
rawSpanBatch.blockTxCounts[1] = MaxSpanBatchFieldSize - 1
rawSpanBatch.blockTxCounts[0] = MaxSpanBatchSize - 1
rawSpanBatch.blockTxCounts[1] = MaxSpanBatchSize - 1
// we are sure that totalBlockTxCount will overflow on uint64
var buf bytes.Buffer
......@@ -606,5 +606,5 @@ func TestSpanBatchTotalBlockTxCountNotOverflow(t *testing.T) {
sb.blockTxCounts = rawSpanBatch.blockTxCounts
err = sb.decodeTxs(r)
require.ErrorIs(t, err, ErrTooBigSpanBatchFieldSize)
require.ErrorIs(t, err, ErrTooBigSpanBatchSize)
}
......@@ -67,8 +67,8 @@ func (btx *spanBatchTxs) decodeContractCreationBits(r *bytes.Reader) error {
contractCreationBitBufferLen++
}
// avoid out of memory before allocation
if contractCreationBitBufferLen > MaxSpanBatchFieldSize {
return ErrTooBigSpanBatchFieldSize
if contractCreationBitBufferLen > MaxSpanBatchSize {
return ErrTooBigSpanBatchSize
}
contractCreationBitBuffer := make([]byte, contractCreationBitBufferLen)
_, err := io.ReadFull(r, contractCreationBitBuffer)
......@@ -190,8 +190,8 @@ func (btx *spanBatchTxs) decodeYParityBits(r *bytes.Reader) error {
yParityBitBufferLen++
}
// avoid out of memory before allocation
if yParityBitBufferLen > MaxSpanBatchFieldSize {
return ErrTooBigSpanBatchFieldSize
if yParityBitBufferLen > MaxSpanBatchSize {
return ErrTooBigSpanBatchSize
}
yParityBitBuffer := make([]byte, yParityBitBufferLen)
_, err := io.ReadFull(r, yParityBitBuffer)
......
......@@ -391,7 +391,7 @@ func TestSpanBatchTxsMaxContractCreationBitsLength(t *testing.T) {
r := bytes.NewReader([]byte{})
err := sbt.decodeContractCreationBits(r)
assert.ErrorIs(t, err, ErrTooBigSpanBatchFieldSize)
require.ErrorIs(t, err, ErrTooBigSpanBatchSize)
}
func TestSpanBatchTxsMaxYParityBitsLength(t *testing.T) {
......@@ -400,5 +400,5 @@ func TestSpanBatchTxsMaxYParityBitsLength(t *testing.T) {
r := bytes.NewReader([]byte{})
err := sb.decodeOriginBits(r)
assert.ErrorIs(t, err, ErrTooBigSpanBatchFieldSize)
require.ErrorIs(t, err, ErrTooBigSpanBatchSize)
}
......@@ -144,10 +144,11 @@ Where:
[EIP-1559]: https://eips.ethereum.org/EIPS/eip-1559
Every field size of span batch is limited to `MAX_SPAN_BATCH_FIELD_SIZE` (currently 10,000,000 bytes,
equal to `MAX_RLP_BYTES_PER_CHANNEL`). There can be at least single span batch per channel, and channel size is limited
Total size of encoded span batch is limited to `MAX_SPAN_BATCH_SIZE` (currently 10,000,000 bytes,
equal to `MAX_RLP_BYTES_PER_CHANNEL`). Therefore every field size of span batch will be implicitly limited to
`MAX_SPAN_BATCH_SIZE`. There can be at least a single span batch per channel, and channel size is limited
to `MAX_RLP_BYTES_PER_CHANNEL` and you may think that there is already an implicit limit. However, having an explicit
limit for each field is helpful for several reasons. We may save computation costs by avoiding malicious input while
limit for span batch is helpful for several reasons. We may save computation costs by avoiding malicious input while
decoding. For example, let's say a bad batcher wrote a span batch in which `block_count = max.Uint64`. We may return early using
the explicit limit, not trying to consume data until EOF is reached. We can also safely preallocate memory for decoding
because we know the upper limit of memory usage.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment