Commit babe5e7d authored by protolambda, committed by GitHub

Merge pull request #7752 from testinprod-io/tip/span-batch-types-refactor

op-node: Span Batch Type, Encoding, and Decoding Refactoring
parents 8ac11e31 9f9cf618
...@@ -759,7 +759,7 @@ func TestFramePublished(t *testing.T) { ...@@ -759,7 +759,7 @@ func TestFramePublished(t *testing.T) {
} }
func ChannelBuilder_PendingFrames_TotalFrames(t *testing.T, batchType uint) { func ChannelBuilder_PendingFrames_TotalFrames(t *testing.T, batchType uint) {
const tnf = 8 const tnf = 9
rng := rand.New(rand.NewSource(94572314)) rng := rand.New(rand.NewSource(94572314))
require := require.New(t) require := require.New(t)
cfg := defaultTestChannelConfig cfg := defaultTestChannelConfig
...@@ -828,7 +828,7 @@ func ChannelBuilder_InputBytes(t *testing.T, batchType uint) { ...@@ -828,7 +828,7 @@ func ChannelBuilder_InputBytes(t *testing.T, batchType uint) {
spanBatchBuilder.AppendSingularBatch(singularBatch, l1Info.SequenceNumber) spanBatchBuilder.AppendSingularBatch(singularBatch, l1Info.SequenceNumber)
rawSpanBatch, err := spanBatchBuilder.GetRawSpanBatch() rawSpanBatch, err := spanBatchBuilder.GetRawSpanBatch()
require.NoError(err) require.NoError(err)
batch := derive.NewSpanBatchData(*rawSpanBatch) batch := derive.NewBatchData(rawSpanBatch)
var buf bytes.Buffer var buf bytes.Buffer
require.NoError(batch.EncodeRLP(&buf)) require.NoError(batch.EncodeRLP(&buf))
l = buf.Len() l = buf.Len()
...@@ -878,7 +878,7 @@ func ChannelBuilder_OutputBytes(t *testing.T, batchType uint) { ...@@ -878,7 +878,7 @@ func ChannelBuilder_OutputBytes(t *testing.T, batchType uint) {
func blockBatchRlpSize(t *testing.T, b *types.Block) int { func blockBatchRlpSize(t *testing.T, b *types.Block) int {
t.Helper() t.Helper()
singularBatch, _, err := derive.BlockToSingularBatch(b) singularBatch, _, err := derive.BlockToSingularBatch(b)
batch := derive.NewSingularBatchData(*singularBatch) batch := derive.NewBatchData(singularBatch)
require.NoError(t, err) require.NoError(t, err)
var buf bytes.Buffer var buf bytes.Buffer
require.NoError(t, batch.EncodeRLP(&buf), "RLP-encoding batch") require.NoError(t, batch.EncodeRLP(&buf), "RLP-encoding batch")
......
...@@ -255,13 +255,13 @@ func blockToBatch(block *types.Block) (*derive.BatchData, error) { ...@@ -255,13 +255,13 @@ func blockToBatch(block *types.Block) (*derive.BatchData, error) {
return nil, fmt.Errorf("could not parse the L1 Info deposit: %w", err) return nil, fmt.Errorf("could not parse the L1 Info deposit: %w", err)
} }
return &derive.BatchData{ singularBatch := &derive.SingularBatch{
SingularBatch: derive.SingularBatch{
ParentHash: block.ParentHash(), ParentHash: block.ParentHash(),
EpochNum: rollup.Epoch(l1Info.Number), EpochNum: rollup.Epoch(l1Info.Number),
EpochHash: l1Info.BlockHash, EpochHash: l1Info.BlockHash,
Timestamp: block.Time(), Timestamp: block.Time(),
Transactions: opaqueTxs, Transactions: opaqueTxs,
}, }
}, nil
return derive.NewBatchData(singularBatch), nil
} }
...@@ -24,7 +24,7 @@ type ChannelWithMetadata struct { ...@@ -24,7 +24,7 @@ type ChannelWithMetadata struct {
InvalidFrames bool `json:"invalid_frames"` InvalidFrames bool `json:"invalid_frames"`
InvalidBatches bool `json:"invalid_batches"` InvalidBatches bool `json:"invalid_batches"`
Frames []FrameWithMetadata `json:"frames"` Frames []FrameWithMetadata `json:"frames"`
Batches []derive.SingularBatch `json:"batches"` Batches []derive.BatchData `json:"batches"`
} }
type FrameWithMetadata struct { type FrameWithMetadata struct {
...@@ -104,7 +104,7 @@ func processFrames(cfg *rollup.Config, id derive.ChannelID, frames []FrameWithMe ...@@ -104,7 +104,7 @@ func processFrames(cfg *rollup.Config, id derive.ChannelID, frames []FrameWithMe
} }
} }
var batches []derive.SingularBatch var batches []derive.BatchData
invalidBatches := false invalidBatches := false
if ch.IsReady() { if ch.IsReady() {
br, err := derive.BatchReader(ch.Reader()) br, err := derive.BatchReader(ch.Reader())
...@@ -114,11 +114,7 @@ func processFrames(cfg *rollup.Config, id derive.ChannelID, frames []FrameWithMe ...@@ -114,11 +114,7 @@ func processFrames(cfg *rollup.Config, id derive.ChannelID, frames []FrameWithMe
fmt.Printf("Error reading batch for channel %v. Err: %v\n", id.String(), err) fmt.Printf("Error reading batch for channel %v. Err: %v\n", id.String(), err)
invalidBatches = true invalidBatches = true
} else { } else {
if batch.BatchType != derive.SingularBatchType { batches = append(batches, *batch)
batches = append(batches, batch.SingularBatch)
} else {
fmt.Printf("batch-type %d is not supported", batch.BatchType)
}
} }
} }
} else { } else {
......
...@@ -25,9 +25,9 @@ var encodeBufferPool = sync.Pool{ ...@@ -25,9 +25,9 @@ var encodeBufferPool = sync.Pool{
const ( const (
// SingularBatchType is the first version of Batch format, representing a single L2 block. // SingularBatchType is the first version of Batch format, representing a single L2 block.
SingularBatchType = iota SingularBatchType = 0
// SpanBatchType is the Batch version used after SpanBatch hard fork, representing a span of L2 blocks. // SpanBatchType is the Batch version used after SpanBatch hard fork, representing a span of L2 blocks.
SpanBatchType SpanBatchType = 1
) )
// Batch contains information to build one or multiple L2 blocks. // Batch contains information to build one or multiple L2 blocks.
...@@ -39,12 +39,20 @@ type Batch interface { ...@@ -39,12 +39,20 @@ type Batch interface {
LogContext(log.Logger) log.Logger LogContext(log.Logger) log.Logger
} }
// BatchData is a composition type that contains raw data of each batch version. // BatchData is used to represent the typed encoding & decoding.
// It has encoding & decoding methods to implement typed encoding. // and wraps around a single interface InnerBatchData.
// Further fields such as cache can be added in the future, without embedding each type of InnerBatchData.
// Similar design with op-geth's types.Transaction struct.
type BatchData struct { type BatchData struct {
BatchType int inner InnerBatchData
SingularBatch }
RawSpanBatch
// InnerBatchData is the underlying data of a BatchData.
// This is implemented by SingularBatch and RawSpanBatch.
type InnerBatchData interface {
GetBatchType() int
encode(w io.Writer) error
decode(r *bytes.Reader) error
} }
// EncodeRLP implements rlp.Encoder // EncodeRLP implements rlp.Encoder
...@@ -58,6 +66,10 @@ func (b *BatchData) EncodeRLP(w io.Writer) error { ...@@ -58,6 +66,10 @@ func (b *BatchData) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, buf.Bytes()) return rlp.Encode(w, buf.Bytes())
} }
func (bd *BatchData) GetBatchType() uint8 {
return uint8(bd.inner.GetBatchType())
}
// MarshalBinary returns the canonical encoding of the batch. // MarshalBinary returns the canonical encoding of the batch.
func (b *BatchData) MarshalBinary() ([]byte, error) { func (b *BatchData) MarshalBinary() ([]byte, error) {
var buf bytes.Buffer var buf bytes.Buffer
...@@ -67,16 +79,10 @@ func (b *BatchData) MarshalBinary() ([]byte, error) { ...@@ -67,16 +79,10 @@ func (b *BatchData) MarshalBinary() ([]byte, error) {
// encodeTyped encodes batch type and payload for each batch type. // encodeTyped encodes batch type and payload for each batch type.
func (b *BatchData) encodeTyped(buf *bytes.Buffer) error { func (b *BatchData) encodeTyped(buf *bytes.Buffer) error {
switch b.BatchType { if err := buf.WriteByte(b.GetBatchType()); err != nil {
case SingularBatchType: return err
buf.WriteByte(SingularBatchType)
return rlp.Encode(buf, &b.SingularBatch)
case SpanBatchType:
buf.WriteByte(SpanBatchType)
return b.RawSpanBatch.encode(buf)
default:
return fmt.Errorf("unrecognized batch type: %d", b.BatchType)
} }
return b.inner.encode(buf)
} }
// DecodeRLP implements rlp.Decoder // DecodeRLP implements rlp.Decoder
...@@ -99,35 +105,28 @@ func (b *BatchData) UnmarshalBinary(data []byte) error { ...@@ -99,35 +105,28 @@ func (b *BatchData) UnmarshalBinary(data []byte) error {
return b.decodeTyped(data) return b.decodeTyped(data)
} }
// decodeTyped decodes batch type and payload for each batch type. // decodeTyped decodes a typed batchData
func (b *BatchData) decodeTyped(data []byte) error { func (b *BatchData) decodeTyped(data []byte) error {
if len(data) == 0 { if len(data) == 0 {
return fmt.Errorf("batch too short") return errors.New("batch too short")
} }
var inner InnerBatchData
switch data[0] { switch data[0] {
case SingularBatchType: case SingularBatchType:
b.BatchType = SingularBatchType inner = new(SingularBatch)
return rlp.DecodeBytes(data[1:], &b.SingularBatch)
case SpanBatchType: case SpanBatchType:
b.BatchType = int(data[0]) inner = new(RawSpanBatch)
return b.RawSpanBatch.decodeBytes(data[1:])
default: default:
return fmt.Errorf("unrecognized batch type: %d", data[0]) return fmt.Errorf("unrecognized batch type: %d", data[0])
} }
} if err := inner.decode(bytes.NewReader(data[1:])); err != nil {
return err
// NewSingularBatchData creates new BatchData with SingularBatch
func NewSingularBatchData(singularBatch SingularBatch) *BatchData {
return &BatchData{
BatchType: SingularBatchType,
SingularBatch: singularBatch,
} }
b.inner = inner
return nil
} }
// NewSpanBatchData creates new BatchData with SpanBatch // NewBatchData creates a new BatchData
func NewSpanBatchData(spanBatch RawSpanBatch) *BatchData { func NewBatchData(inner InnerBatchData) *BatchData {
return &BatchData{ return &BatchData{inner: inner}
BatchType: SpanBatchType,
RawSpanBatch: spanBatch,
}
} }
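As a quick illustration of the refactor above, the sketch below shows how a caller now constructs, encodes, and decodes a BatchData through the single NewBatchData constructor instead of the removed NewSingularBatchData/NewSpanBatchData helpers. It is not part of this PR; the main function, the sample field values, and the derive import path are assumptions made only for the example.

package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	// Wrap a SingularBatch behind the InnerBatchData interface.
	batch := derive.NewBatchData(&derive.SingularBatch{
		ParentHash:   common.Hash{31: 0x42},
		EpochNum:     1,
		EpochHash:    common.Hash{},
		Timestamp:    1647026951,
		Transactions: []hexutil.Bytes{},
	})

	// MarshalBinary still produces the one-byte type prefix followed by the
	// type-specific payload, as implemented by encodeTyped above.
	enc, err := batch.MarshalBinary()
	if err != nil {
		panic(err)
	}

	// decodeTyped picks the concrete InnerBatchData based on the prefix byte.
	var dec derive.BatchData
	if err := dec.UnmarshalBinary(enc); err != nil {
		panic(err)
	}
	fmt.Println("decoded batch type:", dec.GetBatchType()) // 0 == SingularBatchType
}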
...@@ -6,15 +6,16 @@ import ( ...@@ -6,15 +6,16 @@ import (
"math/rand" "math/rand"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/stretchr/testify/assert"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testutils"
) )
func RandomRawSpanBatch(rng *rand.Rand, chainId *big.Int) *RawSpanBatch { func RandomRawSpanBatch(rng *rand.Rand, chainId *big.Int) *RawSpanBatch {
...@@ -52,8 +53,8 @@ func RandomRawSpanBatch(rng *rand.Rand, chainId *big.Int) *RawSpanBatch { ...@@ -52,8 +53,8 @@ func RandomRawSpanBatch(rng *rand.Rand, chainId *big.Int) *RawSpanBatch {
spanBatchPrefix: spanBatchPrefix{ spanBatchPrefix: spanBatchPrefix{
relTimestamp: uint64(rng.Uint32()), relTimestamp: uint64(rng.Uint32()),
l1OriginNum: rng.Uint64(), l1OriginNum: rng.Uint64(),
parentCheck: testutils.RandomData(rng, 20), parentCheck: [20]byte(testutils.RandomData(rng, 20)),
l1OriginCheck: testutils.RandomData(rng, 20), l1OriginCheck: [20]byte(testutils.RandomData(rng, 20)),
}, },
spanBatchPayload: spanBatchPayload{ spanBatchPayload: spanBatchPayload{
blockCount: blockCount, blockCount: blockCount,
...@@ -141,40 +142,42 @@ func TestBatchRoundTrip(t *testing.T) { ...@@ -141,40 +142,42 @@ func TestBatchRoundTrip(t *testing.T) {
chainID := new(big.Int).SetUint64(rng.Uint64()) chainID := new(big.Int).SetUint64(rng.Uint64())
batches := []*BatchData{ batches := []*BatchData{
{ NewBatchData(
SingularBatch: SingularBatch{ &SingularBatch{
ParentHash: common.Hash{}, ParentHash: common.Hash{},
EpochNum: 0, EpochNum: 0,
Timestamp: 0, Timestamp: 0,
Transactions: []hexutil.Bytes{}, Transactions: []hexutil.Bytes{},
}, },
}, ),
{ NewBatchData(
SingularBatch: SingularBatch{ &SingularBatch{
ParentHash: common.Hash{31: 0x42}, ParentHash: common.Hash{31: 0x42},
EpochNum: 1, EpochNum: 1,
Timestamp: 1647026951, Timestamp: 1647026951,
Transactions: []hexutil.Bytes{[]byte{0, 0, 0}, []byte{0x76, 0xfd, 0x7c}}, Transactions: []hexutil.Bytes{[]byte{0, 0, 0}, []byte{0x76, 0xfd, 0x7c}},
}, },
}, ),
NewSingularBatchData(*RandomSingularBatch(rng, 5, chainID)), NewBatchData(RandomSingularBatch(rng, 5, chainID)),
NewSingularBatchData(*RandomSingularBatch(rng, 7, chainID)), NewBatchData(RandomSingularBatch(rng, 7, chainID)),
NewSpanBatchData(*RandomRawSpanBatch(rng, chainID)), NewBatchData(RandomRawSpanBatch(rng, chainID)),
NewSpanBatchData(*RandomRawSpanBatch(rng, chainID)), NewBatchData(RandomRawSpanBatch(rng, chainID)),
NewSpanBatchData(*RandomRawSpanBatch(rng, chainID)), NewBatchData(RandomRawSpanBatch(rng, chainID)),
} }
for i, batch := range batches { for i, batch := range batches {
enc, err := batch.MarshalBinary() enc, err := batch.MarshalBinary()
assert.NoError(t, err) require.NoError(t, err)
var dec BatchData var dec BatchData
err = dec.UnmarshalBinary(enc) err = dec.UnmarshalBinary(enc)
assert.NoError(t, err) require.NoError(t, err)
if dec.BatchType == SpanBatchType { if dec.GetBatchType() == SpanBatchType {
_, err := dec.RawSpanBatch.derive(blockTime, genesisTimestamp, chainID) rawSpanBatch, ok := dec.inner.(*RawSpanBatch)
assert.NoError(t, err) require.True(t, ok)
_, err := rawSpanBatch.derive(blockTime, genesisTimestamp, chainID)
require.NoError(t, err)
} }
assert.Equal(t, batch, &dec, "Batch not equal test case %v", i) require.Equal(t, batch, &dec, "Batch not equal test case %v", i)
} }
} }
...@@ -185,43 +188,45 @@ func TestBatchRoundTripRLP(t *testing.T) { ...@@ -185,43 +188,45 @@ func TestBatchRoundTripRLP(t *testing.T) {
chainID := new(big.Int).SetUint64(rng.Uint64()) chainID := new(big.Int).SetUint64(rng.Uint64())
batches := []*BatchData{ batches := []*BatchData{
{ NewBatchData(
SingularBatch: SingularBatch{ &SingularBatch{
ParentHash: common.Hash{}, ParentHash: common.Hash{},
EpochNum: 0, EpochNum: 0,
Timestamp: 0, Timestamp: 0,
Transactions: []hexutil.Bytes{}, Transactions: []hexutil.Bytes{},
}, },
}, ),
{ NewBatchData(
SingularBatch: SingularBatch{ &SingularBatch{
ParentHash: common.Hash{31: 0x42}, ParentHash: common.Hash{31: 0x42},
EpochNum: 1, EpochNum: 1,
Timestamp: 1647026951, Timestamp: 1647026951,
Transactions: []hexutil.Bytes{[]byte{0, 0, 0}, []byte{0x76, 0xfd, 0x7c}}, Transactions: []hexutil.Bytes{[]byte{0, 0, 0}, []byte{0x76, 0xfd, 0x7c}},
}, },
}, ),
NewSingularBatchData(*RandomSingularBatch(rng, 5, chainID)), NewBatchData(RandomSingularBatch(rng, 5, chainID)),
NewSingularBatchData(*RandomSingularBatch(rng, 7, chainID)), NewBatchData(RandomSingularBatch(rng, 7, chainID)),
NewSpanBatchData(*RandomRawSpanBatch(rng, chainID)), NewBatchData(RandomRawSpanBatch(rng, chainID)),
NewSpanBatchData(*RandomRawSpanBatch(rng, chainID)), NewBatchData(RandomRawSpanBatch(rng, chainID)),
NewSpanBatchData(*RandomRawSpanBatch(rng, chainID)), NewBatchData(RandomRawSpanBatch(rng, chainID)),
} }
for i, batch := range batches { for i, batch := range batches {
var buf bytes.Buffer var buf bytes.Buffer
err := batch.EncodeRLP(&buf) err := batch.EncodeRLP(&buf)
assert.NoError(t, err) require.NoError(t, err)
result := buf.Bytes() result := buf.Bytes()
var dec BatchData var dec BatchData
r := bytes.NewReader(result) r := bytes.NewReader(result)
s := rlp.NewStream(r, 0) s := rlp.NewStream(r, 0)
err = dec.DecodeRLP(s) err = dec.DecodeRLP(s)
assert.NoError(t, err) require.NoError(t, err)
if dec.BatchType == SpanBatchType { if dec.GetBatchType() == SpanBatchType {
_, err := dec.RawSpanBatch.derive(blockTime, genesisTimestamp, chainID) rawSpanBatch, ok := dec.inner.(*RawSpanBatch)
assert.NoError(t, err) require.True(t, ok)
} _, err := rawSpanBatch.derive(blockTime, genesisTimestamp, chainID)
assert.Equal(t, batch, &dec, "Batch not equal test case %v", i) require.NoError(t, err)
}
require.Equal(t, batch, &dec, "Batch not equal test case %v", i)
} }
} }
...@@ -17,13 +17,13 @@ func FuzzBatchRoundTrip(f *testing.F) { ...@@ -17,13 +17,13 @@ func FuzzBatchRoundTrip(f *testing.F) {
typeProvider := fuzz.NewFromGoFuzz(fuzzedData).NilChance(0).MaxDepth(10000).NumElements(0, 0x100).AllowUnexportedFields(true) typeProvider := fuzz.NewFromGoFuzz(fuzzedData).NilChance(0).MaxDepth(10000).NumElements(0, 0x100).AllowUnexportedFields(true)
fuzzerutils.AddFuzzerFunctions(typeProvider) fuzzerutils.AddFuzzerFunctions(typeProvider)
var singularBatch SingularBatch
typeProvider.Fuzz(&singularBatch)
// Create our batch data from fuzzed data // Create our batch data from fuzzed data
var batchData BatchData var batchData BatchData
typeProvider.Fuzz(&batchData)
// force batchdata to only contain singular batch // force batchdata to only contain singular batch
batchData.BatchType = SingularBatchType batchData.inner = &singularBatch
batchData.RawSpanBatch = RawSpanBatch{}
// Encode our batch data // Encode our batch data
enc, err := batchData.MarshalBinary() enc, err := batchData.MarshalBinary()
......
...@@ -3,6 +3,7 @@ package derive ...@@ -3,6 +3,7 @@ package derive
import ( import (
"bytes" "bytes"
"context" "context"
"errors"
"fmt" "fmt"
"io" "io"
...@@ -89,22 +90,30 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) { ...@@ -89,22 +90,30 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) {
cr.NextChannel() cr.NextChannel()
return nil, NotEnoughData return nil, NotEnoughData
} }
switch batchData.BatchType { switch batchData.GetBatchType() {
case SingularBatchType: case SingularBatchType:
return &batchData.SingularBatch, nil singularBatch, ok := batchData.inner.(*SingularBatch)
if !ok {
return nil, NewCriticalError(errors.New("failed type assertion to SingularBatch"))
}
return singularBatch, nil
case SpanBatchType: case SpanBatchType:
if origin := cr.Origin(); !cr.cfg.IsSpanBatch(origin.Time) { if origin := cr.Origin(); !cr.cfg.IsSpanBatch(origin.Time) {
return nil, NewTemporaryError(fmt.Errorf("cannot accept span batch in L1 block %s at time %d", origin, origin.Time)) return nil, NewTemporaryError(fmt.Errorf("cannot accept span batch in L1 block %s at time %d", origin, origin.Time))
} }
rawSpanBatch, ok := batchData.inner.(*RawSpanBatch)
if !ok {
return nil, NewCriticalError(errors.New("failed type assertion to SpanBatch"))
}
// If the batch type is Span batch, derive block inputs from RawSpanBatch. // If the batch type is Span batch, derive block inputs from RawSpanBatch.
spanBatch, err := batchData.RawSpanBatch.derive(cr.cfg.BlockTime, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID) spanBatch, err := rawSpanBatch.derive(cr.cfg.BlockTime, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return spanBatch, nil return spanBatch, nil
default: default:
// error is bubbled up to user, but pipeline can skip the batch and continue after. // error is bubbled up to user, but pipeline can skip the batch and continue after.
return nil, NewTemporaryError(fmt.Errorf("unrecognized batch type: %w", err)) return nil, NewTemporaryError(fmt.Errorf("unrecognized batch type: %d", batchData.GetBatchType()))
} }
} }
......
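With this change, NextBatch returns the Batch interface with the concrete type already resolved (and span batches already derived from RawSpanBatch). The following sketch of a hypothetical consumer is illustrative only — handleBatch, its package, and its error text are not from this PR — and uses only exported methods visible in this diff.

package example

import (
	"errors"

	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)

// handleBatch dispatches on the concrete Batch implementation returned by
// ChannelInReader.NextBatch.
func handleBatch(b derive.Batch) error {
	switch batch := b.(type) {
	case *derive.SingularBatch:
		// A single L2 block's input; Epoch() exposes its L1 origin.
		_ = batch.Epoch()
		return nil
	case *derive.SpanBatch:
		// A span of L2 blocks, already derived from the raw form by NextBatch.
		_ = batch.GetBlockCount()
		return nil
	default:
		return errors.New("unrecognized batch implementation")
	}
}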
...@@ -145,7 +145,7 @@ func (co *SingularChannelOut) AddSingularBatch(batch *SingularBatch, _ uint64) ( ...@@ -145,7 +145,7 @@ func (co *SingularChannelOut) AddSingularBatch(batch *SingularBatch, _ uint64) (
// We encode to a temporary buffer to determine the encoded length to // We encode to a temporary buffer to determine the encoded length to
// ensure that the total size of all RLP elements is less than or equal to MAX_RLP_BYTES_PER_CHANNEL // ensure that the total size of all RLP elements is less than or equal to MAX_RLP_BYTES_PER_CHANNEL
var buf bytes.Buffer var buf bytes.Buffer
if err := rlp.Encode(&buf, NewSingularBatchData(*batch)); err != nil { if err := rlp.Encode(&buf, NewBatchData(batch)); err != nil {
return 0, err return 0, err
} }
if co.rlpLength+buf.Len() > MaxRLPBytesPerChannel { if co.rlpLength+buf.Len() > MaxRLPBytesPerChannel {
......
package derive package derive
import ( import (
"bytes"
"io"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
) )
// Batch format // Batch format
...@@ -51,3 +55,13 @@ func (b *SingularBatch) LogContext(log log.Logger) log.Logger { ...@@ -51,3 +55,13 @@ func (b *SingularBatch) LogContext(log log.Logger) log.Logger {
func (b *SingularBatch) Epoch() eth.BlockID { func (b *SingularBatch) Epoch() eth.BlockID {
return eth.BlockID{Hash: b.EpochHash, Number: uint64(b.EpochNum)} return eth.BlockID{Hash: b.EpochHash, Number: uint64(b.EpochNum)}
} }
// encode writes the byte encoding of SingularBatch to Writer stream
func (b *SingularBatch) encode(w io.Writer) error {
return rlp.Encode(w, b)
}
// decode reads the byte encoding of SingularBatch from Reader stream
func (b *SingularBatch) decode(r *bytes.Reader) error {
return rlp.Decode(r, b)
}
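For reference, a minimal round trip through the unexported helpers added above would look like the sketch below. It is illustrative only: singularBatchRoundTrip is not part of the PR and is written as if it lived inside package derive, next to the methods it calls.

// Assumed to live in package derive.
func singularBatchRoundTrip(b *SingularBatch) (*SingularBatch, error) {
	var buf bytes.Buffer
	if err := b.encode(&buf); err != nil { // RLP-encodes the batch
		return nil, err
	}
	var out SingularBatch
	if err := out.decode(bytes.NewReader(buf.Bytes())); err != nil { // RLP-decodes it back
		return nil, err
	}
	return &out, nil
}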
...@@ -5,7 +5,7 @@ import ( ...@@ -5,7 +5,7 @@ import (
"math/rand" "math/rand"
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require"
) )
func TestSingularBatchForBatchInterface(t *testing.T) { func TestSingularBatchForBatchInterface(t *testing.T) {
...@@ -15,7 +15,7 @@ func TestSingularBatchForBatchInterface(t *testing.T) { ...@@ -15,7 +15,7 @@ func TestSingularBatchForBatchInterface(t *testing.T) {
singularBatch := RandomSingularBatch(rng, txCount, chainID) singularBatch := RandomSingularBatch(rng, txCount, chainID)
assert.Equal(t, SingularBatchType, singularBatch.GetBatchType()) require.Equal(t, SingularBatchType, singularBatch.GetBatchType())
assert.Equal(t, singularBatch.Timestamp, singularBatch.GetTimestamp()) require.Equal(t, singularBatch.Timestamp, singularBatch.GetTimestamp())
assert.Equal(t, singularBatch.EpochNum, singularBatch.GetEpochNum()) require.Equal(t, singularBatch.EpochNum, singularBatch.GetEpochNum())
} }
...@@ -34,8 +34,8 @@ var ErrEmptySpanBatch = errors.New("span-batch must not be empty") ...@@ -34,8 +34,8 @@ var ErrEmptySpanBatch = errors.New("span-batch must not be empty")
type spanBatchPrefix struct { type spanBatchPrefix struct {
relTimestamp uint64 // Relative timestamp of the first block relTimestamp uint64 // Relative timestamp of the first block
l1OriginNum uint64 // L1 origin number l1OriginNum uint64 // L1 origin number
parentCheck []byte // First 20 bytes of the first block's parent hash parentCheck [20]byte // First 20 bytes of the first block's parent hash
l1OriginCheck []byte // First 20 bytes of the last block's L1 origin hash l1OriginCheck [20]byte // First 20 bytes of the last block's L1 origin hash
} }
type spanBatchPayload struct { type spanBatchPayload struct {
...@@ -51,6 +51,11 @@ type RawSpanBatch struct { ...@@ -51,6 +51,11 @@ type RawSpanBatch struct {
spanBatchPayload spanBatchPayload
} }
// GetBatchType returns its batch type (batch_version)
func (b *RawSpanBatch) GetBatchType() int {
return SpanBatchType
}
// decodeOriginBits parses data into bp.originBits // decodeOriginBits parses data into bp.originBits
// originBits is bitlist right-padded to a multiple of 8 bits // originBits is bitlist right-padded to a multiple of 8 bits
func (bp *spanBatchPayload) decodeOriginBits(r *bytes.Reader) error { func (bp *spanBatchPayload) decodeOriginBits(r *bytes.Reader) error {
...@@ -105,8 +110,7 @@ func (bp *spanBatchPrefix) decodeL1OriginNum(r *bytes.Reader) error { ...@@ -105,8 +110,7 @@ func (bp *spanBatchPrefix) decodeL1OriginNum(r *bytes.Reader) error {
// decodeParentCheck parses data into bp.parentCheck // decodeParentCheck parses data into bp.parentCheck
func (bp *spanBatchPrefix) decodeParentCheck(r *bytes.Reader) error { func (bp *spanBatchPrefix) decodeParentCheck(r *bytes.Reader) error {
bp.parentCheck = make([]byte, 20) _, err := io.ReadFull(r, bp.parentCheck[:])
_, err := io.ReadFull(r, bp.parentCheck)
if err != nil { if err != nil {
return fmt.Errorf("failed to read parent check: %w", err) return fmt.Errorf("failed to read parent check: %w", err)
} }
...@@ -115,8 +119,7 @@ func (bp *spanBatchPrefix) decodeParentCheck(r *bytes.Reader) error { ...@@ -115,8 +119,7 @@ func (bp *spanBatchPrefix) decodeParentCheck(r *bytes.Reader) error {
// decodeL1OriginCheck parses data into bp.decodeL1OriginCheck // decodeL1OriginCheck parses data into bp.decodeL1OriginCheck
func (bp *spanBatchPrefix) decodeL1OriginCheck(r *bytes.Reader) error { func (bp *spanBatchPrefix) decodeL1OriginCheck(r *bytes.Reader) error {
bp.l1OriginCheck = make([]byte, 20) _, err := io.ReadFull(r, bp.l1OriginCheck[:])
_, err := io.ReadFull(r, bp.l1OriginCheck)
if err != nil { if err != nil {
return fmt.Errorf("failed to read l1 origin check: %w", err) return fmt.Errorf("failed to read l1 origin check: %w", err)
} }
...@@ -221,12 +224,6 @@ func (bp *spanBatchPayload) decodePayload(r *bytes.Reader) error { ...@@ -221,12 +224,6 @@ func (bp *spanBatchPayload) decodePayload(r *bytes.Reader) error {
return nil return nil
} }
// decodeBytes parses data into b from data
func (b *RawSpanBatch) decodeBytes(data []byte) error {
r := bytes.NewReader(data)
return b.decode(r)
}
// decode reads the byte encoding of SpanBatch from Reader stream // decode reads the byte encoding of SpanBatch from Reader stream
func (b *RawSpanBatch) decode(r *bytes.Reader) error { func (b *RawSpanBatch) decode(r *bytes.Reader) error {
if r.Len() > MaxSpanBatchSize { if r.Len() > MaxSpanBatchSize {
...@@ -263,7 +260,7 @@ func (bp *spanBatchPrefix) encodeL1OriginNum(w io.Writer) error { ...@@ -263,7 +260,7 @@ func (bp *spanBatchPrefix) encodeL1OriginNum(w io.Writer) error {
// encodeParentCheck encodes bp.parentCheck // encodeParentCheck encodes bp.parentCheck
func (bp *spanBatchPrefix) encodeParentCheck(w io.Writer) error { func (bp *spanBatchPrefix) encodeParentCheck(w io.Writer) error {
if _, err := w.Write(bp.parentCheck); err != nil { if _, err := w.Write(bp.parentCheck[:]); err != nil {
return fmt.Errorf("cannot write parent check: %w", err) return fmt.Errorf("cannot write parent check: %w", err)
} }
return nil return nil
...@@ -271,7 +268,7 @@ func (bp *spanBatchPrefix) encodeParentCheck(w io.Writer) error { ...@@ -271,7 +268,7 @@ func (bp *spanBatchPrefix) encodeParentCheck(w io.Writer) error {
// encodeL1OriginCheck encodes bp.l1OriginCheck // encodeL1OriginCheck encodes bp.l1OriginCheck
func (bp *spanBatchPrefix) encodeL1OriginCheck(w io.Writer) error { func (bp *spanBatchPrefix) encodeL1OriginCheck(w io.Writer) error {
if _, err := w.Write(bp.l1OriginCheck); err != nil { if _, err := w.Write(bp.l1OriginCheck[:]); err != nil {
return fmt.Errorf("cannot write l1 origin check: %w", err) return fmt.Errorf("cannot write l1 origin check: %w", err)
} }
return nil return nil
...@@ -380,17 +377,6 @@ func (b *RawSpanBatch) encode(w io.Writer) error { ...@@ -380,17 +377,6 @@ func (b *RawSpanBatch) encode(w io.Writer) error {
return nil return nil
} }
// encodeBytes returns the byte encoding of SpanBatch
func (b *RawSpanBatch) encodeBytes() ([]byte, error) {
buf := encodeBufferPool.Get().(*bytes.Buffer)
defer encodeBufferPool.Put(buf)
buf.Reset()
if err := b.encode(buf); err != nil {
return []byte{}, err
}
return buf.Bytes(), nil
}
// derive converts RawSpanBatch into SpanBatch, which has a list of spanBatchElement. // derive converts RawSpanBatch into SpanBatch, which has a list of spanBatchElement.
// We need chain config constants to derive values for making payload attributes. // We need chain config constants to derive values for making payload attributes.
func (b *RawSpanBatch) derive(blockTime, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) { func (b *RawSpanBatch) derive(blockTime, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) {
...@@ -451,8 +437,8 @@ func singularBatchToElement(singularBatch *SingularBatch) *spanBatchElement { ...@@ -451,8 +437,8 @@ func singularBatchToElement(singularBatch *SingularBatch) *spanBatchElement {
// SpanBatch is an implementation of Batch interface, // SpanBatch is an implementation of Batch interface,
// containing the input to build a span of L2 blocks in derived form (spanBatchElement) // containing the input to build a span of L2 blocks in derived form (spanBatchElement)
type SpanBatch struct { type SpanBatch struct {
parentCheck []byte // First 20 bytes of the first block's parent hash parentCheck [20]byte // First 20 bytes of the first block's parent hash
l1OriginCheck []byte // First 20 bytes of the last block's L1 origin hash l1OriginCheck [20]byte // First 20 bytes of the last block's L1 origin hash
batches []*spanBatchElement // List of block input in derived form batches []*spanBatchElement // List of block input in derived form
} }
...@@ -473,8 +459,8 @@ func (b *SpanBatch) LogContext(log log.Logger) log.Logger { ...@@ -473,8 +459,8 @@ func (b *SpanBatch) LogContext(log log.Logger) log.Logger {
} }
return log.New( return log.New(
"batch_timestamp", b.batches[0].Timestamp, "batch_timestamp", b.batches[0].Timestamp,
"parent_check", hexutil.Encode(b.parentCheck), "parent_check", hexutil.Encode(b.parentCheck[:]),
"origin_check", hexutil.Encode(b.l1OriginCheck), "origin_check", hexutil.Encode(b.l1OriginCheck[:]),
"start_epoch_number", b.GetStartEpochNum(), "start_epoch_number", b.GetStartEpochNum(),
"end_epoch_number", b.GetBlockEpochNum(len(b.batches)-1), "end_epoch_number", b.GetBlockEpochNum(len(b.batches)-1),
"block_count", len(b.batches), "block_count", len(b.batches),
...@@ -488,12 +474,12 @@ func (b *SpanBatch) GetStartEpochNum() rollup.Epoch { ...@@ -488,12 +474,12 @@ func (b *SpanBatch) GetStartEpochNum() rollup.Epoch {
// CheckOriginHash checks if the l1OriginCheck matches the first 20 bytes of given hash, probably L1 block hash from the current canonical L1 chain. // CheckOriginHash checks if the l1OriginCheck matches the first 20 bytes of given hash, probably L1 block hash from the current canonical L1 chain.
func (b *SpanBatch) CheckOriginHash(hash common.Hash) bool { func (b *SpanBatch) CheckOriginHash(hash common.Hash) bool {
return bytes.Equal(b.l1OriginCheck, hash.Bytes()[:20]) return bytes.Equal(b.l1OriginCheck[:], hash.Bytes()[:20])
} }
// CheckParentHash checks if the parentCheck matches the first 20 bytes of given hash, probably the current L2 safe head. // CheckParentHash checks if the parentCheck matches the first 20 bytes of given hash, probably the current L2 safe head.
func (b *SpanBatch) CheckParentHash(hash common.Hash) bool { func (b *SpanBatch) CheckParentHash(hash common.Hash) bool {
return bytes.Equal(b.parentCheck, hash.Bytes()[:20]) return bytes.Equal(b.parentCheck[:], hash.Bytes()[:20])
} }
// GetBlockEpochNum returns the epoch number(L1 origin block number) of the block at the given index in the span. // GetBlockEpochNum returns the epoch number(L1 origin block number) of the block at the given index in the span.
...@@ -520,10 +506,10 @@ func (b *SpanBatch) GetBlockCount() int { ...@@ -520,10 +506,10 @@ func (b *SpanBatch) GetBlockCount() int {
// updates l1OriginCheck or parentCheck if needed. // updates l1OriginCheck or parentCheck if needed.
func (b *SpanBatch) AppendSingularBatch(singularBatch *SingularBatch) { func (b *SpanBatch) AppendSingularBatch(singularBatch *SingularBatch) {
if len(b.batches) == 0 { if len(b.batches) == 0 {
b.parentCheck = singularBatch.ParentHash.Bytes()[:20] copy(b.parentCheck[:], singularBatch.ParentHash.Bytes()[:20])
} }
b.batches = append(b.batches, singularBatchToElement(singularBatch)) b.batches = append(b.batches, singularBatchToElement(singularBatch))
b.l1OriginCheck = singularBatch.EpochHash.Bytes()[:20] copy(b.l1OriginCheck[:], singularBatch.EpochHash.Bytes()[:20])
} }
// ToRawSpanBatch merges SingularBatch List and initialize single RawSpanBatch // ToRawSpanBatch merges SingularBatch List and initialize single RawSpanBatch
...@@ -541,10 +527,8 @@ func (b *SpanBatch) ToRawSpanBatch(originChangedBit uint, genesisTimestamp uint6 ...@@ -541,10 +527,8 @@ func (b *SpanBatch) ToRawSpanBatch(originChangedBit uint, genesisTimestamp uint6
span_end := b.batches[len(b.batches)-1] span_end := b.batches[len(b.batches)-1]
raw.relTimestamp = span_start.Timestamp - genesisTimestamp raw.relTimestamp = span_start.Timestamp - genesisTimestamp
raw.l1OriginNum = uint64(span_end.EpochNum) raw.l1OriginNum = uint64(span_end.EpochNum)
raw.parentCheck = make([]byte, 20) raw.parentCheck = b.parentCheck
copy(raw.parentCheck, b.parentCheck) raw.l1OriginCheck = b.l1OriginCheck
raw.l1OriginCheck = make([]byte, 20)
copy(raw.l1OriginCheck, b.l1OriginCheck)
// spanBatchPayload // spanBatchPayload
raw.blockCount = uint64(len(b.batches)) raw.blockCount = uint64(len(b.batches))
raw.originBits = new(big.Int) raw.originBits = new(big.Int)
...@@ -608,17 +592,16 @@ func (b *SpanBatch) GetSingularBatches(l1Origins []eth.L1BlockRef, l2SafeHead et ...@@ -608,17 +592,16 @@ func (b *SpanBatch) GetSingularBatches(l1Origins []eth.L1BlockRef, l2SafeHead et
// NewSpanBatch converts given singularBatches into spanBatchElements, and creates a new SpanBatch. // NewSpanBatch converts given singularBatches into spanBatchElements, and creates a new SpanBatch.
func NewSpanBatch(singularBatches []*SingularBatch) *SpanBatch { func NewSpanBatch(singularBatches []*SingularBatch) *SpanBatch {
spanBatch := &SpanBatch{}
if len(singularBatches) == 0 { if len(singularBatches) == 0 {
return &SpanBatch{} return spanBatch
}
spanBatch := SpanBatch{
parentCheck: singularBatches[0].ParentHash.Bytes()[:20],
l1OriginCheck: singularBatches[len(singularBatches)-1].EpochHash.Bytes()[:20],
} }
copy(spanBatch.parentCheck[:], singularBatches[0].ParentHash.Bytes()[:20])
copy(spanBatch.l1OriginCheck[:], singularBatches[len(singularBatches)-1].EpochHash.Bytes()[:20])
for _, singularBatch := range singularBatches { for _, singularBatch := range singularBatches {
spanBatch.batches = append(spanBatch.batches, singularBatchToElement(singularBatch)) spanBatch.batches = append(spanBatch.batches, singularBatchToElement(singularBatch))
} }
return &spanBatch return spanBatch
} }
// SpanBatchBuilder is a utility type to build a SpanBatch by adding a SingularBatch one by one. // SpanBatchBuilder is a utility type to build a SpanBatch by adding a SingularBatch one by one.
......
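Putting the SpanBatch changes together, the sketch below builds a span from singular batches and uses the exported check helpers, which now compare against the fixed-size [20]byte fields instead of slices. It is illustrative only; buildSpan, its package, and the derive import path are assumptions for the example.

package example

import (
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
	"github.com/ethereum/go-ethereum/common"
)

// buildSpan collects singular batches into a SpanBatch and validates its
// parent/origin check bytes against the expected hashes.
func buildSpan(batches []*derive.SingularBatch, parentHash, l1OriginHash common.Hash) (*derive.SpanBatch, bool) {
	span := derive.NewSpanBatch(batches)
	// CheckParentHash/CheckOriginHash compare the first 20 bytes of the given
	// hashes against the [20]byte parentCheck/l1OriginCheck fields.
	ok := span.CheckParentHash(parentHash) && span.CheckOriginHash(l1OriginHash)
	return span, ok
}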
This diff is collapsed.
...@@ -4,7 +4,6 @@ import ( ...@@ -4,7 +4,6 @@ import (
"bytes" "bytes"
"errors" "errors"
"fmt" "fmt"
"io"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -70,21 +69,6 @@ func (tx *spanBatchTx) MarshalBinary() ([]byte, error) { ...@@ -70,21 +69,6 @@ func (tx *spanBatchTx) MarshalBinary() ([]byte, error) {
return buf.Bytes(), err return buf.Bytes(), err
} }
// EncodeRLP implements rlp.Encoder
func (tx *spanBatchTx) EncodeRLP(w io.Writer) error {
if tx.Type() == types.LegacyTxType {
return rlp.Encode(w, tx.inner)
}
// It's an EIP-2718 typed TX envelope.
buf := encodeBufferPool.Get().(*bytes.Buffer)
defer encodeBufferPool.Put(buf)
buf.Reset()
if err := tx.encodeTyped(buf); err != nil {
return err
}
return rlp.Encode(w, buf.Bytes())
}
// setDecoded sets the inner transaction after decoding. // setDecoded sets the inner transaction after decoding.
func (tx *spanBatchTx) setDecoded(inner spanBatchTxData, size uint64) { func (tx *spanBatchTx) setDecoded(inner spanBatchTxData, size uint64) {
tx.inner = inner tx.inner = inner
...@@ -115,36 +99,6 @@ func (tx *spanBatchTx) decodeTyped(b []byte) (spanBatchTxData, error) { ...@@ -115,36 +99,6 @@ func (tx *spanBatchTx) decodeTyped(b []byte) (spanBatchTxData, error) {
} }
} }
// DecodeRLP implements rlp.Decoder
func (tx *spanBatchTx) DecodeRLP(s *rlp.Stream) error {
kind, size, err := s.Kind()
switch {
case err != nil:
return err
case kind == rlp.List:
// It's a legacy transaction.
var inner spanBatchLegacyTxData
err = s.Decode(&inner)
if err != nil {
return fmt.Errorf("failed to decode spanBatchLegacyTxData: %w", err)
}
tx.setDecoded(&inner, rlp.ListSize(size))
return nil
default:
// It's an EIP-2718 typed TX envelope.
var b []byte
if b, err = s.Bytes(); err != nil {
return err
}
inner, err := tx.decodeTyped(b)
if err != nil {
return err
}
tx.setDecoded(inner, uint64(len(b)))
return nil
}
}
// UnmarshalBinary decodes the canonical encoding of transactions. // UnmarshalBinary decodes the canonical encoding of transactions.
// It supports legacy RLP transactions and EIP2718 typed transactions. // It supports legacy RLP transactions and EIP2718 typed transactions.
func (tx *spanBatchTx) UnmarshalBinary(b []byte) error { func (tx *spanBatchTx) UnmarshalBinary(b []byte) error {
......
package derive package derive
import ( import (
"bytes"
"math/big" "math/big"
"math/rand" "math/rand"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
type spanBatchTxTest struct {
name string
trials int
mkTx func(rng *rand.Rand, signer types.Signer) *types.Transaction
}
func TestSpanBatchTxConvert(t *testing.T) { func TestSpanBatchTxConvert(t *testing.T) {
rng := rand.New(rand.NewSource(0x1331)) cases := []spanBatchTxTest{
{"legacy tx", 32, testutils.RandomLegacyTx},
{"access list tx", 32, testutils.RandomAccessListTx},
{"dynamic fee tx", 32, testutils.RandomDynamicFeeTx},
}
for i, testCase := range cases {
t.Run(testCase.name, func(t *testing.T) {
rng := rand.New(rand.NewSource(int64(0x1331 + i)))
chainID := big.NewInt(rng.Int63n(1000)) chainID := big.NewInt(rng.Int63n(1000))
signer := types.NewLondonSigner(chainID) signer := types.NewLondonSigner(chainID)
m := make(map[byte]int) for txIdx := 0; txIdx < testCase.trials; txIdx++ {
for i := 0; i < 32; i++ { tx := testCase.mkTx(rng, signer)
tx := testutils.RandomTx(rng, new(big.Int).SetUint64(rng.Uint64()), signer)
m[tx.Type()] += 1
v, r, s := tx.RawSignatureValues() v, r, s := tx.RawSignatureValues()
sbtx, err := newSpanBatchTx(*tx) sbtx, err := newSpanBatchTx(*tx)
assert.NoError(t, err) require.NoError(t, err)
tx2, err := sbtx.convertToFullTx(tx.Nonce(), tx.Gas(), tx.To(), chainID, v, r, s) tx2, err := sbtx.convertToFullTx(tx.Nonce(), tx.Gas(), tx.To(), chainID, v, r, s)
assert.NoError(t, err) require.NoError(t, err)
// compare after marshal because we only need inner field of transaction // compare after marshal because we only need inner field of transaction
txEncoded, err := tx.MarshalBinary() txEncoded, err := tx.MarshalBinary()
assert.NoError(t, err) require.NoError(t, err)
tx2Encoded, err := tx2.MarshalBinary() tx2Encoded, err := tx2.MarshalBinary()
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, txEncoded, tx2Encoded) assert.Equal(t, txEncoded, tx2Encoded)
} }
// make sure every tx type is tested })
assert.Positive(t, m[types.LegacyTxType]) }
assert.Positive(t, m[types.AccessListTxType])
assert.Positive(t, m[types.DynamicFeeTxType])
} }
func TestSpanBatchTxRoundTrip(t *testing.T) { func TestSpanBatchTxRoundTrip(t *testing.T) {
rng := rand.New(rand.NewSource(0x1332)) cases := []spanBatchTxTest{
{"legacy tx", 32, testutils.RandomLegacyTx},
{"access list tx", 32, testutils.RandomAccessListTx},
{"dynamic fee tx", 32, testutils.RandomDynamicFeeTx},
}
for i, testCase := range cases {
t.Run(testCase.name, func(t *testing.T) {
rng := rand.New(rand.NewSource(int64(0x1332 + i)))
chainID := big.NewInt(rng.Int63n(1000)) chainID := big.NewInt(rng.Int63n(1000))
signer := types.NewLondonSigner(chainID) signer := types.NewLondonSigner(chainID)
m := make(map[byte]int) for txIdx := 0; txIdx < testCase.trials; txIdx++ {
for i := 0; i < 32; i++ { tx := testCase.mkTx(rng, signer)
tx := testutils.RandomTx(rng, new(big.Int).SetUint64(rng.Uint64()), signer)
m[tx.Type()] += 1
sbtx, err := newSpanBatchTx(*tx) sbtx, err := newSpanBatchTx(*tx)
assert.NoError(t, err) require.NoError(t, err)
sbtxEncoded, err := sbtx.MarshalBinary() sbtxEncoded, err := sbtx.MarshalBinary()
assert.NoError(t, err) require.NoError(t, err)
var sbtx2 spanBatchTx var sbtx2 spanBatchTx
err = sbtx2.UnmarshalBinary(sbtxEncoded) err = sbtx2.UnmarshalBinary(sbtxEncoded)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, sbtx, &sbtx2) assert.Equal(t, sbtx, &sbtx2)
} }
// make sure every tx type is tested })
assert.Positive(t, m[types.LegacyTxType])
assert.Positive(t, m[types.AccessListTxType])
assert.Positive(t, m[types.DynamicFeeTxType])
}
func TestSpanBatchTxRoundTripRLP(t *testing.T) {
rng := rand.New(rand.NewSource(0x1333))
chainID := big.NewInt(rng.Int63n(1000))
signer := types.NewLondonSigner(chainID)
m := make(map[byte]int)
for i := 0; i < 32; i++ {
tx := testutils.RandomTx(rng, new(big.Int).SetUint64(rng.Uint64()), signer)
m[tx.Type()] += 1
sbtx, err := newSpanBatchTx(*tx)
assert.NoError(t, err)
var buf bytes.Buffer
err = sbtx.EncodeRLP(&buf)
assert.NoError(t, err)
result := buf.Bytes()
var sbtx2 spanBatchTx
r := bytes.NewReader(result)
rlpReader := rlp.NewStream(r, 0)
err = sbtx2.DecodeRLP(rlpReader)
assert.NoError(t, err)
assert.Equal(t, sbtx, &sbtx2)
} }
// make sure every tx type is tested
assert.Positive(t, m[types.LegacyTxType])
assert.Positive(t, m[types.AccessListTxType])
assert.Positive(t, m[types.DynamicFeeTxType])
} }
type spanBatchDummyTxData struct{} type spanBatchDummyTxData struct{}
...@@ -107,44 +91,44 @@ func TestSpanBatchTxInvalidTxType(t *testing.T) { ...@@ -107,44 +91,44 @@ func TestSpanBatchTxInvalidTxType(t *testing.T) {
// span batch never contain deposit tx // span batch never contain deposit tx
depositTx := types.NewTx(&types.DepositTx{}) depositTx := types.NewTx(&types.DepositTx{})
_, err := newSpanBatchTx(*depositTx) _, err := newSpanBatchTx(*depositTx)
assert.ErrorContains(t, err, "invalid tx type") require.ErrorContains(t, err, "invalid tx type")
var sbtx spanBatchTx var sbtx spanBatchTx
sbtx.inner = &spanBatchDummyTxData{} sbtx.inner = &spanBatchDummyTxData{}
_, err = sbtx.convertToFullTx(0, 0, nil, nil, nil, nil, nil) _, err = sbtx.convertToFullTx(0, 0, nil, nil, nil, nil, nil)
assert.ErrorContains(t, err, "invalid tx type") require.ErrorContains(t, err, "invalid tx type")
} }
func TestSpanBatchTxDecodeInvalid(t *testing.T) { func TestSpanBatchTxDecodeInvalid(t *testing.T) {
var sbtx spanBatchTx var sbtx spanBatchTx
_, err := sbtx.decodeTyped([]byte{}) _, err := sbtx.decodeTyped([]byte{})
assert.EqualError(t, err, "typed transaction too short") require.EqualError(t, err, "typed transaction too short")
tx := types.NewTx(&types.LegacyTx{}) tx := types.NewTx(&types.LegacyTx{})
txEncoded, err := tx.MarshalBinary() txEncoded, err := tx.MarshalBinary()
assert.NoError(t, err) require.NoError(t, err)
// legacy tx is not typed tx // legacy tx is not typed tx
_, err = sbtx.decodeTyped(txEncoded) _, err = sbtx.decodeTyped(txEncoded)
assert.EqualError(t, err, types.ErrTxTypeNotSupported.Error()) require.EqualError(t, err, types.ErrTxTypeNotSupported.Error())
tx2 := types.NewTx(&types.AccessListTx{}) tx2 := types.NewTx(&types.AccessListTx{})
tx2Encoded, err := tx2.MarshalBinary() tx2Encoded, err := tx2.MarshalBinary()
assert.NoError(t, err) require.NoError(t, err)
tx2Encoded[0] = types.DynamicFeeTxType tx2Encoded[0] = types.DynamicFeeTxType
_, err = sbtx.decodeTyped(tx2Encoded) _, err = sbtx.decodeTyped(tx2Encoded)
assert.ErrorContains(t, err, "failed to decode spanBatchDynamicFeeTxData") require.ErrorContains(t, err, "failed to decode spanBatchDynamicFeeTxData")
tx3 := types.NewTx(&types.DynamicFeeTx{}) tx3 := types.NewTx(&types.DynamicFeeTx{})
tx3Encoded, err := tx3.MarshalBinary() tx3Encoded, err := tx3.MarshalBinary()
assert.NoError(t, err) require.NoError(t, err)
tx3Encoded[0] = types.AccessListTxType tx3Encoded[0] = types.AccessListTxType
_, err = sbtx.decodeTyped(tx3Encoded) _, err = sbtx.decodeTyped(tx3Encoded)
assert.ErrorContains(t, err, "failed to decode spanBatchAccessListTxData") require.ErrorContains(t, err, "failed to decode spanBatchAccessListTxData")
invalidLegacyTxDecoded := []byte{0xFF, 0xFF} invalidLegacyTxDecoded := []byte{0xFF, 0xFF}
err = sbtx.UnmarshalBinary(invalidLegacyTxDecoded) err = sbtx.UnmarshalBinary(invalidLegacyTxDecoded)
assert.ErrorContains(t, err, "failed to decode spanBatchLegacyTxData") require.ErrorContains(t, err, "failed to decode spanBatchLegacyTxData")
} }
...@@ -6,11 +6,12 @@ import ( ...@@ -6,11 +6,12 @@ import (
"math/rand" "math/rand"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/ethereum/go-ethereum/core/types"
"github.com/holiman/uint256" "github.com/holiman/uint256"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum-optimism/optimism/op-service/testutils"
) )
func TestSpanBatchTxsContractCreationBits(t *testing.T) { func TestSpanBatchTxsContractCreationBits(t *testing.T) {
...@@ -27,23 +28,23 @@ func TestSpanBatchTxsContractCreationBits(t *testing.T) { ...@@ -27,23 +28,23 @@ func TestSpanBatchTxsContractCreationBits(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
err := sbt.encodeContractCreationBits(&buf) err := sbt.encodeContractCreationBits(&buf)
assert.NoError(t, err) require.NoError(t, err)
// contractCreationBit field is fixed length: single bit // contractCreationBit field is fixed length: single bit
contractCreationBitBufferLen := totalBlockTxCount / 8 contractCreationBitBufferLen := totalBlockTxCount / 8
if totalBlockTxCount%8 != 0 { if totalBlockTxCount%8 != 0 {
contractCreationBitBufferLen++ contractCreationBitBufferLen++
} }
assert.Equal(t, buf.Len(), int(contractCreationBitBufferLen)) require.Equal(t, buf.Len(), int(contractCreationBitBufferLen))
result := buf.Bytes() result := buf.Bytes()
sbt.contractCreationBits = nil sbt.contractCreationBits = nil
r := bytes.NewReader(result) r := bytes.NewReader(result)
err = sbt.decodeContractCreationBits(r) err = sbt.decodeContractCreationBits(r)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, contractCreationBits, sbt.contractCreationBits) require.Equal(t, contractCreationBits, sbt.contractCreationBits)
} }
func TestSpanBatchTxsContractCreationCount(t *testing.T) { func TestSpanBatchTxsContractCreationCount(t *testing.T) {
...@@ -62,16 +63,16 @@ func TestSpanBatchTxsContractCreationCount(t *testing.T) { ...@@ -62,16 +63,16 @@ func TestSpanBatchTxsContractCreationCount(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
err := sbt.encodeContractCreationBits(&buf) err := sbt.encodeContractCreationBits(&buf)
assert.NoError(t, err) require.NoError(t, err)
result := buf.Bytes() result := buf.Bytes()
sbt.contractCreationBits = nil sbt.contractCreationBits = nil
r := bytes.NewReader(result) r := bytes.NewReader(result)
err = sbt.decodeContractCreationBits(r) err = sbt.decodeContractCreationBits(r)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, contractCreationCount, sbt.contractCreationCount()) require.Equal(t, contractCreationCount, sbt.contractCreationCount())
} }
func TestSpanBatchTxsYParityBits(t *testing.T) { func TestSpanBatchTxsYParityBits(t *testing.T) {
...@@ -88,23 +89,23 @@ func TestSpanBatchTxsYParityBits(t *testing.T) { ...@@ -88,23 +89,23 @@ func TestSpanBatchTxsYParityBits(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
err := sbt.encodeYParityBits(&buf) err := sbt.encodeYParityBits(&buf)
assert.NoError(t, err) require.NoError(t, err)
// yParityBit field is fixed length: single bit // yParityBit field is fixed length: single bit
yParityBitBufferLen := totalBlockTxCount / 8 yParityBitBufferLen := totalBlockTxCount / 8
if totalBlockTxCount%8 != 0 { if totalBlockTxCount%8 != 0 {
yParityBitBufferLen++ yParityBitBufferLen++
} }
assert.Equal(t, buf.Len(), int(yParityBitBufferLen)) require.Equal(t, buf.Len(), int(yParityBitBufferLen))
result := buf.Bytes() result := buf.Bytes()
sbt.yParityBits = nil sbt.yParityBits = nil
r := bytes.NewReader(result) r := bytes.NewReader(result)
err = sbt.decodeYParityBits(r) err = sbt.decodeYParityBits(r)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, yParityBits, sbt.yParityBits) require.Equal(t, yParityBits, sbt.yParityBits)
} }
func TestSpanBatchTxsTxSigs(t *testing.T) { func TestSpanBatchTxsTxSigs(t *testing.T) {
...@@ -121,22 +122,22 @@ func TestSpanBatchTxsTxSigs(t *testing.T) { ...@@ -121,22 +122,22 @@ func TestSpanBatchTxsTxSigs(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
err := sbt.encodeTxSigsRS(&buf) err := sbt.encodeTxSigsRS(&buf)
assert.NoError(t, err) require.NoError(t, err)
// txSig field is fixed length: 32 byte + 32 byte = 64 byte // txSig field is fixed length: 32 byte + 32 byte = 64 byte
assert.Equal(t, buf.Len(), 64*int(totalBlockTxCount)) require.Equal(t, buf.Len(), 64*int(totalBlockTxCount))
result := buf.Bytes() result := buf.Bytes()
sbt.txSigs = nil sbt.txSigs = nil
r := bytes.NewReader(result) r := bytes.NewReader(result)
err = sbt.decodeTxSigsRS(r) err = sbt.decodeTxSigsRS(r)
assert.NoError(t, err) require.NoError(t, err)
// v field is not set // v field is not set
for i := 0; i < int(totalBlockTxCount); i++ { for i := 0; i < int(totalBlockTxCount); i++ {
assert.Equal(t, txSigs[i].r, sbt.txSigs[i].r) require.Equal(t, txSigs[i].r, sbt.txSigs[i].r)
assert.Equal(t, txSigs[i].s, sbt.txSigs[i].s) require.Equal(t, txSigs[i].s, sbt.txSigs[i].s)
} }
} }
...@@ -154,16 +155,16 @@ func TestSpanBatchTxsTxNonces(t *testing.T) { ...@@ -154,16 +155,16 @@ func TestSpanBatchTxsTxNonces(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
err := sbt.encodeTxNonces(&buf) err := sbt.encodeTxNonces(&buf)
assert.NoError(t, err) require.NoError(t, err)
result := buf.Bytes() result := buf.Bytes()
sbt.txNonces = nil sbt.txNonces = nil
r := bytes.NewReader(result) r := bytes.NewReader(result)
err = sbt.decodeTxNonces(r) err = sbt.decodeTxNonces(r)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, txNonces, sbt.txNonces) require.Equal(t, txNonces, sbt.txNonces)
} }
func TestSpanBatchTxsTxGases(t *testing.T) { func TestSpanBatchTxsTxGases(t *testing.T) {
...@@ -180,16 +181,16 @@ func TestSpanBatchTxsTxGases(t *testing.T) { ...@@ -180,16 +181,16 @@ func TestSpanBatchTxsTxGases(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
err := sbt.encodeTxGases(&buf) err := sbt.encodeTxGases(&buf)
assert.NoError(t, err) require.NoError(t, err)
result := buf.Bytes() result := buf.Bytes()
sbt.txGases = nil sbt.txGases = nil
r := bytes.NewReader(result) r := bytes.NewReader(result)
err = sbt.decodeTxGases(r) err = sbt.decodeTxGases(r)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, txGases, sbt.txGases) require.Equal(t, txGases, sbt.txGases)
} }
func TestSpanBatchTxsTxTos(t *testing.T) { func TestSpanBatchTxsTxTos(t *testing.T) {
...@@ -209,19 +210,19 @@ func TestSpanBatchTxsTxTos(t *testing.T) { ...@@ -209,19 +210,19 @@ func TestSpanBatchTxsTxTos(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
err := sbt.encodeTxTos(&buf) err := sbt.encodeTxTos(&buf)
assert.NoError(t, err) require.NoError(t, err)
// to field is fixed length: 20 bytes // to field is fixed length: 20 bytes
assert.Equal(t, buf.Len(), 20*len(txTos)) require.Equal(t, buf.Len(), 20*len(txTos))
result := buf.Bytes() result := buf.Bytes()
sbt.txTos = nil sbt.txTos = nil
r := bytes.NewReader(result) r := bytes.NewReader(result)
err = sbt.decodeTxTos(r) err = sbt.decodeTxTos(r)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, txTos, sbt.txTos) require.Equal(t, txTos, sbt.txTos)
} }
func TestSpanBatchTxsTxDatas(t *testing.T) { func TestSpanBatchTxsTxDatas(t *testing.T) {
...@@ -240,7 +241,7 @@ func TestSpanBatchTxsTxDatas(t *testing.T) { ...@@ -240,7 +241,7 @@ func TestSpanBatchTxsTxDatas(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
err := sbt.encodeTxDatas(&buf) err := sbt.encodeTxDatas(&buf)
assert.NoError(t, err) require.NoError(t, err)
result := buf.Bytes() result := buf.Bytes()
sbt.txDatas = nil sbt.txDatas = nil
...@@ -248,10 +249,10 @@ func TestSpanBatchTxsTxDatas(t *testing.T) { ...@@ -248,10 +249,10 @@ func TestSpanBatchTxsTxDatas(t *testing.T) {
r := bytes.NewReader(result) r := bytes.NewReader(result)
err = sbt.decodeTxDatas(r) err = sbt.decodeTxDatas(r)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, txDatas, sbt.txDatas) require.Equal(t, txDatas, sbt.txDatas)
assert.Equal(t, txTypes, sbt.txTypes) require.Equal(t, txTypes, sbt.txTypes)
} }
func TestSpanBatchTxsRecoverV(t *testing.T) { func TestSpanBatchTxsRecoverV(t *testing.T) {
...@@ -291,7 +292,7 @@ func TestSpanBatchTxsRecoverV(t *testing.T) { ...@@ -291,7 +292,7 @@ func TestSpanBatchTxsRecoverV(t *testing.T) {
recoveredVs = append(recoveredVs, txSig.v) recoveredVs = append(recoveredVs, txSig.v)
} }
assert.Equal(t, originalVs, recoveredVs, "recovered v mismatch") require.Equal(t, originalVs, recoveredVs, "recovered v mismatch")
} }
func TestSpanBatchTxsRoundTrip(t *testing.T) { func TestSpanBatchTxsRoundTrip(t *testing.T) {
...@@ -305,7 +306,7 @@ func TestSpanBatchTxsRoundTrip(t *testing.T) { ...@@ -305,7 +306,7 @@ func TestSpanBatchTxsRoundTrip(t *testing.T) {
var buf bytes.Buffer var buf bytes.Buffer
err := sbt.encode(&buf) err := sbt.encode(&buf)
assert.NoError(t, err) require.NoError(t, err)
result := buf.Bytes() result := buf.Bytes()
r := bytes.NewReader(result) r := bytes.NewReader(result)
...@@ -313,10 +314,10 @@ func TestSpanBatchTxsRoundTrip(t *testing.T) { ...@@ -313,10 +314,10 @@ func TestSpanBatchTxsRoundTrip(t *testing.T) {
var sbt2 spanBatchTxs var sbt2 spanBatchTxs
sbt2.totalBlockTxCount = totalBlockTxCount sbt2.totalBlockTxCount = totalBlockTxCount
err = sbt2.decode(r) err = sbt2.decode(r)
assert.NoError(t, err) require.NoError(t, err)
sbt2.recoverV(chainID) sbt2.recoverV(chainID)
assert.Equal(t, sbt, &sbt2) require.Equal(t, sbt, &sbt2)
} }
} }
...@@ -331,16 +332,16 @@ func TestSpanBatchTxsRoundTripFullTxs(t *testing.T) { ...@@ -331,16 +332,16 @@ func TestSpanBatchTxsRoundTripFullTxs(t *testing.T) {
for i := 0; i < int(totalblockTxCounts); i++ { for i := 0; i < int(totalblockTxCounts); i++ {
tx := testutils.RandomTx(rng, new(big.Int).SetUint64(rng.Uint64()), signer) tx := testutils.RandomTx(rng, new(big.Int).SetUint64(rng.Uint64()), signer)
rawTx, err := tx.MarshalBinary() rawTx, err := tx.MarshalBinary()
assert.NoError(t, err) require.NoError(t, err)
txs = append(txs, rawTx) txs = append(txs, rawTx)
} }
sbt, err := newSpanBatchTxs(txs, chainID) sbt, err := newSpanBatchTxs(txs, chainID)
assert.NoError(t, err) require.NoError(t, err)
txs2, err := sbt.fullTxs(chainID) txs2, err := sbt.fullTxs(chainID)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, txs, txs2) require.Equal(t, txs, txs2)
} }
} }
...@@ -373,17 +374,17 @@ func TestSpanBatchTxsFullTxNotEnoughTxTos(t *testing.T) { ...@@ -373,17 +374,17 @@ func TestSpanBatchTxsFullTxNotEnoughTxTos(t *testing.T) {
for i := 0; i < int(totalblockTxCounts); i++ { for i := 0; i < int(totalblockTxCounts); i++ {
tx := testutils.RandomTx(rng, new(big.Int).SetUint64(rng.Uint64()), signer) tx := testutils.RandomTx(rng, new(big.Int).SetUint64(rng.Uint64()), signer)
rawTx, err := tx.MarshalBinary() rawTx, err := tx.MarshalBinary()
assert.NoError(t, err) require.NoError(t, err)
txs = append(txs, rawTx) txs = append(txs, rawTx)
} }
sbt, err := newSpanBatchTxs(txs, chainID) sbt, err := newSpanBatchTxs(txs, chainID)
assert.NoError(t, err) require.NoError(t, err)
// drop single to field // drop single to field
sbt.txTos = sbt.txTos[:len(sbt.txTos)-2] sbt.txTos = sbt.txTos[:len(sbt.txTos)-2]
_, err = sbt.fullTxs(chainID) _, err = sbt.fullTxs(chainID)
assert.EqualError(t, err, "tx to not enough") require.EqualError(t, err, "tx to not enough")
} }
func TestSpanBatchTxsMaxContractCreationBitsLength(t *testing.T) { func TestSpanBatchTxsMaxContractCreationBitsLength(t *testing.T) {
......
...@@ -106,12 +106,12 @@ func (co *SpanChannelOut) AddSingularBatch(batch *SingularBatch, seqNum uint64) ...@@ -106,12 +106,12 @@ func (co *SpanChannelOut) AddSingularBatch(batch *SingularBatch, seqNum uint64)
return 0, fmt.Errorf("failed to convert SpanBatch into RawSpanBatch: %w", err) return 0, fmt.Errorf("failed to convert SpanBatch into RawSpanBatch: %w", err)
} }
// Encode RawSpanBatch into bytes // Encode RawSpanBatch into bytes
if err = rlp.Encode(&buf, NewSpanBatchData(*rawSpanBatch)); err != nil { if err = rlp.Encode(&buf, NewBatchData(rawSpanBatch)); err != nil {
return 0, fmt.Errorf("failed to encode RawSpanBatch into bytes: %w", err) return 0, fmt.Errorf("failed to encode RawSpanBatch into bytes: %w", err)
} }
// Ensure that the total size of all RLP elements is less than or equal to MAX_RLP_BYTES_PER_CHANNEL // Ensure that the total size of all RLP elements is less than or equal to MAX_RLP_BYTES_PER_CHANNEL
if buf.Len() > MaxRLPBytesPerChannel { if buf.Len() > MaxRLPBytesPerChannel {
return 0, fmt.Errorf("could not add %d bytes to channel of %d bytes, max is %d. err: %w", return 0, fmt.Errorf("could not take %d bytes as replacement of channel of %d bytes, max is %d. err: %w",
buf.Len(), co.rlpLength, MaxRLPBytesPerChannel, ErrTooManyRLPBytes) buf.Len(), co.rlpLength, MaxRLPBytesPerChannel, ErrTooManyRLPBytes)
} }
co.rlpLength = buf.Len() co.rlpLength = buf.Len()
......
...@@ -141,49 +141,72 @@ func RandomTo(rng *rand.Rand) *common.Address { ...@@ -141,49 +141,72 @@ func RandomTo(rng *rand.Rand) *common.Address {
} }
func RandomTx(rng *rand.Rand, baseFee *big.Int, signer types.Signer) *types.Transaction { func RandomTx(rng *rand.Rand, baseFee *big.Int, signer types.Signer) *types.Transaction {
gas := params.TxGas + uint64(rng.Int63n(2_000_000))
key := InsecureRandomKey(rng)
tip := big.NewInt(rng.Int63n(10 * params.GWei))
txTypeList := []int{types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType} txTypeList := []int{types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType}
txType := txTypeList[rng.Intn(len(txTypeList))] txType := txTypeList[rng.Intn(len(txTypeList))]
var txData types.TxData var tx *types.Transaction
switch txType { switch txType {
case types.LegacyTxType: case types.LegacyTxType:
txData = &types.LegacyTx{ tx = RandomLegacyTx(rng, signer)
case types.AccessListTxType:
tx = RandomAccessListTx(rng, signer)
case types.DynamicFeeTxType:
tx = RandomDynamicFeeTxWithBaseFee(rng, baseFee, signer)
default:
panic("invalid tx type")
}
return tx
}
func RandomLegacyTx(rng *rand.Rand, signer types.Signer) *types.Transaction {
key := InsecureRandomKey(rng)
txData := &types.LegacyTx{
Nonce: rng.Uint64(), Nonce: rng.Uint64(),
GasPrice: new(big.Int).SetUint64(rng.Uint64()), GasPrice: new(big.Int).SetUint64(rng.Uint64()),
Gas: gas, Gas: params.TxGas + uint64(rng.Int63n(2_000_000)),
To: RandomTo(rng), To: RandomTo(rng),
Value: RandomETH(rng, 10), Value: RandomETH(rng, 10),
Data: RandomData(rng, rng.Intn(1000)), Data: RandomData(rng, rng.Intn(1000)),
} }
case types.AccessListTxType: tx, err := types.SignNewTx(key, signer, txData)
txData = &types.AccessListTx{ if err != nil {
panic(err)
}
return tx
}
func RandomAccessListTx(rng *rand.Rand, signer types.Signer) *types.Transaction {
key := InsecureRandomKey(rng)
txData := &types.AccessListTx{
ChainID: signer.ChainID(), ChainID: signer.ChainID(),
Nonce: rng.Uint64(), Nonce: rng.Uint64(),
GasPrice: new(big.Int).SetUint64(rng.Uint64()), GasPrice: new(big.Int).SetUint64(rng.Uint64()),
Gas: gas, Gas: params.TxGas + uint64(rng.Int63n(2_000_000)),
To: RandomTo(rng), To: RandomTo(rng),
Value: RandomETH(rng, 10), Value: RandomETH(rng, 10),
Data: RandomData(rng, rng.Intn(1000)), Data: RandomData(rng, rng.Intn(1000)),
AccessList: nil, AccessList: nil,
} }
case types.DynamicFeeTxType: tx, err := types.SignNewTx(key, signer, txData)
txData = &types.DynamicFeeTx{ if err != nil {
panic(err)
}
return tx
}
func RandomDynamicFeeTxWithBaseFee(rng *rand.Rand, baseFee *big.Int, signer types.Signer) *types.Transaction {
key := InsecureRandomKey(rng)
tip := big.NewInt(rng.Int63n(10 * params.GWei))
txData := &types.DynamicFeeTx{
ChainID: signer.ChainID(), ChainID: signer.ChainID(),
Nonce: rng.Uint64(), Nonce: rng.Uint64(),
GasTipCap: tip, GasTipCap: tip,
GasFeeCap: new(big.Int).Add(baseFee, tip), GasFeeCap: new(big.Int).Add(baseFee, tip),
Gas: gas, Gas: params.TxGas + uint64(rng.Int63n(2_000_000)),
To: RandomTo(rng), To: RandomTo(rng),
Value: RandomETH(rng, 10), Value: RandomETH(rng, 10),
Data: RandomData(rng, rng.Intn(1000)), Data: RandomData(rng, rng.Intn(1000)),
AccessList: nil, AccessList: nil,
} }
default:
panic("invalid tx type")
}
tx, err := types.SignNewTx(key, signer, txData) tx, err := types.SignNewTx(key, signer, txData)
if err != nil { if err != nil {
panic(err) panic(err)
...@@ -191,6 +214,11 @@ func RandomTx(rng *rand.Rand, baseFee *big.Int, signer types.Signer) *types.Tran ...@@ -191,6 +214,11 @@ func RandomTx(rng *rand.Rand, baseFee *big.Int, signer types.Signer) *types.Tran
return tx return tx
} }
func RandomDynamicFeeTx(rng *rand.Rand, signer types.Signer) *types.Transaction {
baseFee := new(big.Int).SetUint64(rng.Uint64())
return RandomDynamicFeeTxWithBaseFee(rng, baseFee, signer)
}
func RandomReceipt(rng *rand.Rand, signer types.Signer, tx *types.Transaction, txIndex uint64, cumulativeGasUsed uint64) *types.Receipt { func RandomReceipt(rng *rand.Rand, signer types.Signer, tx *types.Transaction, txIndex uint64, cumulativeGasUsed uint64) *types.Receipt {
gasUsed := params.TxGas + uint64(rng.Int63n(int64(tx.Gas()-params.TxGas+1))) gasUsed := params.TxGas + uint64(rng.Int63n(int64(tx.Gas()-params.TxGas+1)))
logs := make([]*types.Log, rng.Intn(10)) logs := make([]*types.Log, rng.Intn(10))
......
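The new per-type helpers make it straightforward to pin a specific transaction type in a test instead of sampling one through RandomTx, as the PR's own table-driven tests above do. A hypothetical standalone example (test name, seed, and package are made up for illustration):

package example_test

import (
	"math/big"
	"math/rand"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/ethereum/go-ethereum/core/types"

	"github.com/ethereum-optimism/optimism/op-service/testutils"
)

func TestRandomTxHelpers(t *testing.T) {
	rng := rand.New(rand.NewSource(1234))
	signer := types.NewLondonSigner(big.NewInt(rng.Int63n(1000)))

	// Each helper signs with the given signer and returns exactly the tx type
	// its name promises.
	require.Equal(t, uint8(types.LegacyTxType), testutils.RandomLegacyTx(rng, signer).Type())
	require.Equal(t, uint8(types.AccessListTxType), testutils.RandomAccessListTx(rng, signer).Type())
	require.Equal(t, uint8(types.DynamicFeeTxType), testutils.RandomDynamicFeeTx(rng, signer).Type())
}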