Commit c9677e41 authored by Axel Kingsley, committed by GitHub

op-batcher: stateful span batches & blind compressor (#9954)

* Add Benchmark for AddSingularBatch

* update compressor configs ; address PR comments

* Add b.N

* Export RandomSingularBatch through batch_test_util.go

* measure only the final batch ; other organizational improvements

* Add Benchmark for ToRawSpanBatch

* update tests

* minor fixup

* Add Benchmark for adding *All* Span Batches

* comment fixups

* narrow tests to only test span batches that won't exceed RLP limit

* Stateful Span Batches

* Blind Compressor

* final fixes

* add peek helper function

* Address PR Comments
parent 919df965
@@ -83,11 +83,11 @@ func NewChannelBuilder(cfg ChannelConfig, rollupCfg rollup.Config, latestL1Origi
if err != nil {
return nil, err
}
-var spanBatchBuilder *derive.SpanBatchBuilder
+var spanBatch *derive.SpanBatch
if cfg.BatchType == derive.SpanBatchType {
-spanBatchBuilder = derive.NewSpanBatchBuilder(rollupCfg.Genesis.L2Time, rollupCfg.L2ChainID)
+spanBatch = derive.NewSpanBatch(rollupCfg.Genesis.L2Time, rollupCfg.L2ChainID)
}
-co, err := derive.NewChannelOut(cfg.BatchType, c, spanBatchBuilder)
+co, err := derive.NewChannelOut(cfg.BatchType, c, spanBatch)
if err != nil {
return nil, err
}
...
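For orientation, a minimal sketch of the stateful flow this diff moves to, using only constructors and methods that appear in the tests and benchmarks further down (derive.NewSpanBatch, AppendSingularBatch, ToRawSpanBatch, derive.NewBatchData). The function name, the zero sequence number, and the inputs are illustrative assumptions, not code from this change:

func spanBatchSketch(genesisL2Time uint64, chainID *big.Int, singulars []*derive.SingularBatch) error {
	// one stateful span batch accumulates singular batches as they arrive
	spanBatch := derive.NewSpanBatch(genesisL2Time, chainID)
	for _, sb := range singulars {
		// sequence number 0 is a placeholder for the real L1-origin sequence number
		if err := spanBatch.AppendSingularBatch(sb, 0); err != nil {
			return err
		}
	}
	// the raw span batch is derived from the accumulated state when the channel is sealed
	rawSpanBatch, err := spanBatch.ToRawSpanBatch()
	if err != nil {
		return err
	}
	_ = derive.NewBatchData(rawSpanBatch) // wrapped for encoding, as the tests below do
	return nil
}

The same spanBatch instance is what NewChannelBuilder now hands to derive.NewChannelOut above, in place of the old SpanBatchBuilder.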
@@ -7,6 +7,7 @@ import (
"math/big"
"math/rand"
"testing"
+"time"

"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
@@ -83,8 +84,9 @@ func newMiniL2BlockWithNumberParentAndL1Information(numTx int, l2Number *big.Int
// which is presumably ErrTooManyRLPBytes.
func addTooManyBlocks(cb *ChannelBuilder) error {
rng := rand.New(rand.NewSource(1234))
+t := time.Now()
for i := 0; i < 10_000; i++ {
-block := dtest.RandomL2BlockWithChainId(rng, 1000, defaultTestRollupConfig.L2ChainID)
+block := dtest.RandomL2BlockWithChainIdAndTime(rng, 1000, defaultTestRollupConfig.L2ChainID, t.Add(time.Duration(i)*time.Second))
_, err := cb.AddBlock(block)
if err != nil {
return err
@@ -511,8 +513,9 @@ func ChannelBuilder_OutputFramesMaxFrameIndex(t *testing.T, batchType uint) {
require.NoError(t, err)
require.False(t, cb.IsFull())
require.Equal(t, 0, cb.PendingFrames())
-for {
-a := dtest.RandomL2BlockWithChainId(rng, 1000, defaultTestRollupConfig.L2ChainID)
+ti := time.Now()
+for i := 0; ; i++ {
+a := dtest.RandomL2BlockWithChainIdAndTime(rng, 1000, defaultTestRollupConfig.L2ChainID, ti.Add(time.Duration(i)*time.Second))
_, err = cb.AddBlock(a)
if cb.IsFull() {
fullErr := cb.FullErr()
@@ -702,9 +705,10 @@ func ChannelBuilder_PendingFrames_TotalFrames(t *testing.T, batchType uint) {
require.Zero(cb.PendingFrames())
require.Zero(cb.TotalFrames())

+ti := time.Now()
// fill up
-for {
-block := dtest.RandomL2BlockWithChainId(rng, 4, defaultTestRollupConfig.L2ChainID)
+for i := 0; ; i++ {
+block := dtest.RandomL2BlockWithChainIdAndTime(rng, 4, defaultTestRollupConfig.L2ChainID, ti.Add(time.Duration(i)*time.Second))
_, err := cb.AddBlock(block)
if cb.IsFull() {
break
@@ -734,10 +738,10 @@ func ChannelBuilder_InputBytes(t *testing.T, batchType uint) {
rng := rand.New(rand.NewSource(4982432))
cfg := defaultTestChannelConfig()
cfg.BatchType = batchType
-var spanBatchBuilder *derive.SpanBatchBuilder
+var spanBatch *derive.SpanBatch
if batchType == derive.SpanBatchType {
chainId := big.NewInt(1234)
-spanBatchBuilder = derive.NewSpanBatchBuilder(uint64(0), chainId)
+spanBatch = derive.NewSpanBatch(uint64(0), chainId)
}
cb, err := NewChannelBuilder(cfg, defaultTestRollupConfig, latestL1BlockOrigin)
require.NoError(err)
@@ -745,15 +749,17 @@ func ChannelBuilder_InputBytes(t *testing.T, batchType uint) {
require.Zero(cb.InputBytes())

var l int
+ti := time.Now()
for i := 0; i < 5; i++ {
-block := dtest.RandomL2BlockWithChainId(rng, rng.Intn(32), defaultTestRollupConfig.L2ChainID)
+block := dtest.RandomL2BlockWithChainIdAndTime(rng, rng.Intn(32), defaultTestRollupConfig.L2ChainID, ti.Add(time.Duration(i)*time.Second))
if batchType == derive.SingularBatchType {
l += blockBatchRlpSize(t, block)
} else {
singularBatch, l1Info, err := derive.BlockToSingularBatch(&defaultTestRollupConfig, block)
require.NoError(err)
-spanBatchBuilder.AppendSingularBatch(singularBatch, l1Info.SequenceNumber)
-rawSpanBatch, err := spanBatchBuilder.GetRawSpanBatch()
+err = spanBatch.AppendSingularBatch(singularBatch, l1Info.SequenceNumber)
+require.NoError(err)
+rawSpanBatch, err := spanBatch.ToRawSpanBatch()
require.NoError(err)
batch := derive.NewBatchData(rawSpanBatch)
var buf bytes.Buffer
@@ -779,8 +785,9 @@ func ChannelBuilder_OutputBytes(t *testing.T, batchType uint) {
require.Zero(cb.OutputBytes())

-for {
-block := dtest.RandomL2BlockWithChainId(rng, rng.Intn(32), defaultTestRollupConfig.L2ChainID)
+ti := time.Now()
+for i := 0; ; i++ {
+block := dtest.RandomL2BlockWithChainIdAndTime(rng, rng.Intn(32), defaultTestRollupConfig.L2ChainID, ti.Add(time.Duration(i)*time.Second))
_, err := cb.AddBlock(block)
if errors.Is(err, derive.ErrCompressorFull) {
break
...
package compressor

import (
	"bytes"
	"compress/zlib"

	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)

// BlindCompressor is a simple compressor that blindly compresses all data
// written to it. The only way to know whether the target output size has been
// reached is to first flush the buffer and then check the length of the
// compressed data.
type BlindCompressor struct {
	config Config

	inputBytes int
	buf        bytes.Buffer
	compress   *zlib.Writer
}

// NewBlindCompressor creates a new derive.Compressor implementation that
// blindly compresses all data written to it.
func NewBlindCompressor(config Config) (derive.Compressor, error) {
	c := &BlindCompressor{
		config: config,
	}

	compress, err := zlib.NewWriterLevel(&c.buf, zlib.BestCompression)
	if err != nil {
		return nil, err
	}
	c.compress = compress

	return c, nil
}

// Write compresses p, rejecting the write if the compressor is already full.
func (t *BlindCompressor) Write(p []byte) (int, error) {
	if err := t.FullErr(); err != nil {
		return 0, err
	}
	t.inputBytes += len(p)
	return t.compress.Write(p)
}

func (t *BlindCompressor) Close() error {
	return t.compress.Close()
}

// Read returns compressed data from the output buffer.
func (t *BlindCompressor) Read(p []byte) (int, error) {
	return t.buf.Read(p)
}

// Reset clears the output buffer and resets the zlib writer and input counter.
func (t *BlindCompressor) Reset() {
	t.buf.Reset()
	t.compress.Reset(&t.buf)
	t.inputBytes = 0
}

// Len returns the number of compressed bytes currently buffered.
func (t *BlindCompressor) Len() int {
	return t.buf.Len()
}

func (t *BlindCompressor) Flush() error {
	return t.compress.Flush()
}

// FullErr returns an error if the target output size has been reached.
// Flush *must* be called before this method to ensure the buffer is up to date.
func (t *BlindCompressor) FullErr() error {
	if uint64(t.Len()) >= t.config.TargetOutputSize {
		return derive.ErrCompressorFull
	}
	return nil
}
package compressor_test

import (
	"testing"

	"github.com/ethereum-optimism/optimism/op-batcher/compressor"
	"github.com/stretchr/testify/require"
)

func TestBlindCompressorLimit(t *testing.T) {
	bc, err := compressor.NewBlindCompressor(compressor.Config{
		TargetOutputSize: 10,
	})
	require.NoError(t, err)

	// write far too much data to the compressor, but never flush
	for i := 0; i < 100; i++ {
		_, err := bc.Write([]byte("hello"))
		require.NoError(t, err)
		require.NoError(t, bc.FullErr())
	}

	// finally flush the compressor and see that it is full
	bc.Flush()
	require.Error(t, bc.FullErr())

	// write a little more data to the compressor and see that it is still full
	_, err = bc.Write([]byte("hello"))
	require.Error(t, err)
}
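The comments on BlindCompressor and FullErr above describe a flush-then-check contract: Len only reflects input after Flush, so a caller has to flush before trusting FullErr. A rough sketch of that caller loop, assuming chunks is the data to compress and reusing the 128_000 target that appears elsewhere in this diff (names are illustrative):

func compressUntilFullSketch(chunks [][]byte) error {
	bc, err := compressor.NewBlindCompressor(compressor.Config{TargetOutputSize: 128_000})
	if err != nil {
		return err
	}
	for _, chunk := range chunks {
		if _, err := bc.Write(chunk); err != nil {
			return err // Write only refuses input once a previous Flush revealed fullness
		}
		// flush so Len(), and therefore FullErr(), accounts for everything written so far
		if err := bc.Flush(); err != nil {
			return err
		}
		if bc.FullErr() != nil {
			break // target output size reached
		}
	}
	return bc.Close()
}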
@@ -10,6 +10,7 @@ const (
RatioKind = "ratio"
ShadowKind = "shadow"
NoneKind = "none"
+BlindKind = "blind"

// CloseOverheadZlib is the number of final bytes a [zlib.Writer] call writes
// to the output buffer.
@@ -20,6 +21,7 @@ var Kinds = map[string]FactoryFunc{
RatioKind: NewRatioCompressor,
ShadowKind: NewShadowCompressor,
NoneKind: NewNonCompressor,
+BlindKind: NewBlindCompressor,
}

var KindKeys []string
...
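With BlindKind wired into the Kinds factory map, a compressor can be chosen by its configured kind string. A sketch of that lookup, assuming the factory signature matches NewBlindCompressor (Config in, derive.Compressor and error out) and that kind and target come from the batcher's configuration:

func newCompressorByKindSketch(kind string, target uint64) (derive.Compressor, error) {
	factory, ok := compressor.Kinds[kind] // e.g. kind == compressor.BlindKind
	if !ok {
		return nil, fmt.Errorf("unknown compressor kind: %q", kind)
	}
	return factory(compressor.Config{TargetOutputSize: target})
}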
@@ -189,14 +189,13 @@ func (s *L2Batcher) Buffer(t Testing) error {
if s.l2BatcherCfg.GarbageCfg != nil {
ch, err = NewGarbageChannelOut(s.l2BatcherCfg.GarbageCfg)
} else {
-c, e := compressor.NewRatioCompressor(compressor.Config{
+c, e := compressor.NewBlindCompressor(compressor.Config{
TargetOutputSize: batcher.MaxDataSize(1, s.l2BatcherCfg.MaxL1TxSize),
-ApproxComprRatio: 1,
})
require.NoError(t, e, "failed to create compressor")
var batchType uint = derive.SingularBatchType
-var spanBatchBuilder *derive.SpanBatchBuilder = nil
+var spanBatch *derive.SpanBatch
if s.l2BatcherCfg.ForceSubmitSingularBatch && s.l2BatcherCfg.ForceSubmitSpanBatch {
t.Fatalf("ForceSubmitSingularBatch and ForceSubmitSpanBatch cannot be set to true at the same time")
@@ -205,9 +204,9 @@ func (s *L2Batcher) Buffer(t Testing) error {
} else if s.l2BatcherCfg.ForceSubmitSpanBatch || s.rollupCfg.IsDelta(block.Time()) {
// If both ForceSubmitSingularBatch and ForceSubmitSpanbatch are false, use SpanBatch automatically if Delta HF is activated.
batchType = derive.SpanBatchType
-spanBatchBuilder = derive.NewSpanBatchBuilder(s.rollupCfg.Genesis.L2Time, s.rollupCfg.L2ChainID)
+spanBatch = derive.NewSpanBatch(s.rollupCfg.Genesis.L2Time, s.rollupCfg.L2ChainID)
}
-ch, err = derive.NewChannelOut(batchType, c, spanBatchBuilder)
+ch, err = derive.NewChannelOut(batchType, c, spanBatch)
}
require.NoError(t, err, "failed to create channel")
s.l2ChannelOut = ch
...
@@ -26,6 +26,17 @@ import (
"github.com/stretchr/testify/require"
)

+func newSpanChannelOut(t StatefulTesting, e e2eutils.SetupData) derive.ChannelOut {
+c, err := compressor.NewBlindCompressor(compressor.Config{
+TargetOutputSize: 128_000,
+})
+require.NoError(t, err)
+spanBatch := derive.NewSpanBatch(e.RollupCfg.Genesis.L2Time, e.RollupCfg.L2ChainID)
+channelOut, err := derive.NewChannelOut(derive.SpanBatchType, c, spanBatch)
+require.NoError(t, err)
+return channelOut
+}
+
// TestSyncBatchType run each sync test case in singular batch mode and span batch mode.
func TestSyncBatchType(t *testing.T) {
tests := []struct {
@@ -211,15 +222,7 @@ func TestBackupUnsafe(gt *testing.T) {
require.Equal(t, verifier.L2Unsafe().Number, uint64(5))
require.Equal(t, verifier.L2Safe().Number, uint64(0))

-c, e := compressor.NewRatioCompressor(compressor.Config{
-TargetOutputSize: 128_000,
-ApproxComprRatio: 1,
-})
-require.NoError(t, e)
-spanBatchBuilder := derive.NewSpanBatchBuilder(sd.RollupCfg.Genesis.L2Time, sd.RollupCfg.L2ChainID)
-// Create new span batch channel
-channelOut, err := derive.NewChannelOut(derive.SpanBatchType, c, spanBatchBuilder)
-require.NoError(t, err)
+channelOut := newSpanChannelOut(t, *sd)

for i := uint64(1); i <= sequencer.L2Unsafe().Number; i++ {
block, err := l2Cl.BlockByNumber(t.Ctx(), new(big.Int).SetUint64(i))
@@ -382,15 +385,7 @@ func TestBackupUnsafeReorgForkChoiceInputError(gt *testing.T) {
require.Equal(t, verifier.L2Unsafe().Number, uint64(5))
require.Equal(t, verifier.L2Safe().Number, uint64(0))

-c, e := compressor.NewRatioCompressor(compressor.Config{
-TargetOutputSize: 128_000,
-ApproxComprRatio: 1,
-})
-require.NoError(t, e)
-spanBatchBuilder := derive.NewSpanBatchBuilder(sd.RollupCfg.Genesis.L2Time, sd.RollupCfg.L2ChainID)
-// Create new span batch channel
-channelOut, err := derive.NewChannelOut(derive.SpanBatchType, c, spanBatchBuilder)
-require.NoError(t, err)
+channelOut := newSpanChannelOut(t, *sd)

for i := uint64(1); i <= sequencer.L2Unsafe().Number; i++ {
block, err := l2Cl.BlockByNumber(t.Ctx(), new(big.Int).SetUint64(i))
@@ -529,15 +524,7 @@ func TestBackupUnsafeReorgForkChoiceNotInputError(gt *testing.T) {
require.Equal(t, verifier.L2Unsafe().Number, uint64(5))
require.Equal(t, verifier.L2Safe().Number, uint64(0))

-c, e := compressor.NewRatioCompressor(compressor.Config{
-TargetOutputSize: 128_000,
-ApproxComprRatio: 1,
-})
-require.NoError(t, e)
-spanBatchBuilder := derive.NewSpanBatchBuilder(sd.RollupCfg.Genesis.L2Time, sd.RollupCfg.L2ChainID)
-// Create new span batch channel
-channelOut, err := derive.NewChannelOut(derive.SpanBatchType, c, spanBatchBuilder)
-require.NoError(t, err)
+channelOut := newSpanChannelOut(t, *sd)

for i := uint64(1); i <= sequencer.L2Unsafe().Number; i++ {
block, err := l2Cl.BlockByNumber(t.Ctx(), new(big.Int).SetUint64(i))
@@ -866,15 +853,7 @@ func TestInvalidPayloadInSpanBatch(gt *testing.T) {
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)

-c, e := compressor.NewRatioCompressor(compressor.Config{
-TargetOutputSize: 128_000,
-ApproxComprRatio: 1,
-})
-require.NoError(t, e)
-spanBatchBuilder := derive.NewSpanBatchBuilder(sd.RollupCfg.Genesis.L2Time, sd.RollupCfg.L2ChainID)
-// Create new span batch channel
-channelOut, err := derive.NewChannelOut(derive.SpanBatchType, c, spanBatchBuilder)
-require.NoError(t, err)
+channelOut := newSpanChannelOut(t, *sd)

// Create block A1 ~ A12 for L1 block #0 ~ #2
miner.ActEmptyBlock(t)
@@ -913,15 +892,7 @@ func TestInvalidPayloadInSpanBatch(gt *testing.T) {
require.Equal(t, verifier.L2Unsafe().Number, uint64(7))
require.Equal(t, verifier.L2Safe().Number, uint64(0))

-// Create new span batch channel
-c, e = compressor.NewRatioCompressor(compressor.Config{
-TargetOutputSize: 128_000,
-ApproxComprRatio: 1,
-})
-require.NoError(t, e)
-spanBatchBuilder = derive.NewSpanBatchBuilder(sd.RollupCfg.Genesis.L2Time, sd.RollupCfg.L2ChainID)
-channelOut, err = derive.NewChannelOut(derive.SpanBatchType, c, spanBatchBuilder)
-require.NoError(t, err)
+channelOut = newSpanChannelOut(t, *sd)

for i := uint64(1); i <= sequencer.L2Unsafe().Number; i++ {
block, err := l2Cl.BlockByNumber(t.Ctx(), new(big.Int).SetUint64(i))
...
@@ -25,8 +25,12 @@ var (
nc, _ = compressor.NewNonCompressor(compressor.Config{
TargetOutputSize: 100_000_000_000,
})
+bc, _ = compressor.NewBlindCompressor(compressor.Config{
+TargetOutputSize: 100_000_000_000,
+})

compressors = map[string]derive.Compressor{
+"BlindCompressor": bc,
"NonCompressor": nc,
"RatioCompressor": rc,
"ShadowCompressor": sc,
@@ -107,8 +111,8 @@ func BenchmarkFinalBatchChannelOut(b *testing.B) {
// don't measure the setup time
b.StopTimer()
compressors[tc.compKey].Reset()
-spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID)
-cout, _ := derive.NewChannelOut(tc.BatchType, compressors[tc.compKey], spanBatchBuilder)
+spanBatch := derive.NewSpanBatch(uint64(0), chainID)
+cout, _ := derive.NewChannelOut(tc.BatchType, compressors[tc.compKey], spanBatch)
// add all but the final batch to the channel out
for i := 0; i < tc.BatchCount-1; i++ {
_, err := cout.AddSingularBatch(batches[i], 0)
@@ -170,8 +174,8 @@ func BenchmarkAllBatchesChannelOut(b *testing.B) {
// don't measure the setup time
b.StopTimer()
compressors[tc.compKey].Reset()
-spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID)
-cout, _ := derive.NewChannelOut(tc.BatchType, compressors[tc.compKey], spanBatchBuilder)
+spanBatch := derive.NewSpanBatch(0, chainID)
+cout, _ := derive.NewChannelOut(tc.BatchType, compressors[tc.compKey], spanBatch)
b.StartTimer()
// add all batches to the channel out
for i := 0; i < tc.BatchCount; i++ {
@@ -219,12 +223,13 @@ func BenchmarkGetRawSpanBatch(b *testing.B) {
for bn := 0; bn < b.N; bn++ {
// don't measure the setup time
b.StopTimer()
-spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID)
+spanBatch := derive.NewSpanBatch(uint64(0), chainID)
for i := 0; i < tc.BatchCount; i++ {
-spanBatchBuilder.AppendSingularBatch(batches[i], 0)
+err := spanBatch.AppendSingularBatch(batches[i], 0)
+require.NoError(b, err)
}
b.StartTimer()
-_, err := spanBatchBuilder.GetRawSpanBatch()
+_, err := spanBatch.ToRawSpanBatch()
require.NoError(b, err)
}
})
...
@@ -66,7 +66,7 @@ func buildSpanBatches(t *testing.T, parent *eth.L2BlockRef, singularBatches []*S
var spanBatches []Batch
idx := 0
for _, count := range blockCounts {
-span := NewSpanBatch(singularBatches[idx : idx+count])
+span := initializedSpanBatch(singularBatches[idx:idx+count], uint64(0), chainId)
spanBatches = append(spanBatches, span)
idx += count
}
@@ -767,7 +767,7 @@ func TestBatchQueueOverlappingSpanBatch(t *testing.T) {
var inputBatches []Batch
batchSize := 3
for i := 0; i < len(expectedOutputBatches)-batchSize; i++ {
-inputBatches = append(inputBatches, NewSpanBatch(expectedOutputBatches[i:i+batchSize]))
+inputBatches = append(inputBatches, initializedSpanBatch(expectedOutputBatches[i:i+batchSize], uint64(0), chainId))
}
inputBatches = append(inputBatches, nil)
// inputBatches:
@@ -872,12 +872,12 @@ func TestBatchQueueComplex(t *testing.T) {
inputErrors := []error{nil, nil, nil, nil, nil, nil, io.EOF}
// batches will be returned by fakeBatchQueueInput
inputBatches := []Batch{
-NewSpanBatch(expectedOutputBatches[0:2]), // [6, 8] - no overlap
+initializedSpanBatch(expectedOutputBatches[0:2], uint64(0), chainId), // [6, 8] - no overlap
expectedOutputBatches[2], // [10] - no overlap
-NewSpanBatch(expectedOutputBatches[1:4]), // [8, 10, 12] - overlapped blocks: 8 or 8, 10
+initializedSpanBatch(expectedOutputBatches[1:4], uint64(0), chainId), // [8, 10, 12] - overlapped blocks: 8 or 8, 10
expectedOutputBatches[4], // [14] - no overlap
-NewSpanBatch(expectedOutputBatches[4:6]), // [14, 16] - overlapped blocks: nothing or 14
-NewSpanBatch(expectedOutputBatches[6:9]), // [18, 20, 22] - no overlap
+initializedSpanBatch(expectedOutputBatches[4:6], uint64(0), chainId), // [14, 16] - overlapped blocks: nothing or 14
+initializedSpanBatch(expectedOutputBatches[6:9], uint64(0), chainId), // [18, 20, 22] - no overlap
}

// Shuffle the order of input batches
@@ -979,7 +979,7 @@ func TestBatchQueueResetSpan(t *testing.T) {
}

input := &fakeBatchQueueInput{
-batches: []Batch{NewSpanBatch(singularBatches)},
+batches: []Batch{initializedSpanBatch(singularBatches, uint64(0), chainId)},
errors: []error{nil},
origin: l1[2],
}
...
@@ -558,7 +558,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
@@ -566,7 +566,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A1.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchUndecided,
ExpectedLog: "missing L1 block input, cannot proceed with batch checking",
@@ -578,7 +578,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
@@ -586,7 +586,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A1.Time + 1, // 1 too high
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchFuture,
ExpectedLog: "received out-of-order batch for future processing after next batch",
@@ -598,7 +598,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
@@ -606,7 +606,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A1.Time - 1, // block time is 2, so this is 1 too low
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "span batch has no new blocks after safe head",
@@ -618,7 +618,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: testutils.RandomHash(rng),
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
@@ -626,7 +626,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A1.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "ignoring batch with mismatching parent hash",
@@ -638,7 +638,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1F,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
@@ -646,7 +646,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A1.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "batch was included too late, sequence window expired",
@@ -658,7 +658,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2B0, // we already moved on to B
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2B0.Hash, // build on top of safe head to continue
EpochNum: rollup.Epoch(l2A3.L1Origin.Number), // epoch A is no longer valid
@@ -672,7 +672,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2B0.Time + defaultConf.BlockTime*2,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "dropped batch, epoch is too old",
@@ -684,7 +684,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2B0.ParentHash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
@@ -692,7 +692,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2B0.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchUndecided,
ExpectedLog: "eager batch wants to advance epoch, but could not without more L1 blocks",
@@ -704,7 +704,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A2,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A3.ParentHash,
EpochNum: rollup.Epoch(l2A3.L1Origin.Number),
@@ -719,7 +719,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2B0.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchUndecided,
ExpectedLog: "need more l1 blocks to check entire origins of span batch",
@@ -731,7 +731,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1D,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2B0.ParentHash,
EpochNum: rollup.Epoch(l1C.Number), // invalid, we need to adopt epoch B before C
@@ -739,7 +739,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2B0.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "batch is for future epoch too far ahead, while it has the next timestamp, so it must be invalid",
@@ -751,7 +751,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2B0.ParentHash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
@@ -759,7 +759,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2B0.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "batch is for different L1 chain, epoch hash does not match",
@@ -771,7 +771,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A2,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{ // valid batch
ParentHash: l2A3.ParentHash,
EpochNum: rollup.Epoch(l2A3.L1Origin.Number),
@@ -786,7 +786,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2B0.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "batch is for different L1 chain, epoch hash does not match",
@@ -798,7 +798,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0
ParentHash: l2A4.ParentHash,
EpochNum: rollup.Epoch(l2A4.L1Origin.Number),
@@ -806,7 +806,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A4.Time,
Transactions: []hexutil.Bytes{randTxData},
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again",
@@ -818,7 +818,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A2,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{ // valid batch
ParentHash: l2A3.ParentHash,
EpochNum: rollup.Epoch(l2A3.L1Origin.Number),
@@ -833,7 +833,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A4.Time,
Transactions: []hexutil.Bytes{randTxData},
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again",
@@ -845,7 +845,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2X0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1Z,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2Y0.ParentHash,
EpochNum: rollup.Epoch(l2Y0.L1Origin.Number),
@@ -853,7 +853,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time
Transactions: []hexutil.Bytes{randTxData},
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again",
@@ -865,7 +865,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1BLate,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{ // l2A4 time < l1BLate time, so we cannot adopt origin B yet
ParentHash: l2A4.ParentHash,
EpochNum: rollup.Epoch(l2A4.L1Origin.Number),
@@ -873,7 +873,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A4.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchAccept, // accepted because empty & preserving L2 time invariant
DeltaTime: &minTs,
@@ -884,7 +884,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2X0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1Z,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2Y0.ParentHash,
EpochNum: rollup.Epoch(l2Y0.L1Origin.Number),
@@ -899,7 +899,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2Z0.Time, // valid, but more than 6 ahead of l1Y.Time
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchAccept, // accepted because empty & still advancing epoch
DeltaTime: &minTs,
@@ -911,7 +911,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0
ParentHash: l2A4.ParentHash,
EpochNum: rollup.Epoch(l2A4.L1Origin.Number),
@@ -919,7 +919,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A4.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time
ExpectedLog: "without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid",
@@ -931,7 +931,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A2,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{ // valid batch
ParentHash: l2A3.ParentHash,
EpochNum: rollup.Epoch(l2A3.L1Origin.Number),
@@ -946,7 +946,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A4.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time
ExpectedLog: "without the next L1 origin we cannot determine yet if this empty batch that exceeds the time drift is still valid",
@@ -958,7 +958,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{ // we build l2A4, which has a timestamp of 2*4 = 8 higher than l2A0
ParentHash: l2A4.ParentHash,
EpochNum: rollup.Epoch(l2A4.L1Origin.Number),
@@ -966,7 +966,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A4.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop, // dropped because it could have advanced the epoch to B
ExpectedLog: "batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid",
@@ -978,7 +978,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A2,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{ // valid batch
ParentHash: l2A3.ParentHash,
EpochNum: rollup.Epoch(l2A3.L1Origin.Number),
@@ -993,7 +993,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A4.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop, // dropped because it could have advanced the epoch to B
ExpectedLog: "batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid",
@@ -1005,7 +1005,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
@@ -1015,7 +1015,7 @@ func TestValidBatch(t *testing.T) {
[]byte{}, // empty tx data
},
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "transaction data must not be empty, but found empty tx",
@@ -1027,7 +1027,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
@@ -1037,7 +1037,7 @@ func TestValidBatch(t *testing.T) {
[]byte{types.DepositTxType, 0}, // piece of data alike to a deposit
},
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "sequencers may not embed any deposits into batch data, but found tx that has one",
@@ -1049,7 +1049,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
@@ -1057,7 +1057,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A1.Time,
Transactions: []hexutil.Bytes{randTxData},
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchAccept,
DeltaTime: &minTs,
@@ -1068,7 +1068,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1C,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2B0.ParentHash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
@@ -1076,7 +1076,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2B0.Time,
Transactions: []hexutil.Bytes{randTxData},
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchAccept,
DeltaTime: &minTs,
@@ -1087,7 +1087,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A2,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{ // we build l2B0, which starts a new epoch too early
ParentHash: l2A2.Hash,
EpochNum: rollup.Epoch(l2B0.L1Origin.Number),
@@ -1095,7 +1095,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A2.Time + defaultConf.BlockTime,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "block timestamp is less than L1 origin timestamp",
@@ -1107,7 +1107,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A1,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{ // valid batch
ParentHash: l2A1.Hash,
EpochNum: rollup.Epoch(l2A2.L1Origin.Number),
@@ -1122,7 +1122,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A2.Time + defaultConf.BlockTime,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "block timestamp is less than L1 origin timestamp",
@@ -1134,7 +1134,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A2,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A1.Hash,
EpochNum: rollup.Epoch(l2A2.L1Origin.Number),
@@ -1149,7 +1149,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A3.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchAccept,
DeltaTime: &minTs,
@@ -1160,7 +1160,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A2,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A0.Hash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
@@ -1182,7 +1182,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A3.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchAccept,
DeltaTime: &minTs,
@@ -1193,7 +1193,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A2,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A0.Hash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
@@ -1208,7 +1208,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A2.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "span batch has no new blocks after safe head",
@@ -1220,7 +1220,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A2,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A0.Hash,
EpochNum: rollup.Epoch(l2A2.L1Origin.Number),
@@ -1235,7 +1235,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A3.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "ignoring batch with mismatching parent hash",
@@ -1247,7 +1247,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A2,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A1.Hash,
EpochNum: rollup.Epoch(l2A2.L1Origin.Number) + 1,
@@ -1262,7 +1262,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A3.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "overlapped block's L1 origin number does not match",
@@ -1274,7 +1274,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A2,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A1.Hash,
EpochNum: rollup.Epoch(l2A2.L1Origin.Number),
@@ -1289,7 +1289,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A3.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "overlapped block's tx count does not match",
@@ -1301,7 +1301,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A1,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A0.ParentHash,
EpochNum: rollup.Epoch(l2A0.L1Origin.Number),
@@ -1323,7 +1323,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A2.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchUndecided,
ExpectedLog: "failed to fetch L2 block",
@@ -1335,7 +1335,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A0.Hash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
@@ -1350,7 +1350,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A1.Time + 1,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "batch has misaligned timestamp, block time is too short",
@@ -1362,7 +1362,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A0.Hash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
@@ -1377,7 +1377,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A1.Time,
Transactions: nil,
},
-}),
+}, uint64(0), big.NewInt(0)),
},
Expected: BatchDrop,
ExpectedLog: "batch has misaligned timestamp, not overlapped exactly",
@@ -1389,7 +1389,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A3,
Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B,
-Batch: NewSpanBatch([]*SingularBatch{
+Batch: initializedSpanBatch([]*SingularBatch{
{
ParentHash: l2A2.Hash,
EpochNum: rollup.Epoch(l2A3.L1Origin.Number), EpochNum: rollup.Epoch(l2A3.L1Origin.Number),
...@@ -1404,7 +1404,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1404,7 +1404,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2B0.Time, Timestamp: l2B0.Time,
Transactions: nil, Transactions: nil,
}, },
}), }, uint64(0), big.NewInt(0)),
}, },
Expected: BatchUndecided, Expected: BatchUndecided,
ExpectedLog: "failed to fetch L2 block payload", ExpectedLog: "failed to fetch L2 block payload",
...@@ -1433,7 +1433,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1433,7 +1433,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A0, L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{ Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B, L1InclusionBlock: l1B,
Batch: NewSpanBatch([]*SingularBatch{ Batch: initializedSpanBatch([]*SingularBatch{
{ {
ParentHash: l2A1.ParentHash, ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
...@@ -1441,7 +1441,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1441,7 +1441,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A1.Time, Timestamp: l2A1.Time,
Transactions: []hexutil.Bytes{randTxData}, Transactions: []hexutil.Bytes{randTxData},
}, },
}), }, uint64(0), big.NewInt(0)),
}, },
DeltaTime: &l1B.Time, DeltaTime: &l1B.Time,
Expected: BatchDrop, Expected: BatchDrop,
...@@ -1470,7 +1470,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1470,7 +1470,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2A0, L2SafeHead: l2A0,
Batch: BatchWithL1InclusionBlock{ Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B, L1InclusionBlock: l1B,
Batch: NewSpanBatch([]*SingularBatch{ Batch: initializedSpanBatch([]*SingularBatch{
{ {
ParentHash: l2A1.ParentHash, ParentHash: l2A1.ParentHash,
EpochNum: rollup.Epoch(l2A1.L1Origin.Number), EpochNum: rollup.Epoch(l2A1.L1Origin.Number),
...@@ -1478,7 +1478,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1478,7 +1478,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2A1.Time, Timestamp: l2A1.Time,
Transactions: []hexutil.Bytes{randTxData}, Transactions: []hexutil.Bytes{randTxData},
}, },
}), }, uint64(0), big.NewInt(0)),
}, },
DeltaTime: &l1A.Time, DeltaTime: &l1A.Time,
Expected: BatchAccept, Expected: BatchAccept,
...@@ -1576,7 +1576,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1576,7 +1576,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2B1, L2SafeHead: l2B1,
Batch: BatchWithL1InclusionBlock{ Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B, L1InclusionBlock: l1B,
Batch: NewSpanBatch([]*SingularBatch{ Batch: initializedSpanBatch([]*SingularBatch{
{ {
ParentHash: l2B0.Hash, ParentHash: l2B0.Hash,
EpochNum: rollup.Epoch(l2B1.L1Origin.Number), EpochNum: rollup.Epoch(l2B1.L1Origin.Number),
...@@ -1591,7 +1591,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1591,7 +1591,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2B2.Time, Timestamp: l2B2.Time,
Transactions: nil, Transactions: nil,
}, },
}), }, uint64(0), big.NewInt(0)),
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "overlapped block's transaction does not match", ExpectedLog: "overlapped block's transaction does not match",
...@@ -1621,7 +1621,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1621,7 +1621,7 @@ func TestValidBatch(t *testing.T) {
L2SafeHead: l2B1, L2SafeHead: l2B1,
Batch: BatchWithL1InclusionBlock{ Batch: BatchWithL1InclusionBlock{
L1InclusionBlock: l1B, L1InclusionBlock: l1B,
Batch: NewSpanBatch([]*SingularBatch{ Batch: initializedSpanBatch([]*SingularBatch{
{ {
ParentHash: l2B0.Hash, ParentHash: l2B0.Hash,
EpochNum: rollup.Epoch(l2B1.L1Origin.Number), EpochNum: rollup.Epoch(l2B1.L1Origin.Number),
...@@ -1636,7 +1636,7 @@ func TestValidBatch(t *testing.T) { ...@@ -1636,7 +1636,7 @@ func TestValidBatch(t *testing.T) {
Timestamp: l2B2.Time, Timestamp: l2B2.Time,
Transactions: nil, Transactions: nil,
}, },
}), }, uint64(0), big.NewInt(0)),
}, },
Expected: BatchDrop, Expected: BatchDrop,
ExpectedLog: "failed to extract L2BlockRef from execution payload", ExpectedLog: "failed to extract L2BlockRef from execution payload",
......
...@@ -63,12 +63,12 @@ type ChannelOut interface { ...@@ -63,12 +63,12 @@ type ChannelOut interface {
OutputFrame(*bytes.Buffer, uint64) (uint16, error) OutputFrame(*bytes.Buffer, uint64) (uint16, error)
} }
func NewChannelOut(batchType uint, compress Compressor, spanBatchBuilder *SpanBatchBuilder) (ChannelOut, error) { func NewChannelOut(batchType uint, compress Compressor, spanBatch *SpanBatch) (ChannelOut, error) {
switch batchType { switch batchType {
case SingularBatchType: case SingularBatchType:
return NewSingularChannelOut(compress) return NewSingularChannelOut(compress)
case SpanBatchType: case SpanBatchType:
return NewSpanChannelOut(compress, spanBatchBuilder) return NewSpanChannelOut(compress, spanBatch)
default: default:
return nil, fmt.Errorf("unrecognized batch type: %d", batchType) return nil, fmt.Errorf("unrecognized batch type: %d", batchType)
} }
......
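With the hunk above, the batch type switch now threads a prebuilt *SpanBatch through to NewSpanChannelOut instead of a *SpanBatchBuilder. A minimal caller-side sketch of the new wiring; everything except NewSpanBatch, NewChannelOut and SpanBatchType is an assumption of this example (the compressor, genesis time and chain ID would come from the batcher's real configuration):

// Sketch only, not part of this diff. Assumes imports of math/big and the derive
// package, plus a ready Compressor; for SingularBatchType the spanBatch argument is unused.
func newSpanChannelOut(genesisL2Time uint64, l2ChainID *big.Int, comp derive.Compressor) (derive.ChannelOut, error) {
	// The SpanBatch now carries the genesis timestamp and chain ID itself,
	// so nothing extra needs to be passed when it is later converted to a RawSpanBatch.
	spanBatch := derive.NewSpanBatch(genesisL2Time, l2ChainID)
	return derive.NewChannelOut(derive.SpanBatchType, comp, spanBatch)
}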
...@@ -8,7 +8,6 @@ import ( ...@@ -8,7 +8,6 @@ import (
"fmt" "fmt"
"io" "io"
"math/big" "math/big"
"sort"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
...@@ -413,7 +412,14 @@ func singularBatchToElement(singularBatch *SingularBatch) *SpanBatchElement { ...@@ -413,7 +412,14 @@ func singularBatchToElement(singularBatch *SingularBatch) *SpanBatchElement {
type SpanBatch struct { type SpanBatch struct {
ParentCheck [20]byte // First 20 bytes of the first block's parent hash ParentCheck [20]byte // First 20 bytes of the first block's parent hash
L1OriginCheck [20]byte // First 20 bytes of the last block's L1 origin hash L1OriginCheck [20]byte // First 20 bytes of the last block's L1 origin hash
GenesisTimestamp uint64
ChainID *big.Int
Batches []*SpanBatchElement // List of block input in derived form Batches []*SpanBatchElement // List of block input in derived form
// caching
originBits *big.Int
blockTxCounts []uint64
sbtxs *spanBatchTxs
} }
// spanBatchMarshaling is a helper type used for JSON marshaling. // spanBatchMarshaling is a helper type used for JSON marshaling.
...@@ -493,60 +499,73 @@ func (b *SpanBatch) GetBlockCount() int { ...@@ -493,60 +499,73 @@ func (b *SpanBatch) GetBlockCount() int {
return len(b.Batches) return len(b.Batches)
} }
func (b *SpanBatch) peek(n int) *SpanBatchElement { return b.Batches[len(b.Batches)-1-n] }
// AppendSingularBatch appends a SingularBatch into the span batch // AppendSingularBatch appends a SingularBatch into the span batch
// updates l1OriginCheck or parentCheck if needed. // updates l1OriginCheck or parentCheck if needed.
func (b *SpanBatch) AppendSingularBatch(singularBatch *SingularBatch) { func (b *SpanBatch) AppendSingularBatch(singularBatch *SingularBatch, seqNum uint64) error {
if len(b.Batches) == 0 { // if this new element is not ordered with respect to the last element, panic
copy(b.ParentCheck[:], singularBatch.ParentHash.Bytes()[:20]) if len(b.Batches) > 0 && b.peek(0).Timestamp > singularBatch.Timestamp {
panic("span batch is not ordered")
} }
// always append the new batch and set the L1 origin check
b.Batches = append(b.Batches, singularBatchToElement(singularBatch)) b.Batches = append(b.Batches, singularBatchToElement(singularBatch))
// always update the L1 origin check
copy(b.L1OriginCheck[:], singularBatch.EpochHash.Bytes()[:20]) copy(b.L1OriginCheck[:], singularBatch.EpochHash.Bytes()[:20])
// if there is only one batch, initialize the ParentCheck
// and set the epochBit based on the seqNum
epochBit := uint(0)
if len(b.Batches) == 1 {
if seqNum == 0 {
epochBit = 1
}
copy(b.ParentCheck[:], singularBatch.ParentHash.Bytes()[:20])
} else {
// if there is more than one batch, set the epochBit based on the last two batches
if b.peek(1).EpochNum < b.peek(0).EpochNum {
epochBit = 1
}
}
// set the respective bit in the originBits
b.originBits.SetBit(b.originBits, len(b.Batches)-1, epochBit)
// update the blockTxCounts cache with the latest batch's tx count
b.blockTxCounts = append(b.blockTxCounts, uint64(len(b.peek(0).Transactions)))
// add the new txs to the sbtxs
newTxs := make([][]byte, 0, len(b.peek(0).Transactions))
for i := 0; i < len(b.peek(0).Transactions); i++ {
newTxs = append(newTxs, b.peek(0).Transactions[i])
}
// add the new txs to the sbtxs
// this is the only place where we can get an error
return b.sbtxs.AddTxs(newTxs, b.ChainID)
} }
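The bit cached here is the per-block "origin changed" flag that the old ToRawSpanBatch recomputed from scratch: bit i of originBits is 1 when block i starts a new L1 epoch, and for the very first block it is 1 exactly when seqNum == 0. A standalone illustration using only the standard library (the epoch numbers are made up):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Three blocks whose L1 epoch numbers are 5, 5, 6; the first block has seqNum 0.
	epochs := []uint64{5, 5, 6}
	seqNum := uint64(0)
	originBits := big.NewInt(0)
	for i, e := range epochs {
		bit := uint(0)
		if i == 0 {
			if seqNum == 0 {
				bit = 1
			}
		} else if epochs[i-1] < e {
			bit = 1
		}
		originBits.SetBit(originBits, i, bit)
	}
	fmt.Printf("%b\n", originBits) // prints 101: bits 0 and 2 are set
}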
// ToRawSpanBatch merges SingularBatch List and initialize single RawSpanBatch // ToRawSpanBatch merges SingularBatch List and initialize single RawSpanBatch
func (b *SpanBatch) ToRawSpanBatch(originChangedBit uint, genesisTimestamp uint64, chainID *big.Int) (*RawSpanBatch, error) { func (b *SpanBatch) ToRawSpanBatch() (*RawSpanBatch, error) {
if len(b.Batches) == 0 { if len(b.Batches) == 0 {
return nil, errors.New("cannot merge empty singularBatch list") return nil, errors.New("cannot merge empty singularBatch list")
} }
raw := RawSpanBatch{}
// Sort by timestamp of L2 block
sort.Slice(b.Batches, func(i, j int) bool {
return b.Batches[i].Timestamp < b.Batches[j].Timestamp
})
// spanBatchPrefix
span_start := b.Batches[0] span_start := b.Batches[0]
span_end := b.Batches[len(b.Batches)-1] span_end := b.Batches[len(b.Batches)-1]
raw.relTimestamp = span_start.Timestamp - genesisTimestamp
raw.l1OriginNum = uint64(span_end.EpochNum) return &RawSpanBatch{
raw.parentCheck = b.ParentCheck spanBatchPrefix: spanBatchPrefix{
raw.l1OriginCheck = b.L1OriginCheck relTimestamp: span_start.Timestamp - b.GenesisTimestamp,
// spanBatchPayload l1OriginNum: uint64(span_end.EpochNum),
raw.blockCount = uint64(len(b.Batches)) parentCheck: b.ParentCheck,
raw.originBits = new(big.Int) l1OriginCheck: b.L1OriginCheck,
raw.originBits.SetBit(raw.originBits, 0, originChangedBit) },
for i := 1; i < len(b.Batches); i++ { spanBatchPayload: spanBatchPayload{
bit := uint(0) blockCount: uint64(len(b.Batches)),
if b.Batches[i-1].EpochNum < b.Batches[i].EpochNum { originBits: b.originBits,
bit = 1 blockTxCounts: b.blockTxCounts,
} txs: b.sbtxs,
raw.originBits.SetBit(raw.originBits, i, bit) },
} }, nil
var blockTxCounts []uint64
var txs [][]byte
for _, batch := range b.Batches {
blockTxCount := uint64(len(batch.Transactions))
blockTxCounts = append(blockTxCounts, blockTxCount)
for _, rawTx := range batch.Transactions {
txs = append(txs, rawTx)
}
}
raw.blockTxCounts = blockTxCounts
stxs, err := newSpanBatchTxs(txs, chainID)
if err != nil {
return nil, err
}
raw.txs = stxs
return &raw, nil
} }
// GetSingularBatches converts SpanBatchElements after L2 safe head to SingularBatches. // GetSingularBatches converts SpanBatchElements after L2 safe head to SingularBatches.
...@@ -582,17 +601,15 @@ func (b *SpanBatch) GetSingularBatches(l1Origins []eth.L1BlockRef, l2SafeHead et ...@@ -582,17 +601,15 @@ func (b *SpanBatch) GetSingularBatches(l1Origins []eth.L1BlockRef, l2SafeHead et
} }
// NewSpanBatch converts given singularBatches into SpanBatchElements, and creates a new SpanBatch. // NewSpanBatch converts given singularBatches into SpanBatchElements, and creates a new SpanBatch.
func NewSpanBatch(singularBatches []*SingularBatch) *SpanBatch { func NewSpanBatch(genesisTimestamp uint64, chainID *big.Int) *SpanBatch {
spanBatch := &SpanBatch{} // newSpanBatchTxs can't fail with empty txs
if len(singularBatches) == 0 { sbtxs, _ := newSpanBatchTxs([][]byte{}, chainID)
return spanBatch return &SpanBatch{
} GenesisTimestamp: genesisTimestamp,
copy(spanBatch.ParentCheck[:], singularBatches[0].ParentHash.Bytes()[:20]) ChainID: chainID,
copy(spanBatch.L1OriginCheck[:], singularBatches[len(singularBatches)-1].EpochHash.Bytes()[:20]) originBits: big.NewInt(0),
for _, singularBatch := range singularBatches { sbtxs: sbtxs,
spanBatch.Batches = append(spanBatch.Batches, singularBatchToElement(singularBatch))
} }
return spanBatch
} }
// DeriveSpanBatch derives SpanBatch from BatchData. // DeriveSpanBatch derives SpanBatch from BatchData.
...@@ -605,49 +622,6 @@ func DeriveSpanBatch(batchData *BatchData, blockTime, genesisTimestamp uint64, c ...@@ -605,49 +622,6 @@ func DeriveSpanBatch(batchData *BatchData, blockTime, genesisTimestamp uint64, c
return rawSpanBatch.ToSpanBatch(blockTime, genesisTimestamp, chainID) return rawSpanBatch.ToSpanBatch(blockTime, genesisTimestamp, chainID)
} }
// SpanBatchBuilder is a utility type to build a SpanBatch by adding a SingularBatch one by one.
// makes easier to stack SingularBatches and convert to RawSpanBatch for encoding.
type SpanBatchBuilder struct {
genesisTimestamp uint64
chainID *big.Int
spanBatch *SpanBatch
originChangedBit uint
}
func NewSpanBatchBuilder(genesisTimestamp uint64, chainID *big.Int) *SpanBatchBuilder {
return &SpanBatchBuilder{
genesisTimestamp: genesisTimestamp,
chainID: chainID,
spanBatch: &SpanBatch{},
}
}
func (b *SpanBatchBuilder) AppendSingularBatch(singularBatch *SingularBatch, seqNum uint64) {
if b.GetBlockCount() == 0 {
b.originChangedBit = 0
if seqNum == 0 {
b.originChangedBit = 1
}
}
b.spanBatch.AppendSingularBatch(singularBatch)
}
func (b *SpanBatchBuilder) GetRawSpanBatch() (*RawSpanBatch, error) {
raw, err := b.spanBatch.ToRawSpanBatch(b.originChangedBit, b.genesisTimestamp, b.chainID)
if err != nil {
return nil, err
}
return raw, nil
}
func (b *SpanBatchBuilder) GetBlockCount() int {
return len(b.spanBatch.Batches)
}
func (b *SpanBatchBuilder) Reset() {
b.spanBatch = &SpanBatch{}
}
// ReadTxData reads raw RLP tx data from reader and returns txData and txType // ReadTxData reads raw RLP tx data from reader and returns txData and txType
func ReadTxData(r *bytes.Reader) ([]byte, int, error) { func ReadTxData(r *bytes.Reader) ([]byte, int, error) {
var txData []byte var txData []byte
......
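With SpanBatchBuilder removed, the append-then-encode workflow lives directly on SpanBatch. Because ToRawSpanBatch no longer sorts by timestamp, AppendSingularBatch enforces ordering itself and panics on an out-of-order block. A hedged sketch of the new lifecycle using only identifiers visible in this diff; the singularBatches input and the wrapping function are assumptions of the example (assumes imports of math/big and the derive package):

// Sketch only: the stateful replacement for the old builder workflow.
func buildRawSpanBatch(genesisTimestamp uint64, chainID *big.Int, singularBatches []*derive.SingularBatch) (*derive.RawSpanBatch, error) {
	sb := derive.NewSpanBatch(genesisTimestamp, chainID) // starts empty, with caches pre-initialized
	for i, b := range singularBatches {
		// seqNum only matters for the first block, where it decides originBits bit 0;
		// each call also updates blockTxCounts and the cached spanBatchTxs incrementally.
		if err := sb.AppendSingularBatch(b, uint64(i)); err != nil {
			return nil, err
		}
	}
	// ToRawSpanBatch now just assembles the cached fields; no sorting or tx re-encoding.
	return sb.ToRawSpanBatch()
}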
...@@ -18,6 +18,29 @@ import ( ...@@ -18,6 +18,29 @@ import (
"github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/ethereum-optimism/optimism/op-service/testutils"
) )
// initializedSpanBatch creates a new SpanBatch with given SingularBatches.
// It is used *only* in tests to create a SpanBatch with given SingularBatches as a convenience.
// It will also ignore any errors that occur during AppendSingularBatch.
// Tests should manually set the first bit of the originBits if needed using setFirstOriginChangedBit
func initializedSpanBatch(singularBatches []*SingularBatch, genesisTimestamp uint64, chainID *big.Int) *SpanBatch {
spanBatch := NewSpanBatch(genesisTimestamp, chainID)
if len(singularBatches) == 0 {
return spanBatch
}
for i := 0; i < len(singularBatches); i++ {
if err := spanBatch.AppendSingularBatch(singularBatches[i], uint64(i)); err != nil {
continue
}
}
return spanBatch
}
// setFirstOriginChangedBit sets the first bit of the originBits to the given value
// used for testing when a span batch is made with initializedSpanBatch, which does not take an explicit sequence number
func (b *SpanBatch) setFirstOriginChangedBit(bit uint) {
b.originBits.SetBit(b.originBits, 0, bit)
}
func TestSpanBatchForBatchInterface(t *testing.T) { func TestSpanBatchForBatchInterface(t *testing.T) {
rng := rand.New(rand.NewSource(0x5432177)) rng := rand.New(rand.NewSource(0x5432177))
chainID := big.NewInt(rng.Int63n(1000)) chainID := big.NewInt(rng.Int63n(1000))
...@@ -27,7 +50,7 @@ func TestSpanBatchForBatchInterface(t *testing.T) { ...@@ -27,7 +50,7 @@ func TestSpanBatchForBatchInterface(t *testing.T) {
safeL2Head := testutils.RandomL2BlockRef(rng) safeL2Head := testutils.RandomL2BlockRef(rng)
safeL2Head.Hash = common.BytesToHash(singularBatches[0].ParentHash[:]) safeL2Head.Hash = common.BytesToHash(singularBatches[0].ParentHash[:])
spanBatch := NewSpanBatch(singularBatches) spanBatch := initializedSpanBatch(singularBatches, uint64(0), chainID)
// check interface method implementations except logging // check interface method implementations except logging
require.Equal(t, SpanBatchType, spanBatch.GetBatchType()) require.Equal(t, SpanBatchType, spanBatch.GetBatchType())
...@@ -322,9 +345,10 @@ func TestSpanBatchDerive(t *testing.T) { ...@@ -322,9 +345,10 @@ func TestSpanBatchDerive(t *testing.T) {
safeL2Head.Hash = common.BytesToHash(singularBatches[0].ParentHash[:]) safeL2Head.Hash = common.BytesToHash(singularBatches[0].ParentHash[:])
genesisTimeStamp := 1 + singularBatches[0].Timestamp - 128 genesisTimeStamp := 1 + singularBatches[0].Timestamp - 128
spanBatch := NewSpanBatch(singularBatches) spanBatch := initializedSpanBatch(singularBatches, genesisTimeStamp, chainID)
originChangedBit := uint(originChangedBit) // set originChangedBit to match the original test implementation
rawSpanBatch, err := spanBatch.ToRawSpanBatch(originChangedBit, genesisTimeStamp, chainID) spanBatch.setFirstOriginChangedBit(uint(originChangedBit))
rawSpanBatch, err := spanBatch.ToRawSpanBatch()
require.NoError(t, err) require.NoError(t, err)
spanBatchDerived, err := rawSpanBatch.derive(l2BlockTime, genesisTimeStamp, chainID) spanBatchDerived, err := rawSpanBatch.derive(l2BlockTime, genesisTimeStamp, chainID)
...@@ -354,14 +378,15 @@ func TestSpanBatchAppend(t *testing.T) { ...@@ -354,14 +378,15 @@ func TestSpanBatchAppend(t *testing.T) {
singularBatches := RandomValidConsecutiveSingularBatches(rng, chainID) singularBatches := RandomValidConsecutiveSingularBatches(rng, chainID)
// initialize empty span batch // initialize empty span batch
spanBatch := NewSpanBatch([]*SingularBatch{}) spanBatch := initializedSpanBatch([]*SingularBatch{}, uint64(0), chainID)
L := 2 L := 2
for i := 0; i < L; i++ { for i := 0; i < L; i++ {
spanBatch.AppendSingularBatch(singularBatches[i]) err := spanBatch.AppendSingularBatch(singularBatches[i], uint64(i))
require.NoError(t, err)
} }
// initialize with two singular batches // initialize with two singular batches
spanBatch2 := NewSpanBatch(singularBatches[:L]) spanBatch2 := initializedSpanBatch(singularBatches[:L], uint64(0), chainID)
require.Equal(t, spanBatch, spanBatch2) require.Equal(t, spanBatch, spanBatch2)
} }
...@@ -376,9 +401,10 @@ func TestSpanBatchMerge(t *testing.T) { ...@@ -376,9 +401,10 @@ func TestSpanBatchMerge(t *testing.T) {
singularBatches := RandomValidConsecutiveSingularBatches(rng, chainID) singularBatches := RandomValidConsecutiveSingularBatches(rng, chainID)
blockCount := len(singularBatches) blockCount := len(singularBatches)
spanBatch := NewSpanBatch(singularBatches) spanBatch := initializedSpanBatch(singularBatches, genesisTimeStamp, chainID)
originChangedBit := uint(originChangedBit) // set originChangedBit to match the original test implementation
rawSpanBatch, err := spanBatch.ToRawSpanBatch(originChangedBit, genesisTimeStamp, chainID) spanBatch.setFirstOriginChangedBit(uint(originChangedBit))
rawSpanBatch, err := spanBatch.ToRawSpanBatch()
require.NoError(t, err) require.NoError(t, err)
// check span batch prefix // check span batch prefix
...@@ -389,7 +415,7 @@ func TestSpanBatchMerge(t *testing.T) { ...@@ -389,7 +415,7 @@ func TestSpanBatchMerge(t *testing.T) {
// check span batch payload // check span batch payload
require.Equal(t, int(rawSpanBatch.blockCount), len(singularBatches)) require.Equal(t, int(rawSpanBatch.blockCount), len(singularBatches))
require.Equal(t, rawSpanBatch.originBits.Bit(0), originChangedBit) require.Equal(t, rawSpanBatch.originBits.Bit(0), uint(originChangedBit))
for i := 1; i < blockCount; i++ { for i := 1; i < blockCount; i++ {
if rawSpanBatch.originBits.Bit(i) == 1 { if rawSpanBatch.originBits.Bit(i) == 1 {
require.Equal(t, singularBatches[i].EpochNum, singularBatches[i-1].EpochNum+1) require.Equal(t, singularBatches[i].EpochNum, singularBatches[i-1].EpochNum+1)
...@@ -421,9 +447,10 @@ func TestSpanBatchToSingularBatch(t *testing.T) { ...@@ -421,9 +447,10 @@ func TestSpanBatchToSingularBatch(t *testing.T) {
safeL2Head.Time = singularBatches[0].Timestamp - 2 safeL2Head.Time = singularBatches[0].Timestamp - 2
genesisTimeStamp := 1 + singularBatches[0].Timestamp - 128 genesisTimeStamp := 1 + singularBatches[0].Timestamp - 128
spanBatch := NewSpanBatch(singularBatches) spanBatch := initializedSpanBatch(singularBatches, genesisTimeStamp, chainID)
originChangedBit := uint(originChangedBit) // set originChangedBit to match the original test implementation
rawSpanBatch, err := spanBatch.ToRawSpanBatch(originChangedBit, genesisTimeStamp, chainID) spanBatch.setFirstOriginChangedBit(uint(originChangedBit))
rawSpanBatch, err := spanBatch.ToRawSpanBatch()
require.NoError(t, err) require.NoError(t, err)
l1Origins := mockL1Origin(rng, rawSpanBatch, singularBatches) l1Origins := mockL1Origin(rng, rawSpanBatch, singularBatches)
...@@ -492,49 +519,6 @@ func TestSpanBatchReadTxDataInvalid(t *testing.T) { ...@@ -492,49 +519,6 @@ func TestSpanBatchReadTxDataInvalid(t *testing.T) {
require.ErrorContains(t, err, "tx RLP prefix type must be list") require.ErrorContains(t, err, "tx RLP prefix type must be list")
} }
func TestSpanBatchBuilder(t *testing.T) {
rng := rand.New(rand.NewSource(0xbab1bab1))
chainID := new(big.Int).SetUint64(rng.Uint64())
for originChangedBit := 0; originChangedBit < 2; originChangedBit++ {
singularBatches := RandomValidConsecutiveSingularBatches(rng, chainID)
safeL2Head := testutils.RandomL2BlockRef(rng)
if originChangedBit == 0 {
safeL2Head.Hash = common.BytesToHash(singularBatches[0].ParentHash[:])
}
genesisTimeStamp := 1 + singularBatches[0].Timestamp - 128
var seqNum uint64 = 1
if originChangedBit == 1 {
seqNum = 0
}
spanBatchBuilder := NewSpanBatchBuilder(genesisTimeStamp, chainID)
require.Equal(t, 0, spanBatchBuilder.GetBlockCount())
for i := 0; i < len(singularBatches); i++ {
spanBatchBuilder.AppendSingularBatch(singularBatches[i], seqNum)
require.Equal(t, i+1, spanBatchBuilder.GetBlockCount())
require.Equal(t, singularBatches[0].ParentHash.Bytes()[:20], spanBatchBuilder.spanBatch.ParentCheck[:])
require.Equal(t, singularBatches[i].EpochHash.Bytes()[:20], spanBatchBuilder.spanBatch.L1OriginCheck[:])
}
rawSpanBatch, err := spanBatchBuilder.GetRawSpanBatch()
require.NoError(t, err)
// compare with rawSpanBatch not using spanBatchBuilder
spanBatch := NewSpanBatch(singularBatches)
originChangedBit := uint(originChangedBit)
rawSpanBatch2, err := spanBatch.ToRawSpanBatch(originChangedBit, genesisTimeStamp, chainID)
require.NoError(t, err)
require.Equal(t, rawSpanBatch2, rawSpanBatch)
spanBatchBuilder.Reset()
require.Equal(t, 0, spanBatchBuilder.GetBlockCount())
}
}
func TestSpanBatchMaxTxData(t *testing.T) { func TestSpanBatchMaxTxData(t *testing.T) {
rng := rand.New(rand.NewSource(0x177288)) rng := rand.New(rand.NewSource(0x177288))
......
...@@ -396,32 +396,42 @@ func isProtectedV(v uint64, txType int) bool { ...@@ -396,32 +396,42 @@ func isProtectedV(v uint64, txType int) bool {
} }
func newSpanBatchTxs(txs [][]byte, chainID *big.Int) (*spanBatchTxs, error) { func newSpanBatchTxs(txs [][]byte, chainID *big.Int) (*spanBatchTxs, error) {
sbtxs := &spanBatchTxs{
contractCreationBits: big.NewInt(0),
yParityBits: big.NewInt(0),
txSigs: []spanBatchSignature{},
txNonces: []uint64{},
txGases: []uint64{},
txTos: []common.Address{},
txDatas: []hexutil.Bytes{},
txTypes: []int{},
protectedBits: big.NewInt(0),
}
if err := sbtxs.AddTxs(txs, chainID); err != nil {
return nil, err
}
return sbtxs, nil
}
func (sbtx *spanBatchTxs) AddTxs(txs [][]byte, chainID *big.Int) error {
totalBlockTxCount := uint64(len(txs)) totalBlockTxCount := uint64(len(txs))
var txSigs []spanBatchSignature offset := sbtx.totalBlockTxCount
var txTos []common.Address
var txNonces []uint64
var txGases []uint64
var txDatas []hexutil.Bytes
var txTypes []int
contractCreationBits := new(big.Int)
yParityBits := new(big.Int)
protectedBits := new(big.Int)
totalLegacyTxCount := uint64(0)
for idx := 0; idx < int(totalBlockTxCount); idx++ { for idx := 0; idx < int(totalBlockTxCount); idx++ {
var tx types.Transaction var tx types.Transaction
if err := tx.UnmarshalBinary(txs[idx]); err != nil { if err := tx.UnmarshalBinary(txs[idx]); err != nil {
return nil, errors.New("failed to decode tx") return errors.New("failed to decode tx")
} }
if tx.Type() == types.LegacyTxType { if tx.Type() == types.LegacyTxType {
protectedBit := uint(0) protectedBit := uint(0)
if tx.Protected() { if tx.Protected() {
protectedBit = uint(1) protectedBit = uint(1)
} }
protectedBits.SetBit(protectedBits, int(totalLegacyTxCount), protectedBit) sbtx.protectedBits.SetBit(sbtx.protectedBits, int(sbtx.totalLegacyTxCount), protectedBit)
totalLegacyTxCount++ sbtx.totalLegacyTxCount++
} }
if tx.Protected() && tx.ChainId().Cmp(chainID) != 0 { if tx.Protected() && tx.ChainId().Cmp(chainID) != 0 {
return nil, fmt.Errorf("protected tx has chain ID %d, but expected chain ID %d", tx.ChainId(), chainID) return fmt.Errorf("protected tx has chain ID %d, but expected chain ID %d", tx.ChainId(), chainID)
} }
var txSig spanBatchSignature var txSig spanBatchSignature
v, r, s := tx.RawSignatureValues() v, r, s := tx.RawSignatureValues()
...@@ -430,42 +440,31 @@ func newSpanBatchTxs(txs [][]byte, chainID *big.Int) (*spanBatchTxs, error) { ...@@ -430,42 +440,31 @@ func newSpanBatchTxs(txs [][]byte, chainID *big.Int) (*spanBatchTxs, error) {
txSig.v = v.Uint64() txSig.v = v.Uint64()
txSig.r = R txSig.r = R
txSig.s = S txSig.s = S
txSigs = append(txSigs, txSig) sbtx.txSigs = append(sbtx.txSigs, txSig)
contractCreationBit := uint(1) contractCreationBit := uint(1)
if tx.To() != nil { if tx.To() != nil {
txTos = append(txTos, *tx.To()) sbtx.txTos = append(sbtx.txTos, *tx.To())
contractCreationBit = uint(0) contractCreationBit = uint(0)
} }
contractCreationBits.SetBit(contractCreationBits, idx, contractCreationBit) sbtx.contractCreationBits.SetBit(sbtx.contractCreationBits, idx+int(offset), contractCreationBit)
yParityBit, err := convertVToYParity(txSig.v, int(tx.Type())) yParityBit, err := convertVToYParity(txSig.v, int(tx.Type()))
if err != nil { if err != nil {
return nil, err return err
} }
yParityBits.SetBit(yParityBits, idx, yParityBit) sbtx.yParityBits.SetBit(sbtx.yParityBits, idx+int(offset), yParityBit)
txNonces = append(txNonces, tx.Nonce()) sbtx.txNonces = append(sbtx.txNonces, tx.Nonce())
txGases = append(txGases, tx.Gas()) sbtx.txGases = append(sbtx.txGases, tx.Gas())
stx, err := newSpanBatchTx(tx) stx, err := newSpanBatchTx(tx)
if err != nil { if err != nil {
return nil, err return err
} }
txData, err := stx.MarshalBinary() txData, err := stx.MarshalBinary()
if err != nil { if err != nil {
return nil, err return err
} }
txDatas = append(txDatas, txData) sbtx.txDatas = append(sbtx.txDatas, txData)
txTypes = append(txTypes, int(tx.Type())) sbtx.txTypes = append(sbtx.txTypes, int(tx.Type()))
} }
return &spanBatchTxs{ sbtx.totalBlockTxCount += totalBlockTxCount
totalBlockTxCount: totalBlockTxCount, return nil
contractCreationBits: contractCreationBits,
yParityBits: yParityBits,
txSigs: txSigs,
txNonces: txNonces,
txGases: txGases,
txTos: txTos,
txDatas: txDatas,
txTypes: txTypes,
protectedBits: protectedBits,
totalLegacyTxCount: totalLegacyTxCount,
}, nil
} }
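The essential invariant in this refactor is the offset bookkeeping: bits for the newly added transactions are written at idx+offset in contractCreationBits and yParityBits (and at the cumulative legacy-tx index in protectedBits), so repeated AddTxs calls build exactly the same bitfields as a single newSpanBatchTxs call over all transactions, which the test added below checks end to end. A standalone illustration of that indexing (the bit values are made up):

package main

import (
	"fmt"
	"math/big"
)

// appendBits mimics the idx+offset indexing used by AddTxs: each call writes
// its bits after everything written by earlier calls.
func appendBits(dst *big.Int, offset int, bits []uint) int {
	for i, b := range bits {
		dst.SetBit(dst, offset+i, b)
	}
	return offset + len(bits)
}

func main() {
	incremental := big.NewInt(0)
	off := appendBits(incremental, 0, []uint{1, 0}) // first AddTxs call (2 txs)
	appendBits(incremental, off, []uint{0, 1, 1})   // second AddTxs call (3 txs)

	all := big.NewInt(0)
	appendBits(all, 0, []uint{1, 0, 0, 1, 1}) // one call over all 5 txs

	fmt.Println(incremental.Cmp(all) == 0) // true: identical bitfields either way
}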
...@@ -302,6 +302,31 @@ func TestSpanBatchTxsTxDatas(t *testing.T) { ...@@ -302,6 +302,31 @@ func TestSpanBatchTxsTxDatas(t *testing.T) {
require.Equal(t, txDatas, sbt.txDatas) require.Equal(t, txDatas, sbt.txDatas)
require.Equal(t, txTypes, sbt.txTypes) require.Equal(t, txTypes, sbt.txTypes)
} }
func TestSpanBatchTxsAddTxs(t *testing.T) {
rng := rand.New(rand.NewSource(0x1234))
chainID := big.NewInt(rng.Int63n(1000))
// make batches to extract txs from
batches := RandomValidConsecutiveSingularBatches(rng, chainID)
allTxs := [][]byte{}
iterativeSBTX, err := newSpanBatchTxs([][]byte{}, chainID)
require.NoError(t, err)
for i := 0; i < len(batches); i++ {
// explicitly extract txs due to mismatch of [][]byte to []hexutil.Bytes
txs := [][]byte{}
for j := 0; j < len(batches[i].Transactions); j++ {
txs = append(txs, batches[i].Transactions[j])
}
err = iterativeSBTX.AddTxs(txs, chainID)
require.NoError(t, err)
allTxs = append(allTxs, txs...)
}
fullSBTX, err := newSpanBatchTxs(allTxs, chainID)
require.NoError(t, err)
require.Equal(t, iterativeSBTX, fullSBTX)
}
func TestSpanBatchTxsRecoverV(t *testing.T) { func TestSpanBatchTxsRecoverV(t *testing.T) {
rng := rand.New(rand.NewSource(0x123)) rng := rand.New(rand.NewSource(0x123))
......
...@@ -23,8 +23,8 @@ type SpanChannelOut struct { ...@@ -23,8 +23,8 @@ type SpanChannelOut struct {
compress Compressor compress Compressor
// closed indicates if the channel is closed // closed indicates if the channel is closed
closed bool closed bool
// spanBatchBuilder contains information requires to build SpanBatch // spanBatch is the batch being built
spanBatchBuilder *SpanBatchBuilder spanBatch *SpanBatch
// reader contains compressed data for making output frames // reader contains compressed data for making output frames
reader *bytes.Buffer reader *bytes.Buffer
} }
...@@ -33,13 +33,13 @@ func (co *SpanChannelOut) ID() ChannelID { ...@@ -33,13 +33,13 @@ func (co *SpanChannelOut) ID() ChannelID {
return co.id return co.id
} }
func NewSpanChannelOut(compress Compressor, spanBatchBuilder *SpanBatchBuilder) (*SpanChannelOut, error) { func NewSpanChannelOut(compress Compressor, spanBatch *SpanBatch) (*SpanChannelOut, error) {
c := &SpanChannelOut{ c := &SpanChannelOut{
id: ChannelID{}, id: ChannelID{},
frame: 0, frame: 0,
rlpLength: 0, rlpLength: 0,
compress: compress, compress: compress,
spanBatchBuilder: spanBatchBuilder, spanBatch: spanBatch,
reader: &bytes.Buffer{}, reader: &bytes.Buffer{},
} }
_, err := rand.Read(c.id[:]) _, err := rand.Read(c.id[:])
...@@ -56,7 +56,7 @@ func (co *SpanChannelOut) Reset() error { ...@@ -56,7 +56,7 @@ func (co *SpanChannelOut) Reset() error {
co.compress.Reset() co.compress.Reset()
co.reader.Reset() co.reader.Reset()
co.closed = false co.closed = false
co.spanBatchBuilder.Reset() co.spanBatch = NewSpanBatch(co.spanBatch.GenesisTimestamp, co.spanBatch.ChainID)
_, err := rand.Read(co.id[:]) _, err := rand.Read(co.id[:])
return err return err
} }
...@@ -100,9 +100,11 @@ func (co *SpanChannelOut) AddSingularBatch(batch *SingularBatch, seqNum uint64) ...@@ -100,9 +100,11 @@ func (co *SpanChannelOut) AddSingularBatch(batch *SingularBatch, seqNum uint64)
} }
var buf bytes.Buffer var buf bytes.Buffer
// Append Singular batch to its span batch builder // Append Singular batch to its span batch builder
co.spanBatchBuilder.AppendSingularBatch(batch, seqNum) if err := co.spanBatch.AppendSingularBatch(batch, seqNum); err != nil {
return 0, fmt.Errorf("failed to append SingularBatch to SpanBatch: %w", err)
}
// Convert Span batch to RawSpanBatch // Convert Span batch to RawSpanBatch
rawSpanBatch, err := co.spanBatchBuilder.GetRawSpanBatch() rawSpanBatch, err := co.spanBatch.ToRawSpanBatch()
if err != nil { if err != nil {
return 0, fmt.Errorf("failed to convert SpanBatch into RawSpanBatch: %w", err) return 0, fmt.Errorf("failed to convert SpanBatch into RawSpanBatch: %w", err)
} }
...@@ -117,12 +119,9 @@ func (co *SpanChannelOut) AddSingularBatch(batch *SingularBatch, seqNum uint64) ...@@ -117,12 +119,9 @@ func (co *SpanChannelOut) AddSingularBatch(batch *SingularBatch, seqNum uint64)
} }
co.rlpLength = buf.Len() co.rlpLength = buf.Len()
if co.spanBatchBuilder.GetBlockCount() > 1 {
// Flush compressed data into reader to preserve current result.
// If the channel is full after this block is appended, we should use preserved data. // If the channel is full after this block is appended, we should use preserved data.
if err := co.compress.Flush(); err != nil { // so copy the compressed data to reader
return 0, fmt.Errorf("failed to flush compressor: %w", err) if len(co.spanBatch.Batches) > 1 {
}
_, err = io.Copy(co.reader, co.compress) _, err = io.Copy(co.reader, co.compress)
if err != nil { if err != nil {
// Must reset reader to avoid partial output // Must reset reader to avoid partial output
...@@ -135,9 +134,13 @@ func (co *SpanChannelOut) AddSingularBatch(batch *SingularBatch, seqNum uint64) ...@@ -135,9 +134,13 @@ func (co *SpanChannelOut) AddSingularBatch(batch *SingularBatch, seqNum uint64)
co.compress.Reset() co.compress.Reset()
// Avoid using io.Copy here, because we need all or nothing // Avoid using io.Copy here, because we need all or nothing
written, err := co.compress.Write(buf.Bytes()) written, err := co.compress.Write(buf.Bytes())
// Always flush (for BlindCompressor to check if it's full)
if err := co.compress.Flush(); err != nil {
return 0, fmt.Errorf("failed to flush compressor: %w", err)
}
if co.compress.FullErr() != nil { if co.compress.FullErr() != nil {
err = co.compress.FullErr() err = co.compress.FullErr()
if co.spanBatchBuilder.GetBlockCount() == 1 { if len(co.spanBatch.Batches) == 1 {
// Do not return ErrCompressorFull for the first block in the batch // Do not return ErrCompressorFull for the first block in the batch
// In this case, reader must be empty. then the contents of compressor will be copied to reader when the channel is closed. // In this case, reader must be empty. then the contents of compressor will be copied to reader when the channel is closed.
err = nil err = nil
......
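The compressor is now flushed after every write so that a compressor which can only judge fullness after flushing (the BlindCompressor this PR introduces, not shown in this excerpt) has up-to-date state before the FullErr check. For reference, a reconstruction of the interface contract this method relies on, derived purely from the calls visible here; the repo's actual Compressor definition has additional methods and may differ:

// Assumption: reconstructed from the usage above, not copied from the repo.
type Compressor interface {
	io.Writer       // AddSingularBatch writes the span batch RLP into it
	io.Reader       // compressed output is drained into co.reader via io.Copy
	Flush() error   // now called after every Write so the compressor can update its fullness state
	Reset()         // called before re-compressing the full RLP buffer and on channel Reset
	FullErr() error // non-nil (e.g. ErrCompressorFull) once the configured target is hit
}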
...@@ -3,6 +3,7 @@ package test ...@@ -3,6 +3,7 @@ package test
import ( import (
"math/big" "math/big"
"math/rand" "math/rand"
"time"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
...@@ -14,9 +15,8 @@ import ( ...@@ -14,9 +15,8 @@ import (
// RandomL2Block returns a random block whose first transaction is a random pre-Ecotone upgrade // RandomL2Block returns a random block whose first transaction is a random pre-Ecotone upgrade
// L1 Info Deposit transaction. // L1 Info Deposit transaction.
func RandomL2Block(rng *rand.Rand, txCount int) (*types.Block, []*types.Receipt) { func RandomL2Block(rng *rand.Rand, txCount int, t time.Time) (*types.Block, []*types.Receipt) {
l1Block := types.NewBlock(testutils.RandomHeader(rng), l1Block := types.NewBlock(testutils.RandomHeader(rng), nil, nil, nil, trie.NewStackTrie(nil))
nil, nil, nil, trie.NewStackTrie(nil))
rollupCfg := rollup.Config{} rollupCfg := rollup.Config{}
if testutils.RandomBool(rng) { if testutils.RandomBool(rng) {
t := uint64(0) t := uint64(0)
...@@ -26,12 +26,21 @@ func RandomL2Block(rng *rand.Rand, txCount int) (*types.Block, []*types.Receipt) ...@@ -26,12 +26,21 @@ func RandomL2Block(rng *rand.Rand, txCount int) (*types.Block, []*types.Receipt)
if err != nil { if err != nil {
panic("L1InfoDeposit: " + err.Error()) panic("L1InfoDeposit: " + err.Error())
} }
if t.IsZero() {
return testutils.RandomBlockPrependTxs(rng, txCount, types.NewTx(l1InfoTx)) return testutils.RandomBlockPrependTxs(rng, txCount, types.NewTx(l1InfoTx))
} else {
return testutils.RandomBlockPrependTxsWithTime(rng, txCount, uint64(t.Unix()), types.NewTx(l1InfoTx))
}
} }
func RandomL2BlockWithChainId(rng *rand.Rand, txCount int, chainId *big.Int) *types.Block { func RandomL2BlockWithChainId(rng *rand.Rand, txCount int, chainId *big.Int) *types.Block {
return RandomL2BlockWithChainIdAndTime(rng, txCount, chainId, time.Time{})
}
func RandomL2BlockWithChainIdAndTime(rng *rand.Rand, txCount int, chainId *big.Int, t time.Time) *types.Block {
signer := types.NewLondonSigner(chainId) signer := types.NewLondonSigner(chainId)
block, _ := RandomL2Block(rng, 0) block, _ := RandomL2Block(rng, 0, t)
txs := []*types.Transaction{block.Transactions()[0]} // L1 info deposit TX txs := []*types.Transaction{block.Transactions()[0]} // L1 info deposit TX
for i := 0; i < txCount; i++ { for i := 0; i < txCount; i++ {
txs = append(txs, testutils.RandomTx(rng, big.NewInt(int64(rng.Uint32())), signer)) txs = append(txs, testutils.RandomTx(rng, big.NewInt(int64(rng.Uint32())), signer))
......
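Threading an explicit time through these helpers matters because the stateful SpanBatch now panics on out-of-order block timestamps (see AppendSingularBatch earlier in this diff), so tests that append many random blocks need monotonically increasing times, while passing the zero time keeps the old fully random behavior. A small sketch of the two call forms, assuming it runs inside the same test helper package (the rng seed and chain ID are made up):

// Sketch only, not part of this diff.
rng := rand.New(rand.NewSource(7))
chainID := big.NewInt(10)
fixed := RandomL2BlockWithChainIdAndTime(rng, 3, chainID, time.Unix(1_700_000_000, 0)) // header time set explicitly
random := RandomL2BlockWithChainId(rng, 3, chainID) // zero time: falls back to the old random-timestamp path
_, _ = fixed, random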
...@@ -250,7 +250,7 @@ func RandomReceipt(rng *rand.Rand, signer types.Signer, tx *types.Transaction, t ...@@ -250,7 +250,7 @@ func RandomReceipt(rng *rand.Rand, signer types.Signer, tx *types.Transaction, t
} }
} }
func RandomHeader(rng *rand.Rand) *types.Header { func RandomHeaderWithTime(rng *rand.Rand, t uint64) *types.Header {
return &types.Header{ return &types.Header{
ParentHash: RandomHash(rng), ParentHash: RandomHash(rng),
UncleHash: types.EmptyUncleHash, UncleHash: types.EmptyUncleHash,
...@@ -263,7 +263,7 @@ func RandomHeader(rng *rand.Rand) *types.Header { ...@@ -263,7 +263,7 @@ func RandomHeader(rng *rand.Rand) *types.Header {
Number: big.NewInt(1 + rng.Int63n(100_000_000)), Number: big.NewInt(1 + rng.Int63n(100_000_000)),
GasLimit: 0, GasLimit: 0,
GasUsed: 0, GasUsed: 0,
Time: uint64(rng.Int63n(2_000_000_000)), Time: t,
Extra: RandomData(rng, rng.Intn(33)), Extra: RandomData(rng, rng.Intn(33)),
MixDigest: common.Hash{}, MixDigest: common.Hash{},
Nonce: types.BlockNonce{}, Nonce: types.BlockNonce{},
...@@ -271,15 +271,17 @@ func RandomHeader(rng *rand.Rand) *types.Header { ...@@ -271,15 +271,17 @@ func RandomHeader(rng *rand.Rand) *types.Header {
} }
} }
func RandomHeader(rng *rand.Rand) *types.Header {
t := uint64(rng.Int63n(2_000_000_000))
return RandomHeaderWithTime(rng, t)
}
func RandomBlock(rng *rand.Rand, txCount uint64) (*types.Block, []*types.Receipt) { func RandomBlock(rng *rand.Rand, txCount uint64) (*types.Block, []*types.Receipt) {
return RandomBlockPrependTxs(rng, int(txCount)) return RandomBlockPrependTxs(rng, int(txCount))
} }
// RandomBlockPrependTxs returns a random block with txCount randomly generated func RandomBlockPrependTxsWithTime(rng *rand.Rand, txCount int, t uint64, ptxs ...*types.Transaction) (*types.Block, []*types.Receipt) {
// transactions and additionally the transactions ptxs prepended. So the total header := RandomHeaderWithTime(rng, t)
// number of transactions is len(ptxs) + txCount.
func RandomBlockPrependTxs(rng *rand.Rand, txCount int, ptxs ...*types.Transaction) (*types.Block, []*types.Receipt) {
header := RandomHeader(rng)
signer := types.NewLondonSigner(big.NewInt(rng.Int63n(1000))) signer := types.NewLondonSigner(big.NewInt(rng.Int63n(1000)))
txs := make([]*types.Transaction, 0, txCount+len(ptxs)) txs := make([]*types.Transaction, 0, txCount+len(ptxs))
txs = append(txs, ptxs...) txs = append(txs, ptxs...)
...@@ -312,6 +314,14 @@ func RandomBlockPrependTxs(rng *rand.Rand, txCount int, ptxs ...*types.Transacti ...@@ -312,6 +314,14 @@ func RandomBlockPrependTxs(rng *rand.Rand, txCount int, ptxs ...*types.Transacti
return block, receipts return block, receipts
} }
// RandomBlockPrependTxs returns a random block with txCount randomly generated
// transactions and additionally the transactions ptxs prepended. So the total
// number of transactions is len(ptxs) + txCount.
func RandomBlockPrependTxs(rng *rand.Rand, txCount int, ptxs ...*types.Transaction) (*types.Block, []*types.Receipt) {
t := uint64(rng.Int63n(2_000_000_000))
return RandomBlockPrependTxsWithTime(rng, txCount, t, ptxs...)
}
func RandomOutputResponse(rng *rand.Rand) *eth.OutputResponse { func RandomOutputResponse(rng *rand.Rand) *eth.OutputResponse {
return &eth.OutputResponse{ return &eth.OutputResponse{
Version: eth.Bytes32(RandomHash(rng)), Version: eth.Bytes32(RandomHash(rng)),
......