1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
package benchmarks
import (
"fmt"
"math/big"
"math/rand"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-batcher/compressor"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/stretchr/testify/require"
)
const (
	// a really large target output size to ensure that the compressors are never full
	targetOutput_huge = uint64(100_000_000_000)
	// this target size was determined by the devnet sepolia batcher's configuration
	// NOTE(review): identifier is misspelled ("Ouput"); renaming requires updating all references
	targetOuput_real = uint64(780120)
)
var (
	// compressors used in the benchmark.
	// Construction errors are intentionally discarded here; the configs are
	// static and known-good, and a nil compressor would surface immediately
	// when the benchmarks run.
	rc, _ = compressor.NewRatioCompressor(compressor.Config{
		TargetOutputSize: targetOutput_huge,
		ApproxComprRatio: 0.4,
	})
	sc, _ = compressor.NewShadowCompressor(compressor.Config{
		TargetOutputSize: targetOutput_huge,
	})
	nc, _ = compressor.NewNonCompressor(compressor.Config{
		TargetOutputSize: targetOutput_huge,
	})
	// shadow compressor configured with the realistic (devnet sepolia) target size
	realsc, _ = compressor.NewShadowCompressor(compressor.Config{
		TargetOutputSize: targetOuput_real,
	})
	// compressors used in the benchmark mapped by their name
	// they come paired with a target output size so span batches can use the target size directly
	compressors = map[string]compressorAndTarget{
		"NonCompressor":        {nc, targetOutput_huge},
		"RatioCompressor":      {rc, targetOutput_huge},
		"ShadowCompressor":     {sc, targetOutput_huge},
		"RealShadowCompressor": {realsc, targetOuput_real},
	}
	// batch types used in the benchmark
	batchTypes = []uint{
		derive.SpanBatchType,
		// uncomment to include singular batches in the benchmark
		// singular batches are not included by default because they are not the target of the benchmark
		//derive.SingularBatchType,
	}
)
// compressorAndTarget pairs a compressor with the target output size it was
// configured with, so span-batch channel outs (which take a size, not a
// compressor) can reuse the same table entry.
type compressorAndTarget struct {
	compressor   derive.Compressor
	targetOutput uint64
}
// channelOutByType builds a derive.ChannelOut of the requested batch type as a
// helper for the benchmarks, wiring in the compressor (or its target output
// size, for span batches) registered under compKey. An unknown batch type
// yields an error.
func channelOutByType(batchType uint, compKey string) (derive.ChannelOut, error) {
	chainID := big.NewInt(333)
	switch batchType {
	case derive.SingularBatchType:
		return derive.NewSingularChannelOut(compressors[compKey].compressor)
	case derive.SpanBatchType:
		return derive.NewSpanChannelOut(0, chainID, compressors[compKey].targetOutput)
	default:
		return nil, fmt.Errorf("unsupported batch type: %d", batchType)
	}
}
// a test case for the benchmark controls the number of batches and transactions per batch,
// as well as the batch type and compressor used
type BatchingBenchmarkTC struct {
	BatchType  uint   // derive.SingularBatchType or derive.SpanBatchType
	BatchCount int    // number of batches fed into the channel out
	txPerBatch int    // number of random transactions per batch
	compKey    string // key into the package-level compressors map
}
// String renders the test case as a human-readable benchmark name.
// Unknown batch types produce an empty type label.
func (t BatchingBenchmarkTC) String() string {
	var btype string
	switch t.BatchType {
	case derive.SingularBatchType:
		btype = "Singular"
	case derive.SpanBatchType:
		btype = "Span"
	}
	return fmt.Sprintf("BatchType=%s, txPerBatch=%d, BatchCount=%d, Compressor=%s", btype, t.txPerBatch, t.BatchCount, t.compKey)
}
// BenchmarkFinalBatchChannelOut benchmarks the performance of adding singular batches to a channel out
// this exercises the compression and batching logic, as well as any batch-building logic
// Every Compressor in the compressor map is benchmarked for each test case
// The results of the Benchmark measure *only* the time to add the final batch to the channel out,
// not the time to send all the batches through the channel out
// Hint: Raise the derive.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits if adding larger test cases
func BenchmarkFinalBatchChannelOut(b *testing.B) {
	// Targets define the number of batches and transactions per batch to test
	type target struct{ bs, tpb int }
	targets := []target{
		{10, 1},
		{100, 1},
		{1000, 1},
		{10, 100},
		{100, 100},
	}
	// build a set of test cases for each batch type, compressor, and target-pair
	tests := []BatchingBenchmarkTC{}
	for _, bt := range batchTypes {
		for compkey := range compressors {
			for _, t := range targets {
				tests = append(tests, BatchingBenchmarkTC{bt, t.bs, t.tpb, compkey})
			}
		}
	}
	for _, tc := range tests {
		chainID := big.NewInt(333)
		// fixed seed so every run (and every compressor) sees identical batches
		rng := rand.New(rand.NewSource(0x543331))
		// pre-generate batches to keep the benchmark from including the random generation
		batches := make([]*derive.SingularBatch, tc.BatchCount)
		t := time.Now()
		for i := 0; i < tc.BatchCount; i++ {
			batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID)
			// set the timestamp to increase with each batch
			// to leverage optimizations in the Batch Linked List
			batches[i].Timestamp = uint64(t.Add(time.Duration(i) * time.Second).Unix())
		}
		b.Run(tc.String(), func(b *testing.B) {
			// reset the compressor used in the test case
			for bn := 0; bn < b.N; bn++ {
				// don't measure the setup time
				b.StopTimer()
				compressors[tc.compKey].compressor.Reset()
				// fail fast on a bad batch type / comp key instead of
				// panicking on a nil channel out below
				cout, err := channelOutByType(tc.BatchType, tc.compKey)
				require.NoError(b, err)
				// add all but the final batch to the channel out
				for i := 0; i < tc.BatchCount-1; i++ {
					err := cout.AddSingularBatch(batches[i], 0)
					require.NoError(b, err)
				}
				// measure the time to add the final batch
				b.StartTimer()
				// add the final batch to the channel out
				err = cout.AddSingularBatch(batches[tc.BatchCount-1], 0)
				require.NoError(b, err)
			}
		})
	}
}
// BenchmarkIncremental fills a channel out incrementally with batches
// each increment is counted as its own benchmark
// Hint: use -benchtime=1x to run the benchmarks for a single iteration
// it is not currently designed to use b.N
func BenchmarkIncremental(b *testing.B) {
	chainID := big.NewInt(333)
	rng := rand.New(rand.NewSource(0x543331))
	// use the real compressor for this benchmark
	// use batchCount as the number of batches to add in each benchmark iteration
	// and use txPerBatch as the number of transactions per batch
	tcs := []BatchingBenchmarkTC{
		// fixed: the previous key "RealBlindCompressor" is not present in the
		// compressors map, which made the span channel out use a zero target size
		{derive.SpanBatchType, 5, 1, "RealShadowCompressor"},
		//{derive.SingularBatchType, 100, 1, "RealShadowCompressor"},
	}
	for _, tc := range tcs {
		cout, err := channelOutByType(tc.BatchType, tc.compKey)
		if err != nil {
			b.Fatal(err)
		}
		// keep filling the same channel out until an AddSingularBatch fails
		// (i.e. the channel is full), counting each chunk as its own benchmark
		done := false
		for base := 0; !done; base += tc.BatchCount {
			rangeName := fmt.Sprintf("Incremental %s: %d-%d", tc.String(), base, base+tc.BatchCount)
			b.Run(rangeName, func(b *testing.B) {
				b.StopTimer()
				// prepare the batches
				t := time.Now()
				batches := make([]*derive.SingularBatch, tc.BatchCount)
				for i := 0; i < tc.BatchCount; i++ {
					batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID)
					// set the timestamp to increase with each batch
					// to leverage optimizations in the Batch Linked List
					// (fixed: the previous `t := t.Add(time.Second)` shadowed the
					// outer t, so every batch got the same timestamp)
					batches[i].Timestamp = uint64(t.Add(time.Duration(i+1) * time.Second).Unix())
				}
				b.StartTimer()
				for i := 0; i < tc.BatchCount; i++ {
					err := cout.AddSingularBatch(batches[i], 0)
					if err != nil {
						// channel out is full; stop the outer loop
						done = true
						return
					}
				}
			})
		}
	}
}
// BenchmarkAllBatchesChannelOut benchmarks the performance of adding singular batches to a channel out
// this exercises the compression and batching logic, as well as any batch-building logic
// Every Compressor in the compressor map is benchmarked for each test case
// The results of the Benchmark measure the time to add the *all batches* to the channel out,
// not the time to send all the batches through the channel out
// Hint: Raise the derive.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits
func BenchmarkAllBatchesChannelOut(b *testing.B) {
	// Targets define the number of batches and transactions per batch to test
	type target struct{ bs, tpb int }
	targets := []target{
		{10, 1},
		{100, 1},
		{1000, 1},
		{10, 100},
		{100, 100},
	}
	// build a set of test cases for each batch type, compressor, and target-pair
	tests := []BatchingBenchmarkTC{}
	for _, bt := range batchTypes {
		for compkey := range compressors {
			for _, t := range targets {
				tests = append(tests, BatchingBenchmarkTC{bt, t.bs, t.tpb, compkey})
			}
		}
	}
	for _, tc := range tests {
		chainID := big.NewInt(333)
		// fixed seed so every run (and every compressor) sees identical batches
		rng := rand.New(rand.NewSource(0x543331))
		// pre-generate batches to keep the benchmark from including the random generation
		batches := make([]*derive.SingularBatch, tc.BatchCount)
		t := time.Now()
		for i := 0; i < tc.BatchCount; i++ {
			batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID)
			// set the timestamp to increase with each batch
			// to leverage optimizations in the Batch Linked List
			batches[i].Timestamp = uint64(t.Add(time.Duration(i) * time.Second).Unix())
		}
		b.Run(tc.String(), func(b *testing.B) {
			// reset the compressor used in the test case
			for bn := 0; bn < b.N; bn++ {
				// don't measure the setup time
				b.StopTimer()
				compressors[tc.compKey].compressor.Reset()
				// fail fast on a bad batch type / comp key instead of
				// panicking on a nil channel out below
				cout, err := channelOutByType(tc.BatchType, tc.compKey)
				require.NoError(b, err)
				b.StartTimer()
				// add all batches to the channel out
				for i := 0; i < tc.BatchCount; i++ {
					err := cout.AddSingularBatch(batches[i], 0)
					require.NoError(b, err)
				}
			}
		})
	}
}
// BenchmarkGetRawSpanBatch benchmarks the performance of building a span batch from singular batches
// this exercises the span batch building logic directly
// The adding of batches to the span batch builder is not included in the benchmark, only the final build to RawSpanBatch
func BenchmarkGetRawSpanBatch(b *testing.B) {
	// Targets define the number of batches and transactions per batch to test
	type target struct{ bs, tpb int }
	targets := []target{
		{10, 1},
		{100, 1},
		{1000, 1},
		{10000, 1},
		{10, 100},
		{100, 100},
		{1000, 100},
	}
	// every target runs as a span-batch case; the compressor is unused by the
	// span batch builder, so the NonCompressor key is a placeholder
	tests := make([]BatchingBenchmarkTC, 0, len(targets))
	for _, tgt := range targets {
		tests = append(tests, BatchingBenchmarkTC{derive.SpanBatchType, tgt.bs, tgt.tpb, "NonCompressor"})
	}
	for _, tc := range tests {
		chainID := big.NewInt(333)
		rng := rand.New(rand.NewSource(0x543331))
		// pre-generate batches to keep the benchmark from including the random generation
		batches := make([]*derive.SingularBatch, tc.BatchCount)
		start := time.Now()
		for i := range batches {
			batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID)
			// increasing timestamps leverage optimizations in the Batch Linked List
			batches[i].Timestamp = uint64(start.Add(time.Duration(i) * time.Second).Unix())
		}
		b.Run(tc.String(), func(b *testing.B) {
			for iter := 0; iter < b.N; iter++ {
				// exclude span-batch construction and appends from the timing
				b.StopTimer()
				spanBatch := derive.NewSpanBatch(uint64(0), chainID)
				for _, batch := range batches {
					require.NoError(b, spanBatch.AppendSingularBatch(batch, 0))
				}
				// measure only the final build to RawSpanBatch
				b.StartTimer()
				_, err := spanBatch.ToRawSpanBatch()
				require.NoError(b, err)
			}
		})
	}
}