Commit 179aea69 authored by protolambda, committed by GitHub

Merge pull request #8416 from testinprod-io/tip/batch-decoder-span-batch-support

op-node: batch_decoder: Support Span Batch
parents 372d13b8 a77ac64f
......@@ -24,7 +24,12 @@ the transaction hash.
`batch_decoder reassemble` goes through all of the found frames in the cache & then turns them
into channels. It then stores the channels with metadata on disk where the file name is the Channel ID.
Each channel can contain multiple batches.
If the batch is a span batch, `batch_decoder` derives the span batch using `L2BlockTime`, `L2GenesisTime`, and `L2ChainID`.
These arguments can be provided to the binary using flags.
If the batch is a singular batch, `batch_decoder` does not derive it and stores the batch as-is.
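For reference, a minimal sketch of the equivalent programmatic call (the values shown are the op-mainnet defaults exposed by the new flags; the CLI additionally overrides them from the superchain-registry when the given `--l2-chain-id` is known there):

```go
package main

import (
	"math/big"

	"github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/reassemble"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	cfg := reassemble.Config{
		BatchInbox:    common.HexToAddress("0xFF00000000000000000000000000000000000010"), // op-mainnet batch inbox
		InDirectory:   "/tmp/batch_decoder/transactions_cache",
		OutDirectory:  "/tmp/batch_decoder/channel_cache",
		L2ChainID:     big.NewInt(10), // op-mainnet
		L2GenesisTime: 1686068903,
		L2BlockTime:   2,
	}
	reassemble.Channels(cfg)
}
```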
### Force Close
......@@ -45,7 +50,7 @@ those frames need to be generated differently than simply closing the channel.
jq . $JSON_FILE
# Print the number of valid & invalid transactions
jq .valid_data $TX_DIR/* | sort | uniq -c
# Select all transactions that have invalid data & then print the transaction hash
jq "select(.valid_data == false)|.tx.hash" $TX_DIR
......
......@@ -4,11 +4,13 @@ import (
"context"
"fmt"
"log"
"math/big"
"os"
"time"
"github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/fetch"
"github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/reassemble"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
......@@ -77,7 +79,7 @@ func main() {
End: uint64(cliCtx.Int("end")),
ChainID: chainID,
BatchSenders: map[common.Address]struct{}{
common.HexToAddress(cliCtx.String("sender")): struct{}{},
common.HexToAddress(cliCtx.String("sender")): {},
},
BatchInbox: common.HexToAddress(cliCtx.String("inbox")),
OutDirectory: cliCtx.String("out"),
......@@ -92,13 +94,8 @@ func main() {
},
{
Name: "reassemble",
Usage: "Reassembles channels from fetched batches",
Usage: "Reassembles channels from fetched batch transactions and decode batches",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "inbox",
Value: "0xff00000000000000000000000000000000000420",
Usage: "Batch Inbox Address",
},
&cli.StringFlag{
Name: "in",
Value: "/tmp/batch_decoder/transactions_cache",
......@@ -109,12 +106,60 @@ func main() {
Value: "/tmp/batch_decoder/channel_cache",
Usage: "Cache directory for the found channels",
},
&cli.Uint64Flag{
Name: "l2-chain-id",
Value: 10,
Usage: "L2 chain id for span batch derivation. Default value from op-mainnet.",
},
&cli.Uint64Flag{
Name: "l2-genesis-timestamp",
Value: 1686068903,
Usage: "L2 genesis time for span batch derivation. Default value from op-mainnet. " +
"Superchain-registry prioritized when given value is inconsistent.",
},
&cli.Uint64Flag{
Name: "l2-block-time",
Value: 2,
Usage: "L2 block time for span batch derivation. Default value from op-mainnet. " +
"Superchain-registry prioritized when given value is inconsistent.",
},
&cli.StringFlag{
Name: "inbox",
Value: "0xFF00000000000000000000000000000000000010",
Usage: "Batch Inbox Address. Default value from op-mainnet. " +
"Superchain-registry prioritized when given value is inconsistent.",
},
},
Action: func(cliCtx *cli.Context) error {
var (
L2GenesisTime uint64 = cliCtx.Uint64("l2-genesis-timestamp")
L2BlockTime uint64 = cliCtx.Uint64("l2-block-time")
BatchInboxAddress common.Address = common.HexToAddress(cliCtx.String("inbox"))
)
L2ChainID := new(big.Int).SetUint64(cliCtx.Uint64("l2-chain-id"))
rollupCfg, err := rollup.LoadOPStackRollupConfig(L2ChainID.Uint64())
if err == nil {
// prioritize superchain config
if L2GenesisTime != rollupCfg.Genesis.L2Time {
L2GenesisTime = rollupCfg.Genesis.L2Time
fmt.Printf("L2GenesisTime overridden: %v\n", L2GenesisTime)
}
if L2BlockTime != rollupCfg.BlockTime {
L2BlockTime = rollupCfg.BlockTime
fmt.Printf("L2BlockTime overridden: %v\n", L2BlockTime)
}
if BatchInboxAddress != rollupCfg.BatchInboxAddress {
BatchInboxAddress = rollupCfg.BatchInboxAddress
fmt.Printf("BatchInboxAddress overridden: %v\n", BatchInboxAddress)
}
}
config := reassemble.Config{
BatchInbox: common.HexToAddress(cliCtx.String("inbox")),
InDirectory: cliCtx.String("in"),
OutDirectory: cliCtx.String("out"),
BatchInbox: BatchInboxAddress,
InDirectory: cliCtx.String("in"),
OutDirectory: cliCtx.String("out"),
L2ChainID: L2ChainID,
L2GenesisTime: L2GenesisTime,
L2BlockTime: L2BlockTime,
}
reassemble.Channels(config)
return nil
......
......@@ -5,13 +5,11 @@ import (
"fmt"
"io"
"log"
"math/big"
"os"
"path"
"sort"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/fetch"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth"
......@@ -24,7 +22,8 @@ type ChannelWithMetadata struct {
InvalidFrames bool `json:"invalid_frames"`
InvalidBatches bool `json:"invalid_batches"`
Frames []FrameWithMetadata `json:"frames"`
Batches []derive.BatchData `json:"batches"`
Batches []derive.Batch `json:"batches"`
BatchTypes []int `json:"batch_types"`
}
type FrameWithMetadata struct {
......@@ -36,9 +35,12 @@ type FrameWithMetadata struct {
}
type Config struct {
BatchInbox common.Address
InDirectory string
OutDirectory string
BatchInbox common.Address
InDirectory string
OutDirectory string
L2ChainID *big.Int
L2GenesisTime uint64
L2BlockTime uint64
}
func LoadFrames(directory string, inbox common.Address) []FrameWithMetadata {
......@@ -68,9 +70,8 @@ func Channels(config Config) {
for _, frame := range frames {
framesByChannel[frame.Frame.ID] = append(framesByChannel[frame.Frame.ID], frame)
}
cfg := chaincfg.Mainnet
for id, frames := range framesByChannel {
ch := processFrames(cfg, id, frames)
ch := processFrames(config, id, frames)
filename := path.Join(config.OutDirectory, fmt.Sprintf("%s.json", id.String()))
if err := writeChannel(ch, filename); err != nil {
log.Fatal(err)
......@@ -88,7 +89,7 @@ func writeChannel(ch ChannelWithMetadata, filename string) error {
return enc.Encode(ch)
}
func processFrames(cfg *rollup.Config, id derive.ChannelID, frames []FrameWithMetadata) ChannelWithMetadata {
func processFrames(cfg Config, id derive.ChannelID, frames []FrameWithMetadata) ChannelWithMetadata {
ch := derive.NewChannel(id, eth.L1BlockRef{Number: frames[0].InclusionBlock})
invalidFrame := false
......@@ -104,17 +105,39 @@ func processFrames(cfg *rollup.Config, id derive.ChannelID, frames []FrameWithMe
}
}
var batches []derive.BatchData
var batches []derive.Batch
var batchTypes []int
invalidBatches := false
if ch.IsReady() {
br, err := derive.BatchReader(ch.Reader())
if err == nil {
for batch, err := br(); err != io.EOF; batch, err = br() {
for batchData, err := br(); err != io.EOF; batchData, err = br() {
if err != nil {
fmt.Printf("Error reading batch for channel %v. Err: %v\n", id.String(), err)
fmt.Printf("Error reading batchData for channel %v. Err: %v\n", id.String(), err)
invalidBatches = true
} else {
batches = append(batches, *batch)
batchType := batchData.GetBatchType()
batchTypes = append(batchTypes, int(batchType))
switch batchType {
case derive.SingularBatchType:
singularBatch, err := derive.GetSingularBatch(batchData)
if err != nil {
invalidBatches = true
fmt.Printf("Error converting singularBatch from batchData for channel %v. Err: %v\n", id.String(), err)
}
// singularBatch will be nil when errored
batches = append(batches, singularBatch)
case derive.SpanBatchType:
spanBatch, err := derive.DeriveSpanBatch(batchData, cfg.L2BlockTime, cfg.L2GenesisTime, cfg.L2ChainID)
if err != nil {
invalidBatches = true
fmt.Printf("Error deriving spanBatch from batchData for channel %v. Err: %v\n", id.String(), err)
}
// spanBatch will be nil when errored
batches = append(batches, spanBatch)
default:
fmt.Printf("unrecognized batch type: %d for channel %v.\n", batchData.GetBatchType(), id.String())
}
}
}
} else {
......@@ -131,6 +154,7 @@ func processFrames(cfg *rollup.Config, id derive.ChannelID, frames []FrameWithMe
InvalidFrames: invalidFrame,
InvalidBatches: invalidBatches,
Batches: batches,
BatchTypes: batchTypes,
}
}
......
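Once the channel cache has been written, the new `batch_types` field makes it easy to see how many singular vs. span batches were decoded. A minimal sketch (the `channelSummary` struct is a local helper that only decodes the fields used here; the path is the default `out` directory):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
	"path/filepath"
)

// channelSummary decodes only the fields needed for this tally; the full
// output format is ChannelWithMetadata (see reassemble.go above).
type channelSummary struct {
	InvalidBatches bool  `json:"invalid_batches"`
	BatchTypes     []int `json:"batch_types"`
}

func main() {
	files, err := filepath.Glob("/tmp/batch_decoder/channel_cache/*.json")
	if err != nil {
		log.Fatal(err)
	}
	counts := map[int]int{} // batch type (0 = singular, 1 = span) -> number of batches
	invalidChannels := 0
	for _, f := range files {
		data, err := os.ReadFile(f)
		if err != nil {
			log.Fatal(err)
		}
		var ch channelSummary
		if err := json.Unmarshal(data, &ch); err != nil {
			log.Fatal(err)
		}
		if ch.InvalidBatches {
			invalidChannels++
		}
		for _, bt := range ch.BatchTypes {
			counts[bt]++
		}
	}
	fmt.Printf("batches by type: %v, channels with invalid batches: %d\n", counts, invalidChannels)
}
```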
......@@ -172,9 +172,7 @@ func TestBatchRoundTrip(t *testing.T) {
err = dec.UnmarshalBinary(enc)
require.NoError(t, err)
if dec.GetBatchType() == SpanBatchType {
rawSpanBatch, ok := dec.inner.(*RawSpanBatch)
require.True(t, ok)
_, err := rawSpanBatch.derive(blockTime, genesisTimestamp, chainID)
_, err := DeriveSpanBatch(&dec, blockTime, genesisTimestamp, chainID)
require.NoError(t, err)
}
require.Equal(t, batch, &dec, "Batch not equal test case %v", i)
......@@ -222,9 +220,7 @@ func TestBatchRoundTripRLP(t *testing.T) {
err = dec.DecodeRLP(s)
require.NoError(t, err)
if dec.GetBatchType() == SpanBatchType {
rawSpanBatch, ok := dec.inner.(*RawSpanBatch)
require.True(t, ok)
_, err := rawSpanBatch.derive(blockTime, genesisTimestamp, chainID)
_, err = DeriveSpanBatch(&dec, blockTime, genesisTimestamp, chainID)
require.NoError(t, err)
}
require.Equal(t, batch, &dec, "Batch not equal test case %v", i)
......
......@@ -3,7 +3,6 @@ package derive
import (
"bytes"
"context"
"errors"
"fmt"
"io"
......@@ -92,11 +91,7 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) {
}
switch batchData.GetBatchType() {
case SingularBatchType:
singularBatch, ok := batchData.inner.(*SingularBatch)
if !ok {
return nil, NewCriticalError(errors.New("failed type assertion to SingularBatch"))
}
return singularBatch, nil
return GetSingularBatch(batchData)
case SpanBatchType:
if origin := cr.Origin(); !cr.cfg.IsDelta(origin.Time) {
// Check hard fork activation with the L1 inclusion block time instead of the L1 origin block time.
......@@ -104,16 +99,7 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) {
// This is just for early dropping invalid batches as soon as possible.
return nil, NewTemporaryError(fmt.Errorf("cannot accept span batch in L1 block %s at time %d", origin, origin.Time))
}
rawSpanBatch, ok := batchData.inner.(*RawSpanBatch)
if !ok {
return nil, NewCriticalError(errors.New("failed type assertion to SpanBatch"))
}
// If the batch type is Span batch, derive block inputs from RawSpanBatch.
spanBatch, err := rawSpanBatch.derive(cr.cfg.BlockTime, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID)
if err != nil {
return nil, err
}
return spanBatch, nil
return DeriveSpanBatch(batchData, cr.cfg.BlockTime, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID)
default:
// error is bubbled up to user, but pipeline can skip the batch and continue after.
return nil, NewTemporaryError(fmt.Errorf("unrecognized batch type: %d", batchData.GetBatchType()))
......
......@@ -2,6 +2,7 @@ package derive
import (
"bytes"
"errors"
"io"
"github.com/ethereum-optimism/optimism/op-node/rollup"
......@@ -65,3 +66,12 @@ func (b *SingularBatch) encode(w io.Writer) error {
func (b *SingularBatch) decode(r *bytes.Reader) error {
return rlp.Decode(r, b)
}
// GetSingularBatch retrieves SingularBatch from batchData
func GetSingularBatch(batchData *BatchData) (*SingularBatch, error) {
singularBatch, ok := batchData.inner.(*SingularBatch)
if !ok {
return nil, NewCriticalError(errors.New("failed type assertion to SingularBatch"))
}
return singularBatch, nil
}
......@@ -3,6 +3,7 @@ package derive
import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
......@@ -377,7 +378,7 @@ func (b *RawSpanBatch) encode(w io.Writer) error {
return nil
}
// derive converts RawSpanBatch into SpanBatch, which has a list of spanBatchElement.
// derive converts RawSpanBatch into SpanBatch, which has a list of SpanBatchElement.
// We need chain config constants to derive values for making payload attributes.
func (b *RawSpanBatch) derive(blockTime, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) {
if b.blockCount == 0 {
......@@ -401,35 +402,45 @@ func (b *RawSpanBatch) derive(blockTime, genesisTimestamp uint64, chainID *big.I
}
spanBatch := SpanBatch{
parentCheck: b.parentCheck,
l1OriginCheck: b.l1OriginCheck,
ParentCheck: b.parentCheck,
L1OriginCheck: b.l1OriginCheck,
}
txIdx := 0
for i := 0; i < int(b.blockCount); i++ {
batch := spanBatchElement{}
batch := SpanBatchElement{}
batch.Timestamp = genesisTimestamp + b.relTimestamp + blockTime*uint64(i)
batch.EpochNum = rollup.Epoch(blockOriginNums[i])
for j := 0; j < int(b.blockTxCounts[i]); j++ {
batch.Transactions = append(batch.Transactions, fullTxs[txIdx])
txIdx++
}
spanBatch.batches = append(spanBatch.batches, &batch)
spanBatch.Batches = append(spanBatch.Batches, &batch)
}
return &spanBatch, nil
}
// spanBatchElement is a derived form of input to build a L2 block.
// ToSpanBatch converts RawSpanBatch to SpanBatch,
// which is a wrapper around the derive method of RawSpanBatch.
func (b *RawSpanBatch) ToSpanBatch(blockTime, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) {
spanBatch, err := b.derive(blockTime, genesisTimestamp, chainID)
if err != nil {
return nil, err
}
return spanBatch, nil
}
// SpanBatchElement is a derived form of input to build a L2 block.
// similar to SingularBatch, but does not have ParentHash and EpochHash
// because Span batch spec does not contain parent hash and epoch hash of every block in the span.
type spanBatchElement struct {
type SpanBatchElement struct {
EpochNum rollup.Epoch // aka l1 num
Timestamp uint64
Transactions []hexutil.Bytes
}
// singularBatchToElement converts a SingularBatch to a spanBatchElement
func singularBatchToElement(singularBatch *SingularBatch) *spanBatchElement {
return &spanBatchElement{
// singularBatchToElement converts a SingularBatch to a SpanBatchElement
func singularBatchToElement(singularBatch *SingularBatch) *SpanBatchElement {
return &SpanBatchElement{
EpochNum: singularBatch.EpochNum,
Timestamp: singularBatch.Timestamp,
Transactions: singularBatch.Transactions,
......@@ -437,11 +448,27 @@ func singularBatchToElement(singularBatch *SingularBatch) *spanBatchElement {
}
// SpanBatch is an implementation of Batch interface,
// containing the input to build a span of L2 blocks in derived form (spanBatchElement)
// containing the input to build a span of L2 blocks in derived form (SpanBatchElement)
type SpanBatch struct {
parentCheck [20]byte // First 20 bytes of the first block's parent hash
l1OriginCheck [20]byte // First 20 bytes of the last block's L1 origin hash
batches []*spanBatchElement // List of block input in derived form
ParentCheck [20]byte // First 20 bytes of the first block's parent hash
L1OriginCheck [20]byte // First 20 bytes of the last block's L1 origin hash
Batches []*SpanBatchElement // List of block input in derived form
}
// spanBatchMarshaling is a helper type used for JSON marshaling.
type spanBatchMarshaling struct {
ParentCheck []hexutil.Bytes `json:"parent_check"`
L1OriginCheck []hexutil.Bytes `json:"l1_origin_check"`
Batches []*SpanBatchElement `json:"span_batch_elements"`
}
func (b *SpanBatch) MarshalJSON() ([]byte, error) {
spanBatch := spanBatchMarshaling{
ParentCheck: []hexutil.Bytes{b.ParentCheck[:]},
L1OriginCheck: []hexutil.Bytes{b.L1OriginCheck[:]},
Batches: b.Batches,
}
return json.Marshal(spanBatch)
}
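As a rough illustration of the JSON this produces, here is a standalone sketch (assuming the exported fields above; the zero-valued check fields stand in for the first 20 bytes of the parent hash and L1 origin hash):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	b := &derive.SpanBatch{
		Batches: []*derive.SpanBatchElement{
			{EpochNum: 100, Timestamp: 1686068905, Transactions: []hexutil.Bytes{{0x02, 0x01}}},
		},
	}
	out, err := json.Marshal(b)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// Prints (reformatted for readability):
	// {"parent_check":["0x0000000000000000000000000000000000000000"],
	//  "l1_origin_check":["0x0000000000000000000000000000000000000000"],
	//  "span_batch_elements":[{"EpochNum":100,"Timestamp":1686068905,"Transactions":["0x0201"]}]}
}
```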
// GetBatchType returns its batch type (batch_version)
......@@ -451,100 +478,100 @@ func (b *SpanBatch) GetBatchType() int {
// GetTimestamp returns timestamp of the first block in the span
func (b *SpanBatch) GetTimestamp() uint64 {
return b.batches[0].Timestamp
return b.Batches[0].Timestamp
}
// LogContext creates a new log context that contains information of the batch
func (b *SpanBatch) LogContext(log log.Logger) log.Logger {
if len(b.batches) == 0 {
if len(b.Batches) == 0 {
return log.New("block_count", 0)
}
return log.New(
"batch_timestamp", b.batches[0].Timestamp,
"parent_check", hexutil.Encode(b.parentCheck[:]),
"origin_check", hexutil.Encode(b.l1OriginCheck[:]),
"batch_timestamp", b.Batches[0].Timestamp,
"parent_check", hexutil.Encode(b.ParentCheck[:]),
"origin_check", hexutil.Encode(b.L1OriginCheck[:]),
"start_epoch_number", b.GetStartEpochNum(),
"end_epoch_number", b.GetBlockEpochNum(len(b.batches)-1),
"block_count", len(b.batches),
"end_epoch_number", b.GetBlockEpochNum(len(b.Batches)-1),
"block_count", len(b.Batches),
)
}
// GetStartEpochNum returns epoch number(L1 origin block number) of the first block in the span
func (b *SpanBatch) GetStartEpochNum() rollup.Epoch {
return b.batches[0].EpochNum
return b.Batches[0].EpochNum
}
// CheckOriginHash checks if the l1OriginCheck matches the first 20 bytes of given hash, probably L1 block hash from the current canonical L1 chain.
func (b *SpanBatch) CheckOriginHash(hash common.Hash) bool {
return bytes.Equal(b.l1OriginCheck[:], hash.Bytes()[:20])
return bytes.Equal(b.L1OriginCheck[:], hash.Bytes()[:20])
}
// CheckParentHash checks if the parentCheck matches the first 20 bytes of given hash, probably the current L2 safe head.
func (b *SpanBatch) CheckParentHash(hash common.Hash) bool {
return bytes.Equal(b.parentCheck[:], hash.Bytes()[:20])
return bytes.Equal(b.ParentCheck[:], hash.Bytes()[:20])
}
// GetBlockEpochNum returns the epoch number(L1 origin block number) of the block at the given index in the span.
func (b *SpanBatch) GetBlockEpochNum(i int) uint64 {
return uint64(b.batches[i].EpochNum)
return uint64(b.Batches[i].EpochNum)
}
// GetBlockTimestamp returns the timestamp of the block at the given index in the span.
func (b *SpanBatch) GetBlockTimestamp(i int) uint64 {
return b.batches[i].Timestamp
return b.Batches[i].Timestamp
}
// GetBlockTransactions returns the encoded transactions of the block at the given index in the span.
func (b *SpanBatch) GetBlockTransactions(i int) []hexutil.Bytes {
return b.batches[i].Transactions
return b.Batches[i].Transactions
}
// GetBlockCount returns the number of blocks in the span
func (b *SpanBatch) GetBlockCount() int {
return len(b.batches)
return len(b.Batches)
}
// AppendSingularBatch appends a SingularBatch into the span batch
// updates l1OriginCheck or parentCheck if needed.
func (b *SpanBatch) AppendSingularBatch(singularBatch *SingularBatch) {
if len(b.batches) == 0 {
copy(b.parentCheck[:], singularBatch.ParentHash.Bytes()[:20])
if len(b.Batches) == 0 {
copy(b.ParentCheck[:], singularBatch.ParentHash.Bytes()[:20])
}
b.batches = append(b.batches, singularBatchToElement(singularBatch))
copy(b.l1OriginCheck[:], singularBatch.EpochHash.Bytes()[:20])
b.Batches = append(b.Batches, singularBatchToElement(singularBatch))
copy(b.L1OriginCheck[:], singularBatch.EpochHash.Bytes()[:20])
}
// ToRawSpanBatch merges SingularBatch List and initialize single RawSpanBatch
func (b *SpanBatch) ToRawSpanBatch(originChangedBit uint, genesisTimestamp uint64, chainID *big.Int) (*RawSpanBatch, error) {
if len(b.batches) == 0 {
if len(b.Batches) == 0 {
return nil, errors.New("cannot merge empty singularBatch list")
}
raw := RawSpanBatch{}
// Sort by timestamp of L2 block
sort.Slice(b.batches, func(i, j int) bool {
return b.batches[i].Timestamp < b.batches[j].Timestamp
sort.Slice(b.Batches, func(i, j int) bool {
return b.Batches[i].Timestamp < b.Batches[j].Timestamp
})
// spanBatchPrefix
span_start := b.batches[0]
span_end := b.batches[len(b.batches)-1]
span_start := b.Batches[0]
span_end := b.Batches[len(b.Batches)-1]
raw.relTimestamp = span_start.Timestamp - genesisTimestamp
raw.l1OriginNum = uint64(span_end.EpochNum)
raw.parentCheck = b.parentCheck
raw.l1OriginCheck = b.l1OriginCheck
raw.parentCheck = b.ParentCheck
raw.l1OriginCheck = b.L1OriginCheck
// spanBatchPayload
raw.blockCount = uint64(len(b.batches))
raw.blockCount = uint64(len(b.Batches))
raw.originBits = new(big.Int)
raw.originBits.SetBit(raw.originBits, 0, originChangedBit)
for i := 1; i < len(b.batches); i++ {
for i := 1; i < len(b.Batches); i++ {
bit := uint(0)
if b.batches[i-1].EpochNum < b.batches[i].EpochNum {
if b.Batches[i-1].EpochNum < b.Batches[i].EpochNum {
bit = 1
}
raw.originBits.SetBit(raw.originBits, i, bit)
}
var blockTxCounts []uint64
var txs [][]byte
for _, batch := range b.batches {
for _, batch := range b.Batches {
blockTxCount := uint64(len(batch.Transactions))
blockTxCounts = append(blockTxCounts, blockTxCount)
for _, rawTx := range batch.Transactions {
......@@ -560,13 +587,13 @@ func (b *SpanBatch) ToRawSpanBatch(originChangedBit uint, genesisTimestamp uint6
return &raw, nil
}
// GetSingularBatches converts spanBatchElements after L2 safe head to SingularBatches.
// Since spanBatchElement does not contain EpochHash, set EpochHash from the given L1 blocks.
// GetSingularBatches converts SpanBatchElements after L2 safe head to SingularBatches.
// Since SpanBatchElement does not contain EpochHash, set EpochHash from the given L1 blocks.
// The result SingularBatches do not contain ParentHash yet. It must be set by BatchQueue.
func (b *SpanBatch) GetSingularBatches(l1Origins []eth.L1BlockRef, l2SafeHead eth.L2BlockRef) ([]*SingularBatch, error) {
var singularBatches []*SingularBatch
originIdx := 0
for _, batch := range b.batches {
for _, batch := range b.Batches {
if batch.Timestamp <= l2SafeHead.Time {
continue
}
......@@ -592,20 +619,30 @@ func (b *SpanBatch) GetSingularBatches(l1Origins []eth.L1BlockRef, l2SafeHead et
return singularBatches, nil
}
// NewSpanBatch converts given singularBatches into spanBatchElements, and creates a new SpanBatch.
// NewSpanBatch converts given singularBatches into SpanBatchElements, and creates a new SpanBatch.
func NewSpanBatch(singularBatches []*SingularBatch) *SpanBatch {
spanBatch := &SpanBatch{}
if len(singularBatches) == 0 {
return spanBatch
}
copy(spanBatch.parentCheck[:], singularBatches[0].ParentHash.Bytes()[:20])
copy(spanBatch.l1OriginCheck[:], singularBatches[len(singularBatches)-1].EpochHash.Bytes()[:20])
copy(spanBatch.ParentCheck[:], singularBatches[0].ParentHash.Bytes()[:20])
copy(spanBatch.L1OriginCheck[:], singularBatches[len(singularBatches)-1].EpochHash.Bytes()[:20])
for _, singularBatch := range singularBatches {
spanBatch.batches = append(spanBatch.batches, singularBatchToElement(singularBatch))
spanBatch.Batches = append(spanBatch.Batches, singularBatchToElement(singularBatch))
}
return spanBatch
}
// DeriveSpanBatch derives SpanBatch from BatchData.
func DeriveSpanBatch(batchData *BatchData, blockTime, genesisTimestamp uint64, chainID *big.Int) (*SpanBatch, error) {
rawSpanBatch, ok := batchData.inner.(*RawSpanBatch)
if !ok {
return nil, NewCriticalError(errors.New("failed type assertion to SpanBatch"))
}
// If the batch type is Span batch, derive block inputs from RawSpanBatch.
return rawSpanBatch.ToSpanBatch(blockTime, genesisTimestamp, chainID)
}
// SpanBatchBuilder is a utility type to build a SpanBatch by adding a SingularBatch one by one.
// makes easier to stack SingularBatches and convert to RawSpanBatch for encoding.
type SpanBatchBuilder struct {
......@@ -642,7 +679,7 @@ func (b *SpanBatchBuilder) GetRawSpanBatch() (*RawSpanBatch, error) {
}
func (b *SpanBatchBuilder) GetBlockCount() int {
return len(b.spanBatch.batches)
return len(b.spanBatch.Batches)
}
func (b *SpanBatchBuilder) Reset() {
......
......@@ -331,18 +331,18 @@ func TestSpanBatchDerive(t *testing.T) {
require.NoError(t, err)
blockCount := len(singularBatches)
require.Equal(t, safeL2Head.Hash.Bytes()[:20], spanBatchDerived.parentCheck[:])
require.Equal(t, singularBatches[blockCount-1].Epoch().Hash.Bytes()[:20], spanBatchDerived.l1OriginCheck[:])
require.Equal(t, safeL2Head.Hash.Bytes()[:20], spanBatchDerived.ParentCheck[:])
require.Equal(t, singularBatches[blockCount-1].Epoch().Hash.Bytes()[:20], spanBatchDerived.L1OriginCheck[:])
require.Equal(t, len(singularBatches), int(rawSpanBatch.blockCount))
for i := 1; i < len(singularBatches); i++ {
require.Equal(t, spanBatchDerived.batches[i].Timestamp, spanBatchDerived.batches[i-1].Timestamp+l2BlockTime)
require.Equal(t, spanBatchDerived.Batches[i].Timestamp, spanBatchDerived.Batches[i-1].Timestamp+l2BlockTime)
}
for i := 0; i < len(singularBatches); i++ {
require.Equal(t, singularBatches[i].EpochNum, spanBatchDerived.batches[i].EpochNum)
require.Equal(t, singularBatches[i].Timestamp, spanBatchDerived.batches[i].Timestamp)
require.Equal(t, singularBatches[i].Transactions, spanBatchDerived.batches[i].Transactions)
require.Equal(t, singularBatches[i].EpochNum, spanBatchDerived.Batches[i].EpochNum)
require.Equal(t, singularBatches[i].Timestamp, spanBatchDerived.Batches[i].Timestamp)
require.Equal(t, singularBatches[i].Transactions, spanBatchDerived.Batches[i].Transactions)
}
}
}
......@@ -511,8 +511,8 @@ func TestSpanBatchBuilder(t *testing.T) {
for i := 0; i < len(singularBatches); i++ {
spanBatchBuilder.AppendSingularBatch(singularBatches[i], seqNum)
require.Equal(t, i+1, spanBatchBuilder.GetBlockCount())
require.Equal(t, singularBatches[0].ParentHash.Bytes()[:20], spanBatchBuilder.spanBatch.parentCheck[:])
require.Equal(t, singularBatches[i].EpochHash.Bytes()[:20], spanBatchBuilder.spanBatch.l1OriginCheck[:])
require.Equal(t, singularBatches[0].ParentHash.Bytes()[:20], spanBatchBuilder.spanBatch.ParentCheck[:])
require.Equal(t, singularBatches[i].EpochHash.Bytes()[:20], spanBatchBuilder.spanBatch.L1OriginCheck[:])
}
rawSpanBatch, err := spanBatchBuilder.GetRawSpanBatch()
......