Commit a313e8b3 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into aj/faster-dockerfile

parents fa2a69a5 2626dbef
@@ -1142,7 +1142,7 @@ workflows:
working_directory: proxyd
- indexer-tests
- go-lint-test-build:
name: op-heartbeat-tests
binary_name: op-heartbeat
working_directory: op-heartbeat
- semgrep-scan
@@ -1224,7 +1224,11 @@ workflows:
target: test-external-geth
- bedrock-go-tests:
requires:
- go-mod-tidy
- cannon-build-test-vectors
- cannon-go-lint-and-test
- check-generated-mocks-op-node
- check-generated-mocks-op-service
- op-batcher-lint
- op-bootnode-lint
- op-bindings-lint
@@ -1238,6 +1242,7 @@ workflows:
- op-batcher-tests
- op-bindings-tests
- op-chain-ops-tests
- op-heartbeat-tests
- op-node-tests
- op-proposer-tests
- op-challenger-tests
@@ -1245,6 +1250,7 @@ workflows:
- op-service-tests
- op-e2e-WS-tests
- op-e2e-HTTP-tests
- op-e2e-ext-geth-tests
- docker-build:
name: op-node-docker-build
docker_file: op-node/Dockerfile
......
comment:
layout: "reach, diff, flags, files"
behavior: default
require_changes: true # only post the comment if coverage changes
comment: false
ignore:
- "op-e2e"
- "**/*.t.sol"
......
package op_e2e
import (
"context"
"math/big"
"testing"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/stretchr/testify/require"
)
func TestMintOnRevertedDeposit(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system")
defer sys.Close()
l1Client := sys.Clients["l1"]
l2Verif := sys.Clients["verifier"]
// create signer
aliceKey := cfg.Secrets.Alice
opts, err := bind.NewKeyedTransactorWithChainID(aliceKey, cfg.L1ChainIDBig())
require.Nil(t, err)
fromAddr := opts.From
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
startBalance, err := l2Verif.BalanceAt(ctx, fromAddr, nil)
cancel()
require.Nil(t, err)
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
startNonce, err := l2Verif.NonceAt(ctx, fromAddr, nil)
require.NoError(t, err)
cancel()
toAddr := common.Address{0xff, 0xff}
mintAmount := big.NewInt(9_000_000)
opts.Value = mintAmount
SendDepositTx(t, cfg, l1Client, l2Verif, opts, func(l2Opts *DepositTxOpts) {
l2Opts.ToAddr = toAddr
// trigger a revert by transferring more than we have available
l2Opts.Value = new(big.Int).Mul(common.Big2, startBalance)
l2Opts.ExpectedStatus = types.ReceiptStatusFailed
})
// Confirm balance
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
endBalance, err := l2Verif.BalanceAt(ctx, fromAddr, nil)
cancel()
require.Nil(t, err)
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
toAddrBalance, err := l2Verif.BalanceAt(ctx, toAddr, nil)
require.NoError(t, err)
cancel()
diff := new(big.Int)
diff = diff.Sub(endBalance, startBalance)
require.Equal(t, mintAmount, diff, "Did not get expected balance change")
require.Equal(t, common.Big0.Int64(), toAddrBalance.Int64(), "The recipient account balance should be zero")
ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
endNonce, err := l2Verif.NonceAt(ctx, fromAddr, nil)
require.NoError(t, err)
cancel()
require.Equal(t, startNonce+1, endNonce, "Nonce of deposit sender should increment on L2, even if the deposit fails")
}
func TestDepositTxCreateContract(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
delete(cfg.Nodes, "verifier")
sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system")
defer sys.Close()
l1Client := sys.Clients["l1"]
l2Client := sys.Clients["sequencer"]
opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.Alice, cfg.L1ChainIDBig())
require.Nil(t, err)
// Simple constructor that is prefixed to the actual contract code
// Results in the contract code being returned as the code for the new contract
deployPrefixSize := byte(16)
deployPrefix := []byte{
// Copy input data after this prefix into memory starting at address 0x00
// CODECOPY arg size
byte(vm.PUSH1), deployPrefixSize,
byte(vm.CODESIZE),
byte(vm.SUB),
// CODECOPY arg offset
byte(vm.PUSH1), deployPrefixSize,
// CODECOPY arg destOffset
byte(vm.PUSH1), 0x00,
byte(vm.CODECOPY),
// Return code from memory
// RETURN arg size
byte(vm.PUSH1), deployPrefixSize,
byte(vm.CODESIZE),
byte(vm.SUB),
// RETURN arg offset
byte(vm.PUSH1), 0x00,
byte(vm.RETURN),
}
// Stores the first word from call data code to storage slot 0
sstoreContract := []byte{
// Load first word from call data
byte(vm.PUSH1), 0x00,
byte(vm.CALLDATALOAD),
// Store it to slot 0
byte(vm.PUSH1), 0x00,
byte(vm.SSTORE),
}
deployData := append(deployPrefix, sstoreContract...)
l2Receipt := SendDepositTx(t, cfg, l1Client, l2Client, opts, func(l2Opts *DepositTxOpts) {
l2Opts.Data = deployData
l2Opts.Value = common.Big0
l2Opts.IsCreation = true
l2Opts.ToAddr = common.Address{}
l2Opts.GasLimit = 1_000_000
})
require.NotEqual(t, common.Address{}, l2Receipt.ContractAddress, "should not have zero address")
code, err := l2Client.CodeAt(context.Background(), l2Receipt.ContractAddress, nil)
require.NoError(t, err, "get deployed contract code")
require.Equal(t, sstoreContract, code, "should have deployed correct contract code")
}
package geth
import (
"context"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/stretchr/testify/require"
)
// ConnectP2P creates a p2p peer connection between node1 and node2.
func ConnectP2P(t *testing.T, node1 *ethclient.Client, node2 *ethclient.Client) {
var targetInfo p2p.NodeInfo
require.NoError(t, node2.Client().Call(&targetInfo, "admin_nodeInfo"), "get node info")
var peerAdded bool
require.NoError(t, node1.Client().Call(&peerAdded, "admin_addPeer", targetInfo.Enode), "add peer")
require.True(t, peerAdded, "should have added peer successfully")
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err := wait.For(ctx, time.Second, func() (bool, error) {
var peerCount hexutil.Uint64
if err := node1.Client().Call(&peerCount, "net_peerCount"); err != nil {
return false, err
}
t.Logf("Peer count %v", uint64(peerCount))
return peerCount >= hexutil.Uint64(1), nil
})
require.NoError(t, err, "wait for a peer to be connected")
}
func WithP2P() func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error {
return func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error {
ethCfg.RollupDisableTxPoolGossip = false
nodeCfg.P2P = p2p.Config{
NoDiscovery: true,
ListenAddr: "127.0.0.1:0",
MaxPeers: 10,
}
return nil
}
}
package op_e2e
import (
"testing"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
func TestTxGossip(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
gethOpts := []GethOption{
geth.WithP2P(),
}
cfg.GethOptions["sequencer"] = gethOpts
cfg.GethOptions["verifier"] = gethOpts
sys, err := cfg.Start(t)
require.NoError(t, err, "Start system")
seqClient := sys.Clients["sequencer"]
verifClient := sys.Clients["verifier"]
geth.ConnectP2P(t, seqClient, verifClient)
// Send a transaction to the verifier and it should be gossiped to the sequencer and included in a block.
SendL2Tx(t, cfg, verifClient, cfg.Secrets.Alice, func(opts *TxOpts) {
opts.ToAddr = &common.Address{0xaa}
opts.Value = common.Big1
opts.VerifyOnClients(seqClient, verifClient)
})
}
@@ -84,7 +84,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
require.NoError(t, err)
deployConfig := config.DeployConfig.Copy()
deployConfig.L1GenesisBlockTimestamp = hexutil.Uint64(time.Now().Unix())
require.NoError(t, deployConfig.Check(), "Deploy config is invalid, do you need to run make devnet-allocs?")
l1Deployments := config.L1Deployments.Copy()
require.NoError(t, l1Deployments.Check())
@@ -457,7 +457,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
ethClient = gethInst
} else {
if len(cfg.GethOptions[name]) > 0 {
t.Errorf("External L2 nodes do not support configuration through GethOptions")
t.Skip("External L2 nodes do not support configuration through GethOptions")
}
ethClient = (&ExternalRunner{
Name: name,
......
@@ -11,7 +11,6 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
@@ -337,67 +336,6 @@ func TestFinalize(t *testing.T) {
require.NotZerof(t, l2Finalized.NumberU64(), "must have finalized L2 block")
}
func TestMissingBatchE2E(t *testing.T) {
InitParallel(t)
// Note this test zeroes the balance of the batch-submitter to make the batches unable to go into L1.
......
@@ -21,7 +21,8 @@ import (
// The L1 transaction, including sender, is configured by the l1Opts param.
// The L2 transaction options can be configured by modifying the DepositTxOps value supplied to applyL2Opts
// Will verify that the transaction is included with the expected status on L1 and L2
// Returns the receipt of the L2 transaction
func SendDepositTx(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l2Client *ethclient.Client, l1Opts *bind.TransactOpts, applyL2Opts DepositTxOptsFn) *types.Receipt {
l2Opts := defaultDepositTxOpts(l1Opts)
applyL2Opts(l2Opts)
@@ -38,16 +39,17 @@ func SendDepositTx(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l
require.Nil(t, err, "with deposit tx")
// Wait for transaction on L1
l1Receipt, err := waitForTransaction(tx.Hash(), l1Client, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
require.Nil(t, err, "Waiting for deposit tx on L1")
// Wait for transaction to be included on L2
reconstructedDep, err := derive.UnmarshalDepositLogEvent(l1Receipt.Logs[0])
require.NoError(t, err, "Could not reconstruct L2 Deposit")
tx = types.NewTx(reconstructedDep)
l2Receipt, err := waitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.NoError(t, err)
require.Equal(t, l2Opts.ExpectedStatus, l2Receipt.Status, "l2 transaction status")
return l2Receipt
}
type DepositTxOptsFn func(l2Opts *DepositTxOpts)
@@ -91,16 +93,16 @@ func SendL2Tx(t *testing.T, cfg SystemConfig, l2Client *ethclient.Client, privKe
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err := l2Client.SendTransaction(ctx, tx)
require.Nil(t, err, "Sending L2 tx")
require.NoError(t, err, "Sending L2 tx")
receipt, err := waitForTransaction(tx.Hash(), l2Client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx")
require.NoError(t, err, "Waiting for L2 tx")
require.Equal(t, opts.ExpectedStatus, receipt.Status, "TX should have expected status")
for i, client := range opts.VerifyClients {
t.Logf("Waiting for tx %v on verification client %d", tx.Hash(), i)
receiptVerif, err := waitForTransaction(tx.Hash(), client, 10*time.Duration(cfg.DeployConfig.L2BlockTime)*time.Second)
require.NoErrorf(t, err, "Waiting for L2 tx on verification client %d", i)
require.Equalf(t, receipt, receiptVerif, "Receipts should be the same on sequencer and verification client %d", i)
}
return receipt
......
@@ -37,7 +37,7 @@ func (d *DiskKV) pathKey(k common.Hash) string {
func (d *DiskKV) Put(k common.Hash, v []byte) error {
d.Lock()
defer d.Unlock()
f, err := openTempFile(d.path, k.String()+".txt.*")
if err != nil {
return fmt.Errorf("failed to open temp file for pre-image %s: %w", k, err)
}
@@ -57,6 +57,21 @@ func (d *DiskKV) Put(k common.Hash, v []byte) error {
return nil
}
func openTempFile(dir string, nameTemplate string) (*os.File, error) {
f, err := os.CreateTemp(dir, nameTemplate)
// Directory has been deleted out from underneath us. Recreate it.
if errors.Is(err, os.ErrNotExist) {
if mkdirErr := os.MkdirAll(dir, 0777); mkdirErr != nil {
return nil, errors.Join(fmt.Errorf("failed to create directory %v: %w", dir, mkdirErr), err)
}
f, err = os.CreateTemp(dir, nameTemplate)
}
if err != nil {
return nil, err
}
return f, nil
}
func (d *DiskKV) Get(k common.Hash) ([]byte, error) {
d.RLock()
defer d.RUnlock()
......
package kvstore
import "testing"
import (
"path/filepath"
"testing"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/require"
)
func TestDiskKV(t *testing.T) {
tmp := t.TempDir() // automatically removed by testing cleanup
kv := NewDiskKV(tmp)
kvTest(t, kv)
}
func TestCreateMissingDirectory(t *testing.T) {
tmp := t.TempDir()
dir := filepath.Join(tmp, "data")
kv := NewDiskKV(dir)
val := []byte{1, 2, 3, 4}
key := crypto.Keccak256Hash(val)
require.NoError(t, kv.Put(key, val))
}
@@ -9,6 +9,16 @@
- [Introduction](#introduction)
- [Span batch format](#span-batch-format)
- [Optimization Strategies](#optimization-strategies)
- [Truncating information and storing only necessary data](#truncating-information-and-storing-only-necessary-data)
- [`tx_data_headers` removal from initial specs](#tx_data_headers-removal-from-initial-specs)
- [`Chain ID` removal from initial specs](#chain-id-removal-from-initial-specs)
- [Reorganization of constant length transaction fields](#reorganization-of-constant-length-transaction-fields)
- [RLP encoding for only variable length fields](#rlp-encoding-for-only-variable-length-fields)
- [Store `y_parity` instead of `v`](#store-y_parity-instead-of-v)
- [Adjust `txs` Data Layout for Better Compression](#adjust-txs-data-layout-for-better-compression)
- [`fee_recipients` Encoding Scheme](#fee_recipients-encoding-scheme)
- [How derivation works with Span Batch?](#how-derivation-works-with-span-batch)
- [Integration](#integration)
- [Channel Reader (Batch Decoding)](#channel-reader-batch-decoding)
- [Batch Queue](#batch-queue)
@@ -53,7 +63,7 @@ Span-batches address these inefficiencies, with a new batch format version.
## Span batch format
Note that span-batches, unlike previous singular batches,
encode *a range of consecutive* L2 blocks at the same time.
Introduce version `1` to the [batch-format](./derivation.md#batch-format) table:
@@ -63,36 +73,69 @@ Introduce version `1` to the [batch-format](./derivation.md#batch-format) table:
| 1 | `prefix ++ payload` |
Notation:
- `++`: concatenation of byte-strings
- `span_start`: first L2 block in the span
- `span_end`: last L2 block in the span
- `uvarint`: unsigned Base128 varint, as defined in [protobuf spec]
- `rlp_encode`: a function that encodes a batch according to the [RLP format],
and `[x, y, z]` denotes a list containing items `x`, `y` and `z`
[protobuf spec]: https://protobuf.dev/programming-guides/encoding/#varints
Where:
- `prefix = rel_timestamp ++ l1_origin_num ++ parent_check ++ l1_origin_check`
- `rel_timestamp`: `uvarint` relative timestamp since L2 genesis,
i.e. `span_start.timestamp - config.genesis.timestamp`.
- `l1_origin_num`: `uvarint` block number of the last L1 origin, i.e. `span_end.l1_origin.number`.
- `parent_check`: first 20 bytes of the parent hash, truncated to 20 bytes for efficiency,
i.e. `span_start.parent_hash[:20]`.
- `l1_origin_check`: the block hash of the last L1 origin is referenced.
The hash is truncated to 20 bytes for efficiency, i.e. `span_end.l1_origin.hash[:20]`.
- `payload = block_count ++ origin_bits ++ block_tx_counts ++ txs`:
- `block_count`: `uvarint` number of L2 blocks.
- `origin_bits`: bitlist of `block_count` bits, right-padded to a multiple of 8 bits:
1 bit per L2 block, indicating if the L1 origin changed this L2 block.
- `block_tx_counts`: for each block, a `uvarint` of `len(block.transactions)`.
- `txs`: L2 transactions, reorganized and encoded as described below; a minimal encoding sketch follows this list.
- `txs = contract_creation_bits ++ y_parity_bits ++ tx_sigs ++ tx_tos ++ tx_datas ++ tx_nonces ++ tx_gases`
- `contract_creation_bits`: bit list of `sum(block_tx_counts)` bits, right-padded to a multiple of 8 bits,
1 bit per L2 transaction, indicating whether the transaction is a contract creation.
- `y_parity_bits`: bit list of `sum(block_tx_counts)` bits, right-padded to a multiple of 8 bits,
1 bit per L2 transaction, holding the y-parity value used when recovering the transaction sender address.
- `tx_sigs`: concatenated list of transaction signatures
- `r` is encoded as big-endian `uint256`
- `s` is encoded as big-endian `uint256`
- `tx_tos`: concatenated list of `to` fields. The `to` field of a contract creation transaction is `nil` and is omitted.
- `tx_datas`: concatenated list of variable-length RLP-encoded data,
matching the encoding of the fields as in the [EIP-2718] format of the `TransactionType`.
- `legacy`: `rlp_encode(value, gasPrice, data)`
- `1`: ([EIP-2930]): `0x01 ++ rlp_encode(value, gasPrice, data, accessList)`
- `2`: ([EIP-1559]): `0x02 ++ rlp_encode(value, max_priority_fee_per_gas, max_fee_per_gas, data, access_list)`
- `tx_nonces`: concatenated list of `uvarint`-encoded `nonce` fields.
- `tx_gases`: concatenated list of `uvarint`-encoded gas limits.
- `legacy`: `gasLimit`
- `1`: ([EIP-2930]): `gasLimit`
- `2`: ([EIP-1559]): `gas_limit`
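To make the layout concrete, here is a minimal Go sketch of encoding the `prefix` and a bitlist such as `origin_bits`. The type and helper names are illustrative only, not the op-node implementation, and the bit order within a right-padded byte is an assumption for demonstration.

```go
// Minimal sketch of the batch_version 1 prefix and bitlist encoding.
// Names are illustrative, not the op-node implementation.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

type spanBatchPrefix struct {
	RelTimestamp  uint64   // span_start.timestamp - config.genesis.timestamp
	L1OriginNum   uint64   // span_end.l1_origin.number
	ParentCheck   [20]byte // span_start.parent_hash[:20]
	L1OriginCheck [20]byte // span_end.l1_origin.hash[:20]
}

// encodePrefix writes prefix = rel_timestamp ++ l1_origin_num ++ parent_check ++ l1_origin_check.
func encodePrefix(w *bytes.Buffer, p spanBatchPrefix) {
	var buf [binary.MaxVarintLen64]byte
	w.Write(buf[:binary.PutUvarint(buf[:], p.RelTimestamp)])
	w.Write(buf[:binary.PutUvarint(buf[:], p.L1OriginNum)])
	w.Write(p.ParentCheck[:])
	w.Write(p.L1OriginCheck[:])
}

// encodeBitlist packs one bit per entry, right-padded to a multiple of 8 bits,
// as used by origin_bits, contract_creation_bits and y_parity_bits.
// The bit order within a byte is an assumption here.
func encodeBitlist(bits []bool) []byte {
	out := make([]byte, (len(bits)+7)/8)
	for i, bit := range bits {
		if bit {
			out[i/8] |= 1 << (7 - uint(i%8))
		}
	}
	return out
}

func main() {
	var w bytes.Buffer
	encodePrefix(&w, spanBatchPrefix{RelTimestamp: 7200, L1OriginNum: 17_000_000})
	w.Write(encodeBitlist([]bool{true, false, false, true})) // origin_bits for 4 blocks
	fmt.Printf("encoded: %x\n", w.Bytes())
}
```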
Introduce version `2` to the [batch-format](./derivation.md#batch-format) table:
| `batch_version` | `content` |
|-----------------|---------------------|
| 2 | `prefix ++ payload` |
Where:
- `prefix = rel_timestamp ++ l1_origin_num ++ parent_check ++ l1_origin_check`:
- Identical to `batch_version` 1
- `payload = block_count ++ origin_bits ++ block_tx_counts ++ txs ++ fee_recipients`:
- Every field definition is identical to `batch_version` 1, except that `fee_recipients` is
added to support more decentralized sequencing.
- `fee_recipients = fee_recipients_idxs ++ fee_recipients_set`
- `fee_recipients_set`: concatenated list of unique L2 fee recipient addresses.
- `fee_recipients_idxs`: for each block,
a `uvarint` index into `fee_recipients_set` identifying that block's fee recipient.
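A hedged sketch of how `fee_recipients_idxs` and `fee_recipients_set` could be constructed; the function name is hypothetical, and go-ethereum's `common.Address` is used only for convenience.

```go
// Illustrative construction of fee_recipients = fee_recipients_idxs ++ fee_recipients_set.
// A sketch of the scheme described above, not the canonical encoder.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func encodeFeeRecipients(perBlock []common.Address) []byte {
	var (
		out   bytes.Buffer
		set   []common.Address           // unique addresses, in first-seen order
		index = map[common.Address]int{} // address -> position in set
		buf   [binary.MaxVarintLen64]byte
	)
	// fee_recipients_idxs: one uvarint index per L2 block.
	for _, addr := range perBlock {
		i, ok := index[addr]
		if !ok {
			i = len(set)
			index[addr] = i
			set = append(set, addr)
		}
		out.Write(buf[:binary.PutUvarint(buf[:], uint64(i))])
	}
	// fee_recipients_set: the unique 20-byte addresses, concatenated.
	for _, addr := range set {
		out.Write(addr[:])
	}
	return out.Bytes()
}

func main() {
	a, b := common.Address{0x01}, common.Address{0x02}
	// 4 blocks, 2 unique recipients: 4 index bytes + 2*20 set bytes.
	fmt.Printf("%x\n", encodeFeeRecipients([]common.Address{a, a, b, a}))
}
```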
[EIP-2718]: https://eips.ethereum.org/EIPS/eip-2718
......@@ -100,13 +143,91 @@ Where:
[EIP-1559]: https://eips.ethereum.org/EIPS/eip-1559
## Optimization Strategies
### Truncating information and storing only necessary data
The following fields store truncated data:
- `rel_timestamp`: We can save two bytes by storing `rel_timestamp` instead of the full `span_start.timestamp`.
- `parent_check` and `l1_origin_check`: We can save twelve bytes per hash by truncating it to 20 bytes,
while retaining enough safety.
### `tx_data_headers` removal from initial specs
We do not need to store a length for each `tx_datas` element even though they are variable length,
because each element is RLP encoded and carries its own length in its RLP prefix.
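This can be demonstrated with go-ethereum's `rlp` package: concatenated RLP items can be split apart again without any extra length header, since each item's prefix carries its length. A minimal sketch:

```go
// Demonstrates why tx_data_headers is unnecessary: RLP items are
// self-delimiting, so concatenated tx_datas can be split by the decoder.
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	var concat []byte
	for _, payload := range [][]byte{{0x01, 0x02}, {0xaa}, {0xde, 0xad, 0xbe, 0xef}} {
		enc, _ := rlp.EncodeToBytes(payload)
		concat = append(concat, enc...) // no per-item length header stored
	}
	s := rlp.NewStream(bytes.NewReader(concat), uint64(len(concat)))
	for {
		item, err := s.Raw() // reads exactly one RLP item, prefix included
		if err != nil {
			break // io.EOF once all items are consumed
		}
		fmt.Printf("item: %x\n", item)
	}
}
```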
### `Chain ID` removal from initial specs
Every transaction has a chain ID. We do not need to include it in the span batch because L2 already knows its
chain ID, and uses its own value when processing span batches during derivation.
### Reorganization of constant length transaction fields
The `signature`, `nonce`, `gaslimit`, and `to` fields are constant size, so they are split out completely and
grouped into individual arrays.
This adds more complexity, but organizes the data for improved compression by grouping values with similar patterns.
### RLP encoding for only variable length fields
Further size optimization can be done by packing variable-length fields, such as `access_list`.
However, doing so would introduce much more code complexity compared to the size reduction it gains.
Our goal is to find the sweet spot in the code complexity vs. span batch size tradeoff.
We decided that using RLP for all variable-length fields is the best option,
rather than risking the codebase with gnarly custom encoding/decoding implementations.
### Store `y_parity` instead of `v`
For legacy (EIP-155) transactions, `v = 2 * ChainID + 35 + y_parity`; for other transaction types, `v = y_parity`.
We therefore only store `y_parity`, which is a single bit per L2 transaction.
This optimization helps more when the ratio of legacy transactions to all non-deposit transactions is higher.
Deposit transactions are excluded from batches and are never written to L1, so they are excluded from this analysis.
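A small sketch of the recovery step, assuming EIP-155 legacy transactions (pre-EIP-155 transactions, where `v = 27 + y_parity`, are ignored here for brevity):

```go
// Sketch of recovering a signature's v value from a stored y_parity bit.
// Illustrative only; assumes EIP-155 legacy transactions.
package main

import (
	"fmt"
	"math/big"
)

func recoverV(txType byte, yParity uint64, chainID *big.Int) *big.Int {
	if txType == 0 { // legacy (EIP-155): v = 2 * chainID + 35 + y_parity
		v := new(big.Int).Lsh(chainID, 1)
		return v.Add(v, big.NewInt(35+int64(yParity)))
	}
	// EIP-2930 / EIP-1559: v is the y parity bit itself
	return new(big.Int).SetUint64(yParity)
}

func main() {
	fmt.Println(recoverV(0, 1, big.NewInt(10))) // legacy on chain 10 -> 56
	fmt.Println(recoverV(2, 1, big.NewInt(10))) // EIP-1559 -> 1
}
```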
### Adjust `txs` Data Layout for Better Compression
There are `(7 choose 2) * 5! = 2520` permutations of the ordering of the fields of `txs`.
It is not `7!` because `contract_creation_bits` must be decoded before `tx_tos` can be decoded.
We experimented to find the best layout for compression.
It turned out that placing the high-entropy data together (`tx_sigs`, `tx_tos`, `tx_datas`),
followed by the remaining fields, helped gzip achieve a greater size reduction.
### `fee_recipients` Encoding Scheme
Let `K` := the number of unique fee recipients (cardinality) per span batch, and `N` := the number of L2 blocks.
Naively concatenating every block's fee recipient costs `20 * N` bytes.
With `fee_recipients_idxs` and `fee_recipients_set`, the cost is at most `max_uvarint_size * N + 20 * K = 8 * N + 20 * K` bytes.
Whenever `20 * N > 8 * N + 20 * K`, i.e. `K < 0.6 * N`, maintaining the index reduces the size;
for example, `N = 100` blocks with `K = 2` recipients costs 2000 bytes naively, but at most `800 + 40 = 840` bytes indexed.
We expect sequencer rotation to be infrequent, so `K` should be much smaller than `N`, which makes the inequality hold.
Therefore, we decided to manage `fee_recipients_idxs` and `fee_recipients_set` separately.
This adds complexity but reduces data.
## How derivation works with Span Batch?
- Block Timestamp
- The first L2 block's block timestamp is `rel_timestamp + L2Genesis.Timestamp`.
- Then we can derive the other blocks' timestamps by adding the L2 block time for each subsequent block; a short derivation sketch follows this list.
- L1 Origin Number
- The L1 origin number of the first L2 block's parent is `l1_origin_num - sum(origin_bits)`.
- Then we can derive the other blocks' L1 origin numbers with `origin_bits`:
- `i-th block's L1 origin number = (i-1)-th block's L1 origin number + (origin_bits[i] ? 1 : 0)`
- L1 Origin Hash
- We only need the `l1_origin_check`, the truncated L1 origin hash of the last L2 block of Span Batch.
- If the last block references the canonical L1 chain as its origin,
we can be sure that all the other blocks' origins are consistent with the canonical L1 chain.
- Parent hash
- In the V0 batch spec, we need the batch's parent hash to validate that the batch's parent is consistent with the current L2 safe head.
- But in the case of a span batch, because it contains consecutive L2 blocks in the span,
we do not need to validate any block's parent hash except the first block's.
- Transactions
- Deposit transactions can be derived from each block's L1 origin, identical to the V0 batch.
- User transactions can be derived in the following way:
- Recover the `V` value of the transaction signature from `y_parity_bits` and the L2 chain ID, as described in the optimization strategies.
- When parsing `tx_tos`, `contract_creation_bits` is used to determine whether the transaction has a `to` field.
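The timestamp and L1-origin-number rules above can be sketched in Go; all names are illustrative, not the op-node implementation.

```go
// Hedged sketch of the span-batch timestamp / L1-origin-number derivation.
package main

import "fmt"

type spanBatch struct {
	RelTimestamp uint64 // span_start.timestamp - genesis.timestamp
	L1OriginNum  uint64 // span_end.l1_origin.number
	OriginBits   []bool // 1 bit per L2 block: L1 origin changed at this block
}

func derive(b spanBatch, genesisTime, l2BlockTime uint64) (timestamps, originNums []uint64) {
	sum := uint64(0)
	for _, bit := range b.OriginBits {
		if bit {
			sum++
		}
	}
	origin := b.L1OriginNum - sum // L1 origin number of the first block's parent
	ts := genesisTime + b.RelTimestamp
	for i, bit := range b.OriginBits {
		if bit {
			origin++ // origin_bits[i] ? 1 : 0
		}
		timestamps = append(timestamps, ts+uint64(i)*l2BlockTime)
		originNums = append(originNums, origin)
	}
	return timestamps, originNums
}

func main() {
	ts, origins := derive(spanBatch{
		RelTimestamp: 7200,
		L1OriginNum:  102,
		OriginBits:   []bool{false, true, false, true},
	}, 1_600_000_000, 2)
	fmt.Println(ts)      // [1600007200 1600007202 1600007204 1600007206]
	fmt.Println(origins) // [100 101 101 102]
}
```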
## Integration
@@ -114,11 +235,7 @@ Where:
The Channel Reader decodes the span-batch, as described in the [span-batch format](#span-batch-format).
A set of derived attributes is computed as described above, then cached with the decoded result.
### Batch Queue
@@ -144,40 +261,46 @@ Span-batch rules, in validation order:
which makes the later L2 blocks invalid.
- Variables:
- `origin_changed_bit = origin_bits[0]`: `true` if the first L2 block changed its L1 origin, `false` otherwise.
- `start_epoch_num = batch.l1_origin_num - sum(origin_bits) + (origin_changed_bit ? 1 : 0)`
- `end_epoch_num = batch.l1_origin_num`
- Rules:
- `start_epoch_num + sequence_window_size < inclusion_block_number` -> `drop`:
i.e. the batch must be included timely.
- `start_epoch_num > epoch.number + 1` -> `drop`:
i.e. the L1 origin cannot change by more than one L1 block per L2 block.
- If `batch.l1_origin_check` does not match the canonical L1 chain at `end_epoch_num` -> `drop`:
verify the batch is intended for this L1 chain.
- Once the above `l1_origin_check` passes, we do not need to check whether the origin
is past `inclusion_block_number`, because of the following invariant.
- Invariant: the epoch-num in the batch is always less than the inclusion block number,
if and only if the L1 epoch hash is correct.
- `start_epoch_num < epoch.number` -> `drop`: must have been a duplicate batch,
we may be past this L1 block in the safe L2 chain. If a span-batch overlaps with older information,
it is dropped, since partially valid span-batches are not accepted
(a consolidated sketch of these rules follows the time-drift rules below).
- Max Sequencer time-drift checks:
- Note: The max time-drift is enforced for the *batch as a whole*, to keep the possible output variants small.
- Variables:
- `block_input`: an L2 block from the span-batch,
with L1 origin as derived from the `origin_bits` and now established canonical L1 chain.
- `next_epoch`: `block_input.origin`'s next L1 block.
It may reach the next origin outside of the L1 origins of the span.
- Rules:
- For each `block_input` that can be read from the span-batch:
- `block_input.timestamp < block_input.origin.time` -> `drop`: enforce the min L2 timestamp rule.
- `block_input.timestamp > block_input.origin.time + max_sequencer_drift`: enforce the L2 timestamp drift rule,
but with exceptions to preserve the min L2 timestamp invariant above:
- `len(block_input.transactions) == 0`:
- `origin_bits[i] == 0`: `i` is the index of `block_input` in the span batch.
So this implies the block_input did not advance the L1 origin,
and must thus be checked against `next_epoch`.
- If `next_epoch` is not known -> `undecided`:
without the next L1 origin we cannot yet determine whether the time invariant could have been kept.
- If `block_input.timestamp >= next_epoch.time` -> `drop`:
the batch could have adopted the next L1 origin without breaking the `L2 time >= L1 time` invariant.
- `len(block_input.transactions) > 0` -> `drop`:
when exceeding the sequencer time drift, never allow the sequencer to include transactions.
- And for all transactions:
- `drop` if the `batch.tx_datas` list contains a transaction
that is invalid or derived by other means exclusively:
- any transaction that is empty (zero length `tx_data`)
- any [deposited transactions][g-deposit-tx-type] (identified by the transaction type prefix byte in `tx_data`)
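A consolidated, hedged sketch of the epoch-range and max-sequencer-drift rules above; all names and the verdict strings are illustrative, and `canonicalHash20` is a hypothetical lookup of the truncated canonical L1 block hash.

```go
// Sketch of the span-batch validation rules, not the op-node implementation.
package main

import "fmt"

// checkEpochRange applies the epoch-range rules above.
func checkEpochRange(startEpochNum, endEpochNum, epochNum, inclusionBlock, seqWindow uint64,
	l1OriginCheck [20]byte, canonicalHash20 func(uint64) [20]byte) string {
	if startEpochNum+seqWindow < inclusionBlock {
		return "drop" // the batch must be included timely
	}
	if startEpochNum > epochNum+1 {
		return "drop" // L1 origin cannot change by more than one block per L2 block
	}
	if canonicalHash20(endEpochNum) != l1OriginCheck {
		return "drop" // the batch is not intended for this L1 chain
	}
	if startEpochNum < epochNum {
		return "drop" // overlaps older blocks; partially valid span-batches are rejected
	}
	return "ok"
}

// checkDrift applies the per-block time-drift rules to one block_input.
// nextEpochTime is nil while the next L1 origin is not yet known.
func checkDrift(blockTime, originTime uint64, nextEpochTime *uint64,
	txCount int, originAdvanced bool, maxDrift uint64) string {
	if blockTime < originTime {
		return "drop" // min L2 timestamp rule
	}
	if blockTime > originTime+maxDrift { // exceeding the drift limit
		if txCount > 0 {
			return "drop" // no transactions allowed beyond the drift limit
		}
		if !originAdvanced { // origin_bits[i] == 0: check against next_epoch
			if nextEpochTime == nil {
				return "undecided"
			}
			if blockTime >= *nextEpochTime {
				return "drop" // could have adopted the next L1 origin
			}
		}
	}
	return "ok"
}

func main() {
	var h [20]byte
	lookup := func(uint64) [20]byte { return h }
	fmt.Println(checkEpochRange(101, 102, 100, 150, 100, h, lookup)) // "ok"
	next := uint64(1_700_000_400)
	fmt.Println(checkDrift(1_700_000_500, 1_700_000_000, &next, 0, false, 300)) // "drop"
}
```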
......