Commit a9ee3f7e authored by Matthew Slipper, committed by GitHub

Merge branch 'develop' into sc/ctb-migration-dictator

parents ff860ecf f9d42b88
---
'@eth-optimism/core-utils': minor
---
Changes the type for Bedrock withdrawal proofs
package op_batcher
import (
"context"
"github.com/ethereum-optimism/optimism/op-proposer/txmgr"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
)
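// CraftTx creates a signed transaction to the batch inbox address carrying the given calldata and nonce.
// Fee caps are sampled from current L1 conditions and the gas limit is set to the intrinsic gas of the calldata.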
// NOTE: This method SHOULD NOT publish the resulting transaction.
func (l *BatchSubmitter) CraftTx(ctx context.Context, data []byte, nonce uint64) (*types.Transaction, error) {
gasTipCap, err := l.cfg.L1Client.SuggestGasTipCap(ctx)
if err != nil {
return nil, err
}
head, err := l.cfg.L1Client.HeaderByNumber(ctx, nil)
if err != nil {
return nil, err
}
gasFeeCap := txmgr.CalcGasFeeCap(head.BaseFee, gasTipCap)
rawTx := &types.DynamicFeeTx{
ChainID: l.cfg.ChainID,
Nonce: nonce,
To: &l.cfg.BatchInboxAddress,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Data: data,
}
l.log.Debug("creating tx", "to", rawTx.To, "from", crypto.PubkeyToAddress(l.cfg.PrivKey.PublicKey))
gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true)
if err != nil {
return nil, err
}
rawTx.Gas = gas
return types.SignNewTx(l.cfg.PrivKey, types.LatestSignerForChainID(l.cfg.ChainID), rawTx)
}
// UpdateGasPrice signs an otherwise identical txn to the one provided but with
// updated gas prices sampled from the existing network conditions.
//
// NOTE: This method SHOULD NOT publish the resulting transaction.
func (l *BatchSubmitter) UpdateGasPrice(ctx context.Context, tx *types.Transaction) (*types.Transaction, error) {
gasTipCap, err := l.cfg.L1Client.SuggestGasTipCap(ctx)
if err != nil {
return nil, err
}
head, err := l.cfg.L1Client.HeaderByNumber(ctx, nil)
if err != nil {
return nil, err
}
gasFeeCap := txmgr.CalcGasFeeCap(head.BaseFee, gasTipCap)
rawTx := &types.DynamicFeeTx{
ChainID: l.cfg.ChainID,
Nonce: tx.Nonce(),
To: tx.To(),
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Gas: tx.Gas(),
Data: tx.Data(),
}
return types.SignNewTx(l.cfg.PrivKey, types.LatestSignerForChainID(l.cfg.ChainID), rawTx)
}
// SendTransaction injects a signed transaction into the pending pool for
// execution.
func (l *BatchSubmitter) SendTransaction(ctx context.Context, tx *types.Transaction) error {
return l.cfg.L1Client.SendTransaction(ctx, tx)
}
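For context, a minimal sketch (not part of this diff) of how these three helpers could be driven by a caller in the same package; in practice they are presumably wired into the txmgr retry loop rather than called directly. The submitAndBump name, the confirmed channel, and the fixed bump interval are illustrative assumptions.
// submitAndBump publishes tx and, whenever interval elapses without the caller
// signalling success on confirmed, re-signs it with refreshed fee caps (same
// nonce, so the replacement supersedes the original) and publishes it again.
func submitAndBump(ctx context.Context, l *BatchSubmitter, data []byte, nonce uint64, interval time.Duration, confirmed <-chan struct{}) error {
	tx, err := l.CraftTx(ctx, data, nonce)
	if err != nil {
		return err
	}
	for {
		if err := l.SendTransaction(ctx, tx); err != nil {
			return err
		}
		select {
		case <-confirmed:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(interval):
			if tx, err = l.UpdateGasPrice(ctx, tx); err != nil {
				return err
			}
		}
	}
}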
package op_batcher
import (
"context"
"fmt"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
)
// dialEthClientWithTimeout attempts to dial the L1 provider using the provided
// URL. If the dial doesn't complete within defaultDialTimeout seconds, this
// method will return an error.
func dialEthClientWithTimeout(ctx context.Context, url string) (*ethclient.Client, error) {
ctxt, cancel := context.WithTimeout(ctx, defaultDialTimeout)
defer cancel()
return ethclient.DialContext(ctxt, url)
}
// dialRollupClientWithTimeout attempts to dial the RPC provider using the provided
// URL. If the dial doesn't complete within defaultDialTimeout seconds, this
// method will return an error.
func dialRollupClientWithTimeout(ctx context.Context, url string) (*sources.RollupClient, error) {
ctxt, cancel := context.WithTimeout(ctx, defaultDialTimeout)
defer cancel()
rpcCl, err := rpc.DialContext(ctxt, url)
if err != nil {
return nil, err
}
return sources.NewRollupClient(client.NewBaseRPCClient(rpcCl)), nil
}
// parseAddress parses an ETH address from a hex string. This method will fail if
// the address is not a valid hexadecimal address.
func parseAddress(address string) (common.Address, error) {
if common.IsHexAddress(address) {
return common.HexToAddress(address), nil
}
return common.Address{}, fmt.Errorf("invalid address: %v", address)
}
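A minimal sketch, assuming the batcher is configured from string-valued CLI flags, of how these helpers might be combined during startup; the dialEndpoints function and l1Endpoints struct are hypothetical and not types from this diff.
// l1Endpoints bundles the dialed clients and the parsed batch inbox address.
type l1Endpoints struct {
	l1Client   *ethclient.Client
	rollupNode *sources.RollupClient
	batchInbox common.Address
}

// dialEndpoints dials L1 and the rollup node with the default timeout and
// parses the batch inbox address, failing fast on any misconfiguration.
func dialEndpoints(ctx context.Context, l1URL, rollupURL, inboxAddr string) (*l1Endpoints, error) {
	l1, err := dialEthClientWithTimeout(ctx, l1URL)
	if err != nil {
		return nil, fmt.Errorf("dialing L1: %w", err)
	}
	rollup, err := dialRollupClientWithTimeout(ctx, rollupURL)
	if err != nil {
		return nil, fmt.Errorf("dialing rollup node: %w", err)
	}
	inbox, err := parseAddress(inboxAddr)
	if err != nil {
		return nil, err
	}
	return &l1Endpoints{l1Client: l1, rollupNode: rollup, batchInbox: inbox}, nil
}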
......@@ -7,13 +7,13 @@ import (
"path/filepath"
"strings"
"github.com/ethereum-optimism/optimism/l2geth/core/rawdb"
"github.com/ethereum-optimism/optimism/l2geth/core/state"
"github.com/ethereum-optimism/optimism/l2geth/log"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-bindings/hardhat"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
op_state "github.com/ethereum-optimism/optimism/op-chain-ops/state"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/mattn/go-isatty"
......@@ -137,7 +137,8 @@ func main() {
}
chaindataPath := filepath.Join(ctx.String("db-path"), "geth", "chaindata")
ldb, err := rawdb.NewLevelDBDatabase(chaindataPath, 1024, 64, "")
ancientPath := filepath.Join(ctx.String("db-path"), "ancient")
ldb, err := rawdb.NewLevelDBDatabaseWithFreezer(chaindataPath, int(1024), int(60), ancientPath, "", true)
if err != nil {
return err
}
......@@ -149,11 +150,7 @@ func main() {
num := rawdb.ReadHeaderNumber(ldb, hash)
header := rawdb.ReadHeader(ldb, hash, *num)
sdb, err := state.New(header.Root, state.NewDatabase(ldb))
if err != nil {
return err
}
wrappedDB, err := op_state.NewWrappedStateDB(nil, sdb)
sdb, err := state.New(header.Root, state.NewDatabase(ldb), nil)
if err != nil {
return err
}
......@@ -179,7 +176,7 @@ func main() {
L1ERC721BridgeProxy: l1ERC721BridgeProxyDeployment.Address,
}
if err := genesis.MigrateDB(wrappedDB, config, block, &l2Addrs, &migrationData); err != nil {
if err := genesis.MigrateDB(sdb, config, block, &l2Addrs, &migrationData); err != nil {
return err
}
......
package state
import (
"errors"
"math/big"
lcommon "github.com/ethereum-optimism/optimism/l2geth/common"
lstate "github.com/ethereum-optimism/optimism/l2geth/core/state"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
)
// WrappedStateDB wraps both the StateDB types from l2geth and upstream geth.
// This allows for a l2geth StateDB to be passed to functions that expect an
// upstream geth StateDB.
type WrappedStateDB struct {
statedb *state.StateDB
legacyStatedb *lstate.StateDB
}
// NewWrappedStateDB will create a WrappedStateDB. It can wrap either an
// upstream geth database or a legacy l2geth database.
func NewWrappedStateDB(statedb *state.StateDB, legacyStatedb *lstate.StateDB) (*WrappedStateDB, error) {
if statedb == nil && legacyStatedb == nil {
return nil, errors.New("must pass at least 1 database")
}
if statedb != nil && legacyStatedb != nil {
return nil, errors.New("cannot pass both databases")
}
return &WrappedStateDB{
statedb: statedb,
legacyStatedb: legacyStatedb,
}, nil
}
func (w *WrappedStateDB) CreateAccount(addr common.Address) {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
w.legacyStatedb.CreateAccount(address)
} else {
w.statedb.CreateAccount(addr)
}
}
func (w *WrappedStateDB) SubBalance(addr common.Address, value *big.Int) {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
w.legacyStatedb.SubBalance(address, value)
} else {
w.statedb.SubBalance(addr, value)
}
}
func (w *WrappedStateDB) AddBalance(addr common.Address, value *big.Int) {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
w.legacyStatedb.AddBalance(address, value)
} else {
w.statedb.AddBalance(addr, value)
}
}
func (w *WrappedStateDB) GetBalance(addr common.Address) *big.Int {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
return w.legacyStatedb.GetBalance(address)
} else {
return w.statedb.GetBalance(addr)
}
}
func (w *WrappedStateDB) GetNonce(addr common.Address) uint64 {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
return w.legacyStatedb.GetNonce(address)
} else {
return w.statedb.GetNonce(addr)
}
}
func (w *WrappedStateDB) SetNonce(addr common.Address, nonce uint64) {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
w.legacyStatedb.SetNonce(address, nonce)
} else {
w.statedb.SetNonce(addr, nonce)
}
}
func (w *WrappedStateDB) GetCodeHash(addr common.Address) common.Hash {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
hash := w.legacyStatedb.GetCodeHash(address)
return common.BytesToHash(hash.Bytes())
} else {
return w.statedb.GetCodeHash(addr)
}
}
func (w *WrappedStateDB) GetCode(addr common.Address) []byte {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
return w.legacyStatedb.GetCode(address)
} else {
return w.statedb.GetCode(addr)
}
}
func (w *WrappedStateDB) SetCode(addr common.Address, code []byte) {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
w.legacyStatedb.SetCode(address, code)
} else {
w.statedb.SetCode(addr, code)
}
}
func (w *WrappedStateDB) GetCodeSize(addr common.Address) int {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
return w.legacyStatedb.GetCodeSize(address)
} else {
return w.statedb.GetCodeSize(addr)
}
}
func (w *WrappedStateDB) AddRefund(refund uint64) {
if w.legacyStatedb != nil {
w.legacyStatedb.AddRefund(refund)
} else {
w.statedb.AddRefund(refund)
}
}
func (w *WrappedStateDB) SubRefund(refund uint64) {
if w.legacyStatedb != nil {
w.legacyStatedb.SubRefund(refund)
} else {
w.statedb.SubRefund(refund)
}
}
func (w *WrappedStateDB) GetRefund() uint64 {
if w.legacyStatedb != nil {
return w.legacyStatedb.GetRefund()
} else {
return w.statedb.GetRefund()
}
}
func (w *WrappedStateDB) GetCommittedState(addr common.Address, key common.Hash) common.Hash {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
lkey := lcommon.BytesToHash(key.Bytes())
value := w.legacyStatedb.GetCommittedState(address, lkey)
return common.BytesToHash(value.Bytes())
} else {
return w.statedb.GetCommittedState(addr, key)
}
}
func (w *WrappedStateDB) GetState(addr common.Address, key common.Hash) common.Hash {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
lkey := lcommon.BytesToHash(key.Bytes())
value := w.legacyStatedb.GetState(address, lkey)
return common.BytesToHash(value.Bytes())
} else {
return w.statedb.GetState(addr, key)
}
}
func (w *WrappedStateDB) SetState(addr common.Address, key, value common.Hash) {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
lkey := lcommon.BytesToHash(key.Bytes())
lvalue := lcommon.BytesToHash(value.Bytes())
w.legacyStatedb.SetState(address, lkey, lvalue)
} else {
w.statedb.SetState(addr, key, value)
}
}
func (w *WrappedStateDB) Suicide(addr common.Address) bool {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
return w.legacyStatedb.Suicide(address)
} else {
return w.statedb.Suicide(addr)
}
}
func (w *WrappedStateDB) HasSuicided(addr common.Address) bool {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
return w.legacyStatedb.HasSuicided(address)
} else {
return w.statedb.HasSuicided(addr)
}
}
func (w *WrappedStateDB) Exist(addr common.Address) bool {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
return w.legacyStatedb.Exist(address)
} else {
return w.statedb.Exist(addr)
}
}
func (w *WrappedStateDB) Empty(addr common.Address) bool {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
return w.legacyStatedb.Empty(address)
} else {
return w.statedb.Empty(addr)
}
}
func (w *WrappedStateDB) PrepareAccessList(sender common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) {
if w.legacyStatedb != nil {
panic("PrepareAccessList unimplemented")
} else {
w.statedb.PrepareAccessList(sender, dest, precompiles, txAccesses)
}
}
func (w *WrappedStateDB) AddressInAccessList(addr common.Address) bool {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
return w.legacyStatedb.AddressInAccessList(address)
} else {
return w.statedb.AddressInAccessList(addr)
}
}
func (w *WrappedStateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressOk bool, slotOk bool) {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
lslot := lcommon.BytesToHash(slot.Bytes())
return w.legacyStatedb.SlotInAccessList(address, lslot)
} else {
return w.statedb.SlotInAccessList(addr, slot)
}
}
func (w *WrappedStateDB) AddAddressToAccessList(addr common.Address) {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
w.legacyStatedb.AddAddressToAccessList(address)
} else {
w.statedb.AddAddressToAccessList(addr)
}
}
func (w *WrappedStateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
lslot := lcommon.BytesToHash(slot.Bytes())
w.legacyStatedb.AddSlotToAccessList(address, lslot)
} else {
w.statedb.AddSlotToAccessList(addr, slot)
}
}
func (w *WrappedStateDB) RevertToSnapshot(snapshot int) {
if w.legacyStatedb != nil {
w.legacyStatedb.RevertToSnapshot(snapshot)
} else {
w.statedb.RevertToSnapshot(snapshot)
}
}
func (w *WrappedStateDB) Snapshot() int {
if w.legacyStatedb != nil {
return w.legacyStatedb.Snapshot()
} else {
return w.statedb.Snapshot()
}
}
func (w *WrappedStateDB) AddLog(log *types.Log) {
if w.legacyStatedb != nil {
panic("AddLog unimplemented")
} else {
w.statedb.AddLog(log)
}
}
func (w *WrappedStateDB) AddPreimage(hash common.Hash, preimage []byte) {
if w.legacyStatedb != nil {
lhash := lcommon.BytesToHash(hash.Bytes())
w.legacyStatedb.AddPreimage(lhash, preimage)
} else {
w.statedb.AddPreimage(hash, preimage)
}
}
func (w *WrappedStateDB) ForEachStorage(addr common.Address, cb func(common.Hash, common.Hash) bool) error {
if w.legacyStatedb != nil {
address := lcommon.BytesToAddress(addr.Bytes())
return w.legacyStatedb.ForEachStorage(address, func(lkey, lvalue lcommon.Hash) bool {
key := common.BytesToHash(lkey.Bytes())
value := common.BytesToHash(lvalue.Bytes())
return cb(key, value)
})
} else {
return w.statedb.ForEachStorage(addr, cb)
}
}
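A minimal usage sketch, mirroring how the migration command above wraps a legacy database; the wrapLegacy helper and the example predeploy address are illustrative only.
// wrapLegacy wraps a legacy l2geth StateDB so it can be passed to code that
// expects an upstream geth StateDB. Exactly one of the two databases must be
// supplied to NewWrappedStateDB; passing both, or neither, is an error.
func wrapLegacy(legacy *lstate.StateDB) (*WrappedStateDB, error) {
	wrapped, err := NewWrappedStateDB(nil, legacy)
	if err != nil {
		return nil, err
	}
	// Calls are forwarded to the legacy database, converting addresses, hashes
	// and values between the l2geth and upstream geth types as needed.
	addr := common.HexToAddress("0x4200000000000000000000000000000000000010")
	wrapped.SetNonce(addr, wrapped.GetNonce(addr)+1)
	return wrapped, nil
}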
......@@ -10,9 +10,6 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
// TODO: Full state machine for channel
// Open, Closed, Ready (todo - when to construct RLP reader)
// A Channel is a set of batches that are split into at least one, but possibly multiple frames.
// Frames are allowed to be ingested out of order.
// Each frame is ingested one by one. Once a frame with `closed` is added to the channel, the
......@@ -28,17 +25,15 @@ type Channel struct {
// true if we have buffered the last frame
closed bool
// TODO: implement this check
// highestFrameNumber is the highest frame number yet seen.
// This must be equal to `endFrameNumber`
// highestFrameNumber uint16
highestFrameNumber uint16
// endFrameNumber is the frame number of the frame where `isLast` is true
// No other frame number must be larger than this.
endFrameNumber uint16
// Store a map of frame number -> frame data for constant time ordering
inputs map[uint64][]byte
// Store a map of frame number -> frame for constant time ordering
inputs map[uint64]Frame
highestL1InclusionBlock eth.L1BlockRef
}
......@@ -46,7 +41,7 @@ type Channel struct {
func NewChannel(id ChannelID, openBlock eth.L1BlockRef) *Channel {
return &Channel{
id: id,
inputs: make(map[uint64][]byte),
inputs: make(map[uint64]Frame),
openBlock: openBlock,
}
}
......@@ -58,26 +53,43 @@ func (ch *Channel) AddFrame(frame Frame, l1InclusionBlock eth.L1BlockRef) error
if frame.ID != ch.id {
return fmt.Errorf("frame id does not match channel id. Expected %v, got %v", ch.id, frame.ID)
}
// These checks are specified and cannot be changed without a hard fork.
if frame.IsLast && ch.closed {
return fmt.Errorf("cannot add ending frame to a closed channel. id %v", ch.id)
}
if _, ok := ch.inputs[uint64(frame.FrameNumber)]; ok {
return DuplicateErr
}
// TODO: highest seen frame vs endFrameNumber
if ch.closed && frame.FrameNumber >= ch.endFrameNumber {
return fmt.Errorf("frame number (%d) is greater than or equal to end frame number (%d) of a closed channel", frame.FrameNumber, ch.endFrameNumber)
}
// Guaranteed to succeed. Now update internal state
if frame.IsLast {
ch.endFrameNumber = frame.FrameNumber
ch.closed = true
}
// Prune frames with a number higher than the closing frame number when we receive a closing frame
if frame.IsLast && ch.endFrameNumber < ch.highestFrameNumber {
// Do a linear scan over saved inputs instead of ranging over ID numbers
for id, prunedFrame := range ch.inputs {
if id >= uint64(ch.endFrameNumber) {
delete(ch.inputs, id)
}
ch.size -= frameSize(prunedFrame)
}
ch.highestFrameNumber = ch.endFrameNumber
}
// Update highest seen frame number after pruning
if frame.FrameNumber > ch.highestFrameNumber {
ch.highestFrameNumber = frame.FrameNumber
}
if ch.highestL1InclusionBlock.Number < l1InclusionBlock.Number {
ch.highestL1InclusionBlock = l1InclusionBlock
}
ch.inputs[uint64(frame.FrameNumber)] = frame.Data
ch.size += uint64(len(frame.Data)) + frameOverhead
// todo use `IsReady` + state to create final output reader
ch.inputs[uint64(frame.FrameNumber)] = frame
ch.size += frameSize(frame)
return nil
}
......@@ -121,11 +133,11 @@ func (ch *Channel) IsReady() bool {
func (ch *Channel) Reader() io.Reader {
var readers []io.Reader
for i := uint64(0); i <= uint64(ch.endFrameNumber); i++ {
data, ok := ch.inputs[i]
frame, ok := ch.inputs[i]
if !ok {
panic("dev error in channel.Reader. Must be called after the channel is ready.")
}
readers = append(readers, bytes.NewBuffer(data))
readers = append(readers, bytes.NewReader(frame.Data))
}
return io.MultiReader(readers...)
}
......
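As a usage illustration (not part of this diff): the sketch below, assumed to live in the same derive package with io and eth imported, buffers two out-of-order frames and reads the concatenated data back once the closing frame is present. Real frame data is compressed, RLP-encoded batch data; plain strings are used only to show the mechanics, and the channel ID and block number are placeholders.
// exampleReadChannel buffers two out-of-order frames, the first of which
// closes the channel at frame number 1, then concatenates their data.
func exampleReadChannel() ([]byte, error) {
	id := [16]byte{0x01}
	block := eth.L1BlockRef{Number: 100}
	ch := NewChannel(id, block)
	frames := []Frame{
		{ID: id, FrameNumber: 1, IsLast: true, Data: []byte("world")},
		{ID: id, FrameNumber: 0, Data: []byte("hello, ")},
	}
	for _, f := range frames {
		if err := ch.AddFrame(f, block); err != nil {
			return nil, err
		}
	}
	// Reader must only be called once every frame up to and including the
	// closing frame number has been buffered (see IsReady).
	return io.ReadAll(ch.Reader())
}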
package derive
import (
"testing"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/stretchr/testify/require"
)
type frameValidityTC struct {
name string
frames []Frame
shouldErr []bool
sizes []uint64
}
func (tc *frameValidityTC) Run(t *testing.T) {
id := [16]byte{0xff}
block := eth.L1BlockRef{}
ch := NewChannel(id, block)
if len(tc.frames) != len(tc.shouldErr) || len(tc.frames) != len(tc.sizes) {
t.Errorf("lengths should be the same. frames: %d, shouldErr: %d, sizes: %d", len(tc.frames), len(tc.shouldErr), len(tc.sizes))
}
for i, frame := range tc.frames {
err := ch.AddFrame(frame, block)
if tc.shouldErr[i] {
require.NotNil(t, err)
} else {
require.Nil(t, err)
}
require.Equal(t, tc.sizes[i], ch.Size())
}
}
// TestFrameValidity inserts a list of frames into the channel. It checks whether `AddFrame` returns an error
// as expected and verifies the resulting size of the channel.
func TestFrameValidity(t *testing.T) {
id := [16]byte{0xff}
testCases := []frameValidityTC{
{
name: "wrong channel",
frames: []Frame{{ID: [16]byte{0xee}}},
shouldErr: []bool{true},
sizes: []uint64{0},
},
{
name: "double close",
frames: []Frame{
{ID: id, FrameNumber: 2, IsLast: true, Data: []byte("four")},
{ID: id, FrameNumber: 1, IsLast: true}},
shouldErr: []bool{false, true},
sizes: []uint64{204, 204},
},
{
name: "duplicate frame",
frames: []Frame{
{ID: id, FrameNumber: 2, Data: []byte("four")},
{ID: id, FrameNumber: 2, Data: []byte("seven__")}},
shouldErr: []bool{false, true},
sizes: []uint64{204, 204},
},
{
name: "duplicate closing frames",
frames: []Frame{
{ID: id, FrameNumber: 2, IsLast: true, Data: []byte("four")},
{ID: id, FrameNumber: 2, IsLast: true, Data: []byte("seven__")}},
shouldErr: []bool{false, true},
sizes: []uint64{204, 204},
},
{
name: "frame past closing",
frames: []Frame{
{ID: id, FrameNumber: 2, IsLast: true, Data: []byte("four")},
{ID: id, FrameNumber: 10, Data: []byte("seven__")}},
shouldErr: []bool{false, true},
sizes: []uint64{204, 204},
},
{
name: "prune after close frame",
frames: []Frame{
{ID: id, FrameNumber: 10, IsLast: false, Data: []byte("seven__")},
{ID: id, FrameNumber: 2, IsLast: true, Data: []byte("four")}},
shouldErr: []bool{false, false},
sizes: []uint64{207, 204},
},
{
name: "multiple valid frames",
frames: []Frame{
{ID: id, FrameNumber: 10, Data: []byte("seven__")},
{ID: id, FrameNumber: 2, Data: []byte("four")}},
shouldErr: []bool{false, false},
sizes: []uint64{207, 411},
},
}
for _, tc := range testCases {
t.Run(tc.name, tc.Run)
}
}
......@@ -8,6 +8,14 @@ import (
// count the tagging info as 200 in terms of buffer size.
const frameOverhead = 200
// frameSize calculates the size of the frame + overhead for
// storing the frame. The sum of the frame size of each frame in
// a channel determines the channel's size. The sum of the channel
// sizes is used for pruning & compared against `MaxChannelBankSize`
func frameSize(frame Frame) uint64 {
return uint64(len(frame.Data)) + frameOverhead
}
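// For example, a frame carrying []byte("four") is accounted as 4 + 200 = 204
// bytes and one carrying []byte("seven__") as 7 + 200 = 207 bytes, so a
// channel buffering both reports a size of 411 (matching the expectations in
// the channel tests above).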
const DerivationVersion0 = 0
// MaxChannelBankSize is the amount of memory space, in number of bytes,
......
......@@ -29,7 +29,6 @@
"@eth-optimism/contracts-bedrock/ds-test",
"@eth-optimism/contracts-bedrock/forge-std",
"@eth-optimism/contracts-bedrock/@rari-capital/solmate",
"@eth-optimism/contracts-bedrock/excessively-safe-call",
"@eth-optimism/contracts-bedrock/@openzeppelin/contracts",
"@eth-optimism/contracts-bedrock/@openzeppelin/contracts-upgradeable",
"@eth-optimism/contracts-periphery/ds-test",
......
......@@ -464,18 +464,18 @@ particular we extract a byte string that corresponds to the concatenation of the
transaction][g-batcher-transaction] belonging to the block. This byte stream encodes a stream of [channel
frames][g-channel-frame] (see the [Batch Submission Wire Format][wire-format] section for more info).
These frames are parsed, then grouped per [channel][g-channel] into a structure we call the *channel bank*.
These frames are parsed, then grouped per [channel][g-channel] into a structure we call the *channel bank*. When
adding frames to the channel, individual frames may be invalid, but the channel does not have a notion of validity
until the channel timeout is up. This leaves open the option of adding partial reads from the channel in the future.
Some frames are ignored:
- Frames where `frame.frame_number <= highest_frame_number`, where `highest_frame_number` is the highest frame number
that was previously encountered for this channel.
- i.e. in case of duplicate frame, the first frame read from L1 is considered canonical.
- Frames with a higher number than that of the final frame of the channel (i.e. the first frame marked with
`frame.is_last == 1`) are ignored.
- These frames could still be written into the channel bank if we haven't seen the final frame yet. But they will
never be read out from the channel bank.
- Frames with a channel ID whose timestamp is higher than that of the L1 block on which the frame appears.
- Frames with the same frame number as an existing frame in the channel (a duplicate). The first seen frame is used.
- Frames that attempt to close an already closed channel, i.e. a second frame with `frame.is_last == 1`, even
if its frame number differs from that of the frame which first closed the channel.
If a frame with `is_last == 1` is added to a channel, all frames with a higher frame number are removed from the
channel.
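A non-normative sketch of these rules as a Go predicate over the `Channel` type from this diff (the channel-ID timestamp check belongs to the channel bank and is omitted); the canonical behaviour is the `AddFrame` implementation above.
// frameIsIgnored reports whether a channel drops an incoming frame. Pruning of
// frames beyond a newly received closing frame is handled separately, when the
// closing frame itself is accepted.
func frameIsIgnored(ch *Channel, frame Frame) bool {
	if _, dup := ch.inputs[uint64(frame.FrameNumber)]; dup {
		return true // duplicate: the first frame read from L1 is canonical
	}
	if frame.IsLast && ch.closed {
		return true // a second closing frame, regardless of its frame number
	}
	if ch.closed && frame.FrameNumber >= ch.endFrameNumber {
		return true // frame at or past the closing frame of a closed channel
	}
	return false
}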
Channels are also recorded in FIFO order in a structure called the *channel queue*. A channel is added to the channel
queue the first time a frame belonging to the channel is seen. This structure is used in the next stage.
......