Commit ddccdfbc authored by mergify[bot], committed by GitHub

Merge branch 'develop' into inphi/proxyd-log

parents b9bb1a98 d11d4ad7
---
'@eth-optimism/l2geth': patch
---
Have l2geth return a NonceTooHigh response if the txn nonce is greater than the expected nonce.
@@ -555,8 +555,10 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
 	}
 	// Ensure the transaction adheres to nonce ordering
 	if rcfg.UsingOVM {
-		if pool.currentState.GetNonce(from) != tx.Nonce() {
+		if pool.currentState.GetNonce(from) > tx.Nonce() {
 			return ErrNonceTooLow
+		} else if pool.currentState.GetNonce(from) < tx.Nonce() {
+			return ErrNonceTooHigh
 		}
 	} else {
 		if pool.currentState.GetNonce(from) > tx.Nonce() {
...
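Viewed in isolation, the new OVM branch is a three-way nonce comparison. A minimal standalone sketch (the error variables stand in for l2geth's ErrNonceTooLow and the newly used ErrNonceTooHigh):

```go
package main

import "errors"

var (
	ErrNonceTooLow  = errors.New("nonce too low")
	ErrNonceTooHigh = errors.New("nonce too high")
)

// checkNonce mirrors the OVM branch above: a tx nonce below the account's
// state nonce is rejected as too low, above it as too high, equal is accepted.
func checkNonce(stateNonce, txNonce uint64) error {
	if stateNonce > txNonce {
		return ErrNonceTooLow
	}
	if stateNonce < txNonce {
		return ErrNonceTooHigh
	}
	return nil
}
```

The non-OVM path keeps the original geth behavior at this point: only too-low nonces are rejected here, while higher nonces are accepted and queued as gapped transactions.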
package main
import (
"errors"
"os"
"github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-chain-ops/eof"
"github.com/ethereum/go-ethereum/log"
)
func main() {
log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd()))))
app := &cli.App{
Name: "eof-crawler",
Usage: "Scan a Geth database for EOF-prefixed contracts",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "db-path",
Usage: "Path to the geth LevelDB",
},
&cli.StringFlag{
Name: "out",
Value: "eof-contracts.json",
Usage: "Path to the output file",
},
},
Action: func(ctx *cli.Context) error {
dbPath := ctx.String("db-path")
if len(dbPath) == 0 {
return errors.New("Must specify a db-path")
}
out := ctx.String("out")
return eof.IndexEOFContracts(dbPath, out)
},
}
if err := app.Run(os.Args); err != nil {
log.Crit("error indexing state", "err", err)
}
}
# `eof-crawler`
Simple CLI tool to scan all accounts in a geth LevelDB for contracts that begin with the EOF prefix.
## Usage
1. Pass the directory of the Geth DB into the tool
```sh
go run ./cmd/eof-crawler/main.go --db-path <db_path> [--out <out_file>]
```
2. Once the indexing has completed, an array of all EOF-prefixed contracts will be written to `eof-contracts.json` or the designated output file. (A sketch of driving the indexer from Go follows below.)
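`IndexEOFContracts` is exported, so the crawler can also be driven from Go rather than via the CLI; a minimal sketch (the chaindata path is a placeholder):

```go
package main

import (
	"log"

	"github.com/ethereum-optimism/optimism/op-chain-ops/eof"
)

func main() {
	// Hypothetical input path; point it at a Geth chaindata directory.
	if err := eof.IndexEOFContracts("/path/to/geth/chaindata", "eof-contracts.json"); err != nil {
		log.Fatalf("indexing failed: %v", err)
	}
}
```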
package eof
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"log"
"os"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
// Account represents an account in the state.
type Account struct {
Balance string `json:"balance"`
Nonce uint64 `json:"nonce"`
Root hexutil.Bytes `json:"root"`
CodeHash hexutil.Bytes `json:"codeHash"`
Code hexutil.Bytes `json:"code,omitempty"`
Address common.Address `json:"address,omitempty"`
SecureKey hexutil.Bytes `json:"key,omitempty"`
}
// emptyCodeHash is the known hash of an account with no code.
var emptyCodeHash = crypto.Keccak256(nil)
// IndexEOFContracts indexes all the EOF contracts in the state trie of the head block
// for the given db and writes them to a JSON file.
func IndexEOFContracts(dbPath string, out string) error {
// Open an existing Ethereum database
db, err := rawdb.NewLevelDBDatabase(dbPath, 16, 16, "", true)
if err != nil {
return fmt.Errorf("Failed to open database: %w", err)
}
stateDB := state.NewDatabase(db)
// Retrieve the head block
hash := rawdb.ReadHeadBlockHash(db)
number := rawdb.ReadHeaderNumber(db, hash)
if number == nil {
return errors.New("Failed to retrieve head block number")
}
head := rawdb.ReadBlock(db, hash, *number)
if head == nil {
return errors.New("Failed to retrieve head block")
}
// Retrieve the state belonging to the head block
st, err := trie.New(trie.StateTrieID(head.Root()), trie.NewDatabase(db))
if err != nil {
return fmt.Errorf("Failed to retrieve state trie: %w", err)
}
log.Printf("Indexing state trie at head block #%d [0x%x]", *number, hash)
// Iterate over the entire account trie to search for EOF-prefixed contracts
start := time.Now()
missingPreimages := uint64(0)
eoas := uint64(0)
nonEofContracts := uint64(0)
eofContracts := make([]Account, 0)
it := trie.NewIterator(st.NodeIterator(nil))
for it.Next() {
// Decode the state account
var data types.StateAccount
err := rlp.DecodeBytes(it.Value, &data)
if err != nil {
return fmt.Errorf("Failed to decode state account: %w", err)
}
// Check to see if the account has any code associated with it before performing
// more reads from the trie & db.
if bytes.Equal(data.CodeHash, emptyCodeHash) {
eoas++
continue
}
// Create a serializable `Account` object
account := Account{
Balance: data.Balance.String(),
Nonce: data.Nonce,
Root: data.Root[:],
CodeHash: data.CodeHash,
SecureKey: it.Key,
}
// Attempt to get the address of the account from the trie
addrBytes := st.Get(it.Key)
if addrBytes == nil {
// Preimage missing! Cannot continue.
missingPreimages++
continue
}
addr := common.BytesToAddress(addrBytes)
// Attempt to get the code of the account from the trie
code, err := stateDB.ContractCode(crypto.Keccak256Hash(addrBytes), common.BytesToHash(data.CodeHash))
if err != nil {
return fmt.Errorf("Could not load code for account %x: %w", addr, err)
}
// Check if the contract's runtime bytecode starts with the EOF prefix.
if len(code) >= 1 && code[0] == 0xEF {
// Append the account to the list of EOF contracts
account.Address = addr
account.Code = code
eofContracts = append(eofContracts, account)
} else {
nonEofContracts++
}
}
// Print finishing status
log.Printf("Indexing done in %v, found %d EOF contracts", time.Since(start), len(eofContracts))
log.Printf("Num missing preimages: %d", missingPreimages)
log.Printf("Non-EOF-prefixed contracts: %d", nonEofContracts)
log.Printf("Accounts with no code (EOAs): %d", eoas)
// Write the EOF contracts to a file
file, err := json.MarshalIndent(eofContracts, "", " ")
if err != nil {
return fmt.Errorf("Cannot marshal EOF contracts: %w", err)
}
err = os.WriteFile(out, file, 0644)
if err != nil {
return fmt.Errorf("Failed to write EOF contracts array to file: %w", err)
}
log.Printf("Wrote list of EOF contracts to `%v`", out)
return nil
}
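Since the output file is just the marshalled `[]Account` slice, it can be read back with the same exported type; a small sketch assuming the default output path:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/ethereum-optimism/optimism/op-chain-ops/eof"
)

func main() {
	raw, err := os.ReadFile("eof-contracts.json")
	if err != nil {
		panic(err)
	}
	var accounts []eof.Account
	if err := json.Unmarshal(raw, &accounts); err != nil {
		panic(err)
	}
	for _, acct := range accounts {
		fmt.Printf("%s: %d bytes of EOF-prefixed code\n", acct.Address, len(acct.Code))
	}
}
```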
 package actions

 import (
+	"context"
+
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/stretchr/testify/require"

 	"github.com/ethereum-optimism/optimism/op-node/eth"
-	"github.com/ethereum-optimism/optimism/op-node/metrics"
 	"github.com/ethereum-optimism/optimism/op-node/rollup"
 	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
 	"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
 )

+// MockL1OriginSelector is a shim to override the origin as sequencer, so we can force it to stay on an older origin.
+type MockL1OriginSelector struct {
+	actual         *driver.L1OriginSelector
+	originOverride eth.L1BlockRef // override which origin gets picked
+}
+
+func (m *MockL1OriginSelector) FindL1Origin(ctx context.Context, l1Head eth.L1BlockRef, l2Head eth.L2BlockRef) (eth.L1BlockRef, error) {
+	if m.originOverride != (eth.L1BlockRef{}) {
+		return m.originOverride, nil
+	}
+	return m.actual.FindL1Origin(ctx, l1Head, l2Head)
+}
+
 // L2Sequencer is an actor that functions like a rollup node,
 // without the full P2P/API/Node stack, but just the derivation state, and simplified driver with sequencing ability.
 type L2Sequencer struct {
 	L2Verifier

 	sequencer *driver.Sequencer

-	l1OriginSelector *driver.L1OriginSelector
-	seqOldOrigin     bool // stay on current L1 origin when sequencing a block, unless forced to adopt the next origin
 	failL2GossipUnsafeBlock error // mock error
+
+	mockL1OriginSelector *MockL1OriginSelector
 }

 func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, eng L2API, cfg *rollup.Config, seqConfDepth uint64) *L2Sequencer {
 	ver := NewL2Verifier(t, log, l1, eng, cfg)
 	attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, eng)
+	l1OriginSelector := &MockL1OriginSelector{
+		actual: driver.NewL1OriginSelector(log, cfg, l1, seqConfDepth),
+	}
 	return &L2Sequencer{
 		L2Verifier:              *ver,
-		sequencer:               driver.NewSequencer(log, cfg, eng, ver.derivation, attrBuilder, metrics.NoopMetrics),
-		l1OriginSelector:        driver.NewL1OriginSelector(log, cfg, l1, seqConfDepth),
-		seqOldOrigin:            false,
+		sequencer:               driver.NewSequencer(log, cfg, ver.derivation, attrBuilder, l1OriginSelector),
+		mockL1OriginSelector:    l1OriginSelector,
 		failL2GossipUnsafeBlock: nil,
 	}
 }
@@ -47,22 +62,7 @@ func (s *L2Sequencer) ActL2StartBlock(t Testing) {
 		return
 	}
-	parent := s.derivation.UnsafeL2Head()
-	var origin eth.L1BlockRef
-	if s.seqOldOrigin {
-		// force old origin, for testing purposes
-		oldOrigin, err := s.l1.L1BlockRefByHash(t.Ctx(), parent.L1Origin.Hash)
-		require.NoError(t, err, "failed to get current origin: %s", parent.L1Origin)
-		origin = oldOrigin
-		s.seqOldOrigin = false // don't repeat this
-	} else {
-		// select origin the real way
-		l1Origin, err := s.l1OriginSelector.FindL1Origin(t.Ctx(), s.l1State.L1Head(), parent)
-		require.NoError(t, err)
-		origin = l1Origin
-	}
-	err := s.sequencer.StartBuildingBlock(t.Ctx(), origin)
+	err := s.sequencer.StartBuildingBlock(t.Ctx(), s.l1State.L1Head())
 	require.NoError(t, err, "failed to start block building")

 	s.l2Building = true
@@ -76,24 +76,21 @@ func (s *L2Sequencer) ActL2EndBlock(t Testing) {
 	}
 	s.l2Building = false

-	payload, err := s.sequencer.CompleteBuildingBlock(t.Ctx())
+	_, err := s.sequencer.CompleteBuildingBlock(t.Ctx())
 	// TODO: there may be legitimate temporary errors here, if we mock engine API RPC-failure.
 	// For advanced tests we can catch those and print a warning instead.
 	require.NoError(t, err)
-	ref, err := derive.PayloadToBlockRef(payload, &s.rollupCfg.Genesis)
-	require.NoError(t, err, "payload must convert to block ref")
-	s.derivation.SetUnsafeHead(ref)

 	// TODO: action-test publishing of payload on p2p
 }
 // ActL2KeepL1Origin makes the sequencer use the current L1 origin, even if the next origin is available.
 func (s *L2Sequencer) ActL2KeepL1Origin(t Testing) {
-	if s.seqOldOrigin { // don't do this twice
-		t.InvalidAction("already decided to keep old L1 origin")
-		return
-	}
-	s.seqOldOrigin = true
+	parent := s.derivation.UnsafeL2Head()
+	// force old origin, for testing purposes
+	oldOrigin, err := s.l1.L1BlockRefByHash(t.Ctx(), parent.L1Origin.Hash)
+	require.NoError(t, err, "failed to get current origin: %s", parent.L1Origin)
+	s.mockL1OriginSelector.originOverride = oldOrigin
 }
 // ActBuildToL1Head builds empty blocks until (incl.) the L1 head becomes the L2 origin
@@ -109,7 +106,7 @@ func (s *L2Sequencer) ActBuildToL1Head(t Testing) {
 func (s *L2Sequencer) ActBuildToL1HeadExcl(t Testing) {
 	for {
 		s.ActL2PipelineFull(t)
-		nextOrigin, err := s.l1OriginSelector.FindL1Origin(t.Ctx(), s.l1State.L1Head(), s.derivation.UnsafeL2Head())
+		nextOrigin, err := s.mockL1OriginSelector.FindL1Origin(t.Ctx(), s.l1State.L1Head(), s.derivation.UnsafeL2Head())
 		require.NoError(t, err)
 		if nextOrigin.Number >= s.l1State.L1Head().Number {
 			break
...
@@ -123,7 +123,7 @@ func RollupNodeMain(ctx *cli.Context) error {
 	if cfg.Heartbeat.Enabled {
 		var peerID string
-		if cfg.P2P == nil {
+		if cfg.P2P.Disabled() {
 			peerID = "disabled"
 		} else {
 			peerID = n.P2P().Host().ID().String()
...
@@ -33,6 +33,7 @@ var DefaultBootnodes = []*enode.Node{
 // SetupP2P provides a host and discovery service for usage in the rollup node.
 type SetupP2P interface {
 	Check() error
+	Disabled() bool
 	// Host creates a libp2p host service. Returns nil, nil if p2p is disabled.
 	Host(log log.Logger, reporter metrics.Reporter) (host.Host, error)
 	// Discovery creates a disc-v5 service. Returns nil, nil, nil if discovery is disabled.
@@ -134,6 +135,10 @@ func (conf *Config) TargetPeers() uint {
 	return conf.PeersLo
 }

+func (conf *Config) Disabled() bool {
+	return conf.DisableP2P
+}
+
 const maxMeshParam = 1000

 func (conf *Config) Check() error {
...
@@ -63,3 +63,7 @@ func (p *Prepared) Discovery(log log.Logger, rollupCfg *rollup.Config, tcpPort u
 func (p *Prepared) ConfigureGossip(params *pubsub.GossipSubParams) []pubsub.Option {
 	return nil
 }
+
+func (p *Prepared) Disabled() bool {
+	return false
+}
@@ -33,6 +33,31 @@ type Engine interface {
 	SystemConfigL2Fetcher
 }

+// EngineState provides a read-only interface of the forkchoice state properties of the L2 Engine.
+type EngineState interface {
+	Finalized() eth.L2BlockRef
+	UnsafeL2Head() eth.L2BlockRef
+	SafeL2Head() eth.L2BlockRef
+}
+
+// EngineControl enables other components to build blocks with the Engine,
+// while keeping the forkchoice state and payload-id management internal to
+// avoid state inconsistencies between different users of the EngineControl.
+type EngineControl interface {
+	EngineState
+
+	// StartPayload requests the engine to start building a block with the given attributes.
+	// If updateSafe, the resulting block will be marked as a safe block.
+	StartPayload(ctx context.Context, parent eth.L2BlockRef, attrs *eth.PayloadAttributes, updateSafe bool) (errType BlockInsertionErrType, err error)
+	// ConfirmPayload requests the engine to complete the current block. If no block is being built, or if it fails, an error is returned.
+	ConfirmPayload(ctx context.Context) (out *eth.ExecutionPayload, errTyp BlockInsertionErrType, err error)
+	// CancelPayload requests the engine to stop building the current block without making it canonical.
+	// This is optional, as the engine expires building jobs that are left uncompleted, but can still save resources.
+	CancelPayload(ctx context.Context, force bool) error
+	// BuildingPayload indicates if a payload is being built, and onto which block it is being built, and whether or not it is a safe payload.
+	BuildingPayload() (onto eth.L2BlockRef, id eth.PayloadID, safe bool)
+}
+
 // Max memory used for buffering unsafe payloads
 const maxUnsafePayloadsMemory = 500 * 1024 * 1024
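The intended call pattern for EngineControl is start, then confirm, with cancel as the bail-out; the sketch below (not part of this diff, error-type handling elided) shows the sequence a caller such as `forceNextSafeAttributes` or the sequencer follows:

```go
package example

import (
	"context"

	"github.com/ethereum-optimism/optimism/op-node/eth"
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)

// buildBlock drives one build-and-seal cycle through an EngineControl.
func buildBlock(ctx context.Context, ec derive.EngineControl, parent eth.L2BlockRef,
	attrs *eth.PayloadAttributes, updateSafe bool) (*eth.ExecutionPayload, error) {
	if _, err := ec.StartPayload(ctx, parent, attrs, updateSafe); err != nil {
		return nil, err
	}
	payload, _, err := ec.ConfirmPayload(ctx)
	if err != nil {
		// Drop the half-built block instead of leaving the job dangling.
		_ = ec.CancelPayload(ctx, true)
		return nil, err
	}
	return payload, nil
}
```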
@@ -68,6 +93,10 @@ type EngineQueue struct {
 	safeHead   eth.L2BlockRef
 	unsafeHead eth.L2BlockRef

+	buildingOnto eth.L2BlockRef
+	buildingID   eth.PayloadID
+	buildingSafe bool
+
 	// Track when the rollup node changes the forkchoice without engine action,
 	// e.g. on a reset after a reorg, or after consolidating a block.
 	// This update may repeat if the engine returns a temporary error.
@@ -91,6 +120,8 @@ type EngineQueue struct {
 	l1Fetcher L1Fetcher
 }

+var _ EngineControl = (*EngineQueue)(nil)
+
 // NewEngineQueue creates a new EngineQueue, which should be Reset(origin) before use.
 func NewEngineQueue(log log.Logger, cfg *rollup.Config, engine Engine, metrics Metrics, prev NextAttributesProvider, l1Fetcher L1Fetcher) *EngineQueue {
 	return &EngineQueue{
@@ -416,13 +447,11 @@ func (eq *EngineQueue) forceNextSafeAttributes(ctx context.Context) error {
 	if len(eq.safeAttributes) == 0 {
 		return nil
 	}
-	fc := eth.ForkchoiceState{
-		HeadBlockHash:      eq.safeHead.Hash,
-		SafeBlockHash:      eq.safeHead.Hash,
-		FinalizedBlockHash: eq.finalized.Hash,
-	}
 	attrs := eq.safeAttributes[0]
-	payload, errType, err := InsertHeadBlock(ctx, eq.log, eq.engine, fc, attrs, true)
+	errType, err := eq.StartPayload(ctx, eq.safeHead, attrs, true)
+	if err == nil {
+		_, errType, err = eq.ConfirmPayload(ctx)
+	}
 	if err != nil {
 		switch errType {
 		case BlockInsertTemporaryErr:
@@ -457,21 +486,89 @@ func (eq *EngineQueue) forceNextSafeAttributes(ctx context.Context) error {
 			return NewCriticalError(fmt.Errorf("unknown InsertHeadBlock error type %d: %w", errType, err))
 		}
 	}
+	eq.safeAttributes = eq.safeAttributes[1:]
+	eq.logSyncProgress("processed safe block derived from L1")
+	return nil
+}
+
+func (eq *EngineQueue) StartPayload(ctx context.Context, parent eth.L2BlockRef, attrs *eth.PayloadAttributes, updateSafe bool) (errType BlockInsertionErrType, err error) {
+	if eq.buildingID != (eth.PayloadID{}) {
+		eq.log.Warn("did not finish previous block building, starting new building now", "prev_onto", eq.buildingOnto, "prev_payload_id", eq.buildingID, "new_onto", parent)
+		// TODO: maybe worth it to force-cancel the old payload ID here.
+	}
+	fc := eth.ForkchoiceState{
+		HeadBlockHash:      parent.Hash,
+		SafeBlockHash:      eq.safeHead.Hash,
+		FinalizedBlockHash: eq.finalized.Hash,
+	}
+	id, errTyp, err := StartPayload(ctx, eq.engine, fc, attrs)
+	if err != nil {
+		return errTyp, err
+	}
+	eq.buildingID = id
+	eq.buildingSafe = updateSafe
+	eq.buildingOnto = parent
+	return BlockInsertOK, nil
+}
+
+func (eq *EngineQueue) ConfirmPayload(ctx context.Context) (out *eth.ExecutionPayload, errTyp BlockInsertionErrType, err error) {
+	if eq.buildingID == (eth.PayloadID{}) {
+		return nil, BlockInsertPrestateErr, fmt.Errorf("cannot complete payload building: not currently building a payload")
+	}
+	if eq.buildingOnto.Hash != eq.unsafeHead.Hash { // E.g. when safe-attributes consolidation fails, it will drop the existing work.
+		eq.log.Warn("engine is building block that reorgs previous unsafe head", "onto", eq.buildingOnto, "unsafe", eq.unsafeHead)
+	}
+	fc := eth.ForkchoiceState{
+		HeadBlockHash:      common.Hash{}, // gets overridden
+		SafeBlockHash:      eq.safeHead.Hash,
+		FinalizedBlockHash: eq.finalized.Hash,
+	}
+	payload, errTyp, err := ConfirmPayload(ctx, eq.log, eq.engine, fc, eq.buildingID, eq.buildingSafe)
+	if err != nil {
+		return nil, errTyp, fmt.Errorf("failed to complete building on top of L2 chain %s, id: %s, error (%d): %w", eq.buildingOnto, eq.buildingID, errTyp, err)
+	}
 	ref, err := PayloadToBlockRef(payload, &eq.cfg.Genesis)
 	if err != nil {
-		return NewTemporaryError(fmt.Errorf("failed to decode L2 block ref from payload: %w", err))
+		return nil, BlockInsertPayloadErr, NewResetError(fmt.Errorf("failed to decode L2 block ref from payload: %w", err))
 	}
-	eq.safeHead = ref
 	eq.unsafeHead = ref
-	eq.metrics.RecordL2Ref("l2_safe", ref)
 	eq.metrics.RecordL2Ref("l2_unsafe", ref)
-	eq.safeAttributes = eq.safeAttributes[1:]
-	eq.postProcessSafeL2()
-	eq.logSyncProgress("processed safe block derived from L1")
+
+	if eq.buildingSafe {
+		eq.safeHead = ref
+		eq.postProcessSafeL2()
+		eq.metrics.RecordL2Ref("l2_safe", ref)
+	}
+	eq.resetBuildingState()
+	return payload, BlockInsertOK, nil
+}
+
+func (eq *EngineQueue) CancelPayload(ctx context.Context, force bool) error {
+	// the building job gets wrapped up as soon as the payload is retrieved, there's no explicit cancel in the Engine API
+	eq.log.Error("cancelling old block sealing job", "payload", eq.buildingID)
+	_, err := eq.engine.GetPayload(ctx, eq.buildingID)
+	if err != nil {
+		eq.log.Error("failed to cancel block building job", "payload", eq.buildingID, "err", err)
+		if !force {
+			return err
+		}
+	}
+	eq.resetBuildingState()
 	return nil
 }

+func (eq *EngineQueue) BuildingPayload() (onto eth.L2BlockRef, id eth.PayloadID, safe bool) {
+	return eq.buildingOnto, eq.buildingID, eq.buildingSafe
+}
+
+func (eq *EngineQueue) resetBuildingState() {
+	eq.buildingID = eth.PayloadID{}
+	eq.buildingOnto = eth.L2BlockRef{}
+	eq.buildingSafe = false
+}
+
 // ResetStep Walks the L2 chain backwards until it finds an L2 block whose L1 origin is canonical.
 // The unsafe head is set to the head of the L2 chain, unless the existing safe head is not canonical.
 func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.SystemConfig) error {
@@ -517,6 +614,7 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System
 	eq.unsafeHead = unsafe
 	eq.safeHead = safe
 	eq.finalized = finalized
+	eq.resetBuildingState()
 	eq.needForkchoiceUpdate = true
 	eq.finalityData = eq.finalityData[:0]
 	// note: we do not clear the unsafe payloads queue; if the payloads are not applicable anymore the parent hash checks will clear out the old payloads.
...
@@ -79,19 +79,6 @@ const (
 	BlockInsertPayloadErr
 )

-// InsertHeadBlock creates, executes, and inserts the specified block as the head block.
-// It first uses the given FC to start the block creation process and then after the payload is executed,
-// sets the FC to the same safe and finalized hashes, but updates the head hash to the new block.
-// If updateSafe is true, the head block is considered to be the safe head as well as the head.
-// It returns the payload, an RPC error (if the payload might still be valid), and a payload error (if the payload was not valid)
-func InsertHeadBlock(ctx context.Context, log log.Logger, eng Engine, fc eth.ForkchoiceState, attrs *eth.PayloadAttributes, updateSafe bool) (out *eth.ExecutionPayload, errTyp BlockInsertionErrType, err error) {
-	id, errTyp, err := StartPayload(ctx, eng, fc, attrs)
-	if err != nil {
-		return nil, errTyp, err
-	}
-	return ConfirmPayload(ctx, log, eng, fc, id, updateSafe)
-}
-
 // StartPayload starts an execution payload building process in the provided Engine, with the given attributes.
 // The severity of the error is distinguished to determine whether the same payload attributes may be re-attempted later.
 func StartPayload(ctx context.Context, eng Engine, fc eth.ForkchoiceState, attrs *eth.PayloadAttributes) (id eth.PayloadID, errType BlockInsertionErrType, err error) {
...
@@ -31,6 +31,8 @@ type ResetableStage interface {
 }

 type EngineQueueStage interface {
+	EngineControl
+
 	FinalizedL1() eth.L1BlockRef
 	Finalized() eth.L2BlockRef
 	UnsafeL2Head() eth.L2BlockRef
@@ -130,8 +132,20 @@ func (dp *DerivationPipeline) UnsafeL2Head() eth.L2BlockRef {
 	return dp.eng.UnsafeL2Head()
 }

-func (dp *DerivationPipeline) SetUnsafeHead(head eth.L2BlockRef) {
-	dp.eng.SetUnsafeHead(head)
+func (dp *DerivationPipeline) StartPayload(ctx context.Context, parent eth.L2BlockRef, attrs *eth.PayloadAttributes, updateSafe bool) (errType BlockInsertionErrType, err error) {
+	return dp.eng.StartPayload(ctx, parent, attrs, updateSafe)
+}
+
+func (dp *DerivationPipeline) ConfirmPayload(ctx context.Context) (out *eth.ExecutionPayload, errTyp BlockInsertionErrType, err error) {
+	return dp.eng.ConfirmPayload(ctx)
+}
+
+func (dp *DerivationPipeline) CancelPayload(ctx context.Context, force bool) error {
+	return dp.eng.CancelPayload(ctx, force)
+}
+
+func (dp *DerivationPipeline) BuildingPayload() (onto eth.L2BlockRef, id eth.PayloadID, safe bool) {
+	return dp.eng.BuildingPayload()
 }

 // AddUnsafePayload schedules an execution payload to be processed, ahead of deriving it from L1
...
@@ -14,7 +14,6 @@ import (
 type Metrics interface {
 	RecordPipelineReset()
-	RecordSequencingError()
 	RecordPublishingError()
 	RecordDerivationError()
@@ -28,9 +27,8 @@ type Metrics interface {
 	SetDerivationIdle(idle bool)
 	RecordL1ReorgDepth(d uint64)

-	CountSequencedTxs(count int)
-
-	SequencerMetrics
+	EngineMetrics
 }

 type L1Chain interface {
@@ -48,7 +46,6 @@ type L2Chain interface {
 type DerivationPipeline interface {
 	Reset()
 	Step(ctx context.Context) error
-	SetUnsafeHead(head eth.L2BlockRef)
 	AddUnsafePayload(payload *eth.ExecutionPayload)
 	Finalize(ref eth.L1BlockRef)
 	FinalizedL1() eth.L1BlockRef
@@ -68,14 +65,12 @@ type L1StateIface interface {
 	L1Finalized() eth.L1BlockRef
 }

-type L1OriginSelectorIface interface {
-	FindL1Origin(ctx context.Context, l1Head eth.L1BlockRef, l2Head eth.L2BlockRef) (eth.L1BlockRef, error)
-}
-
 type SequencerIface interface {
-	StartBuildingBlock(ctx context.Context, l1Origin eth.L1BlockRef) error
+	StartBuildingBlock(ctx context.Context, l1Head eth.L1BlockRef) error
 	CompleteBuildingBlock(ctx context.Context) (*eth.ExecutionPayload, error)
-	PlanNextSequencerAction(sequenceErr error) (delay time.Duration, seal bool, onto eth.BlockID)
+	PlanNextSequencerAction() time.Duration
+	RunNextSequencerAction(ctx context.Context, l1Head eth.L1BlockRef) *eth.ExecutionPayload
+	BuildingOnto() eth.L2BlockRef
 }

 type Network interface {
@@ -90,7 +85,10 @@ func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, ne
 	verifConfDepth := NewConfDepth(driverCfg.VerifierConfDepth, l1State.L1Head, l1)
 	derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l2, metrics)
 	attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, l2)
-	sequencer := NewSequencer(log, cfg, l2, derivationPipeline, attrBuilder, metrics)
+	engine := derivationPipeline
+	meteredEngine := NewMeteredEngine(cfg, engine, metrics, log)
+	sequencer := NewSequencer(log, cfg, meteredEngine, attrBuilder, findL1Origin)
+
 	return &Driver{
 		l1State:    l1State,
 		derivation: derivationPipeline,
@@ -106,7 +104,6 @@ func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, ne
 		snapshotLog:      snapshotLog,
 		l1:               l1,
 		l2:               l2,
-		l1OriginSelector: findL1Origin,
 		sequencer:        sequencer,
 		network:          network,
 		metrics:          metrics,
...
package driver
import (
"context"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)
type EngineMetrics interface {
RecordSequencingError()
CountSequencedTxs(count int)
RecordSequencerBuildingDiffTime(duration time.Duration)
RecordSequencerSealingTime(duration time.Duration)
}
// MeteredEngine wraps an EngineControl and adds metrics such as block building time diff and sealing time
type MeteredEngine struct {
inner derive.EngineControl
cfg *rollup.Config
metrics EngineMetrics
log log.Logger
buildingStartTime time.Time
}
// MeteredEngine implements derive.EngineControl
var _ derive.EngineControl = (*MeteredEngine)(nil)
func NewMeteredEngine(cfg *rollup.Config, inner derive.EngineControl, metrics EngineMetrics, log log.Logger) *MeteredEngine {
return &MeteredEngine{
inner: inner,
cfg: cfg,
metrics: metrics,
log: log,
}
}
func (m *MeteredEngine) Finalized() eth.L2BlockRef {
return m.inner.Finalized()
}
func (m *MeteredEngine) UnsafeL2Head() eth.L2BlockRef {
return m.inner.UnsafeL2Head()
}
func (m *MeteredEngine) SafeL2Head() eth.L2BlockRef {
return m.inner.SafeL2Head()
}
func (m *MeteredEngine) StartPayload(ctx context.Context, parent eth.L2BlockRef, attrs *eth.PayloadAttributes, updateSafe bool) (errType derive.BlockInsertionErrType, err error) {
m.buildingStartTime = time.Now()
errType, err = m.inner.StartPayload(ctx, parent, attrs, updateSafe)
if err != nil {
m.metrics.RecordSequencingError()
}
return errType, err
}
func (m *MeteredEngine) ConfirmPayload(ctx context.Context) (out *eth.ExecutionPayload, errTyp derive.BlockInsertionErrType, err error) {
sealingStart := time.Now()
// Actually execute the block and add it to the head of the chain.
payload, errType, err := m.inner.ConfirmPayload(ctx)
if err != nil {
m.metrics.RecordSequencingError()
return payload, errType, err
}
now := time.Now()
sealTime := now.Sub(sealingStart)
buildTime := now.Sub(m.buildingStartTime)
m.metrics.RecordSequencerSealingTime(sealTime)
m.metrics.RecordSequencerBuildingDiffTime(buildTime - time.Duration(m.cfg.BlockTime)*time.Second)
m.metrics.CountSequencedTxs(len(payload.Transactions))
ref := m.inner.UnsafeL2Head()
m.log.Debug("Processed new L2 block", "l2_unsafe", ref, "l1_origin", ref.L1Origin,
"txs", len(payload.Transactions), "time", ref.Time, "seal_time", sealTime, "build_time", buildTime)
return payload, errType, err
}
func (m *MeteredEngine) CancelPayload(ctx context.Context, force bool) error {
return m.inner.CancelPayload(ctx, force)
}
func (m *MeteredEngine) BuildingPayload() (onto eth.L2BlockRef, id eth.PayloadID, safe bool) {
return m.inner.BuildingPayload()
}
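MeteredEngine is a plain decorator: every call is forwarded to the wrapped EngineControl, with build/seal timings and transaction counts recorded around ConfirmPayload. A hedged sketch of the wrapping over a made-up stub control (the stub, its return values, and the BlockTime are illustrative only; NoopMetrics is assumed to satisfy EngineMetrics, as it already satisfies the driver's Metrics interface in the tests below):

```go
package main

import (
	"context"

	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/eth"
	"github.com/ethereum-optimism/optimism/op-node/metrics"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
	"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
)

// stubEngine is a do-nothing EngineControl, just enough to show the wrapping.
type stubEngine struct{}

func (stubEngine) Finalized() eth.L2BlockRef    { return eth.L2BlockRef{} }
func (stubEngine) UnsafeL2Head() eth.L2BlockRef { return eth.L2BlockRef{} }
func (stubEngine) SafeL2Head() eth.L2BlockRef   { return eth.L2BlockRef{} }
func (stubEngine) StartPayload(ctx context.Context, parent eth.L2BlockRef, attrs *eth.PayloadAttributes, updateSafe bool) (derive.BlockInsertionErrType, error) {
	return derive.BlockInsertOK, nil
}
func (stubEngine) ConfirmPayload(ctx context.Context) (*eth.ExecutionPayload, derive.BlockInsertionErrType, error) {
	return &eth.ExecutionPayload{}, derive.BlockInsertOK, nil
}
func (stubEngine) CancelPayload(ctx context.Context, force bool) error { return nil }
func (stubEngine) BuildingPayload() (eth.L2BlockRef, eth.PayloadID, bool) {
	return eth.L2BlockRef{}, eth.PayloadID{}, false
}

func main() {
	cfg := &rollup.Config{BlockTime: 2}
	metered := driver.NewMeteredEngine(cfg, stubEngine{}, metrics.NoopMetrics, log.New())
	// Timings and sequenced-tx counts are recorded around this call.
	_, _, _ = metered.ConfirmPayload(context.Background())
}
```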
@@ -25,8 +25,6 @@ type SyncStatus = eth.SyncStatus
 // sealingDuration defines the expected time it takes to seal the block
 const sealingDuration = time.Millisecond * 50

-var UninitializedL1StateErr = errors.New("the L1 Head in L1 State is not initialized yet")
-
 type Driver struct {
 	l1State L1StateIface
@@ -71,11 +69,10 @@ type Driver struct {
 	// L2 Signals:
 	unsafeL2Payloads chan *eth.ExecutionPayload

 	l1 L1Chain
 	l2 L2Chain
-	l1OriginSelector L1OriginSelectorIface
-	sequencer        SequencerIface
-	network          Network // may be nil, network for is optional
+	sequencer SequencerIface
+	network   Network // may be nil, network for is optional

 	metrics Metrics
 	log     log.Logger
@@ -142,75 +139,6 @@ func (s *Driver) OnUnsafeL2Payload(ctx context.Context, payload *eth.ExecutionPa
 	}
 }

-// startNewL2Block starts sequencing a new L2 block on top of the unsafe L2 Head.
-func (s *Driver) startNewL2Block(ctx context.Context) error {
-	l2Head := s.derivation.UnsafeL2Head()
-	l1Head := s.l1State.L1Head()
-	if l1Head == (eth.L1BlockRef{}) {
-		return UninitializedL1StateErr
-	}
-
-	// Figure out which L1 origin block we're going to be building on top of.
-	l1Origin, err := s.l1OriginSelector.FindL1Origin(ctx, l1Head, l2Head)
-	if err != nil {
-		s.log.Error("Error finding next L1 Origin", "err", err)
-		return err
-	}
-
-	// Rollup is configured to not start producing blocks until a specific L1 block has been
-	// reached. Don't produce any blocks until we're at that genesis block.
-	if l1Origin.Number < s.config.Genesis.L1.Number {
-		s.log.Info("Skipping block production because the next L1 Origin is behind the L1 genesis", "next", l1Origin.ID(), "genesis", s.config.Genesis.L1)
-		return fmt.Errorf("the L1 origin %s cannot be before genesis at %s", l1Origin, s.config.Genesis.L1)
-	}
-
-	// Should never happen. Sequencer will halt if we get into this situation somehow.
-	nextL2Time := l2Head.Time + s.config.BlockTime
-	if nextL2Time < l1Origin.Time {
-		s.log.Error("Cannot build L2 block for time before L1 origin",
-			"l2Unsafe", l2Head, "nextL2Time", nextL2Time, "l1Origin", l1Origin, "l1OriginTime", l1Origin.Time)
-		return fmt.Errorf("cannot build L2 block on top %s for time %d before L1 origin %s at time %d",
-			l2Head, nextL2Time, l1Origin, l1Origin.Time)
-	}
-
-	// Start creating the new block.
-	return s.sequencer.StartBuildingBlock(ctx, l1Origin)
-}
-
-// completeNewBlock completes a previously started L2 block sequencing job.
-func (s *Driver) completeNewBlock(ctx context.Context) error {
-	payload, err := s.sequencer.CompleteBuildingBlock(ctx)
-	if err != nil {
-		s.metrics.RecordSequencingError()
-		s.log.Error("Failed to seal block as sequencer", "err", err)
-		return err
-	}
-
-	// Generate an L2 block ref from the payload.
-	newUnsafeL2Head, err := derive.PayloadToBlockRef(payload, &s.config.Genesis)
-	if err != nil {
-		s.metrics.RecordSequencingError()
-		s.log.Error("Sequenced payload cannot be transformed into valid L2 block reference", "err", err)
-		return fmt.Errorf("sequenced payload cannot be transformed into valid L2 block reference: %w", err)
-	}
-
-	// Update our L2 head block based on the new unsafe block we just generated.
-	s.derivation.SetUnsafeHead(newUnsafeL2Head)
-	s.log.Info("Sequenced new l2 block", "l2_unsafe", newUnsafeL2Head, "l1_origin", newUnsafeL2Head.L1Origin, "txs", len(payload.Transactions), "time", newUnsafeL2Head.Time)
-	s.metrics.CountSequencedTxs(len(payload.Transactions))
-
-	if s.network != nil {
-		if err := s.network.PublishL2Payload(ctx, payload); err != nil {
-			s.log.Warn("failed to publish newly created block", "id", payload.ID(), "err", err)
-			s.metrics.RecordPublishingError()
-			// publishing of unsafe data via p2p is optional. Errors are not severe enough to change/halt sequencing but should be logged and metered.
-		}
-	}
-	return nil
-}
-
 // the eventLoop responds to L1 changes and internal timers to produce L2 blocks.
 func (s *Driver) eventLoop() {
 	defer s.wg.Done()
@@ -259,34 +187,23 @@ func (s *Driver) eventLoop() {
 	// L1 chain that we need to handle.
 	reqStep()

-	blockTime := time.Duration(s.config.BlockTime) * time.Second
-	var sequenceErr error
-	var sequenceErrTime time.Time
 	sequencerTimer := time.NewTimer(0)
 	var sequencerCh <-chan time.Time
-	var sequencingPlannedOnto eth.BlockID
-	var sequencerSealNext bool
 	planSequencerAction := func() {
-		delay, seal, onto := s.sequencer.PlanNextSequencerAction(sequenceErr)
-		if sequenceErr != nil && time.Since(sequenceErrTime) > delay {
-			sequenceErr = nil
-		}
+		delay := s.sequencer.PlanNextSequencerAction()
 		sequencerCh = sequencerTimer.C
 		if len(sequencerCh) > 0 { // empty if not already drained before resetting
 			<-sequencerCh
 		}
 		sequencerTimer.Reset(delay)
-		sequencingPlannedOnto = onto
-		sequencerSealNext = seal
 	}

 	for {
-		// If we are sequencing, update the trigger for the next sequencer action.
+		// If we are sequencing, and the L1 state is ready, update the trigger for the next sequencer action.
 		// This may adjust at any time based on fork-choice changes or previous errors.
-		if s.driverConfig.SequencerEnabled && !s.driverConfig.SequencerStopped {
+		if s.driverConfig.SequencerEnabled && !s.driverConfig.SequencerStopped && s.l1State.L1Head() != (eth.L1BlockRef{}) {
 			// update sequencer time if the head changed
-			if sequencingPlannedOnto != s.derivation.UnsafeL2Head().ID() {
+			if s.sequencer.BuildingOnto().ID() != s.derivation.UnsafeL2Head().ID() {
 				planSequencerAction()
 			}
 		} else {
@@ -295,22 +212,14 @@ func (s *Driver) eventLoop() {
 		select {
 		case <-sequencerCh:
-			s.log.Info("sequencing now!", "seal", sequencerSealNext, "idle_derivation", s.idleDerivation)
-			if sequencerSealNext {
-				// try to seal the current block task, and allow it to take up to 3 block times.
-				// If this fails we will simply start a new block building job.
-				ctx, cancel := context.WithTimeout(ctx, 3*blockTime)
-				sequenceErr = s.completeNewBlock(ctx)
-				cancel()
-			} else {
-				// Start the block building, don't allow the starting of sequencing to get stuck for more the time of 1 block.
-				ctx, cancel := context.WithTimeout(ctx, blockTime)
-				sequenceErr = s.startNewL2Block(ctx)
-				cancel()
-			}
-			if sequenceErr != nil {
-				s.log.Error("sequencing error", "err", sequenceErr)
-				sequenceErrTime = time.Now()
+			payload := s.sequencer.RunNextSequencerAction(ctx, s.l1State.L1Head())
+			if s.network != nil && payload != nil {
+				// Publishing of unsafe data via p2p is optional.
+				// Errors are not severe enough to change/halt sequencing but should be logged and metered.
+				if err := s.network.PublishL2Payload(ctx, payload); err != nil {
+					s.log.Warn("failed to publish newly created block", "id", payload.ID(), "err", err)
+					s.metrics.RecordPublishingError()
+				}
 			}
 			planSequencerAction() // schedule the next sequencer action to keep the sequencing looping
 		case payload := <-s.unsafeL2Payloads:
@@ -386,8 +295,8 @@ func (s *Driver) eventLoop() {
 			} else {
 				s.log.Info("Sequencer has been started")
 				s.driverConfig.SequencerStopped = false
-				sequencingPlannedOnto = eth.BlockID{}
 				close(resp.err)
+				planSequencerAction() // resume sequencing
 			}
 		case respCh := <-s.stopSequencer:
 			if s.driverConfig.SequencerStopped {
...
// On develop
package driver
import (
"context"
"errors"
"math/big"
"math/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/testutils"
)
type TestDummyOutputImpl struct {
willError bool
cfg *rollup.Config
l1Origin eth.L1BlockRef
l2Head eth.L2BlockRef
}
func (d *TestDummyOutputImpl) PlanNextSequencerAction(sequenceErr error) (delay time.Duration, seal bool, onto eth.BlockID) {
return 0, d.l1Origin != (eth.L1BlockRef{}), d.l2Head.ParentID()
}
func (d *TestDummyOutputImpl) StartBuildingBlock(ctx context.Context, l1Origin eth.L1BlockRef) error {
d.l1Origin = l1Origin
return nil
}
func (d *TestDummyOutputImpl) CompleteBuildingBlock(ctx context.Context) (*eth.ExecutionPayload, error) {
// If we're meant to error, return one
if d.willError {
return nil, errors.New("the TestDummyOutputImpl.createNewBlock operation failed")
}
info := &testutils.MockBlockInfo{
InfoHash: d.l1Origin.Hash,
InfoParentHash: d.l1Origin.ParentHash,
InfoCoinbase: common.Address{},
InfoRoot: common.Hash{},
InfoNum: d.l1Origin.Number,
InfoTime: d.l1Origin.Time,
InfoMixDigest: [32]byte{},
InfoBaseFee: big.NewInt(123),
InfoReceiptRoot: common.Hash{},
}
infoTx, err := derive.L1InfoDepositBytes(d.l2Head.SequenceNumber, info, eth.SystemConfig{})
if err != nil {
panic(err)
}
payload := eth.ExecutionPayload{
ParentHash: d.l2Head.Hash,
FeeRecipient: common.Address{},
StateRoot: eth.Bytes32{},
ReceiptsRoot: eth.Bytes32{},
LogsBloom: eth.Bytes256{},
PrevRandao: eth.Bytes32{},
BlockNumber: eth.Uint64Quantity(d.l2Head.Number + 1),
GasLimit: 0,
GasUsed: 0,
Timestamp: eth.Uint64Quantity(d.l2Head.Time + d.cfg.BlockTime),
ExtraData: nil,
BaseFeePerGas: eth.Uint256Quantity{},
BlockHash: common.Hash{123},
Transactions: []eth.Data{infoTx},
}
return &payload, nil
}
var _ SequencerIface = (*TestDummyOutputImpl)(nil)
type TestDummyDerivationPipeline struct {
DerivationPipeline
l2Head eth.L2BlockRef
l2SafeHead eth.L2BlockRef
l2Finalized eth.L2BlockRef
}
func (d TestDummyDerivationPipeline) Reset() {}
func (d TestDummyDerivationPipeline) Step(ctx context.Context) error { return nil }
func (d TestDummyDerivationPipeline) SetUnsafeHead(head eth.L2BlockRef) {}
func (d TestDummyDerivationPipeline) AddUnsafePayload(payload *eth.ExecutionPayload) {}
func (d TestDummyDerivationPipeline) Finalized() eth.L2BlockRef { return d.l2Finalized }
func (d TestDummyDerivationPipeline) SafeL2Head() eth.L2BlockRef { return d.l2SafeHead }
func (d TestDummyDerivationPipeline) UnsafeL2Head() eth.L2BlockRef { return d.l2Head }
type TestDummyL1OriginSelector struct {
retval eth.L1BlockRef
}
func (l TestDummyL1OriginSelector) FindL1Origin(ctx context.Context, l1Head eth.L1BlockRef, l2Head eth.L2BlockRef) (eth.L1BlockRef, error) {
return l.retval, nil
}
// TestRejectCreateBlockBadTimestamp tests that a block creation with invalid timestamps will be caught.
// This does not test:
// - The findL1Origin call (it is hardcoded to be the head)
// - The outputInterface used to create a new block from a given payload.
// - The DerivationPipeline setting unsafe head (a mock provider is used to pretend to set it)
// - Metrics (only mocked enough to let the method proceed)
// - Publishing (network is set to nil so publishing won't occur)
func TestRejectCreateBlockBadTimestamp(t *testing.T) {
// Create our random provider
rng := rand.New(rand.NewSource(rand.Int63()))
// Create our context for methods to execute under
ctx := context.Background()
// Create our fake L1/L2 heads and link them accordingly
l1HeadRef := testutils.RandomBlockRef(rng)
l2HeadRef := testutils.RandomL2BlockRef(rng)
l2l1OriginBlock := l1HeadRef
l2HeadRef.L1Origin = l2l1OriginBlock.ID()
// Create a rollup config
cfg := rollup.Config{
BlockTime: uint64(60),
Genesis: rollup.Genesis{
L1: l1HeadRef.ID(),
L2: l2HeadRef.ID(),
L2Time: 0x7000, // dummy value
},
}
// Patch our timestamp so we fail
l2HeadRef.Time = l2l1OriginBlock.Time - (cfg.BlockTime * 2)
// Create our outputter
outputProvider := &TestDummyOutputImpl{cfg: &cfg, l2Head: l2HeadRef, willError: false}
// Create our state
s := Driver{
l1State: &L1State{
l1Head: l1HeadRef,
log: log.New(),
metrics: metrics.NoopMetrics,
},
log: log.New(),
l1OriginSelector: TestDummyL1OriginSelector{retval: l1HeadRef},
config: &cfg,
sequencer: outputProvider,
derivation: TestDummyDerivationPipeline{},
metrics: metrics.NoopMetrics,
}
// Create a new block
// - L2Head's L1Origin, its timestamp should be greater than L1 genesis.
// - L2Head timestamp + BlockTime should be greater than or equal to the L1 Time.
err := s.startNewL2Block(ctx)
if err == nil {
err = s.completeNewBlock(ctx)
}
// Verify the L1Origin's block number is greater than L1 genesis in our config.
if l2l1OriginBlock.Number < s.config.Genesis.L1.Number {
require.NoError(t, err, "L1Origin block number should be greater than the L1 genesis block number")
}
// Verify the new L2 block to create will have a time stamp equal or newer than our L1 origin block we derive from.
if l2HeadRef.Time+cfg.BlockTime < l2l1OriginBlock.Time {
// If not, we expect a specific error.
// TODO: This isn't the cleanest, we should construct + compare the whole error message.
require.NotNil(t, err)
require.Contains(t, err.Error(), "cannot build L2 block on top")
require.Contains(t, err.Error(), "for time")
require.Contains(t, err.Error(), "before L1 origin")
return
}
// If we expected the outputter to error, capture that here
if outputProvider.willError {
require.NotNil(t, err, "outputInterface failed to createNewBlock, so createNewL2Block should also have failed")
return
}
// Otherwise we should have no error.
require.NoError(t, err, "error raised in TestRejectCreateBlockBadTimestamp")
}
// FuzzRejectCreateBlockBadTimestamp is a property test derived from the TestRejectCreateBlockBadTimestamp unit test.
// It fuzzes timestamps and block times to find a configuration to violate error checking.
func FuzzRejectCreateBlockBadTimestamp(f *testing.F) {
f.Fuzz(func(t *testing.T, randSeed int64, l2Time uint64, blockTime uint64, forceOutputFail bool, currentL2HeadTime uint64) {
// Create our random provider
rng := rand.New(rand.NewSource(randSeed))
// Create our context for methods to execute under
ctx := context.Background()
// Create our fake L1/L2 heads and link them accordingly
l1HeadRef := testutils.RandomBlockRef(rng)
l2HeadRef := testutils.RandomL2BlockRef(rng)
l2l1OriginBlock := l1HeadRef
l2HeadRef.L1Origin = l2l1OriginBlock.ID()
// TODO: Cap our block time so it doesn't overflow
if blockTime > 0x100000 {
blockTime = 0x100000
}
// Create a rollup config
cfg := rollup.Config{
BlockTime: blockTime,
Genesis: rollup.Genesis{
L1: l1HeadRef.ID(),
L2: l2HeadRef.ID(),
L2Time: l2Time, // dummy value
},
}
// Patch our timestamp so we fail
l2HeadRef.Time = currentL2HeadTime
// Create our outputter
outputProvider := &TestDummyOutputImpl{cfg: &cfg, l2Head: l2HeadRef, willError: forceOutputFail}
// Create our state
s := Driver{
l1State: &L1State{
l1Head: l1HeadRef,
log: log.New(),
metrics: metrics.NoopMetrics,
},
log: log.New(),
l1OriginSelector: TestDummyL1OriginSelector{retval: l1HeadRef},
config: &cfg,
sequencer: outputProvider,
derivation: TestDummyDerivationPipeline{},
metrics: metrics.NoopMetrics,
}
// Create a new block
// - L2Head's L1Origin, its timestamp should be greater than L1 genesis.
// - L2Head timestamp + BlockTime should be greater than or equal to the L1 Time.
err := s.startNewL2Block(ctx)
if err == nil {
err = s.completeNewBlock(ctx)
}
// Verify the L1Origin's block number is greater than L1 genesis in our config.
if l2l1OriginBlock.Number < s.config.Genesis.L1.Number {
require.NoError(t, err)
return
}
// Verify the new L2 block to create will have a time stamp equal or newer than our L1 origin block we derive from.
if l2HeadRef.Time+cfg.BlockTime < l2l1OriginBlock.Time {
// If not, we expect a specific error.
// TODO: This isn't the cleanest, we should construct + compare the whole error message.
require.NotNil(t, err)
require.Contains(t, err.Error(), "cannot build L2 block on top")
require.Contains(t, err.Error(), "for time")
require.Contains(t, err.Error(), "before L1 origin")
return
}
// Otherwise we should have no error.
require.Nil(t, err)
// If we expected the outputter to error, capture that here
if outputProvider.willError {
require.NotNil(t, err, "outputInterface failed to createNewBlock, so createNewL2Block should also have failed")
return
}
// Otherwise we should have no error.
require.NoError(t, err, "L1Origin block number should be greater than the L1 genesis block number")
})
}