Commit deec5a91 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into fix/witness-file-parsing

parents 4731750d a13ff5f8
@@ -12,6 +12,7 @@ import (
gethrpc "github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/op-batcher/flags"
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-batcher/rpc"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
@@ -30,6 +31,9 @@ const (
// of a closure allows the parameters bound to the top-level main package, e.g.
// GitVersion, to be captured and used once the function is executed.
func Main(version string, cliCtx *cli.Context) error {
if err := flags.CheckRequired(cliCtx); err != nil {
return err
}
cfg := NewConfig(cliCtx)
if err := cfg.Check(); err != nil {
return fmt.Errorf("invalid CLI flags: %w", err)
...
@@ -137,7 +137,7 @@ func newMiniL2BlockWithNumberParent(numTx int, number *big.Int, parent common.Ha
Difficulty: common.Big0,
Number: big.NewInt(100),
}, nil, nil, nil, trie.NewStackTrie(nil))
-l1InfoTx, err := derive.L1InfoDeposit(0, l1Block, eth.SystemConfig{}, false)
l1InfoTx, err := derive.L1InfoDeposit(0, eth.BlockToInfo(l1Block), eth.SystemConfig{}, false)
if err != nil {
panic(err)
}
@@ -517,7 +517,7 @@ func TestChannelBuilder_OutputFramesMaxFrameIndex(t *testing.T) {
Difficulty: common.Big0,
Number: common.Big0,
}, nil, nil, nil, trie.NewStackTrie(nil))
-l1InfoTx, _ := derive.L1InfoDeposit(0, lBlock, eth.SystemConfig{}, false)
l1InfoTx, _ := derive.L1InfoDeposit(0, eth.BlockToInfo(lBlock), eth.SystemConfig{}, false)
txs := []*types.Transaction{types.NewTx(l1InfoTx)}
a := types.NewBlock(&types.Header{
Number: big.NewInt(0),
...
package doc
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/olekukonko/tablewriter"
"github.com/urfave/cli"
)
var Subcommands = cli.Commands{
{
Name: "metrics",
Usage: "Dumps a list of supported metrics to stdout",
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Value: "markdown",
Usage: "Output format (json|markdown)",
},
},
Action: func(ctx *cli.Context) error {
m := metrics.NewMetrics("default")
supportedMetrics := m.Document()
format := ctx.String("format")
if format != "markdown" && format != "json" {
return fmt.Errorf("invalid format: %s", format)
}
if format == "json" {
enc := json.NewEncoder(os.Stdout)
return enc.Encode(supportedMetrics)
}
table := tablewriter.NewWriter(os.Stdout)
table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
table.SetCenterSeparator("|")
table.SetAutoWrapText(false)
table.SetHeader([]string{"Metric", "Description", "Labels", "Type"})
var data [][]string
for _, metric := range supportedMetrics {
labels := strings.Join(metric.Labels, ",")
data = append(data, []string{metric.Name, metric.Help, labels, metric.Type})
}
table.AppendBulk(data)
table.Render()
return nil
},
},
}
@@ -7,6 +7,7 @@ import (
"github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/op-batcher/batcher"
"github.com/ethereum-optimism/optimism/op-batcher/cmd/doc"
"github.com/ethereum-optimism/optimism/op-batcher/flags"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/log"
@@ -26,10 +27,15 @@ func main() {
app.Version = fmt.Sprintf("%s-%s-%s", Version, GitCommit, GitDate)
app.Name = "op-batcher"
app.Usage = "Batch Submitter Service"
-app.Description = "Service for generating and submitting L2 tx batches " +
-"to L1"
app.Description = "Service for generating and submitting L2 tx batches to L1"
app.Action = curryMain(Version)
app.Commands = []cli.Command{
{
Name: "doc",
Subcommands: doc.Subcommands,
},
}
err := app.Run(os.Args)
if err != nil {
log.Crit("Application failed", "message", err)
...
package flags
import (
"fmt"
"github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/op-batcher/rpc"
@@ -17,37 +19,32 @@ const envVarPrefix = "OP_BATCHER"
var (
// Required flags
L1EthRpcFlag = cli.StringFlag{
Name: "l1-eth-rpc",
Usage: "HTTP provider URL for L1",
-Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L1_ETH_RPC"),
}
L2EthRpcFlag = cli.StringFlag{
Name: "l2-eth-rpc",
Usage: "HTTP provider URL for L2 execution engine",
-Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L2_ETH_RPC"),
}
RollupRpcFlag = cli.StringFlag{
Name: "rollup-rpc",
Usage: "HTTP provider URL for Rollup node",
-Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "ROLLUP_RPC"),
}
SubSafetyMarginFlag = cli.Uint64Flag{
Name: "sub-safety-margin",
Usage: "The batcher tx submission safety margin (in #L1-blocks) to subtract " +
"from a channel's timeout and sequencing window, to guarantee safe inclusion " +
"of a channel on L1.",
-Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "SUB_SAFETY_MARGIN"),
}
PollIntervalFlag = cli.DurationFlag{
Name: "poll-interval",
Usage: "Delay between querying L2 for more transactions and " +
"creating a new batch",
-Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "POLL_INTERVAL"),
}
// Optional flags
@@ -108,8 +105,7 @@ var optionalFlags = []cli.Flag{
}
func init() {
-requiredFlags = append(requiredFlags, oprpc.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, oprpc.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, oplog.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, opmetrics.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, oppprof.CLIFlags(envVarPrefix)...)
@@ -121,3 +117,12 @@ func init() {
// Flags contains the list of configuration options available to the binary.
var Flags []cli.Flag
func CheckRequired(ctx *cli.Context) error {
for _, f := range requiredFlags {
if !ctx.GlobalIsSet(f.GetName()) {
return fmt.Errorf("flag %s is required", f.GetName())
}
}
return nil
}
@@ -103,6 +103,10 @@ func main() {
Name: "evm-messages",
Usage: "Path to evm-messages.json",
},
&cli.StringFlag{
Name: "witness-file",
Usage: "Path to l2geth witness file",
},
&cli.StringFlag{
Name: "private-key",
Usage: "Key to sign transactions with",
@@ -702,8 +706,9 @@ func newContracts(ctx *cli.Context, l1Backend, l2Backend bind.ContractBackend) (
func newWithdrawals(ctx *cli.Context, l1ChainID *big.Int) ([]*crossdomain.LegacyWithdrawal, error) {
ovmMsgs := ctx.String("ovm-messages")
evmMsgs := ctx.String("evm-messages")
witnessFile := ctx.String("witness-file")
-log.Debug("Migration data", "ovm-path", ovmMsgs, "evm-messages", evmMsgs)
log.Debug("Migration data", "ovm-path", ovmMsgs, "evm-messages", evmMsgs, "witness-file", witnessFile)
ovmMessages, err := crossdomain.NewSentMessageFromJSON(ovmMsgs)
if err != nil {
return nil, err
@@ -716,9 +721,19 @@ func newWithdrawals(ctx *cli.Context, l1ChainID *big.Int) ([]*crossdomain.Legacy
ovmMessages = []*crossdomain.SentMessage{}
}
-evmMessages, err := crossdomain.NewSentMessageFromJSON(evmMsgs)
-if err != nil {
-return nil, err
var evmMessages []*crossdomain.SentMessage
if witnessFile != "" {
evmMessages, _, err = crossdomain.ReadWitnessData(witnessFile)
if err != nil {
return nil, err
}
} else if evmMsgs != "" {
evmMessages, err = crossdomain.NewSentMessageFromJSON(evmMsgs)
if err != nil {
return nil, err
}
} else {
return nil, errors.New("must provide either witness file or evm messages")
}
migrationData := crossdomain.MigrationData{
...
@@ -100,7 +100,7 @@ func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, e
SystemConfig: rollupGenesis.SystemConfig,
L1ChainConfig: l1Genesis.Config,
L2ChainConfig: l2Genesis.Config,
-L1Head: l1Block,
L1Head: eth.BlockToInfo(l1Block),
L2Head: genesisPayload,
}, nil
}
...
@@ -5,10 +5,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
-var _ BlockInfo = (&types.Block{})
type BlockInfo interface {
Hash() common.Hash
ParentHash() common.Hash
@@ -21,6 +20,10 @@ type BlockInfo interface {
BaseFee() *big.Int
ReceiptHash() common.Hash
GasUsed() uint64
// HeaderRLP returns the RLP of the block header as per consensus rules
// Returns an error if the header RLP could not be written
HeaderRLP() ([]byte, error)
}
func InfoToL1BlockRef(info BlockInfo) L1BlockRef {
@@ -44,6 +47,19 @@ func ToBlockID(b NumberAndHash) BlockID {
}
}
// blockInfo is a conversion type of types.Block turning it into a BlockInfo
type blockInfo struct{ *types.Block }
func (b blockInfo) HeaderRLP() ([]byte, error) {
return rlp.EncodeToBytes(b.Header())
}
func BlockToInfo(b *types.Block) BlockInfo {
return blockInfo{b}
}
var _ BlockInfo = (*blockInfo)(nil)
// headerBlockInfo is a conversion type of types.Header turning it into a
// BlockInfo.
type headerBlockInfo struct{ *types.Header }
@@ -84,6 +100,10 @@ func (h headerBlockInfo) GasUsed() uint64 {
return h.Header.GasUsed
}
func (h headerBlockInfo) HeaderRLP() ([]byte, error) {
return rlp.EncodeToBytes(h.Header)
}
// HeaderBlockInfo returns h as a BlockInfo implementation.
func HeaderBlockInfo(h *types.Header) BlockInfo {
return headerBlockInfo{h}
...
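The new BlockToInfo wrapper is what lets a full *types.Block keep satisfying eth.BlockInfo now that the interface requires HeaderRLP(). A minimal hedged sketch of how a caller might use it; the block construction is illustrative, not taken from this diff:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"

	"github.com/ethereum-optimism/optimism/op-node/eth"
)

func main() {
	// Any *types.Block works; this one mirrors the test blocks used elsewhere in the diff.
	block := types.NewBlock(&types.Header{
		Difficulty: common.Big0,
		Number:     big.NewInt(0),
	}, nil, nil, nil, trie.NewStackTrie(nil))

	// Wrap the block so it satisfies eth.BlockInfo, including the new HeaderRLP method.
	info := eth.BlockToInfo(block)

	headerRLP, err := info.HeaderRLP()
	if err != nil {
		panic(err)
	}
	fmt.Printf("block %s header RLP is %d bytes\n", info.Hash(), len(headerRLP))
}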
package eth
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
// EncodeReceipts encodes a list of receipts into raw receipts. Some non-consensus meta-data may be lost.
func EncodeReceipts(elems []*types.Receipt) ([]hexutil.Bytes, error) {
out := make([]hexutil.Bytes, len(elems))
for i, el := range elems {
dat, err := el.MarshalBinary()
if err != nil {
return nil, fmt.Errorf("failed to marshal receipt %d: %w", i, err)
}
out[i] = dat
}
return out, nil
}
// DecodeRawReceipts decodes receipts and adds additional blocks metadata.
// The contract-deployment addresses are not set however (high cost, depends on nonce values, unused by op-node).
func DecodeRawReceipts(block BlockID, rawReceipts []hexutil.Bytes, txHashes []common.Hash) ([]*types.Receipt, error) {
result := make([]*types.Receipt, len(rawReceipts))
totalIndex := uint(0)
prevCumulativeGasUsed := uint64(0)
for i, r := range rawReceipts {
var x types.Receipt
if err := x.UnmarshalBinary(r); err != nil {
return nil, fmt.Errorf("failed to decode receipt %d: %w", i, err)
}
x.TxHash = txHashes[i]
x.BlockHash = block.Hash
x.BlockNumber = new(big.Int).SetUint64(block.Number)
x.TransactionIndex = uint(i)
x.GasUsed = x.CumulativeGasUsed - prevCumulativeGasUsed
// contract address meta-data is not computed.
prevCumulativeGasUsed = x.CumulativeGasUsed
for _, l := range x.Logs {
l.BlockNumber = block.Number
l.TxHash = x.TxHash
l.TxIndex = uint(i)
l.BlockHash = block.Hash
l.Index = totalIndex
totalIndex += 1
}
result[i] = &x
}
return result, nil
}
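The two helpers above are intended as inverses of each other, modulo the non-consensus fields that DecodeRawReceipts re-derives. A hedged round-trip sketch; the receipt contents, block ID, and tx hash below are invented purely for illustration:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"

	"github.com/ethereum-optimism/optimism/op-node/eth"
)

func main() {
	// Illustrative receipt; real receipts come from the execution engine.
	rcpt := &types.Receipt{
		Type:              types.DynamicFeeTxType,
		Status:            types.ReceiptStatusSuccessful,
		CumulativeGasUsed: 21_000,
	}

	raw, err := eth.EncodeReceipts([]*types.Receipt{rcpt})
	if err != nil {
		panic(err)
	}

	// Block ID and tx hash are placeholders for this sketch.
	block := eth.BlockID{Hash: common.HexToHash("0xaa"), Number: 100}
	txHashes := []common.Hash{common.HexToHash("0xbb")}

	decoded, err := eth.DecodeRawReceipts(block, raw, txHashes)
	if err != nil {
		panic(err)
	}
	// GasUsed and BlockNumber are filled back in from the block metadata.
	fmt.Println("first receipt gas used:", decoded[0].GasUsed)
	fmt.Println("block number:", decoded[0].BlockNumber)
}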
package eth
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
// EncodeTransactions encodes a list of transactions into opaque transactions.
func EncodeTransactions(elems []*types.Transaction) ([]hexutil.Bytes, error) {
out := make([]hexutil.Bytes, len(elems))
for i, el := range elems {
dat, err := el.MarshalBinary()
if err != nil {
return nil, fmt.Errorf("failed to marshal tx %d: %w", i, err)
}
out[i] = dat
}
return out, nil
}
// DecodeTransactions decodes a list of opaque transactions into transactions.
func DecodeTransactions(data []hexutil.Bytes) ([]*types.Transaction, error) {
dest := make([]*types.Transaction, len(data))
for i := range dest {
var x types.Transaction
if err := x.UnmarshalBinary(data[i]); err != nil {
return nil, fmt.Errorf("failed to unmarshal tx %d: %w", i, err)
}
dest[i] = &x
}
return dest, nil
}
// TransactionsToHashes computes the transaction-hash for every transaction in the input.
func TransactionsToHashes(elems []*types.Transaction) []common.Hash {
out := make([]common.Hash, len(elems))
for i, el := range elems {
out[i] = el.Hash()
}
return out
}
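As with the receipt helpers, EncodeTransactions and DecodeTransactions are meant to round-trip, and TransactionsToHashes is the small convenience used by the receipt-fetching path later in this diff. A hedged sketch; the transaction itself is a throwaway example:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"

	"github.com/ethereum-optimism/optimism/op-node/eth"
)

func main() {
	// Throwaway legacy transaction, just to have something encodable.
	tx := types.NewTx(&types.LegacyTx{
		Nonce:    1,
		GasPrice: big.NewInt(1),
		Gas:      21_000,
		Value:    big.NewInt(0),
	})

	opaque, err := eth.EncodeTransactions([]*types.Transaction{tx})
	if err != nil {
		panic(err)
	}

	decoded, err := eth.DecodeTransactions(opaque)
	if err != nil {
		panic(err)
	}

	// The transaction hash survives the round trip.
	fmt.Println(eth.TransactionsToHashes(decoded)[0] == tx.Hash())
}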
@@ -256,28 +256,10 @@ func init() {
}
func CheckRequired(ctx *cli.Context) error {
-l1NodeAddr := ctx.GlobalString(L1NodeAddr.Name)
-if l1NodeAddr == "" {
-return fmt.Errorf("flag %s is required", L1NodeAddr.Name)
-}
-l2EngineAddr := ctx.GlobalString(L2EngineAddr.Name)
-if l2EngineAddr == "" {
-return fmt.Errorf("flag %s is required", L2EngineAddr.Name)
-}
-rollupConfig := ctx.GlobalString(RollupConfig.Name)
-network := ctx.GlobalString(Network.Name)
-if rollupConfig == "" && network == "" {
-return fmt.Errorf("flag %s or %s is required", RollupConfig.Name, Network.Name)
-}
-if rollupConfig != "" && network != "" {
-return fmt.Errorf("cannot specify both %s and %s", RollupConfig.Name, Network.Name)
-}
-rpcListenAddr := ctx.GlobalString(RPCListenAddr.Name)
-if rpcListenAddr == "" {
-return fmt.Errorf("flag %s is required", RPCListenAddr.Name)
-}
-if !ctx.GlobalIsSet(RPCListenPort.Name) {
-return fmt.Errorf("flag %s is required", RPCListenPort.Name)
-}
for _, f := range requiredFlags {
if !ctx.GlobalIsSet(f.GetName()) {
return fmt.Errorf("flag %s is required", f.GetName())
}
}
return nil
}
@@ -54,7 +54,8 @@ type GossipSetupConfigurables interface {
PeerScoringParams() *pubsub.PeerScoreParams
TopicScoringParams() *pubsub.TopicScoreParams
BanPeers() bool
-ConfigureGossip(params *pubsub.GossipSubParams) []pubsub.Option
// ConfigureGossip creates configuration options to apply to the GossipSub setup
ConfigureGossip(rollupCfg *rollup.Config) []pubsub.Option
PeerBandScorer() *BandScoreThresholds
}
@@ -124,7 +125,10 @@ func BuildMsgIdFn(cfg *rollup.Config) pubsub.MsgIdFunction {
}
}
-func (p *Config) ConfigureGossip(params *pubsub.GossipSubParams) []pubsub.Option {
func (p *Config) ConfigureGossip(rollupCfg *rollup.Config) []pubsub.Option {
params := BuildGlobalGossipParams(rollupCfg)
// override with CLI changes
params.D = p.MeshD
params.Dlo = p.MeshDLo
params.Dhi = p.MeshDHi
@@ -132,6 +136,7 @@ func (p *Config) ConfigureGossip(params *pubsub.GossipSubParams) []pubsub.Option
// in the future we may add more advanced options like scoring and PX / direct-mesh / episub
return []pubsub.Option{
pubsub.WithGossipSubParams(params),
pubsub.WithFloodPublish(p.FloodPublish),
}
}
@@ -157,7 +162,6 @@ func NewGossipSub(p2pCtx context.Context, h host.Host, g ConnectionGater, cfg *r
if err != nil {
return nil, err
}
-params := BuildGlobalGossipParams(cfg)
gossipOpts := []pubsub.Option{
pubsub.WithMaxMessageSize(maxGossipSize),
pubsub.WithMessageIdFn(BuildMsgIdFn(cfg)),
@@ -170,11 +174,10 @@ func NewGossipSub(p2pCtx context.Context, h host.Host, g ConnectionGater, cfg *r
pubsub.WithSeenMessagesTTL(seenMessagesTTL),
pubsub.WithPeerExchange(false),
pubsub.WithBlacklist(denyList),
-pubsub.WithGossipSubParams(params),
pubsub.WithEventTracer(&gossipTracer{m: m}),
}
gossipOpts = append(gossipOpts, ConfigurePeerScoring(h, g, gossipConf, m, log)...)
-gossipOpts = append(gossipOpts, gossipConf.ConfigureGossip(&params)...)
gossipOpts = append(gossipOpts, gossipConf.ConfigureGossip(cfg)...)
return pubsub.NewGossipSub(p2pCtx, h, gossipOpts...)
}
...
@@ -62,8 +62,10 @@ func (p *Prepared) Discovery(log log.Logger, rollupCfg *rollup.Config, tcpPort u
return p.LocalNode, p.UDPv5, nil
}
-func (p *Prepared) ConfigureGossip(params *pubsub.GossipSubParams) []pubsub.Option {
-return nil
func (p *Prepared) ConfigureGossip(rollupCfg *rollup.Config) []pubsub.Option {
return []pubsub.Option{
pubsub.WithGossipSubParams(BuildGlobalGossipParams(rollupCfg)),
}
}
func (p *Prepared) PeerScoringParams() *pubsub.PeerScoreParams {
...
@@ -17,6 +17,11 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
)
type attributesWithParent struct {
attributes *eth.PayloadAttributes
parent eth.L2BlockRef
}
type NextAttributesProvider interface {
Origin() eth.L1BlockRef
NextAttributes(context.Context, eth.L2BlockRef) (*eth.PayloadAttributes, error)
@@ -114,9 +119,8 @@ type EngineQueue struct {
triedFinalizeAt eth.L1BlockRef
// The queued-up attributes
-safeAttributesParent eth.L2BlockRef
-safeAttributes *eth.PayloadAttributes
safeAttributes *attributesWithParent
unsafePayloads *PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps and duplicates
// Tracks which L2 blocks where last derived from which L1 block. At most finalityLookback large.
finalityData []FinalityData
@@ -241,9 +245,11 @@ func (eq *EngineQueue) Step(ctx context.Context) error {
} else if err != nil {
return err
} else {
-eq.safeAttributes = next
-eq.safeAttributesParent = eq.safeHead
-eq.log.Debug("Adding next safe attributes", "safe_head", eq.safeHead, "next", eq.safeAttributes)
eq.safeAttributes = &attributesWithParent{
attributes: next,
parent: eq.safeHead,
}
eq.log.Debug("Adding next safe attributes", "safe_head", eq.safeHead, "next", next)
return NotEnoughData
}
@@ -482,15 +488,19 @@ func (eq *EngineQueue) tryNextSafeAttributes(ctx context.Context) error {
return nil
}
// validate the safe attributes before processing them. The engine may have completed processing them through other means.
-if eq.safeHead != eq.safeAttributesParent {
-if eq.safeHead.ParentHash != eq.safeAttributesParent.Hash {
-return NewResetError(fmt.Errorf("safe head changed to %s with parent %s, conflicting with queued safe attributes on top of %s",
-eq.safeHead, eq.safeHead.ParentID(), eq.safeAttributesParent))
-}
-eq.log.Warn("queued safe attributes are stale, safe-head progressed",
-"safe_head", eq.safeHead, "safe_head_parent", eq.safeHead.ParentID(), "attributes_parent", eq.safeAttributesParent)
-eq.safeAttributes = nil
-return nil
if eq.safeHead != eq.safeAttributes.parent {
// Previously the attribute's parent was the safe head. If the safe head advances so safe head's parent is the same as the
// attribute's parent then we need to cancel the attributes.
if eq.safeHead.ParentHash == eq.safeAttributes.parent.Hash {
eq.log.Warn("queued safe attributes are stale, safehead progressed",
"safe_head", eq.safeHead, "safe_head_parent", eq.safeHead.ParentID(), "attributes_parent", eq.safeAttributes.parent)
eq.safeAttributes = nil
return nil
}
// If something other than a simple advance occurred, perform a full reset
return NewResetError(fmt.Errorf("safe head changed to %s with parent %s, conflicting with queued safe attributes on top of %s",
eq.safeHead, eq.safeHead.ParentID(), eq.safeAttributes.parent))
}
if eq.safeHead.Number < eq.unsafeHead.Number {
return eq.consolidateNextSafeAttributes(ctx)
@@ -520,7 +530,7 @@ func (eq *EngineQueue) consolidateNextSafeAttributes(ctx context.Context) error
}
return NewTemporaryError(fmt.Errorf("failed to get existing unsafe payload to compare against derived attributes from L1: %w", err))
}
-if err := AttributesMatchBlock(eq.safeAttributes, eq.safeHead.Hash, payload, eq.log); err != nil {
if err := AttributesMatchBlock(eq.safeAttributes.attributes, eq.safeHead.Hash, payload, eq.log); err != nil {
eq.log.Warn("L2 reorg: existing unsafe block does not match derived attributes from L1", "err", err, "unsafe", eq.unsafeHead, "safe", eq.safeHead)
// geth cannot wind back a chain without reorging to a new, previously non-canonical, block
return eq.forceNextSafeAttributes(ctx)
@@ -545,7 +555,7 @@ func (eq *EngineQueue) forceNextSafeAttributes(ctx context.Context) error {
if eq.safeAttributes == nil {
return nil
}
-attrs := eq.safeAttributes
attrs := eq.safeAttributes.attributes
errType, err := eq.StartPayload(ctx, eq.safeHead, attrs, true)
if err == nil {
_, errType, err = eq.ConfirmPayload(ctx)
@@ -716,6 +726,7 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System
eq.log.Debug("Reset engine queue", "safeHead", safe, "unsafe", unsafe, "safe_timestamp", safe.Time, "unsafe_timestamp", unsafe.Time, "l1Origin", l1Origin)
eq.unsafeHead = unsafe
eq.safeHead = safe
eq.safeAttributes = nil
eq.finalized = finalized
eq.resetBuildingState()
eq.needForkchoiceUpdate = true
...
@@ -15,6 +15,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/testutils"
@@ -1007,3 +1008,102 @@ func TestBlockBuildingRace(t *testing.T) {
l1F.AssertExpectations(t)
eng.AssertExpectations(t)
}
func TestResetLoop(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
eng := &testutils.MockEngine{}
l1F := &testutils.MockL1Source{}
rng := rand.New(rand.NewSource(1234))
refA := testutils.RandomBlockRef(rng)
refA0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 0,
ParentHash: common.Hash{},
Time: refA.Time,
L1Origin: refA.ID(),
SequenceNumber: 0,
}
gasLimit := eth.Uint64Quantity(20_000_000)
cfg := &rollup.Config{
Genesis: rollup.Genesis{
L1: refA.ID(),
L2: refA0.ID(),
L2Time: refA0.Time,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.Address{42},
Overhead: [32]byte{123},
Scalar: [32]byte{42},
GasLimit: 20_000_000,
},
},
BlockTime: 1,
SeqWindowSize: 2,
}
refA1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA0.Number + 1,
ParentHash: refA0.Hash,
Time: refA0.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 1,
}
refA2 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA1.Number + 1,
ParentHash: refA1.Hash,
Time: refA1.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 2,
}
attrs := &eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(refA2.Time),
PrevRandao: eth.Bytes32{},
SuggestedFeeRecipient: common.Address{},
Transactions: nil,
NoTxPool: false,
GasLimit: &gasLimit,
}
eng.ExpectL2BlockRefByLabel(eth.Finalized, refA0, nil)
eng.ExpectL2BlockRefByLabel(eth.Safe, refA1, nil)
eng.ExpectL2BlockRefByLabel(eth.Unsafe, refA2, nil)
eng.ExpectL2BlockRefByHash(refA1.Hash, refA1, nil)
eng.ExpectL2BlockRefByHash(refA0.Hash, refA0, nil)
eng.ExpectSystemConfigByL2Hash(refA0.Hash, cfg.Genesis.SystemConfig, nil)
l1F.ExpectL1BlockRefByNumber(refA.Number, refA, nil)
l1F.ExpectL1BlockRefByHash(refA.Hash, refA, nil)
l1F.ExpectL1BlockRefByHash(refA.Hash, refA, nil)
prev := &fakeAttributesQueue{origin: refA, attrs: attrs}
eq := NewEngineQueue(logger, cfg, eng, metrics.NoopMetrics, prev, l1F)
eq.unsafeHead = refA2
eq.safeHead = refA1
eq.finalized = refA0
// Queue up the safe attributes
require.Nil(t, eq.safeAttributes)
require.ErrorIs(t, eq.Step(context.Background()), NotEnoughData)
require.NotNil(t, eq.safeAttributes)
// Perform the reset
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
// Expect a FCU after the reset
preFc := &eth.ForkchoiceState{
HeadBlockHash: refA2.Hash,
SafeBlockHash: refA0.Hash,
FinalizedBlockHash: refA0.Hash,
}
eng.ExpectForkchoiceUpdate(preFc, nil, nil, nil)
require.NoError(t, eq.Step(context.Background()), "clean forkchoice state after reset")
// Crux of the test. Should be in a valid state after the reset.
require.ErrorIs(t, eq.Step(context.Background()), NotEnoughData, "Should be able to step after a reset")
l1F.AssertExpectations(t)
eng.AssertExpectations(t)
}
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/binary"
"fmt"
"io"
"math/big"
"github.com/ethereum/go-ethereum/common"
@@ -45,71 +46,135 @@ type L1BlockInfo struct {
L1FeeScalar eth.Bytes32
}
//+---------+--------------------------+
//| Bytes | Field |
//+---------+--------------------------+
//| 4 | Function signature |
//| 24 | Padding for Number |
//| 8 | Number |
//| 24 | Padding for Time |
//| 8 | Time |
//| 32 | BaseFee |
//| 32 | BlockHash |
//| 24 | Padding for SequenceNumber|
//| 8 | SequenceNumber |
//| 12 | Padding for BatcherAddr |
//| 20 | BatcherAddr |
//| 32 | L1FeeOverhead |
//| 32 | L1FeeScalar |
//+---------+--------------------------+
func (info *L1BlockInfo) MarshalBinary() ([]byte, error) {
-data := make([]byte, L1InfoLen)
-offset := 0
-copy(data[offset:4], L1InfoFuncBytes4)
-offset += 4
-binary.BigEndian.PutUint64(data[offset+24:offset+32], info.Number)
-offset += 32
-binary.BigEndian.PutUint64(data[offset+24:offset+32], info.Time)
-offset += 32
writer := bytes.NewBuffer(make([]byte, 0, L1InfoLen))
writer.Write(L1InfoFuncBytes4)
if err := writeSolidityABIUint64(writer, info.Number); err != nil {
return nil, err
}
if err := writeSolidityABIUint64(writer, info.Time); err != nil {
return nil, err
}
// Ensure that the baseFee is not too large.
if info.BaseFee.BitLen() > 256 {
return nil, fmt.Errorf("base fee exceeds 256 bits: %d", info.BaseFee)
}
-info.BaseFee.FillBytes(data[offset : offset+32])
-offset += 32
-copy(data[offset:offset+32], info.BlockHash.Bytes())
-offset += 32
-binary.BigEndian.PutUint64(data[offset+24:offset+32], info.SequenceNumber)
-offset += 32
-copy(data[offset+12:offset+32], info.BatcherAddr[:])
-offset += 32
-copy(data[offset:offset+32], info.L1FeeOverhead[:])
-offset += 32
-copy(data[offset:offset+32], info.L1FeeScalar[:])
-return data, nil
var baseFeeBuf [32]byte
info.BaseFee.FillBytes(baseFeeBuf[:])
writer.Write(baseFeeBuf[:])
writer.Write(info.BlockHash.Bytes())
if err := writeSolidityABIUint64(writer, info.SequenceNumber); err != nil {
return nil, err
}
var addrPadding [12]byte
writer.Write(addrPadding[:])
writer.Write(info.BatcherAddr.Bytes())
writer.Write(info.L1FeeOverhead[:])
writer.Write(info.L1FeeScalar[:])
return writer.Bytes(), nil
}
func (info *L1BlockInfo) UnmarshalBinary(data []byte) error {
if len(data) != L1InfoLen {
return fmt.Errorf("data is unexpected length: %d", len(data))
}
-var padding [24]byte
-offset := 4
-if !bytes.Equal(data[0:offset], L1InfoFuncBytes4) {
-return fmt.Errorf("data does not match L1 info function signature: 0x%x", data[offset:4])
-}
-info.Number = binary.BigEndian.Uint64(data[offset+24 : offset+32])
-if !bytes.Equal(data[offset:offset+24], padding[:]) {
-return fmt.Errorf("l1 info number exceeds uint64 bounds: %x", data[offset:offset+32])
-}
-offset += 32
-info.Time = binary.BigEndian.Uint64(data[offset+24 : offset+32])
-if !bytes.Equal(data[offset:offset+24], padding[:]) {
-return fmt.Errorf("l1 info time exceeds uint64 bounds: %x", data[offset:offset+32])
-}
-offset += 32
-info.BaseFee = new(big.Int).SetBytes(data[offset : offset+32])
-offset += 32
-info.BlockHash.SetBytes(data[offset : offset+32])
-offset += 32
-info.SequenceNumber = binary.BigEndian.Uint64(data[offset+24 : offset+32])
-if !bytes.Equal(data[offset:offset+24], padding[:]) {
-return fmt.Errorf("l1 info sequence number exceeds uint64 bounds: %x", data[offset:offset+32])
-}
-offset += 32
-info.BatcherAddr.SetBytes(data[offset+12 : offset+32])
-offset += 32
-copy(info.L1FeeOverhead[:], data[offset:offset+32])
-offset += 32
-copy(info.L1FeeScalar[:], data[offset:offset+32])
reader := bytes.NewReader(data)
funcSignature := make([]byte, 4)
if _, err := io.ReadFull(reader, funcSignature); err != nil || !bytes.Equal(funcSignature, L1InfoFuncBytes4) {
return fmt.Errorf("data does not match L1 info function signature: 0x%x", funcSignature)
}
if blockNumber, err := readSolidityABIUint64(reader); err != nil {
return err
} else {
info.Number = blockNumber
}
if blockTime, err := readSolidityABIUint64(reader); err != nil {
return err
} else {
info.Time = blockTime
}
var baseFeeBytes [32]byte
if _, err := io.ReadFull(reader, baseFeeBytes[:]); err != nil {
return fmt.Errorf("expected BaseFee length to be 32 bytes, but got %x", baseFeeBytes)
}
info.BaseFee = new(big.Int).SetBytes(baseFeeBytes[:])
var blockHashBytes [32]byte
if _, err := io.ReadFull(reader, blockHashBytes[:]); err != nil {
return fmt.Errorf("expected BlockHash length to be 32 bytes, but got %x", blockHashBytes)
}
info.BlockHash.SetBytes(blockHashBytes[:])
if sequenceNumber, err := readSolidityABIUint64(reader); err != nil {
return err
} else {
info.SequenceNumber = sequenceNumber
}
var addrPadding [12]byte
if _, err := io.ReadFull(reader, addrPadding[:]); err != nil {
return fmt.Errorf("expected addrPadding length to be 12 bytes, but got %x", addrPadding)
}
if _, err := io.ReadFull(reader, info.BatcherAddr[:]); err != nil {
return fmt.Errorf("expected BatcherAddr length to be 20 bytes, but got %x", info.BatcherAddr)
}
if _, err := io.ReadFull(reader, info.L1FeeOverhead[:]); err != nil {
return fmt.Errorf("expected L1FeeOverhead length to be 32 bytes, but got %x", info.L1FeeOverhead)
}
if _, err := io.ReadFull(reader, info.L1FeeScalar[:]); err != nil {
return fmt.Errorf("expected L1FeeScalar length to be 32 bytes, but got %x", info.L1FeeScalar)
}
return nil
}
func writeSolidityABIUint64(w io.Writer, num uint64) error {
var padding [24]byte
if _, err := w.Write(padding[:]); err != nil {
return err
}
if err := binary.Write(w, binary.BigEndian, num); err != nil {
return err
}
return nil
}
func readSolidityABIUint64(r io.Reader) (uint64, error) {
var (
padding, readPadding [24]byte
num uint64
)
if _, err := io.ReadFull(r, readPadding[:]); err != nil || !bytes.Equal(readPadding[:], padding[:]) {
return 0, fmt.Errorf("L1BlockInfo number exceeds uint64 bounds: %x", readPadding[:])
}
if err := binary.Read(r, binary.BigEndian, &num); err != nil {
return 0, fmt.Errorf("L1BlockInfo expected number length to be 8 bytes")
}
return num, nil
}
// L1InfoDepositTxData is the inverse of L1InfoDeposit, to see where the L2 chain is derived from
func L1InfoDepositTxData(data []byte) (L1BlockInfo, error) {
var info L1BlockInfo
...
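The reworked marshalling code above, together with the byte-layout comment, is easiest to sanity-check with a round trip through MarshalBinary and UnmarshalBinary. A hedged sketch; the field values are arbitrary and chosen only to exercise every field in the layout:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"

	"github.com/ethereum-optimism/optimism/op-node/eth"
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)

func main() {
	// Arbitrary illustrative values.
	info := derive.L1BlockInfo{
		Number:         101,
		Time:           1_700_000_000,
		BaseFee:        big.NewInt(7),
		BlockHash:      common.HexToHash("0xabc"),
		SequenceNumber: 3,
		BatcherAddr:    common.HexToAddress("0x42"),
		L1FeeOverhead:  eth.Bytes32{31: 0xbc},
		L1FeeScalar:    eth.Bytes32{31: 0x01},
	}

	data, err := info.MarshalBinary()
	if err != nil {
		panic(err)
	}
	fmt.Println("encoded length:", len(data)) // expected to match L1InfoLen

	var decoded derive.L1BlockInfo
	if err := decoded.UnmarshalBinary(data); err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", decoded.Number == info.Number && decoded.BlockHash == info.BlockHash)
}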
@@ -15,7 +15,7 @@ import (
func RandomL2Block(rng *rand.Rand, txCount int) (*types.Block, []*types.Receipt) {
l1Block := types.NewBlock(testutils.RandomHeader(rng),
nil, nil, nil, trie.NewStackTrie(nil))
-l1InfoTx, err := derive.L1InfoDeposit(0, l1Block, eth.SystemConfig{}, testutils.RandomBool(rng))
l1InfoTx, err := derive.L1InfoDeposit(0, eth.BlockToInfo(l1Block), eth.SystemConfig{}, testutils.RandomBool(rng))
if err != nil {
panic("L1InfoDeposit: " + err.Error())
}
...
@@ -219,7 +219,7 @@ func (n numberID) CheckID(id eth.BlockID) error {
return nil
}
-func (s *EthClient) headerCall(ctx context.Context, method string, id rpcBlockID) (*HeaderInfo, error) {
func (s *EthClient) headerCall(ctx context.Context, method string, id rpcBlockID) (eth.BlockInfo, error) {
var header *rpcHeader
err := s.client.CallContext(ctx, &header, method, id.Arg(), false) // headers are just blocks without txs
if err != nil {
@@ -239,7 +239,7 @@ func (s *EthClient) headerCall(ctx context.Context, method string, id rpcBlockID
return info, nil
}
-func (s *EthClient) blockCall(ctx context.Context, method string, id rpcBlockID) (*HeaderInfo, types.Transactions, error) {
func (s *EthClient) blockCall(ctx context.Context, method string, id rpcBlockID) (eth.BlockInfo, types.Transactions, error) {
var block *rpcBlock
err := s.client.CallContext(ctx, &block, method, id.Arg(), true)
if err != nil {
@@ -292,7 +292,7 @@ func (s *EthClient) ChainID(ctx context.Context) (*big.Int, error) {
func (s *EthClient) InfoByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, error) {
if header, ok := s.headersCache.Get(hash); ok {
-return header.(*HeaderInfo), nil
return header.(eth.BlockInfo), nil
}
return s.headerCall(ctx, "eth_getBlockByHash", hashID(hash))
}
@@ -310,7 +310,7 @@ func (s *EthClient) InfoByLabel(ctx context.Context, label eth.BlockLabel) (eth.
func (s *EthClient) InfoAndTxsByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, types.Transactions, error) {
if header, ok := s.headersCache.Get(hash); ok {
if txs, ok := s.transactionsCache.Get(hash); ok {
-return header.(*HeaderInfo), txs.(types.Transactions), nil
return header.(eth.BlockInfo), txs.(types.Transactions), nil
}
}
return s.blockCall(ctx, "eth_getBlockByHash", hashID(hash))
@@ -356,10 +356,7 @@ func (s *EthClient) FetchReceipts(ctx context.Context, blockHash common.Hash) (e
if v, ok := s.receiptsCache.Get(blockHash); ok {
job = v.(*receiptsFetchingJob)
} else {
-txHashes := make([]common.Hash, len(txs))
-for i := 0; i < len(txs); i++ {
-txHashes[i] = txs[i].Hash()
-}
txHashes := eth.TransactionsToHashes(txs)
job = NewReceiptsFetchingJob(s, s.client, s.maxBatchSize, eth.ToBlockID(info), info.ReceiptHash(), txHashes)
s.receiptsCache.Add(blockHash, job)
}
...
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
-"math/big"
"sync"
"github.com/ethereum/go-ethereum/common"
@@ -420,29 +419,7 @@ func (job *receiptsFetchingJob) runAltMethod(ctx context.Context, m ReceiptsFetc
err = job.client.CallContext(ctx, &rawReceipts, "debug_getRawReceipts", job.block.Hash)
if err == nil {
if len(rawReceipts) == len(job.txHashes) {
-result = make([]*types.Receipt, len(rawReceipts))
-totalIndex := uint(0)
-prevCumulativeGasUsed := uint64(0)
-for i, r := range rawReceipts {
-var x types.Receipt
-_ = x.UnmarshalBinary(r) // safe to ignore, we verify receipts against the receipts hash later
-x.TxHash = job.txHashes[i]
-x.BlockHash = job.block.Hash
-x.BlockNumber = new(big.Int).SetUint64(job.block.Number)
-x.TransactionIndex = uint(i)
-x.GasUsed = x.CumulativeGasUsed - prevCumulativeGasUsed
-// contract address meta-data is not computed.
-prevCumulativeGasUsed = x.CumulativeGasUsed
-for _, l := range x.Logs {
-l.BlockNumber = job.block.Number
-l.TxHash = x.TxHash
-l.TxIndex = uint(i)
-l.BlockHash = job.block.Hash
-l.Index = totalIndex
-totalIndex += 1
-}
-result[i] = &x
-}
result, err = eth.DecodeRawReceipts(job.block, rawReceipts, job.txHashes)
} else {
err = fmt.Errorf("got %d raw receipts, but expected %d", len(rawReceipts), len(job.txHashes))
}
...
@@ -6,6 +6,7 @@ import (
"math/big"
"strings"
"github.com/ethereum/go-ethereum/rlp"
"github.com/holiman/uint256"
"github.com/ethereum/go-ethereum/common"
@@ -33,69 +34,57 @@ type CallContextFn func(ctx context.Context, result any, method string, args ...
//
// This way we minimize RPC calls, enable batching, and can choose to verify what the RPC gives us.
-// HeaderInfo contains all the header-info required to implement the eth.BlockInfo interface,
-// used in the rollup state-transition, with pre-computed block hash.
-type HeaderInfo struct {
-hash common.Hash
-parentHash common.Hash
-coinbase common.Address
-root common.Hash
-number uint64
-time uint64
-mixDigest common.Hash // a.k.a. the randomness field post-merge.
-baseFee *big.Int
-txHash common.Hash
-receiptHash common.Hash
-gasUsed uint64
-// withdrawalsRoot was added in Shapella and is thus optional
-withdrawalsRoot *common.Hash
-}
-var _ eth.BlockInfo = (*HeaderInfo)(nil)
-func (info *HeaderInfo) Hash() common.Hash {
-return info.hash
-}
-func (info *HeaderInfo) ParentHash() common.Hash {
-return info.parentHash
-}
-func (info *HeaderInfo) Coinbase() common.Address {
-return info.coinbase
-}
-func (info *HeaderInfo) Root() common.Hash {
-return info.root
-}
-func (info *HeaderInfo) NumberU64() uint64 {
-return info.number
-}
-func (info *HeaderInfo) Time() uint64 {
-return info.time
-}
-func (info *HeaderInfo) MixDigest() common.Hash {
-return info.mixDigest
-}
-func (info *HeaderInfo) BaseFee() *big.Int {
-return info.baseFee
-}
-func (info *HeaderInfo) ID() eth.BlockID {
-return eth.BlockID{Hash: info.hash, Number: info.number}
-}
-func (info *HeaderInfo) ReceiptHash() common.Hash {
-return info.receiptHash
-}
-func (info *HeaderInfo) GasUsed() uint64 {
-return info.gasUsed
-}
// headerInfo is a conversion type of types.Header turning it into a
// BlockInfo, but using a cached hash value.
type headerInfo struct {
hash common.Hash
*types.Header
}
var _ eth.BlockInfo = (*headerInfo)(nil)
func (h headerInfo) Hash() common.Hash {
return h.hash
}
func (h headerInfo) ParentHash() common.Hash {
return h.Header.ParentHash
}
func (h headerInfo) Coinbase() common.Address {
return h.Header.Coinbase
}
func (h headerInfo) Root() common.Hash {
return h.Header.Root
}
func (h headerInfo) NumberU64() uint64 {
return h.Header.Number.Uint64()
}
func (h headerInfo) Time() uint64 {
return h.Header.Time
}
func (h headerInfo) MixDigest() common.Hash {
return h.Header.MixDigest
}
func (h headerInfo) BaseFee() *big.Int {
return h.Header.BaseFee
}
func (h headerInfo) ReceiptHash() common.Hash {
return h.Header.ReceiptHash
}
func (h headerInfo) GasUsed() uint64 {
return h.Header.GasUsed
}
func (h headerInfo) HeaderRLP() ([]byte, error) {
return rlp.EncodeToBytes(h.Header)
}
type rpcHeader struct {
@@ -149,7 +138,12 @@ func (hdr *rpcHeader) checkPostMerge() error {
}
func (hdr *rpcHeader) computeBlockHash() common.Hash {
-gethHeader := types.Header{
gethHeader := hdr.createGethHeader()
return gethHeader.Hash()
}
func (hdr *rpcHeader) createGethHeader() *types.Header {
return &types.Header{
ParentHash: hdr.ParentHash,
UncleHash: hdr.UncleHash,
Coinbase: hdr.Coinbase,
@@ -168,10 +162,9 @@ func (hdr *rpcHeader) computeBlockHash() common.Hash {
BaseFee: (*big.Int)(hdr.BaseFee),
WithdrawalsHash: hdr.WithdrawalsRoot,
}
-return gethHeader.Hash()
}
-func (hdr *rpcHeader) Info(trustCache bool, mustBePostMerge bool) (*HeaderInfo, error) {
func (hdr *rpcHeader) Info(trustCache bool, mustBePostMerge bool) (eth.BlockInfo, error) {
if mustBePostMerge {
if err := hdr.checkPostMerge(); err != nil {
return nil, err
@@ -182,22 +175,7 @@ func (hdr *rpcHeader) Info(trustCache bool, mustBePostMerge bool) (*HeaderInfo,
return nil, fmt.Errorf("failed to verify block hash: computed %s but RPC said %s", computed, hdr.Hash)
}
}
-info := HeaderInfo{
-hash: hdr.Hash,
-parentHash: hdr.ParentHash,
-coinbase: hdr.Coinbase,
-root: hdr.Root,
-number: uint64(hdr.Number),
-time: uint64(hdr.Time),
-mixDigest: hdr.MixDigest,
-baseFee: (*big.Int)(hdr.BaseFee),
-txHash: hdr.TxHash,
-receiptHash: hdr.ReceiptHash,
-gasUsed: uint64(hdr.GasUsed),
-withdrawalsRoot: hdr.WithdrawalsRoot,
-}
-return &info, nil
return &headerInfo{hdr.Hash, hdr.createGethHeader()}, nil
}
type rpcBlock struct {
@@ -215,7 +193,7 @@ func (block *rpcBlock) verify() error {
return nil
}
-func (block *rpcBlock) Info(trustCache bool, mustBePostMerge bool) (*HeaderInfo, types.Transactions, error) {
func (block *rpcBlock) Info(trustCache bool, mustBePostMerge bool) (eth.BlockInfo, types.Transactions, error) {
if mustBePostMerge {
if err := block.checkPostMerge(); err != nil {
return nil, nil, err
...
package testutils
import (
"errors"
"math/big"
"math/rand"
@@ -22,6 +23,7 @@ type MockBlockInfo struct {
InfoBaseFee *big.Int
InfoReceiptRoot common.Hash
InfoGasUsed uint64
InfoHeaderRLP []byte
}
func (l *MockBlockInfo) Hash() common.Hash {
@@ -68,6 +70,13 @@ func (l *MockBlockInfo) ID() eth.BlockID {
return eth.BlockID{Hash: l.InfoHash, Number: l.InfoNum}
}
func (l *MockBlockInfo) HeaderRLP() ([]byte, error) {
if l.InfoHeaderRLP == nil {
return nil, errors.New("header rlp not available")
}
return l.InfoHeaderRLP, nil
}
func (l *MockBlockInfo) BlockRef() eth.L1BlockRef {
return eth.L1BlockRef{
Hash: l.InfoHash,
...
@@ -83,8 +83,8 @@ func (m *MockEthClient) PayloadByNumber(ctx context.Context, n uint64) (*eth.Exe
return out[0].(*eth.ExecutionPayload), *out[1].(*error)
}
-func (m *MockEthClient) ExpectPayloadByNumber(hash common.Hash, payload *eth.ExecutionPayload, err error) {
-m.Mock.On("PayloadByNumber", hash).Once().Return(payload, &err)
func (m *MockEthClient) ExpectPayloadByNumber(n uint64, payload *eth.ExecutionPayload, err error) {
m.Mock.On("PayloadByNumber", n).Once().Return(payload, &err)
}
func (m *MockEthClient) PayloadByLabel(ctx context.Context, label eth.BlockLabel) (*eth.ExecutionPayload, error) {
...
@@ -4,6 +4,7 @@ import (
"math/rand"
"testing"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/stretchr/testify/require"
)
@@ -35,17 +36,17 @@ func TestCachingOracle_TransactionsByBlockHash(t *testing.T) {
block, _ := testutils.RandomBlock(rng, 3)
// Initial call retrieves from the stub
-stub.blocks[block.Hash()] = block
stub.blocks[block.Hash()] = eth.BlockToInfo(block)
stub.txs[block.Hash()] = block.Transactions()
actualBlock, actualTxs := oracle.TransactionsByBlockHash(block.Hash())
-require.Equal(t, block, actualBlock)
require.Equal(t, eth.BlockToInfo(block), actualBlock)
require.Equal(t, block.Transactions(), actualTxs)
// Later calls should retrieve from cache
delete(stub.blocks, block.Hash())
delete(stub.txs, block.Hash())
actualBlock, actualTxs = oracle.TransactionsByBlockHash(block.Hash())
-require.Equal(t, block, actualBlock)
require.Equal(t, eth.BlockToInfo(block), actualBlock)
require.Equal(t, block.Transactions(), actualTxs)
}
@@ -56,16 +57,16 @@ func TestCachingOracle_ReceiptsByBlockHash(t *testing.T) {
block, rcpts := testutils.RandomBlock(rng, 3)
// Initial call retrieves from the stub
-stub.blocks[block.Hash()] = block
stub.blocks[block.Hash()] = eth.BlockToInfo(block)
stub.rcpts[block.Hash()] = rcpts
actualBlock, actualRcpts := oracle.ReceiptsByBlockHash(block.Hash())
-require.Equal(t, block, actualBlock)
require.Equal(t, eth.BlockToInfo(block), actualBlock)
require.EqualValues(t, rcpts, actualRcpts)
// Later calls should retrieve from cache
delete(stub.blocks, block.Hash())
delete(stub.rcpts, block.Hash())
actualBlock, actualRcpts = oracle.ReceiptsByBlockHash(block.Hash())
-require.Equal(t, block, actualBlock)
require.Equal(t, eth.BlockToInfo(block), actualBlock)
require.EqualValues(t, rcpts, actualRcpts)
}
@@ -7,7 +7,6 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
-"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/ethereum/go-ethereum"
@@ -24,7 +23,7 @@ var head = blockNum(1000)
func TestInfoByHash(t *testing.T) {
client, oracle := newClient(t)
hash := common.HexToHash("0xAABBCC")
-expected := &sources.HeaderInfo{}
expected := &testutils.MockBlockInfo{}
oracle.blocks[hash] = expected
info, err := client.InfoByHash(context.Background(), hash)
@@ -35,7 +34,7 @@ func TestInfoByHash(t *testing.T) {
func TestL1BlockRefByHash(t *testing.T) {
client, oracle := newClient(t)
hash := common.HexToHash("0xAABBCC")
-header := &sources.HeaderInfo{}
header := &testutils.MockBlockInfo{}
oracle.blocks[hash] = header
expected := eth.InfoToL1BlockRef(header)
@@ -47,7 +46,7 @@ func TestL1BlockRefByHash(t *testing.T) {
func TestFetchReceipts(t *testing.T) {
client, oracle := newClient(t)
hash := common.HexToHash("0xAABBCC")
-expectedInfo := &sources.HeaderInfo{}
expectedInfo := &testutils.MockBlockInfo{}
expectedReceipts := types.Receipts{
&types.Receipt{},
}
@@ -63,7 +62,7 @@ func TestFetchReceipts(t *testing.T) {
func TestInfoAndTxsByHash(t *testing.T) {
client, oracle := newClient(t)
hash := common.HexToHash("0xAABBCC")
-expectedInfo := &sources.HeaderInfo{}
expectedInfo := &testutils.MockBlockInfo{}
expectedTxs := types.Transactions{
&types.Transaction{},
}
...
package l1
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-program/preimage"
)
type BlockHeaderHint common.Hash
var _ preimage.Hint = BlockHeaderHint{}
func (l BlockHeaderHint) Hint() string {
return "l1-block-header " + (common.Hash)(l).String()
}
type TransactionsHint common.Hash
var _ preimage.Hint = TransactionsHint{}
func (l TransactionsHint) Hint() string {
return "l1-transactions " + (common.Hash)(l).String()
}
type ReceiptsHint common.Hash
var _ preimage.Hint = ReceiptsHint{}
func (l ReceiptsHint) Hint() string {
return "l1-receipts " + (common.Hash)(l).String()
}
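For context on the hint format (not part of this diff): each hint is just a routable string, the type name followed by the block hash. A minimal illustration, assuming a hypothetical block hash:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"

	"github.com/ethereum-optimism/optimism/op-program/client/l1"
)

func main() {
	// Hypothetical L1 block hash, used purely to show the hint strings the host routes on.
	hash := common.HexToHash("0xaabbcc")
	fmt.Println(l1.BlockHeaderHint(hash).Hint())  // "l1-block-header 0x00...00aabbcc"
	fmt.Println(l1.TransactionsHint(hash).Hint()) // "l1-transactions 0x00...00aabbcc"
	fmt.Println(l1.ReceiptsHint(hash).Hint())     // "l1-receipts 0x00...00aabbcc"
}
```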
package l1 package l1
import ( import (
"github.com/ethereum-optimism/optimism/op-node/eth" "fmt"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-program/client/mpt"
"github.com/ethereum-optimism/optimism/op-program/preimage"
) )
type Oracle interface { type Oracle interface {
...@@ -16,3 +22,67 @@ type Oracle interface { ...@@ -16,3 +22,67 @@ type Oracle interface {
// ReceiptsByBlockHash retrieves the receipts from the block with the given hash. // ReceiptsByBlockHash retrieves the receipts from the block with the given hash.
ReceiptsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Receipts) ReceiptsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Receipts)
} }
// PreimageOracle implements Oracle by interfacing with the pure preimage.Oracle
// to fetch pre-images and decode them into the requested data.
type PreimageOracle struct {
oracle preimage.Oracle
hint preimage.Hinter
}
var _ Oracle = (*PreimageOracle)(nil)
func NewPreimageOracle(raw preimage.Oracle, hint preimage.Hinter) *PreimageOracle {
return &PreimageOracle{
oracle: raw,
hint: hint,
}
}
func (p *PreimageOracle) headerByBlockHash(blockHash common.Hash) *types.Header {
p.hint.Hint(BlockHeaderHint(blockHash))
headerRlp := p.oracle.Get(preimage.Keccak256Key(blockHash))
var header types.Header
if err := rlp.DecodeBytes(headerRlp, &header); err != nil {
panic(fmt.Errorf("invalid block header %s: %w", blockHash, err))
}
return &header
}
func (p *PreimageOracle) HeaderByBlockHash(blockHash common.Hash) eth.BlockInfo {
return eth.HeaderBlockInfo(p.headerByBlockHash(blockHash))
}
func (p *PreimageOracle) TransactionsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Transactions) {
header := p.headerByBlockHash(blockHash)
p.hint.Hint(TransactionsHint(blockHash))
opaqueTxs := mpt.ReadTrie(header.TxHash, func(key common.Hash) []byte {
return p.oracle.Get(preimage.Keccak256Key(key))
})
txs, err := eth.DecodeTransactions(opaqueTxs)
if err != nil {
panic(fmt.Errorf("failed to decode list of txs: %w", err))
}
return eth.HeaderBlockInfo(header), txs
}
func (p *PreimageOracle) ReceiptsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Receipts) {
info, txs := p.TransactionsByBlockHash(blockHash)
p.hint.Hint(ReceiptsHint(blockHash))
opaqueReceipts := mpt.ReadTrie(info.ReceiptHash(), func(key common.Hash) []byte {
return p.oracle.Get(preimage.Keccak256Key(key))
})
txHashes := eth.TransactionsToHashes(txs)
receipts, err := eth.DecodeRawReceipts(eth.ToBlockID(info), opaqueReceipts, txHashes)
if err != nil {
panic(fmt.Errorf("bad receipts data for block %s: %w", blockHash, err))
}
return info, receipts
}
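To see how this fits together end to end, here is a minimal, hypothetical wiring sketch (not part of this diff). It composes the l1.PreimageOracle with the raw preimage.OracleClient and preimage.HintWriter introduced later in this change; the two host connections (pOracle, pHinter) are assumptions standing in for whatever transport the program actually uses.

```go
package example

import (
	"io"

	"github.com/ethereum/go-ethereum/common"

	"github.com/ethereum-optimism/optimism/op-program/client/l1"
	"github.com/ethereum-optimism/optimism/op-program/preimage"
)

// newL1Oracle wires an L1 PreimageOracle on top of the raw pre-image protocol:
// pOracle carries key/length/payload pre-image requests, pHinter carries hint strings.
func newL1Oracle(pOracle io.ReadWriter, pHinter io.Writer) *l1.PreimageOracle {
	raw := preimage.NewOracleClient(pOracle)
	hinter := preimage.NewHintWriter(pHinter)
	return l1.NewPreimageOracle(raw, hinter)
}

// fetchTxs shows the call pattern: the oracle hints first, then reads and decodes
// the header and transaction-trie pre-images (panicking if any pre-image is missing).
func fetchTxs(oracle *l1.PreimageOracle, blockHash common.Hash) int {
	_, txs := oracle.TransactionsByBlockHash(blockHash)
	return len(txs)
}
```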
package l1
import (
"encoding/json"
"fmt"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/ethereum-optimism/optimism/op-program/client/mpt"
"github.com/ethereum-optimism/optimism/op-program/preimage"
)
// testBlock tests that the given block with receipts can be passed through the preimage oracle.
func testBlock(t *testing.T, block *types.Block, receipts []*types.Receipt) {
// Prepare the pre-images
preimages := make(map[common.Hash][]byte)
hdrBytes, err := rlp.EncodeToBytes(block.Header())
require.NoError(t, err)
preimages[preimage.Keccak256Key(block.Hash()).PreimageKey()] = hdrBytes
opaqueTxs, err := eth.EncodeTransactions(block.Transactions())
require.NoError(t, err)
_, txsNodes := mpt.WriteTrie(opaqueTxs)
for _, p := range txsNodes {
preimages[preimage.Keccak256Key(crypto.Keccak256Hash(p)).PreimageKey()] = p
}
opaqueReceipts, err := eth.EncodeReceipts(receipts)
require.NoError(t, err)
_, receiptNodes := mpt.WriteTrie(opaqueReceipts)
for _, p := range receiptNodes {
preimages[preimage.Keccak256Key(crypto.Keccak256Hash(p)).PreimageKey()] = p
}
// Prepare a raw mock pre-image oracle that will serve the pre-image data and handle hints
var hints mock.Mock
po := &PreimageOracle{
oracle: preimage.OracleFn(func(key preimage.Key) []byte {
v, ok := preimages[key.PreimageKey()]
require.True(t, ok, "preimage must exist")
return v
}),
hint: preimage.HinterFn(func(v preimage.Hint) {
hints.MethodCalled("hint", v.Hint())
}),
}
// Check if block-headers work
hints.On("hint", BlockHeaderHint(block.Hash()).Hint()).Once().Return()
gotHeader := po.HeaderByBlockHash(block.Hash())
hints.AssertExpectations(t)
got, err := json.MarshalIndent(gotHeader, " ", " ")
require.NoError(t, err)
expected, err := json.MarshalIndent(block.Header(), " ", " ")
require.NoError(t, err)
require.Equal(t, expected, got, "expecting matching headers")
// Check if blocks with txs work
hints.On("hint", BlockHeaderHint(block.Hash()).Hint()).Once().Return()
hints.On("hint", TransactionsHint(block.Hash()).Hint()).Once().Return()
inf, gotTxs := po.TransactionsByBlockHash(block.Hash())
hints.AssertExpectations(t)
require.Equal(t, inf.Hash(), block.Hash())
expectedTxs := block.Transactions()
require.Equal(t, len(expectedTxs), len(gotTxs), "expecting equal tx list length")
for i, tx := range gotTxs {
require.Equalf(t, tx.Hash(), expectedTxs[i].Hash(), "expecting tx %d to match", i)
}
// Check if blocks with receipts work
hints.On("hint", BlockHeaderHint(block.Hash()).Hint()).Once().Return()
hints.On("hint", TransactionsHint(block.Hash()).Hint()).Once().Return()
hints.On("hint", ReceiptsHint(block.Hash()).Hint()).Once().Return()
inf, gotReceipts := po.ReceiptsByBlockHash(block.Hash())
hints.AssertExpectations(t)
require.Equal(t, inf.Hash(), block.Hash())
require.Equal(t, len(receipts), len(gotReceipts), "expecting equal receipts list length")
for i, r := range gotReceipts {
require.Equalf(t, r.TxHash, expectedTxs[i].Hash(), "expecting receipt to match tx %d", i)
}
}
func TestPreimageOracleBlockByHash(t *testing.T) {
rng := rand.New(rand.NewSource(123))
for i := 0; i < 10; i++ {
block, receipts := testutils.RandomBlock(rng, 10)
t.Run(fmt.Sprintf("block_%d", i), func(t *testing.T) {
testBlock(t, block, receipts)
})
}
}
...@@ -9,6 +9,7 @@ import ( ...@@ -9,6 +9,7 @@ import (
"github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi" "github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi"
"github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi/test" "github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi/test"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
...@@ -149,6 +150,9 @@ func setupOracle(t *testing.T, blockCount int, headBlockNumber int) (*params.Cha ...@@ -149,6 +150,9 @@ func setupOracle(t *testing.T, blockCount int, headBlockNumber int) (*params.Cha
L2BlockTime: 2, L2BlockTime: 2,
FundDevAccounts: true, FundDevAccounts: true,
L2GenesisBlockGasLimit: 30_000_000, L2GenesisBlockGasLimit: 30_000_000,
// Arbitrary non-zero difficulty in genesis.
// This is slightly weird for a chain starting post-merge, but it happens, so we need to make sure it works.
L2GenesisBlockDifficulty: (*hexutil.Big)(big.NewInt(100)),
} }
l1Genesis, err := genesis.NewL1Genesis(deployConfig) l1Genesis, err := genesis.NewL1Genesis(deployConfig)
require.NoError(t, err) require.NoError(t, err)
......
...@@ -207,7 +207,7 @@ func (ea *L2EngineAPI) ForkchoiceUpdatedV1(ctx context.Context, state *eth.Forkc ...@@ -207,7 +207,7 @@ func (ea *L2EngineAPI) ForkchoiceUpdatedV1(ctx context.Context, state *eth.Forkc
// Block is known locally, just sanity check that the beacon client does not // Block is known locally, just sanity check that the beacon client does not
// attempt to push us back to before the merge. // attempt to push us back to before the merge.
// Note: Differs from op-geth implementation as pre-merge blocks are never supported here // Note: Differs from op-geth implementation as pre-merge blocks are never supported here
if block.Difficulty().BitLen() > 0 { if block.Difficulty().BitLen() > 0 && block.NumberU64() > 0 {
return STATUS_INVALID, errors.New("pre-merge blocks not supported") return STATUS_INVALID, errors.New("pre-merge blocks not supported")
} }
valid := func(id *engine.PayloadID) *eth.ForkchoiceUpdatedResult { valid := func(id *engine.PayloadID) *eth.ForkchoiceUpdatedResult {
......
package l2
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-program/preimage"
)
type BlockHeaderHint common.Hash
var _ preimage.Hint = BlockHeaderHint{}
func (l BlockHeaderHint) Hint() string {
return "l2-block-header " + (common.Hash)(l).String()
}
type TransactionsHint common.Hash
var _ preimage.Hint = TransactionsHint{}
func (l TransactionsHint) Hint() string {
return "l2-transactions " + (common.Hash)(l).String()
}
type CodeHint common.Hash
var _ preimage.Hint = CodeHint{}
func (l CodeHint) Hint() string {
return "l2-code " + (common.Hash)(l).String()
}
type StateNodeHint common.Hash
var _ preimage.Hint = StateNodeHint{}
func (l StateNodeHint) Hint() string {
return "l2-state-node " + (common.Hash)(l).String()
}
package l2 package l2
import ( import (
"fmt"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-program/client/mpt"
"github.com/ethereum-optimism/optimism/op-program/preimage"
) )
// StateOracle defines the high-level API used to retrieve L2 state data pre-images // StateOracle defines the high-level API used to retrieve L2 state data pre-images
...@@ -26,3 +33,55 @@ type Oracle interface { ...@@ -26,3 +33,55 @@ type Oracle interface {
// BlockByHash retrieves the block with the given hash. // BlockByHash retrieves the block with the given hash.
BlockByHash(blockHash common.Hash) *types.Block BlockByHash(blockHash common.Hash) *types.Block
} }
// PreimageOracle implements Oracle by interfacing with the pure preimage.Oracle
// to fetch pre-images and decode them into the requested data.
type PreimageOracle struct {
oracle preimage.Oracle
hint preimage.Hinter
}
var _ Oracle = (*PreimageOracle)(nil)
func NewPreimageOracle(raw preimage.Oracle, hint preimage.Hinter) *PreimageOracle {
return &PreimageOracle{
oracle: raw,
hint: hint,
}
}
func (p *PreimageOracle) headerByBlockHash(blockHash common.Hash) *types.Header {
p.hint.Hint(BlockHeaderHint(blockHash))
headerRlp := p.oracle.Get(preimage.Keccak256Key(blockHash))
var header types.Header
if err := rlp.DecodeBytes(headerRlp, &header); err != nil {
panic(fmt.Errorf("invalid block header %s: %w", blockHash, err))
}
return &header
}
func (p *PreimageOracle) BlockByHash(blockHash common.Hash) *types.Block {
header := p.headerByBlockHash(blockHash)
p.hint.Hint(TransactionsHint(blockHash))
opaqueTxs := mpt.ReadTrie(header.TxHash, func(key common.Hash) []byte {
return p.oracle.Get(preimage.Keccak256Key(key))
})
txs, err := eth.DecodeTransactions(opaqueTxs)
if err != nil {
panic(fmt.Errorf("failed to decode list of txs: %w", err))
}
return types.NewBlockWithHeader(header).WithBody(txs, nil)
}
func (p *PreimageOracle) NodeByHash(nodeHash common.Hash) []byte {
p.hint.Hint(StateNodeHint(nodeHash))
return p.oracle.Get(preimage.Keccak256Key(nodeHash))
}
func (p *PreimageOracle) CodeByHash(codeHash common.Hash) []byte {
p.hint.Hint(CodeHint(codeHash))
return p.oracle.Get(preimage.Keccak256Key(codeHash))
}
package l2
import (
"fmt"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/ethereum-optimism/optimism/op-program/client/mpt"
"github.com/ethereum-optimism/optimism/op-program/preimage"
)
func mockPreimageOracle(t *testing.T) (po *PreimageOracle, hintsMock *mock.Mock, preimages map[common.Hash][]byte) {
// Prepare the pre-images
preimages = make(map[common.Hash][]byte)
hintsMock = new(mock.Mock)
po = &PreimageOracle{
oracle: preimage.OracleFn(func(key preimage.Key) []byte {
v, ok := preimages[key.PreimageKey()]
require.True(t, ok, "preimage must exist")
return v
}),
hint: preimage.HinterFn(func(v preimage.Hint) {
hintsMock.MethodCalled("hint", v.Hint())
}),
}
return po, hintsMock, preimages
}
// testBlock tests that the given block can be passed through the preimage oracle.
func testBlock(t *testing.T, block *types.Block) {
po, hints, preimages := mockPreimageOracle(t)
hdrBytes, err := rlp.EncodeToBytes(block.Header())
require.NoError(t, err)
preimages[preimage.Keccak256Key(block.Hash()).PreimageKey()] = hdrBytes
opaqueTxs, err := eth.EncodeTransactions(block.Transactions())
require.NoError(t, err)
_, txsNodes := mpt.WriteTrie(opaqueTxs)
for _, p := range txsNodes {
preimages[preimage.Keccak256Key(crypto.Keccak256Hash(p)).PreimageKey()] = p
}
// Prepare a raw mock pre-image oracle that will serve the pre-image data and handle hints
// Check if blocks with txs work
hints.On("hint", BlockHeaderHint(block.Hash()).Hint()).Once().Return()
hints.On("hint", TransactionsHint(block.Hash()).Hint()).Once().Return()
gotBlock := po.BlockByHash(block.Hash())
hints.AssertExpectations(t)
require.Equal(t, gotBlock.Hash(), block.Hash())
expectedTxs := block.Transactions()
require.Equal(t, len(expectedTxs), len(gotBlock.Transactions()), "expecting equal tx list length")
for i, tx := range gotBlock.Transactions() {
require.Equalf(t, tx.Hash(), expectedTxs[i].Hash(), "expecting tx %d to match", i)
}
}
func TestPreimageOracleBlockByHash(t *testing.T) {
rng := rand.New(rand.NewSource(123))
for i := 0; i < 10; i++ {
block, _ := testutils.RandomBlock(rng, 10)
t.Run(fmt.Sprintf("block_%d", i), func(t *testing.T) {
testBlock(t, block)
})
}
}
func TestPreimageOracleNodeByHash(t *testing.T) {
rng := rand.New(rand.NewSource(123))
for i := 0; i < 10; i++ {
t.Run(fmt.Sprintf("node_%d", i), func(t *testing.T) {
po, hints, preimages := mockPreimageOracle(t)
node := make([]byte, 123)
rng.Read(node)
h := crypto.Keccak256Hash(node)
preimages[preimage.Keccak256Key(h).PreimageKey()] = node
hints.On("hint", StateNodeHint(h).Hint()).Once().Return()
gotNode := po.NodeByHash(h)
hints.AssertExpectations(t)
require.Equal(t, hexutil.Bytes(node), hexutil.Bytes(gotNode), "node matches")
})
}
}
func TestPreimageOracleCodeByHash(t *testing.T) {
rng := rand.New(rand.NewSource(123))
for i := 0; i < 10; i++ {
t.Run(fmt.Sprintf("code_%d", i), func(t *testing.T) {
po, hints, preimages := mockPreimageOracle(t)
node := make([]byte, 123)
rng.Read(node)
h := crypto.Keccak256Hash(node)
preimages[preimage.Keccak256Key(h).PreimageKey()] = node
hints.On("hint", CodeHint(h).Hint()).Once().Return()
gotNode := po.CodeByHash(h)
hints.AssertExpectations(t)
require.Equal(t, hexutil.Bytes(node), hexutil.Bytes(gotNode), "code matches")
})
}
}
package mpt
import "github.com/ethereum/go-ethereum/ethdb"
type Hooks struct {
Get func(key []byte) []byte
Put func(key []byte, value []byte)
Delete func(key []byte)
}
// DB implements the ethdb.Database to back the StateDB of Geth.
type DB struct {
db Hooks
}
func (p *DB) Has(key []byte) (bool, error) {
panic("not supported")
}
func (p *DB) Get(key []byte) ([]byte, error) {
return p.db.Get(key), nil
}
func (p *DB) Put(key []byte, value []byte) error {
p.db.Put(key, value)
return nil
}
func (p DB) Delete(key []byte) error {
p.db.Delete(key)
return nil
}
func (p DB) Stat(property string) (string, error) {
panic("not supported")
}
func (p DB) NewBatch() ethdb.Batch {
panic("not supported")
}
func (p DB) NewBatchWithSize(size int) ethdb.Batch {
panic("not supported")
}
func (p DB) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
panic("not supported")
}
func (p DB) Compact(start []byte, limit []byte) error {
return nil // no-op
}
func (p DB) NewSnapshot() (ethdb.Snapshot, error) {
panic("not supported")
}
func (p DB) Close() error {
return nil
}
// We implement the full ethdb.Database bloat because the StateDB takes this full interface,
// even though it only uses the KeyValue subset.
func (p *DB) HasAncient(kind string, number uint64) (bool, error) {
panic("not supported")
}
func (p *DB) Ancient(kind string, number uint64) ([]byte, error) {
panic("not supported")
}
func (p *DB) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
panic("not supported")
}
func (p *DB) Ancients() (uint64, error) {
panic("not supported")
}
func (p *DB) Tail() (uint64, error) {
panic("not supported")
}
func (p *DB) AncientSize(kind string) (uint64, error) {
panic("not supported")
}
func (p *DB) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) {
panic("not supported")
}
func (p *DB) ModifyAncients(f func(ethdb.AncientWriteOp) error) (int64, error) {
panic("not supported")
}
func (p *DB) TruncateHead(n uint64) error {
panic("not supported")
}
func (p *DB) TruncateTail(n uint64) error {
panic("not supported")
}
func (p *DB) Sync() error {
panic("not supported")
}
func (p *DB) MigrateTable(s string, f func([]byte) ([]byte, error)) error {
panic("not supported")
}
func (p *DB) AncientDatadir() (string, error) {
panic("not supported")
}
var _ ethdb.KeyValueStore = (*DB)(nil)
package mpt
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
// ReadTrie takes a Merkle Patricia Trie (MPT) root of a "DerivableList", and a pre-image oracle getter,
// and traverses the implied MPT to collect all raw leaf nodes in order, which are then returned.
func ReadTrie(root common.Hash, getPreimage func(key common.Hash) []byte) []hexutil.Bytes {
odb := &DB{db: Hooks{
Get: func(key []byte) []byte {
if len(key) != 32 {
panic(fmt.Errorf("expected 32 byte key query, but got %d bytes: %x", len(key), key))
}
return getPreimage(*(*[32]byte)(key))
},
Put: func(key []byte, value []byte) {
panic("put not supported")
},
Delete: func(key []byte) {
panic("delete not supported")
},
}}
// trie.New backed with a trie.NodeReader and trie.Reader seems really promising
// for a simple node-fetching backend, but the interface is half-private,
// while we already have the full database code for doing the same thing.
// Maybe it's still worth a small diff in geth to expose it?
// Diff would be:
//
// type Node = node
//
// func DecodeNode(hash, buf []byte) (node, error) {
// return decodeNode(hash, buf)
// }
//
// And then still some code here to implement the trie.NodeReader and trie.Reader
// interfaces to map to the getPreimageFunction.
//
// For now we just use the state DB trie approach.
tdb := trie.NewDatabase(odb)
tr, err := trie.New(trie.TrieID(root), tdb)
if err != nil {
panic(err)
}
iter := tr.NodeIterator(nil)
// With small lists the iterator seems to use 0x80 (RLP empty string, unlike the others)
// as key for item 0, causing it to come last.
// Let's just remember the keys, and reorder them in the canonical order, to ensure it is correct.
var values [][]byte
var keys []uint64
for iter.Next(true) {
if iter.Leaf() {
k := iter.LeafKey()
var x uint64
err := rlp.DecodeBytes(k, &x)
if err != nil {
panic(fmt.Errorf("invalid key: %w", err))
}
keys = append(keys, x)
values = append(values, iter.LeafBlob())
}
}
out := make([]hexutil.Bytes, len(values))
for i, x := range keys {
if x >= uint64(len(values)) {
panic(fmt.Errorf("bad key: %d", x))
}
if out[x] != nil {
panic(fmt.Errorf("duplicate key %d", x))
}
out[x] = values[i]
}
return out
}
type rawList []hexutil.Bytes
func (r rawList) Len() int {
return len(r)
}
func (r rawList) EncodeIndex(i int, buf *bytes.Buffer) {
buf.Write(r[i])
}
var _ types.DerivableList = rawList(nil)
type noResetHasher struct {
*trie.StackTrie
}
// Reset is intercepted and is a no-op, because we want to retain the writing function when calling types.DeriveSha
func (n noResetHasher) Reset() {}
// WriteTrie takes a list of values, and merkleizes them as a "DerivableList":
// a Merkle Patricia Trie (MPT) with values keyed by their RLP encoded index.
// This merkleization matches that of transactions, receipts, and withdrawals lists in the block header
// (at least up to the Shanghai L1 update).
// This then returns the MPT root and a list of pre-images of the trie.
// Note: empty values are illegal, and there may be fewer pre-images returned than values,
// if any values are less than 32 bytes and fit into branch-node slots that way.
func WriteTrie(values []hexutil.Bytes) (common.Hash, []hexutil.Bytes) {
var out []hexutil.Bytes
st := noResetHasher{trie.NewStackTrie(
func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
out = append(out, common.CopyBytes(blob)) // the stack hasher may mutate the blob bytes, so copy them.
})}
root := types.DeriveSha(rawList(values), st)
return root, out
}
package mpt
import (
"fmt"
"math/rand"
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
)
type trieCase struct {
name string
elements []hexutil.Bytes
}
func (tc *trieCase) run(t *testing.T) {
root, preimages := WriteTrie(tc.elements)
byHash := make(map[common.Hash][]byte)
for _, v := range preimages {
k := crypto.Keccak256Hash(v)
byHash[k] = v
}
results := ReadTrie(root, func(key common.Hash) []byte {
v, ok := byHash[key]
if !ok {
panic(fmt.Errorf("missing key %s", key))
}
return v
})
require.Equal(t, len(tc.elements), len(results), "expected equal amount of values")
for i, result := range results {
// hex encoded for debugging readability
require.Equal(t, tc.elements[i].String(), result.String(),
"value %d does not match, expected equal value data", i)
}
}
func TestListTrieRoundtrip(t *testing.T) {
testCases := []trieCase{
{name: "empty list", elements: []hexutil.Bytes{}},
{name: "nil list", elements: nil},
{name: "simple", elements: []hexutil.Bytes{[]byte("hello"), []byte("world")}},
}
rng := rand.New(rand.NewSource(1234))
// add some randomized cases
for i := 0; i < 30; i++ {
n := rng.Intn(300)
elems := make([]hexutil.Bytes, n)
for i := range elems {
length := 1 + rng.Intn(300) // empty items not allowed
data := make([]byte, length)
rng.Read(data[:])
elems[i] = data
}
testCases = append(testCases, trieCase{name: fmt.Sprintf("rand_%d", i), elements: elems})
}
for _, tc := range testCases {
t.Run(tc.name, tc.run)
}
}
...@@ -9,6 +9,7 @@ import ( ...@@ -9,6 +9,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/sources" "github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/testutils"
cll1 "github.com/ethereum-optimism/optimism/op-program/client/l1" cll1 "github.com/ethereum-optimism/optimism/op-program/client/l1"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
...@@ -24,7 +25,7 @@ var _ Source = (*sources.L1Client)(nil) ...@@ -24,7 +25,7 @@ var _ Source = (*sources.L1Client)(nil)
func TestHeaderByHash(t *testing.T) { func TestHeaderByHash(t *testing.T) {
t.Run("Success", func(t *testing.T) { t.Run("Success", func(t *testing.T) {
expected := &sources.HeaderInfo{} expected := &testutils.MockBlockInfo{}
source := &stubSource{nextInfo: expected} source := &stubSource{nextInfo: expected}
oracle := newFetchingOracle(t, source) oracle := newFetchingOracle(t, source)
...@@ -54,7 +55,7 @@ func TestHeaderByHash(t *testing.T) { ...@@ -54,7 +55,7 @@ func TestHeaderByHash(t *testing.T) {
func TestTransactionsByHash(t *testing.T) { func TestTransactionsByHash(t *testing.T) {
t.Run("Success", func(t *testing.T) { t.Run("Success", func(t *testing.T) {
expectedInfo := &sources.HeaderInfo{} expectedInfo := &testutils.MockBlockInfo{}
expectedTxs := types.Transactions{ expectedTxs := types.Transactions{
&types.Transaction{}, &types.Transaction{},
} }
...@@ -75,7 +76,7 @@ func TestTransactionsByHash(t *testing.T) { ...@@ -75,7 +76,7 @@ func TestTransactionsByHash(t *testing.T) {
}) })
t.Run("UnknownBlock_NoTxs", func(t *testing.T) { t.Run("UnknownBlock_NoTxs", func(t *testing.T) {
oracle := newFetchingOracle(t, &stubSource{nextInfo: &sources.HeaderInfo{}}) oracle := newFetchingOracle(t, &stubSource{nextInfo: &testutils.MockBlockInfo{}})
hash := common.HexToHash("0x4455") hash := common.HexToHash("0x4455")
require.PanicsWithError(t, fmt.Errorf("unknown block: %s", hash).Error(), func() { require.PanicsWithError(t, fmt.Errorf("unknown block: %s", hash).Error(), func() {
oracle.TransactionsByBlockHash(hash) oracle.TransactionsByBlockHash(hash)
...@@ -96,7 +97,7 @@ func TestTransactionsByHash(t *testing.T) { ...@@ -96,7 +97,7 @@ func TestTransactionsByHash(t *testing.T) {
func TestReceiptsByHash(t *testing.T) { func TestReceiptsByHash(t *testing.T) {
t.Run("Success", func(t *testing.T) { t.Run("Success", func(t *testing.T) {
expectedInfo := &sources.HeaderInfo{} expectedInfo := &testutils.MockBlockInfo{}
expectedRcpts := types.Receipts{ expectedRcpts := types.Receipts{
&types.Receipt{}, &types.Receipt{},
} }
...@@ -117,7 +118,7 @@ func TestReceiptsByHash(t *testing.T) { ...@@ -117,7 +118,7 @@ func TestReceiptsByHash(t *testing.T) {
}) })
t.Run("UnknownBlock_NoTxs", func(t *testing.T) { t.Run("UnknownBlock_NoTxs", func(t *testing.T) {
oracle := newFetchingOracle(t, &stubSource{nextInfo: &sources.HeaderInfo{}}) oracle := newFetchingOracle(t, &stubSource{nextInfo: &testutils.MockBlockInfo{}})
hash := common.HexToHash("0x4455") hash := common.HexToHash("0x4455")
require.PanicsWithError(t, fmt.Errorf("unknown block: %s", hash).Error(), func() { require.PanicsWithError(t, fmt.Errorf("unknown block: %s", hash).Error(), func() {
oracle.ReceiptsByBlockHash(hash) oracle.ReceiptsByBlockHash(hash)
......
package preimage
import (
"encoding/binary"
"fmt"
"io"
)
// HintWriter writes hints to an io.Writer (e.g. a special file descriptor, or a debug log),
// for a pre-image oracle service to prepare specific pre-images.
type HintWriter struct {
w io.Writer
}
var _ Hinter = (*HintWriter)(nil)
func NewHintWriter(w io.Writer) *HintWriter {
return &HintWriter{w: w}
}
func (hw *HintWriter) Hint(v Hint) {
hint := v.Hint()
var hintBytes []byte
hintBytes = binary.BigEndian.AppendUint32(hintBytes, uint32(len(hint)))
hintBytes = append(hintBytes, []byte(hint)...)
hintBytes = append(hintBytes, 0) // trailing no-op byte; the reader consumes it after handling the hint, unblocking the writer
_, err := hw.w.Write(hintBytes)
if err != nil {
panic(fmt.Errorf("failed to write pre-image hint: %w", err))
}
}
// HintReader reads the hints of HintWriter and passes them to a router for preparation of the requested pre-images.
// Onchain, the written hints are a no-op.
type HintReader struct {
r io.Reader
}
func NewHintReader(r io.Reader) *HintReader {
return &HintReader{r: r}
}
func (hr *HintReader) NextHint(router func(hint string) error) error {
var length uint32
if err := binary.Read(hr.r, binary.BigEndian, &length); err != nil {
if err == io.EOF {
return io.EOF
}
return fmt.Errorf("failed to read hint length prefix: %w", err)
}
payload := make([]byte, length)
if length > 0 {
if _, err := io.ReadFull(hr.r, payload); err != nil {
return fmt.Errorf("failed to read hint payload (length %d): %w", length, err)
}
}
if err := router(string(payload)); err != nil {
return fmt.Errorf("failed to handle hint: %w", err)
}
if _, err := hr.r.Read([]byte{0}); err != nil {
return fmt.Errorf("failed to read trailing no-op byte to unblock hint writer: %w", err)
}
return nil
}
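For clarity, the wire format produced by HintWriter and consumed by NextHint is: a 4-byte big-endian length prefix, the hint payload, then a single zero byte that the reader consumes after routing the hint. A minimal sketch, using a throwaway rawHint type like the one in the test below:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum-optimism/optimism/op-program/preimage"
)

type rawHint string

func (h rawHint) Hint() string { return string(h) }

func main() {
	var buf bytes.Buffer
	preimage.NewHintWriter(&buf).Hint(rawHint("hello"))
	// 4-byte big-endian length, payload, trailing zero byte:
	fmt.Printf("% x\n", buf.Bytes()) // 00 00 00 05 68 65 6c 6c 6f 00
}
```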
package preimage
import (
"bytes"
"crypto/rand"
"io"
"testing"
"github.com/stretchr/testify/require"
)
type rawHint string
func (rh rawHint) Hint() string {
return string(rh)
}
func TestHints(t *testing.T) {
// Note: pretty much every string is valid communication:
// length, payload, 0. Worst case you run out of data, or allocate too much.
testHint := func(hints ...string) {
var buf bytes.Buffer
hw := NewHintWriter(&buf)
for _, h := range hints {
hw.Hint(rawHint(h))
}
hr := NewHintReader(&buf)
var got []string
for i := 0; i < 100; i++ { // sanity limit
err := hr.NextHint(func(hint string) error {
got = append(got, hint)
return nil
})
if err == io.EOF {
break
}
require.NoError(t, err)
}
require.Equal(t, len(hints), len(got), "got all hints")
for i, h := range hints {
require.Equal(t, h, got[i], "hints match")
}
}
t.Run("empty hint", func(t *testing.T) {
testHint("")
})
t.Run("hello world", func(t *testing.T) {
testHint("hello world")
})
t.Run("zero byte", func(t *testing.T) {
testHint(string([]byte{0}))
})
t.Run("many zeroes", func(t *testing.T) {
testHint(string(make([]byte, 1000)))
})
t.Run("random data", func(t *testing.T) {
dat := make([]byte, 1000)
_, _ = rand.Read(dat[:])
testHint(string(dat))
})
t.Run("multiple hints", func(t *testing.T) {
testHint("give me header a", "also header b", "foo bar")
})
t.Run("unexpected EOF", func(t *testing.T) {
var buf bytes.Buffer
hw := NewHintWriter(&buf)
hw.Hint(rawHint("hello"))
_, _ = buf.Read(make([]byte, 1)) // read one byte so it falls short, see if it's detected
hr := NewHintReader(&buf)
err := hr.NextHint(func(hint string) error { return nil })
require.ErrorIs(t, err, io.ErrUnexpectedEOF)
})
}
package preimage
import (
"encoding/binary"
"github.com/ethereum/go-ethereum/common"
)
type Key interface {
// PreimageKey changes the Key commitment into a
// 32-byte type-prefixed preimage key.
PreimageKey() common.Hash
}
type Oracle interface {
// Get the full pre-image of a given pre-image key.
// This returns no error: the client state-transition
// is invalid if there is any missing pre-image data.
Get(key Key) []byte
}
type OracleFn func(key Key) []byte
func (fn OracleFn) Get(key Key) []byte {
return fn(key)
}
// KeyType is the key-type of a pre-image, used to prefix the pre-image key with.
type KeyType byte
const (
// The zero key type is illegal to use, ensuring all keys are non-zero.
_ KeyType = 0
// LocalKeyType is for input-type pre-images, specific to the local program instance.
LocalKeyType KeyType = 1
// Keccak256KeyType is for keccak256 pre-images, for any global shared pre-images.
Keccak256KeyType KeyType = 2
)
// LocalIndexKey is a key local to the program, indexing a special program input.
type LocalIndexKey uint64
func (k LocalIndexKey) PreimageKey() (out common.Hash) {
out[0] = byte(LocalKeyType)
binary.BigEndian.PutUint64(out[24:], uint64(k))
return
}
// Keccak256Key wraps a keccak256 hash to use it as a typed pre-image key.
type Keccak256Key common.Hash
func (k Keccak256Key) PreimageKey() (out common.Hash) {
out = common.Hash(k) // copy the keccak hash
out[0] = byte(Keccak256KeyType) // apply prefix
return
}
// Hint is an interface to enable any program type to function as a hint,
// when passed to the Hinter interface, returning a string representation
// of what data the host should prepare pre-images for.
type Hint interface {
Hint() string
}
// Hinter is an interface to write hints to the host.
// This may be implemented as a no-op or logging hinter
// if the program is executing in a read-only environment
// where the host is expected to have all pre-images ready.
type Hinter interface {
Hint(v Hint)
}
type HinterFn func(v Hint)
func (fn HinterFn) Hint(v Hint) {
fn(v)
}
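A worked example of the key encoding (illustrative only): Keccak256Key keeps the keccak256 hash but overwrites byte 0 with the key-type prefix, while LocalIndexKey puts its type prefix in byte 0 and the big-endian index in the last 8 bytes.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"

	"github.com/ethereum-optimism/optimism/op-program/preimage"
)

func main() {
	// Keccak256Key: the keccak256 hash with byte 0 replaced by the type prefix (2).
	h := crypto.Keccak256Hash([]byte("some pre-image"))
	fmt.Println(preimage.Keccak256Key(h).PreimageKey()) // 0x02 followed by the remaining 31 bytes of h

	// LocalIndexKey: type prefix (1) in byte 0, big-endian index in bytes 24..31.
	fmt.Println(preimage.LocalIndexKey(7).PreimageKey()) // 0x0100...0000000000000007
}
```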
package preimage
import (
"encoding/binary"
"fmt"
"io"
"github.com/ethereum/go-ethereum/common"
)
// OracleClient implements the Oracle by writing the pre-image key to the given stream,
// and reading back a length-prefixed value.
type OracleClient struct {
rw io.ReadWriter
}
func NewOracleClient(rw io.ReadWriter) *OracleClient {
return &OracleClient{rw: rw}
}
var _ Oracle = (*OracleClient)(nil)
func (o *OracleClient) Get(key Key) []byte {
h := key.PreimageKey()
if _, err := o.rw.Write(h[:]); err != nil {
panic(fmt.Errorf("failed to write key %s (%T) to pre-image oracle: %w", key, key, err))
}
var length uint64
if err := binary.Read(o.rw, binary.BigEndian, &length); err != nil {
panic(fmt.Errorf("failed to read pre-image length of key %s (%T) from pre-image oracle: %w", key, key, err))
}
payload := make([]byte, length)
if _, err := io.ReadFull(o.rw, payload); err != nil {
panic(fmt.Errorf("failed to read pre-image payload (length %d) of key %s (%T) from pre-image oracle: %w", length, key, key, err))
}
return payload
}
// OracleServer serves the pre-image requests of the OracleClient, implementing the same protocol as the onchain VM.
type OracleServer struct {
rw io.ReadWriter
}
func NewOracleServer(rw io.ReadWriter) *OracleServer {
return &OracleServer{rw: rw}
}
func (o *OracleServer) NextPreimageRequest(getPreimage func(key common.Hash) ([]byte, error)) error {
var key common.Hash
if _, err := io.ReadFull(o.rw, key[:]); err != nil {
if err == io.EOF {
return io.EOF
}
return fmt.Errorf("failed to read requested pre-image key: %w", err)
}
value, err := getPreimage(key)
if err != nil {
return fmt.Errorf("failed to serve pre-image %s request: %w", key, err)
}
if err := binary.Write(o.rw, binary.BigEndian, uint64(len(value))); err != nil {
return fmt.Errorf("failed to write length-prefix %d: %w", len(value), err)
}
if len(value) == 0 {
return nil
}
if _, err := o.rw.Write(value); err != nil {
return fmt.Errorf("failed to write pre-image value (%d long): %w", len(value), err)
}
return nil
}
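A sketch of how a host might drive OracleServer (hypothetical; the preimages map stands in for the host's real pre-image store): keep answering requests until the client side of the connection closes.

```go
package example

import (
	"errors"
	"io"

	"github.com/ethereum/go-ethereum/common"

	"github.com/ethereum-optimism/optimism/op-program/preimage"
)

// servePreimages answers pre-image requests from conn until the client hangs up.
func servePreimages(conn io.ReadWriter, preimages map[common.Hash][]byte) error {
	srv := preimage.NewOracleServer(conn)
	for {
		err := srv.NextPreimageRequest(func(key common.Hash) ([]byte, error) {
			dat, ok := preimages[key]
			if !ok {
				return nil, errors.New("unknown pre-image key " + key.String())
			}
			return dat, nil
		})
		if err == io.EOF {
			return nil // client closed the connection
		}
		if err != nil {
			return err
		}
	}
}
```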
package preimage
import (
"bytes"
"crypto/rand"
"fmt"
"io"
"sync"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/require"
)
type readWritePair struct {
io.Reader
io.Writer
}
func bidirectionalPipe() (a, b io.ReadWriter) {
ar, bw := io.Pipe()
br, aw := io.Pipe()
return readWritePair{Reader: ar, Writer: aw}, readWritePair{Reader: br, Writer: bw}
}
func TestOracle(t *testing.T) {
testPreimage := func(preimages ...[]byte) {
a, b := bidirectionalPipe()
cl := NewOracleClient(a)
srv := NewOracleServer(b)
preimageByHash := make(map[common.Hash][]byte)
for _, p := range preimages {
k := Keccak256Key(crypto.Keccak256Hash(p))
preimageByHash[k.PreimageKey()] = p
}
for _, p := range preimages {
k := Keccak256Key(crypto.Keccak256Hash(p))
var wg sync.WaitGroup
wg.Add(2)
go func(k Key, p []byte) {
result := cl.Get(k)
wg.Done()
expected := preimageByHash[k.PreimageKey()]
require.True(t, bytes.Equal(expected, result), "need correct preimage %x, got %x", expected, result)
}(k, p)
go func() {
err := srv.NextPreimageRequest(func(key common.Hash) ([]byte, error) {
dat, ok := preimageByHash[key]
if !ok {
return nil, fmt.Errorf("cannot find %s", key)
}
return dat, nil
})
wg.Done()
require.NoError(t, err)
}()
wg.Wait()
}
}
t.Run("empty preimage", func(t *testing.T) {
testPreimage([]byte{})
})
t.Run("nil preimage", func(t *testing.T) {
testPreimage(nil)
})
t.Run("zero", func(t *testing.T) {
testPreimage([]byte{0})
})
t.Run("multiple", func(t *testing.T) {
testPreimage([]byte("tx from alice"), []byte{0x13, 0x37}, []byte("tx from bob"))
})
t.Run("zeroes", func(t *testing.T) {
testPreimage(make([]byte, 1000))
})
t.Run("random", func(t *testing.T) {
dat := make([]byte, 1000)
_, _ = rand.Read(dat[:])
testPreimage(dat)
})
}
package doc
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/ethereum-optimism/optimism/op-proposer/metrics"
"github.com/olekukonko/tablewriter"
"github.com/urfave/cli"
)
var Subcommands = cli.Commands{
{
Name: "metrics",
Usage: "Dumps a list of supported metrics to stdout",
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Value: "markdown",
Usage: "Output format (json|markdown)",
},
},
Action: func(ctx *cli.Context) error {
m := metrics.NewMetrics("default")
supportedMetrics := m.Document()
format := ctx.String("format")
if format != "markdown" && format != "json" {
return fmt.Errorf("invalid format: %s", format)
}
if format == "json" {
enc := json.NewEncoder(os.Stdout)
return enc.Encode(supportedMetrics)
}
table := tablewriter.NewWriter(os.Stdout)
table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
table.SetCenterSeparator("|")
table.SetAutoWrapText(false)
table.SetHeader([]string{"Metric", "Description", "Labels", "Type"})
var data [][]string
for _, metric := range supportedMetrics {
labels := strings.Join(metric.Labels, ",")
data = append(data, []string{metric.Name, metric.Help, labels, metric.Type})
}
table.AppendBulk(data)
table.Render()
return nil
},
},
}
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
"github.com/urfave/cli" "github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/op-proposer/cmd/doc"
"github.com/ethereum-optimism/optimism/op-proposer/flags" "github.com/ethereum-optimism/optimism/op-proposer/flags"
"github.com/ethereum-optimism/optimism/op-proposer/proposer" "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
...@@ -27,8 +28,14 @@ func main() { ...@@ -27,8 +28,14 @@ func main() {
app.Name = "op-proposer" app.Name = "op-proposer"
app.Usage = "L2Output Submitter" app.Usage = "L2Output Submitter"
app.Description = "Service for generating and submitting L2 Output checkpoints to the L2OutputOracle contract" app.Description = "Service for generating and submitting L2 Output checkpoints to the L2OutputOracle contract"
app.Action = curryMain(Version) app.Action = curryMain(Version)
app.Commands = []cli.Command{
{
Name: "doc",
Subcommands: doc.Subcommands,
},
}
err := app.Run(os.Args) err := app.Run(os.Args)
if err != nil { if err != nil {
log.Crit("Application failed", "message", err) log.Crit("Application failed", "message", err)
......
package flags package flags
import ( import (
"fmt"
"github.com/urfave/cli" "github.com/urfave/cli"
opservice "github.com/ethereum-optimism/optimism/op-service" opservice "github.com/ethereum-optimism/optimism/op-service"
...@@ -16,29 +18,25 @@ const envVarPrefix = "OP_PROPOSER" ...@@ -16,29 +18,25 @@ const envVarPrefix = "OP_PROPOSER"
var ( var (
// Required Flags // Required Flags
L1EthRpcFlag = cli.StringFlag{ L1EthRpcFlag = cli.StringFlag{
Name: "l1-eth-rpc", Name: "l1-eth-rpc",
Usage: "HTTP provider URL for L1", Usage: "HTTP provider URL for L1",
Required: true, EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L1_ETH_RPC"),
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L1_ETH_RPC"),
} }
RollupRpcFlag = cli.StringFlag{ RollupRpcFlag = cli.StringFlag{
Name: "rollup-rpc", Name: "rollup-rpc",
Usage: "HTTP provider URL for the rollup node", Usage: "HTTP provider URL for the rollup node",
Required: true, EnvVar: opservice.PrefixEnvVar(envVarPrefix, "ROLLUP_RPC"),
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "ROLLUP_RPC"),
} }
L2OOAddressFlag = cli.StringFlag{ L2OOAddressFlag = cli.StringFlag{
Name: "l2oo-address", Name: "l2oo-address",
Usage: "Address of the L2OutputOracle contract", Usage: "Address of the L2OutputOracle contract",
Required: true, EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L2OO_ADDRESS"),
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L2OO_ADDRESS"),
} }
PollIntervalFlag = cli.DurationFlag{ PollIntervalFlag = cli.DurationFlag{
Name: "poll-interval", Name: "poll-interval",
Usage: "Delay between querying L2 for more transactions and " + Usage: "Delay between querying L2 for more transactions and " +
"creating a new batch", "creating a new batch",
Required: true, EnvVar: opservice.PrefixEnvVar(envVarPrefix, "POLL_INTERVAL"),
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "POLL_INTERVAL"),
} }
// Optional flags // Optional flags
AllowNonFinalizedFlag = cli.BoolFlag{ AllowNonFinalizedFlag = cli.BoolFlag{
...@@ -74,3 +72,12 @@ func init() { ...@@ -74,3 +72,12 @@ func init() {
// Flags contains the list of configuration options available to the binary. // Flags contains the list of configuration options available to the binary.
var Flags []cli.Flag var Flags []cli.Flag
func CheckRequired(ctx *cli.Context) error {
for _, f := range requiredFlags {
if !ctx.GlobalIsSet(f.GetName()) {
return fmt.Errorf("flag %s is required", f.GetName())
}
}
return nil
}
...@@ -104,3 +104,7 @@ const ( ...@@ -104,3 +104,7 @@ const (
func (m *Metrics) RecordL2BlocksProposed(l2ref eth.L2BlockRef) { func (m *Metrics) RecordL2BlocksProposed(l2ref eth.L2BlockRef) {
m.RecordL2Ref(BlockProposed, l2ref) m.RecordL2Ref(BlockProposed, l2ref)
} }
func (m *Metrics) Document() []opmetrics.DocumentedMetric {
return m.factory.Document()
}
...@@ -85,7 +85,7 @@ func MakeTxMetrics(ns string, factory metrics.Factory) TxMetrics { ...@@ -85,7 +85,7 @@ func MakeTxMetrics(ns string, factory metrics.Factory) TxMetrics {
txPublishError: factory.NewCounterVec(prometheus.CounterOpts{ txPublishError: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns, Namespace: ns,
Name: "tx_publish_error_count", Name: "tx_publish_error_count",
Help: "Count of publish errors. Labells are sanitized error strings", Help: "Count of publish errors. Labels are sanitized error strings",
Subsystem: "txmgr", Subsystem: "txmgr",
}, []string{"error"}), }, []string{"error"}),
confirmEvent: metrics.NewEventVec(factory, ns, "txmgr", "confirm", "tx confirm", []string{"status"}), confirmEvent: metrics.NewEventVec(factory, ns, "txmgr", "confirm", "tx confirm", []string{"status"}),
...@@ -93,7 +93,7 @@ func MakeTxMetrics(ns string, factory metrics.Factory) TxMetrics { ...@@ -93,7 +93,7 @@ func MakeTxMetrics(ns string, factory metrics.Factory) TxMetrics {
rpcError: factory.NewCounter(prometheus.CounterOpts{ rpcError: factory.NewCounter(prometheus.CounterOpts{
Namespace: ns, Namespace: ns,
Name: "rpc_error_count", Name: "rpc_error_count",
Help: "Temporrary: Count of RPC errors (like timeouts) that have occurrred", Help: "Temporary: Count of RPC errors (like timeouts) that have occurred",
Subsystem: "txmgr", Subsystem: "txmgr",
}), }),
} }
......
...@@ -269,8 +269,11 @@ contract SystemConfig is OwnableUpgradeable, Semver { ...@@ -269,8 +269,11 @@ contract SystemConfig is OwnableUpgradeable, Semver {
_config.minimumBaseFee <= _config.maximumBaseFee, _config.minimumBaseFee <= _config.maximumBaseFee,
"SystemConfig: min base fee must be less than max base" "SystemConfig: min base fee must be less than max base"
); );
// Base fee change denominator must be greater than 0. // Base fee change denominator must be greater than 1.
require(_config.baseFeeMaxChangeDenominator > 1, "SystemConfig: denominator must be larger than 1"); require(
_config.baseFeeMaxChangeDenominator > 1,
"SystemConfig: denominator must be larger than 1"
);
// Max resource limit plus system tx gas must be less than or equal to the L2 gas limit. // Max resource limit plus system tx gas must be less than or equal to the L2 gas limit.
// The gas limit must be increased before these values can be increased. // The gas limit must be increased before these values can be increased.
require( require(
......
...@@ -1092,7 +1092,6 @@ contract OptimismPortalUpgradeable_Test is Portal_Initializer { ...@@ -1092,7 +1092,6 @@ contract OptimismPortalUpgradeable_Test is Portal_Initializer {
* broken by changing the config. * broken by changing the config.
*/ */
contract OptimismPortalResourceFuzz_Test is Portal_Initializer { contract OptimismPortalResourceFuzz_Test is Portal_Initializer {
/** /**
* @dev The max gas limit observed throughout this test. Setting this too high can cause * @dev The max gas limit observed throughout this test. Setting this too high can cause
* the test to take too long to run. * the test to take too long to run.
...@@ -1118,7 +1117,7 @@ contract OptimismPortalResourceFuzz_Test is Portal_Initializer { ...@@ -1118,7 +1117,7 @@ contract OptimismPortalResourceFuzz_Test is Portal_Initializer {
uint64 gasLimit = systemConfig.gasLimit(); uint64 gasLimit = systemConfig.gasLimit();
// Bound resource config // Bound resource config
_maxResourceLimit = uint32(bound(_maxResourceLimit, 21000, MAX_GAS_LIMIT / 8)); _maxResourceLimit = uint32(bound(_maxResourceLimit, 21000, MAX_GAS_LIMIT / 8));
_gasLimit = uint64(bound( _gasLimit, 21000, _maxResourceLimit)); _gasLimit = uint64(bound(_gasLimit, 21000, _maxResourceLimit));
_prevBaseFee = uint128(bound(_prevBaseFee, 0, 5 gwei)); _prevBaseFee = uint128(bound(_prevBaseFee, 0, 5 gwei));
// Prevent values that would cause reverts // Prevent values that would cause reverts
vm.assume(gasLimit >= _gasLimit); vm.assume(gasLimit >= _gasLimit);
...@@ -1127,7 +1126,8 @@ contract OptimismPortalResourceFuzz_Test is Portal_Initializer { ...@@ -1127,7 +1126,8 @@ contract OptimismPortalResourceFuzz_Test is Portal_Initializer {
vm.assume(uint256(_maxResourceLimit) + uint256(_systemTxMaxGas) <= gasLimit); vm.assume(uint256(_maxResourceLimit) + uint256(_systemTxMaxGas) <= gasLimit);
vm.assume(_elasticityMultiplier > 0); vm.assume(_elasticityMultiplier > 0);
vm.assume( vm.assume(
((_maxResourceLimit / _elasticityMultiplier) * _elasticityMultiplier) == _maxResourceLimit ((_maxResourceLimit / _elasticityMultiplier) * _elasticityMultiplier) ==
_maxResourceLimit
); );
_prevBoughtGas = uint64(bound(_prevBoughtGas, 0, _maxResourceLimit - _gasLimit)); _prevBoughtGas = uint64(bound(_prevBoughtGas, 0, _maxResourceLimit - _gasLimit));
_blockDiff = uint8(bound(_blockDiff, 0, 3)); _blockDiff = uint8(bound(_blockDiff, 0, 3));
......
...@@ -24,6 +24,14 @@ that maintains 1:1 compatibility with Ethereum. ...@@ -24,6 +24,14 @@ that maintains 1:1 compatibility with Ethereum.
- [Predeploys](predeploys.md) - [Predeploys](predeploys.md)
- [Glossary](glossary.md) - [Glossary](glossary.md)
### Experimental
Specifications of new features in active development.
- [Fault Proof](./fault-proof.md)
- [Dispute Game](./dispute-game.md)
- [Dispute Game Interface](./dispute-game-interface.md)
## Design Goals ## Design Goals
Our aim is to design a protocol specification that is: Our aim is to design a protocol specification that is:
......
This diff is collapsed.
This diff is collapsed.