Commit b6a4eb96 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into aj/e2e-helpers

parents 0e7f8358 09d23ee8
......@@ -233,6 +233,10 @@ type System struct {
Mocknet mocknet.Mocknet
}
func (sys *System) NodeEndpoint(name string) string {
return selectEndpoint(sys.Nodes[name])
}
func (sys *System) Close() {
if sys.L2OutputSubmitter != nil {
sys.L2OutputSubmitter.Stop()
......@@ -619,13 +623,17 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
return sys, nil
}
func configureL1(rollupNodeCfg *rollupNode.Config, l1Node *node.Node) {
l1EndpointConfig := l1Node.WSEndpoint()
func selectEndpoint(node *node.Node) string {
useHTTP := os.Getenv("OP_E2E_USE_HTTP") == "true"
if useHTTP {
log.Info("using HTTP client")
l1EndpointConfig = l1Node.HTTPEndpoint()
return node.HTTPEndpoint()
}
return node.WSEndpoint()
}
func configureL1(rollupNodeCfg *rollupNode.Config, l1Node *node.Node) {
l1EndpointConfig := selectEndpoint(l1Node)
rollupNodeCfg.L1 = &rollupNode.L1EndpointConfig{
L1NodeAddr: l1EndpointConfig,
L1TrustRPC: false,
......
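The selectEndpoint helper above lets the e2e setup switch every node between WebSocket and HTTP RPC transports via the OP_E2E_USE_HTTP environment variable, and System.NodeEndpoint exposes the same choice to individual tests. A minimal sketch of a test consuming it, assuming the usual op_e2e package imports plus go-ethereum's ethclient (the test name is illustrative):

func TestDialNodeOverConfiguredTransport(t *testing.T) {
	cfg := DefaultSystemConfig(t)
	sys, err := cfg.Start()
	require.NoError(t, err, "Error starting up system")
	defer sys.Close()

	// NodeEndpoint returns the HTTP endpoint when OP_E2E_USE_HTTP=true,
	// otherwise the WebSocket endpoint.
	endpoint := sys.NodeEndpoint("sequencer")
	client, err := ethclient.Dial(endpoint)
	require.NoError(t, err)
	defer client.Close()
}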
package op_e2e
import (
"context"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
opp "github.com/ethereum-optimism/optimism/op-program/host"
oppconf "github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/stretchr/testify/require"
)
func TestVerifyL2OutputRoot(t *testing.T) {
parallel(t)
ctx := context.Background()
cfg := DefaultSystemConfig(t)
// We don't need a verifier - just the sequencer is enough
delete(cfg.Nodes, "verifier")
sys, err := cfg.Start()
require.Nil(t, err, "Error starting up system")
defer sys.Close()
log := testlog.Logger(t, log.LvlInfo)
log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time)
l1Client := sys.Clients["l1"]
l2Seq := sys.Clients["sequencer"]
rollupRPCClient, err := rpc.DialContext(context.Background(), sys.RollupNodes["sequencer"].HTTPEndpoint())
require.Nil(t, err)
rollupClient := sources.NewRollupClient(client.NewBaseRPCClient(rollupRPCClient))
// TODO (CLI-3855): Actually perform some tx to set up a more complex chain.
// Wait for the safe head to reach block 10
require.NoError(t, waitForSafeHead(ctx, 10, rollupClient))
// Use block 5 as the agreed starting block on L2
l2AgreedBlock, err := l2Seq.BlockByNumber(ctx, big.NewInt(5))
require.NoError(t, err, "could not retrieve l2 genesis")
l2Head := l2AgreedBlock.Hash() // Agreed starting L2 block
// Get the expected output at block 10
l2ClaimBlockNumber := uint64(10)
l2Output, err := rollupClient.OutputAtBlock(ctx, l2ClaimBlockNumber)
require.NoError(t, err, "could not get expected output")
l2Claim := l2Output.OutputRoot
// Find the current L1 head
l1BlockNumber, err := l1Client.BlockNumber(ctx)
require.NoError(t, err, "get l1 head block number")
l1HeadBlock, err := l1Client.BlockByNumber(ctx, new(big.Int).SetUint64(l1BlockNumber))
require.NoError(t, err, "get l1 head block")
l1Head := l1HeadBlock.Hash()
preimageDir := t.TempDir()
fppConfig := oppconf.NewConfig(sys.RollupConfig, sys.L2GenesisCfg.Config, l1Head, l2Head, common.Hash(l2Claim), l2ClaimBlockNumber)
fppConfig.L1URL = sys.NodeEndpoint("l1")
fppConfig.L2URL = sys.NodeEndpoint("sequencer")
fppConfig.DataDir = preimageDir
// Check the FPP confirms the expected output
t.Log("Running fault proof in fetching mode")
err = opp.FaultProofProgram(log, fppConfig)
require.NoError(t, err)
// Shut down the nodes from the actual chain. Should now be able to run using only the pre-fetched data.
for _, node := range sys.Nodes {
require.NoError(t, node.Close())
}
t.Log("Running fault proof in offline mode")
// Should be able to rerun in offline mode using the pre-fetched images
fppConfig.L1URL = ""
fppConfig.L2URL = ""
err = opp.FaultProofProgram(log, fppConfig)
require.NoError(t, err)
// Check that a fault is detected if we provide an incorrect claim
t.Log("Running fault proof with invalid claim")
fppConfig.L2Claim = common.Hash{0xaa}
err = opp.FaultProofProgram(log, fppConfig)
require.ErrorIs(t, err, opp.ErrClaimNotValid)
}
func waitForSafeHead(ctx context.Context, safeBlockNum uint64, rollupClient *sources.RollupClient) error {
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
for {
seqStatus, err := rollupClient.SyncStatus(ctx)
if err != nil {
return err
}
if seqStatus.SafeL2.Number >= safeBlockNum {
return nil
}
}
}
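waitForSafeHead polls SyncStatus as fast as the RPC will answer, relying only on the 30-second deadline to give up. A sketch of the same polling pattern with a short pause between attempts (the 100ms interval is an arbitrary choice, shown only for illustration):

func waitForSafeHeadWithInterval(ctx context.Context, safeBlockNum uint64, rollupClient *sources.RollupClient) error {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	for {
		seqStatus, err := rollupClient.SyncStatus(ctx)
		if err != nil {
			return err
		}
		if seqStatus.SafeL2.Number >= safeBlockNum {
			return nil
		}
		// Pause between polls instead of spinning on the RPC.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(100 * time.Millisecond):
		}
	}
}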
......@@ -24,18 +24,20 @@ type L2Source interface {
}
type Driver struct {
logger log.Logger
pipeline Derivation
l2OutputRoot func() (eth.Bytes32, error)
logger log.Logger
pipeline Derivation
l2OutputRoot func() (eth.Bytes32, error)
targetBlockNum uint64
}
func NewDriver(logger log.Logger, cfg *rollup.Config, l1Source derive.L1Fetcher, l2Source L2Source) *Driver {
func NewDriver(logger log.Logger, cfg *rollup.Config, l1Source derive.L1Fetcher, l2Source L2Source, targetBlockNum uint64) *Driver {
pipeline := derive.NewDerivationPipeline(logger, cfg, l1Source, l2Source, metrics.NoopMetrics)
pipeline.Reset()
return &Driver{
logger: logger,
pipeline: pipeline,
l2OutputRoot: l2Source.L2OutputRoot,
logger: logger,
pipeline: pipeline,
l2OutputRoot: l2Source.L2OutputRoot,
targetBlockNum: targetBlockNum,
}
}
......@@ -47,6 +49,11 @@ func (d *Driver) Step(ctx context.Context) error {
if err := d.pipeline.Step(ctx); errors.Is(err, io.EOF) {
return io.EOF
} else if errors.Is(err, derive.NotEnoughData) {
head := d.pipeline.SafeL2Head()
if head.Number >= d.targetBlockNum {
d.logger.Info("Target L2 block reached", "head", head)
return io.EOF
}
d.logger.Debug("Data is lacking")
return nil
} else if err != nil {
......
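With targetBlockNum wired into the Driver, Step now returns io.EOF in two cases: the derivation pipeline genuinely ran out of L1 data, or the safe head reached the claimed block number. Either way the caller stops stepping and validates the claim. A sketch of that calling loop, mirroring the shape of the loop in host.FaultProofProgram later in this commit (the function name is illustrative; imports are the ones used elsewhere in op-program):

func runToClaimBlock(ctx context.Context, d *cldr.Driver, l2Claim common.Hash) error {
	for {
		if err := d.Step(ctx); errors.Is(err, io.EOF) {
			// Out of data, or the target L2 block was reached.
			break
		} else if err != nil {
			return err
		}
	}
	if !d.ValidateClaim(eth.Bytes32(l2Claim)) {
		return errors.New("claimed output root does not match the derived one")
	}
	return nil
}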
......@@ -39,6 +39,30 @@ func TestGenericError(t *testing.T) {
require.ErrorIs(t, err, expected)
}
func TestTargetBlock(t *testing.T) {
t.Run("Reached", func(t *testing.T) {
driver := createDriverWithNextBlock(t, derive.NotEnoughData, 1000)
driver.targetBlockNum = 1000
err := driver.Step(context.Background())
require.ErrorIs(t, err, io.EOF)
})
t.Run("Exceeded", func(t *testing.T) {
driver := createDriverWithNextBlock(t, derive.NotEnoughData, 1000)
driver.targetBlockNum = 500
err := driver.Step(context.Background())
require.ErrorIs(t, err, io.EOF)
})
t.Run("NotYetReached", func(t *testing.T) {
driver := createDriverWithNextBlock(t, derive.NotEnoughData, 1000)
driver.targetBlockNum = 1001
err := driver.Step(context.Background())
// No error to indicate derivation should continue
require.NoError(t, err)
})
}
func TestNoError(t *testing.T) {
driver := createDriver(t, nil)
err := driver.Step(context.Background())
......@@ -76,15 +100,21 @@ func TestValidateClaim(t *testing.T) {
}
func createDriver(t *testing.T, derivationResult error) *Driver {
derivation := &stubDerivation{nextErr: derivationResult}
return createDriverWithNextBlock(t, derivationResult, 0)
}
func createDriverWithNextBlock(t *testing.T, derivationResult error, nextBlockNum uint64) *Driver {
derivation := &stubDerivation{nextErr: derivationResult, nextBlockNum: nextBlockNum}
return &Driver{
logger: testlog.Logger(t, log.LvlDebug),
pipeline: derivation,
logger: testlog.Logger(t, log.LvlDebug),
pipeline: derivation,
targetBlockNum: 1_000_000,
}
}
type stubDerivation struct {
nextErr error
nextErr error
nextBlockNum uint64
}
func (s stubDerivation) Step(ctx context.Context) error {
......@@ -92,5 +122,7 @@ func (s stubDerivation) Step(ctx context.Context) error {
}
func (s stubDerivation) SafeL2Head() eth.L2BlockRef {
return eth.L2BlockRef{}
return eth.L2BlockRef{
Number: s.nextBlockNum,
}
}
package main
import (
"context"
"errors"
"fmt"
"io"
"os"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/sources"
cldr "github.com/ethereum-optimism/optimism/op-program/client/driver"
"github.com/ethereum-optimism/optimism/op-program/host"
"github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum-optimism/optimism/op-program/host/flags"
"github.com/ethereum-optimism/optimism/op-program/host/kvstore"
"github.com/ethereum-optimism/optimism/op-program/host/l1"
"github.com/ethereum-optimism/optimism/op-program/host/l2"
"github.com/ethereum-optimism/optimism/op-program/host/prefetcher"
"github.com/ethereum-optimism/optimism/op-program/host/version"
"github.com/ethereum-optimism/optimism/op-program/preimage"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli"
)
......@@ -46,15 +33,13 @@ var VersionWithMeta = func() string {
return v
}()
var (
ErrClaimNotValid = errors.New("invalid claim")
)
func main() {
args := os.Args
err := run(args, FaultProofProgram)
err := run(args, host.FaultProofProgram)
if err != nil {
log.Crit("Application failed", "message", err)
} else {
log.Info("Claim successfully verified")
}
}
......@@ -99,110 +84,3 @@ func setupLogging(ctx *cli.Context) (log.Logger, error) {
logger := oplog.NewLogger(logCfg)
return logger, nil
}
type L2Source struct {
*sources.L2Client
*sources.DebugClient
}
// FaultProofProgram is the programmatic entry-point for the fault proof program
func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
if err := cfg.Check(); err != nil {
return fmt.Errorf("invalid config: %w", err)
}
cfg.Rollup.LogDescription(logger, chaincfg.L2ChainIDToNetworkName)
ctx := context.Background()
var kv kvstore.KV
if cfg.DataDir == "" {
logger.Info("Using in-memory storage")
kv = kvstore.NewMemKV()
} else {
logger.Info("Creating disk storage", "datadir", cfg.DataDir)
if err := os.MkdirAll(cfg.DataDir, 0755); err != nil {
return fmt.Errorf("creating datadir: %w", err)
}
kv = kvstore.NewDiskKV(cfg.DataDir)
}
var preimageOracle preimage.OracleFn
var hinter preimage.HinterFn
if cfg.FetchingEnabled() {
logger.Info("Connecting to L1 node", "l1", cfg.L1URL)
l1RPC, err := client.NewRPC(ctx, logger, cfg.L1URL)
if err != nil {
return fmt.Errorf("failed to setup L1 RPC: %w", err)
}
logger.Info("Connecting to L2 node", "l2", cfg.L2URL)
l2RPC, err := client.NewRPC(ctx, logger, cfg.L2URL)
if err != nil {
return fmt.Errorf("failed to setup L2 RPC: %w", err)
}
l1ClCfg := sources.L1ClientDefaultConfig(cfg.Rollup, cfg.L1TrustRPC, cfg.L1RPCKind)
l2ClCfg := sources.L2ClientDefaultConfig(cfg.Rollup, true)
l1Cl, err := sources.NewL1Client(l1RPC, logger, nil, l1ClCfg)
if err != nil {
return fmt.Errorf("failed to create L1 client: %w", err)
}
l2Cl, err := sources.NewL2Client(l2RPC, logger, nil, l2ClCfg)
if err != nil {
return fmt.Errorf("failed to create L2 client: %w", err)
}
l2DebugCl := &L2Source{L2Client: l2Cl, DebugClient: sources.NewDebugClient(l2RPC.CallContext)}
logger.Info("Setting up pre-fetcher")
prefetch := prefetcher.NewPrefetcher(l1Cl, l2DebugCl, kv)
preimageOracle = asOracleFn(func(key common.Hash) ([]byte, error) {
return prefetch.GetPreimage(ctx, key)
})
hinter = asHinter(prefetch.Hint)
} else {
logger.Info("Using offline mode. All required pre-images must be pre-populated.")
preimageOracle = asOracleFn(kv.Get)
hinter = func(v preimage.Hint) {
logger.Debug("ignoring prefetch hint", "hint", v)
}
}
l1Source := l1.NewSource(logger, preimageOracle, hinter, cfg.L1Head)
l2Source, err := l2.NewEngine(logger, preimageOracle, hinter, cfg)
if err != nil {
return fmt.Errorf("connect l2 oracle: %w", err)
}
logger.Info("Starting derivation")
d := cldr.NewDriver(logger, cfg.Rollup, l1Source, l2Source)
for {
if err = d.Step(ctx); errors.Is(err, io.EOF) {
break
} else if err != nil {
return err
}
}
claim := cfg.L2Claim
if !d.ValidateClaim(eth.Bytes32(claim)) {
return ErrClaimNotValid
}
return nil
}
func asOracleFn(getter func(key common.Hash) ([]byte, error)) preimage.OracleFn {
return func(key preimage.Key) []byte {
pre, err := getter(key.PreimageKey())
if err != nil {
panic(fmt.Errorf("preimage unavailable for key %v: %w", key, err))
}
return pre
}
}
func asHinter(hint func(hint string) error) preimage.HinterFn {
return func(v preimage.Hint) {
err := hint(v.Hint())
if err != nil {
panic(fmt.Errorf("hint rejected %v: %w", v, err))
}
}
}
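The block above (the L2Source wrapper, FaultProofProgram, and the asOracleFn/asHinter adapters) is removed from package main; it reappears in the new op-program/host package later in this commit, and main() now calls host.FaultProofProgram. That makes the entry point importable from other Go code, which is exactly what the new op-e2e test above relies on. A minimal sketch of an external caller re-running in offline mode against an already populated preimage directory (the function name, argument names, and datadir path are illustrative):

func verifyClaimOffline(logger log.Logger, rollupCfg *rollup.Config, l2ChainConfig *params.ChainConfig,
	l1Head, l2Head, l2Claim common.Hash, l2ClaimBlockNumber uint64) error {
	fppCfg := oppconf.NewConfig(rollupCfg, l2ChainConfig, l1Head, l2Head, l2Claim, l2ClaimBlockNumber)
	// Leaving L1URL/L2URL empty keeps fetching disabled, so only the
	// pre-populated preimages in DataDir are available.
	fppCfg.DataDir = "/path/to/preimages"
	err := host.FaultProofProgram(logger, fppCfg)
	if errors.Is(err, host.ErrClaimNotValid) {
		// Derivation completed but the claimed output root did not match.
		return err
	}
	return err
}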
......@@ -3,6 +3,7 @@ package main
import (
"encoding/json"
"os"
"strconv"
"testing"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
......@@ -14,15 +15,17 @@ import (
"github.com/stretchr/testify/require"
)
// Use HexToHash(...).Hex() to ensure the strings are the correct length for a hash
var l1HeadValue = common.HexToHash("0x111111").Hex()
var l2HeadValue = common.HexToHash("0x222222").Hex()
var l2ClaimValue = common.HexToHash("0x333333").Hex()
var l2Genesis = core.DefaultGoerliGenesisBlock()
var l2GenesisConfig = l2Genesis.Config
var (
// Use HexToHash(...).Hex() to ensure the strings are the correct length for a hash
l1HeadValue = common.HexToHash("0x111111").Hex()
l2HeadValue = common.HexToHash("0x222222").Hex()
l2ClaimValue = common.HexToHash("0x333333").Hex()
l2ClaimBlockNumber = uint64(1203)
l2Genesis = core.DefaultGoerliGenesisBlock()
l2GenesisConfig = l2Genesis.Config
)
func TestLogLevel(t *testing.T) {
t.Parallel()
t.Run("RejectInvalid", func(t *testing.T) {
verifyArgsInvalid(t, "unknown level: foo", addRequiredArgs(t, "--log.level=foo"))
})
......@@ -38,19 +41,18 @@ func TestLogLevel(t *testing.T) {
}
func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
t.Parallel()
cfg := configForArgs(t, addRequiredArgs(t))
defaultCfg := config.NewConfig(
&chaincfg.Goerli,
l2GenesisConfig,
common.HexToHash(l1HeadValue),
common.HexToHash(l2HeadValue),
common.HexToHash(l2ClaimValue))
common.HexToHash(l2ClaimValue),
l2ClaimBlockNumber)
require.Equal(t, defaultCfg, cfg)
}
func TestNetwork(t *testing.T) {
t.Parallel()
t.Run("Unknown", func(t *testing.T) {
verifyArgsInvalid(t, "invalid network bar", replaceRequiredArg(t, "--network", "bar"))
})
......@@ -86,21 +88,18 @@ func TestNetwork(t *testing.T) {
}
func TestDataDir(t *testing.T) {
t.Parallel()
expected := "/tmp/mainTestDataDir"
cfg := configForArgs(t, addRequiredArgs(t, "--datadir", expected))
require.Equal(t, expected, cfg.DataDir)
}
func TestL2(t *testing.T) {
t.Parallel()
expected := "https://example.com:8545"
cfg := configForArgs(t, addRequiredArgs(t, "--l2", expected))
require.Equal(t, expected, cfg.L2URL)
}
func TestL2Genesis(t *testing.T) {
t.Parallel()
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag l2.genesis is required", addRequiredArgsExcept(t, "--l2.genesis"))
})
......@@ -112,7 +111,6 @@ func TestL2Genesis(t *testing.T) {
}
func TestL2Head(t *testing.T) {
t.Parallel()
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag l2.head is required", addRequiredArgsExcept(t, "--l2.head"))
})
......@@ -128,7 +126,6 @@ func TestL2Head(t *testing.T) {
}
func TestL1Head(t *testing.T) {
t.Parallel()
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag l1.head is required", addRequiredArgsExcept(t, "--l1.head"))
})
......@@ -144,14 +141,12 @@ func TestL1Head(t *testing.T) {
}
func TestL1(t *testing.T) {
t.Parallel()
expected := "https://example.com:8545"
cfg := configForArgs(t, addRequiredArgs(t, "--l1", expected))
require.Equal(t, expected, cfg.L1URL)
}
func TestL1TrustRPC(t *testing.T) {
t.Parallel()
t.Run("DefaultFalse", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(t))
require.False(t, cfg.L1TrustRPC)
......@@ -171,7 +166,6 @@ func TestL1TrustRPC(t *testing.T) {
}
func TestL1RPCKind(t *testing.T) {
t.Parallel()
t.Run("DefaultBasic", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs(t))
require.Equal(t, sources.RPCKindBasic, cfg.L1RPCKind)
......@@ -191,7 +185,6 @@ func TestL1RPCKind(t *testing.T) {
}
func TestL2Claim(t *testing.T) {
t.Parallel()
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag l2.claim is required", addRequiredArgsExcept(t, "--l2.claim"))
})
......@@ -206,6 +199,21 @@ func TestL2Claim(t *testing.T) {
})
}
func TestL2BlockNumber(t *testing.T) {
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag l2.blocknumber is required", addRequiredArgsExcept(t, "--l2.blocknumber"))
})
t.Run("Valid", func(t *testing.T) {
cfg := configForArgs(t, replaceRequiredArg(t, "--l2.blocknumber", strconv.FormatUint(l2ClaimBlockNumber, 10)))
require.EqualValues(t, l2ClaimBlockNumber, cfg.L2ClaimBlockNumber)
})
t.Run("Invalid", func(t *testing.T) {
verifyArgsInvalid(t, "invalid value \"something\" for flag -l2.blocknumber", replaceRequiredArg(t, "--l2.blocknumber", "something"))
})
}
func verifyArgsInvalid(t *testing.T, messageContains string, cliArgs []string) {
_, _, err := runWithArgs(cliArgs)
require.ErrorContains(t, err, messageContains)
......@@ -218,7 +226,7 @@ func configForArgs(t *testing.T, cliArgs []string) *config.Config {
}
func runWithArgs(cliArgs []string) (log.Logger, *config.Config, error) {
var cfg *config.Config
cfg := new(config.Config)
var logger log.Logger
fullArgs := append([]string{"op-program"}, cliArgs...)
err := run(fullArgs, func(log log.Logger, config *config.Config) error {
......@@ -252,11 +260,12 @@ func replaceRequiredArg(t *testing.T, name string, value string) []string {
func requiredArgs(t *testing.T) map[string]string {
genesisFile := writeValidGenesis(t)
return map[string]string{
"--network": "goerli",
"--l1.head": l1HeadValue,
"--l2.head": l2HeadValue,
"--l2.claim": l2ClaimValue,
"--l2.genesis": genesisFile,
"--network": "goerli",
"--l1.head": l1HeadValue,
"--l2.head": l2HeadValue,
"--l2.claim": l2ClaimValue,
"--l2.blocknumber": strconv.FormatUint(l2ClaimBlockNumber, 10),
"--l2.genesis": genesisFile,
}
}
......
......@@ -23,6 +23,7 @@ var (
ErrInvalidL2Head = errors.New("invalid l2 head")
ErrL1AndL2Inconsistent = errors.New("l1 and l2 options must be specified together or both omitted")
ErrInvalidL2Claim = errors.New("invalid l2 claim")
ErrInvalidL2ClaimBlock = errors.New("invalid l2 claim block number")
ErrDataDirRequired = errors.New("datadir must be specified when in non-fetching mode")
)
......@@ -43,6 +44,9 @@ type Config struct {
L2URL string
// L2Claim is the claimed L2 output root to verify
L2Claim common.Hash
// L2ClaimBlockNumber is the block number the claimed L2 output root is from
// Must be above 0; to be a valid claim it must also be above the block number of the L2Head block.
L2ClaimBlockNumber uint64
// L2ChainConfig is the op-geth chain config for the L2 execution engine
L2ChainConfig *params.ChainConfig
}
......@@ -63,6 +67,9 @@ func (c *Config) Check() error {
if c.L2Claim == (common.Hash{}) {
return ErrInvalidL2Claim
}
if c.L2ClaimBlockNumber == 0 {
return ErrInvalidL2ClaimBlock
}
if c.L2ChainConfig == nil {
return ErrMissingL2Genesis
}
......@@ -80,14 +87,15 @@ func (c *Config) FetchingEnabled() bool {
}
// NewConfig creates a Config with all optional values set to the CLI default value
func NewConfig(rollupCfg *rollup.Config, l2Genesis *params.ChainConfig, l1Head common.Hash, l2Head common.Hash, l2Claim common.Hash) *Config {
func NewConfig(rollupCfg *rollup.Config, l2Genesis *params.ChainConfig, l1Head common.Hash, l2Head common.Hash, l2Claim common.Hash, l2ClaimBlockNum uint64) *Config {
return &Config{
Rollup: rollupCfg,
L2ChainConfig: l2Genesis,
L1Head: l1Head,
L2Head: l2Head,
L2Claim: l2Claim,
L1RPCKind: sources.RPCKindBasic,
Rollup: rollupCfg,
L2ChainConfig: l2Genesis,
L1Head: l1Head,
L2Head: l2Head,
L2Claim: l2Claim,
L2ClaimBlockNumber: l2ClaimBlockNum,
L1RPCKind: sources.RPCKindBasic,
}
}
......@@ -107,6 +115,7 @@ func NewConfigFromCLI(ctx *cli.Context) (*Config, error) {
if l2Claim == (common.Hash{}) {
return nil, ErrInvalidL2Claim
}
l2ClaimBlockNum := ctx.GlobalUint64(flags.L2BlockNumber.Name)
l1Head := common.HexToHash(ctx.GlobalString(flags.L1Head.Name))
if l1Head == (common.Hash{}) {
return nil, ErrInvalidL1Head
......@@ -117,16 +126,17 @@ func NewConfigFromCLI(ctx *cli.Context) (*Config, error) {
return nil, fmt.Errorf("invalid genesis: %w", err)
}
return &Config{
Rollup: rollupCfg,
DataDir: ctx.GlobalString(flags.DataDir.Name),
L2URL: ctx.GlobalString(flags.L2NodeAddr.Name),
L2ChainConfig: l2ChainConfig,
L2Head: l2Head,
L2Claim: l2Claim,
L1Head: l1Head,
L1URL: ctx.GlobalString(flags.L1NodeAddr.Name),
L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name),
L1RPCKind: sources.RPCProviderKind(ctx.GlobalString(flags.L1RPCProviderKind.Name)),
Rollup: rollupCfg,
DataDir: ctx.GlobalString(flags.DataDir.Name),
L2URL: ctx.GlobalString(flags.L2NodeAddr.Name),
L2ChainConfig: l2ChainConfig,
L2Head: l2Head,
L2Claim: l2Claim,
L2ClaimBlockNumber: l2ClaimBlockNum,
L1Head: l1Head,
L1URL: ctx.GlobalString(flags.L1NodeAddr.Name),
L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name),
L1RPCKind: sources.RPCProviderKind(ctx.GlobalString(flags.L1RPCProviderKind.Name)),
}, nil
}
......
......@@ -10,11 +10,14 @@ import (
"github.com/stretchr/testify/require"
)
var validRollupConfig = &chaincfg.Goerli
var validL2Genesis = params.GoerliChainConfig
var validL1Head = common.Hash{0xaa}
var validL2Head = common.Hash{0xbb}
var validL2Claim = common.Hash{0xcc}
var (
validRollupConfig = &chaincfg.Goerli
validL2Genesis = params.GoerliChainConfig
validL1Head = common.Hash{0xaa}
validL2Head = common.Hash{0xbb}
validL2Claim = common.Hash{0xcc}
validL2ClaimBlockNum = uint64(15)
)
// TestValidConfigIsValid checks that the config provided by validConfig is actually valid
func TestValidConfigIsValid(t *testing.T) {
......@@ -59,6 +62,13 @@ func TestL2ClaimRequired(t *testing.T) {
require.ErrorIs(t, err, ErrInvalidL2Claim)
}
func TestL2ClaimBlockNumberRequired(t *testing.T) {
config := validConfig()
config.L2ClaimBlockNumber = 0
err := config.Check()
require.ErrorIs(t, err, ErrInvalidL2ClaimBlock)
}
func TestL2GenesisRequired(t *testing.T) {
config := validConfig()
config.L2ChainConfig = nil
......@@ -133,7 +143,7 @@ func TestRequireDataDirInNonFetchingMode(t *testing.T) {
}
func validConfig() *Config {
cfg := NewConfig(validRollupConfig, validL2Genesis, validL1Head, validL2Head, validL2Claim)
cfg := NewConfig(validRollupConfig, validL2Genesis, validL1Head, validL2Head, validL2Claim, validL2ClaimBlockNum)
cfg.DataDir = "/tmp/configTest"
return cfg
}
......@@ -51,6 +51,11 @@ var (
Usage: "Claimed L2 output root to validate",
EnvVar: service.PrefixEnvVar(envVarPrefix, "L2_CLAIM"),
}
L2BlockNumber = cli.Uint64Flag{
Name: "l2.blocknumber",
Usage: "Number of the L2 block that the claim is from",
EnvVar: service.PrefixEnvVar(envVarPrefix, "L2_BLOCK_NUM"),
}
L2GenesisPath = cli.StringFlag{
Name: "l2.genesis",
Usage: "Path to the op-geth genesis file",
......@@ -85,6 +90,7 @@ var requiredFlags = []cli.Flag{
L1Head,
L2Head,
L2Claim,
L2BlockNumber,
L2GenesisPath,
}
var programFlags = []cli.Flag{
......@@ -113,7 +119,7 @@ func CheckRequired(ctx *cli.Context) error {
return fmt.Errorf("cannot specify both %s and %s", RollupConfig.Name, Network.Name)
}
for _, flag := range requiredFlags {
if ctx.GlobalString(flag.GetName()) == "" {
if !ctx.IsSet(flag.GetName()) {
return fmt.Errorf("flag %s is required", flag.GetName())
}
}
......
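CheckRequired now uses ctx.IsSet rather than comparing each flag's string form with the empty string. The reason is the new l2.blocknumber flag: an omitted cli.Uint64Flag still stringifies as "0", so the old emptiness check could never report it as missing. A small standalone sketch of the difference, assuming urfave/cli v1 as used by op-program (the program and flag wiring are illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.Uint64Flag{Name: "l2.blocknumber"},
	}
	app.Action = func(ctx *cli.Context) error {
		// For an omitted Uint64Flag the string form is "0", never "",
		// so a `== ""` check cannot detect that the flag is missing.
		fmt.Printf("string form: %q\n", ctx.GlobalString("l2.blocknumber"))
		// IsSet reports whether the user actually supplied the flag.
		fmt.Println("is set:", ctx.IsSet("l2.blocknumber"))
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}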
package host
import (
"context"
"errors"
"fmt"
"io"
"os"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/sources"
cldr "github.com/ethereum-optimism/optimism/op-program/client/driver"
"github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum-optimism/optimism/op-program/host/kvstore"
"github.com/ethereum-optimism/optimism/op-program/host/l1"
"github.com/ethereum-optimism/optimism/op-program/host/l2"
"github.com/ethereum-optimism/optimism/op-program/host/prefetcher"
"github.com/ethereum-optimism/optimism/op-program/preimage"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
var (
ErrClaimNotValid = errors.New("invalid claim")
)
type L2Source struct {
*sources.L2Client
*sources.DebugClient
}
// FaultProofProgram is the programmatic entry-point for the fault proof program
func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
if err := cfg.Check(); err != nil {
return fmt.Errorf("invalid config: %w", err)
}
cfg.Rollup.LogDescription(logger, chaincfg.L2ChainIDToNetworkName)
ctx := context.Background()
var kv kvstore.KV
if cfg.DataDir == "" {
logger.Info("Using in-memory storage")
kv = kvstore.NewMemKV()
} else {
logger.Info("Creating disk storage", "datadir", cfg.DataDir)
if err := os.MkdirAll(cfg.DataDir, 0755); err != nil {
return fmt.Errorf("creating datadir: %w", err)
}
kv = kvstore.NewDiskKV(cfg.DataDir)
}
var preimageOracle preimage.OracleFn
var hinter preimage.HinterFn
if cfg.FetchingEnabled() {
logger.Info("Connecting to L1 node", "l1", cfg.L1URL)
l1RPC, err := client.NewRPC(ctx, logger, cfg.L1URL)
if err != nil {
return fmt.Errorf("failed to setup L1 RPC: %w", err)
}
logger.Info("Connecting to L2 node", "l2", cfg.L2URL)
l2RPC, err := client.NewRPC(ctx, logger, cfg.L2URL)
if err != nil {
return fmt.Errorf("failed to setup L2 RPC: %w", err)
}
l1ClCfg := sources.L1ClientDefaultConfig(cfg.Rollup, cfg.L1TrustRPC, cfg.L1RPCKind)
l2ClCfg := sources.L2ClientDefaultConfig(cfg.Rollup, true)
l1Cl, err := sources.NewL1Client(l1RPC, logger, nil, l1ClCfg)
if err != nil {
return fmt.Errorf("failed to create L1 client: %w", err)
}
l2Cl, err := sources.NewL2Client(l2RPC, logger, nil, l2ClCfg)
if err != nil {
return fmt.Errorf("failed to create L2 client: %w", err)
}
l2DebugCl := &L2Source{L2Client: l2Cl, DebugClient: sources.NewDebugClient(l2RPC.CallContext)}
logger.Info("Setting up pre-fetcher")
prefetch := prefetcher.NewPrefetcher(l1Cl, l2DebugCl, kv)
preimageOracle = asOracleFn(func(key common.Hash) ([]byte, error) {
return prefetch.GetPreimage(ctx, key)
})
hinter = asHinter(prefetch.Hint)
} else {
logger.Info("Using offline mode. All required pre-images must be pre-populated.")
preimageOracle = asOracleFn(kv.Get)
hinter = func(v preimage.Hint) {
logger.Debug("ignoring prefetch hint", "hint", v)
}
}
l1Source := l1.NewSource(logger, preimageOracle, hinter, cfg.L1Head)
l2Source, err := l2.NewEngine(logger, preimageOracle, hinter, cfg)
if err != nil {
return fmt.Errorf("connect l2 oracle: %w", err)
}
logger.Info("Starting derivation")
d := cldr.NewDriver(logger, cfg.Rollup, l1Source, l2Source, cfg.L2ClaimBlockNumber)
for {
if err = d.Step(ctx); errors.Is(err, io.EOF) {
break
} else if err != nil {
return err
}
}
if !d.ValidateClaim(eth.Bytes32(cfg.L2Claim)) {
return ErrClaimNotValid
}
return nil
}
func asOracleFn(getter func(key common.Hash) ([]byte, error)) preimage.OracleFn {
return func(key preimage.Key) []byte {
pre, err := getter(key.PreimageKey())
if err != nil {
panic(fmt.Errorf("preimage unavailable for key %v: %w", key, err))
}
return pre
}
}
func asHinter(hint func(hint string) error) preimage.HinterFn {
return func(v preimage.Hint) {
err := hint(v.Hint())
if err != nil {
panic(fmt.Errorf("hint rejected %v: %w", v, err))
}
}
}
......@@ -7,14 +7,14 @@ Bytes_toNibbles_Test:test_toNibbles_expectedResult5Bytes_works() (gas: 6132)
Bytes_toNibbles_Test:test_toNibbles_zeroLengthInput_works() (gas: 944)
CrossDomainMessenger_BaseGas_Test:test_baseGas_succeeds() (gas: 20097)
CrossDomainOwnable2_Test:test_onlyOwner_notMessenger_reverts() (gas: 8416)
CrossDomainOwnable2_Test:test_onlyOwner_notOwner2_reverts() (gas: 61872)
CrossDomainOwnable2_Test:test_onlyOwner_notOwner2_reverts() (gas: 57254)
CrossDomainOwnable2_Test:test_onlyOwner_notOwner_reverts() (gas: 16566)
CrossDomainOwnable2_Test:test_onlyOwner_succeeds() (gas: 75933)
CrossDomainOwnable2_Test:test_onlyOwner_succeeds() (gas: 73282)
CrossDomainOwnable3_Test:test_constructor_succeeds() (gas: 10554)
CrossDomainOwnable3_Test:test_crossDomainOnlyOwner_notMessenger_reverts() (gas: 28334)
CrossDomainOwnable3_Test:test_crossDomainOnlyOwner_notOwner2_reverts() (gas: 76381)
CrossDomainOwnable3_Test:test_crossDomainOnlyOwner_notOwner2_reverts() (gas: 73730)
CrossDomainOwnable3_Test:test_crossDomainOnlyOwner_notOwner_reverts() (gas: 31978)
CrossDomainOwnable3_Test:test_crossDomainTransferOwnership_succeeds() (gas: 93938)
CrossDomainOwnable3_Test:test_crossDomainTransferOwnership_succeeds() (gas: 91287)
CrossDomainOwnable3_Test:test_localOnlyOwner_notOwner_reverts() (gas: 13193)
CrossDomainOwnable3_Test:test_localOnlyOwner_succeeds() (gas: 35220)
CrossDomainOwnable3_Test:test_localTransferOwnership_succeeds() (gas: 52128)
......@@ -70,20 +70,18 @@ L1BlockTest:test_timestamp_succeeds() (gas: 7640)
L1BlockTest:test_updateValues_succeeds() (gas: 60482)
L1CrossDomainMessenger_Test:test_messageVersion_succeeds() (gas: 24715)
L1CrossDomainMessenger_Test:test_relayMessage_legacyOldReplay_reverts() (gas: 49394)
L1CrossDomainMessenger_Test:test_relayMessage_legacyRetryAfterFailureThenSuccess_reverts() (gas: 232952)
L1CrossDomainMessenger_Test:test_relayMessage_legacyRetryAfterFailure_succeeds() (gas: 206446)
L1CrossDomainMessenger_Test:test_relayMessage_legacyRetryAfterSuccess_reverts() (gas: 146819)
L1CrossDomainMessenger_Test:test_relayMessage_legacy_succeeds() (gas: 79729)
L1CrossDomainMessenger_Test:test_relayMessage_reentrancyDiffMessage_succeeds() (gas: 725335)
L1CrossDomainMessenger_Test:test_relayMessage_reentrancySameMessage_reverts() (gas: 662298)
L1CrossDomainMessenger_Test:test_relayMessage_retryAfterFailure_succeeds() (gas: 200353)
L1CrossDomainMessenger_Test:test_relayMessage_succeeds() (gas: 76665)
L1CrossDomainMessenger_Test:test_relayMessage_toSystemContract_reverts() (gas: 101282)
L1CrossDomainMessenger_Test:test_relayMessage_legacyRetryAfterFailureThenSuccess_reverts() (gas: 209286)
L1CrossDomainMessenger_Test:test_relayMessage_legacyRetryAfterFailure_succeeds() (gas: 203184)
L1CrossDomainMessenger_Test:test_relayMessage_legacyRetryAfterSuccess_reverts() (gas: 123784)
L1CrossDomainMessenger_Test:test_relayMessage_legacy_succeeds() (gas: 77098)
L1CrossDomainMessenger_Test:test_relayMessage_retryAfterFailure_succeeds() (gas: 197091)
L1CrossDomainMessenger_Test:test_relayMessage_succeeds() (gas: 74034)
L1CrossDomainMessenger_Test:test_relayMessage_toSystemContract_reverts() (gas: 56540)
L1CrossDomainMessenger_Test:test_relayMessage_v2_reverts() (gas: 12365)
L1CrossDomainMessenger_Test:test_replayMessage_withValue_reverts() (gas: 53445)
L1CrossDomainMessenger_Test:test_replayMessage_withValue_reverts() (gas: 31063)
L1CrossDomainMessenger_Test:test_sendMessage_succeeds() (gas: 304740)
L1CrossDomainMessenger_Test:test_sendMessage_twice_succeeds() (gas: 1496124)
L1CrossDomainMessenger_Test:test_xDomainMessageSender_reset_succeeds() (gas: 87194)
L1CrossDomainMessenger_Test:test_xDomainMessageSender_reset_succeeds() (gas: 84563)
L1CrossDomainMessenger_Test:test_xDomainSender_notSet_reverts() (gas: 24296)
L1ERC721Bridge_Test:test_bridgeERC721To_localTokenZeroAddress_reverts() (gas: 52707)
L1ERC721Bridge_Test:test_bridgeERC721To_remoteTokenZeroAddress_reverts() (gas: 27310)
......@@ -120,15 +118,13 @@ L1StandardBridge_Getter_Test:test_getters_succeeds() (gas: 32173)
L1StandardBridge_Initialize_Test:test_initialize_succeeds() (gas: 22050)
L1StandardBridge_Receive_Test:test_receive_succeeds() (gas: 525438)
L2CrossDomainMessenger_Test:test_messageVersion_succeeds() (gas: 8411)
L2CrossDomainMessenger_Test:test_relayMessage_reentrancyDiffMessage_succeeds() (gas: 680395)
L2CrossDomainMessenger_Test:test_relayMessage_reentrancySameMessage_reverts() (gas: 626456)
L2CrossDomainMessenger_Test:test_relayMessage_retry_succeeds() (gas: 166461)
L2CrossDomainMessenger_Test:test_relayMessage_succeeds() (gas: 54980)
L2CrossDomainMessenger_Test:test_relayMessage_toSystemContract_reverts() (gas: 51448)
L2CrossDomainMessenger_Test:test_relayMessage_retry_succeeds() (gas: 163159)
L2CrossDomainMessenger_Test:test_relayMessage_succeeds() (gas: 48640)
L2CrossDomainMessenger_Test:test_relayMessage_toSystemContract_reverts() (gas: 29021)
L2CrossDomainMessenger_Test:test_relayMessage_v2_reverts() (gas: 11711)
L2CrossDomainMessenger_Test:test_sendMessage_succeeds() (gas: 122508)
L2CrossDomainMessenger_Test:test_sendMessage_twice_succeeds() (gas: 134826)
L2CrossDomainMessenger_Test:test_xDomainMessageSender_reset_succeeds() (gas: 54580)
L2CrossDomainMessenger_Test:test_xDomainMessageSender_reset_succeeds() (gas: 48139)
L2CrossDomainMessenger_Test:test_xDomainSender_senderNotSet_reverts() (gas: 10590)
L2ERC721Bridge_Test:test_bridgeERC721To_localTokenZeroAddress_reverts() (gas: 26431)
L2ERC721Bridge_Test:test_bridgeERC721To_remoteTokenZeroAddress_reverts() (gas: 21814)
......@@ -147,28 +143,29 @@ L2ERC721Bridge_Test:test_finalizeBridgeERC721_notViaLocalMessenger_reverts() (ga
L2ERC721Bridge_Test:test_finalizeBridgeERC721_selfToken_reverts() (gas: 17659)
L2ERC721Bridge_Test:test_finalizeBridgeERC721_succeeds() (gas: 168970)
L2OutputOracleTest:test_computeL2Timestamp_succeeds() (gas: 37298)
L2OutputOracleTest:test_constructor_badTimestamp_reverts() (gas: 70947)
L2OutputOracleTest:test_constructor_badTimestamp_reverts() (gas: 70991)
L2OutputOracleTest:test_constructor_l2BlockTimeZero_reverts() (gas: 45954)
L2OutputOracleTest:test_constructor_succeeds() (gas: 33827)
L2OutputOracleTest:test_deleteL2Outputs_afterLatest_reverts() (gas: 212262)
L2OutputOracleTest:test_deleteL2Outputs_finalized_reverts() (gas: 108967)
L2OutputOracleTest:test_constructor_submissionInterval_reverts() (gas: 45942)
L2OutputOracleTest:test_constructor_succeeds() (gas: 33805)
L2OutputOracleTest:test_deleteL2Outputs_afterLatest_reverts() (gas: 212306)
L2OutputOracleTest:test_deleteL2Outputs_finalized_reverts() (gas: 108990)
L2OutputOracleTest:test_deleteL2Outputs_ifNotChallenger_reverts() (gas: 18918)
L2OutputOracleTest:test_deleteL2Outputs_nonExistent_reverts() (gas: 107339)
L2OutputOracleTest:test_deleteOutputs_multipleOutputs_succeeds() (gas: 302439)
L2OutputOracleTest:test_deleteOutputs_singleOutput_succeeds() (gas: 181038)
L2OutputOracleTest:test_getL2OutputIndexAfter_multipleOutputsExist_succeeds() (gas: 267120)
L2OutputOracleTest:test_getL2OutputIndexAfter_noOutputsExis_reverts() (gas: 17959)
L2OutputOracleTest:test_getL2OutputIndexAfter_previousBlock_succeeds() (gas: 96066)
L2OutputOracleTest:test_getL2OutputIndexAfter_sameBlock_succeeds() (gas: 95995)
L2OutputOracleTest:test_getL2Output_succeeds() (gas: 101634)
L2OutputOracleTest:test_latestBlockNumber_succeeds() (gas: 96962)
L2OutputOracleTest:test_nextBlockNumber_succeeds() (gas: 17490)
L2OutputOracleTest:test_proposeL2Output_emptyOutput_reverts() (gas: 26690)
L2OutputOracleTest:test_proposeL2Output_futureTimetamp_reverts() (gas: 28669)
L2OutputOracleTest:test_proposeL2Output_notProposer_reverts() (gas: 25783)
L2OutputOracleTest:test_proposeL2Output_proposeAnotherOutput_succeeds() (gas: 101028)
L2OutputOracleTest:test_deleteOutputs_multipleOutputs_succeeds() (gas: 302462)
L2OutputOracleTest:test_deleteOutputs_singleOutput_succeeds() (gas: 181016)
L2OutputOracleTest:test_getL2OutputIndexAfter_multipleOutputsExist_succeeds() (gas: 267098)
L2OutputOracleTest:test_getL2OutputIndexAfter_noOutputsExis_reverts() (gas: 17937)
L2OutputOracleTest:test_getL2OutputIndexAfter_previousBlock_succeeds() (gas: 96044)
L2OutputOracleTest:test_getL2OutputIndexAfter_sameBlock_succeeds() (gas: 95973)
L2OutputOracleTest:test_getL2Output_succeeds() (gas: 101612)
L2OutputOracleTest:test_latestBlockNumber_succeeds() (gas: 96940)
L2OutputOracleTest:test_nextBlockNumber_succeeds() (gas: 17468)
L2OutputOracleTest:test_proposeL2Output_emptyOutput_reverts() (gas: 26668)
L2OutputOracleTest:test_proposeL2Output_futureTimetamp_reverts() (gas: 28647)
L2OutputOracleTest:test_proposeL2Output_notProposer_reverts() (gas: 25806)
L2OutputOracleTest:test_proposeL2Output_proposeAnotherOutput_succeeds() (gas: 101006)
L2OutputOracleTest:test_proposeL2Output_unexpectedBlockNumber_reverts() (gas: 28381)
L2OutputOracleTest:test_proposeL2Output_unmatchedBlockhash_reverts() (gas: 29381)
L2OutputOracleTest:test_proposeL2Output_unmatchedBlockhash_reverts() (gas: 29404)
L2OutputOracleTest:test_proposeL2Output_wrongFork_reverts() (gas: 28984)
L2OutputOracleTest:test_proposeWithBlockhashAndHeight_succeeds() (gas: 95253)
L2OutputOracleUpgradeable_Test:test_initValuesOnProxy_succeeds() (gas: 26208)
......
......@@ -24,8 +24,7 @@
| xDomainMsgSender | address | 204 | 0 | 20 | contracts/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| msgNonce | uint240 | 205 | 0 | 30 | contracts/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| failedMessages | mapping(bytes32 => bool) | 206 | 0 | 32 | contracts/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| reentrancyLocks | mapping(bytes32 => bool) | 207 | 0 | 32 | contracts/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| __gap | uint256[41] | 208 | 0 | 1312 | contracts/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| __gap | uint256[42] | 207 | 0 | 1344 | contracts/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
=======================
➡ contracts/L1/L1StandardBridge.sol:L1StandardBridge
......@@ -135,8 +134,7 @@
| xDomainMsgSender | address | 204 | 0 | 20 | contracts/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| msgNonce | uint240 | 205 | 0 | 30 | contracts/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| failedMessages | mapping(bytes32 => bool) | 206 | 0 | 32 | contracts/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| reentrancyLocks | mapping(bytes32 => bool) | 207 | 0 | 32 | contracts/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| __gap | uint256[41] | 208 | 0 | 1312 | contracts/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| __gap | uint256[42] | 207 | 0 | 1344 | contracts/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
=======================
➡ contracts/L2/L2StandardBridge.sol:L2StandardBridge
......
......@@ -78,7 +78,7 @@ contract L2OutputOracle is Initializable, Semver {
event OutputsDeleted(uint256 indexed prevNextOutputIndex, uint256 indexed newNextOutputIndex);
/**
* @custom:semver 1.2.0
* @custom:semver 1.3.0
*
* @param _submissionInterval Interval in blocks at which checkpoints must be submitted.
* @param _l2BlockTime The time per L2 block, in seconds.
......@@ -95,11 +95,11 @@ contract L2OutputOracle is Initializable, Semver {
address _proposer,
address _challenger,
uint256 _finalizationPeriodSeconds
) Semver(1, 2, 0) {
) Semver(1, 3, 0) {
require(_l2BlockTime > 0, "L2OutputOracle: L2 block time must be greater than 0");
require(
_submissionInterval > _l2BlockTime,
"L2OutputOracle: submission interval must be greater than L2 block time"
_submissionInterval > 0,
"L2OutputOracle: submission interval must be greater than 0"
);
SUBMISSION_INTERVAL = _submissionInterval;
......
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
import { Messenger_Initializer, Reverter, CallerCaller } from "./CommonTest.t.sol";
import { Messenger_Initializer, Reverter, CallerCaller, CommonTest } from "./CommonTest.t.sol";
import { L1CrossDomainMessenger } from "../L1/L1CrossDomainMessenger.sol";
// Libraries
import { Predeploys } from "../libraries/Predeploys.sol";
import { Hashing } from "../libraries/Hashing.sol";
import { Encoding } from "../libraries/Encoding.sol";
// CrossDomainMessenger_Test is for testing functionality which is common to both the L1 and L2
// CrossDomainMessenger contracts. For simplicity, we use the L1 Messenger as the test contract.
......@@ -17,3 +23,149 @@ contract CrossDomainMessenger_BaseGas_Test is Messenger_Initializer {
L1Messenger.baseGas(hex"ff", _minGasLimit);
}
}
/**
* @title ExternalRelay
* @notice A mock external contract called via `SafeCall.callWithMinGas` inside
* the CrossDomainMessenger's `relayMessage` function.
*/
contract ExternalRelay is CommonTest {
address internal op;
address internal fuzzedSender;
L1CrossDomainMessenger internal L1Messenger;
event FailedRelayedMessage(bytes32 indexed msgHash);
constructor(L1CrossDomainMessenger _l1Messenger, address _op) {
L1Messenger = _l1Messenger;
op = _op;
}
/**
* @notice Internal helper function to relay a message and perform assertions.
*/
function _internalRelay(address _innerSender) internal {
address initialSender = L1Messenger.xDomainMessageSender();
bytes memory callMessage = getCallData();
bytes32 hash = Hashing.hashCrossDomainMessage({
_nonce: Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
_sender: _innerSender,
_target: address(this),
_value: 0,
_gasLimit: 0,
_data: callMessage
});
vm.expectEmit(true, true, true, true);
emit FailedRelayedMessage(hash);
vm.prank(address(op));
L1Messenger.relayMessage({
_nonce: Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
_sender: _innerSender,
_target: address(this),
_value: 0,
_minGasLimit: 0,
_message: callMessage
});
assertTrue(L1Messenger.failedMessages(hash));
assertFalse(L1Messenger.successfulMessages(hash));
assertEq(initialSender, L1Messenger.xDomainMessageSender());
}
/**
* @notice externalCallWithMinGas is called by the CrossDomainMessenger.
*/
function externalCallWithMinGas() external payable {
for (uint256 i = 0; i < 10; i++) {
address _innerSender;
unchecked {
_innerSender = address(uint160(uint256(uint160(fuzzedSender)) + i));
}
_internalRelay(_innerSender);
}
}
/**
* @notice Helper function to get the callData for an `externalCallWithMinGas` call.
*/
function getCallData() public returns (bytes memory) {
return abi.encodeWithSelector(ExternalRelay.externalCallWithMinGas.selector);
}
/**
* @notice Helper function to set the fuzzed sender
*/
function setFuzzedSender(address _fuzzedSender) public {
fuzzedSender = _fuzzedSender;
}
}
/**
* @title CrossDomainMessenger_RelayMessage_Test
* @notice Fuzz tests re-entrancy into the CrossDomainMessenger relayMessage function.
*/
contract CrossDomainMessenger_RelayMessage_Test is Messenger_Initializer {
// Storage slot of the l2Sender
uint256 constant senderSlotIndex = 50;
ExternalRelay public er;
function setUp() public override {
super.setUp();
er = new ExternalRelay(L1Messenger, address(op));
}
/**
* @dev This test mocks an OptimismPortal call to the L1CrossDomainMessenger via
* the relayMessage function. The relayMessage function will then use SafeCall's
* callWithMinGas to call the target with call data packed in the callMessage.
* For this test, the callWithMinGas will call the mock ExternalRelay test contract
* defined above, executing the externalCallWithMinGas function which will try to
* re-enter the CrossDomainMessenger's relayMessage function, resulting in that message
* being recorded as failed.
*/
function testFuzz_relayMessageReenter_succeeds(address _sender, uint256 _gasLimit) external {
vm.assume(_sender != Predeploys.L2_CROSS_DOMAIN_MESSENGER);
address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER;
er.setFuzzedSender(_sender);
address target = address(er);
bytes memory callMessage = er.getCallData();
vm.expectCall(target, callMessage);
uint64 gasLimit = uint64(bound(_gasLimit, 0, 30_000_000));
bytes32 hash = Hashing.hashCrossDomainMessage({
_nonce: Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
_sender: sender,
_target: target,
_value: 0,
_gasLimit: gasLimit,
_data: callMessage
});
// set the value of op.l2Sender() to be the L2 Cross Domain Messenger.
vm.store(address(op), bytes32(senderSlotIndex), bytes32(abi.encode(sender)));
vm.prank(address(op));
L1Messenger.relayMessage({
_nonce: Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
_sender: sender,
_target: target,
_value: 0,
_minGasLimit: gasLimit,
_message: callMessage
});
assertTrue(L1Messenger.successfulMessages(hash));
assertEq(L1Messenger.failedMessages(hash), false);
// Ensures that `xDomainMsgSender` was reset back to `Constants.DEFAULT_L2_SENDER` after the relay
vm.expectRevert("CrossDomainMessenger: xDomainMessageSender is not set");
L1Messenger.xDomainMessageSender();
}
}
......@@ -100,10 +100,6 @@ contract L1CrossDomainMessenger_Test is Messenger_Initializer {
L1Messenger.xDomainMessageSender();
}
// xDomainMessageSender: should return the xDomainMsgSender address
// TODO: might need a test contract
// function test_xDomainSenderSetCorrectly() external {}
function test_relayMessage_v2_reverts() external {
address target = address(0xabcd);
address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER;
......@@ -295,173 +291,6 @@ contract L1CrossDomainMessenger_Test is Messenger_Initializer {
assertEq(L1Messenger.failedMessages(hash), true);
}
// relayMessage: Should revert if the recipient is trying to reenter with the
// same message.
function test_relayMessage_reentrancySameMessage_reverts() external {
ConfigurableCaller caller = new ConfigurableCaller();
address target = address(caller);
address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER;
bytes memory callMessage = abi.encodeWithSelector(caller.call.selector);
bytes32 hash = Hashing.hashCrossDomainMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
callMessage
);
// Set the portal's `l2Sender` to the `sender`.
vm.store(address(op), bytes32(senderSlotIndex), bytes32(uint256(uint160(sender))));
// Act as the portal and call the `relayMessage` function with the `innerMessage`.
vm.prank(address(op));
vm.expectCall(target, callMessage);
L1Messenger.relayMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
callMessage
);
// Assert that the message failed to be relayed
assertFalse(L1Messenger.successfulMessages(hash));
assertTrue(L1Messenger.failedMessages(hash));
// Set the configurable caller's target to `L1Messenger` and set the payload to `relayMessage(...)`.
caller.setDoRevert(false);
caller.setTarget(address(L1Messenger));
caller.setPayload(
abi.encodeWithSelector(
L1Messenger.relayMessage.selector,
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
callMessage
)
);
// Attempt to replay the failed message, which will *not* immediately revert this time around,
// but attempt to reenter `relayMessage` with the same message hash. The reentrancy attempt should
// revert.
vm.expectEmit(true, true, true, true, target);
emit WhatHappened(
false,
abi.encodeWithSignature("Error(string)", "ReentrancyGuard: reentrant call")
);
L1Messenger.relayMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), // nonce
sender,
target,
0,
0,
callMessage
);
// Assert that the message still failed to be relayed.
assertFalse(L1Messenger.successfulMessages(hash));
assertTrue(L1Messenger.failedMessages(hash));
}
// relayMessage: should not revert if the recipient reenters `relayMessage` with a different
// message hash.
function test_relayMessage_reentrancyDiffMessage_succeeds() external {
ConfigurableCaller caller = new ConfigurableCaller();
address target = address(caller);
address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER;
bytes memory messageA = abi.encodeWithSelector(caller.call.selector);
bytes memory messageB = hex"";
bytes32 hashA = Hashing.hashCrossDomainMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
messageA
);
bytes32 hashB = Hashing.hashCrossDomainMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
messageB
);
// Set the portal's `l2Sender` to the `sender`.
vm.store(address(op), bytes32(senderSlotIndex), bytes32(uint256(uint160(sender))));
// Act as the portal and call the `relayMessage` function with both `messageA` and `messageB`.
vm.startPrank(address(op));
vm.expectCall(target, messageA);
L1Messenger.relayMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
messageA
);
vm.expectCall(target, messageB);
L1Messenger.relayMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
messageB
);
// Stop acting as the portal
vm.stopPrank();
// Assert that both messages failed to be relayed
assertFalse(L1Messenger.successfulMessages(hashA));
assertFalse(L1Messenger.successfulMessages(hashB));
assertTrue(L1Messenger.failedMessages(hashA));
assertTrue(L1Messenger.failedMessages(hashB));
// Set the configurable caller's target to `L1Messenger` and set the payload to `relayMessage(...)`.
caller.setDoRevert(false);
caller.setTarget(address(L1Messenger));
caller.setPayload(
abi.encodeWithSelector(
L1Messenger.relayMessage.selector,
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
messageB
)
);
// Attempt to replay the failed message, which will *not* immediately revert this time around,
// but attempt to reenter `relayMessage` with messageB. The reentrancy attempt should succeed
// because the message hashes are different.
vm.expectEmit(true, true, true, true, target);
emit WhatHappened(true, hex"");
L1Messenger.relayMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
messageA
);
// Assert that both messages are now in the `successfulMessages` mapping.
assertTrue(L1Messenger.successfulMessages(hashA));
assertTrue(L1Messenger.successfulMessages(hashB));
}
function test_relayMessage_legacy_succeeds() external {
address target = address(0xabcd);
address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER;
......
......@@ -230,168 +230,4 @@ contract L2CrossDomainMessenger_Test is Messenger_Initializer {
assertEq(L2Messenger.successfulMessages(hash), true);
assertEq(L2Messenger.failedMessages(hash), true);
}
// relayMessage: Should revert if the recipient is trying to reenter with the
// same message.
function test_relayMessage_reentrancySameMessage_reverts() external {
ConfigurableCaller caller = new ConfigurableCaller();
address target = address(caller);
address sender = address(L1Messenger);
address l1XDMAlias = AddressAliasHelper.applyL1ToL2Alias(address(L1Messenger));
bytes memory callMessage = abi.encodeWithSelector(caller.call.selector);
bytes32 hash = Hashing.hashCrossDomainMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
callMessage
);
// Act as the L1XDM and call the `relayMessage` function with the `innerMessage`.
vm.prank(l1XDMAlias);
vm.expectCall(target, callMessage);
L2Messenger.relayMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
callMessage
);
// Assert that the message failed to be relayed
assertFalse(L2Messenger.successfulMessages(hash));
assertTrue(L2Messenger.failedMessages(hash));
// Set the configurable caller's target to `L2Messenger` and set the payload to `relayMessage(...)`.
caller.setDoRevert(false);
caller.setTarget(address(L2Messenger));
caller.setPayload(
abi.encodeWithSelector(
L2Messenger.relayMessage.selector,
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
callMessage
)
);
// Attempt to replay the failed message, which will *not* immediately revert this time around,
// but attempt to reenter `relayMessage` with the same message hash. The reentrancy attempt should
// revert.
vm.expectEmit(true, true, true, true, target);
emit WhatHappened(
false,
abi.encodeWithSignature("Error(string)", "ReentrancyGuard: reentrant call")
);
L2Messenger.relayMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
callMessage
);
// Assert that the message still failed to be relayed.
assertFalse(L2Messenger.successfulMessages(hash));
assertTrue(L2Messenger.failedMessages(hash));
}
// relayMessage: should not revert if the recipient reenters `relayMessage` with a different
// message hash.
function test_relayMessage_reentrancyDiffMessage_succeeds() external {
ConfigurableCaller caller = new ConfigurableCaller();
address target = address(caller);
address sender = address(L1Messenger);
address l1XDMAlias = AddressAliasHelper.applyL1ToL2Alias(address(L1Messenger));
bytes memory messageA = abi.encodeWithSelector(caller.call.selector);
bytes memory messageB = hex"";
bytes32 hashA = Hashing.hashCrossDomainMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
messageA
);
bytes32 hashB = Hashing.hashCrossDomainMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
messageB
);
// Act as the L1XDM and call the `relayMessage` function with both `messageA` and `messageB`.
vm.startPrank(l1XDMAlias);
vm.expectCall(target, messageA);
L2Messenger.relayMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
messageA
);
vm.expectCall(target, messageB);
L2Messenger.relayMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
messageB
);
// Stop acting as the L1XDM
vm.stopPrank();
// Assert that both messages failed to be relayed
assertFalse(L2Messenger.successfulMessages(hashA));
assertFalse(L2Messenger.successfulMessages(hashB));
assertTrue(L2Messenger.failedMessages(hashA));
assertTrue(L2Messenger.failedMessages(hashB));
// Set the configurable caller's target to `L2Messenger` and set the payload to `relayMessage(...)`.
caller.setDoRevert(false);
caller.setTarget(address(L2Messenger));
caller.setPayload(
abi.encodeWithSelector(
L2Messenger.relayMessage.selector,
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
messageB
)
);
// Attempt to replay the failed message, which will *not* immediately revert this time around,
// but attempt to reenter `relayMessage` with messageB. The reentrancy attempt should succeed
// because the message hashes are different.
vm.expectEmit(true, true, true, true, target);
emit WhatHappened(true, hex"");
L2Messenger.relayMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
messageA
);
// Assert that both messages are now in the `successfulMessages` mapping.
assertTrue(L2Messenger.successfulMessages(hashA));
assertTrue(L2Messenger.successfulMessages(hashB));
}
}
......@@ -47,21 +47,11 @@ contract L2OutputOracleTest is L2OutputOracle_Initializer {
});
}
function testFuzz_constructor_submissionIntervalLteL2BlockTime_reverts(
uint256 _submissionInterval,
uint256 _l2BlockTime
) external {
// Bound the _l2blockTime to be in the range of [1, type(uint256).max]
_l2BlockTime = bound(_l2BlockTime, 1, type(uint256).max);
// Roll the block number to _l2blockTime (the starting L2 timestamp must be less than or equal to the current time)
vm.roll(_l2BlockTime);
// Bound _submissionInterval to be less than or equal to _l2BlockTime
_submissionInterval = bound(_submissionInterval, 0, _l2BlockTime);
vm.expectRevert("L2OutputOracle: submission interval must be greater than L2 block time");
function test_constructor_submissionInterval_reverts() external {
vm.expectRevert("L2OutputOracle: submission interval must be greater than 0");
new L2OutputOracle({
_submissionInterval: _submissionInterval,
_l2BlockTime: _l2BlockTime,
_submissionInterval: 0,
_l2BlockTime: l2BlockTime,
_startingBlockNumber: startingBlockNumber,
_startingTimestamp: block.timestamp,
_proposer: proposer,
......
......@@ -1019,7 +1019,7 @@ contract OptimismPortal_FinalizeWithdrawal_Test is Portal_Initializer {
assertEq(outputRoot, Hashing.hashOutputRootProof(proof));
assertEq(withdrawalHash, Hashing.hashWithdrawal(_tx));
// Mock the call to the oracle
// Set up the Oracle to return the outputRoot
vm.mockCall(
address(oracle),
abi.encodeWithSelector(oracle.getL2Output.selector),
......@@ -1039,8 +1039,6 @@ contract OptimismPortal_FinalizeWithdrawal_Test is Portal_Initializer {
// Warp past the finalization period
vm.warp(block.timestamp + oracle.FINALIZATION_PERIOD_SECONDS() + 1);
uint256 targetBalanceBefore = _target.balance;
// Finalize the withdrawal transaction
vm.expectCallMinGas(_tx.target, _tx.value, uint64(_tx.gasLimit), _tx.data);
op.finalizeWithdrawalTransaction(_tx);
......
......@@ -175,17 +175,12 @@ abstract contract CrossDomainMessenger is
*/
mapping(bytes32 => bool) public failedMessages;
/**
* @notice A mapping of hashes to reentrancy locks.
*/
mapping(bytes32 => bool) internal reentrancyLocks;
/**
* @notice Reserve extra slots in the storage layout for future upgrades.
* A gap size of 41 was chosen here, so that the first slot used in a child contract
* would be a multiple of 50.
*/
uint256[41] private __gap;
uint256[42] private __gap;
/**
* @notice Emitted whenever a message is sent to the other chain.
......@@ -323,13 +318,6 @@ abstract contract CrossDomainMessenger is
_message
);
// Check if the reentrancy lock for the `versionedHash` is already set.
if (reentrancyLocks[versionedHash]) {
revert("ReentrancyGuard: reentrant call");
}
// Trigger the reentrancy lock for `versionedHash`
reentrancyLocks[versionedHash] = true;
if (_isOtherMessenger()) {
// These properties should always hold when the message is first submitted (as
// opposed to being replayed).
......@@ -357,6 +345,15 @@ abstract contract CrossDomainMessenger is
"CrossDomainMessenger: message has already been relayed"
);
// If `xDomainMsgSender` is not the default L2 sender, this function
// is being re-entered. This marks the message as failed to allow it
// to be replayed.
if (xDomainMsgSender != Constants.DEFAULT_L2_SENDER) {
failedMessages[versionedHash] = true;
emit FailedRelayedMessage(versionedHash);
return;
}
xDomainMsgSender = _sender;
bool success = SafeCall.callWithMinGas(_target, _minGasLimit, _value, _message);
xDomainMsgSender = Constants.DEFAULT_L2_SENDER;
......@@ -377,9 +374,6 @@ abstract contract CrossDomainMessenger is
revert("CrossDomainMessenger: failed to relay message");
}
}
// Clear the reentrancy lock for `versionedHash`
reentrancyLocks[versionedHash] = false;
}
/**
......