Commit 707d8735 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into jm/wd-mon-fix-metrics-label

parents a26c484a a09c7d01
......@@ -60,7 +60,7 @@ commands:
jobs:
yarn-monorepo:
docker:
- image: ethereumoptimism/ci-builder:latest
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
resource_class: large
steps:
- checkout
......@@ -286,7 +286,7 @@ jobs:
command: |
./ops/scripts/ci-docker-tag-op-stack-release.sh <<parameters.registry>>/<<parameters.repo>> $CIRCLE_TAG $CIRCLE_SHA1
contracts-bedrock-tests:
contracts-bedrock-coverage:
docker:
- image: ethereumoptimism/ci-builder:latest
resource_class: large
......@@ -304,9 +304,8 @@ jobs:
command: forge --version
working_directory: packages/contracts-bedrock
- run:
name: test and generate coverage
name: generate coverage report
command: yarn coverage:lcov
no_output_timeout: 18m
environment:
FOUNDRY_PROFILE: ci
working_directory: packages/contracts-bedrock
......@@ -316,9 +315,33 @@ jobs:
environment:
FOUNDRY_PROFILE: ci
contracts-bedrock-tests:
docker:
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
resource_class: large
steps:
- checkout
- attach_workspace: { at: "." }
- restore_cache:
name: Restore Yarn Package Cache
keys:
- yarn-packages-v2-{{ checksum "yarn.lock" }}
- check-changed:
patterns: contracts-bedrock,hardhat-deploy-config
- run:
name: print forge version
command: forge --version
working_directory: packages/contracts-bedrock
- run:
name: run tests
command: yarn test
environment:
FOUNDRY_PROFILE: ci
working_directory: packages/contracts-bedrock
contracts-bedrock-checks:
docker:
- image: ethereumoptimism/ci-builder:latest
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
steps:
- checkout
- attach_workspace: { at: "." }
......@@ -378,7 +401,7 @@ jobs:
contracts-bedrock-slither:
docker:
- image: ethereumoptimism/ci-builder:latest
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
resource_class: large
steps:
- checkout
......@@ -398,7 +421,7 @@ jobs:
contracts-bedrock-validate-spaces:
docker:
- image: ethereumoptimism/ci-builder:latest
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
steps:
- checkout
- attach_workspace: { at: "." }
......@@ -415,7 +438,7 @@ jobs:
bedrock-echidna-build:
docker:
- image: ethereumoptimism/ci-builder:latest
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
steps:
- checkout
- attach_workspace: { at: "." }
......@@ -433,7 +456,7 @@ jobs:
bedrock-echidna-run:
docker:
- image: ethereumoptimism/ci-builder:latest
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
parameters:
echidna_target:
description: Which echidna fuzz contract to run
......@@ -460,7 +483,7 @@ jobs:
op-bindings-build:
docker:
- image: ethereumoptimism/ci-builder:latest
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
resource_class: medium
steps:
- checkout
......@@ -489,7 +512,7 @@ jobs:
description: Coverage flag name
type: string
docker:
- image: ethereumoptimism/ci-builder:latest
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
resource_class: large
steps:
- checkout
......@@ -535,7 +558,7 @@ jobs:
fuzz-op-node:
docker:
- image: ethereumoptimism/ci-builder:latest
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
steps:
- checkout
- check-changed:
......@@ -547,7 +570,7 @@ jobs:
depcheck:
docker:
- image: ethereumoptimism/ci-builder:latest
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
steps:
- checkout
- attach_workspace: { at: "." }
......@@ -609,7 +632,7 @@ jobs:
description: Go Module Name
type: string
docker:
- image: ethereumoptimism/ci-builder:latest # only used to enable codecov.
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest # only used to enable codecov.
resource_class: xlarge
steps:
- checkout
......@@ -637,7 +660,7 @@ jobs:
description: If the op-e2e package should use HTTP clients
type: string
docker:
- image: ethereumoptimism/ci-builder:latest
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
resource_class: xlarge
steps:
- checkout
......@@ -676,7 +699,7 @@ jobs:
type: string
default: this-package-does-not-exist
docker:
- image: ethereumoptimism/ci-builder:latest
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
- image: cimg/postgres:14.1
steps:
- checkout
......@@ -705,7 +728,7 @@ jobs:
geth-tests:
docker:
- image: ethereumoptimism/ci-builder:latest
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
steps:
- checkout
- check-changed:
......@@ -925,7 +948,7 @@ jobs:
go-mod-tidy:
docker:
- image: ethereumoptimism/ci-builder:latest
- image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
steps:
- checkout
- run:
......@@ -1007,6 +1030,9 @@ workflows:
- contracts-bedrock-tests:
requires:
- yarn-monorepo
- contracts-bedrock-coverage:
requires:
- yarn-monorepo
- contracts-bedrock-checks:
requires:
- yarn-monorepo
......@@ -1371,3 +1397,18 @@ workflows:
- oplabs-gcr-release
requires:
- hold
release-ci-builder:
jobs:
- docker-publish:
name: ci-builder-docker-publish
filters:
tags:
only: /^ci-builder\/v.*/
branches:
ignore: /.*/
docker_file: ./ops/docker/ci-builder/Dockerfile
docker_name: ci-builder
docker_tags: <<pipeline.git.revision>>,latest
docker_context: ./ops/docker/ci-builder
context:
- oplabs-gcr
\ No newline at end of file
......@@ -27,7 +27,6 @@ jobs:
op-exporter: ${{ steps.packages.outputs.op-exporter }}
l2geth-exporter: ${{ steps.packages.outputs.l2geth-exporter }}
batch-submitter-service: ${{ steps.packages.outputs.batch-submitter-service }}
ci-builder: ${{ steps.packages.outputs.ci-builder }}
foundry: ${{ steps.packages.outputs.foundry }}
endpoint-monitor: ${{ steps.packages.outputs.endpoint-monitor }}
......@@ -159,32 +158,6 @@ jobs:
push: true
tags: ethereumoptimism/hardhat-node:${{ needs.release.outputs.hardhat-node }},ethereumoptimism/hardhat-node:latest
ci-builder:
name: Publish ci-builder ${{ needs.release.outputs.ci-builder }}
needs: release
if: needs.release.outputs.ci-builder != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Publish ci-builder
uses: docker/build-push-action@v2
with:
context: ./ops/docker/ci-builder
file: ./ops/docker/ci-builder/Dockerfile
push: true
tags: ethereumoptimism/ci-builder:${{ needs.release.outputs.ci-builder }},ethereumoptimism/ci-builder:latest
foundry:
name: Publish foundry ${{ needs.release.outputs.foundry }}
needs: release
......
......@@ -221,7 +221,7 @@ Once you’ve configured your network, it’s time to deploy the L1 smart contra
1. Once you’re ready, deploy the L1 smart contracts:
```bash
npx hardhat deploy --network getting-started
npx hardhat deploy --network getting-started --tags l1
```
Contract deployment can take up to 15 minutes. Please wait for all smart contracts to be fully deployed before continuing to the next step.
......
This diff is collapsed.
......@@ -22,11 +22,14 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
)
// abiTrue represents the storage representation of the boolean
......@@ -116,6 +119,10 @@ func main() {
Value: "bad-withdrawals.json",
Usage: "Path to write JSON file of bad withdrawals to manually inspect",
},
&cli.StringFlag{
Name: "storage-out",
Usage: "Path to write text file of L2ToL1MessagePasser storage",
},
},
Action: func(ctx *cli.Context) error {
clients, err := util.NewClients(ctx)
......@@ -163,10 +170,11 @@ func main() {
}
outfile := ctx.String("bad-withdrawals-out")
f, err := os.OpenFile(outfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o755)
f, err := os.OpenFile(outfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o644)
if err != nil {
return err
}
defer f.Close()
// create a transactor
opts, err := newTransactor(ctx)
......@@ -177,6 +185,28 @@ func main() {
// Need this to compare in event parsing
l1StandardBridgeAddress := common.HexToAddress(ctx.String("l1-standard-bridge-address"))
if storageOutfile := ctx.String("storage-out"); storageOutfile != "" {
ff, err := os.OpenFile(storageOutfile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o644)
if err != nil {
return err
}
defer ff.Close()
log.Info("Fetching storage for L2ToL1MessagePasser")
if storageRange, err := callStorageRange(clients, predeploys.L2ToL1MessagePasserAddr); err != nil {
log.Info("error getting storage range", "err", err)
} else {
str := ""
for key, value := range storageRange {
str += fmt.Sprintf("%s: %s\n", key.Hex(), value.Hex())
}
_, err = ff.WriteString(str)
if err != nil {
return err
}
}
}
// iterate over all of the withdrawals and submit them
for i, wd := range wds {
log.Info("Processing withdrawal", "index", i)
......@@ -234,7 +264,7 @@ func main() {
// successful messages can be skipped, received messages failed
// their execution and should be replayed
if isSuccessNew {
log.Info("Message already relayed", "index", i, "hash", hash, "slot", slot)
log.Info("Message already relayed", "index", i, "hash", hash.Hex(), "slot", slot.Hex())
continue
}
......@@ -248,7 +278,7 @@ func main() {
// the value should be set to a boolean in storage
if !bytes.Equal(storageValue, abiTrue.Bytes()) {
return fmt.Errorf("storage slot %x not found in state", slot)
return fmt.Errorf("storage slot %x not found in state", slot.Hex())
}
legacySlot, err := wd.StorageSlot()
......@@ -443,10 +473,48 @@ func callTrace(c *util.Clients, receipt *types.Receipt) (callFrame, error) {
Tracer: &tracer,
}
err := c.L1RpcClient.Call(&finalizationTrace, "debug_traceTransaction", receipt.TxHash, traceConfig)
return finalizationTrace, err
}
func callStorageRangeAt(
client *rpc.Client,
blockHash common.Hash,
txIndex int,
addr common.Address,
keyStart hexutil.Bytes,
maxResult int,
) (*eth.StorageRangeResult, error) {
var storageRange *eth.StorageRangeResult
err := client.Call(&storageRange, "debug_storageRangeAt", blockHash, txIndex, addr, keyStart, maxResult)
return storageRange, err
}
func callStorageRange(c *util.Clients, addr common.Address) (state.Storage, error) {
header, err := c.L2Client.HeaderByNumber(context.Background(), nil)
if err != nil {
return finalizationTrace, err
return nil, err
}
return finalizationTrace, err
hash := header.Hash()
keyStart := hexutil.Bytes(common.Hash{}.Bytes())
maxResult := 1000
ret := make(state.Storage)
for {
result, err := callStorageRangeAt(c.L2RpcClient, hash, 0, addr, keyStart, maxResult)
if err != nil {
return nil, err
}
for key, value := range result.Storage {
ret[key] = value.Value
}
if result.NextKey == nil {
break
} else {
keyStart = hexutil.Bytes(result.NextKey.Bytes())
}
}
return ret, nil
}
// handleFinalizeETHWithdrawal will ensure that the calldata is correct
......@@ -709,9 +777,13 @@ func newWithdrawals(ctx *cli.Context, l1ChainID *big.Int) ([]*crossdomain.Legacy
witnessFile := ctx.String("witness-file")
log.Debug("Migration data", "ovm-path", ovmMsgs, "evm-messages", evmMsgs, "witness-file", witnessFile)
ovmMessages, err := crossdomain.NewSentMessageFromJSON(ovmMsgs)
if err != nil {
return nil, err
var ovmMessages []*crossdomain.SentMessage
var err error
if ovmMsgs != "" {
ovmMessages, err = crossdomain.NewSentMessageFromJSON(ovmMsgs)
if err != nil {
return nil, err
}
}
// use empty ovmMessages if it's not mainnet. The mainnet messages are
......
......@@ -96,17 +96,12 @@ func MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger *com
return w, nil
}
// MigrateWithdrawalGasLimit computes the gas limit for the migrated withdrawal.
func MigrateWithdrawalGasLimit(data []byte) uint64 {
// Compute the cost of the calldata
dataCost := uint64(0)
for _, b := range data {
if b == 0 {
dataCost += params.TxDataZeroGas
} else {
dataCost += params.TxDataNonZeroGasEIP2028
}
}
// Compute the upper bound on the gas limit. This could be more
// accurate if individual zero and non-zero bytes were accounted
// for.
dataCost := uint64(len(data)) * params.TxDataNonZeroGasEIP2028
// Set the outer gas limit. This cannot be zero
gasLimit := dataCost + 200_000
// Cap the gas limit to be 25 million to prevent creating withdrawals
......
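For reference, a minimal self-contained sketch of the migrated gas-limit calculation above, with the worked values the updated test expects. The 25 million cap is assumed from the truncated comment and is not shown in full in this diff.

```go
package main

import "fmt"

// txDataNonZeroGasEIP2028 mirrors params.TxDataNonZeroGasEIP2028 (16 gas per calldata byte).
const txDataNonZeroGasEIP2028 = 16

// migrateWithdrawalGasLimit charges every calldata byte at the non-zero rate
// (an upper bound on the calldata cost) and adds a fixed 200_000 buffer.
func migrateWithdrawalGasLimit(data []byte) uint64 {
	dataCost := uint64(len(data)) * txDataNonZeroGasEIP2028
	gasLimit := dataCost + 200_000
	// Assumed 25 million cap, per the truncated comment above.
	if gasLimit > 25_000_000 {
		gasLimit = 25_000_000
	}
	return gasLimit
}

func main() {
	fmt.Println(migrateWithdrawalGasLimit([]byte{0xff, 0x00})) // 200032, matching the updated test case
	fmt.Println(migrateWithdrawalGasLimit([]byte{0x00}))       // 200016
}
```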
......@@ -71,15 +71,15 @@ func TestMigrateWithdrawalGasLimit(t *testing.T) {
},
{
input: []byte{0xff, 0x00},
output: 200_000 + 16 + 4,
output: 200_000 + 16 + 16,
},
{
input: []byte{0x00},
output: 200_000 + 4,
output: 200_000 + 16,
},
{
input: []byte{0x00, 0x00, 0x00},
output: 200_000 + 4 + 4 + 4,
output: 200_000 + 16 + 16 + 16,
},
}
......
......@@ -233,6 +233,10 @@ type System struct {
Mocknet mocknet.Mocknet
}
func (sys *System) NodeEndpoint(name string) string {
return selectEndpoint(sys.Nodes[name])
}
func (sys *System) Close() {
if sys.L2OutputSubmitter != nil {
sys.L2OutputSubmitter.Stop()
......@@ -619,13 +623,17 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
return sys, nil
}
func configureL1(rollupNodeCfg *rollupNode.Config, l1Node *node.Node) {
l1EndpointConfig := l1Node.WSEndpoint()
func selectEndpoint(node *node.Node) string {
useHTTP := os.Getenv("OP_E2E_USE_HTTP") == "true"
if useHTTP {
log.Info("using HTTP client")
l1EndpointConfig = l1Node.HTTPEndpoint()
return node.HTTPEndpoint()
}
return node.WSEndpoint()
}
func configureL1(rollupNodeCfg *rollupNode.Config, l1Node *node.Node) {
l1EndpointConfig := selectEndpoint(l1Node)
rollupNodeCfg.L1 = &rollupNode.L1EndpointConfig{
L1NodeAddr: l1EndpointConfig,
L1TrustRPC: false,
......
package op_e2e
import (
"context"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
opp "github.com/ethereum-optimism/optimism/op-program/host"
oppconf "github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/stretchr/testify/require"
)
func TestVerifyL2OutputRoot(t *testing.T) {
parallel(t)
ctx := context.Background()
cfg := DefaultSystemConfig(t)
// We don't need a verifier - just the sequencer is enough
delete(cfg.Nodes, "verifier")
sys, err := cfg.Start()
require.Nil(t, err, "Error starting up system")
defer sys.Close()
log := testlog.Logger(t, log.LvlInfo)
log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time)
l1Client := sys.Clients["l1"]
l2Seq := sys.Clients["sequencer"]
rollupRPCClient, err := rpc.DialContext(context.Background(), sys.RollupNodes["sequencer"].HTTPEndpoint())
require.Nil(t, err)
rollupClient := sources.NewRollupClient(client.NewBaseRPCClient(rollupRPCClient))
// TODO (CLI-3855): Actually perform some tx to set up a more complex chain.
// Wait for the safe head to reach block 10
require.NoError(t, waitForSafeHead(ctx, 10, rollupClient))
// Use block 5 as the agreed starting block on L2
l2AgreedBlock, err := l2Seq.BlockByNumber(ctx, big.NewInt(5))
require.NoError(t, err, "could not retrieve l2 genesis")
l2Head := l2AgreedBlock.Hash() // Agreed starting L2 block
// Get the expected output at block 10
l2ClaimBlockNumber := uint64(10)
l2Output, err := rollupClient.OutputAtBlock(ctx, l2ClaimBlockNumber)
require.NoError(t, err, "could not get expected output")
l2Claim := l2Output.OutputRoot
// Find the current L1 head
l1BlockNumber, err := l1Client.BlockNumber(ctx)
require.NoError(t, err, "get l1 head block number")
l1HeadBlock, err := l1Client.BlockByNumber(ctx, new(big.Int).SetUint64(l1BlockNumber))
require.NoError(t, err, "get l1 head block")
l1Head := l1HeadBlock.Hash()
preimageDir := t.TempDir()
fppConfig := oppconf.NewConfig(sys.RollupConfig, sys.L2GenesisCfg.Config, l1Head, l2Head, common.Hash(l2Claim), l2ClaimBlockNumber)
fppConfig.L1URL = sys.NodeEndpoint("l1")
fppConfig.L2URL = sys.NodeEndpoint("sequencer")
fppConfig.DataDir = preimageDir
// Check the FPP confirms the expected output
t.Log("Running fault proof in fetching mode")
err = opp.FaultProofProgram(log, fppConfig)
require.NoError(t, err)
// Shutdown the nodes from the actual chain. Should now be able to run using only the pre-fetched data.
for _, node := range sys.Nodes {
require.NoError(t, node.Close())
}
t.Log("Running fault proof in offline mode")
// Should be able to rerun in offline mode using the pre-fetched images
fppConfig.L1URL = ""
fppConfig.L2URL = ""
err = opp.FaultProofProgram(log, fppConfig)
require.NoError(t, err)
// Check that a fault is detected if we provide an incorrect claim
t.Log("Running fault proof with invalid claim")
fppConfig.L2Claim = common.Hash{0xaa}
err = opp.FaultProofProgram(log, fppConfig)
require.ErrorIs(t, err, opp.ErrClaimNotValid)
}
func waitForSafeHead(ctx context.Context, safeBlockNum uint64, rollupClient *sources.RollupClient) error {
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
for {
seqStatus, err := rollupClient.SyncStatus(ctx)
if err != nil {
return err
}
if seqStatus.SafeL2.Number >= safeBlockNum {
return nil
}
}
}
......@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
......@@ -36,6 +37,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/withdrawals"
"github.com/ethereum-optimism/optimism/op-service/backoff"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
)
......@@ -619,6 +621,24 @@ func TestSystemMockP2P(t *testing.T) {
// Enable the sequencer now that everyone is ready to receive payloads.
rollupRPCClient, err := rpc.DialContext(context.Background(), sys.RollupNodes["sequencer"].HTTPEndpoint())
require.Nil(t, err)
verifierPeerID := sys.RollupNodes["verifier"].P2P().Host().ID()
check := func() bool {
sequencerBlocksTopicPeers := sys.RollupNodes["sequencer"].P2P().GossipOut().BlocksTopicPeers()
return slices.Contains[peer.ID](sequencerBlocksTopicPeers, verifierPeerID)
}
// Poll to see if the verifier node is connected and meshed on gossip.
// Until the verifier is meshed, we shouldn't start sending blocks around, or it will miss them and the test will fail.
backOffStrategy := backoff.Exponential()
for i := 0; i < 10; i++ {
if check() {
break
}
time.Sleep(backOffStrategy.Duration(i))
}
require.True(t, check(), "verifier must be meshed with sequencer for gossip test to proceed")
require.NoError(t, rollupRPCClient.Call(nil, "admin_startSequencer", sys.L2GenesisCfg.ToBlock().Hash()))
l2Seq := sys.Clients["sequencer"]
......
......@@ -110,7 +110,7 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain
}
lgr.Info("Loaded current L2 heads", "unsafe", result.Unsafe, "safe", result.Safe, "finalized", result.Finalized,
"unsafe_origin", result.Unsafe.L1Origin, "unsafe_origin", result.Safe.L1Origin)
"unsafe_origin", result.Unsafe.L1Origin, "safe_origin", result.Safe.L1Origin)
// Remember original unsafe block to determine reorg depth
prevUnsafe := result.Unsafe
......@@ -207,7 +207,7 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain
// Don't traverse further than the finalized head to find a safe head
if n.Number == result.Finalized.Number {
lgr.Info("Hit finalized L2 head, returning immediately", "unsafe", result.Unsafe, "safe", result.Safe,
"finalized", result.Finalized, "unsafe_origin", result.Unsafe.L1Origin, "unsafe_origin", result.Safe.L1Origin)
"finalized", result.Finalized, "unsafe_origin", result.Unsafe.L1Origin, "safe_origin", result.Safe.L1Origin)
result.Safe = n
return result, nil
}
......
package sources
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/rawdb"
)
type DebugClient struct {
callContext CallContextFn
}
func NewDebugClient(callContext CallContextFn) *DebugClient {
return &DebugClient{callContext}
}
func (o *DebugClient) NodeByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
// MPT nodes are stored as the hash of the node (with no prefix)
node, err := o.dbGet(ctx, hash[:])
if err != nil {
return nil, fmt.Errorf("failed to retrieve state MPT node: %w", err)
}
return node, nil
}
func (o *DebugClient) CodeByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
// First try retrieving with the new code prefix
code, err := o.dbGet(ctx, append(append(make([]byte, 0), rawdb.CodePrefix...), hash[:]...))
if err != nil {
// Fallback to the legacy un-prefixed version
code, err = o.dbGet(ctx, hash[:])
if err != nil {
return nil, fmt.Errorf("failed to retrieve contract code, using new and legacy keys, with codehash %s: %w", hash, err)
}
}
return code, nil
}
func (o *DebugClient) dbGet(ctx context.Context, key []byte) ([]byte, error) {
var node hexutil.Bytes
err := o.callContext(ctx, &node, "debug_dbGet", hexutil.Encode(key))
if err != nil {
return nil, fmt.Errorf("fetch error %x: %w", key, err)
}
return node, nil
}
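A usage sketch for the new DebugClient; it assumes CallContextFn is compatible with go-ethereum's *rpc.Client.CallContext and that the endpoint URL below exposes the debug namespace (both are illustration-only assumptions).

```go
package main

import (
	"context"
	"fmt"

	"github.com/ethereum-optimism/optimism/op-node/sources"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Hypothetical L2 endpoint that exposes debug_dbGet.
	rpcClient, err := rpc.Dial("http://localhost:9545")
	if err != nil {
		panic(err)
	}
	// Assumes *rpc.Client.CallContext satisfies sources.CallContextFn.
	debug := sources.NewDebugClient(rpcClient.CallContext)

	// Fetch a state trie node by its keccak hash (keys are un-prefixed node hashes).
	nodeHash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001")
	node, err := debug.NodeByHash(context.Background(), nodeHash)
	fmt.Println(len(node), err)
}
```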
package testutils
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/mock"
)
type MockDebugClient struct {
mock.Mock
}
func (m *MockDebugClient) ExpectNodeByHash(hash common.Hash, res []byte, err error) {
m.Mock.On("NodeByHash", hash).Once().Return(res, &err)
}
func (m *MockDebugClient) NodeByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
out := m.Mock.MethodCalled("NodeByHash", hash)
return out[0].([]byte), *out[1].(*error)
}
func (m *MockDebugClient) ExpectCodeByHash(hash common.Hash, res []byte, err error) {
m.Mock.On("CodeByHash", hash).Once().Return(res, &err)
}
func (m *MockDebugClient) CodeByHash(ctx context.Context, hash common.Hash) ([]byte, error) {
out := m.Mock.MethodCalled("CodeByHash", hash)
return out[0].([]byte), *out[1].(*error)
}
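A hypothetical example test showing how the mock above is typically used with testify expectations; the test name and byte values are made up for illustration, and it would live alongside the mock in the same testutils package.

```go
package testutils

import (
	"context"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/require"
)

func TestMockDebugClientExample(t *testing.T) {
	m := &MockDebugClient{}
	h := common.Hash{0x01}

	// Register the expected call and the canned response it should return.
	m.ExpectNodeByHash(h, []byte{0xde, 0xad}, nil)

	node, err := m.NodeByHash(context.Background(), h)
	require.NoError(t, err)
	require.Equal(t, []byte{0xde, 0xad}, node)

	// Verify the expected call was made exactly once.
	m.AssertExpectations(t)
}
```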
......@@ -9,7 +9,7 @@ LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-program/version.Meta
LDFLAGS := -ldflags "$(LDFLAGSSTRING)"
op-program:
env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/op-program ./cmd/main.go
env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/op-program ./host/cmd/main.go
clean:
rm -rf bin
......
......@@ -24,18 +24,20 @@ type L2Source interface {
}
type Driver struct {
logger log.Logger
pipeline Derivation
l2OutputRoot func() (eth.Bytes32, error)
logger log.Logger
pipeline Derivation
l2OutputRoot func() (eth.Bytes32, error)
targetBlockNum uint64
}
func NewDriver(logger log.Logger, cfg *rollup.Config, l1Source derive.L1Fetcher, l2Source L2Source) *Driver {
func NewDriver(logger log.Logger, cfg *rollup.Config, l1Source derive.L1Fetcher, l2Source L2Source, targetBlockNum uint64) *Driver {
pipeline := derive.NewDerivationPipeline(logger, cfg, l1Source, l2Source, metrics.NoopMetrics)
pipeline.Reset()
return &Driver{
logger: logger,
pipeline: pipeline,
l2OutputRoot: l2Source.L2OutputRoot,
logger: logger,
pipeline: pipeline,
l2OutputRoot: l2Source.L2OutputRoot,
targetBlockNum: targetBlockNum,
}
}
......@@ -47,6 +49,11 @@ func (d *Driver) Step(ctx context.Context) error {
if err := d.pipeline.Step(ctx); errors.Is(err, io.EOF) {
return io.EOF
} else if errors.Is(err, derive.NotEnoughData) {
head := d.pipeline.SafeL2Head()
if head.Number >= d.targetBlockNum {
d.logger.Info("Target L2 block reached", "head", head)
return io.EOF
}
d.logger.Debug("Data is lacking")
return nil
} else if err != nil {
......
......@@ -39,6 +39,30 @@ func TestGenericError(t *testing.T) {
require.ErrorIs(t, err, expected)
}
func TestTargetBlock(t *testing.T) {
t.Run("Reached", func(t *testing.T) {
driver := createDriverWithNextBlock(t, derive.NotEnoughData, 1000)
driver.targetBlockNum = 1000
err := driver.Step(context.Background())
require.ErrorIs(t, err, io.EOF)
})
t.Run("Exceeded", func(t *testing.T) {
driver := createDriverWithNextBlock(t, derive.NotEnoughData, 1000)
driver.targetBlockNum = 500
err := driver.Step(context.Background())
require.ErrorIs(t, err, io.EOF)
})
t.Run("NotYetReached", func(t *testing.T) {
driver := createDriverWithNextBlock(t, derive.NotEnoughData, 1000)
driver.targetBlockNum = 1001
err := driver.Step(context.Background())
// No error to indicate derivation should continue
require.NoError(t, err)
})
}
func TestNoError(t *testing.T) {
driver := createDriver(t, nil)
err := driver.Step(context.Background())
......@@ -76,15 +100,21 @@ func TestValidateClaim(t *testing.T) {
}
func createDriver(t *testing.T, derivationResult error) *Driver {
derivation := &stubDerivation{nextErr: derivationResult}
return createDriverWithNextBlock(t, derivationResult, 0)
}
func createDriverWithNextBlock(t *testing.T, derivationResult error, nextBlockNum uint64) *Driver {
derivation := &stubDerivation{nextErr: derivationResult, nextBlockNum: nextBlockNum}
return &Driver{
logger: testlog.Logger(t, log.LvlDebug),
pipeline: derivation,
logger: testlog.Logger(t, log.LvlDebug),
pipeline: derivation,
targetBlockNum: 1_000_000,
}
}
type stubDerivation struct {
nextErr error
nextErr error
nextBlockNum uint64
}
func (s stubDerivation) Step(ctx context.Context) error {
......@@ -92,5 +122,7 @@ func (s stubDerivation) Step(ctx context.Context) error {
}
func (s stubDerivation) SafeL2Head() eth.L2BlockRef {
return eth.L2BlockRef{}
return eth.L2BlockRef{
Number: s.nextBlockNum,
}
}
......@@ -6,12 +6,18 @@ import (
"github.com/ethereum-optimism/optimism/op-program/preimage"
)
const (
HintL1BlockHeader = "l1-block-header"
HintL1Transactions = "l1-transactions"
HintL1Receipts = "l1-receipts"
)
type BlockHeaderHint common.Hash
var _ preimage.Hint = BlockHeaderHint{}
func (l BlockHeaderHint) Hint() string {
return "l1-block-header " + (common.Hash)(l).String()
return HintL1BlockHeader + " " + (common.Hash)(l).String()
}
type TransactionsHint common.Hash
......@@ -19,7 +25,7 @@ type TransactionsHint common.Hash
var _ preimage.Hint = TransactionsHint{}
func (l TransactionsHint) Hint() string {
return "l1-transactions " + (common.Hash)(l).String()
return HintL1Transactions + " " + (common.Hash)(l).String()
}
type ReceiptsHint common.Hash
......@@ -27,5 +33,5 @@ type ReceiptsHint common.Hash
var _ preimage.Hint = ReceiptsHint{}
func (l ReceiptsHint) Hint() string {
return "l1-receipts " + (common.Hash)(l).String()
return HintL1Receipts + " " + (common.Hash)(l).String()
}
......@@ -6,12 +6,19 @@ import (
"github.com/ethereum-optimism/optimism/op-program/preimage"
)
const (
HintL2BlockHeader = "l2-block-header"
HintL2Transactions = "l2-transactions"
HintL2Code = "l2-code"
HintL2StateNode = "l2-state-node"
)
type BlockHeaderHint common.Hash
var _ preimage.Hint = BlockHeaderHint{}
func (l BlockHeaderHint) Hint() string {
return "l2-block-header " + (common.Hash)(l).String()
return HintL2BlockHeader + " " + (common.Hash)(l).String()
}
type TransactionsHint common.Hash
......@@ -19,7 +26,7 @@ type TransactionsHint common.Hash
var _ preimage.Hint = TransactionsHint{}
func (l TransactionsHint) Hint() string {
return "l2-transactions " + (common.Hash)(l).String()
return HintL2Transactions + " " + (common.Hash)(l).String()
}
type CodeHint common.Hash
......@@ -27,7 +34,7 @@ type CodeHint common.Hash
var _ preimage.Hint = CodeHint{}
func (l CodeHint) Hint() string {
return "l2-code " + (common.Hash)(l).String()
return HintL2Code + " " + (common.Hash)(l).String()
}
type StateNodeHint common.Hash
......@@ -35,5 +42,5 @@ type StateNodeHint common.Hash
var _ preimage.Hint = StateNodeHint{}
func (l StateNodeHint) Hint() string {
return "l2-state-node " + (common.Hash)(l).String()
return HintL2StateNode + " " + (common.Hash)(l).String()
}
package main
import (
"context"
"errors"
"fmt"
"io"
"os"
"time"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
cldr "github.com/ethereum-optimism/optimism/op-program/client/driver"
"github.com/ethereum-optimism/optimism/op-program/host"
"github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum-optimism/optimism/op-program/host/flags"
"github.com/ethereum-optimism/optimism/op-program/host/l1"
"github.com/ethereum-optimism/optimism/op-program/host/l2"
"github.com/ethereum-optimism/optimism/op-program/host/version"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/log"
......@@ -42,15 +33,13 @@ var VersionWithMeta = func() string {
return v
}()
var (
ErrClaimNotValid = errors.New("invalid claim")
)
func main() {
args := os.Args
err := run(args, FaultProofProgram)
err := run(args, host.FaultProofProgram)
if err != nil {
log.Crit("Application failed", "message", err)
} else {
log.Info("Claim successfully verified")
}
}
......@@ -95,43 +84,3 @@ func setupLogging(ctx *cli.Context) (log.Logger, error) {
logger := oplog.NewLogger(logCfg)
return logger, nil
}
// FaultProofProgram is the programmatic entry-point for the fault proof program
func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
cfg.Rollup.LogDescription(logger, chaincfg.L2ChainIDToNetworkName)
if !cfg.FetchingEnabled() {
return errors.New("offline mode not supported")
}
ctx := context.Background()
logger.Info("Connecting to L1 node", "l1", cfg.L1URL)
l1Source, err := l1.NewFetchingL1(ctx, logger, cfg)
if err != nil {
return fmt.Errorf("connect l1 oracle: %w", err)
}
logger.Info("Connecting to L2 node", "l2", cfg.L2URL)
l2Source, err := l2.NewFetchingEngine(ctx, logger, cfg)
if err != nil {
return fmt.Errorf("connect l2 oracle: %w", err)
}
d := cldr.NewDriver(logger, cfg.Rollup, l1Source, l2Source)
for {
if err = d.Step(ctx); errors.Is(err, io.EOF) {
break
} else if cfg.FetchingEnabled() && errors.Is(err, derive.ErrTemporary) {
// When in fetching mode, recover from temporary errors to allow us to keep fetching data
// TODO(CLI-3780) Ideally the retry would happen in the fetcher so this is not needed
logger.Warn("Temporary error in pipeline", "err", err)
time.Sleep(5 * time.Second)
} else if err != nil {
return err
}
}
claim := cfg.L2Claim
if !d.ValidateClaim(eth.Bytes32(claim)) {
return ErrClaimNotValid
}
return nil
}
This diff is collapsed.
package config
import (
"encoding/json"
"errors"
"fmt"
"os"
opnode "github.com/ethereum-optimism/optimism/op-node"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-program/host/flags"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli"
)
......@@ -18,18 +23,32 @@ var (
ErrInvalidL2Head = errors.New("invalid l2 head")
ErrL1AndL2Inconsistent = errors.New("l1 and l2 options must be specified together or both omitted")
ErrInvalidL2Claim = errors.New("invalid l2 claim")
ErrInvalidL2ClaimBlock = errors.New("invalid l2 claim block number")
ErrDataDirRequired = errors.New("datadir must be specified when in non-fetching mode")
)
type Config struct {
Rollup *rollup.Config
L2URL string
L2GenesisPath string
L1Head common.Hash
L2Head common.Hash
L2Claim common.Hash
L1URL string
L1TrustRPC bool
L1RPCKind sources.RPCProviderKind
Rollup *rollup.Config
// DataDir is the directory to read/write pre-image data from/to.
// If not set, an in-memory key-value store is used and fetching data must be enabled
DataDir string
// L1Head is the block hash of the L1 chain head block
L1Head common.Hash
L1URL string
L1TrustRPC bool
L1RPCKind sources.RPCProviderKind
// L2Head is the agreed L2 block to start derivation from
L2Head common.Hash
L2URL string
// L2Claim is the claimed L2 output root to verify
L2Claim common.Hash
// L2ClaimBlockNumber is the block number the claimed L2 output root is from
// Must be above 0; to be a valid claim it must also be above the L2Head block number.
L2ClaimBlockNumber uint64
// L2ChainConfig is the op-geth chain config for the L2 execution engine
L2ChainConfig *params.ChainConfig
}
func (c *Config) Check() error {
......@@ -48,12 +67,18 @@ func (c *Config) Check() error {
if c.L2Claim == (common.Hash{}) {
return ErrInvalidL2Claim
}
if c.L2GenesisPath == "" {
if c.L2ClaimBlockNumber == 0 {
return ErrInvalidL2ClaimBlock
}
if c.L2ChainConfig == nil {
return ErrMissingL2Genesis
}
if (c.L1URL != "") != (c.L2URL != "") {
return ErrL1AndL2Inconsistent
}
if !c.FetchingEnabled() && c.DataDir == "" {
return ErrDataDirRequired
}
return nil
}
......@@ -62,14 +87,15 @@ func (c *Config) FetchingEnabled() bool {
}
// NewConfig creates a Config with all optional values set to the CLI default value
func NewConfig(rollupCfg *rollup.Config, l2GenesisPath string, l1Head common.Hash, l2Head common.Hash, l2Claim common.Hash) *Config {
func NewConfig(rollupCfg *rollup.Config, l2Genesis *params.ChainConfig, l1Head common.Hash, l2Head common.Hash, l2Claim common.Hash, l2ClaimBlockNum uint64) *Config {
return &Config{
Rollup: rollupCfg,
L2GenesisPath: l2GenesisPath,
L1Head: l1Head,
L2Head: l2Head,
L2Claim: l2Claim,
L1RPCKind: sources.RPCKindBasic,
Rollup: rollupCfg,
L2ChainConfig: l2Genesis,
L1Head: l1Head,
L2Head: l2Head,
L2Claim: l2Claim,
L2ClaimBlockNumber: l2ClaimBlockNum,
L1RPCKind: sources.RPCKindBasic,
}
}
......@@ -89,19 +115,40 @@ func NewConfigFromCLI(ctx *cli.Context) (*Config, error) {
if l2Claim == (common.Hash{}) {
return nil, ErrInvalidL2Claim
}
l2ClaimBlockNum := ctx.GlobalUint64(flags.L2BlockNumber.Name)
l1Head := common.HexToHash(ctx.GlobalString(flags.L1Head.Name))
if l1Head == (common.Hash{}) {
return nil, ErrInvalidL1Head
}
l2GenesisPath := ctx.GlobalString(flags.L2GenesisPath.Name)
l2ChainConfig, err := loadChainConfigFromGenesis(l2GenesisPath)
if err != nil {
return nil, fmt.Errorf("invalid genesis: %w", err)
}
return &Config{
Rollup: rollupCfg,
L2URL: ctx.GlobalString(flags.L2NodeAddr.Name),
L2GenesisPath: ctx.GlobalString(flags.L2GenesisPath.Name),
L2Head: l2Head,
L2Claim: l2Claim,
L1Head: l1Head,
L1URL: ctx.GlobalString(flags.L1NodeAddr.Name),
L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name),
L1RPCKind: sources.RPCProviderKind(ctx.GlobalString(flags.L1RPCProviderKind.Name)),
Rollup: rollupCfg,
DataDir: ctx.GlobalString(flags.DataDir.Name),
L2URL: ctx.GlobalString(flags.L2NodeAddr.Name),
L2ChainConfig: l2ChainConfig,
L2Head: l2Head,
L2Claim: l2Claim,
L2ClaimBlockNumber: l2ClaimBlockNum,
L1Head: l1Head,
L1URL: ctx.GlobalString(flags.L1NodeAddr.Name),
L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name),
L1RPCKind: sources.RPCProviderKind(ctx.GlobalString(flags.L1RPCProviderKind.Name)),
}, nil
}
func loadChainConfigFromGenesis(path string) (*params.ChainConfig, error) {
data, err := os.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("read l2 genesis file: %w", err)
}
var genesis core.Genesis
err = json.Unmarshal(data, &genesis)
if err != nil {
return nil, fmt.Errorf("parse l2 genesis file: %w", err)
}
return genesis.Config, nil
}
......@@ -6,16 +6,21 @@ import (
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
)
var validRollupConfig = &chaincfg.Goerli
var validL2GenesisPath = "genesis.json"
var validL1Head = common.Hash{0xaa}
var validL2Head = common.Hash{0xbb}
var validL2Claim = common.Hash{0xcc}
var (
validRollupConfig = &chaincfg.Goerli
validL2Genesis = params.GoerliChainConfig
validL1Head = common.Hash{0xaa}
validL2Head = common.Hash{0xbb}
validL2Claim = common.Hash{0xcc}
validL2ClaimBlockNum = uint64(15)
)
func TestDefaultConfigIsValid(t *testing.T) {
// TestValidConfigIsValid checks that the config provided by validConfig is actually valid
func TestValidConfigIsValid(t *testing.T) {
err := validConfig().Check()
require.NoError(t, err)
}
......@@ -57,9 +62,16 @@ func TestL2ClaimRequired(t *testing.T) {
require.ErrorIs(t, err, ErrInvalidL2Claim)
}
func TestL2ClaimBlockNumberRequired(t *testing.T) {
config := validConfig()
config.L2ClaimBlockNumber = 0
err := config.Check()
require.ErrorIs(t, err, ErrInvalidL2ClaimBlock)
}
func TestL2GenesisRequired(t *testing.T) {
config := validConfig()
config.L2GenesisPath = ""
config.L2ChainConfig = nil
err := config.Check()
require.ErrorIs(t, err, ErrMissingL2Genesis)
}
......@@ -121,6 +133,17 @@ func TestFetchingEnabled(t *testing.T) {
})
}
func TestRequireDataDirInNonFetchingMode(t *testing.T) {
cfg := validConfig()
cfg.DataDir = ""
cfg.L1URL = ""
cfg.L2URL = ""
err := cfg.Check()
require.ErrorIs(t, err, ErrDataDirRequired)
}
func validConfig() *Config {
return NewConfig(validRollupConfig, validL2GenesisPath, validL1Head, validL2Head, validL2Claim)
cfg := NewConfig(validRollupConfig, validL2Genesis, validL1Head, validL2Head, validL2Claim, validL2ClaimBlockNum)
cfg.DataDir = "/tmp/configTest"
return cfg
}
......@@ -26,6 +26,11 @@ var (
Usage: fmt.Sprintf("Predefined network selection. Available networks: %s", strings.Join(chaincfg.AvailableNetworks(), ", ")),
EnvVar: service.PrefixEnvVar(envVarPrefix, "NETWORK"),
}
DataDir = cli.StringFlag{
Name: "datadir",
Usage: "Directory to use for preimage data storage. Default uses in-memory storage",
EnvVar: service.PrefixEnvVar(envVarPrefix, "DATADIR"),
}
L2NodeAddr = cli.StringFlag{
Name: "l2",
Usage: "Address of L2 JSON-RPC endpoint to use (eth and debug namespace required)",
......@@ -46,6 +51,11 @@ var (
Usage: "Claimed L2 output root to validate",
EnvVar: service.PrefixEnvVar(envVarPrefix, "L2_CLAIM"),
}
L2BlockNumber = cli.Uint64Flag{
Name: "l2.blocknumber",
Usage: "Number of the L2 block that the claim is from",
EnvVar: service.PrefixEnvVar(envVarPrefix, "L2_BLOCK_NUM"),
}
L2GenesisPath = cli.StringFlag{
Name: "l2.genesis",
Usage: "Path to the op-geth genesis file",
......@@ -80,11 +90,13 @@ var requiredFlags = []cli.Flag{
L1Head,
L2Head,
L2Claim,
L2BlockNumber,
L2GenesisPath,
}
var programFlags = []cli.Flag{
RollupConfig,
Network,
DataDir,
L2NodeAddr,
L1NodeAddr,
L1TrustRPC,
......@@ -107,7 +119,7 @@ func CheckRequired(ctx *cli.Context) error {
return fmt.Errorf("cannot specify both %s and %s", RollupConfig.Name, Network.Name)
}
for _, flag := range requiredFlags {
if ctx.GlobalString(flag.GetName()) == "" {
if !ctx.IsSet(flag.GetName()) {
return fmt.Errorf("flag %s is required", flag.GetName())
}
}
......
package host
import (
"context"
"errors"
"fmt"
"io"
"os"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/sources"
cldr "github.com/ethereum-optimism/optimism/op-program/client/driver"
"github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum-optimism/optimism/op-program/host/kvstore"
"github.com/ethereum-optimism/optimism/op-program/host/l1"
"github.com/ethereum-optimism/optimism/op-program/host/l2"
"github.com/ethereum-optimism/optimism/op-program/host/prefetcher"
"github.com/ethereum-optimism/optimism/op-program/preimage"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
var (
ErrClaimNotValid = errors.New("invalid claim")
)
type L2Source struct {
*sources.L2Client
*sources.DebugClient
}
// FaultProofProgram is the programmatic entry-point for the fault proof program
func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
if err := cfg.Check(); err != nil {
return fmt.Errorf("invalid config: %w", err)
}
cfg.Rollup.LogDescription(logger, chaincfg.L2ChainIDToNetworkName)
ctx := context.Background()
var kv kvstore.KV
if cfg.DataDir == "" {
logger.Info("Using in-memory storage")
kv = kvstore.NewMemKV()
} else {
logger.Info("Creating disk storage", "datadir", cfg.DataDir)
if err := os.MkdirAll(cfg.DataDir, 0755); err != nil {
return fmt.Errorf("creating datadir: %w", err)
}
kv = kvstore.NewDiskKV(cfg.DataDir)
}
var preimageOracle preimage.OracleFn
var hinter preimage.HinterFn
if cfg.FetchingEnabled() {
logger.Info("Connecting to L1 node", "l1", cfg.L1URL)
l1RPC, err := client.NewRPC(ctx, logger, cfg.L1URL)
if err != nil {
return fmt.Errorf("failed to setup L1 RPC: %w", err)
}
logger.Info("Connecting to L2 node", "l2", cfg.L2URL)
l2RPC, err := client.NewRPC(ctx, logger, cfg.L2URL)
if err != nil {
return fmt.Errorf("failed to setup L2 RPC: %w", err)
}
l1ClCfg := sources.L1ClientDefaultConfig(cfg.Rollup, cfg.L1TrustRPC, cfg.L1RPCKind)
l2ClCfg := sources.L2ClientDefaultConfig(cfg.Rollup, true)
l1Cl, err := sources.NewL1Client(l1RPC, logger, nil, l1ClCfg)
if err != nil {
return fmt.Errorf("failed to create L1 client: %w", err)
}
l2Cl, err := sources.NewL2Client(l2RPC, logger, nil, l2ClCfg)
if err != nil {
return fmt.Errorf("failed to create L2 client: %w", err)
}
l2DebugCl := &L2Source{L2Client: l2Cl, DebugClient: sources.NewDebugClient(l2RPC.CallContext)}
logger.Info("Setting up pre-fetcher")
prefetch := prefetcher.NewPrefetcher(l1Cl, l2DebugCl, kv)
preimageOracle = asOracleFn(func(key common.Hash) ([]byte, error) {
return prefetch.GetPreimage(ctx, key)
})
hinter = asHinter(prefetch.Hint)
} else {
logger.Info("Using offline mode. All required pre-images must be pre-populated.")
preimageOracle = asOracleFn(kv.Get)
hinter = func(v preimage.Hint) {
logger.Debug("ignoring prefetch hint", "hint", v)
}
}
l1Source := l1.NewSource(logger, preimageOracle, hinter, cfg.L1Head)
l2Source, err := l2.NewEngine(logger, preimageOracle, hinter, cfg)
if err != nil {
return fmt.Errorf("connect l2 oracle: %w", err)
}
logger.Info("Starting derivation")
d := cldr.NewDriver(logger, cfg.Rollup, l1Source, l2Source, cfg.L2ClaimBlockNumber)
for {
if err = d.Step(ctx); errors.Is(err, io.EOF) {
break
} else if err != nil {
return err
}
}
if !d.ValidateClaim(eth.Bytes32(cfg.L2Claim)) {
return ErrClaimNotValid
}
return nil
}
func asOracleFn(getter func(key common.Hash) ([]byte, error)) preimage.OracleFn {
return func(key preimage.Key) []byte {
pre, err := getter(key.PreimageKey())
if err != nil {
panic(fmt.Errorf("preimage unavailable for key %v: %w", key, err))
}
return pre
}
}
func asHinter(hint func(hint string) error) preimage.HinterFn {
return func(v preimage.Hint) {
err := hint(v.Hint())
if err != nil {
panic(fmt.Errorf("hint rejected %v: %w", v, err))
}
}
}
......@@ -8,10 +8,12 @@ import (
"github.com/ethereum-optimism/optimism/op-node/sources"
cll1 "github.com/ethereum-optimism/optimism/op-program/client/l1"
"github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum-optimism/optimism/op-program/preimage"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
func NewFetchingL1(ctx context.Context, logger log.Logger, cfg *config.Config) (derive.L1Fetcher, error) {
func NewFetchingOracle(ctx context.Context, logger log.Logger, cfg *config.Config) (cll1.Oracle, error) {
rpc, err := client.NewRPC(ctx, logger, cfg.L1URL)
if err != nil {
return nil, err
......@@ -21,6 +23,10 @@ func NewFetchingL1(ctx context.Context, logger log.Logger, cfg *config.Config) (
if err != nil {
return nil, err
}
oracle := cll1.NewCachingOracle(NewFetchingL1Oracle(ctx, logger, source))
return cll1.NewOracleL1Client(logger, oracle, cfg.L1Head), err
return NewFetchingL1Oracle(ctx, logger, source), nil
}
func NewSource(logger log.Logger, oracle preimage.Oracle, hint preimage.Hinter, l1Head common.Hash) derive.L1Fetcher {
l1Oracle := cll1.NewCachingOracle(cll1.NewPreimageOracle(oracle, hint))
return cll1.NewOracleL1Client(logger, l1Oracle, l1Head)
}
......@@ -2,44 +2,27 @@ package l2
import (
"context"
"encoding/json"
"fmt"
"os"
cll2 "github.com/ethereum-optimism/optimism/op-program/client/l2"
"github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum-optimism/optimism/op-program/preimage"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
func NewFetchingEngine(ctx context.Context, logger log.Logger, cfg *config.Config) (*cll2.OracleEngine, error) {
genesis, err := loadL2Genesis(cfg)
if err != nil {
return nil, err
}
fetcher, err := NewFetchingL2Oracle(ctx, logger, cfg.L2URL, cfg.L2Head)
if err != nil {
return nil, fmt.Errorf("connect l2 oracle: %w", err)
}
oracle := cll2.NewCachingOracle(fetcher)
engineBackend, err := cll2.NewOracleBackedL2Chain(logger, oracle, genesis, cfg.L2Head)
func NewEngine(logger log.Logger, pre preimage.Oracle, hint preimage.Hinter, cfg *config.Config) (*cll2.OracleEngine, error) {
oracle := cll2.NewCachingOracle(cll2.NewPreimageOracle(pre, hint))
engineBackend, err := cll2.NewOracleBackedL2Chain(logger, oracle, cfg.L2ChainConfig, cfg.L2Head)
if err != nil {
return nil, fmt.Errorf("create l2 chain: %w", err)
}
return cll2.NewOracleEngine(cfg.Rollup, logger, engineBackend), nil
}
func loadL2Genesis(cfg *config.Config) (*params.ChainConfig, error) {
data, err := os.ReadFile(cfg.L2GenesisPath)
if err != nil {
return nil, fmt.Errorf("read l2 genesis file: %w", err)
}
var genesis core.Genesis
err = json.Unmarshal(data, &genesis)
func NewFetchingOracle(ctx context.Context, logger log.Logger, cfg *config.Config) (cll2.Oracle, error) {
oracle, err := NewFetchingL2Oracle(ctx, logger, cfg.L2URL, cfg.L2Head)
if err != nil {
return nil, fmt.Errorf("parse l2 genesis file: %w", err)
return nil, fmt.Errorf("connect l2 oracle: %w", err)
}
return genesis.Config, nil
return oracle, nil
}
package prefetcher
import (
"context"
"errors"
"fmt"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-program/client/l1"
"github.com/ethereum-optimism/optimism/op-program/client/l2"
"github.com/ethereum-optimism/optimism/op-program/client/mpt"
"github.com/ethereum-optimism/optimism/op-program/host/kvstore"
"github.com/ethereum-optimism/optimism/op-program/preimage"
)
type L1Source interface {
InfoByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, error)
InfoAndTxsByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Transactions, error)
FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error)
}
type L2Source interface {
InfoAndTxsByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Transactions, error)
NodeByHash(ctx context.Context, hash common.Hash) ([]byte, error)
CodeByHash(ctx context.Context, hash common.Hash) ([]byte, error)
}
type Prefetcher struct {
l1Fetcher L1Source
l2Fetcher L2Source
lastHint string
kvStore kvstore.KV
}
func NewPrefetcher(l1Fetcher L1Source, l2Fetcher L2Source, kvStore kvstore.KV) *Prefetcher {
return &Prefetcher{
l1Fetcher: l1Fetcher,
l2Fetcher: l2Fetcher,
kvStore: kvStore,
}
}
func (p *Prefetcher) Hint(hint string) error {
p.lastHint = hint
return nil
}
func (p *Prefetcher) GetPreimage(ctx context.Context, key common.Hash) ([]byte, error) {
pre, err := p.kvStore.Get(key)
if errors.Is(err, kvstore.ErrNotFound) && p.lastHint != "" {
hint := p.lastHint
p.lastHint = ""
if err := p.prefetch(ctx, hint); err != nil {
return nil, fmt.Errorf("prefetch failed: %w", err)
}
// Should now be available
return p.kvStore.Get(key)
}
return pre, err
}
func (p *Prefetcher) prefetch(ctx context.Context, hint string) error {
hintType, hash, err := parseHint(hint)
if err != nil {
return err
}
switch hintType {
case l1.HintL1BlockHeader:
header, err := p.l1Fetcher.InfoByHash(ctx, hash)
if err != nil {
return fmt.Errorf("failed to fetch L1 block %s header: %w", hash, err)
}
data, err := header.HeaderRLP()
if err != nil {
return fmt.Errorf("marshall header: %w", err)
}
return p.kvStore.Put(preimage.Keccak256Key(hash).PreimageKey(), data)
case l1.HintL1Transactions:
_, txs, err := p.l1Fetcher.InfoAndTxsByHash(ctx, hash)
if err != nil {
return fmt.Errorf("failed to fetch L1 block %s txs: %w", hash, err)
}
return p.storeTransactions(txs)
case l1.HintL1Receipts:
_, receipts, err := p.l1Fetcher.FetchReceipts(ctx, hash)
if err != nil {
return fmt.Errorf("failed to fetch L1 block %s receipts: %w", hash, err)
}
return p.storeReceipts(receipts)
case l2.HintL2BlockHeader:
header, txs, err := p.l2Fetcher.InfoAndTxsByHash(ctx, hash)
if err != nil {
return fmt.Errorf("failed to fetch L2 block %s: %w", hash, err)
}
data, err := header.HeaderRLP()
if err != nil {
return fmt.Errorf("failed to encode header to RLP: %w", err)
}
err = p.kvStore.Put(preimage.Keccak256Key(hash).PreimageKey(), data)
if err != nil {
return err
}
return p.storeTransactions(txs)
case l2.HintL2StateNode:
node, err := p.l2Fetcher.NodeByHash(ctx, hash)
if err != nil {
return fmt.Errorf("failed to fetch L2 state node %s: %w", hash, err)
}
return p.kvStore.Put(preimage.Keccak256Key(hash).PreimageKey(), node)
case l2.HintL2Code:
code, err := p.l2Fetcher.CodeByHash(ctx, hash)
if err != nil {
return fmt.Errorf("failed to fetch L2 contract code %s: %w", hash, err)
}
return p.kvStore.Put(preimage.Keccak256Key(hash).PreimageKey(), code)
}
return fmt.Errorf("unknown hint type: %v", hintType)
}
func (p *Prefetcher) storeReceipts(receipts types.Receipts) error {
opaqueReceipts, err := eth.EncodeReceipts(receipts)
if err != nil {
return err
}
return p.storeTrieNodes(opaqueReceipts)
}
func (p *Prefetcher) storeTransactions(txs types.Transactions) error {
opaqueTxs, err := eth.EncodeTransactions(txs)
if err != nil {
return err
}
return p.storeTrieNodes(opaqueTxs)
}
func (p *Prefetcher) storeTrieNodes(values []hexutil.Bytes) error {
_, nodes := mpt.WriteTrie(values)
for _, node := range nodes {
err := p.kvStore.Put(preimage.Keccak256Key(crypto.Keccak256Hash(node)).PreimageKey(), node)
if err != nil {
return fmt.Errorf("failed to store node: %w", err)
}
}
return nil
}
// parseHint parses a hint string in the wire protocol format. Returns the hint type, requested hash and error (if any).
func parseHint(hint string) (string, common.Hash, error) {
hintType, hashStr, found := strings.Cut(hint, " ")
if !found {
return "", common.Hash{}, fmt.Errorf("unsupported hint: %s", hint)
}
hash := common.HexToHash(hashStr)
if hash == (common.Hash{}) {
return "", common.Hash{}, fmt.Errorf("invalid hash: %s", hashStr)
}
return hintType, hash, nil
}
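A short sketch of the hint wire format handled above: a hint is the hint-type constant, a single space, and the 0x-prefixed hash, as produced by the typed Hint() helpers and split apart by parseHint. The hash value here is arbitrary.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/common"
)

// Mirrors l1.HintL1BlockHeader from the client hints file above.
const hintL1BlockHeader = "l1-block-header"

func main() {
	// Build a hint the way BlockHeaderHint.Hint() does: "<type> <0x-hash>".
	blockHash := common.HexToHash("0x0102030000000000000000000000000000000000000000000000000000000000")
	hint := hintL1BlockHeader + " " + blockHash.String()
	fmt.Println(hint)

	// Split it back apart the way the prefetcher's parseHint does.
	hintType, hashStr, _ := strings.Cut(hint, " ")
	fmt.Println(hintType, common.HexToHash(hashStr) == blockHash) // l1-block-header true
}
```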
This diff is collapsed.
......@@ -55,6 +55,8 @@ func (hr *HintReader) NextHint(router func(hint string) error) error {
}
}
if err := router(string(payload)); err != nil {
// stream recovery: consume the trailing byte so the reader stays aligned for the next hint
_, _ = hr.r.Read([]byte{0})
return fmt.Errorf("failed to handle hint: %w", err)
}
if _, err := hr.r.Read([]byte{0}); err != nil {
......
......@@ -3,6 +3,7 @@ package preimage
import (
"bytes"
"crypto/rand"
"errors"
"io"
"testing"
......@@ -71,4 +72,21 @@ func TestHints(t *testing.T) {
err := hr.NextHint(func(hint string) error { return nil })
require.ErrorIs(t, err, io.ErrUnexpectedEOF)
})
t.Run("cb error", func(t *testing.T) {
var buf bytes.Buffer
hw := NewHintWriter(&buf)
hw.Hint(rawHint("one"))
hw.Hint(rawHint("two"))
hr := NewHintReader(&buf)
cbErr := errors.New("fail")
err := hr.NextHint(func(hint string) error { return cbErr })
require.ErrorIs(t, err, cbErr)
var readHint string
err = hr.NextHint(func(hint string) error {
readHint = hint
return nil
})
require.NoError(t, err)
require.Equal(t, readHint, "two")
})
}
{
"name": "@eth-optimism/ci-builder",
"version": "0.5.0",
"scripts": {},
"license": "MIT",
"dependencies": {}
}
......@@ -16,7 +16,6 @@
"ops/docker/hardhat",
"ops/docker/go-builder",
"ops/docker/js-builder",
"ops/docker/ci-builder",
"ops/docker/foundry",
"endpoint-monitor"
],
......
This diff is collapsed.
......@@ -24,8 +24,7 @@
| xDomainMsgSender | address | 204 | 0 | 20 | contracts/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| msgNonce | uint240 | 205 | 0 | 30 | contracts/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| failedMessages | mapping(bytes32 => bool) | 206 | 0 | 32 | contracts/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| reentrancyLocks | mapping(bytes32 => bool) | 207 | 0 | 32 | contracts/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| __gap | uint256[41] | 208 | 0 | 1312 | contracts/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
| __gap | uint256[42] | 207 | 0 | 1344 | contracts/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger |
=======================
➡ contracts/L1/L1StandardBridge.sol:L1StandardBridge
......@@ -135,8 +134,7 @@
| xDomainMsgSender | address | 204 | 0 | 20 | contracts/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| msgNonce | uint240 | 205 | 0 | 30 | contracts/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| failedMessages | mapping(bytes32 => bool) | 206 | 0 | 32 | contracts/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| reentrancyLocks | mapping(bytes32 => bool) | 207 | 0 | 32 | contracts/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| __gap | uint256[41] | 208 | 0 | 1312 | contracts/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
| __gap | uint256[42] | 207 | 0 | 1344 | contracts/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger |
=======================
➡ contracts/L2/L2StandardBridge.sol:L2StandardBridge
......
......@@ -78,7 +78,7 @@ contract L2OutputOracle is Initializable, Semver {
event OutputsDeleted(uint256 indexed prevNextOutputIndex, uint256 indexed newNextOutputIndex);
/**
* @custom:semver 1.2.0
* @custom:semver 1.3.0
*
* @param _submissionInterval Interval in blocks at which checkpoints must be submitted.
* @param _l2BlockTime The time per L2 block, in seconds.
......@@ -95,11 +95,11 @@ contract L2OutputOracle is Initializable, Semver {
address _proposer,
address _challenger,
uint256 _finalizationPeriodSeconds
) Semver(1, 2, 0) {
) Semver(1, 3, 0) {
require(_l2BlockTime > 0, "L2OutputOracle: L2 block time must be greater than 0");
require(
_submissionInterval > _l2BlockTime,
"L2OutputOracle: submission interval must be greater than L2 block time"
_submissionInterval > 0,
"L2OutputOracle: submission interval must be greater than 0"
);
SUBMISSION_INTERVAL = _submissionInterval;
......
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
import { Messenger_Initializer, Reverter, CallerCaller } from "./CommonTest.t.sol";
import { Messenger_Initializer, Reverter, CallerCaller, CommonTest } from "./CommonTest.t.sol";
import { L1CrossDomainMessenger } from "../L1/L1CrossDomainMessenger.sol";
// Libraries
import { Predeploys } from "../libraries/Predeploys.sol";
import { Hashing } from "../libraries/Hashing.sol";
import { Encoding } from "../libraries/Encoding.sol";
// CrossDomainMessenger_Test is for testing functionality which is common to both the L1 and L2
// CrossDomainMessenger contracts. For simplicity, we use the L1 Messenger as the test contract.
......@@ -17,3 +23,149 @@ contract CrossDomainMessenger_BaseGas_Test is Messenger_Initializer {
L1Messenger.baseGas(hex"ff", _minGasLimit);
}
}
/**
* @title ExternalRelay
* @notice A mock external contract called via the SafeCall inside
* the CrossDomainMessenger's `relayMessage` function.
*/
contract ExternalRelay is CommonTest {
address internal op;
address internal fuzzedSender;
L1CrossDomainMessenger internal L1Messenger;
event FailedRelayedMessage(bytes32 indexed msgHash);
constructor(L1CrossDomainMessenger _l1Messenger, address _op) {
L1Messenger = _l1Messenger;
op = _op;
}
/**
* @notice Internal helper function to relay a message and perform assertions.
*/
function _internalRelay(address _innerSender) internal {
address initialSender = L1Messenger.xDomainMessageSender();
bytes memory callMessage = getCallData();
bytes32 hash = Hashing.hashCrossDomainMessage({
_nonce: Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
_sender: _innerSender,
_target: address(this),
_value: 0,
_gasLimit: 0,
_data: callMessage
});
vm.expectEmit(true, true, true, true);
emit FailedRelayedMessage(hash);
vm.prank(address(op));
L1Messenger.relayMessage({
_nonce: Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
_sender: _innerSender,
_target: address(this),
_value: 0,
_minGasLimit: 0,
_message: callMessage
});
assertTrue(L1Messenger.failedMessages(hash));
assertFalse(L1Messenger.successfulMessages(hash));
assertEq(initialSender, L1Messenger.xDomainMessageSender());
}
/**
* @notice externalCallWithMinGas is called by the CrossDomainMessenger.
*/
function externalCallWithMinGas() external payable {
for (uint256 i = 0; i < 10; i++) {
address _innerSender;
unchecked {
_innerSender = address(uint160(uint256(uint160(fuzzedSender)) + i));
}
_internalRelay(_innerSender);
}
}
/**
* @notice Helper function to get the callData for an `externalCallWithMinGas` call.
*/
function getCallData() public returns (bytes memory) {
return abi.encodeWithSelector(ExternalRelay.externalCallWithMinGas.selector);
}
/**
* @notice Helper function to set the fuzzed sender
*/
function setFuzzedSender(address _fuzzedSender) public {
fuzzedSender = _fuzzedSender;
}
}
/**
* @title CrossDomainMessenger_RelayMessage_Test
* @notice Fuzz tests re-entrancy into the CrossDomainMessenger relayMessage function.
*/
contract CrossDomainMessenger_RelayMessage_Test is Messenger_Initializer {
// Storage slot of the l2Sender
uint256 constant senderSlotIndex = 50;
ExternalRelay public er;
function setUp() public override {
super.setUp();
er = new ExternalRelay(L1Messenger, address(op));
}
/**
* @dev This test mocks an OptimismPortal call to the L1CrossDomainMessenger via
* the relayMessage function. The relayMessage function will then use SafeCall's
* callWithMinGas to call the target with call data packed in the callMessage.
* For this test, the callWithMinGas will call the mock ExternalRelay test contract
* defined above, executing the externalCallWithMinGas function which will try to
* re-enter the CrossDomainMessenger's relayMessage function, resulting in that message
* being recorded as failed.
*/
function testFuzz_relayMessageReenter_succeeds(address _sender, uint256 _gasLimit) external {
vm.assume(_sender != Predeploys.L2_CROSS_DOMAIN_MESSENGER);
address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER;
er.setFuzzedSender(_sender);
address target = address(er);
bytes memory callMessage = er.getCallData();
vm.expectCall(target, callMessage);
uint64 gasLimit = uint64(bound(_gasLimit, 0, 30_000_000));
bytes32 hash = Hashing.hashCrossDomainMessage({
_nonce: Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
_sender: sender,
_target: target,
_value: 0,
_gasLimit: gasLimit,
_data: callMessage
});
// set the value of op.l2Sender() to be the L2 Cross Domain Messenger.
vm.store(address(op), bytes32(senderSlotIndex), bytes32(abi.encode(sender)));
vm.prank(address(op));
L1Messenger.relayMessage({
_nonce: Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
_sender: sender,
_target: target,
_value: 0,
_minGasLimit: gasLimit,
_message: callMessage
});
assertTrue(L1Messenger.successfulMessages(hash));
assertEq(L1Messenger.failedMessages(hash), false);
// Ensures that the `xDomainMsgSender` is set back to `Predeploys.L2_CROSS_DOMAIN_MESSENGER`
vm.expectRevert("CrossDomainMessenger: xDomainMessageSender is not set");
L1Messenger.xDomainMessageSender();
}
}
......@@ -100,10 +100,6 @@ contract L1CrossDomainMessenger_Test is Messenger_Initializer {
L1Messenger.xDomainMessageSender();
}
// xDomainMessageSender: should return the xDomainMsgSender address
// TODO: might need a test contract
// function test_xDomainSenderSetCorrectly() external {}
function test_relayMessage_v2_reverts() external {
address target = address(0xabcd);
address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER;
......@@ -295,173 +291,6 @@ contract L1CrossDomainMessenger_Test is Messenger_Initializer {
assertEq(L1Messenger.failedMessages(hash), true);
}
// relayMessage: Should revert if the recipient is trying to reenter with the
// same message.
function test_relayMessage_reentrancySameMessage_reverts() external {
ConfigurableCaller caller = new ConfigurableCaller();
address target = address(caller);
address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER;
bytes memory callMessage = abi.encodeWithSelector(caller.call.selector);
bytes32 hash = Hashing.hashCrossDomainMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
callMessage
);
// Set the portal's `l2Sender` to the `sender`.
vm.store(address(op), bytes32(senderSlotIndex), bytes32(uint256(uint160(sender))));
// Act as the portal and call the `relayMessage` function with the `innerMessage`.
vm.prank(address(op));
vm.expectCall(target, callMessage);
L1Messenger.relayMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
callMessage
);
// Assert that the message failed to be relayed
assertFalse(L1Messenger.successfulMessages(hash));
assertTrue(L1Messenger.failedMessages(hash));
// Set the configurable caller's target to `L1Messenger` and set the payload to `relayMessage(...)`.
caller.setDoRevert(false);
caller.setTarget(address(L1Messenger));
caller.setPayload(
abi.encodeWithSelector(
L1Messenger.relayMessage.selector,
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
callMessage
)
);
// Attempt to replay the failed message, which will *not* immediately revert this time around,
// but attempt to reenter `relayMessage` with the same message hash. The reentrancy attempt should
// revert.
vm.expectEmit(true, true, true, true, target);
emit WhatHappened(
false,
abi.encodeWithSignature("Error(string)", "ReentrancyGuard: reentrant call")
);
L1Messenger.relayMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), // nonce
sender,
target,
0,
0,
callMessage
);
// Assert that the message still failed to be relayed.
assertFalse(L1Messenger.successfulMessages(hash));
assertTrue(L1Messenger.failedMessages(hash));
}
// relayMessage: should not revert if the recipient reenters `relayMessage` with a different
// message hash.
function test_relayMessage_reentrancyDiffMessage_succeeds() external {
ConfigurableCaller caller = new ConfigurableCaller();
address target = address(caller);
address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER;
bytes memory messageA = abi.encodeWithSelector(caller.call.selector);
bytes memory messageB = hex"";
bytes32 hashA = Hashing.hashCrossDomainMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
messageA
);
bytes32 hashB = Hashing.hashCrossDomainMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
messageB
);
// Set the portal's `l2Sender` to the `sender`.
vm.store(address(op), bytes32(senderSlotIndex), bytes32(uint256(uint160(sender))));
// Act as the portal and call the `relayMessage` function with both `messageA` and `messageB`.
vm.startPrank(address(op));
vm.expectCall(target, messageA);
L1Messenger.relayMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
messageA
);
vm.expectCall(target, messageB);
L1Messenger.relayMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
messageB
);
// Stop acting as the portal
vm.stopPrank();
// Assert that both messages failed to be relayed
assertFalse(L1Messenger.successfulMessages(hashA));
assertFalse(L1Messenger.successfulMessages(hashB));
assertTrue(L1Messenger.failedMessages(hashA));
assertTrue(L1Messenger.failedMessages(hashB));
// Set the configurable caller's target to `L1Messenger` and set the payload to `relayMessage(...)`.
caller.setDoRevert(false);
caller.setTarget(address(L1Messenger));
caller.setPayload(
abi.encodeWithSelector(
L1Messenger.relayMessage.selector,
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
messageB
)
);
// Attempt to replay the failed message, which will *not* immediately revert this time around,
// but attempt to reenter `relayMessage` with messageB. The reentrancy attempt should succeed
// because the message hashes are different.
vm.expectEmit(true, true, true, true, target);
emit WhatHappened(true, hex"");
L1Messenger.relayMessage(
Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }),
sender,
target,
0,
0,
messageA
);
// Assert that both messages are now in the `successfulMessages` mapping.
assertTrue(L1Messenger.successfulMessages(hashA));
assertTrue(L1Messenger.successfulMessages(hashB));
}
function test_relayMessage_legacy_succeeds() external {
address target = address(0xabcd);
address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER;
......
......@@ -230,168 +230,4 @@ contract L2CrossDomainMessenger_Test is Messenger_Initializer {
assertEq(L2Messenger.successfulMessages(hash), true);
assertEq(L2Messenger.failedMessages(hash), true);
}
// relayMessage: Should revert if the recipient is trying to reenter with the
// same message.
function test_relayMessage_reentrancySameMessage_reverts() external {
ConfigurableCaller caller = new ConfigurableCaller();
address target = address(caller);
address sender = address(L1Messenger);
address l1XDMAlias = AddressAliasHelper.applyL1ToL2Alias(address(L1Messenger));
bytes memory callMessage = abi.encodeWithSelector(caller.call.selector);
bytes32 hash = Hashing.hashCrossDomainMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
callMessage
);
// Act as the L1XDM and call the `relayMessage` function with the `innerMessage`.
vm.prank(l1XDMAlias);
vm.expectCall(target, callMessage);
L2Messenger.relayMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
callMessage
);
// Assert that the message failed to be relayed
assertFalse(L2Messenger.successfulMessages(hash));
assertTrue(L2Messenger.failedMessages(hash));
// Set the configurable caller's target to `L2Messenger` and set the payload to `relayMessage(...)`.
caller.setDoRevert(false);
caller.setTarget(address(L2Messenger));
caller.setPayload(
abi.encodeWithSelector(
L2Messenger.relayMessage.selector,
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
callMessage
)
);
// Attempt to replay the failed message, which will *not* immediately revert this time around,
// but attempt to reenter `relayMessage` with the same message hash. The reentrancy attempt should
// revert.
vm.expectEmit(true, true, true, true, target);
emit WhatHappened(
false,
abi.encodeWithSignature("Error(string)", "ReentrancyGuard: reentrant call")
);
L2Messenger.relayMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
callMessage
);
// Assert that the message still failed to be relayed.
assertFalse(L2Messenger.successfulMessages(hash));
assertTrue(L2Messenger.failedMessages(hash));
}
// relayMessage: should not revert if the recipient reenters `relayMessage` with a different
// message hash.
function test_relayMessage_reentrancyDiffMessage_succeeds() external {
ConfigurableCaller caller = new ConfigurableCaller();
address target = address(caller);
address sender = address(L1Messenger);
address l1XDMAlias = AddressAliasHelper.applyL1ToL2Alias(address(L1Messenger));
bytes memory messageA = abi.encodeWithSelector(caller.call.selector);
bytes memory messageB = hex"";
bytes32 hashA = Hashing.hashCrossDomainMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
messageA
);
bytes32 hashB = Hashing.hashCrossDomainMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
messageB
);
// Act as the L1XDM and call the `relayMessage` function with both `messageA` and `messageB`.
vm.startPrank(l1XDMAlias);
vm.expectCall(target, messageA);
L2Messenger.relayMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
messageA
);
vm.expectCall(target, messageB);
L2Messenger.relayMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
messageB
);
// Stop acting as the L1XDM
vm.stopPrank();
// Assert that both messages failed to be relayed
assertFalse(L2Messenger.successfulMessages(hashA));
assertFalse(L2Messenger.successfulMessages(hashB));
assertTrue(L2Messenger.failedMessages(hashA));
assertTrue(L2Messenger.failedMessages(hashB));
// Set the configurable caller's target to `L2Messenger` and set the payload to `relayMessage(...)`.
caller.setDoRevert(false);
caller.setTarget(address(L2Messenger));
caller.setPayload(
abi.encodeWithSelector(
L2Messenger.relayMessage.selector,
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
messageB
)
);
// Attempt to replay the failed message, which will *not* immediately revert this time around,
// but attempt to reenter `relayMessage` with messageB. The reentrancy attempt should succeed
// because the message hashes are different.
vm.expectEmit(true, true, true, true, target);
emit WhatHappened(true, hex"");
L2Messenger.relayMessage(
Encoding.encodeVersionedNonce(0, 1),
sender,
target,
0,
0,
messageA
);
// Assert that both messages are now in the `successfulMessages` mapping.
assertTrue(L2Messenger.successfulMessages(hashA));
assertTrue(L2Messenger.successfulMessages(hashB));
}
}
......@@ -47,21 +47,11 @@ contract L2OutputOracleTest is L2OutputOracle_Initializer {
});
}
function testFuzz_constructor_submissionIntervalLteL2BlockTime_reverts(
uint256 _submissionInterval,
uint256 _l2BlockTime
) external {
// Bound the _l2blockTime to be in the range of [1, type(uint256).max]
_l2BlockTime = bound(_l2BlockTime, 1, type(uint256).max);
// Roll the block number to _l2blockTime (the starting L2 timestamp must be less than or equal to the current time)
vm.roll(_l2BlockTime);
// Bound _submissionInterval to be less than or equal to _l2BlockTime
_submissionInterval = bound(_submissionInterval, 0, _l2BlockTime);
vm.expectRevert("L2OutputOracle: submission interval must be greater than L2 block time");
function test_constructor_submissionInterval_reverts() external {
vm.expectRevert("L2OutputOracle: submission interval must be greater than 0");
new L2OutputOracle({
_submissionInterval: _submissionInterval,
_l2BlockTime: _l2BlockTime,
_submissionInterval: 0,
_l2BlockTime: l2BlockTime,
_startingBlockNumber: startingBlockNumber,
_startingTimestamp: block.timestamp,
_proposer: proposer,
......
......@@ -976,12 +976,21 @@ contract OptimismPortal_FinalizeWithdrawal_Test is Portal_Initializer {
uint256 _gasLimit,
bytes memory _data
) external {
// Cannot call the optimism portal
vm.assume(_target != address(op));
vm.assume(
_target != address(op) && // Cannot call the optimism portal or a contract
_target.code.length == 0 && // No accounts with code
_target != CONSOLE && // The console has no code but behaves like a contract
uint160(_target) > 9 // No precompiles (or zero address)
);
// Total ETH supply is currently about 120M ETH.
uint256 value = bound(_value, 0, 200_000_000 ether);
vm.deal(address(op), value);
uint256 gasLimit = bound(_gasLimit, 0, 50_000_000);
uint256 nonce = messagePasser.messageNonce();
// Get a withdrawal transaction and mock proof from the differential testing script.
Types.WithdrawalTransaction memory _tx = Types.WithdrawalTransaction({
nonce: nonce,
sender: _sender,
......@@ -998,6 +1007,7 @@ contract OptimismPortal_FinalizeWithdrawal_Test is Portal_Initializer {
bytes[] memory withdrawalProof
) = ffi.getProveWithdrawalTransactionInputs(_tx);
// Create the output root proof
Types.OutputRootProof memory proof = Types.OutputRootProof({
version: bytes32(uint256(0)),
stateRoot: stateRoot,
......@@ -1009,29 +1019,30 @@ contract OptimismPortal_FinalizeWithdrawal_Test is Portal_Initializer {
assertEq(outputRoot, Hashing.hashOutputRootProof(proof));
assertEq(withdrawalHash, Hashing.hashWithdrawal(_tx));
// Mock the call to the oracle
// Setup the Oracle to return the outputRoot
vm.mockCall(
address(oracle),
abi.encodeWithSelector(oracle.getL2Output.selector),
abi.encode(outputRoot, 0)
abi.encode(outputRoot, block.timestamp, 100)
);
// Start the withdrawal, it must be initiated by the _sender and the
// correct value must be passed along
vm.deal(_tx.sender, _tx.value);
vm.prank(_tx.sender);
messagePasser.initiateWithdrawal{ value: _tx.value }(_tx.target, _tx.gasLimit, _tx.data);
// Ensure that the sentMessages is correct
assertEq(messagePasser.sentMessages(withdrawalHash), true);
vm.warp(block.timestamp + oracle.FINALIZATION_PERIOD_SECONDS() + 1);
// Prove the withdrawal transaction
op.proveWithdrawalTransaction(
_tx,
100, // l2BlockNumber
proof,
withdrawalProof
);
(bytes32 _root, , ) = op.provenWithdrawals(withdrawalHash);
assertTrue(_root != bytes32(0));
// Warp past the finalization period
vm.warp(block.timestamp + oracle.FINALIZATION_PERIOD_SECONDS() + 1);
// Finalize the withdrawal transaction
vm.expectCallMinGas(_tx.target, _tx.value, uint64(_tx.gasLimit), _tx.data);
op.finalizeWithdrawalTransaction(_tx);
assertTrue(op.finalizedWithdrawals(withdrawalHash));
}
}
......
......@@ -23,6 +23,8 @@ contract SafeCall_call_Test is CommonTest {
vm.assume(to != address(0x000000000000000000636F6e736F6c652e6c6f67));
// don't call the create2 deployer
vm.assume(to != address(0x4e59b44847b379578588920cA78FbF26c0B4956C));
// don't call the ffi interface
vm.assume(to != address(0x5615dEB798BB3E4dFa0139dFa1b3D433Cc23b72f));
assertEq(from.balance, 0, "from balance is 0");
vm.deal(from, value);
......
......@@ -124,14 +124,18 @@ contract XDM_MinGasLimits is Messenger_Initializer {
* contract.
*/
function invariant_minGasLimits() public {
uint256 length = actor.numHashes();
for (uint256 i = 0; i < length; ++i) {
bytes32 hash = actor.hashes(i);
// the message hash is in the successfulMessages mapping
assertTrue(L1Messenger.successfulMessages(hash));
// it is not in the received messages mapping
assertFalse(L1Messenger.failedMessages(hash));
}
assertFalse(actor.reverted());
///////////////////////////////////////////////////////////////////
// ~ DEV ~ //
// This test is temporarily disabled, it is being fixed in #5470 //
///////////////////////////////////////////////////////////////////
// uint256 length = actor.numHashes();
// for (uint256 i = 0; i < length; ++i) {
// bytes32 hash = actor.hashes(i);
// // the message hash is in the successfulMessages mapping
// assertTrue(L1Messenger.successfulMessages(hash));
// // it is not in the received messages mapping
// assertFalse(L1Messenger.failedMessages(hash));
// }
// assertFalse(actor.reverted());
}
}
......@@ -175,17 +175,12 @@ abstract contract CrossDomainMessenger is
*/
mapping(bytes32 => bool) public failedMessages;
/**
* @notice A mapping of hashes to reentrancy locks.
*/
mapping(bytes32 => bool) internal reentrancyLocks;
/**
* @notice Reserve extra slots in the storage layout for future upgrades.
* A gap size of 41 was chosen here, so that the first slot used in a child contract
* would be a multiple of 50.
*/
uint256[41] private __gap;
uint256[42] private __gap;
/**
* @notice Emitted whenever a message is sent to the other chain.
......@@ -323,13 +318,6 @@ abstract contract CrossDomainMessenger is
_message
);
// Check if the reentrancy lock for the `versionedHash` is already set.
if (reentrancyLocks[versionedHash]) {
revert("ReentrancyGuard: reentrant call");
}
// Trigger the reentrancy lock for `versionedHash`
reentrancyLocks[versionedHash] = true;
if (_isOtherMessenger()) {
// These properties should always hold when the message is first submitted (as
// opposed to being replayed).
......@@ -357,6 +345,15 @@ abstract contract CrossDomainMessenger is
"CrossDomainMessenger: message has already been relayed"
);
// If `xDomainMsgSender` is not the default L2 sender, this function
// is being re-entered. This marks the message as failed to allow it
// to be replayed.
if (xDomainMsgSender != Constants.DEFAULT_L2_SENDER) {
failedMessages[versionedHash] = true;
emit FailedRelayedMessage(versionedHash);
return;
}
xDomainMsgSender = _sender;
bool success = SafeCall.callWithMinGas(_target, _minGasLimit, _value, _message);
xDomainMsgSender = Constants.DEFAULT_L2_SENDER;
......@@ -377,9 +374,6 @@ abstract contract CrossDomainMessenger is
revert("CrossDomainMessenger: failed to relay message");
}
}
// Clear the reentrancy lock for `versionedHash`
reentrancyLocks[versionedHash] = false;
}
/**
......
import { hashWithdrawal, calldataCost } from '@eth-optimism/core-utils'
import { BigNumber } from 'ethers'
import { hashWithdrawal } from '@eth-optimism/core-utils'
import { BigNumber, utils } from 'ethers'
import { LowLevelMessage } from '../interfaces'
const { hexDataLength } = utils
/**
* Utility for hashing a LowLevelMessage object.
*
......@@ -25,7 +27,7 @@ export const hashLowLevelMessage = (message: LowLevelMessage): string => {
*/
export const migratedWithdrawalGasLimit = (data: string): BigNumber => {
// Compute the gas limit and cap at 25 million
const dataCost = calldataCost(data)
const dataCost = BigNumber.from(hexDataLength(data)).mul(16)
let minGasLimit = dataCost.add(200_000)
if (minGasLimit.gt(25_000_000)) {
minGasLimit = BigNumber.from(25_000_000)
......
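For a quick sanity check of the new charge model, the sketch below recomputes one of the expected values from the test table in the next hunk. It assumes ethers v5, whose `utils.hexDataLength` returns the byte length of a hex string, matching the import added above.

```ts
import { BigNumber, utils } from 'ethers'

// Every calldata byte is now priced at 16 gas, zero or not.
// '0x000000' is 3 bytes -> 48 gas of calldata cost -> 200_048 total,
// matching the updated expectation of 200_000 + 16 + 16 + 16.
const data = '0x000000'
const dataCost = BigNumber.from(utils.hexDataLength(data)).mul(16)
const minGasLimit = dataCost.add(200_000)
console.log(minGasLimit.toString()) // "200048"
```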
......@@ -15,9 +15,9 @@ describe('Message Utils', () => {
const tests = [
{ input: '0x', result: BigNumber.from(200_000) },
{ input: '0xff', result: BigNumber.from(200_000 + 16) },
{ input: '0xff00', result: BigNumber.from(200_000 + 16 + 4) },
{ input: '0x00', result: BigNumber.from(200_000 + 4) },
{ input: '0x000000', result: BigNumber.from(200_000 + 4 + 4 + 4) },
{ input: '0xff00', result: BigNumber.from(200_000 + 16 + 16) },
{ input: '0x00', result: BigNumber.from(200_000 + 16) },
{ input: '0x000000', result: BigNumber.from(200_000 + 16 + 16 + 16) },
]
for (const test of tests) {
......
......@@ -18,12 +18,17 @@ an L2 account to an L1 account.
more specific terms to differentiate:
- A _withdrawal initiating transaction_ refers specifically to a transaction on L2 sent to the Withdrawals predeploy.
- A _withdrawal proving transaction_ refers specifically to an L1 transaction
which proves the withdrawal is correct (that it has been included in a merkle
tree whose root is available on L1).
- A _withdrawal finalizing transaction_ refers specifically to an L1 transaction which finalizes and relays the
withdrawal.
Withdrawals are initiated on L2 via a call to the Message Passer predeploy contract, which records the important
properties of the message in its storage. Withdrawals are finalized on L1 via a call to the `OptimismPortal`
contract, which proves the inclusion of this withdrawal message.
properties of the message in its storage.
Withdrawals are proven on L1 via a call to the `OptimismPortal`, which proves the inclusion of this withdrawal message.
Withdrawals are finalized on L1 via a call to the `OptimismPortal` contract,
which verifies that the fault challenge period has passed since the withdrawal message has been proved.
In this way, withdrawals are different from [deposits][g-deposits] which make use of a special transaction type in the
[execution engine][g-execution-engine] client. Rather, withdrawal transactions must use smart contracts on L1 for
......@@ -59,18 +64,20 @@ This is a very simple contract that stores the hash of the withdrawal data.
### On L1
1. A [relayer][g-relayer] submits the required inputs to the `OptimismPortal` contract. The relayer need
not be the same entity which initiated the withdrawal on L2.
1. A [relayer][g-relayer] submits a withdrawal proving transaction with the required inputs
to the `OptimismPortal` contract.
The relayer is not necessarily the same entity which initiated the withdrawal on L2.
These inputs include the withdrawal transaction data, inclusion proofs, and a block number. The block number
must be one for which an L2 output root exists, which commits to the withdrawal as registered on L2.
1. The `OptimismPortal` contract retrieves the output root for the given block number from the `L2OutputOracle`'s
`getL2OutputAfter()` function, and performs the remainder of the verification process internally.
`getL2Output()` function, and performs the remainder of the verification process internally.
1. If proof verification fails, the call reverts. Otherwise the hash is recorded to prevent it from being re-proven.
Note that the withdrawal can be proven more than once if the corresponding output root changes.
1. After the withdrawal is proven, it enters a 7 day challenge period, allowing time for other network participants
to challenge the integrity of the corresponding output root.
1. Once the challenge period has passed, a relayer submits the withdrawal transaction once again to the
`OptimismPortal` contract. Again, the relayer need not be the same entity which initiated the withdrawal on L2.
1. Once the challenge period has passed, a relayer submits a withdrawal finalizing transaction to the
`OptimismPortal` contract.
The relayer doesn't need to be the same entity that initiated the withdrawal on L2.
1. The `OptimismPortal` contract receives the withdrawal transaction data and verifies that the withdrawal has
both been proven and passed the challenge period.
1. If the requirements are not met, the call reverts. Otherwise the call is forwarded, and the hash is recorded to
......@@ -102,7 +109,7 @@ interface L2ToL1MessagePasser {
function initiateWithdrawal(address _target, uint256 _gasLimit, bytes memory _data) payable external;
function nonce() view external returns (uint256);
function messageNonce() view external returns (uint256);
function sentMessages(bytes32) view external returns (bool);
}
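For illustration, here is a minimal ethers v5 sketch of a withdrawal initiating transaction against this interface. The ABI fragment is derived from the declarations above; the predeploy address and the signer wiring are assumptions, not part of this spec.

```ts
import { ethers } from 'ethers'

const messagePasserAbi = [
  'function initiateWithdrawal(address _target, uint256 _gasLimit, bytes _data) payable',
  'function messageNonce() view returns (uint256)',
  'function sentMessages(bytes32) view returns (bool)',
]

// Assumed L2ToL1MessagePasser predeploy address.
const MESSAGE_PASSER = '0x4200000000000000000000000000000000000016'

async function initiateWithdrawal(l2Signer: ethers.Signer, l1Target: string) {
  const messagePasser = new ethers.Contract(MESSAGE_PASSER, messagePasserAbi, l2Signer)

  // Nonce that this withdrawal will be recorded under.
  const nonce = await messagePasser.messageNonce()

  // Withdraw 1 ETH to `l1Target`, forwarding 100k gas on L1.
  const tx = await messagePasser.initiateWithdrawal(l1Target, 100_000, '0x', {
    value: ethers.utils.parseEther('1'),
  })
  await tx.wait()
  return nonce
}
```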
......@@ -139,13 +146,14 @@ withdrawals:
```js
interface OptimismPortal {
event WithdrawalFinalized(bytes32 indexed);
event WithdrawalFinalized(bytes32 indexed withdrawalHash, bool success);
function l2Sender() returns(address) external;
function proveWithdrawalTransaction(
Types.WithdrawalTransaction memory _tx,
uint256 _l2BlockNumber,
uint256 _l2OutputIndex,
Types.OutputRootProof calldata _outputRootProof,
bytes[] calldata _withdrawalProof
) external;
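To make the two-step L1 flow concrete, here is a hedged ethers v5 sketch of a withdrawal proving transaction followed, after the challenge period, by a finalizing transaction. The tuple layout of the withdrawal transaction and the portal address are assumptions inferred from the surrounding tests; consult the deployed ABI for the exact types.

```ts
import { ethers } from 'ethers'

// Abbreviated ABI; the (nonce, sender, target, value, gasLimit, data) tuple
// layout mirrors the WithdrawalTransaction struct used in the tests above.
const portalAbi = [
  'function proveWithdrawalTransaction((uint256,address,address,uint256,uint256,bytes) _tx, uint256 _l2OutputIndex, (bytes32,bytes32,bytes32,bytes32) _outputRootProof, bytes[] _withdrawalProof)',
  'function finalizeWithdrawalTransaction((uint256,address,address,uint256,uint256,bytes) _tx)',
]

async function proveThenFinalize(
  l1Signer: ethers.Signer,
  portalAddress: string, // deployed OptimismPortal address (placeholder)
  withdrawalTx: [ethers.BigNumberish, string, string, ethers.BigNumberish, ethers.BigNumberish, string],
  l2OutputIndex: ethers.BigNumberish,
  outputRootProof: [string, string, string, string],
  withdrawalProof: string[]
) {
  const portal = new ethers.Contract(portalAddress, portalAbi, l1Signer)

  // Withdrawal proving transaction: checks the inclusion proof against the
  // output root at `l2OutputIndex`.
  await (await portal.proveWithdrawalTransaction(
    withdrawalTx, l2OutputIndex, outputRootProof, withdrawalProof
  )).wait()

  // ...wait out the 7 day challenge period...

  // Withdrawal finalizing transaction: relays the withdrawal to its target.
  await (await portal.finalizeWithdrawalTransaction(withdrawalTx)).wait()
}
```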
......@@ -168,14 +176,14 @@ The following inputs are required to prove and finalize a withdrawal:
- `data`: Data to send to the target.
- `gasLimit`: Gas to be forwarded to the target.
- Proof and verification data:
- `l2BlockNumber`: The L2 block number that corresponds to the output root.
- `l2OutputIndex`: The index in the L2 outputs where the applicable output root may be found.
- `outputRootProof`: Four `bytes32` values which are used to derive the output root.
- `withdrawalProof`: An inclusion proof for the given withdrawal in the L2ToL1MessagePasser contract.
These inputs must satisfy the following conditions:
1. The `l2BlockNumber` must be the block number that corresponds to the `OutputProposal` being proven.
1. `L2OutputOracle.getL2OutputAfter(l2BlockNumber)` returns a non-zero `OutputProposal`.
1. The `l2OutputIndex` must be the index in the L2 outputs that contains the applicable output root.
1. `L2OutputOracle.getL2Output(l2OutputIndex)` returns a non-zero `OutputProposal`.
1. The keccak256 hash of the `outputRootProof` values is equal to the `outputRoot` (see the sketch after this list).
1. The `withdrawalProof` is a valid inclusion proof demonstrating that a hash of the Withdrawal transaction data
is contained in the storage of the L2ToL1MessagePasser contract on L2.
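As a sketch of the keccak256 condition above, the output root can be recomputed off-chain with a single keccak256 over the ABI-encoded proof fields. The field names follow the `OutputRootProof` struct used in the tests; `messagePasserStorageRoot` and `latestBlockhash` are not shown in this excerpt and are assumptions.

```ts
import { ethers } from 'ethers'

// Recompute the output root from an OutputRootProof and compare it to the
// root returned by L2OutputOracle.getL2Output(l2OutputIndex).
const hashOutputRootProof = (proof: {
  version: string // bytes32
  stateRoot: string // bytes32
  messagePasserStorageRoot: string // bytes32 (field name assumed)
  latestBlockhash: string // bytes32 (field name assumed)
}): string =>
  ethers.utils.keccak256(
    ethers.utils.defaultAbiCoder.encode(
      ['bytes32', 'bytes32', 'bytes32', 'bytes32'],
      [proof.version, proof.stateRoot, proof.messagePasserStorageRoot, proof.latestBlockhash]
    )
  )
```

A prover would check that `hashOutputRootProof(proof)` equals the output root returned by the oracle before submitting the proving transaction.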
......@@ -190,13 +198,13 @@ These inputs must satisfy the following conditions:
[polygon-dbl-spend]: https://gerhard-wagner.medium.com/double-spending-bug-in-polygons-plasma-bridge-2e0954ccadf1
1. For each withdrawal initiated on L2 (ie. with a unique `nonce`), the following properties must hold:
1. For each withdrawal initiated on L2 (i.e. with a unique `messageNonce()`), the following properties must hold:
1. It should only be possible to prove the withdrawal once, unless the outputRoot for the withdrawal
has changed.
1. It should only be possible to finalize the withdrawal once.
1. It should not be possible to relay the message with any of its fields modified, i.e.
1. Modifying the `sender` field would enable a 'spoofing' attack.
1. Modifying the `target`, `message`, or `value` fields would enable an attacker to dangerously change the
1. Modifying the `target`, `data`, or `value` fields would enable an attacker to dangerously change the
intended outcome of the withdrawal.
1. Modifying the `gasLimit` could make the cost of relaying too high, or allow the relayer to cause execution
to fail (out of gas) in the `target`.
......