Commit 3dca3d31 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into fault-proof-specs

parents d8019fbe 1e58d20a
@@ -61,7 +61,7 @@ jobs:
  yarn-monorepo:
    docker:
      - image: ethereumoptimism/ci-builder:latest
-    resource_class: xlarge
+    resource_class: large
    steps:
      - checkout
      - check-changed:
@@ -142,7 +142,7 @@ jobs:
        default: "oplabs-tools-artifacts/images"
    machine:
      image: ubuntu-2204:2022.07.1
-    resource_class: xlarge
+    resource_class: medium
    steps:
      - checkout
      - run:
@@ -207,7 +207,7 @@ jobs:
        default: "linux/amd64"
    machine:
      image: ubuntu-2204:2022.07.1
-    resource_class: xlarge
+    resource_class: medium
    steps:
      - gcp-oidc-authenticate
      # Below is CircleCI recommended way of specifying nameservers on an Ubuntu box:
@@ -261,7 +261,7 @@ jobs:
        default: "linux/amd64"
    machine:
      image: ubuntu-2204:2022.07.1
-    resource_class: xlarge
+    resource_class: medium
    steps:
      - gcp-cli/install
      - gcp-oidc-authenticate
@@ -379,7 +379,7 @@ jobs:
  contracts-bedrock-slither:
    docker:
      - image: ethereumoptimism/ci-builder:latest
-    resource_class: xlarge
+    resource_class: large
    steps:
      - checkout
      - attach_workspace: { at: "." }
@@ -600,7 +600,7 @@ jobs:
      - run:
          name: run lint
          command: |
-            golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 2m -e "errors.As" -e "errors.Is" ./...
+            golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
          working_directory: <<parameters.module>>
  go-test:
@@ -900,7 +900,7 @@ jobs:
    docker:
      - image: returntocorp/semgrep
-    resource_class: xlarge
+    resource_class: medium
    steps:
      - checkout
      - unless:
@@ -942,7 +942,7 @@ jobs:
    machine:
      image: ubuntu-2204:2022.07.1
      docker_layer_caching: true
-    resource_class: xlarge
+    resource_class: large
    steps:
      - attach_workspace:
          at: /tmp/docker_images
......
@@ -12,6 +12,7 @@ import (
	gethrpc "github.com/ethereum/go-ethereum/rpc"
	"github.com/urfave/cli"

+	"github.com/ethereum-optimism/optimism/op-batcher/flags"
	"github.com/ethereum-optimism/optimism/op-batcher/metrics"
	"github.com/ethereum-optimism/optimism/op-batcher/rpc"
	oplog "github.com/ethereum-optimism/optimism/op-service/log"
@@ -30,6 +31,9 @@ const (
// of a closure allows the parameters bound to the top-level main package, e.g.
// GitVersion, to be captured and used once the function is executed.
func Main(version string, cliCtx *cli.Context) error {
+	if err := flags.CheckRequired(cliCtx); err != nil {
+		return err
+	}
	cfg := NewConfig(cliCtx)
	if err := cfg.Check(); err != nil {
		return fmt.Errorf("invalid CLI flags: %w", err)
......
@@ -82,7 +82,7 @@ func (s *channelManager) TxFailed(id txID) {
	}
	s.metr.RecordBatchTxFailed()
-	if s.closed && len(s.confirmedTransactions) == 0 && len(s.pendingTransactions) == 0 {
+	if s.closed && len(s.confirmedTransactions) == 0 && len(s.pendingTransactions) == 0 && s.pendingChannel != nil {
		s.log.Info("Channel has no submitted transactions, clearing for shutdown", "chID", s.pendingChannel.ID())
		s.clearPendingChannel()
	}
......
@@ -191,7 +191,7 @@ func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context) {
	if err != nil {
		l.log.Warn("Error calculating L2 block range", "err", err)
		return
-	} else if start.Number == end.Number {
+	} else if start.Number >= end.Number {
		return
	}
......
package doc

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"

	"github.com/ethereum-optimism/optimism/op-batcher/metrics"
	"github.com/olekukonko/tablewriter"
	"github.com/urfave/cli"
)

var Subcommands = cli.Commands{
	{
		Name:  "metrics",
		Usage: "Dumps a list of supported metrics to stdout",
		Flags: []cli.Flag{
			cli.StringFlag{
				Name:  "format",
				Value: "markdown",
				Usage: "Output format (json|markdown)",
			},
		},
		Action: func(ctx *cli.Context) error {
			m := metrics.NewMetrics("default")
			supportedMetrics := m.Document()
			format := ctx.String("format")
			if format != "markdown" && format != "json" {
				return fmt.Errorf("invalid format: %s", format)
			}
			if format == "json" {
				enc := json.NewEncoder(os.Stdout)
				return enc.Encode(supportedMetrics)
			}
			table := tablewriter.NewWriter(os.Stdout)
			table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
			table.SetCenterSeparator("|")
			table.SetAutoWrapText(false)
			table.SetHeader([]string{"Metric", "Description", "Labels", "Type"})
			var data [][]string
			for _, metric := range supportedMetrics {
				labels := strings.Join(metric.Labels, ",")
				data = append(data, []string{metric.Name, metric.Help, labels, metric.Type})
			}
			table.AppendBulk(data)
			table.Render()
			return nil
		},
	},
}
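The new `doc metrics` subcommand can be exercised from a host application once it is registered, as the op-batcher main.go hunk below does. A minimal, hypothetical sketch (not part of this diff) of invoking it programmatically; the fresh cli.App here only carries the subcommand wiring:

package main

import (
	"log"

	"github.com/urfave/cli"

	"github.com/ethereum-optimism/optimism/op-batcher/cmd/doc"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{
		{Name: "doc", Subcommands: doc.Subcommands},
	}
	// Equivalent to running `op-batcher doc metrics --format json` on the CLI:
	// dumps the supported metrics as JSON instead of the default markdown table.
	if err := app.Run([]string{"op-batcher", "doc", "metrics", "--format", "json"}); err != nil {
		log.Fatal(err)
	}
}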
@@ -7,6 +7,7 @@ import (
	"github.com/urfave/cli"

	"github.com/ethereum-optimism/optimism/op-batcher/batcher"
+	"github.com/ethereum-optimism/optimism/op-batcher/cmd/doc"
	"github.com/ethereum-optimism/optimism/op-batcher/flags"
	oplog "github.com/ethereum-optimism/optimism/op-service/log"
	"github.com/ethereum/go-ethereum/log"
@@ -26,10 +27,15 @@ func main() {
	app.Version = fmt.Sprintf("%s-%s-%s", Version, GitCommit, GitDate)
	app.Name = "op-batcher"
	app.Usage = "Batch Submitter Service"
-	app.Description = "Service for generating and submitting L2 tx batches " +
-		"to L1"
+	app.Description = "Service for generating and submitting L2 tx batches to L1"
	app.Action = curryMain(Version)
+	app.Commands = []cli.Command{
+		{
+			Name:        "doc",
+			Subcommands: doc.Subcommands,
+		},
+	}
	err := app.Run(os.Args)
	if err != nil {
		log.Crit("Application failed", "message", err)
......
package flags

import (
+	"fmt"
+
	"github.com/urfave/cli"

	"github.com/ethereum-optimism/optimism/op-batcher/rpc"
@@ -17,37 +19,32 @@ const envVarPrefix = "OP_BATCHER"
var (
	// Required flags
	L1EthRpcFlag = cli.StringFlag{
		Name:   "l1-eth-rpc",
		Usage:  "HTTP provider URL for L1",
-		Required: true,
		EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L1_ETH_RPC"),
	}
	L2EthRpcFlag = cli.StringFlag{
		Name:   "l2-eth-rpc",
		Usage:  "HTTP provider URL for L2 execution engine",
-		Required: true,
		EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L2_ETH_RPC"),
	}
	RollupRpcFlag = cli.StringFlag{
		Name:   "rollup-rpc",
		Usage:  "HTTP provider URL for Rollup node",
-		Required: true,
		EnvVar: opservice.PrefixEnvVar(envVarPrefix, "ROLLUP_RPC"),
	}
	SubSafetyMarginFlag = cli.Uint64Flag{
		Name: "sub-safety-margin",
		Usage: "The batcher tx submission safety margin (in #L1-blocks) to subtract " +
			"from a channel's timeout and sequencing window, to guarantee safe inclusion " +
			"of a channel on L1.",
-		Required: true,
		EnvVar: opservice.PrefixEnvVar(envVarPrefix, "SUB_SAFETY_MARGIN"),
	}
	PollIntervalFlag = cli.DurationFlag{
		Name: "poll-interval",
		Usage: "Delay between querying L2 for more transactions and " +
			"creating a new batch",
-		Required: true,
		EnvVar: opservice.PrefixEnvVar(envVarPrefix, "POLL_INTERVAL"),
	}

	// Optional flags
@@ -108,8 +105,7 @@ var optionalFlags = []cli.Flag{
}

func init() {
-	requiredFlags = append(requiredFlags, oprpc.CLIFlags(envVarPrefix)...)
+	optionalFlags = append(optionalFlags, oprpc.CLIFlags(envVarPrefix)...)
	optionalFlags = append(optionalFlags, oplog.CLIFlags(envVarPrefix)...)
	optionalFlags = append(optionalFlags, opmetrics.CLIFlags(envVarPrefix)...)
	optionalFlags = append(optionalFlags, oppprof.CLIFlags(envVarPrefix)...)
@@ -121,3 +117,12 @@ func init() {
// Flags contains the list of configuration options available to the binary.
var Flags []cli.Flag
func CheckRequired(ctx *cli.Context) error {
	for _, f := range requiredFlags {
		if !ctx.GlobalIsSet(f.GetName()) {
			return fmt.Errorf("flag %s is required", f.GetName())
		}
	}
	return nil
}
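An illustrative sketch (not part of this diff) of what the new required-flag check does: with nothing set on the context, CheckRequired should report the first missing required flag. The test name and setup are hypothetical; only flags.CheckRequired and flags.L1EthRpcFlag come from this change.

package flags_test

import (
	"flag"
	"testing"

	"github.com/urfave/cli"

	"github.com/ethereum-optimism/optimism/op-batcher/flags"
)

func TestCheckRequiredRejectsMissingFlags(t *testing.T) {
	// A context with no global flags set should fail the required-flag check.
	ctx := cli.NewContext(cli.NewApp(), flag.NewFlagSet("test", flag.ContinueOnError), nil)
	if err := flags.CheckRequired(ctx); err == nil {
		t.Fatalf("expected an error for missing required flag %s", flags.L1EthRpcFlag.Name)
	}
}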
@@ -58,12 +58,14 @@ type Metrics struct {
	PendingBlocksCount prometheus.GaugeVec
	BlocksAddedCount   prometheus.Gauge

	ChannelInputBytes   prometheus.GaugeVec
	ChannelReadyBytes   prometheus.Gauge
	ChannelOutputBytes  prometheus.Gauge
	ChannelClosedReason prometheus.Gauge
	ChannelNumFrames    prometheus.Gauge
	ChannelComprRatio   prometheus.Histogram
+	ChannelInputBytesTotal  prometheus.Counter
+	ChannelOutputBytesTotal prometheus.Counter

	BatcherTxEvs opmetrics.EventVec
}
@@ -100,7 +102,7 @@ func NewMetrics(procName string) *Metrics {
			Help: "1 if the op-batcher has finished starting up",
		}),

-		ChannelEvs: opmetrics.NewEventVec(factory, ns, "channel", "Channel", []string{"stage"}),
+		ChannelEvs: opmetrics.NewEventVec(factory, ns, "", "channel", "Channel", []string{"stage"}),

		PendingBlocksCount: *factory.NewGaugeVec(prometheus.GaugeOpts{
			Namespace: ns,
@@ -144,8 +146,18 @@ func NewMetrics(procName string) *Metrics {
			Help:    "Compression ratios of closed channel.",
			Buckets: append([]float64{0.1, 0.2}, prometheus.LinearBuckets(0.3, 0.05, 14)...),
		}),
+		ChannelInputBytesTotal: factory.NewCounter(prometheus.CounterOpts{
+			Namespace: ns,
+			Name:      "input_bytes_total",
+			Help:      "Total number of bytes to a channel.",
+		}),
+		ChannelOutputBytesTotal: factory.NewCounter(prometheus.CounterOpts{
+			Namespace: ns,
+			Name:      "output_bytes_total",
+			Help:      "Total number of compressed output bytes from a channel.",
+		}),

-		BatcherTxEvs: opmetrics.NewEventVec(factory, ns, "batcher_tx", "BatcherTx", []string{"stage"}),
+		BatcherTxEvs: opmetrics.NewEventVec(factory, ns, "", "batcher_tx", "BatcherTx", []string{"stage"}),
	}
}
@@ -219,6 +231,8 @@ func (m *Metrics) RecordChannelClosed(id derive.ChannelID, numPendingBlocks int,
	m.ChannelNumFrames.Set(float64(numFrames))
	m.ChannelInputBytes.WithLabelValues(StageClosed).Set(float64(inputBytes))
	m.ChannelOutputBytes.Set(float64(outputComprBytes))
+	m.ChannelInputBytesTotal.Add(float64(inputBytes))
+	m.ChannelOutputBytesTotal.Add(float64(outputComprBytes))

	var comprRatio float64
	if inputBytes > 0 {
......
@@ -103,6 +103,10 @@ func main() {
			Name:  "evm-messages",
			Usage: "Path to evm-messages.json",
		},
+		&cli.StringFlag{
+			Name:  "witness-file",
+			Usage: "Path to l2geth witness file",
+		},
		&cli.StringFlag{
			Name:  "private-key",
			Usage: "Key to sign transactions with",
@@ -702,8 +706,9 @@ func newContracts(ctx *cli.Context, l1Backend, l2Backend bind.ContractBackend) (
func newWithdrawals(ctx *cli.Context, l1ChainID *big.Int) ([]*crossdomain.LegacyWithdrawal, error) {
	ovmMsgs := ctx.String("ovm-messages")
	evmMsgs := ctx.String("evm-messages")
+	witnessFile := ctx.String("witness-file")

-	log.Debug("Migration data", "ovm-path", ovmMsgs, "evm-messages", evmMsgs)
+	log.Debug("Migration data", "ovm-path", ovmMsgs, "evm-messages", evmMsgs, "witness-file", witnessFile)
	ovmMessages, err := crossdomain.NewSentMessageFromJSON(ovmMsgs)
	if err != nil {
		return nil, err
@@ -716,9 +721,19 @@ func newWithdrawals(ctx *cli.Context, l1ChainID *big.Int) ([]*crossdomain.Legacy
		ovmMessages = []*crossdomain.SentMessage{}
	}

-	evmMessages, err := crossdomain.NewSentMessageFromJSON(evmMsgs)
-	if err != nil {
-		return nil, err
-	}
+	var evmMessages []*crossdomain.SentMessage
+	if witnessFile != "" {
+		evmMessages, _, err = crossdomain.ReadWitnessData(witnessFile)
+		if err != nil {
+			return nil, err
+		}
+	} else if evmMsgs != "" {
+		evmMessages, err = crossdomain.NewSentMessageFromJSON(evmMsgs)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		return nil, errors.New("must provide either witness file or evm messages")
+	}

	migrationData := crossdomain.MigrationData{
......
@@ -28,12 +28,11 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
	signer := types.LatestSigner(sd.L2Cfg.Config)
	cl := sequencerEngine.EthClient()

+	aliceNonce := uint64(0) // manual nonce management to avoid geth pending-tx nonce non-determinism flakiness
	aliceTx := func() {
-		n, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
-		require.NoError(t, err)
		tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
			ChainID:   sd.L2Cfg.Config.ChainID,
-			Nonce:     n,
+			Nonce:     aliceNonce,
			GasTipCap: big.NewInt(2 * params.GWei),
			GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
			Gas:       params.TxGas,
@@ -41,6 +40,7 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
			Value:     e2eutils.Ether(2),
		})
		require.NoError(gt, cl.SendTransaction(t.Ctx(), tx))
+		aliceNonce += 1
	}
	makeL2BlockWithAliceTx := func() {
		aliceTx()
@@ -139,12 +139,11 @@ func TestLargeL1Gaps(gt *testing.T) {
	signer := types.LatestSigner(sd.L2Cfg.Config)
	cl := sequencerEngine.EthClient()

+	aliceNonce := uint64(0) // manual nonce, avoid pending-tx nonce management, that causes flakes
	aliceTx := func() {
-		n, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
-		require.NoError(t, err)
		tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
			ChainID:   sd.L2Cfg.Config.ChainID,
-			Nonce:     n,
+			Nonce:     aliceNonce,
			GasTipCap: big.NewInt(2 * params.GWei),
			GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
			Gas:       params.TxGas,
@@ -152,6 +151,7 @@ func TestLargeL1Gaps(gt *testing.T) {
			Value:     e2eutils.Ether(2),
		})
		require.NoError(gt, cl.SendTransaction(t.Ctx(), tx))
+		aliceNonce += 1
	}
	makeL2BlockWithAliceTx := func() {
		aliceTx()
......
@@ -191,7 +191,7 @@ func TestL2EngineAPIFail(gt *testing.T) {
}

func TestEngineAPITests(t *testing.T) {
-	test.RunEngineAPITests(t, func() engineapi.EngineBackend {
+	test.RunEngineAPITests(t, func(t *testing.T) engineapi.EngineBackend {
		jwtPath := e2eutils.WriteDefaultJWT(t)
		dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
		sd := e2eutils.Setup(t, dp, defaultAlloc)
......
@@ -49,3 +49,46 @@ func TestDerivationWithFlakyL1RPC(gt *testing.T) {
	// Verifier should be synced, even though it hit lots of temporary L1 RPC errors
	require.Equal(t, sequencer.L2Unsafe(), verifier.L2Safe(), "verifier is synced")
}
func TestFinalizeWhileSyncing(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) // mute all the temporary derivation errors that we forcefully create
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
verifierStartStatus := verifier.SyncStatus()
// Build an L1 chain with 64 + 1 blocks, containing batches of L2 chain.
// Enough to go past the finalityDelay of the engine queue,
// to make the verifier finalize while it syncs.
miner.ActEmptyBlock(t)
for i := 0; i < 64+1; i++ {
sequencer.ActL1HeadSignal(t)
sequencer.ActL2PipelineFull(t)
sequencer.ActBuildToL1Head(t)
batcher.ActSubmitAll(t)
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(batcher.batcherAddr)(t)
miner.ActL1EndBlock(t)
}
l1Head := miner.l1Chain.CurrentHeader()
// finalize all of L1
miner.ActL1Safe(t, l1Head.Number.Uint64())
miner.ActL1Finalize(t, l1Head.Number.Uint64())
// Now signal L1 finality to the verifier, while the verifier is not synced.
verifier.ActL1HeadSignal(t)
verifier.ActL1SafeSignal(t)
verifier.ActL1FinalizedSignal(t)
// Now sync the verifier, without repeating the signal.
// While it's syncing, it should finalize on interval now, based on the future L1 finalized block it remembered.
verifier.ActL2PipelineFull(t)
// Verify the verifier finalized something new
require.Less(t, verifierStartStatus.FinalizedL2.Number, verifier.SyncStatus().FinalizedL2.Number, "verifier finalized L2 blocks during sync")
}
@@ -589,8 +589,10 @@ func TestSystemMockP2P(t *testing.T) {
	}

	cfg := DefaultSystemConfig(t)
-	// Disable batcher, so we don't sync from L1
+	// Disable batcher, so we don't sync from L1 & set a large sequence window so we only have unsafe blocks
	cfg.DisableBatcher = true
+	cfg.DeployConfig.SequencerWindowSize = 100_000
+	cfg.DeployConfig.MaxSequencerDrift = 100_000
	// disable at the start, so we don't miss any gossiped blocks.
	cfg.Nodes["sequencer"].Driver.SequencerStopped = true
@@ -640,11 +642,11 @@ func TestSystemMockP2P(t *testing.T) {
	require.Nil(t, err, "Sending L2 tx to sequencer")

	// Wait for tx to be mined on the L2 sequencer chain
-	receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
+	receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 5*time.Minute)
	require.Nil(t, err, "Waiting for L2 tx on sequencer")

	// Wait until the block it was first included in shows up in the safe chain on the verifier
-	receiptVerif, err := waitForTransaction(tx.Hash(), l2Verif, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
+	receiptVerif, err := waitForTransaction(tx.Hash(), l2Verif, 5*time.Minute)
	require.Nil(t, err, "Waiting for L2 tx on verifier")

	require.Equal(t, receiptSeq, receiptVerif)
......
package eth
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
// EncodeReceipts encodes a list of receipts into raw receipts. Some non-consensus meta-data may be lost.
func EncodeReceipts(elems []*types.Receipt) ([]hexutil.Bytes, error) {
out := make([]hexutil.Bytes, len(elems))
for i, el := range elems {
dat, err := el.MarshalBinary()
if err != nil {
return nil, fmt.Errorf("failed to marshal receipt %d: %w", i, err)
}
out[i] = dat
}
return out, nil
}
// DecodeRawReceipts decodes receipts and adds additional blocks metadata.
// The contract-deployment addresses are not set however (high cost, depends on nonce values, unused by op-node).
func DecodeRawReceipts(block BlockID, rawReceipts []hexutil.Bytes, txHashes []common.Hash) ([]*types.Receipt, error) {
result := make([]*types.Receipt, len(rawReceipts))
totalIndex := uint(0)
prevCumulativeGasUsed := uint64(0)
for i, r := range rawReceipts {
var x types.Receipt
if err := x.UnmarshalBinary(r); err != nil {
return nil, fmt.Errorf("failed to decode receipt %d: %w", i, err)
}
x.TxHash = txHashes[i]
x.BlockHash = block.Hash
x.BlockNumber = new(big.Int).SetUint64(block.Number)
x.TransactionIndex = uint(i)
x.GasUsed = x.CumulativeGasUsed - prevCumulativeGasUsed
// contract address meta-data is not computed.
prevCumulativeGasUsed = x.CumulativeGasUsed
for _, l := range x.Logs {
l.BlockNumber = block.Number
l.TxHash = x.TxHash
l.TxIndex = uint(i)
l.BlockHash = block.Hash
l.Index = totalIndex
totalIndex += 1
}
result[i] = &x
}
return result, nil
}
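A hedged sketch (not part of this diff) of the per-receipt gas re-derivation that DecodeRawReceipts performs: two minimal legacy receipts are round-tripped through EncodeReceipts/DecodeRawReceipts and GasUsed is recovered from the cumulative values. All concrete numbers and hashes are made up for illustration.

package eth_test

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"

	"github.com/ethereum-optimism/optimism/op-node/eth"
)

func ExampleDecodeRawReceipts() {
	receipts := []*types.Receipt{
		{Status: types.ReceiptStatusSuccessful, CumulativeGasUsed: 21_000},
		{Status: types.ReceiptStatusSuccessful, CumulativeGasUsed: 63_000},
	}
	raw, err := eth.EncodeReceipts(receipts)
	if err != nil {
		panic(err)
	}
	block := eth.BlockID{Hash: common.Hash{0x01}, Number: 100}
	txHashes := []common.Hash{{0xaa}, {0xbb}}
	decoded, err := eth.DecodeRawReceipts(block, raw, txHashes)
	if err != nil {
		panic(err)
	}
	// GasUsed is the difference of consecutive CumulativeGasUsed values.
	fmt.Println(decoded[0].GasUsed, decoded[1].GasUsed)
	// Output: 21000 42000
}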
package eth
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
// EncodeTransactions encodes a list of transactions into opaque transactions.
func EncodeTransactions(elems []*types.Transaction) ([]hexutil.Bytes, error) {
out := make([]hexutil.Bytes, len(elems))
for i, el := range elems {
dat, err := el.MarshalBinary()
if err != nil {
return nil, fmt.Errorf("failed to marshal tx %d: %w", i, err)
}
out[i] = dat
}
return out, nil
}
// DecodeTransactions decodes a list of opaque transactions into transactions.
func DecodeTransactions(data []hexutil.Bytes) ([]*types.Transaction, error) {
dest := make([]*types.Transaction, len(data))
for i := range dest {
var x types.Transaction
if err := x.UnmarshalBinary(data[i]); err != nil {
return nil, fmt.Errorf("failed to unmarshal tx %d: %w", i, err)
}
dest[i] = &x
}
return dest, nil
}
// TransactionsToHashes computes the transaction-hash for every transaction in the input.
func TransactionsToHashes(elems []*types.Transaction) []common.Hash {
out := make([]common.Hash, len(elems))
for i, el := range elems {
out[i] = el.Hash()
}
return out
}
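A hedged sketch (not in the diff) of a round trip through EncodeTransactions / DecodeTransactions, with TransactionsToHashes applied to the result; the transaction contents are arbitrary illustration values.

package eth_test

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"

	"github.com/ethereum-optimism/optimism/op-node/eth"
)

func ExampleDecodeTransactions() {
	tx := types.NewTx(&types.LegacyTx{
		Nonce:    1,
		Gas:      21_000,
		GasPrice: big.NewInt(1),
		Value:    big.NewInt(0),
		V:        new(big.Int), R: new(big.Int), S: new(big.Int),
	})
	opaque, err := eth.EncodeTransactions([]*types.Transaction{tx})
	if err != nil {
		panic(err)
	}
	decoded, err := eth.DecodeTransactions(opaque)
	if err != nil {
		panic(err)
	}
	// The decoded transaction hashes to the same value as the original.
	fmt.Println(eth.TransactionsToHashes(decoded)[0] == tx.Hash())
	// Output: true
}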
@@ -256,28 +256,10 @@ func init() {
}

func CheckRequired(ctx *cli.Context) error {
-	l1NodeAddr := ctx.GlobalString(L1NodeAddr.Name)
-	if l1NodeAddr == "" {
-		return fmt.Errorf("flag %s is required", L1NodeAddr.Name)
-	}
-	l2EngineAddr := ctx.GlobalString(L2EngineAddr.Name)
-	if l2EngineAddr == "" {
-		return fmt.Errorf("flag %s is required", L2EngineAddr.Name)
-	}
-	rollupConfig := ctx.GlobalString(RollupConfig.Name)
-	network := ctx.GlobalString(Network.Name)
-	if rollupConfig == "" && network == "" {
-		return fmt.Errorf("flag %s or %s is required", RollupConfig.Name, Network.Name)
-	}
-	if rollupConfig != "" && network != "" {
-		return fmt.Errorf("cannot specify both %s and %s", RollupConfig.Name, Network.Name)
-	}
-	rpcListenAddr := ctx.GlobalString(RPCListenAddr.Name)
-	if rpcListenAddr == "" {
-		return fmt.Errorf("flag %s is required", RPCListenAddr.Name)
-	}
-	if !ctx.GlobalIsSet(RPCListenPort.Name) {
-		return fmt.Errorf("flag %s is required", RPCListenPort.Name)
+	for _, f := range requiredFlags {
+		if !ctx.GlobalIsSet(f.GetName()) {
+			return fmt.Errorf("flag %s is required", f.GetName())
+		}
	}
	return nil
}
@@ -9,7 +9,6 @@ import (
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/log"

-	"github.com/ethereum-optimism/optimism/op-bindings/bindings"
	"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
	"github.com/ethereum-optimism/optimism/op-node/eth"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
@@ -115,12 +114,7 @@ func (n *nodeAPI) OutputAtBlock(ctx context.Context, number hexutil.Uint64) (*et
	}

	var l2OutputRootVersion eth.Bytes32 // it's zero for now
-	l2OutputRoot, err := rollup.ComputeL2OutputRoot(&bindings.TypesOutputRootProof{
-		Version:                  l2OutputRootVersion,
-		StateRoot:                head.Root(),
-		MessagePasserStorageRoot: proof.StorageHash,
-		LatestBlockhash:          head.Hash(),
-	})
+	l2OutputRoot, err := rollup.ComputeL2OutputRootV0(head, proof.StorageHash)
	if err != nil {
		n.log.Error("Error computing L2 output root, nil ptr passed to hashing function")
		return nil, err
......
@@ -17,6 +17,11 @@ import (
	"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
)

+type attributesWithParent struct {
+	attributes *eth.PayloadAttributes
+	parent     eth.L2BlockRef
+}
+
type NextAttributesProvider interface {
	Origin() eth.L1BlockRef
	NextAttributes(context.Context, eth.L2BlockRef) (*eth.PayloadAttributes, error)
@@ -76,6 +81,10 @@ const maxUnsafePayloadsMemory = 500 * 1024 * 1024
// And then we add 1 to make pruning easier by leaving room for a new item without pruning the 32*4.
const finalityLookback = 4*32 + 1

+// finalityDelay is the number of L1 blocks to traverse before trying to finalize L2 blocks again.
+// We do not want to do this too often, since it requires fetching a L1 block by number, so no cache data.
+const finalityDelay = 64
+
type FinalityData struct {
	// The last L2 block that was fully derived and inserted into the L2 engine while processing this L1 block.
	L2Block eth.L2BlockRef
@@ -102,12 +111,16 @@ type EngineQueue struct {
	// This update may repeat if the engine returns a temporary error.
	needForkchoiceUpdate bool

+	// finalizedL1 is the currently perceived finalized L1 block.
+	// This may be ahead of the current traversed origin when syncing.
	finalizedL1 eth.L1BlockRef
+	// triedFinalizeAt tracks at which origin we last tried to finalize during sync.
+	triedFinalizeAt eth.L1BlockRef

	// The queued-up attributes
-	safeAttributesParent eth.L2BlockRef
-	safeAttributes       *eth.PayloadAttributes
-	unsafePayloads       *PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps and duplicates
+	safeAttributes *attributesWithParent
+	unsafePayloads *PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps and duplicates

	// Tracks which L2 blocks where last derived from which L1 block. At most finalityLookback large.
	finalityData []FinalityData
@@ -171,17 +184,23 @@ func (eq *EngineQueue) Finalize(l1Origin eth.L1BlockRef) {
		eq.log.Error("ignoring old L1 finalized block signal! Is the L1 provider corrupted?", "prev_finalized_l1", eq.finalizedL1, "signaled_finalized_l1", l1Origin)
		return
	}
-	// Perform a safety check: the L1 finalization signal is only accepted if we previously processed the L1 block.
-	// This prevents a corrupt L1 provider from tricking us in recognizing a L1 block inconsistent with the L1 chain we are on.
-	// Missing a finality signal due to empty buffer is fine, it will finalize when the buffer is filled again.
+
+	// remember the L1 finalization signal
+	eq.finalizedL1 = l1Origin
+
+	// Sanity check: we only try to finalize L2 immediately, without fetching additional data,
+	// if we are on the same chain as the signal.
+	// If we are on a different chain, the signal will be ignored,
+	// and tryFinalizeL1Origin() will eventually detect that we are on the wrong chain,
+	// if not resetting due to reorg elsewhere already.
	for _, fd := range eq.finalityData {
		if fd.L1Block == l1Origin.ID() {
-			eq.finalizedL1 = l1Origin
			eq.tryFinalizeL2()
			return
		}
	}
-	eq.log.Warn("ignoring finalization signal for unknown L1 block, waiting for new L1 blocks in buffer", "prev_finalized_l1", eq.finalizedL1, "signaled_finalized_l1", l1Origin)
+
+	eq.log.Info("received L1 finality signal, but missing data for immediate L2 finalization", "prev_finalized_l1", eq.finalizedL1, "signaled_finalized_l1", l1Origin)
}

// FinalizedL1 identifies the L1 chain (incl.) that included and/or produced all the finalized L2 blocks.
@@ -217,14 +236,20 @@ func (eq *EngineQueue) Step(ctx context.Context) error {
	}
	eq.origin = newOrigin
	eq.postProcessSafeL2() // make sure we track the last L2 safe head for every new L1 block
+	// try to finalize the L2 blocks we have synced so far (no-op if L1 finality is behind)
+	if err := eq.tryFinalizePastL2Blocks(ctx); err != nil {
+		return err
+	}
	if next, err := eq.prev.NextAttributes(ctx, eq.safeHead); err == io.EOF {
		outOfData = true
	} else if err != nil {
		return err
	} else {
-		eq.safeAttributes = next
-		eq.safeAttributesParent = eq.safeHead
-		eq.log.Debug("Adding next safe attributes", "safe_head", eq.safeHead, "next", eq.safeAttributes)
+		eq.safeAttributes = &attributesWithParent{
+			attributes: next,
+			parent:     eq.safeHead,
+		}
+		eq.log.Debug("Adding next safe attributes", "safe_head", eq.safeHead, "next", next)
		return NotEnoughData
	}
@@ -271,6 +296,38 @@ func (eq *EngineQueue) verifyNewL1Origin(ctx context.Context, newOrigin eth.L1Bl
	return nil
}

+func (eq *EngineQueue) tryFinalizePastL2Blocks(ctx context.Context) error {
+	if eq.finalizedL1 == (eth.L1BlockRef{}) {
+		return nil
+	}
+
+	// If the L1 is finalized beyond the point we are traversing (e.g. during sync),
+	// then we should check if we can finalize this L1 block we are traversing.
+	// Otherwise, nothing to act on here, we will finalize later on a new finality signal matching the recent history.
+	if eq.finalizedL1.Number < eq.origin.Number {
+		return nil
+	}
+
+	// If we recently tried finalizing, then don't try again just yet, but traverse more of L1 first.
+	if eq.triedFinalizeAt != (eth.L1BlockRef{}) && eq.origin.Number <= eq.triedFinalizeAt.Number+finalityDelay {
+		return nil
+	}
+
+	eq.log.Info("processing L1 finality information", "l1_finalized", eq.finalizedL1, "l1_origin", eq.origin, "previous", eq.triedFinalizeAt)
+
+	// Sanity check we are indeed on the finalizing chain, and not stuck on something else.
+	// We assume that the block-by-number query is consistent with the previously received finalized chain signal
+	ref, err := eq.l1Fetcher.L1BlockRefByNumber(ctx, eq.origin.Number)
+	if err != nil {
+		return NewTemporaryError(fmt.Errorf("failed to check if on finalizing L1 chain: %w", err))
+	}
+	if ref.Hash != eq.origin.Hash {
+		return NewResetError(fmt.Errorf("need to reset, we are on %s, not on the finalizing L1 chain %s (towards %s)", eq.origin, ref, eq.finalizedL1))
+	}
+	eq.tryFinalizeL2()
+	return nil
+}
+
// tryFinalizeL2 traverses the past L1 blocks, checks if any has been finalized,
// and then marks the latest fully derived L2 block from this as finalized,
// or defaults to the current finalized L2 block.
@@ -278,6 +335,7 @@ func (eq *EngineQueue) tryFinalizeL2() {
	if eq.finalizedL1 == (eth.L1BlockRef{}) {
		return // if no L1 information is finalized yet, then skip this
	}
+	eq.triedFinalizeAt = eq.origin
	// default to keep the same finalized block
	finalizedL2 := eq.finalized
	// go through the latest inclusion data, and find the last L2 block that was derived from a finalized L1 block
@@ -430,15 +488,19 @@ func (eq *EngineQueue) tryNextSafeAttributes(ctx context.Context) error {
		return nil
	}
	// validate the safe attributes before processing them. The engine may have completed processing them through other means.
-	if eq.safeHead != eq.safeAttributesParent {
-		if eq.safeHead.ParentHash != eq.safeAttributesParent.Hash {
-			return NewResetError(fmt.Errorf("safe head changed to %s with parent %s, conflicting with queued safe attributes on top of %s",
-				eq.safeHead, eq.safeHead.ParentID(), eq.safeAttributesParent))
+	if eq.safeHead != eq.safeAttributes.parent {
+		// Previously the attribute's parent was the safe head. If the safe head advances so safe head's parent is the same as the
+		// attribute's parent then we need to cancel the attributes.
+		if eq.safeHead.ParentHash == eq.safeAttributes.parent.Hash {
+			eq.log.Warn("queued safe attributes are stale, safehead progressed",
+				"safe_head", eq.safeHead, "safe_head_parent", eq.safeHead.ParentID(), "attributes_parent", eq.safeAttributes.parent)
+			eq.safeAttributes = nil
+			return nil
		}
-		eq.log.Warn("queued safe attributes are stale, safe-head progressed",
-			"safe_head", eq.safeHead, "safe_head_parent", eq.safeHead.ParentID(), "attributes_parent", eq.safeAttributesParent)
-		eq.safeAttributes = nil
-		return nil
+		// If something other than a simple advance occurred, perform a full reset
+		return NewResetError(fmt.Errorf("safe head changed to %s with parent %s, conflicting with queued safe attributes on top of %s",
+			eq.safeHead, eq.safeHead.ParentID(), eq.safeAttributes.parent))
	}
	if eq.safeHead.Number < eq.unsafeHead.Number {
		return eq.consolidateNextSafeAttributes(ctx)
@@ -468,7 +530,7 @@ func (eq *EngineQueue) consolidateNextSafeAttributes(ctx context.Context) error
		}
		return NewTemporaryError(fmt.Errorf("failed to get existing unsafe payload to compare against derived attributes from L1: %w", err))
	}
-	if err := AttributesMatchBlock(eq.safeAttributes, eq.safeHead.Hash, payload, eq.log); err != nil {
+	if err := AttributesMatchBlock(eq.safeAttributes.attributes, eq.safeHead.Hash, payload, eq.log); err != nil {
		eq.log.Warn("L2 reorg: existing unsafe block does not match derived attributes from L1", "err", err, "unsafe", eq.unsafeHead, "safe", eq.safeHead)
		// geth cannot wind back a chain without reorging to a new, previously non-canonical, block
		return eq.forceNextSafeAttributes(ctx)
@@ -493,7 +555,7 @@ func (eq *EngineQueue) forceNextSafeAttributes(ctx context.Context) error {
	if eq.safeAttributes == nil {
		return nil
	}
-	attrs := eq.safeAttributes
+	attrs := eq.safeAttributes.attributes
	errType, err := eq.StartPayload(ctx, eq.safeHead, attrs, true)
	if err == nil {
		_, errType, err = eq.ConfirmPayload(ctx)
@@ -664,10 +726,12 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System
	eq.log.Debug("Reset engine queue", "safeHead", safe, "unsafe", unsafe, "safe_timestamp", safe.Time, "unsafe_timestamp", unsafe.Time, "l1Origin", l1Origin)
	eq.unsafeHead = unsafe
	eq.safeHead = safe
+	eq.safeAttributes = nil
	eq.finalized = finalized
	eq.resetBuildingState()
	eq.needForkchoiceUpdate = true
	eq.finalityData = eq.finalityData[:0]
+	// note: finalizedL1 and triedFinalizeAt do not reset, since these do not change between reorgs.
	// note: we do not clear the unsafe payloads queue; if the payloads are not applicable anymore the parent hash checks will clear out the old payloads.
	eq.origin = pipelineOrigin
	eq.sysCfg = l1Cfg
......
@@ -15,6 +15,7 @@ import (
	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/eth"
+	"github.com/ethereum-optimism/optimism/op-node/metrics"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-node/testlog"
	"github.com/ethereum-optimism/optimism/op-node/testutils"
@@ -1007,3 +1008,102 @@ func TestBlockBuildingRace(t *testing.T) {
	l1F.AssertExpectations(t)
	eng.AssertExpectations(t)
}
func TestResetLoop(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
eng := &testutils.MockEngine{}
l1F := &testutils.MockL1Source{}
rng := rand.New(rand.NewSource(1234))
refA := testutils.RandomBlockRef(rng)
refA0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 0,
ParentHash: common.Hash{},
Time: refA.Time,
L1Origin: refA.ID(),
SequenceNumber: 0,
}
gasLimit := eth.Uint64Quantity(20_000_000)
cfg := &rollup.Config{
Genesis: rollup.Genesis{
L1: refA.ID(),
L2: refA0.ID(),
L2Time: refA0.Time,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.Address{42},
Overhead: [32]byte{123},
Scalar: [32]byte{42},
GasLimit: 20_000_000,
},
},
BlockTime: 1,
SeqWindowSize: 2,
}
refA1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA0.Number + 1,
ParentHash: refA0.Hash,
Time: refA0.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 1,
}
refA2 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA1.Number + 1,
ParentHash: refA1.Hash,
Time: refA1.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 2,
}
attrs := &eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(refA2.Time),
PrevRandao: eth.Bytes32{},
SuggestedFeeRecipient: common.Address{},
Transactions: nil,
NoTxPool: false,
GasLimit: &gasLimit,
}
eng.ExpectL2BlockRefByLabel(eth.Finalized, refA0, nil)
eng.ExpectL2BlockRefByLabel(eth.Safe, refA1, nil)
eng.ExpectL2BlockRefByLabel(eth.Unsafe, refA2, nil)
eng.ExpectL2BlockRefByHash(refA1.Hash, refA1, nil)
eng.ExpectL2BlockRefByHash(refA0.Hash, refA0, nil)
eng.ExpectSystemConfigByL2Hash(refA0.Hash, cfg.Genesis.SystemConfig, nil)
l1F.ExpectL1BlockRefByNumber(refA.Number, refA, nil)
l1F.ExpectL1BlockRefByHash(refA.Hash, refA, nil)
l1F.ExpectL1BlockRefByHash(refA.Hash, refA, nil)
prev := &fakeAttributesQueue{origin: refA, attrs: attrs}
eq := NewEngineQueue(logger, cfg, eng, metrics.NoopMetrics, prev, l1F)
eq.unsafeHead = refA2
eq.safeHead = refA1
eq.finalized = refA0
// Queue up the safe attributes
require.Nil(t, eq.safeAttributes)
require.ErrorIs(t, eq.Step(context.Background()), NotEnoughData)
require.NotNil(t, eq.safeAttributes)
// Perform the reset
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
// Expect a FCU after the reset
preFc := &eth.ForkchoiceState{
HeadBlockHash: refA2.Hash,
SafeBlockHash: refA0.Hash,
FinalizedBlockHash: refA0.Hash,
}
eng.ExpectForkchoiceUpdate(preFc, nil, nil, nil)
require.NoError(t, eq.Step(context.Background()), "clean forkchoice state after reset")
// Crux of the test. Should be in a valid state after the reset.
require.ErrorIs(t, eq.Step(context.Background()), NotEnoughData, "Should be able to step after a reset")
l1F.AssertExpectations(t)
eng.AssertExpectations(t)
}
@@ -24,3 +24,13 @@ func ComputeL2OutputRoot(proofElements *bindings.TypesOutputRootProof) (eth.Byte
	)
	return eth.Bytes32(digest), nil
}
func ComputeL2OutputRootV0(block eth.BlockInfo, storageRoot [32]byte) (eth.Bytes32, error) {
	var l2OutputRootVersion eth.Bytes32 // it's zero for now
	return ComputeL2OutputRoot(&bindings.TypesOutputRootProof{
		Version:                  l2OutputRootVersion,
		StateRoot:                block.Root(),
		MessagePasserStorageRoot: storageRoot,
		LatestBlockhash:          block.Hash(),
	})
}
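For illustration only, a hedged standalone sketch of the version-0 output root preimage as I understand it: keccak256 over the concatenation version ++ stateRoot ++ messagePasserStorageRoot ++ latestBlockHash. The real computation lives in rollup.ComputeL2OutputRoot; this helper just shows the byte layout with made-up inputs.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// outputRootV0 is a hypothetical helper mirroring the v0 commitment layout.
func outputRootV0(stateRoot, storageRoot, blockHash common.Hash) common.Hash {
	var version [32]byte // version 0
	preimage := make([]byte, 0, 128)
	preimage = append(preimage, version[:]...)
	preimage = append(preimage, stateRoot[:]...)
	preimage = append(preimage, storageRoot[:]...)
	preimage = append(preimage, blockHash[:]...)
	return crypto.Keccak256Hash(preimage)
}

func main() {
	root := outputRootV0(common.Hash{0x01}, common.Hash{0x02}, common.Hash{0x03})
	fmt.Println(root.Hex())
}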
@@ -24,6 +24,7 @@ type IterativeBatchCall[K any, V any] struct {
	makeRequest func(K) (V, rpc.BatchElem)
	getBatch    BatchCallContextFn
+	getSingle   CallContextFn

	requestsValues []V
	scheduled      chan rpc.BatchElem
@@ -35,6 +36,7 @@ func NewIterativeBatchCall[K any, V any](
	requestsKeys []K,
	makeRequest func(K) (V, rpc.BatchElem),
	getBatch BatchCallContextFn,
+	getSingle CallContextFn,
	batchSize int) *IterativeBatchCall[K, V] {

	if len(requestsKeys) < batchSize {
@@ -47,6 +49,7 @@ func NewIterativeBatchCall[K any, V any](
	out := &IterativeBatchCall[K, V]{
		completed:    0,
		getBatch:     getBatch,
+		getSingle:    getSingle,
		requestsKeys: requestsKeys,
		batchSize:    batchSize,
		makeRequest:  makeRequest,
@@ -84,6 +87,11 @@ func (ibc *IterativeBatchCall[K, V]) Fetch(ctx context.Context) error {
	ibc.resetLock.RLock()
	defer ibc.resetLock.RUnlock()

+	// return early if context is Done
+	if ctx.Err() != nil {
+		return ctx.Err()
+	}
+
	// collect a batch from the requests channel
	batch := make([]rpc.BatchElem, 0, ibc.batchSize)
	// wait for first element
@@ -119,11 +127,23 @@ func (ibc *IterativeBatchCall[K, V]) Fetch(ctx context.Context) error {
			break
		}
	}
-	if err := ibc.getBatch(ctx, batch); err != nil {
-		for _, r := range batch {
-			ibc.scheduled <- r
-		}
-		return fmt.Errorf("failed batch-retrieval: %w", err)
-	}
+	if len(batch) == 0 {
+		return nil
+	}
+	if ibc.batchSize == 1 {
+		first := batch[0]
+		if err := ibc.getSingle(ctx, &first.Result, first.Method, first.Args...); err != nil {
+			ibc.scheduled <- first
+			return err
+		}
+	} else {
+		if err := ibc.getBatch(ctx, batch); err != nil {
+			for _, r := range batch {
+				ibc.scheduled <- r
+			}
+			return fmt.Errorf("failed batch-retrieval: %w", err)
+		}
+	}

	var result error
	for _, elem := range batch {
......
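A hedged usage sketch (not part of this diff) of the new single-call fallback: with batchSize 1, Fetch goes through CallContext instead of BatchCallContext. Only the constructor shape comes from this change; the package placement, the helper name, and the "eth_getTransactionReceipt" request are placeholder assumptions.

package sources // placement is illustrative only

import (
	"context"
	"io"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
)

// fetchReceiptsUnbatched is a hypothetical helper: batchSize 1 makes the
// iterator use the plain CallContext path added in this change.
func fetchReceiptsUnbatched(ctx context.Context, client *rpc.Client, hashes []common.Hash) ([]*types.Receipt, error) {
	makeReq := func(h common.Hash) (*types.Receipt, rpc.BatchElem) {
		out := new(types.Receipt)
		return out, rpc.BatchElem{Method: "eth_getTransactionReceipt", Args: []any{h}, Result: out}
	}
	call := NewIterativeBatchCall[common.Hash, *types.Receipt](
		hashes, makeReq,
		client.BatchCallContext, // used for real batches
		client.CallContext,      // used when batchSize == 1 (the new fallback path)
		1,
	)
	for {
		if err := call.Fetch(ctx); err == io.EOF {
			break // all requests completed
		} else if err != nil {
			// a production caller could retry here; failed elements are re-scheduled
			return nil, err
		}
	}
	return call.Result()
}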
@@ -34,7 +34,8 @@ type batchTestCase struct {
	batchSize int

	batchCalls []batchCall
+	singleCalls []elemCall

	mock.Mock
}
@@ -53,7 +54,14 @@ func (tc *batchTestCase) GetBatch(ctx context.Context, b []rpc.BatchElem) error
	if ctx.Err() != nil {
		return ctx.Err()
	}
-	return tc.Mock.MethodCalled("get", b).Get(0).([]error)[0]
+	return tc.Mock.MethodCalled("getBatch", b).Get(0).([]error)[0]
+}
+
+func (tc *batchTestCase) GetSingle(ctx context.Context, result any, method string, args ...any) error {
+	if ctx.Err() != nil {
+		return ctx.Err()
+	}
+	return tc.Mock.MethodCalled("getSingle", (*(result.(*interface{}))).(*string), method, args[0]).Get(0).([]error)[0]
}

var mockErr = errors.New("mockErr")
@@ -64,7 +72,7 @@ func (tc *batchTestCase) Run(t *testing.T) {
		keys[i] = i
	}

-	makeMock := func(bci int, bc batchCall) func(args mock.Arguments) {
+	makeBatchMock := func(bc batchCall) func(args mock.Arguments) {
		return func(args mock.Arguments) {
			batch := args[0].([]rpc.BatchElem)
			for i, elem := range batch {
@@ -83,7 +91,7 @@ func (tc *batchTestCase) Run(t *testing.T) {
		}
	}
	// mock all the results of the batch calls
-	for bci, bc := range tc.batchCalls {
+	for _, bc := range tc.batchCalls {
		var batch []rpc.BatchElem
		for _, elem := range bc.elems {
			batch = append(batch, rpc.BatchElem{
@@ -94,10 +102,30 @@ func (tc *batchTestCase) Run(t *testing.T) {
			})
		}
		if len(bc.elems) > 0 {
-			tc.On("get", batch).Once().Run(makeMock(bci, bc)).Return([]error{bc.rpcErr}) // wrap to preserve nil as type of error
+			tc.On("getBatch", batch).Once().Run(makeBatchMock(bc)).Return([]error{bc.rpcErr}) // wrap to preserve nil as type of error
		}
	}
+	makeSingleMock := func(ec elemCall) func(args mock.Arguments) {
+		return func(args mock.Arguments) {
+			result := args[0].(*string)
+			id := args[2].(int)
+			require.Equal(t, ec.id, id, "element should match expected element")
+			if ec.err {
+				*result = ""
+			} else {
+				*result = fmt.Sprintf("mock result id %d", id)
+			}
+		}
+	}
+	// mock the results of unbatched calls
+	for _, ec := range tc.singleCalls {
+		var ret error
+		if ec.err {
+			ret = mockErr
+		}
+		tc.On("getSingle", new(string), "testing_foobar", ec.id).Once().Run(makeSingleMock(ec)).Return([]error{ret})
+	}
-	iter := NewIterativeBatchCall[int, *string](keys, makeTestRequest, tc.GetBatch, tc.batchSize)
+	iter := NewIterativeBatchCall[int, *string](keys, makeTestRequest, tc.GetBatch, tc.GetSingle, tc.batchSize)
	for i, bc := range tc.batchCalls {
		ctx := context.Background()
		if bc.makeCtx != nil {
@@ -116,6 +144,20 @@ func (tc *batchTestCase) Run(t *testing.T) {
			}
		}
	}
+	for i, ec := range tc.singleCalls {
+		ctx := context.Background()
+		err := iter.Fetch(ctx)
+		if err == io.EOF {
+			require.Equal(t, i, len(tc.singleCalls)-1, "EOF only on last call")
+		} else {
+			require.False(t, iter.Complete())
+			if ec.err {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+			}
+		}
+	}
	require.True(t, iter.Complete(), "batch iter should be complete after the expected calls")
	out, err := iter.Result()
	require.NoError(t, err)
@@ -154,6 +196,37 @@ func TestFetchBatched(t *testing.T) {
			},
		},
	},
{
name: "single element",
items: 1,
batchSize: 4,
singleCalls: []elemCall{
{id: 0, err: false},
},
},
{
name: "unbatched",
items: 4,
batchSize: 1,
singleCalls: []elemCall{
{id: 0, err: false},
{id: 1, err: false},
{id: 2, err: false},
{id: 3, err: false},
},
},
{
name: "unbatched with retry",
items: 4,
batchSize: 1,
singleCalls: []elemCall{
{id: 0, err: false},
{id: 1, err: true},
{id: 2, err: false},
{id: 3, err: false},
{id: 1, err: false},
},
},
		{
			name:      "split",
			items:     5,
@@ -240,7 +313,7 @@ func TestFetchBatched(t *testing.T) {
		},
		{
			name:      "context timeout",
-			items:     1,
+			items:     2,
			batchSize: 3,
			batchCalls: []batchCall{
				{
@@ -255,6 +328,7 @@ func TestFetchBatched(t *testing.T) {
				{
					elems: []elemCall{
						{id: 0, err: false},
+						{id: 1, err: false},
					},
					err: "",
				},
......
@@ -356,10 +356,7 @@ func (s *EthClient) FetchReceipts(ctx context.Context, blockHash common.Hash) (e
	if v, ok := s.receiptsCache.Get(blockHash); ok {
		job = v.(*receiptsFetchingJob)
	} else {
-		txHashes := make([]common.Hash, len(txs))
-		for i := 0; i < len(txs); i++ {
-			txHashes[i] = txs[i].Hash()
-		}
+		txHashes := eth.TransactionsToHashes(txs)
		job = NewReceiptsFetchingJob(s, s.client, s.maxBatchSize, eth.ToBlockID(info), info.ReceiptHash(), txHashes)
		s.receiptsCache.Add(blockHash, job)
	}
......
...@@ -4,7 +4,6 @@ import ( ...@@ -4,7 +4,6 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"math/big"
"sync" "sync"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -373,6 +372,7 @@ func (job *receiptsFetchingJob) runFetcher(ctx context.Context) error { ...@@ -373,6 +372,7 @@ func (job *receiptsFetchingJob) runFetcher(ctx context.Context) error {
job.txHashes, job.txHashes,
makeReceiptRequest, makeReceiptRequest,
job.client.BatchCallContext, job.client.BatchCallContext,
job.client.CallContext,
job.maxBatchSize, job.maxBatchSize,
) )
} }
...@@ -419,29 +419,7 @@ func (job *receiptsFetchingJob) runAltMethod(ctx context.Context, m ReceiptsFetc ...@@ -419,29 +419,7 @@ func (job *receiptsFetchingJob) runAltMethod(ctx context.Context, m ReceiptsFetc
err = job.client.CallContext(ctx, &rawReceipts, "debug_getRawReceipts", job.block.Hash) err = job.client.CallContext(ctx, &rawReceipts, "debug_getRawReceipts", job.block.Hash)
if err == nil { if err == nil {
if len(rawReceipts) == len(job.txHashes) { if len(rawReceipts) == len(job.txHashes) {
result = make([]*types.Receipt, len(rawReceipts)) result, err = eth.DecodeRawReceipts(job.block, rawReceipts, job.txHashes)
totalIndex := uint(0)
prevCumulativeGasUsed := uint64(0)
for i, r := range rawReceipts {
var x types.Receipt
_ = x.UnmarshalBinary(r) // safe to ignore, we verify receipts against the receipts hash later
x.TxHash = job.txHashes[i]
x.BlockHash = job.block.Hash
x.BlockNumber = new(big.Int).SetUint64(job.block.Number)
x.TransactionIndex = uint(i)
x.GasUsed = x.CumulativeGasUsed - prevCumulativeGasUsed
// contract address meta-data is not computed.
prevCumulativeGasUsed = x.CumulativeGasUsed
for _, l := range x.Logs {
l.BlockNumber = job.block.Number
l.TxHash = x.TxHash
l.TxIndex = uint(i)
l.BlockHash = job.block.Hash
l.Index = totalIndex
totalIndex += 1
}
result[i] = &x
}
} else { } else {
err = fmt.Errorf("got %d raw receipts, but expected %d", len(rawReceipts), len(job.txHashes)) err = fmt.Errorf("got %d raw receipts, but expected %d", len(rawReceipts), len(job.txHashes))
} }
......
...@@ -83,8 +83,8 @@ func (m *MockEthClient) PayloadByNumber(ctx context.Context, n uint64) (*eth.Exe ...@@ -83,8 +83,8 @@ func (m *MockEthClient) PayloadByNumber(ctx context.Context, n uint64) (*eth.Exe
return out[0].(*eth.ExecutionPayload), *out[1].(*error) return out[0].(*eth.ExecutionPayload), *out[1].(*error)
} }
func (m *MockEthClient) ExpectPayloadByNumber(hash common.Hash, payload *eth.ExecutionPayload, err error) { func (m *MockEthClient) ExpectPayloadByNumber(n uint64, payload *eth.ExecutionPayload, err error) {
m.Mock.On("PayloadByNumber", hash).Once().Return(payload, &err) m.Mock.On("PayloadByNumber", n).Once().Return(payload, &err)
} }
func (m *MockEthClient) PayloadByLabel(ctx context.Context, label eth.BlockLabel) (*eth.ExecutionPayload, error) { func (m *MockEthClient) PayloadByLabel(ctx context.Context, label eth.BlockLabel) (*eth.ExecutionPayload, error) {
......
...@@ -18,17 +18,24 @@ type Derivation interface { ...@@ -18,17 +18,24 @@ type Derivation interface {
SafeL2Head() eth.L2BlockRef SafeL2Head() eth.L2BlockRef
} }
type L2Source interface {
derive.Engine
L2OutputRoot() (eth.Bytes32, error)
}
type Driver struct { type Driver struct {
logger log.Logger logger log.Logger
pipeline Derivation pipeline Derivation
l2OutputRoot func() (eth.Bytes32, error)
} }
func NewDriver(logger log.Logger, cfg *rollup.Config, l1Source derive.L1Fetcher, l2Source derive.Engine) *Driver { func NewDriver(logger log.Logger, cfg *rollup.Config, l1Source derive.L1Fetcher, l2Source L2Source) *Driver {
pipeline := derive.NewDerivationPipeline(logger, cfg, l1Source, l2Source, metrics.NoopMetrics) pipeline := derive.NewDerivationPipeline(logger, cfg, l1Source, l2Source, metrics.NoopMetrics)
pipeline.Reset() pipeline.Reset()
return &Driver{ return &Driver{
logger: logger, logger: logger,
pipeline: pipeline, pipeline: pipeline,
l2OutputRoot: l2Source.L2OutputRoot,
} }
} }
...@@ -51,3 +58,13 @@ func (d *Driver) Step(ctx context.Context) error { ...@@ -51,3 +58,13 @@ func (d *Driver) Step(ctx context.Context) error {
func (d *Driver) SafeHead() eth.L2BlockRef { func (d *Driver) SafeHead() eth.L2BlockRef {
return d.pipeline.SafeL2Head() return d.pipeline.SafeL2Head()
} }
func (d *Driver) ValidateClaim(claimedOutputRoot eth.Bytes32) bool {
outputRoot, err := d.l2OutputRoot()
if err != nil {
d.logger.Info("Failed to calculate L2 output root", "err", err)
return false
}
d.logger.Info("Derivation complete", "head", d.SafeHead(), "output", outputRoot, "claim", claimedOutputRoot)
return claimedOutputRoot == outputRoot
}
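ValidateClaim is the program's final verdict: once derivation has run as far as the available L1 data allows, the driver recomputes the L2 output root and compares it to the disputed claim. A minimal sketch of how a caller might drive this, assuming the driver package path shown in the import and that Step returns io.EOF once derivation is exhausted (both are assumptions, not confirmed by this diff):

package main

import (
	"context"
	"errors"
	"io"

	"github.com/ethereum-optimism/optimism/op-node/eth"
	cldr "github.com/ethereum-optimism/optimism/op-program/client/driver" // assumed package path
)

// runToClaim is a hypothetical helper: step derivation until io.EOF signals
// that the pipeline has consumed all available L1 data, then check the claimed
// output root against the derived one.
func runToClaim(ctx context.Context, d *cldr.Driver, claim eth.Bytes32) error {
	for {
		err := d.Step(ctx)
		if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			return err
		}
	}
	if !d.ValidateClaim(claim) {
		return errors.New("claimed L2 output root does not match the derived output root")
	}
	return nil
}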
...@@ -45,6 +45,36 @@ func TestNoError(t *testing.T) { ...@@ -45,6 +45,36 @@ func TestNoError(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
} }
func TestValidateClaim(t *testing.T) {
t.Run("Valid", func(t *testing.T) {
driver := createDriver(t, io.EOF)
expected := eth.Bytes32{0x11}
driver.l2OutputRoot = func() (eth.Bytes32, error) {
return expected, nil
}
valid := driver.ValidateClaim(expected)
require.True(t, valid)
})
t.Run("Invalid", func(t *testing.T) {
driver := createDriver(t, io.EOF)
driver.l2OutputRoot = func() (eth.Bytes32, error) {
return eth.Bytes32{0x22}, nil
}
valid := driver.ValidateClaim(eth.Bytes32{0x11})
require.False(t, valid)
})
t.Run("Error", func(t *testing.T) {
driver := createDriver(t, io.EOF)
driver.l2OutputRoot = func() (eth.Bytes32, error) {
return eth.Bytes32{}, errors.New("boom")
}
valid := driver.ValidateClaim(eth.Bytes32{0x11})
require.False(t, valid)
})
}
func createDriver(t *testing.T, derivationResult error) *Driver { func createDriver(t *testing.T, derivationResult error) *Driver {
derivation := &stubDerivation{nextErr: derivationResult} derivation := &stubDerivation{nextErr: derivationResult}
return &Driver{ return &Driver{
......
package l1
import (
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
// The cache size is quite high because retrieving data from the pre-image oracle can be expensive.
const cacheSize = 2000
// CachingOracle is an Oracle implementation that delegates to another Oracle and caches all results.
type CachingOracle struct {
oracle Oracle
blocks *simplelru.LRU[common.Hash, eth.BlockInfo]
txs *simplelru.LRU[common.Hash, types.Transactions]
rcpts *simplelru.LRU[common.Hash, types.Receipts]
}
func NewCachingOracle(oracle Oracle) *CachingOracle {
blockLRU, _ := simplelru.NewLRU[common.Hash, eth.BlockInfo](cacheSize, nil)
txsLRU, _ := simplelru.NewLRU[common.Hash, types.Transactions](cacheSize, nil)
rcptsLRU, _ := simplelru.NewLRU[common.Hash, types.Receipts](cacheSize, nil)
return &CachingOracle{
oracle: oracle,
blocks: blockLRU,
txs: txsLRU,
rcpts: rcptsLRU,
}
}
func (o *CachingOracle) HeaderByBlockHash(blockHash common.Hash) eth.BlockInfo {
block, ok := o.blocks.Get(blockHash)
if ok {
return block
}
block = o.oracle.HeaderByBlockHash(blockHash)
o.blocks.Add(blockHash, block)
return block
}
func (o *CachingOracle) TransactionsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Transactions) {
txs, ok := o.txs.Get(blockHash)
if ok {
return o.HeaderByBlockHash(blockHash), txs
}
block, txs := o.oracle.TransactionsByBlockHash(blockHash)
o.blocks.Add(blockHash, block)
o.txs.Add(blockHash, txs)
return block, txs
}
func (o *CachingOracle) ReceiptsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Receipts) {
rcpts, ok := o.rcpts.Get(blockHash)
if ok {
return o.HeaderByBlockHash(blockHash), rcpts
}
block, rcpts := o.oracle.ReceiptsByBlockHash(blockHash)
o.blocks.Add(blockHash, block)
o.rcpts.Add(blockHash, rcpts)
return block, rcpts
}
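The caching layer matters because the pipeline revisits the same L1 blocks repeatedly, for example when walking parent hashes or re-reading transactions while fetching receipts. A minimal wiring sketch, assuming it sits in package l1 alongside the types above (the helper name is hypothetical):

package l1

import "github.com/ethereum-optimism/optimism/op-program/preimage"

// newCachedOracle is a hypothetical helper: wrap the pre-image backed Oracle
// in a CachingOracle so repeated lookups of the same block hash hit the LRU
// instead of round-tripping through the pre-image oracle again.
func newCachedOracle(raw preimage.Oracle, hinter preimage.Hinter) Oracle {
	return NewCachingOracle(NewPreimageOracle(raw, hinter))
}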
package l1
import (
"math/rand"
"testing"
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/stretchr/testify/require"
)
// Should implement Oracle
var _ Oracle = (*CachingOracle)(nil)
func TestCachingOracle_HeaderByBlockHash(t *testing.T) {
rng := rand.New(rand.NewSource(1))
stub := newStubOracle(t)
oracle := NewCachingOracle(stub)
block := testutils.RandomBlockInfo(rng)
// Initial call retrieves from the stub
stub.blocks[block.Hash()] = block
result := oracle.HeaderByBlockHash(block.Hash())
require.Equal(t, block, result)
// Later calls should retrieve from cache
delete(stub.blocks, block.Hash())
result = oracle.HeaderByBlockHash(block.Hash())
require.Equal(t, block, result)
}
func TestCachingOracle_TransactionsByBlockHash(t *testing.T) {
rng := rand.New(rand.NewSource(1))
stub := newStubOracle(t)
oracle := NewCachingOracle(stub)
block, _ := testutils.RandomBlock(rng, 3)
// Initial call retrieves from the stub
stub.blocks[block.Hash()] = block
stub.txs[block.Hash()] = block.Transactions()
actualBlock, actualTxs := oracle.TransactionsByBlockHash(block.Hash())
require.Equal(t, block, actualBlock)
require.Equal(t, block.Transactions(), actualTxs)
// Later calls should retrieve from cache
delete(stub.blocks, block.Hash())
delete(stub.txs, block.Hash())
actualBlock, actualTxs = oracle.TransactionsByBlockHash(block.Hash())
require.Equal(t, block, actualBlock)
require.Equal(t, block.Transactions(), actualTxs)
}
func TestCachingOracle_ReceiptsByBlockHash(t *testing.T) {
rng := rand.New(rand.NewSource(1))
stub := newStubOracle(t)
oracle := NewCachingOracle(stub)
block, rcpts := testutils.RandomBlock(rng, 3)
// Initial call retrieves from the stub
stub.blocks[block.Hash()] = block
stub.rcpts[block.Hash()] = rcpts
actualBlock, actualRcpts := oracle.ReceiptsByBlockHash(block.Hash())
require.Equal(t, block, actualBlock)
require.EqualValues(t, rcpts, actualRcpts)
// Later calls should retrieve from cache
delete(stub.blocks, block.Hash())
delete(stub.rcpts, block.Hash())
actualBlock, actualRcpts = oracle.ReceiptsByBlockHash(block.Hash())
require.Equal(t, block, actualBlock)
require.EqualValues(t, rcpts, actualRcpts)
}
package l1
import (
"context"
"errors"
"fmt"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
var (
ErrNotFound = ethereum.NotFound
ErrUnknownLabel = errors.New("unknown label")
)
type OracleL1Client struct {
oracle Oracle
head eth.L1BlockRef
}
func NewOracleL1Client(logger log.Logger, oracle Oracle, l1Head common.Hash) *OracleL1Client {
head := eth.InfoToL1BlockRef(oracle.HeaderByBlockHash(l1Head))
logger.Info("L1 head loaded", "hash", head.Hash, "number", head.Number)
return &OracleL1Client{
oracle: oracle,
head: head,
}
}
func (o *OracleL1Client) L1BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L1BlockRef, error) {
if label != eth.Unsafe && label != eth.Safe && label != eth.Finalized {
return eth.L1BlockRef{}, fmt.Errorf("%w: %s", ErrUnknownLabel, label)
}
// The L1 head is pre-agreed and unchanging so it can be used for all of unsafe, safe and finalized
return o.head, nil
}
func (o *OracleL1Client) L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error) {
if number > o.head.Number {
return eth.L1BlockRef{}, fmt.Errorf("%w: block number %d", ErrNotFound, number)
}
block := o.head
for block.Number > number {
block = eth.InfoToL1BlockRef(o.oracle.HeaderByBlockHash(block.ParentHash))
}
return block, nil
}
func (o *OracleL1Client) L1BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L1BlockRef, error) {
return eth.InfoToL1BlockRef(o.oracle.HeaderByBlockHash(hash)), nil
}
func (o *OracleL1Client) InfoByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, error) {
return o.oracle.HeaderByBlockHash(hash), nil
}
func (o *OracleL1Client) FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error) {
info, rcpts := o.oracle.ReceiptsByBlockHash(blockHash)
return info, rcpts, nil
}
func (o *OracleL1Client) InfoAndTxsByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, types.Transactions, error) {
info, txs := o.oracle.TransactionsByBlockHash(hash)
return info, txs, nil
}
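OracleL1Client serves the derive.L1Fetcher interface entirely from pre-image data: the L1 head is fixed up-front, and older blocks are resolved by walking parent hashes back from that head, one header lookup per intermediate block. A small illustrative sketch of that usage (the function itself is hypothetical):

package l1

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/eth"
)

// lookupOldBlock is a hypothetical example: resolving a block number below the
// pre-agreed head walks parent hashes back from the head, which is why putting
// the CachingOracle in front of the pre-image oracle is worthwhile.
func lookupOldBlock(logger log.Logger, oracle Oracle, l1Head common.Hash, number uint64) (eth.L1BlockRef, error) {
	client := NewOracleL1Client(logger, oracle, l1Head)
	return client.L1BlockRefByNumber(context.Background(), number)
}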
package l1
import (
"context"
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
var _ derive.L1Fetcher = (*OracleL1Client)(nil)
var head = blockNum(1000)
func TestInfoByHash(t *testing.T) {
client, oracle := newClient(t)
hash := common.HexToHash("0xAABBCC")
expected := &sources.HeaderInfo{}
oracle.blocks[hash] = expected
info, err := client.InfoByHash(context.Background(), hash)
require.NoError(t, err)
require.Equal(t, expected, info)
}
func TestL1BlockRefByHash(t *testing.T) {
client, oracle := newClient(t)
hash := common.HexToHash("0xAABBCC")
header := &sources.HeaderInfo{}
oracle.blocks[hash] = header
expected := eth.InfoToL1BlockRef(header)
ref, err := client.L1BlockRefByHash(context.Background(), hash)
require.NoError(t, err)
require.Equal(t, expected, ref)
}
func TestFetchReceipts(t *testing.T) {
client, oracle := newClient(t)
hash := common.HexToHash("0xAABBCC")
expectedInfo := &sources.HeaderInfo{}
expectedReceipts := types.Receipts{
&types.Receipt{},
}
oracle.blocks[hash] = expectedInfo
oracle.rcpts[hash] = expectedReceipts
info, rcpts, err := client.FetchReceipts(context.Background(), hash)
require.NoError(t, err)
require.Equal(t, expectedInfo, info)
require.Equal(t, expectedReceipts, rcpts)
}
func TestInfoAndTxsByHash(t *testing.T) {
client, oracle := newClient(t)
hash := common.HexToHash("0xAABBCC")
expectedInfo := &sources.HeaderInfo{}
expectedTxs := types.Transactions{
&types.Transaction{},
}
oracle.blocks[hash] = expectedInfo
oracle.txs[hash] = expectedTxs
info, txs, err := client.InfoAndTxsByHash(context.Background(), hash)
require.NoError(t, err)
require.Equal(t, expectedInfo, info)
require.Equal(t, expectedTxs, txs)
}
func TestL1BlockRefByLabel(t *testing.T) {
t.Run("Unsafe", func(t *testing.T) {
client, _ := newClient(t)
ref, err := client.L1BlockRefByLabel(context.Background(), eth.Unsafe)
require.NoError(t, err)
require.Equal(t, eth.InfoToL1BlockRef(head), ref)
})
t.Run("Safe", func(t *testing.T) {
client, _ := newClient(t)
ref, err := client.L1BlockRefByLabel(context.Background(), eth.Safe)
require.NoError(t, err)
require.Equal(t, eth.InfoToL1BlockRef(head), ref)
})
t.Run("Finalized", func(t *testing.T) {
client, _ := newClient(t)
ref, err := client.L1BlockRefByLabel(context.Background(), eth.Finalized)
require.NoError(t, err)
require.Equal(t, eth.InfoToL1BlockRef(head), ref)
})
t.Run("UnknownLabel", func(t *testing.T) {
client, _ := newClient(t)
ref, err := client.L1BlockRefByLabel(context.Background(), eth.BlockLabel("unknown"))
require.ErrorIs(t, err, ErrUnknownLabel)
require.Equal(t, eth.L1BlockRef{}, ref)
})
}
func TestL1BlockRefByNumber(t *testing.T) {
t.Run("Head", func(t *testing.T) {
client, _ := newClient(t)
ref, err := client.L1BlockRefByNumber(context.Background(), head.NumberU64())
require.NoError(t, err)
require.Equal(t, eth.InfoToL1BlockRef(head), ref)
})
t.Run("AfterHead", func(t *testing.T) {
client, _ := newClient(t)
ref, err := client.L1BlockRefByNumber(context.Background(), head.NumberU64()+1)
// Must be ethereum.NotFound error so the derivation pipeline knows it has gone past the chain head
require.ErrorIs(t, err, ethereum.NotFound)
require.Equal(t, eth.L1BlockRef{}, ref)
})
t.Run("ParentOfHead", func(t *testing.T) {
client, oracle := newClient(t)
parent := blockNum(head.NumberU64() - 1)
oracle.blocks[parent.Hash()] = parent
ref, err := client.L1BlockRefByNumber(context.Background(), parent.NumberU64())
require.NoError(t, err)
require.Equal(t, eth.InfoToL1BlockRef(parent), ref)
})
t.Run("AncestorOfHead", func(t *testing.T) {
client, oracle := newClient(t)
block := head
blocks := []eth.BlockInfo{block}
for i := 0; i < 10; i++ {
block = blockNum(block.NumberU64() - 1)
oracle.blocks[block.Hash()] = block
blocks = append(blocks, block)
}
for _, block := range blocks {
ref, err := client.L1BlockRefByNumber(context.Background(), block.NumberU64())
require.NoError(t, err)
require.Equal(t, eth.InfoToL1BlockRef(block), ref)
}
})
}
func newClient(t *testing.T) (*OracleL1Client, *stubOracle) {
stub := newStubOracle(t)
stub.blocks[head.Hash()] = head
client := NewOracleL1Client(testlog.Logger(t, log.LvlDebug), stub, head.Hash())
return client, stub
}
func blockNum(num uint64) eth.BlockInfo {
parentNum := num - 1
return &testutils.MockBlockInfo{
InfoHash: common.BytesToHash(big.NewInt(int64(num)).Bytes()),
InfoParentHash: common.BytesToHash(big.NewInt(int64(parentNum)).Bytes()),
InfoCoinbase: common.Address{},
InfoRoot: common.Hash{},
InfoNum: num,
InfoTime: num * 2,
InfoMixDigest: [32]byte{},
InfoBaseFee: nil,
InfoReceiptRoot: common.Hash{},
InfoGasUsed: 0,
}
}
package l1
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-program/preimage"
)
type BlockHeaderHint common.Hash
var _ preimage.Hint = BlockHeaderHint{}
func (l BlockHeaderHint) Hint() string {
return "l1-block-header " + (common.Hash)(l).String()
}
type TransactionsHint common.Hash
var _ preimage.Hint = TransactionsHint{}
func (l TransactionsHint) Hint() string {
return "l1-transactions " + (common.Hash)(l).String()
}
type ReceiptsHint common.Hash
var _ preimage.Hint = ReceiptsHint{}
func (l ReceiptsHint) Hint() string {
return "l1-receipts " + (common.Hash)(l).String()
}
package l1
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-program/client/mpt"
"github.com/ethereum-optimism/optimism/op-program/preimage"
)
type Oracle interface {
// HeaderByBlockHash retrieves the block header with the given hash.
HeaderByBlockHash(blockHash common.Hash) eth.BlockInfo
// TransactionsByBlockHash retrieves the transactions from the block with the given hash.
TransactionsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Transactions)
// ReceiptsByBlockHash retrieves the receipts from the block with the given hash.
ReceiptsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Receipts)
}
// PreimageOracle implements Oracle by interfacing with the pure preimage.Oracle
// to fetch pre-images and decode them into the requested data.
type PreimageOracle struct {
oracle preimage.Oracle
hint preimage.Hinter
}
var _ Oracle = (*PreimageOracle)(nil)
func NewPreimageOracle(raw preimage.Oracle, hint preimage.Hinter) *PreimageOracle {
return &PreimageOracle{
oracle: raw,
hint: hint,
}
}
func (p *PreimageOracle) headerByBlockHash(blockHash common.Hash) *types.Header {
p.hint.Hint(BlockHeaderHint(blockHash))
headerRlp := p.oracle.Get(preimage.Keccak256Key(blockHash))
var header types.Header
if err := rlp.DecodeBytes(headerRlp, &header); err != nil {
panic(fmt.Errorf("invalid block header %s: %w", blockHash, err))
}
return &header
}
func (p *PreimageOracle) HeaderByBlockHash(blockHash common.Hash) eth.BlockInfo {
return eth.HeaderBlockInfo(p.headerByBlockHash(blockHash))
}
func (p *PreimageOracle) TransactionsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Transactions) {
header := p.headerByBlockHash(blockHash)
p.hint.Hint(TransactionsHint(blockHash))
opaqueTxs := mpt.ReadTrie(header.TxHash, func(key common.Hash) []byte {
return p.oracle.Get(preimage.Keccak256Key(key))
})
txs, err := eth.DecodeTransactions(opaqueTxs)
if err != nil {
panic(fmt.Errorf("failed to decode list of txs: %w", err))
}
return eth.HeaderBlockInfo(header), txs
}
func (p *PreimageOracle) ReceiptsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Receipts) {
info, txs := p.TransactionsByBlockHash(blockHash)
p.hint.Hint(ReceiptsHint(blockHash))
opaqueReceipts := mpt.ReadTrie(info.ReceiptHash(), func(key common.Hash) []byte {
return p.oracle.Get(preimage.Keccak256Key(key))
})
txHashes := eth.TransactionsToHashes(txs)
receipts, err := eth.DecodeRawReceipts(eth.ToBlockID(info), opaqueReceipts, txHashes)
if err != nil {
panic(fmt.Errorf("bad receipts data for block %s: %w", blockHash, err))
}
return info, receipts
}
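The pattern throughout PreimageOracle is hint-then-get: a hint tells the host which pre-images to prepare, and the data is then read through keccak256-keyed pre-images rooted at the header's transaction and receipt trie roots. From a caller's point of view, everything about an L1 block is recoverable from its hash alone; a short illustrative sketch (the helper is hypothetical):

package l1

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"

	"github.com/ethereum-optimism/optimism/op-node/eth"
)

// blockData is a hypothetical helper: header, transactions and receipts are
// all derived from the block hash. Note that ReceiptsByBlockHash re-reads the
// transactions internally (to recover the tx hashes), so a CachingOracle
// wrapper keeps the duplicated reads cheap.
func blockData(o Oracle, blockHash common.Hash) (eth.BlockInfo, types.Transactions, types.Receipts) {
	info, txs := o.TransactionsByBlockHash(blockHash)
	_, rcpts := o.ReceiptsByBlockHash(blockHash)
	return info, txs, rcpts
}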
package l1
import (
"encoding/json"
"fmt"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/ethereum-optimism/optimism/op-program/client/mpt"
"github.com/ethereum-optimism/optimism/op-program/preimage"
)
// testBlock tests that the given block with receipts can be passed through the preimage oracle.
func testBlock(t *testing.T, block *types.Block, receipts []*types.Receipt) {
// Prepare the pre-images
preimages := make(map[common.Hash][]byte)
hdrBytes, err := rlp.EncodeToBytes(block.Header())
require.NoError(t, err)
preimages[preimage.Keccak256Key(block.Hash()).PreimageKey()] = hdrBytes
opaqueTxs, err := eth.EncodeTransactions(block.Transactions())
require.NoError(t, err)
_, txsNodes := mpt.WriteTrie(opaqueTxs)
for _, p := range txsNodes {
preimages[preimage.Keccak256Key(crypto.Keccak256Hash(p)).PreimageKey()] = p
}
opaqueReceipts, err := eth.EncodeReceipts(receipts)
require.NoError(t, err)
_, receiptNodes := mpt.WriteTrie(opaqueReceipts)
for _, p := range receiptNodes {
preimages[preimage.Keccak256Key(crypto.Keccak256Hash(p)).PreimageKey()] = p
}
// Prepare a raw mock pre-image oracle that will serve the pre-image data and handle hints
var hints mock.Mock
po := &PreimageOracle{
oracle: preimage.OracleFn(func(key preimage.Key) []byte {
v, ok := preimages[key.PreimageKey()]
require.True(t, ok, "preimage must exist")
return v
}),
hint: preimage.HinterFn(func(v preimage.Hint) {
hints.MethodCalled("hint", v.Hint())
}),
}
// Check if block-headers work
hints.On("hint", BlockHeaderHint(block.Hash()).Hint()).Once().Return()
gotHeader := po.HeaderByBlockHash(block.Hash())
hints.AssertExpectations(t)
got, err := json.MarshalIndent(gotHeader, " ", " ")
require.NoError(t, err)
expected, err := json.MarshalIndent(block.Header(), " ", " ")
require.NoError(t, err)
require.Equal(t, expected, got, "expecting matching headers")
// Check if blocks with txs work
hints.On("hint", BlockHeaderHint(block.Hash()).Hint()).Once().Return()
hints.On("hint", TransactionsHint(block.Hash()).Hint()).Once().Return()
inf, gotTxs := po.TransactionsByBlockHash(block.Hash())
hints.AssertExpectations(t)
require.Equal(t, inf.Hash(), block.Hash())
expectedTxs := block.Transactions()
require.Equal(t, len(expectedTxs), len(gotTxs), "expecting equal tx list length")
for i, tx := range gotTxs {
require.Equalf(t, tx.Hash(), expectedTxs[i].Hash(), "expecting tx %d to match", i)
}
// Check if blocks with receipts work
hints.On("hint", BlockHeaderHint(block.Hash()).Hint()).Once().Return()
hints.On("hint", TransactionsHint(block.Hash()).Hint()).Once().Return()
hints.On("hint", ReceiptsHint(block.Hash()).Hint()).Once().Return()
inf, gotReceipts := po.ReceiptsByBlockHash(block.Hash())
hints.AssertExpectations(t)
require.Equal(t, inf.Hash(), block.Hash())
require.Equal(t, len(receipts), len(gotReceipts), "expecting equal receipts list length")
for i, r := range gotReceipts {
require.Equalf(t, r.TxHash, expectedTxs[i].Hash(), "expecting receipt to match tx %d", i)
}
}
func TestPreimageOracleBlockByHash(t *testing.T) {
rng := rand.New(rand.NewSource(123))
for i := 0; i < 10; i++ {
block, receipts := testutils.RandomBlock(rng, 10)
t.Run(fmt.Sprintf("block_%d", i), func(t *testing.T) {
testBlock(t, block, receipts)
})
}
}
package l1
import (
"testing"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
type stubOracle struct {
t *testing.T
// blocks maps block hash to eth.BlockInfo
blocks map[common.Hash]eth.BlockInfo
// txs maps block hash to transactions
txs map[common.Hash]types.Transactions
// rcpts maps block hash to receipts
rcpts map[common.Hash]types.Receipts
}
func newStubOracle(t *testing.T) *stubOracle {
return &stubOracle{
t: t,
blocks: make(map[common.Hash]eth.BlockInfo),
txs: make(map[common.Hash]types.Transactions),
rcpts: make(map[common.Hash]types.Receipts),
}
}
func (o stubOracle) HeaderByBlockHash(blockHash common.Hash) eth.BlockInfo {
info, ok := o.blocks[blockHash]
if !ok {
o.t.Fatalf("unknown block %s", blockHash)
}
return info
}
func (o stubOracle) TransactionsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Transactions) {
txs, ok := o.txs[blockHash]
if !ok {
o.t.Fatalf("unknown txs %s", blockHash)
}
return o.HeaderByBlockHash(blockHash), txs
}
func (o stubOracle) ReceiptsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Receipts) {
rcpts, ok := o.rcpts[blockHash]
if !ok {
o.t.Fatalf("unknown rcpts %s", blockHash)
}
return o.HeaderByBlockHash(blockHash), rcpts
}
package l2
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
const blockCacheSize = 2_000
const nodeCacheSize = 100_000
const codeCacheSize = 10_000
type CachingOracle struct {
oracle Oracle
blocks *simplelru.LRU[common.Hash, *types.Block]
nodes *simplelru.LRU[common.Hash, []byte]
codes *simplelru.LRU[common.Hash, []byte]
}
func NewCachingOracle(oracle Oracle) *CachingOracle {
blockLRU, _ := simplelru.NewLRU[common.Hash, *types.Block](blockCacheSize, nil)
nodeLRU, _ := simplelru.NewLRU[common.Hash, []byte](nodeCacheSize, nil)
codeLRU, _ := simplelru.NewLRU[common.Hash, []byte](codeCacheSize, nil)
return &CachingOracle{
oracle: oracle,
blocks: blockLRU,
nodes: nodeLRU,
codes: codeLRU,
}
}
func (o *CachingOracle) NodeByHash(nodeHash common.Hash) []byte {
node, ok := o.nodes.Get(nodeHash)
if ok {
return node
}
node = o.oracle.NodeByHash(nodeHash)
o.nodes.Add(nodeHash, node)
return node
}
func (o *CachingOracle) CodeByHash(codeHash common.Hash) []byte {
code, ok := o.codes.Get(codeHash)
if ok {
return code
}
code = o.oracle.CodeByHash(codeHash)
o.codes.Add(codeHash, code)
return code
}
func (o *CachingOracle) BlockByHash(blockHash common.Hash) *types.Block {
block, ok := o.blocks.Get(blockHash)
if ok {
return block
}
block = o.oracle.BlockByHash(blockHash)
o.blocks.Add(blockHash, block)
return block
}
package l2
import (
"math/rand"
"testing"
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
// Should be an Oracle implementation
var _ Oracle = (*CachingOracle)(nil)
func TestBlockByHash(t *testing.T) {
stub, _ := newStubOracle(t)
oracle := NewCachingOracle(stub)
rng := rand.New(rand.NewSource(1))
block, _ := testutils.RandomBlock(rng, 1)
// Initial call retrieves from the stub
stub.blocks[block.Hash()] = block
actual := oracle.BlockByHash(block.Hash())
require.Equal(t, block, actual)
// Later calls should retrieve from cache
delete(stub.blocks, block.Hash())
actual = oracle.BlockByHash(block.Hash())
require.Equal(t, block, actual)
}
func TestNodeByHash(t *testing.T) {
stub, stateStub := newStubOracle(t)
oracle := NewCachingOracle(stub)
node := []byte{12, 3, 4}
hash := common.Hash{0xaa}
// Initial call retrieves from the stub
stateStub.data[hash] = node
actual := oracle.NodeByHash(hash)
require.Equal(t, node, actual)
// Later calls should retrieve from cache
delete(stateStub.data, hash)
actual = oracle.NodeByHash(hash)
require.Equal(t, node, actual)
}
func TestCodeByHash(t *testing.T) {
stub, stateStub := newStubOracle(t)
oracle := NewCachingOracle(stub)
node := []byte{12, 3, 4}
hash := common.Hash{0xaa}
// Initial call retrieves from the stub
stateStub.code[hash] = node
actual := oracle.CodeByHash(hash)
require.Equal(t, node, actual)
// Later calls should retrieve from cache
delete(stateStub.code, hash)
actual = oracle.CodeByHash(hash)
require.Equal(t, node, actual)
}
...@@ -40,12 +40,12 @@ func (o *OracleKeyValueStore) Get(key []byte) ([]byte, error) { ...@@ -40,12 +40,12 @@ func (o *OracleKeyValueStore) Get(key []byte) ([]byte, error) {
if len(key) == codePrefixedKeyLength && bytes.HasPrefix(key, rawdb.CodePrefix) { if len(key) == codePrefixedKeyLength && bytes.HasPrefix(key, rawdb.CodePrefix) {
key = key[len(rawdb.CodePrefix):] key = key[len(rawdb.CodePrefix):]
return o.oracle.CodeByHash(*(*[common.HashLength]byte)(key)) return o.oracle.CodeByHash(*(*[common.HashLength]byte)(key)), nil
} }
if len(key) != common.HashLength { if len(key) != common.HashLength {
return nil, ErrInvalidKeyLength return nil, ErrInvalidKeyLength
} }
return o.oracle.NodeByHash(*(*[common.HashLength]byte)(key)) return o.oracle.NodeByHash(*(*[common.HashLength]byte)(key)), nil
} }
func (o *OracleKeyValueStore) NewBatch() ethdb.Batch { func (o *OracleKeyValueStore) NewBatch() ethdb.Batch {
......
package l2 package l2
import ( import (
"fmt"
"math/big" "math/big"
"testing" "testing"
...@@ -27,16 +26,8 @@ var ( ...@@ -27,16 +26,8 @@ var (
var _ ethdb.KeyValueStore = (*OracleKeyValueStore)(nil) var _ ethdb.KeyValueStore = (*OracleKeyValueStore)(nil)
func TestGet(t *testing.T) { func TestGet(t *testing.T) {
t.Run("UnknownKey", func(t *testing.T) {
oracle := newStubStateOracle()
db := NewOracleBackedDB(oracle)
val, err := db.Get(common.Hash{}.Bytes())
require.Error(t, err)
require.Nil(t, val)
})
t.Run("IncorrectLengthKey", func(t *testing.T) { t.Run("IncorrectLengthKey", func(t *testing.T) {
oracle := newStubStateOracle() oracle := newStubStateOracle(t)
db := NewOracleBackedDB(oracle) db := NewOracleBackedDB(oracle)
val, err := db.Get([]byte{1, 2, 3}) val, err := db.Get([]byte{1, 2, 3})
require.ErrorIs(t, err, ErrInvalidKeyLength) require.ErrorIs(t, err, ErrInvalidKeyLength)
...@@ -44,7 +35,7 @@ func TestGet(t *testing.T) { ...@@ -44,7 +35,7 @@ func TestGet(t *testing.T) {
}) })
t.Run("KeyWithCodePrefix", func(t *testing.T) { t.Run("KeyWithCodePrefix", func(t *testing.T) {
oracle := newStubStateOracle() oracle := newStubStateOracle(t)
db := NewOracleBackedDB(oracle) db := NewOracleBackedDB(oracle)
key := common.HexToHash("0x12345678") key := common.HexToHash("0x12345678")
prefixedKey := append(rawdb.CodePrefix, key.Bytes()...) prefixedKey := append(rawdb.CodePrefix, key.Bytes()...)
...@@ -58,7 +49,7 @@ func TestGet(t *testing.T) { ...@@ -58,7 +49,7 @@ func TestGet(t *testing.T) {
}) })
t.Run("NormalKeyThatHappensToStartWithCodePrefix", func(t *testing.T) { t.Run("NormalKeyThatHappensToStartWithCodePrefix", func(t *testing.T) {
oracle := newStubStateOracle() oracle := newStubStateOracle(t)
db := NewOracleBackedDB(oracle) db := NewOracleBackedDB(oracle)
key := make([]byte, common.HashLength) key := make([]byte, common.HashLength)
copy(rawdb.CodePrefix, key) copy(rawdb.CodePrefix, key)
...@@ -74,7 +65,7 @@ func TestGet(t *testing.T) { ...@@ -74,7 +65,7 @@ func TestGet(t *testing.T) {
t.Run("KnownKey", func(t *testing.T) { t.Run("KnownKey", func(t *testing.T) {
key := common.HexToHash("0xAA4488") key := common.HexToHash("0xAA4488")
expected := []byte{2, 6, 3, 8} expected := []byte{2, 6, 3, 8}
oracle := newStubStateOracle() oracle := newStubStateOracle(t)
oracle.data[key] = expected oracle.data[key] = expected
db := NewOracleBackedDB(oracle) db := NewOracleBackedDB(oracle)
val, err := db.Get(key.Bytes()) val, err := db.Get(key.Bytes())
...@@ -85,7 +76,7 @@ func TestGet(t *testing.T) { ...@@ -85,7 +76,7 @@ func TestGet(t *testing.T) {
func TestPut(t *testing.T) { func TestPut(t *testing.T) {
t.Run("NewKey", func(t *testing.T) { t.Run("NewKey", func(t *testing.T) {
oracle := newStubStateOracle() oracle := newStubStateOracle(t)
db := NewOracleBackedDB(oracle) db := NewOracleBackedDB(oracle)
key := common.HexToHash("0xAA4488") key := common.HexToHash("0xAA4488")
value := []byte{2, 6, 3, 8} value := []byte{2, 6, 3, 8}
...@@ -97,7 +88,7 @@ func TestPut(t *testing.T) { ...@@ -97,7 +88,7 @@ func TestPut(t *testing.T) {
require.Equal(t, value, actual) require.Equal(t, value, actual)
}) })
t.Run("ReplaceKey", func(t *testing.T) { t.Run("ReplaceKey", func(t *testing.T) {
oracle := newStubStateOracle() oracle := newStubStateOracle(t)
db := NewOracleBackedDB(oracle) db := NewOracleBackedDB(oracle)
key := common.HexToHash("0xAA4488") key := common.HexToHash("0xAA4488")
value1 := []byte{2, 6, 3, 8} value1 := []byte{2, 6, 3, 8}
...@@ -119,6 +110,7 @@ func TestSupportsStateDBOperations(t *testing.T) { ...@@ -119,6 +110,7 @@ func TestSupportsStateDBOperations(t *testing.T) {
genesisBlock := l2Genesis.MustCommit(realDb) genesisBlock := l2Genesis.MustCommit(realDb)
loader := &kvStateOracle{ loader := &kvStateOracle{
t: t,
source: realDb, source: realDb,
} }
assertStateDataAvailable(t, NewOracleBackedDB(loader), l2Genesis, genesisBlock) assertStateDataAvailable(t, NewOracleBackedDB(loader), l2Genesis, genesisBlock)
...@@ -126,7 +118,7 @@ func TestSupportsStateDBOperations(t *testing.T) { ...@@ -126,7 +118,7 @@ func TestSupportsStateDBOperations(t *testing.T) {
func TestUpdateState(t *testing.T) { func TestUpdateState(t *testing.T) {
l2Genesis := createGenesis() l2Genesis := createGenesis()
oracle := newStubStateOracle() oracle := newStubStateOracle(t)
db := rawdb.NewDatabase(NewOracleBackedDB(oracle)) db := rawdb.NewDatabase(NewOracleBackedDB(oracle))
genesisBlock := l2Genesis.MustCommit(db) genesisBlock := l2Genesis.MustCommit(db)
...@@ -202,44 +194,3 @@ func assertStateDataAvailable(t *testing.T, db ethdb.KeyValueStore, l2Genesis *c ...@@ -202,44 +194,3 @@ func assertStateDataAvailable(t *testing.T, db ethdb.KeyValueStore, l2Genesis *c
require.Nil(t, statedb.GetCode(unknownAccount), "unset account code") require.Nil(t, statedb.GetCode(unknownAccount), "unset account code")
require.Equal(t, common.Hash{}, statedb.GetCodeHash(unknownAccount), "unset account code hash") require.Equal(t, common.Hash{}, statedb.GetCodeHash(unknownAccount), "unset account code hash")
} }
func newStubStateOracle() *stubStateOracle {
return &stubStateOracle{
data: make(map[common.Hash][]byte),
code: make(map[common.Hash][]byte),
}
}
type stubStateOracle struct {
data map[common.Hash][]byte
code map[common.Hash][]byte
}
func (o *stubStateOracle) NodeByHash(nodeHash common.Hash) ([]byte, error) {
data, ok := o.data[nodeHash]
if !ok {
return nil, fmt.Errorf("no value for node %v", nodeHash)
}
return data, nil
}
func (o *stubStateOracle) CodeByHash(hash common.Hash) ([]byte, error) {
data, ok := o.code[hash]
if !ok {
return nil, fmt.Errorf("no value for code %v", hash)
}
return data, nil
}
// kvStateOracle loads data from a source ethdb.KeyValueStore
type kvStateOracle struct {
source ethdb.KeyValueStore
}
func (o *kvStateOracle) NodeByHash(nodeHash common.Hash) ([]byte, error) {
return o.source.Get(nodeHash.Bytes())
}
func (o *kvStateOracle) CodeByHash(hash common.Hash) ([]byte, error) {
return rawdb.ReadCode(o.source, hash), nil
}
...@@ -5,6 +5,7 @@ import ( ...@@ -5,6 +5,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
...@@ -33,19 +34,32 @@ func NewOracleEngine(rollupCfg *rollup.Config, logger log.Logger, backend engine ...@@ -33,19 +34,32 @@ func NewOracleEngine(rollupCfg *rollup.Config, logger log.Logger, backend engine
} }
} }
func (o OracleEngine) GetPayload(ctx context.Context, payloadId eth.PayloadID) (*eth.ExecutionPayload, error) { func (o *OracleEngine) L2OutputRoot() (eth.Bytes32, error) {
outBlock := o.backend.CurrentHeader()
stateDB, err := o.backend.StateAt(outBlock.Root)
if err != nil {
return eth.Bytes32{}, fmt.Errorf("failed to open L2 state db at block %s: %w", outBlock.Hash(), err)
}
withdrawalsTrie, err := stateDB.StorageTrie(predeploys.L2ToL1MessagePasserAddr)
if err != nil {
return eth.Bytes32{}, fmt.Errorf("withdrawals trie unavailable at block %v: %w", outBlock.Hash(), err)
}
return rollup.ComputeL2OutputRootV0(eth.HeaderBlockInfo(outBlock), withdrawalsTrie.Hash())
}
func (o *OracleEngine) GetPayload(ctx context.Context, payloadId eth.PayloadID) (*eth.ExecutionPayload, error) {
return o.api.GetPayloadV1(ctx, payloadId) return o.api.GetPayloadV1(ctx, payloadId)
} }
func (o OracleEngine) ForkchoiceUpdate(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) { func (o *OracleEngine) ForkchoiceUpdate(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) {
return o.api.ForkchoiceUpdatedV1(ctx, state, attr) return o.api.ForkchoiceUpdatedV1(ctx, state, attr)
} }
func (o OracleEngine) NewPayload(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) { func (o *OracleEngine) NewPayload(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) {
return o.api.NewPayloadV1(ctx, payload) return o.api.NewPayloadV1(ctx, payload)
} }
func (o OracleEngine) PayloadByHash(ctx context.Context, hash common.Hash) (*eth.ExecutionPayload, error) { func (o *OracleEngine) PayloadByHash(ctx context.Context, hash common.Hash) (*eth.ExecutionPayload, error) {
block := o.backend.GetBlockByHash(hash) block := o.backend.GetBlockByHash(hash)
if block == nil { if block == nil {
return nil, ErrNotFound return nil, ErrNotFound
...@@ -53,7 +67,7 @@ func (o OracleEngine) PayloadByHash(ctx context.Context, hash common.Hash) (*eth ...@@ -53,7 +67,7 @@ func (o OracleEngine) PayloadByHash(ctx context.Context, hash common.Hash) (*eth
return eth.BlockAsPayload(block) return eth.BlockAsPayload(block)
} }
func (o OracleEngine) PayloadByNumber(ctx context.Context, n uint64) (*eth.ExecutionPayload, error) { func (o *OracleEngine) PayloadByNumber(ctx context.Context, n uint64) (*eth.ExecutionPayload, error) {
hash := o.backend.GetCanonicalHash(n) hash := o.backend.GetCanonicalHash(n)
if hash == (common.Hash{}) { if hash == (common.Hash{}) {
return nil, ErrNotFound return nil, ErrNotFound
...@@ -61,7 +75,7 @@ func (o OracleEngine) PayloadByNumber(ctx context.Context, n uint64) (*eth.Execu ...@@ -61,7 +75,7 @@ func (o OracleEngine) PayloadByNumber(ctx context.Context, n uint64) (*eth.Execu
return o.PayloadByHash(ctx, hash) return o.PayloadByHash(ctx, hash)
} }
func (o OracleEngine) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error) { func (o *OracleEngine) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error) {
var header *types.Header var header *types.Header
switch label { switch label {
case eth.Unsafe: case eth.Unsafe:
...@@ -83,7 +97,7 @@ func (o OracleEngine) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabe ...@@ -83,7 +97,7 @@ func (o OracleEngine) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabe
return derive.L2BlockToBlockRef(block, &o.rollupCfg.Genesis) return derive.L2BlockToBlockRef(block, &o.rollupCfg.Genesis)
} }
func (o OracleEngine) L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) (eth.L2BlockRef, error) { func (o *OracleEngine) L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) (eth.L2BlockRef, error) {
block := o.backend.GetBlockByHash(l2Hash) block := o.backend.GetBlockByHash(l2Hash)
if block == nil { if block == nil {
return eth.L2BlockRef{}, ErrNotFound return eth.L2BlockRef{}, ErrNotFound
...@@ -91,7 +105,7 @@ func (o OracleEngine) L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) ...@@ -91,7 +105,7 @@ func (o OracleEngine) L2BlockRefByHash(ctx context.Context, l2Hash common.Hash)
return derive.L2BlockToBlockRef(block, &o.rollupCfg.Genesis) return derive.L2BlockToBlockRef(block, &o.rollupCfg.Genesis)
} }
func (o OracleEngine) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (eth.SystemConfig, error) { func (o *OracleEngine) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (eth.SystemConfig, error) {
payload, err := o.PayloadByHash(ctx, hash) payload, err := o.PayloadByHash(ctx, hash)
if err != nil { if err != nil {
return eth.SystemConfig{}, err return eth.SystemConfig{}, err
......
...@@ -18,14 +18,15 @@ import ( ...@@ -18,14 +18,15 @@ import (
) )
type OracleBackedL2Chain struct { type OracleBackedL2Chain struct {
log log.Logger log log.Logger
oracle Oracle oracle Oracle
chainCfg *params.ChainConfig chainCfg *params.ChainConfig
engine consensus.Engine engine consensus.Engine
head *types.Header oracleHead *types.Header
safe *types.Header head *types.Header
finalized *types.Header safe *types.Header
vmCfg vm.Config finalized *types.Header
vmCfg vm.Config
// Inserted blocks // Inserted blocks
blocks map[common.Hash]*types.Block blocks map[common.Hash]*types.Block
...@@ -35,10 +36,8 @@ type OracleBackedL2Chain struct { ...@@ -35,10 +36,8 @@ type OracleBackedL2Chain struct {
var _ engineapi.EngineBackend = (*OracleBackedL2Chain)(nil) var _ engineapi.EngineBackend = (*OracleBackedL2Chain)(nil)
func NewOracleBackedL2Chain(logger log.Logger, oracle Oracle, chainCfg *params.ChainConfig, l2Head common.Hash) (*OracleBackedL2Chain, error) { func NewOracleBackedL2Chain(logger log.Logger, oracle Oracle, chainCfg *params.ChainConfig, l2Head common.Hash) (*OracleBackedL2Chain, error) {
head, err := oracle.BlockByHash(l2Head) head := oracle.BlockByHash(l2Head)
if err != nil { logger.Info("Loaded L2 head", "hash", head.Hash(), "number", head.Number())
return nil, fmt.Errorf("loading l2 head: %w", err)
}
return &OracleBackedL2Chain{ return &OracleBackedL2Chain{
log: logger, log: logger,
oracle: oracle, oracle: oracle,
...@@ -46,11 +45,12 @@ func NewOracleBackedL2Chain(logger log.Logger, oracle Oracle, chainCfg *params.C ...@@ -46,11 +45,12 @@ func NewOracleBackedL2Chain(logger log.Logger, oracle Oracle, chainCfg *params.C
engine: beacon.New(nil), engine: beacon.New(nil),
// Treat the agreed starting head as finalized - nothing before it can be disputed // Treat the agreed starting head as finalized - nothing before it can be disputed
head: head.Header(), head: head.Header(),
safe: head.Header(), safe: head.Header(),
finalized: head.Header(), finalized: head.Header(),
blocks: make(map[common.Hash]*types.Block), oracleHead: head.Header(),
db: NewOracleBackedDB(oracle), blocks: make(map[common.Hash]*types.Block),
db: NewOracleBackedDB(oracle),
}, nil }, nil
} }
...@@ -84,11 +84,7 @@ func (o *OracleBackedL2Chain) CurrentFinalBlock() *types.Header { ...@@ -84,11 +84,7 @@ func (o *OracleBackedL2Chain) CurrentFinalBlock() *types.Header {
} }
func (o *OracleBackedL2Chain) GetHeaderByHash(hash common.Hash) *types.Header { func (o *OracleBackedL2Chain) GetHeaderByHash(hash common.Hash) *types.Header {
block := o.GetBlockByHash(hash) return o.GetBlockByHash(hash).Header()
if block == nil {
return nil
}
return block.Header()
} }
func (o *OracleBackedL2Chain) GetBlockByHash(hash common.Hash) *types.Block { func (o *OracleBackedL2Chain) GetBlockByHash(hash common.Hash) *types.Block {
...@@ -98,18 +94,18 @@ func (o *OracleBackedL2Chain) GetBlockByHash(hash common.Hash) *types.Block { ...@@ -98,18 +94,18 @@ func (o *OracleBackedL2Chain) GetBlockByHash(hash common.Hash) *types.Block {
return block return block
} }
// Retrieve from the oracle // Retrieve from the oracle
block, err := o.oracle.BlockByHash(hash) return o.oracle.BlockByHash(hash)
if err != nil {
handleError(err)
}
if block == nil {
return nil
}
return block
} }
func (o *OracleBackedL2Chain) GetBlock(hash common.Hash, number uint64) *types.Block { func (o *OracleBackedL2Chain) GetBlock(hash common.Hash, number uint64) *types.Block {
block := o.GetBlockByHash(hash) var block *types.Block
if o.oracleHead.Number.Uint64() < number {
// For blocks above the chain head, only consider newly built blocks
// Avoids requesting an unknown block from the oracle which would panic.
block = o.blocks[hash]
} else {
block = o.GetBlockByHash(hash)
}
if block == nil { if block == nil {
return nil return nil
} }
...@@ -121,9 +117,6 @@ func (o *OracleBackedL2Chain) GetBlock(hash common.Hash, number uint64) *types.B ...@@ -121,9 +117,6 @@ func (o *OracleBackedL2Chain) GetBlock(hash common.Hash, number uint64) *types.B
func (o *OracleBackedL2Chain) GetHeader(hash common.Hash, u uint64) *types.Header { func (o *OracleBackedL2Chain) GetHeader(hash common.Hash, u uint64) *types.Header {
block := o.GetBlock(hash, u) block := o.GetBlock(hash, u)
if block == nil {
return nil
}
return block.Header() return block.Header()
} }
...@@ -194,7 +187,3 @@ func (o *OracleBackedL2Chain) SetFinalized(header *types.Header) { ...@@ -194,7 +187,3 @@ func (o *OracleBackedL2Chain) SetFinalized(header *types.Header) {
func (o *OracleBackedL2Chain) SetSafe(header *types.Header) { func (o *OracleBackedL2Chain) SetSafe(header *types.Header) {
o.safe = header o.safe = header
} }
func handleError(err error) {
panic(err)
}
...@@ -9,12 +9,12 @@ import ( ...@@ -9,12 +9,12 @@ import (
"github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi" "github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi"
"github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi/test" "github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi/test"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -42,17 +42,6 @@ func TestGetBlocks(t *testing.T) { ...@@ -42,17 +42,6 @@ func TestGetBlocks(t *testing.T) {
} }
} }
func TestUnknownBlock(t *testing.T) {
_, chain := setupOracleBackedChain(t, 1)
hash := common.HexToHash("0x556677881122")
blockNumber := uint64(1)
require.Nil(t, chain.GetBlockByHash(hash))
require.Nil(t, chain.GetHeaderByHash(hash))
require.Nil(t, chain.GetBlock(hash, blockNumber))
require.Nil(t, chain.GetHeader(hash, blockNumber))
require.False(t, chain.HasBlockAndState(hash, blockNumber))
}
func TestCanonicalHashNotFoundPastChainHead(t *testing.T) { func TestCanonicalHashNotFoundPastChainHead(t *testing.T) {
blocks, chain := setupOracleBackedChainWithLowerHead(t, 5, 3) blocks, chain := setupOracleBackedChainWithLowerHead(t, 5, 3)
...@@ -69,7 +58,7 @@ func TestCanonicalHashNotFoundPastChainHead(t *testing.T) { ...@@ -69,7 +58,7 @@ func TestCanonicalHashNotFoundPastChainHead(t *testing.T) {
func TestAppendToChain(t *testing.T) { func TestAppendToChain(t *testing.T) {
blocks, chain := setupOracleBackedChainWithLowerHead(t, 4, 3) blocks, chain := setupOracleBackedChainWithLowerHead(t, 4, 3)
newBlock := blocks[4] newBlock := blocks[4]
require.Nil(t, chain.GetBlockByHash(newBlock.Hash()), "block unknown before being added") require.Nil(t, chain.GetBlock(newBlock.Hash(), newBlock.NumberU64()), "block unknown before being added")
require.NoError(t, chain.InsertBlockWithoutSetHead(newBlock)) require.NoError(t, chain.InsertBlockWithoutSetHead(newBlock))
require.Equal(t, blocks[3].Header(), chain.CurrentHeader(), "should not update chain head yet") require.Equal(t, blocks[3].Header(), chain.CurrentHeader(), "should not update chain head yet")
...@@ -113,8 +102,7 @@ func TestUpdateStateDatabaseWhenImportingBlock(t *testing.T) { ...@@ -113,8 +102,7 @@ func TestUpdateStateDatabaseWhenImportingBlock(t *testing.T) {
require.NotEqual(t, blocks[1].Root(), newBlock.Root(), "block should have modified world state") require.NotEqual(t, blocks[1].Root(), newBlock.Root(), "block should have modified world state")
_, err = chain.StateAt(newBlock.Root()) require.False(t, chain.HasBlockAndState(newBlock.Root(), newBlock.NumberU64()), "state from non-imported block should not be available")
require.Error(t, err, "state from non-imported block should not be available")
err = chain.InsertBlockWithoutSetHead(newBlock) err = chain.InsertBlockWithoutSetHead(newBlock)
require.NoError(t, err) require.NoError(t, err)
...@@ -162,6 +150,9 @@ func setupOracle(t *testing.T, blockCount int, headBlockNumber int) (*params.Cha ...@@ -162,6 +150,9 @@ func setupOracle(t *testing.T, blockCount int, headBlockNumber int) (*params.Cha
L2BlockTime: 2, L2BlockTime: 2,
FundDevAccounts: true, FundDevAccounts: true,
L2GenesisBlockGasLimit: 30_000_000, L2GenesisBlockGasLimit: 30_000_000,
// Arbitrary non-zero difficulty in genesis.
// This is slightly weird for a chain starting post-merge, but it happens, so we need to make sure it works.
L2GenesisBlockDifficulty: (*hexutil.Big)(big.NewInt(100)),
} }
l1Genesis, err := genesis.NewL1Genesis(deployConfig) l1Genesis, err := genesis.NewL1Genesis(deployConfig)
require.NoError(t, err) require.NoError(t, err)
...@@ -180,7 +171,7 @@ func setupOracle(t *testing.T, blockCount int, headBlockNumber int) (*params.Cha ...@@ -180,7 +171,7 @@ func setupOracle(t *testing.T, blockCount int, headBlockNumber int) (*params.Cha
genesisBlock := l2Genesis.MustCommit(db) genesisBlock := l2Genesis.MustCommit(db)
blocks, _ := core.GenerateChain(chainCfg, genesisBlock, consensus, db, blockCount, func(i int, gen *core.BlockGen) {}) blocks, _ := core.GenerateChain(chainCfg, genesisBlock, consensus, db, blockCount, func(i int, gen *core.BlockGen) {})
blocks = append([]*types.Block{genesisBlock}, blocks...) blocks = append([]*types.Block{genesisBlock}, blocks...)
oracle := newStubBlockOracle(blocks[:headBlockNumber+1], db) oracle := newStubOracleWithBlocks(t, blocks[:headBlockNumber+1], db)
return chainCfg, blocks, oracle return chainCfg, blocks, oracle
} }
...@@ -207,28 +198,8 @@ func createBlock(t *testing.T, chain *OracleBackedL2Chain) *types.Block { ...@@ -207,28 +198,8 @@ func createBlock(t *testing.T, chain *OracleBackedL2Chain) *types.Block {
return blocks[0] return blocks[0]
} }
type stubBlockOracle struct {
blocks map[common.Hash]*types.Block
kvStateOracle
}
func newStubBlockOracle(chain []*types.Block, db ethdb.Database) *stubBlockOracle {
blocks := make(map[common.Hash]*types.Block, len(chain))
for _, block := range chain {
blocks[block.Hash()] = block
}
return &stubBlockOracle{
blocks: blocks,
kvStateOracle: kvStateOracle{source: db},
}
}
func (o stubBlockOracle) BlockByHash(blockHash common.Hash) (*types.Block, error) {
return o.blocks[blockHash], nil
}
func TestEngineAPITests(t *testing.T) { func TestEngineAPITests(t *testing.T) {
test.RunEngineAPITests(t, func() engineapi.EngineBackend { test.RunEngineAPITests(t, func(t *testing.T) engineapi.EngineBackend {
_, chain := setupOracleBackedChain(t, 0) _, chain := setupOracleBackedChain(t, 0)
return chain return chain
}) })
......
...@@ -207,7 +207,7 @@ func (ea *L2EngineAPI) ForkchoiceUpdatedV1(ctx context.Context, state *eth.Forkc ...@@ -207,7 +207,7 @@ func (ea *L2EngineAPI) ForkchoiceUpdatedV1(ctx context.Context, state *eth.Forkc
// Block is known locally, just sanity check that the beacon client does not // Block is known locally, just sanity check that the beacon client does not
// attempt to push us back to before the merge. // attempt to push us back to before the merge.
// Note: Differs from op-geth implementation as pre-merge blocks are never supported here // Note: Differs from op-geth implementation as pre-merge blocks are never supported here
if block.Difficulty().BitLen() > 0 { if block.Difficulty().BitLen() > 0 && block.NumberU64() > 0 {
return STATUS_INVALID, errors.New("pre-merge blocks not supported") return STATUS_INVALID, errors.New("pre-merge blocks not supported")
} }
valid := func(id *engine.PayloadID) *eth.ForkchoiceUpdatedResult { valid := func(id *engine.PayloadID) *eth.ForkchoiceUpdatedResult {
...@@ -301,7 +301,7 @@ func (ea *L2EngineAPI) NewPayloadV1(ctx context.Context, payload *eth.ExecutionP ...@@ -301,7 +301,7 @@ func (ea *L2EngineAPI) NewPayloadV1(ctx context.Context, payload *eth.ExecutionP
} }
// If we already have the block locally, ignore the entire execution and just // If we already have the block locally, ignore the entire execution and just
// return a fake success. // return a fake success.
if block := ea.backend.GetBlockByHash(payload.BlockHash); block != nil { if block := ea.backend.GetBlock(payload.BlockHash, uint64(payload.BlockNumber)); block != nil {
ea.log.Warn("Ignoring already known beacon payload", "number", payload.BlockNumber, "hash", payload.BlockHash, "age", common.PrettyAge(time.Unix(int64(block.Time()), 0))) ea.log.Warn("Ignoring already known beacon payload", "number", payload.BlockNumber, "hash", payload.BlockHash, "age", common.PrettyAge(time.Unix(int64(block.Time()), 0)))
hash := block.Hash() hash := block.Hash()
return &eth.PayloadStatusV1{Status: eth.ExecutionValid, LatestValidHash: &hash}, nil return &eth.PayloadStatusV1{Status: eth.ExecutionValid, LatestValidHash: &hash}, nil
......
...@@ -18,7 +18,7 @@ import ( ...@@ -18,7 +18,7 @@ import (
var gasLimit = eth.Uint64Quantity(30_000_000) var gasLimit = eth.Uint64Quantity(30_000_000)
var feeRecipient = common.Address{} var feeRecipient = common.Address{}
func RunEngineAPITests(t *testing.T, createBackend func() engineapi.EngineBackend) { func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.EngineBackend) {
t.Run("CreateBlock", func(t *testing.T) { t.Run("CreateBlock", func(t *testing.T) {
api := newTestHelper(t, createBackend) api := newTestHelper(t, createBackend)
...@@ -292,10 +292,10 @@ type testHelper struct { ...@@ -292,10 +292,10 @@ type testHelper struct {
assert *require.Assertions assert *require.Assertions
} }
func newTestHelper(t *testing.T, createBackend func() engineapi.EngineBackend) *testHelper { func newTestHelper(t *testing.T, createBackend func(t *testing.T) engineapi.EngineBackend) *testHelper {
logger := testlog.Logger(t, log.LvlDebug) logger := testlog.Logger(t, log.LvlDebug)
ctx := context.Background() ctx := context.Background()
backend := createBackend() backend := createBackend(t)
api := engineapi.NewL2EngineAPI(logger, backend) api := engineapi.NewL2EngineAPI(logger, backend)
test := &testHelper{ test := &testHelper{
t: t, t: t,
......
package l2
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-program/preimage"
)
type BlockHeaderHint common.Hash
var _ preimage.Hint = BlockHeaderHint{}
func (l BlockHeaderHint) Hint() string {
return "l2-block-header " + (common.Hash)(l).String()
}
type TransactionsHint common.Hash
var _ preimage.Hint = TransactionsHint{}
func (l TransactionsHint) Hint() string {
return "l2-transactions " + (common.Hash)(l).String()
}
type CodeHint common.Hash
var _ preimage.Hint = CodeHint{}
func (l CodeHint) Hint() string {
return "l2-code " + (common.Hash)(l).String()
}
type StateNodeHint common.Hash
var _ preimage.Hint = StateNodeHint{}
func (l StateNodeHint) Hint() string {
return "l2-state-node " + (common.Hash)(l).String()
}
package l2 package l2
import ( import (
"fmt"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-program/client/mpt"
"github.com/ethereum-optimism/optimism/op-program/preimage"
) )
// StateOracle defines the high-level API used to retrieve L2 state data pre-images // StateOracle defines the high-level API used to retrieve L2 state data pre-images
...@@ -11,13 +18,11 @@ type StateOracle interface { ...@@ -11,13 +18,11 @@ type StateOracle interface {
// NodeByHash retrieves the merkle-patricia trie node pre-image for a given hash. // NodeByHash retrieves the merkle-patricia trie node pre-image for a given hash.
// Trie nodes may be from the world state trie or any account storage trie. // Trie nodes may be from the world state trie or any account storage trie.
// Contract code is not stored as part of the trie and must be retrieved via CodeByHash // Contract code is not stored as part of the trie and must be retrieved via CodeByHash
// Returns an error if the pre-image is unavailable. NodeByHash(nodeHash common.Hash) []byte
NodeByHash(nodeHash common.Hash) ([]byte, error)
// CodeByHash retrieves the contract code pre-image for a given hash. // CodeByHash retrieves the contract code pre-image for a given hash.
// codeHash should be retrieved from the world state account for a contract. // codeHash should be retrieved from the world state account for a contract.
// Returns an error if the pre-image is unavailable. CodeByHash(codeHash common.Hash) []byte
CodeByHash(codeHash common.Hash) ([]byte, error)
} }
// Oracle defines the high-level API used to retrieve L2 data. // Oracle defines the high-level API used to retrieve L2 data.
...@@ -26,6 +31,57 @@ type Oracle interface { ...@@ -26,6 +31,57 @@ type Oracle interface {
StateOracle StateOracle
// BlockByHash retrieves the block with the given hash. // BlockByHash retrieves the block with the given hash.
// Returns an error if the block is not available. BlockByHash(blockHash common.Hash) *types.Block
BlockByHash(blockHash common.Hash) (*types.Block, error) }
// PreimageOracle implements Oracle by interfacing with the raw preimage.Oracle
// to fetch pre-images and decode them into the requested data.
type PreimageOracle struct {
oracle preimage.Oracle
hint preimage.Hinter
}
var _ Oracle = (*PreimageOracle)(nil)
func NewPreimageOracle(raw preimage.Oracle, hint preimage.Hinter) *PreimageOracle {
return &PreimageOracle{
oracle: raw,
hint: hint,
}
}
func (p *PreimageOracle) headerByBlockHash(blockHash common.Hash) *types.Header {
p.hint.Hint(BlockHeaderHint(blockHash))
headerRlp := p.oracle.Get(preimage.Keccak256Key(blockHash))
var header types.Header
if err := rlp.DecodeBytes(headerRlp, &header); err != nil {
panic(fmt.Errorf("invalid block header %s: %w", blockHash, err))
}
return &header
}
func (p *PreimageOracle) BlockByHash(blockHash common.Hash) *types.Block {
header := p.headerByBlockHash(blockHash)
p.hint.Hint(TransactionsHint(blockHash))
opaqueTxs := mpt.ReadTrie(header.TxHash, func(key common.Hash) []byte {
return p.oracle.Get(preimage.Keccak256Key(key))
})
txs, err := eth.DecodeTransactions(opaqueTxs)
if err != nil {
panic(fmt.Errorf("failed to decode list of txs: %w", err))
}
return types.NewBlockWithHeader(header).WithBody(txs, nil)
}
func (p *PreimageOracle) NodeByHash(nodeHash common.Hash) []byte {
p.hint.Hint(StateNodeHint(nodeHash))
return p.oracle.Get(preimage.Keccak256Key(nodeHash))
}
func (p *PreimageOracle) CodeByHash(codeHash common.Hash) []byte {
p.hint.Hint(CodeHint(codeHash))
return p.oracle.Get(preimage.Keccak256Key(codeHash))
} }
package l2
import (
"fmt"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/ethereum-optimism/optimism/op-program/client/mpt"
"github.com/ethereum-optimism/optimism/op-program/preimage"
)
func mockPreimageOracle(t *testing.T) (po *PreimageOracle, hintsMock *mock.Mock, preimages map[common.Hash][]byte) {
// Prepare the pre-images
preimages = make(map[common.Hash][]byte)
hintsMock = new(mock.Mock)
po = &PreimageOracle{
oracle: preimage.OracleFn(func(key preimage.Key) []byte {
v, ok := preimages[key.PreimageKey()]
require.True(t, ok, "preimage must exist")
return v
}),
hint: preimage.HinterFn(func(v preimage.Hint) {
hintsMock.MethodCalled("hint", v.Hint())
}),
}
return po, hintsMock, preimages
}
// testBlock tests that the given block can be passed through the preimage oracle.
func testBlock(t *testing.T, block *types.Block) {
po, hints, preimages := mockPreimageOracle(t)
hdrBytes, err := rlp.EncodeToBytes(block.Header())
require.NoError(t, err)
preimages[preimage.Keccak256Key(block.Hash()).PreimageKey()] = hdrBytes
opaqueTxs, err := eth.EncodeTransactions(block.Transactions())
require.NoError(t, err)
_, txsNodes := mpt.WriteTrie(opaqueTxs)
for _, p := range txsNodes {
preimages[preimage.Keccak256Key(crypto.Keccak256Hash(p)).PreimageKey()] = p
}
// The raw mock oracle above serves the pre-image data; set up the expected hints
// for the block header and transactions before fetching the block.
hints.On("hint", BlockHeaderHint(block.Hash()).Hint()).Once().Return()
hints.On("hint", TransactionsHint(block.Hash()).Hint()).Once().Return()
gotBlock := po.BlockByHash(block.Hash())
hints.AssertExpectations(t)
require.Equal(t, gotBlock.Hash(), block.Hash())
expectedTxs := block.Transactions()
require.Equal(t, len(expectedTxs), len(gotBlock.Transactions()), "expecting equal tx list length")
for i, tx := range gotBlock.Transactions() {
require.Equalf(t, tx.Hash(), expectedTxs[i].Hash(), "expecting tx %d to match", i)
}
}
func TestPreimageOracleBlockByHash(t *testing.T) {
rng := rand.New(rand.NewSource(123))
for i := 0; i < 10; i++ {
block, _ := testutils.RandomBlock(rng, 10)
t.Run(fmt.Sprintf("block_%d", i), func(t *testing.T) {
testBlock(t, block)
})
}
}
func TestPreimageOracleNodeByHash(t *testing.T) {
rng := rand.New(rand.NewSource(123))
for i := 0; i < 10; i++ {
t.Run(fmt.Sprintf("node_%d", i), func(t *testing.T) {
po, hints, preimages := mockPreimageOracle(t)
node := make([]byte, 123)
rng.Read(node)
h := crypto.Keccak256Hash(node)
preimages[preimage.Keccak256Key(h).PreimageKey()] = node
hints.On("hint", StateNodeHint(h).Hint()).Once().Return()
gotNode := po.NodeByHash(h)
hints.AssertExpectations(t)
require.Equal(t, hexutil.Bytes(node), hexutil.Bytes(gotNode), "node matches")
})
}
}
func TestPreimageOracleCodeByHash(t *testing.T) {
rng := rand.New(rand.NewSource(123))
for i := 0; i < 10; i++ {
t.Run(fmt.Sprintf("code_%d", i), func(t *testing.T) {
po, hints, preimages := mockPreimageOracle(t)
node := make([]byte, 123)
rng.Read(node)
h := crypto.Keccak256Hash(node)
preimages[preimage.Keccak256Key(h).PreimageKey()] = node
hints.On("hint", CodeHint(h).Hint()).Once().Return()
gotNode := po.CodeByHash(h)
hints.AssertExpectations(t)
require.Equal(t, hexutil.Bytes(node), hexutil.Bytes(gotNode), "code matches")
})
}
}
package l2
import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
)
type stubBlockOracle struct {
t *testing.T
blocks map[common.Hash]*types.Block
StateOracle
}
func newStubOracle(t *testing.T) (*stubBlockOracle, *stubStateOracle) {
stateOracle := newStubStateOracle(t)
blockOracle := stubBlockOracle{
t: t,
blocks: make(map[common.Hash]*types.Block),
StateOracle: stateOracle,
}
return &blockOracle, stateOracle
}
func newStubOracleWithBlocks(t *testing.T, chain []*types.Block, db ethdb.Database) *stubBlockOracle {
blocks := make(map[common.Hash]*types.Block, len(chain))
for _, block := range chain {
blocks[block.Hash()] = block
}
return &stubBlockOracle{
blocks: blocks,
StateOracle: &kvStateOracle{t: t, source: db},
}
}
func (o stubBlockOracle) BlockByHash(blockHash common.Hash) *types.Block {
block, ok := o.blocks[blockHash]
if !ok {
o.t.Fatalf("requested unknown block %s", blockHash)
}
return block
}
// kvStateOracle loads data from a source ethdb.KeyValueStore
type kvStateOracle struct {
t *testing.T
source ethdb.KeyValueStore
}
func (o *kvStateOracle) NodeByHash(nodeHash common.Hash) []byte {
val, err := o.source.Get(nodeHash.Bytes())
if err != nil {
o.t.Fatalf("error retrieving node %v: %v", nodeHash, err)
}
return val
}
func (o *kvStateOracle) CodeByHash(hash common.Hash) []byte {
return rawdb.ReadCode(o.source, hash)
}
func newStubStateOracle(t *testing.T) *stubStateOracle {
return &stubStateOracle{
t: t,
data: make(map[common.Hash][]byte),
code: make(map[common.Hash][]byte),
}
}
// Stub StateOracle implementation that reads from simple maps
type stubStateOracle struct {
t *testing.T
data map[common.Hash][]byte
code map[common.Hash][]byte
}
func (o *stubStateOracle) NodeByHash(nodeHash common.Hash) []byte {
data, ok := o.data[nodeHash]
if !ok {
o.t.Fatalf("no value for node %v", nodeHash)
}
return data
}
func (o *stubStateOracle) CodeByHash(hash common.Hash) []byte {
data, ok := o.code[hash]
if !ok {
o.t.Fatalf("no value for code %v", hash)
}
return data
}
package mpt
import "github.com/ethereum/go-ethereum/ethdb"
type Hooks struct {
Get func(key []byte) []byte
Put func(key []byte, value []byte)
Delete func(key []byte)
}
// DB implements the ethdb.Database to back the StateDB of Geth.
type DB struct {
db Hooks
}
func (p *DB) Has(key []byte) (bool, error) {
panic("not supported")
}
func (p *DB) Get(key []byte) ([]byte, error) {
return p.db.Get(key), nil
}
func (p *DB) Put(key []byte, value []byte) error {
p.db.Put(key, value)
return nil
}
func (p DB) Delete(key []byte) error {
p.db.Delete(key)
return nil
}
func (p DB) Stat(property string) (string, error) {
panic("not supported")
}
func (p DB) NewBatch() ethdb.Batch {
panic("not supported")
}
func (p DB) NewBatchWithSize(size int) ethdb.Batch {
panic("not supported")
}
func (p DB) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
panic("not supported")
}
func (p DB) Compact(start []byte, limit []byte) error {
return nil // no-op
}
func (p DB) NewSnapshot() (ethdb.Snapshot, error) {
panic("not supported")
}
func (p DB) Close() error {
return nil
}
// We implement the full ethdb.Database bloat because the StateDB takes this full interface,
// even though it only uses the KeyValue subset.
func (p *DB) HasAncient(kind string, number uint64) (bool, error) {
panic("not supported")
}
func (p *DB) Ancient(kind string, number uint64) ([]byte, error) {
panic("not supported")
}
func (p *DB) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
panic("not supported")
}
func (p *DB) Ancients() (uint64, error) {
panic("not supported")
}
func (p *DB) Tail() (uint64, error) {
panic("not supported")
}
func (p *DB) AncientSize(kind string) (uint64, error) {
panic("not supported")
}
func (p *DB) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) {
panic("not supported")
}
func (p *DB) ModifyAncients(f func(ethdb.AncientWriteOp) error) (int64, error) {
panic("not supported")
}
func (p *DB) TruncateHead(n uint64) error {
panic("not supported")
}
func (p *DB) TruncateTail(n uint64) error {
panic("not supported")
}
func (p *DB) Sync() error {
panic("not supported")
}
func (p *DB) MigrateTable(s string, f func([]byte) ([]byte, error)) error {
panic("not supported")
}
func (p *DB) AncientDatadir() (string, error) {
panic("not supported")
}
var _ ethdb.KeyValueStore = (*DB)(nil)
package mpt
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
// ReadTrie takes a Merkle Patricia Trie (MPT) root of a "DerivableList", and a pre-image oracle getter,
// and traverses the implied MPT to collect all raw leaf nodes in order, which are then returned.
func ReadTrie(root common.Hash, getPreimage func(key common.Hash) []byte) []hexutil.Bytes {
odb := &DB{db: Hooks{
Get: func(key []byte) []byte {
if len(key) != 32 {
panic(fmt.Errorf("expected 32 byte key query, but got %d bytes: %x", len(key), key))
}
return getPreimage(*(*[32]byte)(key))
},
Put: func(key []byte, value []byte) {
panic("put not supported")
},
Delete: func(key []byte) {
panic("delete not supported")
},
}}
// trie.New backed with a trie.NodeReader and trie.Reader seems really promising
// for a simple node-fetching backend, but the interface is half-private,
// while we already have the full database code for doing the same thing.
// Maybe it's still worth a small diff in geth to expose it?
// Diff would be:
//
// type Node = node
//
// func DecodeNode(hash, buf []byte) (node, error) {
// return decodeNode(hash, buf)
// }
//
// And then still some code here to implement the trie.NodeReader and trie.Reader
// interfaces to map to the getPreimageFunction.
//
// For now we just use the state DB trie approach.
tdb := trie.NewDatabase(odb)
tr, err := trie.New(trie.TrieID(root), tdb)
if err != nil {
panic(err)
}
iter := tr.NodeIterator(nil)
// With small lists the iterator uses 0x80 (the RLP empty string, unlike the other index keys)
// as the key for item 0, causing it to come last.
// So remember the keys, and reorder the values into canonical index order, to ensure correctness.
var values [][]byte
var keys []uint64
for iter.Next(true) {
if iter.Leaf() {
k := iter.LeafKey()
var x uint64
err := rlp.DecodeBytes(k, &x)
if err != nil {
panic(fmt.Errorf("invalid key: %w", err))
}
keys = append(keys, x)
values = append(values, iter.LeafBlob())
}
}
out := make([]hexutil.Bytes, len(values))
for i, x := range keys {
if x >= uint64(len(values)) {
panic(fmt.Errorf("bad key: %d", x))
}
if out[x] != nil {
panic(fmt.Errorf("duplicate key %d", x))
}
out[x] = values[i]
}
return out
}
type rawList []hexutil.Bytes
func (r rawList) Len() int {
return len(r)
}
func (r rawList) EncodeIndex(i int, buf *bytes.Buffer) {
buf.Write(r[i])
}
var _ types.DerivableList = rawList(nil)
type noResetHasher struct {
*trie.StackTrie
}
// Reset is intercepted and is a no-op, because we want to retain the writing function when calling types.DeriveSha
func (n noResetHasher) Reset() {}
// WriteTrie takes a list of values, and merkleizes them as a "DerivableList":
// a Merkle Patricia Trie (MPT) with values keyed by their RLP encoded index.
// This merkleization matches that of transactions, receipts, and withdrawals lists in the block header
// (at least up to the Shanghai L1 update).
// It returns the MPT root and a list of the trie's pre-images.
// Note: empty values are illegal, and fewer pre-images may be returned than values
// if any values are shorter than 32 bytes and fit directly into branch-node slots.
func WriteTrie(values []hexutil.Bytes) (common.Hash, []hexutil.Bytes) {
var out []hexutil.Bytes
st := noResetHasher{trie.NewStackTrie(
func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
out = append(out, common.CopyBytes(blob)) // the stack hasher may mutate the blob bytes, so copy them.
})}
root := types.DeriveSha(rawList(values), st)
return root, out
}
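The reordering at the end of ReadTrie exists because DerivableList entries are keyed by the RLP encoding of their index, and RLP encodes the integer 0 as the empty string (0x80) while 1..127 encode as a single byte, so item 0 iterates last. A small sketch (illustrative only, assuming geth's rlp package and fmt) of what those keys look like:
// Illustrative only: print the trie keys used for the first few list indices.
// Expected output: index 0 -> key 80, index 1 -> key 01, index 2 -> key 02.
for i := uint64(0); i < 3; i++ {
	key, _ := rlp.EncodeToBytes(i)
	fmt.Printf("index %d -> key %x\n", i, key)
}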
package mpt
import (
"fmt"
"math/rand"
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
)
type trieCase struct {
name string
elements []hexutil.Bytes
}
func (tc *trieCase) run(t *testing.T) {
root, preimages := WriteTrie(tc.elements)
byHash := make(map[common.Hash][]byte)
for _, v := range preimages {
k := crypto.Keccak256Hash(v)
byHash[k] = v
}
results := ReadTrie(root, func(key common.Hash) []byte {
v, ok := byHash[key]
if !ok {
panic(fmt.Errorf("missing key %s", key))
}
return v
})
require.Equal(t, len(tc.elements), len(results), "expected equal amount of values")
for i, result := range results {
// hex encoded for debugging readability
require.Equal(t, tc.elements[i].String(), result.String(),
"value %d does not match, expected equal value data", i)
}
}
func TestListTrieRoundtrip(t *testing.T) {
testCases := []trieCase{
{name: "empty list", elements: []hexutil.Bytes{}},
{name: "nil list", elements: nil},
{name: "simple", elements: []hexutil.Bytes{[]byte("hello"), []byte("world")}},
}
rng := rand.New(rand.NewSource(1234))
// add some randomized cases
for i := 0; i < 30; i++ {
n := rng.Intn(300)
elems := make([]hexutil.Bytes, n)
for i := range elems {
length := 1 + rng.Intn(300) // empty items not allowed
data := make([]byte, length)
rng.Read(data[:])
elems[i] = data
}
testCases = append(testCases, trieCase{name: fmt.Sprintf("rand_%d", i), elements: elems})
}
for _, tc := range testCases {
t.Run(tc.name, tc.run)
}
}
...@@ -9,6 +9,7 @@ import ( ...@@ -9,6 +9,7 @@ import (
"time" "time"
"github.com/ethereum-optimism/optimism/op-node/chaincfg" "github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
cldr "github.com/ethereum-optimism/optimism/op-program/client/driver" cldr "github.com/ethereum-optimism/optimism/op-program/client/driver"
"github.com/ethereum-optimism/optimism/op-program/host/config" "github.com/ethereum-optimism/optimism/op-program/host/config"
...@@ -41,6 +42,10 @@ var VersionWithMeta = func() string { ...@@ -41,6 +42,10 @@ var VersionWithMeta = func() string {
return v return v
}() }()
var (
ErrClaimNotValid = errors.New("invalid claim")
)
func main() { func main() {
args := os.Args args := os.Args
err := run(args, FaultProofProgram) err := run(args, FaultProofProgram)
...@@ -124,6 +129,9 @@ func FaultProofProgram(logger log.Logger, cfg *config.Config) error { ...@@ -124,6 +129,9 @@ func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
return err return err
} }
} }
logger.Info("Derivation complete", "head", d.SafeHead()) claim := cfg.L2Claim
if !d.ValidateClaim(eth.Bytes32(claim)) {
return ErrClaimNotValid
}
return nil return nil
} }
...@@ -13,7 +13,10 @@ import ( ...@@ -13,7 +13,10 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
var l2HeadValue = "0x6303578b1fa9480389c51bbcef6fe045bb877da39740819e9eb5f36f94949bd0" // Use HexToHash(...).Hex() to ensure the strings are the correct length for a hash
var l1HeadValue = common.HexToHash("0x111111").Hex()
var l2HeadValue = common.HexToHash("0x222222").Hex()
var l2ClaimValue = common.HexToHash("0x333333").Hex()
func TestLogLevel(t *testing.T) { func TestLogLevel(t *testing.T) {
t.Run("RejectInvalid", func(t *testing.T) { t.Run("RejectInvalid", func(t *testing.T) {
...@@ -32,7 +35,13 @@ func TestLogLevel(t *testing.T) { ...@@ -32,7 +35,13 @@ func TestLogLevel(t *testing.T) {
func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) { func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs()) cfg := configForArgs(t, addRequiredArgs())
require.Equal(t, config.NewConfig(&chaincfg.Goerli, "genesis.json", common.HexToHash(l2HeadValue)), cfg) defaultCfg := config.NewConfig(
&chaincfg.Goerli,
"genesis.json",
common.HexToHash(l1HeadValue),
common.HexToHash(l2HeadValue),
common.HexToHash(l2ClaimValue))
require.Equal(t, defaultCfg, cfg)
} }
func TestNetwork(t *testing.T) { func TestNetwork(t *testing.T) {
...@@ -102,6 +111,21 @@ func TestL2Head(t *testing.T) { ...@@ -102,6 +111,21 @@ func TestL2Head(t *testing.T) {
}) })
} }
func TestL1Head(t *testing.T) {
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag l1.head is required", addRequiredArgsExcept("--l1.head"))
})
t.Run("Valid", func(t *testing.T) {
cfg := configForArgs(t, replaceRequiredArg("--l1.head", l1HeadValue))
require.Equal(t, common.HexToHash(l1HeadValue), cfg.L1Head)
})
t.Run("Invalid", func(t *testing.T) {
verifyArgsInvalid(t, config.ErrInvalidL1Head.Error(), replaceRequiredArg("--l1.head", "something"))
})
}
func TestL1(t *testing.T) { func TestL1(t *testing.T) {
expected := "https://example.com:8545" expected := "https://example.com:8545"
cfg := configForArgs(t, addRequiredArgs("--l1", expected)) cfg := configForArgs(t, addRequiredArgs("--l1", expected))
...@@ -149,10 +173,26 @@ func TestL1RPCKind(t *testing.T) { ...@@ -149,10 +173,26 @@ func TestL1RPCKind(t *testing.T) {
// Offline support will be added later, but for now it just bails out with an error // Offline support will be added later, but for now it just bails out with an error
func TestOfflineModeNotSupported(t *testing.T) { func TestOfflineModeNotSupported(t *testing.T) {
logger := log.New() logger := log.New()
err := FaultProofProgram(logger, config.NewConfig(&chaincfg.Goerli, "genesis.json", common.HexToHash(l2HeadValue))) cfg := config.NewConfig(&chaincfg.Goerli, "genesis.json", common.HexToHash(l1HeadValue), common.HexToHash(l2HeadValue), common.HexToHash(l2ClaimValue))
err := FaultProofProgram(logger, cfg)
require.ErrorContains(t, err, "offline mode not supported") require.ErrorContains(t, err, "offline mode not supported")
} }
func TestL2Claim(t *testing.T) {
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag l2.claim is required", addRequiredArgsExcept("--l2.claim"))
})
t.Run("Valid", func(t *testing.T) {
cfg := configForArgs(t, replaceRequiredArg("--l2.claim", l2ClaimValue))
require.EqualValues(t, common.HexToHash(l2ClaimValue), cfg.L2Claim)
})
t.Run("Invalid", func(t *testing.T) {
verifyArgsInvalid(t, config.ErrInvalidL2Claim.Error(), replaceRequiredArg("--l2.claim", "something"))
})
}
func verifyArgsInvalid(t *testing.T, messageContains string, cliArgs []string) { func verifyArgsInvalid(t *testing.T, messageContains string, cliArgs []string) {
_, _, err := runWithArgs(cliArgs) _, _, err := runWithArgs(cliArgs)
require.ErrorContains(t, err, messageContains) require.ErrorContains(t, err, messageContains)
...@@ -199,8 +239,10 @@ func replaceRequiredArg(name string, value string) []string { ...@@ -199,8 +239,10 @@ func replaceRequiredArg(name string, value string) []string {
func requiredArgs() map[string]string { func requiredArgs() map[string]string {
return map[string]string{ return map[string]string{
"--network": "goerli", "--network": "goerli",
"--l2.genesis": "genesis.json", "--l1.head": l1HeadValue,
"--l2.head": l2HeadValue, "--l2.head": l2HeadValue,
"--l2.claim": l2ClaimValue,
"--l2.genesis": "genesis.json",
} }
} }
......
...@@ -14,15 +14,19 @@ import ( ...@@ -14,15 +14,19 @@ import (
var ( var (
ErrMissingRollupConfig = errors.New("missing rollup config") ErrMissingRollupConfig = errors.New("missing rollup config")
ErrMissingL2Genesis = errors.New("missing l2 genesis") ErrMissingL2Genesis = errors.New("missing l2 genesis")
ErrInvalidL1Head = errors.New("invalid l1 head")
ErrInvalidL2Head = errors.New("invalid l2 head") ErrInvalidL2Head = errors.New("invalid l2 head")
ErrL1AndL2Inconsistent = errors.New("l1 and l2 options must be specified together or both omitted") ErrL1AndL2Inconsistent = errors.New("l1 and l2 options must be specified together or both omitted")
ErrInvalidL2Claim = errors.New("invalid l2 claim")
) )
type Config struct { type Config struct {
Rollup *rollup.Config Rollup *rollup.Config
L2URL string L2URL string
L2GenesisPath string L2GenesisPath string
L1Head common.Hash
L2Head common.Hash L2Head common.Hash
L2Claim common.Hash
L1URL string L1URL string
L1TrustRPC bool L1TrustRPC bool
L1RPCKind sources.RPCProviderKind L1RPCKind sources.RPCProviderKind
...@@ -35,12 +39,18 @@ func (c *Config) Check() error { ...@@ -35,12 +39,18 @@ func (c *Config) Check() error {
if err := c.Rollup.Check(); err != nil { if err := c.Rollup.Check(); err != nil {
return err return err
} }
if c.L2GenesisPath == "" { if c.L1Head == (common.Hash{}) {
return ErrMissingL2Genesis return ErrInvalidL1Head
} }
if c.L2Head == (common.Hash{}) { if c.L2Head == (common.Hash{}) {
return ErrInvalidL2Head return ErrInvalidL2Head
} }
if c.L2Claim == (common.Hash{}) {
return ErrInvalidL2Claim
}
if c.L2GenesisPath == "" {
return ErrMissingL2Genesis
}
if (c.L1URL != "") != (c.L2URL != "") { if (c.L1URL != "") != (c.L2URL != "") {
return ErrL1AndL2Inconsistent return ErrL1AndL2Inconsistent
} }
...@@ -52,11 +62,13 @@ func (c *Config) FetchingEnabled() bool { ...@@ -52,11 +62,13 @@ func (c *Config) FetchingEnabled() bool {
} }
// NewConfig creates a Config with all optional values set to the CLI default value // NewConfig creates a Config with all optional values set to the CLI default value
func NewConfig(rollupCfg *rollup.Config, l2GenesisPath string, l2Head common.Hash) *Config { func NewConfig(rollupCfg *rollup.Config, l2GenesisPath string, l1Head common.Hash, l2Head common.Hash, l2Claim common.Hash) *Config {
return &Config{ return &Config{
Rollup: rollupCfg, Rollup: rollupCfg,
L2GenesisPath: l2GenesisPath, L2GenesisPath: l2GenesisPath,
L1Head: l1Head,
L2Head: l2Head, L2Head: l2Head,
L2Claim: l2Claim,
L1RPCKind: sources.RPCKindBasic, L1RPCKind: sources.RPCKindBasic,
} }
} }
...@@ -73,11 +85,21 @@ func NewConfigFromCLI(ctx *cli.Context) (*Config, error) { ...@@ -73,11 +85,21 @@ func NewConfigFromCLI(ctx *cli.Context) (*Config, error) {
if l2Head == (common.Hash{}) { if l2Head == (common.Hash{}) {
return nil, ErrInvalidL2Head return nil, ErrInvalidL2Head
} }
l2Claim := common.HexToHash(ctx.GlobalString(flags.L2Claim.Name))
if l2Claim == (common.Hash{}) {
return nil, ErrInvalidL2Claim
}
l1Head := common.HexToHash(ctx.GlobalString(flags.L1Head.Name))
if l1Head == (common.Hash{}) {
return nil, ErrInvalidL1Head
}
return &Config{ return &Config{
Rollup: rollupCfg, Rollup: rollupCfg,
L2URL: ctx.GlobalString(flags.L2NodeAddr.Name), L2URL: ctx.GlobalString(flags.L2NodeAddr.Name),
L2GenesisPath: ctx.GlobalString(flags.L2GenesisPath.Name), L2GenesisPath: ctx.GlobalString(flags.L2GenesisPath.Name),
L2Head: l2Head, L2Head: l2Head,
L2Claim: l2Claim,
L1Head: l1Head,
L1URL: ctx.GlobalString(flags.L1NodeAddr.Name), L1URL: ctx.GlobalString(flags.L1NodeAddr.Name),
L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name), L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name),
L1RPCKind: sources.RPCProviderKind(ctx.GlobalString(flags.L1RPCProviderKind.Name)), L1RPCKind: sources.RPCProviderKind(ctx.GlobalString(flags.L1RPCProviderKind.Name)),
......
...@@ -11,66 +11,78 @@ import ( ...@@ -11,66 +11,78 @@ import (
var validRollupConfig = &chaincfg.Goerli var validRollupConfig = &chaincfg.Goerli
var validL2GenesisPath = "genesis.json" var validL2GenesisPath = "genesis.json"
var validL2Head = common.HexToHash("0x6303578b1fa9480389c51bbcef6fe045bb877da39740819e9eb5f36f94949bd0") var validL1Head = common.Hash{0xaa}
var validL2Head = common.Hash{0xbb}
var validL2Claim = common.Hash{0xcc}
func TestDefaultConfigIsValid(t *testing.T) { func TestDefaultConfigIsValid(t *testing.T) {
err := NewConfig(validRollupConfig, validL2GenesisPath, validL2Head).Check() err := validConfig().Check()
require.NoError(t, err) require.NoError(t, err)
} }
func TestRollupConfig(t *testing.T) { func TestRollupConfig(t *testing.T) {
t.Run("Required", func(t *testing.T) { t.Run("Required", func(t *testing.T) {
err := NewConfig(nil, validL2GenesisPath, validL2Head).Check() config := validConfig()
config.Rollup = nil
err := config.Check()
require.ErrorIs(t, err, ErrMissingRollupConfig) require.ErrorIs(t, err, ErrMissingRollupConfig)
}) })
t.Run("Invalid", func(t *testing.T) { t.Run("Invalid", func(t *testing.T) {
err := NewConfig(&rollup.Config{}, validL2GenesisPath, validL2Head).Check() config := validConfig()
config.Rollup = &rollup.Config{}
err := config.Check()
require.ErrorIs(t, err, rollup.ErrBlockTimeZero) require.ErrorIs(t, err, rollup.ErrBlockTimeZero)
}) })
} }
func TestL2Genesis(t *testing.T) { func TestL1HeadRequired(t *testing.T) {
t.Run("Required", func(t *testing.T) { config := validConfig()
err := NewConfig(validRollupConfig, "", validL2Head).Check() config.L1Head = common.Hash{}
require.ErrorIs(t, err, ErrMissingL2Genesis) err := config.Check()
}) require.ErrorIs(t, err, ErrInvalidL1Head)
}
t.Run("Valid", func(t *testing.T) { func TestL2HeadRequired(t *testing.T) {
err := NewConfig(validRollupConfig, validL2GenesisPath, validL2Head).Check() config := validConfig()
require.NoError(t, err) config.L2Head = common.Hash{}
}) err := config.Check()
require.ErrorIs(t, err, ErrInvalidL2Head)
} }
func TestL2Head(t *testing.T) { func TestL2ClaimRequired(t *testing.T) {
t.Run("Required", func(t *testing.T) { config := validConfig()
err := NewConfig(validRollupConfig, validL2GenesisPath, common.Hash{}).Check() config.L2Claim = common.Hash{}
require.ErrorIs(t, err, ErrInvalidL2Head) err := config.Check()
}) require.ErrorIs(t, err, ErrInvalidL2Claim)
}
t.Run("Valid", func(t *testing.T) { func TestL2GenesisRequired(t *testing.T) {
err := NewConfig(validRollupConfig, validL2GenesisPath, validL2Head).Check() config := validConfig()
require.NoError(t, err) config.L2GenesisPath = ""
}) err := config.Check()
require.ErrorIs(t, err, ErrMissingL2Genesis)
} }
func TestFetchingArgConsistency(t *testing.T) { func TestFetchingArgConsistency(t *testing.T) {
t.Run("RequireL2WhenL1Set", func(t *testing.T) { t.Run("RequireL2WhenL1Set", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head) cfg := validConfig()
cfg.L1URL = "https://example.com:1234" cfg.L1URL = "https://example.com:1234"
require.ErrorIs(t, cfg.Check(), ErrL1AndL2Inconsistent) require.ErrorIs(t, cfg.Check(), ErrL1AndL2Inconsistent)
}) })
t.Run("RequireL1WhenL2Set", func(t *testing.T) { t.Run("RequireL1WhenL2Set", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head) cfg := validConfig()
cfg.L2URL = "https://example.com:1234" cfg.L2URL = "https://example.com:1234"
require.ErrorIs(t, cfg.Check(), ErrL1AndL2Inconsistent) require.ErrorIs(t, cfg.Check(), ErrL1AndL2Inconsistent)
}) })
t.Run("AllowNeitherSet", func(t *testing.T) { t.Run("AllowNeitherSet", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head) cfg := validConfig()
cfg.L1URL = ""
cfg.L2URL = ""
require.NoError(t, cfg.Check()) require.NoError(t, cfg.Check())
}) })
t.Run("AllowBothSet", func(t *testing.T) { t.Run("AllowBothSet", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head) cfg := validConfig()
cfg.L1URL = "https://example.com:1234" cfg.L1URL = "https://example.com:1234"
cfg.L2URL = "https://example.com:4678" cfg.L2URL = "https://example.com:4678"
require.NoError(t, cfg.Check()) require.NoError(t, cfg.Check())
...@@ -79,32 +91,36 @@ func TestFetchingArgConsistency(t *testing.T) { ...@@ -79,32 +91,36 @@ func TestFetchingArgConsistency(t *testing.T) {
func TestFetchingEnabled(t *testing.T) { func TestFetchingEnabled(t *testing.T) {
t.Run("FetchingNotEnabledWhenNoFetcherUrlsSpecified", func(t *testing.T) { t.Run("FetchingNotEnabledWhenNoFetcherUrlsSpecified", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head) cfg := validConfig()
require.False(t, cfg.FetchingEnabled(), "Should not enable fetching when node URL not supplied") require.False(t, cfg.FetchingEnabled(), "Should not enable fetching when node URL not supplied")
}) })
t.Run("FetchingEnabledWhenFetcherUrlsSpecified", func(t *testing.T) { t.Run("FetchingEnabledWhenFetcherUrlsSpecified", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head) cfg := validConfig()
cfg.L2URL = "https://example.com:1234" cfg.L2URL = "https://example.com:1234"
require.False(t, cfg.FetchingEnabled(), "Should not enable fetching when node URL not supplied") require.False(t, cfg.FetchingEnabled(), "Should not enable fetching when node URL not supplied")
}) })
t.Run("FetchingNotEnabledWhenNoL1UrlSpecified", func(t *testing.T) { t.Run("FetchingNotEnabledWhenNoL1UrlSpecified", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head) cfg := validConfig()
cfg.L2URL = "https://example.com:1234" cfg.L2URL = "https://example.com:1234"
require.False(t, cfg.FetchingEnabled(), "Should not enable L1 fetching when L1 node URL not supplied") require.False(t, cfg.FetchingEnabled(), "Should not enable L1 fetching when L1 node URL not supplied")
}) })
t.Run("FetchingNotEnabledWhenNoL2UrlSpecified", func(t *testing.T) { t.Run("FetchingNotEnabledWhenNoL2UrlSpecified", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head) cfg := validConfig()
cfg.L1URL = "https://example.com:1234" cfg.L1URL = "https://example.com:1234"
require.False(t, cfg.FetchingEnabled(), "Should not enable L2 fetching when L2 node URL not supplied") require.False(t, cfg.FetchingEnabled(), "Should not enable L2 fetching when L2 node URL not supplied")
}) })
t.Run("FetchingEnabledWhenBothFetcherUrlsSpecified", func(t *testing.T) { t.Run("FetchingEnabledWhenBothFetcherUrlsSpecified", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head) cfg := validConfig()
cfg.L1URL = "https://example.com:1234" cfg.L1URL = "https://example.com:1234"
cfg.L2URL = "https://example.com:5678" cfg.L2URL = "https://example.com:5678"
require.True(t, cfg.FetchingEnabled(), "Should enable fetching when node URL supplied") require.True(t, cfg.FetchingEnabled(), "Should enable fetching when node URL supplied")
}) })
} }
func validConfig() *Config {
return NewConfig(validRollupConfig, validL2GenesisPath, validL1Head, validL2Head, validL2Claim)
}
...@@ -31,16 +31,26 @@ var ( ...@@ -31,16 +31,26 @@ var (
Usage: "Address of L2 JSON-RPC endpoint to use (eth and debug namespace required)", Usage: "Address of L2 JSON-RPC endpoint to use (eth and debug namespace required)",
EnvVar: service.PrefixEnvVar(envVarPrefix, "L2_RPC"), EnvVar: service.PrefixEnvVar(envVarPrefix, "L2_RPC"),
} }
L2GenesisPath = cli.StringFlag{ L1Head = cli.StringFlag{
Name: "l2.genesis", Name: "l1.head",
Usage: "Path to the op-geth genesis file", Usage: "Hash of the L1 head block. Derivation stops after this block is processed.",
EnvVar: service.PrefixEnvVar(envVarPrefix, "L2_GENESIS"), EnvVar: service.PrefixEnvVar(envVarPrefix, "L1_HEAD"),
} }
L2Head = cli.StringFlag{ L2Head = cli.StringFlag{
Name: "l2.head", Name: "l2.head",
Usage: "Hash of the agreed L2 block to start derivation from", Usage: "Hash of the agreed L2 block to start derivation from",
EnvVar: service.PrefixEnvVar(envVarPrefix, "L2_HEAD"), EnvVar: service.PrefixEnvVar(envVarPrefix, "L2_HEAD"),
} }
L2Claim = cli.StringFlag{
Name: "l2.claim",
Usage: "Claimed L2 output root to validate",
EnvVar: service.PrefixEnvVar(envVarPrefix, "L2_CLAIM"),
}
L2GenesisPath = cli.StringFlag{
Name: "l2.genesis",
Usage: "Path to the op-geth genesis file",
EnvVar: service.PrefixEnvVar(envVarPrefix, "L2_GENESIS"),
}
L1NodeAddr = cli.StringFlag{ L1NodeAddr = cli.StringFlag{
Name: "l1", Name: "l1",
Usage: "Address of L1 JSON-RPC endpoint to use (eth namespace required)", Usage: "Address of L1 JSON-RPC endpoint to use (eth namespace required)",
...@@ -66,12 +76,16 @@ var ( ...@@ -66,12 +76,16 @@ var (
// Flags contains the list of configuration options available to the binary. // Flags contains the list of configuration options available to the binary.
var Flags []cli.Flag var Flags []cli.Flag
var requiredFlags = []cli.Flag{
L1Head,
L2Head,
L2Claim,
L2GenesisPath,
}
var programFlags = []cli.Flag{ var programFlags = []cli.Flag{
RollupConfig, RollupConfig,
Network, Network,
L2NodeAddr, L2NodeAddr,
L2GenesisPath,
L2Head,
L1NodeAddr, L1NodeAddr,
L1TrustRPC, L1TrustRPC,
L1RPCProviderKind, L1RPCProviderKind,
...@@ -79,6 +93,7 @@ var programFlags = []cli.Flag{ ...@@ -79,6 +93,7 @@ var programFlags = []cli.Flag{
func init() { func init() {
Flags = append(Flags, oplog.CLIFlags(envVarPrefix)...) Flags = append(Flags, oplog.CLIFlags(envVarPrefix)...)
Flags = append(Flags, requiredFlags...)
Flags = append(Flags, programFlags...) Flags = append(Flags, programFlags...)
} }
...@@ -91,11 +106,10 @@ func CheckRequired(ctx *cli.Context) error { ...@@ -91,11 +106,10 @@ func CheckRequired(ctx *cli.Context) error {
if rollupConfig != "" && network != "" { if rollupConfig != "" && network != "" {
return fmt.Errorf("cannot specify both %s and %s", RollupConfig.Name, Network.Name) return fmt.Errorf("cannot specify both %s and %s", RollupConfig.Name, Network.Name)
} }
if ctx.GlobalString(L2GenesisPath.Name) == "" { for _, flag := range requiredFlags {
return fmt.Errorf("flag %s is required", L2GenesisPath.Name) if ctx.GlobalString(flag.GetName()) == "" {
} return fmt.Errorf("flag %s is required", flag.GetName())
if ctx.GlobalString(L2Head.Name) == "" { }
return fmt.Errorf("flag %s is required", L2Head.Name)
} }
return nil return nil
} }
package kvstore
import (
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"path"
"sync"
"github.com/ethereum/go-ethereum/common"
)
// read/write mode for user/group/other, not executable.
const diskPermission = 0666
// DiskKV is a disk-backed key-value store: each key-value pair is a .txt file named after the hex-encoded key,
// with the hex-encoded value as its content.
// DiskKV is safe for concurrent use by a single DiskKV instance.
// DiskKV is not safe for concurrent use between different DiskKV instances over the same directory:
// a Put must complete before another DiskKV instance's Get can retrieve the value.
type DiskKV struct {
sync.RWMutex
path string
}
// NewDiskKV creates a DiskKV that puts/gets pre-images as files in the given directory path.
// The path must exist; subsequent Put/Get calls will error if it does not.
func NewDiskKV(path string) *DiskKV {
return &DiskKV{path: path}
}
func (d *DiskKV) pathKey(k common.Hash) string {
return path.Join(d.path, k.String()+".txt")
}
func (d *DiskKV) Put(k common.Hash, v []byte) error {
d.Lock()
defer d.Unlock()
f, err := os.OpenFile(d.pathKey(k), os.O_WRONLY|os.O_CREATE|os.O_EXCL|os.O_TRUNC, diskPermission)
if err != nil {
if errors.Is(err, os.ErrExist) {
return ErrAlreadyExists
}
return fmt.Errorf("failed to open new pre-image file %s: %w", k, err)
}
if _, err := f.Write([]byte(hex.EncodeToString(v))); err != nil {
_ = f.Close()
return fmt.Errorf("failed to write pre-image %s to disk: %w", k, err)
}
if err := f.Close(); err != nil {
return fmt.Errorf("failed to close pre-image %s file: %w", k, err)
}
return nil
}
func (d *DiskKV) Get(k common.Hash) ([]byte, error) {
d.RLock()
defer d.RUnlock()
f, err := os.OpenFile(d.pathKey(k), os.O_RDONLY, diskPermission)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, ErrNotFound
}
return nil, fmt.Errorf("failed to open pre-image file %s: %w", k, err)
}
defer f.Close() // fine to ignore closing error here
dat, err := io.ReadAll(f)
if err != nil {
return nil, fmt.Errorf("failed to read pre-image from file %s: %w", k, err)
}
return hex.DecodeString(string(dat))
}
var _ KV = (*DiskKV)(nil)
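As a rough illustration of the on-disk layout implied by pathKey and Put (a sketch under those assumptions, not part of this change): each value is a file named after the 0x-prefixed hash with a .txt suffix, containing the hex-encoded value. Writing such a file by hand should make it readable through Get; dir is an assumed pre-existing directory and the os, path, and encoding/hex imports are assumed.
// Illustrative only: pre-populate a pre-image in the same layout DiskKV uses.
key := common.HexToHash("0xaa")
value := []byte("hello world")
name := path.Join(dir, key.String()+".txt") // e.g. <dir>/0x00...aa.txt
if err := os.WriteFile(name, []byte(hex.EncodeToString(value)), 0666); err != nil {
	panic(err)
}
got, err := NewDiskKV(dir).Get(key) // got should equal value, err should be nil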
package kvstore
import "testing"
func TestDiskKV(t *testing.T) {
tmp := t.TempDir() // automatically removed by testing cleanup
kv := NewDiskKV(tmp)
kvTest(t, kv)
}
package kvstore
import (
"errors"
"github.com/ethereum/go-ethereum/common"
)
// ErrNotFound is returned when a pre-image cannot be found in the KV store.
var ErrNotFound = errors.New("not found")
// ErrAlreadyExists is returned when a pre-image already exists in the KV store.
var ErrAlreadyExists = errors.New("already exists")
// KV is a Key-Value store interface for pre-image data.
type KV interface {
// Put puts the pre-image value v in the key-value store with key k.
// It returns ErrAlreadyExists when the key already exists.
// KV store implementations may return additional errors specific to the KV storage.
Put(k common.Hash, v []byte) error
// Get retrieves the pre-image with key k from the key-value store.
// It returns ErrNotFound when the pre-image cannot be found.
// KV store implementations may return additional errors specific to the KV storage.
Get(k common.Hash) ([]byte, error)
}
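The sentinel errors above are intended to be matched with errors.Is. A hypothetical helper (not part of the package) that treats duplicate writes as a no-op while surfacing real failures might look like:
// putIfAbsent is illustrative only: repeated stores of the same pre-image are tolerated,
// while any other failure from the underlying KV store is returned to the caller.
func putIfAbsent(kv KV, k common.Hash, v []byte) error {
	if err := kv.Put(k, v); err != nil && !errors.Is(err, ErrAlreadyExists) {
		return err
	}
	return nil
}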
package kvstore
import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
func kvTest(t *testing.T, kv KV) {
t.Run("roundtrip", func(t *testing.T) {
t.Parallel()
_, err := kv.Get(common.Hash{0xaa})
require.Equal(t, err, ErrNotFound, "file (in new tmp dir) does not exist yet")
require.NoError(t, kv.Put(common.Hash{0xaa}, []byte("hello world")))
dat, err := kv.Get(common.Hash{0xaa})
require.NoError(t, err, "pre-image must exist now")
require.Equal(t, "hello world", string(dat), "pre-image must match")
})
t.Run("empty pre-image", func(t *testing.T) {
t.Parallel()
require.NoError(t, kv.Put(common.Hash{0xbb}, []byte{}))
dat, err := kv.Get(common.Hash{0xbb})
require.NoError(t, err, "pre-image must exist now")
require.Zero(t, len(dat), "pre-image must be empty")
})
t.Run("zero pre-image key", func(t *testing.T) {
t.Parallel()
// covers giving a pre-image the special all-zero key; if it were a real hash we wouldn't know the pre-image.
require.NoError(t, kv.Put(common.Hash{}, []byte("hello")))
dat, err := kv.Get(common.Hash{})
require.NoError(t, err, "pre-image must exist now")
require.Equal(t, "hello", string(dat), "pre-image must match")
})
t.Run("non-string value", func(t *testing.T) {
t.Parallel()
// store raw, non-text bytes to ensure values are treated as opaque data rather than strings.
require.NoError(t, kv.Put(common.Hash{0xcc}, []byte{4, 2}))
dat, err := kv.Get(common.Hash{0xcc})
require.NoError(t, err, "pre-image must exist now")
require.Equal(t, []byte{4, 2}, dat, "pre-image must match")
})
t.Run("not overwriting pre-image", func(t *testing.T) {
t.Parallel()
require.NoError(t, kv.Put(common.Hash{0xdd}, []byte{4, 2}))
require.ErrorIs(t, kv.Put(common.Hash{0xdd}, []byte{4, 2}), ErrAlreadyExists)
})
}
package kvstore
import (
"sync"
"github.com/ethereum/go-ethereum/common"
)
// MemKV implements the KV store interface in memory, backed by a regular Go map.
// This should only be used in testing, as large programs may require more pre-image data than available memory.
// MemKV is safe for concurrent use.
type MemKV struct {
sync.RWMutex
m map[common.Hash][]byte
}
var _ KV = (*MemKV)(nil)
func NewMemKV() *MemKV {
return &MemKV{m: make(map[common.Hash][]byte)}
}
func (m *MemKV) Put(k common.Hash, v []byte) error {
m.Lock()
defer m.Unlock()
if _, ok := m.m[k]; ok {
return ErrAlreadyExists
}
m.m[k] = v
return nil
}
func (m *MemKV) Get(k common.Hash) ([]byte, error) {
m.RLock()
defer m.RUnlock()
v, ok := m.m[k]
if !ok {
return nil, ErrNotFound
}
return v, nil
}
package kvstore
import "testing"
func TestMemKV(t *testing.T) {
kv := NewMemKV()
kvTest(t, kv)
}
package l1
import (
"context"
"fmt"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
type Source interface {
InfoByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, error)
InfoAndTxsByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Transactions, error)
FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error)
}
type FetchingL1Oracle struct {
ctx context.Context
logger log.Logger
source Source
}
func NewFetchingL1Oracle(ctx context.Context, logger log.Logger, source Source) *FetchingL1Oracle {
return &FetchingL1Oracle{
ctx: ctx,
logger: logger,
source: source,
}
}
func (o *FetchingL1Oracle) HeaderByBlockHash(blockHash common.Hash) eth.BlockInfo {
o.logger.Trace("HeaderByBlockHash", "hash", blockHash)
info, err := o.source.InfoByHash(o.ctx, blockHash)
if err != nil {
panic(fmt.Errorf("retrieve block %s: %w", blockHash, err))
}
if info == nil {
panic(fmt.Errorf("unknown block: %s", blockHash))
}
return info
}
func (o *FetchingL1Oracle) TransactionsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Transactions) {
o.logger.Trace("TransactionsByBlockHash", "hash", blockHash)
info, txs, err := o.source.InfoAndTxsByHash(o.ctx, blockHash)
if err != nil {
panic(fmt.Errorf("retrieve transactions for block %s: %w", blockHash, err))
}
if info == nil || txs == nil {
panic(fmt.Errorf("unknown block: %s", blockHash))
}
return info, txs
}
func (o *FetchingL1Oracle) ReceiptsByBlockHash(blockHash common.Hash) (eth.BlockInfo, types.Receipts) {
o.logger.Trace("ReceiptsByBlockHash", "hash", blockHash)
info, rcpts, err := o.source.FetchReceipts(o.ctx, blockHash)
if err != nil {
panic(fmt.Errorf("retrieve receipts for block %s: %w", blockHash, err))
}
if info == nil || rcpts == nil {
panic(fmt.Errorf("unknown block: %s", blockHash))
}
return info, rcpts
}
package l1
import (
"context"
"errors"
"fmt"
"testing"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
cll1 "github.com/ethereum-optimism/optimism/op-program/client/l1"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
// Needs to implement the Oracle interface
var _ cll1.Oracle = (*FetchingL1Oracle)(nil)
// Want to be able to use an L1Client as the data source
var _ Source = (*sources.L1Client)(nil)
func TestHeaderByHash(t *testing.T) {
t.Run("Success", func(t *testing.T) {
expected := &sources.HeaderInfo{}
source := &stubSource{nextInfo: expected}
oracle := newFetchingOracle(t, source)
actual := oracle.HeaderByBlockHash(expected.Hash())
require.Equal(t, expected, actual)
})
t.Run("UnknownBlock", func(t *testing.T) {
oracle := newFetchingOracle(t, &stubSource{})
hash := common.HexToHash("0x4455")
require.PanicsWithError(t, fmt.Errorf("unknown block: %s", hash).Error(), func() {
oracle.HeaderByBlockHash(hash)
})
})
t.Run("Error", func(t *testing.T) {
err := errors.New("kaboom")
source := &stubSource{nextErr: err}
oracle := newFetchingOracle(t, source)
hash := common.HexToHash("0x8888")
require.PanicsWithError(t, fmt.Errorf("retrieve block %s: %w", hash, err).Error(), func() {
oracle.HeaderByBlockHash(hash)
})
})
}
func TestTransactionsByHash(t *testing.T) {
t.Run("Success", func(t *testing.T) {
expectedInfo := &sources.HeaderInfo{}
expectedTxs := types.Transactions{
&types.Transaction{},
}
source := &stubSource{nextInfo: expectedInfo, nextTxs: expectedTxs}
oracle := newFetchingOracle(t, source)
info, txs := oracle.TransactionsByBlockHash(expectedInfo.Hash())
require.Equal(t, expectedInfo, info)
require.Equal(t, expectedTxs, txs)
})
t.Run("UnknownBlock_NoInfo", func(t *testing.T) {
oracle := newFetchingOracle(t, &stubSource{})
hash := common.HexToHash("0x4455")
require.PanicsWithError(t, fmt.Errorf("unknown block: %s", hash).Error(), func() {
oracle.TransactionsByBlockHash(hash)
})
})
t.Run("UnknownBlock_NoTxs", func(t *testing.T) {
oracle := newFetchingOracle(t, &stubSource{nextInfo: &sources.HeaderInfo{}})
hash := common.HexToHash("0x4455")
require.PanicsWithError(t, fmt.Errorf("unknown block: %s", hash).Error(), func() {
oracle.TransactionsByBlockHash(hash)
})
})
t.Run("Error", func(t *testing.T) {
err := errors.New("kaboom")
source := &stubSource{nextErr: err}
oracle := newFetchingOracle(t, source)
hash := common.HexToHash("0x8888")
require.PanicsWithError(t, fmt.Errorf("retrieve transactions for block %s: %w", hash, err).Error(), func() {
oracle.TransactionsByBlockHash(hash)
})
})
}
func TestReceiptsByHash(t *testing.T) {
t.Run("Success", func(t *testing.T) {
expectedInfo := &sources.HeaderInfo{}
expectedRcpts := types.Receipts{
&types.Receipt{},
}
source := &stubSource{nextInfo: expectedInfo, nextRcpts: expectedRcpts}
oracle := newFetchingOracle(t, source)
info, rcpts := oracle.ReceiptsByBlockHash(expectedInfo.Hash())
require.Equal(t, expectedInfo, info)
require.Equal(t, expectedRcpts, rcpts)
})
t.Run("UnknownBlock_NoInfo", func(t *testing.T) {
oracle := newFetchingOracle(t, &stubSource{})
hash := common.HexToHash("0x4455")
require.PanicsWithError(t, fmt.Errorf("unknown block: %s", hash).Error(), func() {
oracle.ReceiptsByBlockHash(hash)
})
})
t.Run("UnknownBlock_NoTxs", func(t *testing.T) {
oracle := newFetchingOracle(t, &stubSource{nextInfo: &sources.HeaderInfo{}})
hash := common.HexToHash("0x4455")
require.PanicsWithError(t, fmt.Errorf("unknown block: %s", hash).Error(), func() {
oracle.ReceiptsByBlockHash(hash)
})
})
t.Run("Error", func(t *testing.T) {
err := errors.New("kaboom")
source := &stubSource{nextErr: err}
oracle := newFetchingOracle(t, source)
hash := common.HexToHash("0x8888")
require.PanicsWithError(t, fmt.Errorf("retrieve receipts for block %s: %w", hash, err).Error(), func() {
oracle.ReceiptsByBlockHash(hash)
})
})
}
func newFetchingOracle(t *testing.T, source Source) *FetchingL1Oracle {
return NewFetchingL1Oracle(context.Background(), testlog.Logger(t, log.LvlDebug), source)
}
type stubSource struct {
nextInfo eth.BlockInfo
nextTxs types.Transactions
nextRcpts types.Receipts
nextErr error
}
func (s stubSource) InfoByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, error) {
return s.nextInfo, s.nextErr
}
func (s stubSource) InfoAndTxsByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Transactions, error) {
return s.nextInfo, s.nextTxs, s.nextErr
}
func (s stubSource) FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error) {
return s.nextInfo, s.nextRcpts, s.nextErr
}
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/client" "github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/sources" "github.com/ethereum-optimism/optimism/op-node/sources"
cll1 "github.com/ethereum-optimism/optimism/op-program/client/l1"
"github.com/ethereum-optimism/optimism/op-program/host/config" "github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
...@@ -16,5 +17,10 @@ func NewFetchingL1(ctx context.Context, logger log.Logger, cfg *config.Config) ( ...@@ -16,5 +17,10 @@ func NewFetchingL1(ctx context.Context, logger log.Logger, cfg *config.Config) (
return nil, err return nil, err
} }
return sources.NewL1Client(rpc, logger, nil, sources.L1ClientDefaultConfig(cfg.Rollup, cfg.L1TrustRPC, cfg.L1RPCKind)) source, err := sources.NewL1Client(rpc, logger, nil, sources.L1ClientDefaultConfig(cfg.Rollup, cfg.L1TrustRPC, cfg.L1RPCKind))
if err != nil {
return nil, err
}
oracle := cll1.NewCachingOracle(NewFetchingL1Oracle(ctx, logger, source))
return cll1.NewOracleL1Client(logger, oracle, cfg.L1Head), err
} }
...@@ -4,6 +4,7 @@ import ( ...@@ -4,6 +4,7 @@ import (
"context" "context"
"fmt" "fmt"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
...@@ -24,37 +25,50 @@ type CallContext interface { ...@@ -24,37 +25,50 @@ type CallContext interface {
type FetchingL2Oracle struct { type FetchingL2Oracle struct {
ctx context.Context ctx context.Context
logger log.Logger logger log.Logger
head eth.BlockInfo
blockSource BlockSource blockSource BlockSource
callContext CallContext callContext CallContext
} }
func NewFetchingL2Oracle(ctx context.Context, logger log.Logger, l2Url string) (*FetchingL2Oracle, error) { func NewFetchingL2Oracle(ctx context.Context, logger log.Logger, l2Url string, l2Head common.Hash) (*FetchingL2Oracle, error) {
rpcClient, err := rpc.Dial(l2Url) rpcClient, err := rpc.Dial(l2Url)
if err != nil { if err != nil {
return nil, err return nil, err
} }
ethClient := ethclient.NewClient(rpcClient) ethClient := ethclient.NewClient(rpcClient)
head, err := ethClient.HeaderByHash(ctx, l2Head)
if err != nil {
return nil, fmt.Errorf("retrieve l2 head %v: %w", l2Head, err)
}
return &FetchingL2Oracle{ return &FetchingL2Oracle{
ctx: ctx, ctx: ctx,
logger: logger, logger: logger,
head: eth.HeaderBlockInfo(head),
blockSource: ethClient, blockSource: ethClient,
callContext: rpcClient, callContext: rpcClient,
}, nil }, nil
} }
func (o *FetchingL2Oracle) NodeByHash(hash common.Hash) ([]byte, error) { func (o *FetchingL2Oracle) NodeByHash(hash common.Hash) []byte {
// MPT nodes are stored as the hash of the node (with no prefix) // MPT nodes are stored as the hash of the node (with no prefix)
return o.dbGet(hash.Bytes()) node, err := o.dbGet(hash.Bytes())
if err != nil {
panic(err)
}
return node
} }
func (o *FetchingL2Oracle) CodeByHash(hash common.Hash) ([]byte, error) { func (o *FetchingL2Oracle) CodeByHash(hash common.Hash) []byte {
// First try retrieving with the new code prefix // First try retrieving with the new code prefix
code, err := o.dbGet(append(rawdb.CodePrefix, hash.Bytes()...)) code, err := o.dbGet(append(rawdb.CodePrefix, hash.Bytes()...))
if err != nil { if err != nil {
// Fallback to the legacy un-prefixed version // Fallback to the legacy un-prefixed version
return o.dbGet(hash.Bytes()) code, err = o.dbGet(hash.Bytes())
if err != nil {
panic(err)
}
} }
return code, nil return code
} }
func (o *FetchingL2Oracle) dbGet(key []byte) ([]byte, error) { func (o *FetchingL2Oracle) dbGet(key []byte) ([]byte, error) {
...@@ -66,10 +80,13 @@ func (o *FetchingL2Oracle) dbGet(key []byte) ([]byte, error) { ...@@ -66,10 +80,13 @@ func (o *FetchingL2Oracle) dbGet(key []byte) ([]byte, error) {
return node, nil return node, nil
} }
func (o *FetchingL2Oracle) BlockByHash(blockHash common.Hash) (*types.Block, error) { func (o *FetchingL2Oracle) BlockByHash(blockHash common.Hash) *types.Block {
block, err := o.blockSource.BlockByHash(o.ctx, blockHash) block, err := o.blockSource.BlockByHash(o.ctx, blockHash)
if err != nil { if err != nil {
return nil, fmt.Errorf("fetch block %s: %w", blockHash.Hex(), err) panic(fmt.Errorf("fetch block %s: %w", blockHash.Hex(), err))
}
if block.NumberU64() > o.head.NumberU64() {
panic(fmt.Errorf("fetched block %v number %d above head block number %d", blockHash, block.NumberU64(), o.head.NumberU64()))
} }
return block, nil return block
} }
...@@ -5,12 +5,14 @@ import ( ...@@ -5,12 +5,14 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"math/big"
"math/rand" "math/rand"
"reflect" "reflect"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-node/testutils" "github.com/ethereum-optimism/optimism/op-node/testutils"
cll2 "github.com/ethereum-optimism/optimism/op-program/client/l2" cll2 "github.com/ethereum-optimism/optimism/op-program/client/l2"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
...@@ -23,35 +25,7 @@ import ( ...@@ -23,35 +25,7 @@ import (
// Require the fetching oracle to implement StateOracle // Require the fetching oracle to implement StateOracle
var _ cll2.StateOracle = (*FetchingL2Oracle)(nil) var _ cll2.StateOracle = (*FetchingL2Oracle)(nil)
type callContextRequest struct { const headBlockNumber = 1000
ctx context.Context
method string
args []interface{}
}
type stubCallContext struct {
nextResult any
nextErr error
requests []callContextRequest
}
func (c *stubCallContext) CallContext(ctx context.Context, result any, method string, args ...interface{}) error {
if result != nil && reflect.TypeOf(result).Kind() != reflect.Ptr {
return fmt.Errorf("call result parameter must be pointer or nil interface: %v", result)
}
c.requests = append(c.requests, callContextRequest{ctx: ctx, method: method, args: args})
if c.nextErr != nil {
return c.nextErr
}
res, err := json.Marshal(c.nextResult)
if err != nil {
return fmt.Errorf("json marshal: %w", err)
}
err = json.Unmarshal(res, result)
if err != nil {
return fmt.Errorf("json unmarshal: %w", err)
}
return nil
}
func TestNodeByHash(t *testing.T) { func TestNodeByHash(t *testing.T) {
rng := rand.New(rand.NewSource(1234)) rng := rand.New(rand.NewSource(1234))
...@@ -63,9 +37,9 @@ func TestNodeByHash(t *testing.T) { ...@@ -63,9 +37,9 @@ func TestNodeByHash(t *testing.T) {
} }
fetcher := newFetcher(nil, stub) fetcher := newFetcher(nil, stub)
node, err := fetcher.NodeByHash(hash) require.Panics(t, func() {
require.ErrorIs(t, err, stub.nextErr) fetcher.NodeByHash(hash)
require.Nil(t, node) })
}) })
t.Run("Success", func(t *testing.T) { t.Run("Success", func(t *testing.T) {
...@@ -75,8 +49,7 @@ func TestNodeByHash(t *testing.T) { ...@@ -75,8 +49,7 @@ func TestNodeByHash(t *testing.T) {
} }
fetcher := newFetcher(nil, stub) fetcher := newFetcher(nil, stub)
node, err := fetcher.NodeByHash(hash) node := fetcher.NodeByHash(hash)
require.NoError(t, err)
require.EqualValues(t, expected, node) require.EqualValues(t, expected, node)
}) })
...@@ -86,7 +59,7 @@ func TestNodeByHash(t *testing.T) { ...@@ -86,7 +59,7 @@ func TestNodeByHash(t *testing.T) {
} }
fetcher := newFetcher(nil, stub) fetcher := newFetcher(nil, stub)
_, _ = fetcher.NodeByHash(hash) fetcher.NodeByHash(hash)
require.Len(t, stub.requests, 1, "should make single request") require.Len(t, stub.requests, 1, "should make single request")
req := stub.requests[0] req := stub.requests[0]
require.Equal(t, "debug_dbGet", req.method) require.Equal(t, "debug_dbGet", req.method)
...@@ -104,9 +77,7 @@ func TestCodeByHash(t *testing.T) { ...@@ -104,9 +77,7 @@ func TestCodeByHash(t *testing.T) {
} }
fetcher := newFetcher(nil, stub) fetcher := newFetcher(nil, stub)
node, err := fetcher.CodeByHash(hash) require.Panics(t, func() { fetcher.CodeByHash(hash) })
require.ErrorIs(t, err, stub.nextErr)
require.Nil(t, node)
}) })
t.Run("Success", func(t *testing.T) { t.Run("Success", func(t *testing.T) {
...@@ -116,8 +87,7 @@ func TestCodeByHash(t *testing.T) { ...@@ -116,8 +87,7 @@ func TestCodeByHash(t *testing.T) {
} }
fetcher := newFetcher(nil, stub) fetcher := newFetcher(nil, stub)
node, err := fetcher.CodeByHash(hash) node := fetcher.CodeByHash(hash)
require.NoError(t, err)
require.EqualValues(t, expected, node) require.EqualValues(t, expected, node)
}) })
...@@ -127,7 +97,7 @@ func TestCodeByHash(t *testing.T) { ...@@ -127,7 +97,7 @@ func TestCodeByHash(t *testing.T) {
} }
fetcher := newFetcher(nil, stub) fetcher := newFetcher(nil, stub)
_, _ = fetcher.CodeByHash(hash) fetcher.CodeByHash(hash)
require.Len(t, stub.requests, 1, "should make single request") require.Len(t, stub.requests, 1, "should make single request")
req := stub.requests[0] req := stub.requests[0]
require.Equal(t, "debug_dbGet", req.method) require.Equal(t, "debug_dbGet", req.method)
...@@ -141,7 +111,8 @@ func TestCodeByHash(t *testing.T) { ...@@ -141,7 +111,8 @@ func TestCodeByHash(t *testing.T) {
} }
fetcher := newFetcher(nil, stub) fetcher := newFetcher(nil, stub)
_, _ = fetcher.CodeByHash(hash) // Panics because the code can't be found with or without the prefix
require.Panics(t, func() { fetcher.CodeByHash(hash) })
require.Len(t, stub.requests, 2, "should request with and without prefix") require.Len(t, stub.requests, 2, "should request with and without prefix")
req := stub.requests[0] req := stub.requests[0]
require.Equal(t, "debug_dbGet", req.method) require.Equal(t, "debug_dbGet", req.method)
...@@ -155,36 +126,16 @@ func TestCodeByHash(t *testing.T) { ...@@ -155,36 +126,16 @@ func TestCodeByHash(t *testing.T) {
}) })
} }
type blockRequest struct {
ctx context.Context
blockHash common.Hash
}
type stubBlockSource struct {
requests []blockRequest
nextErr error
nextResult *types.Block
}
func (s *stubBlockSource) BlockByHash(ctx context.Context, blockHash common.Hash) (*types.Block, error) {
s.requests = append(s.requests, blockRequest{
ctx: ctx,
blockHash: blockHash,
})
return s.nextResult, s.nextErr
}
func TestBlockByHash(t *testing.T) { func TestBlockByHash(t *testing.T) {
rng := rand.New(rand.NewSource(1234)) rng := rand.New(rand.NewSource(1234))
hash := testutils.RandomHash(rng) hash := testutils.RandomHash(rng)
t.Run("Success", func(t *testing.T) { t.Run("Success", func(t *testing.T) {
block, _ := testutils.RandomBlock(rng, 1) block := blockWithNumber(rng, headBlockNumber-1)
stub := &stubBlockSource{nextResult: block} stub := &stubBlockSource{nextResult: block}
fetcher := newFetcher(stub, nil) fetcher := newFetcher(stub, nil)
res, err := fetcher.BlockByHash(hash) res := fetcher.BlockByHash(hash)
require.NoError(t, err)
require.Same(t, block, res) require.Same(t, block, res)
}) })
...@@ -192,26 +143,101 @@ func TestBlockByHash(t *testing.T) { ...@@ -192,26 +143,101 @@ func TestBlockByHash(t *testing.T) {
stub := &stubBlockSource{nextErr: errors.New("boom")} stub := &stubBlockSource{nextErr: errors.New("boom")}
fetcher := newFetcher(stub, nil) fetcher := newFetcher(stub, nil)
res, err := fetcher.BlockByHash(hash) require.Panics(t, func() {
require.ErrorIs(t, err, stub.nextErr) fetcher.BlockByHash(hash)
require.Nil(t, res) })
}) })
t.Run("RequestArgs", func(t *testing.T) { t.Run("RequestArgs", func(t *testing.T) {
stub := &stubBlockSource{} stub := &stubBlockSource{nextResult: blockWithNumber(rng, 1)}
fetcher := newFetcher(stub, nil) fetcher := newFetcher(stub, nil)
_, _ = fetcher.BlockByHash(hash) fetcher.BlockByHash(hash)
require.Len(t, stub.requests, 1, "should make single request") require.Len(t, stub.requests, 1, "should make single request")
req := stub.requests[0] req := stub.requests[0]
require.Equal(t, hash, req.blockHash) require.Equal(t, hash, req.blockHash)
}) })
t.Run("PanicWhenBlockAboveHeadRequested", func(t *testing.T) {
// Block that the source can provide but is above the head block number
block := blockWithNumber(rng, headBlockNumber+1)
stub := &stubBlockSource{nextResult: block}
fetcher := newFetcher(stub, nil)
require.Panics(t, func() {
fetcher.BlockByHash(block.Hash())
})
})
}
func blockWithNumber(rng *rand.Rand, num int64) *types.Block {
header := testutils.RandomHeader(rng)
header.Number = big.NewInt(num)
return types.NewBlock(header, nil, nil, nil, trie.NewStackTrie(nil))
}
type blockRequest struct {
ctx context.Context
blockHash common.Hash
}
type stubBlockSource struct {
requests []blockRequest
nextErr error
nextResult *types.Block
}
func (s *stubBlockSource) BlockByHash(ctx context.Context, blockHash common.Hash) (*types.Block, error) {
s.requests = append(s.requests, blockRequest{
ctx: ctx,
blockHash: blockHash,
})
return s.nextResult, s.nextErr
}
type callContextRequest struct {
ctx context.Context
method string
args []interface{}
}
type stubCallContext struct {
nextResult any
nextErr error
requests []callContextRequest
}
func (c *stubCallContext) CallContext(ctx context.Context, result any, method string, args ...interface{}) error {
if result != nil && reflect.TypeOf(result).Kind() != reflect.Ptr {
return fmt.Errorf("call result parameter must be pointer or nil interface: %v", result)
}
c.requests = append(c.requests, callContextRequest{ctx: ctx, method: method, args: args})
if c.nextErr != nil {
return c.nextErr
}
res, err := json.Marshal(c.nextResult)
if err != nil {
return fmt.Errorf("json marshal: %w", err)
}
err = json.Unmarshal(res, result)
if err != nil {
return fmt.Errorf("json unmarshal: %w", err)
}
return nil
} }
func newFetcher(blockSource BlockSource, callContext CallContext) *FetchingL2Oracle { func newFetcher(blockSource BlockSource, callContext CallContext) *FetchingL2Oracle {
rng := rand.New(rand.NewSource(int64(1)))
head := testutils.MakeBlockInfo(func(i *testutils.MockBlockInfo) {
i.InfoNum = headBlockNumber
})(rng)
return &FetchingL2Oracle{ return &FetchingL2Oracle{
ctx: context.Background(),
logger: log.New(), logger: log.New(),
head: head,
blockSource: blockSource, blockSource: blockSource,
callContext: callContext, callContext: callContext,
} }
......
...@@ -6,7 +6,6 @@ import ( ...@@ -6,7 +6,6 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
cll2 "github.com/ethereum-optimism/optimism/op-program/client/l2" cll2 "github.com/ethereum-optimism/optimism/op-program/client/l2"
"github.com/ethereum-optimism/optimism/op-program/host/config" "github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
...@@ -14,15 +13,16 @@ import ( ...@@ -14,15 +13,16 @@ import (
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
) )
func NewFetchingEngine(ctx context.Context, logger log.Logger, cfg *config.Config) (derive.Engine, error) { func NewFetchingEngine(ctx context.Context, logger log.Logger, cfg *config.Config) (*cll2.OracleEngine, error) {
genesis, err := loadL2Genesis(cfg) genesis, err := loadL2Genesis(cfg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
oracle, err := NewFetchingL2Oracle(ctx, logger, cfg.L2URL) fetcher, err := NewFetchingL2Oracle(ctx, logger, cfg.L2URL, cfg.L2Head)
if err != nil { if err != nil {
return nil, fmt.Errorf("connect l2 oracle: %w", err) return nil, fmt.Errorf("connect l2 oracle: %w", err)
} }
oracle := cll2.NewCachingOracle(fetcher)
engineBackend, err := cll2.NewOracleBackedL2Chain(logger, oracle, genesis, cfg.L2Head) engineBackend, err := cll2.NewOracleBackedL2Chain(logger, oracle, genesis, cfg.L2Head)
if err != nil { if err != nil {
......
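NewFetchingEngine now wraps the fetcher in cll2.NewCachingOracle before handing it to the oracle-backed chain, presumably so repeated trie and block lookups can be served from memory instead of re-hitting the L2 RPC. A minimal, self-contained sketch of such a memoizing wrapper (the interface and type names below are illustrative stand-ins, not the actual cll2 implementation):

package l2cache

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
)

// stateOracle is a stand-in for the client-side oracle interface being wrapped.
type stateOracle interface {
    NodeByHash(nodeHash common.Hash) []byte
    CodeByHash(codeHash common.Hash) []byte
    BlockByHash(blockHash common.Hash) *types.Block
}

// cachingOracle memoizes fetched trie nodes so repeated walks over the same
// state do not trigger repeated round-trips to the underlying source.
type cachingOracle struct {
    inner stateOracle
    nodes map[common.Hash][]byte
}

func newCachingOracle(inner stateOracle) *cachingOracle {
    return &cachingOracle{inner: inner, nodes: make(map[common.Hash][]byte)}
}

func (c *cachingOracle) NodeByHash(h common.Hash) []byte {
    if n, ok := c.nodes[h]; ok {
        return n
    }
    n := c.inner.NodeByHash(h)
    c.nodes[h] = n
    return n
}

// CodeByHash and BlockByHash would memoize results in the same way.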
package preimage
import (
"encoding/binary"
"fmt"
"io"
)
// HintWriter writes hints to an io.Writer (e.g. a special file descriptor, or a debug log),
// for a pre-image oracle service to prepare specific pre-images.
type HintWriter struct {
w io.Writer
}
var _ Hinter = (*HintWriter)(nil)
func NewHintWriter(w io.Writer) *HintWriter {
return &HintWriter{w: w}
}
func (hw *HintWriter) Hint(v Hint) {
hint := v.Hint()
var hintBytes []byte
hintBytes = binary.BigEndian.AppendUint32(hintBytes, uint32(len(hint)))
hintBytes = append(hintBytes, []byte(hint)...)
hintBytes = append(hintBytes, 0) // trailing zero byte for the writer to block on until the hint has been processed
_, err := hw.w.Write(hintBytes)
if err != nil {
panic(fmt.Errorf("failed to write pre-image hint: %w", err))
}
}
// HintReader reads the hints of HintWriter and passes them to a router for preparation of the requested pre-images.
// Onchain the written hints are a no-op.
type HintReader struct {
r io.Reader
}
func NewHintReader(r io.Reader) *HintReader {
return &HintReader{r: r}
}
func (hr *HintReader) NextHint(router func(hint string) error) error {
var length uint32
if err := binary.Read(hr.r, binary.BigEndian, &length); err != nil {
if err == io.EOF {
return io.EOF
}
return fmt.Errorf("failed to read hint length prefix: %w", err)
}
payload := make([]byte, length)
if length > 0 {
if _, err := io.ReadFull(hr.r, payload); err != nil {
return fmt.Errorf("failed to read hint payload (length %d): %w", length, err)
}
}
if err := router(string(payload)); err != nil {
return fmt.Errorf("failed to handle hint: %w", err)
}
if _, err := hr.r.Read([]byte{0}); err != nil {
return fmt.Errorf("failed to read trailing no-op byte to unblock hint writer: %w", err)
}
return nil
}
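The hint wire format written above is a 4-byte big-endian length, the raw hint bytes, and a single trailing zero byte that the reader only consumes after the hint has been handled, which is what unblocks the writer. A standalone round-trip of just that framing, using only the standard library (the hint string itself is an arbitrary example):

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "io"
)

func main() {
    hint := "example hint payload"

    // Encode: uint32 big-endian length, payload, trailing zero byte.
    var buf bytes.Buffer
    _ = binary.Write(&buf, binary.BigEndian, uint32(len(hint)))
    buf.WriteString(hint)
    buf.WriteByte(0)

    // Decode: read the length prefix, then exactly that many payload bytes,
    // then the no-op byte that acknowledges the hint.
    var length uint32
    _ = binary.Read(&buf, binary.BigEndian, &length)
    payload := make([]byte, length)
    _, _ = io.ReadFull(&buf, payload)
    ack, _ := buf.ReadByte()

    fmt.Printf("decoded %q, trailing byte %d\n", payload, ack)
}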
package preimage
import (
"bytes"
"crypto/rand"
"io"
"testing"
"github.com/stretchr/testify/require"
)
type rawHint string
func (rh rawHint) Hint() string {
return string(rh)
}
func TestHints(t *testing.T) {
// Note: pretty much every string is valid communication:
// length, payload, 0. Worst case you run out of data, or allocate too much.
testHint := func(hints ...string) {
var buf bytes.Buffer
hw := NewHintWriter(&buf)
for _, h := range hints {
hw.Hint(rawHint(h))
}
hr := NewHintReader(&buf)
var got []string
for i := 0; i < 100; i++ { // sanity limit
err := hr.NextHint(func(hint string) error {
got = append(got, hint)
return nil
})
if err == io.EOF {
break
}
require.NoError(t, err)
}
require.Equal(t, len(hints), len(got), "got all hints")
for i, h := range hints {
require.Equal(t, h, got[i], "hints match")
}
}
t.Run("empty hint", func(t *testing.T) {
testHint("")
})
t.Run("hello world", func(t *testing.T) {
testHint("hello world")
})
t.Run("zero byte", func(t *testing.T) {
testHint(string([]byte{0}))
})
t.Run("many zeroes", func(t *testing.T) {
testHint(string(make([]byte, 1000)))
})
t.Run("random data", func(t *testing.T) {
dat := make([]byte, 1000)
_, _ = rand.Read(dat[:])
testHint(string(dat))
})
t.Run("multiple hints", func(t *testing.T) {
testHint("give me header a", "also header b", "foo bar")
})
t.Run("unexpected EOF", func(t *testing.T) {
var buf bytes.Buffer
hw := NewHintWriter(&buf)
hw.Hint(rawHint("hello"))
_, _ = buf.Read(make([]byte, 1)) // read one byte so it falls short, see if it's detected
hr := NewHintReader(&buf)
err := hr.NextHint(func(hint string) error { return nil })
require.ErrorIs(t, err, io.ErrUnexpectedEOF)
})
}
package preimage
import (
"encoding/binary"
"github.com/ethereum/go-ethereum/common"
)
type Key interface {
// PreimageKey changes the Key commitment into a
// 32-byte type-prefixed preimage key.
PreimageKey() common.Hash
}
type Oracle interface {
// Get the full pre-image of a given pre-image key.
// This returns no error: the client state-transition
// is invalid if there is any missing pre-image data.
Get(key Key) []byte
}
type OracleFn func(key Key) []byte
func (fn OracleFn) Get(key Key) []byte {
return fn(key)
}
// KeyType is the key-type of a pre-image, used as the first byte (prefix) of the pre-image key.
type KeyType byte
const (
// The zero key type is illegal to use, ensuring all keys are non-zero.
_ KeyType = 0
// LocalKeyType is for input-type pre-images, specific to the local program instance.
LocalKeyType KeyType = 1
// Keccak256KeyType is for keccak256 pre-images, for any global shared pre-images.
Keccak256KeyType KeyType = 2
)
// LocalIndexKey is a key local to the program, indexing a special program input.
type LocalIndexKey uint64
func (k LocalIndexKey) PreimageKey() (out common.Hash) {
out[0] = byte(LocalKeyType)
binary.BigEndian.PutUint64(out[24:], uint64(k))
return
}
// Keccak256Key wraps a keccak256 hash to use it as a typed pre-image key.
type Keccak256Key common.Hash
func (k Keccak256Key) PreimageKey() (out common.Hash) {
out = common.Hash(k) // copy the keccak hash
out[0] = byte(Keccak256KeyType) // apply prefix
return
}
// Hint is an interface to enable any program type to function as a hint,
// when passed to the Hinter interface, returning a string representation
// of what data the host should prepare pre-images for.
type Hint interface {
Hint() string
}
// Hinter is an interface to write hints to the host.
// This may be implemented as a no-op or logging hinter
// if the program is executing in a read-only environment
// where the host is expected to have all pre-images ready.
type Hinter interface {
Hint(v Hint)
}
type HinterFn func(v Hint)
func (fn HinterFn) Hint(v Hint) {
fn(v)
}
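To make the key layout above concrete: a LocalIndexKey keeps the type byte 1 in position 0 and the index big-endian in the last 8 bytes, while a Keccak256Key keeps the keccak256 hash and overwrites only byte 0 with the type prefix 2. A small sketch that re-derives the same layout with the standard library rather than importing this package (the pre-image string is an arbitrary example):

package main

import (
    "encoding/binary"
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/crypto"
)

func main() {
    // Local key: type byte 1, big-endian index in the final 8 bytes.
    var local common.Hash
    local[0] = 1
    binary.BigEndian.PutUint64(local[24:], 7)
    fmt.Println(local.Hex())
    // 0x0100000000000000000000000000000000000000000000000000000000000007

    // Global keccak256 key: hash of the pre-image with byte 0 replaced by 2.
    key := crypto.Keccak256Hash([]byte("tx from alice"))
    key[0] = 2
    fmt.Println(key.Hex())
}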
package preimage
import (
"encoding/binary"
"fmt"
"io"
"github.com/ethereum/go-ethereum/common"
)
// OracleClient implements the Oracle by writing the pre-image key to the given stream,
// and reading back a length-prefixed value.
type OracleClient struct {
rw io.ReadWriter
}
func NewOracleClient(rw io.ReadWriter) *OracleClient {
return &OracleClient{rw: rw}
}
var _ Oracle = (*OracleClient)(nil)
func (o *OracleClient) Get(key Key) []byte {
h := key.PreimageKey()
if _, err := o.rw.Write(h[:]); err != nil {
panic(fmt.Errorf("failed to write key %s (%T) to pre-image oracle: %w", key, key, err))
}
var length uint64
if err := binary.Read(o.rw, binary.BigEndian, &length); err != nil {
panic(fmt.Errorf("failed to read pre-image length of key %s (%T) from pre-image oracle: %w", key, key, err))
}
payload := make([]byte, length)
if _, err := io.ReadFull(o.rw, payload); err != nil {
panic(fmt.Errorf("failed to read pre-image payload (length %d) of key %s (%T) from pre-image oracle: %w", length, key, key, err))
}
return payload
}
// OracleServer serves the pre-image requests of the OracleClient, implementing the same protocol as the onchain VM.
type OracleServer struct {
rw io.ReadWriter
}
func NewOracleServer(rw io.ReadWriter) *OracleServer {
return &OracleServer{rw: rw}
}
func (o *OracleServer) NextPreimageRequest(getPreimage func(key common.Hash) ([]byte, error)) error {
var key common.Hash
if _, err := io.ReadFull(o.rw, key[:]); err != nil {
if err == io.EOF {
return io.EOF
}
return fmt.Errorf("failed to read requested pre-image key: %w", err)
}
value, err := getPreimage(key)
if err != nil {
return fmt.Errorf("failed to serve pre-image %s request: %w", key, err)
}
if err := binary.Write(o.rw, binary.BigEndian, uint64(len(value))); err != nil {
return fmt.Errorf("failed to write length-prefix %d: %w", len(value), err)
}
if len(value) == 0 {
return nil
}
if _, err := o.rw.Write(value); err != nil {
return fmt.Errorf("failed to write pre-image value (%d long): %w", len(value), err)
}
return nil
}
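Each exchange on this stream has a fixed shape: the client writes the 32-byte type-prefixed key, and the server answers with an 8-byte big-endian length followed by the raw pre-image bytes (possibly empty). A small standalone illustration of the response framing the server writes (the value bytes are an arbitrary example):

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// frameResponse lays out a pre-image value the way OracleServer writes it:
// uint64 big-endian length prefix, then the value bytes.
func frameResponse(value []byte) []byte {
    var buf bytes.Buffer
    _ = binary.Write(&buf, binary.BigEndian, uint64(len(value)))
    buf.Write(value)
    return buf.Bytes()
}

func main() {
    fmt.Printf("% x\n", frameResponse([]byte{0xde, 0xad}))
    // prints: 00 00 00 00 00 00 00 02 de ad
}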
package preimage
import (
"bytes"
"crypto/rand"
"fmt"
"io"
"sync"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/require"
)
type readWritePair struct {
io.Reader
io.Writer
}
func bidirectionalPipe() (a, b io.ReadWriter) {
ar, bw := io.Pipe()
br, aw := io.Pipe()
return readWritePair{Reader: ar, Writer: aw}, readWritePair{Reader: br, Writer: bw}
}
func TestOracle(t *testing.T) {
testPreimage := func(preimages ...[]byte) {
a, b := bidirectionalPipe()
cl := NewOracleClient(a)
srv := NewOracleServer(b)
preimageByHash := make(map[common.Hash][]byte)
for _, p := range preimages {
k := Keccak256Key(crypto.Keccak256Hash(p))
preimageByHash[k.PreimageKey()] = p
}
for _, p := range preimages {
k := Keccak256Key(crypto.Keccak256Hash(p))
var wg sync.WaitGroup
wg.Add(2)
go func(k Key, p []byte) {
result := cl.Get(k)
wg.Done()
expected := preimageByHash[k.PreimageKey()]
require.True(t, bytes.Equal(expected, result), "need correct preimage %x, got %x", expected, result)
}(k, p)
go func() {
err := srv.NextPreimageRequest(func(key common.Hash) ([]byte, error) {
dat, ok := preimageByHash[key]
if !ok {
return nil, fmt.Errorf("cannot find %s", key)
}
return dat, nil
})
wg.Done()
require.NoError(t, err)
}()
wg.Wait()
}
}
t.Run("empty preimage", func(t *testing.T) {
testPreimage([]byte{})
})
t.Run("nil preimage", func(t *testing.T) {
testPreimage(nil)
})
t.Run("zero", func(t *testing.T) {
testPreimage([]byte{0})
})
t.Run("multiple", func(t *testing.T) {
testPreimage([]byte("tx from alice"), []byte{0x13, 0x37}, []byte("tx from bob"))
})
t.Run("zeroes", func(t *testing.T) {
testPreimage(make([]byte, 1000))
})
t.Run("random", func(t *testing.T) {
dat := make([]byte, 1000)
_, _ = rand.Read(dat[:])
testPreimage(dat)
})
}
package doc
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/ethereum-optimism/optimism/op-proposer/metrics"
"github.com/olekukonko/tablewriter"
"github.com/urfave/cli"
)
var Subcommands = cli.Commands{
{
Name: "metrics",
Usage: "Dumps a list of supported metrics to stdout",
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Value: "markdown",
Usage: "Output format (json|markdown)",
},
},
Action: func(ctx *cli.Context) error {
m := metrics.NewMetrics("default")
supportedMetrics := m.Document()
format := ctx.String("format")
if format != "markdown" && format != "json" {
return fmt.Errorf("invalid format: %s", format)
}
if format == "json" {
enc := json.NewEncoder(os.Stdout)
return enc.Encode(supportedMetrics)
}
table := tablewriter.NewWriter(os.Stdout)
table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
table.SetCenterSeparator("|")
table.SetAutoWrapText(false)
table.SetHeader([]string{"Metric", "Description", "Labels", "Type"})
var data [][]string
for _, metric := range supportedMetrics {
labels := strings.Join(metric.Labels, ",")
data = append(data, []string{metric.Name, metric.Help, labels, metric.Type})
}
table.AppendBulk(data)
table.Render()
return nil
},
},
}
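With the doc command registered on the proposer CLI in the next hunk, the supported-metrics table can presumably be printed with something along the lines of "op-proposer doc metrics" (markdown table) or "op-proposer doc metrics --format=json" for machine-readable output; the exact binary name depends on how the build is packaged.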
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
"github.com/urfave/cli" "github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/op-proposer/cmd/doc"
"github.com/ethereum-optimism/optimism/op-proposer/flags" "github.com/ethereum-optimism/optimism/op-proposer/flags"
"github.com/ethereum-optimism/optimism/op-proposer/proposer" "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
...@@ -27,8 +28,14 @@ func main() { ...@@ -27,8 +28,14 @@ func main() {
app.Name = "op-proposer" app.Name = "op-proposer"
app.Usage = "L2Output Submitter" app.Usage = "L2Output Submitter"
app.Description = "Service for generating and submitting L2 Output checkpoints to the L2OutputOracle contract" app.Description = "Service for generating and submitting L2 Output checkpoints to the L2OutputOracle contract"
app.Action = curryMain(Version) app.Action = curryMain(Version)
app.Commands = []cli.Command{
{
Name: "doc",
Subcommands: doc.Subcommands,
},
}
err := app.Run(os.Args) err := app.Run(os.Args)
if err != nil { if err != nil {
log.Crit("Application failed", "message", err) log.Crit("Application failed", "message", err)
......
package flags package flags
import ( import (
"fmt"
"github.com/urfave/cli" "github.com/urfave/cli"
opservice "github.com/ethereum-optimism/optimism/op-service" opservice "github.com/ethereum-optimism/optimism/op-service"
...@@ -16,29 +18,25 @@ const envVarPrefix = "OP_PROPOSER" ...@@ -16,29 +18,25 @@ const envVarPrefix = "OP_PROPOSER"
var ( var (
// Required Flags // Required Flags
L1EthRpcFlag = cli.StringFlag{ L1EthRpcFlag = cli.StringFlag{
Name: "l1-eth-rpc", Name: "l1-eth-rpc",
Usage: "HTTP provider URL for L1", Usage: "HTTP provider URL for L1",
Required: true, EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L1_ETH_RPC"),
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L1_ETH_RPC"),
} }
RollupRpcFlag = cli.StringFlag{ RollupRpcFlag = cli.StringFlag{
Name: "rollup-rpc", Name: "rollup-rpc",
Usage: "HTTP provider URL for the rollup node", Usage: "HTTP provider URL for the rollup node",
Required: true, EnvVar: opservice.PrefixEnvVar(envVarPrefix, "ROLLUP_RPC"),
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "ROLLUP_RPC"),
} }
L2OOAddressFlag = cli.StringFlag{ L2OOAddressFlag = cli.StringFlag{
Name: "l2oo-address", Name: "l2oo-address",
Usage: "Address of the L2OutputOracle contract", Usage: "Address of the L2OutputOracle contract",
Required: true, EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L2OO_ADDRESS"),
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L2OO_ADDRESS"),
} }
PollIntervalFlag = cli.DurationFlag{ PollIntervalFlag = cli.DurationFlag{
Name: "poll-interval", Name: "poll-interval",
Usage: "Delay between querying L2 for more transactions and " + Usage: "Delay between querying L2 for more transactions and " +
"creating a new batch", "creating a new batch",
Required: true, EnvVar: opservice.PrefixEnvVar(envVarPrefix, "POLL_INTERVAL"),
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "POLL_INTERVAL"),
} }
// Optional flags // Optional flags
AllowNonFinalizedFlag = cli.BoolFlag{ AllowNonFinalizedFlag = cli.BoolFlag{
...@@ -74,3 +72,12 @@ func init() { ...@@ -74,3 +72,12 @@ func init() {
// Flags contains the list of configuration options available to the binary. // Flags contains the list of configuration options available to the binary.
var Flags []cli.Flag var Flags []cli.Flag
func CheckRequired(ctx *cli.Context) error {
for _, f := range requiredFlags {
if !ctx.GlobalIsSet(f.GetName()) {
return fmt.Errorf("flag %s is required", f.GetName())
}
}
return nil
}
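Because the Required attribute is dropped from the flag definitions above, requiredness is now enforced by this explicit check instead. A hypothetical call site (illustrative only; not part of this diff) would run it at start-up before the proposer config is assembled:

// Illustrative fragment, assuming the op-proposer entrypoint context.
func Main(version string) func(cliCtx *cli.Context) error {
    return func(cliCtx *cli.Context) error {
        if err := flags.CheckRequired(cliCtx); err != nil {
            return err
        }
        // ... build the proposer config and start the service ...
        return nil
    }
}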
...@@ -104,3 +104,7 @@ const ( ...@@ -104,3 +104,7 @@ const (
func (m *Metrics) RecordL2BlocksProposed(l2ref eth.L2BlockRef) { func (m *Metrics) RecordL2BlocksProposed(l2ref eth.L2BlockRef) {
m.RecordL2Ref(BlockProposed, l2ref) m.RecordL2Ref(BlockProposed, l2ref)
} }
func (m *Metrics) Document() []opmetrics.DocumentedMetric {
return m.factory.Document()
}
...@@ -15,6 +15,7 @@ import ( ...@@ -15,6 +15,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli" "github.com/urfave/cli"
...@@ -334,7 +335,11 @@ func (l *L2OutputSubmitter) sendTransaction(ctx context.Context, output *eth.Out ...@@ -334,7 +335,11 @@ func (l *L2OutputSubmitter) sendTransaction(ctx context.Context, output *eth.Out
if err != nil { if err != nil {
return err return err
} }
l.log.Info("proposer tx successfully published", "tx_hash", receipt.TxHash) if receipt.Status == types.ReceiptStatusFailed {
l.log.Error("proposer tx successfully published but reverted", "tx_hash", receipt.TxHash)
} else {
l.log.Info("proposer tx successfully published", "tx_hash", receipt.TxHash)
}
return nil return nil
} }
......
...@@ -16,17 +16,19 @@ func (e *Event) Record() { ...@@ -16,17 +16,19 @@ func (e *Event) Record() {
e.LastTime.SetToCurrentTime() e.LastTime.SetToCurrentTime()
} }
func NewEvent(factory Factory, ns string, name string, displayName string) Event { func NewEvent(factory Factory, ns string, subsystem string, name string, displayName string) Event {
return Event{ return Event{
Total: factory.NewCounter(prometheus.CounterOpts{ Total: factory.NewCounter(prometheus.CounterOpts{
Namespace: ns, Namespace: ns,
Name: fmt.Sprintf("%s_total", name), Name: fmt.Sprintf("%s_total", name),
Help: fmt.Sprintf("Count of %s events", displayName), Help: fmt.Sprintf("Count of %s events", displayName),
Subsystem: subsystem,
}), }),
LastTime: factory.NewGauge(prometheus.GaugeOpts{ LastTime: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns, Namespace: ns,
Name: fmt.Sprintf("last_%s_unix", name), Name: fmt.Sprintf("last_%s_unix", name),
Help: fmt.Sprintf("Timestamp of last %s event", displayName), Help: fmt.Sprintf("Timestamp of last %s event", displayName),
Subsystem: subsystem,
}), }),
} }
} }
...@@ -41,17 +43,19 @@ func (e *EventVec) Record(lvs ...string) { ...@@ -41,17 +43,19 @@ func (e *EventVec) Record(lvs ...string) {
e.LastTime.WithLabelValues(lvs...).SetToCurrentTime() e.LastTime.WithLabelValues(lvs...).SetToCurrentTime()
} }
func NewEventVec(factory Factory, ns string, name string, displayName string, labelNames []string) EventVec { func NewEventVec(factory Factory, ns string, subsystem string, name string, displayName string, labelNames []string) EventVec {
return EventVec{ return EventVec{
Total: *factory.NewCounterVec(prometheus.CounterOpts{ Total: *factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns, Namespace: ns,
Name: fmt.Sprintf("%s_total", name), Name: fmt.Sprintf("%s_total", name),
Help: fmt.Sprintf("Count of %s events", displayName), Help: fmt.Sprintf("Count of %s events", displayName),
Subsystem: subsystem,
}, labelNames), }, labelNames),
LastTime: *factory.NewGaugeVec(prometheus.GaugeOpts{ LastTime: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Namespace: ns,
Name: fmt.Sprintf("last_%s_unix", name), Name: fmt.Sprintf("last_%s_unix", name),
Help: fmt.Sprintf("Timestamp of last %s event", displayName), Help: fmt.Sprintf("Timestamp of last %s event", displayName),
Subsystem: subsystem,
}, labelNames), }, labelNames),
} }
} }
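Note that the Prometheus client joins Namespace, Subsystem and Name with underscores when building the fully-qualified metric name, so threading the new subsystem argument through renames the emitted series: with the txmgr metrics below passing "txmgr", a counter previously registered as <ns>_publish_total becomes <ns>_txmgr_publish_total.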
...@@ -85,15 +85,15 @@ func MakeTxMetrics(ns string, factory metrics.Factory) TxMetrics { ...@@ -85,15 +85,15 @@ func MakeTxMetrics(ns string, factory metrics.Factory) TxMetrics {
txPublishError: factory.NewCounterVec(prometheus.CounterOpts{ txPublishError: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns, Namespace: ns,
Name: "tx_publish_error_count", Name: "tx_publish_error_count",
Help: "Count of publish errors. Labells are sanitized error strings", Help: "Count of publish errors. Labels are sanitized error strings",
Subsystem: "txmgr", Subsystem: "txmgr",
}, []string{"error"}), }, []string{"error"}),
confirmEvent: metrics.NewEventVec(factory, ns, "confirm", "tx confirm", []string{"status"}), confirmEvent: metrics.NewEventVec(factory, ns, "txmgr", "confirm", "tx confirm", []string{"status"}),
publishEvent: metrics.NewEvent(factory, ns, "publish", "tx publish"), publishEvent: metrics.NewEvent(factory, ns, "txmgr", "publish", "tx publish"),
rpcError: factory.NewCounter(prometheus.CounterOpts{ rpcError: factory.NewCounter(prometheus.CounterOpts{
Namespace: ns, Namespace: ns,
Name: "rpc_error_count", Name: "rpc_error_count",
Help: "Temporrary: Count of RPC errors (like timeouts) that have occurrred", Help: "Temporary: Count of RPC errors (like timeouts) that have occurred",
Subsystem: "txmgr", Subsystem: "txmgr",
}), }),
} }
......
...@@ -67,10 +67,10 @@ type ETHBackend interface { ...@@ -67,10 +67,10 @@ type ETHBackend interface {
// NonceAt returns the account nonce of the given account. // NonceAt returns the account nonce of the given account.
// The block number can be nil, in which case the nonce is taken from the latest known block. // The block number can be nil, in which case the nonce is taken from the latest known block.
NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error)
// PendingNonce returns the pending nonce. // PendingNonceAt returns the pending nonce.
PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error)
/// EstimateGas returns an estimate of the amount of gas needed to execute the given // EstimateGas returns an estimate of the amount of gas needed to execute the given
/// transaction against the current pending block. // transaction against the current pending block.
EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64, error) EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64, error)
} }
......
...@@ -110,7 +110,7 @@ contract SystemConfig_Setters_TestFail is SystemConfig_Init { ...@@ -110,7 +110,7 @@ contract SystemConfig_Setters_TestFail is SystemConfig_Init {
maximumBaseFee: 2 gwei maximumBaseFee: 2 gwei
}); });
vm.prank(sysConf.owner()); vm.prank(sysConf.owner());
vm.expectRevert("SystemConfig: denominator cannot be 0"); vm.expectRevert("SystemConfig: denominator must be larger than 1");
sysConf.setResourceConfig(config); sysConf.setResourceConfig(config);
} }
......