Commit 64058146 authored by protolambda's avatar protolambda

Merge branch 'develop' into tip/spanbatch-logs-metrics

parents 0d78ad65 301e996d
...@@ -63,7 +63,7 @@ acts as an example of using `cast` to manually call `attack` and `defend`. ...@@ -63,7 +63,7 @@ acts as an example of using `cast` to manually call `attack` and `defend`.
### Performing Steps ### Performing Steps
Attacking or defending are teh only available actions before the maximum depth of the game is reached. To counter claims Attacking or defending are the only available actions before the maximum depth of the game is reached. To counter claims
at the maximum depth, a step must be performed instead. Calling the `step` method in the `FaultDisputeGame` contract at the maximum depth, a step must be performed instead. Calling the `step` method in the `FaultDisputeGame` contract
counters a claim at the maximum depth by running a single step of the cannon VM on chain. The `step` method will revert counters a claim at the maximum depth by running a single step of the cannon VM on chain. The `step` method will revert
unless the cannon execution confirms the claim being countered is invalid. Note, if an actor's clock runs out at any unless the cannon execution confirms the claim being countered is invalid. Note, if an actor's clock runs out at any
......
...@@ -38,7 +38,7 @@ require ( ...@@ -38,7 +38,7 @@ require (
github.com/pkg/profile v1.7.0 github.com/pkg/profile v1.7.0
github.com/prometheus/client_golang v1.17.0 github.com/prometheus/client_golang v1.17.0
github.com/stretchr/testify v1.8.4 github.com/stretchr/testify v1.8.4
github.com/urfave/cli/v2 v2.25.7 github.com/urfave/cli/v2 v2.26.0
golang.org/x/crypto v0.16.0 golang.org/x/crypto v0.16.0
golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/exp v0.0.0-20231006140011-7918f672742d
golang.org/x/sync v0.5.0 golang.org/x/sync v0.5.0
......
...@@ -699,8 +699,8 @@ github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9f ...@@ -699,8 +699,8 @@ github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9f
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= github.com/urfave/cli/v2 v2.26.0 h1:3f3AMg3HpThFNT4I++TKOejZO8yU55t3JnnSr4S4QEI=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/urfave/cli/v2 v2.26.0/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
......
...@@ -34,6 +34,6 @@ ...@@ -34,6 +34,6 @@
}, },
"devDependencies": { "devDependencies": {
"tsup": "^8.0.1", "tsup": "^8.0.1",
"vitest": "^0.34.4" "vitest": "^1.0.1"
} }
} }
This diff is collapsed.
...@@ -214,6 +214,8 @@ type DeployConfig struct { ...@@ -214,6 +214,8 @@ type DeployConfig struct {
FaultGameMaxDuration uint64 `json:"faultGameMaxDuration"` FaultGameMaxDuration uint64 `json:"faultGameMaxDuration"`
// OutputBisectionGameGenesisBlock is the block number for genesis. // OutputBisectionGameGenesisBlock is the block number for genesis.
OutputBisectionGameGenesisBlock uint64 `json:"outputBisectionGameGenesisBlock"` OutputBisectionGameGenesisBlock uint64 `json:"outputBisectionGameGenesisBlock"`
// OutputBisectionGameGenesisOutputRoot is the output root for the genesis block.
OutputBisectionGameGenesisOutputRoot common.Hash `json:"outputBisectionGameGenesisOutputRoot"`
// OutputBisectionGameSplitDepth is the depth at which the output bisection game splits. // OutputBisectionGameSplitDepth is the depth at which the output bisection game splits.
OutputBisectionGameSplitDepth uint64 `json:"outputBisectionGameSplitDepth"` OutputBisectionGameSplitDepth uint64 `json:"outputBisectionGameSplitDepth"`
// FundDevAccounts configures whether or not to fund the dev accounts. Should only be used // FundDevAccounts configures whether or not to fund the dev accounts. Should only be used
......
...@@ -69,6 +69,7 @@ ...@@ -69,6 +69,7 @@
"faultGameMaxDepth": 63, "faultGameMaxDepth": 63,
"faultGameMaxDuration": 604800, "faultGameMaxDuration": 604800,
"outputBisectionGameGenesisBlock": 0, "outputBisectionGameGenesisBlock": 0,
"outputBisectionGameGenesisOutputRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"outputBisectionGameSplitDepth": 0, "outputBisectionGameSplitDepth": 0,
"systemConfigStartBlock": 0, "systemConfigStartBlock": 0,
"requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
......
...@@ -22,7 +22,7 @@ import ( ...@@ -22,7 +22,7 @@ import (
var ( var (
cannonGameType = uint8(0) cannonGameType = uint8(0)
outputCannonGameType = uint8(253) // TODO(client-pod#43): Switch the output cannon game type to 1 outputCannonGameType = uint8(1)
outputAlphabetGameType = uint8(254) outputAlphabetGameType = uint8(254)
alphabetGameType = uint8(255) alphabetGameType = uint8(255)
) )
...@@ -121,7 +121,11 @@ func registerOutputCannon( ...@@ -121,7 +121,11 @@ func registerOutputCannon(
if err != nil { if err != nil {
return nil, err return nil, err
} }
accessor, err := outputs.NewOutputCannonTraceAccessor(ctx, logger, m, cfg, l2Client, contract, dir, gameDepth, agreed, disputed) splitDepth, err := contract.GetSplitDepth(ctx)
if err != nil {
return nil, fmt.Errorf("failed to load split depth: %w", err)
}
accessor, err := outputs.NewOutputCannonTraceAccessor(ctx, logger, m, cfg, l2Client, contract, dir, gameDepth, splitDepth, agreed, disputed)
if err != nil { if err != nil {
return nil, err return nil, err
} }
......
...@@ -25,13 +25,12 @@ func NewOutputCannonTraceAccessor( ...@@ -25,13 +25,12 @@ func NewOutputCannonTraceAccessor(
contract cannon.L1HeadSource, contract cannon.L1HeadSource,
dir string, dir string,
gameDepth uint64, gameDepth uint64,
splitDepth uint64,
prestateBlock uint64, prestateBlock uint64,
poststateBlock uint64, poststateBlock uint64,
) (*trace.Accessor, error) { ) (*trace.Accessor, error) {
// TODO(client-pod#43): Load depths from the contract bottomDepth := gameDepth - splitDepth
topDepth := gameDepth / 2 outputProvider, err := NewTraceProvider(ctx, logger, cfg.RollupRpc, splitDepth, prestateBlock, poststateBlock)
bottomDepth := gameDepth - topDepth
outputProvider, err := NewTraceProvider(ctx, logger, cfg.RollupRpc, topDepth, prestateBlock, poststateBlock)
if err != nil { if err != nil {
return nil, err return nil, err
} }
...@@ -48,6 +47,6 @@ func NewOutputCannonTraceAccessor( ...@@ -48,6 +47,6 @@ func NewOutputCannonTraceAccessor(
} }
cache := NewProviderCache(m, "output_cannon_provider", cannonCreator) cache := NewProviderCache(m, "output_cannon_provider", cannonCreator)
selector := split.NewSplitProviderSelector(outputProvider, int(topDepth), OutputRootSplitAdapter(outputProvider, cache.GetOrCreate)) selector := split.NewSplitProviderSelector(outputProvider, int(splitDepth), OutputRootSplitAdapter(outputProvider, cache.GetOrCreate))
return trace.NewAccessor(selector), nil return trace.NewAccessor(selector), nil
} }
...@@ -35,7 +35,7 @@ import ( ...@@ -35,7 +35,7 @@ import (
const alphabetGameType uint8 = 255 const alphabetGameType uint8 = 255
const cannonGameType uint8 = 0 const cannonGameType uint8 = 0
const outputCannonGameType uint8 = 253 // TODO(client-pod#43): Switch this game type to 1 const outputCannonGameType uint8 = 1
const alphabetGameDepth = 4 const alphabetGameDepth = 4
var lastAlphabetTraceIndex = big.NewInt(1<<alphabetGameDepth - 1) var lastAlphabetTraceIndex = big.NewInt(1<<alphabetGameDepth - 1)
...@@ -143,6 +143,7 @@ func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet s ...@@ -143,6 +143,7 @@ func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet s
func (h *FactoryHelper) StartOutputCannonGame(ctx context.Context, rollupEndpoint string, rootClaim common.Hash) *OutputCannonGameHelper { func (h *FactoryHelper) StartOutputCannonGame(ctx context.Context, rollupEndpoint string, rootClaim common.Hash) *OutputCannonGameHelper {
rollupClient, err := dial.DialRollupClientWithTimeout(ctx, 30*time.Second, testlog.Logger(h.t, log.LvlInfo), rollupEndpoint) rollupClient, err := dial.DialRollupClientWithTimeout(ctx, 30*time.Second, testlog.Logger(h.t, log.LvlInfo), rollupEndpoint)
h.require.NoError(err) h.require.NoError(err)
h.t.Cleanup(rollupClient.Close)
extraData, _ := h.createBisectionGameExtraData(ctx, rollupClient) extraData, _ := h.createBisectionGameExtraData(ctx, rollupClient)
...@@ -279,11 +280,22 @@ func (h *FactoryHelper) createCannonGame(ctx context.Context, rootClaim common.H ...@@ -279,11 +280,22 @@ func (h *FactoryHelper) createCannonGame(ctx context.Context, rootClaim common.H
} }
func (h *FactoryHelper) createBisectionGameExtraData(ctx context.Context, client *sources.RollupClient) (extraData []byte, l2BlockNumber uint64) { func (h *FactoryHelper) createBisectionGameExtraData(ctx context.Context, client *sources.RollupClient) (extraData []byte, l2BlockNumber uint64) {
timeoutCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
err := wait.For(timeoutCtx, time.Second, func() (bool, error) {
status, err := client.SyncStatus(ctx)
if err != nil {
return false, err
}
return status.SafeL2.Number > 0, nil
})
h.require.NoError(err, "Safe head did not progress past genesis")
syncStatus, err := client.SyncStatus(ctx) syncStatus, err := client.SyncStatus(ctx)
h.require.NoError(err, "failed to get sync status") h.require.NoError(err, "failed to get sync status")
l2BlockNumber = syncStatus.SafeL2.Number l2BlockNumber = syncStatus.SafeL2.Number
h.t.Logf("Creating game with l2 block number: %v", l2BlockNumber)
extraData = make([]byte, 32) extraData = make([]byte, 32)
binary.BigEndian.PutUint64(extraData, l2BlockNumber) binary.BigEndian.PutUint64(extraData[24:], l2BlockNumber)
return return
} }
......
...@@ -402,10 +402,12 @@ func (g *OutputGameHelper) gameData(ctx context.Context) string { ...@@ -402,10 +402,12 @@ func (g *OutputGameHelper) gameData(ctx context.Context) string {
info = info + fmt.Sprintf("%v - Position: %v, Depth: %v, IndexAtDepth: %v Trace Index: %v, Value: %v, Countered: %v, ParentIndex: %v\n", info = info + fmt.Sprintf("%v - Position: %v, Depth: %v, IndexAtDepth: %v Trace Index: %v, Value: %v, Countered: %v, ParentIndex: %v\n",
i, claim.Position.Int64(), pos.Depth(), pos.IndexAtDepth(), pos.TraceIndex(maxDepth), common.Hash(claim.Claim).Hex(), claim.Countered, claim.ParentIndex) i, claim.Position.Int64(), pos.Depth(), pos.IndexAtDepth(), pos.TraceIndex(maxDepth), common.Hash(claim.Claim).Hex(), claim.Countered, claim.ParentIndex)
} }
l2BlockNum, err := g.game.L2BlockNumber(opts)
g.require.NoError(err, "Load l2 block number")
status, err := g.game.Status(opts) status, err := g.game.Status(opts)
g.require.NoError(err, "Load game status") g.require.NoError(err, "Load game status")
return fmt.Sprintf("Game %v - %v - Split Depth: %v - Max Depth: %v:\n%v\n", return fmt.Sprintf("Game %v - %v - L2 Block: %v - Split Depth: %v - Max Depth: %v:\n%v\n",
g.addr, Status(status), splitDepth, maxDepth, info) g.addr, Status(status), l2BlockNum.Uint64(), splitDepth, maxDepth, info)
} }
func (g *OutputGameHelper) LogGameData(ctx context.Context) { func (g *OutputGameHelper) LogGameData(ctx context.Context) {
......
...@@ -138,7 +138,7 @@ func (f *fakePoS) Start() error { ...@@ -138,7 +138,7 @@ func (f *fakePoS) Start() error {
tim.Stop() tim.Stop()
return nil return nil
} }
envelope, err := f.engineAPI.GetPayloadV2(*res.PayloadID) envelope, err := f.engineAPI.GetPayloadV3(*res.PayloadID)
if err != nil { if err != nil {
f.log.Error("failed to finish building L1 block", "err", err) f.log.Error("failed to finish building L1 block", "err", err)
continue continue
...@@ -178,7 +178,7 @@ func (f *fakePoS) Start() error { ...@@ -178,7 +178,7 @@ func (f *fakePoS) Start() error {
continue continue
} }
} }
if _, err := f.engineAPI.ForkchoiceUpdatedV2(engine.ForkchoiceStateV1{ if _, err := f.engineAPI.ForkchoiceUpdatedV3(engine.ForkchoiceStateV1{
HeadBlockHash: envelope.ExecutionPayload.BlockHash, HeadBlockHash: envelope.ExecutionPayload.BlockHash,
SafeBlockHash: safe.Hash(), SafeBlockHash: safe.Hash(),
FinalizedBlockHash: finalized.Hash(), FinalizedBlockHash: finalized.Hash(),
......
package e2eutils
import (
"fmt"
"os"
"strings"
"testing"
"unicode"
"unicode/utf8"
"github.com/stretchr/testify/require"
)
// TempDir creates a temporary directory named after the running test and
// registers a cleanup that removes it when the test finishes. It mirrors
// the sanitization done by the standard library's t.TempDir(): runes that
// could confuse os.MkdirTemp (path separators, glob metacharacters, ...)
// are stripped from the test name before using it as the prefix.
func TempDir(t *testing.T) string {
	// Keep only characters known to be safe in a directory name; drop
	// everything else. Mirrors the filter in testing.(*T).TempDir.
	sanitize := func(r rune) rune {
		if r >= utf8.RuneSelf {
			if unicode.IsLetter(r) || unicode.IsNumber(r) {
				return r
			}
			return -1
		}
		switch {
		case '0' <= r && r <= '9',
			'a' <= r && r <= 'z',
			'A' <= r && r <= 'Z':
			return r
		case strings.ContainsRune("!#$%&()+,-.=@^_{}~ ", r):
			return r
		}
		return -1
	}
	dir, err := os.MkdirTemp("", strings.Map(sanitize, fmt.Sprintf("op-e2e-%s", t.Name())))
	require.NoError(t, err)
	t.Cleanup(func() {
		if rmErr := os.RemoveAll(dir); rmErr != nil {
			t.Logf("Error removing temp dir %s: %s", dir, rmErr)
		}
	})
	return dir
}
package transactions
import (
"crypto/ecdsa"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/holiman/uint256"
)
// Precomputed empty-blob KZG artifacts shared by CreateEmptyBlobTx. They are
// computed once at package init because the commitment/proof derivation is
// comparatively expensive and the values never change.
var (
	emptyBlob       kzg4844.Blob
	emptyBlobCommit kzg4844.Commitment
	emptyBlobProof  kzg4844.Proof
)

func init() {
	// emptyBlob's zero value is already an all-zero blob, so no explicit
	// assignment is needed before deriving its commitment and proof.
	var err error
	emptyBlobCommit, err = kzg4844.BlobToCommitment(emptyBlob)
	if err != nil {
		panic("failed to create empty blob commitment: " + err.Error())
	}
	emptyBlobProof, err = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
	if err != nil {
		panic("failed to create empty blob proof: " + err.Error())
	}
}
// with thanks to fjl
// https://github.com/ethereum/go-ethereum/commit/2a6beb6a39d7cb3c5906dd4465d65da6efcc73cd

// CreateEmptyBlobTx builds an unsigned blob transaction carrying a single
// all-zero blob. The blob hashes are always populated on the transaction;
// when withSidecar is true the full sidecar (blob, commitment, proof) is
// attached as well. The key parameter is currently unused by this helper;
// callers sign the returned transaction themselves.
func CreateEmptyBlobTx(key *ecdsa.PrivateKey, withSidecar bool, chainID uint64) *types.BlobTx {
	sc := &types.BlobTxSidecar{
		Blobs:       []kzg4844.Blob{emptyBlob},
		Commitments: []kzg4844.Commitment{emptyBlobCommit},
		Proofs:      []kzg4844.Proof{emptyBlobProof},
	}
	tx := &types.BlobTx{
		ChainID:    uint256.NewInt(chainID),
		Nonce:      0,
		GasTipCap:  uint256.NewInt(2200000000000),
		GasFeeCap:  uint256.NewInt(5000000000000),
		Gas:        25000,
		To:         common.Address{0x03, 0x04, 0x05},
		Value:      uint256.NewInt(99),
		Data:       make([]byte, 50),
		BlobFeeCap: uint256.NewInt(150000000000),
		BlobHashes: sc.BlobHashes(),
	}
	if withSidecar {
		tx.Sidecar = sc
	}
	return tx
}
...@@ -2,12 +2,15 @@ package main ...@@ -2,12 +2,15 @@ package main
import ( import (
"encoding/json" "encoding/json"
"errors"
"flag" "flag"
"fmt" "fmt"
"os" "os"
"os/exec" "os/exec"
"os/signal"
"path/filepath" "path/filepath"
"strconv" "strconv"
"syscall"
"time" "time"
"github.com/ethereum-optimism/optimism/op-e2e/external" "github.com/ethereum-optimism/optimism/op-e2e/external"
...@@ -67,11 +70,39 @@ func run(configPath string) error { ...@@ -67,11 +70,39 @@ func run(configPath string) error {
} }
fmt.Printf("================== op-geth shim awaiting termination ==========================\n") fmt.Printf("================== op-geth shim awaiting termination ==========================\n")
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
select { select {
case <-sigs:
fmt.Printf("================== op-geth shim caught signal, killing ==========================\n")
sess.session.Terminate()
return awaitExit(sess.session)
case <-sess.session.Exited: case <-sess.session.Exited:
return fmt.Errorf("geth exited") return fmt.Errorf("geth exited with code %d", sess.session.ExitCode())
case <-time.After(30 * time.Minute): case <-time.After(30 * time.Minute):
return fmt.Errorf("exiting after 30 minute timeout") fmt.Printf("================== op-geth shim timed out, killing ==========================\n")
sess.session.Terminate()
if err := awaitExit(sess.session); err != nil {
fmt.Printf("error killing geth: %v\n", err)
}
return errors.New("geth timed out after 30 minutes")
}
}
// awaitExit waits up to 5 seconds for the session's process to exit (the
// caller has already asked it to terminate). If it has not exited by then,
// the process is forcibly killed and we wait up to a further 30 seconds for
// the kill to take effect before giving up with an error.
func awaitExit(sess *gexec.Session) error {
	select {
	case <-sess.Exited:
		return nil
	case <-time.After(5 * time.Second):
		// Graceful shutdown didn't complete in time; escalate to a kill.
		sess.Kill()
		select {
		case <-sess.Exited:
			return nil
		case <-time.After(30 * time.Second):
			return fmt.Errorf("exiting after 30 second timeout")
		}
	}
}
......
...@@ -38,12 +38,8 @@ func TestOutputCannonGame(t *testing.T) { ...@@ -38,12 +38,8 @@ func TestOutputCannonGame(t *testing.T) {
game.Attack(ctx, i, common.Hash{0xaa}) game.Attack(ctx, i, common.Hash{0xaa})
game.LogGameData(ctx) game.LogGameData(ctx)
} }
game.WaitForCorrectOutputRoot(ctx, splitDepth)
// Post the first cannon output root (with 01 status code to show the output root is invalid) // Wait for the challenger to post the first claim in the cannon trace
game.Attack(ctx, splitDepth, common.Hash{0x01}) game.WaitForClaimAtDepth(ctx, int(splitDepth+1))
// Challenger should counter
game.WaitForClaimAtDepth(ctx, int(splitDepth+2))
game.LogGameData(ctx) game.LogGameData(ctx)
} }
...@@ -47,6 +47,7 @@ type OpGeth struct { ...@@ -47,6 +47,7 @@ type OpGeth struct {
L1Head eth.BlockInfo L1Head eth.BlockInfo
L2Head *eth.ExecutionPayload L2Head *eth.ExecutionPayload
sequenceNum uint64 sequenceNum uint64
lgr log.Logger
} }
func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, error) { func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, error) {
...@@ -117,11 +118,14 @@ func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, e ...@@ -117,11 +118,14 @@ func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, e
L2ChainConfig: l2Genesis.Config, L2ChainConfig: l2Genesis.Config,
L1Head: eth.BlockToInfo(l1Block), L1Head: eth.BlockToInfo(l1Block),
L2Head: genesisPayload, L2Head: genesisPayload,
lgr: logger,
}, nil }, nil
} }
func (d *OpGeth) Close() { func (d *OpGeth) Close() {
_ = d.node.Close() if err := d.node.Close(); err != nil {
d.lgr.Error("error closing node", "err", err)
}
d.l2Engine.Close() d.l2Engine.Close()
d.L2Client.Close() d.L2Client.Close()
} }
......
...@@ -31,6 +31,8 @@ import ( ...@@ -31,6 +31,8 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys" "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-e2e/config" "github.com/ethereum-optimism/optimism/op-e2e/config"
gethutils "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/metrics" "github.com/ethereum-optimism/optimism/op-node/metrics"
rollupNode "github.com/ethereum-optimism/optimism/op-node/node" rollupNode "github.com/ethereum-optimism/optimism/op-node/node"
...@@ -171,6 +173,42 @@ func TestSystemE2EDencunAtGenesis(t *testing.T) { ...@@ -171,6 +173,42 @@ func TestSystemE2EDencunAtGenesis(t *testing.T) {
require.NotNil(t, head.ExcessBlobGas(), "L1 is building dencun blocks since genesis") require.NotNil(t, head.ExcessBlobGas(), "L1 is building dencun blocks since genesis")
} }
// TestSystemE2EDencunAtGenesisWithBlobs tests that L2 keeps deriving safe
// blocks when blob transactions are present on L1 (Cancun active from L1
// genesis). It sends an empty blob tx on L1 and then waits for the L2 block
// carrying that tx's L1 origin to become safe.
func TestSystemE2EDencunAtGenesisWithBlobs(t *testing.T) {
	InitParallel(t)

	cfg := DefaultSystemConfig(t)
	// Cancun is active from genesis: an offset of 0 means genesis time + 0.
	genesisActivation := uint64(0)
	cfg.DeployConfig.L1CancunTimeOffset = &genesisActivation

	sys, err := cfg.Start(t)
	require.Nil(t, err, "Error starting up system")
	defer sys.Close()

	// Send a blob-carrying transaction on L1 (with sidecar attached).
	ethPrivKey := sys.Cfg.Secrets.Alice
	txData := transactions.CreateEmptyBlobTx(ethPrivKey, true, sys.Cfg.L1ChainIDBig().Uint64())
	tx := types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L1ChainIDBig()), txData)

	sendCtx, sendCancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer sendCancel()

	l1Client := sys.Clients["l1"]
	err = l1Client.SendTransaction(sendCtx, tx)
	require.NoError(t, err, "Sending L1 empty blob tx")

	// Wait for the blob transaction to be included on L1.
	blockContainsBlob, err := geth.WaitForTransaction(tx.Hash(), l1Client, 30*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
	require.Nil(t, err, "Waiting for blob tx on L1")

	// Then wait for L2 to derive past the blob block and mark it safe.
	l2Client := sys.Clients["sequencer"]
	finalizedBlock, err := gethutils.WaitForL1OriginOnL2(blockContainsBlob.BlockNumber.Uint64(), l2Client, 30*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
	require.Nil(t, err, "Waiting for L1 origin of blob tx on L2")
	finalizationTimeout := 30 * time.Duration(cfg.DeployConfig.L1BlockTime) * time.Second
	_, err = gethutils.WaitForBlockToBeSafe(finalizedBlock.Header().Number, l2Client, finalizationTimeout)
	require.Nil(t, err, "Waiting for safety of L2 block")
}
// TestSystemE2E sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that L1 deposits are reflected on L2. // TestSystemE2E sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that L1 deposits are reflected on L2.
// All nodes are run in process (but are the full nodes, not mocked or stubbed). // All nodes are run in process (but are the full nodes, not mocked or stubbed).
func TestSystemE2E(t *testing.T) { func TestSystemE2E(t *testing.T) {
......
...@@ -24,7 +24,12 @@ the transaction hash. ...@@ -24,7 +24,12 @@ the transaction hash.
`batch_decoder reassemble` goes through all of the found frames in the cache & then turns them `batch_decoder reassemble` goes through all of the found frames in the cache & then turns them
into channels. It then stores the channels with metadata on disk where the file name is the Channel ID. into channels. It then stores the channels with metadata on disk where the file name is the Channel ID.
Each channel can contain multiple batches.
If the batch is a span batch, `batch_decoder` derives the span batch using `L2BlockTime`, `L2GenesisTime`, and `L2ChainID`.
These arguments can be provided to the binary using flags.
If the batch is a singular batch, `batch_decoder` does not derive and stores the batch as is.
### Force Close ### Force Close
...@@ -45,7 +50,7 @@ those frames need to be generated differently than simply closing the channel. ...@@ -45,7 +50,7 @@ those frames need to be generated differently than simply closing the channel.
jq . $JSON_FILE jq . $JSON_FILE
# Print the number of valid & invalid transactions # Print the number of valid & invalid transactions
jq .valid_data $TX_DIR/* | sort | uniq -c jq .valid_data $TX_DIR/* | sort | uniq -c
# Select all transactions that have invalid data & then print the transaction hash # Select all transactions that have invalid data & then print the transaction hash
jq "select(.valid_data == false)|.tx.hash" $TX_DIR jq "select(.valid_data == false)|.tx.hash" $TX_DIR
......
...@@ -4,11 +4,13 @@ import ( ...@@ -4,11 +4,13 @@ import (
"context" "context"
"fmt" "fmt"
"log" "log"
"math/big"
"os" "os"
"time" "time"
"github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/fetch" "github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/fetch"
"github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/reassemble" "github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/reassemble"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
...@@ -77,7 +79,7 @@ func main() { ...@@ -77,7 +79,7 @@ func main() {
End: uint64(cliCtx.Int("end")), End: uint64(cliCtx.Int("end")),
ChainID: chainID, ChainID: chainID,
BatchSenders: map[common.Address]struct{}{ BatchSenders: map[common.Address]struct{}{
common.HexToAddress(cliCtx.String("sender")): struct{}{}, common.HexToAddress(cliCtx.String("sender")): {},
}, },
BatchInbox: common.HexToAddress(cliCtx.String("inbox")), BatchInbox: common.HexToAddress(cliCtx.String("inbox")),
OutDirectory: cliCtx.String("out"), OutDirectory: cliCtx.String("out"),
...@@ -92,13 +94,8 @@ func main() { ...@@ -92,13 +94,8 @@ func main() {
}, },
{ {
Name: "reassemble", Name: "reassemble",
Usage: "Reassembles channels from fetched batches", Usage: "Reassembles channels from fetched batch transactions and decode batches",
Flags: []cli.Flag{ Flags: []cli.Flag{
&cli.StringFlag{
Name: "inbox",
Value: "0xff00000000000000000000000000000000000420",
Usage: "Batch Inbox Address",
},
&cli.StringFlag{ &cli.StringFlag{
Name: "in", Name: "in",
Value: "/tmp/batch_decoder/transactions_cache", Value: "/tmp/batch_decoder/transactions_cache",
...@@ -109,12 +106,60 @@ func main() { ...@@ -109,12 +106,60 @@ func main() {
Value: "/tmp/batch_decoder/channel_cache", Value: "/tmp/batch_decoder/channel_cache",
Usage: "Cache directory for the found channels", Usage: "Cache directory for the found channels",
}, },
&cli.Uint64Flag{
Name: "l2-chain-id",
Value: 10,
Usage: "L2 chain id for span batch derivation. Default value from op-mainnet.",
},
&cli.Uint64Flag{
Name: "l2-genesis-timestamp",
Value: 1686068903,
Usage: "L2 genesis time for span batch derivation. Default value from op-mainnet. " +
"Superchain-registry prioritized when given value is inconsistent.",
},
&cli.Uint64Flag{
Name: "l2-block-time",
Value: 2,
Usage: "L2 block time for span batch derivation. Default value from op-mainnet. " +
"Superchain-registry prioritized when given value is inconsistent.",
},
&cli.StringFlag{
Name: "inbox",
Value: "0xFF00000000000000000000000000000000000010",
Usage: "Batch Inbox Address. Default value from op-mainnet. " +
"Superchain-registry prioritized when given value is inconsistent.",
},
}, },
Action: func(cliCtx *cli.Context) error { Action: func(cliCtx *cli.Context) error {
var (
L2GenesisTime uint64 = cliCtx.Uint64("l2-genesis-timestamp")
L2BlockTime uint64 = cliCtx.Uint64("l2-block-time")
BatchInboxAddress common.Address = common.HexToAddress(cliCtx.String("inbox"))
)
L2ChainID := new(big.Int).SetUint64(cliCtx.Uint64("l2-chain-id"))
rollupCfg, err := rollup.LoadOPStackRollupConfig(L2ChainID.Uint64())
if err == nil {
// prioritize superchain config
if L2GenesisTime != rollupCfg.Genesis.L2Time {
L2GenesisTime = rollupCfg.Genesis.L2Time
fmt.Printf("L2GenesisTime overridden: %v\n", L2GenesisTime)
}
if L2BlockTime != rollupCfg.BlockTime {
L2BlockTime = rollupCfg.BlockTime
fmt.Printf("L2BlockTime overridden: %v\n", L2BlockTime)
}
if BatchInboxAddress != rollupCfg.BatchInboxAddress {
BatchInboxAddress = rollupCfg.BatchInboxAddress
fmt.Printf("BatchInboxAddress overridden: %v\n", BatchInboxAddress)
}
}
config := reassemble.Config{ config := reassemble.Config{
BatchInbox: common.HexToAddress(cliCtx.String("inbox")), BatchInbox: BatchInboxAddress,
InDirectory: cliCtx.String("in"), InDirectory: cliCtx.String("in"),
OutDirectory: cliCtx.String("out"), OutDirectory: cliCtx.String("out"),
L2ChainID: L2ChainID,
L2GenesisTime: L2GenesisTime,
L2BlockTime: L2BlockTime,
} }
reassemble.Channels(config) reassemble.Channels(config)
return nil return nil
......
...@@ -5,13 +5,11 @@ import ( ...@@ -5,13 +5,11 @@ import (
"fmt" "fmt"
"io" "io"
"log" "log"
"math/big"
"os" "os"
"path" "path"
"sort" "sort"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/fetch" "github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/fetch"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
...@@ -24,7 +22,8 @@ type ChannelWithMetadata struct { ...@@ -24,7 +22,8 @@ type ChannelWithMetadata struct {
InvalidFrames bool `json:"invalid_frames"` InvalidFrames bool `json:"invalid_frames"`
InvalidBatches bool `json:"invalid_batches"` InvalidBatches bool `json:"invalid_batches"`
Frames []FrameWithMetadata `json:"frames"` Frames []FrameWithMetadata `json:"frames"`
Batches []derive.BatchData `json:"batches"` Batches []derive.Batch `json:"batches"`
BatchTypes []int `json:"batch_types"`
} }
type FrameWithMetadata struct { type FrameWithMetadata struct {
...@@ -36,9 +35,12 @@ type FrameWithMetadata struct { ...@@ -36,9 +35,12 @@ type FrameWithMetadata struct {
} }
type Config struct { type Config struct {
BatchInbox common.Address BatchInbox common.Address
InDirectory string InDirectory string
OutDirectory string OutDirectory string
L2ChainID *big.Int
L2GenesisTime uint64
L2BlockTime uint64
} }
func LoadFrames(directory string, inbox common.Address) []FrameWithMetadata { func LoadFrames(directory string, inbox common.Address) []FrameWithMetadata {
...@@ -68,9 +70,8 @@ func Channels(config Config) { ...@@ -68,9 +70,8 @@ func Channels(config Config) {
for _, frame := range frames { for _, frame := range frames {
framesByChannel[frame.Frame.ID] = append(framesByChannel[frame.Frame.ID], frame) framesByChannel[frame.Frame.ID] = append(framesByChannel[frame.Frame.ID], frame)
} }
cfg := chaincfg.Mainnet
for id, frames := range framesByChannel { for id, frames := range framesByChannel {
ch := processFrames(cfg, id, frames) ch := processFrames(config, id, frames)
filename := path.Join(config.OutDirectory, fmt.Sprintf("%s.json", id.String())) filename := path.Join(config.OutDirectory, fmt.Sprintf("%s.json", id.String()))
if err := writeChannel(ch, filename); err != nil { if err := writeChannel(ch, filename); err != nil {
log.Fatal(err) log.Fatal(err)
...@@ -88,7 +89,7 @@ func writeChannel(ch ChannelWithMetadata, filename string) error { ...@@ -88,7 +89,7 @@ func writeChannel(ch ChannelWithMetadata, filename string) error {
return enc.Encode(ch) return enc.Encode(ch)
} }
func processFrames(cfg *rollup.Config, id derive.ChannelID, frames []FrameWithMetadata) ChannelWithMetadata { func processFrames(cfg Config, id derive.ChannelID, frames []FrameWithMetadata) ChannelWithMetadata {
ch := derive.NewChannel(id, eth.L1BlockRef{Number: frames[0].InclusionBlock}) ch := derive.NewChannel(id, eth.L1BlockRef{Number: frames[0].InclusionBlock})
invalidFrame := false invalidFrame := false
...@@ -104,17 +105,39 @@ func processFrames(cfg *rollup.Config, id derive.ChannelID, frames []FrameWithMe ...@@ -104,17 +105,39 @@ func processFrames(cfg *rollup.Config, id derive.ChannelID, frames []FrameWithMe
} }
} }
var batches []derive.BatchData var batches []derive.Batch
var batchTypes []int
invalidBatches := false invalidBatches := false
if ch.IsReady() { if ch.IsReady() {
br, err := derive.BatchReader(ch.Reader()) br, err := derive.BatchReader(ch.Reader())
if err == nil { if err == nil {
for batch, err := br(); err != io.EOF; batch, err = br() { for batchData, err := br(); err != io.EOF; batchData, err = br() {
if err != nil { if err != nil {
fmt.Printf("Error reading batch for channel %v. Err: %v\n", id.String(), err) fmt.Printf("Error reading batchData for channel %v. Err: %v\n", id.String(), err)
invalidBatches = true invalidBatches = true
} else { } else {
batches = append(batches, *batch) batchType := batchData.GetBatchType()
batchTypes = append(batchTypes, int(batchType))
switch batchType {
case derive.SingularBatchType:
singularBatch, err := derive.GetSingularBatch(batchData)
if err != nil {
invalidBatches = true
fmt.Printf("Error converting singularBatch from batchData for channel %v. Err: %v\n", id.String(), err)
}
// singularBatch will be nil when errored
batches = append(batches, singularBatch)
case derive.SpanBatchType:
spanBatch, err := derive.DeriveSpanBatch(batchData, cfg.L2BlockTime, cfg.L2GenesisTime, cfg.L2ChainID)
if err != nil {
invalidBatches = true
fmt.Printf("Error deriving spanBatch from batchData for channel %v. Err: %v\n", id.String(), err)
}
// spanBatch will be nil when errored
batches = append(batches, spanBatch)
default:
fmt.Printf("unrecognized batch type: %d for channel %v.\n", batchData.GetBatchType(), id.String())
}
} }
} }
} else { } else {
...@@ -131,6 +154,7 @@ func processFrames(cfg *rollup.Config, id derive.ChannelID, frames []FrameWithMe ...@@ -131,6 +154,7 @@ func processFrames(cfg *rollup.Config, id derive.ChannelID, frames []FrameWithMe
InvalidFrames: invalidFrame, InvalidFrames: invalidFrame,
InvalidBatches: invalidBatches, InvalidBatches: invalidBatches,
Batches: batches, Batches: batches,
BatchTypes: batchTypes,
} }
} }
......
...@@ -172,9 +172,7 @@ func TestBatchRoundTrip(t *testing.T) { ...@@ -172,9 +172,7 @@ func TestBatchRoundTrip(t *testing.T) {
err = dec.UnmarshalBinary(enc) err = dec.UnmarshalBinary(enc)
require.NoError(t, err) require.NoError(t, err)
if dec.GetBatchType() == SpanBatchType { if dec.GetBatchType() == SpanBatchType {
rawSpanBatch, ok := dec.inner.(*RawSpanBatch) _, err := DeriveSpanBatch(&dec, blockTime, genesisTimestamp, chainID)
require.True(t, ok)
_, err := rawSpanBatch.derive(blockTime, genesisTimestamp, chainID)
require.NoError(t, err) require.NoError(t, err)
} }
require.Equal(t, batch, &dec, "Batch not equal test case %v", i) require.Equal(t, batch, &dec, "Batch not equal test case %v", i)
...@@ -222,9 +220,7 @@ func TestBatchRoundTripRLP(t *testing.T) { ...@@ -222,9 +220,7 @@ func TestBatchRoundTripRLP(t *testing.T) {
err = dec.DecodeRLP(s) err = dec.DecodeRLP(s)
require.NoError(t, err) require.NoError(t, err)
if dec.GetBatchType() == SpanBatchType { if dec.GetBatchType() == SpanBatchType {
rawSpanBatch, ok := dec.inner.(*RawSpanBatch) _, err = DeriveSpanBatch(&dec, blockTime, genesisTimestamp, chainID)
require.True(t, ok)
_, err := rawSpanBatch.derive(blockTime, genesisTimestamp, chainID)
require.NoError(t, err) require.NoError(t, err)
} }
require.Equal(t, batch, &dec, "Batch not equal test case %v", i) require.Equal(t, batch, &dec, "Batch not equal test case %v", i)
......
...@@ -3,7 +3,6 @@ package derive ...@@ -3,7 +3,6 @@ package derive
import ( import (
"bytes" "bytes"
"context" "context"
"errors"
"fmt" "fmt"
"io" "io"
...@@ -92,13 +91,9 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) { ...@@ -92,13 +91,9 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) {
} }
switch batchData.GetBatchType() { switch batchData.GetBatchType() {
case SingularBatchType: case SingularBatchType:
singularBatch, ok := batchData.inner.(*SingularBatch)
if !ok {
return nil, NewCriticalError(errors.New("failed type assertion to SingularBatch"))
}
cr.log.Debug("decoded singular batch from channel") cr.log.Debug("decoded singular batch from channel")
cr.metrics.RecordDerivedBatches("singular") cr.metrics.RecordDerivedBatches("singular")
return singularBatch, nil return GetSingularBatch(batchData)
case SpanBatchType: case SpanBatchType:
if origin := cr.Origin(); !cr.cfg.IsDelta(origin.Time) { if origin := cr.Origin(); !cr.cfg.IsDelta(origin.Time) {
// Check hard fork activation with the L1 inclusion block time instead of the L1 origin block time. // Check hard fork activation with the L1 inclusion block time instead of the L1 origin block time.
...@@ -106,18 +101,9 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) { ...@@ -106,18 +101,9 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) {
// This is just for early dropping invalid batches as soon as possible. // This is just for early dropping invalid batches as soon as possible.
return nil, NewTemporaryError(fmt.Errorf("cannot accept span batch in L1 block %s at time %d", origin, origin.Time)) return nil, NewTemporaryError(fmt.Errorf("cannot accept span batch in L1 block %s at time %d", origin, origin.Time))
} }
rawSpanBatch, ok := batchData.inner.(*RawSpanBatch)
if !ok {
return nil, NewCriticalError(errors.New("failed type assertion to SpanBatch"))
}
// If the batch type is Span batch, derive block inputs from RawSpanBatch.
spanBatch, err := rawSpanBatch.derive(cr.cfg.BlockTime, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID)
if err != nil {
return nil, err
}
cr.log.Debug("decoded span batch from channel") cr.log.Debug("decoded span batch from channel")
cr.metrics.RecordDerivedBatches("span") cr.metrics.RecordDerivedBatches("span")
return spanBatch, nil return DeriveSpanBatch(batchData, cr.cfg.BlockTime, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID)
default: default:
// error is bubbled up to user, but pipeline can skip the batch and continue after. // error is bubbled up to user, but pipeline can skip the batch and continue after.
return nil, NewTemporaryError(fmt.Errorf("unrecognized batch type: %d", batchData.GetBatchType())) return nil, NewTemporaryError(fmt.Errorf("unrecognized batch type: %d", batchData.GetBatchType()))
......
...@@ -2,6 +2,7 @@ package derive ...@@ -2,6 +2,7 @@ package derive
import ( import (
"bytes" "bytes"
"errors"
"io" "io"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
...@@ -66,3 +67,12 @@ func (b *SingularBatch) encode(w io.Writer) error { ...@@ -66,3 +67,12 @@ func (b *SingularBatch) encode(w io.Writer) error {
func (b *SingularBatch) decode(r *bytes.Reader) error { func (b *SingularBatch) decode(r *bytes.Reader) error {
return rlp.Decode(r, b) return rlp.Decode(r, b)
} }
// GetSingularBatch retrieves SingularBatch from batchData
func GetSingularBatch(batchData *BatchData) (*SingularBatch, error) {
singularBatch, ok := batchData.inner.(*SingularBatch)
if !ok {
return nil, NewCriticalError(errors.New("failed type assertion to SingularBatch"))
}
return singularBatch, nil
}
This diff is collapsed.
...@@ -331,18 +331,18 @@ func TestSpanBatchDerive(t *testing.T) { ...@@ -331,18 +331,18 @@ func TestSpanBatchDerive(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
blockCount := len(singularBatches) blockCount := len(singularBatches)
require.Equal(t, safeL2Head.Hash.Bytes()[:20], spanBatchDerived.parentCheck[:]) require.Equal(t, safeL2Head.Hash.Bytes()[:20], spanBatchDerived.ParentCheck[:])
require.Equal(t, singularBatches[blockCount-1].Epoch().Hash.Bytes()[:20], spanBatchDerived.l1OriginCheck[:]) require.Equal(t, singularBatches[blockCount-1].Epoch().Hash.Bytes()[:20], spanBatchDerived.L1OriginCheck[:])
require.Equal(t, len(singularBatches), int(rawSpanBatch.blockCount)) require.Equal(t, len(singularBatches), int(rawSpanBatch.blockCount))
for i := 1; i < len(singularBatches); i++ { for i := 1; i < len(singularBatches); i++ {
require.Equal(t, spanBatchDerived.batches[i].Timestamp, spanBatchDerived.batches[i-1].Timestamp+l2BlockTime) require.Equal(t, spanBatchDerived.Batches[i].Timestamp, spanBatchDerived.Batches[i-1].Timestamp+l2BlockTime)
} }
for i := 0; i < len(singularBatches); i++ { for i := 0; i < len(singularBatches); i++ {
require.Equal(t, singularBatches[i].EpochNum, spanBatchDerived.batches[i].EpochNum) require.Equal(t, singularBatches[i].EpochNum, spanBatchDerived.Batches[i].EpochNum)
require.Equal(t, singularBatches[i].Timestamp, spanBatchDerived.batches[i].Timestamp) require.Equal(t, singularBatches[i].Timestamp, spanBatchDerived.Batches[i].Timestamp)
require.Equal(t, singularBatches[i].Transactions, spanBatchDerived.batches[i].Transactions) require.Equal(t, singularBatches[i].Transactions, spanBatchDerived.Batches[i].Transactions)
} }
} }
} }
...@@ -511,8 +511,8 @@ func TestSpanBatchBuilder(t *testing.T) { ...@@ -511,8 +511,8 @@ func TestSpanBatchBuilder(t *testing.T) {
for i := 0; i < len(singularBatches); i++ { for i := 0; i < len(singularBatches); i++ {
spanBatchBuilder.AppendSingularBatch(singularBatches[i], seqNum) spanBatchBuilder.AppendSingularBatch(singularBatches[i], seqNum)
require.Equal(t, i+1, spanBatchBuilder.GetBlockCount()) require.Equal(t, i+1, spanBatchBuilder.GetBlockCount())
require.Equal(t, singularBatches[0].ParentHash.Bytes()[:20], spanBatchBuilder.spanBatch.parentCheck[:]) require.Equal(t, singularBatches[0].ParentHash.Bytes()[:20], spanBatchBuilder.spanBatch.ParentCheck[:])
require.Equal(t, singularBatches[i].EpochHash.Bytes()[:20], spanBatchBuilder.spanBatch.l1OriginCheck[:]) require.Equal(t, singularBatches[i].EpochHash.Bytes()[:20], spanBatchBuilder.spanBatch.L1OriginCheck[:])
} }
rawSpanBatch, err := spanBatchBuilder.GetRawSpanBatch() rawSpanBatch, err := spanBatchBuilder.GetRawSpanBatch()
......
...@@ -8,6 +8,18 @@ SHA=$(cat ./.foundryrc) ...@@ -8,6 +8,18 @@ SHA=$(cat ./.foundryrc)
# Check if there is a nightly tag corresponding to the `.foundryrc` commit hash # Check if there is a nightly tag corresponding to the `.foundryrc` commit hash
TAG="nightly-$SHA" TAG="nightly-$SHA"
# If the foundry repository exists and a branch is checked out, we need to abort
# any changes inside ~/.foundry/foundry-rs/foundry. This is because foundryup will
# attempt to pull the latest changes from the remote repository, which will fail
# if there are any uncommitted changes.
if [ -d ~/.foundry/foundry-rs/foundry ]; then
echo "Foundry repository exists! Aborting any changes..."
cd ~/.foundry/foundry-rs/foundry
git reset --hard
git clean -fd
cd -
fi
# Create a temporary directory # Create a temporary directory
TMP_DIR=$(mktemp -d) TMP_DIR=$(mktemp -d)
echo "Created tempdir @ $TMP_DIR" echo "Created tempdir @ $TMP_DIR"
......
...@@ -47,10 +47,11 @@ ...@@ -47,10 +47,11 @@
"l2GenesisDeltaTimeOffset": null, "l2GenesisDeltaTimeOffset": null,
"l2GenesisCanyonTimeOffset": "0x0", "l2GenesisCanyonTimeOffset": "0x0",
"faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98", "faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98",
"faultGameMaxDepth": 30, "faultGameMaxDepth": 44,
"faultGameMaxDuration": 1200, "faultGameMaxDuration": 1200,
"outputBisectionGameGenesisBlock": 0, "outputBisectionGameGenesisBlock": 0,
"outputBisectionGameSplitDepth": 15, "outputBisectionGameGenesisOutputRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"outputBisectionGameSplitDepth": 14,
"systemConfigStartBlock": 0, "systemConfigStartBlock": 0,
"requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
"recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000" "recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000"
......
This source diff could not be displayed because it is too large. You can view the blob instead.
...@@ -1024,11 +1024,11 @@ contract Deploy is Deployer { ...@@ -1024,11 +1024,11 @@ contract Deploy is Deployer {
// Set the Cannon FaultDisputeGame implementation in the factory. // Set the Cannon FaultDisputeGame implementation in the factory.
_setFaultGameImplementation({ _setFaultGameImplementation({
_factory: factory, _factory: factory,
_gameType: GameTypes.FAULT, _gameType: GameTypes.CANNON,
_absolutePrestate: loadMipsAbsolutePrestate(), _absolutePrestate: loadMipsAbsolutePrestate(),
_faultVm: IBigStepper(mustGetAddress("Mips")), _faultVm: IBigStepper(mustGetAddress("Mips")),
_maxGameDepth: cfg.faultGameMaxDepth() _maxGameDepth: 30 // Hard code depth for legacy game to keep e2e tests fast
}); });
} }
/// @notice Sets the implementation for the `OUTPUT_CANNON` game type in the `DisputeGameFactory` /// @notice Sets the implementation for the `OUTPUT_CANNON` game type in the `DisputeGameFactory`
...@@ -1106,6 +1106,7 @@ contract Deploy is Deployer { ...@@ -1106,6 +1106,7 @@ contract Deploy is Deployer {
_gameType: _gameType, _gameType: _gameType,
_absolutePrestate: _absolutePrestate, _absolutePrestate: _absolutePrestate,
_genesisBlockNumber: cfg.outputBisectionGameGenesisBlock(), _genesisBlockNumber: cfg.outputBisectionGameGenesisBlock(),
_genesisOutputRoot: Hash.wrap(cfg.outputBisectionGameGenesisOutputRoot()),
_maxGameDepth: _maxGameDepth, _maxGameDepth: _maxGameDepth,
_splitDepth: cfg.outputBisectionGameSplitDepth(), _splitDepth: cfg.outputBisectionGameSplitDepth(),
_gameDuration: Duration.wrap(uint64(cfg.faultGameMaxDuration())), _gameDuration: Duration.wrap(uint64(cfg.faultGameMaxDuration())),
...@@ -1130,13 +1131,13 @@ contract Deploy is Deployer { ...@@ -1130,13 +1131,13 @@ contract Deploy is Deployer {
uint8 rawGameType = GameType.unwrap(_gameType); uint8 rawGameType = GameType.unwrap(_gameType);
string memory gameTypeString; string memory gameTypeString;
if (rawGameType == 0) { if (rawGameType == GameType.unwrap(GameTypes.CANNON)) {
gameTypeString = "Cannon"; gameTypeString = "Cannon";
} else if (rawGameType == 253) { } else if (rawGameType == GameType.unwrap(GameTypes.OUTPUT_CANNON)) {
gameTypeString = "OutputBisectionCannon"; gameTypeString = "OutputBisectionCannon";
} else if (rawGameType == 254) { } else if (rawGameType == GameType.unwrap(GameTypes.OUTPUT_ALPHABET)) {
gameTypeString = "OutputBisectionAlphabet"; gameTypeString = "OutputBisectionAlphabet";
} else if (rawGameType == 255) { } else if (rawGameType == GameType.unwrap(GameTypes.ALPHABET)) {
gameTypeString = "Alphabet"; gameTypeString = "Alphabet";
} else { } else {
gameTypeString = "Unknown"; gameTypeString = "Unknown";
......
...@@ -51,6 +51,7 @@ contract DeployConfig is Script { ...@@ -51,6 +51,7 @@ contract DeployConfig is Script {
uint256 public faultGameMaxDepth; uint256 public faultGameMaxDepth;
uint256 public faultGameMaxDuration; uint256 public faultGameMaxDuration;
uint256 public outputBisectionGameGenesisBlock; uint256 public outputBisectionGameGenesisBlock;
bytes32 public outputBisectionGameGenesisOutputRoot;
uint256 public outputBisectionGameSplitDepth; uint256 public outputBisectionGameSplitDepth;
uint256 public systemConfigStartBlock; uint256 public systemConfigStartBlock;
uint256 public requiredProtocolVersion; uint256 public requiredProtocolVersion;
...@@ -107,6 +108,7 @@ contract DeployConfig is Script { ...@@ -107,6 +108,7 @@ contract DeployConfig is Script {
faultGameMaxDepth = stdJson.readUint(_json, "$.faultGameMaxDepth"); faultGameMaxDepth = stdJson.readUint(_json, "$.faultGameMaxDepth");
faultGameMaxDuration = stdJson.readUint(_json, "$.faultGameMaxDuration"); faultGameMaxDuration = stdJson.readUint(_json, "$.faultGameMaxDuration");
outputBisectionGameGenesisBlock = stdJson.readUint(_json, "$.outputBisectionGameGenesisBlock"); outputBisectionGameGenesisBlock = stdJson.readUint(_json, "$.outputBisectionGameGenesisBlock");
outputBisectionGameGenesisOutputRoot = stdJson.readBytes32(_json, "$.outputBisectionGameGenesisOutputRoot");
outputBisectionGameSplitDepth = stdJson.readUint(_json, "$.outputBisectionGameSplitDepth"); outputBisectionGameSplitDepth = stdJson.readUint(_json, "$.outputBisectionGameSplitDepth");
} }
} }
......
...@@ -18,30 +18,59 @@ TESTDATA_DIR="$CONTRACTS_DIR/.testdata" ...@@ -18,30 +18,59 @@ TESTDATA_DIR="$CONTRACTS_DIR/.testdata"
OUTFILE_L2="$TESTDATA_DIR/genesis.json" OUTFILE_L2="$TESTDATA_DIR/genesis.json"
OUTFILE_ROLLUP="$TESTDATA_DIR/rollup.json" OUTFILE_ROLLUP="$TESTDATA_DIR/rollup.json"
mkdir -p "$TESTDATA_DIR"
LOCKDIR="/tmp/lock-generate-l2-genesis"
if [ ! -f "$DEPLOY_ARTIFACT" ]; then
forge script $CONTRACTS_DIR/scripts/Deploy.s.sol:Deploy > /dev/null 2>&1 cleanup() {
fi rm -rf -- "$LOCKDIR"
}
if [ ! -f "$OUTFILE_L2" ]; then
go run $OP_NODE genesis l2 \ # Wait for the L2 outfile to be over 8M for up to $2 iterations
--deploy-config "$CONTRACTS_DIR/deploy-config/hardhat.json" \ # of $1 seconds. This is a hack to ensure that the outfile is fully
--l1-deployments "$DEPLOY_ARTIFACT" \ # written before the solidity tests try to read it
--l1-starting-block "$L1_STARTING_BLOCK_PATH" \ wait_l2_outfile() {
--outfile.l2 "$OUTFILE_L2" \ i=1
--outfile.rollup "$OUTFILE_ROLLUP" > /dev/null 2>&1 while [ $i -le $2 ]; do
fi i=$(($i + 1))
# Wait for the L2 outfile to be over 8M for up to 2 seconds if [ ! -f "$OUTFILE_L2" ]; then
# This is a hack to ensure that the outfile is fully written sleep $1
# before the solidity tests try to read it continue
for i in {1..8}; do fi
if [ $(du -m "$OUTFILE_L2" | cut -f1) -ge 8 ]; then
if [ $(du -m "$OUTFILE_L2" | cut -f1) -lt 8 ]; then
sleep $1
continue
fi
exit 0 exit 0
done
echo "L2 genesis file not generated in time. Exiting."
exit 1
}
# Directory creations are atomic, so we can use mkdir to
# create a lockfile that prevents subsequent invocations
# of the script from running concurrently.
if mkdir -- "$LOCKDIR" > /dev/null 2>&1; then
trap 'cleanup' EXIT
mkdir -p "$TESTDATA_DIR"
if [ ! -f "$DEPLOY_ARTIFACT" ]; then
forge script $CONTRACTS_DIR/scripts/Deploy.s.sol:Deploy > /dev/null 2>&1
fi fi
sleep 0.25
done
echo "L2 genesis file not generated in time. Exiting." if [ ! -f "$OUTFILE_L2" ]; then
exit 1 go run $OP_NODE genesis l2 \
\ No newline at end of file --deploy-config "$CONTRACTS_DIR/deploy-config/hardhat.json" \
--l1-deployments "$DEPLOY_ARTIFACT" \
--l1-starting-block "$L1_STARTING_BLOCK_PATH" \
--outfile.l2 "$OUTFILE_L2" \
--outfile.rollup "$OUTFILE_ROLLUP" > /dev/null 2>&1
fi
else
# Wait up to 5 minutes for the lock to be released
wait_l2_outfile 0.25 1200
fi
\ No newline at end of file
...@@ -100,8 +100,8 @@ ...@@ -100,8 +100,8 @@
"sourceCodeHash": "0x1d0cacaf259aff7802aae91a793e3c7234a4d063614cf9c72176fb04738e7c97" "sourceCodeHash": "0x1d0cacaf259aff7802aae91a793e3c7234a4d063614cf9c72176fb04738e7c97"
}, },
"src/dispute/OutputBisectionGame.sol": { "src/dispute/OutputBisectionGame.sol": {
"initCodeHash": "0xc5ac9d76d7c46ccc073f3e5d74a78253bf2f627b04af9b2e3c86803c44890075", "initCodeHash": "0x400a99278755979b815712d1d26598463dd98ed193df8cd1736ae2ae5831d7c7",
"sourceCodeHash": "0x68df25016fa101a9d40e5d00f839dd053e7b5aa0c73c06c61d483cdfda0be124" "sourceCodeHash": "0x7e267ad18eb946a0242df41ba044c5ee6f0b456e74bef07605a7dd2eb5b3ed01"
}, },
"src/legacy/DeployerWhitelist.sol": { "src/legacy/DeployerWhitelist.sol": {
"initCodeHash": "0x8de80fb23b26dd9d849f6328e56ea7c173cd9e9ce1f05c9beea559d1720deb3d", "initCodeHash": "0x8de80fb23b26dd9d849f6328e56ea7c173cd9e9ce1f05c9beea559d1720deb3d",
......
...@@ -16,6 +16,11 @@ ...@@ -16,6 +16,11 @@
"name": "_genesisBlockNumber", "name": "_genesisBlockNumber",
"type": "uint256" "type": "uint256"
}, },
{
"internalType": "Hash",
"name": "_genesisOutputRoot",
"type": "bytes32"
},
{ {
"internalType": "uint256", "internalType": "uint256",
"name": "_maxGameDepth", "name": "_maxGameDepth",
...@@ -198,6 +203,19 @@ ...@@ -198,6 +203,19 @@
"stateMutability": "view", "stateMutability": "view",
"type": "function" "type": "function"
}, },
{
"inputs": [],
"name": "GENESIS_OUTPUT_ROOT",
"outputs": [
{
"internalType": "Hash",
"name": "",
"type": "bytes32"
}
],
"stateMutability": "view",
"type": "function"
},
{ {
"inputs": [], "inputs": [],
"name": "MAX_GAME_DEPTH", "name": "MAX_GAME_DEPTH",
......
...@@ -61,10 +61,7 @@ interface IOutputBisectionGame is IDisputeGame { ...@@ -61,10 +61,7 @@ interface IOutputBisectionGame is IDisputeGame {
/// @param _claimIndex The index of the subgame root claim to resolve. /// @param _claimIndex The index of the subgame root claim to resolve.
function resolveClaim(uint256 _claimIndex) external payable; function resolveClaim(uint256 _claimIndex) external payable;
/// @notice An L1 block hash that contains the disputed output root, fetched from the /// @notice A block hash on the L1 that contains the disputed output root.
/// `BlockOracle` and verified by referencing the timestamp associated with the
/// first L2 Output Proposal in the `L2OutputOracle` that contains the disputed
/// L2 block number.
function l1Head() external view returns (Hash l1Head_); function l1Head() external view returns (Hash l1Head_);
/// @notice The l2BlockNumber of the disputed output root in the `L2OutputOracle`. /// @notice The l2BlockNumber of the disputed output root in the `L2OutputOracle`.
......
...@@ -79,17 +79,11 @@ enum GameStatus ...@@ -79,17 +79,11 @@ enum GameStatus
/// @title GameTypes /// @title GameTypes
/// @notice A library that defines the IDs of games that can be played. /// @notice A library that defines the IDs of games that can be played.
library GameTypes { library GameTypes {
/// @dev The game will use a `IDisputeGame` implementation that utilizes fault proofs. /// @dev A dispute game type the uses the cannon vm.
GameType internal constant FAULT = GameType.wrap(0); GameType internal constant CANNON = GameType.wrap(0);
/// @dev The game will use a `IDisputeGame` implementation that utilizes validity proofs.
GameType internal constant VALIDITY = GameType.wrap(1);
/// @dev The game will use a `IDisputeGame` implementation that utilizes attestation proofs.
GameType internal constant ATTESTATION = GameType.wrap(2);
/// @dev A dispute game type that performs output bisection and then uses the cannon vm. /// @dev A dispute game type that performs output bisection and then uses the cannon vm.
GameType internal constant OUTPUT_CANNON = GameType.wrap(253); GameType internal constant OUTPUT_CANNON = GameType.wrap(1);
/// @dev A dispute game type that performs output bisection and then uses an alphabet vm. /// @dev A dispute game type that performs output bisection and then uses an alphabet vm.
/// Note intended for production use. /// Note intended for production use.
......
...@@ -116,17 +116,17 @@ contract DisputeGameFactory_Create_Test is DisputeGameFactory_Init { ...@@ -116,17 +116,17 @@ contract DisputeGameFactory_Create_Test is DisputeGameFactory_Init {
contract DisputeGameFactory_SetImplementation_Test is DisputeGameFactory_Init { contract DisputeGameFactory_SetImplementation_Test is DisputeGameFactory_Init {
/// @dev Tests that the `setImplementation` function properly sets the implementation for a given `GameType`. /// @dev Tests that the `setImplementation` function properly sets the implementation for a given `GameType`.
function test_setImplementation_succeeds() public { function test_setImplementation_succeeds() public {
// There should be no implementation for the `GameTypes.FAULT` enum value, it has not been set. // There should be no implementation for the `GameTypes.CANNON` enum value, it has not been set.
assertEq(address(factory.gameImpls(GameTypes.FAULT)), address(0)); assertEq(address(factory.gameImpls(GameTypes.CANNON)), address(0));
vm.expectEmit(true, true, true, true, address(factory)); vm.expectEmit(true, true, true, true, address(factory));
emit ImplementationSet(address(1), GameTypes.FAULT); emit ImplementationSet(address(1), GameTypes.CANNON);
// Set the implementation for the `GameTypes.FAULT` enum value. // Set the implementation for the `GameTypes.CANNON` enum value.
factory.setImplementation(GameTypes.FAULT, IDisputeGame(address(1))); factory.setImplementation(GameTypes.CANNON, IDisputeGame(address(1)));
// Ensure that the implementation for the `GameTypes.FAULT` enum value is set. // Ensure that the implementation for the `GameTypes.CANNON` enum value is set.
assertEq(address(factory.gameImpls(GameTypes.FAULT)), address(1)); assertEq(address(factory.gameImpls(GameTypes.CANNON)), address(1));
} }
/// @dev Tests that the `setImplementation` function reverts when called by a non-owner. /// @dev Tests that the `setImplementation` function reverts when called by a non-owner.
...@@ -134,7 +134,7 @@ contract DisputeGameFactory_SetImplementation_Test is DisputeGameFactory_Init { ...@@ -134,7 +134,7 @@ contract DisputeGameFactory_SetImplementation_Test is DisputeGameFactory_Init {
// Ensure that the `setImplementation` function reverts when called by a non-owner. // Ensure that the `setImplementation` function reverts when called by a non-owner.
vm.prank(address(0)); vm.prank(address(0));
vm.expectRevert("Ownable: caller is not the owner"); vm.expectRevert("Ownable: caller is not the owner");
factory.setImplementation(GameTypes.FAULT, IDisputeGame(address(1))); factory.setImplementation(GameTypes.CANNON, IDisputeGame(address(1)));
} }
} }
......
...@@ -152,7 +152,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { ...@@ -152,7 +152,7 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init {
if (vmStatus == 1 || vmStatus == 2) rootClaim = changeClaimStatus(rootClaim, VMStatuses.VALID); if (vmStatus == 1 || vmStatus == 2) rootClaim = changeClaimStatus(rootClaim, VMStatuses.VALID);
vm.expectRevert(abi.encodeWithSelector(UnexpectedRootClaim.selector, rootClaim)); vm.expectRevert(abi.encodeWithSelector(UnexpectedRootClaim.selector, rootClaim));
factory.create(GameTypes.FAULT, rootClaim, extraData); factory.create(GameTypes.CANNON, rootClaim, extraData);
} }
/// @dev Tests that the game is initialized with the correct data. /// @dev Tests that the game is initialized with the correct data.
......
...@@ -62,7 +62,7 @@ ...@@ -62,7 +62,7 @@
"tsup": "^8.0.1", "tsup": "^8.0.1",
"typescript": "^5.3.2", "typescript": "^5.3.2",
"vite": "^5.0.4", "vite": "^5.0.4",
"vitest": "^0.34.2" "vitest": "^1.0.1"
}, },
"peerDependencies": { "peerDependencies": {
"@wagmi/core": ">1.0.0", "@wagmi/core": ">1.0.0",
......
...@@ -49,7 +49,7 @@ ...@@ -49,7 +49,7 @@
"node-fetch": "^2.6.7" "node-fetch": "^2.6.7"
}, },
"devDependencies": { "devDependencies": {
"@types/node": "^20.10.2", "@types/node": "^20.10.3",
"mocha": "^10.2.0" "mocha": "^10.2.0"
} }
} }
...@@ -46,7 +46,7 @@ ...@@ -46,7 +46,7 @@
"typescript": "^5.3.2", "typescript": "^5.3.2",
"viem": "^1.19.11", "viem": "^1.19.11",
"vite": "^5.0.4", "vite": "^5.0.4",
"vitest": "^0.34.2" "vitest": "^1.0.1"
}, },
"peerDependencies": { "peerDependencies": {
"viem": "^0.3.30" "viem": "^0.3.30"
......
...@@ -44,7 +44,7 @@ ...@@ -44,7 +44,7 @@
"@types/chai": "^4.3.11", "@types/chai": "^4.3.11",
"@types/chai-as-promised": "^7.1.8", "@types/chai-as-promised": "^7.1.8",
"@types/mocha": "^10.0.6", "@types/mocha": "^10.0.6",
"@types/node": "^20.10.2", "@types/node": "^20.10.3",
"chai-as-promised": "^7.1.1", "chai-as-promised": "^7.1.1",
"ethereum-waffle": "^4.0.10", "ethereum-waffle": "^4.0.10",
"ethers": "^5.7.2", "ethers": "^5.7.2",
...@@ -57,7 +57,7 @@ ...@@ -57,7 +57,7 @@
"typedoc": "^0.25.4", "typedoc": "^0.25.4",
"typescript": "^5.3.2", "typescript": "^5.3.2",
"viem": "^1.19.11", "viem": "^1.19.11",
"vitest": "^0.34.2", "vitest": "^1.0.1",
"zod": "^3.22.4" "zod": "^3.22.4"
}, },
"dependencies": { "dependencies": {
......
...@@ -39,7 +39,7 @@ ...@@ -39,7 +39,7 @@
"typescript": "^5.3.2", "typescript": "^5.3.2",
"viem": "^1.19.11", "viem": "^1.19.11",
"vite": "^5.0.4", "vite": "^5.0.4",
"vitest": "^0.34.1", "vitest": "^1.0.1",
"zod": "^3.22.4" "zod": "^3.22.4"
}, },
"dependencies": { "dependencies": {
......
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment