Commit 0b7e45e6 authored by vicotor's avatar vicotor

update code

parent aac26f02
{
"recommendations": [
"editorconfig.editorconfig",
"nomicfoundation.hardhat-solidity",
"golang.go"
]
}
{
"editorconfig.generateAuto": false,
"files.trimTrailingWhitespace": true
}
package engine
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/exchain/go-exchain/exchain"
nebulav1 "github.com/exchain/go-exchain/exchain/protocol/gen/go/nebula/v1"
"github.com/exchain/go-exchain/op-node/p2p"
"github.com/exchain/go-exchain/op-node/rollup/driver"
"github.com/exchain/go-exchain/op-node/rollup/sync"
"github.com/exchain/go-exchain/op-service/eth"
"math/big"
)
// EngineAPI is a stub execution-engine API. It satisfies the p2p, sync,
// and driver L2Chain interfaces (asserted at the bottom of this file),
// but every method currently panics with "implement me".
type EngineAPI struct {
}
// SignalSuperchainV1 is unimplemented; it panics when called.
func (e *EngineAPI) SignalSuperchainV1(ctx context.Context, recommended, required params.ProtocolVersion) (params.ProtocolVersion, error) {
	// TODO implement me
	panic("implement me")
}

// InfoByHash is unimplemented; it panics when called.
func (e *EngineAPI) InfoByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, error) {
	// TODO implement me
	panic("implement me")
}

// GetProof is unimplemented; it panics when called.
func (e *EngineAPI) GetProof(ctx context.Context, address common.Address, storage []common.Hash, blockTag string) (*eth.AccountResult, error) {
	// TODO implement me
	panic("implement me")
}

// BlockRefByNumber is unimplemented; it panics when called.
func (e *EngineAPI) BlockRefByNumber(ctx context.Context, num uint64) (eth.BlockRef, error) {
	// TODO implement me
	panic("implement me")
}

// FetchReceipts is unimplemented; it panics when called.
func (e *EngineAPI) FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error) {
	// TODO implement me
	panic("implement me")
}

// OutputV0AtBlock is unimplemented; it panics when called.
func (e *EngineAPI) OutputV0AtBlock(ctx context.Context, blockHash common.Hash) (*eth.OutputV0, error) {
	// TODO implement me
	panic("implement me")
}

// ChainID is unimplemented; it panics when called.
func (e *EngineAPI) ChainID(ctx context.Context) (*big.Int, error) {
	// TODO implement me
	panic("implement me")
}

// NewPayload is unimplemented; it panics when called.
func (e *EngineAPI) NewPayload(params exchain.PayloadParams) (exchain.ExecutionResult, error) {
	// TODO implement me
	panic("implement me")
}

// ProcessPayload is unimplemented; it panics when called.
func (e *EngineAPI) ProcessPayload(block *nebulav1.Block) error {
	// TODO implement me
	panic("implement me")
}

// PayloadByHash is unimplemented; it panics when called.
func (e *EngineAPI) PayloadByHash(ctx context.Context, hash common.Hash) (*eth.ExecutionPayloadEnvelope, error) {
	// TODO implement me
	panic("implement me")
}

// PayloadByNumber is unimplemented; it panics when called.
func (e *EngineAPI) PayloadByNumber(ctx context.Context, u uint64) (*eth.ExecutionPayloadEnvelope, error) {
	// TODO implement me
	panic("implement me")
}

// L2BlockRefByLabel is unimplemented; it panics when called.
func (e *EngineAPI) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error) {
	// TODO implement me
	panic("implement me")
}

// L2BlockRefByHash is unimplemented; it panics when called.
func (e *EngineAPI) L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) (eth.L2BlockRef, error) {
	// TODO implement me
	panic("implement me")
}

// L2BlockRefByNumber is unimplemented; it panics when called.
func (e *EngineAPI) L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error) {
	// TODO implement me
	panic("implement me")
}

// SystemConfigByL2Hash is unimplemented; it panics when called.
func (e *EngineAPI) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (eth.SystemConfig, error) {
	// TODO implement me
	panic("implement me")
}
// Close is a no-op; the stub EngineAPI holds no resources to release.
func (e *EngineAPI) Close() {
}
// Compile-time assertions that EngineAPI implements the L2Chain
// interfaces required by the p2p, sync, and driver packages.
var (
	_ p2p.L2Chain    = (*EngineAPI)(nil)
	_ sync.L2Chain   = (*EngineAPI)(nil)
	_ driver.L2Chain = (*EngineAPI)(nil)
)
// NewEngineAPI constructs a stub EngineAPI.
// NOTE(review): the database parameter is currently unused — presumably it
// should be stored on the struct once the methods are implemented; confirm.
func NewEngineAPI(database exchain.Database) *EngineAPI {
	return &EngineAPI{}
}
package mockengine
import (
"github.com/exchain/go-exchain/exchain"
nebulav1 "github.com/exchain/go-exchain/exchain/protocol/gen/go/nebula/v1"
)
// MockEngine is a placeholder exchain.Engine implementation.
// All of its methods currently panic, so it is only useful as a wiring
// stand-in until a real engine is provided.
type MockEngine struct {
}

// Start is unimplemented; it panics when called.
func (m MockEngine) Start() error {
	// TODO implement me
	panic("implement me")
}

// NewPayload is unimplemented; it panics when called.
func (m MockEngine) NewPayload(params exchain.PayloadParams) (exchain.ExecutionResult, error) {
	// TODO implement me
	panic("implement me")
}

// ProcessPayload is unimplemented; it panics when called.
func (m MockEngine) ProcessPayload(block *nebulav1.Block) error {
	// TODO implement me
	panic("implement me")
}

// NewEngine returns a MockEngine as an exchain.Engine.
func NewEngine() exchain.Engine {
	return &MockEngine{}
}
...@@ -579,8 +579,7 @@ func checkBeaconBlockRoot(ctx context.Context, env *actionEnv) error { ...@@ -579,8 +579,7 @@ func checkBeaconBlockRoot(ctx context.Context, env *actionEnv) error {
if err != nil { if err != nil {
return fmt.Errorf("failed to retrieve rollup config: %w", err) return fmt.Errorf("failed to retrieve rollup config: %w", err)
} }
l2RPC := client.NewBaseRPCClient(env.l2.Client()) l2EthCl, err := sources.NewL2Client(env.log, nil,
l2EthCl, err := sources.NewL2Client(l2RPC, env.log, nil,
sources.L2ClientDefaultConfig(rollupCfg, false)) sources.L2ClientDefaultConfig(rollupCfg, false))
if err != nil { if err != nil {
return fmt.Errorf("failed to create eth client") return fmt.Errorf("failed to create eth client")
......
external_*/shim
op-e2e/interop/jwt.secret
# Number of logical cores; used to size Go test parallelism below.
num_cores := $(shell nproc)

# Generally, JUNIT_FILE is set in CI but may be specified to an arbitrary file location to emulate CI locally
# If JUNIT_FILE is set, JSON_LOG_FILE should also be set
ifdef JUNIT_FILE
go_test = OP_TESTLOG_DISABLE_COLOR=true OP_E2E_DISABLE_PARALLEL=false gotestsum --format=testname --junitfile=$(JUNIT_FILE) --jsonfile=$(JSON_LOG_FILE) -- -failfast
# Note: -parallel must be set to match the number of cores in the resource class
go_test_flags = -timeout=60m -parallel=$(num_cores)
else
go_test = go test
go_test_flags = -v
endif

# Default target: run the websocket-based system tests (after prerequisites).
test: pre-test test-ws
.PHONY: test

# Pattern rule: build and test against an external L2 shim in ./external_<name>/.
test-external-%: pre-test
	make -C ./external_$*/
	$(go_test) $(go_test_flags) --externalL2 ./external_$*/

test-ws: pre-test
	$(go_test) $(go_test_flags) ./system/... ./e2eutils/... ./opgeth/... ./interop/...
.PHONY: test-ws

test-actions: pre-test
	$(go_test) $(go_test_flags) ./actions/...
.PHONY: test-actions

test-http: pre-test
	OP_E2E_USE_HTTP=true $(go_test) $(go_test_flags) ./system/... ./e2eutils/... ./opgeth/... ./interop/...
.PHONY: test-http

test-cannon: pre-test
	OP_E2E_CANNON_ENABLED=true $(go_test) $(go_test_flags) ./faultproofs
.PHONY: test-cannon

test-fault-proofs: pre-test
	$(go_test) $(go_test_flags) ./faultproofs
.PHONY: test-faultproofs

# Build the cannon prestate artifacts in the repository root.
cannon-prestates:
	make -C .. cannon-prestate
	make -C .. cannon-prestate-mt
.PHONY: cannon-prestate

pre-test: pre-test-cannon
.PHONY: pre-test

# Build prestates only if the op-program binaries are missing.
pre-test-cannon:
	@if [ ! -e ../op-program/bin ]; then \
		make cannon-prestates; \
	fi
.PHONY: pre-test-cannon

clean:
	rm -r ../.devnet
	rm -r ../op-program/bin
.PHONY: clean

# Run the fuzz targets in parallel (requires GNU parallel).
fuzz:
	printf "%s\n" \
		"go test -run NOTAREALTEST -tags cgo_test -v -fuzztime 10s -fuzz FuzzFjordCostFunction ./opgeth" \
		"go test -run NOTAREALTEST -tags cgo_test -v -fuzztime 10s -fuzz FuzzFastLzGethSolidity ./opgeth" \
		"go test -run NOTAREALTEST -tags cgo_test -v -fuzztime 10s -fuzz FuzzFastLzCgo ./opgeth" \
	| parallel -j 8 {}
# `op-e2e`
Issues: [monorepo](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aissue%20state%3Aopen%20label%3AA-op-e2e)
Pull requests: [monorepo](https://github.com/ethereum-optimism/optimism/pulls?q=is%3Aopen+is%3Apr+label%3AA-op-e2e)
Design docs:
- [test infra draft design-doc]: active discussion of end-to-end testing approach
[test infra draft design-doc](https://github.com/ethereum-optimism/design-docs/pull/165)
`op-e2e` is a collection of Go integration tests.
It is named `e2e` after end-to-end testing,
for those tests where we integration-test the full system, rather than only specific services.
## Quickstart
```bash
make test-actions
make test-ws
```
## Overview
`op-e2e` can be categorized as follows:
- `op-e2e/actions/`: imperative test style, more DSL-like, with a focus on the state-transition parts of services.
Parallel processing is actively avoided, and a mock clock is used.
- `op-e2e/actions/*`: sub-packages categorize specific domains to test.
- `op-e2e/actions/interop`: notable sub-package, where multiple L2s are attached together,
for integration-testing across multiple L2 chains.
- `op-e2e/actions/proofs`: notable sub-package, where proof-related state-transition testing is implemented,
with experimental support to cover alternative proof implementations.
- `op-e2e/system`: integration tests with a L1 miner and a L2 with sequencer, verifier, batcher and proposer.
These tests do run each service almost fully, including parallel background jobs and real system clock.
These tests focus less on the onchain state-transition aspects, and more on the offchain integration aspects.
- `op-e2e/faultproofs`: system tests with fault-proofs stack attached
- `op-e2e/interop`: system tests with a distinct Interop "SuperSystem", to run multiple L2 chains.
- `op-e2e/opgeth`: integration tests between test-mocks and op-geth execution-engine.
- also includes upgrade-tests to ensure testing of op-stack Go components around a network upgrade.
### `action`-tests
Action tests are set up in a compositional way:
each service is instantiated as actor, and tests can choose to run just the relevant set of actors.
E.g. a test about data-availability can instantiate the batcher, but omit the proposer.
One action, across all services, runs at a time.
No live background processing or system clock affects the actors:
this enables individual actions to be deterministic and reproducible.
With this synchronous processing, action-tests can reliably navigate towards
these otherwise hard-to-reach edge-cases, and ensure that the state-transition of each service,
and the interactions between these states, are covered.
Action-tests do not cover background processes or peripherals.
E.g. P2P, CLI usage, and dynamic block building are not covered.
### `system`-tests
System tests are more complete than `action` tests, but also require a live system.
This trade-off enables coverage of most of each Go service,
at the cost of making navigation to cover the known edge-cases less reliable and reproducible.
This test-type is thus used primarily for testing of the offchain service aspects.
By running a more full system, test-runners also run into resource-limits more quickly.
This may result in lag or even stalled services.
Improvements, as described in the [test infra draft design-doc],
are in active development, to make test execution more reliable.
### `op-e2e/opgeth`
Integration-testing with op-geth, to cover engine behavior, without setting up a full test environment.
These tests are limited in scope, and may be changed at a later stage, to support alternative EL implementations.
## Product
### Optimization target
Historically `op-e2e` has been optimized for test-coverage of the Go OP-Stack.
This is changing with the advance of alternative OP-Stack client implementations.
New test framework improvements should optimize for multi-client testing.
### Vision
Generally, design-discussion and feedback from active test users converges on:
- a need to share test-resources, to host more tests while reducing overhead.
- a need for a DSL, to better express common test constructs.
- less involved test pre-requisites: the environment should be light and simple, welcoming new contributors.
E.g. no undocumented one-off makefile prerequisites.
## Design principles
- Interfaces first. We should not hardcode test-utilities against any specific client implementation,
this makes a test less parameterizable and less cross-client portable.
- Abstract setup to make it the default to reduce resource usage.
E.g. RPC transports can run in-process, and avoid websocket or HTTP costs,
and ideally the test-writer does not have to think about the difference.
- Avoid one-off test chain-configurations. Tests with more realistic parameters are more comparable to production,
and easier consolidated onto shared testing resources.
- Write helpers and DSL utilities, avoid re-implementing common testing steps.
The better the test environment, the more inviting it is for someone new to help improve test coverage.
- Use the right test-type. Do not spawn a full system for something of very limited scope,
e.g. when it fits better in a unit-test.
This diff is collapsed.
package batcher
import (
"testing"
"github.com/exchain/go-exchain/op-e2e/actions/helpers"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
batcherFlags "github.com/exchain/go-exchain/op-batcher/flags"
"github.com/exchain/go-exchain/op-e2e/e2eutils"
"github.com/exchain/go-exchain/op-node/rollup/sync"
"github.com/exchain/go-exchain/op-service/eth"
"github.com/exchain/go-exchain/op-service/testlog"
)
// setupEIP4844Test builds a sequencer+verifier action-test environment with
// Cancun active at L1 genesis and Canyon/Delta/Ecotone active at L2 genesis,
// so EIP-4844 blob batch transactions can be submitted and derived.
// It returns the setup data, deploy params, L1 miner, sequencer (+engine),
// and verifier (+engine).
func setupEIP4844Test(t helpers.Testing, log log.Logger) (*e2eutils.SetupData, *e2eutils.DeployParams, *helpers.L1Miner, *helpers.L2Sequencer, *helpers.L2Engine, *helpers.L2Verifier, *helpers.L2Engine) {
	dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams())
	// activate all relevant hardforks at genesis (offset 0)
	genesisActivation := hexutil.Uint64(0)
	dp.DeployConfig.L1CancunTimeOffset = &genesisActivation
	dp.DeployConfig.L2GenesisCanyonTimeOffset = &genesisActivation
	dp.DeployConfig.L2GenesisDeltaTimeOffset = &genesisActivation
	dp.DeployConfig.L2GenesisEcotoneTimeOffset = &genesisActivation
	sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc)
	miner, seqEngine, sequencer := helpers.SetupSequencerTest(t, sd, log)
	miner.ActL1SetFeeRecipient(common.Address{'A'})
	sequencer.ActL2PipelineFull(t)
	verifEngine, verifier := helpers.SetupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{})
	return sd, dp, miner, sequencer, seqEngine, verifier, verifEngine
}
// setupBatcher constructs an L2 batcher for the given test environment,
// submitting batches with the requested data-availability type
// (calldata or blobs).
func setupBatcher(t helpers.Testing, log log.Logger, sd *e2eutils.SetupData, dp *e2eutils.DeployParams, miner *helpers.L1Miner,
	sequencer *helpers.L2Sequencer, engine *helpers.L2Engine, daType batcherFlags.DataAvailabilityType,
) *helpers.L2Batcher {
	cfg := &helpers.BatcherCfg{
		MinL1TxSize:          0,
		MaxL1TxSize:          128_000,
		BatcherKey:           dp.Secrets.Batcher,
		DataAvailabilityType: daType,
	}
	rollupCl := sequencer.RollupClient()
	engineCl := engine.EngineClient(t, sd.RollupCfg)
	return helpers.NewL2Batcher(log, sd.RollupCfg, cfg, rollupCl, miner.EthClient(), engine.EthClient(), engineCl)
}
// TestEIP4844DataAvailability submits a batch as a blob (EIP-4844)
// transaction and checks that the verifier derives the sequencer's
// unsafe chain from the blob data on L1.
func TestEIP4844DataAvailability(gt *testing.T) {
	t := helpers.NewDefaultTesting(gt)
	log := testlog.Logger(t, log.LevelDebug)

	sd, dp, miner, sequencer, seqEngine, verifier, _ := setupEIP4844Test(t, log)

	batcher := setupBatcher(t, log, sd, dp, miner, sequencer, seqEngine, batcherFlags.BlobsType)

	sequencer.ActL2PipelineFull(t)
	verifier.ActL2PipelineFull(t)

	// build empty L1 block
	miner.ActEmptyBlock(t)
	// finalize it, so the L1 geth blob pool doesn't log errors about missing finality
	miner.ActL1SafeNext(t)
	miner.ActL1FinalizeNext(t)

	// Create L2 blocks, and reference the L1 head as origin
	sequencer.ActL1HeadSignal(t)
	sequencer.ActBuildToL1Head(t)

	// submit all new L2 blocks
	batcher.ActSubmitAll(t)
	batchTx := batcher.LastSubmitted
	require.Equal(t, uint8(types.BlobTxType), batchTx.Type(), "batch tx must be blob-tx")

	// new L1 block with L2 batch
	miner.ActL1StartBlock(12)(t)
	miner.ActL1IncludeTxByHash(batchTx.Hash())(t)
	miner.ActL1EndBlock(t)

	// verifier picks up the L2 chain that was submitted
	verifier.ActL1HeadSignal(t)
	verifier.ActL2PipelineFull(t)
	require.Equal(t, verifier.L2Safe(), sequencer.L2Unsafe(), "verifier syncs from sequencer via L1")
	// the sequencer has not derived from L1 itself, so its safe head still lags its unsafe head
	require.NotEqual(t, sequencer.L2Safe(), sequencer.L2Unsafe(), "sequencer has not processed L1 yet")
}
// TestEIP4844MultiBlobs submits a batch as a single blob transaction
// carrying the maximum number of blobs, and checks that the verifier
// still derives the sequencer's unsafe chain from it.
func TestEIP4844MultiBlobs(gt *testing.T) {
	t := helpers.NewDefaultTesting(gt)
	log := testlog.Logger(t, log.LevelDebug)

	sd, dp, miner, sequencer, seqEngine, verifier, _ := setupEIP4844Test(t, log)

	batcher := setupBatcher(t, log, sd, dp, miner, sequencer, seqEngine, batcherFlags.BlobsType)

	sequencer.ActL2PipelineFull(t)
	verifier.ActL2PipelineFull(t)

	// build empty L1 block
	miner.ActEmptyBlock(t)
	// finalize it, so the L1 geth blob pool doesn't log errors about missing finality
	miner.ActL1SafeNext(t)
	miner.ActL1FinalizeNext(t)

	// Create L2 blocks, and reference the L1 head as origin
	sequencer.ActL1HeadSignal(t)
	sequencer.ActBuildToL1Head(t)

	// submit all new L2 blocks, spread over the max number of blobs in one tx
	batcher.ActSubmitAllMultiBlobs(t, eth.MaxBlobsPerBlobTx)
	batchTx := batcher.LastSubmitted
	require.Equal(t, uint8(types.BlobTxType), batchTx.Type(), "batch tx must be blob-tx")
	require.Len(t, batchTx.BlobTxSidecar().Blobs, eth.MaxBlobsPerBlobTx)

	// new L1 block with L2 batch
	miner.ActL1StartBlock(12)(t)
	miner.ActL1IncludeTxByHash(batchTx.Hash())(t)
	miner.ActL1EndBlock(t)

	// verifier picks up the L2 chain that was submitted
	verifier.ActL1HeadSignal(t)
	verifier.ActL2PipelineFull(t)
	require.Equal(t, verifier.L2Safe(), sequencer.L2Unsafe(), "verifier syncs from sequencer via L1")
	require.NotEqual(t, sequencer.L2Safe(), sequencer.L2Unsafe(), "sequencer has not processed L1 yet")
}
// TestEIP4844DataAvailabilitySwitch first submits a batch with legacy
// calldata DA, then switches to blob DA for a second batch, and checks
// that the verifier derives the chain correctly across the switch.
func TestEIP4844DataAvailabilitySwitch(gt *testing.T) {
	t := helpers.NewDefaultTesting(gt)
	log := testlog.Logger(t, log.LevelDebug)

	sd, dp, miner, sequencer, seqEngine, verifier, _ := setupEIP4844Test(t, log)

	// start with a calldata-submitting batcher
	oldBatcher := setupBatcher(t, log, sd, dp, miner, sequencer, seqEngine, batcherFlags.CalldataType)

	sequencer.ActL2PipelineFull(t)
	verifier.ActL2PipelineFull(t)

	// build empty L1 block
	miner.ActEmptyBlock(t)
	// finalize it, so the L1 geth blob pool doesn't log errors about missing finality
	miner.ActL1SafeNext(t)
	miner.ActL1FinalizeNext(t)

	// Create L2 blocks, and reference the L1 head as origin
	sequencer.ActL1HeadSignal(t)
	sequencer.ActBuildToL1Head(t)

	// submit all new L2 blocks, with legacy calldata DA
	oldBatcher.ActSubmitAll(t)
	batchTx := oldBatcher.LastSubmitted
	require.Equal(t, uint8(types.DynamicFeeTxType), batchTx.Type(), "batch tx must be eip1559 tx")

	// new L1 block with L2 batch
	miner.ActL1StartBlock(12)(t)
	miner.ActL1IncludeTxByHash(batchTx.Hash())(t)
	miner.ActL1EndBlock(t)

	// verifier picks up the L2 chain that was submitted
	verifier.ActL1HeadSignal(t)
	verifier.ActL2PipelineFull(t)
	require.Equal(t, verifier.L2Safe(), sequencer.L2Unsafe(), "verifier syncs from sequencer via L1")
	require.NotEqual(t, sequencer.L2Safe(), sequencer.L2Unsafe(), "sequencer has not processed L1 yet")

	// switch to a blob-submitting batcher for the rest of the test
	newBatcher := setupBatcher(t, log, sd, dp, miner, sequencer, seqEngine, batcherFlags.BlobsType)

	// build empty L1 block
	miner.ActEmptyBlock(t)

	// Create L2 blocks, and reference the L1 head as origin
	sequencer.ActL1HeadSignal(t)
	sequencer.ActBuildToL1Head(t)

	// submit all new L2 blocks, now with Blobs DA!
	newBatcher.ActSubmitAll(t)
	batchTx = newBatcher.LastSubmitted

	// new L1 block with L2 batch
	miner.ActL1StartBlock(12)(t)
	miner.ActL1IncludeTxByHash(batchTx.Hash())(t)
	miner.ActL1EndBlock(t)
	require.Equal(t, uint8(types.BlobTxType), batchTx.Type(), "batch tx must be blob-tx")

	// verifier picks up the L2 chain that was submitted
	verifier.ActL1HeadSignal(t)
	verifier.ActL2PipelineFull(t)
	require.Equal(t, verifier.L2Safe(), sequencer.L2Unsafe(), "verifier syncs from sequencer via L1")
	require.NotEqual(t, sequencer.L2Safe(), sequencer.L2Unsafe(), "sequencer has not processed L1 yet")
}
This diff is collapsed.
package derivation
import (
"testing"
"github.com/exchain/go-exchain/op-e2e/config"
altda "github.com/exchain/go-exchain/op-alt-da"
batcherFlags "github.com/exchain/go-exchain/op-batcher/flags"
"github.com/exchain/go-exchain/op-e2e/actions/helpers"
upgradesHelpers "github.com/exchain/go-exchain/op-e2e/actions/upgrades/helpers"
"github.com/exchain/go-exchain/op-e2e/e2eutils"
"github.com/exchain/go-exchain/op-node/node/safedb"
"github.com/exchain/go-exchain/op-node/rollup/derive"
"github.com/exchain/go-exchain/op-node/rollup/sync"
"github.com/exchain/go-exchain/op-service/sources"
"github.com/exchain/go-exchain/op-service/testlog"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
// TestDeriveChainFromNearL1Genesis tests a corner case where when the derivation pipeline starts, the
// safe head has an L1 origin of block 1. The derivation then starts with pipeline origin of L1 genesis,
// just one block prior to the origin of the safe head.
// This is a regression test: previously the pipeline got stuck in a reset loop with the error:
// buffered L1 chain epoch %s in batch queue does not match safe head origin %s
func TestDeriveChainFromNearL1Genesis(gt *testing.T) {
	t := helpers.NewDefaultTesting(gt)
	p := &e2eutils.TestParams{
		MaxSequencerDrift:   20, // larger than L1 block time we simulate in this test (12)
		SequencerWindowSize: 24,
		ChannelTimeout:      20,
		L1BlockTime:         12,
		AllocType:           config.AllocTypeStandard,
	}
	dp := e2eutils.MakeDeployParams(t, p)
	// do not activate Delta hardfork for verifier
	upgradesHelpers.ApplyDeltaTimeOffset(dp, nil)
	sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc)
	logger := testlog.Logger(t, log.LevelInfo)
	miner, seqEngine, sequencer := helpers.SetupSequencerTest(t, sd, logger)
	miner.ActEmptyBlock(t)
	require.EqualValues(gt, 1, miner.L1Chain().CurrentBlock().Number.Uint64())

	// the L2 genesis block must have the L1 genesis block (number 0) as origin
	ref, err := derive.L2BlockToBlockRef(sequencer.RollupCfg, seqEngine.L2Chain().Genesis())
	require.NoError(gt, err)
	require.EqualValues(gt, 0, ref.L1Origin.Number)

	// build L2 blocks up to the L1 head, so the L2 tip's origin becomes L1 block 1
	sequencer.ActL1HeadSignal(t)
	sequencer.ActBuildToL1Head(t)
	l2BlockNum := seqEngine.L2Chain().CurrentBlock().Number.Uint64()
	ref, err = derive.L2BlockToBlockRef(sequencer.RollupCfg, seqEngine.L2Chain().GetBlockByNumber(l2BlockNum))
	require.NoError(gt, err)
	require.EqualValues(gt, 1, ref.L1Origin.Number)
	miner.ActEmptyBlock(t)

	rollupSeqCl := sequencer.RollupClient()
	// Force batcher to submit SingularBatches to L1.
	batcher := helpers.NewL2Batcher(logger, sd.RollupCfg, &helpers.BatcherCfg{
		MinL1TxSize:          0,
		MaxL1TxSize:          128_000,
		BatcherKey:           dp.Secrets.Batcher,
		DataAvailabilityType: batcherFlags.CalldataType,
	}, rollupSeqCl, miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
	batcher.ActSubmitAll(t)
	require.EqualValues(gt, l2BlockNum, batcher.L2BufferedBlock.Number)

	// confirm batch on L1
	miner.ActL1StartBlock(12)(t)
	miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
	miner.ActL1EndBlock(t)
	bl := miner.L1Chain().CurrentBlock()
	logger.Info("Produced L1 block with batch",
		"num", miner.L1Chain().CurrentBlock().Number.Uint64(),
		"txs", len(miner.L1Chain().GetBlockByHash(bl.Hash()).Transactions()))

	// Process batches so safe head updates
	sequencer.ActL1HeadSignal(t)
	sequencer.ActL2PipelineFull(t)
	require.EqualValues(gt, l2BlockNum, seqEngine.L2Chain().CurrentSafeBlock().Number.Uint64())

	// Finalize L1 and process so L2 finalized updates
	miner.ActL1Safe(t, miner.L1Chain().CurrentBlock().Number.Uint64())
	miner.ActL1Finalize(t, miner.L1Chain().CurrentBlock().Number.Uint64())
	sequencer.ActL1SafeSignal(t)
	sequencer.ActL1FinalizedSignal(t)
	sequencer.ActL2PipelineFull(t)
	require.EqualValues(gt, l2BlockNum, seqEngine.L2Chain().CurrentFinalBlock().Number.Uint64())

	// Create a new verifier using the existing engine so it already has the safe and finalized heads set.
	// This is the same situation as if op-node restarted at this point.
	l2Cl, err := sources.NewEngineClient(seqEngine.RPCClient(), logger, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
	require.NoError(gt, err)
	verifier := helpers.NewL2Verifier(t, logger, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled,
		l2Cl, sequencer.RollupCfg, &sync.Config{}, safedb.Disabled)
	verifier.ActL2PipelineFull(t) // Should not get stuck in a reset loop forever

	// safe/finalized heads must be preserved, and the verifier must agree
	require.EqualValues(gt, l2BlockNum, seqEngine.L2Chain().CurrentSafeBlock().Number.Uint64())
	require.EqualValues(gt, l2BlockNum, seqEngine.L2Chain().CurrentFinalBlock().Number.Uint64())
	syncStatus := verifier.SyncStatus()
	require.EqualValues(gt, l2BlockNum, syncStatus.SafeL2.Number)
	require.EqualValues(gt, l2BlockNum, syncStatus.FinalizedL2.Number)
}
This diff is collapsed.
package derivation
import (
"testing"
"github.com/exchain/go-exchain/op-e2e/config"
"github.com/exchain/go-exchain/op-e2e/actions/helpers"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
"github.com/exchain/go-exchain/op-e2e/e2eutils"
"github.com/exchain/go-exchain/op-service/testlog"
)
// TestL2Verifier_SequenceWindow checks that L1 origins are force-adopted once
// the sequence window expires, and that a deep L1 reorg (as deep as the
// sequence window) eventually reorgs the derived safe L2 chain accordingly.
func TestL2Verifier_SequenceWindow(gt *testing.T) {
	t := helpers.NewDefaultTesting(gt)
	p := &e2eutils.TestParams{
		MaxSequencerDrift:   10,
		SequencerWindowSize: 24,
		ChannelTimeout:      10,
		L1BlockTime:         15,
		AllocType:           config.AllocTypeStandard,
	}
	dp := e2eutils.MakeDeployParams(t, p)
	sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc)
	log := testlog.Logger(t, log.LevelDebug)
	miner, engine, verifier := helpers.SetupVerifierOnlyTest(t, sd, log)
	miner.ActL1SetFeeRecipient(common.Address{'A'})

	// Make two sequence windows worth of empty L1 blocks. After we pass the first sequence window, the L2 chain should get blocks
	for miner.L1Chain().CurrentBlock().Number.Uint64() < sd.RollupCfg.SeqWindowSize*2 {
		miner.ActL1StartBlock(10)(t)
		miner.ActL1EndBlock(t)

		verifier.ActL2PipelineFull(t)

		l1Head := miner.L1Chain().CurrentBlock().Number.Uint64()
		expectedL1Origin := uint64(0)
		// as soon as we complete the sequence window, we force-adopt the L1 origin
		if l1Head >= sd.RollupCfg.SeqWindowSize {
			expectedL1Origin = l1Head - sd.RollupCfg.SeqWindowSize
		}
		require.Equal(t, expectedL1Origin, verifier.SyncStatus().SafeL2.L1Origin.Number, "L1 origin is forced in, given enough L1 blocks pass by")
		require.LessOrEqual(t, miner.L1Chain().GetBlockByNumber(expectedL1Origin).Time(), engine.L2Chain().CurrentBlock().Time, "L2 time higher than L1 origin time")
	}
	tip2N := verifier.SyncStatus()

	// Do a deep L1 reorg as deep as a sequence window, this should affect the safe L2 chain
	miner.ActL1RewindDepth(sd.RollupCfg.SeqWindowSize)(t)

	// Without new L1 block, the L1 appears to not be synced, and the node shouldn't reorg
	verifier.ActL2PipelineFull(t)
	require.Equal(t, tip2N.SafeL2, verifier.SyncStatus().SafeL2, "still the same after verifier work")

	// Make a new empty L1 block with different data than there was before.
	miner.ActL1SetFeeRecipient(common.Address{'B'})
	miner.ActL1StartBlock(10)(t)
	miner.ActL1EndBlock(t)
	reorgL1Block := miner.L1Chain().CurrentBlock()

	// Still no reorg, we need more L1 blocks first, before the reorged L1 block is forced in by sequence window
	verifier.ActL2PipelineFull(t)
	require.Equal(t, tip2N.SafeL2, verifier.SyncStatus().SafeL2)

	// extend the reorged L1 chain past the sequence window again
	for miner.L1Chain().CurrentBlock().Number.Uint64() < sd.RollupCfg.SeqWindowSize*2 {
		miner.ActL1StartBlock(10)(t)
		miner.ActL1EndBlock(t)
	}

	// workaround: in L1Traversal we only recognize the reorg once we see origin N+1, we don't reorg to shorter L1 chains
	miner.ActL1StartBlock(10)(t)
	miner.ActL1EndBlock(t)

	// Now it will reorg
	verifier.ActL2PipelineFull(t)

	// NOTE(review): the nested GetBlockByHash call resolves back to the same hash it is
	// given, so a single lookup of the safe L2 origin hash looks sufficient — confirm
	// before simplifying.
	got := miner.L1Chain().GetBlockByHash(miner.L1Chain().GetBlockByHash(verifier.SyncStatus().SafeL2.L1Origin.Hash).Hash())
	require.Equal(t, reorgL1Block.Hash(), got.Hash(), "must have reorged L2 chain to the new L1 chain")
}
This diff is collapsed.
This diff is collapsed.
package helpers
import (
"context"
op_e2e "github.com/exchain/go-exchain/op-e2e"
"github.com/exchain/go-exchain/op-e2e/e2eutils"
)
// Testing is an interface to Go-like testing,
// extended with a context getter for the test runner to shut down individual actions without interrupting the test,
// and a signaling function for when an invalid action is hit.
// This helps custom test runners navigate slow or invalid actions, e.g. during fuzzing.
type Testing interface {
	e2eutils.TestingBase
	// Ctx shares a context to execute an action with, the test runner may interrupt the action without stopping the test.
	Ctx() context.Context
	// InvalidAction indicates the failure is due to action incompatibility, does not stop the test.
	InvalidAction(format string, args ...any)
}

// Action is a function that may change the state of one or more actors or check their state.
// Action definitions are meant to be very small building blocks,
// and then composed into larger patterns to write more elaborate tests.
type Action func(t Testing)

// ActionStatus defines the state of an action, to make a basic distinction between InvalidAction() and other calls.
type ActionStatus uint

const (
	// ActionOK indicates the action is valid to apply
	ActionOK ActionStatus = iota
	// ActionInvalid indicates the action is not applicable, and a different next action may be taken.
	ActionInvalid
	// More action status types may be used to indicate e.g. required rewinds,
	// simple skips, or special cases for fuzzing.
)

// defaultTesting is a simple implementation of Testing that takes standard Go testing framework,
// and handles invalid actions as errors, and exposes a Reset function to change the context and action state,
// to recover after an invalid action or cancelled context.
type defaultTesting struct {
	e2eutils.TestingBase
	// ctx is the context for the currently executing action; replaced by Reset.
	ctx context.Context
	// state tracks whether the last action was valid; set back to ActionOK by Reset.
	state ActionStatus
}

// StatefulTesting extends Testing with state inspection and reset, for custom test runners.
type StatefulTesting interface {
	Testing
	Reset(actionCtx context.Context)
	State() ActionStatus
}
// NewDefaultTesting wraps a Go-like testing handle in a StatefulTesting
// implementation and registers the test for parallel execution.
// It returns an interface, as the behavior is likely to change as more
// action tests are built.
func NewDefaultTesting(tb e2eutils.TestingBase) StatefulTesting {
	op_e2e.InitParallel(tb)
	dt := &defaultTesting{TestingBase: tb}
	dt.ctx = context.Background()
	dt.state = ActionOK
	return dt
}
// Ctx shares a context to execute an action with, the test runner may interrupt the action without stopping the test.
func (st *defaultTesting) Ctx() context.Context {
	return st.ctx
}

// InvalidAction indicates the failure is due to action incompatibility, does not stop the test.
// The format and args behave the same as fmt.Sprintf, testing.T.Errorf, etc.
func (st *defaultTesting) InvalidAction(format string, args ...any) {
	st.TestingBase.Helper() // report the error on the call-site to make debugging clear, not here.
	st.Errorf("invalid action err: "+format, args...)
	st.state = ActionInvalid
}

// Reset prepares the testing util for the next action, changing the context and state back to OK.
func (st *defaultTesting) Reset(actionCtx context.Context) {
	st.state = ActionOK
	st.ctx = actionCtx
}

// State shares the current action state.
func (st *defaultTesting) State() ActionStatus {
	return st.state
}

// Compile-time check that defaultTesting implements the Testing interface.
var _ Testing = (*defaultTesting)(nil)
package helpers
import (
"math/rand"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/exchain/go-exchain/op-chain-ops/genesis"
e2ecfg "github.com/exchain/go-exchain/op-e2e/config"
"github.com/exchain/go-exchain/op-e2e/e2eutils"
"github.com/exchain/go-exchain/op-node/rollup"
"github.com/exchain/go-exchain/op-node/rollup/sync"
"github.com/exchain/go-exchain/op-service/testlog"
)
// Env is a default action-test environment: an L1 miner, a sequencer with its
// engine, a verifier with its engine, a batcher, and a cross-layer test user.
type Env struct {
	// Log is the shared test logger; Logs captures its records for assertions.
	Log  log.Logger
	Logs *testlog.CapturingHandler

	DeployParams *e2eutils.DeployParams
	SetupData    *e2eutils.SetupData

	Miner       *L1Miner
	Seq         *L2Sequencer
	SeqEngine   *L2Engine
	Verifier    *L2Verifier
	VerifEngine *L2Engine
	Batcher     *L2Batcher

	Alice *CrossLayerUser

	// AddressCorpora holds addresses collected from the setup, for test use.
	AddressCorpora []common.Address
}

// EnvOpt customizes the environment setup; currently only deploy-config modification.
type EnvOpt struct {
	// DeployConfigMod, if set, is applied to the deploy config before setup.
	DeployConfigMod func(*genesis.DeployConfig)
}
// WithActiveFork returns an option that activates the given fork at the
// given time offset in the deploy config.
func WithActiveFork(fork rollup.ForkName, offset uint64) EnvOpt {
	mod := func(d *genesis.DeployConfig) {
		d.ActivateForkAtOffset(fork, offset)
	}
	return EnvOpt{DeployConfigMod: mod}
}

// WithActiveGenesisFork returns an option that activates the given fork at
// genesis (offset 0).
func WithActiveGenesisFork(fork rollup.ForkName) EnvOpt {
	return WithActiveFork(fork, 0)
}
// DefaultFork specifies the default fork to use when setting up the action test environment.
// Currently manually set to Holocene. SetupEnv activates it at genesis unless options override.
// Replace with `var DefaultFork = func() rollup.ForkName { return rollup.AllForks[len(rollup.AllForks)-1] }()` after Interop launch.
const DefaultFork = rollup.Holocene
// SetupEnv sets up a default action test environment. If no fork is specified, the default fork as
// specified by the package constant [DefaultFork] is used.
func SetupEnv(t Testing, opts ...EnvOpt) (env Env) {
	dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams())
	env.DeployParams = dp

	log, logs := testlog.CaptureLogger(t, log.LevelDebug)
	env.Log, env.Logs = log, logs

	// activate the default fork at genesis, then let the options mutate the deploy config
	dp.DeployConfig.ActivateForkAtGenesis(DefaultFork)
	for _, opt := range opts {
		if dcMod := opt.DeployConfigMod; dcMod != nil {
			dcMod(dp.DeployConfig)
		}
	}

	sd := e2eutils.Setup(t, dp, DefaultAlloc)
	env.SetupData = sd
	env.AddressCorpora = e2eutils.CollectAddresses(sd, dp)

	// sequencer + engine + L1 miner, then a verifier sharing the same L1
	env.Miner, env.SeqEngine, env.Seq = SetupSequencerTest(t, sd, log)
	env.Miner.ActL1SetFeeRecipient(common.Address{'A'})
	env.VerifEngine, env.Verifier = SetupVerifier(t, sd, log, env.Miner.L1Client(t, sd.RollupCfg), env.Miner.BlobStore(), &sync.Config{})
	rollupSeqCl := env.Seq.RollupClient()
	env.Batcher = NewL2Batcher(log, sd.RollupCfg, DefaultBatcherCfg(dp),
		rollupSeqCl, env.Miner.EthClient(), env.SeqEngine.EthClient(), env.SeqEngine.EngineClient(t, sd.RollupCfg))

	// a cross-layer user with a fixed RNG seed, for reproducible test traffic
	alice := NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(0xa57b)), e2ecfg.AllocTypeStandard)
	alice.L1.SetUserEnv(env.L1UserEnv(t))
	alice.L2.SetUserEnv(env.L2UserEnv(t))
	env.Alice = alice
	return
}
// L1UserEnv builds a user environment bound to this test env's L1 miner.
func (env Env) L1UserEnv(t Testing) *BasicUserEnv[*L1Bindings] {
	ethCl := env.Miner.EthClient()
	signer := types.LatestSigner(env.SetupData.L1Cfg.Config)
	return &BasicUserEnv[*L1Bindings]{
		EthCl:          ethCl,
		Signer:         signer,
		AddressCorpora: env.AddressCorpora,
		Bindings:       NewL1Bindings(t, ethCl, e2ecfg.AllocTypeStandard),
	}
}

// L2UserEnv builds a user environment bound to this test env's L2 sequencer engine.
func (env Env) L2UserEnv(t Testing) *BasicUserEnv[*L2Bindings] {
	ethCl := env.SeqEngine.EthClient()
	signer := types.LatestSigner(env.SetupData.L2Cfg.Config)
	return &BasicUserEnv[*L2Bindings]{
		EthCl:          ethCl,
		Signer:         signer,
		AddressCorpora: env.AddressCorpora,
		Bindings:       NewL2Bindings(t, ethCl, env.SeqEngine.GethClient()),
	}
}
// ActBatchSubmitAllAndMine submits all pending batch data and then mines an L1
// block (12s block time) that includes the submitted batch tx.
// It returns the L1 block that included the batch transaction.
func (env Env) ActBatchSubmitAllAndMine(t Testing) (l1InclusionBlock *types.Block) {
	env.Batcher.ActSubmitAll(t)
	submitted := env.Batcher.LastSubmitted
	env.Miner.ActL1StartBlock(12)(t)
	env.Miner.ActL1IncludeTxByHash(submitted.Hash())(t)
	l1InclusionBlock = env.Miner.ActL1EndBlock(t)
	return
}
package helpers
import (
"bytes"
"compress/gzip"
"compress/zlib"
"crypto/rand"
"errors"
"fmt"
"io"
"github.com/exchain/go-exchain/op-node/rollup"
"github.com/exchain/go-exchain/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
// GarbageKind enumerates the ways channel data can be corrupted for testing.
type GarbageKind int64

const (
	STRIP_VERSION GarbageKind = iota
	RANDOM
	TRUNCATE_END
	DIRTY_APPEND
	INVALID_COMPRESSION
	MALFORM_RLP
)

// GarbageKinds lists every supported garbage kind.
var GarbageKinds = []GarbageKind{
	STRIP_VERSION,
	RANDOM,
	TRUNCATE_END,
	DIRTY_APPEND,
	INVALID_COMPRESSION,
	MALFORM_RLP,
}

// String returns the name of the garbage kind, or "UNKNOWN" for unrecognized values.
func (gk GarbageKind) String() string {
	names := [...]string{
		STRIP_VERSION:       "STRIP_VERSION",
		RANDOM:              "RANDOM",
		TRUNCATE_END:        "TRUNCATE_END",
		DIRTY_APPEND:        "DIRTY_APPEND",
		INVALID_COMPRESSION: "INVALID_COMPRESSION",
		MALFORM_RLP:         "MALFORM_RLP",
	}
	if gk >= 0 && int(gk) < len(names) {
		return names[gk]
	}
	return "UNKNOWN"
}
// GarbageChannelCfg is the configuration for a `GarbageChannelOut`
type GarbageChannelCfg struct {
	// UseInvalidCompression swaps zlib for gzip, producing data the derivation pipeline rejects.
	UseInvalidCompression bool
	// MalformRLP corrupts the RLP length prefix of every added batch.
	MalformRLP bool
}

// Writer is the interface shared between `zlib.Writer` and `gzip.Writer`
type Writer interface {
	Close() error
	Flush() error
	Reset(io.Writer)
	Write([]byte) (int, error)
}

// ChannelOutIface is the interface implemented by ChannelOut & GarbageChannelOut
type ChannelOutIface interface {
	ID() derive.ChannelID
	Reset() error
	AddBlock(rollupCfg *rollup.Config, block *types.Block) (*derive.L1BlockInfo, error)
	ReadyBytes() int
	Flush() error
	Close() error
	OutputFrame(w *bytes.Buffer, maxSize uint64) (uint16, error)
}

// Compile-time check for ChannelOutIface interface implementation for the SingularChannelOut type.
var _ ChannelOutIface = (*derive.SingularChannelOut)(nil)

// Compile-time check for ChannelOutIface interface implementation for the SpanChannelOut type.
var _ ChannelOutIface = (*derive.SpanChannelOut)(nil)

// Compile-time check for ChannelOutIface interface implementation for the GarbageChannelOut type.
var _ ChannelOutIface = (*GarbageChannelOut)(nil)

// GarbageChannelOut is a modified `derive.ChannelOut` that can be configured to behave differently
// than the original
type GarbageChannelOut struct {
	id derive.ChannelID
	// Frame ID of the next frame to emit. Increment after emitting
	frame uint64
	// rlpLength is the uncompressed size of the channel. Must be less than MAX_RLP_BYTES_PER_CHANNEL
	rlpLength int
	// Compressor stage. Write input data to it
	compress Writer
	// post compression buffer
	buf bytes.Buffer
	// closed marks the channel as sealed; no further blocks may be added.
	closed bool
	// Garbage channel configuration
	cfg *GarbageChannelCfg
}
// ID returns the random channel identifier assigned at construction/reset time.
func (co *GarbageChannelOut) ID() derive.ChannelID {
	return co.id
}

// NewGarbageChannelOut creates a new `GarbageChannelOut` with the given configuration.
func NewGarbageChannelOut(cfg *GarbageChannelCfg) (*GarbageChannelOut, error) {
	c := &GarbageChannelOut{
		id:        derive.ChannelID{}, // TODO: use GUID here instead of fully random data
		frame:     0,
		rlpLength: 0,
		cfg:       cfg,
	}
	_, err := rand.Read(c.id[:])
	if err != nil {
		return nil, err
	}
	// Optionally use zlib or gzip compression
	// gzip is deliberately "invalid": the derivation pipeline only accepts zlib-framed data.
	var compress Writer
	if cfg.UseInvalidCompression {
		compress, err = gzip.NewWriterLevel(&c.buf, gzip.BestCompression)
	} else {
		compress, err = zlib.NewWriterLevel(&c.buf, zlib.BestCompression)
	}
	if err != nil {
		return nil, err
	}
	c.compress = compress
	return c, nil
}
// TODO: reuse ChannelOut for performance

// Reset clears all channel state (frame counter, RLP length, buffers) and
// assigns a fresh random channel ID, so the instance can be reused.
func (co *GarbageChannelOut) Reset() error {
	co.frame = 0
	co.rlpLength = 0
	co.buf.Reset()
	co.compress.Reset(&co.buf)
	co.closed = false
	_, err := rand.Read(co.id[:])
	return err
}

// AddBlock adds a block to the channel. It returns an error
// if there is a problem adding the block. The only sentinel
// error that it returns is ErrTooManyRLPBytes. If this error
// is returned, the channel should be closed and a new one
// should be made.
func (co *GarbageChannelOut) AddBlock(rollupCfg *rollup.Config, block *types.Block) (*derive.L1BlockInfo, error) {
	if co.closed {
		return nil, errors.New("already closed")
	}
	batch, l1Info, err := blockToBatch(rollupCfg, block)
	if err != nil {
		return nil, err
	}
	// We encode to a temporary buffer to determine the encoded length to
	// ensure that the total size of all RLP elements is less than or equal to MAX_RLP_BYTES_PER_CHANNEL
	var buf bytes.Buffer
	if err := rlp.Encode(&buf, batch); err != nil {
		return nil, err
	}
	if co.cfg.MalformRLP {
		// Malform the RLP by incrementing the length prefix by 1.
		bufBytes := buf.Bytes()
		bufBytes[0] += 1
		buf.Reset()
		buf.Write(bufBytes)
	}
	chainSpec := rollup.NewChainSpec(rollupCfg)
	maxRLPBytesPerChannel := chainSpec.MaxRLPBytesPerChannel(block.Time())
	if co.rlpLength+buf.Len() > int(maxRLPBytesPerChannel) {
		return nil, fmt.Errorf("could not add %d bytes to channel of %d bytes, max is %d. err: %w",
			buf.Len(), co.rlpLength, maxRLPBytesPerChannel, derive.ErrTooManyRLPBytes)
	}
	co.rlpLength += buf.Len()
	// Stream the (possibly malformed) RLP through the compressor stage.
	_, err = io.Copy(co.compress, &buf)
	return l1Info, err
}
// ReadyBytes returns the number of bytes that the channel out can immediately output into a frame.
// Use `Flush` or `Close` to move data from the compression buffer into the ready buffer if more bytes
// are needed. Add blocks may add to the ready buffer, but it is not guaranteed due to the compression stage.
func (co *GarbageChannelOut) ReadyBytes() int {
	return co.buf.Len()
}

// Flush flushes the internal compression stage to the ready buffer. It enables pulling a larger & more
// complete frame. It reduces the compression efficiency.
func (co *GarbageChannelOut) Flush() error {
	return co.compress.Flush()
}

// Close seals the channel: it finalizes the compression stream and rejects
// further AddBlock/Close calls. Remaining data can still be read via OutputFrame.
func (co *GarbageChannelOut) Close() error {
	if co.closed {
		return errors.New("already closed")
	}
	co.closed = true
	return co.compress.Close()
}
// OutputFrame writes a frame to w with a given max size
// Use `ReadyBytes`, `Flush`, and `Close` to modify the ready buffer.
// Returns io.EOF when the channel is closed & there are no more frames
// Returns nil if there is still more buffered data.
// Returns and error if it ran into an error during processing.
func (co *GarbageChannelOut) OutputFrame(w *bytes.Buffer, maxSize uint64) (uint16, error) {
	f := derive.Frame{
		ID:          co.id,
		FrameNumber: uint16(co.frame),
	}
	fn := f.FrameNumber
	// Copy data from the local buffer into the frame data buffer
	// Don't go past the maxSize with the fixed frame overhead.
	// Fixed overhead: 32 + 8 + 2 + 4 + 1 = 47 bytes.
	// Add one extra byte for the version byte (for the entire L1 tx though)
	// NOTE(review): assumes maxSize >= 48; a smaller value would underflow this
	// unsigned subtraction — confirm callers always pass frame sizes above the overhead.
	maxDataSize := maxSize - 47 - 1
	if maxDataSize >= uint64(co.buf.Len()) {
		maxDataSize = uint64(co.buf.Len())
		// If we are closed & will not spill past the current frame
		// mark it as the final frame of the channel.
		if co.closed {
			f.IsLast = true
		}
	}
	f.Data = make([]byte, maxDataSize)
	if _, err := io.ReadFull(&co.buf, f.Data); err != nil {
		return fn, err
	}
	if err := f.MarshalBinary(w); err != nil {
		return fn, err
	}
	co.frame += 1
	if f.IsLast {
		return fn, io.EOF
	} else {
		return fn, nil
	}
}
// blockToBatch transforms a block into a batch object that can easily be RLP encoded.
// It returns the singular batch (deposit txs stripped) together with the L1 info
// parsed from the block's first transaction, which must be the L1 info deposit tx.
func blockToBatch(rollupCfg *rollup.Config, block *types.Block) (*derive.BatchData, *derive.L1BlockInfo, error) {
	txs := block.Transactions()
	// Guard: indexing txs[0] below would panic on an empty transaction list.
	if len(txs) == 0 {
		return nil, nil, fmt.Errorf("block %v has no transactions, missing L1 info deposit tx", block.Hash())
	}
	opaqueTxs := make([]hexutil.Bytes, 0, len(txs))
	for i, tx := range txs {
		// Deposit txs are derived from L1 and are never batch-submitted.
		if tx.Type() == types.DepositTxType {
			continue
		}
		otx, err := tx.MarshalBinary()
		if err != nil {
			return nil, nil, fmt.Errorf("could not encode tx %v in block %v: %w", i, tx.Hash(), err)
		}
		opaqueTxs = append(opaqueTxs, otx)
	}
	l1InfoTx := txs[0]
	if l1InfoTx.Type() != types.DepositTxType {
		return nil, nil, derive.ErrNotDepositTx
	}
	l1Info, err := derive.L1BlockInfoFromBytes(rollupCfg, block.Time(), l1InfoTx.Data())
	if err != nil {
		return nil, nil, fmt.Errorf("could not parse the L1 Info deposit: %w", err)
	}
	singularBatch := &derive.SingularBatch{
		ParentHash:   block.ParentHash(),
		EpochNum:     rollup.Epoch(l1Info.Number),
		EpochHash:    l1Info.BlockHash,
		Timestamp:    block.Time(),
		Transactions: opaqueTxs,
	}
	return derive.NewBatchData(singularBatch), l1Info, nil
}
This diff is collapsed.
package helpers
import (
"math/big"
"testing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
"github.com/exchain/go-exchain/op-e2e/e2eutils"
"github.com/exchain/go-exchain/op-service/testlog"
)
// TestL1Miner_BuildBlock checks that the L1 miner can build empty blocks,
// selectively include a pending tx, and serve as a sync source for a replica.
func TestL1Miner_BuildBlock(gt *testing.T) {
	t := NewDefaultTesting(gt)
	dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams())
	sd := e2eutils.Setup(t, dp, DefaultAlloc)
	log := testlog.Logger(t, log.LevelDebug)
	miner := NewL1Miner(t, log, sd.L1Cfg)
	t.Cleanup(func() {
		_ = miner.Close()
	})
	cl := miner.EthClient()
	signer := types.LatestSigner(sd.L1Cfg.Config)
	// send a tx to the miner
	tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
		ChainID:   sd.L1Cfg.Config.ChainID,
		Nonce:     0,
		GasTipCap: big.NewInt(2 * params.GWei),
		GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
		Gas:       params.TxGas,
		To:        &dp.Addresses.Bob,
		Value:     e2eutils.Ether(2),
	})
	// Consistency fix: use the wrapped `t` for all assertions (was a mix of `gt` and `t`).
	require.NoError(t, cl.SendTransaction(t.Ctx(), tx))
	// make an empty block, even though a tx may be waiting
	miner.ActL1StartBlock(10)(t)
	miner.ActL1EndBlock(t)
	header := miner.l1Chain.CurrentBlock()
	bl := miner.l1Chain.GetBlockByHash(header.Hash())
	require.Equal(t, uint64(1), bl.NumberU64())
	require.Zero(t, bl.Transactions().Len())
	// now include the tx when we want it to
	miner.ActL1StartBlock(10)(t)
	miner.ActL1IncludeTx(dp.Addresses.Alice)(t)
	miner.ActL1EndBlock(t)
	header = miner.l1Chain.CurrentBlock()
	bl = miner.l1Chain.GetBlockByHash(header.Hash())
	require.Equal(t, uint64(2), bl.NumberU64())
	require.Equal(t, 1, bl.Transactions().Len())
	require.Equal(t, tx.Hash(), bl.Transactions()[0].Hash())
	// now make a replica that syncs these two blocks from the miner
	replica := NewL1Replica(t, log, sd.L1Cfg)
	t.Cleanup(func() {
		_ = replica.Close()
	})
	replica.ActL1Sync(miner.CanonL1Chain())(t)
	replica.ActL1Sync(miner.CanonL1Chain())(t)
	require.Equal(t, replica.l1Chain.CurrentBlock().Hash(), miner.l1Chain.CurrentBlock().Hash())
}
package helpers
import (
"errors"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool/blobpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rpc"
"github.com/exchain/go-exchain/op-node/rollup"
"github.com/exchain/go-exchain/op-service/client"
"github.com/exchain/go-exchain/op-service/sources"
"github.com/exchain/go-exchain/op-service/testutils"
)
// L1CanonSrc is used to sync L1 from another node.
// The other node always has the canonical chain.
// May be nil if there is nothing to sync from
type L1CanonSrc func(num uint64) *types.Block

// L1Replica is an instrumented in-memory L1 geth node that:
// - can sync from the given canonical L1 blocks source
// - can rewind the chain back (for reorgs)
// - can provide an RPC with mock errors
type L1Replica struct {
	log  log.Logger
	node *node.Node
	Eth  *eth.Ethereum

	// L1 evm / chain
	l1Chain    *core.BlockChain
	l1Database ethdb.Database
	l1Cfg      *core.Genesis
	l1Signer   types.Signer

	// failL1RPC, when non-nil, is consulted per RPC batch to inject mock errors.
	failL1RPC func(call []rpc.BatchElem) error // mock error
}
// NewL1Replica constructs a L1Replica starting at the given genesis.
// The node runs fully in-memory (empty DataDir) with p2p discovery/dialing disabled,
// and is torn down via t.Cleanup.
func NewL1Replica(t Testing, log log.Logger, genesis *core.Genesis) *L1Replica {
	ethCfg := &ethconfig.Config{
		NetworkId:                 genesis.Config.ChainID.Uint64(),
		Genesis:                   genesis,
		RollupDisableTxPoolGossip: true,
		StateScheme:               rawdb.HashScheme,
		NoPruning:                 true,
		BlobPool: blobpool.Config{
			Datadir:   t.TempDir(),
			Datacap:   blobpool.DefaultConfig.Datacap,
			PriceBump: blobpool.DefaultConfig.PriceBump,
		},
	}
	nodeCfg := &node.Config{
		Name:        "l1-geth",
		WSHost:      "127.0.0.1",
		WSPort:      0, // 0 lets the OS pick a free port
		HTTPHost:    "127.0.0.1",
		HTTPPort:    0,
		WSModules:   []string{"debug", "admin", "eth", "txpool", "net", "rpc", "web3", "personal"},
		HTTPModules: []string{"debug", "admin", "eth", "txpool", "net", "rpc", "web3", "personal"},
		DataDir:     "", // in-memory
		P2P: p2p.Config{
			NoDiscovery: true,
			NoDial:      true,
		},
	}
	n, err := node.New(nodeCfg)
	require.NoError(t, err)
	t.Cleanup(func() {
		_ = n.Close()
	})
	backend, err := eth.New(n, ethCfg)
	require.NoError(t, err)
	n.RegisterAPIs(tracers.APIs(backend.APIBackend))
	require.NoError(t, n.Start(), "failed to start L1 geth node")
	return &L1Replica{
		log:        log,
		node:       n,
		Eth:        backend,
		l1Chain:    backend.BlockChain(),
		l1Database: backend.ChainDb(),
		l1Cfg:      genesis,
		l1Signer:   types.LatestSigner(genesis.Config),
		failL1RPC:  nil,
	}
}
// ActL1RewindToParent rewinds the L1 chain to parent block of head
func (s *L1Replica) ActL1RewindToParent(t Testing) {
	s.ActL1RewindDepth(1)(t)
}

// ActL1RewindDepth returns an action that rewinds the L1 chain head by `depth` blocks.
// It refuses to rewind past genesis or past the finalized block.
func (s *L1Replica) ActL1RewindDepth(depth uint64) Action {
	return func(t Testing) {
		if depth == 0 {
			return
		}
		head := s.l1Chain.CurrentHeader().Number.Uint64()
		if head < depth {
			t.InvalidAction("cannot rewind L1 past genesis (current: %d, rewind depth: %d)", head, depth)
			return
		}
		finalized := s.l1Chain.CurrentFinalBlock()
		if finalized != nil && head < finalized.Number.Uint64()+depth {
			t.InvalidAction("cannot rewind head of chain past finalized block %d with rewind depth %d", finalized.Number.Uint64(), depth)
			return
		}
		if err := s.l1Chain.SetHead(head - depth); err != nil {
			t.Fatalf("failed to rewind L1 chain to nr %d: %v", head-depth, err)
		}
	}
}
// ActL1Sync processes the next canonical L1 block,
// or rewinds one block if the canonical block cannot be applied to the head.
func (s *L1Replica) ActL1Sync(canonL1 func(num uint64) *types.Block) Action {
	return func(t Testing) {
		selfHead := s.l1Chain.CurrentHeader()
		n := selfHead.Number.Uint64()
		expected := canonL1(n)
		if expected == nil || selfHead.Hash() != expected.Hash() {
			// Our head is not canonical (or the canonical chain is shorter):
			// step back one block; repeated actions will converge on the canonical chain.
			s.ActL1RewindToParent(t)
			return
		}
		next := canonL1(n + 1)
		if next == nil {
			t.InvalidAction("already fully synced to head %s (%d), n+1 is not there", selfHead.Hash(), n)
			return
		}
		if next.ParentHash() != selfHead.Hash() {
			// canonical chain must be set up wrong - with actions one by one it is not supposed to reorg during a single sync step.
			// Fix: the format arguments were previously swapped (%s received the uint64 and %d received the hash).
			t.Fatalf("canonical L1 source reorged unexpectedly from %s (num %d) to next block %s (parent %s)", selfHead.Hash(), n, next.Hash(), next.ParentHash())
		}
		_, err := s.l1Chain.InsertChain([]*types.Block{next})
		require.NoError(t, err, "L1 replica could not sync next canonical L1 block %s (%d)", next.Hash(), next.NumberU64())
	}
}
// CanonL1Chain exposes this replica's chain as a canonical-block lookup by number,
// suitable for feeding into another replica's ActL1Sync.
func (s *L1Replica) CanonL1Chain() func(num uint64) *types.Block {
	return s.l1Chain.GetBlockByNumber
}

// ActL1RPCFail makes the next L1 RPC request to this node fail
func (s *L1Replica) ActL1RPCFail(t Testing) {
	s.failL1RPC = func(call []rpc.BatchElem) error {
		// one-shot: clear the hook so only a single request fails
		s.failL1RPC = nil
		return errors.New("mock L1 RPC error")
	}
}

// MockL1RPCErrors installs a persistent error source: fn is consulted on every RPC batch.
func (s *L1Replica) MockL1RPCErrors(fn func() error) {
	s.failL1RPC = func(call []rpc.BatchElem) error {
		return fn()
	}
}

// HTTPEndpoint returns the HTTP RPC endpoint URL of the underlying geth node.
func (s *L1Replica) HTTPEndpoint() string {
	return s.node.HTTPEndpoint()
}

// EthClient returns an in-process ethclient attached to this node.
func (s *L1Replica) EthClient() *ethclient.Client {
	cl := s.node.Attach()
	return ethclient.NewClient(cl)
}

// RPCClient returns an RPC client wrapped with the mock-error injection hook (failL1RPC).
func (s *L1Replica) RPCClient() client.RPC {
	cl := s.node.Attach()
	return testutils.RPCErrFaker{
		RPC: client.NewBaseRPCClient(cl),
		ErrFn: func(call []rpc.BatchElem) error {
			if s.failL1RPC == nil {
				return nil
			}
			return s.failL1RPC(call)
		},
	}
}

// L1Client builds a rollup-node L1 source client on top of RPCClient with the default config.
func (s *L1Replica) L1Client(t Testing, cfg *rollup.Config) *sources.L1Client {
	l1F, err := sources.NewL1Client(s.RPCClient(), s.log, nil, sources.L1ClientDefaultConfig(cfg, false, sources.RPCKindStandard))
	require.NoError(t, err)
	return l1F
}

// L1ClientSimple builds an L1 source client with a simple (rollup-config-free) configuration.
func (s *L1Replica) L1ClientSimple(t Testing) *sources.L1Client {
	l1F, err := sources.NewL1Client(s.RPCClient(), s.log, nil, sources.L1ClientSimpleConfig(false, sources.RPCKindStandard, 100))
	require.NoError(t, err)
	return l1F
}
// L1Chain returns the underlying blockchain of this replica.
func (s *L1Replica) L1Chain() *core.BlockChain {
	return s.l1Chain
}

// UnsafeNum returns the current (unsafe) head block number, or 0 if there is none.
func (s *L1Replica) UnsafeNum() uint64 {
	head := s.l1Chain.CurrentBlock()
	headNum := uint64(0)
	if head != nil {
		headNum = head.Number.Uint64()
	}
	return headNum
}

// SafeNum returns the current safe block number, or 0 if no block is marked safe yet.
func (s *L1Replica) SafeNum() uint64 {
	safe := s.l1Chain.CurrentSafeBlock()
	safeNum := uint64(0)
	if safe != nil {
		safeNum = safe.Number.Uint64()
	}
	return safeNum
}

// FinalizedNum returns the current finalized block number, or 0 if nothing is finalized yet.
func (s *L1Replica) FinalizedNum() uint64 {
	finalized := s.l1Chain.CurrentFinalBlock()
	finalizedNum := uint64(0)
	if finalized != nil {
		finalizedNum = finalized.Number.Uint64()
	}
	return finalizedNum
}

// ActL1Finalize finalizes a later block, which must be marked as safe before doing so (see ActL1SafeNext).
func (s *L1Replica) ActL1Finalize(t Testing, num uint64) {
	safeNum := s.SafeNum()
	finalizedNum := s.FinalizedNum()
	if safeNum < num {
		t.InvalidAction("need to move forward safe block before moving finalized block")
		return
	}
	newFinalized := s.l1Chain.GetHeaderByNumber(num)
	if newFinalized == nil {
		t.Fatalf("expected block at %d after finalized L1 block %d, safe head is ahead", num, finalizedNum)
	}
	s.l1Chain.SetFinalized(newFinalized)
}

// ActL1FinalizeNext finalizes the next block, which must be marked as safe before doing so (see ActL1SafeNext).
func (s *L1Replica) ActL1FinalizeNext(t Testing) {
	n := s.FinalizedNum() + 1
	s.ActL1Finalize(t, n)
}

// ActL1Safe marks the given unsafe block as safe.
func (s *L1Replica) ActL1Safe(t Testing, num uint64) {
	newSafe := s.l1Chain.GetHeaderByNumber(num)
	if newSafe == nil {
		t.InvalidAction("could not find L1 block %d, cannot label it as safe", num)
		return
	}
	s.l1Chain.SetSafe(newSafe)
}

// ActL1SafeNext marks the next unsafe block as safe.
func (s *L1Replica) ActL1SafeNext(t Testing) {
	n := s.SafeNum() + 1
	s.ActL1Safe(t, n)
}

// Close shuts down the underlying geth node.
func (s *L1Replica) Close() error {
	return s.node.Close()
}
package helpers
import (
"encoding/binary"
"testing"
"github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/hashdb"
"github.com/stretchr/testify/require"
"github.com/exchain/go-exchain/op-e2e/e2eutils"
"github.com/exchain/go-exchain/op-service/eth"
"github.com/exchain/go-exchain/op-service/sources"
"github.com/exchain/go-exchain/op-service/testlog"
)
// Test if we can mock an RPC failure
func TestL1Replica_ActL1RPCFail(gt *testing.T) {
	t := NewDefaultTesting(gt)
	dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams())
	sd := e2eutils.Setup(t, dp, DefaultAlloc)
	log := testlog.Logger(t, log.LevelDebug)
	replica := NewL1Replica(t, log, sd.L1Cfg)
	t.Cleanup(func() {
		_ = replica.Close()
	})
	// mock an RPC failure
	replica.ActL1RPCFail(t)
	// check RPC failure: the first request must hit the injected mock error
	l1Cl, err := sources.NewL1Client(replica.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindStandard))
	require.NoError(t, err)
	_, err = l1Cl.InfoByLabel(t.Ctx(), eth.Unsafe)
	require.ErrorContains(t, err, "mock")
	// the mock error is one-shot: the retry must succeed
	head, err := l1Cl.InfoByLabel(t.Ctx(), eth.Unsafe)
	require.NoError(t, err)
	// Consistency fix: pass the wrapped `t` (was `gt`), matching the other assertions.
	require.Equal(t, sd.L1Cfg.ToBlock().Hash(), head.Hash(), "expecting replica to start at genesis")
}
// Test if we can make the replica sync an artificial L1 chain, rewind it, and reorg it
func TestL1Replica_ActL1Sync(gt *testing.T) {
	t := NewDefaultTesting(gt)
	dp := e2eutils.MakeDeployParams(t, DefaultRollupTestParams())
	dp.DeployConfig.L1CancunTimeOffset = nil
	sd := e2eutils.Setup(t, dp, DefaultAlloc)
	log := testlog.Logger(t, log.LevelDebug)
	genesisBlock := sd.L1Cfg.ToBlock()
	consensus := beacon.New(ethash.NewFaker())
	db := rawdb.NewMemoryDatabase()
	tdb := triedb.NewDatabase(db, &triedb.Config{HashDB: hashdb.Defaults})
	sd.L1Cfg.MustCommit(db, tdb)
	// gen produces a per-chain block modifier: it derives a distinct parent-beacon-root
	// from the chain label and block index, so chains "A" and "B" diverge.
	gen := func(s string) func(n int, g *core.BlockGen) {
		return func(n int, g *core.BlockGen) {
			root := crypto.Keccak256Hash([]byte(s),
				binary.BigEndian.AppendUint64(nil, uint64(n)))
			g.SetParentBeaconRoot(root)
		}
	}
	// chain A: genesis + 10 blocks; chain B: forks off chain A after block 3.
	chainA, _ := core.GenerateChain(sd.L1Cfg.Config, genesisBlock, consensus, db, 10, gen("A"))
	chainA = append(append([]*types.Block{}, genesisBlock), chainA...)
	chainB, _ := core.GenerateChain(sd.L1Cfg.Config, chainA[3], consensus, db, 10, gen("B"))
	chainB = append(append([]*types.Block{}, chainA[:4]...), chainB...)
	require.NotEqual(t, chainA[9], chainB[9], "need different chains")
	// canonL1 turns a block slice into a canonical lookup-by-number source.
	canonL1 := func(blocks []*types.Block) func(num uint64) *types.Block {
		return func(num uint64) *types.Block {
			if num >= uint64(len(blocks)) {
				return nil
			}
			return blocks[num]
		}
	}
	// Enough setup, create the test actor and run the actual actions
	replica1 := NewL1Replica(t, log, sd.L1Cfg)
	t.Cleanup(func() {
		_ = replica1.Close()
	})
	syncFromA := replica1.ActL1Sync(canonL1(chainA))
	// sync canonical chain A
	for replica1.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainA)) {
		syncFromA(t)
	}
	require.Equal(t, replica1.l1Chain.CurrentBlock().Hash(), chainA[len(chainA)-1].Hash(), "sync replica1 to head of chain A")
	replica1.ActL1RewindToParent(t)
	require.Equal(t, replica1.l1Chain.CurrentBlock().Hash(), chainA[len(chainA)-2].Hash(), "rewind replica1 to parent of chain A")
	// sync new canonical chain B (each sync step rewinds until the fork point, then applies B blocks)
	syncFromB := replica1.ActL1Sync(canonL1(chainB))
	for replica1.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainB)) {
		syncFromB(t)
	}
	require.Equal(t, replica1.l1Chain.CurrentBlock().Hash(), chainB[len(chainB)-1].Hash(), "sync replica1 to head of chain B")
	// Adding and syncing a new replica
	replica2 := NewL1Replica(t, log, sd.L1Cfg)
	t.Cleanup(func() {
		_ = replica2.Close()
	})
	syncFromOther := replica2.ActL1Sync(replica1.CanonL1Chain())
	for replica2.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainB)) {
		syncFromOther(t)
	}
	require.Equal(t, replica2.l1Chain.CurrentBlock().Hash(), chainB[len(chainB)-1].Hash(), "sync replica2 to head of chain B")
}
This diff is collapsed.
package helpers
import (
"errors"
"github.com/exchain/go-exchain/op-e2e/e2eutils"
"github.com/exchain/go-exchain/op-program/client/l2/engineapi"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
geth "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rpc"
"github.com/exchain/go-exchain/op-node/rollup"
"github.com/exchain/go-exchain/op-service/client"
"github.com/exchain/go-exchain/op-service/sources"
"github.com/exchain/go-exchain/op-service/testutils"
)
// L2Engine is an in-memory implementation of the Engine API,
// without support for snap-sync, and no concurrency or background processes.
type L2Engine struct {
	log  log.Logger
	node *node.Node
	Eth  *geth.Ethereum

	// L2 evm / chain
	l2Chain  *core.BlockChain
	l2Signer types.Signer

	// EngineApi is the custom engine API served by this node, giving tests
	// fine-grained control over block building and tx inclusion.
	EngineApi *engineapi.L2EngineAPI

	// FailL2RPC, when non-nil, is consulted per RPC batch to inject mock errors.
	FailL2RPC func(call []rpc.BatchElem) error // mock error
}

// EngineOption mutates the geth/node configs before the engine node is created.
type EngineOption func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error
// NewL2Engine creates an in-memory L2 engine node with a custom (authenticated)
// engine API registered, so tests control sequencing of individual txs.
func NewL2Engine(t Testing, log log.Logger, genesis *core.Genesis, jwtPath string, options ...EngineOption) *L2Engine {
	n, ethBackend, apiBackend := newBackend(t, genesis, jwtPath, options)
	engineApi := engineapi.NewL2EngineAPI(log, apiBackend, ethBackend.Downloader())
	chain := ethBackend.BlockChain()
	eng := &L2Engine{
		log:       log,
		node:      n,
		Eth:       ethBackend,
		l2Chain:   chain,
		l2Signer:  types.LatestSigner(genesis.Config),
		EngineApi: engineApi,
	}
	// register the custom engine API, so we can serve engine requests while having more control
	// over sequencing of individual txs.
	n.RegisterAPIs([]rpc.API{
		{
			Namespace:     "engine",
			Service:       eng.EngineApi,
			Authenticated: true,
		},
	})
	require.NoError(t, n.Start(), "failed to start L2 op-geth node")
	return eng
}
// newBackend constructs the geth node, eth backend, and engine-API backend for an L2 engine,
// applying the given EngineOptions to the configs before node creation.
func newBackend(t e2eutils.TestingBase, genesis *core.Genesis, jwtPath string, options []EngineOption) (*node.Node, *geth.Ethereum, *engineApiBackend) {
	ethCfg := &ethconfig.Config{
		NetworkId:   genesis.Config.ChainID.Uint64(),
		Genesis:     genesis,
		StateScheme: rawdb.HashScheme,
		NoPruning:   true,
	}
	nodeCfg := &node.Config{
		Name:        "l2-geth",
		WSHost:      "127.0.0.1",
		WSPort:      0, // 0 lets the OS pick a free port
		HTTPHost:    "127.0.0.1",
		HTTPPort:    0,
		AuthAddr:    "127.0.0.1",
		AuthPort:    0,
		WSModules:   []string{"debug", "admin", "eth", "txpool", "net", "rpc", "web3", "personal"},
		HTTPModules: []string{"debug", "admin", "eth", "txpool", "net", "rpc", "web3", "personal"},
		JWTSecret:   jwtPath,
	}
	for i, opt := range options {
		require.NoError(t, opt(ethCfg, nodeCfg), "engine option %d failed", i)
	}
	n, err := node.New(nodeCfg)
	require.NoError(t, err)
	t.Cleanup(func() {
		_ = n.Close()
	})
	backend, err := geth.New(n, ethCfg)
	require.NoError(t, err)
	n.RegisterAPIs(tracers.APIs(backend.APIBackend))
	chain := backend.BlockChain()
	db := backend.ChainDb()
	apiBackend := &engineApiBackend{
		BlockChain: chain,
		db:         db,
		genesis:    genesis,
	}
	return n, backend, apiBackend
}

// engineApiBackend adapts the blockchain, database, and genesis into the
// backend interface expected by engineapi.NewL2EngineAPI.
type engineApiBackend struct {
	*core.BlockChain
	db      ethdb.Database
	genesis *core.Genesis
}

// Database returns the chain database.
func (e *engineApiBackend) Database() ethdb.Database {
	return e.db
}

// Genesis returns the chain genesis spec.
func (e *engineApiBackend) Genesis() *core.Genesis {
	return e.genesis
}
// L2Chain returns the underlying blockchain of this engine.
// Receiver renamed s -> e for consistency with the other *L2Engine methods (staticcheck ST1016).
func (e *L2Engine) L2Chain() *core.BlockChain {
	return e.l2Chain
}

// Enode returns the p2p node record of this engine's node.
func (e *L2Engine) Enode() *enode.Node {
	return e.node.Server().LocalNode().Node()
}

// AddPeers connects this engine's node to the given peers.
func (e *L2Engine) AddPeers(peers ...*enode.Node) {
	for _, en := range peers {
		e.node.Server().AddPeer(en)
	}
}

// PeerCount returns the number of connected p2p peers.
func (e *L2Engine) PeerCount() int {
	return e.node.Server().PeerCount()
}

// HTTPEndpoint returns the HTTP RPC endpoint URL of the underlying node.
func (e *L2Engine) HTTPEndpoint() string {
	return e.node.HTTPEndpoint()
}

// EthClient returns an in-process ethclient attached to this node.
func (e *L2Engine) EthClient() *ethclient.Client {
	cl := e.node.Attach()
	return ethclient.NewClient(cl)
}

// GethClient returns an in-process gethclient attached to this node.
func (e *L2Engine) GethClient() *gethclient.Client {
	cl := e.node.Attach()
	return gethclient.New(cl)
}
// RPCClient returns an RPC client wrapped with the mock-error injection hook (FailL2RPC).
func (e *L2Engine) RPCClient() client.RPC {
	cl := e.node.Attach()
	return testutils.RPCErrFaker{
		RPC: client.NewBaseRPCClient(cl),
		ErrFn: func(call []rpc.BatchElem) error {
			if e.FailL2RPC == nil {
				return nil
			}
			return e.FailL2RPC(call)
		},
	}
}

// EngineClient builds an engine-API client on top of RPCClient with the default config.
func (e *L2Engine) EngineClient(t Testing, cfg *rollup.Config) *sources.EngineClient {
	l2Cl, err := sources.NewEngineClient(e.RPCClient(), e.log, nil, sources.EngineClientDefaultConfig(cfg))
	require.NoError(t, err)
	return l2Cl
}

// ActL2RPCFail makes the next L2 RPC request fail with given error
func (e *L2Engine) ActL2RPCFail(t Testing, err error) {
	if e.FailL2RPC != nil { // already set to fail?
		t.InvalidAction("already set a mock L2 rpc error")
		return
	}
	e.FailL2RPC = func(call []rpc.BatchElem) error {
		// one-shot: clear the hook so only a single request fails
		e.FailL2RPC = nil
		return err
	}
}
// ActL2IncludeTxIgnoreForcedEmpty includes the next transaction from the given address in the
// block that is being built, skipping the usual check for e.EngineApi.ForcedEmpty()
func (e *L2Engine) ActL2IncludeTxIgnoreForcedEmpty(from common.Address) Action {
	return func(t Testing) {
		if e.EngineApi.ForcedEmpty() {
			e.log.Info("Ignoring e.L2ForceEmpty=true")
		}
		tx := firstValidTx(t, from, e.EngineApi.PendingIndices, e.Eth.TxPool().ContentFrom, e.EthClient().NonceAt)
		err := e.EngineApi.IncludeTx(tx, from)
		if errors.Is(err, engineapi.ErrNotBuildingBlock) {
			t.InvalidAction(err.Error())
		} else if errors.Is(err, engineapi.ErrUsesTooMuchGas) {
			t.InvalidAction("included tx uses too much gas: %v", err)
		} else if err != nil {
			require.NoError(t, err, "include tx")
		}
	}
}

// ActL2IncludeTx includes the next transaction from the given address in the block that is being built
func (e *L2Engine) ActL2IncludeTx(from common.Address) Action {
	return func(t Testing) {
		// Unlike ActL2IncludeTxIgnoreForcedEmpty, a forced-empty block skips inclusion entirely.
		if e.EngineApi.ForcedEmpty() {
			e.log.Info("Skipping including a transaction because e.L2ForceEmpty is true")
			return
		}
		tx := firstValidTx(t, from, e.EngineApi.PendingIndices, e.Eth.TxPool().ContentFrom, e.EthClient().NonceAt)
		err := e.EngineApi.IncludeTx(tx, from)
		if errors.Is(err, engineapi.ErrNotBuildingBlock) {
			t.InvalidAction(err.Error())
		} else if errors.Is(err, engineapi.ErrUsesTooMuchGas) {
			t.InvalidAction("included tx uses too much gas: %v", err)
		} else if err != nil {
			require.NoError(t, err, "include tx")
		}
	}
}

// Close shuts down the underlying geth node.
func (e *L2Engine) Close() error {
	return e.node.Close()
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
package helpers
import (
altda "github.com/exchain/go-exchain/op-alt-da"
"github.com/exchain/go-exchain/op-e2e/actions/upgrades/helpers"
"github.com/exchain/go-exchain/op-e2e/e2eutils"
"github.com/exchain/go-exchain/op-node/rollup/derive"
"github.com/exchain/go-exchain/op-node/rollup/sync"
"github.com/exchain/go-exchain/op-service/sources"
"github.com/exchain/go-exchain/op-service/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
func SetupSequencerTest(t Testing, sd *e2eutils.SetupData, log log.Logger, opts ...SequencerOpt) (*L1Miner, *L2Engine, *L2Sequencer) {
jwtPath := e2eutils.WriteDefaultJWT(t)
cfg := DefaultSequencerConfig()
for _, opt := range opts {
opt(cfg)
}
miner := NewL1Miner(t, log.New("role", "l1-miner"), sd.L1Cfg)
l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindStandard))
require.NoError(t, err)
engine := NewL2Engine(t, log.New("role", "sequencer-engine"), sd.L2Cfg, jwtPath, EngineWithP2P())
l2Cl, err := sources.NewEngineClient(engine.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
require.NoError(t, err)
sequencer := NewL2Sequencer(t, log.New("role", "sequencer"), l1F, miner.BlobStore(), altda.Disabled, l2Cl, sd.RollupCfg, 0)
return miner, engine, sequencer
}
func SetupVerifier(t Testing, sd *e2eutils.SetupData, log log.Logger,
l1F derive.L1Fetcher, blobSrc derive.L1BlobsFetcher, syncCfg *sync.Config, opts ...VerifierOpt) (*L2Engine, *L2Verifier) {
cfg := DefaultVerifierCfg()
for _, opt := range opts {
opt(cfg)
}
jwtPath := e2eutils.WriteDefaultJWT(t)
engine := NewL2Engine(t, log.New("role", "verifier-engine"), sd.L2Cfg, jwtPath, EngineWithP2P())
engCl := engine.EngineClient(t, sd.RollupCfg)
verifier := NewL2Verifier(t, log.New("role", "verifier"), l1F, blobSrc, altda.Disabled, engCl, sd.RollupCfg, syncCfg, cfg.SafeHeadListener)
return engine, verifier
}
func SetupVerifierOnlyTest(t Testing, sd *e2eutils.SetupData, log log.Logger) (*L1Miner, *L2Engine, *L2Verifier) {
miner := NewL1Miner(t, log, sd.L1Cfg)
l1Cl := miner.L1Client(t, sd.RollupCfg)
engine, verifier := SetupVerifier(t, sd, log, l1Cl, miner.BlobStore(), &sync.Config{})
return miner, engine, verifier
}
// SetupReorgTest prepares deployment params and chain config for a reorg
// scenario, then delegates actor construction to SetupReorgTestActors.
// deltaTimeOffset optionally shifts the Delta fork activation time.
func SetupReorgTest(t Testing, config *e2eutils.TestParams, deltaTimeOffset *hexutil.Uint64) (*e2eutils.SetupData, *e2eutils.DeployParams, *L1Miner, *L2Sequencer, *L2Engine, *L2Verifier, *L2Engine, *L2Batcher) {
	dp := e2eutils.MakeDeployParams(t, config)
	helpers.ApplyDeltaTimeOffset(dp, deltaTimeOffset)
	setup := e2eutils.Setup(t, dp, DefaultAlloc)
	logger := testlog.Logger(t, log.LevelDebug)
	return SetupReorgTestActors(t, dp, setup, logger)
}
// SetupReorgTestActors assembles the complete actor set for reorg tests:
// L1 miner, sequencer (with engine), verifier (with engine), and a batcher
// that submits the sequencer's data to L1.
func SetupReorgTestActors(t Testing, dp *e2eutils.DeployParams, sd *e2eutils.SetupData, log log.Logger) (*e2eutils.SetupData, *e2eutils.DeployParams, *L1Miner, *L2Sequencer, *L2Engine, *L2Verifier, *L2Engine, *L2Batcher) {
	l1Miner, seqEng, seq := SetupSequencerTest(t, sd, log)
	// Distinct fee recipient so reorged blocks are distinguishable by content.
	l1Miner.ActL1SetFeeRecipient(common.Address{'A'})
	seq.ActL2PipelineFull(t)

	verifEng, verif := SetupVerifier(t, sd, log, l1Miner.L1Client(t, sd.RollupCfg), l1Miner.BlobStore(), &sync.Config{})

	batcher := NewL2Batcher(log, sd.RollupCfg, DefaultBatcherCfg(dp),
		seq.RollupClient(), l1Miner.EthClient(), seqEng.EthClient(), seqEng.EngineClient(t, sd.RollupCfg))
	return sd, dp, l1Miner, seq, seqEng, verif, verifEng, batcher
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment