Commit bead5e12 authored by Matthew Slipper's avatar Matthew Slipper Committed by GitHub

ci: Parallelize op-e2e (#3635)

* ci: Parallelize op-e2e

Replaces my previous PRs since `t.Parallel()` works fine. Also exposes the listener on the op-node, so that we can use a port of zero and allow the kernel to allocate a port.

* close other geth nodes

* missed one

* use t.cleanup
parent 028edbe5
......@@ -288,7 +288,7 @@ jobs:
- run:
name: test op-e2e
command: |
gotestsum --junitfile /test-results/op-e2e.xml -- -coverpkg=github.com/ethereum-optimism/optimism/... -coverprofile=coverage.out -covermode=atomic ./...
gotestsum --format standard-verbose --junitfile /test-results/op-e2e.xml -- -coverpkg=github.com/ethereum-optimism/optimism/... -coverprofile=coverage.out -covermode=atomic ./...
working_directory: op-e2e
- run:
name: test op-service
......
......@@ -150,3 +150,7 @@ func (s *L1Miner) ActL1EndBlock(t Testing) {
t.Fatalf("failed to insert block into l1 chain")
}
}
// Close releases the miner's resources by closing the underlying
// L1Replica (and, through it, its geth node). Safe to call from
// t.Cleanup; the error from the replica shutdown is returned as-is.
func (s *L1Miner) Close() error {
	return s.L1Replica.Close()
}
......@@ -19,6 +19,9 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug)
miner := NewL1Miner(log, sd.L1Cfg)
t.Cleanup(func() {
_ = miner.Close()
})
cl := miner.EthClient()
signer := types.LatestSigner(sd.L1Cfg.Config)
......@@ -53,6 +56,9 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
// now make a replica that syncs these two blocks from the miner
replica := NewL1Replica(log, sd.L1Cfg)
t.Cleanup(func() {
_ = replica.Close()
})
replica.ActL1Sync(miner.CanonL1Chain())(t)
replica.ActL1Sync(miner.CanonL1Chain())(t)
require.Equal(t, replica.l1Chain.CurrentBlock().Hash(), miner.l1Chain.CurrentBlock().Hash())
......
......@@ -195,3 +195,7 @@ func (s *L1Replica) ActL1SafeNext(t Testing) {
}
s.l1Chain.SetSafe(next)
}
// Close shuts down the replica's embedded geth node, releasing its
// ports and background goroutines. Returns the node's shutdown error.
func (s *L1Replica) Close() error {
	return s.node.Close()
}
......@@ -33,6 +33,9 @@ func TestL1Replica_ActL1RPCFail(gt *testing.T) {
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug)
replica := NewL1Replica(log, sd.L1Cfg)
t.Cleanup(func() {
_ = replica.Close()
})
// mock an RPC failure
replica.ActL1RPCFail(t)
// check RPC failure
......@@ -76,6 +79,9 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
// Enough setup, create the test actor and run the actual actions
replica1 := NewL1Replica(log, sd.L1Cfg)
t.Cleanup(func() {
_ = replica1.Close()
})
syncFromA := replica1.ActL1Sync(canonL1(chainA))
// sync canonical chain A
for replica1.l1Chain.CurrentBlock().NumberU64()+1 < uint64(len(chainA)) {
......@@ -94,6 +100,9 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
// Adding and syncing a new replica
replica2 := NewL1Replica(log, sd.L1Cfg)
t.Cleanup(func() {
_ = replica2.Close()
})
syncFromOther := replica2.ActL1Sync(replica1.CanonL1Chain())
for replica2.l1Chain.CurrentBlock().NumberU64()+1 < uint64(len(chainB)) {
syncFromOther(t)
......
......@@ -175,3 +175,7 @@ func (e *L2Engine) ActL2IncludeTx(from common.Address) Action {
e.l2Transactions = append(e.l2Transactions, tx)
}
}
// Close shuts down the engine's embedded geth node, releasing its
// ports and background goroutines. Returns the node's shutdown error.
func (e *L2Engine) Close() error {
	return e.node.Close()
}
......@@ -95,6 +95,9 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
sd.L2Cfg.MustCommit(db)
engine := NewL2Engine(log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath)
t.Cleanup(func() {
_ = engine.Close()
})
cl := engine.EthClient()
signer := types.LatestSigner(sd.L2Cfg.Config)
......
......@@ -541,17 +541,11 @@ func (cfg SystemConfig) start() (*System, error) {
}
}
rollupEndpoint := fmt.Sprintf(
"http://%s:%d",
sys.cfg.Nodes["sequencer"].RPC.ListenAddr,
sys.cfg.Nodes["sequencer"].RPC.ListenPort,
)
// L2Output Submitter
sys.l2OutputSubmitter, err = l2os.NewL2OutputSubmitter(l2os.Config{
L1EthRpc: sys.nodes["l1"].WSEndpoint(),
L2EthRpc: sys.nodes["sequencer"].WSEndpoint(),
RollupRpc: rollupEndpoint,
RollupRpc: sys.rollupNodes["sequencer"].HTTPEndpoint(),
L2OOAddress: sys.L2OOContractAddr.String(),
PollInterval: 50 * time.Millisecond,
NumConfirmations: 1,
......@@ -576,7 +570,7 @@ func (cfg SystemConfig) start() (*System, error) {
sys.batchSubmitter, err = bss.NewBatchSubmitter(bss.Config{
L1EthRpc: sys.nodes["l1"].WSEndpoint(),
L2EthRpc: sys.nodes["sequencer"].WSEndpoint(),
RollupRpc: rollupEndpoint,
RollupRpc: sys.rollupNodes["sequencer"].HTTPEndpoint(),
MinL1TxSize: 1,
MaxL1TxSize: 120000,
ChannelTimeout: sys.cfg.RollupConfig.ChannelTimeout,
......
......@@ -121,7 +121,7 @@ func defaultSystemConfig(t *testing.T) SystemConfig {
// Submitter PrivKey is set in system start for rollup nodes where sequencer = true
RPC: node.RPCConfig{
ListenAddr: "127.0.0.1",
ListenPort: 9093,
ListenPort: 0,
EnableAdmin: true,
},
L1EpochPollInterval: time.Second * 4,
......@@ -154,6 +154,7 @@ func defaultSystemConfig(t *testing.T) SystemConfig {
}
func TestL2OutputSubmitter(t *testing.T) {
t.Parallel()
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
......@@ -166,7 +167,7 @@ func TestL2OutputSubmitter(t *testing.T) {
l1Client := sys.Clients["l1"]
rollupRPCClient, err := rpc.DialContext(context.Background(), cfg.Nodes["sequencer"].RPC.HttpEndpoint())
rollupRPCClient, err := rpc.DialContext(context.Background(), sys.rollupNodes["sequencer"].HTTPEndpoint())
require.Nil(t, err)
rollupClient := sources.NewRollupClient(rollupRPCClient)
......@@ -229,6 +230,7 @@ func TestL2OutputSubmitter(t *testing.T) {
// TestSystemE2E sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that L1 deposits are reflected on L2.
// All nodes are run in process (but are the full nodes, not mocked or stubbed).
func TestSystemE2E(t *testing.T) {
t.Parallel()
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
......@@ -328,7 +330,7 @@ func TestSystemE2E(t *testing.T) {
require.Equal(t, verifBlock.ParentHash(), seqBlock.ParentHash(), "Verifier and sequencer blocks parent hashes not the same after including a batch tx")
require.Equal(t, verifBlock.Hash(), seqBlock.Hash(), "Verifier and sequencer blocks not the same after including a batch tx")
rollupRPCClient, err := rpc.DialContext(context.Background(), cfg.Nodes["sequencer"].RPC.HttpEndpoint())
rollupRPCClient, err := rpc.DialContext(context.Background(), sys.rollupNodes["sequencer"].HTTPEndpoint())
require.Nil(t, err)
rollupClient := sources.NewRollupClient(rollupRPCClient)
// basic check that sync status works
......@@ -343,6 +345,7 @@ func TestSystemE2E(t *testing.T) {
// TestConfirmationDepth runs the rollup with both sequencer and verifier not immediately processing the tip of the chain.
func TestConfirmationDepth(t *testing.T) {
t.Parallel()
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
......@@ -390,6 +393,7 @@ func TestConfirmationDepth(t *testing.T) {
// TestFinalize tests if L2 finalizes after sufficient time after L1 finalizes
func TestFinalize(t *testing.T) {
t.Parallel()
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
......@@ -417,6 +421,7 @@ func TestFinalize(t *testing.T) {
}
func TestMintOnRevertedDeposit(t *testing.T) {
t.Parallel()
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
......@@ -490,6 +495,7 @@ func TestMintOnRevertedDeposit(t *testing.T) {
}
func TestMissingBatchE2E(t *testing.T) {
t.Parallel()
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
......@@ -599,6 +605,7 @@ func L1InfoFromState(ctx context.Context, contract *bindings.L1Block, l2Number *
// TestSystemMockP2P sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that
// the nodes can sync L2 blocks before they are confirmed on L1.
func TestSystemMockP2P(t *testing.T) {
t.Parallel()
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
......@@ -672,6 +679,7 @@ func TestSystemMockP2P(t *testing.T) {
}
func TestL1InfoContract(t *testing.T) {
t.Parallel()
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
......@@ -795,6 +803,7 @@ func calcL1GasUsed(data []byte, overhead *big.Int) *big.Int {
// balance changes on L1 and L2 and has to include gas fees in the balance checks.
// It does not check that the withdrawal can be executed prior to the end of the finality period.
func TestWithdrawals(t *testing.T) {
t.Parallel()
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
......@@ -970,6 +979,7 @@ func TestWithdrawals(t *testing.T) {
// TestFees checks that L1/L2 fees are handled.
func TestFees(t *testing.T) {
t.Parallel()
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
......
......@@ -355,3 +355,11 @@ func (n *OpNode) Close() error {
}
return result.ErrorOrNil()
}
// ListenAddr reports the RPC server's actual listen address as
// "host:port". This is the kernel-assigned address, so it is valid
// even when the node was configured with port 0.
func (n *OpNode) ListenAddr() string {
	return n.server.listenAddr.String()
}
// HTTPEndpoint returns the node's RPC server URL, i.e. the actual
// listen address prefixed with the "http://" scheme.
func (n *OpNode) HTTPEndpoint() string {
	return "http://" + n.ListenAddr()
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment