Commit 4c656b3a authored by Matthew Slipper's avatar Matthew Slipper Committed by GitHub

op-e2e: Minor test updates (#12877)

Attempt at fixing the ongoing test flakiness with the batcher tests. Makes two changes:

- Waits for the L1 to be up for all end-to-end tests to mitigate the I/O and context timeouts we've been seeing.
- Updates the multi-batcher test to use an algorithm that's more tolerant of the L1 not immediately including the transaction.
parent 85c9a9ea
...@@ -9,6 +9,8 @@ import ( ...@@ -9,6 +9,8 @@ import (
"strings" "strings"
"time" "time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
...@@ -164,3 +166,27 @@ func AndGet[T interface{}](ctx context.Context, pollRate time.Duration, get func ...@@ -164,3 +166,27 @@ func AndGet[T interface{}](ctx context.Context, pollRate time.Duration, get func
} }
} }
} }
// ForNodeUp blocks until the node behind client successfully answers an RPC
// request, the caller's context is canceled, or a non-timeout error occurs.
// Timeout errors are treated as transient and retried, since they are the
// expected failure mode while a node is still starting up.
func ForNodeUp(ctx context.Context, client *ethclient.Client, lgr log.Logger) error {
	for {
		// Non-blocking cancellation check on every iteration.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		// Create a new context deliberately. The shorter timeout is used to detect
		// potential I/O timeouts on the RPC so we can retry.
		probeCtx, probeCancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
		_, err := client.BlockNumber(probeCtx)
		probeCancel()

		switch {
		case err == nil:
			lgr.Info("node is up")
			return nil
		case errors.Is(err, context.DeadlineExceeded) || errors.Is(err, os.ErrDeadlineExceeded):
			// The probe timed out; the node is likely still booting, so poll again.
			lgr.Info("timeout waiting for node come up, trying again")
		default:
			// Any other error is considered fatal and surfaced to the caller.
			return err
		}
	}
}
...@@ -32,10 +32,8 @@ func TestBatcherMultiTx(t *testing.T) { ...@@ -32,10 +32,8 @@ func TestBatcherMultiTx(t *testing.T) {
_, err = geth.WaitForBlock(big.NewInt(10), l2Seq) _, err = geth.WaitForBlock(big.NewInt(10), l2Seq)
require.NoError(t, err, "Waiting for L2 blocks") require.NoError(t, err, "Waiting for L2 blocks")
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel() defer cancel()
l1Number, err := l1Client.BlockNumber(ctx)
require.NoError(t, err)
// start batch submission // start batch submission
driver := sys.BatchSubmitter.TestDriver() driver := sys.BatchSubmitter.TestDriver()
...@@ -43,22 +41,34 @@ func TestBatcherMultiTx(t *testing.T) { ...@@ -43,22 +41,34 @@ func TestBatcherMultiTx(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
totalBatcherTxsCount := int64(0) totalBatcherTxsCount := int64(0)
// wait for up to 5 L1 blocks, usually only 3 is required, but it's
// possible additional L1 blocks will be created before the batcher starts, headNum, err := l1Client.BlockNumber(ctx)
// so we wait additional blocks. require.NoError(t, err)
for i := int64(0); i < 5; i++ { stopNum := headNum + 10
block, err := geth.WaitForBlock(big.NewInt(int64(l1Number)+i), l1Client) startBlock := uint64(1)
require.NoError(t, err, "Waiting for l1 blocks")
// there are possibly other services (proposer/challenger) in the background sending txs for {
// so we only count the batcher txs for i := startBlock; i <= headNum; i++ {
block, err := l1Client.BlockByNumber(ctx, big.NewInt(int64(i)))
require.NoError(t, err)
batcherTxCount, err := transactions.TransactionsBySender(block, cfg.DeployConfig.BatchSenderAddress) batcherTxCount, err := transactions.TransactionsBySender(block, cfg.DeployConfig.BatchSenderAddress)
require.NoError(t, err) require.NoError(t, err)
totalBatcherTxsCount += int64(batcherTxCount) totalBatcherTxsCount += batcherTxCount
if totalBatcherTxsCount >= 10 { if totalBatcherTxsCount >= 10 {
return return
} }
} }
headNum++
if headNum > stopNum {
break
}
startBlock = headNum
_, err = geth.WaitForBlock(big.NewInt(int64(headNum)), l1Client)
require.NoError(t, err)
}
t.Fatal("Expected at least 10 transactions from the batcher") t.Fatal("Expected at least 10 transactions from the batcher")
} }
...@@ -16,6 +16,8 @@ import ( ...@@ -16,6 +16,8 @@ import (
"testing" "testing"
"time" "time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"golang.org/x/exp/maps" "golang.org/x/exp/maps"
...@@ -671,6 +673,14 @@ func (cfg SystemConfig) Start(t *testing.T, startOpts ...StartOption) (*System, ...@@ -671,6 +673,14 @@ func (cfg SystemConfig) Start(t *testing.T, startOpts ...StartOption) (*System,
return nil, err return nil, err
} }
sysLogger := testlog.Logger(t, log.LevelInfo).New("role", "system")
l1UpCtx, l1UpCancel := context.WithTimeout(context.Background(), 30*time.Second)
defer l1UpCancel()
if err := wait.ForNodeUp(l1UpCtx, sys.NodeClient(RoleL1), sysLogger); err != nil {
return nil, fmt.Errorf("l1 never came up: %w", err)
}
// Ordered such that the Sequencer is initialized first. Setup this way so that // Ordered such that the Sequencer is initialized first. Setup this way so that
// the `RollupSequencerHTTP` GethOption can be supplied to any sentry nodes. // the `RollupSequencerHTTP` GethOption can be supplied to any sentry nodes.
l2Nodes := []string{RoleSeq} l2Nodes := []string{RoleSeq}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment