Commit 052ca792 authored by Sam Stokes, committed by GitHub

op-proposer: retry failed output proposals (#11291)

* op-proposer: add retries to output proposal

* op-proposer: proposeOutput returns err to help trigger retry

* op-proposer: use retry.Do for FetchOutput, add unit tests

* op-proposer: improve output fetching retry impl

* op-proposer: move done signal check into inner loop

---------
Co-authored-by: Sebastian Stammler <seb@oplabs.co>
parent f940301c
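
The heart of the change is the retry behaviour in the DGF proposal loop (loopDGF in the diff below): a failed output fetch is no longer dropped until the next proposal interval; instead the proposer keeps retrying every OutputRetryInterval, checking the done channel inside the inner loop so shutdown stays responsive. A minimal, runnable sketch of that pattern, using simplified stand-in types (proposer, fetchOutput) rather than the real op-proposer API:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// proposer is an illustrative stand-in for the L2OutputSubmitter.
type proposer struct {
	proposalInterval    time.Duration
	outputRetryInterval time.Duration
	done                chan struct{}
	fetchOutput         func(context.Context) (string, error) // stand-in for FetchDGFOutput
}

// loopDGF mirrors the retry structure added in this commit: an outer ticker
// drives proposals, and an inner loop retries the output fetch until it
// succeeds, while always giving the done signal priority.
func (p *proposer) loopDGF(ctx context.Context) {
	ticker := time.NewTicker(p.proposalInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			var (
				output string
				err    error
			)
			for output == "" || err != nil {
				// prioritize quit signal
				select {
				case <-p.done:
					return
				default:
				}
				output, err = p.fetchOutput(ctx)
				if err != nil {
					fmt.Println("Error getting DGF output, retrying...", err)
					time.Sleep(p.outputRetryInterval)
				}
			}
			fmt.Println("proposing output:", output)
		case <-p.done:
			return
		}
	}
}

func main() {
	fails := 2
	p := &proposer{
		proposalInterval:    50 * time.Millisecond,
		outputRetryInterval: 10 * time.Millisecond,
		done:                make(chan struct{}),
		fetchOutput: func(context.Context) (string, error) {
			if fails > 0 {
				fails--
				return "", errors.New("transient RPC failure")
			}
			return "output-root-0xabc", nil
		},
	}
	go func() {
		time.Sleep(200 * time.Millisecond)
		close(p.done)
	}()
	p.loopDGF(context.Background())
}
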
@@ -3,6 +3,7 @@ package actions
import (
"context"
"crypto/ecdsa"
"encoding/binary"
"math/big"
"time"
@@ -31,6 +32,7 @@ type ProposerCfg struct {
OutputOracleAddr *common.Address
DisputeGameFactoryAddr *common.Address
ProposalInterval time.Duration
ProposalRetryInterval time.Duration
DisputeGameType uint32
ProposerKey *ecdsa.PrivateKey
AllowNonFinalized bool
@@ -77,6 +79,7 @@ func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Cl
PollInterval: time.Second,
NetworkTimeout: time.Second,
ProposalInterval: cfg.ProposalInterval,
OutputRetryInterval: cfg.ProposalRetryInterval,
L2OutputOracleAddr: cfg.OutputOracleAddr,
DisputeGameFactoryAddr: cfg.DisputeGameFactoryAddr,
DisputeGameType: cfg.DisputeGameType,
@@ -206,18 +209,12 @@ func toCallArg(msg ethereum.CallMsg) interface{} {
func (p *L2Proposer) fetchNextOutput(t Testing) (*eth.OutputResponse, bool, error) {
if e2eutils.UseFaultProofs() {
blockNumber, err := p.driver.FetchCurrentBlockNumber(t.Ctx())
output, err := p.driver.FetchDGFOutput(t.Ctx())
if err != nil {
return nil, false, err
}
output, _, err := p.driver.FetchOutput(t.Ctx(), blockNumber)
if err != nil {
return nil, false, err
}
encodedBlockNumber := make([]byte, 32)
copy(encodedBlockNumber[32-len(blockNumber.Bytes()):], blockNumber.Bytes())
binary.BigEndian.PutUint64(encodedBlockNumber[24:], output.BlockRef.Number)
game, err := p.disputeGameFactory.Games(&bind.CallOpts{}, p.driver.Cfg.DisputeGameType, output.OutputRoot, encodedBlockNumber)
if err != nil {
return nil, false, err
@@ -228,7 +225,7 @@ func (p *L2Proposer) fetchNextOutput(t Testing) (*eth.OutputResponse, bool, erro
return output, true, nil
} else {
return p.driver.FetchNextOutputInfo(t.Ctx())
return p.driver.FetchL2OOOutput(t.Ctx())
}
}
......
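
For reference, the dispute-game lookup above still passes the proposal block number as a left-padded 32-byte big-endian word; the new code simply fills it with binary.BigEndian.PutUint64 from output.BlockRef.Number instead of copying big.Int bytes. A small standalone check (illustrative, not part of the commit) that the two encodings agree for any block number that fits in a uint64:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math/big"
)

func main() {
	blockNumber := uint64(4242)

	// Old encoding: left-pad the big.Int bytes into a 32-byte slot.
	oldEnc := make([]byte, 32)
	b := new(big.Int).SetUint64(blockNumber).Bytes()
	copy(oldEnc[32-len(b):], b)

	// New encoding: write the uint64 big-endian into the last 8 bytes.
	newEnc := make([]byte, 32)
	binary.BigEndian.PutUint64(newEnc[24:], blockNumber)

	fmt.Println(bytes.Equal(oldEnc, newEnc)) // true
}
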
@@ -64,15 +64,17 @@ func RunProposerTest(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
proposer = NewL2Proposer(t, log, &ProposerCfg{
DisputeGameFactoryAddr: &sd.DeploymentsL1.DisputeGameFactoryProxy,
ProposalInterval: 6 * time.Second,
ProposalRetryInterval: 3 * time.Second,
DisputeGameType: respectedGameType,
ProposerKey: dp.Secrets.Proposer,
AllowNonFinalized: true,
}, miner.EthClient(), rollupSeqCl)
} else {
proposer = NewL2Proposer(t, log, &ProposerCfg{
OutputOracleAddr: &sd.DeploymentsL1.L2OutputOracleProxy,
ProposerKey: dp.Secrets.Proposer,
AllowNonFinalized: false,
OutputOracleAddr: &sd.DeploymentsL1.L2OutputOracleProxy,
ProposerKey: dp.Secrets.Proposer,
ProposalRetryInterval: 3 * time.Second,
AllowNonFinalized: false,
}, miner.EthClient(), rollupSeqCl)
}
......
@@ -144,15 +144,17 @@ func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) {
proposer = NewL2Proposer(t, log, &ProposerCfg{
DisputeGameFactoryAddr: &sd.DeploymentsL1.DisputeGameFactoryProxy,
ProposalInterval: 6 * time.Second,
ProposalRetryInterval: 3 * time.Second,
DisputeGameType: respectedGameType,
ProposerKey: dp.Secrets.Proposer,
AllowNonFinalized: true,
}, miner.EthClient(), seq.RollupClient())
} else {
proposer = NewL2Proposer(t, log, &ProposerCfg{
OutputOracleAddr: &sd.DeploymentsL1.L2OutputOracleProxy,
ProposerKey: dp.Secrets.Proposer,
AllowNonFinalized: true,
OutputOracleAddr: &sd.DeploymentsL1.L2OutputOracleProxy,
ProposerKey: dp.Secrets.Proposer,
ProposalRetryInterval: 3 * time.Second,
AllowNonFinalized: true,
}, miner.EthClient(), seq.RollupClient())
}
......
@@ -845,14 +845,15 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
var proposerCLIConfig *l2os.CLIConfig
if e2eutils.UseFaultProofs() {
proposerCLIConfig = &l2os.CLIConfig{
L1EthRpc: sys.EthInstances[RoleL1].WSEndpoint(),
RollupRpc: sys.RollupNodes[RoleSeq].HTTPEndpoint(),
DGFAddress: config.L1Deployments.DisputeGameFactoryProxy.Hex(),
ProposalInterval: 6 * time.Second,
DisputeGameType: 254, // Fast game type
PollInterval: 50 * time.Millisecond,
TxMgrConfig: newTxMgrConfig(sys.EthInstances[RoleL1].WSEndpoint(), cfg.Secrets.Proposer),
AllowNonFinalized: cfg.NonFinalizedProposals,
L1EthRpc: sys.EthInstances[RoleL1].WSEndpoint(),
RollupRpc: sys.RollupNodes[RoleSeq].HTTPEndpoint(),
DGFAddress: config.L1Deployments.DisputeGameFactoryProxy.Hex(),
ProposalInterval: 6 * time.Second,
DisputeGameType: 254, // Fast game type
PollInterval: 50 * time.Millisecond,
OutputRetryInterval: 10 * time.Millisecond,
TxMgrConfig: newTxMgrConfig(sys.EthInstances[RoleL1].WSEndpoint(), cfg.Secrets.Proposer),
AllowNonFinalized: cfg.NonFinalizedProposals,
LogConfig: oplog.CLIConfig{
Level: log.LvlInfo,
Format: oplog.FormatText,
@@ -860,12 +861,13 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
}
} else {
proposerCLIConfig = &l2os.CLIConfig{
L1EthRpc: sys.EthInstances[RoleL1].WSEndpoint(),
RollupRpc: sys.RollupNodes[RoleSeq].HTTPEndpoint(),
L2OOAddress: config.L1Deployments.L2OutputOracleProxy.Hex(),
PollInterval: 50 * time.Millisecond,
TxMgrConfig: newTxMgrConfig(sys.EthInstances[RoleL1].WSEndpoint(), cfg.Secrets.Proposer),
AllowNonFinalized: cfg.NonFinalizedProposals,
L1EthRpc: sys.EthInstances[RoleL1].WSEndpoint(),
RollupRpc: sys.RollupNodes[RoleSeq].HTTPEndpoint(),
L2OOAddress: config.L1Deployments.L2OutputOracleProxy.Hex(),
PollInterval: 50 * time.Millisecond,
OutputRetryInterval: 10 * time.Millisecond,
TxMgrConfig: newTxMgrConfig(sys.EthInstances[RoleL1].WSEndpoint(), cfg.Secrets.Proposer),
AllowNonFinalized: cfg.NonFinalizedProposals,
LogConfig: oplog.CLIConfig{
Level: log.LvlInfo,
Format: oplog.FormatText,
......
@@ -41,8 +41,8 @@ var (
}
PollIntervalFlag = &cli.DurationFlag{
Name: "poll-interval",
Usage: "How frequently to poll L2 for new blocks",
Value: 6 * time.Second,
Usage: "How frequently to poll L2 for new blocks (legacy L2OO)",
Value: 12 * time.Second,
EnvVars: prefixEnvVars("POLL_INTERVAL"),
}
AllowNonFinalizedFlag = &cli.BoolFlag{
@@ -60,6 +60,12 @@ var (
Usage: "Interval between submitting L2 output proposals when the dispute game factory address is set",
EnvVars: prefixEnvVars("PROPOSAL_INTERVAL"),
}
OutputRetryIntervalFlag = &cli.DurationFlag{
Name: "output-retry-interval",
Usage: "Interval between retrying output fetching (DGF)",
Value: 12 * time.Second,
EnvVars: prefixEnvVars("OUTPUT_RETRY_INTERVAL"),
}
DisputeGameTypeFlag = &cli.UintFlag{
Name: "game-type",
Usage: "Dispute game type to create via the configured DisputeGameFactory",
@@ -95,6 +101,7 @@ var optionalFlags = []cli.Flag{
L2OutputHDPathFlag,
DisputeGameFactoryAddressFlag,
ProposalIntervalFlag,
OutputRetryIntervalFlag,
DisputeGameTypeFlag,
ActiveSequencerCheckDurationFlag,
WaitNodeSyncFlag,
......
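
The new --output-retry-interval flag follows the same urfave/cli pattern as the other duration flags and is read back in NewConfig with ctx.Duration. A self-contained sketch of that round trip, with an illustrative DEMO_ environment variable rather than the real prefixEnvVars prefix:

package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "flag-demo", // illustrative app, not op-proposer itself
		Flags: []cli.Flag{
			&cli.DurationFlag{
				Name:    "output-retry-interval",
				Usage:   "Interval between retrying output fetching (DGF)",
				Value:   12 * time.Second,
				EnvVars: []string{"DEMO_OUTPUT_RETRY_INTERVAL"}, // real code uses prefixEnvVars("OUTPUT_RETRY_INTERVAL")
			},
		},
		Action: func(ctx *cli.Context) error {
			// Mirrors NewConfig: the duration is read straight off the CLI context.
			fmt.Println("OutputRetryInterval:", ctx.Duration("output-retry-interval"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
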
@@ -53,6 +53,9 @@ type CLIConfig struct {
// ProposalInterval is the delay between submitting L2 output proposals when the DGFAddress is set.
ProposalInterval time.Duration
// OutputRetryInterval is the delay between retrying output fetch if one fails.
OutputRetryInterval time.Duration
// DisputeGameType is the type of dispute game to create when submitting an output proposal.
DisputeGameType uint32
@@ -110,6 +113,7 @@ func NewConfig(ctx *cli.Context) *CLIConfig {
PprofConfig: oppprof.ReadCLIConfig(ctx),
DGFAddress: ctx.String(flags.DisputeGameFactoryAddressFlag.Name),
ProposalInterval: ctx.Duration(flags.ProposalIntervalFlag.Name),
OutputRetryInterval: ctx.Duration(flags.OutputRetryIntervalFlag.Name),
DisputeGameType: uint32(ctx.Uint(flags.DisputeGameTypeFlag.Name)),
ActiveSequencerCheckDuration: ctx.Duration(flags.ActiveSequencerCheckDurationFlag.Name),
WaitNodeSync: ctx.Bool(flags.WaitNodeSyncFlag.Name),
......
@@ -39,6 +39,11 @@ type L1Client interface {
CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error)
}
type L2OOContract interface {
Version(*bind.CallOpts) (string, error)
NextBlockNumber(*bind.CallOpts) (*big.Int, error)
}
type RollupClient interface {
SyncStatus(ctx context.Context) (*eth.SyncStatus, error)
OutputAtBlock(ctx context.Context, blockNum uint64) (*eth.OutputResponse, error)
@@ -68,7 +73,7 @@ type L2OutputSubmitter struct {
mutex sync.Mutex
running bool
l2ooContract *bindings.L2OutputOracleCaller
l2ooContract L2OOContract
l2ooABI *abi.ABI
dgfContract *bindings.DisputeGameFactoryCaller
@@ -207,9 +212,12 @@ func (l *L2OutputSubmitter) StopL2OutputSubmitting() error {
return nil
}
// FetchNextOutputInfo gets the block number of the next proposal.
// It returns: the next block number, if the proposal should be made, error
func (l *L2OutputSubmitter) FetchNextOutputInfo(ctx context.Context) (*eth.OutputResponse, bool, error) {
// FetchL2OOOutput gets the next output proposal for the L2OO.
// It queries the L2OO for the earliest next block number that should be proposed.
// It returns the output to propose, and whether the proposal should be submitted at all.
// The passed context is expected to be a lifecycle context. A network timeout
// context will be derived from it.
func (l *L2OutputSubmitter) FetchL2OOOutput(ctx context.Context) (*eth.OutputResponse, bool, error) {
if l.l2ooContract == nil {
return nil, false, fmt.Errorf("L2OutputOracle contract not set, cannot fetch next output info")
}
@@ -220,11 +228,11 @@ func (l *L2OutputSubmitter) FetchNextOutputInfo(ctx context.Context) (*eth.Outpu
From: l.Txmgr.From(),
Context: cCtx,
}
nextCheckpointBlock, err := l.l2ooContract.NextBlockNumber(callOpts)
nextCheckpointBlockBig, err := l.l2ooContract.NextBlockNumber(callOpts)
if err != nil {
l.Log.Error("Proposer unable to get next block number", "err", err)
return nil, false, err
return nil, false, fmt.Errorf("querying next block number: %w", err)
}
nextCheckpointBlock := nextCheckpointBlockBig.Uint64()
// Fetch the current L2 heads
currentBlockNumber, err := l.FetchCurrentBlockNumber(ctx)
if err != nil {
@@ -232,76 +240,79 @@ func (l *L2OutputSubmitter) FetchNextOutputInfo(ctx context.Context) (*eth.Outpu
}
// Ensure that we do not submit a block in the future
if currentBlockNumber.Cmp(nextCheckpointBlock) < 0 {
if currentBlockNumber < nextCheckpointBlock {
l.Log.Debug("Proposer submission interval has not elapsed", "currentBlockNumber", currentBlockNumber, "nextBlockNumber", nextCheckpointBlock)
return nil, false, nil
}
return l.FetchOutput(ctx, nextCheckpointBlock)
output, err := l.FetchOutput(ctx, nextCheckpointBlock)
if err != nil {
return nil, false, fmt.Errorf("fetching output: %w", err)
}
// Always propose if it's part of the Finalized L2 chain. Or if allowed, if it's part of the safe L2 chain.
if output.BlockRef.Number > output.Status.FinalizedL2.Number && (!l.Cfg.AllowNonFinalized || output.BlockRef.Number > output.Status.SafeL2.Number) {
l.Log.Debug("Not proposing yet, L2 block is not ready for proposal",
"l2_proposal", output.BlockRef,
"l2_safe", output.Status.SafeL2,
"l2_finalized", output.Status.FinalizedL2,
"allow_non_finalized", l.Cfg.AllowNonFinalized)
return output, false, nil
}
return output, true, nil
}
// FetchDGFOutput gets the next output proposal for the DGF.
// The passed context is expected to be a lifecycle context. A network timeout
// context will be derived from it.
func (l *L2OutputSubmitter) FetchDGFOutput(ctx context.Context) (*eth.OutputResponse, error) {
ctx, cancel := context.WithTimeout(ctx, l.Cfg.NetworkTimeout)
defer cancel()
blockNum, err := l.FetchCurrentBlockNumber(ctx)
if err != nil {
return nil, err
}
return l.FetchOutput(ctx, blockNum)
}
// FetchCurrentBlockNumber gets the current block number from the [L2OutputSubmitter]'s [RollupClient]. If the `AllowNonFinalized` configuration
// option is set, it will return the safe head block number, and if not, it will return the finalized head block number.
func (l *L2OutputSubmitter) FetchCurrentBlockNumber(ctx context.Context) (*big.Int, error) {
func (l *L2OutputSubmitter) FetchCurrentBlockNumber(ctx context.Context) (uint64, error) {
rollupClient, err := l.RollupProvider.RollupClient(ctx)
if err != nil {
l.Log.Error("Proposer unable to get rollup client", "err", err)
return nil, err
return 0, fmt.Errorf("getting rollup client: %w", err)
}
cCtx, cancel := context.WithTimeout(ctx, l.Cfg.NetworkTimeout)
defer cancel()
status, err := rollupClient.SyncStatus(cCtx)
status, err := rollupClient.SyncStatus(ctx)
if err != nil {
l.Log.Error("Proposer unable to get sync status", "err", err)
return nil, err
return 0, fmt.Errorf("getting sync status: %w", err)
}
// Use either the finalized or safe head depending on the config. Finalized head is default & safer.
var currentBlockNumber *big.Int
if l.Cfg.AllowNonFinalized {
currentBlockNumber = new(big.Int).SetUint64(status.SafeL2.Number)
} else {
currentBlockNumber = new(big.Int).SetUint64(status.FinalizedL2.Number)
return status.SafeL2.Number, nil
}
return currentBlockNumber, nil
return status.FinalizedL2.Number, nil
}
func (l *L2OutputSubmitter) FetchOutput(ctx context.Context, block *big.Int) (*eth.OutputResponse, bool, error) {
func (l *L2OutputSubmitter) FetchOutput(ctx context.Context, block uint64) (*eth.OutputResponse, error) {
rollupClient, err := l.RollupProvider.RollupClient(ctx)
if err != nil {
l.Log.Error("Proposer unable to get rollup client", "err", err)
return nil, false, err
return nil, fmt.Errorf("getting rollup client: %w", err)
}
cCtx, cancel := context.WithTimeout(ctx, l.Cfg.NetworkTimeout)
defer cancel()
output, err := rollupClient.OutputAtBlock(cCtx, block.Uint64())
output, err := rollupClient.OutputAtBlock(ctx, block)
if err != nil {
l.Log.Error("Failed to fetch output at block", "block", block, "err", err)
return nil, false, err
return nil, fmt.Errorf("fetching output at block %d: %w", block, err)
}
if output.Version != supportedL2OutputVersion {
l.Log.Error("Unsupported l2 output version", "output_version", output.Version, "supported_version", supportedL2OutputVersion)
return nil, false, errors.New("unsupported l2 output version")
return nil, fmt.Errorf("unsupported l2 output version: %v, supported: %v", output.Version, supportedL2OutputVersion)
}
if output.BlockRef.Number != block.Uint64() { // sanity check, e.g. in case of bad RPC caching
l.Log.Error("Invalid blockNumber", "next_block", block, "output_block", output.BlockRef.Number)
return nil, false, errors.New("invalid blockNumber")
if onum := output.BlockRef.Number; onum != block { // sanity check, e.g. in case of bad RPC caching
return nil, fmt.Errorf("output block number %d mismatches requested %d", output.BlockRef.Number, block)
}
// Always propose if it's part of the Finalized L2 chain. Or if allowed, if it's part of the safe L2 chain.
if output.BlockRef.Number > output.Status.FinalizedL2.Number && (!l.Cfg.AllowNonFinalized || output.BlockRef.Number > output.Status.SafeL2.Number) {
l.Log.Debug("Not proposing yet, L2 block is not ready for proposal",
"l2_proposal", output.BlockRef,
"l2_safe", output.Status.SafeL2,
"l2_finalized", output.Status.FinalizedL2,
"allow_non_finalized", l.Cfg.AllowNonFinalized)
return nil, false, nil
}
return output, true, nil
return output, nil
}
// ProposeL2OutputTxData creates the transaction data for the ProposeL2Output function
@@ -450,15 +461,33 @@ func (l *L2OutputSubmitter) waitNodeSync() error {
return dial.WaitRollupSync(l.ctx, l.Log, rollupClient, l1head, time.Second*12)
}
// The loopL2OO regularly polls the L2OO for the next block to propose,
// and if the current finalized (or safe) block is past that next block, it
// proposes it.
func (l *L2OutputSubmitter) loopL2OO(ctx context.Context) {
defer l.Log.Info("loopL2OO returning")
ticker := time.NewTicker(l.Cfg.PollInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
output, shouldPropose, err := l.FetchNextOutputInfo(ctx)
if err != nil || !shouldPropose {
break
// prioritize quit signal
select {
case <-l.done:
return
default:
}
// A note on retrying: the outer ticker already runs on a short
// poll interval, which has a default value of 12 seconds. So no
// retry logic is needed around output fetching here.
output, shouldPropose, err := l.FetchL2OOOutput(ctx)
if err != nil {
l.Log.Warn("Error getting L2OO output", "err", err)
continue
} else if !shouldPropose {
// debug logging already in FetchL2OOOutput
continue
}
l.proposeOutput(ctx, output)
@@ -468,20 +497,36 @@ func (l *L2OutputSubmitter) loopL2OO(ctx context.Context) {
}
}
// The loopDGF proposes a new output every proposal interval. It does _not_ query
// the DGF for when to next propose, as the DGF doesn't have the concept of a
// proposal interval, like in the L2OO case. For this reason, it has to keep track
// of the interval itself, for which it uses an internal ticker.
func (l *L2OutputSubmitter) loopDGF(ctx context.Context) {
defer l.Log.Info("loopDGF returning")
ticker := time.NewTicker(l.Cfg.ProposalInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
blockNumber, err := l.FetchCurrentBlockNumber(ctx)
if err != nil {
break
}
output, shouldPropose, err := l.FetchOutput(ctx, blockNumber)
if err != nil || !shouldPropose {
break
var (
output *eth.OutputResponse
err error
)
// A note on retrying: because the proposal interval is usually much
// larger than the interval at which to retry proposing on a failed attempt,
// we want to keep retrying getting the output proposal until we succeed.
for output == nil || err != nil {
select {
case <-l.done:
return
default:
}
output, err = l.FetchDGFOutput(ctx)
if err != nil {
l.Log.Warn("Error getting DGF output, retrying...", "err", err)
time.Sleep(l.Cfg.OutputRetryInterval)
}
}
l.proposeOutput(ctx, output)
......
package proposer
import (
"context"
"fmt"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-proposer/bindings"
"github.com/ethereum-optimism/optimism/op-proposer/metrics"
"github.com/ethereum-optimism/optimism/op-service/dial"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/testutils"
txmgrmocks "github.com/ethereum-optimism/optimism/op-service/txmgr/mocks"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
type MockL2OOContract struct {
mock.Mock
}
func (m *MockL2OOContract) Version(opts *bind.CallOpts) (string, error) {
args := m.Called(opts)
return args.String(0), args.Error(1)
}
func (m *MockL2OOContract) NextBlockNumber(opts *bind.CallOpts) (*big.Int, error) {
args := m.Called(opts)
return args.Get(0).(*big.Int), args.Error(1)
}
type mockRollupEndpointProvider struct {
rollupClient *testutils.MockRollupClient
rollupClientErr error
}
func newEndpointProvider() *mockRollupEndpointProvider {
return &mockRollupEndpointProvider{
rollupClient: new(testutils.MockRollupClient),
}
}
func (p *mockRollupEndpointProvider) RollupClient(context.Context) (dial.RollupClientInterface, error) {
return p.rollupClient, p.rollupClientErr
}
func (p *mockRollupEndpointProvider) Close() {}
func setup(t *testing.T) (*L2OutputSubmitter, *mockRollupEndpointProvider, *MockL2OOContract, *txmgrmocks.TxManager, *testlog.CapturingHandler) {
ep := newEndpointProvider()
l2OutputOracleAddr := common.HexToAddress("0x3F8A862E63E759a77DA22d384027D21BF096bA9E")
proposerConfig := ProposerConfig{
PollInterval: time.Microsecond,
ProposalInterval: time.Microsecond,
OutputRetryInterval: time.Microsecond,
L2OutputOracleAddr: &l2OutputOracleAddr,
}
txmgr := txmgrmocks.NewTxManager(t)
lgr, logs := testlog.CaptureLogger(t, log.LevelDebug)
setup := DriverSetup{
Log: lgr,
Metr: metrics.NoopMetrics,
Cfg: proposerConfig,
Txmgr: txmgr,
RollupProvider: ep,
}
parsed, err := bindings.L2OutputOracleMetaData.GetAbi()
require.NoError(t, err)
ctx, cancel := context.WithCancel(context.Background())
l2ooContract := new(MockL2OOContract)
l2OutputSubmitter := L2OutputSubmitter{
DriverSetup: setup,
done: make(chan struct{}),
l2ooContract: l2ooContract,
l2ooABI: parsed,
ctx: ctx,
cancel: cancel,
}
txmgr.On("BlockNumber", mock.Anything).Return(uint64(100), nil).Once()
txmgr.On("Send", mock.Anything, mock.Anything).
Return(&types.Receipt{Status: uint64(1), TxHash: common.Hash{}}, nil).
Once().
Run(func(_ mock.Arguments) {
// let loops return after first Send call
t.Log("Closing proposer.")
close(l2OutputSubmitter.done)
})
return &l2OutputSubmitter, ep, l2ooContract, txmgr, logs
}
func TestL2OutputSubmitter_OutputRetry(t *testing.T) {
tests := []struct {
name string
}{
{name: "L2OO"},
{name: "DGF"},
}
const numFails = 3
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ps, ep, l2ooContract, txmgr, logs := setup(t)
ep.rollupClient.On("SyncStatus").Return(&eth.SyncStatus{FinalizedL2: eth.L2BlockRef{Number: 42}}, nil).Times(numFails + 1)
ep.rollupClient.ExpectOutputAtBlock(42, nil, fmt.Errorf("TEST: failed to fetch output")).Times(numFails)
ep.rollupClient.ExpectOutputAtBlock(
42,
&eth.OutputResponse{
Version: supportedL2OutputVersion,
BlockRef: eth.L2BlockRef{Number: 42},
Status: &eth.SyncStatus{
CurrentL1: eth.L1BlockRef{Hash: common.Hash{}},
FinalizedL2: eth.L2BlockRef{Number: 42},
},
},
nil,
)
if tt.name == "DGF" {
ps.loopDGF(ps.ctx)
} else {
txmgr.On("From").Return(common.Address{}).Times(numFails + 1)
l2ooContract.On("NextBlockNumber", mock.AnythingOfType("*bind.CallOpts")).Return(big.NewInt(42), nil).Times(numFails + 1)
ps.loopL2OO(ps.ctx)
}
ep.rollupClient.AssertExpectations(t)
l2ooContract.AssertExpectations(t)
require.Len(t, logs.FindLogs(testlog.NewMessageContainsFilter("Error getting "+tt.name)), numFails)
require.NotNil(t, logs.FindLog(testlog.NewMessageFilter("Proposer tx successfully published")))
require.NotNil(t, logs.FindLog(testlog.NewMessageFilter("loop"+tt.name+" returning")))
})
}
}
@@ -32,6 +32,9 @@ type ProposerConfig struct {
PollInterval time.Duration
NetworkTimeout time.Duration
// How frequently to retry fetching an output if one fails
OutputRetryInterval time.Duration
// How frequently to post L2 outputs when the DisputeGameFactory is configured
ProposalInterval time.Duration
@@ -89,6 +92,7 @@ func (ps *ProposerService) initFromCLIConfig(ctx context.Context, version string
ps.initMetrics(cfg)
ps.PollInterval = cfg.PollInterval
ps.OutputRetryInterval = cfg.OutputRetryInterval
ps.NetworkTimeout = cfg.TxMgrConfig.NetworkTimeout
ps.AllowNonFinalized = cfg.AllowNonFinalized
ps.WaitNodeSync = cfg.WaitNodeSync
......
@@ -18,8 +18,8 @@ func (m *MockRollupClient) OutputAtBlock(ctx context.Context, blockNum uint64) (
return out.Get(0).(*eth.OutputResponse), out.Error(1)
}
func (m *MockRollupClient) ExpectOutputAtBlock(blockNum uint64, response *eth.OutputResponse, err error) {
m.Mock.On("OutputAtBlock", blockNum).Once().Return(response, err)
func (m *MockRollupClient) ExpectOutputAtBlock(blockNum uint64, response *eth.OutputResponse, err error) *mock.Call {
return m.Mock.On("OutputAtBlock", blockNum).Once().Return(response, err)
}
func (m *MockRollupClient) SyncStatus(ctx context.Context) (*eth.SyncStatus, error) {
......
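
The only reason ExpectOutputAtBlock now returns the *mock.Call is so tests can chain extra constraints onto it, as the retry test does with .Times(numFails) to make the first few fetches fail before a final success. A stripped-down sketch of the same idiom, using an illustrative OutputFetcher mock rather than the real testutils type:

package main

import (
	"errors"
	"fmt"

	"github.com/stretchr/testify/mock"
)

// OutputFetcher is an illustrative stand-in for MockRollupClient.
type OutputFetcher struct {
	mock.Mock
}

func (f *OutputFetcher) OutputAtBlock(blockNum uint64) (string, error) {
	args := f.Called(blockNum)
	return args.String(0), args.Error(1)
}

// ExpectOutputAtBlock mirrors the changed helper: it returns the *mock.Call
// so callers can chain further constraints such as Times(n).
func (f *OutputFetcher) ExpectOutputAtBlock(blockNum uint64, out string, err error) *mock.Call {
	return f.Mock.On("OutputAtBlock", blockNum).Once().Return(out, err)
}

func main() {
	f := new(OutputFetcher)
	const numFails = 3
	// The first numFails calls fail, then one succeeds.
	f.ExpectOutputAtBlock(42, "", errors.New("TEST: failed to fetch output")).Times(numFails)
	f.ExpectOutputAtBlock(42, "output-root-0xabc", nil)

	for i := 0; i < numFails+1; i++ {
		out, err := f.OutputAtBlock(42)
		fmt.Println(out, err)
	}
}
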