Commit 566fc288 authored by mergify[bot]'s avatar mergify[bot] Committed by GitHub

Merge branch 'develop' into clabby/ctb/update-ctb-readme

parents e449b680 6b1fa532
---
'@eth-optimism/chain-mon': minor
---
Introduces the balance-mon service to chain-mon.
......@@ -18,6 +18,7 @@ jobs:
l2geth: ${{ steps.packages.outputs.l2geth }}
message-relayer: ${{ steps.packages.outputs.message-relayer }}
fault-detector: ${{ steps.packages.outputs.fault-detector }}
balance-mon: ${{ steps.packages.outputs.balance-mon }}
drippie-mon: ${{ steps.packages.outputs.drippie-mon }}
wd-mon: ${{ steps.packages.outputs.wd-mon }}
data-transport-layer: ${{ steps.packages.outputs.data-transport-layer }}
......@@ -230,6 +231,33 @@ jobs:
push: true
tags: ethereumoptimism/fault-detector:${{ needs.canary-publish.outputs.canary-docker-tag }}
balance-mon:
name: Publish Balance Monitor Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.balance-mon != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: balance-mon
push: true
tags: ethereumoptimism/balance-mon:${{ needs.canary-publish.outputs.canary-docker-tag }}
drippie-mon:
name: Publish Drippie Monitor Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
......
......@@ -14,6 +14,7 @@ jobs:
l2geth: ${{ steps.packages.outputs.l2geth }}
message-relayer: ${{ steps.packages.outputs.message-relayer }}
fault-detector: ${{ steps.packages.outputs.fault-detector }}
balance-mon: ${{ steps.packages.outputs.balance-mon }}
drippie-mon: ${{ steps.packages.outputs.drippie-mon }}
wd-mon: ${{ steps.packages.outputs.wd-mon }}
data-transport-layer: ${{ steps.packages.outputs.data-transport-layer }}
......@@ -364,6 +365,33 @@ jobs:
push: true
tags: ethereumoptimism/wd-mon:${{ needs.release.outputs.wd-mon }},ethereumoptimism/wd-mon:latest
balance-mon:
name: Publish Balance Monitor Version ${{ needs.release.outputs.balance-mon }}
needs: release
if: needs.release.outputs.balance-mon != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: balance-mon
push: true
tags: ethereumoptimism/balance-mon:${{ needs.release.outputs.balance-mon }},ethereumoptimism/balance-mon:latest
drippie-mon:
name: Publish Drippie Monitor Version ${{ needs.release.outputs.drippie-mon }}
needs: release
......
......@@ -201,7 +201,7 @@ Once you’ve built both repositories, you’ll need to head back to the Optimism M
- Replace `"BATCHER"` with the address of the Batcher account you generated earlier.
- Replace `"SEQUENCER"` with the address of the Sequencer account you generated earlier.
- Replace `"BLOCKHASH"` with the blockhash you got from the `cast` command.
- Replace `"TIMESTAMP"` with the timestamp you got from the `cast` command. Note that although all the other fields are strings, this field is a number! Don’t include the quotation marks.
- Replace `TIMESTAMP` with the timestamp you got from the `cast` command. Note that although all the other fields are strings, this field is a number! Don’t include the quotation marks.
## Deploy the L1 contracts
......@@ -390,9 +390,7 @@ Head over to the `op-node` package and start the `op-node` using the following c
--rollup.config=./rollup.json \
--rpc.addr=0.0.0.0 \
--rpc.port=8547 \
--p2p.listen.ip=0.0.0.0 \
--p2p.listen.tcp=9003 \
--p2p.listen.udp=9003 \
--p2p.disable \
--rpc.enable-admin \
--p2p.sequencer.key=<SEQUENCERKEY> \
--l1=<RPC> \
......@@ -402,6 +400,26 @@ Head over to the `op-node` package and start the `op-node` using the following c
Once you run this command, you should start seeing the `op-node` begin to process all of the L1 information after the starting block number that you picked earlier. Once the `op-node` has enough information, it’ll begin sending Engine API payloads to `op-geth`. At that point, you’ll start to see blocks being created inside of `op-geth`. We’re live!
::: tip Peer to peer synchronization
If you use a chain ID that is also used by others, for example the default (42069), your `op-node` will try to use peer to peer synchronization to speed things up.
These attempts will fail, because they will be signed with the wrong key, but they will waste time and network resources.
To avoid this, we start with peer to peer synchronization disabled (`--p2p.disable`).
Once you have multiple nodes, it makes sense to use these command line parameters to synchronize between them without getting confused by other blockchains.
```
--p2p.static=<nodes> \
--p2p.listen.ip=0.0.0.0 \
--p2p.listen.tcp=9003 \
--p2p.listen.udp=9003 \
```
:::
## Run op-batcher
The final component necessary to put all the pieces together is the `op-batcher`. The `op-batcher` takes transactions from the Sequencer and publishes those transactions to L1. Once transactions are on L1, they’re officially part of the Rollup. Without the `op-batcher`, transactions sent to the Sequencer would never make it to L1 and wouldn’t become part of the canonical chain. The `op-batcher` is critical!
......@@ -516,15 +534,47 @@ To use any other development stack, see the getting started tutorial, just repla
### Stopping your Rollup
To stop `op-geth` you should use Ctrl-C.
An orderly shutdown is done in the reverse order to the order in which components were started:
1. Stop `op-batcher`.
1. Stop `op-node`.
1. Stop `op-geth`.
### Starting your Rollup
To restart the blockchain, start the components in the same order you used when you first initialized it.
1. `op-geth`
1. `op-node`
1. `op-batcher`
::: tip Synchronization takes time
`op-batcher` might have warning messages similar to:
```
WARN [03-21|14:13:55.248] Error calculating L2 block range err="failed to get sync status: Post \"http://localhost:8547\": context deadline exceeded"
WARN [03-21|14:13:57.328] Error calculating L2 block range err="failed to get sync status: Post \"http://localhost:8547\": context deadline exceeded"
```
This means that `op-node` is not yet synchronized up to the present time.
Just wait until it is.
:::
If `op-geth` aborts (for example, because the computer it is running on crashes), you will get these errors on `op-node`:
### Errors
#### Corrupt data directory
If `op-geth` aborts (for example, because the computer it is running on crashes), you might get these errors on `op-node`:
```
WARN [02-16|21:22:02.868] Derivation process temporary error attempts=14 err="stage 0 failed resetting: temp: failed to find the L2 Heads to start from: failed to fetch L2 block by hash 0x0000000000000000000000000000000000000000000000000000000000000000: failed to determine block-hash of hash 0x0000000000000000000000000000000000000000000000000000000000000000, could not get payload: not found"
```
In that case, you need to remove `datadir`, reinitialize it:
This means that the data directory is corrupt and you need to reinitialize it:
```bash
cd ~/op-geth
......@@ -536,17 +586,23 @@ echo "<SEQUENCER KEY HERE>" > datadir/block-signer-key
./build/bin/geth init --datadir=./datadir ./genesis.json
```
### Starting your Rollup
To restart the blockchain, use the same order of components you did when you initialized it.
#### Batcher out of ETH
1. `op-geth`
2. `op-node`
3. `op-batcher`
If `op-batcher` runs out of ETH, it cannot submit new transaction batches to L1.
You will get error messages similar to this one:
```
INFO [03-21|14:22:32.754] publishing transaction service=batcher txHash=2ace6d..7eb248 nonce=2516 gasTipCap=2,340,741 gasFeeCap=172,028,434,515
ERROR[03-21|14:22:32.844] unable to publish transaction service=batcher txHash=2ace6d..7eb248 nonce=2516 gasTipCap=2,340,741 gasFeeCap=172,028,434,515 err="insufficient funds for gas * price + value"
```
Just send more ETH to the batcher, and the problem will be resolved.
## Adding nodes
To add nodes to the rollup, you need to initialize `op-node` and `op-geth`, similar to what you did for the first node:
To add nodes to the rollup, you need to initialize `op-node` and `op-geth`, similar to what you did for the first node.
You should *not* add an `op-batcher`; there should be only one.
1. Configure the OS and prerequisites as you did for the first node.
1. Build the Optimism monorepo and `op-geth` as you did for the first node.
......@@ -574,8 +630,8 @@ To add nodes to the rollup, you need to initialize `op-node` and `op-geth`, simi
1. Start `op-geth` (using the same command line you used on the initial node)
1. Start `op-node` (using the same command line you used on the initial node)
1. Wait while the node synchronizes
## What’s next?
You can use this rollup the same way you’d use any other test blockchain. Once the superchain is available, this blockchain should be able to join the test version. Alternatively, you could [modify the blockchain in various ways](./hacks.md). **Please note that OP Stack Hacks are unofficial and are not explicitly supported by the OP Stack.** You will not be able to receive significant developer support for any modifications you make to the OP Stack.
\ No newline at end of file
You can use this rollup the same way you’d use any other test blockchain. Once the superchain is available, this blockchain should be able to join the test version. Alternatively, you could [modify the blockchain in various ways](./hacks.md). **Please note that OP Stack Hacks are unofficial and are not explicitly supported by the OP Stack.** You will not be able to receive significant developer support for any modifications you make to the OP Stack.
......@@ -34,6 +34,7 @@ require (
golang.org/x/crypto v0.6.0
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb
golang.org/x/term v0.5.0
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af
)
require (
......@@ -178,7 +179,6 @@ require (
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
golang.org/x/tools v0.6.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
......
......@@ -110,6 +110,14 @@ type CLIConfig struct {
/* Optional Params */
// TxManagerTimeout is the max amount of time to wait for the [txmgr].
// This will default to: 10 * time.Minute.
TxManagerTimeout time.Duration
// OfflineGasEstimation specifies whether the batcher should calculate
// gas estimations offline using the [core.IntrinsicGas] function.
OfflineGasEstimation bool
// MaxL1TxSize is the maximum size of a batch tx submitted to L1.
MaxL1TxSize uint64
......@@ -168,19 +176,21 @@ func NewConfig(ctx *cli.Context) CLIConfig {
ResubmissionTimeout: ctx.GlobalDuration(flags.ResubmissionTimeoutFlag.Name),
/* Optional Flags */
MaxChannelDuration: ctx.GlobalUint64(flags.MaxChannelDurationFlag.Name),
MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeBytesFlag.Name),
TargetL1TxSize: ctx.GlobalUint64(flags.TargetL1TxSizeBytesFlag.Name),
TargetNumFrames: ctx.GlobalInt(flags.TargetNumFramesFlag.Name),
ApproxComprRatio: ctx.GlobalFloat64(flags.ApproxComprRatioFlag.Name),
Stopped: ctx.GlobalBool(flags.StoppedFlag.Name),
Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name),
SequencerHDPath: ctx.GlobalString(flags.SequencerHDPathFlag.Name),
PrivateKey: ctx.GlobalString(flags.PrivateKeyFlag.Name),
RPCConfig: rpc.ReadCLIConfig(ctx),
LogConfig: oplog.ReadCLIConfig(ctx),
MetricsConfig: opmetrics.ReadCLIConfig(ctx),
PprofConfig: oppprof.ReadCLIConfig(ctx),
SignerConfig: opsigner.ReadCLIConfig(ctx),
OfflineGasEstimation: ctx.GlobalBool(flags.OfflineGasEstimationFlag.Name),
TxManagerTimeout: ctx.GlobalDuration(flags.TxManagerTimeoutFlag.Name),
MaxChannelDuration: ctx.GlobalUint64(flags.MaxChannelDurationFlag.Name),
MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeBytesFlag.Name),
TargetL1TxSize: ctx.GlobalUint64(flags.TargetL1TxSizeBytesFlag.Name),
TargetNumFrames: ctx.GlobalInt(flags.TargetNumFramesFlag.Name),
ApproxComprRatio: ctx.GlobalFloat64(flags.ApproxComprRatioFlag.Name),
Stopped: ctx.GlobalBool(flags.StoppedFlag.Name),
Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name),
SequencerHDPath: ctx.GlobalString(flags.SequencerHDPathFlag.Name),
PrivateKey: ctx.GlobalString(flags.PrivateKeyFlag.Name),
RPCConfig: rpc.ReadCLIConfig(ctx),
LogConfig: oplog.ReadCLIConfig(ctx),
MetricsConfig: opmetrics.ReadCLIConfig(ctx),
PprofConfig: oppprof.ReadCLIConfig(ctx),
SignerConfig: opsigner.ReadCLIConfig(ctx),
}
}
......@@ -15,6 +15,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
......@@ -24,7 +25,7 @@ import (
type BatchSubmitter struct {
Config // directly embed the config + sources
txMgr *TransactionManager
txMgr txmgr.TxManager
wg sync.WaitGroup
done chan struct{}
......@@ -79,6 +80,7 @@ func NewBatchSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger, m metrics.Metri
NumConfirmations: cfg.NumConfirmations,
SafeAbortNonceTooLowCount: cfg.SafeAbortNonceTooLowCount,
From: fromAddress,
ChainID: rcfg.L1ChainID,
Signer: signer(rcfg.L1ChainID),
}
......@@ -125,10 +127,8 @@ func NewBatchSubmitter(ctx context.Context, cfg Config, l log.Logger, m metrics.
return &BatchSubmitter{
Config: cfg,
txMgr: NewTransactionManager(l,
cfg.TxManagerConfig, cfg.Rollup.BatchInboxAddress, cfg.Rollup.L1ChainID,
cfg.From, cfg.L1Client),
state: NewChannelManager(l, m, cfg.Channel),
txMgr: txmgr.NewSimpleTxManager("batcher", l, cfg.TxManagerConfig, cfg.L1Client),
state: NewChannelManager(l, m, cfg.Channel),
}, nil
}
......@@ -226,7 +226,7 @@ func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context) {
// loadBlockIntoState fetches & stores a single block into `state`. It returns the block it loaded.
func (l *BatchSubmitter) loadBlockIntoState(ctx context.Context, blockNumber uint64) (*types.Block, error) {
ctx, cancel := context.WithTimeout(ctx, networkTimeout)
ctx, cancel := context.WithTimeout(ctx, txManagerTimeout)
defer cancel()
block, err := l.L2Client.BlockByNumber(ctx, new(big.Int).SetUint64(blockNumber))
if err != nil {
......@@ -244,7 +244,7 @@ func (l *BatchSubmitter) loadBlockIntoState(ctx context.Context, blockNumber uin
// calculateL2BlockRangeToStore determines the range (start,end] that should be loaded into the local state.
// It also takes care of initializing some local state (i.e. will modify l.lastStoredBlock in certain conditions)
func (l *BatchSubmitter) calculateL2BlockRangeToStore(ctx context.Context) (eth.BlockID, eth.BlockID, error) {
childCtx, cancel := context.WithTimeout(ctx, networkTimeout)
childCtx, cancel := context.WithTimeout(ctx, txManagerTimeout)
defer cancel()
syncStatus, err := l.RollupNode.SyncStatus(childCtx)
// Ensure that we have the sync status
......@@ -312,8 +312,9 @@ func (l *BatchSubmitter) loop() {
l.log.Error("unable to get tx data", "err", err)
break
}
// Record TX Status
if receipt, err := l.txMgr.SendTransaction(l.ctx, txdata.Bytes()); err != nil {
if receipt, err := l.SendTransaction(l.ctx, txdata.Bytes()); err != nil {
l.recordFailedTx(txdata.ID(), err)
} else {
l.recordConfirmedTx(txdata.ID(), receipt)
......@@ -335,6 +336,46 @@ func (l *BatchSubmitter) loop() {
}
}
const networkTimeout = 2 * time.Second // How long a single network request can take. TODO: put in a config somewhere
// fix(refcell):
// combined with above, these config variables should also be replicated in the op-proposer
// along with op-proposer changes to include the updated tx manager
const txManagerTimeout = 2 * time.Minute // How long the tx manager can take to send a transaction.
// SendTransaction creates & submits a transaction to the batch inbox address with the given `data`.
// It currently uses the underlying `txmgr` to handle transaction sending & price management.
// This is a blocking method. It should not be called concurrently.
func (l *BatchSubmitter) SendTransaction(ctx context.Context, data []byte) (*types.Receipt, error) {
// Do the gas estimation offline. A value of 0 will cause the [txmgr] to estimate the gas limit.
intrinsicGas, err := core.IntrinsicGas(data, nil, false, true, true, false)
if err != nil {
return nil, fmt.Errorf("failed to calculate intrinsic gas: %w", err)
}
// Create the transaction
tx, err := l.txMgr.CraftTx(ctx, txmgr.TxCandidate{
To: l.Rollup.BatchInboxAddress,
TxData: data,
From: l.From,
GasLimit: intrinsicGas,
})
if err != nil {
return nil, fmt.Errorf("failed to create tx: %w", err)
}
// Send the transaction through the txmgr
ctx, cancel := context.WithTimeout(ctx, txManagerTimeout)
defer cancel()
if receipt, err := l.txMgr.Send(ctx, tx); err != nil {
l.log.Warn("unable to publish tx", "err", err, "data_size", len(data))
return nil, err
} else {
l.log.Info("tx successfully published", "tx_hash", receipt.TxHash, "data_size", len(data))
return receipt, nil
}
}
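// Illustrative sketch (the helper name is an assumption, not part of this
// package's API): the offline estimation above boils down to a single call.
// With no access list and EIP-2028 active, core.IntrinsicGas charges
// 21000 gas plus 16 gas per non-zero calldata byte and 4 gas per zero byte.
func offlineGasForPayload(data []byte) (uint64, error) {
	return core.IntrinsicGas(data, nil, false, true, true, false)
}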
func (l *BatchSubmitter) recordL1Tip(l1tip eth.L1BlockRef) {
if l.lastL1Tip == l1tip {
return
......
package batcher
import (
"context"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum-optimism/optimism/op-service/txmgr/mocks"
)
// TestBatchSubmitter_SendTransaction tests the driver's
// [SendTransaction] external facing function.
func TestBatchSubmitter_SendTransaction(t *testing.T) {
log := testlog.Logger(t, log.LvlCrit)
txMgr := mocks.TxManager{}
batcherInboxAddress := common.HexToAddress("0x42000000000000000000000000000000000000ff")
chainID := big.NewInt(1)
sender := common.HexToAddress("0xdeadbeef")
bs := BatchSubmitter{
Config: Config{
log: log,
From: sender,
Rollup: &rollup.Config{
L1ChainID: chainID,
BatchInboxAddress: batcherInboxAddress,
},
},
txMgr: &txMgr,
}
txData := []byte{0x00, 0x01, 0x02}
gasTipCap := big.NewInt(136)
gasFeeCap := big.NewInt(137)
gas := uint64(1337)
// Candidate gas should be calculated with [core.IntrinsicGas]
intrinsicGas, err := core.IntrinsicGas(txData, nil, false, true, true, false)
require.NoError(t, err)
candidate := txmgr.TxCandidate{
To: batcherInboxAddress,
TxData: txData,
From: sender,
GasLimit: intrinsicGas,
}
tx := types.NewTx(&types.DynamicFeeTx{
ChainID: chainID,
Nonce: 0,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Gas: gas,
To: &batcherInboxAddress,
Data: txData,
})
txHash := tx.Hash()
expectedReceipt := types.Receipt{
Type: 1,
PostState: []byte{},
Status: uint64(1),
CumulativeGasUsed: gas,
TxHash: txHash,
GasUsed: gas,
}
txMgr.On("CraftTx", mock.Anything, candidate).Return(tx, nil)
txMgr.On("Send", mock.Anything, tx).Return(&expectedReceipt, nil)
receipt, err := bs.SendTransaction(context.Background(), tx.Data())
require.NoError(t, err)
require.Equal(t, receipt, &expectedReceipt)
}
package batcher
import (
"context"
"fmt"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
)
const networkTimeout = 2 * time.Second // How long a single network request can take. TODO: put in a config somewhere
// TransactionManager wraps the simple txmgr package to make it easy to send & wait for transactions
type TransactionManager struct {
// Config
batchInboxAddress common.Address
senderAddress common.Address
chainID *big.Int
// Outside world
txMgr txmgr.TxManager
l1Client *ethclient.Client
signerFn opcrypto.SignerFn
log log.Logger
}
func NewTransactionManager(log log.Logger, txMgrConfg txmgr.Config, batchInboxAddress common.Address, chainID *big.Int, senderAddress common.Address, l1Client *ethclient.Client) *TransactionManager {
t := &TransactionManager{
batchInboxAddress: batchInboxAddress,
senderAddress: senderAddress,
chainID: chainID,
txMgr: txmgr.NewSimpleTxManager("batcher", log, txMgrConfg, l1Client),
l1Client: l1Client,
signerFn: txMgrConfg.Signer,
log: log,
}
return t
}
// SendTransaction creates & submits a transaction to the batch inbox address with the given `data`.
// It currently uses the underlying `txmgr` to handle transaction sending & price management.
// This is a blocking method. It should not be called concurrently.
// TODO: where to put concurrent transaction handling logic.
func (t *TransactionManager) SendTransaction(ctx context.Context, data []byte) (*types.Receipt, error) {
tx, err := t.CraftTx(ctx, data)
if err != nil {
return nil, fmt.Errorf("failed to create tx: %w", err)
}
ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) // TODO: Select a timeout that makes sense here.
defer cancel()
if receipt, err := t.txMgr.Send(ctx, tx); err != nil {
t.log.Warn("unable to publish tx", "err", err, "data_size", len(data))
return nil, err
} else {
t.log.Info("tx successfully published", "tx_hash", receipt.TxHash, "data_size", len(data))
return receipt, nil
}
}
// calcGasTipAndFeeCap queries L1 to determine what a suitable miner tip & basefee limit would be for timely inclusion
func (t *TransactionManager) calcGasTipAndFeeCap(ctx context.Context) (gasTipCap *big.Int, gasFeeCap *big.Int, err error) {
childCtx, cancel := context.WithTimeout(ctx, networkTimeout)
gasTipCap, err = t.l1Client.SuggestGasTipCap(childCtx)
cancel()
if err != nil {
return nil, nil, fmt.Errorf("failed to get suggested gas tip cap: %w", err)
}
if gasTipCap == nil {
t.log.Warn("unexpected unset gasTipCap, using default 2 gwei")
gasTipCap = new(big.Int).SetUint64(params.GWei * 2)
}
childCtx, cancel = context.WithTimeout(ctx, networkTimeout)
head, err := t.l1Client.HeaderByNumber(childCtx, nil)
cancel()
if err != nil || head == nil {
return nil, nil, fmt.Errorf("failed to get L1 head block for fee cap: %w", err)
}
if head.BaseFee == nil {
return nil, nil, fmt.Errorf("failed to get L1 basefee in block %d for fee cap", head.Number)
}
gasFeeCap = txmgr.CalcGasFeeCap(head.BaseFee, gasTipCap)
return gasTipCap, gasFeeCap, nil
}
// CraftTx creates the signed transaction to the batchInboxAddress.
// It queries L1 for the current fee market conditions as well as for the nonce.
// NOTE: This method SHOULD NOT publish the resulting transaction.
func (t *TransactionManager) CraftTx(ctx context.Context, data []byte) (*types.Transaction, error) {
gasTipCap, gasFeeCap, err := t.calcGasTipAndFeeCap(ctx)
if err != nil {
return nil, err
}
childCtx, cancel := context.WithTimeout(ctx, networkTimeout)
nonce, err := t.l1Client.NonceAt(childCtx, t.senderAddress, nil)
cancel()
if err != nil {
return nil, fmt.Errorf("failed to get nonce: %w", err)
}
rawTx := &types.DynamicFeeTx{
ChainID: t.chainID,
Nonce: nonce,
To: &t.batchInboxAddress,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Data: data,
}
t.log.Info("creating tx", "to", rawTx.To, "from", t.senderAddress)
gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true, false)
if err != nil {
return nil, fmt.Errorf("failed to calculate intrinsic gas: %w", err)
}
rawTx.Gas = gas
ctx, cancel = context.WithTimeout(ctx, networkTimeout)
defer cancel()
tx := types.NewTx(rawTx)
return t.signerFn(ctx, t.senderAddress, tx)
}
package flags
import (
"time"
"github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/op-batcher/rpc"
......@@ -74,7 +76,17 @@ var (
}
/* Optional flags */
OfflineGasEstimationFlag = cli.BoolFlag{
Name: "offline-gas-estimation",
Usage: "Whether to use offline gas estimation",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "OFFLINE_GAS_ESTIMATION"),
}
TxManagerTimeoutFlag = cli.DurationFlag{
Name: "tx-manager-timeout",
Usage: "Maximum duration to wait for L1 transactions, including resubmissions",
Value: 10 * time.Minute,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "TX_MANAGER_TIMEOUT"),
}
MaxChannelDurationFlag = cli.Uint64Flag{
Name: "max-channel-duration",
Usage: "The maximum duration of L1-blocks to keep a channel open. 0 to disable.",
......@@ -141,6 +153,8 @@ var requiredFlags = []cli.Flag{
}
var optionalFlags = []cli.Flag{
OfflineGasEstimationFlag,
TxManagerTimeoutFlag,
MaxChannelDurationFlag,
MaxL1TxSizeBytesFlag,
TargetL1TxSizeBytesFlag,
......
......@@ -52,6 +52,7 @@ func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Cl
NumConfirmations: 1,
SafeAbortNonceTooLowCount: 4,
From: from,
ChainID: big.NewInt(420),
// Signer is loaded in `proposer.NewL2OutputSubmitter`
},
L1Client: l1,
......
......@@ -270,9 +270,12 @@ func TestMigration(t *testing.T) {
snapLog.SetHandler(log.DiscardHandler())
rollupNodeConfig := &node.Config{
L1: &node.L1EndpointConfig{
L1NodeAddr: forkedL1URL,
L1TrustRPC: false,
L1RPCKind: sources.RPCKindBasic,
L1NodeAddr: forkedL1URL,
L1TrustRPC: false,
L1RPCKind: sources.RPCKindBasic,
RateLimit: 0,
BatchSize: 20,
HttpPollInterval: 12 * time.Second,
},
L2: &node.L2EndpointConfig{
L2EngineAddr: gethNode.HTTPAuthEndpoint(),
......@@ -329,6 +332,8 @@ func TestMigration(t *testing.T) {
L1EthRpc: forkedL1URL,
L2EthRpc: gethNode.WSEndpoint(),
RollupRpc: rollupNode.HTTPEndpoint(),
TxManagerTimeout: 10 * time.Minute,
OfflineGasEstimation: true,
MaxChannelDuration: 1,
MaxL1TxSize: 120_000,
TargetL1TxSize: 100_000,
......
......@@ -75,7 +75,7 @@ func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, e
require.Nil(t, node.Start())
auth := rpc.WithHTTPAuth(gn.NewJWTAuth(cfg.JWTSecret))
l2Node, err := client.NewRPC(ctx, logger, node.WSAuthEndpoint(), auth)
l2Node, err := client.NewRPC(ctx, logger, node.WSAuthEndpoint(), client.WithGethRPCOptions(auth))
require.Nil(t, err)
// Finally create the engine client
......
......@@ -413,9 +413,12 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
l2EndpointConfig = sys.Nodes[name].HTTPAuthEndpoint()
}
rollupCfg.L1 = &rollupNode.L1EndpointConfig{
L1NodeAddr: l1EndpointConfig,
L1TrustRPC: false,
L1RPCKind: sources.RPCKindBasic,
L1NodeAddr: l1EndpointConfig,
L1TrustRPC: false,
L1RPCKind: sources.RPCKindBasic,
RateLimit: 0,
BatchSize: 20,
HttpPollInterval: time.Duration(cfg.DeployConfig.L1BlockTime) * time.Second / 10,
}
rollupCfg.L2 = &rollupNode.L2EndpointConfig{
L2EngineAddr: l2EndpointConfig,
......@@ -590,10 +593,13 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
}
// Batch Submitter
txManagerTimeout := 10 * time.Minute
sys.BatchSubmitter, err = bss.NewBatchSubmitterFromCLIConfig(bss.CLIConfig{
L1EthRpc: sys.Nodes["l1"].WSEndpoint(),
L2EthRpc: sys.Nodes["sequencer"].WSEndpoint(),
RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(),
TxManagerTimeout: txManagerTimeout,
OfflineGasEstimation: true,
MaxChannelDuration: 1,
MaxL1TxSize: 120_000,
TargetL1TxSize: 100_000,
......
......@@ -57,7 +57,7 @@ func NewPollingClient(ctx context.Context, lgr log.Logger, c RPC, opts ...Wrappe
res := &PollingClient{
c: c,
lgr: lgr,
pollRate: 250 * time.Millisecond,
pollRate: 12 * time.Second,
ctx: ctx,
cancel: cancel,
pollReqCh: make(chan struct{}, 1),
......
package client
import (
"context"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/rpc"
"golang.org/x/time/rate"
)
// RateLimitingClient is a wrapper around a pure RPC that implements a global rate-limit on requests.
type RateLimitingClient struct {
c RPC
rl *rate.Limiter
}
// NewRateLimitingClient implements a global rate-limit for all RPC requests.
// A limit of N will ensure that over a long enough time-frame the given number of tokens per second is targeted.
// Burst limits how far off we can be from the target, by specifying how many requests are allowed at once.
func NewRateLimitingClient(c RPC, limit rate.Limit, burst int) *RateLimitingClient {
return &RateLimitingClient{c: c, rl: rate.NewLimiter(limit, burst)}
}
func (b *RateLimitingClient) Close() {
b.c.Close()
}
func (b *RateLimitingClient) CallContext(ctx context.Context, result any, method string, args ...any) error {
if err := b.rl.Wait(ctx); err != nil {
return err
}
cCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
return b.c.CallContext(cCtx, result, method, args...)
}
func (b *RateLimitingClient) BatchCallContext(ctx context.Context, batch []rpc.BatchElem) error {
if err := b.rl.WaitN(ctx, len(batch)); err != nil {
return err
}
cCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
return b.c.BatchCallContext(cCtx, batch)
}
func (b *RateLimitingClient) EthSubscribe(ctx context.Context, channel any, args ...any) (ethereum.Subscription, error) {
if err := b.rl.Wait(ctx); err != nil {
return nil, err
}
return b.c.EthSubscribe(ctx, channel, args...)
}
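// Illustrative usage sketch (the limit values and RPC method are assumptions,
// not defaults of this package): wrap any RPC so every request first waits on
// a shared limiter targeting roughly 10 requests per second, allowing bursts
// of up to 20 requests at once.
func rateLimitedChainID(ctx context.Context, inner RPC) (string, error) {
	limited := NewRateLimitingClient(inner, rate.Limit(10), 20)
	var chainID string
	err := limited.CallContext(ctx, &chainID, "eth_chainId")
	return chainID, err
}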
......@@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/time/rate"
"github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum/go-ethereum/rpc"
......@@ -24,27 +25,85 @@ type RPC interface {
EthSubscribe(ctx context.Context, channel any, args ...any) (ethereum.Subscription, error)
}
type rpcConfig struct {
gethRPCOptions []rpc.ClientOption
httpPollInterval time.Duration
backoffAttempts int
limit float64
burst int
}
type RPCOption func(cfg *rpcConfig) error
// WithDialBackoff configures the number of attempts for the initial dial to the RPC,
// attempts are executed with an exponential backoff strategy.
func WithDialBackoff(attempts int) RPCOption {
return func(cfg *rpcConfig) error {
cfg.backoffAttempts = attempts
return nil
}
}
// WithHttpPollInterval configures the RPC to poll at the given rate, in case RPC subscriptions are not available.
func WithHttpPollInterval(duration time.Duration) RPCOption {
return func(cfg *rpcConfig) error {
cfg.httpPollInterval = duration
return nil
}
}
// WithGethRPCOptions passes the list of go-ethereum RPC options to the internal RPC instance.
func WithGethRPCOptions(gethRPCOptions ...rpc.ClientOption) RPCOption {
return func(cfg *rpcConfig) error {
cfg.gethRPCOptions = append(cfg.gethRPCOptions, gethRPCOptions...)
return nil
}
}
// WithRateLimit configures the RPC to target the given rate limit (in requests / second).
// See NewRateLimitingClient for more details.
func WithRateLimit(rateLimit float64, burst int) RPCOption {
return func(cfg *rpcConfig) error {
cfg.limit = rateLimit
cfg.burst = burst
return nil
}
}
// NewRPC returns the correct client.RPC instance for a given RPC url.
func NewRPC(ctx context.Context, lgr log.Logger, addr string, opts ...rpc.ClientOption) (RPC, error) {
underlying, err := DialRPCClientWithBackoff(ctx, lgr, addr, opts...)
func NewRPC(ctx context.Context, lgr log.Logger, addr string, opts ...RPCOption) (RPC, error) {
var cfg rpcConfig
for i, opt := range opts {
if err := opt(&cfg); err != nil {
return nil, fmt.Errorf("rpc option %d failed to apply to RPC config: %w", i, err)
}
}
if cfg.backoffAttempts < 1 { // default to at least 1 attempt, or it always fails to dial.
cfg.backoffAttempts = 1
}
underlying, err := dialRPCClientWithBackoff(ctx, lgr, addr, cfg.backoffAttempts, cfg.gethRPCOptions...)
if err != nil {
return nil, err
}
wrapped := &BaseRPCClient{
c: underlying,
var wrapped RPC = &BaseRPCClient{c: underlying}
if cfg.limit != 0 {
wrapped = NewRateLimitingClient(wrapped, rate.Limit(cfg.limit), cfg.burst)
}
if httpRegex.MatchString(addr) {
return NewPollingClient(ctx, lgr, wrapped), nil
wrapped = NewPollingClient(ctx, lgr, wrapped, WithPollRate(cfg.httpPollInterval))
}
return wrapped, nil
}
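// Illustrative usage sketch (the logger, address and option values are
// assumptions rather than required defaults): dial an HTTP endpoint with a
// bounded number of dial attempts, a self-imposed request rate limit, and an
// explicit poll interval for new heads, since HTTP endpoints cannot subscribe.
func dialL1RPCExample(ctx context.Context, lgr log.Logger, addr string) (RPC, error) {
	return NewRPC(ctx, lgr, addr,
		WithDialBackoff(10),                  // retry the initial dial up to 10 times
		WithRateLimit(10, 20),                // ~10 req/s, bursting up to the batch size
		WithHttpPollInterval(12*time.Second), // matches the op-node flag default
	)
}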
// Dials a JSON-RPC endpoint repeatedly, with a backoff, until a client connection is established. Auth is optional.
func DialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string, opts ...rpc.ClientOption) (*rpc.Client, error) {
func dialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string, attempts int, opts ...rpc.ClientOption) (*rpc.Client, error) {
bOff := backoff.Exponential()
var ret *rpc.Client
err := backoff.DoCtx(ctx, 10, bOff, func() error {
err := backoff.DoCtx(ctx, attempts, bOff, func() error {
client, err := rpc.DialOptions(ctx, addr, opts...)
if err != nil {
if client == nil {
......
......@@ -52,6 +52,12 @@ jq "select(.valid_data == false)|.tx.hash" $TX_DIR
# Select all channels that are not ready and then get the id and inclusion block & tx hash of the first frame.
jq "select(.is_ready == false)|[.id, .frames[0].inclusion_block, .frames[0].transaction_hash]" $CHANNEL_DIR
# Show all of the frames in a channel without seeing the batches or frame data
jq 'del(.batches)|del(.frames[]|.frame.data)' $CHANNEL_FILE
# Show all batches (without timestamps) in a channel
jq '.batches|del(.[]|.Transactions)' $CHANNEL_FILE
```
......
......@@ -14,10 +14,10 @@ import (
// Flags
const envVarPrefix = "OP_NODE_"
const envVarPrefix = "OP_NODE"
func prefixEnvVar(name string) string {
return envVarPrefix + name
return envVarPrefix + "_" + name
}
var (
......@@ -75,6 +75,24 @@ var (
return &out
}(),
}
L1RPCRateLimit = cli.Float64Flag{
Name: "l1.rpc-rate-limit",
Usage: "Optional self-imposed global rate-limit on L1 RPC requests, specified in requests / second. Disabled if set to 0.",
EnvVar: prefixEnvVar("L1_RPC_RATE_LIMIT"),
Value: 0,
}
L1RPCMaxBatchSize = cli.IntFlag{
Name: "l1.rpc-max-batch-size",
Usage: "Maximum number of RPC requests to bundle, e.g. during L1 blocks receipt fetching. The L1 RPC rate limit counts this as N items, but allows it to burst at once.",
EnvVar: prefixEnvVar("L1_RPC_MAX_BATCH_SIZE"),
Value: 20,
}
L1HTTPPollInterval = cli.DurationFlag{
Name: "l1.http-poll-interval",
Usage: "Polling interval for latest-block subscription when using an HTTP RPC provider. Ignored for other types of RPC endpoints.",
EnvVar: prefixEnvVar("L1_HTTP_POLL_INTERVAL"),
Value: time.Second * 12,
}
L2EngineJWTSecret = cli.StringFlag{
Name: "l2.jwt-secret",
Usage: "Path to JWT secret key. Keys are 32 bytes, hex encoded in a file. A new key will be generated if left empty.",
......@@ -196,6 +214,9 @@ var optionalFlags = []cli.Flag{
Network,
L1TrustRPC,
L1RPCProviderKind,
L1RPCRateLimit,
L1RPCMaxBatchSize,
L1HTTPPollInterval,
L2EngineJWTSecret,
VerifierL1Confs,
SequencerEnabledFlag,
......
......@@ -4,8 +4,10 @@ import (
"context"
"errors"
"fmt"
"time"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum/go-ethereum/log"
......@@ -15,14 +17,14 @@ import (
type L2EndpointSetup interface {
// Setup a RPC client to a L2 execution engine to process rollup blocks with.
Setup(ctx context.Context, log log.Logger) (cl client.RPC, err error)
Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (cl client.RPC, rpcCfg *sources.EngineClientConfig, err error)
Check() error
}
type L2SyncEndpointSetup interface {
// Setup a RPC client to another L2 node to sync L2 blocks from.
// It may return a nil client with nil error if RPC based sync is not enabled.
Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, err error)
Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (cl client.RPC, rpcCfg *sources.SyncClientConfig, err error)
Check() error
}
......@@ -30,7 +32,8 @@ type L1EndpointSetup interface {
// Setup a RPC client to a L1 node to pull rollup input-data from.
// The results of the RPC client may be trusted for faster processing, or strictly validated.
// The kind of the RPC may be non-basic, to optimize RPC usage.
Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, kind sources.RPCProviderKind, err error)
Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (cl client.RPC, rpcCfg *sources.L1ClientConfig, err error)
Check() error
}
type L2EndpointConfig struct {
......@@ -51,17 +54,17 @@ func (cfg *L2EndpointConfig) Check() error {
return nil
}
func (cfg *L2EndpointConfig) Setup(ctx context.Context, log log.Logger) (client.RPC, error) {
func (cfg *L2EndpointConfig) Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (client.RPC, *sources.EngineClientConfig, error) {
if err := cfg.Check(); err != nil {
return nil, err
return nil, nil, err
}
auth := rpc.WithHTTPAuth(gn.NewJWTAuth(cfg.L2EngineJWTSecret))
l2Node, err := client.NewRPC(ctx, log, cfg.L2EngineAddr, auth)
l2Node, err := client.NewRPC(ctx, log, cfg.L2EngineAddr, client.WithGethRPCOptions(auth))
if err != nil {
return nil, err
return nil, nil, err
}
return l2Node, nil
return l2Node, sources.EngineClientDefaultConfig(rollupCfg), nil
}
// PreparedL2Endpoints enables testing with in-process pre-setup RPC connections to L2 engines
......@@ -78,8 +81,8 @@ func (p *PreparedL2Endpoints) Check() error {
var _ L2EndpointSetup = (*PreparedL2Endpoints)(nil)
func (p *PreparedL2Endpoints) Setup(ctx context.Context, log log.Logger) (client.RPC, error) {
return p.Client, nil
func (p *PreparedL2Endpoints) Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (client.RPC, *sources.EngineClientConfig, error) {
return p.Client, sources.EngineClientDefaultConfig(rollupCfg), nil
}
// L2SyncEndpointConfig contains configuration for the fallback sync endpoint
......@@ -93,16 +96,16 @@ var _ L2SyncEndpointSetup = (*L2SyncEndpointConfig)(nil)
// Setup creates an RPC client to sync from.
// It will return nil without error if no sync method is configured.
func (cfg *L2SyncEndpointConfig) Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, err error) {
func (cfg *L2SyncEndpointConfig) Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (client.RPC, *sources.SyncClientConfig, error) {
if cfg.L2NodeAddr == "" {
return nil, false, nil
return nil, nil, nil
}
l2Node, err := client.NewRPC(ctx, log, cfg.L2NodeAddr)
if err != nil {
return nil, false, err
return nil, nil, err
}
return l2Node, cfg.TrustRPC, nil
return l2Node, sources.SyncClientDefaultConfig(rollupCfg, cfg.TrustRPC), nil
}
func (cfg *L2SyncEndpointConfig) Check() error {
......@@ -118,8 +121,8 @@ type PreparedL2SyncEndpoint struct {
var _ L2SyncEndpointSetup = (*PreparedL2SyncEndpoint)(nil)
func (cfg *PreparedL2SyncEndpoint) Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, err error) {
return cfg.Client, cfg.TrustRPC, nil
func (cfg *PreparedL2SyncEndpoint) Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (client.RPC, *sources.SyncClientConfig, error) {
return cfg.Client, sources.SyncClientDefaultConfig(rollupCfg, cfg.TrustRPC), nil
}
func (cfg *PreparedL2SyncEndpoint) Check() error {
......@@ -137,16 +140,48 @@ type L1EndpointConfig struct {
// L1RPCKind identifies the RPC provider kind that serves the RPC,
// to inform the optimal usage of the RPC for transaction receipts fetching.
L1RPCKind sources.RPCProviderKind
// RateLimit specifies a self-imposed rate-limit on L1 requests. 0 is no rate-limit.
RateLimit float64
// BatchSize specifies the maximum batch-size, which also applies as L1 rate-limit burst amount (if set).
BatchSize int
// HttpPollInterval specifies the interval between polling for the latest L1 block,
// when the RPC is detected to be an HTTP type.
// It is recommended to use websockets or IPC for efficient following of the changing block.
// Setting this to 0 disables polling.
HttpPollInterval time.Duration
}
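// Illustrative configuration sketch (the endpoint address is a placeholder;
// the numbers mirror values used by the e2e tests in this change rather than
// mandatory defaults).
func exampleL1EndpointConfig() *L1EndpointConfig {
	return &L1EndpointConfig{
		L1NodeAddr:       "http://127.0.0.1:8545",
		L1TrustRPC:       false,
		L1RPCKind:        sources.RPCKindBasic,
		RateLimit:        0,                // 0 disables the self-imposed rate limit
		BatchSize:        20,               // Check() requires a value in [1, 500]; also used as the burst
		HttpPollInterval: 12 * time.Second, // only used when the RPC is detected to be HTTP
	}
}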
var _ L1EndpointSetup = (*L1EndpointConfig)(nil)
func (cfg *L1EndpointConfig) Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, kind sources.RPCProviderKind, err error) {
l1Node, err := client.NewRPC(ctx, log, cfg.L1NodeAddr)
func (cfg *L1EndpointConfig) Check() error {
if cfg.BatchSize < 1 || cfg.BatchSize > 500 {
return fmt.Errorf("batch size is invalid or unreasonable: %d", cfg.BatchSize)
}
if cfg.RateLimit < 0 {
return fmt.Errorf("rate limit cannot be negative")
}
return nil
}
func (cfg *L1EndpointConfig) Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (client.RPC, *sources.L1ClientConfig, error) {
opts := []client.RPCOption{
client.WithHttpPollInterval(cfg.HttpPollInterval),
client.WithDialBackoff(10),
}
if cfg.RateLimit != 0 {
opts = append(opts, client.WithRateLimit(cfg.RateLimit, cfg.BatchSize))
}
l1Node, err := client.NewRPC(ctx, log, cfg.L1NodeAddr, opts...)
if err != nil {
return nil, false, sources.RPCKindBasic, fmt.Errorf("failed to dial L1 address (%s): %w", cfg.L1NodeAddr, err)
return nil, nil, fmt.Errorf("failed to dial L1 address (%s): %w", cfg.L1NodeAddr, err)
}
return l1Node, cfg.L1TrustRPC, cfg.L1RPCKind, nil
rpcCfg := sources.L1ClientDefaultConfig(rollupCfg, cfg.L1TrustRPC, cfg.L1RPCKind)
rpcCfg.MaxRequestsPerBatch = cfg.BatchSize
return l1Node, rpcCfg, nil
}
// PreparedL1Endpoint enables testing with an in-process pre-setup RPC connection to L1
......@@ -158,6 +193,14 @@ type PreparedL1Endpoint struct {
var _ L1EndpointSetup = (*PreparedL1Endpoint)(nil)
func (p *PreparedL1Endpoint) Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, kind sources.RPCProviderKind, err error) {
return p.Client, p.TrustRPC, p.RPCProviderKind, nil
func (p *PreparedL1Endpoint) Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (client.RPC, *sources.L1ClientConfig, error) {
return p.Client, sources.L1ClientDefaultConfig(rollupCfg, p.TrustRPC, p.RPCProviderKind), nil
}
func (cfg *PreparedL1Endpoint) Check() error {
if cfg.Client == nil {
return errors.New("rpc client cannot be nil")
}
return nil
}
......@@ -116,14 +116,13 @@ func (n *OpNode) initTracer(ctx context.Context, cfg *Config) error {
}
func (n *OpNode) initL1(ctx context.Context, cfg *Config) error {
l1Node, trustRPC, rpcProvKind, err := cfg.L1.Setup(ctx, n.log)
l1Node, rpcCfg, err := cfg.L1.Setup(ctx, n.log, &cfg.Rollup)
if err != nil {
return fmt.Errorf("failed to get L1 RPC client: %w", err)
}
n.l1Source, err = sources.NewL1Client(
client.NewInstrumentedRPC(l1Node, n.metrics), n.log, n.metrics.L1SourceCache,
sources.L1ClientDefaultConfig(&cfg.Rollup, trustRPC, rpcProvKind))
client.NewInstrumentedRPC(l1Node, n.metrics), n.log, n.metrics.L1SourceCache, rpcCfg)
if err != nil {
return fmt.Errorf("failed to create L1 source: %w", err)
}
......@@ -184,14 +183,13 @@ func (n *OpNode) initRuntimeConfig(ctx context.Context, cfg *Config) error {
}
func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger) error {
rpcClient, err := cfg.L2.Setup(ctx, n.log)
rpcClient, rpcCfg, err := cfg.L2.Setup(ctx, n.log, &cfg.Rollup)
if err != nil {
return fmt.Errorf("failed to setup L2 execution-engine RPC client: %w", err)
}
n.l2Source, err = sources.NewEngineClient(
client.NewInstrumentedRPC(rpcClient, n.metrics), n.log, n.metrics.L2SourceCache,
sources.EngineClientDefaultConfig(&cfg.Rollup),
client.NewInstrumentedRPC(rpcClient, n.metrics), n.log, n.metrics.L2SourceCache, rpcCfg,
)
if err != nil {
return fmt.Errorf("failed to create Engine client: %w", err)
......@@ -207,17 +205,14 @@ func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger
}
func (n *OpNode) initRPCSync(ctx context.Context, cfg *Config) error {
rpcSyncClient, trustRPC, err := cfg.L2Sync.Setup(ctx, n.log)
rpcSyncClient, rpcCfg, err := cfg.L2Sync.Setup(ctx, n.log, &cfg.Rollup)
if err != nil {
return fmt.Errorf("failed to setup L2 execution-engine RPC client for backup sync: %w", err)
}
if rpcSyncClient == nil { // if no RPC client is configured to sync from, then don't add the RPC sync client
return nil
}
config := sources.SyncClientDefaultConfig(&cfg.Rollup, trustRPC)
syncClient, err := sources.NewSyncClient(n.OnUnsafeL2Payload, rpcSyncClient, n.log, n.metrics.L2SourceCache, config)
syncClient, err := sources.NewSyncClient(n.OnUnsafeL2Payload, rpcSyncClient, n.log, n.metrics.L2SourceCache, rpcCfg)
if err != nil {
return fmt.Errorf("failed to create sync client: %w", err)
}
......
......@@ -115,7 +115,7 @@ func TestOutputAtBlock(t *testing.T) {
require.NoError(t, server.Start())
defer server.Stop()
client, err := rpcclient.DialRPCClientWithBackoff(context.Background(), log, "http://"+server.Addr().String())
client, err := rpcclient.NewRPC(context.Background(), log, "http://"+server.Addr().String(), rpcclient.WithDialBackoff(3))
require.NoError(t, err)
var out *eth.OutputResponse
......@@ -147,7 +147,7 @@ func TestVersion(t *testing.T) {
assert.NoError(t, server.Start())
defer server.Stop()
client, err := rpcclient.DialRPCClientWithBackoff(context.Background(), log, "http://"+server.Addr().String())
client, err := rpcclient.NewRPC(context.Background(), log, "http://"+server.Addr().String(), rpcclient.WithDialBackoff(3))
assert.NoError(t, err)
var out string
......@@ -189,7 +189,7 @@ func TestSyncStatus(t *testing.T) {
assert.NoError(t, server.Start())
defer server.Stop()
client, err := rpcclient.DialRPCClientWithBackoff(context.Background(), log, "http://"+server.Addr().String())
client, err := rpcclient.NewRPC(context.Background(), log, "http://"+server.Addr().String(), rpcclient.WithDialBackoff(3))
assert.NoError(t, err)
var out *eth.SyncStatus
......
......@@ -117,6 +117,11 @@ func NewScorer(peerGater PeerGater, peerStore Peerstore, metricer GossipMetricer
func (s *scorer) SnapshotHook() pubsub.ExtendedPeerScoreInspectFn {
return func(m map[peer.ID]*pubsub.PeerScoreSnapshot) {
scoreMap := make(map[string]float64)
// Zero out all bands.
for _, b := range s.bandScoreThresholds.bands {
scoreMap[b.band] = 0
}
// Now set the new scores.
for id, snap := range m {
band := s.bandScoreThresholds.Bucket(snap.Score)
scoreMap[band] += 1
......
......@@ -29,7 +29,7 @@ func (testSuite *PeerScorerTestSuite) SetupTest() {
testSuite.mockGater = &p2pMocks.PeerGater{}
testSuite.mockStore = &p2pMocks.Peerstore{}
testSuite.mockMetricer = &p2pMocks.GossipMetricer{}
bandScorer, err := p2p.NewBandScorer("0:graylist;")
bandScorer, err := p2p.NewBandScorer("-40:graylist;0:friend;")
testSuite.NoError(err)
testSuite.bandScorer = bandScorer
testSuite.logger = testlog.Logger(testSuite.T(), log.LvlError)
......@@ -76,12 +76,13 @@ func (testSuite *PeerScorerTestSuite) TestScorer_SnapshotHook() {
inspectFn := scorer.SnapshotHook()
// Mock the peer gater call
testSuite.mockGater.On("Update", peer.ID("peer1"), float64(-100)).Return(nil)
testSuite.mockGater.On("Update", peer.ID("peer1"), float64(-100)).Return(nil).Once()
// The metricer should then be called with the peer score band map
testSuite.mockMetricer.On("SetPeerScores", map[string]float64{
"friend": 0,
"graylist": 1,
}).Return(nil)
}).Return(nil).Once()
// Apply the snapshot
snapshotMap := map[peer.ID]*pubsub.PeerScoreSnapshot{
......@@ -90,6 +91,23 @@ func (testSuite *PeerScorerTestSuite) TestScorer_SnapshotHook() {
},
}
inspectFn(snapshotMap)
// Change the peer score now to a different band
testSuite.mockGater.On("Update", peer.ID("peer1"), float64(0)).Return(nil).Once()
// The metricer should then be called with the peer score band map
testSuite.mockMetricer.On("SetPeerScores", map[string]float64{
"friend": 1,
"graylist": 0,
}).Return(nil).Once()
// Apply the snapshot
snapshotMap = map[peer.ID]*pubsub.PeerScoreSnapshot{
peer.ID("peer1"): {
Score: 0,
},
}
inspectFn(snapshotMap)
}
// TestScorer_SnapshotHookBlocksPeer tests running the snapshot hook on the peer scorer with a peer score below the threshold.
......@@ -109,6 +127,7 @@ func (testSuite *PeerScorerTestSuite) TestScorer_SnapshotHookBlocksPeer() {
// The metricer should then be called with the peer score band map
testSuite.mockMetricer.On("SetPeerScores", map[string]float64{
"friend": 0,
"graylist": 1,
}).Return(nil)
......
......@@ -109,6 +109,9 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain
return nil, fmt.Errorf("failed to fetch current L2 forkchoice state: %w", err)
}
lgr.Info("Loaded current L2 heads", "unsafe", result.Unsafe, "safe", result.Safe, "finalized", result.Finalized,
"unsafe_origin", result.Unsafe.L1Origin, "unsafe_origin", result.Safe.L1Origin)
// Remember original unsafe block to determine reorg depth
prevUnsafe := result.Unsafe
......@@ -134,6 +137,7 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain
// Exit, find-sync start should start over, to move to an available L1 chain with block-by-number / not-found case.
return nil, fmt.Errorf("failed to retrieve L1 block: %w", err)
}
lgr.Info("Walking back L1Block by hash", "curr", l1Block, "next", b, "l2block", n)
l1Block = b
ahead = false
} else if l1Block == (eth.L1BlockRef{}) || n.L1Origin.Hash != l1Block.Hash {
......@@ -145,9 +149,10 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain
}
l1Block = b
ahead = notFound
lgr.Info("Walking back L1Block by number", "curr", l1Block, "next", b, "l2block", n)
}
lgr.Trace("walking sync start", "number", n.Number)
lgr.Trace("walking sync start", "l2block", n)
// Don't walk past genesis. If we were at the L2 genesis, but could not find its L1 origin,
// the L2 chain is building on the wrong L1 branch.
......@@ -201,6 +206,8 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain
// Don't traverse further than the finalized head to find a safe head
if n.Number == result.Finalized.Number {
lgr.Info("Hit finalized L2 head, returning immediately", "unsafe", result.Unsafe, "safe", result.Safe,
"finalized", result.Finalized, "unsafe_origin", result.Unsafe.L1Origin, "unsafe_origin", result.Safe.L1Origin)
result.Safe = n
return result, nil
}
......
......@@ -95,9 +95,12 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
func NewL1EndpointConfig(ctx *cli.Context) *node.L1EndpointConfig {
return &node.L1EndpointConfig{
L1NodeAddr: ctx.GlobalString(flags.L1NodeAddr.Name),
L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name),
L1RPCKind: sources.RPCProviderKind(strings.ToLower(ctx.GlobalString(flags.L1RPCProviderKind.Name))),
L1NodeAddr: ctx.GlobalString(flags.L1NodeAddr.Name),
L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name),
L1RPCKind: sources.RPCProviderKind(strings.ToLower(ctx.GlobalString(flags.L1RPCProviderKind.Name))),
RateLimit: ctx.GlobalFloat64(flags.L1RPCRateLimit.Name),
BatchSize: ctx.GlobalInt(flags.L1RPCMaxBatchSize.Name),
HttpPollInterval: ctx.Duration(flags.L1HTTPPollInterval.Name),
}
}
......
......@@ -13,6 +13,7 @@ import (
"context"
"fmt"
"math/big"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
......@@ -56,6 +57,11 @@ type EthClientConfig struct {
// RPCProviderKind is a hint at what type of RPC provider we are dealing with
RPCProviderKind RPCProviderKind
// Method reset duration defines how long we stick to available RPC methods,
// till we re-attempt the user-preferred methods.
// If this is 0 then the client does not fall back to less optimal but available methods.
MethodResetDuration time.Duration
}
func (c *EthClientConfig) Check() error {
......@@ -118,9 +124,25 @@ type EthClient struct {
// This may be modified concurrently, but we don't lock since it's a single
// uint64 that's not critical (fine to miss or mix up a modification)
availableReceiptMethods ReceiptsFetchingMethod
// lastMethodsReset tracks when availableReceiptMethods was last reset.
// When receipt-fetching fails it falls back to available methods,
// but periodically it will try to reset to the preferred optimal methods.
lastMethodsReset time.Time
// methodResetDuration defines how long we take till we reset lastMethodsReset
methodResetDuration time.Duration
}
func (s *EthClient) PickReceiptsMethod(txCount uint64) ReceiptsFetchingMethod {
if now := time.Now(); now.Sub(s.lastMethodsReset) > s.methodResetDuration {
m := AvailableReceiptsFetchingMethods(s.provKind)
if s.availableReceiptMethods != m {
s.log.Warn("resetting back RPC preferences, please review RPC provider kind setting", "kind", s.provKind.String())
}
s.availableReceiptMethods = m
s.lastMethodsReset = now
}
return PickBestReceiptsFetchingMethod(s.provKind, s.availableReceiptMethods, txCount)
}
......@@ -128,7 +150,7 @@ func (s *EthClient) OnReceiptsMethodErr(m ReceiptsFetchingMethod, err error) {
if unusableMethod(err) {
// clear the bit of the method that errored
s.availableReceiptMethods &^= m
s.log.Warn("failed to use selected RPC method for receipt fetching, falling back to alternatives",
s.log.Warn("failed to use selected RPC method for receipt fetching, temporarily falling back to alternatives",
"provider_kind", s.provKind, "failed_method", m, "fallback", s.availableReceiptMethods, "err", err)
} else {
s.log.Debug("failed to use selected RPC method for receipt fetching, but method does appear to be available, so we continue to use it",
......@@ -155,6 +177,8 @@ func NewEthClient(client client.RPC, log log.Logger, metrics caching.Metrics, co
headersCache: caching.NewLRUCache(metrics, "headers", config.HeadersCacheSize),
payloadsCache: caching.NewLRUCache(metrics, "payloads", config.PayloadsCacheSize),
availableReceiptMethods: AvailableReceiptsFetchingMethods(config.RPCProviderKind),
lastMethodsReset: time.Now(),
methodResetDuration: config.MethodResetDuration,
}, nil
}
......
......@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"strings"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
......@@ -40,6 +41,7 @@ func L1ClientDefaultConfig(config *rollup.Config, trustRPC bool, kind RPCProvide
TrustRPC: trustRPC,
MustBePostMerge: false,
RPCProviderKind: kind,
MethodResetDuration: time.Minute,
},
// Not bounded by span, to cover find-sync-start range fully for speedy recovery after errors.
L1BlockRefsCacheSize: fullSpan,
......
......@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"strings"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
......@@ -50,6 +51,7 @@ func L2ClientDefaultConfig(config *rollup.Config, trustRPC bool) *L2ClientConfig
TrustRPC: trustRPC,
MustBePostMerge: true,
RPCProviderKind: RPCKindBasic,
MethodResetDuration: time.Minute,
},
// Not bounded by span, to cover find-sync-start range fully for speedy recovery after errors.
L2BlockRefsCacheSize: fullSpan,
......
......@@ -6,6 +6,7 @@ import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/eth"
......@@ -85,6 +86,7 @@ func (e *methodNotFoundError) Error() string {
type ReceiptsTestCase struct {
name string
providerKind RPCProviderKind
staticMethod bool
setup func(t *testing.T) (*rpcBlock, []ReceiptsRequest)
}
......@@ -142,6 +144,10 @@ func (tc *ReceiptsTestCase) Run(t *testing.T) {
TrustRPC: false,
MustBePostMerge: false,
RPCProviderKind: tc.providerKind,
MethodResetDuration: time.Minute,
}
if tc.staticMethod { // if static, instantly reset, for fast clock-independent testing
testCfg.MethodResetDuration = 0
}
logger := testlog.Logger(t, log.LvlError)
ethCl, err := NewEthClient(client.NewBaseRPCClient(cl), logger, nil, testCfg)
......@@ -226,6 +232,12 @@ func TestEthClient_FetchReceipts(t *testing.T) {
providerKind: RPCKindAlchemy,
setup: fallbackCase(30, AlchemyGetTransactionReceipts),
},
{
name: "alchemy sticky",
providerKind: RPCKindAlchemy,
staticMethod: true,
setup: fallbackCase(30, AlchemyGetTransactionReceipts, AlchemyGetTransactionReceipts),
},
{
name: "alchemy fallback 1",
providerKind: RPCKindAlchemy,
......
......@@ -169,12 +169,18 @@ func NewL2OutputSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger, m metrics.Me
return nil, err
}
chainID, err := l1Client.ChainID(context.Background())
if err != nil {
return nil, err
}
txMgrConfg := txmgr.Config{
ResubmissionTimeout: cfg.ResubmissionTimeout,
ReceiptQueryInterval: time.Second,
NumConfirmations: cfg.NumConfirmations,
SafeAbortNonceTooLowCount: cfg.SafeAbortNonceTooLowCount,
From: fromAddress,
ChainID: chainID,
}
proposerCfg := Config{
......
......@@ -29,6 +29,9 @@ func Do(maxAttempts int, strategy Strategy, op Operation) error {
}
func DoCtx(ctx context.Context, maxAttempts int, strategy Strategy, op Operation) error {
if maxAttempts < 1 {
return fmt.Errorf("need at least 1 attempt to run op, but have %d max attempts", maxAttempts)
}
var attempt int
reattemptCh := make(chan struct{}, 1)
......
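The added guard makes `DoCtx` fail fast when `maxAttempts < 1` rather than silently running the operation zero times. A self-contained sketch of the same guard around a simple retry loop; this is illustrative only, not the op-service `retry` package itself:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// doWithRetry mirrors the new guard: reject maxAttempts < 1 up front instead of
// doing nothing and returning nil.
func doWithRetry(ctx context.Context, maxAttempts int, op func() error) error {
	if maxAttempts < 1 {
		return fmt.Errorf("need at least 1 attempt to run op, but have %d max attempts", maxAttempts)
	}
	var err error
	for attempt := 0; attempt < maxAttempts; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(100 * time.Millisecond): // fixed backoff just for the sketch
		}
	}
	return err
}

func main() {
	err := doWithRetry(context.Background(), 0, func() error { return errors.New("boom") })
	fmt.Println(err) // need at least 1 attempt to run op, but have 0 max attempts
}
```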
// Code generated by mockery v2.22.1. DO NOT EDIT.
package mocks
import (
context "context"
txmgr "github.com/ethereum-optimism/optimism/op-service/txmgr"
mock "github.com/stretchr/testify/mock"
types "github.com/ethereum/go-ethereum/core/types"
)
// TxManager is an autogenerated mock type for the TxManager type
type TxManager struct {
mock.Mock
}
// CraftTx provides a mock function with given fields: ctx, candidate
func (_m *TxManager) CraftTx(ctx context.Context, candidate txmgr.TxCandidate) (*types.Transaction, error) {
ret := _m.Called(ctx, candidate)
var r0 *types.Transaction
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, txmgr.TxCandidate) (*types.Transaction, error)); ok {
return rf(ctx, candidate)
}
if rf, ok := ret.Get(0).(func(context.Context, txmgr.TxCandidate) *types.Transaction); ok {
r0 = rf(ctx, candidate)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.Transaction)
}
}
if rf, ok := ret.Get(1).(func(context.Context, txmgr.TxCandidate) error); ok {
r1 = rf(ctx, candidate)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Send provides a mock function with given fields: ctx, tx
func (_m *TxManager) Send(ctx context.Context, tx *types.Transaction) (*types.Receipt, error) {
ret := _m.Called(ctx, tx)
var r0 *types.Receipt
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) (*types.Receipt, error)); ok {
return rf(ctx, tx)
}
if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) *types.Receipt); ok {
r0 = rf(ctx, tx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.Receipt)
}
}
if rf, ok := ret.Get(1).(func(context.Context, *types.Transaction) error); ok {
r1 = rf(ctx, tx)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
type mockConstructorTestingTNewTxManager interface {
mock.TestingT
Cleanup(func())
}
// NewTxManager creates a new instance of TxManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewTxManager(t mockConstructorTestingTNewTxManager) *TxManager {
mock := &TxManager{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
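Because the mock above is generated by mockery and backed by testify, a caller's test typically stubs `CraftTx`/`Send` with `On(...).Return(...)`. A hedged sketch of that usage; the `mocks` import path is assumed from the mockery output directory and is not itself shown in this diff:

```go
package txmgr_test

import (
	"context"
	"testing"

	"github.com/ethereum-optimism/optimism/op-service/txmgr"
	"github.com/ethereum-optimism/optimism/op-service/txmgr/mocks" // assumed mockery output path
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestCallerUsesMockTxManager(t *testing.T) {
	m := mocks.NewTxManager(t) // registers AssertExpectations via t.Cleanup
	wantTx := types.NewTx(&types.DynamicFeeTx{})

	// Expect exactly one CraftTx call with any context and any candidate.
	m.On("CraftTx", mock.Anything, mock.AnythingOfType("txmgr.TxCandidate")).
		Return(wantTx, nil).Once()

	tx, err := m.CraftTx(context.Background(), txmgr.TxCandidate{})
	require.NoError(t, err)
	require.Equal(t, wantTx, tx)
}
```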
......@@ -3,14 +3,17 @@ package txmgr
import (
"context"
"errors"
"fmt"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
)
......@@ -38,6 +41,15 @@ type Config struct {
// attempted.
ResubmissionTimeout time.Duration
// ChainID is the chain ID of the L1 chain.
ChainID *big.Int
// NetworkTimeout is the allowed duration for a single network request.
// This is intended to be used for network requests that can be replayed.
//
// If not set, this will default to 2 seconds.
NetworkTimeout time.Duration
// RequireQueryInterval is the interval at which the tx manager will
// query the backend to check for confirmations after a tx at a
// specific gas price has been published.
......@@ -59,6 +71,8 @@ type Config struct {
// TxManager is an interface that allows callers to reliably publish txs,
// bumping the gas price if needed, and obtain the receipt of the resulting tx.
//
//go:generate mockery --name TxManager --output ./mocks
type TxManager interface {
// Send is used to publish a transaction with incrementally higher gas
// prices until the transaction eventually confirms. This method blocks
......@@ -69,6 +83,9 @@ type TxManager interface {
//
// NOTE: Send should be called by AT MOST one caller at a time.
Send(ctx context.Context, tx *types.Transaction) (*types.Receipt, error)
// CraftTx is used to craft a transaction using a [TxCandidate].
CraftTx(ctx context.Context, candidate TxCandidate) (*types.Transaction, error)
}
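Taken together, the interface implies a two-step flow for callers such as the batcher and proposer: build a candidate, `CraftTx` it, then `Send` the result. A sketch of that pattern, assuming a `TxManager` instance has already been constructed:

```go
package txexample

import (
	"context"
	"fmt"

	"github.com/ethereum-optimism/optimism/op-service/txmgr"
	"github.com/ethereum/go-ethereum/core/types"
)

// submit shows the intended call pattern: craft a signed, fee-bounded tx from a
// candidate, then hand it to Send, which blocks until the tx confirms or errors.
func submit(ctx context.Context, mgr txmgr.TxManager, candidate txmgr.TxCandidate) (*types.Receipt, error) {
	tx, err := mgr.CraftTx(ctx, candidate)
	if err != nil {
		return nil, fmt.Errorf("failed to craft tx: %w", err)
	}
	// Per the interface docs: Send should be called by AT MOST one caller at a time.
	return mgr.Send(ctx, tx)
}
```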
// ETHBackend is the set of methods that the transaction manager uses to resubmit gas & determine
......@@ -89,18 +106,119 @@ type ETHBackend interface {
// TODO(CLI-3318): Maybe need a generic interface to support different RPC providers
HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
SuggestGasTipCap(ctx context.Context) (*big.Int, error)
// NonceAt returns the account nonce of the given account.
// The block number can be nil, in which case the nonce is taken from the latest known block.
NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error)
// EstimateGas returns an estimate of the amount of gas needed to execute the given
// transaction against the current pending block.
EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64, error)
}
// SimpleTxManager is an implementation of TxManager that performs linear fee
// bumping of a tx until it confirms.
type SimpleTxManager struct {
Config // embed the config directly
name string
Config // embed the config directly
name string
chainID *big.Int
backend ETHBackend
l log.Logger
}
// TxCandidate is a transaction candidate that can be submitted to ask the
// [TxManager] to construct a transaction with gas price bounds.
type TxCandidate struct {
// TxData is the transaction data to be used in the constructed tx.
TxData []byte
// To is the recipient of the constructed tx.
To common.Address
// GasLimit is the gas limit to be used in the constructed tx.
GasLimit uint64
// From is the sender (or `from`) of the constructed tx.
From common.Address
}
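A `TxCandidate` carries only what the manager needs; fees, nonce, and signing are filled in later by `CraftTx`. An illustrative candidate with placeholder values (mirroring the test fixture further down), leaving `GasLimit` at zero so gas is estimated:

```go
package txexample

import (
	"github.com/ethereum-optimism/optimism/op-service/txmgr"
	"github.com/ethereum/go-ethereum/common"
)

// exampleCandidate builds a placeholder candidate; GasLimit is left at zero so
// CraftTx falls back to estimating gas against the backend.
func exampleCandidate() txmgr.TxCandidate {
	return txmgr.TxCandidate{
		To:     common.HexToAddress("0x42000000000000000000000000000000000000ff"),
		TxData: []byte{0x00, 0x01, 0x02},
		From:   common.HexToAddress("0x000000000000000000000000000000000000dead"),
	}
}
```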
// calcGasTipAndFeeCap queries L1 to determine what a suitable miner tip & basefee limit would be for timely inclusion
func (m *SimpleTxManager) calcGasTipAndFeeCap(ctx context.Context) (gasTipCap *big.Int, gasFeeCap *big.Int, err error) {
childCtx, cancel := context.WithTimeout(ctx, m.Config.NetworkTimeout)
gasTipCap, err = m.backend.SuggestGasTipCap(childCtx)
cancel()
if err != nil {
return nil, nil, fmt.Errorf("failed to get suggested gas tip cap: %w", err)
}
if gasTipCap == nil {
m.l.Warn("unexpected unset gasTipCap, using default 2 gwei")
gasTipCap = new(big.Int).SetUint64(params.GWei * 2)
}
childCtx, cancel = context.WithTimeout(ctx, m.Config.NetworkTimeout)
head, err := m.backend.HeaderByNumber(childCtx, nil)
cancel()
if err != nil || head == nil {
return nil, nil, fmt.Errorf("failed to get L1 head block for fee cap: %w", err)
}
if head.BaseFee == nil {
return nil, nil, fmt.Errorf("failed to get L1 basefee in block %d for fee cap", head.Number)
}
gasFeeCap = CalcGasFeeCap(head.BaseFee, gasTipCap)
return gasTipCap, gasFeeCap, nil
}
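`CalcGasFeeCap` itself is not part of this diff; the conventional heuristic it is assumed to implement is `gasFeeCap = gasTipCap + 2*baseFee`, which leaves headroom for the basefee to double before the cap is exceeded. A worked example under that assumption:

```go
package main

import (
	"fmt"
	"math/big"
)

// calcGasFeeCap shows the assumed heuristic: cap = tip + 2*baseFee.
func calcGasFeeCap(baseFee, gasTipCap *big.Int) *big.Int {
	return new(big.Int).Add(gasTipCap, new(big.Int).Mul(baseFee, big.NewInt(2)))
}

func main() {
	baseFee := big.NewInt(30_000_000_000) // 30 gwei
	tip := big.NewInt(2_000_000_000)      // 2 gwei
	fmt.Println(calcGasFeeCap(baseFee, tip)) // 62000000000 wei, i.e. 62 gwei
}
```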
// CraftTx creates a signed transaction to the [TxCandidate.To] address.
// It queries L1 for the current fee market conditions as well as for the nonce.
// NOTE: This method SHOULD NOT publish the resulting transaction.
// NOTE: If the [TxCandidate.GasLimit] is non-zero, it will be used as the transaction's gas.
// NOTE: Otherwise, the [SimpleTxManager] will query the specified backend for an estimate.
func (m *SimpleTxManager) CraftTx(ctx context.Context, candidate TxCandidate) (*types.Transaction, error) {
gasTipCap, gasFeeCap, err := m.calcGasTipAndFeeCap(ctx)
if err != nil {
return nil, err
}
// Fetch the sender's nonce from the latest known block (nil `blockNumber`)
childCtx, cancel := context.WithTimeout(ctx, m.Config.NetworkTimeout)
defer cancel()
nonce, err := m.backend.NonceAt(childCtx, candidate.From, nil)
if err != nil {
return nil, fmt.Errorf("failed to get nonce: %w", err)
}
rawTx := &types.DynamicFeeTx{
ChainID: m.chainID,
Nonce: nonce,
To: &candidate.To,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Data: candidate.TxData,
}
m.l.Info("creating tx", "to", rawTx.To, "from", candidate.From)
// If the gas limit is set, we can use that as the gas
if candidate.GasLimit != 0 {
rawTx.Gas = candidate.GasLimit
} else {
// Calculate the intrinsic gas for the transaction
gas, err := m.backend.EstimateGas(ctx, ethereum.CallMsg{
From: candidate.From,
To: &candidate.To,
GasFeeCap: gasFeeCap,
GasTipCap: gasTipCap,
Data: rawTx.Data,
})
if err != nil {
return nil, fmt.Errorf("failed to estimate gas: %w", err)
}
rawTx.Gas = gas
}
ctx, cancel = context.WithTimeout(ctx, m.Config.NetworkTimeout)
defer cancel()
return m.Signer(ctx, candidate.From, types.NewTx(rawTx))
}
// IncreaseGasPrice takes the previous transaction and, if warranted, clones and re-signs it with a higher tip.
// If the tip + basefee suggested by the network are not greater than the previous values, the same transaction
// will be returned. If they are greater, this function will ensure that they are at least greater by 15% than
......@@ -196,8 +314,12 @@ func NewSimpleTxManager(name string, l log.Logger, cfg Config, backend ETHBacken
if cfg.NumConfirmations == 0 {
panic("txmgr: NumConfirmations cannot be zero")
}
if cfg.NetworkTimeout == 0 {
cfg.NetworkTimeout = 2 * time.Second
}
return &SimpleTxManager{
chainID: cfg.ChainID,
name: name,
Config: cfg,
backend: backend,
......
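With the new default in `NewSimpleTxManager`, callers may leave `NetworkTimeout` unset and get a 2-second per-request timeout. A sketch of a config built from fields visible in this diff; the values are placeholders, not recommendations:

```go
package txexample

import (
	"math/big"
	"time"

	"github.com/ethereum-optimism/optimism/op-service/txmgr"
)

// exampleConfig leaves NetworkTimeout unset so NewSimpleTxManager applies the
// 2-second default; the other values are placeholders only.
func exampleConfig(chainID *big.Int) txmgr.Config {
	return txmgr.Config{
		ResubmissionTimeout:       30 * time.Second,
		ReceiptQueryInterval:      time.Second,
		NumConfirmations:          1,
		SafeAbortNonceTooLowCount: 3,
		ChainID:                   chainID,
		// NetworkTimeout omitted: NewSimpleTxManager defaults it to 2 * time.Second.
	}
}
```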
......@@ -14,6 +14,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/testutils"
opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
......@@ -50,6 +51,18 @@ func newTestHarness(t *testing.T) *testHarness {
return newTestHarnessWithConfig(t, configWithNumConfs(1))
}
// createTxCandidate creates a mock [TxCandidate].
func (h testHarness) createTxCandidate() TxCandidate {
inbox := common.HexToAddress("0x42000000000000000000000000000000000000ff")
sender := common.HexToAddress("0xdeadbeef")
return TxCandidate{
To: inbox,
TxData: []byte{0x00, 0x01, 0x02},
From: sender,
GasLimit: uint64(1337),
}
}
func configWithNumConfs(numConfirmations uint64) Config {
return Config{
ResubmissionTimeout: time.Second,
......@@ -175,6 +188,10 @@ func (b *mockBackend) HeaderByNumber(ctx context.Context, number *big.Int) (*typ
}, nil
}
func (b *mockBackend) EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64, error) {
return b.g.basefee().Uint64(), nil
}
func (b *mockBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
tip, _ := b.g.sample()
return tip, nil
......@@ -185,7 +202,14 @@ func (b *mockBackend) SendTransaction(ctx context.Context, tx *types.Transaction
panic("set sender function was not set")
}
return b.send(ctx, tx)
}
func (b *mockBackend) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) {
return 0, nil
}
func (*mockBackend) ChainID(ctx context.Context) (*big.Int, error) {
return big.NewInt(1), nil
}
// TransactionReceipt queries the mockBackend for a mined txHash. If none is
......@@ -330,6 +354,51 @@ func TestTxMgrBlocksOnFailingRpcCalls(t *testing.T) {
require.Nil(t, receipt)
}
// TestTxMgr_CraftTx ensures that the tx manager will create transactions as expected.
func TestTxMgr_CraftTx(t *testing.T) {
t.Parallel()
h := newTestHarness(t)
candidate := h.createTxCandidate()
// Craft the transaction.
gasTipCap, gasFeeCap := h.gasPricer.feesForEpoch(h.gasPricer.epoch + 1)
tx, err := h.mgr.CraftTx(context.Background(), candidate)
require.Nil(t, err)
require.NotNil(t, tx)
// Validate the gas tip cap and fee cap.
require.Equal(t, gasTipCap, tx.GasTipCap())
require.Equal(t, gasFeeCap, tx.GasFeeCap())
// Validate the nonce was set correctly using the backend.
require.Zero(t, tx.Nonce())
// Check that the gas was set using the gas limit.
require.Equal(t, candidate.GasLimit, tx.Gas())
}
// TestTxMgr_EstimateGas ensures that the tx manager estimates the gas
// when the candidate's gas limit is zero in [CraftTx].
func TestTxMgr_EstimateGas(t *testing.T) {
t.Parallel()
h := newTestHarness(t)
candidate := h.createTxCandidate()
// Set the gas limit to zero to trigger gas estimation.
candidate.GasLimit = 0
// Gas estimate
gasEstimate := h.gasPricer.baseBaseFee.Uint64()
// Craft the transaction.
tx, err := h.mgr.CraftTx(context.Background(), candidate)
require.Nil(t, err)
require.NotNil(t, tx)
// Check that the gas was estimated correctly.
require.Equal(t, gasEstimate, tx.Gas())
}
// TestTxMgrOnlyOnePublicationSucceeds asserts that the tx manager will return a
// receipt so long as at least one of the publications is able to succeed with a
// simulated rpc failure.
......@@ -577,6 +646,18 @@ func (b *failingBackend) SuggestGasTipCap(_ context.Context) (*big.Int, error) {
return b.gasTip, nil
}
func (b *failingBackend) EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64, error) {
return b.baseFee.Uint64(), nil
}
func (b *failingBackend) NonceAt(_ context.Context, _ common.Address, _ *big.Int) (uint64, error) {
return 0, errors.New("unimplemented")
}
func (b *failingBackend) ChainID(ctx context.Context) (*big.Int, error) {
return nil, errors.New("unimplemented")
}
// TestWaitMinedReturnsReceiptAfterFailure asserts that WaitMined is able to
// recover from failed calls to the backend. It uses the failingBackend to
// simulate an rpc call failure, followed by the successful return of a receipt.
......
......@@ -123,6 +123,8 @@ services:
OP_BATCHER_L1_ETH_RPC: http://l1:8545
OP_BATCHER_L2_ETH_RPC: http://l2:8545
OP_BATCHER_ROLLUP_RPC: http://op-node:8545
TX_MANAGER_TIMEOUT: 10m
OFFLINE_GAS_ESTIMATION: false
OP_BATCHER_MAX_CHANNEL_DURATION: 1
OP_BATCHER_MAX_L1_TX_SIZE_BYTES: 120000
OP_BATCHER_TARGET_L1_TX_SIZE_BYTES: 100000
......
......@@ -117,6 +117,10 @@ FROM base as replica-healthcheck
WORKDIR /opt/optimism/packages/replica-healthcheck
ENTRYPOINT ["npm", "run", "start"]
FROM base as balance-mon
WORKDIR /opt/optimism/packages/chain-mon
ENTRYPOINT ["npm", "run", "start:balance-mon"]
FROM base as drippie-mon
WORKDIR /opt/optimism/packages/chain-mon
ENTRYPOINT ["npm", "run", "start:drippie-mon"]
......
###############################################################################
# ↓ balance-mon ↓ #
###############################################################################
# RPC pointing to network to monitor balances on
BALANCE_MON__RPC=
# JSON array in the format [{ "address": <address>, "nickname": <nickname> }, ... ]
BALANCE_MON__ACCOUNTS=
###############################################################################
# ↓ drippie-mon ↓ #
###############################################################################
......
......@@ -9,6 +9,7 @@
"dist/*"
],
"scripts": {
"start:balance-mon": "ts-node ./src/balance-mon/service.ts",
"start:drippie-mon": "ts-node ./src/drippie-mon/service.ts",
"start:wd-mon": "ts-node ./src/wd-mon/service.ts",
"test:coverage": "echo 'No tests defined.'",
......
import {
BaseServiceV2,
StandardOptions,
Gauge,
Counter,
validators,
} from '@eth-optimism/common-ts'
import { Provider } from '@ethersproject/abstract-provider'
import { ethers } from 'ethers'
import { version } from '../../package.json'
type BalanceMonOptions = {
rpc: Provider
accounts: string
}
type BalanceMonMetrics = {
balances: Gauge
unexpectedRpcErrors: Counter
}
type BalanceMonState = {
accounts: Array<{ address: string; nickname: string }>
}
export class BalanceMonService extends BaseServiceV2<
BalanceMonOptions,
BalanceMonMetrics,
BalanceMonState
> {
constructor(options?: Partial<BalanceMonOptions & StandardOptions>) {
super({
version,
name: 'balance-mon',
loop: true,
options: {
loopIntervalMs: 60_000,
...options,
},
optionsSpec: {
rpc: {
validator: validators.provider,
desc: 'Provider for network to monitor balances on',
},
accounts: {
validator: validators.str,
desc: 'JSON array of [{ address, nickname }] to monitor balances of',
public: true,
},
},
metricsSpec: {
balances: {
type: Gauge,
desc: 'Balances of addresses',
labels: ['address', 'nickname'],
},
unexpectedRpcErrors: {
type: Counter,
desc: 'Number of unexpected RPC errors',
labels: ['section', 'name'],
},
},
})
}
protected async init(): Promise<void> {
this.state.accounts = JSON.parse(this.options.accounts)
}
protected async main(): Promise<void> {
for (const account of this.state.accounts) {
let balance: ethers.BigNumber
try {
balance = await this.options.rpc.getBalance(account.address)
} catch (err) {
this.logger.info(`got unexpected RPC error`, {
section: 'balances',
name: 'getBalance',
err,
})
this.metrics.unexpectedRpcErrors.inc({
section: 'balances',
name: 'getBalance',
})
continue
}
this.logger.info(`got balance`, {
address: account.address,
nickname: account.nickname,
balance: balance.toString(),
})
// Parse the balance as an integer instead of via toNumber() to avoid ethers throwing
// an error. We might get rounding errors but we don't need perfect precision here, just a
// generally accurate sense for what the current balance is.
this.metrics.balances.set(
{ address: account.address, nickname: account.nickname },
parseInt(balance.toString(), 10)
)
}
}
}
if (require.main === module) {
const service = new BalanceMonService()
service.run()
}
export * from './balance-mon/service'
export * from './drippie-mon/service'
export * from './wd-mon/service'
......@@ -21,7 +21,7 @@
"l2OutputOracleSubmissionInterval": 120,
"l2OutputOracleStartingBlockNumber": 0,
"l2OutputOracleStartingTimestamp": "TIMESTAMP",
"l2OutputOracleStartingTimestamp": TIMESTAMP,
"l2OutputOracleProposer": "PROPOSER",
"l2OutputOracleChallenger": "ADMIN",
......