Commit 26235d2a authored by mergify[bot], committed by GitHub

Merge branch 'develop' into qbzzt/230322-pause-unpause

parents ffb28ca1 70da7ad1
---
'@eth-optimism/batch-submitter-service': patch
---
Allow deposit-only batches
---
'@eth-optimism/chain-mon': minor
---
Introduces the balance-mon service to chain-mon.
......@@ -18,6 +18,7 @@ jobs:
l2geth: ${{ steps.packages.outputs.l2geth }}
message-relayer: ${{ steps.packages.outputs.message-relayer }}
fault-detector: ${{ steps.packages.outputs.fault-detector }}
balance-mon: ${{ steps.packages.outputs.balance-mon }}
drippie-mon: ${{ steps.packages.outputs.drippie-mon }}
wd-mon: ${{ steps.packages.outputs.wd-mon }}
data-transport-layer: ${{ steps.packages.outputs.data-transport-layer }}
......@@ -230,6 +231,33 @@ jobs:
push: true
tags: ethereumoptimism/fault-detector:${{ needs.canary-publish.outputs.canary-docker-tag }}
balance-mon:
name: Publish Balance Monitor Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.balance-mon != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: balance-mon
push: true
tags: ethereumoptimism/balance-mon:${{ needs.canary-publish.outputs.canary-docker-tag }}
drippie-mon:
name: Publish Drippie Monitor Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
......
......@@ -14,6 +14,7 @@ jobs:
l2geth: ${{ steps.packages.outputs.l2geth }}
message-relayer: ${{ steps.packages.outputs.message-relayer }}
fault-detector: ${{ steps.packages.outputs.fault-detector }}
balance-mon: ${{ steps.packages.outputs.balance-mon }}
drippie-mon: ${{ steps.packages.outputs.drippie-mon }}
wd-mon: ${{ steps.packages.outputs.wd-mon }}
data-transport-layer: ${{ steps.packages.outputs.data-transport-layer }}
......@@ -364,6 +365,33 @@ jobs:
push: true
tags: ethereumoptimism/wd-mon:${{ needs.release.outputs.wd-mon }},ethereumoptimism/wd-mon:latest
balance-mon:
name: Publish Balance Monitor Version ${{ needs.release.outputs.balance-mon }}
needs: release
if: needs.release.outputs.balance-mon != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.packages
target: balance-mon
push: true
tags: ethereumoptimism/balance-mon:${{ needs.release.outputs.balance-mon }},ethereumoptimism/balance-mon:latest
drippie-mon:
name: Publish Drippie Monitor Version ${{ needs.release.outputs.drippie-mon }}
needs: release
......
......@@ -66,6 +66,7 @@ You'll need the following:
* [Yarn](https://classic.yarnpkg.com/en/docs/install)
* [Docker](https://docs.docker.com/get-docker/)
* [Docker Compose](https://docs.docker.com/compose/install/)
* [Go](https://go.dev/dl/)
* [Foundry](https://getfoundry.sh)
### Setup
......
......@@ -6,6 +6,7 @@ import (
"github.com/ethereum-optimism/optimism/batch-submitter/drivers/sequencer"
l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum-optimism/optimism/l2geth/core/types"
l2types "github.com/ethereum-optimism/optimism/l2geth/core/types"
"github.com/stretchr/testify/require"
)
......@@ -47,3 +48,76 @@ func TestBatchElementFromBlock(t *testing.T) {
require.False(t, element.IsSequencerTx())
require.Nil(t, element.Tx)
}
func TestGenSequencerParams(t *testing.T) {
tx := types.NewTransaction(0, l2common.Address{}, big.NewInt(0), 0, big.NewInt(0), []byte{})
shouldStartAtElement := uint64(1)
blockOffset := uint64(1)
batches := []sequencer.BatchElement{
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 1, BlockNumber: 1, Tx: sequencer.NewCachedTx(tx)},
}
params, err := sequencer.GenSequencerBatchParams(shouldStartAtElement, blockOffset, batches)
require.NoError(t, err)
require.Equal(t, uint64(0), params.ShouldStartAtElement)
require.Equal(t, uint64(len(batches)), params.TotalElementsToAppend)
require.Equal(t, len(batches), len(params.Contexts))
// There is only 1 sequencer tx
require.Equal(t, 1, len(params.Txs))
// There are 2 contexts
// The first context contains the deposit
context1 := params.Contexts[0]
require.Equal(t, uint64(0), context1.NumSequencedTxs)
require.Equal(t, uint64(1), context1.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context1.Timestamp)
require.Equal(t, uint64(1), context1.BlockNumber)
// The second context contains the sequencer tx
context2 := params.Contexts[1]
require.Equal(t, uint64(1), context2.NumSequencedTxs)
require.Equal(t, uint64(0), context2.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context2.Timestamp)
require.Equal(t, uint64(1), context2.BlockNumber)
}
func TestGenSequencerParamsOnlyDeposits(t *testing.T) {
shouldStartAtElement := uint64(1)
blockOffset := uint64(1)
batches := []sequencer.BatchElement{
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 2, BlockNumber: 2},
}
params, err := sequencer.GenSequencerBatchParams(shouldStartAtElement, blockOffset, batches)
require.NoError(t, err)
// The batches will pack deposits into the same context when their
// timestamps and block numbers are the same
require.Equal(t, uint64(0), params.ShouldStartAtElement)
require.Equal(t, uint64(len(batches)), params.TotalElementsToAppend)
// 2 deposits have the same timestamp + blocknumber, they go in the
// same context. 1 deposit has a different timestamp + blocknumber,
// it goes into a different context. Therefore there are 2 contexts
require.Equal(t, 2, len(params.Contexts))
// No sequencer txs
require.Equal(t, 0, len(params.Txs))
// There are 2 contexts
// The first context contains the two deposits that share a timestamp + block number
context1 := params.Contexts[0]
require.Equal(t, uint64(0), context1.NumSequencedTxs)
require.Equal(t, uint64(2), context1.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context1.Timestamp)
require.Equal(t, uint64(1), context1.BlockNumber)
context2 := params.Contexts[1]
require.Equal(t, uint64(0), context2.NumSequencedTxs)
require.Equal(t, uint64(1), context2.NumSubsequentQueueTxs)
require.Equal(t, uint64(2), context2.Timestamp)
require.Equal(t, uint64(2), context2.BlockNumber)
}
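These two tests pin down the context-packing rule: elements that share a timestamp and block number share a context, sequenced txs and queue (deposit) txs are counted separately, and because queue txs come after sequenced txs within a context, a sequencer tx that follows a deposit forces a new context. Below is a minimal, self-contained sketch of that rule; the types are illustrative stand-ins, not the actual `sequencer` package.
```
package main

import "fmt"

// element and batchContext are hypothetical stand-ins for the fields the
// tests above assert on.
type element struct {
	Timestamp, BlockNumber uint64
	IsSequencerTx          bool
}

type batchContext struct {
	NumSequencedTxs, NumSubsequentQueueTxs uint64
	Timestamp, BlockNumber                 uint64
}

// groupIntoContexts opens a new context whenever (timestamp, block number)
// changes, or when a sequencer tx arrives after a queue tx, since sequenced
// txs must precede queue txs within a single context.
func groupIntoContexts(elems []element) []batchContext {
	var ctxs []batchContext
	for _, e := range elems {
		n := len(ctxs)
		if n == 0 ||
			ctxs[n-1].Timestamp != e.Timestamp ||
			ctxs[n-1].BlockNumber != e.BlockNumber ||
			(e.IsSequencerTx && ctxs[n-1].NumSubsequentQueueTxs > 0) {
			ctxs = append(ctxs, batchContext{Timestamp: e.Timestamp, BlockNumber: e.BlockNumber})
			n++
		}
		if e.IsSequencerTx {
			ctxs[n-1].NumSequencedTxs++
		} else {
			ctxs[n-1].NumSubsequentQueueTxs++
		}
	}
	return ctxs
}

func main() {
	// Mirrors TestGenSequencerParamsOnlyDeposits: the first two deposits share
	// a context, the third gets its own, so there are 2 contexts and 0 txs.
	ctxs := groupIntoContexts([]element{
		{Timestamp: 1, BlockNumber: 1},
		{Timestamp: 1, BlockNumber: 1},
		{Timestamp: 2, BlockNumber: 2},
	})
	fmt.Println(len(ctxs)) // 2
}
```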
......@@ -222,11 +222,6 @@ func (p *AppendSequencerBatchParams) Write(
return ErrMalformedBatch
}
// There must be transactions if there are contexts
if len(p.Txs) == 0 && len(p.Contexts) != 0 {
return ErrMalformedBatch
}
// copy the contexts as to not malleate the struct
// when it is a typed batch
contexts := make([]BatchContext, 0, len(p.Contexts)+1)
......@@ -361,9 +356,6 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
if len(p.Contexts) == 0 && len(p.Txs) != 0 {
return ErrMalformedBatch
}
if len(p.Txs) == 0 && len(p.Contexts) != 0 {
return ErrMalformedBatch
}
return closeReader()
} else if err != nil {
return err
......
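With those two checks removed, the only batch shape still rejected as malformed is transactions without contexts; contexts without transactions are now accepted, which is exactly the shape of a deposit-only batch. A one-line sketch of the resulting rule (illustrative, not the actual validation code):
```
// isMalformedBatch mirrors the rule left in place after this change:
// txs still require contexts, but contexts no longer require txs.
func isMalformedBatch(numContexts, numTxs int) bool {
	return numContexts == 0 && numTxs != 0
}
```
The fixture changes below (`"error": true` flipped to `"error": false` for the no-txs cases) encode the same relaxation.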
......@@ -46,7 +46,7 @@
}
],
"txs": [],
"error": true
"error": false
},
{
"name": "multiple-contexts-no-txs",
......@@ -80,7 +80,7 @@
}
],
"txs": [],
"error": true
"error": false
},
{
"name": "complex",
......
......@@ -201,7 +201,7 @@ Once you’ve built both repositories, you’ll need to head back to the Optimism M
- Replace `"BATCHER"` with the address of the Batcher account you generated earlier.
- Replace `"SEQUENCER"` with the address of the Sequencer account you generated earlier.
- Replace `"BLOCKHASH"` with the blockhash you got from the `cast` command.
- Replace `"TIMESTAMP"` with the timestamp you got from the `cast` command. Note that although all the other fields are strings, this field is a number! Don’t include the quotation marks.
- Replace `TIMESTAMP` with the timestamp you got from the `cast` command. Note that although all the other fields are strings, this field is a number! Don’t include the quotation marks.
## Deploy the L1 contracts
......@@ -390,9 +390,7 @@ Head over to the `op-node` package and start the `op-node` using the following c
--rollup.config=./rollup.json \
--rpc.addr=0.0.0.0 \
--rpc.port=8547 \
--p2p.listen.ip=0.0.0.0 \
--p2p.listen.tcp=9003 \
--p2p.listen.udp=9003 \
--p2p.disable \
--rpc.enable-admin \
--p2p.sequencer.key=<SEQUENCERKEY> \
--l1=<RPC> \
......@@ -402,6 +400,26 @@ Head over to the `op-node` package and start the `op-node` using the following c
Once you run this command, you should start seeing the `op-node` begin to process all of the L1 information after the starting block number that you picked earlier. Once the `op-node` has enough information, it’ll begin sending Engine API payloads to `op-geth`. At that point, you’ll start to see blocks being created inside of `op-geth`. We’re live!
::: tip Peer-to-peer synchronization
If you use a chain ID that is also used by others, for example the default (42069), your `op-node` will try to use peer-to-peer synchronization to speed things up.
These attempts will fail because they are signed with the wrong key, but they still waste time and network resources.
To avoid this, we start with peer-to-peer synchronization disabled (`--p2p.disable`).
Once you have multiple nodes, it makes sense to use these command-line parameters to synchronize between them without getting confused by other blockchains.
```
--p2p.static=<nodes> \
--p2p.listen.ip=0.0.0.0 \
--p2p.listen.tcp=9003 \
--p2p.listen.udp=9003 \
```
:::
## Run op-batcher
The final component necessary to put all the pieces together is the `op-batcher`. The `op-batcher` takes transactions from the Sequencer and publishes those transactions to L1. Once transactions are on L1, they’re officially part of the Rollup. Without the `op-batcher`, transactions sent to the Sequencer would never make it to L1 and wouldn’t become part of the canonical chain. The `op-batcher` is critical!
......@@ -516,15 +534,47 @@ To use any other development stack, see the getting started tutorial, just repla
### Stopping your Rollup
To stop `op-geth` you should use Ctrl-C.
An orderly shutdown is done in the reverse order to the order in which components were started:
1. Stop `op-batcher`.
1. Stop `op-node`.
1. Stop `op-geth`.
### Starting your Rollup
To restart the blockchain, use the same order of components you did when you initialized it.
1. `op-geth`
1. `op-node`
1. `op-batcher`
::: tip Synchronization takes time
`op-batcher` might emit warning messages similar to:
```
WARN [03-21|14:13:55.248] Error calculating L2 block range err="failed to get sync status: Post \"http://localhost:8547\": context deadline exceeded"
WARN [03-21|14:13:57.328] Error calculating L2 block range err="failed to get sync status: Post \"http://localhost:8547\": context deadline exceeded"
```
This means that `op-node` is not yet synchronized up to the present time.
Just wait until it is.
:::
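If you want to check the progress yourself, you can ask `op-node` for the same sync status the batcher polls for. A minimal sketch using go-ethereum's `rpc` client; the port matches the `--rpc.port=8547` flag used above, and the `optimism_syncStatus` method name is an assumption about op-node's RPC namespace.
```
package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// 8547 is the op-node RPC port chosen earlier in this guide.
	client, err := rpc.Dial("http://localhost:8547")
	if err != nil {
		panic(err)
	}
	var status json.RawMessage
	if err := client.CallContext(context.Background(), &status, "optimism_syncStatus"); err != nil {
		panic(err)
	}
	fmt.Println(string(status))
}
```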
If `op-geth` aborts (for example, because the computer it is running on crashes), you will get these errors on `op-node`:
### Errors
#### Corrupt data directory
If `op-geth` aborts (for example, because the computer it is running on crashes), you might get these errors on `op-node`:
```
WARN [02-16|21:22:02.868] Derivation process temporary error attempts=14 err="stage 0 failed resetting: temp: failed to find the L2 Heads to start from: failed to fetch L2 block by hash 0x0000000000000000000000000000000000000000000000000000000000000000: failed to determine block-hash of hash 0x0000000000000000000000000000000000000000000000000000000000000000, could not get payload: not found"
```
In that case, you need to remove `datadir`, reinitialize it:
This means that the data directory is corrupt and you need to reinitialize it:
```bash
cd ~/op-geth
......@@ -536,17 +586,23 @@ echo "<SEQUENCER KEY HERE>" > datadir/block-signer-key
./build/bin/geth init --datadir=./datadir ./genesis.json
```
### Starting your Rollup
To restart the blockchain, use the same order of components you did when you initialized it.
#### Batcher out of ETH
1. `op-geth`
2. `op-node`
3. `op-batcher`
If `op-batcher` runs out of ETH, it cannot submit new transaction batches to L1.
You will get error messages similar to this one:
```
INFO [03-21|14:22:32.754] publishing transaction service=batcher txHash=2ace6d..7eb248 nonce=2516 gasTipCap=2,340,741 gasFeeCap=172,028,434,515
ERROR[03-21|14:22:32.844] unable to publish transaction service=batcher txHash=2ace6d..7eb248 nonce=2516 gasTipCap=2,340,741 gasFeeCap=172,028,434,515 err="insufficient funds for gas * price + value"
```
Just send more ETH to the batcher, and the problem will be resolved.
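To catch this before it happens, you can watch the batcher account's L1 balance. A minimal sketch using go-ethereum's `ethclient`; the RPC URL and the batcher address are placeholders.
```
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // your L1 RPC endpoint
	if err != nil {
		panic(err)
	}
	batcher := common.HexToAddress("0x0000000000000000000000000000000000000000") // your batcher address
	balance, err := client.BalanceAt(context.Background(), batcher, nil) // nil = latest block
	if err != nil {
		panic(err)
	}
	fmt.Println("batcher balance (wei):", balance)
}
```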
## Adding nodes
To add nodes to the rollup, you need to initialize `op-node` and `op-geth`, similar to what you did for the first node:
To add nodes to the rollup, you need to initialize `op-node` and `op-geth`, similar to what you did for the first node.
You should *not* add an `op-batcher`; there should be only one.
1. Configure the OS and prerequisites as you did for the first node.
1. Build the Optimism monorepo and `op-geth` as you did for the first node.
......@@ -574,8 +630,8 @@ To add nodes to the rollup, you need to initialize `op-node` and `op-geth`, simi
1. Start `op-geth` (using the same command line you used on the initial node)
1. Start `op-node` (using the same command line you used on the initial node)
1. Wait while the node synchronizes
## What’s next?
You can use this rollup the same way you’d use any other test blockchain. Once the superchain is available, this blockchain should be able to join the test version. Alternatively, you could [modify the blockchain in various ways](./hacks.md). **Please note that OP Stack Hacks are unofficial and are not explicitly supported by the OP Stack.** You will not be able to receive significant developer support for any modifications you make to the OP Stack.
\ No newline at end of file
You can use this rollup the same way you’d use any other test blockchain. Once the superchain is available, this blockchain should be able to join the test version. Alternatively, you could [modify the blockchain in various ways](./hacks.md). **Please note that OP Stack Hacks are unofficial and are not explicitly supported by the OP Stack.** You will not be able to receive significant developer support for any modifications you make to the OP Stack.
......@@ -9,7 +9,7 @@ require (
github.com/docker/docker v20.10.21+incompatible
github.com/docker/go-connections v0.4.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum/go-ethereum v1.11.4
github.com/ethereum/go-ethereum v1.11.5
github.com/fsnotify/fsnotify v1.6.0
github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.5.9
......@@ -34,6 +34,7 @@ require (
golang.org/x/crypto v0.6.0
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb
golang.org/x/term v0.5.0
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af
)
require (
......@@ -178,7 +179,6 @@ require (
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
golang.org/x/tools v0.6.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
......@@ -189,6 +189,6 @@ require (
nhooyr.io/websocket v1.8.7 // indirect
)
replace github.com/ethereum/go-ethereum v1.11.4 => github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230321002540-11f0554a4313
replace github.com/ethereum/go-ethereum v1.11.5 => github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230324105532-555b76f39878
//replace github.com/ethereum/go-ethereum v1.11.4 => ../go-ethereum
//replace github.com/ethereum/go-ethereum v1.11.5 => ../go-ethereum
......@@ -184,8 +184,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230321002540-11f0554a4313 h1:dBPc4CEzqmHUeU/Awk7Lw2mAaTc59T5W8CvAr+4YuzU=
github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230321002540-11f0554a4313/go.mod h1:SGLXBOtu2JlKrNoUG76EatI2uJX/WZRY4nmEyvE9Q38=
github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230324105532-555b76f39878 h1:pk3lFrP6zay7+jT+yoFAWxvGbP1Z/5lsorimXGrQoxE=
github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230324105532-555b76f39878/go.mod h1:SGLXBOtu2JlKrNoUG76EatI2uJX/WZRY4nmEyvE9Q38=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fjl/memsize v0.0.1 h1:+zhkb+dhUgx0/e+M8sF0QqiouvMQUiKR+QYvdxIOKcQ=
......
......@@ -51,7 +51,7 @@ func Main(version string, cliCtx *cli.Context) error {
return err
}
}
defer batchSubmitter.StopIfRunning()
defer batchSubmitter.StopIfRunning(context.Background())
ctx, cancel := context.WithCancel(context.Background())
......@@ -73,7 +73,7 @@ func Main(version string, cliCtx *cli.Context) error {
l.Error("error starting metrics server", err)
}
}()
m.StartBalanceMetrics(ctx, l, batchSubmitter.L1Client, batchSubmitter.From)
m.StartBalanceMetrics(ctx, l, batchSubmitter.L1Client, batchSubmitter.TxManager.From())
}
rpcCfg := cfg.RPCConfig
......
......@@ -18,6 +18,7 @@ var (
ErrMaxDurationReached = errors.New("max channel duration reached")
ErrChannelTimeoutClose = errors.New("close to channel timeout")
ErrSeqWindowClose = errors.New("close to sequencer window timeout")
ErrTerminated = errors.New("channel terminated")
)
type ChannelFullError struct {
......@@ -188,7 +189,7 @@ func (c *channelBuilder) Reset() error {
}
// AddBlock adds a block to the channel compression pipeline. IsFull should be
// called aftewards to test whether the channel is full. If full, a new channel
// called afterwards to test whether the channel is full. If full, a new channel
// must be started.
//
// AddBlock returns a ChannelFullError if called even though the channel is
......@@ -307,16 +308,17 @@ func (c *channelBuilder) IsFull() bool {
// FullErr returns the reason why the channel is full. If not full yet, it
// returns nil.
//
// It returns a ChannelFullError wrapping one of six possible reasons for the
// channel being full:
// It returns a ChannelFullError wrapping one of the following possible reasons
// for the channel being full:
// - ErrInputTargetReached if the target amount of input data has been reached,
// - derive.MaxRLPBytesPerChannel if the general maximum amount of input data
// would have been exceeded by the latest AddBlock call,
// - ErrMaxFrameIndex if the maximum number of frames has been generated
// (uint16),
// - ErrMaxDurationReached if the max channel duration got reached.
// - ErrChannelTimeoutClose if the consensus channel timeout got too close.
// - ErrSeqWindowClose if the end of the sequencer window got too close.
// - ErrMaxDurationReached if the max channel duration got reached,
// - ErrChannelTimeoutClose if the consensus channel timeout got too close,
// - ErrSeqWindowClose if the end of the sequencer window got too close,
// - ErrTerminated if the channel was explicitly terminated.
func (c *channelBuilder) FullErr() error {
return c.fullErr
}
......@@ -402,6 +404,14 @@ func (c *channelBuilder) outputFrame() error {
return err // possibly io.EOF (last frame)
}
// Close immediately marks the channel as full with an ErrTerminated
// if the channel is not already full.
func (c *channelBuilder) Close() {
if !c.IsFull() {
c.setFullErr(ErrTerminated)
}
}
// HasFrame returns whether there's any available frame. If true, it can be
// popped using NextFrame().
//
......
......@@ -41,6 +41,9 @@ type channelManager struct {
pendingTransactions map[txID]txData
// Set of confirmed txID -> inclusion block. For determining if the channel is timed out
confirmedTransactions map[txID]eth.BlockID
// if set to true, prevents production of any new channel frames
closed bool
}
func NewChannelManager(log log.Logger, metr metrics.Metricer, cfg ChannelConfig) *channelManager {
......@@ -60,6 +63,7 @@ func (s *channelManager) Clear() {
s.log.Trace("clearing channel manager state")
s.blocks = s.blocks[:0]
s.tip = common.Hash{}
s.closed = false
s.clearPendingChannel()
}
......@@ -78,6 +82,10 @@ func (s *channelManager) TxFailed(id txID) {
}
s.metr.RecordBatchTxFailed()
if s.closed && len(s.confirmedTransactions) == 0 && len(s.pendingTransactions) == 0 {
s.log.Info("Channel has no submitted transactions, clearing for shutdown", "chID", s.pendingChannel.ID())
s.clearPendingChannel()
}
}
// TxConfirmed marks a transaction as confirmed on L1. Unfortunately even if all frames in
......@@ -179,8 +187,8 @@ func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) {
dataPending := s.pendingChannel != nil && s.pendingChannel.HasFrame()
s.log.Debug("Requested tx data", "l1Head", l1Head, "data_pending", dataPending, "blocks_pending", len(s.blocks))
// Short circuit if there is a pending frame.
if dataPending {
// Short circuit if there is a pending frame or the channel manager is closed.
if dataPending || s.closed {
return s.nextTxData()
}
......@@ -344,3 +352,27 @@ func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info derive.L1BlockInfo)
SequenceNumber: l1info.SequenceNumber,
}
}
// Close closes the current pending channel, if one exists, outputs any remaining frames,
// and prevents the creation of any new channels.
// Any outputted frames still need to be published.
func (s *channelManager) Close() error {
if s.closed {
return nil
}
s.closed = true
// Any pending state can be proactively cleared if there are no submitted transactions
if len(s.confirmedTransactions) == 0 && len(s.pendingTransactions) == 0 {
s.clearPendingChannel()
}
if s.pendingChannel == nil {
return nil
}
s.pendingChannel.Close()
return s.outputFrames()
}
......@@ -363,3 +363,145 @@ func TestChannelManager_TxResend(t *testing.T) {
require.NoError(err)
require.Len(fs, 1)
}
// TestChannelManagerCloseBeforeFirstUse ensures that the channel manager
// will not produce any frames if closed immediately.
func TestChannelManagerCloseBeforeFirstUse(t *testing.T) {
require := require.New(t)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics,
ChannelConfig{
TargetFrameSize: 0,
MaxFrameSize: 100,
ApproxComprRatio: 1.0,
ChannelTimeout: 1000,
})
a, _ := derivetest.RandomL2Block(rng, 4)
m.Close()
err := m.AddL2Block(a)
require.NoError(err, "Failed to add L2 block")
_, err = m.TxData(eth.BlockID{})
require.ErrorIs(err, io.EOF, "Expected closed channel manager to contain no tx data")
}
// TestChannelManagerCloseNoPendingChannel ensures that the channel manager
// can gracefully close with no pending channels, and will not emit any new
// channel frames.
func TestChannelManagerCloseNoPendingChannel(t *testing.T) {
require := require.New(t)
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics,
ChannelConfig{
TargetFrameSize: 0,
MaxFrameSize: 100,
ApproxComprRatio: 1.0,
ChannelTimeout: 1000,
})
a := newMiniL2Block(0)
b := newMiniL2BlockWithNumberParent(0, big.NewInt(1), a.Hash())
err := m.AddL2Block(a)
require.NoError(err, "Failed to add L2 block")
txdata, err := m.TxData(eth.BlockID{})
require.NoError(err, "Expected channel manager to return valid tx data")
m.TxConfirmed(txdata.ID(), eth.BlockID{})
_, err = m.TxData(eth.BlockID{})
require.ErrorIs(err, io.EOF, "Expected channel manager to EOF")
m.Close()
err = m.AddL2Block(b)
require.NoError(err, "Failed to add L2 block")
_, err = m.TxData(eth.BlockID{})
require.ErrorIs(err, io.EOF, "Expected closed channel manager to return no new tx data")
}
// TestChannelManagerClosePendingChannel ensures that the channel manager
// can gracefully close with a pending channel, and will not produce any
// new channel frames after this point.
func TestChannelManagerClosePendingChannel(t *testing.T) {
require := require.New(t)
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics,
ChannelConfig{
TargetNumFrames: 100,
TargetFrameSize: 1000,
MaxFrameSize: 1000,
ApproxComprRatio: 1.0,
ChannelTimeout: 1000,
})
a := newMiniL2Block(50_000)
b := newMiniL2BlockWithNumberParent(10, big.NewInt(1), a.Hash())
err := m.AddL2Block(a)
require.NoError(err, "Failed to add L2 block")
txdata, err := m.TxData(eth.BlockID{})
require.NoError(err, "Expected channel manager to produce valid tx data")
m.TxConfirmed(txdata.ID(), eth.BlockID{})
m.Close()
txdata, err = m.TxData(eth.BlockID{})
require.NoError(err, "Expected channel manager to produce tx data from remaining L2 block data")
m.TxConfirmed(txdata.ID(), eth.BlockID{})
_, err = m.TxData(eth.BlockID{})
require.ErrorIs(err, io.EOF, "Expected channel manager to have no more tx data")
err = m.AddL2Block(b)
require.NoError(err, "Failed to add L2 block")
_, err = m.TxData(eth.BlockID{})
require.ErrorIs(err, io.EOF, "Expected closed channel manager to produce no more tx data")
}
// TestChannelManagerCloseAllTxsFailed ensures that the channel manager
// can gracefully close after producing transaction frames if none of these
// have successfully landed on chain.
func TestChannelManagerCloseAllTxsFailed(t *testing.T) {
require := require.New(t)
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, metrics.NoopMetrics,
ChannelConfig{
TargetNumFrames: 100,
TargetFrameSize: 1000,
MaxFrameSize: 1000,
ApproxComprRatio: 1.0,
ChannelTimeout: 1000,
})
a := newMiniL2Block(50_000)
err := m.AddL2Block(a)
require.NoError(err, "Failed to add L2 block")
txdata, err := m.TxData(eth.BlockID{})
require.NoError(err, "Expected channel manager to produce valid tx data")
m.TxFailed(txdata.ID())
// Show that this data will continue to be emitted as long as the transaction
// fails and the channel manager is not closed
txdata, err = m.TxData(eth.BlockID{})
require.NoError(err, "Expected channel manager to re-attempt the failed transaction")
m.TxFailed(txdata.ID())
m.Close()
_, err = m.TxData(eth.BlockID{})
require.ErrorIs(err, io.EOF, "Expected closed channel manager to produce no more tx data")
}
......@@ -3,7 +3,6 @@ package batcher
import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli"
......@@ -17,7 +16,6 @@ import (
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
opsigner "github.com/ethereum-optimism/optimism/op-signer/client"
)
type Config struct {
......@@ -26,11 +24,9 @@ type Config struct {
L1Client *ethclient.Client
L2Client *ethclient.Client
RollupNode *sources.RollupClient
TxManager txmgr.TxManager
PollInterval time.Duration
From common.Address
TxManagerConfig txmgr.Config
// RollupConfig is queried at startup
Rollup *rollup.Config
......@@ -51,8 +47,6 @@ func (c *Config) Check() error {
}
type CLIConfig struct {
/* Required Params */
// L1EthRpc is the HTTP provider URL for L1.
L1EthRpc string
......@@ -81,35 +75,6 @@ type CLIConfig struct {
// and creating a new batch.
PollInterval time.Duration
// NumConfirmations is the number of confirmations which we will wait after
// appending new batches.
NumConfirmations uint64
// SafeAbortNonceTooLowCount is the number of ErrNonceTooLowObservations
// required to give up on a tx at a particular nonce without receiving
// confirmation.
SafeAbortNonceTooLowCount uint64
// ResubmissionTimeout is time we will wait before resubmitting a
// transaction.
ResubmissionTimeout time.Duration
// Mnemonic is the HD seed used to derive the wallet private keys for both
// the sequence and proposer. Must be used in conjunction with
// SequencerHDPath and ProposerHDPath.
Mnemonic string
// SequencerHDPath is the derivation path used to obtain the private key for
// batched submission of sequencer transactions.
SequencerHDPath string
// PrivateKey is the private key used to submit sequencer transactions.
PrivateKey string
RPCConfig rpc.CLIConfig
/* Optional Params */
// MaxL1TxSize is the maximum size of a batch tx submitted to L1.
MaxL1TxSize uint64
......@@ -125,14 +90,11 @@ type CLIConfig struct {
Stopped bool
LogConfig oplog.CLIConfig
TxMgrConfig txmgr.CLIConfig
RPCConfig rpc.CLIConfig
LogConfig oplog.CLIConfig
MetricsConfig opmetrics.CLIConfig
PprofConfig oppprof.CLIConfig
// SignerConfig contains the client config for op-signer service
SignerConfig opsigner.CLIConfig
PprofConfig oppprof.CLIConfig
}
func (c CLIConfig) Check() error {
......@@ -148,7 +110,7 @@ func (c CLIConfig) Check() error {
if err := c.PprofConfig.Check(); err != nil {
return err
}
if err := c.SignerConfig.Check(); err != nil {
if err := c.TxMgrConfig.Check(); err != nil {
return err
}
return nil
......@@ -158,14 +120,11 @@ func (c CLIConfig) Check() error {
func NewConfig(ctx *cli.Context) CLIConfig {
return CLIConfig{
/* Required Flags */
L1EthRpc: ctx.GlobalString(flags.L1EthRpcFlag.Name),
L2EthRpc: ctx.GlobalString(flags.L2EthRpcFlag.Name),
RollupRpc: ctx.GlobalString(flags.RollupRpcFlag.Name),
SubSafetyMargin: ctx.GlobalUint64(flags.SubSafetyMarginFlag.Name),
PollInterval: ctx.GlobalDuration(flags.PollIntervalFlag.Name),
NumConfirmations: ctx.GlobalUint64(flags.NumConfirmationsFlag.Name),
SafeAbortNonceTooLowCount: ctx.GlobalUint64(flags.SafeAbortNonceTooLowCountFlag.Name),
ResubmissionTimeout: ctx.GlobalDuration(flags.ResubmissionTimeoutFlag.Name),
L1EthRpc: ctx.GlobalString(flags.L1EthRpcFlag.Name),
L2EthRpc: ctx.GlobalString(flags.L2EthRpcFlag.Name),
RollupRpc: ctx.GlobalString(flags.RollupRpcFlag.Name),
SubSafetyMargin: ctx.GlobalUint64(flags.SubSafetyMarginFlag.Name),
PollInterval: ctx.GlobalDuration(flags.PollIntervalFlag.Name),
/* Optional Flags */
MaxChannelDuration: ctx.GlobalUint64(flags.MaxChannelDurationFlag.Name),
......@@ -174,13 +133,10 @@ func NewConfig(ctx *cli.Context) CLIConfig {
TargetNumFrames: ctx.GlobalInt(flags.TargetNumFramesFlag.Name),
ApproxComprRatio: ctx.GlobalFloat64(flags.ApproxComprRatioFlag.Name),
Stopped: ctx.GlobalBool(flags.StoppedFlag.Name),
Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name),
SequencerHDPath: ctx.GlobalString(flags.SequencerHDPathFlag.Name),
PrivateKey: ctx.GlobalString(flags.PrivateKeyFlag.Name),
TxMgrConfig: txmgr.ReadCLIConfig(ctx),
RPCConfig: rpc.ReadCLIConfig(ctx),
LogConfig: oplog.ReadCLIConfig(ctx),
MetricsConfig: opmetrics.ReadCLIConfig(ctx),
PprofConfig: oppprof.ReadCLIConfig(ctx),
SignerConfig: opsigner.ReadCLIConfig(ctx),
}
}
......
package batcher
import (
"context"
"fmt"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
)
const networkTimeout = 2 * time.Second // How long a single network request can take. TODO: put in a config somewhere
// TransactionManager wraps the simple txmgr package to make it easy to send & wait for transactions
type TransactionManager struct {
// Config
batchInboxAddress common.Address
senderAddress common.Address
chainID *big.Int
// Outside world
txMgr txmgr.TxManager
l1Client *ethclient.Client
signerFn opcrypto.SignerFn
log log.Logger
}
func NewTransactionManager(log log.Logger, txMgrConfg txmgr.Config, batchInboxAddress common.Address, chainID *big.Int, senderAddress common.Address, l1Client *ethclient.Client) *TransactionManager {
t := &TransactionManager{
batchInboxAddress: batchInboxAddress,
senderAddress: senderAddress,
chainID: chainID,
txMgr: txmgr.NewSimpleTxManager("batcher", log, txMgrConfg, l1Client),
l1Client: l1Client,
signerFn: txMgrConfg.Signer,
log: log,
}
return t
}
// SendTransaction creates & submits a transaction to the batch inbox address with the given `data`.
// It currently uses the underlying `txmgr` to handle transaction sending & price management.
// This is a blocking method. It should not be called concurrently.
// TODO: where to put concurrent transaction handling logic.
func (t *TransactionManager) SendTransaction(ctx context.Context, data []byte) (*types.Receipt, error) {
tx, err := t.CraftTx(ctx, data)
if err != nil {
return nil, fmt.Errorf("failed to create tx: %w", err)
}
ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) // TODO: Select a timeout that makes sense here.
defer cancel()
if receipt, err := t.txMgr.Send(ctx, tx); err != nil {
t.log.Warn("unable to publish tx", "err", err, "data_size", len(data))
return nil, err
} else {
t.log.Info("tx successfully published", "tx_hash", receipt.TxHash, "data_size", len(data))
return receipt, nil
}
}
// calcGasTipAndFeeCap queries L1 to determine what a suitable miner tip & basefee limit would be for timely inclusion
func (t *TransactionManager) calcGasTipAndFeeCap(ctx context.Context) (gasTipCap *big.Int, gasFeeCap *big.Int, err error) {
childCtx, cancel := context.WithTimeout(ctx, networkTimeout)
gasTipCap, err = t.l1Client.SuggestGasTipCap(childCtx)
cancel()
if err != nil {
return nil, nil, fmt.Errorf("failed to get suggested gas tip cap: %w", err)
}
if gasTipCap == nil {
t.log.Warn("unexpected unset gasTipCap, using default 2 gwei")
gasTipCap = new(big.Int).SetUint64(params.GWei * 2)
}
childCtx, cancel = context.WithTimeout(ctx, networkTimeout)
head, err := t.l1Client.HeaderByNumber(childCtx, nil)
cancel()
if err != nil || head == nil {
return nil, nil, fmt.Errorf("failed to get L1 head block for fee cap: %w", err)
}
if head.BaseFee == nil {
return nil, nil, fmt.Errorf("failed to get L1 basefee in block %d for fee cap", head.Number)
}
gasFeeCap = txmgr.CalcGasFeeCap(head.BaseFee, gasTipCap)
return gasTipCap, gasFeeCap, nil
}
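// Note: txmgr.CalcGasFeeCap above is assumed to follow the same heuristic the
// proposer's sendTx uses later in this diff, feeCap = 2*baseFee + tip, which
// keeps the transaction includable across several consecutive maximum
// base-fee increases:
//
//	func calcGasFeeCap(baseFee, gasTipCap *big.Int) *big.Int {
//		return new(big.Int).Add(gasTipCap, new(big.Int).Mul(baseFee, big.NewInt(2)))
//	}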
// CraftTx creates the signed transaction to the batchInboxAddress.
// It queries L1 for the current fee market conditions as well as for the nonce.
// NOTE: This method SHOULD NOT publish the resulting transaction.
func (t *TransactionManager) CraftTx(ctx context.Context, data []byte) (*types.Transaction, error) {
gasTipCap, gasFeeCap, err := t.calcGasTipAndFeeCap(ctx)
if err != nil {
return nil, err
}
childCtx, cancel := context.WithTimeout(ctx, networkTimeout)
nonce, err := t.l1Client.NonceAt(childCtx, t.senderAddress, nil)
cancel()
if err != nil {
return nil, fmt.Errorf("failed to get nonce: %w", err)
}
rawTx := &types.DynamicFeeTx{
ChainID: t.chainID,
Nonce: nonce,
To: &t.batchInboxAddress,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Data: data,
}
t.log.Info("creating tx", "to", rawTx.To, "from", t.senderAddress)
gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true, false)
if err != nil {
return nil, fmt.Errorf("failed to calculate intrinsic gas: %w", err)
}
rawTx.Gas = gas
ctx, cancel = context.WithTimeout(ctx, networkTimeout)
defer cancel()
tx := types.NewTx(rawTx)
return t.signerFn(ctx, t.senderAddress, tx)
}
......@@ -9,14 +9,13 @@ import (
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
opsigner "github.com/ethereum-optimism/optimism/op-signer/client"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
)
const envVarPrefix = "OP_BATCHER"
var (
/* Required flags */
// Required flags
L1EthRpcFlag = cli.StringFlag{
Name: "l1-eth-rpc",
Usage: "HTTP provider URL for L1",
......@@ -50,31 +49,8 @@ var (
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "POLL_INTERVAL"),
}
NumConfirmationsFlag = cli.Uint64Flag{
Name: "num-confirmations",
Usage: "Number of confirmations which we will wait after " +
"appending a new batch",
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "NUM_CONFIRMATIONS"),
}
SafeAbortNonceTooLowCountFlag = cli.Uint64Flag{
Name: "safe-abort-nonce-too-low-count",
Usage: "Number of ErrNonceTooLow observations required to " +
"give up on a tx at a particular nonce without receiving " +
"confirmation",
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "SAFE_ABORT_NONCE_TOO_LOW_COUNT"),
}
ResubmissionTimeoutFlag = cli.DurationFlag{
Name: "resubmission-timeout",
Usage: "Duration we will wait before resubmitting a " +
"transaction to L1",
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "RESUBMISSION_TIMEOUT"),
}
/* Optional flags */
// Optional flags
MaxChannelDurationFlag = cli.Uint64Flag{
Name: "max-channel-duration",
Usage: "The maximum duration of L1-blocks to keep a channel open. 0 to disable.",
......@@ -110,23 +86,8 @@ var (
Usage: "Initialize the batcher in a stopped state. The batcher can be started using the admin_startBatcher RPC",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "STOPPED"),
}
MnemonicFlag = cli.StringFlag{
Name: "mnemonic",
Usage: "The mnemonic used to derive the wallets for either the " +
"sequencer or the l2output",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "MNEMONIC"),
}
SequencerHDPathFlag = cli.StringFlag{
Name: "sequencer-hd-path",
Usage: "The HD path used to derive the sequencer wallet from the " +
"mnemonic. The mnemonic flag must also be set.",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "SEQUENCER_HD_PATH"),
}
PrivateKeyFlag = cli.StringFlag{
Name: "private-key",
Usage: "The private key to use with the l2output wallet. Must not be used with mnemonic.",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "PRIVATE_KEY"),
}
// Legacy Flags
SequencerHDPathFlag = txmgr.SequencerHDPathFlag
)
var requiredFlags = []cli.Flag{
......@@ -135,9 +96,6 @@ var requiredFlags = []cli.Flag{
RollupRpcFlag,
SubSafetyMarginFlag,
PollIntervalFlag,
NumConfirmationsFlag,
SafeAbortNonceTooLowCountFlag,
ResubmissionTimeoutFlag,
}
var optionalFlags = []cli.Flag{
......@@ -147,9 +105,6 @@ var optionalFlags = []cli.Flag{
TargetNumFramesFlag,
ApproxComprRatioFlag,
StoppedFlag,
MnemonicFlag,
SequencerHDPathFlag,
PrivateKeyFlag,
}
func init() {
......@@ -158,8 +113,8 @@ func init() {
optionalFlags = append(optionalFlags, oplog.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, opmetrics.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, oppprof.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, opsigner.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, rpc.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, txmgr.CLIFlags(envVarPrefix)...)
Flags = append(requiredFlags, optionalFlags...)
}
......
......@@ -185,7 +185,7 @@ func (m *Metrics) RecordLatestL1Block(l1ref eth.L1BlockRef) {
m.RecordL1Ref("latest", l1ref)
}
// RecordL2BlockLoaded should be called when a new L2 block was loaded into the
// RecordL2BlocksLoaded should be called when a new L2 block was loaded into the
// channel manager (but not processed yet).
func (m *Metrics) RecordL2BlocksLoaded(l2ref eth.L2BlockRef) {
m.RecordL2Ref(StageLoaded, l2ref)
......
......@@ -6,7 +6,7 @@ import (
type batcherClient interface {
Start() error
Stop() error
Stop(ctx context.Context) error
}
type adminAPI struct {
......@@ -23,6 +23,6 @@ func (a *adminAPI) StartBatcher(_ context.Context) error {
return a.b.Start()
}
func (a *adminAPI) StopBatcher(_ context.Context) error {
return a.b.Stop()
func (a *adminAPI) StopBatcher(ctx context.Context) error {
return a.b.Stop(ctx)
}
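With `Stop` now taking a context, an RPC-initiated stop can propagate deadlines and cancellation. For reference, the admin API can be exercised with go-ethereum's `rpc` client; a minimal sketch, assuming a placeholder RPC port for the batcher and using the `admin_` method names referenced by the `--stopped` flag help text:
```
package main

import (
	"context"
	"time"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8548") // placeholder op-batcher RPC endpoint
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Stop, then restart, the batcher through its admin namespace.
	if err := client.CallContext(ctx, nil, "admin_stopBatcher"); err != nil {
		panic(err)
	}
	if err := client.CallContext(ctx, nil, "admin_startBatcher"); err != nil {
		panic(err)
	}
}
```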
......
......@@ -16,7 +16,6 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
......@@ -320,7 +319,7 @@ func PostCheckPredeploys(prevDB, currDB *state.StateDB) error {
// PostCheckPredeployStorage will ensure that the predeploys had their storage
// wiped correctly.
func PostCheckPredeployStorage(db vm.StateDB, finalSystemOwner common.Address, proxyAdminOwner common.Address) error {
func PostCheckPredeployStorage(db *state.StateDB, finalSystemOwner common.Address, proxyAdminOwner common.Address) error {
for name, addr := range predeploys.Predeploys {
if addr == nil {
return fmt.Errorf("nil address in predeploys mapping for %s", name)
......@@ -468,7 +467,7 @@ func PostCheckLegacyETH(prevDB, migratedDB *state.StateDB, migrationData crossdo
}
// PostCheckL1Block checks that the L1Block contract was properly set to the L1 origin.
func PostCheckL1Block(db vm.StateDB, info *derive.L1BlockInfo) error {
func PostCheckL1Block(db *state.StateDB, info *derive.L1BlockInfo) error {
// Slot 0 is the concatenation of the block number and timestamp
data := db.GetState(predeploys.L1BlockAddr, common.Hash{}).Bytes()
blockNumber := binary.BigEndian.Uint64(data[24:])
......@@ -558,7 +557,7 @@ func PostCheckL1Block(db vm.StateDB, info *derive.L1BlockInfo) error {
return nil
}
func CheckWithdrawalsAfter(db vm.StateDB, data crossdomain.MigrationData, l1CrossDomainMessenger *common.Address) error {
func CheckWithdrawalsAfter(db *state.StateDB, data crossdomain.MigrationData, l1CrossDomainMessenger *common.Address) error {
wds, invalidMessages, err := data.ToWithdrawals()
if err != nil {
return err
......
......@@ -8,6 +8,7 @@ import (
"strings"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
......@@ -22,19 +23,45 @@ import (
"github.com/ethereum-optimism/optimism/op-chain-ops/state"
)
var proxies = []string{
"SystemConfigProxy",
"L2OutputOracleProxy",
"L1CrossDomainMessengerProxy",
"L1StandardBridgeProxy",
"OptimismPortalProxy",
"OptimismMintableERC20FactoryProxy",
}
var portalMeteringSlot = common.Hash{31: 0x01}
var (
// proxies represents the set of proxies in front of contracts.
proxies = []string{
"SystemConfigProxy",
"L2OutputOracleProxy",
"L1CrossDomainMessengerProxy",
"L1StandardBridgeProxy",
"OptimismPortalProxy",
"OptimismMintableERC20FactoryProxy",
}
// portalMeteringSlot is the storage slot containing the metering params.
portalMeteringSlot = common.Hash{31: 0x01}
// zeroHash represents the zero value for a hash.
zeroHash = common.Hash{}
// uint128Max is type(uint128).max and is set in the init function.
uint128Max = new(big.Int)
// The default values for the ResourceConfig, used as part of
// an EIP-1559 curve for deposit gas.
defaultResourceConfig = bindings.ResourceMeteringResourceConfig{
MaxResourceLimit: 20_000_000,
ElasticityMultiplier: 10,
BaseFeeMaxChangeDenominator: 8,
MinimumBaseFee: params.GWei,
SystemTxMaxGas: 1_000_000,
}
)
var zeroHash common.Hash
func init() {
var ok bool
uint128Max, ok = new(big.Int).SetString("ffffffffffffffffffffffffffffffff", 16)
if !ok {
panic("bad uint128Max")
}
// Set the maximum base fee on the default config.
defaultResourceConfig.MaximumBaseFee = uint128Max
}
// BuildL1DeveloperGenesis will create a L1 genesis block after creating
// all of the state required for an Optimism network to function.
func BuildL1DeveloperGenesis(config *DeployConfig) (*core.Genesis, error) {
if config.L2OutputOracleStartingTimestamp != -1 {
return nil, errors.New("l2oo starting timestamp must be -1")
......@@ -67,6 +94,26 @@ func BuildL1DeveloperGenesis(config *DeployConfig) (*core.Genesis, error) {
if err != nil {
return nil, err
}
portalABI, err := bindings.OptimismPortalMetaData.GetAbi()
if err != nil {
return nil, err
}
// Initialize the OptimismPortal without being paused
data, err := portalABI.Pack("initialize", false)
if err != nil {
return nil, fmt.Errorf("cannot abi encode initialize for OptimismPortal: %w", err)
}
if _, err := upgradeProxy(
backend,
opts,
depsByName["OptimismPortalProxy"].Address,
depsByName["OptimismPortal"].Address,
data,
); err != nil {
return nil, fmt.Errorf("cannot upgrade OptimismPortalProxy: %w", err)
}
sysCfgABI, err := bindings.SystemConfigMetaData.GetAbi()
if err != nil {
return nil, err
......@@ -75,7 +122,8 @@ func BuildL1DeveloperGenesis(config *DeployConfig) (*core.Genesis, error) {
if gasLimit == 0 {
gasLimit = defaultL2GasLimit
}
data, err := sysCfgABI.Pack(
data, err = sysCfgABI.Pack(
"initialize",
config.FinalSystemOwner,
uint642Big(config.GasPriceOracleOverhead),
......@@ -83,6 +131,7 @@ func BuildL1DeveloperGenesis(config *DeployConfig) (*core.Genesis, error) {
config.BatchSenderAddress.Hash(),
gasLimit,
config.P2PSequencerAddress,
defaultResourceConfig,
)
if err != nil {
return nil, fmt.Errorf("cannot abi encode initialize for SystemConfig: %w", err)
......@@ -94,7 +143,7 @@ func BuildL1DeveloperGenesis(config *DeployConfig) (*core.Genesis, error) {
depsByName["SystemConfig"].Address,
data,
); err != nil {
return nil, err
return nil, fmt.Errorf("cannot upgrade SystemConfigProxy: %w", err)
}
l2ooABI, err := bindings.L2OutputOracleMetaData.GetAbi()
......@@ -119,24 +168,6 @@ func BuildL1DeveloperGenesis(config *DeployConfig) (*core.Genesis, error) {
return nil, err
}
portalABI, err := bindings.OptimismPortalMetaData.GetAbi()
if err != nil {
return nil, err
}
// Initialize the OptimismPortal without being paused
data, err = portalABI.Pack("initialize", false)
if err != nil {
return nil, fmt.Errorf("cannot abi encode initialize for OptimismPortal: %w", err)
}
if _, err := upgradeProxy(
backend,
opts,
depsByName["OptimismPortalProxy"].Address,
depsByName["OptimismPortal"].Address,
data,
); err != nil {
return nil, err
}
l1XDMABI, err := bindings.L1CrossDomainMessengerMetaData.GetAbi()
if err != nil {
return nil, err
......@@ -264,6 +295,7 @@ func deployL1Contracts(config *DeployConfig, backend *backends.SimulatedBackend)
if gasLimit == 0 {
gasLimit = defaultL2GasLimit
}
constructors = append(constructors, []deployer.Constructor{
{
Name: "SystemConfig",
......@@ -274,6 +306,7 @@ func deployL1Contracts(config *DeployConfig, backend *backends.SimulatedBackend)
config.BatchSenderAddress.Hash(), // left-padded 32 bytes value, version is zero anyway
gasLimit,
config.P2PSequencerAddress,
defaultResourceConfig,
},
},
{
......@@ -297,6 +330,7 @@ func deployL1Contracts(config *DeployConfig, backend *backends.SimulatedBackend)
predeploys.DevL2OutputOracleAddr,
config.PortalGuardian,
true, // _paused
predeploys.DevSystemConfigAddr,
},
},
{
......@@ -342,6 +376,7 @@ func l1Deployer(backend *backends.SimulatedBackend, opts *bind.TransactOpts, dep
deployment.Args[3].(common.Hash),
deployment.Args[4].(uint64),
deployment.Args[5].(common.Address),
deployment.Args[6].(bindings.ResourceMeteringResourceConfig),
)
case "L2OutputOracle":
_, tx, _, err = bindings.DeployL2OutputOracle(
......@@ -362,6 +397,7 @@ func l1Deployer(backend *backends.SimulatedBackend, opts *bind.TransactOpts, dep
deployment.Args[0].(common.Address),
deployment.Args[1].(common.Address),
deployment.Args[2].(bool),
deployment.Args[3].(common.Address),
)
case "L1CrossDomainMessenger":
_, tx, _, err = bindings.DeployL1CrossDomainMessenger(
......@@ -421,6 +457,15 @@ func l1Deployer(backend *backends.SimulatedBackend, opts *bind.TransactOpts, dep
func upgradeProxy(backend *backends.SimulatedBackend, opts *bind.TransactOpts, proxyAddr common.Address, implAddr common.Address, callData []byte) (*types.Transaction, error) {
var tx *types.Transaction
code, err := backend.CodeAt(context.Background(), implAddr, nil)
if err != nil {
return nil, err
}
if len(code) == 0 {
return nil, fmt.Errorf("no code at %s", implAddr)
}
proxy, err := bindings.NewProxy(proxyAddr, backend)
if err != nil {
return nil, err
......
......@@ -8,6 +8,7 @@ import (
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
......@@ -100,6 +101,30 @@ func TestBuildL1DeveloperGenesis(t *testing.T) {
require.NoError(t, err)
require.Equal(t, "Wrapped Ether", name)
sysCfg, err := bindings.NewSystemConfig(predeploys.DevSystemConfigAddr, sim)
require.NoError(t, err)
cfg, err := sysCfg.ResourceConfig(&bind.CallOpts{})
require.NoError(t, err)
require.Equal(t, cfg, defaultResourceConfig)
owner, err = sysCfg.Owner(&bind.CallOpts{})
require.NoError(t, err)
require.Equal(t, owner, config.FinalSystemOwner)
overhead, err := sysCfg.Overhead(&bind.CallOpts{})
require.NoError(t, err)
require.Equal(t, overhead.Uint64(), config.GasPriceOracleOverhead)
scalar, err := sysCfg.Scalar(&bind.CallOpts{})
require.NoError(t, err)
require.Equal(t, scalar.Uint64(), config.GasPriceOracleScalar)
batcherHash, err := sysCfg.BatcherHash(&bind.CallOpts{})
require.NoError(t, err)
require.Equal(t, common.Hash(batcherHash), config.BatchSenderAddress.Hash())
gasLimit, err := sysCfg.GasLimit(&bind.CallOpts{})
require.NoError(t, err)
require.Equal(t, gasLimit, uint64(config.L2GenesisBlockGasLimit))
unsafeBlockSigner, err := sysCfg.UnsafeBlockSigner(&bind.CallOpts{})
require.NoError(t, err)
require.Equal(t, unsafeBlockSigner, config.P2PSequencerAddress)
// test that we can do deposits, etc.
priv, err := crypto.HexToECDSA("ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80")
require.NoError(t, err)
......
......@@ -16,7 +16,7 @@
"l1BlockTime": 15,
"l1GenesisBlockNonce": "0x0",
"cliqueSignerAddress": "0x0000000000000000000000000000000000000000",
"l1GenesisBlockGasLimit": "0xe4e1c0",
"l1GenesisBlockGasLimit": "0x1c9c380",
"l1GenesisBlockDifficulty": "0x1",
"finalSystemOwner": "0x0000000000000000000000000000000000000111",
"portalGuardian": "0x0000000000000000000000000000000000000112",
......@@ -29,7 +29,7 @@
"l1GenesisBlockTimestamp": "0x0",
"l1GenesisBlockBaseFeePerGas": "0x3b9aca00",
"l2GenesisBlockNonce": "0x0",
"l2GenesisBlockGasLimit": "0xe4e1c0",
"l2GenesisBlockGasLimit": "0x1c9c380",
"l2GenesisBlockDifficulty": "0x1",
"l2GenesisBlockMixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"l2GenesisBlockNumber": "0x0",
......
......@@ -72,6 +72,7 @@ func NewL1Replica(t Testing, log log.Logger, genesis *core.Genesis) *L1Replica {
backend, err := eth.New(n, ethCfg)
require.NoError(t, err)
backend.Merger().FinalizePoS()
n.RegisterAPIs(tracers.APIs(backend.APIBackend))
......
......@@ -6,16 +6,18 @@ import (
"math/big"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-proposer/metrics"
"github.com/ethereum-optimism/optimism/op-proposer/proposer"
opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
)
......@@ -26,51 +28,91 @@ type ProposerCfg struct {
}
type L2Proposer struct {
log log.Logger
l1 *ethclient.Client
driver *proposer.L2OutputSubmitter
address common.Address
lastTx common.Hash
log log.Logger
l1 *ethclient.Client
driver *proposer.L2OutputSubmitter
address common.Address
privKey *ecdsa.PrivateKey
contractAddr common.Address
lastTx common.Hash
}
type fakeTxMgr struct {
from common.Address
}
func (f fakeTxMgr) From() common.Address {
return f.from
}
func (f fakeTxMgr) Send(_ context.Context, _ txmgr.TxCandidate) (*types.Receipt, error) {
panic("unimplemented")
}
func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Client, rollupCl *sources.RollupClient) *L2Proposer {
signer := func(chainID *big.Int) opcrypto.SignerFn {
s := opcrypto.PrivateKeySignerFn(cfg.ProposerKey, chainID)
return func(_ context.Context, addr common.Address, tx *types.Transaction) (*types.Transaction, error) {
return s(addr, tx)
}
}
from := crypto.PubkeyToAddress(cfg.ProposerKey.PublicKey)
proposerCfg := proposer.Config{
L2OutputOracleAddr: cfg.OutputOracleAddr,
PollInterval: time.Second,
TxManagerConfig: txmgr.Config{
ResubmissionTimeout: 5 * time.Second,
ReceiptQueryInterval: time.Second,
NumConfirmations: 1,
SafeAbortNonceTooLowCount: 4,
From: from,
// Signer is loaded in `proposer.NewL2OutputSubmitter`
},
L1Client: l1,
RollupClient: rollupCl,
AllowNonFinalized: cfg.AllowNonFinalized,
From: from,
SignerFnFactory: signer,
L1Client: l1,
RollupClient: rollupCl,
AllowNonFinalized: cfg.AllowNonFinalized,
// We use custom signing here instead of using the transaction manager.
TxManager: fakeTxMgr{from: crypto.PubkeyToAddress(cfg.ProposerKey.PublicKey)},
}
dr, err := proposer.NewL2OutputSubmitter(proposerCfg, log)
dr, err := proposer.NewL2OutputSubmitter(proposerCfg, log, metrics.NoopMetrics)
require.NoError(t, err)
return &L2Proposer{
log: log,
l1: l1,
driver: dr,
address: crypto.PubkeyToAddress(cfg.ProposerKey.PublicKey),
log: log,
l1: l1,
driver: dr,
address: crypto.PubkeyToAddress(cfg.ProposerKey.PublicKey),
privKey: cfg.ProposerKey,
contractAddr: cfg.OutputOracleAddr,
}
}
// sendTx reimplements creating & sending transactions because we need to do the final send asynchronously
// in the action tests, while the real system does it synchronously.
func (p *L2Proposer) sendTx(t Testing, data []byte) {
gasTipCap := big.NewInt(2 * params.GWei)
pendingHeader, err := p.l1.HeaderByNumber(t.Ctx(), big.NewInt(-1))
require.NoError(t, err, "need l1 pending header for gas price estimation")
gasFeeCap := new(big.Int).Add(gasTipCap, new(big.Int).Mul(pendingHeader.BaseFee, big.NewInt(2)))
chainID, err := p.l1.ChainID(t.Ctx())
require.NoError(t, err)
nonce, err := p.l1.NonceAt(t.Ctx(), p.address, nil)
require.NoError(t, err)
gasLimit, err := p.l1.EstimateGas(t.Ctx(), ethereum.CallMsg{
From: p.address,
To: &p.contractAddr,
GasFeeCap: gasFeeCap,
GasTipCap: gasTipCap,
Data: data,
})
require.NoError(t, err)
rawTx := &types.DynamicFeeTx{
Nonce: nonce,
To: &p.contractAddr,
Data: data,
GasFeeCap: gasFeeCap,
GasTipCap: gasTipCap,
Gas: gasLimit,
ChainID: chainID,
}
tx, err := types.SignNewTx(p.privKey, types.LatestSignerForChainID(chainID), rawTx)
require.NoError(t, err, "need to sign tx")
err = p.l1.SendTransaction(t.Ctx(), tx)
require.NoError(t, err, "need to send tx")
p.lastTx = tx.Hash()
}
func (p *L2Proposer) CanPropose(t Testing) bool {
_, shouldPropose, err := p.driver.FetchNextOutputInfo(t.Ctx())
require.NoError(t, err)
......@@ -84,15 +126,12 @@ func (p *L2Proposer) ActMakeProposalTx(t Testing) {
}
require.NoError(t, err)
tx, err := p.driver.CreateProposalTx(t.Ctx(), output)
txData, err := p.driver.ProposeL2OutputTxData(output)
require.NoError(t, err)
// Note: Use L1 instead of the output submitter's transaction manager because
// this is non-blocking while the txmgr is blocking & deadlocks the tests
err = p.l1.SendTransaction(t.Ctx(), tx)
require.NoError(t, err)
p.lastTx = tx.Hash()
p.sendTx(t, txData)
}
func (p *L2Proposer) LastProposalTx() common.Hash {
......
......@@ -79,7 +79,7 @@ func MakeDeployParams(t require.TestingT, tp *TestParams) *DeployParams {
L1GenesisBlockNonce: 0,
CliqueSignerAddress: common.Address{}, // proof of stake, no clique
L1GenesisBlockTimestamp: hexutil.Uint64(time.Now().Unix()),
L1GenesisBlockGasLimit: 15_000_000,
L1GenesisBlockGasLimit: 30_000_000,
L1GenesisBlockDifficulty: uint64ToBig(1),
L1GenesisBlockMixHash: common.Hash{},
L1GenesisBlockCoinbase: common.Address{},
......@@ -90,7 +90,7 @@ func MakeDeployParams(t require.TestingT, tp *TestParams) *DeployParams {
FinalizationPeriodSeconds: 12,
L2GenesisBlockNonce: 0,
L2GenesisBlockGasLimit: 15_000_000,
L2GenesisBlockGasLimit: 30_000_000,
L2GenesisBlockDifficulty: uint64ToBig(0),
L2GenesisBlockMixHash: common.Hash{},
L2GenesisBlockNumber: 0,
......
......@@ -11,6 +11,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
......@@ -21,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node"
)
......@@ -112,7 +114,6 @@ func initL1Geth(cfg *SystemConfig, genesis *core.Genesis, opts ...GethOption) (*
ethConfig := &ethconfig.Config{
NetworkId: cfg.DeployConfig.L1ChainID,
Genesis: genesis,
Miner: miner.Config{Etherbase: cfg.DeployConfig.CliqueSignerAddress},
}
nodeConfig := &node.Config{
Name: "l1-geth",
......@@ -128,44 +129,107 @@ func initL1Geth(cfg *SystemConfig, genesis *core.Genesis, opts ...GethOption) (*
if err != nil {
return nil, nil, err
}
// Activate merge
l1Eth.Merger().FinalizePoS()
// Clique does not have safe/finalized block info. But we do want to test the usage of that,
// since post-merge L1 has it (incl. Goerli testnet which is already upgraded). So we mock it on top of clique.
l1Node.RegisterLifecycle(&fakeSafeFinalizedL1{
eth: l1Eth,
// Instead of running a whole beacon node, we run this fake-proof-of-stake sidecar that sequences L1 blocks using the Engine API.
l1Node.RegisterLifecycle(&fakePoS{
eth: l1Eth,
log: log.Root(), // geth logger is global anyway. Would be nice to replace with a local logger though.
blockTime: cfg.DeployConfig.L1BlockTime,
// for testing purposes we make it really fast, otherwise we don't see it finalize in short tests
finalizedDistance: 8,
safeDistance: 4,
engineAPI: catalyst.NewConsensusAPI(l1Eth),
})
return l1Node, l1Eth, nil
}
type fakeSafeFinalizedL1 struct {
eth *eth.Ethereum
// fakePoS is a testing-only utility to attach to Geth,
// to build a fake proof-of-stake L1 chain with fixed block time and basic lagging safe/finalized blocks.
type fakePoS struct {
eth *eth.Ethereum
log log.Logger
blockTime uint64
finalizedDistance uint64
safeDistance uint64
sub ethereum.Subscription
}
var _ node.Lifecycle = (*fakeSafeFinalizedL1)(nil)
engineAPI *catalyst.ConsensusAPI
sub ethereum.Subscription
}
func (f *fakeSafeFinalizedL1) Start() error {
headChanges := make(chan core.ChainHeadEvent, 10)
headsSub := f.eth.BlockChain().SubscribeChainHeadEvent(headChanges)
func (f *fakePoS) Start() error {
f.sub = event.NewSubscription(func(quit <-chan struct{}) error {
defer headsSub.Unsubscribe()
// poll every half second: often enough to catch up with any block time when ticks are missed
t := time.NewTicker(time.Second / 2)
for {
select {
case head := <-headChanges:
num := head.Block.NumberU64()
if num > f.finalizedDistance {
toFinalize := f.eth.BlockChain().GetHeaderByNumber(num - f.finalizedDistance)
f.eth.BlockChain().SetFinalized(toFinalize)
case now := <-t.C:
chain := f.eth.BlockChain()
head := chain.CurrentBlock()
finalized := chain.CurrentFinalBlock()
if finalized == nil { // fallback to genesis if nothing is finalized
finalized = chain.Genesis().Header()
}
safe := chain.CurrentSafeBlock()
if safe == nil { // fallback to finalized if nothing is safe
safe = finalized
}
if head.Number.Uint64() > f.finalizedDistance { // progress finalized block, if we can
finalized = f.eth.BlockChain().GetHeaderByNumber(head.Number.Uint64() - f.finalizedDistance)
}
if head.Number.Uint64() > f.safeDistance { // progress safe block, if we can
safe = f.eth.BlockChain().GetHeaderByNumber(head.Number.Uint64() - f.safeDistance)
}
// start building the block as soon as we are past the current head time
if head.Time >= uint64(now.Unix()) {
continue
}
res, err := f.engineAPI.ForkchoiceUpdatedV1(engine.ForkchoiceStateV1{
HeadBlockHash: head.Hash(),
SafeBlockHash: safe.Hash(),
FinalizedBlockHash: finalized.Hash(),
}, &engine.PayloadAttributes{
Timestamp: head.Time + f.blockTime,
Random: common.Hash{},
SuggestedFeeRecipient: head.Coinbase,
})
if err != nil {
f.log.Error("failed to start building L1 block", "err", err)
continue
}
if res.PayloadID == nil {
f.log.Error("failed to start block building", "res", res)
continue
}
// delay sealing if we are not already behind
delay := time.Until(time.Unix(int64(head.Time+f.blockTime), 0))
tim := time.NewTimer(delay)
select {
case <-tim.C:
// no-op
case <-quit:
tim.Stop()
return nil
}
payload, err := f.engineAPI.GetPayloadV1(*res.PayloadID)
if err != nil {
f.log.Error("failed to finish building L1 block", "err", err)
continue
}
if _, err := f.engineAPI.NewPayloadV1(*payload); err != nil {
f.log.Error("failed to insert built L1 block", "err", err)
continue
}
if num > f.safeDistance {
toSafe := f.eth.BlockChain().GetHeaderByNumber(num - f.safeDistance)
f.eth.BlockChain().SetSafe(toSafe)
if _, err := f.engineAPI.ForkchoiceUpdatedV1(engine.ForkchoiceStateV1{
HeadBlockHash: payload.BlockHash,
SafeBlockHash: safe.Hash(),
FinalizedBlockHash: finalized.Hash(),
}, nil); err != nil {
f.log.Error("failed to make built L1 block canonical", "err", err)
continue
}
case <-quit:
return nil
......@@ -175,7 +239,7 @@ func (f *fakeSafeFinalizedL1) Start() error {
return nil
}
func (f *fakeSafeFinalizedL1) Stop() error {
func (f *fakePoS) Stop() error {
f.sub.Unsubscribe()
return nil
}
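The build loop above is the standard Engine API block-production handshake. In outline, this is a sketch with error handling elided, where `fcState` and `attrs` stand in for the forkchoice state and payload attributes assembled in the loop:

```go
// 1. Ask the engine to start building on top of the current head.
res, _ := engineAPI.ForkchoiceUpdatedV1(fcState, &attrs) // returns a PayloadID
// 2. After the block time elapses, fetch the built payload.
payload, _ := engineAPI.GetPayloadV1(*res.PayloadID)
// 3. Import the payload into the chain.
_, _ = engineAPI.NewPayloadV1(*payload)
// 4. Move the head to the new block to make it canonical.
_, _ = engineAPI.ForkchoiceUpdatedV1(engine.ForkchoiceStateV1{
	HeadBlockHash:      payload.BlockHash,
	SafeBlockHash:      fcState.SafeBlockHash,
	FinalizedBlockHash: fcState.FinalizedBlockHash,
}, nil)
```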
......
......@@ -15,8 +15,10 @@ import (
batchermetrics "github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/sources"
proposermetrics "github.com/ethereum-optimism/optimism/op-proposer/metrics"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
......@@ -269,14 +271,18 @@ func TestMigration(t *testing.T) {
snapLog.SetHandler(log.DiscardHandler())
rollupNodeConfig := &node.Config{
L1: &node.L1EndpointConfig{
L1NodeAddr: forkedL1URL,
L1TrustRPC: false,
L1RPCKind: sources.RPCKindBasic,
L1NodeAddr: forkedL1URL,
L1TrustRPC: false,
L1RPCKind: sources.RPCKindBasic,
RateLimit: 0,
BatchSize: 20,
HttpPollInterval: 12 * time.Second,
},
L2: &node.L2EndpointConfig{
L2EngineAddr: gethNode.HTTPAuthEndpoint(),
L2EngineJWTSecret: testingJWTSecret,
},
L2Sync: &node.PreparedL2SyncEndpoint{Client: nil, TrustRPC: false},
Driver: driver.Config{
VerifierConfDepth: 0,
SequencerConfDepth: 0,
......@@ -324,45 +330,53 @@ func TestMigration(t *testing.T) {
})
batcher, err := bss.NewBatchSubmitterFromCLIConfig(bss.CLIConfig{
L1EthRpc: forkedL1URL,
L2EthRpc: gethNode.WSEndpoint(),
RollupRpc: rollupNode.HTTPEndpoint(),
MaxChannelDuration: 1,
MaxL1TxSize: 120_000,
TargetL1TxSize: 100_000,
TargetNumFrames: 1,
ApproxComprRatio: 0.4,
SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond,
NumConfirmations: 1,
ResubmissionTimeout: 5 * time.Second,
SafeAbortNonceTooLowCount: 3,
L1EthRpc: forkedL1URL,
L2EthRpc: gethNode.WSEndpoint(),
RollupRpc: rollupNode.HTTPEndpoint(),
MaxChannelDuration: 1,
MaxL1TxSize: 120_000,
TargetL1TxSize: 100_000,
TargetNumFrames: 1,
ApproxComprRatio: 0.4,
SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond,
TxMgrConfig: txmgr.CLIConfig{
L1RPCURL: forkedL1URL,
PrivateKey: hexPriv(secrets.Batcher),
NumConfirmations: 1,
ResubmissionTimeout: 5 * time.Second,
SafeAbortNonceTooLowCount: 3,
},
LogConfig: oplog.CLIConfig{
Level: "info",
Format: "text",
},
PrivateKey: hexPriv(secrets.Batcher),
}, lgr.New("module", "batcher"), batchermetrics.NoopMetrics)
require.NoError(t, err)
t.Cleanup(func() {
batcher.StopIfRunning()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
batcher.StopIfRunning(ctx)
})
proposer, err := l2os.NewL2OutputSubmitterFromCLIConfig(l2os.CLIConfig{
L1EthRpc: forkedL1URL,
RollupRpc: rollupNode.HTTPEndpoint(),
L2OOAddress: l2OS.Address.String(),
PollInterval: 50 * time.Millisecond,
NumConfirmations: 1,
ResubmissionTimeout: 3 * time.Second,
SafeAbortNonceTooLowCount: 3,
AllowNonFinalized: true,
L1EthRpc: forkedL1URL,
RollupRpc: rollupNode.HTTPEndpoint(),
L2OOAddress: l2OS.Address.String(),
PollInterval: 50 * time.Millisecond,
AllowNonFinalized: true,
TxMgrConfig: txmgr.CLIConfig{
L1RPCURL: forkedL1URL,
PrivateKey: hexPriv(secrets.Proposer),
NumConfirmations: 1,
ResubmissionTimeout: 3 * time.Second,
SafeAbortNonceTooLowCount: 3,
},
LogConfig: oplog.CLIConfig{
Level: "info",
Format: "text",
},
PrivateKey: hexPriv(secrets.Proposer),
}, lgr.New("module", "proposer"))
}, lgr.New("module", "proposer"), proposermetrics.NoopMetrics)
require.NoError(t, err)
t.Cleanup(func() {
proposer.Stop()
......
......@@ -75,7 +75,7 @@ func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, e
require.Nil(t, node.Start())
auth := rpc.WithHTTPAuth(gn.NewJWTAuth(cfg.JWTSecret))
l2Node, err := client.NewRPC(ctx, logger, node.WSAuthEndpoint(), auth)
l2Node, err := client.NewRPC(ctx, logger, node.WSAuthEndpoint(), client.WithGethRPCOptions(auth))
require.Nil(t, err)
// Finally create the engine client
......
......@@ -37,8 +37,10 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
proposermetrics "github.com/ethereum-optimism/optimism/op-proposer/metrics"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
)
var (
......@@ -72,9 +74,9 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
L1BlockTime: 2,
L1GenesisBlockNonce: 4660,
CliqueSignerAddress: addresses.CliqueSigner,
CliqueSignerAddress: common.Address{}, // op-e2e used to run Clique, but now uses fake Proof of Stake.
L1GenesisBlockTimestamp: hexutil.Uint64(time.Now().Unix()),
L1GenesisBlockGasLimit: 8_000_000,
L1GenesisBlockGasLimit: 30_000_000,
L1GenesisBlockDifficulty: uint642big(1),
L1GenesisBlockMixHash: common.Hash{},
L1GenesisBlockCoinbase: common.Address{},
......@@ -84,7 +86,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
L1GenesisBlockBaseFeePerGas: uint642big(7),
L2GenesisBlockNonce: 0,
L2GenesisBlockGasLimit: 8_000_000,
L2GenesisBlockGasLimit: 30_000_000,
L2GenesisBlockDifficulty: uint642big(1),
L2GenesisBlockMixHash: common.Hash{},
L2GenesisBlockNumber: 0,
......@@ -193,6 +195,9 @@ type SystemConfig struct {
// If the proposer can make proposals for L2 blocks derived from L1 blocks which are not finalized on L1 yet.
NonFinalizedProposals bool
// Explicitly disable batcher, for tests that rely on unsafe L2 payloads
DisableBatcher bool
}
type System struct {
......@@ -215,7 +220,9 @@ func (sys *System) Close() {
sys.L2OutputSubmitter.Stop()
}
if sys.BatchSubmitter != nil {
sys.BatchSubmitter.StopIfRunning()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
sys.BatchSubmitter.StopIfRunning(ctx)
}
for _, node := range sys.RollupNodes {
......@@ -377,11 +384,6 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
didErrAfterStart = true
return nil, err
}
err = l1Backend.StartMining(1)
if err != nil {
didErrAfterStart = true
return nil, err
}
for name, node := range sys.Nodes {
if name == "l1" {
continue
......@@ -409,14 +411,21 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
l2EndpointConfig = sys.Nodes[name].HTTPAuthEndpoint()
}
rollupCfg.L1 = &rollupNode.L1EndpointConfig{
L1NodeAddr: l1EndpointConfig,
L1TrustRPC: false,
L1RPCKind: sources.RPCKindBasic,
L1NodeAddr: l1EndpointConfig,
L1TrustRPC: false,
L1RPCKind: sources.RPCKindBasic,
RateLimit: 0,
BatchSize: 20,
HttpPollInterval: time.Duration(cfg.DeployConfig.L1BlockTime) * time.Second / 10,
}
rollupCfg.L2 = &rollupNode.L2EndpointConfig{
L2EngineAddr: l2EndpointConfig,
L2EngineJWTSecret: cfg.JWTSecret,
}
rollupCfg.L2Sync = &rollupNode.PreparedL2SyncEndpoint{
Client: nil,
TrustRPC: false,
}
}
// Geth Clients
......@@ -559,20 +568,24 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
// L2Output Submitter
sys.L2OutputSubmitter, err = l2os.NewL2OutputSubmitterFromCLIConfig(l2os.CLIConfig{
L1EthRpc: sys.Nodes["l1"].WSEndpoint(),
RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(),
L2OOAddress: predeploys.DevL2OutputOracleAddr.String(),
PollInterval: 50 * time.Millisecond,
NumConfirmations: 1,
ResubmissionTimeout: 3 * time.Second,
SafeAbortNonceTooLowCount: 3,
AllowNonFinalized: cfg.NonFinalizedProposals,
L1EthRpc: sys.Nodes["l1"].WSEndpoint(),
RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(),
L2OOAddress: predeploys.DevL2OutputOracleAddr.String(),
PollInterval: 50 * time.Millisecond,
TxMgrConfig: txmgr.CLIConfig{
L1RPCURL: sys.Nodes["l1"].WSEndpoint(),
PrivateKey: hexPriv(cfg.Secrets.Proposer),
NumConfirmations: 1,
SafeAbortNonceTooLowCount: 3,
ResubmissionTimeout: 3 * time.Second,
ReceiptQueryInterval: 50 * time.Millisecond,
},
AllowNonFinalized: cfg.NonFinalizedProposals,
LogConfig: oplog.CLIConfig{
Level: "info",
Format: "text",
},
PrivateKey: hexPriv(cfg.Secrets.Proposer),
}, sys.cfg.Loggers["proposer"])
}, sys.cfg.Loggers["proposer"], proposermetrics.NoopMetrics)
if err != nil {
return nil, fmt.Errorf("unable to setup l2 output submitter: %w", err)
}
......@@ -583,31 +596,38 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
// Batch Submitter
sys.BatchSubmitter, err = bss.NewBatchSubmitterFromCLIConfig(bss.CLIConfig{
L1EthRpc: sys.Nodes["l1"].WSEndpoint(),
L2EthRpc: sys.Nodes["sequencer"].WSEndpoint(),
RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(),
MaxChannelDuration: 1,
MaxL1TxSize: 120_000,
TargetL1TxSize: 100_000,
TargetNumFrames: 1,
ApproxComprRatio: 0.4,
SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond,
NumConfirmations: 1,
ResubmissionTimeout: 5 * time.Second,
SafeAbortNonceTooLowCount: 3,
L1EthRpc: sys.Nodes["l1"].WSEndpoint(),
L2EthRpc: sys.Nodes["sequencer"].WSEndpoint(),
RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(),
MaxChannelDuration: 1,
MaxL1TxSize: 120_000,
TargetL1TxSize: 100_000,
TargetNumFrames: 1,
ApproxComprRatio: 0.4,
SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond,
TxMgrConfig: txmgr.CLIConfig{
L1RPCURL: sys.Nodes["l1"].WSEndpoint(),
PrivateKey: hexPriv(cfg.Secrets.Batcher),
NumConfirmations: 1,
SafeAbortNonceTooLowCount: 3,
ResubmissionTimeout: 3 * time.Second,
ReceiptQueryInterval: 50 * time.Millisecond,
},
LogConfig: oplog.CLIConfig{
Level: "info",
Format: "text",
},
PrivateKey: hexPriv(cfg.Secrets.Batcher),
}, sys.cfg.Loggers["batcher"], batchermetrics.NoopMetrics)
if err != nil {
return nil, fmt.Errorf("failed to setup batch submitter: %w", err)
}
if err := sys.BatchSubmitter.Start(); err != nil {
return nil, fmt.Errorf("unable to start batch submitter: %w", err)
// Batcher may be enabled later
if !sys.cfg.DisableBatcher {
if err := sys.BatchSubmitter.Start(); err != nil {
return nil, fmt.Errorf("unable to start batch submitter: %w", err)
}
}
return sys, nil
......
......@@ -304,7 +304,7 @@ func TestPendingGasLimit(t *testing.T) {
cfg := DefaultSystemConfig(t)
// configure the L2 gas limit to be high, and the pending gas limits to be lower for resource saving.
cfg.DeployConfig.L2GenesisBlockGasLimit = 20_000_000
cfg.DeployConfig.L2GenesisBlockGasLimit = 30_000_000
cfg.GethOptions["sequencer"] = []GethOption{
func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error {
ethCfg.Miner.GasCeil = 10_000_000
......@@ -342,8 +342,8 @@ func TestPendingGasLimit(t *testing.T) {
for {
checkGasLimit(l2Seq, big.NewInt(-1), 10_000_000)
checkGasLimit(l2Verif, big.NewInt(-1), 9_000_000)
checkGasLimit(l2Seq, nil, 20_000_000)
latestVerifHeader := checkGasLimit(l2Verif, nil, 20_000_000)
checkGasLimit(l2Seq, nil, 30_000_000)
latestVerifHeader := checkGasLimit(l2Verif, nil, 30_000_000)
// Stop once the verifier passes genesis:
// this implies we checked a new block from the sequencer, on both sequencer and verifier nodes.
......@@ -649,7 +649,7 @@ func TestSystemMockP2P(t *testing.T) {
require.Contains(t, received, receiptVerif.BlockHash)
}
// TestSystemMockP2P sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that
// TestSystemRPCAltSync sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that
// the nodes can sync L2 blocks before they are confirmed on L1.
//
// Test steps:
......@@ -660,24 +660,28 @@ func TestSystemMockP2P(t *testing.T) {
// 6. Wait for the RPC sync method to grab the block from the sequencer over RPC and insert it into the verifier's unsafe chain.
// 7. Wait for the verifier to sync the unsafe chain into the safe chain.
// 8. Verify that the TX is included in the verifier's safe chain.
func TestSystemMockAltSync(t *testing.T) {
func TestSystemRPCAltSync(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
cfg := DefaultSystemConfig(t)
// slow down L1 blocks so we can see the L2 blocks arrive well before the L1 blocks do.
// Keep the seq window small so the L2 chain starts quickly
cfg.DeployConfig.L1BlockTime = 10
// the default is nil, but this may change in the future.
// This test must ensure the blocks are not synced via gossip, but instead via the RPC-based alt-sync.
cfg.P2PTopology = nil
// Disable batcher, so there will not be any L1 data to sync from
cfg.DisableBatcher = true
var published, received []common.Hash
var published, received []string
seqTracer, verifTracer := new(FnTracer), new(FnTracer)
// The sequencer still publishes the blocks to the tracer, even if they do not reach the network due to disabled P2P
seqTracer.OnPublishL2PayloadFn = func(ctx context.Context, payload *eth.ExecutionPayload) {
published = append(published, payload.BlockHash)
published = append(published, payload.ID().String())
}
// Blocks are now received via the RPC-based alt-sync method
verifTracer.OnUnsafeL2PayloadFn = func(ctx context.Context, from peer.ID, payload *eth.ExecutionPayload) {
received = append(received, payload.BlockHash)
received = append(received, payload.ID().String())
}
cfg.Nodes["sequencer"].Tracer = seqTracer
cfg.Nodes["verifier"].Tracer = verifTracer
......@@ -687,8 +691,8 @@ func TestSystemMockAltSync(t *testing.T) {
role: "sequencer",
action: func(sCfg *SystemConfig, system *System) {
rpc, _ := system.Nodes["sequencer"].Attach() // never errors
cfg.Nodes["verifier"].L2Sync = &rollupNode.L2SyncRPCConfig{
Rpc: client.NewBaseRPCClient(rpc),
cfg.Nodes["verifier"].L2Sync = &rollupNode.PreparedL2SyncEndpoint{
Client: client.NewBaseRPCClient(rpc),
}
},
})
......@@ -726,7 +730,7 @@ func TestSystemMockAltSync(t *testing.T) {
require.Equal(t, receiptSeq, receiptVerif)
// Verify that the tx was received via RPC sync (P2P is disabled)
require.Contains(t, received, receiptVerif.BlockHash)
require.Contains(t, received, eth.BlockID{Hash: receiptVerif.BlockHash, Number: receiptVerif.BlockNumber.Uint64()}.String())
// Verify that everything that was received was published
require.GreaterOrEqual(t, len(published), len(received))
......@@ -1445,7 +1449,7 @@ func TestStopStartBatcher(t *testing.T) {
require.Greater(t, newSeqStatus.SafeL2.Number, seqStatus.SafeL2.Number, "Safe chain did not advance")
// stop the batch submission
err = sys.BatchSubmitter.Stop()
err = sys.BatchSubmitter.Stop(context.Background())
require.Nil(t, err)
// wait for any old safe blocks being submitted / derived
......
......@@ -57,7 +57,7 @@ func NewPollingClient(ctx context.Context, lgr log.Logger, c RPC, opts ...Wrappe
res := &PollingClient{
c: c,
lgr: lgr,
pollRate: 250 * time.Millisecond,
pollRate: 12 * time.Second,
ctx: ctx,
cancel: cancel,
pollReqCh: make(chan struct{}, 1),
......
package client
import (
"context"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/rpc"
"golang.org/x/time/rate"
)
// RateLimitingClient is a wrapper around a pure RPC that implements a global rate-limit on requests.
type RateLimitingClient struct {
c RPC
rl *rate.Limiter
}
// NewRateLimitingClient implements a global rate-limit for all RPC requests.
// A limit of N will ensure that over a long enough time-frame the given number of tokens per second is targeted.
// Burst limits how far off we can be from the target, by specifying how many requests are allowed at once.
func NewRateLimitingClient(c RPC, limit rate.Limit, burst int) *RateLimitingClient {
return &RateLimitingClient{c: c, rl: rate.NewLimiter(limit, burst)}
}
func (b *RateLimitingClient) Close() {
b.c.Close()
}
func (b *RateLimitingClient) CallContext(ctx context.Context, result any, method string, args ...any) error {
if err := b.rl.Wait(ctx); err != nil {
return err
}
cCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
return b.c.CallContext(cCtx, result, method, args...)
}
func (b *RateLimitingClient) BatchCallContext(ctx context.Context, batch []rpc.BatchElem) error {
if err := b.rl.WaitN(ctx, len(batch)); err != nil {
return err
}
cCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
return b.c.BatchCallContext(cCtx, batch)
}
func (b *RateLimitingClient) EthSubscribe(ctx context.Context, channel any, args ...any) (ethereum.Subscription, error) {
if err := b.rl.Wait(ctx); err != nil {
return nil, err
}
return b.c.EthSubscribe(ctx, channel, args...)
}
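A minimal usage sketch for the rate-limiting wrapper. The endpoint URL and limits are illustrative; `NewBaseRPCClient` is the plain wrapper used elsewhere in this package:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum-optimism/optimism/op-node/client"
	"github.com/ethereum/go-ethereum/rpc"
	"golang.org/x/time/rate"
)

func main() {
	ctx := context.Background()
	underlying, err := rpc.DialContext(ctx, "http://localhost:8545") // illustrative endpoint
	if err != nil {
		log.Fatal(err)
	}
	// Target ~10 requests/second on average; bursts of up to 20 are allowed,
	// and a batch call consumes one token per batch element.
	limited := client.NewRateLimitingClient(client.NewBaseRPCClient(underlying), rate.Limit(10), 20)
	var version string
	if err := limited.CallContext(ctx, &version, "web3_clientVersion"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(version)
}
```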
......@@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/time/rate"
"github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum/go-ethereum/rpc"
......@@ -24,27 +25,85 @@ type RPC interface {
EthSubscribe(ctx context.Context, channel any, args ...any) (ethereum.Subscription, error)
}
type rpcConfig struct {
gethRPCOptions []rpc.ClientOption
httpPollInterval time.Duration
backoffAttempts int
limit float64
burst int
}
type RPCOption func(cfg *rpcConfig) error
// WithDialBackoff configures the number of attempts for the initial dial to the RPC,
// attempts are executed with an exponential backoff strategy.
func WithDialBackoff(attempts int) RPCOption {
return func(cfg *rpcConfig) error {
cfg.backoffAttempts = attempts
return nil
}
}
// WithHttpPollInterval configures the RPC to poll at the given rate, in case RPC subscriptions are not available.
func WithHttpPollInterval(duration time.Duration) RPCOption {
return func(cfg *rpcConfig) error {
cfg.httpPollInterval = duration
return nil
}
}
// WithGethRPCOptions passes the list of go-ethereum RPC options to the internal RPC instance.
func WithGethRPCOptions(gethRPCOptions ...rpc.ClientOption) RPCOption {
return func(cfg *rpcConfig) error {
cfg.gethRPCOptions = append(cfg.gethRPCOptions, gethRPCOptions...)
return nil
}
}
// WithRateLimit configures the RPC to target the given rate limit (in requests / second).
// See NewRateLimitingClient for more details.
func WithRateLimit(rateLimit float64, burst int) RPCOption {
return func(cfg *rpcConfig) error {
cfg.limit = rateLimit
cfg.burst = burst
return nil
}
}
// NewRPC returns the correct client.RPC instance for a given RPC url.
func NewRPC(ctx context.Context, lgr log.Logger, addr string, opts ...rpc.ClientOption) (RPC, error) {
underlying, err := DialRPCClientWithBackoff(ctx, lgr, addr, opts...)
func NewRPC(ctx context.Context, lgr log.Logger, addr string, opts ...RPCOption) (RPC, error) {
var cfg rpcConfig
for i, opt := range opts {
if err := opt(&cfg); err != nil {
return nil, fmt.Errorf("rpc option %d failed to apply to RPC config: %w", i, err)
}
}
if cfg.backoffAttempts < 1 { // default to at least 1 attempt, or it always fails to dial.
cfg.backoffAttempts = 1
}
underlying, err := dialRPCClientWithBackoff(ctx, lgr, addr, cfg.backoffAttempts, cfg.gethRPCOptions...)
if err != nil {
return nil, err
}
wrapped := &BaseRPCClient{
c: underlying,
var wrapped RPC = &BaseRPCClient{c: underlying}
if cfg.limit != 0 {
wrapped = NewRateLimitingClient(wrapped, rate.Limit(cfg.limit), cfg.burst)
}
if httpRegex.MatchString(addr) {
return NewPollingClient(ctx, lgr, wrapped), nil
wrapped = NewPollingClient(ctx, lgr, wrapped, WithPollRate(cfg.httpPollInterval))
}
return wrapped, nil
}
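A sketch of the new functional-options dial path, combining the options defined above. The endpoint and values are illustrative, and the usual imports (context, time, the op-node client package, geth's log package) are assumed:

```go
// dialL1 dials an L1 RPC with backoff, HTTP-polling fallback, and a rate limit.
func dialL1(ctx context.Context, logger log.Logger) (client.RPC, error) {
	return client.NewRPC(ctx, logger, "http://localhost:8545",
		client.WithDialBackoff(10),                  // retry the initial dial with exponential backoff
		client.WithHttpPollInterval(12*time.Second), // poll for new heads if subscriptions are unavailable
		client.WithRateLimit(10, 20),                // self-imposed ~10 req/s, bursts of up to 20
	)
}
```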
// Dials a JSON-RPC endpoint repeatedly, with a backoff, until a client connection is established. Auth is optional.
func DialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string, opts ...rpc.ClientOption) (*rpc.Client, error) {
func dialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string, attempts int, opts ...rpc.ClientOption) (*rpc.Client, error) {
bOff := backoff.Exponential()
var ret *rpc.Client
err := backoff.DoCtx(ctx, 10, bOff, func() error {
err := backoff.DoCtx(ctx, attempts, bOff, func() error {
client, err := rpc.DialOptions(ctx, addr, opts...)
if err != nil {
if client == nil {
......
......@@ -52,6 +52,12 @@ jq "select(.valid_data == false)|.tx.hash" $TX_DIR
# Select all channels that are not ready and then get the id and inclusion block & tx hash of the first frame.
jq "select(.is_ready == false)|[.id, .frames[0].inclusion_block, .frames[0].transaction_hash]" $CHANNEL_DIR
# Show all of the frames in a channel without seeing the batches or frame data
jq 'del(.batches)|del(.frames[]|.frame.data)' $CHANNEL_FILE
# Show all batches (without timestamps) in a channel
jq '.batches|del(.[]|.Transactions)' $CHANNEL_FILE
```
......
......@@ -17,3 +17,9 @@ const (
// - L2: Derived chain tip from finalized L1 data
Finalized = "finalized"
)
func (label BlockLabel) Arg() any { return string(label) }
func (BlockLabel) CheckID(id BlockID) error {
return nil
}
......@@ -14,10 +14,10 @@ import (
// Flags
const envVarPrefix = "OP_NODE_"
const envVarPrefix = "OP_NODE"
func prefixEnvVar(name string) string {
return envVarPrefix + name
return envVarPrefix + "_" + name
}
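The resulting names are unchanged: the prefix constant dropped its trailing underscore, which prefixEnvVar now inserts instead. For example:

```go
prefixEnvVar("L1_RPC_RATE_LIMIT") // "OP_NODE_L1_RPC_RATE_LIMIT", same as before the change
```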
var (
......@@ -75,6 +75,24 @@ var (
return &out
}(),
}
L1RPCRateLimit = cli.Float64Flag{
Name: "l1.rpc-rate-limit",
Usage: "Optional self-imposed global rate-limit on L1 RPC requests, specified in requests / second. Disabled if set to 0.",
EnvVar: prefixEnvVar("L1_RPC_RATE_LIMIT"),
Value: 0,
}
L1RPCMaxBatchSize = cli.IntFlag{
Name: "l1.rpc-max-batch-size",
Usage: "Maximum number of RPC requests to bundle, e.g. during L1 blocks receipt fetching. The L1 RPC rate limit counts this as N items, but allows it to burst at once.",
EnvVar: prefixEnvVar("L1_RPC_MAX_BATCH_SIZE"),
Value: 20,
}
L1HTTPPollInterval = cli.DurationFlag{
Name: "l1.http-poll-interval",
Usage: "Polling interval for latest-block subscription when using an HTTP RPC provider. Ignored for other types of RPC endpoints.",
EnvVar: prefixEnvVar("L1_HTTP_POLL_INTERVAL"),
Value: time.Second * 12,
}
L2EngineJWTSecret = cli.StringFlag{
Name: "l2.jwt-secret",
Usage: "Path to JWT secret key. Keys are 32 bytes, hex encoded in a file. A new key will be generated if left empty.",
......@@ -100,6 +118,13 @@ var (
Usage: "Initialize the sequencer in a stopped state. The sequencer can be started using the admin_startSequencer RPC",
EnvVar: prefixEnvVar("SEQUENCER_STOPPED"),
}
SequencerMaxSafeLagFlag = cli.Uint64Flag{
Name: "sequencer.max-safe-lag",
Usage: "Maximum number of L2 blocks for restricting the distance between L2 safe and unsafe. Disabled if 0.",
EnvVar: prefixEnvVar("SEQUENCER_MAX_SAFE_LAG"),
Required: false,
Value: 0,
}
SequencerL1Confs = cli.Uint64Flag{
Name: "sequencer.l1-confs",
Usage: "Number of L1 blocks to keep distance from the L1 head as a sequencer for picking an L1 origin.",
......@@ -175,6 +200,13 @@ var (
EnvVar: prefixEnvVar("L2_BACKUP_UNSAFE_SYNC_RPC"),
Required: false,
}
BackupL2UnsafeSyncRPCTrustRPC = cli.StringFlag{
Name: "l2.backup-unsafe-sync-rpc.trustrpc",
Usage: "Like l1.trustrpc, configure if response data from the RPC needs to be verified, e.g. blockhash computation." +
"This does not include checks if the blockhash is part of the canonical chain.",
EnvVar: prefixEnvVar("L2_BACKUP_UNSAFE_SYNC_RPC_TRUST_RPC"),
Required: false,
}
)
var requiredFlags = []cli.Flag{
......@@ -189,10 +221,14 @@ var optionalFlags = []cli.Flag{
Network,
L1TrustRPC,
L1RPCProviderKind,
L1RPCRateLimit,
L1RPCMaxBatchSize,
L1HTTPPollInterval,
L2EngineJWTSecret,
VerifierL1Confs,
SequencerEnabledFlag,
SequencerStoppedFlag,
SequencerMaxSafeLagFlag,
SequencerL1Confs,
L1EpochPollIntervalFlag,
RPCEnableAdmin,
......@@ -207,6 +243,7 @@ var optionalFlags = []cli.Flag{
HeartbeatMonikerFlag,
HeartbeatURLFlag,
BackupL2UnsafeSyncRPC,
BackupL2UnsafeSyncRPCTrustRPC,
}
// Flags contains the list of configuration options available to the binary.
......
......@@ -34,6 +34,15 @@ var (
Value: "none",
EnvVar: p2pEnv("PEER_SCORING"),
}
PeerScoreBands = cli.StringFlag{
Name: "p2p.score.bands",
Usage: "Sets the peer score bands used primarily for peer score metrics. " +
"Should be provided in following format: <threshold>:<label>;<threshold>:<label>;..." +
"For example: -40:graylist;-20:restricted;0:nopx;20:friend;",
Required: false,
Value: "-40:graylist;-20:restricted;0:nopx;20:friend;",
EnvVar: p2pEnv("SCORE_BANDS"),
}
// Banning Flag - whether or not we want to act on the scoring
Banning = cli.BoolFlag{
......@@ -276,6 +285,10 @@ var p2pFlags = []cli.Flag{
NoDiscovery,
P2PPrivPath,
P2PPrivRaw,
PeerScoring,
PeerScoreBands,
Banning,
TopicScoring,
ListenIP,
ListenTCPPort,
ListenUDPPort,
......
......@@ -15,7 +15,6 @@ import (
pb "github.com/libp2p/go-libp2p-pubsub/pb"
libp2pmetrics "github.com/libp2p/go-libp2p/core/metrics"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
......@@ -66,7 +65,7 @@ type Metricer interface {
RecordSequencerSealingTime(duration time.Duration)
Document() []metrics.DocumentedMetric
// P2P Metrics
RecordPeerScoring(peerID peer.ID, score float64)
SetPeerScores(scores map[string]float64)
}
// Metrics tracks all the metrics for the op-node.
......@@ -287,21 +286,24 @@ func NewMetrics(procName string) *Metrics {
Name: "peer_count",
Help: "Count of currently connected p2p peers",
}),
StreamCount: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Subsystem: "p2p",
Name: "stream_count",
Help: "Count of currently connected p2p streams",
}),
// Notice: We cannot use peer ids as [Labels] in the GaugeVec,
// since each peer id would become a separate metric, flooding Prometheus
// and opening a denial-of-service vector.
//
// [Labels]: https://prometheus.io/docs/practices/naming/#labels
PeerScores: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Subsystem: "p2p",
Name: "peer_scores",
Help: "Peer scoring",
Help: "Count of peer scores grouped by score",
}, []string{
// No label names here since peer ids would open a service attack vector.
// Each peer id would be a separate metric, flooding prometheus.
// See: https://prometheus.io/docs/practices/naming/#labels
"band",
}),
StreamCount: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Subsystem: "p2p",
Name: "stream_count",
Help: "Count of currently connected p2p streams",
}),
GossipEventsTotal: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
......@@ -350,6 +352,14 @@ func NewMetrics(procName string) *Metrics {
}
}
// SetPeerScores updates the peer score [prometheus.GaugeVec].
// This takes a map of labels to scores.
func (m *Metrics) SetPeerScores(scores map[string]float64) {
for label, score := range scores {
m.PeerScores.WithLabelValues(label).Set(score)
}
}
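A sketch of how a caller might report band-grouped scores. The band labels follow the default p2p.score.bands value, and the counts are illustrative:

```go
m := metrics.NewMetrics("op-node")
// One gauge sample per band label instead of one metric per peer id.
m.SetPeerScores(map[string]float64{
	"graylist":   1,
	"restricted": 0,
	"nopx":       3,
	"friend":     12,
})
```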
// RecordInfo sets a pseudo-metric that contains versioning and
// config info for the opnode.
func (m *Metrics) RecordInfo(version string) {
......@@ -491,10 +501,6 @@ func (m *Metrics) RecordGossipEvent(evType int32) {
m.GossipEventsTotal.WithLabelValues(pb.TraceEvent_Type_name[evType]).Inc()
}
func (m *Metrics) RecordPeerScoring(peerID peer.ID, score float64) {
m.PeerScores.WithLabelValues(peerID.String()).Set(score)
}
func (m *Metrics) IncPeerCount() {
m.PeerCount.Inc()
}
......@@ -627,7 +633,7 @@ func (n *noopMetricer) RecordSequencerReset() {
func (n *noopMetricer) RecordGossipEvent(evType int32) {
}
func (n *noopMetricer) RecordPeerScoring(peerID peer.ID, score float64) {
func (n *noopMetricer) SetPeerScores(scores map[string]float64) {
}
func (n *noopMetricer) IncPeerCount() {
......
......@@ -4,8 +4,10 @@ import (
"context"
"errors"
"fmt"
"time"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum/go-ethereum/log"
......@@ -15,12 +17,14 @@ import (
type L2EndpointSetup interface {
// Setup a RPC client to a L2 execution engine to process rollup blocks with.
Setup(ctx context.Context, log log.Logger) (cl client.RPC, err error)
Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (cl client.RPC, rpcCfg *sources.EngineClientConfig, err error)
Check() error
}
type L2SyncEndpointSetup interface {
Setup(ctx context.Context, log log.Logger) (cl client.RPC, err error)
// Setup a RPC client to another L2 node to sync L2 blocks from.
// It may return a nil client with nil error if RPC based sync is not enabled.
Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (cl client.RPC, rpcCfg *sources.SyncClientConfig, err error)
Check() error
}
......@@ -28,7 +32,8 @@ type L1EndpointSetup interface {
// Setup a RPC client to a L1 node to pull rollup input-data from.
// The results of the RPC client may be trusted for faster processing, or strictly validated.
// The kind of the RPC may be non-basic, to optimize RPC usage.
Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, kind sources.RPCProviderKind, err error)
Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (cl client.RPC, rpcCfg *sources.L1ClientConfig, err error)
Check() error
}
type L2EndpointConfig struct {
......@@ -49,17 +54,17 @@ func (cfg *L2EndpointConfig) Check() error {
return nil
}
func (cfg *L2EndpointConfig) Setup(ctx context.Context, log log.Logger) (client.RPC, error) {
func (cfg *L2EndpointConfig) Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (client.RPC, *sources.EngineClientConfig, error) {
if err := cfg.Check(); err != nil {
return nil, err
return nil, nil, err
}
auth := rpc.WithHTTPAuth(gn.NewJWTAuth(cfg.L2EngineJWTSecret))
l2Node, err := client.NewRPC(ctx, log, cfg.L2EngineAddr, auth)
l2Node, err := client.NewRPC(ctx, log, cfg.L2EngineAddr, client.WithGethRPCOptions(auth))
if err != nil {
return nil, err
return nil, nil, err
}
return l2Node, nil
return l2Node, sources.EngineClientDefaultConfig(rollupCfg), nil
}
// PreparedL2Endpoints enables testing with in-process pre-setup RPC connections to L2 engines
......@@ -76,51 +81,51 @@ func (p *PreparedL2Endpoints) Check() error {
var _ L2EndpointSetup = (*PreparedL2Endpoints)(nil)
func (p *PreparedL2Endpoints) Setup(ctx context.Context, log log.Logger) (client.RPC, error) {
return p.Client, nil
func (p *PreparedL2Endpoints) Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (client.RPC, *sources.EngineClientConfig, error) {
return p.Client, sources.EngineClientDefaultConfig(rollupCfg), nil
}
// L2SyncEndpointConfig contains configuration for the fallback sync endpoint
type L2SyncEndpointConfig struct {
// Address of the L2 RPC to use for backup sync
// Address of the L2 RPC to use for backup sync, may be empty if RPC alt-sync is disabled.
L2NodeAddr string
TrustRPC bool
}
var _ L2SyncEndpointSetup = (*L2SyncEndpointConfig)(nil)
func (cfg *L2SyncEndpointConfig) Setup(ctx context.Context, log log.Logger) (client.RPC, error) {
// Setup creates an RPC client to sync from.
// It will return nil without error if no sync method is configured.
func (cfg *L2SyncEndpointConfig) Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (client.RPC, *sources.SyncClientConfig, error) {
if cfg.L2NodeAddr == "" {
return nil, nil, nil
}
l2Node, err := client.NewRPC(ctx, log, cfg.L2NodeAddr)
if err != nil {
return nil, err
return nil, nil, err
}
return l2Node, nil
return l2Node, sources.SyncClientDefaultConfig(rollupCfg, cfg.TrustRPC), nil
}
func (cfg *L2SyncEndpointConfig) Check() error {
if cfg.L2NodeAddr == "" {
return errors.New("empty L2 Node Address")
}
// empty addr is valid, as it is optional.
return nil
}
type L2SyncRPCConfig struct {
// RPC endpoint to use for syncing
Rpc client.RPC
type PreparedL2SyncEndpoint struct {
// RPC endpoint to use for syncing, may be nil if RPC alt-sync is disabled.
Client client.RPC
TrustRPC bool
}
var _ L2SyncEndpointSetup = (*L2SyncRPCConfig)(nil)
var _ L2SyncEndpointSetup = (*PreparedL2SyncEndpoint)(nil)
func (cfg *L2SyncRPCConfig) Setup(ctx context.Context, log log.Logger) (client.RPC, error) {
return cfg.Rpc, nil
func (cfg *PreparedL2SyncEndpoint) Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (client.RPC, *sources.SyncClientConfig, error) {
return cfg.Client, sources.SyncClientDefaultConfig(rollupCfg, cfg.TrustRPC), nil
}
func (cfg *L2SyncRPCConfig) Check() error {
if cfg.Rpc == nil {
return errors.New("rpc cannot be nil")
}
func (cfg *PreparedL2SyncEndpoint) Check() error {
return nil
}
......@@ -135,16 +140,48 @@ type L1EndpointConfig struct {
// L1RPCKind identifies the RPC provider kind that serves the RPC,
// to inform the optimal usage of the RPC for transaction receipts fetching.
L1RPCKind sources.RPCProviderKind
// RateLimit specifies a self-imposed rate-limit on L1 requests. 0 is no rate-limit.
RateLimit float64
// BatchSize specifies the maximum batch-size, which also applies as L1 rate-limit burst amount (if set).
BatchSize int
// HttpPollInterval specifies the interval between polling for the latest L1 block,
// when the RPC is detected to be an HTTP type.
// Websockets or IPC are recommended for efficiently following the changing chain head.
// Setting this to 0 disables polling.
HttpPollInterval time.Duration
}
var _ L1EndpointSetup = (*L1EndpointConfig)(nil)
func (cfg *L1EndpointConfig) Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, kind sources.RPCProviderKind, err error) {
l1Node, err := client.NewRPC(ctx, log, cfg.L1NodeAddr)
func (cfg *L1EndpointConfig) Check() error {
if cfg.BatchSize < 1 || cfg.BatchSize > 500 {
return fmt.Errorf("batch size is invalid or unreasonable: %d", cfg.BatchSize)
}
if cfg.RateLimit < 0 {
return fmt.Errorf("rate limit cannot be negative")
}
return nil
}
func (cfg *L1EndpointConfig) Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (client.RPC, *sources.L1ClientConfig, error) {
opts := []client.RPCOption{
client.WithHttpPollInterval(cfg.HttpPollInterval),
client.WithDialBackoff(10),
}
if cfg.RateLimit != 0 {
opts = append(opts, client.WithRateLimit(cfg.RateLimit, cfg.BatchSize))
}
l1Node, err := client.NewRPC(ctx, log, cfg.L1NodeAddr, opts...)
if err != nil {
return nil, false, sources.RPCKindBasic, fmt.Errorf("failed to dial L1 address (%s): %w", cfg.L1NodeAddr, err)
return nil, nil, fmt.Errorf("failed to dial L1 address (%s): %w", cfg.L1NodeAddr, err)
}
return l1Node, cfg.L1TrustRPC, cfg.L1RPCKind, nil
rpcCfg := sources.L1ClientDefaultConfig(rollupCfg, cfg.L1TrustRPC, cfg.L1RPCKind)
rpcCfg.MaxRequestsPerBatch = cfg.BatchSize
return l1Node, rpcCfg, nil
}
// PreparedL1Endpoint enables testing with an in-process pre-setup RPC connection to L1
......@@ -156,6 +193,14 @@ type PreparedL1Endpoint struct {
var _ L1EndpointSetup = (*PreparedL1Endpoint)(nil)
func (p *PreparedL1Endpoint) Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, kind sources.RPCProviderKind, err error) {
return p.Client, p.TrustRPC, p.RPCProviderKind, nil
func (p *PreparedL1Endpoint) Setup(ctx context.Context, log log.Logger, rollupCfg *rollup.Config) (client.RPC, *sources.L1ClientConfig, error) {
return p.Client, sources.L1ClientDefaultConfig(rollupCfg, p.TrustRPC, p.RPCProviderKind), nil
}
func (cfg *PreparedL1Endpoint) Check() error {
if cfg.Client == nil {
return errors.New("rpc client cannot be nil")
}
return nil
}
......@@ -80,6 +80,9 @@ func (cfg *Config) Check() error {
if err := cfg.L2.Check(); err != nil {
return fmt.Errorf("l2 endpoint config error: %w", err)
}
if err := cfg.L2Sync.Check(); err != nil {
return fmt.Errorf("sync config error: %w", err)
}
if err := cfg.Rollup.Check(); err != nil {
return fmt.Errorf("rollup config error: %w", err)
}
......
......@@ -33,6 +33,7 @@ type OpNode struct {
l1Source *sources.L1Client // L1 Client to fetch data from
l2Driver *driver.Driver // L2 Engine to Sync
l2Source *sources.EngineClient // L2 Execution Engine RPC bindings
rpcSync *sources.SyncClient // Alt-sync RPC client, optional (may be nil)
server *rpcServer // RPC server hosting the rollup-node API
p2pNode *p2p.NodeP2P // P2P node functionality
p2pSigner p2p.Signer // p2p gossip application messages will be signed with this signer
......@@ -86,6 +87,9 @@ func (n *OpNode) init(ctx context.Context, cfg *Config, snapshotLog log.Logger)
if err := n.initL2(ctx, cfg, snapshotLog); err != nil {
return err
}
if err := n.initRPCSync(ctx, cfg); err != nil {
return err
}
if err := n.initP2PSigner(ctx, cfg); err != nil {
return err
}
......@@ -112,14 +116,13 @@ func (n *OpNode) initTracer(ctx context.Context, cfg *Config) error {
}
func (n *OpNode) initL1(ctx context.Context, cfg *Config) error {
l1Node, trustRPC, rpcProvKind, err := cfg.L1.Setup(ctx, n.log)
l1Node, rpcCfg, err := cfg.L1.Setup(ctx, n.log, &cfg.Rollup)
if err != nil {
return fmt.Errorf("failed to get L1 RPC client: %w", err)
}
n.l1Source, err = sources.NewL1Client(
client.NewInstrumentedRPC(l1Node, n.metrics), n.log, n.metrics.L1SourceCache,
sources.L1ClientDefaultConfig(&cfg.Rollup, trustRPC, rpcProvKind))
client.NewInstrumentedRPC(l1Node, n.metrics), n.log, n.metrics.L1SourceCache, rpcCfg)
if err != nil {
return fmt.Errorf("failed to create L1 source: %w", err)
}
......@@ -180,14 +183,13 @@ func (n *OpNode) initRuntimeConfig(ctx context.Context, cfg *Config) error {
}
func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger) error {
rpcClient, err := cfg.L2.Setup(ctx, n.log)
rpcClient, rpcCfg, err := cfg.L2.Setup(ctx, n.log, &cfg.Rollup)
if err != nil {
return fmt.Errorf("failed to setup L2 execution-engine RPC client: %w", err)
}
n.l2Source, err = sources.NewEngineClient(
client.NewInstrumentedRPC(rpcClient, n.metrics), n.log, n.metrics.L2SourceCache,
sources.EngineClientDefaultConfig(&cfg.Rollup),
client.NewInstrumentedRPC(rpcClient, n.metrics), n.log, n.metrics.L2SourceCache, rpcCfg,
)
if err != nil {
return fmt.Errorf("failed to create Engine client: %w", err)
......@@ -197,29 +199,24 @@ func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger
return err
}
var syncClient *sources.SyncClient
// If the L2 sync config is present, use it to create a sync client
if cfg.L2Sync != nil {
if err := cfg.L2Sync.Check(); err != nil {
log.Info("L2 sync config is not present, skipping L2 sync client setup", "err", err)
} else {
rpcSyncClient, err := cfg.L2Sync.Setup(ctx, n.log)
if err != nil {
return fmt.Errorf("failed to setup L2 execution-engine RPC client for backup sync: %w", err)
}
n.l2Driver = driver.NewDriver(&cfg.Driver, &cfg.Rollup, n.l2Source, n.l1Source, n, n, n.log, snapshotLog, n.metrics)
// The sync client's RPC is always trusted
config := sources.SyncClientDefaultConfig(&cfg.Rollup, true)
return nil
}
syncClient, err = sources.NewSyncClient(n.OnUnsafeL2Payload, rpcSyncClient, n.log, n.metrics.L2SourceCache, config)
if err != nil {
return fmt.Errorf("failed to create sync client: %w", err)
}
}
func (n *OpNode) initRPCSync(ctx context.Context, cfg *Config) error {
rpcSyncClient, rpcCfg, err := cfg.L2Sync.Setup(ctx, n.log, &cfg.Rollup)
if err != nil {
return fmt.Errorf("failed to setup L2 execution-engine RPC client for backup sync: %w", err)
}
n.l2Driver = driver.NewDriver(&cfg.Driver, &cfg.Rollup, n.l2Source, n.l1Source, syncClient, n, n.log, snapshotLog, n.metrics)
if rpcSyncClient == nil { // if no RPC client is configured to sync from, then don't add the RPC sync client
return nil
}
syncClient, err := sources.NewSyncClient(n.OnUnsafeL2Payload, rpcSyncClient, n.log, n.metrics.L2SourceCache, rpcCfg)
if err != nil {
return fmt.Errorf("failed to create sync client: %w", err)
}
n.rpcSync = syncClient
return nil
}
......@@ -292,11 +289,12 @@ func (n *OpNode) Start(ctx context.Context) error {
}
// If the backup unsafe sync client is enabled, start its event loop
if n.l2Driver.L2SyncCl != nil {
if err := n.l2Driver.L2SyncCl.Start(); err != nil {
if n.rpcSync != nil {
if err := n.rpcSync.Start(); err != nil {
n.log.Error("Could not start the backup sync client", "err", err)
return err
}
n.log.Info("Started L2-RPC sync service")
}
return nil
......@@ -375,6 +373,14 @@ func (n *OpNode) OnUnsafeL2Payload(ctx context.Context, from peer.ID, payload *e
return nil
}
func (n *OpNode) RequestL2Range(ctx context.Context, start, end uint64) error {
if n.rpcSync != nil {
return n.rpcSync.RequestL2Range(ctx, start, end)
}
n.log.Debug("ignoring request to sync L2 range, no sync method available")
return nil
}
func (n *OpNode) P2P() p2p.Node {
return n.p2pNode
}
......@@ -413,8 +419,8 @@ func (n *OpNode) Close() error {
}
// If the L2 sync client is present & running, close it.
if n.l2Driver.L2SyncCl != nil {
if err := n.l2Driver.L2SyncCl.Close(); err != nil {
if n.rpcSync != nil {
if err := n.rpcSync.Close(); err != nil {
result = multierror.Append(result, fmt.Errorf("failed to close L2 engine backup sync client cleanly: %w", err))
}
}
......
......@@ -115,7 +115,7 @@ func TestOutputAtBlock(t *testing.T) {
require.NoError(t, server.Start())
defer server.Stop()
client, err := rpcclient.DialRPCClientWithBackoff(context.Background(), log, "http://"+server.Addr().String())
client, err := rpcclient.NewRPC(context.Background(), log, "http://"+server.Addr().String(), rpcclient.WithDialBackoff(3))
require.NoError(t, err)
var out *eth.OutputResponse
......@@ -147,7 +147,7 @@ func TestVersion(t *testing.T) {
assert.NoError(t, server.Start())
defer server.Stop()
client, err := rpcclient.DialRPCClientWithBackoff(context.Background(), log, "http://"+server.Addr().String())
client, err := rpcclient.NewRPC(context.Background(), log, "http://"+server.Addr().String(), rpcclient.WithDialBackoff(3))
assert.NoError(t, err)
var out string
......@@ -189,7 +189,7 @@ func TestSyncStatus(t *testing.T) {
assert.NoError(t, server.Start())
defer server.Stop()
client, err := rpcclient.DialRPCClientWithBackoff(context.Background(), log, "http://"+server.Addr().String())
client, err := rpcclient.NewRPC(context.Background(), log, "http://"+server.Addr().String(), rpcclient.WithDialBackoff(3))
assert.NoError(t, err)
var out *eth.SyncStatus
......
package p2p
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestBandScorer_ParseDefault tests the [BandScorer.Parse] function
// on the default band scores cli flag value.
func TestBandScorer_ParseDefault(t *testing.T) {
// Create a new band scorer.
bandScorer, err := NewBandScorer("-40:graylist;-20:restricted;0:nopx;20:friend;")
require.NoError(t, err)
// Validate the [BandScorer] internals.
require.ElementsMatch(t, bandScorer.bands, []scorePair{
{band: "graylist", threshold: -40},
{band: "restricted", threshold: -20},
{band: "nopx", threshold: 0},
{band: "friend", threshold: 20},
})
}
// TestBandScorer_BucketCorrectly tests the [BandScorer.Bucket] function
// on a variety of scores.
func TestBandScorer_BucketCorrectly(t *testing.T) {
// Create a new band scorer.
bandScorer, err := NewBandScorer("-40:graylist;-20:restricted;0:nopx;20:friend;")
require.NoError(t, err)
// Validate the [BandScorer] internals.
require.Equal(t, bandScorer.Bucket(-100), "graylist")
require.Equal(t, bandScorer.Bucket(-40), "graylist")
require.Equal(t, bandScorer.Bucket(-39), "restricted")
require.Equal(t, bandScorer.Bucket(-20), "restricted")
require.Equal(t, bandScorer.Bucket(-19), "nopx")
require.Equal(t, bandScorer.Bucket(0), "nopx")
require.Equal(t, bandScorer.Bucket(1), "friend")
require.Equal(t, bandScorer.Bucket(20), "friend")
require.Equal(t, bandScorer.Bucket(21), "friend")
}
// TestBandScorer_BucketInverted tests the [BandScorer.Bucket] function
// on a variety of scores, in descending order.
func TestBandScorer_BucketInverted(t *testing.T) {
// Create a new band scorer.
bandScorer, err := NewBandScorer("20:friend;0:nopx;-20:restricted;-40:graylist;")
require.NoError(t, err)
// Validate the [BandScorer] internals.
require.Equal(t, bandScorer.Bucket(-100), "graylist")
require.Equal(t, bandScorer.Bucket(-40), "graylist")
require.Equal(t, bandScorer.Bucket(-39), "restricted")
require.Equal(t, bandScorer.Bucket(-20), "restricted")
require.Equal(t, bandScorer.Bucket(-19), "nopx")
require.Equal(t, bandScorer.Bucket(0), "nopx")
require.Equal(t, bandScorer.Bucket(1), "friend")
require.Equal(t, bandScorer.Bucket(20), "friend")
require.Equal(t, bandScorer.Bucket(21), "friend")
}
// TestBandScorer_ParseEmpty tests the [BandScorer.Parse] function
// on an empty string.
func TestBandScorer_ParseEmpty(t *testing.T) {
// Create a band scorer on an empty string.
bandScorer, err := NewBandScorer("")
require.NoError(t, err)
// Validate the [BandScorer] internals.
require.Len(t, bandScorer.bands, 0)
}
// TestBandScorer_ParseWhitespace tests the [BandScorer.Parse] function
// on a variety of whitespaced strings.
func TestBandScorer_ParseWhitespace(t *testing.T) {
// Create a band scorer on an empty string.
bandScorer, err := NewBandScorer(" ; ; ; ")
require.NoError(t, err)
// Validate the [BandScorer] internals.
require.Len(t, bandScorer.bands, 0)
}
......@@ -58,6 +58,10 @@ func NewConfig(ctx *cli.Context, blockTime uint64) (*p2p.Config, error) {
return nil, fmt.Errorf("failed to load p2p peer scoring options: %w", err)
}
if err := loadPeerScoreBands(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p peer score bands: %w", err)
}
if err := loadBanningOption(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load banning option: %w", err)
}
......@@ -121,6 +125,17 @@ func loadPeerScoringParams(conf *p2p.Config, ctx *cli.Context, blockTime uint64)
return nil
}
// loadPeerScoreBands loads [p2p.BandScorer] from the CLI context.
func loadPeerScoreBands(conf *p2p.Config, ctx *cli.Context) error {
scoreBands := ctx.GlobalString(flags.PeerScoreBands.Name)
bandScorer, err := p2p.NewBandScorer(scoreBands)
if err != nil {
return err
}
conf.BandScoreThresholds = *bandScorer
return nil
}
// loadBanningOption loads whether or not to ban peers from the CLI context.
func loadBanningOption(conf *p2p.Config, ctx *cli.Context) error {
ban := ctx.GlobalBool(flags.Banning.Name)
......
......@@ -54,6 +54,9 @@ type Config struct {
PeerScoring pubsub.PeerScoreParams
TopicScoring pubsub.TopicScoreParams
// Peer Score Band Thresholds
BandScoreThresholds BandScoreThresholds
// Whether to ban peers based on their [PeerScoring] score.
BanningEnabled bool
......@@ -151,6 +154,10 @@ func (conf *Config) PeerScoringParams() *pubsub.PeerScoreParams {
return &conf.PeerScoring
}
func (conf *Config) PeerBandScorer() *BandScoreThresholds {
return &conf.BandScoreThresholds
}
func (conf *Config) BanPeers() bool {
return conf.BanningEnabled
}
......
......@@ -55,6 +55,7 @@ type GossipSetupConfigurables interface {
TopicScoringParams() *pubsub.TopicScoreParams
BanPeers() bool
ConfigureGossip(params *pubsub.GossipSubParams) []pubsub.Option
PeerBandScorer() *BandScoreThresholds
}
type GossipRuntimeConfig interface {
......@@ -64,7 +65,8 @@ type GossipRuntimeConfig interface {
//go:generate mockery --name GossipMetricer
type GossipMetricer interface {
RecordGossipEvent(evType int32)
RecordPeerScoring(peerID peer.ID, score float64)
// Peer Scoring Metric Funcs
SetPeerScores(map[string]float64)
}
func blocksTopicV1(cfg *rollup.Config) string {
......
// Code generated by mockery v2.14.0. DO NOT EDIT.
// Code generated by mockery v2.22.1. DO NOT EDIT.
package mocks
......@@ -123,13 +123,16 @@ func (_m *ConnectionGater) InterceptUpgraded(_a0 network.Conn) (bool, control.Di
ret := _m.Called(_a0)
var r0 bool
var r1 control.DisconnectReason
if rf, ok := ret.Get(0).(func(network.Conn) (bool, control.DisconnectReason)); ok {
return rf(_a0)
}
if rf, ok := ret.Get(0).(func(network.Conn) bool); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(bool)
}
var r1 control.DisconnectReason
if rf, ok := ret.Get(1).(func(network.Conn) control.DisconnectReason); ok {
r1 = rf(_a0)
} else {
......@@ -139,6 +142,20 @@ func (_m *ConnectionGater) InterceptUpgraded(_a0 network.Conn) (bool, control.Di
return r0, r1
}
// IsBlocked provides a mock function with given fields: p
func (_m *ConnectionGater) IsBlocked(p peer.ID) bool {
ret := _m.Called(p)
var r0 bool
if rf, ok := ret.Get(0).(func(peer.ID) bool); ok {
r0 = rf(p)
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// ListBlockedAddrs provides a mock function with given fields:
func (_m *ConnectionGater) ListBlockedAddrs() []net.IP {
ret := _m.Called()
......
// Code generated by mockery v2.14.0. DO NOT EDIT.
// Code generated by mockery v2.22.1. DO NOT EDIT.
package mocks
import (
mock "github.com/stretchr/testify/mock"
peer "github.com/libp2p/go-libp2p/core/peer"
)
import mock "github.com/stretchr/testify/mock"
// GossipMetricer is an autogenerated mock type for the GossipMetricer type
type GossipMetricer struct {
......@@ -18,9 +14,9 @@ func (_m *GossipMetricer) RecordGossipEvent(evType int32) {
_m.Called(evType)
}
// RecordPeerScoring provides a mock function with given fields: peerID, score
func (_m *GossipMetricer) RecordPeerScoring(peerID peer.ID, score float64) {
_m.Called(peerID, score)
// SetPeerScores provides a mock function with given fields: _a0
func (_m *GossipMetricer) SetPeerScores(_a0 map[string]float64) {
_m.Called(_a0)
}
type mockConstructorTestingTNewGossipMetricer interface {
......
// Code generated by mockery v2.14.0. DO NOT EDIT.
// Code generated by mockery v2.22.1. DO NOT EDIT.
package mocks
......@@ -13,6 +13,20 @@ type PeerGater struct {
mock.Mock
}
// IsBlocked provides a mock function with given fields: _a0
func (_m *PeerGater) IsBlocked(_a0 peer.ID) bool {
ret := _m.Called(_a0)
var r0 bool
if rf, ok := ret.Get(0).(func(peer.ID) bool); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// Update provides a mock function with given fields: _a0, _a1
func (_m *PeerGater) Update(_a0 peer.ID, _a1 float64) {
_m.Called(_a0, _a1)
......
// Code generated by mockery v2.14.0. DO NOT EDIT.
// Code generated by mockery v2.22.1. DO NOT EDIT.
package mocks
......
......@@ -3,7 +3,6 @@ package p2p
import (
log "github.com/ethereum/go-ethereum/log"
peer "github.com/libp2p/go-libp2p/core/peer"
slices "golang.org/x/exp/slices"
)
// ConnectionFactor is the factor by which we multiply the connection score.
......@@ -15,6 +14,7 @@ const PeerScoreThreshold = -100
// gater is an internal implementation of the [PeerGater] interface.
type gater struct {
connGater ConnectionGater
blockedMap map[peer.ID]bool
log log.Logger
banEnabled bool
}
......@@ -25,29 +25,51 @@ type gater struct {
type PeerGater interface {
// Update handles a peer score update and blocks/unblocks the peer if necessary.
Update(peer.ID, float64)
// IsBlocked returns true if the given [peer.ID] is blocked.
IsBlocked(peer.ID) bool
}
// NewPeerGater returns a new peer gater.
func NewPeerGater(connGater ConnectionGater, log log.Logger, banEnabled bool) PeerGater {
return &gater{
connGater: connGater,
blockedMap: make(map[peer.ID]bool),
log: log,
banEnabled: banEnabled,
}
}
// IsBlocked returns true if the given [peer.ID] is blocked.
func (s *gater) IsBlocked(peerID peer.ID) bool {
return s.blockedMap[peerID]
}
// setBlocked sets the blocked status of the given [peer.ID].
func (s *gater) setBlocked(peerID peer.ID, blocked bool) {
s.blockedMap[peerID] = blocked
}
// Update handles a peer score update and blocks/unblocks the peer if necessary.
func (s *gater) Update(id peer.ID, score float64) {
// Check if the peer score is below the threshold
// If so, we need to block the peer
if score < PeerScoreThreshold && s.banEnabled {
isAlreadyBlocked := s.IsBlocked(id)
if score < PeerScoreThreshold && s.banEnabled && !isAlreadyBlocked {
s.log.Warn("peer blocking enabled, blocking peer", "id", id.String(), "score", score)
err := s.connGater.BlockPeer(id)
s.log.Warn("connection gater failed to block peer", id.String(), "err", err)
if err != nil {
s.log.Warn("connection gater failed to block peer", "id", id.String(), "err", err)
}
// Set the peer as blocked in the blocked map
s.setBlocked(id, true)
}
// Unblock peers whose score has recovered to an acceptable level
if (score > PeerScoreThreshold) && slices.Contains(s.connGater.ListBlockedPeers(), id) {
if (score > PeerScoreThreshold) && isAlreadyBlocked {
err := s.connGater.UnblockPeer(id)
s.log.Warn("connection gater failed to unblock peer", id.String(), "err", err)
if err != nil {
s.log.Warn("connection gater failed to unblock peer", "id", id.String(), "err", err)
}
// Set the peer as unblocked in the blocked map
s.setBlocked(id, false)
}
}
......@@ -37,30 +37,59 @@ func (testSuite *PeerGaterTestSuite) TestPeerScoreConstants() {
}
// TestPeerGaterUpdate tests the peer gater update hook.
func (testSuite *PeerGaterTestSuite) TestPeerGaterUpdate() {
func (testSuite *PeerGaterTestSuite) TestPeerGater_UpdateBansPeers() {
gater := p2p.NewPeerGater(
testSuite.mockGater,
testSuite.logger,
true,
)
// Return an empty list of already blocked peers
testSuite.mockGater.On("ListBlockedPeers").Return([]peer.ID{}).Once()
// Mock a connection gater peer block call
// Since the peer score is below the [PeerScoreThreshold] of -100,
// the [BlockPeer] method should be called
testSuite.mockGater.On("BlockPeer", peer.ID("peer1")).Return(nil)
testSuite.mockGater.On("BlockPeer", peer.ID("peer1")).Return(nil).Once()
// The peer should initially be unblocked
testSuite.False(gater.IsBlocked(peer.ID("peer1")))
// Apply the peer gater update
gater.Update(peer.ID("peer1"), float64(-100))
gater.Update(peer.ID("peer1"), float64(-101))
// The peer should be considered blocked
testSuite.True(gater.IsBlocked(peer.ID("peer1")))
// Now let's unblock the peer
testSuite.mockGater.On("UnblockPeer", peer.ID("peer1")).Return(nil).Once()
gater.Update(peer.ID("peer1"), float64(0))
// The peer should be considered unblocked
testSuite.False(gater.IsBlocked(peer.ID("peer1")))
}
// TestPeerGaterUpdateNoBanning tests the peer gater update hook without banning set
func (testSuite *PeerGaterTestSuite) TestPeerGaterUpdateNoBanning() {
func (testSuite *PeerGaterTestSuite) TestPeerGater_UpdateNoBanning() {
gater := p2p.NewPeerGater(
testSuite.mockGater,
testSuite.logger,
false,
)
// Return an empty list of already blocked peers
testSuite.mockGater.On("ListBlockedPeers").Return([]peer.ID{})
// Notice: [BlockPeer] should not be called since banning is not enabled
// even though the peer score is way below the [PeerScoreThreshold] of -100
gater.Update(peer.ID("peer1"), float64(-100000))
// The peer should be unblocked
testSuite.False(gater.IsBlocked(peer.ID("peer1")))
// Make sure that if we then "unblock" the peer, nothing happens
gater.Update(peer.ID("peer1"), float64(0))
// The peer should still be unblocked
testSuite.False(gater.IsBlocked(peer.ID("peer1")))
}
package p2p
import (
"fmt"
"sort"
"strconv"
"strings"
log "github.com/ethereum/go-ethereum/log"
pubsub "github.com/libp2p/go-libp2p-pubsub"
peer "github.com/libp2p/go-libp2p/core/peer"
)
type scorer struct {
peerStore Peerstore
metricer GossipMetricer
log log.Logger
gater PeerGater
peerStore Peerstore
metricer GossipMetricer
log log.Logger
gater PeerGater
bandScoreThresholds *BandScoreThresholds
}
// scorePair holds a band and its corresponding threshold.
type scorePair struct {
band string
threshold float64
}
// BandScoreThresholds holds the thresholds for classifying peers
// into different score bands.
type BandScoreThresholds struct {
bands []scorePair
}
// NewBandScorer constructs a new [BandScoreThresholds] instance.
func NewBandScorer(str string) (*BandScoreThresholds, error) {
s := &BandScoreThresholds{
bands: make([]scorePair, 0),
}
for _, band := range strings.Split(str, ";") {
// Skip empty band strings.
band := strings.TrimSpace(band)
if band == "" {
continue
}
split := strings.Split(band, ":")
if len(split) != 2 {
return nil, fmt.Errorf("invalid score band: %s", band)
}
threshold, err := strconv.ParseFloat(split[0], 64)
if err != nil {
return nil, err
}
s.bands = append(s.bands, scorePair{
band: split[1],
threshold: threshold,
})
}
// Order the bands by threshold in ascending order.
sort.Slice(s.bands, func(i, j int) bool {
return s.bands[i].threshold < s.bands[j].threshold
})
return s, nil
}
// Bucket returns the appropriate band for a given score.
func (s *BandScoreThresholds) Bucket(score float64) string {
for _, pair := range s.bands {
if score <= pair.threshold {
return pair.band
}
}
// If there is no band threshold higher than the score,
// the peer must be placed in the highest bucket.
if len(s.bands) > 0 {
return s.bands[len(s.bands)-1].band
}
return ""
}
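A short usage sketch of the band string format parsed above (semicolon-separated "threshold:band" pairs, matching the config used in the tests below); the outputs in the comments follow the Bucket logic:
bs, err := NewBandScorer("-40:graylist;0:friend;")
if err != nil {
	panic(err) // illustration only
}
_ = bs.Bucket(-100) // "graylist": first band whose threshold is >= the score
_ = bs.Bucket(-10)  // "friend": misses the -40 band, lands in the 0 band
_ = bs.Bucket(5)    // "friend": above every threshold, so the highest band wins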
// Peerstore is a subset of the libp2p peerstore.Peerstore interface.
......@@ -34,12 +101,13 @@ type Scorer interface {
}
// NewScorer returns a new peer scorer.
func NewScorer(peerGater PeerGater, peerStore Peerstore, metricer GossipMetricer, log log.Logger) Scorer {
func NewScorer(peerGater PeerGater, peerStore Peerstore, metricer GossipMetricer, bandScoreThresholds *BandScoreThresholds, log log.Logger) Scorer {
return &scorer{
peerStore: peerStore,
metricer: metricer,
log: log,
gater: peerGater,
peerStore: peerStore,
metricer: metricer,
log: log,
gater: peerGater,
bandScoreThresholds: bandScoreThresholds,
}
}
......@@ -48,13 +116,18 @@ func NewScorer(peerGater PeerGater, peerStore Peerstore, metricer GossipMetricer
// The returned [pubsub.ExtendedPeerScoreInspectFn] is called with a mapping of peer IDs to peer score snapshots.
func (s *scorer) SnapshotHook() pubsub.ExtendedPeerScoreInspectFn {
return func(m map[peer.ID]*pubsub.PeerScoreSnapshot) {
scoreMap := make(map[string]float64)
// Zero out all bands.
for _, b := range s.bandScoreThresholds.bands {
scoreMap[b.band] = 0
}
// Now set the new scores.
for id, snap := range m {
// Record peer score in the metricer
s.metricer.RecordPeerScoring(id, snap.Score)
// Update with the peer gater
band := s.bandScoreThresholds.Bucket(snap.Score)
scoreMap[band] += 1
s.gater.Update(id, snap.Score)
}
s.metricer.SetPeerScores(scoreMap)
}
}
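For example (band configuration assumed from the tests below), with bands "-40:graylist;0:friend;", a snapshot containing peers scoring -50 and -10 produces the following map, which is then pushed via SetPeerScores:
scoreMap := map[string]float64{
	"graylist": 1, // the peer at -50 (score <= -40)
	"friend":   1, // the peer at -10 (score <= 0)
}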
......
......@@ -20,15 +20,18 @@ type PeerScorerTestSuite struct {
mockGater *p2pMocks.PeerGater
mockStore *p2pMocks.Peerstore
mockMetricer *p2pMocks.GossipMetricer
bandScorer *p2p.BandScoreThresholds
logger log.Logger
}
// SetupTest sets up the test suite.
func (testSuite *PeerScorerTestSuite) SetupTest() {
testSuite.mockGater = &p2pMocks.PeerGater{}
// testSuite.mockConnGater = &p2pMocks.ConnectionGater{}
testSuite.mockStore = &p2pMocks.Peerstore{}
testSuite.mockMetricer = &p2pMocks.GossipMetricer{}
bandScorer, err := p2p.NewBandScorer("-40:graylist;0:friend;")
testSuite.NoError(err)
testSuite.bandScorer = bandScorer
testSuite.logger = testlog.Logger(testSuite.T(), log.LvlError)
}
......@@ -37,44 +40,49 @@ func TestPeerScorer(t *testing.T) {
suite.Run(t, new(PeerScorerTestSuite))
}
// TestPeerScorerOnConnect ensures we can call the OnConnect method on the peer scorer.
func (testSuite *PeerScorerTestSuite) TestPeerScorerOnConnect() {
// TestScorer_OnConnect ensures we can call the OnConnect method on the peer scorer.
func (testSuite *PeerScorerTestSuite) TestScorer_OnConnect() {
scorer := p2p.NewScorer(
testSuite.mockGater,
testSuite.mockStore,
testSuite.mockMetricer,
testSuite.bandScorer,
testSuite.logger,
)
scorer.OnConnect()
}
// TestPeerScorerOnDisconnect ensures we can call the OnDisconnect method on the peer scorer.
func (testSuite *PeerScorerTestSuite) TestPeerScorerOnDisconnect() {
// TestScorer_OnDisconnect ensures we can call the OnDisconnect method on the peer scorer.
func (testSuite *PeerScorerTestSuite) TestScorer_OnDisconnect() {
scorer := p2p.NewScorer(
testSuite.mockGater,
testSuite.mockStore,
testSuite.mockMetricer,
testSuite.bandScorer,
testSuite.logger,
)
scorer.OnDisconnect()
}
// TestSnapshotHook tests running the snapshot hook on the peer scorer.
func (testSuite *PeerScorerTestSuite) TestSnapshotHook() {
// TestScorer_SnapshotHook tests running the snapshot hook on the peer scorer.
func (testSuite *PeerScorerTestSuite) TestScorer_SnapshotHook() {
scorer := p2p.NewScorer(
testSuite.mockGater,
testSuite.mockStore,
testSuite.mockMetricer,
testSuite.bandScorer,
testSuite.logger,
)
inspectFn := scorer.SnapshotHook()
// Mock the snapshot updates
// This doesn't return anything
testSuite.mockMetricer.On("RecordPeerScoring", peer.ID("peer1"), float64(-100)).Return(nil)
// Mock the peer gater call
testSuite.mockGater.On("Update", peer.ID("peer1"), float64(-100)).Return(nil)
testSuite.mockGater.On("Update", peer.ID("peer1"), float64(-100)).Return(nil).Once()
// The metricer should then be called with the peer score band map
testSuite.mockMetricer.On("SetPeerScores", map[string]float64{
"friend": 0,
"graylist": 1,
}).Return(nil).Once()
// Apply the snapshot
snapshotMap := map[peer.ID]*pubsub.PeerScoreSnapshot{
......@@ -83,26 +91,46 @@ func (testSuite *PeerScorerTestSuite) TestSnapshotHook() {
},
}
inspectFn(snapshotMap)
// Change the peer score now to a different band
testSuite.mockGater.On("Update", peer.ID("peer1"), float64(0)).Return(nil).Once()
// The metricer should then be called with the peer score band map
testSuite.mockMetricer.On("SetPeerScores", map[string]float64{
"friend": 1,
"graylist": 0,
}).Return(nil).Once()
// Apply the snapshot
snapshotMap = map[peer.ID]*pubsub.PeerScoreSnapshot{
peer.ID("peer1"): {
Score: 0,
},
}
inspectFn(snapshotMap)
}
// TestSnapshotHookBlockPeer tests running the snapshot hook on the peer scorer with a peer score below the threshold.
// TestScorer_SnapshotHookBlocksPeer tests running the snapshot hook on the peer scorer with a peer score below the threshold.
// This implies that the peer should be blocked.
func (testSuite *PeerScorerTestSuite) TestSnapshotHookBlockPeer() {
func (testSuite *PeerScorerTestSuite) TestScorer_SnapshotHookBlocksPeer() {
scorer := p2p.NewScorer(
testSuite.mockGater,
testSuite.mockStore,
testSuite.mockMetricer,
testSuite.bandScorer,
testSuite.logger,
)
inspectFn := scorer.SnapshotHook()
// Mock the snapshot updates
// This doesn't return anything
testSuite.mockMetricer.On("RecordPeerScoring", peer.ID("peer1"), float64(-101)).Return(nil)
// Mock the peer gater call
testSuite.mockGater.On("Update", peer.ID("peer1"), float64(-101)).Return(nil)
// The metricer should then be called with the peer score band map
testSuite.mockMetricer.On("SetPeerScores", map[string]float64{
"friend": 0,
"graylist": 1,
}).Return(nil)
// Apply the snapshot
snapshotMap := map[peer.ID]*pubsub.PeerScoreSnapshot{
peer.ID("peer1"): {
......
......@@ -14,7 +14,7 @@ func ConfigurePeerScoring(h host.Host, g ConnectionGater, gossipConf GossipSetup
peerScoreThresholds := NewPeerScoreThresholds()
banEnabled := gossipConf.BanPeers()
peerGater := NewPeerGater(g, log, banEnabled)
scorer := NewScorer(peerGater, h.Peerstore(), m, log)
scorer := NewScorer(peerGater, h.Peerstore(), m, gossipConf.PeerBandScorer(), log)
opts := []pubsub.Option{}
// Check the app-specific score since libp2p doesn't export its [validate] function :/
if peerScoreParams != nil && peerScoreParams.AppSpecificScore != nil {
......
......@@ -11,7 +11,7 @@ import (
p2pMocks "github.com/ethereum-optimism/optimism/op-node/p2p/mocks"
testlog "github.com/ethereum-optimism/optimism/op-node/testlog"
mock "github.com/stretchr/testify/mock"
"github.com/stretchr/testify/mock"
suite "github.com/stretchr/testify/suite"
log "github.com/ethereum/go-ethereum/log"
......@@ -30,6 +30,7 @@ type PeerScoresTestSuite struct {
mockGater *p2pMocks.ConnectionGater
mockStore *p2pMocks.Peerstore
mockMetricer *p2pMocks.GossipMetricer
bandScorer p2p.BandScoreThresholds
logger log.Logger
}
......@@ -38,6 +39,9 @@ func (testSuite *PeerScoresTestSuite) SetupTest() {
testSuite.mockGater = &p2pMocks.ConnectionGater{}
testSuite.mockStore = &p2pMocks.Peerstore{}
testSuite.mockMetricer = &p2pMocks.GossipMetricer{}
bandScorer, err := p2p.NewBandScorer("0:graylist;")
testSuite.NoError(err)
testSuite.bandScorer = *bandScorer
testSuite.logger = testlog.Logger(testSuite.T(), log.LvlError)
}
......@@ -68,6 +72,7 @@ func newGossipSubs(testSuite *PeerScoresTestSuite, ctx context.Context, hosts []
rt := pubsub.DefaultGossipSubRouter(h)
opts := []pubsub.Option{}
opts = append(opts, p2p.ConfigurePeerScoring(h, testSuite.mockGater, &p2p.Config{
BandScoreThresholds: testSuite.bandScorer,
PeerScoring: pubsub.PeerScoreParams{
AppSpecificScore: func(p peer.ID) float64 {
if p == hosts[0].ID() {
......@@ -118,8 +123,7 @@ func (testSuite *PeerScoresTestSuite) TestNegativeScores() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
testSuite.mockMetricer.On("RecordPeerScoring", mock.Anything, float64(0)).Return(nil)
testSuite.mockMetricer.On("RecordPeerScoring", mock.Anything, float64(-1000)).Return(nil)
testSuite.mockMetricer.On("SetPeerScores", mock.Anything).Return(nil)
testSuite.mockGater.On("ListBlockedPeers").Return([]peer.ID{})
......
......@@ -68,6 +68,10 @@ func (p *Prepared) PeerScoringParams() *pubsub.PeerScoreParams {
return nil
}
func (p *Prepared) PeerBandScorer() *BandScoreThresholds {
return nil
}
func (p *Prepared) BanPeers() bool {
return false
}
......
......@@ -107,7 +107,7 @@ type EngineQueue struct {
// The queued-up attributes
safeAttributesParent eth.L2BlockRef
safeAttributes *eth.PayloadAttributes
unsafePayloads PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps
unsafePayloads *PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps and duplicate block numbers
// Tracks which L2 blocks were last derived from which L1 block. At most finalityLookback large.
finalityData []FinalityData
......@@ -127,18 +127,14 @@ var _ EngineControl = (*EngineQueue)(nil)
// NewEngineQueue creates a new EngineQueue, which should be Reset(origin) before use.
func NewEngineQueue(log log.Logger, cfg *rollup.Config, engine Engine, metrics Metrics, prev NextAttributesProvider, l1Fetcher L1Fetcher) *EngineQueue {
return &EngineQueue{
log: log,
cfg: cfg,
engine: engine,
metrics: metrics,
finalityData: make([]FinalityData, 0, finalityLookback),
unsafePayloads: PayloadsQueue{
MaxSize: maxUnsafePayloadsMemory,
SizeFn: payloadMemSize,
blockNos: make(map[uint64]bool),
},
prev: prev,
l1Fetcher: l1Fetcher,
log: log,
cfg: cfg,
engine: engine,
metrics: metrics,
finalityData: make([]FinalityData, 0, finalityLookback),
unsafePayloads: NewPayloadsQueue(maxUnsafePayloadsMemory, payloadMemSize),
prev: prev,
l1Fetcher: l1Fetcher,
}
}
......@@ -682,7 +678,8 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System
return io.EOF
}
// GetUnsafeQueueGap retrieves the current [start, end] range of the gap between the tip of the unsafe priority queue and the unsafe head.
// GetUnsafeQueueGap retrieves the current [start, end) range (incl. start, excl. end)
// of the gap between the tip of the unsafe priority queue and the unsafe head.
// If there is no gap, the difference between end and start will be 0.
func (eq *EngineQueue) GetUnsafeQueueGap(expectedNumber uint64) (start uint64, end uint64) {
// The start of the gap is always the unsafe head + 1
......@@ -691,9 +688,11 @@ func (eq *EngineQueue) GetUnsafeQueueGap(expectedNumber uint64) (start uint64, e
// If the priority queue is not empty, the end is the first block number at the tip of the priority queue
// Otherwise, the end is the expected block number + 1, so the expected block itself is included
if first := eq.unsafePayloads.Peek(); first != nil {
// Don't include the payload we already have in the sync range
end = first.ID().Number
} else {
end = expectedNumber
// Include the expected payload in the sync range
end = expectedNumber + 1
}
return start, end
......
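A worked example of the gap arithmetic (numbers assumed): with the unsafe head at block 10 and expectedNumber = 15, start = 11; if the queue is empty, end = 15 + 1 = 16 and the requested range is [11, 16), while a queue whose tip is block 14 gives end = 14 and the range [11, 14).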
......@@ -5,6 +5,8 @@ import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-node/eth"
)
......@@ -48,8 +50,8 @@ func (pq *payloadsByNumber) Pop() any {
}
const (
// ~580 bytes per payload, with some margin for overhead
payloadMemFixedCost uint64 = 600
// ~580 bytes per payload, with some margin for overhead like map data
payloadMemFixedCost uint64 = 800
// 24 bytes per tx overhead (size of slice header in memory)
payloadTxMemOverhead uint64 = 24
)
......@@ -72,15 +74,25 @@ func payloadMemSize(p *eth.ExecutionPayload) uint64 {
// without the need to use heap.Push/heap.Pop as caller.
// PayloadsQueue maintains a MaxSize by counting and tracking sizes of added eth.ExecutionPayload entries.
// When the size grows too large, the first (lowest block-number) payload is removed from the queue.
// PayloadsQueue allows entries with same block number, or even full duplicates.
// PayloadsQueue allows entries with same block number, but does not allow duplicate blocks
type PayloadsQueue struct {
pq payloadsByNumber
currentSize uint64
MaxSize uint64
blockNos map[uint64]bool
blockHashes map[common.Hash]struct{}
SizeFn func(p *eth.ExecutionPayload) uint64
}
func NewPayloadsQueue(maxSize uint64, sizeFn func(p *eth.ExecutionPayload) uint64) *PayloadsQueue {
return &PayloadsQueue{
pq: nil,
currentSize: 0,
MaxSize: maxSize,
blockHashes: make(map[common.Hash]struct{}),
SizeFn: sizeFn,
}
}
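A minimal usage sketch of the queue (field values assumed), showing lowest-block-number-first ordering and hash-based duplicate tracking:
pq := NewPayloadsQueue(payloadMemFixedCost*2, payloadMemSize)
_ = pq.Push(&eth.ExecutionPayload{BlockNumber: 5, BlockHash: common.Hash{5}})
_ = pq.Push(&eth.ExecutionPayload{BlockNumber: 4, BlockHash: common.Hash{4}})
low := pq.Pop() // block 4 pops first; Pop also frees its hash for re-insertion
_ = low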
func (upq *PayloadsQueue) Len() int {
return len(upq.pq)
}
......@@ -100,8 +112,8 @@ func (upq *PayloadsQueue) Push(p *eth.ExecutionPayload) error {
if p == nil {
return errors.New("cannot add nil payload")
}
if upq.blockNos[p.ID().Number] {
return errors.New("cannot add duplicate payload")
if _, ok := upq.blockHashes[p.BlockHash]; ok {
return fmt.Errorf("cannot add duplicate payload %s", p.ID())
}
size := upq.SizeFn(p)
if size > upq.MaxSize {
......@@ -115,7 +127,7 @@ func (upq *PayloadsQueue) Push(p *eth.ExecutionPayload) error {
for upq.currentSize > upq.MaxSize {
upq.Pop()
}
upq.blockNos[p.ID().Number] = true
upq.blockHashes[p.BlockHash] = struct{}{}
return nil
}
......@@ -137,7 +149,7 @@ func (upq *PayloadsQueue) Pop() *eth.ExecutionPayload {
}
ps := heap.Pop(&upq.pq).(payloadAndSize) // nosemgrep
upq.currentSize -= ps.size
// remove the key from the blockNos map
delete(upq.blockNos, ps.payload.ID().Number)
// remove the key from the block hashes map
delete(upq.blockHashes, ps.payload.BlockHash)
return ps.payload
}
......@@ -4,6 +4,7 @@ import (
"container/heap"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/eth"
......@@ -74,20 +75,17 @@ func TestPayloadMemSize(t *testing.T) {
}
func TestPayloadsQueue(t *testing.T) {
pq := PayloadsQueue{
MaxSize: payloadMemFixedCost * 3,
SizeFn: payloadMemSize,
blockNos: make(map[uint64]bool),
}
pq := NewPayloadsQueue(payloadMemFixedCost*3, payloadMemSize)
require.Equal(t, 0, pq.Len())
require.Equal(t, (*eth.ExecutionPayload)(nil), pq.Peek())
require.Equal(t, (*eth.ExecutionPayload)(nil), pq.Pop())
a := &eth.ExecutionPayload{BlockNumber: 3}
b := &eth.ExecutionPayload{BlockNumber: 4}
c := &eth.ExecutionPayload{BlockNumber: 5}
d := &eth.ExecutionPayload{BlockNumber: 6}
bAlt := &eth.ExecutionPayload{BlockNumber: 4}
a := &eth.ExecutionPayload{BlockNumber: 3, BlockHash: common.Hash{3}}
b := &eth.ExecutionPayload{BlockNumber: 4, BlockHash: common.Hash{4}}
c := &eth.ExecutionPayload{BlockNumber: 5, BlockHash: common.Hash{5}}
d := &eth.ExecutionPayload{BlockNumber: 6, BlockHash: common.Hash{6}}
bAlt := &eth.ExecutionPayload{BlockNumber: 4, BlockHash: common.Hash{0xff}}
bDup := &eth.ExecutionPayload{BlockNumber: 4, BlockHash: common.Hash{4}}
require.NoError(t, pq.Push(b))
require.Equal(t, pq.Len(), 1)
require.Equal(t, pq.Peek(), b)
......@@ -130,7 +128,9 @@ func TestPayloadsQueue(t *testing.T) {
require.Equal(t, pq.Peek(), a)
// No duplicates allowed
require.Error(t, pq.Push(bAlt))
require.Error(t, pq.Push(bDup))
// But reorg data allowed
require.NoError(t, pq.Push(bAlt))
require.NoError(t, pq.Push(d))
require.Equal(t, pq.Len(), 3)
......
package derive
import (
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/require"
)
var (
// ABI encoding helpers
dynBytes, _ = abi.NewType("bytes", "", nil)
address, _ = abi.NewType("address", "", nil)
uint256T, _ = abi.NewType("uint256", "", nil)
addressArgs = abi.Arguments{
{Type: address},
}
bytesArgs = abi.Arguments{
{Type: dynBytes},
}
twoUint256 = abi.Arguments{
{Type: uint256T},
{Type: uint256T},
}
oneUint256 = abi.Arguments{
{Type: uint256T},
}
)
// TestProcessSystemConfigUpdateLogEvent tests that parsing a config-update log
// event mutates the SystemConfig correctly. Each test's hook builds the ABI-encoded
// log data dynamically, applies the log to a fresh SystemConfig, and then asserts
// that the mutated config equals the expected config defined in the test case.
func TestProcessSystemConfigUpdateLogEvent(t *testing.T) {
tests := []struct {
name string
log *types.Log
config eth.SystemConfig
hook func(*testing.T, *types.Log) *types.Log
err bool
}{
{
// The log data is ignored by consensus and no modifications to the
// system config occur.
name: "SystemConfigUpdateUnsafeBlockSigner",
log: &types.Log{
Topics: []common.Hash{
ConfigUpdateEventABIHash,
ConfigUpdateEventVersion0,
SystemConfigUpdateUnsafeBlockSigner,
},
},
hook: func(t *testing.T, log *types.Log) *types.Log {
addr := common.Address{}
data, err := addressArgs.Pack(&addr)
require.NoError(t, err)
log.Data = data
return log
},
config: eth.SystemConfig{},
err: false,
},
{
// The batcher address should be updated.
name: "SystemConfigUpdateBatcher",
log: &types.Log{
Topics: []common.Hash{
ConfigUpdateEventABIHash,
ConfigUpdateEventVersion0,
SystemConfigUpdateBatcher,
},
},
hook: func(t *testing.T, log *types.Log) *types.Log {
addr := common.Address{19: 0xaa}
addrData, err := addressArgs.Pack(&addr)
require.NoError(t, err)
data, err := bytesArgs.Pack(addrData)
require.NoError(t, err)
log.Data = data
return log
},
config: eth.SystemConfig{
BatcherAddr: common.Address{19: 0xaa},
},
err: false,
},
{
// The overhead and the scalar should be updated.
name: "SystemConfigUpdateGasConfig",
log: &types.Log{
Topics: []common.Hash{
ConfigUpdateEventABIHash,
ConfigUpdateEventVersion0,
SystemConfigUpdateGasConfig,
},
},
hook: func(t *testing.T, log *types.Log) *types.Log {
overhead := big.NewInt(0xff)
scalar := big.NewInt(0xaa)
numberData, err := twoUint256.Pack(overhead, scalar)
require.NoError(t, err)
data, err := bytesArgs.Pack(numberData)
require.NoError(t, err)
log.Data = data
return log
},
config: eth.SystemConfig{
Overhead: eth.Bytes32{31: 0xff},
Scalar: eth.Bytes32{31: 0xaa},
},
err: false,
},
{
// The gas limit should be updated.
name: "SystemConfigUpdateGasLimit",
log: &types.Log{
Topics: []common.Hash{
ConfigUpdateEventABIHash,
ConfigUpdateEventVersion0,
SystemConfigUpdateGasLimit,
},
},
hook: func(t *testing.T, log *types.Log) *types.Log {
gasLimit := big.NewInt(0xbb)
numberData, err := oneUint256.Pack(gasLimit)
require.NoError(t, err)
data, err := bytesArgs.Pack(numberData)
require.NoError(t, err)
log.Data = data
return log
},
config: eth.SystemConfig{
GasLimit: 0xbb,
},
err: false,
},
{
name: "SystemConfigOneTopic",
log: &types.Log{
Topics: []common.Hash{
ConfigUpdateEventABIHash,
},
},
hook: func(t *testing.T, log *types.Log) *types.Log {
return log
},
config: eth.SystemConfig{},
err: true,
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
config := eth.SystemConfig{}
err := ProcessSystemConfigUpdateLogEvent(&config, test.hook(t, test.log))
if test.err {
require.Error(t, err)
} else {
require.NoError(t, err)
}
require.Equal(t, config, test.config)
})
}
}
......@@ -16,4 +16,8 @@ type Config struct {
// SequencerStopped is false when the driver should sequence new blocks.
SequencerStopped bool `json:"sequencer_stopped"`
// SequencerMaxSafeLag is the maximum number of L2 blocks for restricting the distance between L2 safe and unsafe.
// Disabled if 0.
SequencerMaxSafeLag uint64 `json:"sequencer_max_safe_lag"`
}
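Illustration of the throttle this enables in the driver loop (numbers assumed): with SequencerMaxSafeLag = 100, a safe head at 500 and an unsafe head at 600 satisfy 500 + 100 <= 600, so the sequencer pauses block production until derivation advances the safe head.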
......@@ -10,7 +10,6 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/sources"
)
type Metrics interface {
......@@ -82,8 +81,19 @@ type Network interface {
PublishL2Payload(ctx context.Context, payload *eth.ExecutionPayload) error
}
type AltSync interface {
// RequestL2Range informs the sync source that the given range of L2 blocks is missing,
// and should be retrieved from any available alternative syncing source.
// The start of the range is inclusive, the end is exclusive.
// The sync results should be returned back to the driver via the OnUnsafeL2Payload(ctx, payload) method.
// The latest requested range should always take priority over previous requests.
// There may be overlaps in requested ranges.
// An error may be returned if the scheduling fails immediately, e.g. a context timeout.
RequestL2Range(ctx context.Context, start, end uint64) error
}
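A minimal sketch of an AltSync implementation (illustrative only; a real source would deliver the fetched blocks asynchronously via OnUnsafeL2Payload):
type loggingAltSync struct {
	log log.Logger
}
func (a *loggingAltSync) RequestL2Range(ctx context.Context, start, end uint64) error {
	// A real implementation would schedule P2P or RPC fetches for [start, end) here.
	a.log.Debug("alt-sync requested", "start", start, "end", end)
	return nil
}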
// NewDriver composes an events handler that tracks L1 state, triggers L2 derivation, and optionally sequences new L2 blocks.
func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, syncClient *sources.SyncClient, network Network, log log.Logger, snapshotLog log.Logger, metrics Metrics) *Driver {
func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, altSync AltSync, network Network, log log.Logger, snapshotLog log.Logger, metrics Metrics) *Driver {
l1State := NewL1State(log, metrics)
sequencerConfDepth := NewConfDepth(driverCfg.SequencerConfDepth, l1State.L1Head, l1)
findL1Origin := NewL1OriginSelector(log, cfg, sequencerConfDepth)
......@@ -115,6 +125,6 @@ func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, sy
l1SafeSig: make(chan eth.L1BlockRef, 10),
l1FinalizedSig: make(chan eth.L1BlockRef, 10),
unsafeL2Payloads: make(chan *eth.ExecutionPayload, 10),
L2SyncCl: syncClient,
altSync: altSync,
}
}
......@@ -16,7 +16,6 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-service/backoff"
)
......@@ -64,8 +63,8 @@ type Driver struct {
l1SafeSig chan eth.L1BlockRef
l1FinalizedSig chan eth.L1BlockRef
// Backup unsafe sync client
L2SyncCl *sources.SyncClient
// Interface to signal the L2 block range to sync.
altSync AltSync
// L2 Signals:
......@@ -200,11 +199,12 @@ func (s *Driver) eventLoop() {
sequencerTimer.Reset(delay)
}
// Create a ticker to check if there is a gap in the engine queue every 15 seconds
// If there is, we send requests to the backup RPC to retrieve the missing payloads
// and add them to the unsafe queue.
altSyncTicker := time.NewTicker(15 * time.Second)
// Create a ticker to check if there is a gap in the engine queue. Whenever
// there is, we send requests to sync source to retrieve the missing payloads.
syncCheckInterval := time.Duration(s.config.BlockTime) * time.Second * 2
altSyncTicker := time.NewTicker(syncCheckInterval)
defer altSyncTicker.Stop()
lastUnsafeL2 := s.derivation.UnsafeL2Head()
for {
// If we are sequencing, and the L1 state is ready, update the trigger for the next sequencer action.
......@@ -212,14 +212,35 @@ func (s *Driver) eventLoop() {
// And avoid sequencing if the derivation pipeline indicates the engine is not ready.
if s.driverConfig.SequencerEnabled && !s.driverConfig.SequencerStopped &&
s.l1State.L1Head() != (eth.L1BlockRef{}) && s.derivation.EngineReady() {
// update sequencer time if the head changed
if s.sequencer.BuildingOnto().ID() != s.derivation.UnsafeL2Head().ID() {
if s.driverConfig.SequencerMaxSafeLag > 0 && s.derivation.SafeL2Head().Number+s.driverConfig.SequencerMaxSafeLag <= s.derivation.UnsafeL2Head().Number {
// If the safe head has fallen behind by a significant number of blocks, delay creating new blocks
// until the safe lag is below SequencerMaxSafeLag.
if sequencerCh != nil {
s.log.Warn(
"Delay creating new block since safe lag exceeds limit",
"safe_l2", s.derivation.SafeL2Head(),
"unsafe_l2", s.derivation.UnsafeL2Head(),
)
sequencerCh = nil
}
} else if s.sequencer.BuildingOnto().ID() != s.derivation.UnsafeL2Head().ID() {
// If we are sequencing, and the L1 state is ready, update the trigger for the next sequencer action.
// This may adjust at any time based on fork-choice changes or previous errors.
//
// update sequencer time if the head changed
planSequencerAction()
}
} else {
sequencerCh = nil
}
// If the engine is not ready, or if the L2 head is actively changing, then reset the alt-sync:
// there is no need to request L2 blocks when we are syncing already.
if head := s.derivation.UnsafeL2Head(); head != lastUnsafeL2 || !s.derivation.EngineReady() {
lastUnsafeL2 = head
altSyncTicker.Reset(syncCheckInterval)
}
select {
case <-sequencerCh:
payload, err := s.sequencer.RunNextSequencerAction(ctx)
......@@ -237,10 +258,12 @@ func (s *Driver) eventLoop() {
}
planSequencerAction() // schedule the next sequencer action to keep the sequencing looping
case <-altSyncTicker.C:
// Check if there is a gap in the current unsafe payload queue. If there is, attempt to fetch
// missing payloads from the backup RPC (if it is configured).
if s.L2SyncCl != nil {
s.checkForGapInUnsafeQueue(ctx)
// Check if there is a gap in the current unsafe payload queue.
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
err := s.checkForGapInUnsafeQueue(ctx)
cancel()
if err != nil {
s.log.Warn("failed to check for unsafe L2 blocks to sync", "err", err)
}
case payload := <-s.unsafeL2Payloads:
s.snapshot("New unsafe payload")
......@@ -462,35 +485,29 @@ type hashAndErrorChannel struct {
err chan error
}
// checkForGapInUnsafeQueue checks if there is a gap in the unsafe queue and attempts to retrieve the missing payloads from the backup RPC.
// WARNING: The sync client's attempt to retrieve the missing payloads is not guaranteed to succeed, and it will fail silently (besides
// emitting warning logs) if the requests fail.
func (s *Driver) checkForGapInUnsafeQueue(ctx context.Context) {
// checkForGapInUnsafeQueue checks if there is a gap in the unsafe queue and attempts to retrieve the missing payloads from an alt-sync method.
// WARNING: This is only an outgoing signal, the blocks are not guaranteed to be retrieved.
// Results are received through OnUnsafeL2Payload.
func (s *Driver) checkForGapInUnsafeQueue(ctx context.Context) error {
// subtract genesis time from wall clock to get the time elapsed since genesis, and then divide that
// difference by the block time to get the expected L2 block number at the current time. If the
// unsafe head does not have this block number, then there is a gap in the queue.
wallClock := uint64(time.Now().Unix())
genesisTimestamp := s.config.Genesis.L2Time
if wallClock < genesisTimestamp {
s.log.Debug("nothing to sync, did not reach genesis L2 time yet", "genesis", genesisTimestamp)
return nil
}
wallClockGenesisDiff := wallClock - genesisTimestamp
expectedL2Block := wallClockGenesisDiff / s.config.BlockTime
// Note: round down, we should not request blocks into the future.
blocksSinceGenesis := wallClockGenesisDiff / s.config.BlockTime
expectedL2Block := s.config.Genesis.L2.Number + blocksSinceGenesis
start, end := s.derivation.GetUnsafeQueueGap(expectedL2Block)
size := end - start
// Check if there is a gap between the unsafe head and the expected L2 block number at the current time.
if size > 0 {
s.log.Warn("Gap in payload queue tip and expected unsafe chain detected", "start", start, "end", end, "size", size)
s.log.Info("Attempting to fetch missing payloads from backup RPC", "start", start, "end", end, "size", size)
// Attempt to fetch the missing payloads from the backup unsafe sync RPC concurrently.
// Concurrent requests are safe here due to the engine queue being a priority queue.
for blockNumber := start; blockNumber <= end; blockNumber++ {
select {
case s.L2SyncCl.FetchUnsafeBlock <- blockNumber:
// Do nothing- the block number was successfully sent into the channel
default:
return // If the channel is full, return and wait for the next iteration of the event loop
}
}
if end > start {
s.log.Debug("requesting missing unsafe L2 block range", "start", start, "end", end, "size", end-start)
return s.altSync.RequestL2Range(ctx, start, end)
}
return nil
}
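A worked example of the expected-block computation (values assumed): with genesis L2 time 1000, block time 2s, genesis block number 0, and wall clock 1101, blocksSinceGenesis = (1101 - 1000) / 2 = 50 (integer division rounds down, so no future blocks are requested) and expectedL2Block = 0 + 50 = 50.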
......@@ -109,6 +109,9 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain
return nil, fmt.Errorf("failed to fetch current L2 forkchoice state: %w", err)
}
lgr.Info("Loaded current L2 heads", "unsafe", result.Unsafe, "safe", result.Safe, "finalized", result.Finalized,
"unsafe_origin", result.Unsafe.L1Origin, "unsafe_origin", result.Safe.L1Origin)
// Remember original unsafe block to determine reorg depth
prevUnsafe := result.Unsafe
......@@ -134,6 +137,7 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain
// Exit, find-sync start should start over, to move to an available L1 chain with block-by-number / not-found case.
return nil, fmt.Errorf("failed to retrieve L1 block: %w", err)
}
lgr.Info("Walking back L1Block by hash", "curr", l1Block, "next", b, "l2block", n)
l1Block = b
ahead = false
} else if l1Block == (eth.L1BlockRef{}) || n.L1Origin.Hash != l1Block.Hash {
......@@ -145,9 +149,10 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain
}
l1Block = b
ahead = notFound
lgr.Info("Walking back L1Block by number", "curr", l1Block, "next", b, "l2block", n)
}
lgr.Trace("walking sync start", "number", n.Number)
lgr.Trace("walking sync start", "l2block", n)
// Don't walk past genesis. If we were at the L2 genesis, but could not find its L1 origin,
// the L2 chain is building on the wrong L1 branch.
......@@ -201,6 +206,8 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain
// Don't traverse further than the finalized head to find a safe head
if n.Number == result.Finalized.Number {
lgr.Info("Hit finalized L2 head, returning immediately", "unsafe", result.Unsafe, "safe", result.Safe,
"finalized", result.Finalized, "unsafe_origin", result.Unsafe.L1Origin, "unsafe_origin", result.Safe.L1Origin)
result.Safe = n
return result, nil
}
......
......@@ -95,9 +95,12 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
func NewL1EndpointConfig(ctx *cli.Context) *node.L1EndpointConfig {
return &node.L1EndpointConfig{
L1NodeAddr: ctx.GlobalString(flags.L1NodeAddr.Name),
L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name),
L1RPCKind: sources.RPCProviderKind(strings.ToLower(ctx.GlobalString(flags.L1RPCProviderKind.Name))),
L1NodeAddr: ctx.GlobalString(flags.L1NodeAddr.Name),
L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name),
L1RPCKind: sources.RPCProviderKind(strings.ToLower(ctx.GlobalString(flags.L1RPCProviderKind.Name))),
RateLimit: ctx.GlobalFloat64(flags.L1RPCRateLimit.Name),
BatchSize: ctx.GlobalInt(flags.L1RPCMaxBatchSize.Name),
HttpPollInterval: ctx.Duration(flags.L1HTTPPollInterval.Name),
}
}
......@@ -136,15 +139,17 @@ func NewL2EndpointConfig(ctx *cli.Context, log log.Logger) (*node.L2EndpointConf
func NewL2SyncEndpointConfig(ctx *cli.Context) *node.L2SyncEndpointConfig {
return &node.L2SyncEndpointConfig{
L2NodeAddr: ctx.GlobalString(flags.BackupL2UnsafeSyncRPC.Name),
TrustRPC: ctx.GlobalBool(flags.BackupL2UnsafeSyncRPCTrustRPC.Name),
}
}
func NewDriverConfig(ctx *cli.Context) *driver.Config {
return &driver.Config{
VerifierConfDepth: ctx.GlobalUint64(flags.VerifierL1Confs.Name),
SequencerConfDepth: ctx.GlobalUint64(flags.SequencerL1Confs.Name),
SequencerEnabled: ctx.GlobalBool(flags.SequencerEnabledFlag.Name),
SequencerStopped: ctx.GlobalBool(flags.SequencerStoppedFlag.Name),
VerifierConfDepth: ctx.GlobalUint64(flags.VerifierL1Confs.Name),
SequencerConfDepth: ctx.GlobalUint64(flags.SequencerL1Confs.Name),
SequencerEnabled: ctx.GlobalBool(flags.SequencerEnabledFlag.Name),
SequencerStopped: ctx.GlobalBool(flags.SequencerStoppedFlag.Name),
SequencerMaxSafeLag: ctx.GlobalUint64(flags.SequencerMaxSafeLagFlag.Name),
}
}
......
......@@ -13,6 +13,7 @@ import (
"context"
"fmt"
"math/big"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
......@@ -56,6 +57,11 @@ type EthClientConfig struct {
// RPCProviderKind is a hint at what type of RPC provider we are dealing with
RPCProviderKind RPCProviderKind
// Method reset duration defines how long we stick to available RPC methods,
// till we re-attempt the user-preferred methods.
// If this is 0 then the client does not fall back to less optimal but available methods.
MethodResetDuration time.Duration
}
func (c *EthClientConfig) Check() error {
......@@ -118,9 +124,25 @@ type EthClient struct {
// This may be modified concurrently, but we don't lock since it's a single
// uint64 that's not critical (fine to miss or mix up a modification)
availableReceiptMethods ReceiptsFetchingMethod
// lastMethodsReset tracks when availableReceiptMethods was last reset.
// When receipt-fetching fails it falls back to available methods,
// but periodically it will try to reset to the preferred optimal methods.
lastMethodsReset time.Time
// methodResetDuration defines how long we wait before resetting availableReceiptMethods back to the preferred methods
methodResetDuration time.Duration
}
func (s *EthClient) PickReceiptsMethod(txCount uint64) ReceiptsFetchingMethod {
if now := time.Now(); now.Sub(s.lastMethodsReset) > s.methodResetDuration {
m := AvailableReceiptsFetchingMethods(s.provKind)
if s.availableReceiptMethods != m {
s.log.Warn("resetting back RPC preferences, please review RPC provider kind setting", "kind", s.provKind.String())
}
s.availableReceiptMethods = m
s.lastMethodsReset = now
}
return PickBestReceiptsFetchingMethod(s.provKind, s.availableReceiptMethods, txCount)
}
......@@ -128,7 +150,7 @@ func (s *EthClient) OnReceiptsMethodErr(m ReceiptsFetchingMethod, err error) {
if unusableMethod(err) {
// clear the bit of the method that errored
s.availableReceiptMethods &^= m
s.log.Warn("failed to use selected RPC method for receipt fetching, falling back to alternatives",
s.log.Warn("failed to use selected RPC method for receipt fetching, temporarily falling back to alternatives",
"provider_kind", s.provKind, "failed_method", m, "fallback", s.availableReceiptMethods, "err", err)
} else {
s.log.Debug("failed to use selected RPC method for receipt fetching, but method does appear to be available, so we continue to use it",
......@@ -155,6 +177,8 @@ func NewEthClient(client client.RPC, log log.Logger, metrics caching.Metrics, co
headersCache: caching.NewLRUCache(metrics, "headers", config.HeadersCacheSize),
payloadsCache: caching.NewLRUCache(metrics, "payloads", config.PayloadsCacheSize),
availableReceiptMethods: AvailableReceiptsFetchingMethods(config.RPCProviderKind),
lastMethodsReset: time.Now(),
methodResetDuration: config.MethodResetDuration,
}, nil
}
......@@ -165,9 +189,39 @@ func (s *EthClient) SubscribeNewHead(ctx context.Context, ch chan<- *types.Heade
return s.client.EthSubscribe(ctx, ch, "newHeads")
}
func (s *EthClient) headerCall(ctx context.Context, method string, id any) (*HeaderInfo, error) {
// rpcBlockID is an internal type to enforce header and block call results match the requested identifier
type rpcBlockID interface {
// Arg translates the object into an RPC argument
Arg() any
// CheckID verifies a block/header result matches the requested block identifier
CheckID(id eth.BlockID) error
}
// hashID implements rpcBlockID for safe block-by-hash fetching
type hashID common.Hash
func (h hashID) Arg() any { return common.Hash(h) }
func (h hashID) CheckID(id eth.BlockID) error {
if common.Hash(h) != id.Hash {
return fmt.Errorf("expected block hash %s but got block %s", common.Hash(h), id)
}
return nil
}
// numberID implements rpcBlockID for safe block-by-number fetching
type numberID uint64
func (n numberID) Arg() any { return hexutil.EncodeUint64(uint64(n)) }
func (n numberID) CheckID(id eth.BlockID) error {
if uint64(n) != id.Number {
return fmt.Errorf("expected block number %d but got block %s", uint64(n), id)
}
return nil
}
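A short sketch of the mismatch detection these types enable (values assumed): an RPC that returns block 6 for a block-5 request now fails fast instead of poisoning the caches:
var id rpcBlockID = numberID(5)
err := id.CheckID(eth.BlockID{Number: 6}) // "expected block number 5 but got block ..."
_ = err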
func (s *EthClient) headerCall(ctx context.Context, method string, id rpcBlockID) (*HeaderInfo, error) {
var header *rpcHeader
err := s.client.CallContext(ctx, &header, method, id, false) // headers are just blocks without txs
err := s.client.CallContext(ctx, &header, method, id.Arg(), false) // headers are just blocks without txs
if err != nil {
return nil, err
}
......@@ -178,13 +232,16 @@ func (s *EthClient) headerCall(ctx context.Context, method string, id any) (*Hea
if err != nil {
return nil, err
}
if err := id.CheckID(eth.ToBlockID(info)); err != nil {
return nil, fmt.Errorf("fetched block header does not match requested ID: %w", err)
}
s.headersCache.Add(info.Hash(), info)
return info, nil
}
func (s *EthClient) blockCall(ctx context.Context, method string, id any) (*HeaderInfo, types.Transactions, error) {
func (s *EthClient) blockCall(ctx context.Context, method string, id rpcBlockID) (*HeaderInfo, types.Transactions, error) {
var block *rpcBlock
err := s.client.CallContext(ctx, &block, method, id, true)
err := s.client.CallContext(ctx, &block, method, id.Arg(), true)
if err != nil {
return nil, nil, err
}
......@@ -195,14 +252,17 @@ func (s *EthClient) blockCall(ctx context.Context, method string, id any) (*Head
if err != nil {
return nil, nil, err
}
if err := id.CheckID(eth.ToBlockID(info)); err != nil {
return nil, nil, fmt.Errorf("fetched block data does not match requested ID: %w", err)
}
s.headersCache.Add(info.Hash(), info)
s.transactionsCache.Add(info.Hash(), txs)
return info, txs, nil
}
func (s *EthClient) payloadCall(ctx context.Context, method string, id any) (*eth.ExecutionPayload, error) {
func (s *EthClient) payloadCall(ctx context.Context, method string, id rpcBlockID) (*eth.ExecutionPayload, error) {
var block *rpcBlock
err := s.client.CallContext(ctx, &block, method, id, true)
err := s.client.CallContext(ctx, &block, method, id.Arg(), true)
if err != nil {
return nil, err
}
......@@ -213,6 +273,9 @@ func (s *EthClient) payloadCall(ctx context.Context, method string, id any) (*et
if err != nil {
return nil, err
}
if err := id.CheckID(payload.ID()); err != nil {
return nil, fmt.Errorf("fetched payload does not match requested ID: %w", err)
}
s.payloadsCache.Add(payload.BlockHash, payload)
return payload, nil
}
......@@ -231,17 +294,17 @@ func (s *EthClient) InfoByHash(ctx context.Context, hash common.Hash) (eth.Block
if header, ok := s.headersCache.Get(hash); ok {
return header.(*HeaderInfo), nil
}
return s.headerCall(ctx, "eth_getBlockByHash", hash)
return s.headerCall(ctx, "eth_getBlockByHash", hashID(hash))
}
func (s *EthClient) InfoByNumber(ctx context.Context, number uint64) (eth.BlockInfo, error) {
// can't hit the cache when querying by number due to reorgs.
return s.headerCall(ctx, "eth_getBlockByNumber", hexutil.EncodeUint64(number))
return s.headerCall(ctx, "eth_getBlockByNumber", numberID(number))
}
func (s *EthClient) InfoByLabel(ctx context.Context, label eth.BlockLabel) (eth.BlockInfo, error) {
// can't hit the cache when querying the head due to reorgs / changes.
return s.headerCall(ctx, "eth_getBlockByNumber", string(label))
return s.headerCall(ctx, "eth_getBlockByNumber", label)
}
func (s *EthClient) InfoAndTxsByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, types.Transactions, error) {
......@@ -250,32 +313,32 @@ func (s *EthClient) InfoAndTxsByHash(ctx context.Context, hash common.Hash) (eth
return header.(*HeaderInfo), txs.(types.Transactions), nil
}
}
return s.blockCall(ctx, "eth_getBlockByHash", hash)
return s.blockCall(ctx, "eth_getBlockByHash", hashID(hash))
}
func (s *EthClient) InfoAndTxsByNumber(ctx context.Context, number uint64) (eth.BlockInfo, types.Transactions, error) {
// can't hit the cache when querying by number due to reorgs.
return s.blockCall(ctx, "eth_getBlockByNumber", hexutil.EncodeUint64(number))
return s.blockCall(ctx, "eth_getBlockByNumber", numberID(number))
}
func (s *EthClient) InfoAndTxsByLabel(ctx context.Context, label eth.BlockLabel) (eth.BlockInfo, types.Transactions, error) {
// can't hit the cache when querying the head due to reorgs / changes.
return s.blockCall(ctx, "eth_getBlockByNumber", string(label))
return s.blockCall(ctx, "eth_getBlockByNumber", label)
}
func (s *EthClient) PayloadByHash(ctx context.Context, hash common.Hash) (*eth.ExecutionPayload, error) {
if payload, ok := s.payloadsCache.Get(hash); ok {
return payload.(*eth.ExecutionPayload), nil
}
return s.payloadCall(ctx, "eth_getBlockByHash", hash)
return s.payloadCall(ctx, "eth_getBlockByHash", hashID(hash))
}
func (s *EthClient) PayloadByNumber(ctx context.Context, number uint64) (*eth.ExecutionPayload, error) {
return s.payloadCall(ctx, "eth_getBlockByNumber", hexutil.EncodeUint64(number))
return s.payloadCall(ctx, "eth_getBlockByNumber", numberID(number))
}
func (s *EthClient) PayloadByLabel(ctx context.Context, label eth.BlockLabel) (*eth.ExecutionPayload, error) {
return s.payloadCall(ctx, "eth_getBlockByNumber", string(label))
return s.payloadCall(ctx, "eth_getBlockByNumber", label)
}
// FetchReceipts returns a block info and all of the receipts associated with transactions in the block.
......
......@@ -140,3 +140,40 @@ func TestEthClient_InfoByNumber(t *testing.T) {
require.Equal(t, info, expectedInfo)
m.Mock.AssertExpectations(t)
}
func TestEthClient_WrongInfoByNumber(t *testing.T) {
m := new(mockRPC)
_, rhdr := randHeader()
rhdr2 := *rhdr
rhdr2.Number += 1
n := rhdr.Number
ctx := context.Background()
m.On("CallContext", ctx, new(*rpcHeader),
"eth_getBlockByNumber", []any{n.String(), false}).Run(func(args mock.Arguments) {
*args[1].(**rpcHeader) = &rhdr2
}).Return([]error{nil})
s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{SeqWindowSize: 10}, true, RPCKindBasic))
require.NoError(t, err)
_, err = s.InfoByNumber(ctx, uint64(n))
require.Error(t, err, "cannot accept the wrong block")
m.Mock.AssertExpectations(t)
}
func TestEthClient_WrongInfoByHash(t *testing.T) {
m := new(mockRPC)
_, rhdr := randHeader()
rhdr2 := *rhdr
rhdr2.Root[0] += 1
rhdr2.Hash = rhdr2.computeBlockHash()
k := rhdr.Hash
ctx := context.Background()
m.On("CallContext", ctx, new(*rpcHeader),
"eth_getBlockByHash", []any{k, false}).Run(func(args mock.Arguments) {
*args[1].(**rpcHeader) = &rhdr2
}).Return([]error{nil})
s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{SeqWindowSize: 10}, true, RPCKindBasic))
require.NoError(t, err)
_, err = s.InfoByHash(ctx, k)
require.Error(t, err, "cannot accept the wrong block")
m.Mock.AssertExpectations(t)
}
......@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"strings"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
......@@ -40,6 +41,7 @@ func L1ClientDefaultConfig(config *rollup.Config, trustRPC bool, kind RPCProvide
TrustRPC: trustRPC,
MustBePostMerge: false,
RPCProviderKind: kind,
MethodResetDuration: time.Minute,
},
// Not bounded by span, to cover find-sync-start range fully for speedy recovery after errors.
L1BlockRefsCacheSize: fullSpan,
......
......@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"strings"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
......@@ -50,6 +51,7 @@ func L2ClientDefaultConfig(config *rollup.Config, trustRPC bool) *L2ClientConfig
TrustRPC: trustRPC,
MustBePostMerge: true,
RPCProviderKind: RPCKindBasic,
MethodResetDuration: time.Minute,
},
// Not bounded by span, to cover find-sync-start range fully for speedy recovery after errors.
L2BlockRefsCacheSize: fullSpan,
......
......@@ -6,6 +6,7 @@ import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/eth"
......@@ -85,6 +86,7 @@ func (e *methodNotFoundError) Error() string {
type ReceiptsTestCase struct {
name string
providerKind RPCProviderKind
staticMethod bool
setup func(t *testing.T) (*rpcBlock, []ReceiptsRequest)
}
......@@ -142,6 +144,10 @@ func (tc *ReceiptsTestCase) Run(t *testing.T) {
TrustRPC: false,
MustBePostMerge: false,
RPCProviderKind: tc.providerKind,
MethodResetDuration: time.Minute,
}
if tc.staticMethod { // if static, instantly reset, for fast clock-independent testing
testCfg.MethodResetDuration = 0
}
logger := testlog.Logger(t, log.LvlError)
ethCl, err := NewEthClient(client.NewBaseRPCClient(cl), logger, nil, testCfg)
......@@ -226,6 +232,12 @@ func TestEthClient_FetchReceipts(t *testing.T) {
providerKind: RPCKindAlchemy,
setup: fallbackCase(30, AlchemyGetTransactionReceipts),
},
{
name: "alchemy sticky",
providerKind: RPCKindAlchemy,
staticMethod: true,
setup: fallbackCase(30, AlchemyGetTransactionReceipts, AlchemyGetTransactionReceipts),
},
{
name: "alchemy fallback 1",
providerKind: RPCKindAlchemy,
......
......@@ -245,3 +245,23 @@ func RandomBlockPrependTxs(rng *rand.Rand, txCount int, ptxs ...*types.Transacti
}
return block, receipts
}
func RandomOutputResponse(rng *rand.Rand) *eth.OutputResponse {
return &eth.OutputResponse{
Version: eth.Bytes32(RandomHash(rng)),
OutputRoot: eth.Bytes32(RandomHash(rng)),
BlockRef: RandomL2BlockRef(rng),
WithdrawalStorageRoot: RandomHash(rng),
StateRoot: RandomHash(rng),
Status: &eth.SyncStatus{
CurrentL1: RandomBlockRef(rng),
CurrentL1Finalized: RandomBlockRef(rng),
HeadL1: RandomBlockRef(rng),
SafeL1: RandomBlockRef(rng),
FinalizedL1: RandomBlockRef(rng),
UnsafeL2: RandomL2BlockRef(rng),
SafeL2: RandomL2BlockRef(rng),
FinalizedL2: RandomL2BlockRef(rng),
},
}
}
......@@ -8,14 +8,13 @@ import (
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
opsigner "github.com/ethereum-optimism/optimism/op-signer/client"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
)
const envVarPrefix = "OP_PROPOSER"
var (
/* Required Flags */
// Required Flags
L1EthRpcFlag = cli.StringFlag{
Name: "l1-eth-rpc",
Usage: "HTTP provider URL for L1",
......@@ -41,53 +40,14 @@ var (
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "POLL_INTERVAL"),
}
NumConfirmationsFlag = cli.Uint64Flag{
Name: "num-confirmations",
Usage: "Number of confirmations which we will wait after " +
"appending a new batch",
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "NUM_CONFIRMATIONS"),
}
SafeAbortNonceTooLowCountFlag = cli.Uint64Flag{
Name: "safe-abort-nonce-too-low-count",
Usage: "Number of ErrNonceTooLow observations required to " +
"give up on a tx at a particular nonce without receiving " +
"confirmation",
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "SAFE_ABORT_NONCE_TOO_LOW_COUNT"),
}
ResubmissionTimeoutFlag = cli.DurationFlag{
Name: "resubmission-timeout",
Usage: "Duration we will wait before resubmitting a " +
"transaction to L1",
Required: true,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "RESUBMISSION_TIMEOUT"),
}
/* Optional flags */
MnemonicFlag = cli.StringFlag{
Name: "mnemonic",
Usage: "The mnemonic used to derive the wallets for either the " +
"sequencer or the l2output",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "MNEMONIC"),
}
L2OutputHDPathFlag = cli.StringFlag{
Name: "l2-output-hd-path",
Usage: "The HD path used to derive the l2output wallet from the " +
"mnemonic. The mnemonic flag must also be set.",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "L2_OUTPUT_HD_PATH"),
}
PrivateKeyFlag = cli.StringFlag{
Name: "private-key",
Usage: "The private key to use with the l2output wallet. Must not be used with mnemonic.",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "PRIVATE_KEY"),
}
// Optional flags
AllowNonFinalizedFlag = cli.BoolFlag{
Name: "allow-non-finalized",
Usage: "Allow the proposer to submit proposals for L2 blocks derived from non-finalized L1 blocks.",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "ALLOW_NON_FINALIZED"),
}
// Legacy Flags
L2OutputHDPathFlag = txmgr.L2OutputHDPathFlag
)
var requiredFlags = []cli.Flag{
......@@ -95,15 +55,9 @@ var requiredFlags = []cli.Flag{
RollupRpcFlag,
L2OOAddressFlag,
PollIntervalFlag,
NumConfirmationsFlag,
SafeAbortNonceTooLowCountFlag,
ResubmissionTimeoutFlag,
}
var optionalFlags = []cli.Flag{
MnemonicFlag,
L2OutputHDPathFlag,
PrivateKeyFlag,
AllowNonFinalizedFlag,
}
......@@ -113,7 +67,7 @@ func init() {
optionalFlags = append(optionalFlags, oplog.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, opmetrics.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, oppprof.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, opsigner.CLIFlags(envVarPrefix)...)
optionalFlags = append(optionalFlags, txmgr.CLIFlags(envVarPrefix)...)
Flags = append(requiredFlags, optionalFlags...)
}
......
package metrics
import (
"github.com/ethereum-optimism/optimism/op-node/eth"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
)
type noopMetrics struct{ opmetrics.NoopRefMetrics }
var NoopMetrics Metricer = new(noopMetrics)
func (*noopMetrics) RecordInfo(version string) {}
func (*noopMetrics) RecordUp() {}
func (*noopMetrics) RecordL2BlocksProposed(l2ref eth.L2BlockRef) {}
......@@ -29,6 +29,9 @@ func Do(maxAttempts int, strategy Strategy, op Operation) error {
}
func DoCtx(ctx context.Context, maxAttempts int, strategy Strategy, op Operation) error {
if maxAttempts < 1 {
return fmt.Errorf("need at least 1 attempt to run op, but have %d max attempts", maxAttempts)
}
var attempt int
reattemptCh := make(chan struct{}, 1)
......
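A usage sketch for the guarded retry helper (the fixed-delay Strategy here is hypothetical and assumes Strategy exposes a Duration(attempt int) time.Duration method; the package's real strategies may differ):
type fixedDelay time.Duration
func (d fixedDelay) Duration(attempt int) time.Duration { return time.Duration(d) }
// retryPing retries a hypothetical flaky operation up to 3 attempts.
func retryPing() error {
	return backoff.Do(3, fixedDelay(100*time.Millisecond), func() error {
		return ping() // hypothetical flaky operation
	})
}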
......@@ -61,6 +61,8 @@ services:
--p2p.listen.ip=0.0.0.0
--p2p.listen.tcp=9003
--p2p.listen.udp=9003
--p2p.scoring.peers=light
--p2p.ban.peers=true
--snapshotlog.file=/op_log/snapshot.log
--p2p.priv.path=/config/p2p-node-key.txt
--metrics.enabled
......@@ -121,6 +123,8 @@ services:
OP_BATCHER_L1_ETH_RPC: http://l1:8545
OP_BATCHER_L2_ETH_RPC: http://l2:8545
OP_BATCHER_ROLLUP_RPC: http://op-node:8545
TX_MANAGER_TIMEOUT: 10m
OFFLINE_GAS_ESTIMATION: false
OP_BATCHER_MAX_CHANNEL_DURATION: 1
OP_BATCHER_MAX_L1_TX_SIZE_BYTES: 120000
OP_BATCHER_TARGET_L1_TX_SIZE_BYTES: 100000
......
......@@ -16,7 +16,7 @@ WORKDIR /opt/foundry
# Only diff from upstream docker image is this clone instead
# of COPY. We select a specific commit to use.
RUN git clone https://github.com/foundry-rs/foundry.git . \
&& git checkout 8f3fca9c608d58981daaffe11e7f8076644cb753
&& git checkout da2392e58bb8a7fefeba46b40c4df1afad8ccd22
RUN source $HOME/.profile && \
cargo build --release && \
......