Commit 46c639ef authored by mergify[bot], committed by GitHub

Merge branch 'develop' into alt-sync-improvements

parents 163d0720 c12366c4
---
'@eth-optimism/sdk': patch
---
Have the SDK automatically create the Standard and ETH bridges when an L1StandardBridge address is provided.
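As a rough illustration of the new behavior (a sketch, not the authoritative SDK docs): when a custom `L1StandardBridge` address is passed in the `contracts` override, the `CrossChainMessenger` should wire up the standard and ETH bridge adapters on its own, without an explicit `bridges` option. The chain IDs, providers, and bridge address below are placeholders.

```ts
import { ethers } from 'ethers'
import { CrossChainMessenger } from '@eth-optimism/sdk'

// Placeholder RPC endpoints; Goerli / OP Goerli chain IDs used as an example.
const l1Provider = new ethers.providers.JsonRpcProvider('http://localhost:8545')
const l2Provider = new ethers.providers.JsonRpcProvider('http://localhost:9545')

const messenger = new CrossChainMessenger({
  l1ChainId: 5,
  l2ChainId: 420,
  l1SignerOrProvider: l1Provider,
  l2SignerOrProvider: l2Provider,
  contracts: {
    l1: {
      // Supplying the L1StandardBridge address should now be enough for the
      // SDK to create the Standard and ETH bridge adapters automatically.
      L1StandardBridge: '0x0000000000000000000000000000000000000bb1', // placeholder address
    },
  },
})
```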
---
'@eth-optimism/batch-submitter-service': patch
---
Allow deposit-only batches
---
'@eth-optimism/contracts-bedrock': patch
---
Added a constructor to the System Dictator
---
'@eth-optimism/batch-submitter-service': patch
---
fix flag name for MaxStateRootElements in batch-submitter
fix log package for proposer
...@@ -518,6 +518,10 @@ jobs: ...@@ -518,6 +518,10 @@ jobs:
patterns: packages patterns: packages
# Note: The below needs to be manually configured whenever we # Note: The below needs to be manually configured whenever we
# add a new package to CI. # add a new package to CI.
- run:
name: Check common-ts
command: npx depcheck
working_directory: packages/common-ts
- run: - run:
name: Check contracts name: Check contracts
command: npx depcheck command: npx depcheck
......
...@@ -66,6 +66,7 @@ You'll need the following: ...@@ -66,6 +66,7 @@ You'll need the following:
* [Yarn](https://classic.yarnpkg.com/en/docs/install) * [Yarn](https://classic.yarnpkg.com/en/docs/install)
* [Docker](https://docs.docker.com/get-docker/) * [Docker](https://docs.docker.com/get-docker/)
* [Docker Compose](https://docs.docker.com/compose/install/) * [Docker Compose](https://docs.docker.com/compose/install/)
* [Go](https://go.dev/dl/)
* [Foundry](https://getfoundry.sh) * [Foundry](https://getfoundry.sh)
### Setup ### Setup
......
...@@ -209,7 +209,7 @@ func NewConfig(ctx *cli.Context) (Config, error) { ...@@ -209,7 +209,7 @@ func NewConfig(ctx *cli.Context) (Config, error) {
MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeFlag.Name), MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeFlag.Name),
MaxPlaintextBatchSize: ctx.GlobalUint64(flags.MaxPlaintextBatchSizeFlag.Name), MaxPlaintextBatchSize: ctx.GlobalUint64(flags.MaxPlaintextBatchSizeFlag.Name),
MinStateRootElements: ctx.GlobalUint64(flags.MinStateRootElementsFlag.Name), MinStateRootElements: ctx.GlobalUint64(flags.MinStateRootElementsFlag.Name),
MaxStateRootElements: ctx.GlobalUint64(flags.MinStateRootElementsFlag.Name), MaxStateRootElements: ctx.GlobalUint64(flags.MaxStateRootElementsFlag.Name),
MaxBatchSubmissionTime: ctx.GlobalDuration(flags.MaxBatchSubmissionTimeFlag.Name), MaxBatchSubmissionTime: ctx.GlobalDuration(flags.MaxBatchSubmissionTimeFlag.Name),
PollInterval: ctx.GlobalDuration(flags.PollIntervalFlag.Name), PollInterval: ctx.GlobalDuration(flags.PollIntervalFlag.Name),
NumConfirmations: ctx.GlobalUint64(flags.NumConfirmationsFlag.Name), NumConfirmations: ctx.GlobalUint64(flags.NumConfirmationsFlag.Name),
......
...@@ -13,13 +13,13 @@ import ( ...@@ -13,13 +13,13 @@ import (
"github.com/ethereum-optimism/optimism/bss-core/metrics" "github.com/ethereum-optimism/optimism/bss-core/metrics"
"github.com/ethereum-optimism/optimism/bss-core/txmgr" "github.com/ethereum-optimism/optimism/bss-core/txmgr"
l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient" l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient"
"github.com/ethereum-optimism/optimism/l2geth/log"
"github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
) )
// stateRootSize is the size in bytes of a state root. // stateRootSize is the size in bytes of a state root.
......
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
"github.com/ethereum-optimism/optimism/batch-submitter/drivers/sequencer" "github.com/ethereum-optimism/optimism/batch-submitter/drivers/sequencer"
l2common "github.com/ethereum-optimism/optimism/l2geth/common" l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum-optimism/optimism/l2geth/core/types"
l2types "github.com/ethereum-optimism/optimism/l2geth/core/types" l2types "github.com/ethereum-optimism/optimism/l2geth/core/types"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
...@@ -47,3 +48,76 @@ func TestBatchElementFromBlock(t *testing.T) { ...@@ -47,3 +48,76 @@ func TestBatchElementFromBlock(t *testing.T) {
require.False(t, element.IsSequencerTx()) require.False(t, element.IsSequencerTx())
require.Nil(t, element.Tx) require.Nil(t, element.Tx)
} }
func TestGenSequencerParams(t *testing.T) {
tx := types.NewTransaction(0, l2common.Address{}, big.NewInt(0), 0, big.NewInt(0), []byte{})
shouldStartAtElement := uint64(1)
blockOffset := uint64(1)
batches := []sequencer.BatchElement{
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 1, BlockNumber: 1, Tx: sequencer.NewCachedTx(tx)},
}
params, err := sequencer.GenSequencerBatchParams(shouldStartAtElement, blockOffset, batches)
require.NoError(t, err)
require.Equal(t, uint64(0), params.ShouldStartAtElement)
require.Equal(t, uint64(len(batches)), params.TotalElementsToAppend)
require.Equal(t, len(batches), len(params.Contexts))
// There is only 1 sequencer tx
require.Equal(t, 1, len(params.Txs))
// There are 2 contexts
// The first context contains the deposit
context1 := params.Contexts[0]
require.Equal(t, uint64(0), context1.NumSequencedTxs)
require.Equal(t, uint64(1), context1.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context1.Timestamp)
require.Equal(t, uint64(1), context1.BlockNumber)
// The second context contains the sequencer tx
context2 := params.Contexts[1]
require.Equal(t, uint64(1), context2.NumSequencedTxs)
require.Equal(t, uint64(0), context2.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context2.Timestamp)
require.Equal(t, uint64(1), context2.BlockNumber)
}
func TestGenSequencerParamsOnlyDeposits(t *testing.T) {
shouldStartAtElement := uint64(1)
blockOffset := uint64(1)
batches := []sequencer.BatchElement{
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 2, BlockNumber: 2},
}
params, err := sequencer.GenSequencerBatchParams(shouldStartAtElement, blockOffset, batches)
require.NoError(t, err)
// The batches will pack deposits into the same context when their
// timestamps and blocknumbers are the same
require.Equal(t, uint64(0), params.ShouldStartAtElement)
require.Equal(t, uint64(len(batches)), params.TotalElementsToAppend)
// 2 deposits have the same timestamp + blocknumber, they go in the
// same context. 1 deposit has a different timestamp + blocknumber,
// it goes into a different context. Therefore there are 2 contexts
require.Equal(t, 2, len(params.Contexts))
// No sequencer txs
require.Equal(t, 0, len(params.Txs))
// There are 2 contexts
// The first context contains the deposit
context1 := params.Contexts[0]
require.Equal(t, uint64(0), context1.NumSequencedTxs)
require.Equal(t, uint64(2), context1.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context1.Timestamp)
require.Equal(t, uint64(1), context1.BlockNumber)
context2 := params.Contexts[1]
require.Equal(t, uint64(0), context2.NumSequencedTxs)
require.Equal(t, uint64(1), context2.NumSubsequentQueueTxs)
require.Equal(t, uint64(2), context2.Timestamp)
require.Equal(t, uint64(2), context2.BlockNumber)
}
...@@ -222,11 +222,6 @@ func (p *AppendSequencerBatchParams) Write( ...@@ -222,11 +222,6 @@ func (p *AppendSequencerBatchParams) Write(
return ErrMalformedBatch return ErrMalformedBatch
} }
// There must be transactions if there are contexts
if len(p.Txs) == 0 && len(p.Contexts) != 0 {
return ErrMalformedBatch
}
// copy the contexts as to not malleate the struct // copy the contexts as to not malleate the struct
// when it is a typed batch // when it is a typed batch
contexts := make([]BatchContext, 0, len(p.Contexts)+1) contexts := make([]BatchContext, 0, len(p.Contexts)+1)
...@@ -361,9 +356,6 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error { ...@@ -361,9 +356,6 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
if len(p.Contexts) == 0 && len(p.Txs) != 0 { if len(p.Contexts) == 0 && len(p.Txs) != 0 {
return ErrMalformedBatch return ErrMalformedBatch
} }
if len(p.Txs) == 0 && len(p.Contexts) != 0 {
return ErrMalformedBatch
}
return closeReader() return closeReader()
} else if err != nil { } else if err != nil {
return err return err
......
...@@ -46,7 +46,7 @@ ...@@ -46,7 +46,7 @@
} }
], ],
"txs": [], "txs": [],
"error": true "error": false
}, },
{ {
"name": "multiple-contexts-no-txs", "name": "multiple-contexts-no-txs",
...@@ -80,7 +80,7 @@ ...@@ -80,7 +80,7 @@
} }
], ],
"txs": [], "txs": [],
"error": true "error": false
}, },
{ {
"name": "complex", "name": "complex",
......
...@@ -166,6 +166,7 @@ module.exports = { ...@@ -166,6 +166,7 @@ module.exports = {
children: [ children: [
"/docs/build/tutorials/add-attr.md", "/docs/build/tutorials/add-attr.md",
"/docs/build/tutorials/new-precomp.md", "/docs/build/tutorials/new-precomp.md",
"/docs/build/tutorials/predeploys.md"
] ]
} // End of tutorials } // End of tutorials
], ],
......
...@@ -32,6 +32,11 @@ ...@@ -32,6 +32,11 @@
<i class="fab fa-discord"></i> Discord community <i class="fab fa-discord"></i> Discord community
</a> </a>
</li> </li>
<li>
<a href="https://wkf.ms/3XTdpLl" target="_blank" rel="noopener noreferrer">
<i class="far fa-comment-dots"></i> Get support for going live
</a>
</li>
</ul> </ul>
</div> </div>
</div> </div>
......
---
title: Modifying Predeployed Contracts
lang: en-US
---
::: warning 🚧 OP Stack Hacks are explicitly things that you can do with the OP Stack that are *not* currently intended for production use
OP Stack Hacks are not for the faint of heart. You will not be able to receive significant developer support for OP Stack Hacks — be prepared to get your hands dirty and to work without support.
:::
OP Stack blockchains have a number of [predeployed contracts](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/src/constants.ts) that provide important functionality.
Most of those contracts are proxies that can be upgraded using the `proxyAdminOwner`, which was configured when the network was initially deployed.
The predeploys are controlled from a predeploy called [`ProxyAdmin`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol), whose address is `0x4200000000000000000000000000000000000018`.
The function to call is [`upgrade(address,address)`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol#L211-L229).
The first parameter is the proxy to upgrade, and the second is the address of a new implementation.
For example, the legacy `L1BlockNumber` contract is at `0x420...013`.
To disable this predeploy, we'll set its implementation to the zero address, `0x00...00`.
We do this using the [Foundry](https://book.getfoundry.sh/) command `cast`; a scripted sketch of the same calls appears after the steps below.
1. We'll need several constants.
- Set these addresses as variables in your terminal.
```sh
L1BLOCKNUM=0x4200000000000000000000000000000000000013
PROXY_ADMIN=0x4200000000000000000000000000000000000018
ZERO_ADDR=0x0000000000000000000000000000000000000000
```
- Set `PRIVKEY` to the private key of your ADMIN account.
- Set `ETH_RPC_URL`. If you're on the computer that runs the blockchain, use this command.
```sh
export ETH_RPC_URL=http://localhost:8545
```
1. Verify `L1BlockNumber` works correctly.
See that when you call the contract you get a block number, and twelve seconds later you get the next one (block time on L1 is twelve seconds).
```sh
cast call $L1BLOCKNUM 'number()' | cast --to-dec
sleep 12 && cast call $L1BLOCKNUM 'number()' | cast --to-dec
```
1. Get the current implementation for the contract.
```sh
L1BLOCKNUM_IMPLEMENTATION=`cast call $L1BLOCKNUM "implementation()" | sed 's/000000000000000000000000//'`
echo $L1BLOCKNUM_IMPLEMENTATION
```
1. Change the implementation to the zero address.
```sh
cast send --private-key $PRIVKEY $PROXY_ADMIN "upgrade(address,address)" $L1BLOCKNUM $ZERO_ADDR
```
1. See that the implementation is address zero, and that calling it fails.
```sh
cast call $L1BLOCKNUM 'implementation()'
cast call $L1BLOCKNUM 'number()'
```
1. Fix the predeploy by returning it to the previous implementation, and verify it works.
```sh
cast send --private-key $PRIVKEY $PROXY_ADMIN "upgrade(address,address)" $L1BLOCKNUM $L1BLOCKNUM_IMPLEMENTATION
cast call $L1BLOCKNUM 'number()' | cast --to-dec
```
\ No newline at end of file
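The tutorial above drives everything through `cast`. For completeness, here is a minimal, hypothetical ethers.js sketch of the same upgrade call; the RPC URL, the `PRIVKEY` environment variable, and the single-function ABI fragments are assumptions for illustration, not part of the official tooling.

```ts
import { ethers } from 'ethers'

// Assumed local endpoint and admin key; replace with your own values.
const provider = new ethers.providers.JsonRpcProvider('http://localhost:8545')
const admin = new ethers.Wallet(process.env.PRIVKEY as string, provider)

const PROXY_ADMIN = '0x4200000000000000000000000000000000000018'
const L1BLOCKNUM = '0x4200000000000000000000000000000000000013'

// Minimal human-readable ABI fragments for the two calls used in the tutorial.
const proxyAdmin = new ethers.Contract(
  PROXY_ADMIN,
  ['function upgrade(address _proxy, address _implementation)'],
  admin
)
const l1BlockNumber = new ethers.Contract(
  L1BLOCKNUM,
  ['function number() view returns (uint256)'],
  provider
)

async function main() {
  console.log('block number before:', (await l1BlockNumber.number()).toString())

  // Point the proxy at the zero address to disable the predeploy...
  await (await proxyAdmin.upgrade(L1BLOCKNUM, ethers.constants.AddressZero)).wait()
  // ...after which number() should revert. Restoring works the same way:
  // proxyAdmin.upgrade(L1BLOCKNUM, previousImplementation).
}

main().catch(console.error)
```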
module github.com/ethereum-optimism/optimism module github.com/ethereum-optimism/optimism
go 1.18 go 1.19
require ( require (
github.com/btcsuite/btcd v0.23.3 github.com/btcsuite/btcd v0.23.3
...@@ -9,7 +9,7 @@ require ( ...@@ -9,7 +9,7 @@ require (
github.com/docker/docker v20.10.21+incompatible github.com/docker/docker v20.10.21+incompatible
github.com/docker/go-connections v0.4.0 github.com/docker/go-connections v0.4.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum/go-ethereum v1.11.2 github.com/ethereum/go-ethereum v1.11.4
github.com/fsnotify/fsnotify v1.6.0 github.com/fsnotify/fsnotify v1.6.0
github.com/golang/snappy v0.0.4 github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.5.9 github.com/google/go-cmp v0.5.9
...@@ -69,11 +69,11 @@ require ( ...@@ -69,11 +69,11 @@ require (
github.com/francoispqt/gojay v1.2.13 // indirect github.com/francoispqt/gojay v1.2.13 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-kit/kit v0.10.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect github.com/go-stack/stack v1.8.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/golang/mock v1.6.0 // indirect github.com/golang/mock v1.6.0 // indirect
...@@ -86,7 +86,6 @@ require ( ...@@ -86,7 +86,6 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-bexpr v0.1.11 // indirect github.com/hashicorp/go-bexpr v0.1.11 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/huin/goupnp v1.1.0 // indirect github.com/huin/goupnp v1.1.0 // indirect
github.com/influxdata/influxdb v1.8.3 // indirect github.com/influxdata/influxdb v1.8.3 // indirect
...@@ -147,7 +146,6 @@ require ( ...@@ -147,7 +146,6 @@ require (
github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect
github.com/prometheus/tsdb v0.10.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-18 v0.2.0 // indirect github.com/quic-go/qtls-go1-18 v0.2.0 // indirect
github.com/quic-go/qtls-go1-19 v0.2.0 // indirect github.com/quic-go/qtls-go1-19 v0.2.0 // indirect
...@@ -191,6 +189,6 @@ require ( ...@@ -191,6 +189,6 @@ require (
nhooyr.io/websocket v1.8.7 // indirect nhooyr.io/websocket v1.8.7 // indirect
) )
replace github.com/ethereum/go-ethereum v1.11.2 => github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230308025559-13ee9ab9153b replace github.com/ethereum/go-ethereum v1.11.4 => github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230321002540-11f0554a4313
//replace github.com/ethereum/go-ethereum v1.11.2 => ../go-ethereum //replace github.com/ethereum/go-ethereum v1.11.4 => ../go-ethereum
FROM --platform=$BUILDPLATFORM golang:1.18.0-alpine3.15 as builder FROM --platform=$BUILDPLATFORM golang:1.19.0-alpine3.15 as builder
ARG VERSION=v0.0.0 ARG VERSION=v0.0.0
......
...@@ -20,6 +20,7 @@ lint: ...@@ -20,6 +20,7 @@ lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint -e "errors.As" -e "errors.Is" golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint -e "errors.As" -e "errors.Is"
fuzz: fuzz:
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelConfig_CheckTimeout ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationZero ./batcher go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationZero ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutMaxChannelDuration ./batcher go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutMaxChannelDuration ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutZeroMaxChannelDuration ./batcher go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutZeroMaxChannelDuration ./batcher
......
...@@ -12,8 +12,6 @@ import ( ...@@ -12,8 +12,6 @@ import (
) )
var ( var (
ErrZeroMaxFrameSize = errors.New("max frame size cannot be zero")
ErrSmallMaxFrameSize = errors.New("max frame size cannot be less than 23")
ErrInvalidChannelTimeout = errors.New("channel timeout is less than the safety margin") ErrInvalidChannelTimeout = errors.New("channel timeout is less than the safety margin")
ErrInputTargetReached = errors.New("target amount of input data reached") ErrInputTargetReached = errors.New("target amount of input data reached")
ErrMaxFrameIndex = errors.New("max frame index reached (uint16)") ErrMaxFrameIndex = errors.New("max frame index reached (uint16)")
...@@ -83,15 +81,15 @@ func (cc *ChannelConfig) Check() error { ...@@ -83,15 +81,15 @@ func (cc *ChannelConfig) Check() error {
// will infinitely loop when trying to create frames in the // will infinitely loop when trying to create frames in the
// [channelBuilder.OutputFrames] function. // [channelBuilder.OutputFrames] function.
if cc.MaxFrameSize == 0 { if cc.MaxFrameSize == 0 {
return ErrZeroMaxFrameSize return errors.New("max frame size cannot be zero")
} }
// If the [MaxFrameSize] is set to < 23, the channel out // If the [MaxFrameSize] is less than [FrameV0OverHeadSize], the channel
// will underflow the maxSize variable in the [derive.ChannelOut]. // out will underflow the maxSize variable in the [derive.ChannelOut].
// Since it is of type uint64, it will wrap around to a very large // Since it is of type uint64, it will wrap around to a very large
// number, making the frame size extremely large. // number, making the frame size extremely large.
if cc.MaxFrameSize < 23 { if cc.MaxFrameSize < derive.FrameV0OverHeadSize {
return ErrSmallMaxFrameSize return fmt.Errorf("max frame size %d is less than the minimum 23", cc.MaxFrameSize)
} }
return nil return nil
......
...@@ -32,8 +32,7 @@ func TestPendingChannelTimeout(t *testing.T) { ...@@ -32,8 +32,7 @@ func TestPendingChannelTimeout(t *testing.T) {
require.False(t, timeout) require.False(t, timeout)
// Set the pending channel // Set the pending channel
err := m.ensurePendingChannel(eth.BlockID{}) require.NoError(t, m.ensurePendingChannel(eth.BlockID{}))
require.NoError(t, err)
// There are no confirmed transactions so // There are no confirmed transactions so
// the pending channel cannot be timed out // the pending channel cannot be timed out
...@@ -85,14 +84,10 @@ func TestChannelManagerReturnsErrReorg(t *testing.T) { ...@@ -85,14 +84,10 @@ func TestChannelManagerReturnsErrReorg(t *testing.T) {
ParentHash: common.Hash{0xff}, ParentHash: common.Hash{0xff},
}, nil, nil, nil, nil) }, nil, nil, nil, nil)
err := m.AddL2Block(a) require.NoError(t, m.AddL2Block(a))
require.NoError(t, err) require.NoError(t, m.AddL2Block(b))
err = m.AddL2Block(b) require.NoError(t, m.AddL2Block(c))
require.NoError(t, err) require.ErrorIs(t, m.AddL2Block(x), ErrReorg)
err = m.AddL2Block(c)
require.NoError(t, err)
err = m.AddL2Block(x)
require.ErrorIs(t, err, ErrReorg)
require.Equal(t, []*types.Block{a, b, c}, m.blocks) require.Equal(t, []*types.Block{a, b, c}, m.blocks)
} }
...@@ -111,16 +106,14 @@ func TestChannelManagerReturnsErrReorgWhenDrained(t *testing.T) { ...@@ -111,16 +106,14 @@ func TestChannelManagerReturnsErrReorgWhenDrained(t *testing.T) {
a := newMiniL2Block(0) a := newMiniL2Block(0)
x := newMiniL2BlockWithNumberParent(0, big.NewInt(1), common.Hash{0xff}) x := newMiniL2BlockWithNumberParent(0, big.NewInt(1), common.Hash{0xff})
err := m.AddL2Block(a) require.NoError(t, m.AddL2Block(a))
require.NoError(t, err)
_, err = m.TxData(eth.BlockID{}) _, err := m.TxData(eth.BlockID{})
require.NoError(t, err) require.NoError(t, err)
_, err = m.TxData(eth.BlockID{}) _, err = m.TxData(eth.BlockID{})
require.ErrorIs(t, err, io.EOF) require.ErrorIs(t, err, io.EOF)
err = m.AddL2Block(x) require.ErrorIs(t, m.AddL2Block(x), ErrReorg)
require.ErrorIs(t, err, ErrReorg)
} }
// TestChannelManagerNextTxData checks the nextTxData function. // TestChannelManagerNextTxData checks the nextTxData function.
...@@ -136,8 +129,7 @@ func TestChannelManagerNextTxData(t *testing.T) { ...@@ -136,8 +129,7 @@ func TestChannelManagerNextTxData(t *testing.T) {
// Set the pending channel // Set the pending channel
// The nextTxData function should still return EOF // The nextTxData function should still return EOF
// since the pending channel has no frames // since the pending channel has no frames
err = m.ensurePendingChannel(eth.BlockID{}) require.NoError(t, m.ensurePendingChannel(eth.BlockID{}))
require.NoError(t, err)
returnedTxData, err = m.nextTxData() returnedTxData, err = m.nextTxData()
require.ErrorIs(t, err, io.EOF) require.ErrorIs(t, err, io.EOF)
require.Equal(t, txData{}, returnedTxData) require.Equal(t, txData{}, returnedTxData)
...@@ -164,8 +156,10 @@ func TestChannelManagerNextTxData(t *testing.T) { ...@@ -164,8 +156,10 @@ func TestChannelManagerNextTxData(t *testing.T) {
require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID]) require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID])
} }
// TestClearChannelManager tests clearing the channel manager. // TestChannelManager_Clear tests clearing the channel manager.
func TestClearChannelManager(t *testing.T) { func TestChannelManager_Clear(t *testing.T) {
require := require.New(t)
// Create a channel manager // Create a channel manager
log := testlog.Logger(t, log.LvlCrit) log := testlog.Logger(t, log.LvlCrit)
rng := rand.New(rand.NewSource(time.Now().UnixNano())) rng := rand.New(rand.NewSource(time.Now().UnixNano()))
...@@ -176,15 +170,17 @@ func TestClearChannelManager(t *testing.T) { ...@@ -176,15 +170,17 @@ func TestClearChannelManager(t *testing.T) {
ChannelTimeout: 10, ChannelTimeout: 10,
// Have to set the max frame size here otherwise the channel builder would not // Have to set the max frame size here otherwise the channel builder would not
// be able to output any frames // be able to output any frames
MaxFrameSize: 1, MaxFrameSize: 24,
TargetFrameSize: 24,
ApproxComprRatio: 1.0,
}) })
// Channel Manager state should be empty by default // Channel Manager state should be empty by default
require.Empty(t, m.blocks) require.Empty(m.blocks)
require.Equal(t, common.Hash{}, m.tip) require.Equal(common.Hash{}, m.tip)
require.Nil(t, m.pendingChannel) require.Nil(m.pendingChannel)
require.Empty(t, m.pendingTransactions) require.Empty(m.pendingTransactions)
require.Empty(t, m.confirmedTransactions) require.Empty(m.confirmedTransactions)
// Add a block to the channel manager // Add a block to the channel manager
a, _ := derivetest.RandomL2Block(rng, 4) a, _ := derivetest.RandomL2Block(rng, 4)
...@@ -193,28 +189,25 @@ func TestClearChannelManager(t *testing.T) { ...@@ -193,28 +189,25 @@ func TestClearChannelManager(t *testing.T) {
Hash: a.Hash(), Hash: a.Hash(),
Number: a.NumberU64(), Number: a.NumberU64(),
} }
err := m.AddL2Block(a) require.NoError(m.AddL2Block(a))
require.NoError(t, err)
// Make sure there is a channel builder // Make sure there is a channel builder
err = m.ensurePendingChannel(l1BlockID) require.NoError(m.ensurePendingChannel(l1BlockID))
require.NoError(t, err) require.NotNil(m.pendingChannel)
require.NotNil(t, m.pendingChannel) require.Len(m.confirmedTransactions, 0)
require.Equal(t, 0, len(m.confirmedTransactions))
// Process the blocks // Process the blocks
// We should have a pending channel with 1 frame // We should have a pending channel with 1 frame
// and no more blocks since processBlocks consumes // and no more blocks since processBlocks consumes
// the list // the list
err = m.processBlocks() require.NoError(m.processBlocks())
require.NoError(t, err) require.NoError(m.pendingChannel.co.Flush())
err = m.pendingChannel.OutputFrames() require.NoError(m.pendingChannel.OutputFrames())
require.NoError(t, err) _, err := m.nextTxData()
_, err = m.nextTxData() require.NoError(err)
require.NoError(t, err) require.Len(m.blocks, 0)
require.Equal(t, 0, len(m.blocks)) require.Equal(newL1Tip, m.tip)
require.Equal(t, newL1Tip, m.tip) require.Len(m.pendingTransactions, 1)
require.Equal(t, 1, len(m.pendingTransactions))
// Add a new block so we can test clearing // Add a new block so we can test clearing
// the channel manager with a full state // the channel manager with a full state
...@@ -222,20 +215,19 @@ func TestClearChannelManager(t *testing.T) { ...@@ -222,20 +215,19 @@ func TestClearChannelManager(t *testing.T) {
Number: big.NewInt(1), Number: big.NewInt(1),
ParentHash: a.Hash(), ParentHash: a.Hash(),
}, nil, nil, nil, nil) }, nil, nil, nil, nil)
err = m.AddL2Block(b) require.NoError(m.AddL2Block(b))
require.NoError(t, err) require.Len(m.blocks, 1)
require.Equal(t, 1, len(m.blocks)) require.Equal(b.Hash(), m.tip)
require.Equal(t, b.Hash(), m.tip)
// Clear the channel manager // Clear the channel manager
m.Clear() m.Clear()
// Check that the entire channel manager state cleared // Check that the entire channel manager state cleared
require.Empty(t, m.blocks) require.Empty(m.blocks)
require.Equal(t, common.Hash{}, m.tip) require.Equal(common.Hash{}, m.tip)
require.Nil(t, m.pendingChannel) require.Nil(m.pendingChannel)
require.Empty(t, m.pendingTransactions) require.Empty(m.pendingTransactions)
require.Empty(t, m.confirmedTransactions) require.Empty(m.confirmedTransactions)
} }
// TestChannelManagerTxConfirmed checks the [ChannelManager.TxConfirmed] function. // TestChannelManagerTxConfirmed checks the [ChannelManager.TxConfirmed] function.
...@@ -251,8 +243,7 @@ func TestChannelManagerTxConfirmed(t *testing.T) { ...@@ -251,8 +243,7 @@ func TestChannelManagerTxConfirmed(t *testing.T) {
// Let's add a valid pending transaction to the channel manager // Let's add a valid pending transaction to the channel manager
// So we can demonstrate that TxConfirmed's correctness // So we can demonstrate that TxConfirmed's correctness
err := m.ensurePendingChannel(eth.BlockID{}) require.NoError(t, m.ensurePendingChannel(eth.BlockID{}))
require.NoError(t, err)
channelID := m.pendingChannel.ID() channelID := m.pendingChannel.ID()
frame := frameData{ frame := frameData{
data: []byte{}, data: []byte{},
...@@ -270,7 +261,7 @@ func TestChannelManagerTxConfirmed(t *testing.T) { ...@@ -270,7 +261,7 @@ func TestChannelManagerTxConfirmed(t *testing.T) {
require.Equal(t, expectedTxData, returnedTxData) require.Equal(t, expectedTxData, returnedTxData)
require.Equal(t, 0, m.pendingChannel.NumFrames()) require.Equal(t, 0, m.pendingChannel.NumFrames())
require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID]) require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID])
require.Equal(t, 1, len(m.pendingTransactions)) require.Len(t, m.pendingTransactions, 1)
// An unknown pending transaction should not be marked as confirmed // An unknown pending transaction should not be marked as confirmed
// and should not be removed from the pending transactions map // and should not be removed from the pending transactions map
...@@ -281,14 +272,14 @@ func TestChannelManagerTxConfirmed(t *testing.T) { ...@@ -281,14 +272,14 @@ func TestChannelManagerTxConfirmed(t *testing.T) {
blockID := eth.BlockID{Number: 0, Hash: common.Hash{0x69}} blockID := eth.BlockID{Number: 0, Hash: common.Hash{0x69}}
m.TxConfirmed(unknownTxID, blockID) m.TxConfirmed(unknownTxID, blockID)
require.Empty(t, m.confirmedTransactions) require.Empty(t, m.confirmedTransactions)
require.Equal(t, 1, len(m.pendingTransactions)) require.Len(t, m.pendingTransactions, 1)
// Now let's mark the pending transaction as confirmed // Now let's mark the pending transaction as confirmed
// and check that it is removed from the pending transactions map // and check that it is removed from the pending transactions map
// and added to the confirmed transactions map // and added to the confirmed transactions map
m.TxConfirmed(expectedChannelID, blockID) m.TxConfirmed(expectedChannelID, blockID)
require.Empty(t, m.pendingTransactions) require.Empty(t, m.pendingTransactions)
require.Equal(t, 1, len(m.confirmedTransactions)) require.Len(t, m.confirmedTransactions, 1)
require.Equal(t, blockID, m.confirmedTransactions[expectedChannelID]) require.Equal(t, blockID, m.confirmedTransactions[expectedChannelID])
} }
...@@ -300,8 +291,7 @@ func TestChannelManagerTxFailed(t *testing.T) { ...@@ -300,8 +291,7 @@ func TestChannelManagerTxFailed(t *testing.T) {
// Let's add a valid pending transaction to the channel // Let's add a valid pending transaction to the channel
// manager so we can demonstrate correctness // manager so we can demonstrate correctness
err := m.ensurePendingChannel(eth.BlockID{}) require.NoError(t, m.ensurePendingChannel(eth.BlockID{}))
require.NoError(t, err)
channelID := m.pendingChannel.ID() channelID := m.pendingChannel.ID()
frame := frameData{ frame := frameData{
data: []byte{}, data: []byte{},
...@@ -319,7 +309,7 @@ func TestChannelManagerTxFailed(t *testing.T) { ...@@ -319,7 +309,7 @@ func TestChannelManagerTxFailed(t *testing.T) {
require.Equal(t, expectedTxData, returnedTxData) require.Equal(t, expectedTxData, returnedTxData)
require.Equal(t, 0, m.pendingChannel.NumFrames()) require.Equal(t, 0, m.pendingChannel.NumFrames())
require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID]) require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID])
require.Equal(t, 1, len(m.pendingTransactions)) require.Len(t, m.pendingTransactions, 1)
// Trying to mark an unknown pending transaction as failed // Trying to mark an unknown pending transaction as failed
// shouldn't modify state // shouldn't modify state
...@@ -348,8 +338,7 @@ func TestChannelManager_TxResend(t *testing.T) { ...@@ -348,8 +338,7 @@ func TestChannelManager_TxResend(t *testing.T) {
a, _ := derivetest.RandomL2Block(rng, 4) a, _ := derivetest.RandomL2Block(rng, 4)
err := m.AddL2Block(a) require.NoError(m.AddL2Block(a))
require.NoError(err)
txdata0, err := m.TxData(eth.BlockID{}) txdata0, err := m.TxData(eth.BlockID{})
require.NoError(err) require.NoError(err)
......
...@@ -185,7 +185,7 @@ func (m *Metrics) RecordLatestL1Block(l1ref eth.L1BlockRef) { ...@@ -185,7 +185,7 @@ func (m *Metrics) RecordLatestL1Block(l1ref eth.L1BlockRef) {
m.RecordL1Ref("latest", l1ref) m.RecordL1Ref("latest", l1ref)
} }
// RecordL2BlockLoaded should be called when a new L2 block was loaded into the // RecordL2BlocksLoaded should be called when a new L2 block was loaded into the
// channel manager (but not processed yet). // channel manager (but not processed yet).
func (m *Metrics) RecordL2BlocksLoaded(l2ref eth.L2BlockRef) { func (m *Metrics) RecordL2BlocksLoaded(l2ref eth.L2BlockRef) {
m.RecordL2Ref(StageLoaded, l2ref) m.RecordL2Ref(StageLoaded, l2ref)
......
FROM golang:1.18.0-alpine3.15 as builder FROM golang:1.19.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
......
package ether package ether
import ( import (
"fmt"
"math/big" "math/big"
"math/rand" "math/rand"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys" "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain" "github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
...@@ -190,7 +191,7 @@ func TestMigrateBalances(t *testing.T) { ...@@ -190,7 +191,7 @@ func TestMigrateBalances(t *testing.T) {
} }
} }
func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Address]*big.Int, allowances map[common.Address]common.Address) (*state.StateDB, DBFactory) { func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Address]*big.Int, allowances map[common.Address]common.Address) (*state.StateDB, util.DBFactory) {
memDB := rawdb.NewMemoryDatabase() memDB := rawdb.NewMemoryDatabase()
db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{ db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true, Preimages: true,
...@@ -228,35 +229,12 @@ func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Addre ...@@ -228,35 +229,12 @@ func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Addre
} }
} }
// TestMigrateBalancesRandom tests that the pre-check balances function works // TestMigrateBalancesRandomOK tests that the pre-check balances function works
// with random addresses. This test makes sure that the partition logic doesn't // with random addresses. This test makes sure that the partition logic doesn't
// miss anything. // miss anything, and helps detect concurrency errors.
func TestMigrateBalancesRandom(t *testing.T) { func TestMigrateBalancesRandomOK(t *testing.T) {
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
addresses := make([]common.Address, 0) addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)
stateBalances := make(map[common.Address]*big.Int)
allowances := make([]*crossdomain.Allowance, 0)
stateAllowances := make(map[common.Address]common.Address)
totalSupply := big.NewInt(0)
for j := 0; j < rand.Intn(10000); j++ {
addr := randAddr(t)
addresses = append(addresses, addr)
stateBalances[addr] = big.NewInt(int64(rand.Intn(1_000_000)))
totalSupply = new(big.Int).Add(totalSupply, stateBalances[addr])
}
for j := 0; j < rand.Intn(1000); j++ {
addr := randAddr(t)
to := randAddr(t)
allowances = append(allowances, &crossdomain.Allowance{
From: addr,
To: to,
})
stateAllowances[addr] = to
}
db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances) db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false) err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
...@@ -269,83 +247,41 @@ func TestMigrateBalancesRandom(t *testing.T) { ...@@ -269,83 +247,41 @@ func TestMigrateBalancesRandom(t *testing.T) {
} }
} }
func TestPartitionKeyspace(t *testing.T) { // TestMigrateBalancesRandomMissing tests that the pre-check balances function works
tests := []struct { // with random addresses when some of them are missing. This helps make sure that the
i int // partition logic doesn't miss anything, and helps detect concurrency errors.
count int func TestMigrateBalancesRandomMissing(t *testing.T) {
expected [2]common.Hash for i := 0; i < 100; i++ {
}{ addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)
{
i: 0, if len(addresses) == 0 {
count: 1, continue
expected: [2]common.Hash{ }
common.HexToHash("0x00"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // Remove a random address from the list of witnesses
}, idx := rand.Intn(len(addresses))
}, addresses = append(addresses[:idx], addresses[idx+1:]...)
{
i: 0, db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
count: 2, err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
expected: [2]common.Hash{ require.ErrorContains(t, err, "unknown storage slot")
common.HexToHash("0x00"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 1,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
},
},
{
i: 1,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
},
{
i: 2,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
}
for _, tt := range tests {
t.Run(fmt.Sprintf("i %d, count %d", tt.i, tt.count), func(t *testing.T) {
start, end := PartitionKeyspace(tt.i, tt.count)
require.Equal(t, tt.expected[0], start)
require.Equal(t, tt.expected[1], end)
})
} }
t.Run("panics on invalid i or count", func(t *testing.T) { for i := 0; i < 100; i++ {
require.Panics(t, func() { addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)
PartitionKeyspace(1, 1)
}) if len(allowances) == 0 {
require.Panics(t, func() { continue
PartitionKeyspace(-1, 1) }
})
require.Panics(t, func() { // Remove a random allowance from the list of witnesses
PartitionKeyspace(0, -1) idx := rand.Intn(len(allowances))
}) allowances = append(allowances[:idx], allowances[idx+1:]...)
require.Panics(t, func() {
PartitionKeyspace(-1, -1) db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
}) err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
}) require.ErrorContains(t, err, "unknown storage slot")
}
} }
func randAddr(t *testing.T) common.Address { func randAddr(t *testing.T) common.Address {
...@@ -354,3 +290,32 @@ func randAddr(t *testing.T) common.Address { ...@@ -354,3 +290,32 @@ func randAddr(t *testing.T) common.Address {
require.NoError(t, err) require.NoError(t, err)
return addr return addr
} }
func setupRandTest(t *testing.T) ([]common.Address, map[common.Address]*big.Int, []*crossdomain.Allowance, map[common.Address]common.Address, *big.Int) {
addresses := make([]common.Address, 0)
stateBalances := make(map[common.Address]*big.Int)
allowances := make([]*crossdomain.Allowance, 0)
stateAllowances := make(map[common.Address]common.Address)
totalSupply := big.NewInt(0)
for j := 0; j < rand.Intn(10000); j++ {
addr := randAddr(t)
addresses = append(addresses, addr)
stateBalances[addr] = big.NewInt(int64(rand.Intn(1_000_000)))
totalSupply = new(big.Int).Add(totalSupply, stateBalances[addr])
}
for j := 0; j < rand.Intn(1000); j++ {
addr := randAddr(t)
to := randAddr(t)
allowances = append(allowances, &crossdomain.Allowance{
From: addr,
To: to,
})
stateAllowances[addr] = to
}
return addresses, stateBalances, allowances, stateAllowances, totalSupply
}
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"math/big" "math/big"
"math/rand"
"github.com/ethereum-optimism/optimism/op-chain-ops/util" "github.com/ethereum-optimism/optimism/op-chain-ops/util"
...@@ -26,11 +27,21 @@ import ( ...@@ -26,11 +27,21 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
) )
// MaxSlotChecks is the maximum number of storage slots to check const (
// when validating the untouched predeploys. This limit is in place // MaxPredeploySlotChecks is the maximum number of storage slots to check
// to bound execution time of the migration. We can parallelize this // when validating the untouched predeploys. This limit is in place
// in the future. // to bound execution time of the migration. We can parallelize this
const MaxSlotChecks = 1000 // in the future.
MaxPredeploySlotChecks = 1000
// MaxOVMETHSlotChecks is the maximum number of OVM ETH storage slots to check
// when validating the OVM ETH migration.
MaxOVMETHSlotChecks = 5000
// OVMETHSampleLikelihood is the probability that a storage slot will be checked
// when validating the OVM ETH migration.
OVMETHSampleLikelihood = 0.1
)
type StorageCheckMap = map[common.Hash]common.Hash type StorageCheckMap = map[common.Hash]common.Hash
...@@ -148,7 +159,7 @@ func PostCheckMigratedDB( ...@@ -148,7 +159,7 @@ func PostCheckMigratedDB(
} }
log.Info("checked L1Block") log.Info("checked L1Block")
if err := PostCheckLegacyETH(db); err != nil { if err := PostCheckLegacyETH(prevDB, db, migrationData); err != nil {
return err return err
} }
log.Info("checked legacy eth") log.Info("checked legacy eth")
...@@ -210,7 +221,7 @@ func PostCheckUntouchables(udb state.Database, currDB *state.StateDB, prevRoot c ...@@ -210,7 +221,7 @@ func PostCheckUntouchables(udb state.Database, currDB *state.StateDB, prevRoot c
if err := prevDB.ForEachStorage(addr, func(key, value common.Hash) bool { if err := prevDB.ForEachStorage(addr, func(key, value common.Hash) bool {
count++ count++
expSlots[key] = value expSlots[key] = value
return count < MaxSlotChecks return count < MaxPredeploySlotChecks
}); err != nil { }); err != nil {
return fmt.Errorf("error iterating over storage: %w", err) return fmt.Errorf("error iterating over storage: %w", err)
} }
...@@ -365,14 +376,94 @@ func PostCheckPredeployStorage(db vm.StateDB, finalSystemOwner common.Address, p ...@@ -365,14 +376,94 @@ func PostCheckPredeployStorage(db vm.StateDB, finalSystemOwner common.Address, p
} }
// PostCheckLegacyETH checks that the legacy eth migration was successful. // PostCheckLegacyETH checks that the legacy eth migration was successful.
// It currently only checks that the total supply was set to 0. // It checks that the total supply was set to 0, and randomly samples storage
func PostCheckLegacyETH(db vm.StateDB) error { // slots pre- and post-migration to ensure that balances were correctly migrated.
func PostCheckLegacyETH(prevDB, migratedDB *state.StateDB, migrationData crossdomain.MigrationData) error {
allowanceSlots := make(map[common.Hash]bool)
addresses := make(map[common.Hash]common.Address)
log.Info("recomputing witness data")
for _, allowance := range migrationData.OvmAllowances {
key := ether.CalcAllowanceStorageKey(allowance.From, allowance.To)
allowanceSlots[key] = true
}
for _, addr := range migrationData.Addresses() {
addresses[ether.CalcOVMETHStorageKey(addr)] = addr
}
log.Info("checking legacy eth fixed storage slots")
for slot, expValue := range LegacyETHCheckSlots { for slot, expValue := range LegacyETHCheckSlots {
actValue := db.GetState(predeploys.LegacyERC20ETHAddr, slot) actValue := migratedDB.GetState(predeploys.LegacyERC20ETHAddr, slot)
if actValue != expValue { if actValue != expValue {
return fmt.Errorf("expected slot %s on %s to be %s, but got %s", slot, predeploys.LegacyERC20ETHAddr, expValue, actValue) return fmt.Errorf("expected slot %s on %s to be %s, but got %s", slot, predeploys.LegacyERC20ETHAddr, expValue, actValue)
} }
} }
var count int
threshold := 100 - int(100*OVMETHSampleLikelihood)
progress := util.ProgressLogger(100, "checking legacy eth balance slots")
var innerErr error
err := prevDB.ForEachStorage(predeploys.LegacyERC20ETHAddr, func(key, value common.Hash) bool {
val := rand.Intn(100)
// Randomly sample storage slots.
if val > threshold {
return true
}
// Ignore fixed slots.
if _, ok := LegacyETHCheckSlots[key]; ok {
return true
}
// Ignore allowances.
if allowanceSlots[key] {
return true
}
// Grab the address, and bail if we can't find it.
addr, ok := addresses[key]
if !ok {
innerErr = fmt.Errorf("unknown OVM_ETH storage slot %s", key)
return false
}
// Pull out the pre-migration OVM ETH balance, and the state balance.
ovmETHBalance := value.Big()
ovmETHStateBalance := prevDB.GetBalance(addr)
// Pre-migration state balance should be zero.
if ovmETHStateBalance.Cmp(common.Big0) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH pre-migration state balance for %s to be 0, but got %s", addr, ovmETHStateBalance)
return false
}
// Migrated state balance should equal the OVM ETH balance.
migratedStateBalance := migratedDB.GetBalance(addr)
if migratedStateBalance.Cmp(ovmETHBalance) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH post-migration state balance for %s to be %s, but got %s", addr, ovmETHStateBalance, migratedStateBalance)
return false
}
// Migrated OVM ETH balance should be zero, since we wipe the slots.
migratedBalance := migratedDB.GetState(predeploys.LegacyERC20ETHAddr, key)
if migratedBalance.Big().Cmp(common.Big0) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH post-migration ERC20 balance for %s to be 0, but got %s", addr, migratedBalance)
return false
}
progress()
count++
// Stop iterating if we've checked enough slots.
return count < MaxOVMETHSlotChecks
})
if err != nil {
return fmt.Errorf("error iterating over OVM_ETH storage: %w", err)
}
if innerErr != nil {
return innerErr
}
return nil return nil
} }
......
package util
import (
"fmt"
"math/big"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
var (
// maxSlot is the maximum possible storage slot.
maxSlot = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
)
type DBFactory func() (*state.StateDB, error)
type StateCallback func(db *state.StateDB, key, value common.Hash) error
func IterateState(dbFactory DBFactory, address common.Address, cb StateCallback, workers int) error {
if workers <= 0 {
panic("workers must be greater than 0")
}
// WaitGroup to wait for all workers to finish.
var wg sync.WaitGroup
// Channel to receive errors from each iteration job.
errCh := make(chan error, workers)
// Channel to cancel all iteration jobs.
cancelCh := make(chan struct{})
worker := func(start, end common.Hash) {
// Decrement the WaitGroup when the function returns.
defer wg.Done()
db, err := dbFactory()
if err != nil {
// Should never happen, so explode if it does.
log.Crit("cannot create state db", "err", err)
}
st, err := db.StorageTrie(address)
if err != nil {
// Should never happen, so explode if it does.
log.Crit("cannot get storage trie", "address", address, "err", err)
}
// st can be nil if the account doesn't exist.
if st == nil {
errCh <- fmt.Errorf("account does not exist: %s", address.Hex())
return
}
it := trie.NewIterator(st.NodeIterator(start.Bytes()))
// Below code is largely based on db.ForEachStorage. We can't use that
// because it doesn't allow us to specify a start and end key.
for it.Next() {
select {
case <-cancelCh:
// If one of the workers encounters an error, cancel all of them.
return
default:
break
}
// Use the raw (i.e., secure hashed) key to check if we've reached
// the end of the partition. Use > rather than >= here to account for
// the fact that the values returned by PartitionKeys are inclusive.
// Duplicate addresses that may be returned by this iteration are
// filtered out in the collector.
if new(big.Int).SetBytes(it.Key).Cmp(end.Big()) > 0 {
return
}
// Skip if the value is empty.
rawValue := it.Value
if len(rawValue) == 0 {
continue
}
// Get the preimage.
rawKey := st.GetKey(it.Key)
if rawKey == nil {
// Should never happen, so explode if it does.
log.Crit("cannot get preimage for storage key", "key", it.Key)
}
key := common.BytesToHash(rawKey)
// Parse the raw value.
_, content, _, err := rlp.Split(rawValue)
if err != nil {
// Should never happen, so explode if it does.
log.Crit("mal-formed data in state: %v", err)
}
value := common.BytesToHash(content)
// Call the callback with the DB, key, and value. Errors get
// bubbled up to the errCh.
if err := cb(db, key, value); err != nil {
errCh <- err
return
}
}
}
for i := 0; i < workers; i++ {
wg.Add(1)
// Partition the keyspace per worker.
start, end := PartitionKeyspace(i, workers)
// Kick off our worker.
go worker(start, end)
}
wg.Wait()
for len(errCh) > 0 {
err := <-errCh
if err != nil {
return err
}
}
return nil
}
// PartitionKeyspace divides the key space into partitions by dividing the maximum keyspace
// by count then multiplying by i. This will leave some slots left over, which we handle below. It
// returns the start and end keys for the partition as a common.Hash. Note that the returned range
// of keys is inclusive, i.e., [start, end] NOT [start, end).
func PartitionKeyspace(i int, count int) (common.Hash, common.Hash) {
if i < 0 || count < 0 {
panic("i and count must be greater than 0")
}
if i > count-1 {
panic("i must be less than count - 1")
}
// Divide the key space into partitions by dividing the key space by the number
// of jobs. This will leave some slots left over, which we handle below.
partSize := new(big.Int).Div(maxSlot.Big(), big.NewInt(int64(count)))
start := common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i)), partSize))
var end common.Hash
if i < count-1 {
// If this is not the last partition, use the next partition's start key as the end.
end = common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i+1)), partSize))
} else {
// If this is the last partition, use the max slot as the end.
end = maxSlot
}
return start, end
}
package util
import (
crand "crypto/rand"
"fmt"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/trie"
"github.com/stretchr/testify/require"
)
var testAddr = common.Address{0: 0xff}
func TestStateIteratorWorkers(t *testing.T) {
_, factory, _ := setupRandTest(t)
for i := -1; i <= 0; i++ {
require.Panics(t, func() {
_ = IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
return nil
}, i)
})
}
}
func TestStateIteratorNonexistentAccount(t *testing.T) {
_, factory, _ := setupRandTest(t)
require.ErrorContains(t, IterateState(factory, common.Address{}, func(db *state.StateDB, key, value common.Hash) error {
return nil
}, 1), "account does not exist")
}
func TestStateIteratorRandomOK(t *testing.T) {
for i := 0; i < 100; i++ {
hashes, factory, workerCount := setupRandTest(t)
seenHashes := make(map[common.Hash]bool)
hashCh := make(chan common.Hash)
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
for hash := range hashCh {
seenHashes[hash] = true
}
}()
require.NoError(t, IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
hashCh <- key
return nil
}, workerCount))
close(hashCh)
<-doneCh
// Perform a less or equal check here in case of duplicates. The map check below will assert
// that all of the hashes are accounted for.
require.LessOrEqual(t, len(seenHashes), len(hashes))
// Every hash we put into state should have been iterated over.
for _, hash := range hashes {
require.Contains(t, seenHashes, hash)
}
}
}
func TestStateIteratorRandomError(t *testing.T) {
for i := 0; i < 100; i++ {
hashes, factory, workerCount := setupRandTest(t)
failHash := hashes[rand.Intn(len(hashes))]
require.ErrorContains(t, IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
if key == failHash {
return fmt.Errorf("test error")
}
return nil
}, workerCount), "test error")
}
}
func TestPartitionKeyspace(t *testing.T) {
tests := []struct {
i int
count int
expected [2]common.Hash
}{
{
i: 0,
count: 1,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 1,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
},
},
{
i: 1,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
},
{
i: 2,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
}
for _, tt := range tests {
t.Run(fmt.Sprintf("i %d, count %d", tt.i, tt.count), func(t *testing.T) {
start, end := PartitionKeyspace(tt.i, tt.count)
require.Equal(t, tt.expected[0], start)
require.Equal(t, tt.expected[1], end)
})
}
t.Run("panics on invalid i or count", func(t *testing.T) {
require.Panics(t, func() {
PartitionKeyspace(1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(0, -1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, -1)
})
})
}
func setupRandTest(t *testing.T) ([]common.Hash, DBFactory, int) {
memDB := rawdb.NewMemoryDatabase()
db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
require.NoError(t, err)
hashCount := rand.Intn(100)
if hashCount == 0 {
hashCount = 1
}
hashes := make([]common.Hash, hashCount)
db.CreateAccount(testAddr)
for j := 0; j < hashCount; j++ {
hashes[j] = randHash(t)
db.SetState(testAddr, hashes[j], hashes[j])
}
root, err := db.Commit(false)
require.NoError(t, err)
err = db.Database().TrieDB().Commit(root, true)
require.NoError(t, err)
factory := func() (*state.StateDB, error) {
return state.New(root, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
}
workerCount := rand.Intn(64)
if workerCount == 0 {
workerCount = 1
}
return hashes, factory, workerCount
}
func randHash(t *testing.T) common.Hash {
var h common.Hash
_, err := crand.Read(h[:])
require.NoError(t, err)
return h
}
...@@ -35,7 +35,7 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) { ...@@ -35,7 +35,7 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID, ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n, Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei), GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)), GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas, Gas: params.TxGas,
To: &dp.Addresses.Bob, To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2), Value: e2eutils.Ether(2),
...@@ -146,7 +146,7 @@ func TestLargeL1Gaps(gt *testing.T) { ...@@ -146,7 +146,7 @@ func TestLargeL1Gaps(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID, ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n, Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei), GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)), GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas, Gas: params.TxGas,
To: &dp.Addresses.Bob, To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2), Value: e2eutils.Ether(2),
......
...@@ -21,7 +21,7 @@ func TestShapellaL1Fork(gt *testing.T) { ...@@ -21,7 +21,7 @@ func TestShapellaL1Fork(gt *testing.T) {
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log) _, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
require.False(t, sd.L1Cfg.Config.IsShanghai(miner.l1Chain.CurrentBlock().Time()), "not active yet") require.False(t, sd.L1Cfg.Config.IsShanghai(miner.l1Chain.CurrentBlock().Time), "not active yet")
// start op-nodes // start op-nodes
sequencer.ActL2PipelineFull(t) sequencer.ActL2PipelineFull(t)
...@@ -34,7 +34,7 @@ func TestShapellaL1Fork(gt *testing.T) { ...@@ -34,7 +34,7 @@ func TestShapellaL1Fork(gt *testing.T) {
// verify Shanghai is active // verify Shanghai is active
l1Head := miner.l1Chain.CurrentBlock() l1Head := miner.l1Chain.CurrentBlock()
require.True(t, sd.L1Cfg.Config.IsShanghai(l1Head.Time())) require.True(t, sd.L1Cfg.Config.IsShanghai(l1Head.Time))
// build L2 chain up to and including L2 blocks referencing shanghai L1 blocks // build L2 chain up to and including L2 blocks referencing shanghai L1 blocks
sequencer.ActL1HeadSignal(t) sequencer.ActL1HeadSignal(t)
......
...@@ -31,7 +31,7 @@ func TestL1Miner_BuildBlock(gt *testing.T) { ...@@ -31,7 +31,7 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
ChainID: sd.L1Cfg.Config.ChainID, ChainID: sd.L1Cfg.Config.ChainID,
Nonce: 0, Nonce: 0,
GasTipCap: big.NewInt(2 * params.GWei), GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)), GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas, Gas: params.TxGas,
To: &dp.Addresses.Bob, To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2), Value: e2eutils.Ether(2),
...@@ -41,7 +41,8 @@ func TestL1Miner_BuildBlock(gt *testing.T) { ...@@ -41,7 +41,8 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
// make an empty block, even though a tx may be waiting // make an empty block, even though a tx may be waiting
miner.ActL1StartBlock(10)(t) miner.ActL1StartBlock(10)(t)
miner.ActL1EndBlock(t) miner.ActL1EndBlock(t)
bl := miner.l1Chain.CurrentBlock() header := miner.l1Chain.CurrentBlock()
bl := miner.l1Chain.GetBlockByHash(header.Hash())
require.Equal(t, uint64(1), bl.NumberU64()) require.Equal(t, uint64(1), bl.NumberU64())
require.Zero(gt, bl.Transactions().Len()) require.Zero(gt, bl.Transactions().Len())
...@@ -49,7 +50,8 @@ func TestL1Miner_BuildBlock(gt *testing.T) { ...@@ -49,7 +50,8 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
miner.ActL1StartBlock(10)(t) miner.ActL1StartBlock(10)(t)
miner.ActL1IncludeTx(dp.Addresses.Alice)(t) miner.ActL1IncludeTx(dp.Addresses.Alice)(t)
miner.ActL1EndBlock(t) miner.ActL1EndBlock(t)
bl = miner.l1Chain.CurrentBlock() header = miner.l1Chain.CurrentBlock()
bl = miner.l1Chain.GetBlockByHash(header.Hash())
require.Equal(t, uint64(2), bl.NumberU64()) require.Equal(t, uint64(2), bl.NumberU64())
require.Equal(t, 1, bl.Transactions().Len()) require.Equal(t, 1, bl.Transactions().Len())
require.Equal(t, tx.Hash(), bl.Transactions()[0].Hash()) require.Equal(t, tx.Hash(), bl.Transactions()[0].Hash())
......
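Note: the pattern running through these action-test diffs reflects the go-ethereum API change where BlockChain.CurrentBlock() returns a block header (*types.Header) rather than a full block, so BaseFee, Time and Number become field accesses and the full block is only fetched by hash when its transactions are needed. A condensed sketch of the new access pattern, reusing the names from the tests above:
header := miner.l1Chain.CurrentBlock()                 // *types.Header under the updated API
gasFeeCap := new(big.Int).Add(header.BaseFee, big.NewInt(2*params.GWei))
num := header.Number.Uint64()                          // header number is a *big.Int
block := miner.l1Chain.GetBlockByHash(header.Hash())   // fetch the full block for its txs
txCount := block.Transactions().Len()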
...@@ -103,9 +103,9 @@ func (s *L1Replica) ActL1RewindDepth(depth uint64) Action { ...@@ -103,9 +103,9 @@ func (s *L1Replica) ActL1RewindDepth(depth uint64) Action {
t.InvalidAction("cannot rewind L1 past genesis (current: %d, rewind depth: %d)", head, depth) t.InvalidAction("cannot rewind L1 past genesis (current: %d, rewind depth: %d)", head, depth)
return return
} }
finalized := s.l1Chain.CurrentFinalizedBlock() finalized := s.l1Chain.CurrentFinalBlock()
if finalized != nil && head < finalized.NumberU64()+depth { if finalized != nil && head < finalized.Number.Uint64()+depth {
t.InvalidAction("cannot rewind head of chain past finalized block %d with rewind depth %d", finalized.NumberU64(), depth) t.InvalidAction("cannot rewind head of chain past finalized block %d with rewind depth %d", finalized.Number.Uint64(), depth)
return return
} }
if err := s.l1Chain.SetHead(head - depth); err != nil { if err := s.l1Chain.SetHead(head - depth); err != nil {
...@@ -188,7 +188,7 @@ func (s *L1Replica) UnsafeNum() uint64 { ...@@ -188,7 +188,7 @@ func (s *L1Replica) UnsafeNum() uint64 {
head := s.l1Chain.CurrentBlock() head := s.l1Chain.CurrentBlock()
headNum := uint64(0) headNum := uint64(0)
if head != nil { if head != nil {
headNum = head.NumberU64() headNum = head.Number.Uint64()
} }
return headNum return headNum
} }
...@@ -197,16 +197,16 @@ func (s *L1Replica) SafeNum() uint64 { ...@@ -197,16 +197,16 @@ func (s *L1Replica) SafeNum() uint64 {
safe := s.l1Chain.CurrentSafeBlock() safe := s.l1Chain.CurrentSafeBlock()
safeNum := uint64(0) safeNum := uint64(0)
if safe != nil { if safe != nil {
safeNum = safe.NumberU64() safeNum = safe.Number.Uint64()
} }
return safeNum return safeNum
} }
func (s *L1Replica) FinalizedNum() uint64 { func (s *L1Replica) FinalizedNum() uint64 {
finalized := s.l1Chain.CurrentFinalizedBlock() finalized := s.l1Chain.CurrentFinalBlock()
finalizedNum := uint64(0) finalizedNum := uint64(0)
if finalized != nil { if finalized != nil {
finalizedNum = finalized.NumberU64() finalizedNum = finalized.Number.Uint64()
} }
return finalizedNum return finalizedNum
} }
...@@ -219,7 +219,7 @@ func (s *L1Replica) ActL1Finalize(t Testing, num uint64) { ...@@ -219,7 +219,7 @@ func (s *L1Replica) ActL1Finalize(t Testing, num uint64) {
t.InvalidAction("need to move forward safe block before moving finalized block") t.InvalidAction("need to move forward safe block before moving finalized block")
return return
} }
newFinalized := s.l1Chain.GetBlockByNumber(num) newFinalized := s.l1Chain.GetHeaderByNumber(num)
if newFinalized == nil { if newFinalized == nil {
t.Fatalf("expected block at %d after finalized L1 block %d, safe head is ahead", num, finalizedNum) t.Fatalf("expected block at %d after finalized L1 block %d, safe head is ahead", num, finalizedNum)
} }
...@@ -234,7 +234,7 @@ func (s *L1Replica) ActL1FinalizeNext(t Testing) { ...@@ -234,7 +234,7 @@ func (s *L1Replica) ActL1FinalizeNext(t Testing) {
// ActL1Safe marks the given unsafe block as safe. // ActL1Safe marks the given unsafe block as safe.
func (s *L1Replica) ActL1Safe(t Testing, num uint64) { func (s *L1Replica) ActL1Safe(t Testing, num uint64) {
newSafe := s.l1Chain.GetBlockByNumber(num) newSafe := s.l1Chain.GetHeaderByNumber(num)
if newSafe == nil { if newSafe == nil {
t.InvalidAction("could not find L1 block %d, cannot label it as safe", num) t.InvalidAction("could not find L1 block %d, cannot label it as safe", num)
return return
......
...@@ -85,7 +85,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) { ...@@ -85,7 +85,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
}) })
syncFromA := replica1.ActL1Sync(canonL1(chainA)) syncFromA := replica1.ActL1Sync(canonL1(chainA))
// sync canonical chain A // sync canonical chain A
for replica1.l1Chain.CurrentBlock().NumberU64()+1 < uint64(len(chainA)) { for replica1.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainA)) {
syncFromA(t) syncFromA(t)
} }
require.Equal(t, replica1.l1Chain.CurrentBlock().Hash(), chainA[len(chainA)-1].Hash(), "sync replica1 to head of chain A") require.Equal(t, replica1.l1Chain.CurrentBlock().Hash(), chainA[len(chainA)-1].Hash(), "sync replica1 to head of chain A")
...@@ -94,7 +94,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) { ...@@ -94,7 +94,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
// sync new canonical chain B // sync new canonical chain B
syncFromB := replica1.ActL1Sync(canonL1(chainB)) syncFromB := replica1.ActL1Sync(canonL1(chainB))
for replica1.l1Chain.CurrentBlock().NumberU64()+1 < uint64(len(chainB)) { for replica1.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainB)) {
syncFromB(t) syncFromB(t)
} }
require.Equal(t, replica1.l1Chain.CurrentBlock().Hash(), chainB[len(chainB)-1].Hash(), "sync replica1 to head of chain B") require.Equal(t, replica1.l1Chain.CurrentBlock().Hash(), chainB[len(chainB)-1].Hash(), "sync replica1 to head of chain B")
...@@ -105,7 +105,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) { ...@@ -105,7 +105,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
_ = replica2.Close() _ = replica2.Close()
}) })
syncFromOther := replica2.ActL1Sync(replica1.CanonL1Chain()) syncFromOther := replica2.ActL1Sync(replica1.CanonL1Chain())
for replica2.l1Chain.CurrentBlock().NumberU64()+1 < uint64(len(chainB)) { for replica2.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainB)) {
syncFromOther(t) syncFromOther(t)
} }
require.Equal(t, replica2.l1Chain.CurrentBlock().Hash(), chainB[len(chainB)-1].Hash(), "sync replica2 to head of chain B") require.Equal(t, replica2.l1Chain.CurrentBlock().Hash(), chainB[len(chainB)-1].Hash(), "sync replica2 to head of chain B")
......
...@@ -48,7 +48,7 @@ func TestBatcher(gt *testing.T) { ...@@ -48,7 +48,7 @@ func TestBatcher(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID, ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n, Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei), GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)), GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas, Gas: params.TxGas,
To: &dp.Addresses.Bob, To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2), Value: e2eutils.Ether(2),
...@@ -73,7 +73,7 @@ func TestBatcher(gt *testing.T) { ...@@ -73,7 +73,7 @@ func TestBatcher(gt *testing.T) {
miner.ActL1IncludeTx(dp.Addresses.Batcher)(t) miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
miner.ActL1EndBlock(t) miner.ActL1EndBlock(t)
bl := miner.l1Chain.CurrentBlock() bl := miner.l1Chain.CurrentBlock()
log.Info("bl", "txs", len(bl.Transactions())) log.Info("bl", "txs", len(miner.l1Chain.GetBlockByHash(bl.Hash()).Transactions()))
// Now make enough L1 blocks that the verifier will have to derive a L2 block // Now make enough L1 blocks that the verifier will have to derive a L2 block
// It will also eagerly derive the block from the batcher // It will also eagerly derive the block from the batcher
...@@ -437,7 +437,7 @@ func TestBigL2Txs(gt *testing.T) { ...@@ -437,7 +437,7 @@ func TestBigL2Txs(gt *testing.T) {
} }
sequencer.ActL1HeadSignal(t) sequencer.ActL1HeadSignal(t)
sequencer.ActL2StartBlock(t) sequencer.ActL2StartBlock(t)
baseFee := engine.l2Chain.CurrentBlock().BaseFee() // this will go quite high, since so many consecutive blocks are filled at capacity. baseFee := engine.l2Chain.CurrentBlock().BaseFee // this will go quite high, since so many consecutive blocks are filled at capacity.
// fill the block with large L2 txs from alice // fill the block with large L2 txs from alice
for n := aliceNonce; ; n++ { for n := aliceNonce; ; n++ {
require.NoError(t, err) require.NoError(t, err)
......
...@@ -202,30 +202,30 @@ func (ea *L2EngineAPI) ForkchoiceUpdatedV1(ctx context.Context, state *eth.Forkc ...@@ -202,30 +202,30 @@ func (ea *L2EngineAPI) ForkchoiceUpdatedV1(ctx context.Context, state *eth.Forkc
// chain final and completely in PoS mode. // chain final and completely in PoS mode.
if state.FinalizedBlockHash != (common.Hash{}) { if state.FinalizedBlockHash != (common.Hash{}) {
// If the finalized block is not in our canonical tree, somethings wrong // If the finalized block is not in our canonical tree, somethings wrong
finalBlock := ea.l2Chain.GetBlockByHash(state.FinalizedBlockHash) finalHeader := ea.l2Chain.GetHeaderByHash(state.FinalizedBlockHash)
if finalBlock == nil { if finalHeader == nil {
ea.log.Warn("Final block not available in database", "hash", state.FinalizedBlockHash) ea.log.Warn("Final block not available in database", "hash", state.FinalizedBlockHash)
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("final block not available in database")) return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("final block not available in database"))
} else if rawdb.ReadCanonicalHash(ea.l2Database, finalBlock.NumberU64()) != state.FinalizedBlockHash { } else if rawdb.ReadCanonicalHash(ea.l2Database, finalHeader.Number.Uint64()) != state.FinalizedBlockHash {
ea.log.Warn("Final block not in canonical chain", "number", block.NumberU64(), "hash", state.HeadBlockHash) ea.log.Warn("Final block not in canonical chain", "number", block.NumberU64(), "hash", state.HeadBlockHash)
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("final block not in canonical chain")) return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("final block not in canonical chain"))
} }
// Set the finalized block // Set the finalized block
ea.l2Chain.SetFinalized(finalBlock) ea.l2Chain.SetFinalized(finalHeader)
} }
// Check if the safe block hash is in our canonical tree, if not somethings wrong // Check if the safe block hash is in our canonical tree, if not somethings wrong
if state.SafeBlockHash != (common.Hash{}) { if state.SafeBlockHash != (common.Hash{}) {
safeBlock := ea.l2Chain.GetBlockByHash(state.SafeBlockHash) safeHeader := ea.l2Chain.GetHeaderByHash(state.SafeBlockHash)
if safeBlock == nil { if safeHeader == nil {
ea.log.Warn("Safe block not available in database") ea.log.Warn("Safe block not available in database")
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("safe block not available in database")) return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("safe block not available in database"))
} }
if rawdb.ReadCanonicalHash(ea.l2Database, safeBlock.NumberU64()) != state.SafeBlockHash { if rawdb.ReadCanonicalHash(ea.l2Database, safeHeader.Number.Uint64()) != state.SafeBlockHash {
ea.log.Warn("Safe block not in canonical chain") ea.log.Warn("Safe block not in canonical chain")
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("safe block not in canonical chain")) return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("safe block not in canonical chain"))
} }
// Set the safe block // Set the safe block
ea.l2Chain.SetSafe(safeBlock) ea.l2Chain.SetSafe(safeHeader)
} }
// If payload generation was requested, create a new block to be potentially // If payload generation was requested, create a new block to be potentially
// sealed by the beacon client. The payload will be requested later, and we // sealed by the beacon client. The payload will be requested later, and we
......
...@@ -107,7 +107,7 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) { ...@@ -107,7 +107,7 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID, ChainID: sd.L2Cfg.Config.ChainID,
Nonce: 0, Nonce: 0,
GasTipCap: big.NewInt(2 * params.GWei), GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(engine.l2Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)), GasFeeCap: new(big.Int).Add(engine.l2Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas, Gas: params.TxGas,
To: &dp.Addresses.Bob, To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2), Value: e2eutils.Ether(2),
...@@ -125,7 +125,7 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) { ...@@ -125,7 +125,7 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
SafeBlockHash: genesisBlock.Hash(), SafeBlockHash: genesisBlock.Hash(),
FinalizedBlockHash: genesisBlock.Hash(), FinalizedBlockHash: genesisBlock.Hash(),
}, &eth.PayloadAttributes{ }, &eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(parent.Time()) + 2, Timestamp: eth.Uint64Quantity(parent.Time) + 2,
PrevRandao: eth.Bytes32{}, PrevRandao: eth.Bytes32{},
SuggestedFeeRecipient: common.Address{'C'}, SuggestedFeeRecipient: common.Address{'C'},
Transactions: nil, Transactions: nil,
...@@ -161,12 +161,12 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) { ...@@ -161,12 +161,12 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
require.Equal(t, payload.BlockHash, engine.l2Chain.CurrentBlock().Hash(), "now payload is canonical") require.Equal(t, payload.BlockHash, engine.l2Chain.CurrentBlock().Hash(), "now payload is canonical")
} }
buildBlock(false) buildBlock(false)
require.Zero(t, engine.l2Chain.CurrentBlock().Transactions().Len(), "no tx included") require.Zero(t, engine.l2Chain.GetBlockByHash(engine.l2Chain.CurrentBlock().Hash()).Transactions().Len(), "no tx included")
buildBlock(true) buildBlock(true)
require.Equal(gt, 1, engine.l2Chain.CurrentBlock().Transactions().Len(), "tx from alice is included") require.Equal(gt, 1, engine.l2Chain.GetBlockByHash(engine.l2Chain.CurrentBlock().Hash()).Transactions().Len(), "tx from alice is included")
buildBlock(false) buildBlock(false)
require.Zero(t, engine.l2Chain.CurrentBlock().Transactions().Len(), "no tx included") require.Zero(t, engine.l2Chain.GetBlockByHash(engine.l2Chain.CurrentBlock().Hash()).Transactions().Len(), "no tx included")
require.Equal(t, uint64(3), engine.l2Chain.CurrentBlock().NumberU64(), "built 3 blocks") require.Equal(t, uint64(3), engine.l2Chain.CurrentBlock().Number.Uint64(), "built 3 blocks")
} }
func TestL2EngineAPIFail(gt *testing.T) { func TestL2EngineAPIFail(gt *testing.T) {
......
...@@ -14,6 +14,7 @@ import ( ...@@ -14,6 +14,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/sources" "github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-proposer/metrics"
"github.com/ethereum-optimism/optimism/op-proposer/proposer" "github.com/ethereum-optimism/optimism/op-proposer/proposer"
opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto" opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
"github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum-optimism/optimism/op-service/txmgr"
...@@ -60,7 +61,7 @@ func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Cl ...@@ -60,7 +61,7 @@ func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Cl
SignerFnFactory: signer, SignerFnFactory: signer,
} }
dr, err := proposer.NewL2OutputSubmitter(proposerCfg, log) dr, err := proposer.NewL2OutputSubmitter(proposerCfg, log, metrics.NoopMetrics)
require.NoError(t, err) require.NoError(t, err)
return &L2Proposer{ return &L2Proposer{
......
...@@ -55,7 +55,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) { ...@@ -55,7 +55,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID, ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n, Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei), GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)), GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas, Gas: params.TxGas,
To: &dp.Addresses.Bob, To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2), Value: e2eutils.Ether(2),
...@@ -76,7 +76,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) { ...@@ -76,7 +76,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
origin := miner.l1Chain.CurrentBlock() origin := miner.l1Chain.CurrentBlock()
// L2 makes blocks to catch up // L2 makes blocks to catch up
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime < origin.Time() { for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime < origin.Time {
makeL2BlockWithAliceTx() makeL2BlockWithAliceTx()
require.Equal(t, uint64(0), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "no L1 origin change before time matches") require.Equal(t, uint64(0), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "no L1 origin change before time matches")
} }
...@@ -89,7 +89,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) { ...@@ -89,7 +89,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
sequencer.ActL1HeadSignal(t) sequencer.ActL1HeadSignal(t)
// Make blocks up till the sequencer drift is about to surpass, but keep the old L1 origin // Make blocks up till the sequencer drift is about to surpass, but keep the old L1 origin
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time()+sd.RollupCfg.MaxSequencerDrift { for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.RollupCfg.MaxSequencerDrift {
sequencer.ActL2KeepL1Origin(t) sequencer.ActL2KeepL1Origin(t)
makeL2BlockWithAliceTx() makeL2BlockWithAliceTx()
require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin") require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin")
......
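For concreteness (numbers assumed purely for illustration): with origin.Time = 1000, MaxSequencerDrift = 300 and an L2 block time of 2, the loop above keeps the old origin while UnsafeL2.Time + 2 <= 1000 + 300, so the last block built against it carries timestamp 1300, after which the sequencer can no longer keep that origin.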
...@@ -41,20 +41,20 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) { ...@@ -41,20 +41,20 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) {
miner.ActL1SetFeeRecipient(common.Address{'A'}) miner.ActL1SetFeeRecipient(common.Address{'A'})
// Make two sequence windows worth of empty L1 blocks. After we pass the first sequence window, the L2 chain should get blocks // Make two sequence windows worth of empty L1 blocks. After we pass the first sequence window, the L2 chain should get blocks
for miner.l1Chain.CurrentBlock().NumberU64() < sd.RollupCfg.SeqWindowSize*2 { for miner.l1Chain.CurrentBlock().Number.Uint64() < sd.RollupCfg.SeqWindowSize*2 {
miner.ActL1StartBlock(10)(t) miner.ActL1StartBlock(10)(t)
miner.ActL1EndBlock(t) miner.ActL1EndBlock(t)
verifier.ActL2PipelineFull(t) verifier.ActL2PipelineFull(t)
l1Head := miner.l1Chain.CurrentBlock().NumberU64() l1Head := miner.l1Chain.CurrentBlock().Number.Uint64()
expectedL1Origin := uint64(0) expectedL1Origin := uint64(0)
// as soon as we complete the sequence window, we force-adopt the L1 origin // as soon as we complete the sequence window, we force-adopt the L1 origin
if l1Head >= sd.RollupCfg.SeqWindowSize { if l1Head >= sd.RollupCfg.SeqWindowSize {
expectedL1Origin = l1Head - sd.RollupCfg.SeqWindowSize expectedL1Origin = l1Head - sd.RollupCfg.SeqWindowSize
} }
require.Equal(t, expectedL1Origin, verifier.SyncStatus().SafeL2.L1Origin.Number, "L1 origin is forced in, given enough L1 blocks pass by") require.Equal(t, expectedL1Origin, verifier.SyncStatus().SafeL2.L1Origin.Number, "L1 origin is forced in, given enough L1 blocks pass by")
require.LessOrEqual(t, miner.l1Chain.GetBlockByNumber(expectedL1Origin).Time(), engine.l2Chain.CurrentBlock().Time(), "L2 time higher than L1 origin time") require.LessOrEqual(t, miner.l1Chain.GetBlockByNumber(expectedL1Origin).Time(), engine.l2Chain.CurrentBlock().Time, "L2 time higher than L1 origin time")
} }
tip2N := verifier.SyncStatus() tip2N := verifier.SyncStatus()
...@@ -75,7 +75,7 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) { ...@@ -75,7 +75,7 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) {
verifier.ActL2PipelineFull(t) verifier.ActL2PipelineFull(t)
require.Equal(t, tip2N.SafeL2, verifier.SyncStatus().SafeL2) require.Equal(t, tip2N.SafeL2, verifier.SyncStatus().SafeL2)
for miner.l1Chain.CurrentBlock().NumberU64() < sd.RollupCfg.SeqWindowSize*2 { for miner.l1Chain.CurrentBlock().Number.Uint64() < sd.RollupCfg.SeqWindowSize*2 {
miner.ActL1StartBlock(10)(t) miner.ActL1StartBlock(10)(t)
miner.ActL1EndBlock(t) miner.ActL1EndBlock(t)
} }
......
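Worked example for the forced-origin check above (SeqWindowSize assumed to be 4 for illustration): while l1Head < 4 the expected safe L1 origin stays at 0, and once l1Head reaches, say, 10 the verifier must have force-adopted origin 10 - 4 = 6, i.e. expectedL1Origin = l1Head - SeqWindowSize.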
...@@ -80,7 +80,7 @@ func TestBatcherKeyRotation(gt *testing.T) { ...@@ -80,7 +80,7 @@ func TestBatcherKeyRotation(gt *testing.T) {
miner.ActL1StartBlock(12)(t) miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.SysCfgOwner)(t) miner.ActL1IncludeTx(dp.Addresses.SysCfgOwner)(t)
miner.ActL1EndBlock(t) miner.ActL1EndBlock(t)
cfgChangeL1BlockNum := miner.l1Chain.CurrentBlock().NumberU64() cfgChangeL1BlockNum := miner.l1Chain.CurrentBlock().Number.Uint64()
// sequence L2 blocks, and submit with new batcher // sequence L2 blocks, and submit with new batcher
sequencer.ActL1HeadSignal(t) sequencer.ActL1HeadSignal(t)
...@@ -200,7 +200,7 @@ func TestGPOParamsChange(gt *testing.T) { ...@@ -200,7 +200,7 @@ func TestGPOParamsChange(gt *testing.T) {
miner.ActEmptyBlock(t) miner.ActEmptyBlock(t)
sequencer.ActL1HeadSignal(t) sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t) sequencer.ActBuildToL1Head(t)
basefee := miner.l1Chain.CurrentBlock().BaseFee() basefee := miner.l1Chain.CurrentBlock().BaseFee
// alice makes a L2 tx, sequencer includes it // alice makes a L2 tx, sequencer includes it
alice.ActResetTxOpts(t) alice.ActResetTxOpts(t)
...@@ -238,7 +238,7 @@ func TestGPOParamsChange(gt *testing.T) { ...@@ -238,7 +238,7 @@ func TestGPOParamsChange(gt *testing.T) {
miner.ActL1StartBlock(12)(t) miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.SysCfgOwner)(t) miner.ActL1IncludeTx(dp.Addresses.SysCfgOwner)(t)
miner.ActL1EndBlock(t) miner.ActL1EndBlock(t)
basefeeGPOUpdate := miner.l1Chain.CurrentBlock().BaseFee() basefeeGPOUpdate := miner.l1Chain.CurrentBlock().BaseFee
// build empty L2 chain, up to but excluding the L2 block with the L1 origin that processes the GPO change // build empty L2 chain, up to but excluding the L2 block with the L1 origin that processes the GPO change
sequencer.ActL1HeadSignal(t) sequencer.ActL1HeadSignal(t)
...@@ -274,7 +274,7 @@ func TestGPOParamsChange(gt *testing.T) { ...@@ -274,7 +274,7 @@ func TestGPOParamsChange(gt *testing.T) {
// build more L2 blocks, with new L1 origin // build more L2 blocks, with new L1 origin
miner.ActEmptyBlock(t) miner.ActEmptyBlock(t)
basefee = miner.l1Chain.CurrentBlock().BaseFee() basefee = miner.l1Chain.CurrentBlock().BaseFee
sequencer.ActL1HeadSignal(t) sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t) sequencer.ActBuildToL1Head(t)
// and Alice makes a tx again // and Alice makes a tx again
...@@ -313,7 +313,7 @@ func TestGasLimitChange(gt *testing.T) { ...@@ -313,7 +313,7 @@ func TestGasLimitChange(gt *testing.T) {
sequencer.ActL1HeadSignal(t) sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t) sequencer.ActBuildToL1Head(t)
oldGasLimit := seqEngine.l2Chain.CurrentBlock().GasLimit() oldGasLimit := seqEngine.l2Chain.CurrentBlock().GasLimit
require.Equal(t, oldGasLimit, uint64(dp.DeployConfig.L2GenesisBlockGasLimit)) require.Equal(t, oldGasLimit, uint64(dp.DeployConfig.L2GenesisBlockGasLimit))
// change gas limit on L1 to triple what it was // change gas limit on L1 to triple what it was
...@@ -335,12 +335,12 @@ func TestGasLimitChange(gt *testing.T) { ...@@ -335,12 +335,12 @@ func TestGasLimitChange(gt *testing.T) {
sequencer.ActL1HeadSignal(t) sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1HeadExcl(t) sequencer.ActBuildToL1HeadExcl(t)
require.Equal(t, oldGasLimit, seqEngine.l2Chain.CurrentBlock().GasLimit()) require.Equal(t, oldGasLimit, seqEngine.l2Chain.CurrentBlock().GasLimit)
require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number) require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number)
// now include the L1 block with the gaslimit change, and see if it changes as expected // now include the L1 block with the gaslimit change, and see if it changes as expected
sequencer.ActBuildToL1Head(t) sequencer.ActBuildToL1Head(t)
require.Equal(t, oldGasLimit*3, seqEngine.l2Chain.CurrentBlock().GasLimit()) require.Equal(t, oldGasLimit*3, seqEngine.l2Chain.CurrentBlock().GasLimit)
require.Equal(t, uint64(2), sequencer.SyncStatus().UnsafeL2.L1Origin.Number) require.Equal(t, uint64(2), sequencer.SyncStatus().UnsafeL2.L1Origin.Number)
// now submit all this to L1, and see if a verifier can sync and reproduce it // now submit all this to L1, and see if a verifier can sync and reproduce it
......
...@@ -132,7 +132,7 @@ func runCrossLayerUserTest(gt *testing.T, test regolithScheduledTest) { ...@@ -132,7 +132,7 @@ func runCrossLayerUserTest(gt *testing.T, test regolithScheduledTest) {
seq.ActL1HeadSignal(t) seq.ActL1HeadSignal(t)
// sync sequencer build enough blocks to adopt latest L1 origin // sync sequencer build enough blocks to adopt latest L1 origin
for seq.SyncStatus().UnsafeL2.L1Origin.Number < miner.l1Chain.CurrentBlock().NumberU64() { for seq.SyncStatus().UnsafeL2.L1Origin.Number < miner.l1Chain.CurrentBlock().Number.Uint64() {
seq.ActL2StartBlock(t) seq.ActL2StartBlock(t)
seq.ActL2EndBlock(t) seq.ActL2EndBlock(t)
} }
......
...@@ -169,7 +169,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) * ...@@ -169,7 +169,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
if alloc.PrefundTestUsers { if alloc.PrefundTestUsers {
for _, addr := range deployParams.Addresses.All() { for _, addr := range deployParams.Addresses.All() {
l1Genesis.Alloc[addr] = core.GenesisAccount{ l1Genesis.Alloc[addr] = core.GenesisAccount{
Balance: Ether(1e6), Balance: Ether(1e12),
} }
} }
} }
...@@ -184,7 +184,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) * ...@@ -184,7 +184,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
if alloc.PrefundTestUsers { if alloc.PrefundTestUsers {
for _, addr := range deployParams.Addresses.All() { for _, addr := range deployParams.Addresses.All() {
l2Genesis.Alloc[addr] = core.GenesisAccount{ l2Genesis.Alloc[addr] = core.GenesisAccount{
Balance: Ether(1e6), Balance: Ether(1e12),
} }
} }
} }
......
...@@ -27,10 +27,10 @@ func TestSetup(t *testing.T) { ...@@ -27,10 +27,10 @@ func TestSetup(t *testing.T) {
alloc := &AllocParams{PrefundTestUsers: true} alloc := &AllocParams{PrefundTestUsers: true}
sd := Setup(t, dp, alloc) sd := Setup(t, dp, alloc)
require.Contains(t, sd.L1Cfg.Alloc, dp.Addresses.Alice) require.Contains(t, sd.L1Cfg.Alloc, dp.Addresses.Alice)
require.Equal(t, sd.L1Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e6)) require.Equal(t, sd.L1Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e12))
require.Contains(t, sd.L2Cfg.Alloc, dp.Addresses.Alice) require.Contains(t, sd.L2Cfg.Alloc, dp.Addresses.Alice)
require.Equal(t, sd.L2Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e6)) require.Equal(t, sd.L2Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e12))
require.Contains(t, sd.L1Cfg.Alloc, predeploys.DevOptimismPortalAddr) require.Contains(t, sd.L1Cfg.Alloc, predeploys.DevOptimismPortalAddr)
require.Contains(t, sd.L2Cfg.Alloc, predeploys.L1BlockAddr) require.Contains(t, sd.L2Cfg.Alloc, predeploys.L1BlockAddr)
......
...@@ -160,11 +160,11 @@ func (f *fakeSafeFinalizedL1) Start() error { ...@@ -160,11 +160,11 @@ func (f *fakeSafeFinalizedL1) Start() error {
case head := <-headChanges: case head := <-headChanges:
num := head.Block.NumberU64() num := head.Block.NumberU64()
if num > f.finalizedDistance { if num > f.finalizedDistance {
toFinalize := f.eth.BlockChain().GetBlockByNumber(num - f.finalizedDistance) toFinalize := f.eth.BlockChain().GetHeaderByNumber(num - f.finalizedDistance)
f.eth.BlockChain().SetFinalized(toFinalize) f.eth.BlockChain().SetFinalized(toFinalize)
} }
if num > f.safeDistance { if num > f.safeDistance {
toSafe := f.eth.BlockChain().GetBlockByNumber(num - f.safeDistance) toSafe := f.eth.BlockChain().GetHeaderByNumber(num - f.safeDistance)
f.eth.BlockChain().SetSafe(toSafe) f.eth.BlockChain().SetSafe(toSafe)
} }
case <-quit: case <-quit:
......
...@@ -15,6 +15,7 @@ import ( ...@@ -15,6 +15,7 @@ import (
batchermetrics "github.com/ethereum-optimism/optimism/op-batcher/metrics" batchermetrics "github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-node/chaincfg" "github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/sources" "github.com/ethereum-optimism/optimism/op-node/sources"
proposermetrics "github.com/ethereum-optimism/optimism/op-proposer/metrics"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer" l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
...@@ -363,7 +364,7 @@ func TestMigration(t *testing.T) { ...@@ -363,7 +364,7 @@ func TestMigration(t *testing.T) {
Format: "text", Format: "text",
}, },
PrivateKey: hexPriv(secrets.Proposer), PrivateKey: hexPriv(secrets.Proposer),
}, lgr.New("module", "proposer")) }, lgr.New("module", "proposer"), proposermetrics.NoopMetrics)
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { t.Cleanup(func() {
proposer.Stop() proposer.Stop()
......
...@@ -37,6 +37,7 @@ import ( ...@@ -37,6 +37,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/driver" "github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/sources" "github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
proposermetrics "github.com/ethereum-optimism/optimism/op-proposer/metrics"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer" l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
) )
...@@ -579,7 +580,7 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) { ...@@ -579,7 +580,7 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
Format: "text", Format: "text",
}, },
PrivateKey: hexPriv(cfg.Secrets.Proposer), PrivateKey: hexPriv(cfg.Secrets.Proposer),
}, sys.cfg.Loggers["proposer"]) }, sys.cfg.Loggers["proposer"], proposermetrics.NoopMetrics)
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to setup l2 output submitter: %w", err) return nil, fmt.Errorf("unable to setup l2 output submitter: %w", err)
} }
......
FROM golang:1.18.0-alpine3.15 as builder FROM golang:1.19.0-alpine3.15 as builder
# build from root of repo # build from root of repo
COPY ./op-exporter /app COPY ./op-exporter /app
......
FROM --platform=$BUILDPLATFORM golang:1.18.0-alpine3.15 as builder FROM --platform=$BUILDPLATFORM golang:1.19.0-alpine3.15 as builder
ARG VERSION=v0.0.0 ARG VERSION=v0.0.0
......
...@@ -14,9 +14,17 @@ import ( ...@@ -14,9 +14,17 @@ import (
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
var ErrMaxFrameSizeTooSmall = errors.New("maxSize is too small to fit the fixed frame overhead")
var ErrNotDepositTx = errors.New("first transaction in block is not a deposit tx") var ErrNotDepositTx = errors.New("first transaction in block is not a deposit tx")
var ErrTooManyRLPBytes = errors.New("batch would cause RLP bytes to go over limit") var ErrTooManyRLPBytes = errors.New("batch would cause RLP bytes to go over limit")
// FrameV0OverHeadSize is the absolute minimum size of a frame.
// This is the fixed overhead frame size, calculated as specified
// in the [Frame Format] specs: 16 + 2 + 4 + 1 = 23 bytes.
//
// [Frame Format]: https://github.com/ethereum-optimism/optimism/blob/develop/specs/derivation.md#frame-format
const FrameV0OverHeadSize = 23
type ChannelOut struct { type ChannelOut struct {
id ChannelID id ChannelID
// Frame ID of the next frame to emit. Increment after emitting // Frame ID of the next frame to emit. Increment after emitting
...@@ -141,19 +149,23 @@ func (co *ChannelOut) Close() error { ...@@ -141,19 +149,23 @@ func (co *ChannelOut) Close() error {
// OutputFrame writes a frame to w with a given max size and returns the frame // OutputFrame writes a frame to w with a given max size and returns the frame
// number. // number.
// Use `ReadyBytes`, `Flush`, and `Close` to modify the ready buffer. // Use `ReadyBytes`, `Flush`, and `Close` to modify the ready buffer.
// Returns io.EOF when the channel is closed & there are no more frames // Returns an error if the `maxSize` < FrameV0OverHeadSize.
// Returns io.EOF when the channel is closed & there are no more frames.
// Returns nil if there is still more buffered data. // Returns nil if there is still more buffered data.
// Returns and error if it ran into an error during processing. // Returns an error if it ran into an error during processing.
func (co *ChannelOut) OutputFrame(w *bytes.Buffer, maxSize uint64) (uint16, error) { func (co *ChannelOut) OutputFrame(w *bytes.Buffer, maxSize uint64) (uint16, error) {
f := Frame{ f := Frame{
ID: co.id, ID: co.id,
FrameNumber: uint16(co.frame), FrameNumber: uint16(co.frame),
} }
// Check that the maxSize is large enough for the frame overhead size.
if maxSize < FrameV0OverHeadSize {
return 0, ErrMaxFrameSizeTooSmall
}
// Copy data from the local buffer into the frame data buffer // Copy data from the local buffer into the frame data buffer
// Don't go past the maxSize with the fixed frame overhead. maxDataSize := maxSize - FrameV0OverHeadSize
// Fixed overhead: 16 + 2 + 4 + 1 = 23 bytes.
maxDataSize := maxSize - 23
if maxDataSize > uint64(co.buf.Len()) { if maxDataSize > uint64(co.buf.Len()) {
maxDataSize = uint64(co.buf.Len()) maxDataSize = uint64(co.buf.Len())
// If we are closed & will not spill past the current frame // If we are closed & will not spill past the current frame
......
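The 23-byte constant introduced above corresponds to the fixed frame fields in the linked Frame Format spec: a 16-byte channel ID, a 2-byte frame number (uint16), a 4-byte frame-data length (uint32) and a 1-byte is_last flag. A small sketch of the budgeting OutputFrame now performs (the helper name is illustrative):
// 16 + 2 + 4 + 1 = 23 bytes of fixed overhead per frame
func maxFrameDataSize(maxSize uint64) (uint64, error) {
	if maxSize < FrameV0OverHeadSize {
		return 0, ErrMaxFrameSizeTooSmall // cannot even fit an empty frame
	}
	return maxSize - FrameV0OverHeadSize, nil // bytes left for frame_data
}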
...@@ -29,6 +29,22 @@ func TestChannelOutAddBlock(t *testing.T) { ...@@ -29,6 +29,22 @@ func TestChannelOutAddBlock(t *testing.T) {
}) })
} }
// TestOutputFrameSmallMaxSize tests that calling [OutputFrame] with a small
// max size that is below the fixed frame size overhead of 23, will return
// an error.
func TestOutputFrameSmallMaxSize(t *testing.T) {
cout, err := NewChannelOut()
require.NoError(t, err)
// Call OutputFrame with the range of small max size values that err
var w bytes.Buffer
for i := 0; i < 23; i++ {
fid, err := cout.OutputFrame(&w, uint64(i))
require.ErrorIs(t, err, ErrMaxFrameSizeTooSmall)
require.Zero(t, fid)
}
}
// TestRLPByteLimit ensures that stream encoder is properly limiting the length. // TestRLPByteLimit ensures that stream encoder is properly limiting the length.
// It will decode the input if `len(input) <= inputLimit`. // It will decode the input if `len(input) <= inputLimit`.
func TestRLPByteLimit(t *testing.T) { func TestRLPByteLimit(t *testing.T) {
......
FROM --platform=$BUILDPLATFORM golang:1.18.0-alpine3.15 as builder FROM --platform=$BUILDPLATFORM golang:1.19.0-alpine3.15 as builder
ARG VERSION=v0.0.0 ARG VERSION=v0.0.0
......
package metrics
import (
"context"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
)
const Namespace = "op_proposer"
type Metricer interface {
RecordInfo(version string)
RecordUp()
// Records all L1 and L2 block events
opmetrics.RefMetricer
RecordL2BlocksProposed(l2ref eth.L2BlockRef)
}
type Metrics struct {
ns string
registry *prometheus.Registry
factory opmetrics.Factory
opmetrics.RefMetrics
Info prometheus.GaugeVec
Up prometheus.Gauge
}
var _ Metricer = (*Metrics)(nil)
func NewMetrics(procName string) *Metrics {
if procName == "" {
procName = "default"
}
ns := Namespace + "_" + procName
registry := opmetrics.NewRegistry()
factory := opmetrics.With(registry)
return &Metrics{
ns: ns,
registry: registry,
factory: factory,
RefMetrics: opmetrics.MakeRefMetrics(ns, factory),
Info: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "info",
Help: "Pseudo-metric tracking version and config info",
}, []string{
"version",
}),
Up: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "up",
Help: "1 if the op-proposer has finished starting up",
}),
}
}
func (m *Metrics) Serve(ctx context.Context, host string, port int) error {
return opmetrics.ListenAndServe(ctx, m.registry, host, port)
}
func (m *Metrics) StartBalanceMetrics(ctx context.Context,
l log.Logger, client *ethclient.Client, account common.Address) {
opmetrics.LaunchBalanceMetrics(ctx, l, m.registry, m.ns, client, account)
}
// RecordInfo sets a pseudo-metric that contains versioning and
// config info for the op-proposer.
func (m *Metrics) RecordInfo(version string) {
m.Info.WithLabelValues(version).Set(1)
}
// RecordUp sets the up metric to 1.
func (m *Metrics) RecordUp() {
prometheus.MustRegister()
m.Up.Set(1)
}
const (
BlockProposed = "proposed"
)
// RecordL2BlocksProposed should be called when new L2 block is proposed
func (m *Metrics) RecordL2BlocksProposed(l2ref eth.L2BlockRef) {
m.RecordL2Ref(BlockProposed, l2ref)
}
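A condensed sketch of how the proposer wires this metrics API up, matching the op-proposer diff later in this change (not a complete program):
m := metrics.NewMetrics("default")
go func() {
	if err := m.Serve(ctx, metricsCfg.ListenAddr, metricsCfg.ListenPort); err != nil {
		l.Error("error starting metrics server", err)
	}
}()
m.RecordInfo(version) // pseudo-metric labelled with the binary version
m.RecordUp()          // startup finished
// after each successfully submitted proposal:
m.RecordL2BlocksProposed(output.BlockRef)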
package metrics
import (
"github.com/ethereum-optimism/optimism/op-node/eth"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
)
type noopMetrics struct{ opmetrics.NoopRefMetrics }
var NoopMetrics Metricer = new(noopMetrics)
func (*noopMetrics) RecordInfo(version string) {}
func (*noopMetrics) RecordUp() {}
func (*noopMetrics) RecordL2BlocksProposed(l2ref eth.L2BlockRef) {}
...@@ -24,9 +24,9 @@ import ( ...@@ -24,9 +24,9 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/sources" "github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-proposer/metrics"
opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto" opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof" oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
"github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum-optimism/optimism/op-service/txmgr"
...@@ -49,9 +49,10 @@ func Main(version string, cliCtx *cli.Context) error { ...@@ -49,9 +49,10 @@ func Main(version string, cliCtx *cli.Context) error {
} }
l := oplog.NewLogger(cfg.LogConfig) l := oplog.NewLogger(cfg.LogConfig)
m := metrics.NewMetrics("default")
l.Info("Initializing L2 Output Submitter") l.Info("Initializing L2 Output Submitter")
l2OutputSubmitter, err := NewL2OutputSubmitterFromCLIConfig(cfg, l) l2OutputSubmitter, err := NewL2OutputSubmitterFromCLIConfig(cfg, l, m)
if err != nil { if err != nil {
l.Error("Unable to create the L2 Output Submitter", "error", err) l.Error("Unable to create the L2 Output Submitter", "error", err)
return err return err
...@@ -78,17 +79,15 @@ func Main(version string, cliCtx *cli.Context) error { ...@@ -78,17 +79,15 @@ func Main(version string, cliCtx *cli.Context) error {
}() }()
} }
registry := opmetrics.NewRegistry()
metricsCfg := cfg.MetricsConfig metricsCfg := cfg.MetricsConfig
if metricsCfg.Enabled { if metricsCfg.Enabled {
l.Info("starting metrics server", "addr", metricsCfg.ListenAddr, "port", metricsCfg.ListenPort) l.Info("starting metrics server", "addr", metricsCfg.ListenAddr, "port", metricsCfg.ListenPort)
go func() { go func() {
if err := opmetrics.ListenAndServe(ctx, registry, metricsCfg.ListenAddr, metricsCfg.ListenPort); err != nil { if err := m.Serve(ctx, metricsCfg.ListenAddr, metricsCfg.ListenPort); err != nil {
l.Error("error starting metrics server", err) l.Error("error starting metrics server", err)
} }
}() }()
addr := l2OutputSubmitter.from m.StartBalanceMetrics(ctx, l, l2OutputSubmitter.l1Client, l2OutputSubmitter.from)
opmetrics.LaunchBalanceMetrics(ctx, l, registry, "", l2OutputSubmitter.l1Client, addr)
} }
rpcCfg := cfg.RPCConfig rpcCfg := cfg.RPCConfig
...@@ -98,6 +97,9 @@ func Main(version string, cliCtx *cli.Context) error { ...@@ -98,6 +97,9 @@ func Main(version string, cliCtx *cli.Context) error {
return fmt.Errorf("error starting RPC server: %w", err) return fmt.Errorf("error starting RPC server: %w", err)
} }
m.RecordInfo(version)
m.RecordUp()
interruptChannel := make(chan os.Signal, 1) interruptChannel := make(chan os.Signal, 1)
signal.Notify(interruptChannel, []os.Signal{ signal.Notify(interruptChannel, []os.Signal{
os.Interrupt, os.Interrupt,
...@@ -117,6 +119,7 @@ type L2OutputSubmitter struct { ...@@ -117,6 +119,7 @@ type L2OutputSubmitter struct {
wg sync.WaitGroup wg sync.WaitGroup
done chan struct{} done chan struct{}
log log.Logger log log.Logger
metr metrics.Metricer
ctx context.Context ctx context.Context
cancel context.CancelFunc cancel context.CancelFunc
...@@ -143,7 +146,7 @@ type L2OutputSubmitter struct { ...@@ -143,7 +146,7 @@ type L2OutputSubmitter struct {
} }
// NewL2OutputSubmitterFromCLIConfig creates a new L2 Output Submitter given the CLI Config // NewL2OutputSubmitterFromCLIConfig creates a new L2 Output Submitter given the CLI Config
func NewL2OutputSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger) (*L2OutputSubmitter, error) { func NewL2OutputSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger, m metrics.Metricer) (*L2OutputSubmitter, error) {
signer, fromAddress, err := opcrypto.SignerFactoryFromConfig(l, cfg.PrivateKey, cfg.Mnemonic, cfg.L2OutputHDPath, cfg.SignerConfig) signer, fromAddress, err := opcrypto.SignerFactoryFromConfig(l, cfg.PrivateKey, cfg.Mnemonic, cfg.L2OutputHDPath, cfg.SignerConfig)
if err != nil { if err != nil {
return nil, err return nil, err
...@@ -185,11 +188,11 @@ func NewL2OutputSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger) (*L2OutputSu ...@@ -185,11 +188,11 @@ func NewL2OutputSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger) (*L2OutputSu
SignerFnFactory: signer, SignerFnFactory: signer,
} }
return NewL2OutputSubmitter(proposerCfg, l) return NewL2OutputSubmitter(proposerCfg, l, m)
} }
// NewL2OutputSubmitter creates a new L2 Output Submitter // NewL2OutputSubmitter creates a new L2 Output Submitter
func NewL2OutputSubmitter(cfg Config, l log.Logger) (*L2OutputSubmitter, error) { func NewL2OutputSubmitter(cfg Config, l log.Logger, m metrics.Metricer) (*L2OutputSubmitter, error) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
cCtx, cCancel := context.WithTimeout(ctx, defaultDialTimeout) cCtx, cCancel := context.WithTimeout(ctx, defaultDialTimeout)
...@@ -228,6 +231,7 @@ func NewL2OutputSubmitter(cfg Config, l log.Logger) (*L2OutputSubmitter, error) ...@@ -228,6 +231,7 @@ func NewL2OutputSubmitter(cfg Config, l log.Logger) (*L2OutputSubmitter, error)
log: l, log: l,
ctx: ctx, ctx: ctx,
cancel: cancel, cancel: cancel,
metr: m,
l1Client: cfg.L1Client, l1Client: cfg.L1Client,
rollupClient: cfg.RollupClient, rollupClient: cfg.RollupClient,
...@@ -413,9 +417,9 @@ func (l *L2OutputSubmitter) loop() { ...@@ -413,9 +417,9 @@ func (l *L2OutputSubmitter) loop() {
l.log.Error("Failed to send proposal transaction", "err", err) l.log.Error("Failed to send proposal transaction", "err", err)
cancel() cancel()
break break
} else {
cancel()
} }
l.metr.RecordL2BlocksProposed(output.BlockRef)
cancel()
case <-l.done: case <-l.done:
return return
......
FROM golang:1.18.0-alpine3.15 as builder FROM golang:1.19.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers RUN apk add --no-cache make gcc musl-dev linux-headers
......
...@@ -16,7 +16,7 @@ WORKDIR /opt/foundry ...@@ -16,7 +16,7 @@ WORKDIR /opt/foundry
# Only diff from upstream docker image is this clone instead # Only diff from upstream docker image is this clone instead
# of COPY. We select a specific commit to use. # of COPY. We select a specific commit to use.
RUN git clone https://github.com/foundry-rs/foundry.git . \ RUN git clone https://github.com/foundry-rs/foundry.git . \
&& git checkout 8f3fca9c608d58981daaffe11e7f8076644cb753 && git checkout da2392e58bb8a7fefeba46b40c4df1afad8ccd22
RUN source $HOME/.profile && \ RUN source $HOME/.profile && \
cargo build --release && \ cargo build --release && \
......
ignores: [
"@babel/eslint-parser",
"@typescript-eslint/parser",
"eslint-plugin-import",
"eslint-plugin-unicorn",
"eslint-plugin-jsdoc",
"eslint-plugin-prefer-arrow",
"eslint-plugin-react",
"@typescript-eslint/eslint-plugin",
"eslint-config-prettier",
"eslint-plugin-prettier"
]
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
"lint": "yarn lint:fix && yarn lint:check", "lint": "yarn lint:fix && yarn lint:check",
"pre-commit": "lint-staged", "pre-commit": "lint-staged",
"test": "ts-mocha test/*.spec.ts", "test": "ts-mocha test/*.spec.ts",
"test:coverage": "echo 'no coverage'" "test:coverage": "nyc ts-mocha test/*.spec.ts && nyc merge .nyc_output coverage.json"
}, },
"keywords": [ "keywords": [
"optimism", "optimism",
...@@ -48,8 +48,7 @@ ...@@ -48,8 +48,7 @@
"pino": "^6.11.3", "pino": "^6.11.3",
"pino-multi-stream": "^5.3.0", "pino-multi-stream": "^5.3.0",
"pino-sentry": "^0.7.0", "pino-sentry": "^0.7.0",
"prom-client": "^13.1.0", "prom-client": "^13.1.0"
"qs": "^6.10.5"
}, },
"devDependencies": { "devDependencies": {
"@ethersproject/abstract-provider": "^5.7.0", "@ethersproject/abstract-provider": "^5.7.0",
......
...@@ -3,11 +3,11 @@ import request from 'supertest' ...@@ -3,11 +3,11 @@ import request from 'supertest'
import chai = require('chai') import chai = require('chai')
const expect = chai.expect const expect = chai.expect
import { Logger, Metrics, createMetricsServer } from '../src' import { Logger, LegacyMetrics, createMetricsServer } from '../src'
describe('Metrics', () => { describe('Metrics', () => {
it('shoud serve metrics', async () => { it('shoud serve metrics', async () => {
const metrics = new Metrics({ const metrics = new LegacyMetrics({
prefix: 'test_metrics', prefix: 'test_metrics',
}) })
const registry = metrics.registry const registry = metrics.registry
......