Commit 46c639ef authored by mergify[bot], committed by GitHub

Merge branch 'develop' into alt-sync-improvements

parents 163d0720 c12366c4
---
'@eth-optimism/sdk': patch
---
Have SDK automatically create Standard and ETH bridges when L1StandardBridge is provided.
---
'@eth-optimism/batch-submitter-service': patch
---
Allow deposit-only batches
---
'@eth-optimism/contracts-bedrock': patch
---
Added a constructor to the System Dictator
---
'@eth-optimism/batch-submitter-service': patch
---
Fix flag name for MaxStateRootElements in batch-submitter
Fix log package for proposer
......@@ -518,6 +518,10 @@ jobs:
patterns: packages
# Note: The below needs to be manually configured whenever we
# add a new package to CI.
- run:
name: Check common-ts
command: npx depcheck
working_directory: packages/common-ts
- run:
name: Check contracts
command: npx depcheck
......
......@@ -66,6 +66,7 @@ You'll need the following:
* [Yarn](https://classic.yarnpkg.com/en/docs/install)
* [Docker](https://docs.docker.com/get-docker/)
* [Docker Compose](https://docs.docker.com/compose/install/)
* [Go](https://go.dev/dl/)
* [Foundry](https://getfoundry.sh)
### Setup
......
......@@ -209,7 +209,7 @@ func NewConfig(ctx *cli.Context) (Config, error) {
MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeFlag.Name),
MaxPlaintextBatchSize: ctx.GlobalUint64(flags.MaxPlaintextBatchSizeFlag.Name),
MinStateRootElements: ctx.GlobalUint64(flags.MinStateRootElementsFlag.Name),
MaxStateRootElements: ctx.GlobalUint64(flags.MinStateRootElementsFlag.Name),
MaxStateRootElements: ctx.GlobalUint64(flags.MaxStateRootElementsFlag.Name),
MaxBatchSubmissionTime: ctx.GlobalDuration(flags.MaxBatchSubmissionTimeFlag.Name),
PollInterval: ctx.GlobalDuration(flags.PollIntervalFlag.Name),
NumConfirmations: ctx.GlobalUint64(flags.NumConfirmationsFlag.Name),
......
......@@ -13,13 +13,13 @@ import (
"github.com/ethereum-optimism/optimism/bss-core/metrics"
"github.com/ethereum-optimism/optimism/bss-core/txmgr"
l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient"
"github.com/ethereum-optimism/optimism/l2geth/log"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
)
// stateRootSize is the size in bytes of a state root.
......
......@@ -6,6 +6,7 @@ import (
"github.com/ethereum-optimism/optimism/batch-submitter/drivers/sequencer"
l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum-optimism/optimism/l2geth/core/types"
l2types "github.com/ethereum-optimism/optimism/l2geth/core/types"
"github.com/stretchr/testify/require"
)
......@@ -47,3 +48,76 @@ func TestBatchElementFromBlock(t *testing.T) {
require.False(t, element.IsSequencerTx())
require.Nil(t, element.Tx)
}
func TestGenSequencerParams(t *testing.T) {
tx := types.NewTransaction(0, l2common.Address{}, big.NewInt(0), 0, big.NewInt(0), []byte{})
shouldStartAtElement := uint64(1)
blockOffset := uint64(1)
batches := []sequencer.BatchElement{
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 1, BlockNumber: 1, Tx: sequencer.NewCachedTx(tx)},
}
params, err := sequencer.GenSequencerBatchParams(shouldStartAtElement, blockOffset, batches)
require.NoError(t, err)
require.Equal(t, uint64(0), params.ShouldStartAtElement)
require.Equal(t, uint64(len(batches)), params.TotalElementsToAppend)
require.Equal(t, len(batches), len(params.Contexts))
// There is only 1 sequencer tx
require.Equal(t, 1, len(params.Txs))
// There are 2 contexts
// The first context contains the deposit
context1 := params.Contexts[0]
require.Equal(t, uint64(0), context1.NumSequencedTxs)
require.Equal(t, uint64(1), context1.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context1.Timestamp)
require.Equal(t, uint64(1), context1.BlockNumber)
// The second context contains the sequencer tx
context2 := params.Contexts[1]
require.Equal(t, uint64(1), context2.NumSequencedTxs)
require.Equal(t, uint64(0), context2.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context2.Timestamp)
require.Equal(t, uint64(1), context2.BlockNumber)
}
func TestGenSequencerParamsOnlyDeposits(t *testing.T) {
shouldStartAtElement := uint64(1)
blockOffset := uint64(1)
batches := []sequencer.BatchElement{
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 2, BlockNumber: 2},
}
params, err := sequencer.GenSequencerBatchParams(shouldStartAtElement, blockOffset, batches)
require.NoError(t, err)
// The batches will pack deposits into the same context when their
// timestamps and blocknumbers are the same
require.Equal(t, uint64(0), params.ShouldStartAtElement)
require.Equal(t, uint64(len(batches)), params.TotalElementsToAppend)
// 2 deposits have the same timestamp + blocknumber, they go in the
// same context. 1 deposit has a different timestamp + blocknumber,
// it goes into a different context. Therefore there are 2 contexts
require.Equal(t, 2, len(params.Contexts))
// No sequencer txs
require.Equal(t, 0, len(params.Txs))
// There are 2 contexts
// The first context contains the deposit
context1 := params.Contexts[0]
require.Equal(t, uint64(0), context1.NumSequencedTxs)
require.Equal(t, uint64(2), context1.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context1.Timestamp)
require.Equal(t, uint64(1), context1.BlockNumber)
context2 := params.Contexts[1]
require.Equal(t, uint64(0), context2.NumSequencedTxs)
require.Equal(t, uint64(1), context2.NumSubsequentQueueTxs)
require.Equal(t, uint64(2), context2.Timestamp)
require.Equal(t, uint64(2), context2.BlockNumber)
}
......@@ -222,11 +222,6 @@ func (p *AppendSequencerBatchParams) Write(
return ErrMalformedBatch
}
// There must be transactions if there are contexts
if len(p.Txs) == 0 && len(p.Contexts) != 0 {
return ErrMalformedBatch
}
// copy the contexts as to not malleate the struct
// when it is a typed batch
contexts := make([]BatchContext, 0, len(p.Contexts)+1)
......@@ -361,9 +356,6 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
if len(p.Contexts) == 0 && len(p.Txs) != 0 {
return ErrMalformedBatch
}
if len(p.Txs) == 0 && len(p.Contexts) != 0 {
return ErrMalformedBatch
}
return closeReader()
} else if err != nil {
return err
......
......@@ -46,7 +46,7 @@
}
],
"txs": [],
"error": true
"error": false
},
{
"name": "multiple-contexts-no-txs",
......@@ -80,7 +80,7 @@
}
],
"txs": [],
"error": true
"error": false
},
{
"name": "complex",
......
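Together, the two removed checks above and the fixture expectations flipped from `true` to `false` implement the "Allow deposit-only batches" changeset: a batch whose contexts contain only queue (deposit) transactions carries no sequencer txs, and that is now legal on both the `Write` and `Read` paths. A standalone sketch of the validation that remains (names are illustrative, not the repo's actual API):

```go
package main

import (
	"errors"
	"fmt"
)

var ErrMalformedBatch = errors.New("malformed batch")

// validateBatch mirrors the rule left in place after this change: sequencer
// txs without contexts are still malformed, while contexts without txs
// (a deposit-only batch) are now accepted.
func validateBatch(numContexts, numTxs int) error {
	if numContexts == 0 && numTxs != 0 {
		return ErrMalformedBatch
	}
	// The old rule also rejected numTxs == 0 && numContexts != 0.
	return nil
}

func main() {
	fmt.Println(validateBatch(2, 0)) // <nil>: deposit-only batches are valid
	fmt.Println(validateBatch(0, 1)) // malformed batch
}
```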
......@@ -166,6 +166,7 @@ module.exports = {
children: [
"/docs/build/tutorials/add-attr.md",
"/docs/build/tutorials/new-precomp.md",
"/docs/build/tutorials/predeploys.md"
]
} // End of tutorials
],
......
......@@ -32,6 +32,11 @@
<i class="fab fa-discord"></i> Discord community
</a>
</li>
<li>
<a href="https://wkf.ms/3XTdpLl" target="_blank" rel="noopener noreferrer">
<i class="far fa-comment-dots"></i> Get support for going live
</a>
</li>
</ul>
</div>
</div>
......
---
title: Modifying Predeployed Contracts
lang: en-US
---
::: warning 🚧 OP Stack Hacks are explicitly things that you can do with the OP Stack that are *not* currently intended for production use
OP Stack Hacks are not for the faint of heart. You will not be able to receive significant developer support for OP Stack Hacks, so be prepared to get your hands dirty.
:::
OP Stack blockchains have a number of [predeployed contracts](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/src/constants.ts) that provide important functionality.
Most of those contracts are proxies that can be upgraded using the `proxyAdminOwner` account, which was configured when the network was initially deployed.
The predeploys are controlled by a predeploy called [`ProxyAdmin`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol), whose address is `0x4200000000000000000000000000000000000018`.
The function to call is [`upgrade(address,address)`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol#L211-L229).
The first parameter is the proxy to upgrade, and the second is the address of a new implementation.
For example, the legacy `L1BlockNumber` contract is at `0x420...013`.
To disable this contract, we'll set its implementation to `0x00...00`.
We do this using the [Foundry](https://book.getfoundry.sh/) command `cast`.
1. We'll need several constants.
- Set these addresses as variables in your terminal.
```sh
L1BLOCKNUM=0x4200000000000000000000000000000000000013
PROXY_ADMIN=0x4200000000000000000000000000000000000018
ZERO_ADDR=0x0000000000000000000000000000000000000000
```
- Set `PRIVKEY` to the private key of your ADMIN account.
- Set `ETH_RPC_URL`. If you're on the computer that runs the blockchain, use this command.
```sh
export ETH_RPC_URL=http://localhost:8545
```
1. Verify `L1BlockNumber` works correctly.
See that when you call the contract you get a block number, and twelve seconds later you get the next one (block time on L1 is twelve seconds).
```sh
cast call $L1BLOCKNUM 'number()' | cast --to-dec
sleep 12 && cast call $L1BLOCKNUM 'number()' | cast --to-dec
```
1. Get the current implementation for the contract.
```sh
L1BLOCKNUM_IMPLEMENTATION=`cast call $L1BLOCKNUM "implementation()" | sed 's/000000000000000000000000//'`
echo $L1BLOCKNUM_IMPLEMENTATION
```
1. Change the implementation to the zero address.
```sh
cast send --private-key $PRIVKEY $PROXY_ADMIN "upgrade(address,address)" $L1BLOCKNUM $ZERO_ADDR
```
1. See that the implementation is address zero, and that calling it fails.
```sh
cast call $L1BLOCKNUM 'implementation()'
cast call $L1BLOCKNUM 'number()'
```
1. Fix the predeploy by returning it to the previous implementation, and verify it works.
```sh
cast send --private-key $PRIVKEY $PROXY_ADMIN "upgrade(address,address)" $L1BLOCKNUM $L1BLOCKNUM_IMPLEMENTATION
cast call $L1BLOCKNUM 'number()' | cast --to-dec
```
\ No newline at end of file
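The `cast` steps above can also be scripted. The following Go sketch performs the same `upgrade(address,address)` call through go-ethereum's `ethclient`; the two predeploy addresses come from the tutorial, while the one-function ABI snippet, environment variables, and error handling are illustrative assumptions rather than official tooling:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient"
)

// Hand-written fragment covering only upgrade(address,address).
const proxyAdminABI = `[{"type":"function","name":"upgrade","stateMutability":"nonpayable",
"inputs":[{"name":"_proxy","type":"address"},{"name":"_implementation","type":"address"}],"outputs":[]}]`

func main() {
	client, err := ethclient.Dial(os.Getenv("ETH_RPC_URL"))
	if err != nil {
		log.Fatal(err)
	}
	// PRIVKEY is the admin key, hex-encoded without a 0x prefix.
	key, err := crypto.HexToECDSA(os.Getenv("PRIVKEY"))
	if err != nil {
		log.Fatal(err)
	}
	chainID, err := client.ChainID(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	auth, err := bind.NewKeyedTransactorWithChainID(key, chainID)
	if err != nil {
		log.Fatal(err)
	}
	parsed, err := abi.JSON(strings.NewReader(proxyAdminABI))
	if err != nil {
		log.Fatal(err)
	}
	proxyAdmin := common.HexToAddress("0x4200000000000000000000000000000000000018")
	l1BlockNum := common.HexToAddress("0x4200000000000000000000000000000000000013")
	contract := bind.NewBoundContract(proxyAdmin, parsed, client, client, client)
	// Upgrade the L1BlockNumber proxy to the zero address, as in the steps above.
	tx, err := contract.Transact(auth, "upgrade", l1BlockNum, common.Address{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("upgrade tx:", tx.Hash())
}
```

As with the `cast` version, the key behind `PRIVKEY` must be the `proxyAdminOwner` for the call to succeed.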
module github.com/ethereum-optimism/optimism
go 1.18
go 1.19
require (
github.com/btcsuite/btcd v0.23.3
......@@ -9,7 +9,7 @@ require (
github.com/docker/docker v20.10.21+incompatible
github.com/docker/go-connections v0.4.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum/go-ethereum v1.11.2
github.com/ethereum/go-ethereum v1.11.4
github.com/fsnotify/fsnotify v1.6.0
github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.5.9
......@@ -69,11 +69,11 @@ require (
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-kit/kit v0.10.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/golang/mock v1.6.0 // indirect
......@@ -86,7 +86,6 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-bexpr v0.1.11 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/huin/goupnp v1.1.0 // indirect
github.com/influxdata/influxdb v1.8.3 // indirect
......@@ -147,7 +146,6 @@ require (
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/prometheus/tsdb v0.10.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-18 v0.2.0 // indirect
github.com/quic-go/qtls-go1-19 v0.2.0 // indirect
......@@ -191,6 +189,6 @@ require (
nhooyr.io/websocket v1.8.7 // indirect
)
replace github.com/ethereum/go-ethereum v1.11.2 => github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230308025559-13ee9ab9153b
replace github.com/ethereum/go-ethereum v1.11.4 => github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230321002540-11f0554a4313
//replace github.com/ethereum/go-ethereum v1.11.2 => ../go-ethereum
//replace github.com/ethereum/go-ethereum v1.11.4 => ../go-ethereum
FROM --platform=$BUILDPLATFORM golang:1.18.0-alpine3.15 as builder
FROM --platform=$BUILDPLATFORM golang:1.19.0-alpine3.15 as builder
ARG VERSION=v0.0.0
......
......@@ -20,6 +20,7 @@ lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint -e "errors.As" -e "errors.Is"
fuzz:
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelConfig_CheckTimeout ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationZero ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutMaxChannelDuration ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutZeroMaxChannelDuration ./batcher
......
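The new `fuzz` targets rely on Go's native fuzzing, which is one reason for the Go 1.18+/1.19 toolchain bumps elsewhere in this merge. The real harnesses live in `op-batcher/batcher`; the hypothetical stand-in below only shows the shape such a target takes:

```go
package batcher

import (
	"testing"
	"time"
)

// FuzzDurationRoundTripSketch is an illustrative stand-in (not one of the
// real targets above): seed the corpus with f.Add, then check a property
// for every generated input.
func FuzzDurationRoundTripSketch(f *testing.F) {
	f.Add(uint64(10))
	f.Fuzz(func(t *testing.T, seconds uint64) {
		if seconds > uint64(1)<<30 {
			t.Skip() // keep the sketch clear of time.Duration overflow
		}
		d := time.Duration(seconds) * time.Second
		if uint64(d/time.Second) != seconds {
			t.Fatalf("round trip failed for %d seconds", seconds)
		}
	})
}
```

The `-run NOTAREALTEST` filter in the Makefile matches no unit test, so only the selected fuzz target executes.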
......@@ -12,8 +12,6 @@ import (
)
var (
ErrZeroMaxFrameSize = errors.New("max frame size cannot be zero")
ErrSmallMaxFrameSize = errors.New("max frame size cannot be less than 23")
ErrInvalidChannelTimeout = errors.New("channel timeout is less than the safety margin")
ErrInputTargetReached = errors.New("target amount of input data reached")
ErrMaxFrameIndex = errors.New("max frame index reached (uint16)")
......@@ -83,15 +81,15 @@ func (cc *ChannelConfig) Check() error {
// will infinitely loop when trying to create frames in the
// [channelBuilder.OutputFrames] function.
if cc.MaxFrameSize == 0 {
return ErrZeroMaxFrameSize
return errors.New("max frame size cannot be zero")
}
// If the [MaxFrameSize] is set to < 23, the channel out
// will underflow the maxSize variable in the [derive.ChannelOut].
// If the [MaxFrameSize] is less than [FrameV0OverHeadSize], the channel
// out will underflow the maxSize variable in the [derive.ChannelOut].
// Since it is of type uint64, it will wrap around to a very large
// number, making the frame size extremely large.
if cc.MaxFrameSize < 23 {
return ErrSmallMaxFrameSize
if cc.MaxFrameSize < derive.FrameV0OverHeadSize {
return fmt.Errorf("max frame size %d is less than the minimum 23", cc.MaxFrameSize)
}
return nil
......
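The rewritten check compares against `derive.FrameV0OverHeadSize` instead of a bare 23 and returns inline errors rather than the deleted sentinel values. A standalone sketch of the underflow the comment warns about, assuming the 23-byte overhead named in the error message:

```go
package main

import "fmt"

func main() {
	const frameV0OverheadSize = 23 // per the error message above
	maxFrameSize := uint64(10)     // a config value below the minimum
	maxSize := maxFrameSize - frameV0OverheadSize
	// uint64 subtraction wraps around instead of going negative,
	// yielding an "extremely large" frame budget:
	fmt.Println(maxSize) // 18446744073709551603
}
```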
......@@ -32,8 +32,7 @@ func TestPendingChannelTimeout(t *testing.T) {
require.False(t, timeout)
// Set the pending channel
err := m.ensurePendingChannel(eth.BlockID{})
require.NoError(t, err)
require.NoError(t, m.ensurePendingChannel(eth.BlockID{}))
// There are no confirmed transactions so
// the pending channel cannot be timed out
......@@ -85,14 +84,10 @@ func TestChannelManagerReturnsErrReorg(t *testing.T) {
ParentHash: common.Hash{0xff},
}, nil, nil, nil, nil)
err := m.AddL2Block(a)
require.NoError(t, err)
err = m.AddL2Block(b)
require.NoError(t, err)
err = m.AddL2Block(c)
require.NoError(t, err)
err = m.AddL2Block(x)
require.ErrorIs(t, err, ErrReorg)
require.NoError(t, m.AddL2Block(a))
require.NoError(t, m.AddL2Block(b))
require.NoError(t, m.AddL2Block(c))
require.ErrorIs(t, m.AddL2Block(x), ErrReorg)
require.Equal(t, []*types.Block{a, b, c}, m.blocks)
}
......@@ -111,16 +106,14 @@ func TestChannelManagerReturnsErrReorgWhenDrained(t *testing.T) {
a := newMiniL2Block(0)
x := newMiniL2BlockWithNumberParent(0, big.NewInt(1), common.Hash{0xff})
err := m.AddL2Block(a)
require.NoError(t, err)
require.NoError(t, m.AddL2Block(a))
_, err = m.TxData(eth.BlockID{})
_, err := m.TxData(eth.BlockID{})
require.NoError(t, err)
_, err = m.TxData(eth.BlockID{})
require.ErrorIs(t, err, io.EOF)
err = m.AddL2Block(x)
require.ErrorIs(t, err, ErrReorg)
require.ErrorIs(t, m.AddL2Block(x), ErrReorg)
}
// TestChannelManagerNextTxData checks the nextTxData function.
......@@ -136,8 +129,7 @@ func TestChannelManagerNextTxData(t *testing.T) {
// Set the pending channel
// The nextTxData function should still return EOF
// since the pending channel has no frames
err = m.ensurePendingChannel(eth.BlockID{})
require.NoError(t, err)
require.NoError(t, m.ensurePendingChannel(eth.BlockID{}))
returnedTxData, err = m.nextTxData()
require.ErrorIs(t, err, io.EOF)
require.Equal(t, txData{}, returnedTxData)
......@@ -164,8 +156,10 @@ func TestChannelManagerNextTxData(t *testing.T) {
require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID])
}
// TestClearChannelManager tests clearing the channel manager.
func TestClearChannelManager(t *testing.T) {
// TestChannelManager_Clear tests clearing the channel manager.
func TestChannelManager_Clear(t *testing.T) {
require := require.New(t)
// Create a channel manager
log := testlog.Logger(t, log.LvlCrit)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
......@@ -176,15 +170,17 @@ func TestClearChannelManager(t *testing.T) {
ChannelTimeout: 10,
// Have to set the max frame size here otherwise the channel builder would not
// be able to output any frames
MaxFrameSize: 1,
MaxFrameSize: 24,
TargetFrameSize: 24,
ApproxComprRatio: 1.0,
})
// Channel Manager state should be empty by default
require.Empty(t, m.blocks)
require.Equal(t, common.Hash{}, m.tip)
require.Nil(t, m.pendingChannel)
require.Empty(t, m.pendingTransactions)
require.Empty(t, m.confirmedTransactions)
require.Empty(m.blocks)
require.Equal(common.Hash{}, m.tip)
require.Nil(m.pendingChannel)
require.Empty(m.pendingTransactions)
require.Empty(m.confirmedTransactions)
// Add a block to the channel manager
a, _ := derivetest.RandomL2Block(rng, 4)
......@@ -193,28 +189,25 @@ func TestClearChannelManager(t *testing.T) {
Hash: a.Hash(),
Number: a.NumberU64(),
}
err := m.AddL2Block(a)
require.NoError(t, err)
require.NoError(m.AddL2Block(a))
// Make sure there is a channel builder
err = m.ensurePendingChannel(l1BlockID)
require.NoError(t, err)
require.NotNil(t, m.pendingChannel)
require.Equal(t, 0, len(m.confirmedTransactions))
require.NoError(m.ensurePendingChannel(l1BlockID))
require.NotNil(m.pendingChannel)
require.Len(m.confirmedTransactions, 0)
// Process the blocks
// We should have a pending channel with 1 frame
// and no more blocks since processBlocks consumes
// the list
err = m.processBlocks()
require.NoError(t, err)
err = m.pendingChannel.OutputFrames()
require.NoError(t, err)
_, err = m.nextTxData()
require.NoError(t, err)
require.Equal(t, 0, len(m.blocks))
require.Equal(t, newL1Tip, m.tip)
require.Equal(t, 1, len(m.pendingTransactions))
require.NoError(m.processBlocks())
require.NoError(m.pendingChannel.co.Flush())
require.NoError(m.pendingChannel.OutputFrames())
_, err := m.nextTxData()
require.NoError(err)
require.Len(m.blocks, 0)
require.Equal(newL1Tip, m.tip)
require.Len(m.pendingTransactions, 1)
// Add a new block so we can test clearing
// the channel manager with a full state
......@@ -222,20 +215,19 @@ func TestClearChannelManager(t *testing.T) {
Number: big.NewInt(1),
ParentHash: a.Hash(),
}, nil, nil, nil, nil)
err = m.AddL2Block(b)
require.NoError(t, err)
require.Equal(t, 1, len(m.blocks))
require.Equal(t, b.Hash(), m.tip)
require.NoError(m.AddL2Block(b))
require.Len(m.blocks, 1)
require.Equal(b.Hash(), m.tip)
// Clear the channel manager
m.Clear()
// Check that the entire channel manager state cleared
require.Empty(t, m.blocks)
require.Equal(t, common.Hash{}, m.tip)
require.Nil(t, m.pendingChannel)
require.Empty(t, m.pendingTransactions)
require.Empty(t, m.confirmedTransactions)
require.Empty(m.blocks)
require.Equal(common.Hash{}, m.tip)
require.Nil(m.pendingChannel)
require.Empty(m.pendingTransactions)
require.Empty(m.confirmedTransactions)
}
// TestChannelManagerTxConfirmed checks the [ChannelManager.TxConfirmed] function.
......@@ -251,8 +243,7 @@ func TestChannelManagerTxConfirmed(t *testing.T) {
// Let's add a valid pending transaction to the channel manager
// So we can demonstrate that TxConfirmed's correctness
err := m.ensurePendingChannel(eth.BlockID{})
require.NoError(t, err)
require.NoError(t, m.ensurePendingChannel(eth.BlockID{}))
channelID := m.pendingChannel.ID()
frame := frameData{
data: []byte{},
......@@ -270,7 +261,7 @@ func TestChannelManagerTxConfirmed(t *testing.T) {
require.Equal(t, expectedTxData, returnedTxData)
require.Equal(t, 0, m.pendingChannel.NumFrames())
require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID])
require.Equal(t, 1, len(m.pendingTransactions))
require.Len(t, m.pendingTransactions, 1)
// An unknown pending transaction should not be marked as confirmed
// and should not be removed from the pending transactions map
......@@ -281,14 +272,14 @@ func TestChannelManagerTxConfirmed(t *testing.T) {
blockID := eth.BlockID{Number: 0, Hash: common.Hash{0x69}}
m.TxConfirmed(unknownTxID, blockID)
require.Empty(t, m.confirmedTransactions)
require.Equal(t, 1, len(m.pendingTransactions))
require.Len(t, m.pendingTransactions, 1)
// Now let's mark the pending transaction as confirmed
// and check that it is removed from the pending transactions map
// and added to the confirmed transactions map
m.TxConfirmed(expectedChannelID, blockID)
require.Empty(t, m.pendingTransactions)
require.Equal(t, 1, len(m.confirmedTransactions))
require.Len(t, m.confirmedTransactions, 1)
require.Equal(t, blockID, m.confirmedTransactions[expectedChannelID])
}
......@@ -300,8 +291,7 @@ func TestChannelManagerTxFailed(t *testing.T) {
// Let's add a valid pending transaction to the channel
// manager so we can demonstrate correctness
err := m.ensurePendingChannel(eth.BlockID{})
require.NoError(t, err)
require.NoError(t, m.ensurePendingChannel(eth.BlockID{}))
channelID := m.pendingChannel.ID()
frame := frameData{
data: []byte{},
......@@ -319,7 +309,7 @@ func TestChannelManagerTxFailed(t *testing.T) {
require.Equal(t, expectedTxData, returnedTxData)
require.Equal(t, 0, m.pendingChannel.NumFrames())
require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID])
require.Equal(t, 1, len(m.pendingTransactions))
require.Len(t, m.pendingTransactions, 1)
// Trying to mark an unknown pending transaction as failed
// shouldn't modify state
......@@ -348,8 +338,7 @@ func TestChannelManager_TxResend(t *testing.T) {
a, _ := derivetest.RandomL2Block(rng, 4)
err := m.AddL2Block(a)
require.NoError(err)
require.NoError(m.AddL2Block(a))
txdata0, err := m.TxData(eth.BlockID{})
require.NoError(err)
......
......@@ -185,7 +185,7 @@ func (m *Metrics) RecordLatestL1Block(l1ref eth.L1BlockRef) {
m.RecordL1Ref("latest", l1ref)
}
// RecordL2BlockLoaded should be called when a new L2 block was loaded into the
// RecordL2BlocksLoaded should be called when a new L2 block was loaded into the
// channel manager (but not processed yet).
func (m *Metrics) RecordL2BlocksLoaded(l2ref eth.L2BlockRef) {
m.RecordL2Ref(StageLoaded, l2ref)
......
FROM golang:1.18.0-alpine3.15 as builder
FROM golang:1.19.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
......
package ether
import (
"fmt"
"math/big"
"math/rand"
"testing"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
......@@ -190,7 +191,7 @@ func TestMigrateBalances(t *testing.T) {
}
}
func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Address]*big.Int, allowances map[common.Address]common.Address) (*state.StateDB, DBFactory) {
func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Address]*big.Int, allowances map[common.Address]common.Address) (*state.StateDB, util.DBFactory) {
memDB := rawdb.NewMemoryDatabase()
db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
......@@ -228,11 +229,69 @@ func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Addre
}
}
// TestMigrateBalancesRandom tests that the pre-check balances function works
// TestMigrateBalancesRandomOK tests that the pre-check balances function works
// with random addresses. This test makes sure that the partition logic doesn't
// miss anything.
func TestMigrateBalancesRandom(t *testing.T) {
// miss anything, and helps detect concurrency errors.
func TestMigrateBalancesRandomOK(t *testing.T) {
for i := 0; i < 100; i++ {
addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)
db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
require.NoError(t, err)
for addr, expBal := range stateBalances {
actBal := db.GetBalance(addr)
require.EqualValues(t, expBal, actBal)
}
}
}
// TestMigrateBalancesRandomMissing tests that the pre-check balances function works
// with random addresses when some of them are missing. This helps make sure that the
// partition logic doesn't miss anything, and helps detect concurrency errors.
func TestMigrateBalancesRandomMissing(t *testing.T) {
for i := 0; i < 100; i++ {
addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)
if len(addresses) == 0 {
continue
}
// Remove a random address from the list of witnesses
idx := rand.Intn(len(addresses))
addresses = append(addresses[:idx], addresses[idx+1:]...)
db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
require.ErrorContains(t, err, "unknown storage slot")
}
for i := 0; i < 100; i++ {
addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)
if len(allowances) == 0 {
continue
}
// Remove a random allowance from the list of witnesses
idx := rand.Intn(len(allowances))
allowances = append(allowances[:idx], allowances[idx+1:]...)
db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
require.ErrorContains(t, err, "unknown storage slot")
}
}
func randAddr(t *testing.T) common.Address {
var addr common.Address
_, err := rand.Read(addr[:])
require.NoError(t, err)
return addr
}
func setupRandTest(t *testing.T) ([]common.Address, map[common.Address]*big.Int, []*crossdomain.Allowance, map[common.Address]common.Address, *big.Int) {
addresses := make([]common.Address, 0)
stateBalances := make(map[common.Address]*big.Int)
......@@ -258,99 +317,5 @@ func TestMigrateBalancesRandom(t *testing.T) {
stateAllowances[addr] = to
}
db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
require.NoError(t, err)
for addr, expBal := range stateBalances {
actBal := db.GetBalance(addr)
require.EqualValues(t, expBal, actBal)
}
}
}
func TestPartitionKeyspace(t *testing.T) {
tests := []struct {
i int
count int
expected [2]common.Hash
}{
{
i: 0,
count: 1,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 1,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
},
},
{
i: 1,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
},
{
i: 2,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
}
for _, tt := range tests {
t.Run(fmt.Sprintf("i %d, count %d", tt.i, tt.count), func(t *testing.T) {
start, end := PartitionKeyspace(tt.i, tt.count)
require.Equal(t, tt.expected[0], start)
require.Equal(t, tt.expected[1], end)
})
}
t.Run("panics on invalid i or count", func(t *testing.T) {
require.Panics(t, func() {
PartitionKeyspace(1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(0, -1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, -1)
})
})
}
func randAddr(t *testing.T) common.Address {
var addr common.Address
_, err := rand.Read(addr[:])
require.NoError(t, err)
return addr
return addresses, stateBalances, allowances, stateAllowances, totalSupply
}
......@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"math/big"
"math/rand"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
......@@ -26,11 +27,21 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)
// MaxSlotChecks is the maximum number of storage slots to check
// when validating the untouched predeploys. This limit is in place
// to bound execution time of the migration. We can parallelize this
// in the future.
const MaxSlotChecks = 1000
const (
// MaxPredeploySlotChecks is the maximum number of storage slots to check
// when validating the untouched predeploys. This limit is in place
// to bound execution time of the migration. We can parallelize this
// in the future.
MaxPredeploySlotChecks = 1000
// MaxOVMETHSlotChecks is the maximum number of OVM ETH storage slots to check
// when validating the OVM ETH migration.
MaxOVMETHSlotChecks = 5000
// OVMETHSampleLikelihood is the probability that a storage slot will be checked
// when validating the OVM ETH migration.
OVMETHSampleLikelihood = 0.1
)
type StorageCheckMap = map[common.Hash]common.Hash
......@@ -148,7 +159,7 @@ func PostCheckMigratedDB(
}
log.Info("checked L1Block")
if err := PostCheckLegacyETH(db); err != nil {
if err := PostCheckLegacyETH(prevDB, db, migrationData); err != nil {
return err
}
log.Info("checked legacy eth")
......@@ -210,7 +221,7 @@ func PostCheckUntouchables(udb state.Database, currDB *state.StateDB, prevRoot c
if err := prevDB.ForEachStorage(addr, func(key, value common.Hash) bool {
count++
expSlots[key] = value
return count < MaxSlotChecks
return count < MaxPredeploySlotChecks
}); err != nil {
return fmt.Errorf("error iterating over storage: %w", err)
}
......@@ -365,14 +376,94 @@ func PostCheckPredeployStorage(db vm.StateDB, finalSystemOwner common.Address, p
}
// PostCheckLegacyETH checks that the legacy eth migration was successful.
// It currently only checks that the total supply was set to 0.
func PostCheckLegacyETH(db vm.StateDB) error {
// It checks that the total supply was set to 0, and randomly samples storage
// slots pre- and post-migration to ensure that balances were correctly migrated.
func PostCheckLegacyETH(prevDB, migratedDB *state.StateDB, migrationData crossdomain.MigrationData) error {
allowanceSlots := make(map[common.Hash]bool)
addresses := make(map[common.Hash]common.Address)
log.Info("recomputing witness data")
for _, allowance := range migrationData.OvmAllowances {
key := ether.CalcAllowanceStorageKey(allowance.From, allowance.To)
allowanceSlots[key] = true
}
for _, addr := range migrationData.Addresses() {
addresses[ether.CalcOVMETHStorageKey(addr)] = addr
}
log.Info("checking legacy eth fixed storage slots")
for slot, expValue := range LegacyETHCheckSlots {
actValue := db.GetState(predeploys.LegacyERC20ETHAddr, slot)
actValue := migratedDB.GetState(predeploys.LegacyERC20ETHAddr, slot)
if actValue != expValue {
return fmt.Errorf("expected slot %s on %s to be %s, but got %s", slot, predeploys.LegacyERC20ETHAddr, expValue, actValue)
}
}
var count int
threshold := 100 - int(100*OVMETHSampleLikelihood)
progress := util.ProgressLogger(100, "checking legacy eth balance slots")
var innerErr error
err := prevDB.ForEachStorage(predeploys.LegacyERC20ETHAddr, func(key, value common.Hash) bool {
val := rand.Intn(100)
// Randomly sample storage slots: skip unless val lands in the top
// OVMETHSampleLikelihood fraction of the range, so each slot is
// checked with probability OVMETHSampleLikelihood.
if val < threshold {
return true
}
// Ignore fixed slots.
if _, ok := LegacyETHCheckSlots[key]; ok {
return true
}
// Ignore allowances.
if allowanceSlots[key] {
return true
}
// Grab the address, and bail if we can't find it.
addr, ok := addresses[key]
if !ok {
innerErr = fmt.Errorf("unknown OVM_ETH storage slot %s", key)
return false
}
// Pull out the pre-migration OVM ETH balance, and the state balance.
ovmETHBalance := value.Big()
ovmETHStateBalance := prevDB.GetBalance(addr)
// Pre-migration state balance should be zero.
if ovmETHStateBalance.Cmp(common.Big0) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH pre-migration state balance for %s to be 0, but got %s", addr, ovmETHStateBalance)
return false
}
// Migrated state balance should equal the OVM ETH balance.
migratedStateBalance := migratedDB.GetBalance(addr)
if migratedStateBalance.Cmp(ovmETHBalance) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH post-migration state balance for %s to be %s, but got %s", addr, ovmETHStateBalance, migratedStateBalance)
return false
}
// Migrated OVM ETH balance should be zero, since we wipe the slots.
migratedBalance := migratedDB.GetState(predeploys.LegacyERC20ETHAddr, key)
if migratedBalance.Big().Cmp(common.Big0) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH post-migration ERC20 balance for %s to be 0, but got %s", addr, migratedBalance)
return false
}
progress()
count++
// Stop iterating if we've checked enough slots.
return count < MaxOVMETHSlotChecks
})
if err != nil {
return fmt.Errorf("error iterating over OVM_ETH storage: %w", err)
}
if innerErr != nil {
return innerErr
}
return nil
}
......
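A standalone sketch of the sampling rule used in `PostCheckLegacyETH` above: each slot is independently checked with probability `OVMETHSampleLikelihood`, and `MaxOVMETHSlotChecks` caps the total (the cap is omitted here for brevity):

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const likelihood = 0.1                 // OVMETHSampleLikelihood
	threshold := 100 - int(100*likelihood) // 90
	checked := 0
	for i := 0; i < 100000; i++ {
		if rand.Intn(100) < threshold {
			continue // skipped: roughly 90% of slots
		}
		checked++ // checked: roughly 10% of slots
	}
	fmt.Printf("checked %d of 100000 slots\n", checked)
}
```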
package util
import (
"fmt"
"math/big"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
var (
// maxSlot is the maximum possible storage slot.
maxSlot = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
)
type DBFactory func() (*state.StateDB, error)
type StateCallback func(db *state.StateDB, key, value common.Hash) error
func IterateState(dbFactory DBFactory, address common.Address, cb StateCallback, workers int) error {
if workers <= 0 {
panic("workers must be greater than 0")
}
// WaitGroup to wait for all workers to finish.
var wg sync.WaitGroup
// Channel to receive errors from each iteration job.
errCh := make(chan error, workers)
// Channel to cancel all iteration jobs.
cancelCh := make(chan struct{})
worker := func(start, end common.Hash) {
// Decrement the WaitGroup when the function returns.
defer wg.Done()
db, err := dbFactory()
if err != nil {
// Should never happen, so explode if it does.
log.Crit("cannot create state db", "err", err)
}
st, err := db.StorageTrie(address)
if err != nil {
// Should never happen, so explode if it does.
log.Crit("cannot get storage trie", "address", address, "err", err)
}
// st can be nil if the account doesn't exist.
if st == nil {
errCh <- fmt.Errorf("account does not exist: %s", address.Hex())
return
}
it := trie.NewIterator(st.NodeIterator(start.Bytes()))
// Below code is largely based on db.ForEachStorage. We can't use that
// because it doesn't allow us to specify a start and end key.
for it.Next() {
select {
case <-cancelCh:
// If one of the workers encounters an error, cancel all of them.
return
default:
break
}
// Use the raw (i.e., secure hashed) key to check if we've reached
// the end of the partition. Use > rather than >= here to account for
// the fact that the values returned by PartitionKeyspace are inclusive.
// Duplicate addresses that may be returned by this iteration are
// filtered out in the collector.
if new(big.Int).SetBytes(it.Key).Cmp(end.Big()) > 0 {
return
}
// Skip if the value is empty.
rawValue := it.Value
if len(rawValue) == 0 {
continue
}
// Get the preimage.
rawKey := st.GetKey(it.Key)
if rawKey == nil {
// Should never happen, so explode if it does.
log.Crit("cannot get preimage for storage key", "key", it.Key)
}
key := common.BytesToHash(rawKey)
// Parse the raw value.
_, content, _, err := rlp.Split(rawValue)
if err != nil {
// Should never happen, so explode if it does.
log.Crit("mal-formed data in state: %v", err)
}
value := common.BytesToHash(content)
// Call the callback with the DB, key, and value. Errors get
// bubbled up to the errCh.
if err := cb(db, key, value); err != nil {
errCh <- err
return
}
}
}
for i := 0; i < workers; i++ {
wg.Add(1)
// Partition the keyspace per worker.
start, end := PartitionKeyspace(i, workers)
// Kick off our worker.
go worker(start, end)
}
wg.Wait()
for len(errCh) > 0 {
err := <-errCh
if err != nil {
return err
}
}
return nil
}
// PartitionKeyspace divides the key space into partitions by dividing the maximum keyspace
// by count then multiplying by i. This will leave some slots left over, which we handle below. It
// returns the start and end keys for the partition as a common.Hash. Note that the returned range
// of keys is inclusive, i.e., [start, end] NOT [start, end).
func PartitionKeyspace(i int, count int) (common.Hash, common.Hash) {
if i < 0 || count < 0 {
panic("i and count must be greater than 0")
}
if i > count-1 {
panic("i must be less than count - 1")
}
// Divide the key space into partitions by dividing the key space by the number
// of jobs. This will leave some slots left over, which we handle below.
partSize := new(big.Int).Div(maxSlot.Big(), big.NewInt(int64(count)))
start := common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i)), partSize))
var end common.Hash
if i < count-1 {
// If this is not the last partition, use the next partition's start key as the end.
end = common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i+1)), partSize))
} else {
// If this is the last partition, use the max slot as the end.
end = maxSlot
}
return start, end
}
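A usage sketch for the iterator (not part of the diff), assuming it sits next to `IterateState` in the same `util` package. Because each keyspace partition runs in its own goroutine, the callback must synchronize any shared state:

```go
package util

import (
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
)

// CountSlots tallies an account's storage slots with four parallel workers.
// dbFactory is whatever opens a fresh StateDB per worker, as in the tests below.
func CountSlots(dbFactory DBFactory, addr common.Address) (int, error) {
	var mu sync.Mutex
	count := 0
	err := IterateState(dbFactory, addr, func(db *state.StateDB, key, value common.Hash) error {
		mu.Lock()
		count++ // the callback runs concurrently, so guard shared state
		mu.Unlock()
		return nil
	}, 4)
	return count, err
}
```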
package util
import (
crand "crypto/rand"
"fmt"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/trie"
"github.com/stretchr/testify/require"
)
var testAddr = common.Address{0: 0xff}
func TestStateIteratorWorkers(t *testing.T) {
_, factory, _ := setupRandTest(t)
for i := -1; i <= 0; i++ {
require.Panics(t, func() {
_ = IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
return nil
}, i)
})
}
}
func TestStateIteratorNonexistentAccount(t *testing.T) {
_, factory, _ := setupRandTest(t)
require.ErrorContains(t, IterateState(factory, common.Address{}, func(db *state.StateDB, key, value common.Hash) error {
return nil
}, 1), "account does not exist")
}
func TestStateIteratorRandomOK(t *testing.T) {
for i := 0; i < 100; i++ {
hashes, factory, workerCount := setupRandTest(t)
seenHashes := make(map[common.Hash]bool)
hashCh := make(chan common.Hash)
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
for hash := range hashCh {
seenHashes[hash] = true
}
}()
require.NoError(t, IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
hashCh <- key
return nil
}, workerCount))
close(hashCh)
<-doneCh
// Perform a less or equal check here in case of duplicates. The map check below will assert
// that all of the hashes are accounted for.
require.LessOrEqual(t, len(seenHashes), len(hashes))
// Every hash we put into state should have been iterated over.
for _, hash := range hashes {
require.Contains(t, seenHashes, hash)
}
}
}
func TestStateIteratorRandomError(t *testing.T) {
for i := 0; i < 100; i++ {
hashes, factory, workerCount := setupRandTest(t)
failHash := hashes[rand.Intn(len(hashes))]
require.ErrorContains(t, IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
if key == failHash {
return fmt.Errorf("test error")
}
return nil
}, workerCount), "test error")
}
}
func TestPartitionKeyspace(t *testing.T) {
tests := []struct {
i int
count int
expected [2]common.Hash
}{
{
i: 0,
count: 1,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 1,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
},
},
{
i: 1,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
},
{
i: 2,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
}
for _, tt := range tests {
t.Run(fmt.Sprintf("i %d, count %d", tt.i, tt.count), func(t *testing.T) {
start, end := PartitionKeyspace(tt.i, tt.count)
require.Equal(t, tt.expected[0], start)
require.Equal(t, tt.expected[1], end)
})
}
t.Run("panics on invalid i or count", func(t *testing.T) {
require.Panics(t, func() {
PartitionKeyspace(1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(0, -1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, -1)
})
})
}
func setupRandTest(t *testing.T) ([]common.Hash, DBFactory, int) {
memDB := rawdb.NewMemoryDatabase()
db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
require.NoError(t, err)
hashCount := rand.Intn(100)
if hashCount == 0 {
hashCount = 1
}
hashes := make([]common.Hash, hashCount)
db.CreateAccount(testAddr)
for j := 0; j < hashCount; j++ {
hashes[j] = randHash(t)
db.SetState(testAddr, hashes[j], hashes[j])
}
root, err := db.Commit(false)
require.NoError(t, err)
err = db.Database().TrieDB().Commit(root, true)
require.NoError(t, err)
factory := func() (*state.StateDB, error) {
return state.New(root, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
}
workerCount := rand.Intn(64)
if workerCount == 0 {
workerCount = 1
}
return hashes, factory, workerCount
}
func randHash(t *testing.T) common.Hash {
var h common.Hash
_, err := crand.Read(h[:])
require.NoError(t, err)
return h
}
......@@ -35,7 +35,7 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
......@@ -146,7 +146,7 @@ func TestLargeL1Gaps(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
......
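The mechanical edits in this hunk and the ones below follow from the go-ethereum bump in go.mod above: in the v1.11 series, upstream changed `BlockChain.CurrentBlock` (and the safe/finalized accessors, with `CurrentFinalizedBlock` renamed to `CurrentFinalBlock`) to return a `*types.Header` instead of a `*types.Block`. Header fields replace the old block methods, and the full block must be fetched by hash when the transactions are needed. A sketch of the resulting access pattern, assuming a `*core.BlockChain` as these tests use:

```go
package actions

import "github.com/ethereum/go-ethereum/core"

// headAccess illustrates the post-bump access pattern.
func headAccess(chain *core.BlockChain) {
	header := chain.CurrentBlock()               // *types.Header, was *types.Block
	_ = header.BaseFee                           // field access, was block.BaseFee()
	_ = header.Number.Uint64()                   // was block.NumberU64()
	_ = header.Time                              // was block.Time()
	block := chain.GetBlockByHash(header.Hash()) // fetch the body when txs are needed
	_ = block.Transactions().Len()
}
```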
......@@ -21,7 +21,7 @@ func TestShapellaL1Fork(gt *testing.T) {
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
require.False(t, sd.L1Cfg.Config.IsShanghai(miner.l1Chain.CurrentBlock().Time()), "not active yet")
require.False(t, sd.L1Cfg.Config.IsShanghai(miner.l1Chain.CurrentBlock().Time), "not active yet")
// start op-nodes
sequencer.ActL2PipelineFull(t)
......@@ -34,7 +34,7 @@ func TestShapellaL1Fork(gt *testing.T) {
// verify Shanghai is active
l1Head := miner.l1Chain.CurrentBlock()
require.True(t, sd.L1Cfg.Config.IsShanghai(l1Head.Time()))
require.True(t, sd.L1Cfg.Config.IsShanghai(l1Head.Time))
// build L2 chain up to and including L2 blocks referencing shanghai L1 blocks
sequencer.ActL1HeadSignal(t)
......
......@@ -31,7 +31,7 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
ChainID: sd.L1Cfg.Config.ChainID,
Nonce: 0,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
......@@ -41,7 +41,8 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
// make an empty block, even though a tx may be waiting
miner.ActL1StartBlock(10)(t)
miner.ActL1EndBlock(t)
bl := miner.l1Chain.CurrentBlock()
header := miner.l1Chain.CurrentBlock()
bl := miner.l1Chain.GetBlockByHash(header.Hash())
require.Equal(t, uint64(1), bl.NumberU64())
require.Zero(gt, bl.Transactions().Len())
......@@ -49,7 +50,8 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
miner.ActL1StartBlock(10)(t)
miner.ActL1IncludeTx(dp.Addresses.Alice)(t)
miner.ActL1EndBlock(t)
bl = miner.l1Chain.CurrentBlock()
header = miner.l1Chain.CurrentBlock()
bl = miner.l1Chain.GetBlockByHash(header.Hash())
require.Equal(t, uint64(2), bl.NumberU64())
require.Equal(t, 1, bl.Transactions().Len())
require.Equal(t, tx.Hash(), bl.Transactions()[0].Hash())
......
......@@ -103,9 +103,9 @@ func (s *L1Replica) ActL1RewindDepth(depth uint64) Action {
t.InvalidAction("cannot rewind L1 past genesis (current: %d, rewind depth: %d)", head, depth)
return
}
finalized := s.l1Chain.CurrentFinalizedBlock()
if finalized != nil && head < finalized.NumberU64()+depth {
t.InvalidAction("cannot rewind head of chain past finalized block %d with rewind depth %d", finalized.NumberU64(), depth)
finalized := s.l1Chain.CurrentFinalBlock()
if finalized != nil && head < finalized.Number.Uint64()+depth {
t.InvalidAction("cannot rewind head of chain past finalized block %d with rewind depth %d", finalized.Number.Uint64(), depth)
return
}
if err := s.l1Chain.SetHead(head - depth); err != nil {
......@@ -188,7 +188,7 @@ func (s *L1Replica) UnsafeNum() uint64 {
head := s.l1Chain.CurrentBlock()
headNum := uint64(0)
if head != nil {
headNum = head.NumberU64()
headNum = head.Number.Uint64()
}
return headNum
}
......@@ -197,16 +197,16 @@ func (s *L1Replica) SafeNum() uint64 {
safe := s.l1Chain.CurrentSafeBlock()
safeNum := uint64(0)
if safe != nil {
safeNum = safe.NumberU64()
safeNum = safe.Number.Uint64()
}
return safeNum
}
func (s *L1Replica) FinalizedNum() uint64 {
finalized := s.l1Chain.CurrentFinalizedBlock()
finalized := s.l1Chain.CurrentFinalBlock()
finalizedNum := uint64(0)
if finalized != nil {
finalizedNum = finalized.NumberU64()
finalizedNum = finalized.Number.Uint64()
}
return finalizedNum
}
......@@ -219,7 +219,7 @@ func (s *L1Replica) ActL1Finalize(t Testing, num uint64) {
t.InvalidAction("need to move forward safe block before moving finalized block")
return
}
newFinalized := s.l1Chain.GetBlockByNumber(num)
newFinalized := s.l1Chain.GetHeaderByNumber(num)
if newFinalized == nil {
t.Fatalf("expected block at %d after finalized L1 block %d, safe head is ahead", num, finalizedNum)
}
......@@ -234,7 +234,7 @@ func (s *L1Replica) ActL1FinalizeNext(t Testing) {
// ActL1Safe marks the given unsafe block as safe.
func (s *L1Replica) ActL1Safe(t Testing, num uint64) {
newSafe := s.l1Chain.GetBlockByNumber(num)
newSafe := s.l1Chain.GetHeaderByNumber(num)
if newSafe == nil {
t.InvalidAction("could not find L1 block %d, cannot label it as safe", num)
return
......
......@@ -85,7 +85,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
})
syncFromA := replica1.ActL1Sync(canonL1(chainA))
// sync canonical chain A
for replica1.l1Chain.CurrentBlock().NumberU64()+1 < uint64(len(chainA)) {
for replica1.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainA)) {
syncFromA(t)
}
require.Equal(t, replica1.l1Chain.CurrentBlock().Hash(), chainA[len(chainA)-1].Hash(), "sync replica1 to head of chain A")
......@@ -94,7 +94,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
// sync new canonical chain B
syncFromB := replica1.ActL1Sync(canonL1(chainB))
for replica1.l1Chain.CurrentBlock().NumberU64()+1 < uint64(len(chainB)) {
for replica1.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainB)) {
syncFromB(t)
}
require.Equal(t, replica1.l1Chain.CurrentBlock().Hash(), chainB[len(chainB)-1].Hash(), "sync replica1 to head of chain B")
......@@ -105,7 +105,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
_ = replica2.Close()
})
syncFromOther := replica2.ActL1Sync(replica1.CanonL1Chain())
for replica2.l1Chain.CurrentBlock().NumberU64()+1 < uint64(len(chainB)) {
for replica2.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainB)) {
syncFromOther(t)
}
require.Equal(t, replica2.l1Chain.CurrentBlock().Hash(), chainB[len(chainB)-1].Hash(), "sync replica2 to head of chain B")
......
......@@ -48,7 +48,7 @@ func TestBatcher(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
......@@ -73,7 +73,7 @@ func TestBatcher(gt *testing.T) {
miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
miner.ActL1EndBlock(t)
bl := miner.l1Chain.CurrentBlock()
log.Info("bl", "txs", len(bl.Transactions()))
log.Info("bl", "txs", len(miner.l1Chain.GetBlockByHash(bl.Hash()).Transactions()))
// Now make enough L1 blocks that the verifier will have to derive a L2 block
// It will also eagerly derive the block from the batcher
......@@ -437,7 +437,7 @@ func TestBigL2Txs(gt *testing.T) {
}
sequencer.ActL1HeadSignal(t)
sequencer.ActL2StartBlock(t)
baseFee := engine.l2Chain.CurrentBlock().BaseFee() // this will go quite high, since so many consecutive blocks are filled at capacity.
baseFee := engine.l2Chain.CurrentBlock().BaseFee // this will go quite high, since so many consecutive blocks are filled at capacity.
// fill the block with large L2 txs from alice
for n := aliceNonce; ; n++ {
require.NoError(t, err)
......
......@@ -202,30 +202,30 @@ func (ea *L2EngineAPI) ForkchoiceUpdatedV1(ctx context.Context, state *eth.Forkc
// chain final and completely in PoS mode.
if state.FinalizedBlockHash != (common.Hash{}) {
// If the finalized block is not in our canonical tree, somethings wrong
finalBlock := ea.l2Chain.GetBlockByHash(state.FinalizedBlockHash)
if finalBlock == nil {
finalHeader := ea.l2Chain.GetHeaderByHash(state.FinalizedBlockHash)
if finalHeader == nil {
ea.log.Warn("Final block not available in database", "hash", state.FinalizedBlockHash)
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("final block not available in database"))
} else if rawdb.ReadCanonicalHash(ea.l2Database, finalBlock.NumberU64()) != state.FinalizedBlockHash {
} else if rawdb.ReadCanonicalHash(ea.l2Database, finalHeader.Number.Uint64()) != state.FinalizedBlockHash {
ea.log.Warn("Final block not in canonical chain", "number", block.NumberU64(), "hash", state.HeadBlockHash)
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("final block not in canonical chain"))
}
// Set the finalized block
ea.l2Chain.SetFinalized(finalBlock)
ea.l2Chain.SetFinalized(finalHeader)
}
// Check if the safe block hash is in our canonical tree, if not somethings wrong
if state.SafeBlockHash != (common.Hash{}) {
safeBlock := ea.l2Chain.GetBlockByHash(state.SafeBlockHash)
if safeBlock == nil {
safeHeader := ea.l2Chain.GetHeaderByHash(state.SafeBlockHash)
if safeHeader == nil {
ea.log.Warn("Safe block not available in database")
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("safe block not available in database"))
}
if rawdb.ReadCanonicalHash(ea.l2Database, safeBlock.NumberU64()) != state.SafeBlockHash {
if rawdb.ReadCanonicalHash(ea.l2Database, safeHeader.Number.Uint64()) != state.SafeBlockHash {
ea.log.Warn("Safe block not in canonical chain")
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("safe block not in canonical chain"))
}
// Set the safe block
ea.l2Chain.SetSafe(safeBlock)
ea.l2Chain.SetSafe(safeHeader)
}
// If payload generation was requested, create a new block to be potentially
// sealed by the beacon client. The payload will be requested later, and we
......
......@@ -107,7 +107,7 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: 0,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(engine.l2Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
GasFeeCap: new(big.Int).Add(engine.l2Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
......@@ -125,7 +125,7 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
SafeBlockHash: genesisBlock.Hash(),
FinalizedBlockHash: genesisBlock.Hash(),
}, &eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(parent.Time()) + 2,
Timestamp: eth.Uint64Quantity(parent.Time) + 2,
PrevRandao: eth.Bytes32{},
SuggestedFeeRecipient: common.Address{'C'},
Transactions: nil,
......@@ -161,12 +161,12 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
require.Equal(t, payload.BlockHash, engine.l2Chain.CurrentBlock().Hash(), "now payload is canonical")
}
buildBlock(false)
require.Zero(t, engine.l2Chain.CurrentBlock().Transactions().Len(), "no tx included")
require.Zero(t, engine.l2Chain.GetBlockByHash(engine.l2Chain.CurrentBlock().Hash()).Transactions().Len(), "no tx included")
buildBlock(true)
require.Equal(gt, 1, engine.l2Chain.CurrentBlock().Transactions().Len(), "tx from alice is included")
require.Equal(gt, 1, engine.l2Chain.GetBlockByHash(engine.l2Chain.CurrentBlock().Hash()).Transactions().Len(), "tx from alice is included")
buildBlock(false)
require.Zero(t, engine.l2Chain.CurrentBlock().Transactions().Len(), "no tx included")
require.Equal(t, uint64(3), engine.l2Chain.CurrentBlock().NumberU64(), "built 3 blocks")
require.Zero(t, engine.l2Chain.GetBlockByHash(engine.l2Chain.CurrentBlock().Hash()).Transactions().Len(), "no tx included")
require.Equal(t, uint64(3), engine.l2Chain.CurrentBlock().Number.Uint64(), "built 3 blocks")
}
func TestL2EngineAPIFail(gt *testing.T) {
......
......@@ -14,6 +14,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-proposer/metrics"
"github.com/ethereum-optimism/optimism/op-proposer/proposer"
opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
......@@ -60,7 +61,7 @@ func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Cl
SignerFnFactory: signer,
}
dr, err := proposer.NewL2OutputSubmitter(proposerCfg, log)
dr, err := proposer.NewL2OutputSubmitter(proposerCfg, log, metrics.NoopMetrics)
require.NoError(t, err)
return &L2Proposer{
......
......@@ -55,7 +55,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
......@@ -76,7 +76,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
origin := miner.l1Chain.CurrentBlock()
// L2 makes blocks to catch up
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime < origin.Time() {
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime < origin.Time {
makeL2BlockWithAliceTx()
require.Equal(t, uint64(0), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "no L1 origin change before time matches")
}
......@@ -89,7 +89,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
sequencer.ActL1HeadSignal(t)
// Make blocks up till the sequencer drift is about to surpass, but keep the old L1 origin
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time()+sd.RollupCfg.MaxSequencerDrift {
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.RollupCfg.MaxSequencerDrift {
sequencer.ActL2KeepL1Origin(t)
makeL2BlockWithAliceTx()
require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin")
......
......@@ -41,20 +41,20 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) {
miner.ActL1SetFeeRecipient(common.Address{'A'})
// Make two sequence windows' worth of empty L1 blocks. After we pass the first sequence window, the L2 chain should get blocks
for miner.l1Chain.CurrentBlock().NumberU64() < sd.RollupCfg.SeqWindowSize*2 {
for miner.l1Chain.CurrentBlock().Number.Uint64() < sd.RollupCfg.SeqWindowSize*2 {
miner.ActL1StartBlock(10)(t)
miner.ActL1EndBlock(t)
verifier.ActL2PipelineFull(t)
l1Head := miner.l1Chain.CurrentBlock().NumberU64()
l1Head := miner.l1Chain.CurrentBlock().Number.Uint64()
expectedL1Origin := uint64(0)
// as soon as we complete the sequence window, we force-adopt the L1 origin
if l1Head >= sd.RollupCfg.SeqWindowSize {
expectedL1Origin = l1Head - sd.RollupCfg.SeqWindowSize
}
require.Equal(t, expectedL1Origin, verifier.SyncStatus().SafeL2.L1Origin.Number, "L1 origin is forced in, given enough L1 blocks pass by")
require.LessOrEqual(t, miner.l1Chain.GetBlockByNumber(expectedL1Origin).Time(), engine.l2Chain.CurrentBlock().Time(), "L2 time higher than L1 origin time")
require.LessOrEqual(t, miner.l1Chain.GetBlockByNumber(expectedL1Origin).Time(), engine.l2Chain.CurrentBlock().Time, "L2 time higher than L1 origin time")
}
tip2N := verifier.SyncStatus()
......@@ -75,7 +75,7 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) {
verifier.ActL2PipelineFull(t)
require.Equal(t, tip2N.SafeL2, verifier.SyncStatus().SafeL2)
for miner.l1Chain.CurrentBlock().NumberU64() < sd.RollupCfg.SeqWindowSize*2 {
for miner.l1Chain.CurrentBlock().Number.Uint64() < sd.RollupCfg.SeqWindowSize*2 {
miner.ActL1StartBlock(10)(t)
miner.ActL1EndBlock(t)
}
......
......@@ -80,7 +80,7 @@ func TestBatcherKeyRotation(gt *testing.T) {
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.SysCfgOwner)(t)
miner.ActL1EndBlock(t)
cfgChangeL1BlockNum := miner.l1Chain.CurrentBlock().NumberU64()
cfgChangeL1BlockNum := miner.l1Chain.CurrentBlock().Number.Uint64()
// sequence L2 blocks, and submit with new batcher
sequencer.ActL1HeadSignal(t)
......@@ -200,7 +200,7 @@ func TestGPOParamsChange(gt *testing.T) {
miner.ActEmptyBlock(t)
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
basefee := miner.l1Chain.CurrentBlock().BaseFee()
basefee := miner.l1Chain.CurrentBlock().BaseFee
// alice makes a L2 tx, sequencer includes it
alice.ActResetTxOpts(t)
......@@ -238,7 +238,7 @@ func TestGPOParamsChange(gt *testing.T) {
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.SysCfgOwner)(t)
miner.ActL1EndBlock(t)
basefeeGPOUpdate := miner.l1Chain.CurrentBlock().BaseFee()
basefeeGPOUpdate := miner.l1Chain.CurrentBlock().BaseFee
// build empty L2 chain, up to but excluding the L2 block with the L1 origin that processes the GPO change
sequencer.ActL1HeadSignal(t)
......@@ -274,7 +274,7 @@ func TestGPOParamsChange(gt *testing.T) {
// build more L2 blocks, with new L1 origin
miner.ActEmptyBlock(t)
basefee = miner.l1Chain.CurrentBlock().BaseFee()
basefee = miner.l1Chain.CurrentBlock().BaseFee
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
// and Alice makes a tx again
......@@ -313,7 +313,7 @@ func TestGasLimitChange(gt *testing.T) {
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
oldGasLimit := seqEngine.l2Chain.CurrentBlock().GasLimit()
oldGasLimit := seqEngine.l2Chain.CurrentBlock().GasLimit
require.Equal(t, oldGasLimit, uint64(dp.DeployConfig.L2GenesisBlockGasLimit))
// change gas limit on L1 to triple what it was
......@@ -335,12 +335,12 @@ func TestGasLimitChange(gt *testing.T) {
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1HeadExcl(t)
require.Equal(t, oldGasLimit, seqEngine.l2Chain.CurrentBlock().GasLimit())
require.Equal(t, oldGasLimit, seqEngine.l2Chain.CurrentBlock().GasLimit)
require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number)
// now include the L1 block with the gaslimit change, and see if it changes as expected
sequencer.ActBuildToL1Head(t)
require.Equal(t, oldGasLimit*3, seqEngine.l2Chain.CurrentBlock().GasLimit())
require.Equal(t, oldGasLimit*3, seqEngine.l2Chain.CurrentBlock().GasLimit)
require.Equal(t, uint64(2), sequencer.SyncStatus().UnsafeL2.L1Origin.Number)
// now submit all this to L1, and see if a verifier can sync and reproduce it
......
......@@ -132,7 +132,7 @@ func runCrossLayerUserTest(gt *testing.T, test regolithScheduledTest) {
seq.ActL1HeadSignal(t)
// sync the sequencer and build enough blocks to adopt the latest L1 origin
for seq.SyncStatus().UnsafeL2.L1Origin.Number < miner.l1Chain.CurrentBlock().NumberU64() {
for seq.SyncStatus().UnsafeL2.L1Origin.Number < miner.l1Chain.CurrentBlock().Number.Uint64() {
seq.ActL2StartBlock(t)
seq.ActL2EndBlock(t)
}
......
......@@ -169,7 +169,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
if alloc.PrefundTestUsers {
for _, addr := range deployParams.Addresses.All() {
l1Genesis.Alloc[addr] = core.GenesisAccount{
Balance: Ether(1e6),
Balance: Ether(1e12),
}
}
}
......@@ -184,7 +184,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
if alloc.PrefundTestUsers {
for _, addr := range deployParams.Addresses.All() {
l2Genesis.Alloc[addr] = core.GenesisAccount{
Balance: Ether(1e6),
Balance: Ether(1e12),
}
}
}
......
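For scale on the prefund bump above: `e2eutils.Ether` converts whole ether to wei, so the change moves each test account from 10^6 to 10^12 ETH. A hedged sketch of the helper's likely shape (the exact signature in e2eutils may differ):

package sketch

import (
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

// Ether converts a whole-ether amount to wei (params.Ether == 10^18).
func Ether(amount uint64) *big.Int {
	return new(big.Int).Mul(new(big.Int).SetUint64(amount), big.NewInt(params.Ether))
}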
......@@ -27,10 +27,10 @@ func TestSetup(t *testing.T) {
alloc := &AllocParams{PrefundTestUsers: true}
sd := Setup(t, dp, alloc)
require.Contains(t, sd.L1Cfg.Alloc, dp.Addresses.Alice)
require.Equal(t, sd.L1Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e6))
require.Equal(t, sd.L1Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e12))
require.Contains(t, sd.L2Cfg.Alloc, dp.Addresses.Alice)
require.Equal(t, sd.L2Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e6))
require.Equal(t, sd.L2Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e12))
require.Contains(t, sd.L1Cfg.Alloc, predeploys.DevOptimismPortalAddr)
require.Contains(t, sd.L2Cfg.Alloc, predeploys.L1BlockAddr)
......
......@@ -160,11 +160,11 @@ func (f *fakeSafeFinalizedL1) Start() error {
case head := <-headChanges:
num := head.Block.NumberU64()
if num > f.finalizedDistance {
toFinalize := f.eth.BlockChain().GetBlockByNumber(num - f.finalizedDistance)
toFinalize := f.eth.BlockChain().GetHeaderByNumber(num - f.finalizedDistance)
f.eth.BlockChain().SetFinalized(toFinalize)
}
if num > f.safeDistance {
toSafe := f.eth.BlockChain().GetBlockByNumber(num - f.safeDistance)
toSafe := f.eth.BlockChain().GetHeaderByNumber(num - f.safeDistance)
f.eth.BlockChain().SetSafe(toSafe)
}
case <-quit:
......
......@@ -15,6 +15,7 @@ import (
batchermetrics "github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/sources"
proposermetrics "github.com/ethereum-optimism/optimism/op-proposer/metrics"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
......@@ -363,7 +364,7 @@ func TestMigration(t *testing.T) {
Format: "text",
},
PrivateKey: hexPriv(secrets.Proposer),
}, lgr.New("module", "proposer"))
}, lgr.New("module", "proposer"), proposermetrics.NoopMetrics)
require.NoError(t, err)
t.Cleanup(func() {
proposer.Stop()
......
......@@ -37,6 +37,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
proposermetrics "github.com/ethereum-optimism/optimism/op-proposer/metrics"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
)
......@@ -579,7 +580,7 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
Format: "text",
},
PrivateKey: hexPriv(cfg.Secrets.Proposer),
}, sys.cfg.Loggers["proposer"])
}, sys.cfg.Loggers["proposer"], proposermetrics.NoopMetrics)
if err != nil {
return nil, fmt.Errorf("unable to setup l2 output submitter: %w", err)
}
......
FROM golang:1.18.0-alpine3.15 as builder
FROM golang:1.19.0-alpine3.15 as builder
# build from root of repo
COPY ./op-exporter /app
......
FROM --platform=$BUILDPLATFORM golang:1.18.0-alpine3.15 as builder
FROM --platform=$BUILDPLATFORM golang:1.19.0-alpine3.15 as builder
ARG VERSION=v0.0.0
......
......@@ -14,9 +14,17 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
var ErrMaxFrameSizeTooSmall = errors.New("maxSize is too small to fit the fixed frame overhead")
var ErrNotDepositTx = errors.New("first transaction in block is not a deposit tx")
var ErrTooManyRLPBytes = errors.New("batch would cause RLP bytes to go over limit")
// FrameV0OverHeadSize is the absolute minimum size of a frame.
// This is the fixed overhead frame size, calculated as specified
// in the [Frame Format] specs: 16 + 2 + 4 + 1 = 23 bytes.
//
// [Frame Format]: https://github.com/ethereum-optimism/optimism/blob/develop/specs/derivation.md#frame-format
const FrameV0OverHeadSize = 23
type ChannelOut struct {
id ChannelID
// Frame ID of the next frame to emit. Increment after emitting
......@@ -141,19 +149,23 @@ func (co *ChannelOut) Close() error {
// OutputFrame writes a frame to w with a given max size and returns the frame
// number.
// Use `ReadyBytes`, `Flush`, and `Close` to modify the ready buffer.
// Returns io.EOF when the channel is closed & there are no more frames
// Returns an error if the `maxSize` < FrameV0OverHeadSize.
// Returns io.EOF when the channel is closed & there are no more frames.
// Returns nil if there is still more buffered data.
// Returns and error if it ran into an error during processing.
// Returns an error if it ran into an error during processing.
func (co *ChannelOut) OutputFrame(w *bytes.Buffer, maxSize uint64) (uint16, error) {
f := Frame{
ID: co.id,
FrameNumber: uint16(co.frame),
}
// Check that the maxSize is large enough for the frame overhead size.
if maxSize < FrameV0OverHeadSize {
return 0, ErrMaxFrameSizeTooSmall
}
// Copy data from the local buffer into the frame data buffer
// Don't go past the maxSize with the fixed frame overhead.
// Fixed overhead: 16 + 2 + 4 + 1 = 23 bytes.
maxDataSize := maxSize - 23
maxDataSize := maxSize - FrameV0OverHeadSize
if maxDataSize > uint64(co.buf.Len()) {
maxDataSize = uint64(co.buf.Len())
// If we are closed & will not spill past the current frame
......
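The 23-byte figure behind `FrameV0OverHeadSize` follows directly from the frame format in the linked derivation spec: channel_id (16) ++ frame_number (2) ++ frame_data_length (4) ++ is_last (1). A small self-contained sketch of that arithmetic:

package main

import "fmt"

// Per the frame format spec, a v0 frame is laid out as:
//   channel_id ++ frame_number ++ frame_data_length ++ frame_data ++ is_last
const (
	channelIDSize       = 16 // opaque channel identifier
	frameNumberSize     = 2  // uint16
	frameDataLengthSize = 4  // uint32 length prefix for frame_data
	isLastSize          = 1  // single boolean byte
)

func main() {
	overhead := channelIDSize + frameNumberSize + frameDataLengthSize + isLastSize
	fmt.Println(overhead) // 23, matching FrameV0OverHeadSize
}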
......@@ -29,6 +29,22 @@ func TestChannelOutAddBlock(t *testing.T) {
})
}
// TestOutputFrameSmallMaxSize tests that calling [OutputFrame] with a max
// size below the fixed frame overhead of 23 bytes returns an error.
func TestOutputFrameSmallMaxSize(t *testing.T) {
cout, err := NewChannelOut()
require.NoError(t, err)
// Call OutputFrame with the full range of max-size values that should error
var w bytes.Buffer
for i := 0; i < 23; i++ {
fid, err := cout.OutputFrame(&w, uint64(i))
require.ErrorIs(t, err, ErrMaxFrameSizeTooSmall)
require.Zero(t, fid)
}
}
// TestRLPByteLimit ensures that stream encoder is properly limiting the length.
// It will decode the input if `len(input) <= inputLimit`.
func TestRLPByteLimit(t *testing.T) {
......
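Taken together, the updated doc comment and the new test pin down OutputFrame's contract. A hedged usage sketch, under the assumption that the final frame is written to the buffer before io.EOF is returned:

package sketch

import (
	"bytes"
	"io"

	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)

// drainFrames drains a closed ChannelOut into frames of at most maxSize
// bytes each: nil means more buffered data remains, io.EOF means the
// channel has been fully emitted.
func drainFrames(co *derive.ChannelOut, maxSize uint64) ([][]byte, error) {
	var frames [][]byte
	for {
		var buf bytes.Buffer
		if _, err := co.OutputFrame(&buf, maxSize); err == io.EOF {
			frames = append(frames, buf.Bytes())
			return frames, nil
		} else if err != nil {
			// Includes ErrMaxFrameSizeTooSmall whenever maxSize < 23.
			return nil, err
		}
		frames = append(frames, buf.Bytes())
	}
}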
FROM --platform=$BUILDPLATFORM golang:1.18.0-alpine3.15 as builder
FROM --platform=$BUILDPLATFORM golang:1.19.0-alpine3.15 as builder
ARG VERSION=v0.0.0
......
package metrics
import (
"context"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
)
const Namespace = "op_proposer"
type Metricer interface {
RecordInfo(version string)
RecordUp()
// Records all L1 and L2 block events
opmetrics.RefMetricer
RecordL2BlocksProposed(l2ref eth.L2BlockRef)
}
type Metrics struct {
ns string
registry *prometheus.Registry
factory opmetrics.Factory
opmetrics.RefMetrics
Info prometheus.GaugeVec
Up prometheus.Gauge
}
var _ Metricer = (*Metrics)(nil)
func NewMetrics(procName string) *Metrics {
if procName == "" {
procName = "default"
}
ns := Namespace + "_" + procName
registry := opmetrics.NewRegistry()
factory := opmetrics.With(registry)
return &Metrics{
ns: ns,
registry: registry,
factory: factory,
RefMetrics: opmetrics.MakeRefMetrics(ns, factory),
Info: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "info",
Help: "Pseudo-metric tracking version and config info",
}, []string{
"version",
}),
Up: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "up",
Help: "1 if the op-proposer has finished starting up",
}),
}
}
func (m *Metrics) Serve(ctx context.Context, host string, port int) error {
return opmetrics.ListenAndServe(ctx, m.registry, host, port)
}
func (m *Metrics) StartBalanceMetrics(ctx context.Context,
l log.Logger, client *ethclient.Client, account common.Address) {
opmetrics.LaunchBalanceMetrics(ctx, l, m.registry, m.ns, client, account)
}
// RecordInfo sets a pseudo-metric that contains versioning and
// config info for the op-proposer.
func (m *Metrics) RecordInfo(version string) {
m.Info.WithLabelValues(version).Set(1)
}
// RecordUp sets the up metric to 1.
func (m *Metrics) RecordUp() {
prometheus.MustRegister()
m.Up.Set(1)
}
const (
BlockProposed = "proposed"
)
// RecordL2BlocksProposed should be called when a new L2 block is proposed
func (m *Metrics) RecordL2BlocksProposed(l2ref eth.L2BlockRef) {
m.RecordL2Ref(BlockProposed, l2ref)
}
package metrics
import (
"github.com/ethereum-optimism/optimism/op-node/eth"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
)
type noopMetrics struct{ opmetrics.NoopRefMetrics }
var NoopMetrics Metricer = new(noopMetrics)
func (*noopMetrics) RecordInfo(version string) {}
func (*noopMetrics) RecordUp() {}
func (*noopMetrics) RecordL2BlocksProposed(l2ref eth.L2BlockRef) {}
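The Metricer interface plus this no-op implementation let production code record real Prometheus metrics while tests inject metrics.NoopMetrics, as the e2e and actor-test hunks above do. A minimal sketch of both call sites (the version string is a placeholder):

package main

import "github.com/ethereum-optimism/optimism/op-proposer/metrics"

func main() {
	// Production path, as wired in Main(): a registry-backed Metricer.
	var m metrics.Metricer = metrics.NewMetrics("default")
	m.RecordInfo("v0.0.0") // placeholder version string
	m.RecordUp()

	// Test path: the no-op implementation satisfies the same interface.
	metrics.NoopMetrics.RecordUp()
}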
......@@ -24,9 +24,9 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-proposer/metrics"
opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
......@@ -49,9 +49,10 @@ func Main(version string, cliCtx *cli.Context) error {
}
l := oplog.NewLogger(cfg.LogConfig)
m := metrics.NewMetrics("default")
l.Info("Initializing L2 Output Submitter")
l2OutputSubmitter, err := NewL2OutputSubmitterFromCLIConfig(cfg, l)
l2OutputSubmitter, err := NewL2OutputSubmitterFromCLIConfig(cfg, l, m)
if err != nil {
l.Error("Unable to create the L2 Output Submitter", "error", err)
return err
......@@ -78,17 +79,15 @@ func Main(version string, cliCtx *cli.Context) error {
}()
}
registry := opmetrics.NewRegistry()
metricsCfg := cfg.MetricsConfig
if metricsCfg.Enabled {
l.Info("starting metrics server", "addr", metricsCfg.ListenAddr, "port", metricsCfg.ListenPort)
go func() {
if err := opmetrics.ListenAndServe(ctx, registry, metricsCfg.ListenAddr, metricsCfg.ListenPort); err != nil {
if err := m.Serve(ctx, metricsCfg.ListenAddr, metricsCfg.ListenPort); err != nil {
l.Error("error starting metrics server", err)
}
}()
addr := l2OutputSubmitter.from
opmetrics.LaunchBalanceMetrics(ctx, l, registry, "", l2OutputSubmitter.l1Client, addr)
m.StartBalanceMetrics(ctx, l, l2OutputSubmitter.l1Client, l2OutputSubmitter.from)
}
rpcCfg := cfg.RPCConfig
......@@ -98,6 +97,9 @@ func Main(version string, cliCtx *cli.Context) error {
return fmt.Errorf("error starting RPC server: %w", err)
}
m.RecordInfo(version)
m.RecordUp()
interruptChannel := make(chan os.Signal, 1)
signal.Notify(interruptChannel, []os.Signal{
os.Interrupt,
......@@ -117,6 +119,7 @@ type L2OutputSubmitter struct {
wg sync.WaitGroup
done chan struct{}
log log.Logger
metr metrics.Metricer
ctx context.Context
cancel context.CancelFunc
......@@ -143,7 +146,7 @@ type L2OutputSubmitter struct {
}
// NewL2OutputSubmitterFromCLIConfig creates a new L2 Output Submitter given the CLI Config
func NewL2OutputSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger) (*L2OutputSubmitter, error) {
func NewL2OutputSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger, m metrics.Metricer) (*L2OutputSubmitter, error) {
signer, fromAddress, err := opcrypto.SignerFactoryFromConfig(l, cfg.PrivateKey, cfg.Mnemonic, cfg.L2OutputHDPath, cfg.SignerConfig)
if err != nil {
return nil, err
......@@ -185,11 +188,11 @@ func NewL2OutputSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger) (*L2OutputSu
SignerFnFactory: signer,
}
return NewL2OutputSubmitter(proposerCfg, l)
return NewL2OutputSubmitter(proposerCfg, l, m)
}
// NewL2OutputSubmitter creates a new L2 Output Submitter
func NewL2OutputSubmitter(cfg Config, l log.Logger) (*L2OutputSubmitter, error) {
func NewL2OutputSubmitter(cfg Config, l log.Logger, m metrics.Metricer) (*L2OutputSubmitter, error) {
ctx, cancel := context.WithCancel(context.Background())
cCtx, cCancel := context.WithTimeout(ctx, defaultDialTimeout)
......@@ -228,6 +231,7 @@ func NewL2OutputSubmitter(cfg Config, l log.Logger) (*L2OutputSubmitter, error)
log: l,
ctx: ctx,
cancel: cancel,
metr: m,
l1Client: cfg.L1Client,
rollupClient: cfg.RollupClient,
......@@ -413,9 +417,9 @@ func (l *L2OutputSubmitter) loop() {
l.log.Error("Failed to send proposal transaction", "err", err)
cancel()
break
} else {
cancel()
}
l.metr.RecordL2BlocksProposed(output.BlockRef)
cancel()
case <-l.done:
return
......
FROM golang:1.18.0-alpine3.15 as builder
FROM golang:1.19.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers
......
......@@ -10,6 +10,7 @@ import (
"path/filepath"
"strings"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash"
......@@ -80,11 +81,11 @@ type HeadFn func(headState *state.StateDB) error
// and updates the blockchain's header indexes to reflect the new state root, so geth will believe the cheat
// (unless it ever re-applies the block).
func (ch *Cheater) RunAndClose(fn HeadFn) error {
preBlock := ch.Blockchain.CurrentBlock()
if a, b := preBlock.NumberU64(), ch.Blockchain.Genesis().NumberU64(); a <= b {
preHeader := ch.Blockchain.CurrentBlock()
if a, b := preHeader.Number.Uint64(), ch.Blockchain.Genesis().NumberU64(); a <= b {
return fmt.Errorf("cheating at genesis (head block %d <= genesis block %d) is not supported", a, b)
}
state, err := ch.Blockchain.StateAt(preBlock.Root())
state, err := ch.Blockchain.StateAt(preHeader.Root)
if err != nil {
_ = ch.Close()
return fmt.Errorf("failed to look up head state: %w", err)
......@@ -103,7 +104,7 @@ func (ch *Cheater) RunAndClose(fn HeadFn) error {
_ = ch.Close()
return fmt.Errorf("failed to commit state change: %w", err)
}
header := preBlock.Header()
header := types.CopyHeader(preHeader) // deep-copy the header; plain assignment would alias the canonical header
header.Root = stateRoot
blockHash := header.Hash()
......@@ -115,14 +116,15 @@ func (ch *Cheater) RunAndClose(fn HeadFn) error {
// based on core.BlockChain.writeHeadBlock:
// Add the block to the canonical chain number scheme and mark as the head
batch := ch.DB.NewBatch()
if ch.Blockchain.CurrentFinalizedBlock().Hash() == preBlock.Hash() {
preID := eth.BlockID{Hash: preHeader.Hash(), Number: preHeader.Number.Uint64()}
if ch.Blockchain.CurrentFinalBlock().Hash() == preID.Hash {
rawdb.WriteFinalizedBlockHash(batch, blockHash)
}
rawdb.DeleteHeaderNumber(batch, preBlock.Hash())
rawdb.DeleteHeaderNumber(batch, preHeader.Hash())
rawdb.WriteHeadHeaderHash(batch, blockHash)
rawdb.WriteHeadFastBlockHash(batch, blockHash)
rawdb.WriteCanonicalHash(batch, blockHash, preBlock.NumberU64())
rawdb.WriteHeaderNumber(batch, blockHash, preBlock.NumberU64())
rawdb.WriteCanonicalHash(batch, blockHash, preID.Number)
rawdb.WriteHeaderNumber(batch, blockHash, preID.Number)
rawdb.WriteHeader(batch, header)
// not keyed by blockhash, and we didn't remove any txs, so we just leave this one as-is.
// rawdb.WriteTxLookupEntriesByBlock(batch, block)
......@@ -131,17 +133,17 @@ func (ch *Cheater) RunAndClose(fn HeadFn) error {
// Geth stores the TD for each block separately from the block itself. We must update this
// manually, otherwise Geth thinks we haven't reached TTD yet and tries to build a block
// using Clique consensus, which causes a panic.
rawdb.WriteTd(batch, blockHash, preBlock.NumberU64(), ch.Blockchain.GetTd(preBlock.Hash(), preBlock.NumberU64()))
rawdb.WriteTd(batch, blockHash, preID.Number, ch.Blockchain.GetTd(preID.Hash, preID.Number))
// Need to copy over receipts since they are keyed by block hash.
receipts := rawdb.ReadReceipts(ch.DB, preBlock.Hash(), preBlock.NumberU64(), ch.Blockchain.Config())
rawdb.WriteReceipts(batch, blockHash, preBlock.NumberU64(), receipts)
receipts := rawdb.ReadReceipts(ch.DB, preID.Hash, preID.Number, ch.Blockchain.Config())
rawdb.WriteReceipts(batch, blockHash, preID.Number, receipts)
// Geth maintains an internal mapping between block bodies and their hashes. None of the database
// accessors above update this mapping, so we need to do it manually.
oldKey := blockBodyKey(preBlock.NumberU64(), preBlock.Hash())
oldBody := rawdb.ReadBodyRLP(ch.DB, preBlock.Hash(), preBlock.NumberU64())
newKey := blockBodyKey(preBlock.NumberU64(), blockHash)
oldKey := blockBodyKey(preID.Number, preID.Hash)
oldBody := rawdb.ReadBodyRLP(ch.DB, preID.Hash, preID.Number)
newKey := blockBodyKey(preID.Number, blockHash)
if err := batch.Delete(oldKey); err != nil {
return fmt.Errorf("error deleting old block body key: %w", err)
}
......
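blockBodyKey, referenced above, must mirror go-ethereum's rawdb schema for body entries or the manual remapping would write to the wrong slot. A hedged reconstruction of that key layout, assuming geth's 'b' prefix and 8-byte big-endian number encoding:

package sketch

import (
	"encoding/binary"

	"github.com/ethereum/go-ethereum/common"
)

// blockBodyKey reconstructs the rawdb body key: 'b' ++ big-endian
// block number ++ block hash.
func blockBodyKey(number uint64, hash common.Hash) []byte {
	enc := make([]byte, 8)
	binary.BigEndian.PutUint64(enc, number)
	return append(append([]byte("b"), enc...), hash.Bytes()...)
}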
......@@ -16,7 +16,7 @@ WORKDIR /opt/foundry
# The only diff from the upstream Docker image is this clone instead
# of COPY. We pin a specific commit to build from.
RUN git clone https://github.com/foundry-rs/foundry.git . \
&& git checkout 8f3fca9c608d58981daaffe11e7f8076644cb753
&& git checkout da2392e58bb8a7fefeba46b40c4df1afad8ccd22
RUN source $HOME/.profile && \
cargo build --release && \
......
ignores: [
"@babel/eslint-parser",
"@typescript-eslint/parser",
"eslint-plugin-import",
"eslint-plugin-unicorn",
"eslint-plugin-jsdoc",
"eslint-plugin-prefer-arrow",
"eslint-plugin-react",
"@typescript-eslint/eslint-plugin",
"eslint-config-prettier",
"eslint-plugin-prettier"
]
......@@ -17,7 +17,7 @@
"lint": "yarn lint:fix && yarn lint:check",
"pre-commit": "lint-staged",
"test": "ts-mocha test/*.spec.ts",
"test:coverage": "echo 'no coverage'"
"test:coverage": "nyc ts-mocha test/*.spec.ts && nyc merge .nyc_output coverage.json"
},
"keywords": [
"optimism",
......@@ -48,8 +48,7 @@
"pino": "^6.11.3",
"pino-multi-stream": "^5.3.0",
"pino-sentry": "^0.7.0",
"prom-client": "^13.1.0",
"qs": "^6.10.5"
"prom-client": "^13.1.0"
},
"devDependencies": {
"@ethersproject/abstract-provider": "^5.7.0",
......
......@@ -3,11 +3,11 @@ import request from 'supertest'
import chai = require('chai')
const expect = chai.expect
import { Logger, Metrics, createMetricsServer } from '../src'
import { Logger, LegacyMetrics, createMetricsServer } from '../src'
describe('Metrics', () => {
it('should serve metrics', async () => {
const metrics = new Metrics({
const metrics = new LegacyMetrics({
prefix: 'test_metrics',
})
const registry = metrics.registry
......