Commit 4551be44 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into madhur/op-proposer/add-l2-proposed-metrics

parents 595d89c3 6cc0ca23
---
'@eth-optimism/sdk': patch
---
Have SDK automatically create Standard and ETH bridges when L1StandardBridge is provided.
---
'@eth-optimism/contracts-bedrock': patch
---
Added a constructor to the System Dictator
---
'@eth-optimism/batch-submitter-service': patch
---
fix flag name for MaxStateRootElements in batch-submitter
fix log package for proposer
......@@ -518,6 +518,10 @@ jobs:
patterns: packages
# Note: The below needs to be manually configured whenever we
# add a new package to CI.
- run:
name: Check common-ts
command: npx depcheck
working_directory: packages/common-ts
- run:
name: Check contracts
command: npx depcheck
......
......@@ -66,6 +66,7 @@ You'll need the following:
* [Yarn](https://classic.yarnpkg.com/en/docs/install)
* [Docker](https://docs.docker.com/get-docker/)
* [Docker Compose](https://docs.docker.com/compose/install/)
* [Go](https://go.dev/dl/)
* [Foundry](https://getfoundry.sh)
### Setup
......
......@@ -209,7 +209,7 @@ func NewConfig(ctx *cli.Context) (Config, error) {
MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeFlag.Name),
MaxPlaintextBatchSize: ctx.GlobalUint64(flags.MaxPlaintextBatchSizeFlag.Name),
MinStateRootElements: ctx.GlobalUint64(flags.MinStateRootElementsFlag.Name),
MaxStateRootElements: ctx.GlobalUint64(flags.MinStateRootElementsFlag.Name),
MaxStateRootElements: ctx.GlobalUint64(flags.MaxStateRootElementsFlag.Name),
MaxBatchSubmissionTime: ctx.GlobalDuration(flags.MaxBatchSubmissionTimeFlag.Name),
PollInterval: ctx.GlobalDuration(flags.PollIntervalFlag.Name),
NumConfirmations: ctx.GlobalUint64(flags.NumConfirmationsFlag.Name),
......
......@@ -13,13 +13,13 @@ import (
"github.com/ethereum-optimism/optimism/bss-core/metrics"
"github.com/ethereum-optimism/optimism/bss-core/txmgr"
l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient"
"github.com/ethereum-optimism/optimism/l2geth/log"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
)
// stateRootSize is the size in bytes of a state root.
......
......@@ -150,6 +150,7 @@ module.exports = {
'/docs/build/getting-started.md',
'/docs/build/conf.md',
'/docs/build/explorer.md',
'/docs/build/sdk.md',
{
title: "OP Stack Hacks",
collapsable: true,
......@@ -165,6 +166,7 @@ module.exports = {
children: [
"/docs/build/tutorials/add-attr.md",
"/docs/build/tutorials/new-precomp.md",
"/docs/build/tutorials/predeploys.md"
]
} // End of tutorials
],
......
import event from '@vuepress/plugin-pwa/lib/event'
export default ({ router }) => {
registerAutoReload();
router.addRoutes([
{ path: '/docs/', redirect: '/' },
])
}
// When new content is detected by the app, this will automatically
// refresh the page, so that users do not need to manually click
// the refresh button. For more details see:
// https://linear.app/optimism/issue/FE-1003/investigate-archive-issue-on-docs
const registerAutoReload = () => {
event.$on('sw-updated', e => e.skipWaiting().then(() => {
location.reload(true);
}))
}
......@@ -32,6 +32,11 @@
<i class="fab fa-discord"></i> Discord community
</a>
</li>
<li>
<a href="https://wkf.ms/3XTdpLl" target="_blank" rel="noopener noreferrer">
<i class="far fa-comment-dots"></i> Get support for going live
</a>
</li>
</ul>
</div>
</div>
......
......@@ -39,12 +39,11 @@ This tutorial was checked on:
| Software | Version | Installation command(s) |
| -------- | ---------- | - |
| Ubuntu | 20.04 LTS | |
| git | OS default | |
| make | 4.2.1-1.2 | `sudo apt install -y make`
| git, curl, and make | OS default | `sudo apt install -y git curl make` |
| Go | 1.20 | `sudo apt update` <br> `wget https://go.dev/dl/go1.20.linux-amd64.tar.gz` <br> `tar xvzf go1.20.linux-amd64.tar.gz` <br> `sudo cp go/bin/go /usr/bin/go` <br> `sudo mv go /usr/lib` <br> `echo export GOROOT=/usr/lib/go >> ~/.bashrc`
| Node | 16.19.0 | `curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -` <br> `sudo apt-get install -y nodejs`
| Node | 16.19.0 | `curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -` <br> `sudo apt-get install -y nodejs npm`
| yarn | 1.22.19 | `sudo npm install -g yarn`
| Foundry | 0.2.0 | `curl -L https://foundry.paradigm.xyz | bash` <br> `sudo bash` <br> `foundryup`
| Foundry | 0.2.0 | `curl -L https://foundry.paradigm.xyz | bash` <br> `. ~/.bashrc` <br> `foundryup`
## Build the Source Code
......@@ -74,7 +73,8 @@ We’re going to be spinning up an EVM Rollup from the OP Stack source code. Yo
1. Build the various packages inside of the Optimism Monorepo.
```bash
make build
make op-node op-batcher
yarn build
```
### Build op-geth
......@@ -440,20 +440,27 @@ Once you’ve connected your wallet, you’ll probably notice that you don’t h
cd ~/optimism/packages/contracts-bedrock
```
1. Grab the address of the `OptimismPortalProxy` contract:
1. Grab the address of the proxy to the L1 standard bridge contract:
```bash
cat deployments/getting-started/OptimismPortalProxy.json | grep \"address\":
cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json | grep \"address\":
```
You should see a result like the following (**your address will be different**):
```
"address": "0x264B5fde6B37fb6f1C92AaC17BA144cf9e3DcFE9",
"address": "0x264B5fde6B37fb6f1C92AaC17BA144cf9e3DcFE9",
"address": "0x874f2E16D803c044F10314A978322da3c9b075c7",
"internalType": "address",
"type": "address"
"internalType": "address",
"type": "address"
"internalType": "address",
"type": "address"
"internalType": "address",
"type": "address"
```
1. Grab the `OptimismPortalProxy` address and, using the wallet that you want to have ETH on your Rollup, send that address a small amount of ETH on Goerli (0.1 or less is fine). It may take up to 5 minutes for that ETH to appear in your wallet on L2.
1. Grab the L1 bridge proxy contract address and, using the wallet that you want to hold ETH on your Rollup, send that address a small amount of ETH on Goerli (0.1 or less is fine). It may take up to 5 minutes for that ETH to appear in your wallet on L2.
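   If you prefer to script the deposit instead of using a wallet UI, a minimal ethers v5 sketch like the one below would work. The RPC URL, private key, and bridge address are placeholders you must replace with your own values (the address shown is the sample from the output above).
   ```js
   // Hypothetical sketch, not part of the tutorial: send 0.1 Goerli ETH to the
   // L1 standard bridge proxy with ethers v5. Replace all placeholder values.
   const ethers = require("ethers")

   const l1Provider = new ethers.providers.JsonRpcProvider("https://eth-goerli.g.alchemy.com/v2/<api key>")
   const wallet = new ethers.Wallet("<private key>", l1Provider)

   // Your Proxy__OVM_L1StandardBridge address from the previous step.
   const bridgeAddress = "0x874f2E16D803c044F10314A978322da3c9b075c7"

   const main = async () => {
     const tx = await wallet.sendTransaction({
       to: bridgeAddress,
       value: ethers.utils.parseEther("0.1"),
     })
     console.log(`Deposit sent: ${tx.hash}`)
     await tx.wait()
   }

   main()
   ```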
## Use your Rollup
......
---
title: Using the SDK with OP Stack
lang: en-US
---
When building applications for use with your OP Stack, you can continue to use [the Optimism JavaScript SDK](https://sdk.optimism.io/).
The main difference is that you need to provide some contract addresses to the `CrossChainMessenger` because they aren't preconfigured.
## Contract addresses
### L1 contract addresses
The contract addresses are in `.../optimism/packages/contracts-bedrock/deployments/getting-started`, which you created when you deployed the L1 contracts.
| Contract name when creating `CrossChainMessenger` | File with address |
| - | - |
| `AddressManager` | `Lib_AddressManager.json` |
| `L1CrossDomainMessenger` | `Proxy__OVM_L1CrossDomainMessenger.json` |
| `L1StandardBridge` | `Proxy__OVM_L1StandardBridge.json` |
| `OptimismPortal` | `OptimismPortalProxy.json` |
| `L2OutputOracle` | `L2OutputOracleProxy.json` |
### Unneeded contract addresses
Some contracts are required by the SDK, but not actually used.
For these contracts you can just specify the zero address:
- `StateCommitmentChain`
- `CanonicalTransactionChain`
- `BondManager`
In JavaScript you can create the zero address using the expression `"0x".padEnd(42, "0")`.
## The CrossChainMessenger object
These directions assume you are inside the [Hardhat console](https://hardhat.org/hardhat-runner/docs/guides/hardhat-console).
They further assume that your project already includes the Optimism SDK [`@eth-optimism/sdk`](https://www.npmjs.com/package/@eth-optimism/sdk).
1. Import the SDK
```js
optimismSDK = require("@eth-optimism/sdk")
```
1. Set the configuration parameters.
| Variable name | Value |
| - | - |
| `l1Url` | URL to an RPC provider for L1, for example `https://eth-goerli.g.alchemy.com/v2/<api key>` |
| `l2Url` | URL to your OP Stack. If running on the same computer, it is `http://localhost:8545` |
| `privKey` | The private key for an account that has some ETH on the L1 |
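   For example, in the Hardhat console the assignments might look like this (the URL and key below are placeholders, not real endpoints or credentials):
   ```js
   l1Url = "https://eth-goerli.g.alchemy.com/v2/<api key>"  // L1 RPC endpoint
   l2Url = "http://localhost:8545"                          // your OP Stack RPC endpoint
   privKey = "<private key of an account funded on L1>"
   ```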
1. Create the [providers](https://docs.ethers.org/v5/api/providers/) and [signers](https://docs.ethers.org/v5/api/signer/).
```js
l1Provider = new ethers.providers.JsonRpcProvider(l1Url)
l2Provider = new ethers.providers.JsonRpcProvider(l2Url)
l1Signer = new ethers.Wallet(privKey).connect(l1Provider)
l2Signer = new ethers.Wallet(privKey).connect(l2Provider)
```
1. Create the L1 contracts structure.
```js
zeroAddr = "0x".padEnd(42, "0")
l1Contracts = {
StateCommitmentChain: zeroAddr,
CanonicalTransactionChain: zeroAddr,
BondManager: zeroAddr,
// These contracts have the addresses you found out earlier.
AddressManager: "0x....", // Lib_AddressManager.json
L1CrossDomainMessenger: "0x....", // Proxy__OVM_L1CrossDomainMessenger.json
L1StandardBridge: "0x....", // Proxy__OVM_L1StandardBridge.json
OptimismPortal: "0x....", // OptimismPortalProxy.json
L2OutputOracle: "0x....", // L2OutputOracleProxy.json
}
```
1. Create the data structure for the standard bridge.
```js
bridges = {
Standard: {
l1Bridge: l1Contracts.L1StandardBridge,
l2Bridge: "0x4200000000000000000000000000000000000010",
Adapter: optimismSDK.StandardBridgeAdapter
},
ETH: {
l1Bridge: l1Contracts.L1StandardBridge,
l2Bridge: "0x4200000000000000000000000000000000000010",
Adapter: optimismSDK.ETHBridgeAdapter
}
}
```
1. Create the [`CrossChainMessenger`](https://sdk.optimism.io/classes/crosschainmessenger) object.
```js
crossChainMessenger = new optimismSDK.CrossChainMessenger({
bedrock: true,
contracts: {
l1: l1Contracts
},
bridges: bridges,
l1ChainId: await l1Signer.getChainId(),
l2ChainId: await l2Signer.getChainId(),
l1SignerOrProvider: l1Signer,
l2SignerOrProvider: l2Signer,
})
```
## Verify SDK functionality
To verify the SDK's functionality, transfer some ETH from L1 to L2.
1. Get the current balances.
```js
balances0 = [
await l1Provider.getBalance(l1Signer.address),
await l2Provider.getBalance(l1Signer.address)
]
```
1. Transfer 1 gwei.
```js
tx = await crossChainMessenger.depositETH(1e9)
rcpt = await tx.wait()
```
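   Optionally, you can wait for the deposit message to be relayed on L2 before checking balances. A minimal sketch using the SDK's message-status API, assuming `tx` from the previous step:
   ```js
   // Block until the deposit has been relayed on L2.
   await crossChainMessenger.waitForMessageStatus(tx.hash, optimismSDK.MessageStatus.RELAYED)
   ```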
1. Get the balances after the transfer.
```js
balances1 = [
await l1Provider.getBalance(l1Signer.address),
await l2Provider.getBalance(l1Signer.address)
]
```
1. See that the L1 balance changed (probably by a lot more than 1 gwei because of the cost of the transaction).
```js
(balances0[0]-balances1[0])/1e9
```
1. See that the L2 balance changed (it might take a few minutes).
```js
((await l2Provider.getBalance(l1Signer.address))-balances0[1])/1e9
```
---
title: Modifying Predeployed Contracts
lang: en-US
---
::: warning 🚧 OP Stack Hacks are explicitly things that you can do with the OP Stack that are *not* currently intended for production use
OP Stack Hacks are not for the faint of heart. You will not be able to receive significant developer support for OP Stack Hacks — be prepared to get your hands dirty and to work without support.
:::
OP Stack blockchains have a number of [predeployed contracts](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/src/constants.ts) that provide important functionality.
Most of those contracts are proxies that can be upgraded using the `proxyAdminOwner` which was configured when the network was initially deployed.
The predeploys are controlled from a predeploy called [`ProxyAdmin`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol), whose address is `0x4200000000000000000000000000000000000018`.
The function to call is [`upgrade(address,address)`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol#L211-L229).
The first parameter is the proxy to upgrade, and the second is the address of a new implementation.
For example, the legacy `L1BlockNumber` contract is at `0x420...013`.
To disable this function, we'll set the implementation to `0x00...00`.
We do this using the [Foundry](https://book.getfoundry.sh/) command `cast`.
1. We'll need several constants.
- Set these addresses as variables in your terminal.
```sh
L1BLOCKNUM=0x4200000000000000000000000000000000000013
PROXY_ADMIN=0x4200000000000000000000000000000000000018
ZERO_ADDR=0x0000000000000000000000000000000000000000
```
- Set `PRIVKEY` to the private key of your ADMIN account.
- Set `ETH_RPC_URL`. If you're on the computer that runs the blockchain, use this command.
```sh
export ETH_RPC_URL=http://localhost:8545
```
1. Verify `L1BlockNumber` works correctly.
See that when you call the contract you get a block number, and twelve seconds later you get the next one (block time on L1 is twelve seconds).
```sh
cast call $L1BLOCKNUM 'number()' | cast --to-dec
sleep 12 && cast call $L1BLOCKNUM 'number()' | cast --to-dec
```
1. Get the current implementation for the contract.
```sh
L1BLOCKNUM_IMPLEMENTATION=`cast call $L1BLOCKNUM "implementation()" | sed 's/000000000000000000000000//'`
echo $L1BLOCKNUM_IMPLEMENTATION
```
1. Change the implementation to the zero address.
```sh
cast send --private-key $PRIVKEY $PROXY_ADMIN "upgrade(address,address)" $L1BLOCKNUM $ZERO_ADDR
```
1. See that the implementation is address zero, and that calling it fails.
```sh
cast call $L1BLOCKNUM 'implementation()'
cast call $L1BLOCKNUM 'number()'
```
1. Fix the predeploy by returning it to the previous implementation, and verify it works.
```sh
cast send --private-key $PRIVKEY $PROXY_ADMIN "upgrade(address,address)" $L1BLOCKNUM $L1BLOCKNUM_IMPLEMENTATION
cast call $L1BLOCKNUM 'number()' | cast --to-dec
```
\ No newline at end of file
module github.com/ethereum-optimism/optimism
go 1.18
go 1.19
require (
github.com/btcsuite/btcd v0.23.3
......@@ -9,7 +9,7 @@ require (
github.com/docker/docker v20.10.21+incompatible
github.com/docker/go-connections v0.4.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum/go-ethereum v1.11.2
github.com/ethereum/go-ethereum v1.11.4
github.com/fsnotify/fsnotify v1.6.0
github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.5.9
......@@ -69,11 +69,11 @@ require (
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-kit/kit v0.10.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/golang/mock v1.6.0 // indirect
......@@ -86,7 +86,6 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-bexpr v0.1.11 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/huin/goupnp v1.1.0 // indirect
github.com/influxdata/influxdb v1.8.3 // indirect
......@@ -147,7 +146,6 @@ require (
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/prometheus/tsdb v0.10.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-18 v0.2.0 // indirect
github.com/quic-go/qtls-go1-19 v0.2.0 // indirect
......@@ -191,6 +189,6 @@ require (
nhooyr.io/websocket v1.8.7 // indirect
)
replace github.com/ethereum/go-ethereum v1.11.2 => github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230308025559-13ee9ab9153b
replace github.com/ethereum/go-ethereum v1.11.4 => github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230321002540-11f0554a4313
//replace github.com/ethereum/go-ethereum v1.11.2 => ../go-ethereum
//replace github.com/ethereum/go-ethereum v1.11.4 => ../go-ethereum
FROM --platform=$BUILDPLATFORM golang:1.18.0-alpine3.15 as builder
FROM --platform=$BUILDPLATFORM golang:1.19.0-alpine3.15 as builder
ARG VERSION=v0.0.0
......
......@@ -20,6 +20,7 @@ lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint -e "errors.As" -e "errors.Is"
fuzz:
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelConfig_CheckTimeout ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationZero ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutMaxChannelDuration ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutZeroMaxChannelDuration ./batcher
......
......@@ -12,8 +12,6 @@ import (
)
var (
ErrZeroMaxFrameSize = errors.New("max frame size cannot be zero")
ErrSmallMaxFrameSize = errors.New("max frame size cannot be less than 23")
ErrInvalidChannelTimeout = errors.New("channel timeout is less than the safety margin")
ErrInputTargetReached = errors.New("target amount of input data reached")
ErrMaxFrameIndex = errors.New("max frame index reached (uint16)")
......@@ -83,15 +81,15 @@ func (cc *ChannelConfig) Check() error {
// will infinitely loop when trying to create frames in the
// [channelBuilder.OutputFrames] function.
if cc.MaxFrameSize == 0 {
return ErrZeroMaxFrameSize
return errors.New("max frame size cannot be zero")
}
// If the [MaxFrameSize] is set to < 23, the channel out
// will underflow the maxSize variable in the [derive.ChannelOut].
// If the [MaxFrameSize] is less than [FrameV0OverHeadSize], the channel
// out will underflow the maxSize variable in the [derive.ChannelOut].
// Since it is of type uint64, it will wrap around to a very large
// number, making the frame size extremely large.
if cc.MaxFrameSize < 23 {
return ErrSmallMaxFrameSize
if cc.MaxFrameSize < derive.FrameV0OverHeadSize {
return fmt.Errorf("max frame size %d is less than the minimum 23", cc.MaxFrameSize)
}
return nil
......
......@@ -32,8 +32,7 @@ func TestPendingChannelTimeout(t *testing.T) {
require.False(t, timeout)
// Set the pending channel
err := m.ensurePendingChannel(eth.BlockID{})
require.NoError(t, err)
require.NoError(t, m.ensurePendingChannel(eth.BlockID{}))
// There are no confirmed transactions so
// the pending channel cannot be timed out
......@@ -85,14 +84,10 @@ func TestChannelManagerReturnsErrReorg(t *testing.T) {
ParentHash: common.Hash{0xff},
}, nil, nil, nil, nil)
err := m.AddL2Block(a)
require.NoError(t, err)
err = m.AddL2Block(b)
require.NoError(t, err)
err = m.AddL2Block(c)
require.NoError(t, err)
err = m.AddL2Block(x)
require.ErrorIs(t, err, ErrReorg)
require.NoError(t, m.AddL2Block(a))
require.NoError(t, m.AddL2Block(b))
require.NoError(t, m.AddL2Block(c))
require.ErrorIs(t, m.AddL2Block(x), ErrReorg)
require.Equal(t, []*types.Block{a, b, c}, m.blocks)
}
......@@ -111,16 +106,14 @@ func TestChannelManagerReturnsErrReorgWhenDrained(t *testing.T) {
a := newMiniL2Block(0)
x := newMiniL2BlockWithNumberParent(0, big.NewInt(1), common.Hash{0xff})
err := m.AddL2Block(a)
require.NoError(t, err)
require.NoError(t, m.AddL2Block(a))
_, err = m.TxData(eth.BlockID{})
_, err := m.TxData(eth.BlockID{})
require.NoError(t, err)
_, err = m.TxData(eth.BlockID{})
require.ErrorIs(t, err, io.EOF)
err = m.AddL2Block(x)
require.ErrorIs(t, err, ErrReorg)
require.ErrorIs(t, m.AddL2Block(x), ErrReorg)
}
// TestChannelManagerNextTxData checks the nextTxData function.
......@@ -136,8 +129,7 @@ func TestChannelManagerNextTxData(t *testing.T) {
// Set the pending channel
// The nextTxData function should still return EOF
// since the pending channel has no frames
err = m.ensurePendingChannel(eth.BlockID{})
require.NoError(t, err)
require.NoError(t, m.ensurePendingChannel(eth.BlockID{}))
returnedTxData, err = m.nextTxData()
require.ErrorIs(t, err, io.EOF)
require.Equal(t, txData{}, returnedTxData)
......@@ -164,8 +156,10 @@ func TestChannelManagerNextTxData(t *testing.T) {
require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID])
}
// TestClearChannelManager tests clearing the channel manager.
func TestClearChannelManager(t *testing.T) {
// TestChannelManager_Clear tests clearing the channel manager.
func TestChannelManager_Clear(t *testing.T) {
require := require.New(t)
// Create a channel manager
log := testlog.Logger(t, log.LvlCrit)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
......@@ -176,15 +170,17 @@ func TestClearChannelManager(t *testing.T) {
ChannelTimeout: 10,
// Have to set the max frame size here otherwise the channel builder would not
// be able to output any frames
MaxFrameSize: 1,
MaxFrameSize: 24,
TargetFrameSize: 24,
ApproxComprRatio: 1.0,
})
// Channel Manager state should be empty by default
require.Empty(t, m.blocks)
require.Equal(t, common.Hash{}, m.tip)
require.Nil(t, m.pendingChannel)
require.Empty(t, m.pendingTransactions)
require.Empty(t, m.confirmedTransactions)
require.Empty(m.blocks)
require.Equal(common.Hash{}, m.tip)
require.Nil(m.pendingChannel)
require.Empty(m.pendingTransactions)
require.Empty(m.confirmedTransactions)
// Add a block to the channel manager
a, _ := derivetest.RandomL2Block(rng, 4)
......@@ -193,28 +189,25 @@ func TestClearChannelManager(t *testing.T) {
Hash: a.Hash(),
Number: a.NumberU64(),
}
err := m.AddL2Block(a)
require.NoError(t, err)
require.NoError(m.AddL2Block(a))
// Make sure there is a channel builder
err = m.ensurePendingChannel(l1BlockID)
require.NoError(t, err)
require.NotNil(t, m.pendingChannel)
require.Equal(t, 0, len(m.confirmedTransactions))
require.NoError(m.ensurePendingChannel(l1BlockID))
require.NotNil(m.pendingChannel)
require.Len(m.confirmedTransactions, 0)
// Process the blocks
// We should have a pending channel with 1 frame
// and no more blocks since processBlocks consumes
// the list
err = m.processBlocks()
require.NoError(t, err)
err = m.pendingChannel.OutputFrames()
require.NoError(t, err)
_, err = m.nextTxData()
require.NoError(t, err)
require.Equal(t, 0, len(m.blocks))
require.Equal(t, newL1Tip, m.tip)
require.Equal(t, 1, len(m.pendingTransactions))
require.NoError(m.processBlocks())
require.NoError(m.pendingChannel.co.Flush())
require.NoError(m.pendingChannel.OutputFrames())
_, err := m.nextTxData()
require.NoError(err)
require.Len(m.blocks, 0)
require.Equal(newL1Tip, m.tip)
require.Len(m.pendingTransactions, 1)
// Add a new block so we can test clearing
// the channel manager with a full state
......@@ -222,20 +215,19 @@ func TestClearChannelManager(t *testing.T) {
Number: big.NewInt(1),
ParentHash: a.Hash(),
}, nil, nil, nil, nil)
err = m.AddL2Block(b)
require.NoError(t, err)
require.Equal(t, 1, len(m.blocks))
require.Equal(t, b.Hash(), m.tip)
require.NoError(m.AddL2Block(b))
require.Len(m.blocks, 1)
require.Equal(b.Hash(), m.tip)
// Clear the channel manager
m.Clear()
// Check that the entire channel manager state cleared
require.Empty(t, m.blocks)
require.Equal(t, common.Hash{}, m.tip)
require.Nil(t, m.pendingChannel)
require.Empty(t, m.pendingTransactions)
require.Empty(t, m.confirmedTransactions)
require.Empty(m.blocks)
require.Equal(common.Hash{}, m.tip)
require.Nil(m.pendingChannel)
require.Empty(m.pendingTransactions)
require.Empty(m.confirmedTransactions)
}
// TestChannelManagerTxConfirmed checks the [ChannelManager.TxConfirmed] function.
......@@ -251,8 +243,7 @@ func TestChannelManagerTxConfirmed(t *testing.T) {
// Let's add a valid pending transaction to the channel manager
// So we can demonstrate that TxConfirmed's correctness
err := m.ensurePendingChannel(eth.BlockID{})
require.NoError(t, err)
require.NoError(t, m.ensurePendingChannel(eth.BlockID{}))
channelID := m.pendingChannel.ID()
frame := frameData{
data: []byte{},
......@@ -270,7 +261,7 @@ func TestChannelManagerTxConfirmed(t *testing.T) {
require.Equal(t, expectedTxData, returnedTxData)
require.Equal(t, 0, m.pendingChannel.NumFrames())
require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID])
require.Equal(t, 1, len(m.pendingTransactions))
require.Len(t, m.pendingTransactions, 1)
// An unknown pending transaction should not be marked as confirmed
// and should not be removed from the pending transactions map
......@@ -281,14 +272,14 @@ func TestChannelManagerTxConfirmed(t *testing.T) {
blockID := eth.BlockID{Number: 0, Hash: common.Hash{0x69}}
m.TxConfirmed(unknownTxID, blockID)
require.Empty(t, m.confirmedTransactions)
require.Equal(t, 1, len(m.pendingTransactions))
require.Len(t, m.pendingTransactions, 1)
// Now let's mark the pending transaction as confirmed
// and check that it is removed from the pending transactions map
// and added to the confirmed transactions map
m.TxConfirmed(expectedChannelID, blockID)
require.Empty(t, m.pendingTransactions)
require.Equal(t, 1, len(m.confirmedTransactions))
require.Len(t, m.confirmedTransactions, 1)
require.Equal(t, blockID, m.confirmedTransactions[expectedChannelID])
}
......@@ -300,8 +291,7 @@ func TestChannelManagerTxFailed(t *testing.T) {
// Let's add a valid pending transaction to the channel
// manager so we can demonstrate correctness
err := m.ensurePendingChannel(eth.BlockID{})
require.NoError(t, err)
require.NoError(t, m.ensurePendingChannel(eth.BlockID{}))
channelID := m.pendingChannel.ID()
frame := frameData{
data: []byte{},
......@@ -319,7 +309,7 @@ func TestChannelManagerTxFailed(t *testing.T) {
require.Equal(t, expectedTxData, returnedTxData)
require.Equal(t, 0, m.pendingChannel.NumFrames())
require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID])
require.Equal(t, 1, len(m.pendingTransactions))
require.Len(t, m.pendingTransactions, 1)
// Trying to mark an unknown pending transaction as failed
// shouldn't modify state
......@@ -348,8 +338,7 @@ func TestChannelManager_TxResend(t *testing.T) {
a, _ := derivetest.RandomL2Block(rng, 4)
err := m.AddL2Block(a)
require.NoError(err)
require.NoError(m.AddL2Block(a))
txdata0, err := m.TxData(eth.BlockID{})
require.NoError(err)
......
FROM golang:1.18.0-alpine3.15 as builder
FROM golang:1.19.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
......
package ether
import (
"fmt"
"math/big"
"math/rand"
"testing"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
......@@ -190,7 +191,7 @@ func TestMigrateBalances(t *testing.T) {
}
}
func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Address]*big.Int, allowances map[common.Address]common.Address) (*state.StateDB, DBFactory) {
func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Address]*big.Int, allowances map[common.Address]common.Address) (*state.StateDB, util.DBFactory) {
memDB := rawdb.NewMemoryDatabase()
db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
......@@ -228,11 +229,69 @@ func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Addre
}
}
// TestMigrateBalancesRandom tests that the pre-check balances function works
// TestMigrateBalancesRandomOK tests that the pre-check balances function works
// with random addresses. This test makes sure that the partition logic doesn't
// miss anything.
func TestMigrateBalancesRandom(t *testing.T) {
// miss anything, and helps detect concurrency errors.
func TestMigrateBalancesRandomOK(t *testing.T) {
for i := 0; i < 100; i++ {
addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)
db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
require.NoError(t, err)
for addr, expBal := range stateBalances {
actBal := db.GetBalance(addr)
require.EqualValues(t, expBal, actBal)
}
}
}
// TestMigrateBalancesRandomMissing tests that the pre-check balances function works
// with random addresses when some of them are missing. This helps make sure that the
// partition logic doesn't miss anything, and helps detect concurrency errors.
func TestMigrateBalancesRandomMissing(t *testing.T) {
for i := 0; i < 100; i++ {
addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)
if len(addresses) == 0 {
continue
}
// Remove a random address from the list of witnesses
idx := rand.Intn(len(addresses))
addresses = append(addresses[:idx], addresses[idx+1:]...)
db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
require.ErrorContains(t, err, "unknown storage slot")
}
for i := 0; i < 100; i++ {
addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)
if len(allowances) == 0 {
continue
}
// Remove a random allowance from the list of witnesses
idx := rand.Intn(len(allowances))
allowances = append(allowances[:idx], allowances[idx+1:]...)
db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
require.ErrorContains(t, err, "unknown storage slot")
}
}
func randAddr(t *testing.T) common.Address {
var addr common.Address
_, err := rand.Read(addr[:])
require.NoError(t, err)
return addr
}
func setupRandTest(t *testing.T) ([]common.Address, map[common.Address]*big.Int, []*crossdomain.Allowance, map[common.Address]common.Address, *big.Int) {
addresses := make([]common.Address, 0)
stateBalances := make(map[common.Address]*big.Int)
......@@ -258,99 +317,5 @@ func TestMigrateBalancesRandom(t *testing.T) {
stateAllowances[addr] = to
}
db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
require.NoError(t, err)
for addr, expBal := range stateBalances {
actBal := db.GetBalance(addr)
require.EqualValues(t, expBal, actBal)
}
}
}
func TestPartitionKeyspace(t *testing.T) {
tests := []struct {
i int
count int
expected [2]common.Hash
}{
{
i: 0,
count: 1,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 1,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
},
},
{
i: 1,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
},
{
i: 2,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
}
for _, tt := range tests {
t.Run(fmt.Sprintf("i %d, count %d", tt.i, tt.count), func(t *testing.T) {
start, end := PartitionKeyspace(tt.i, tt.count)
require.Equal(t, tt.expected[0], start)
require.Equal(t, tt.expected[1], end)
})
}
t.Run("panics on invalid i or count", func(t *testing.T) {
require.Panics(t, func() {
PartitionKeyspace(1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(0, -1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, -1)
})
})
}
func randAddr(t *testing.T) common.Address {
var addr common.Address
_, err := rand.Read(addr[:])
require.NoError(t, err)
return addr
return addresses, stateBalances, allowances, stateAllowances, totalSupply
}
......@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"math/big"
"math/rand"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
......@@ -26,11 +27,21 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)
// MaxSlotChecks is the maximum number of storage slots to check
// when validating the untouched predeploys. This limit is in place
// to bound execution time of the migration. We can parallelize this
// in the future.
const MaxSlotChecks = 1000
const (
// MaxPredeploySlotChecks is the maximum number of storage slots to check
// when validating the untouched predeploys. This limit is in place
// to bound execution time of the migration. We can parallelize this
// in the future.
MaxPredeploySlotChecks = 1000
// MaxOVMETHSlotChecks is the maximum number of OVM ETH storage slots to check
// when validating the OVM ETH migration.
MaxOVMETHSlotChecks = 5000
// OVMETHSampleLikelihood is the probability that a storage slot will be checked
// when validating the OVM ETH migration.
OVMETHSampleLikelihood = 0.1
)
type StorageCheckMap = map[common.Hash]common.Hash
......@@ -148,7 +159,7 @@ func PostCheckMigratedDB(
}
log.Info("checked L1Block")
if err := PostCheckLegacyETH(db); err != nil {
if err := PostCheckLegacyETH(prevDB, db, migrationData); err != nil {
return err
}
log.Info("checked legacy eth")
......@@ -210,7 +221,7 @@ func PostCheckUntouchables(udb state.Database, currDB *state.StateDB, prevRoot c
if err := prevDB.ForEachStorage(addr, func(key, value common.Hash) bool {
count++
expSlots[key] = value
return count < MaxSlotChecks
return count < MaxPredeploySlotChecks
}); err != nil {
return fmt.Errorf("error iterating over storage: %w", err)
}
......@@ -365,14 +376,94 @@ func PostCheckPredeployStorage(db vm.StateDB, finalSystemOwner common.Address, p
}
// PostCheckLegacyETH checks that the legacy eth migration was successful.
// It currently only checks that the total supply was set to 0.
func PostCheckLegacyETH(db vm.StateDB) error {
// It checks that the total supply was set to 0, and randomly samples storage
// slots pre- and post-migration to ensure that balances were correctly migrated.
func PostCheckLegacyETH(prevDB, migratedDB *state.StateDB, migrationData crossdomain.MigrationData) error {
allowanceSlots := make(map[common.Hash]bool)
addresses := make(map[common.Hash]common.Address)
log.Info("recomputing witness data")
for _, allowance := range migrationData.OvmAllowances {
key := ether.CalcAllowanceStorageKey(allowance.From, allowance.To)
allowanceSlots[key] = true
}
for _, addr := range migrationData.Addresses() {
addresses[ether.CalcOVMETHStorageKey(addr)] = addr
}
log.Info("checking legacy eth fixed storage slots")
for slot, expValue := range LegacyETHCheckSlots {
actValue := db.GetState(predeploys.LegacyERC20ETHAddr, slot)
actValue := migratedDB.GetState(predeploys.LegacyERC20ETHAddr, slot)
if actValue != expValue {
return fmt.Errorf("expected slot %s on %s to be %s, but got %s", slot, predeploys.LegacyERC20ETHAddr, expValue, actValue)
}
}
var count int
threshold := 100 - int(100*OVMETHSampleLikelihood)
progress := util.ProgressLogger(100, "checking legacy eth balance slots")
var innerErr error
err := prevDB.ForEachStorage(predeploys.LegacyERC20ETHAddr, func(key, value common.Hash) bool {
val := rand.Intn(100)
// Randomly sample storage slots.
if val < threshold {
return true
}
// Ignore fixed slots.
if _, ok := LegacyETHCheckSlots[key]; ok {
return true
}
// Ignore allowances.
if allowanceSlots[key] {
return true
}
// Grab the address, and bail if we can't find it.
addr, ok := addresses[key]
if !ok {
innerErr = fmt.Errorf("unknown OVM_ETH storage slot %s", key)
return false
}
// Pull out the pre-migration OVM ETH balance, and the state balance.
ovmETHBalance := value.Big()
ovmETHStateBalance := prevDB.GetBalance(addr)
// Pre-migration state balance should be zero.
if ovmETHStateBalance.Cmp(common.Big0) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH pre-migration state balance for %s to be 0, but got %s", addr, ovmETHStateBalance)
return false
}
// Migrated state balance should equal the OVM ETH balance.
migratedStateBalance := migratedDB.GetBalance(addr)
if migratedStateBalance.Cmp(ovmETHBalance) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH post-migration state balance for %s to be %s, but got %s", addr, ovmETHStateBalance, migratedStateBalance)
return false
}
// Migrated OVM ETH balance should be zero, since we wipe the slots.
migratedBalance := migratedDB.GetState(predeploys.LegacyERC20ETHAddr, key)
if migratedBalance.Big().Cmp(common.Big0) != 0 {
innerErr = fmt.Errorf("expected OVM_ETH post-migration ERC20 balance for %s to be 0, but got %s", addr, migratedBalance)
return false
}
progress()
count++
// Stop iterating if we've checked enough slots.
return count < MaxOVMETHSlotChecks
})
if err != nil {
return fmt.Errorf("error iterating over OVM_ETH storage: %w", err)
}
if innerErr != nil {
return innerErr
}
return nil
}
......
package util
import (
"fmt"
"math/big"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
var (
// maxSlot is the maximum possible storage slot.
maxSlot = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
)
type DBFactory func() (*state.StateDB, error)
type StateCallback func(db *state.StateDB, key, value common.Hash) error
func IterateState(dbFactory DBFactory, address common.Address, cb StateCallback, workers int) error {
if workers <= 0 {
panic("workers must be greater than 0")
}
// WaitGroup to wait for all workers to finish.
var wg sync.WaitGroup
// Channel to receive errors from each iteration job.
errCh := make(chan error, workers)
// Channel to cancel all iteration jobs.
cancelCh := make(chan struct{})
worker := func(start, end common.Hash) {
// Decrement the WaitGroup when the function returns.
defer wg.Done()
db, err := dbFactory()
if err != nil {
// Should never happen, so explode if it does.
log.Crit("cannot create state db", "err", err)
}
st, err := db.StorageTrie(address)
if err != nil {
// Should never happen, so explode if it does.
log.Crit("cannot get storage trie", "address", address, "err", err)
}
// st can be nil if the account doesn't exist.
if st == nil {
errCh <- fmt.Errorf("account does not exist: %s", address.Hex())
return
}
it := trie.NewIterator(st.NodeIterator(start.Bytes()))
// Below code is largely based on db.ForEachStorage. We can't use that
// because it doesn't allow us to specify a start and end key.
for it.Next() {
select {
case <-cancelCh:
// If one of the workers encounters an error, cancel all of them.
return
default:
break
}
// Use the raw (i.e., secure hashed) key to check if we've reached
// the end of the partition. Use > rather than >= here to account for
// the fact that the values returned by PartitionKeys are inclusive.
// Duplicate addresses that may be returned by this iteration are
// filtered out in the collector.
if new(big.Int).SetBytes(it.Key).Cmp(end.Big()) > 0 {
return
}
// Skip if the value is empty.
rawValue := it.Value
if len(rawValue) == 0 {
continue
}
// Get the preimage.
rawKey := st.GetKey(it.Key)
if rawKey == nil {
// Should never happen, so explode if it does.
log.Crit("cannot get preimage for storage key", "key", it.Key)
}
key := common.BytesToHash(rawKey)
// Parse the raw value.
_, content, _, err := rlp.Split(rawValue)
if err != nil {
// Should never happen, so explode if it does.
log.Crit("mal-formed data in state: %v", err)
}
value := common.BytesToHash(content)
// Call the callback with the DB, key, and value. Errors get
// bubbled up to the errCh.
if err := cb(db, key, value); err != nil {
errCh <- err
return
}
}
}
for i := 0; i < workers; i++ {
wg.Add(1)
// Partition the keyspace per worker.
start, end := PartitionKeyspace(i, workers)
// Kick off our worker.
go worker(start, end)
}
wg.Wait()
for len(errCh) > 0 {
err := <-errCh
if err != nil {
return err
}
}
return nil
}
// PartitionKeyspace divides the key space into partitions by dividing the maximum keyspace
// by count then multiplying by i. This will leave some slots left over, which we handle below. It
// returns the start and end keys for the partition as a common.Hash. Note that the returned range
// of keys is inclusive, i.e., [start, end] NOT [start, end).
func PartitionKeyspace(i int, count int) (common.Hash, common.Hash) {
if i < 0 || count < 0 {
panic("i and count must be greater than 0")
}
if i > count-1 {
panic("i must be less than count - 1")
}
// Divide the key space into partitions by dividing the key space by the number
// of jobs. This will leave some slots left over, which we handle below.
partSize := new(big.Int).Div(maxSlot.Big(), big.NewInt(int64(count)))
start := common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i)), partSize))
var end common.Hash
if i < count-1 {
// If this is not the last partition, use the next partition's start key as the end.
end = common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i+1)), partSize))
} else {
// If this is the last partition, use the max slot as the end.
end = maxSlot
}
return start, end
}
package util
import (
crand "crypto/rand"
"fmt"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/trie"
"github.com/stretchr/testify/require"
)
var testAddr = common.Address{0: 0xff}
func TestStateIteratorWorkers(t *testing.T) {
_, factory, _ := setupRandTest(t)
for i := -1; i <= 0; i++ {
require.Panics(t, func() {
_ = IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
return nil
}, i)
})
}
}
func TestStateIteratorNonexistentAccount(t *testing.T) {
_, factory, _ := setupRandTest(t)
require.ErrorContains(t, IterateState(factory, common.Address{}, func(db *state.StateDB, key, value common.Hash) error {
return nil
}, 1), "account does not exist")
}
func TestStateIteratorRandomOK(t *testing.T) {
for i := 0; i < 100; i++ {
hashes, factory, workerCount := setupRandTest(t)
seenHashes := make(map[common.Hash]bool)
hashCh := make(chan common.Hash)
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
for hash := range hashCh {
seenHashes[hash] = true
}
}()
require.NoError(t, IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
hashCh <- key
return nil
}, workerCount))
close(hashCh)
<-doneCh
// Perform a less or equal check here in case of duplicates. The map check below will assert
// that all of the hashes are accounted for.
require.LessOrEqual(t, len(seenHashes), len(hashes))
// Every hash we put into state should have been iterated over.
for _, hash := range hashes {
require.Contains(t, seenHashes, hash)
}
}
}
func TestStateIteratorRandomError(t *testing.T) {
for i := 0; i < 100; i++ {
hashes, factory, workerCount := setupRandTest(t)
failHash := hashes[rand.Intn(len(hashes))]
require.ErrorContains(t, IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
if key == failHash {
return fmt.Errorf("test error")
}
return nil
}, workerCount), "test error")
}
}
func TestPartitionKeyspace(t *testing.T) {
tests := []struct {
i int
count int
expected [2]common.Hash
}{
{
i: 0,
count: 1,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 1,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
},
},
{
i: 1,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
},
{
i: 2,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
}
for _, tt := range tests {
t.Run(fmt.Sprintf("i %d, count %d", tt.i, tt.count), func(t *testing.T) {
start, end := PartitionKeyspace(tt.i, tt.count)
require.Equal(t, tt.expected[0], start)
require.Equal(t, tt.expected[1], end)
})
}
t.Run("panics on invalid i or count", func(t *testing.T) {
require.Panics(t, func() {
PartitionKeyspace(1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(0, -1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, -1)
})
})
}
func setupRandTest(t *testing.T) ([]common.Hash, DBFactory, int) {
memDB := rawdb.NewMemoryDatabase()
db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
require.NoError(t, err)
hashCount := rand.Intn(100)
if hashCount == 0 {
hashCount = 1
}
hashes := make([]common.Hash, hashCount)
db.CreateAccount(testAddr)
for j := 0; j < hashCount; j++ {
hashes[j] = randHash(t)
db.SetState(testAddr, hashes[j], hashes[j])
}
root, err := db.Commit(false)
require.NoError(t, err)
err = db.Database().TrieDB().Commit(root, true)
require.NoError(t, err)
factory := func() (*state.StateDB, error) {
return state.New(root, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
}
workerCount := rand.Intn(64)
if workerCount == 0 {
workerCount = 1
}
return hashes, factory, workerCount
}
func randHash(t *testing.T) common.Hash {
var h common.Hash
_, err := crand.Read(h[:])
require.NoError(t, err)
return h
}
......@@ -35,7 +35,7 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
......@@ -146,7 +146,7 @@ func TestLargeL1Gaps(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
......
......@@ -21,7 +21,7 @@ func TestShapellaL1Fork(gt *testing.T) {
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
require.False(t, sd.L1Cfg.Config.IsShanghai(miner.l1Chain.CurrentBlock().Time()), "not active yet")
require.False(t, sd.L1Cfg.Config.IsShanghai(miner.l1Chain.CurrentBlock().Time), "not active yet")
// start op-nodes
sequencer.ActL2PipelineFull(t)
......@@ -34,7 +34,7 @@ func TestShapellaL1Fork(gt *testing.T) {
// verify Shanghai is active
l1Head := miner.l1Chain.CurrentBlock()
require.True(t, sd.L1Cfg.Config.IsShanghai(l1Head.Time()))
require.True(t, sd.L1Cfg.Config.IsShanghai(l1Head.Time))
// build L2 chain up to and including L2 blocks referencing shanghai L1 blocks
sequencer.ActL1HeadSignal(t)
......
......@@ -31,7 +31,7 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
ChainID: sd.L1Cfg.Config.ChainID,
Nonce: 0,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
......@@ -41,7 +41,8 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
// make an empty block, even though a tx may be waiting
miner.ActL1StartBlock(10)(t)
miner.ActL1EndBlock(t)
bl := miner.l1Chain.CurrentBlock()
header := miner.l1Chain.CurrentBlock()
bl := miner.l1Chain.GetBlockByHash(header.Hash())
require.Equal(t, uint64(1), bl.NumberU64())
require.Zero(gt, bl.Transactions().Len())
......@@ -49,7 +50,8 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
miner.ActL1StartBlock(10)(t)
miner.ActL1IncludeTx(dp.Addresses.Alice)(t)
miner.ActL1EndBlock(t)
bl = miner.l1Chain.CurrentBlock()
header = miner.l1Chain.CurrentBlock()
bl = miner.l1Chain.GetBlockByHash(header.Hash())
require.Equal(t, uint64(2), bl.NumberU64())
require.Equal(t, 1, bl.Transactions().Len())
require.Equal(t, tx.Hash(), bl.Transactions()[0].Hash())
......
......@@ -103,9 +103,9 @@ func (s *L1Replica) ActL1RewindDepth(depth uint64) Action {
t.InvalidAction("cannot rewind L1 past genesis (current: %d, rewind depth: %d)", head, depth)
return
}
finalized := s.l1Chain.CurrentFinalizedBlock()
if finalized != nil && head < finalized.NumberU64()+depth {
t.InvalidAction("cannot rewind head of chain past finalized block %d with rewind depth %d", finalized.NumberU64(), depth)
finalized := s.l1Chain.CurrentFinalBlock()
if finalized != nil && head < finalized.Number.Uint64()+depth {
t.InvalidAction("cannot rewind head of chain past finalized block %d with rewind depth %d", finalized.Number.Uint64(), depth)
return
}
if err := s.l1Chain.SetHead(head - depth); err != nil {
......@@ -188,7 +188,7 @@ func (s *L1Replica) UnsafeNum() uint64 {
head := s.l1Chain.CurrentBlock()
headNum := uint64(0)
if head != nil {
headNum = head.NumberU64()
headNum = head.Number.Uint64()
}
return headNum
}
......@@ -197,16 +197,16 @@ func (s *L1Replica) SafeNum() uint64 {
safe := s.l1Chain.CurrentSafeBlock()
safeNum := uint64(0)
if safe != nil {
safeNum = safe.NumberU64()
safeNum = safe.Number.Uint64()
}
return safeNum
}
func (s *L1Replica) FinalizedNum() uint64 {
finalized := s.l1Chain.CurrentFinalizedBlock()
finalized := s.l1Chain.CurrentFinalBlock()
finalizedNum := uint64(0)
if finalized != nil {
finalizedNum = finalized.NumberU64()
finalizedNum = finalized.Number.Uint64()
}
return finalizedNum
}
......@@ -219,7 +219,7 @@ func (s *L1Replica) ActL1Finalize(t Testing, num uint64) {
t.InvalidAction("need to move forward safe block before moving finalized block")
return
}
newFinalized := s.l1Chain.GetBlockByNumber(num)
newFinalized := s.l1Chain.GetHeaderByNumber(num)
if newFinalized == nil {
t.Fatalf("expected block at %d after finalized L1 block %d, safe head is ahead", num, finalizedNum)
}
......@@ -234,7 +234,7 @@ func (s *L1Replica) ActL1FinalizeNext(t Testing) {
// ActL1Safe marks the given unsafe block as safe.
func (s *L1Replica) ActL1Safe(t Testing, num uint64) {
newSafe := s.l1Chain.GetBlockByNumber(num)
newSafe := s.l1Chain.GetHeaderByNumber(num)
if newSafe == nil {
t.InvalidAction("could not find L1 block %d, cannot label it as safe", num)
return
......
......@@ -85,7 +85,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
})
syncFromA := replica1.ActL1Sync(canonL1(chainA))
// sync canonical chain A
for replica1.l1Chain.CurrentBlock().NumberU64()+1 < uint64(len(chainA)) {
for replica1.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainA)) {
syncFromA(t)
}
require.Equal(t, replica1.l1Chain.CurrentBlock().Hash(), chainA[len(chainA)-1].Hash(), "sync replica1 to head of chain A")
......@@ -94,7 +94,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
// sync new canonical chain B
syncFromB := replica1.ActL1Sync(canonL1(chainB))
for replica1.l1Chain.CurrentBlock().NumberU64()+1 < uint64(len(chainB)) {
for replica1.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainB)) {
syncFromB(t)
}
require.Equal(t, replica1.l1Chain.CurrentBlock().Hash(), chainB[len(chainB)-1].Hash(), "sync replica1 to head of chain B")
......@@ -105,7 +105,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
_ = replica2.Close()
})
syncFromOther := replica2.ActL1Sync(replica1.CanonL1Chain())
for replica2.l1Chain.CurrentBlock().NumberU64()+1 < uint64(len(chainB)) {
for replica2.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainB)) {
syncFromOther(t)
}
require.Equal(t, replica2.l1Chain.CurrentBlock().Hash(), chainB[len(chainB)-1].Hash(), "sync replica2 to head of chain B")
......
......@@ -48,7 +48,7 @@ func TestBatcher(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
......@@ -73,7 +73,7 @@ func TestBatcher(gt *testing.T) {
miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
miner.ActL1EndBlock(t)
bl := miner.l1Chain.CurrentBlock()
log.Info("bl", "txs", len(bl.Transactions()))
log.Info("bl", "txs", len(miner.l1Chain.GetBlockByHash(bl.Hash()).Transactions()))
// Now make enough L1 blocks that the verifier will have to derive a L2 block
// It will also eagerly derive the block from the batcher
......@@ -437,7 +437,7 @@ func TestBigL2Txs(gt *testing.T) {
}
sequencer.ActL1HeadSignal(t)
sequencer.ActL2StartBlock(t)
baseFee := engine.l2Chain.CurrentBlock().BaseFee() // this will go quite high, since so many consecutive blocks are filled at capacity.
baseFee := engine.l2Chain.CurrentBlock().BaseFee // this will go quite high, since so many consecutive blocks are filled at capacity.
// fill the block with large L2 txs from alice
for n := aliceNonce; ; n++ {
require.NoError(t, err)
......
......@@ -202,30 +202,30 @@ func (ea *L2EngineAPI) ForkchoiceUpdatedV1(ctx context.Context, state *eth.Forkc
// chain final and completely in PoS mode.
if state.FinalizedBlockHash != (common.Hash{}) {
// If the finalized block is not in our canonical tree, somethings wrong
finalBlock := ea.l2Chain.GetBlockByHash(state.FinalizedBlockHash)
if finalBlock == nil {
finalHeader := ea.l2Chain.GetHeaderByHash(state.FinalizedBlockHash)
if finalHeader == nil {
ea.log.Warn("Final block not available in database", "hash", state.FinalizedBlockHash)
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("final block not available in database"))
} else if rawdb.ReadCanonicalHash(ea.l2Database, finalBlock.NumberU64()) != state.FinalizedBlockHash {
} else if rawdb.ReadCanonicalHash(ea.l2Database, finalHeader.Number.Uint64()) != state.FinalizedBlockHash {
ea.log.Warn("Final block not in canonical chain", "number", block.NumberU64(), "hash", state.HeadBlockHash)
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("final block not in canonical chain"))
}
// Set the finalized block
ea.l2Chain.SetFinalized(finalBlock)
ea.l2Chain.SetFinalized(finalHeader)
}
// Check if the safe block hash is in our canonical tree, if not somethings wrong
if state.SafeBlockHash != (common.Hash{}) {
safeBlock := ea.l2Chain.GetBlockByHash(state.SafeBlockHash)
if safeBlock == nil {
safeHeader := ea.l2Chain.GetHeaderByHash(state.SafeBlockHash)
if safeHeader == nil {
ea.log.Warn("Safe block not available in database")
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("safe block not available in database"))
}
if rawdb.ReadCanonicalHash(ea.l2Database, safeBlock.NumberU64()) != state.SafeBlockHash {
if rawdb.ReadCanonicalHash(ea.l2Database, safeHeader.Number.Uint64()) != state.SafeBlockHash {
ea.log.Warn("Safe block not in canonical chain")
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("safe block not in canonical chain"))
}
// Set the safe block
ea.l2Chain.SetSafe(safeBlock)
ea.l2Chain.SetSafe(safeHeader)
}
// If payload generation was requested, create a new block to be potentially
// sealed by the beacon client. The payload will be requested later, and we
......
......@@ -107,7 +107,7 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: 0,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(engine.l2Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
GasFeeCap: new(big.Int).Add(engine.l2Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
......@@ -125,7 +125,7 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
SafeBlockHash: genesisBlock.Hash(),
FinalizedBlockHash: genesisBlock.Hash(),
}, &eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(parent.Time()) + 2,
Timestamp: eth.Uint64Quantity(parent.Time) + 2,
PrevRandao: eth.Bytes32{},
SuggestedFeeRecipient: common.Address{'C'},
Transactions: nil,
......@@ -161,12 +161,12 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
require.Equal(t, payload.BlockHash, engine.l2Chain.CurrentBlock().Hash(), "now payload is canonical")
}
buildBlock(false)
require.Zero(t, engine.l2Chain.CurrentBlock().Transactions().Len(), "no tx included")
require.Zero(t, engine.l2Chain.GetBlockByHash(engine.l2Chain.CurrentBlock().Hash()).Transactions().Len(), "no tx included")
buildBlock(true)
require.Equal(gt, 1, engine.l2Chain.CurrentBlock().Transactions().Len(), "tx from alice is included")
require.Equal(gt, 1, engine.l2Chain.GetBlockByHash(engine.l2Chain.CurrentBlock().Hash()).Transactions().Len(), "tx from alice is included")
buildBlock(false)
require.Zero(t, engine.l2Chain.CurrentBlock().Transactions().Len(), "no tx included")
require.Equal(t, uint64(3), engine.l2Chain.CurrentBlock().NumberU64(), "built 3 blocks")
require.Zero(t, engine.l2Chain.GetBlockByHash(engine.l2Chain.CurrentBlock().Hash()).Transactions().Len(), "no tx included")
require.Equal(t, uint64(3), engine.l2Chain.CurrentBlock().Number.Uint64(), "built 3 blocks")
}
func TestL2EngineAPIFail(gt *testing.T) {
......
......@@ -55,7 +55,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
......@@ -76,7 +76,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
origin := miner.l1Chain.CurrentBlock()
// L2 makes blocks to catch up
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime < origin.Time() {
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime < origin.Time {
makeL2BlockWithAliceTx()
require.Equal(t, uint64(0), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "no L1 origin change before time matches")
}
......@@ -89,7 +89,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
sequencer.ActL1HeadSignal(t)
// Make blocks up until the sequencer drift is about to be exceeded, but keep the old L1 origin
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time()+sd.RollupCfg.MaxSequencerDrift {
for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.RollupCfg.MaxSequencerDrift {
sequencer.ActL2KeepL1Origin(t)
makeL2BlockWithAliceTx()
require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin")
......
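The two loops above encode the sequencer drift rule being tested: an L2 block may keep the old L1 origin only while its timestamp stays at or below the origin's timestamp plus MaxSequencerDrift; past that bound the sequencer must adopt a newer L1 origin.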
......@@ -41,20 +41,20 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) {
miner.ActL1SetFeeRecipient(common.Address{'A'})
// Make two sequence windows' worth of empty L1 blocks. After we pass the first sequence window, the L2 chain should get blocks
for miner.l1Chain.CurrentBlock().NumberU64() < sd.RollupCfg.SeqWindowSize*2 {
for miner.l1Chain.CurrentBlock().Number.Uint64() < sd.RollupCfg.SeqWindowSize*2 {
miner.ActL1StartBlock(10)(t)
miner.ActL1EndBlock(t)
verifier.ActL2PipelineFull(t)
l1Head := miner.l1Chain.CurrentBlock().NumberU64()
l1Head := miner.l1Chain.CurrentBlock().Number.Uint64()
expectedL1Origin := uint64(0)
// as soon as we complete the sequence window, we force-adopt the L1 origin
if l1Head >= sd.RollupCfg.SeqWindowSize {
expectedL1Origin = l1Head - sd.RollupCfg.SeqWindowSize
}
require.Equal(t, expectedL1Origin, verifier.SyncStatus().SafeL2.L1Origin.Number, "L1 origin is forced in, given enough L1 blocks pass by")
require.LessOrEqual(t, miner.l1Chain.GetBlockByNumber(expectedL1Origin).Time(), engine.l2Chain.CurrentBlock().Time(), "L2 time higher than L1 origin time")
require.LessOrEqual(t, miner.l1Chain.GetBlockByNumber(expectedL1Origin).Time(), engine.l2Chain.CurrentBlock().Time, "L2 time higher than L1 origin time")
}
tip2N := verifier.SyncStatus()
......@@ -75,7 +75,7 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) {
verifier.ActL2PipelineFull(t)
require.Equal(t, tip2N.SafeL2, verifier.SyncStatus().SafeL2)
for miner.l1Chain.CurrentBlock().NumberU64() < sd.RollupCfg.SeqWindowSize*2 {
for miner.l1Chain.CurrentBlock().Number.Uint64() < sd.RollupCfg.SeqWindowSize*2 {
miner.ActL1StartBlock(10)(t)
miner.ActL1EndBlock(t)
}
......
......@@ -80,7 +80,7 @@ func TestBatcherKeyRotation(gt *testing.T) {
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.SysCfgOwner)(t)
miner.ActL1EndBlock(t)
cfgChangeL1BlockNum := miner.l1Chain.CurrentBlock().NumberU64()
cfgChangeL1BlockNum := miner.l1Chain.CurrentBlock().Number.Uint64()
// sequence L2 blocks, and submit with new batcher
sequencer.ActL1HeadSignal(t)
......@@ -200,7 +200,7 @@ func TestGPOParamsChange(gt *testing.T) {
miner.ActEmptyBlock(t)
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
basefee := miner.l1Chain.CurrentBlock().BaseFee()
basefee := miner.l1Chain.CurrentBlock().BaseFee
// alice makes a L2 tx, sequencer includes it
alice.ActResetTxOpts(t)
......@@ -238,7 +238,7 @@ func TestGPOParamsChange(gt *testing.T) {
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.SysCfgOwner)(t)
miner.ActL1EndBlock(t)
basefeeGPOUpdate := miner.l1Chain.CurrentBlock().BaseFee()
basefeeGPOUpdate := miner.l1Chain.CurrentBlock().BaseFee
// build empty L2 chain, up to but excluding the L2 block with the L1 origin that processes the GPO change
sequencer.ActL1HeadSignal(t)
......@@ -274,7 +274,7 @@ func TestGPOParamsChange(gt *testing.T) {
// build more L2 blocks, with new L1 origin
miner.ActEmptyBlock(t)
basefee = miner.l1Chain.CurrentBlock().BaseFee()
basefee = miner.l1Chain.CurrentBlock().BaseFee
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
// and Alice makes a tx again
......@@ -313,7 +313,7 @@ func TestGasLimitChange(gt *testing.T) {
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
oldGasLimit := seqEngine.l2Chain.CurrentBlock().GasLimit()
oldGasLimit := seqEngine.l2Chain.CurrentBlock().GasLimit
require.Equal(t, oldGasLimit, uint64(dp.DeployConfig.L2GenesisBlockGasLimit))
// change gas limit on L1 to triple what it was
......@@ -335,12 +335,12 @@ func TestGasLimitChange(gt *testing.T) {
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1HeadExcl(t)
require.Equal(t, oldGasLimit, seqEngine.l2Chain.CurrentBlock().GasLimit())
require.Equal(t, oldGasLimit, seqEngine.l2Chain.CurrentBlock().GasLimit)
require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number)
// now include the L1 block with the gaslimit change, and see if it changes as expected
sequencer.ActBuildToL1Head(t)
require.Equal(t, oldGasLimit*3, seqEngine.l2Chain.CurrentBlock().GasLimit())
require.Equal(t, oldGasLimit*3, seqEngine.l2Chain.CurrentBlock().GasLimit)
require.Equal(t, uint64(2), sequencer.SyncStatus().UnsafeL2.L1Origin.Number)
// now submit all this to L1, and see if a verifier can sync and reproduce it
......
......@@ -132,7 +132,7 @@ func runCrossLayerUserTest(gt *testing.T, test regolithScheduledTest) {
seq.ActL1HeadSignal(t)
// sync the sequencer and build enough blocks to adopt the latest L1 origin
for seq.SyncStatus().UnsafeL2.L1Origin.Number < miner.l1Chain.CurrentBlock().NumberU64() {
for seq.SyncStatus().UnsafeL2.L1Origin.Number < miner.l1Chain.CurrentBlock().Number.Uint64() {
seq.ActL2StartBlock(t)
seq.ActL2EndBlock(t)
}
......
......@@ -169,7 +169,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
if alloc.PrefundTestUsers {
for _, addr := range deployParams.Addresses.All() {
l1Genesis.Alloc[addr] = core.GenesisAccount{
Balance: Ether(1e6),
Balance: Ether(1e12),
}
}
}
......@@ -184,7 +184,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
if alloc.PrefundTestUsers {
for _, addr := range deployParams.Addresses.All() {
l2Genesis.Alloc[addr] = core.GenesisAccount{
Balance: Ether(1e6),
Balance: Ether(1e12),
}
}
}
......
......@@ -27,10 +27,10 @@ func TestSetup(t *testing.T) {
alloc := &AllocParams{PrefundTestUsers: true}
sd := Setup(t, dp, alloc)
require.Contains(t, sd.L1Cfg.Alloc, dp.Addresses.Alice)
require.Equal(t, sd.L1Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e6))
require.Equal(t, sd.L1Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e12))
require.Contains(t, sd.L2Cfg.Alloc, dp.Addresses.Alice)
require.Equal(t, sd.L2Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e6))
require.Equal(t, sd.L2Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e12))
require.Contains(t, sd.L1Cfg.Alloc, predeploys.DevOptimismPortalAddr)
require.Contains(t, sd.L2Cfg.Alloc, predeploys.L1BlockAddr)
......
......@@ -160,11 +160,11 @@ func (f *fakeSafeFinalizedL1) Start() error {
case head := <-headChanges:
num := head.Block.NumberU64()
if num > f.finalizedDistance {
toFinalize := f.eth.BlockChain().GetBlockByNumber(num - f.finalizedDistance)
toFinalize := f.eth.BlockChain().GetHeaderByNumber(num - f.finalizedDistance)
f.eth.BlockChain().SetFinalized(toFinalize)
}
if num > f.safeDistance {
toSafe := f.eth.BlockChain().GetBlockByNumber(num - f.safeDistance)
toSafe := f.eth.BlockChain().GetHeaderByNumber(num - f.safeDistance)
f.eth.BlockChain().SetSafe(toSafe)
}
case <-quit:
......
FROM golang:1.18.0-alpine3.15 as builder
FROM golang:1.19.0-alpine3.15 as builder
# build from root of repo
COPY ./op-exporter /app
......
FROM --platform=$BUILDPLATFORM golang:1.18.0-alpine3.15 as builder
FROM --platform=$BUILDPLATFORM golang:1.19.0-alpine3.15 as builder
ARG VERSION=v0.0.0
......
......@@ -14,9 +14,17 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
var ErrMaxFrameSizeTooSmall = errors.New("maxSize is too small to fit the fixed frame overhead")
var ErrNotDepositTx = errors.New("first transaction in block is not a deposit tx")
var ErrTooManyRLPBytes = errors.New("batch would cause RLP bytes to go over limit")
// FrameV0OverHeadSize is the absolute minimum size of a frame.
// This is the fixed overhead frame size, calculated as specified
// in the [Frame Format] specs: 16 + 2 + 4 + 1 = 23 bytes.
//
// [Frame Format]: https://github.com/ethereum-optimism/optimism/blob/develop/specs/derivation.md#frame-format
const FrameV0OverHeadSize = 23
type ChannelOut struct {
id ChannelID
// Frame ID of the next frame to emit. Increment after emitting
......@@ -141,19 +149,23 @@ func (co *ChannelOut) Close() error {
// OutputFrame writes a frame to w with a given max size and returns the frame
// number.
// Use `ReadyBytes`, `Flush`, and `Close` to modify the ready buffer.
// Returns io.EOF when the channel is closed & there are no more frames
// Returns an error if the `maxSize` < FrameV0OverHeadSize.
// Returns io.EOF when the channel is closed & there are no more frames.
// Returns nil if there is still more buffered data.
// Returns and error if it ran into an error during processing.
// Returns an error if it ran into an error during processing.
func (co *ChannelOut) OutputFrame(w *bytes.Buffer, maxSize uint64) (uint16, error) {
f := Frame{
ID: co.id,
FrameNumber: uint16(co.frame),
}
// Check that the maxSize is large enough for the frame overhead size.
if maxSize < FrameV0OverHeadSize {
return 0, ErrMaxFrameSizeTooSmall
}
// Copy data from the local buffer into the frame data buffer
// Don't go past the maxSize with the fixed frame overhead.
// Fixed overhead: 16 + 2 + 4 + 1 = 23 bytes.
maxDataSize := maxSize - 23
maxDataSize := maxSize - FrameV0OverHeadSize
if maxDataSize > uint64(co.buf.Len()) {
maxDataSize = uint64(co.buf.Len())
// If we are closed & will not spill past the current frame
......
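The 23-byte overhead guarded against here is the fixed frame header, which the linked Frame Format spec breaks down as a 16-byte channel ID, a 2-byte frame number, a 4-byte frame data length, and a 1-byte is_last flag. A minimal caller sketch (not part of this commit; drainFrames is hypothetical, it assumes these identifiers live in op-node's rollup/derive package, and it assumes OutputFrame still writes the final frame before returning io.EOF, as the implementation above suggests):

package sketch

import (
	"bytes"
	"io"

	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)

// drainFrames splits a (typically closed) channel into frames of at most maxFrameSize bytes.
func drainFrames(co *derive.ChannelOut, maxFrameSize uint64) ([][]byte, error) {
	// Require room for at least one data byte beyond the fixed 23-byte header,
	// otherwise the loop below could never make progress.
	if maxFrameSize <= derive.FrameV0OverHeadSize {
		return nil, derive.ErrMaxFrameSizeTooSmall
	}
	var frames [][]byte
	for {
		var buf bytes.Buffer
		_, err := co.OutputFrame(&buf, maxFrameSize)
		if err == io.EOF {
			// The final frame is still written to buf before io.EOF is returned.
			return append(frames, buf.Bytes()), nil
		}
		if err != nil {
			return nil, err
		}
		frames = append(frames, buf.Bytes())
	}
}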
......@@ -29,6 +29,22 @@ func TestChannelOutAddBlock(t *testing.T) {
})
}
// TestOutputFrameSmallMaxSize tests that calling [OutputFrame] with a small
// max size that is below the fixed frame size overhead of 23, will return
// an error.
func TestOutputFrameSmallMaxSize(t *testing.T) {
cout, err := NewChannelOut()
require.NoError(t, err)
// Call OutputFrame with the range of small max size values that err
var w bytes.Buffer
for i := 0; i < 23; i++ {
fid, err := cout.OutputFrame(&w, uint64(i))
require.ErrorIs(t, err, ErrMaxFrameSizeTooSmall)
require.Zero(t, fid)
}
}
// TestRLPByteLimit ensures that stream encoder is properly limiting the length.
// It will decode the input if `len(input) <= inputLimit`.
func TestRLPByteLimit(t *testing.T) {
......
FROM --platform=$BUILDPLATFORM golang:1.18.0-alpine3.15 as builder
FROM --platform=$BUILDPLATFORM golang:1.19.0-alpine3.15 as builder
ARG VERSION=v0.0.0
......
FROM golang:1.18.0-alpine3.15 as builder
FROM golang:1.19.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers
......
......@@ -10,6 +10,7 @@ import (
"path/filepath"
"strings"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash"
......@@ -80,11 +81,11 @@ type HeadFn func(headState *state.StateDB) error
// and updates the blockchain header indexes to reflect the new state-root, so geth will believe the cheat
// (unless it ever re-applies the block).
func (ch *Cheater) RunAndClose(fn HeadFn) error {
preBlock := ch.Blockchain.CurrentBlock()
if a, b := preBlock.NumberU64(), ch.Blockchain.Genesis().NumberU64(); a <= b {
preHeader := ch.Blockchain.CurrentBlock()
if a, b := preHeader.Number.Uint64(), ch.Blockchain.Genesis().NumberU64(); a <= b {
return fmt.Errorf("cheating at genesis (head block %d <= genesis block %d) is not supported", a, b)
}
state, err := ch.Blockchain.StateAt(preBlock.Root())
state, err := ch.Blockchain.StateAt(preHeader.Root)
if err != nil {
_ = ch.Close()
return fmt.Errorf("failed to look up head state: %w", err)
......@@ -103,7 +104,7 @@ func (ch *Cheater) RunAndClose(fn HeadFn) error {
_ = ch.Close()
return fmt.Errorf("failed to commit state change: %w", err)
}
header := preBlock.Header()
header := preHeader // copy the header
header.Root = stateRoot
blockHash := header.Hash()
......@@ -115,14 +116,15 @@ func (ch *Cheater) RunAndClose(fn HeadFn) error {
// based on core.BlockChain.writeHeadBlock:
// Add the block to the canonical chain number scheme and mark as the head
batch := ch.DB.NewBatch()
if ch.Blockchain.CurrentFinalizedBlock().Hash() == preBlock.Hash() {
preID := eth.BlockID{Hash: preHeader.Hash(), Number: preHeader.Number.Uint64()}
if ch.Blockchain.CurrentFinalBlock().Hash() == preID.Hash {
rawdb.WriteFinalizedBlockHash(batch, blockHash)
}
rawdb.DeleteHeaderNumber(batch, preBlock.Hash())
rawdb.DeleteHeaderNumber(batch, preHeader.Hash())
rawdb.WriteHeadHeaderHash(batch, blockHash)
rawdb.WriteHeadFastBlockHash(batch, blockHash)
rawdb.WriteCanonicalHash(batch, blockHash, preBlock.NumberU64())
rawdb.WriteHeaderNumber(batch, blockHash, preBlock.NumberU64())
rawdb.WriteCanonicalHash(batch, blockHash, preID.Number)
rawdb.WriteHeaderNumber(batch, blockHash, preID.Number)
rawdb.WriteHeader(batch, header)
// not keyed by blockhash, and we didn't remove any txs, so we just leave this one as-is.
// rawdb.WriteTxLookupEntriesByBlock(batch, block)
......@@ -131,17 +133,17 @@ func (ch *Cheater) RunAndClose(fn HeadFn) error {
// Geth stores the TD for each block separately from the block itself. We must update this
// manually, otherwise Geth thinks we haven't reached TTD yet and tries to build a block
// using Clique consensus, which causes a panic.
rawdb.WriteTd(batch, blockHash, preBlock.NumberU64(), ch.Blockchain.GetTd(preBlock.Hash(), preBlock.NumberU64()))
rawdb.WriteTd(batch, blockHash, preID.Number, ch.Blockchain.GetTd(preID.Hash, preID.Number))
// Need to copy over receipts since they are keyed by block hash.
receipts := rawdb.ReadReceipts(ch.DB, preBlock.Hash(), preBlock.NumberU64(), ch.Blockchain.Config())
rawdb.WriteReceipts(batch, blockHash, preBlock.NumberU64(), receipts)
receipts := rawdb.ReadReceipts(ch.DB, preID.Hash, preID.Number, ch.Blockchain.Config())
rawdb.WriteReceipts(batch, blockHash, preID.Number, receipts)
// Geth maintains an internal mapping between block bodies and their hashes. None of the database
// accessors above update this mapping, so we need to do it manually.
oldKey := blockBodyKey(preBlock.NumberU64(), preBlock.Hash())
oldBody := rawdb.ReadBodyRLP(ch.DB, preBlock.Hash(), preBlock.NumberU64())
newKey := blockBodyKey(preBlock.NumberU64(), blockHash)
oldKey := blockBodyKey(preID.Number, preID.Hash)
oldBody := rawdb.ReadBodyRLP(ch.DB, preID.Hash, preID.Number)
newKey := blockBodyKey(preID.Number, blockHash)
if err := batch.Delete(oldKey); err != nil {
return fmt.Errorf("error deleting old block body key")
}
......
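The re-keying above follows from geth storing receipts and block bodies under keys derived from the block hash; once the head header's hash changes, each of those entries has to be copied to the new key. A minimal sketch of the receipts half (not part of this commit; rekeyReceipts is hypothetical, the rawdb accessors are go-ethereum's):

package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)

// rekeyReceipts copies the receipts stored under the old block hash to the new hash;
// the receipt table is keyed by (number, hash), so a changed head hash invalidates the old key.
func rekeyReceipts(db ethdb.Database, batch ethdb.Batch, oldHash, newHash common.Hash, number uint64, cfg *params.ChainConfig) {
	receipts := rawdb.ReadReceipts(db, oldHash, number, cfg)
	rawdb.WriteReceipts(batch, newHash, number, receipts)
}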
......@@ -16,7 +16,7 @@ WORKDIR /opt/foundry
# Only diff from upstream docker image is this clone instead
# of COPY. We select a specific commit to use.
RUN git clone https://github.com/foundry-rs/foundry.git . \
&& git checkout 8f3fca9c608d58981daaffe11e7f8076644cb753
&& git checkout da2392e58bb8a7fefeba46b40c4df1afad8ccd22
RUN source $HOME/.profile && \
cargo build --release && \
......
ignores: [
"@babel/eslint-parser",
"@typescript-eslint/parser",
"eslint-plugin-import",
"eslint-plugin-unicorn",
"eslint-plugin-jsdoc",
"eslint-plugin-prefer-arrow",
"eslint-plugin-react",
"@typescript-eslint/eslint-plugin",
"eslint-config-prettier",
"eslint-plugin-prettier"
]
......@@ -17,7 +17,7 @@
"lint": "yarn lint:fix && yarn lint:check",
"pre-commit": "lint-staged",
"test": "ts-mocha test/*.spec.ts",
"test:coverage": "echo 'no coverage'"
"test:coverage": "nyc ts-mocha test/*.spec.ts && nyc merge .nyc_output coverage.json"
},
"keywords": [
"optimism",
......@@ -48,8 +48,7 @@
"pino": "^6.11.3",
"pino-multi-stream": "^5.3.0",
"pino-sentry": "^0.7.0",
"prom-client": "^13.1.0",
"qs": "^6.10.5"
"prom-client": "^13.1.0"
},
"devDependencies": {
"@ethersproject/abstract-provider": "^5.7.0",
......
......@@ -3,11 +3,11 @@ import request from 'supertest'
import chai = require('chai')
const expect = chai.expect
import { Logger, Metrics, createMetricsServer } from '../src'
import { Logger, LegacyMetrics, createMetricsServer } from '../src'
describe('Metrics', () => {
it('should serve metrics', async () => {
const metrics = new Metrics({
const metrics = new LegacyMetrics({
prefix: 'test_metrics',
})
const registry = metrics.registry
......
......@@ -155,6 +155,33 @@ contract SystemDictator is OwnableUpgradeable {
currentStep++;
}
/**
* @notice Constructor required to ensure that the implementation of the SystemDictator is
* initialized upon deployment.
*/
constructor() {
// Using this shorter variable as an alias for address(0) just prevents us from having to
// use a new line for every single parameter.
address zero = address(0);
initialize(
DeployConfig(
GlobalConfig(AddressManager(zero), ProxyAdmin(zero), zero, zero),
ProxyAddressConfig(zero, zero, zero, zero, zero, zero, zero),
ImplementationAddressConfig(
L2OutputOracle(zero),
OptimismPortal(payable(zero)),
L1CrossDomainMessenger(zero),
L1StandardBridge(payable(zero)),
OptimismMintableERC20Factory(zero),
L1ERC721Bridge(zero),
PortalSender(zero),
SystemConfig(zero)
),
SystemConfigConfig(zero, 0, 0, bytes32(0), 0, zero)
)
);
}
/**
* @param _config System configuration.
*/
......
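For context: initializing the implementation from its own constructor with an all-zero DeployConfig follows the usual upgradeable-proxy safety pattern; presumably the intent is that the bare SystemDictator implementation behind the proxy is marked as already initialized, so no third party can call initialize on it directly after deployment.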
......@@ -139,16 +139,20 @@ contract Bytes_slice_Test is Test {
vm.assume(_length <= _input.length - _start);
// Grab the free memory pointer before the slice operation
uint256 initPtr;
uint64 initPtr;
assembly {
initPtr := mload(0x40)
}
uint64 expectedPtr = uint64(initPtr + 0x20 + ((_length + 0x1f) & ~uint256(0x1f)));
// Ensure that all memory outside of the expected range is safe.
vm.expectSafeMemory(initPtr, expectedPtr);
// Slice the input bytes array from `_start` to `_start + _length`
bytes memory slice = Bytes.slice(_input, _start, _length);
// Grab the free memory pointer after the slice operation
uint256 finalPtr;
uint64 finalPtr;
assembly {
finalPtr := mload(0x40)
}
......@@ -165,10 +169,11 @@ contract Bytes_slice_Test is Test {
// Note that we use a slightly less efficient, but equivalent method of rounding
// up `_length` to the next multiple of 32 than is used in the `slice` function.
// This is to diff test the method used in `slice`.
assertEq(finalPtr, initPtr + 0x20 + (((_length + 0x1F) >> 5) << 5));
uint64 _expectedPtr = uint64(initPtr + 0x20 + (((_length + 0x1F) >> 5) << 5));
assertEq(finalPtr, _expectedPtr);
// Sanity check for equivalence of the rounding methods.
assertEq(((_length + 0x1F) >> 5) << 5, (_length + 0x1F) & ~uint256(0x1F));
assertEq(_expectedPtr, expectedPtr);
}
// The slice length should be equal to `_length`
......
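The two rounding expressions cross-checked above are equivalent ways of rounding `_length` up to the next multiple of 32: both clear the low five bits of `_length + 31`. The same identity in a standalone Go sketch (not part of this commit; both helper names are hypothetical):

package sketch

// roundUp32Shift rounds n up to the next multiple of 32 by shifting away the low five bits.
func roundUp32Shift(n uint64) uint64 { return ((n + 0x1F) >> 5) << 5 }

// roundUp32Mask does the same by masking the low five bits; &^ is Go's AND NOT operator.
func roundUp32Mask(n uint64) uint64 { return (n + 0x1F) &^ 0x1F }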
......@@ -12,7 +12,7 @@
"l2OutputOracleStartingTimestamp": -1,
"l2OutputOracleProposer": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8",
"l2OutputOracleChallenger": "0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65",
"l2GenesisBlockGasLimit": "0xE4E1C0",
"l2GenesisBlockGasLimit": "0x1c9c380",
"l1BlockTime": 3,
"cliqueSignerAddress": "0xca062b0fd91172d89bcd4bb084ac4e21972cc467",
"baseFeeVaultRecipient": "0xBcd4042DE499D14e55001CcbB24a551F3b954096",
......
......@@ -28,7 +28,7 @@
"governanceTokenSymbol": "OP",
"governanceTokenOwner": "0x038a8825A3C3B0c08d52Cc76E5E361953Cf6Dc76",
"l2GenesisBlockGasLimit": "0x17D7840",
"l2GenesisBlockGasLimit": "0x1c9c380",
"l2GenesisBlockCoinbase": "0x4200000000000000000000000000000000000011",
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
......
......@@ -40,7 +40,7 @@
"governanceTokenName": "Optimism",
"governanceTokenOwner": "ADMIN",
"l2GenesisBlockGasLimit": "0x17D7840",
"l2GenesisBlockGasLimit": "0x1c9c380",
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
"l2GenesisRegolithTimeOffset": "0x0",
......
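For reference, the three l2GenesisBlockGasLimit changes above converge on the same target: 0xE4E1C0 is 15,000,000 and 0x17D7840 is 25,000,000, and both are raised to 0x1c9c380, i.e. a 30,000,000 gas limit for the L2 genesis block.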
......@@ -77,7 +77,7 @@
"dotenv": "^16.0.0",
"ds-test": "https://github.com/dapphub/ds-test.git#9310e879db8ba3ea6d5c6489a579118fd264a3f5",
"ethereum-waffle": "^3.0.0",
"forge-std": "https://github.com/foundry-rs/forge-std.git#fd86115ed6aba8e234ee0fb86c12fe35eff0b2a0",
"forge-std": "https://github.com/foundry-rs/forge-std.git#46264e9788017fc74f9f58b7efa0bc6e1df6d410",
"glob": "^7.1.6",
"hardhat-deploy": "^0.11.4",
"solhint": "^3.3.7",
......
......@@ -22,6 +22,7 @@
"scripts": {
"build": "yarn build:contracts && yarn copy:contracts && yarn autogen:artifacts && yarn build:typescript",
"build:typescript": "tsc -p ./tsconfig.json",
"build:bindings": "./scripts/legacy-bindings.sh ./../../op-bindings/legacy-bindings",
"build:contracts": "hardhat compile --show-stack-traces",
"autogen:markdown": "ts-node scripts/generate-markdown.ts",
"autogen:artifacts": "ts-node scripts/generate-artifacts.ts && ts-node scripts/generate-deployed-artifacts.ts",
......
#!/bin/bash

# Generates Go bindings for legacy contracts (invoked by the new "build:bindings"
# package.json script above). Requires go-ethereum's abigen on the PATH.
OUTDIR="$1"
if [ ! -d "$OUTDIR" ]; then
  echo "Must pass output directory"
  exit 1
fi

CONTRACTS=("CanonicalTransactionChain")
PKG=legacy_bindings

for contract in "${CONTRACTS[@]}"; do
  # Dump the contract bytecode to a temp file and feed it, together with the ABI,
  # into abigen to produce the Go binding at $OUTDIR/<contract>.go.
  TMPFILE=$(mktemp)
  npx hardhat inspect "$contract" bytecode > "$TMPFILE"
  ABI=$(npx hardhat inspect "$contract" abi)
  outfile="$OUTDIR/$contract.go"
  echo "$ABI" | abigen --abi - --pkg "$PKG" --bin "$TMPFILE" --type "$contract" --out "$outfile"
  rm "$TMPFILE"
done