Commit c68e365a authored by mergify[bot], committed by GitHub

Merge branch 'develop' into aj/channel-pruning

parents 5ae0d8cb 00b799bc
---
'@eth-optimism/batch-submitter-service': patch
---
fix flag name for MaxStateRootElements in batch-submitter
fix log package for proposer
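The flag fix is a copy-paste bug: the Max field of the config was populated from the Min flag's name. A minimal, hypothetical sketch of the corrected wiring in the urfave/cli v1 style the batch submitter uses (flag names and types here are illustrative, not the real definitions in the batch-submitter flags package):

package config

import "github.com/urfave/cli"

// Illustrative flags; the real definitions live in the batch submitter's flags package.
var (
	MinStateRootElementsFlag = cli.Uint64Flag{Name: "min-state-root-elements"}
	MaxStateRootElementsFlag = cli.Uint64Flag{Name: "max-state-root-elements"}
)

type Config struct {
	MinStateRootElements uint64
	MaxStateRootElements uint64
}

// NewConfig reads every field from its own flag; the bug fixed below read the
// Max field from the Min flag's value instead.
func NewConfig(ctx *cli.Context) Config {
	return Config{
		MinStateRootElements: ctx.GlobalUint64(MinStateRootElementsFlag.Name),
		MaxStateRootElements: ctx.GlobalUint64(MaxStateRootElementsFlag.Name),
	}
}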
@@ -209,7 +209,7 @@ func NewConfig(ctx *cli.Context) (Config, error) {
MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeFlag.Name),
MaxPlaintextBatchSize: ctx.GlobalUint64(flags.MaxPlaintextBatchSizeFlag.Name),
MinStateRootElements: ctx.GlobalUint64(flags.MinStateRootElementsFlag.Name),
- MaxStateRootElements: ctx.GlobalUint64(flags.MinStateRootElementsFlag.Name),
+ MaxStateRootElements: ctx.GlobalUint64(flags.MaxStateRootElementsFlag.Name),
MaxBatchSubmissionTime: ctx.GlobalDuration(flags.MaxBatchSubmissionTimeFlag.Name),
PollInterval: ctx.GlobalDuration(flags.PollIntervalFlag.Name),
NumConfirmations: ctx.GlobalUint64(flags.NumConfirmationsFlag.Name),
...
@@ -13,13 +13,13 @@ import (
"github.com/ethereum-optimism/optimism/bss-core/metrics"
"github.com/ethereum-optimism/optimism/bss-core/txmgr"
l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient"
- "github.com/ethereum-optimism/optimism/l2geth/log"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
+ "github.com/ethereum/go-ethereum/log"
)
// stateRootSize is the size in bytes of a state root.
...
module github.com/ethereum-optimism/optimism

- go 1.18
+ go 1.19

require (
github.com/btcsuite/btcd v0.23.3
@@ -9,7 +9,7 @@ require (
github.com/docker/docker v20.10.21+incompatible
github.com/docker/go-connections v0.4.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
- github.com/ethereum/go-ethereum v1.11.2
+ github.com/ethereum/go-ethereum v1.11.4
github.com/fsnotify/fsnotify v1.6.0
github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.5.9
@@ -69,11 +69,11 @@ require (
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
- github.com/go-kit/kit v0.10.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
+ github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/golang/mock v1.6.0 // indirect
@@ -86,7 +86,6 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-bexpr v0.1.11 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect
- github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/huin/goupnp v1.1.0 // indirect
github.com/influxdata/influxdb v1.8.3 // indirect
@@ -147,7 +146,6 @@ require (
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
- github.com/prometheus/tsdb v0.10.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-18 v0.2.0 // indirect
github.com/quic-go/qtls-go1-19 v0.2.0 // indirect
@@ -191,6 +189,6 @@ require (
nhooyr.io/websocket v1.8.7 // indirect
)

- replace github.com/ethereum/go-ethereum v1.11.2 => github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230308025559-13ee9ab9153b
+ replace github.com/ethereum/go-ethereum v1.11.4 => github.com/ethereum-optimism/op-geth v1.11.2-de8c5df46.0.20230321002540-11f0554a4313

- //replace github.com/ethereum/go-ethereum v1.11.2 => ../go-ethereum
+ //replace github.com/ethereum/go-ethereum v1.11.4 => ../go-ethereum
- FROM --platform=$BUILDPLATFORM golang:1.18.0-alpine3.15 as builder
+ FROM --platform=$BUILDPLATFORM golang:1.19.0-alpine3.15 as builder
ARG VERSION=v0.0.0
...
- FROM golang:1.18.0-alpine3.15 as builder
+ FROM golang:1.19.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
...
@@ -109,7 +109,7 @@ func doMigration(mutableDB *state.StateDB, dbFactory DBFactory, addresses []comm
outCh := make(chan accountData)
// Channel to receive errors from each iteration job.
errCh := make(chan error, checkJobs)
- // Channel to cancel all iteration jobs as well as the collector.
+ // Channel to cancel all iteration jobs.
cancelCh := make(chan struct{})
// Define a worker function to iterate over each partition.
@@ -244,16 +244,17 @@ func doMigration(mutableDB *state.StateDB, dbFactory DBFactory, addresses []comm
go worker(start, end)
}
- // Make a channel to make sure that the collector process completes.
- collectorCloseCh := make(chan struct{})
+ // Make a channel to track when collector process completes.
+ collectorClosedCh := make(chan struct{})
+ // Make a channel to cancel the collector process.
+ collectorCancelCh := make(chan struct{})
// Keep track of the last error seen.
var lastErr error
- // There are multiple ways that the cancel channel can be closed:
- // - if we receive an error from the errCh
- // - if the collector process completes
- // To prevent panics, we wrap the close in a sync.Once.
+ // The cancel channel can be closed if any of the workers returns an error.
+ // We wrap the close in a sync.Once to ensure that it's only closed once.
var cancelOnce sync.Once
// Create a map of accounts we've seen so that we can filter out duplicates.
@@ -268,7 +269,7 @@ func doMigration(mutableDB *state.StateDB, dbFactory DBFactory, addresses []comm
progress := util.ProgressLogger(1000, "Migrated OVM_ETH storage slot")
go func() {
defer func() {
- collectorCloseCh <- struct{}{}
+ collectorClosedCh <- struct{}{}
}()
for {
select {
@@ -291,10 +292,20 @@ func doMigration(mutableDB *state.StateDB, dbFactory DBFactory, addresses []comm
seenAccounts[account.address] = true
case err := <-errCh:
cancelOnce.Do(func() {
+ lastErr = err
close(cancelCh)
- lastErr = err
})
- case <-cancelCh:
+ case <-collectorCancelCh:
+ // Explicitly drain the error channel. Since the error channel is buffered, it's possible
+ // for the wg.Wait() call below to unblock and cancel this goroutine before the error gets
+ // processed by the case statement above.
+ for len(errCh) > 0 {
+ err := <-errCh
+ if lastErr == nil {
+ lastErr = err
+ }
+ }
return
}
}
@@ -302,13 +313,10 @@ func doMigration(mutableDB *state.StateDB, dbFactory DBFactory, addresses []comm
// Wait for the workers to finish.
wg.Wait()
- // Close the cancel channel to signal the collector process to stop.
- cancelOnce.Do(func() {
- close(cancelCh)
- })
- // Wait for the collector process to finish.
- <-collectorCloseCh
+ // Close the collector, and wait for it to finish.
+ close(collectorCancelCh)
+ <-collectorClosedCh
// If we saw an error, return it.
if lastErr != nil {
...
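For readers following the concurrency change above, here is a compressed, self-contained sketch (not the repository's code) of the shape doMigration converges on: workers push errors into a buffered channel, the first error closes cancelCh exactly once via sync.Once, and the collector gets its own cancel/closed channels so it can drain any errors still sitting in the buffer after wg.Wait() returns.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// run mirrors the worker/collector layout sketched above. All names are
// illustrative; the real function also streams account data on a second channel.
func run(jobs int) error {
	errCh := make(chan error, jobs)          // buffered, like errCh in doMigration
	cancelCh := make(chan struct{})          // tells workers to stop early
	collectorCancelCh := make(chan struct{}) // tells the collector to stop
	collectorClosedCh := make(chan struct{}) // collector signals it has exited

	var (
		cancelOnce sync.Once
		lastErr    error
		wg         sync.WaitGroup
	)

	for i := 0; i < jobs; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			select {
			case <-cancelCh:
				return // another worker already failed
			default:
			}
			if i == 2 { // pretend one worker hits an error
				errCh <- errors.New("worker 2 failed")
			}
		}(i)
	}

	go func() {
		defer close(collectorClosedCh)
		for {
			select {
			case err := <-errCh:
				cancelOnce.Do(func() {
					lastErr = err
					close(cancelCh)
				})
			case <-collectorCancelCh:
				// Drain errors that were still buffered when the workers
				// finished, so they are not lost on shutdown.
				for len(errCh) > 0 {
					if err := <-errCh; lastErr == nil {
						lastErr = err
					}
				}
				return
			}
		}
	}()

	wg.Wait()
	close(collectorCancelCh)
	<-collectorClosedCh
	return lastErr
}

func main() {
	fmt.Println(run(4)) // prints "worker 2 failed"
}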
@@ -228,44 +228,58 @@ func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Addre
}
}

- // TestMigrateBalancesRandom tests that the pre-check balances function works
+ // TestMigrateBalancesRandomOK tests that the pre-check balances function works
// with random addresses. This test makes sure that the partition logic doesn't
- // miss anything.
- func TestMigrateBalancesRandom(t *testing.T) {
+ // miss anything, and helps detect concurrency errors.
+ func TestMigrateBalancesRandomOK(t *testing.T) {
for i := 0; i < 100; i++ {
- addresses := make([]common.Address, 0)
- stateBalances := make(map[common.Address]*big.Int)
- allowances := make([]*crossdomain.Allowance, 0)
- stateAllowances := make(map[common.Address]common.Address)
- totalSupply := big.NewInt(0)
- for j := 0; j < rand.Intn(10000); j++ {
- addr := randAddr(t)
- addresses = append(addresses, addr)
- stateBalances[addr] = big.NewInt(int64(rand.Intn(1_000_000)))
- totalSupply = new(big.Int).Add(totalSupply, stateBalances[addr])
- }
- for j := 0; j < rand.Intn(1000); j++ {
- addr := randAddr(t)
- to := randAddr(t)
- allowances = append(allowances, &crossdomain.Allowance{
- From: addr,
- To: to,
- })
- stateAllowances[addr] = to
- }
+ addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)

db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
require.NoError(t, err)

for addr, expBal := range stateBalances {
actBal := db.GetBalance(addr)
require.EqualValues(t, expBal, actBal)
}
}
}
+
+ // TestMigrateBalancesRandomMissing tests that the pre-check balances function works
+ // with random addresses when some of them are missing. This helps make sure that the
+ // partition logic doesn't miss anything, and helps detect concurrency errors.
+ func TestMigrateBalancesRandomMissing(t *testing.T) {
+ for i := 0; i < 100; i++ {
+ addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)
+
+ if len(addresses) == 0 {
+ continue
+ }
+
+ // Remove a random address from the list of witnesses
+ idx := rand.Intn(len(addresses))
+ addresses = append(addresses[:idx], addresses[idx+1:]...)
+
+ db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
+ err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
+ require.ErrorContains(t, err, "unknown storage slot")
+ }
+
+ for i := 0; i < 100; i++ {
+ addresses, stateBalances, allowances, stateAllowances, totalSupply := setupRandTest(t)
+
+ if len(allowances) == 0 {
+ continue
+ }
+
+ // Remove a random allowance from the list of witnesses
+ idx := rand.Intn(len(allowances))
+ allowances = append(allowances[:idx], allowances[idx+1:]...)
+
+ db, factory := makeLegacyETH(t, totalSupply, stateBalances, stateAllowances)
+ err := doMigration(db, factory, addresses, allowances, big.NewInt(0), false)
+ require.ErrorContains(t, err, "unknown storage slot")
+ }
+ }
@@ -354,3 +368,32 @@ func randAddr(t *testing.T) common.Address {
require.NoError(t, err)
return addr
}
+
+ func setupRandTest(t *testing.T) ([]common.Address, map[common.Address]*big.Int, []*crossdomain.Allowance, map[common.Address]common.Address, *big.Int) {
+ addresses := make([]common.Address, 0)
+ stateBalances := make(map[common.Address]*big.Int)
+
+ allowances := make([]*crossdomain.Allowance, 0)
+ stateAllowances := make(map[common.Address]common.Address)
+
+ totalSupply := big.NewInt(0)
+
+ for j := 0; j < rand.Intn(10000); j++ {
+ addr := randAddr(t)
+ addresses = append(addresses, addr)
+ stateBalances[addr] = big.NewInt(int64(rand.Intn(1_000_000)))
+ totalSupply = new(big.Int).Add(totalSupply, stateBalances[addr])
+ }
+
+ for j := 0; j < rand.Intn(1000); j++ {
+ addr := randAddr(t)
+ to := randAddr(t)
+ allowances = append(allowances, &crossdomain.Allowance{
+ From: addr,
+ To: to,
+ })
+ stateAllowances[addr] = to
+ }
+
+ return addresses, stateBalances, allowances, stateAllowances, totalSupply
+ }
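The "Missing" variants above knock one random witness out of the input before running the migration and then expect the pre-check to fail. The removal uses the standard slice idiom; a tiny generic helper showing the same trick in isolation (illustrative only, not part of the change):

package main

import (
	"fmt"
	"math/rand"
)

// removeRandom drops one element at a random index, using the
// append(s[:idx], s[idx+1:]...) idiom from the tests above.
func removeRandom[T any](s []T) []T {
	if len(s) == 0 {
		return s
	}
	idx := rand.Intn(len(s))
	return append(s[:idx], s[idx+1:]...)
}

func main() {
	fmt.Println(removeRandom([]string{"a", "b", "c", "d"}))
}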
@@ -35,7 +35,7 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
- GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
+ GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
@@ -146,7 +146,7 @@ func TestLargeL1Gaps(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
- GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
+ GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
...
@@ -21,7 +21,7 @@ func TestShapellaL1Fork(gt *testing.T) {
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
- require.False(t, sd.L1Cfg.Config.IsShanghai(miner.l1Chain.CurrentBlock().Time()), "not active yet")
+ require.False(t, sd.L1Cfg.Config.IsShanghai(miner.l1Chain.CurrentBlock().Time), "not active yet")
// start op-nodes
sequencer.ActL2PipelineFull(t)
@@ -34,7 +34,7 @@ func TestShapellaL1Fork(gt *testing.T) {
// verify Shanghai is active
l1Head := miner.l1Chain.CurrentBlock()
- require.True(t, sd.L1Cfg.Config.IsShanghai(l1Head.Time()))
+ require.True(t, sd.L1Cfg.Config.IsShanghai(l1Head.Time))
// build L2 chain up to and including L2 blocks referencing shanghai L1 blocks
sequencer.ActL1HeadSignal(t)
...
@@ -31,7 +31,7 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
ChainID: sd.L1Cfg.Config.ChainID,
Nonce: 0,
GasTipCap: big.NewInt(2 * params.GWei),
- GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
+ GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
@@ -41,7 +41,8 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
// make an empty block, even though a tx may be waiting
miner.ActL1StartBlock(10)(t)
miner.ActL1EndBlock(t)
- bl := miner.l1Chain.CurrentBlock()
+ header := miner.l1Chain.CurrentBlock()
+ bl := miner.l1Chain.GetBlockByHash(header.Hash())
require.Equal(t, uint64(1), bl.NumberU64())
require.Zero(gt, bl.Transactions().Len())
@@ -49,7 +50,8 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
miner.ActL1StartBlock(10)(t)
miner.ActL1IncludeTx(dp.Addresses.Alice)(t)
miner.ActL1EndBlock(t)
- bl = miner.l1Chain.CurrentBlock()
+ header = miner.l1Chain.CurrentBlock()
+ bl = miner.l1Chain.GetBlockByHash(header.Hash())
require.Equal(t, uint64(2), bl.NumberU64())
require.Equal(t, 1, bl.Transactions().Len())
require.Equal(t, tx.Hash(), bl.Transactions()[0].Hash())
...
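Most of the test churn in this file and the ones below comes from the upstream go-ethereum change picked up with the v1.11.4 bump, which makes BlockChain.CurrentBlock, CurrentSafeBlock and CurrentFinalBlock return a *types.Header instead of a *types.Block. A hedged sketch of the two resulting patterns, assuming a *core.BlockChain from that geth version (helper names are illustrative):

package chainutil

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
)

// headBlock recovers the full block behind the head header, the pattern the
// tests above use when they still need the transaction list.
func headBlock(chain *core.BlockChain) *types.Block {
	header := chain.CurrentBlock() // *types.Header, not *types.Block
	return chain.GetBlockByHash(header.Hash())
}

// describeHead shows the direct header-field access that replaces the old
// NumberU64()/Time()/BaseFee()/GasLimit() accessor calls on blocks.
func describeHead(chain *core.BlockChain) string {
	h := chain.CurrentBlock()
	return fmt.Sprintf("head #%d time=%d gasLimit=%d baseFee=%v",
		h.Number.Uint64(), h.Time, h.GasLimit, h.BaseFee)
}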
@@ -103,9 +103,9 @@ func (s *L1Replica) ActL1RewindDepth(depth uint64) Action {
t.InvalidAction("cannot rewind L1 past genesis (current: %d, rewind depth: %d)", head, depth)
return
}
- finalized := s.l1Chain.CurrentFinalizedBlock()
- if finalized != nil && head < finalized.NumberU64()+depth {
- t.InvalidAction("cannot rewind head of chain past finalized block %d with rewind depth %d", finalized.NumberU64(), depth)
+ finalized := s.l1Chain.CurrentFinalBlock()
+ if finalized != nil && head < finalized.Number.Uint64()+depth {
+ t.InvalidAction("cannot rewind head of chain past finalized block %d with rewind depth %d", finalized.Number.Uint64(), depth)
return
}
if err := s.l1Chain.SetHead(head - depth); err != nil {
@@ -188,7 +188,7 @@ func (s *L1Replica) UnsafeNum() uint64 {
head := s.l1Chain.CurrentBlock()
headNum := uint64(0)
if head != nil {
- headNum = head.NumberU64()
+ headNum = head.Number.Uint64()
}
return headNum
}
@@ -197,16 +197,16 @@ func (s *L1Replica) SafeNum() uint64 {
safe := s.l1Chain.CurrentSafeBlock()
safeNum := uint64(0)
if safe != nil {
- safeNum = safe.NumberU64()
+ safeNum = safe.Number.Uint64()
}
return safeNum
}

func (s *L1Replica) FinalizedNum() uint64 {
- finalized := s.l1Chain.CurrentFinalizedBlock()
+ finalized := s.l1Chain.CurrentFinalBlock()
finalizedNum := uint64(0)
if finalized != nil {
- finalizedNum = finalized.NumberU64()
+ finalizedNum = finalized.Number.Uint64()
}
return finalizedNum
}
@@ -219,7 +219,7 @@ func (s *L1Replica) ActL1Finalize(t Testing, num uint64) {
t.InvalidAction("need to move forward safe block before moving finalized block")
return
}
- newFinalized := s.l1Chain.GetBlockByNumber(num)
+ newFinalized := s.l1Chain.GetHeaderByNumber(num)
if newFinalized == nil {
t.Fatalf("expected block at %d after finalized L1 block %d, safe head is ahead", num, finalizedNum)
}
@@ -234,7 +234,7 @@ func (s *L1Replica) ActL1FinalizeNext(t Testing) {
// ActL1Safe marks the given unsafe block as safe.
func (s *L1Replica) ActL1Safe(t Testing, num uint64) {
- newSafe := s.l1Chain.GetBlockByNumber(num)
+ newSafe := s.l1Chain.GetHeaderByNumber(num)
if newSafe == nil {
t.InvalidAction("could not find L1 block %d, cannot label it as safe", num)
return
...
@@ -85,7 +85,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
})
syncFromA := replica1.ActL1Sync(canonL1(chainA))
// sync canonical chain A
- for replica1.l1Chain.CurrentBlock().NumberU64()+1 < uint64(len(chainA)) {
+ for replica1.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainA)) {
syncFromA(t)
}
require.Equal(t, replica1.l1Chain.CurrentBlock().Hash(), chainA[len(chainA)-1].Hash(), "sync replica1 to head of chain A")
@@ -94,7 +94,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
// sync new canonical chain B
syncFromB := replica1.ActL1Sync(canonL1(chainB))
- for replica1.l1Chain.CurrentBlock().NumberU64()+1 < uint64(len(chainB)) {
+ for replica1.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainB)) {
syncFromB(t)
}
require.Equal(t, replica1.l1Chain.CurrentBlock().Hash(), chainB[len(chainB)-1].Hash(), "sync replica1 to head of chain B")
@@ -105,7 +105,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
_ = replica2.Close()
})
syncFromOther := replica2.ActL1Sync(replica1.CanonL1Chain())
- for replica2.l1Chain.CurrentBlock().NumberU64()+1 < uint64(len(chainB)) {
+ for replica2.l1Chain.CurrentBlock().Number.Uint64()+1 < uint64(len(chainB)) {
syncFromOther(t)
}
require.Equal(t, replica2.l1Chain.CurrentBlock().Hash(), chainB[len(chainB)-1].Hash(), "sync replica2 to head of chain B")
...
@@ -48,7 +48,7 @@ func TestBatcher(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
- GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
+ GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
@@ -73,7 +73,7 @@ func TestBatcher(gt *testing.T) {
miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
miner.ActL1EndBlock(t)
bl := miner.l1Chain.CurrentBlock()
- log.Info("bl", "txs", len(bl.Transactions()))
+ log.Info("bl", "txs", len(miner.l1Chain.GetBlockByHash(bl.Hash()).Transactions()))
// Now make enough L1 blocks that the verifier will have to derive a L2 block
// It will also eagerly derive the block from the batcher
@@ -437,7 +437,7 @@ func TestBigL2Txs(gt *testing.T) {
}
sequencer.ActL1HeadSignal(t)
sequencer.ActL2StartBlock(t)
- baseFee := engine.l2Chain.CurrentBlock().BaseFee() // this will go quite high, since so many consecutive blocks are filled at capacity.
+ baseFee := engine.l2Chain.CurrentBlock().BaseFee // this will go quite high, since so many consecutive blocks are filled at capacity.
// fill the block with large L2 txs from alice
for n := aliceNonce; ; n++ {
require.NoError(t, err)
...
@@ -202,30 +202,30 @@ func (ea *L2EngineAPI) ForkchoiceUpdatedV1(ctx context.Context, state *eth.Forkc
// chain final and completely in PoS mode.
if state.FinalizedBlockHash != (common.Hash{}) {
// If the finalized block is not in our canonical tree, somethings wrong
- finalBlock := ea.l2Chain.GetBlockByHash(state.FinalizedBlockHash)
- if finalBlock == nil {
+ finalHeader := ea.l2Chain.GetHeaderByHash(state.FinalizedBlockHash)
+ if finalHeader == nil {
ea.log.Warn("Final block not available in database", "hash", state.FinalizedBlockHash)
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("final block not available in database"))
- } else if rawdb.ReadCanonicalHash(ea.l2Database, finalBlock.NumberU64()) != state.FinalizedBlockHash {
+ } else if rawdb.ReadCanonicalHash(ea.l2Database, finalHeader.Number.Uint64()) != state.FinalizedBlockHash {
ea.log.Warn("Final block not in canonical chain", "number", block.NumberU64(), "hash", state.HeadBlockHash)
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("final block not in canonical chain"))
}
// Set the finalized block
- ea.l2Chain.SetFinalized(finalBlock)
+ ea.l2Chain.SetFinalized(finalHeader)
}
// Check if the safe block hash is in our canonical tree, if not somethings wrong
if state.SafeBlockHash != (common.Hash{}) {
- safeBlock := ea.l2Chain.GetBlockByHash(state.SafeBlockHash)
- if safeBlock == nil {
+ safeHeader := ea.l2Chain.GetHeaderByHash(state.SafeBlockHash)
+ if safeHeader == nil {
ea.log.Warn("Safe block not available in database")
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("safe block not available in database"))
}
- if rawdb.ReadCanonicalHash(ea.l2Database, safeBlock.NumberU64()) != state.SafeBlockHash {
+ if rawdb.ReadCanonicalHash(ea.l2Database, safeHeader.Number.Uint64()) != state.SafeBlockHash {
ea.log.Warn("Safe block not in canonical chain")
return STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("safe block not in canonical chain"))
}
// Set the safe block
- ea.l2Chain.SetSafe(safeBlock)
+ ea.l2Chain.SetSafe(safeHeader)
}
// If payload generation was requested, create a new block to be potentially
// sealed by the beacon client. The payload will be requested later, and we
...
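The forkchoice check above now works on headers: it looks the hash up, then confirms that the header's number still maps to that hash in the canonical-chain index. The same check pulled out into a standalone helper, as a sketch only (assumes go-ethereum's rawdb and an ethdb.Reader; this is not code from the change):

package chainutil

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// isCanonical reports whether the block with the given hash is currently part
// of the canonical chain recorded in the database.
func isCanonical(db ethdb.Reader, chain *core.BlockChain, hash common.Hash) bool {
	header := chain.GetHeaderByHash(hash)
	if header == nil {
		return false // not even in the database
	}
	return rawdb.ReadCanonicalHash(db, header.Number.Uint64()) == hash
}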
@@ -107,7 +107,7 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: 0,
GasTipCap: big.NewInt(2 * params.GWei),
- GasFeeCap: new(big.Int).Add(engine.l2Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
+ GasFeeCap: new(big.Int).Add(engine.l2Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
@@ -125,7 +125,7 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
SafeBlockHash: genesisBlock.Hash(),
FinalizedBlockHash: genesisBlock.Hash(),
}, &eth.PayloadAttributes{
- Timestamp: eth.Uint64Quantity(parent.Time()) + 2,
+ Timestamp: eth.Uint64Quantity(parent.Time) + 2,
PrevRandao: eth.Bytes32{},
SuggestedFeeRecipient: common.Address{'C'},
Transactions: nil,
@@ -161,12 +161,12 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
require.Equal(t, payload.BlockHash, engine.l2Chain.CurrentBlock().Hash(), "now payload is canonical")
}
buildBlock(false)
- require.Zero(t, engine.l2Chain.CurrentBlock().Transactions().Len(), "no tx included")
+ require.Zero(t, engine.l2Chain.GetBlockByHash(engine.l2Chain.CurrentBlock().Hash()).Transactions().Len(), "no tx included")
buildBlock(true)
- require.Equal(gt, 1, engine.l2Chain.CurrentBlock().Transactions().Len(), "tx from alice is included")
+ require.Equal(gt, 1, engine.l2Chain.GetBlockByHash(engine.l2Chain.CurrentBlock().Hash()).Transactions().Len(), "tx from alice is included")
buildBlock(false)
- require.Zero(t, engine.l2Chain.CurrentBlock().Transactions().Len(), "no tx included")
- require.Equal(t, uint64(3), engine.l2Chain.CurrentBlock().NumberU64(), "built 3 blocks")
+ require.Zero(t, engine.l2Chain.GetBlockByHash(engine.l2Chain.CurrentBlock().Hash()).Transactions().Len(), "no tx included")
+ require.Equal(t, uint64(3), engine.l2Chain.CurrentBlock().Number.Uint64(), "built 3 blocks")
}

func TestL2EngineAPIFail(gt *testing.T) {
...
@@ -55,7 +55,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
- GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee(), big.NewInt(2*params.GWei)),
+ GasFeeCap: new(big.Int).Add(miner.l1Chain.CurrentBlock().BaseFee, big.NewInt(2*params.GWei)),
Gas: params.TxGas,
To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2),
@@ -76,7 +76,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
origin := miner.l1Chain.CurrentBlock()
// L2 makes blocks to catch up
- for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime < origin.Time() {
+ for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime < origin.Time {
makeL2BlockWithAliceTx()
require.Equal(t, uint64(0), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "no L1 origin change before time matches")
}
@@ -89,7 +89,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
sequencer.ActL1HeadSignal(t)
// Make blocks up till the sequencer drift is about to surpass, but keep the old L1 origin
- for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time()+sd.RollupCfg.MaxSequencerDrift {
+ for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.RollupCfg.MaxSequencerDrift {
sequencer.ActL2KeepL1Origin(t)
makeL2BlockWithAliceTx()
require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin")
...
@@ -41,20 +41,20 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) {
miner.ActL1SetFeeRecipient(common.Address{'A'})
// Make two sequence windows worth of empty L1 blocks. After we pass the first sequence window, the L2 chain should get blocks
- for miner.l1Chain.CurrentBlock().NumberU64() < sd.RollupCfg.SeqWindowSize*2 {
+ for miner.l1Chain.CurrentBlock().Number.Uint64() < sd.RollupCfg.SeqWindowSize*2 {
miner.ActL1StartBlock(10)(t)
miner.ActL1EndBlock(t)
verifier.ActL2PipelineFull(t)
- l1Head := miner.l1Chain.CurrentBlock().NumberU64()
+ l1Head := miner.l1Chain.CurrentBlock().Number.Uint64()
expectedL1Origin := uint64(0)
// as soon as we complete the sequence window, we force-adopt the L1 origin
if l1Head >= sd.RollupCfg.SeqWindowSize {
expectedL1Origin = l1Head - sd.RollupCfg.SeqWindowSize
}
require.Equal(t, expectedL1Origin, verifier.SyncStatus().SafeL2.L1Origin.Number, "L1 origin is forced in, given enough L1 blocks pass by")
- require.LessOrEqual(t, miner.l1Chain.GetBlockByNumber(expectedL1Origin).Time(), engine.l2Chain.CurrentBlock().Time(), "L2 time higher than L1 origin time")
+ require.LessOrEqual(t, miner.l1Chain.GetBlockByNumber(expectedL1Origin).Time(), engine.l2Chain.CurrentBlock().Time, "L2 time higher than L1 origin time")
}
tip2N := verifier.SyncStatus()
@@ -75,7 +75,7 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) {
verifier.ActL2PipelineFull(t)
require.Equal(t, tip2N.SafeL2, verifier.SyncStatus().SafeL2)
- for miner.l1Chain.CurrentBlock().NumberU64() < sd.RollupCfg.SeqWindowSize*2 {
+ for miner.l1Chain.CurrentBlock().Number.Uint64() < sd.RollupCfg.SeqWindowSize*2 {
miner.ActL1StartBlock(10)(t)
miner.ActL1EndBlock(t)
}
...
@@ -80,7 +80,7 @@ func TestBatcherKeyRotation(gt *testing.T) {
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.SysCfgOwner)(t)
miner.ActL1EndBlock(t)
- cfgChangeL1BlockNum := miner.l1Chain.CurrentBlock().NumberU64()
+ cfgChangeL1BlockNum := miner.l1Chain.CurrentBlock().Number.Uint64()
// sequence L2 blocks, and submit with new batcher
sequencer.ActL1HeadSignal(t)
@@ -200,7 +200,7 @@ func TestGPOParamsChange(gt *testing.T) {
miner.ActEmptyBlock(t)
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
- basefee := miner.l1Chain.CurrentBlock().BaseFee()
+ basefee := miner.l1Chain.CurrentBlock().BaseFee
// alice makes a L2 tx, sequencer includes it
alice.ActResetTxOpts(t)
@@ -238,7 +238,7 @@ func TestGPOParamsChange(gt *testing.T) {
miner.ActL1StartBlock(12)(t)
miner.ActL1IncludeTx(dp.Addresses.SysCfgOwner)(t)
miner.ActL1EndBlock(t)
- basefeeGPOUpdate := miner.l1Chain.CurrentBlock().BaseFee()
+ basefeeGPOUpdate := miner.l1Chain.CurrentBlock().BaseFee
// build empty L2 chain, up to but excluding the L2 block with the L1 origin that processes the GPO change
sequencer.ActL1HeadSignal(t)
@@ -274,7 +274,7 @@ func TestGPOParamsChange(gt *testing.T) {
// build more L2 blocks, with new L1 origin
miner.ActEmptyBlock(t)
- basefee = miner.l1Chain.CurrentBlock().BaseFee()
+ basefee = miner.l1Chain.CurrentBlock().BaseFee
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
// and Alice makes a tx again
@@ -313,7 +313,7 @@ func TestGasLimitChange(gt *testing.T) {
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
- oldGasLimit := seqEngine.l2Chain.CurrentBlock().GasLimit()
+ oldGasLimit := seqEngine.l2Chain.CurrentBlock().GasLimit
require.Equal(t, oldGasLimit, uint64(dp.DeployConfig.L2GenesisBlockGasLimit))
// change gas limit on L1 to triple what it was
@@ -335,12 +335,12 @@ func TestGasLimitChange(gt *testing.T) {
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1HeadExcl(t)
- require.Equal(t, oldGasLimit, seqEngine.l2Chain.CurrentBlock().GasLimit())
+ require.Equal(t, oldGasLimit, seqEngine.l2Chain.CurrentBlock().GasLimit)
require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number)
// now include the L1 block with the gaslimit change, and see if it changes as expected
sequencer.ActBuildToL1Head(t)
- require.Equal(t, oldGasLimit*3, seqEngine.l2Chain.CurrentBlock().GasLimit())
+ require.Equal(t, oldGasLimit*3, seqEngine.l2Chain.CurrentBlock().GasLimit)
require.Equal(t, uint64(2), sequencer.SyncStatus().UnsafeL2.L1Origin.Number)
// now submit all this to L1, and see if a verifier can sync and reproduce it
...
@@ -132,7 +132,7 @@ func runCrossLayerUserTest(gt *testing.T, test regolithScheduledTest) {
seq.ActL1HeadSignal(t)
// sync sequencer build enough blocks to adopt latest L1 origin
- for seq.SyncStatus().UnsafeL2.L1Origin.Number < miner.l1Chain.CurrentBlock().NumberU64() {
+ for seq.SyncStatus().UnsafeL2.L1Origin.Number < miner.l1Chain.CurrentBlock().Number.Uint64() {
seq.ActL2StartBlock(t)
seq.ActL2EndBlock(t)
}
...
@@ -169,7 +169,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
if alloc.PrefundTestUsers {
for _, addr := range deployParams.Addresses.All() {
l1Genesis.Alloc[addr] = core.GenesisAccount{
- Balance: Ether(1e6),
+ Balance: Ether(1e12),
}
}
}
@@ -184,7 +184,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
if alloc.PrefundTestUsers {
for _, addr := range deployParams.Addresses.All() {
l2Genesis.Alloc[addr] = core.GenesisAccount{
- Balance: Ether(1e6),
+ Balance: Ether(1e12),
}
}
}
...
@@ -27,10 +27,10 @@ func TestSetup(t *testing.T) {
alloc := &AllocParams{PrefundTestUsers: true}
sd := Setup(t, dp, alloc)
require.Contains(t, sd.L1Cfg.Alloc, dp.Addresses.Alice)
- require.Equal(t, sd.L1Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e6))
+ require.Equal(t, sd.L1Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e12))
require.Contains(t, sd.L2Cfg.Alloc, dp.Addresses.Alice)
- require.Equal(t, sd.L2Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e6))
+ require.Equal(t, sd.L2Cfg.Alloc[dp.Addresses.Alice].Balance, Ether(1e12))
require.Contains(t, sd.L1Cfg.Alloc, predeploys.DevOptimismPortalAddr)
require.Contains(t, sd.L2Cfg.Alloc, predeploys.L1BlockAddr)
...
@@ -160,11 +160,11 @@ func (f *fakeSafeFinalizedL1) Start() error {
case head := <-headChanges:
num := head.Block.NumberU64()
if num > f.finalizedDistance {
- toFinalize := f.eth.BlockChain().GetBlockByNumber(num - f.finalizedDistance)
+ toFinalize := f.eth.BlockChain().GetHeaderByNumber(num - f.finalizedDistance)
f.eth.BlockChain().SetFinalized(toFinalize)
}
if num > f.safeDistance {
- toSafe := f.eth.BlockChain().GetBlockByNumber(num - f.safeDistance)
+ toSafe := f.eth.BlockChain().GetHeaderByNumber(num - f.safeDistance)
f.eth.BlockChain().SetSafe(toSafe)
}
case <-quit:
...
- FROM golang:1.18.0-alpine3.15 as builder
+ FROM golang:1.19.0-alpine3.15 as builder
# build from root of repo
COPY ./op-exporter /app
...
- FROM --platform=$BUILDPLATFORM golang:1.18.0-alpine3.15 as builder
+ FROM --platform=$BUILDPLATFORM golang:1.19.0-alpine3.15 as builder
ARG VERSION=v0.0.0
...
- FROM --platform=$BUILDPLATFORM golang:1.18.0-alpine3.15 as builder
+ FROM --platform=$BUILDPLATFORM golang:1.19.0-alpine3.15 as builder
ARG VERSION=v0.0.0
...
- FROM golang:1.18.0-alpine3.15 as builder
+ FROM golang:1.19.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers
...
@@ -10,6 +10,7 @@ import (
"path/filepath"
"strings"
+ "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash"
@@ -80,11 +81,11 @@ type HeadFn func(headState *state.StateDB) error
// and updates the blockchain headers indexes to reflect the new state-root, so geth will believe the cheat
// (unless it ever re-applies the block).
func (ch *Cheater) RunAndClose(fn HeadFn) error {
- preBlock := ch.Blockchain.CurrentBlock()
- if a, b := preBlock.NumberU64(), ch.Blockchain.Genesis().NumberU64(); a <= b {
+ preHeader := ch.Blockchain.CurrentBlock()
+ if a, b := preHeader.Number.Uint64(), ch.Blockchain.Genesis().NumberU64(); a <= b {
return fmt.Errorf("cheating at genesis (head block %d <= genesis block %d) is not supported", a, b)
}
- state, err := ch.Blockchain.StateAt(preBlock.Root())
+ state, err := ch.Blockchain.StateAt(preHeader.Root)
if err != nil {
_ = ch.Close()
return fmt.Errorf("failed to look up head state: %w", err)
@@ -103,7 +104,7 @@ func (ch *Cheater) RunAndClose(fn HeadFn) error {
_ = ch.Close()
return fmt.Errorf("failed to commit state change: %w", err)
}
- header := preBlock.Header()
+ header := preHeader // copy the header
header.Root = stateRoot
blockHash := header.Hash()
@@ -115,14 +116,15 @@ func (ch *Cheater) RunAndClose(fn HeadFn) error {
// based on core.BlockChain.writeHeadBlock:
// Add the block to the canonical chain number scheme and mark as the head
batch := ch.DB.NewBatch()
- if ch.Blockchain.CurrentFinalizedBlock().Hash() == preBlock.Hash() {
+ preID := eth.BlockID{Hash: preHeader.Hash(), Number: preHeader.Number.Uint64()}
+ if ch.Blockchain.CurrentFinalBlock().Hash() == preID.Hash {
rawdb.WriteFinalizedBlockHash(batch, blockHash)
}
- rawdb.DeleteHeaderNumber(batch, preBlock.Hash())
+ rawdb.DeleteHeaderNumber(batch, preHeader.Hash())
rawdb.WriteHeadHeaderHash(batch, blockHash)
rawdb.WriteHeadFastBlockHash(batch, blockHash)
- rawdb.WriteCanonicalHash(batch, blockHash, preBlock.NumberU64())
- rawdb.WriteHeaderNumber(batch, blockHash, preBlock.NumberU64())
+ rawdb.WriteCanonicalHash(batch, blockHash, preID.Number)
+ rawdb.WriteHeaderNumber(batch, blockHash, preID.Number)
rawdb.WriteHeader(batch, header)
// not keyed by blockhash, and we didn't remove any txs, so we just leave this one as-is.
// rawdb.WriteTxLookupEntriesByBlock(batch, block)
@@ -131,17 +133,17 @@ func (ch *Cheater) RunAndClose(fn HeadFn) error {
// Geth stores the TD for each block separately from the block itself. We must update this
// manually, otherwise Geth thinks we haven't reached TTD yet and tries to build a block
// using Clique consensus, which causes a panic.
- rawdb.WriteTd(batch, blockHash, preBlock.NumberU64(), ch.Blockchain.GetTd(preBlock.Hash(), preBlock.NumberU64()))
+ rawdb.WriteTd(batch, blockHash, preID.Number, ch.Blockchain.GetTd(preID.Hash, preID.Number))
// Need to copy over receipts since they are keyed by block hash.
- receipts := rawdb.ReadReceipts(ch.DB, preBlock.Hash(), preBlock.NumberU64(), ch.Blockchain.Config())
- rawdb.WriteReceipts(batch, blockHash, preBlock.NumberU64(), receipts)
+ receipts := rawdb.ReadReceipts(ch.DB, preID.Hash, preID.Number, ch.Blockchain.Config())
+ rawdb.WriteReceipts(batch, blockHash, preID.Number, receipts)
// Geth maintains an internal mapping between block bodies and their hashes. None of the database
// accessors above update this mapping, so we need to do it manually.
- oldKey := blockBodyKey(preBlock.NumberU64(), preBlock.Hash())
- oldBody := rawdb.ReadBodyRLP(ch.DB, preBlock.Hash(), preBlock.NumberU64())
- newKey := blockBodyKey(preBlock.NumberU64(), blockHash)
+ oldKey := blockBodyKey(preID.Number, preID.Hash)
+ oldBody := rawdb.ReadBodyRLP(ch.DB, preID.Hash, preID.Number)
+ newKey := blockBodyKey(preID.Number, blockHash)
if err := batch.Delete(oldKey); err != nil {
return fmt.Errorf("error deleting old block body key")
}
...
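The cheater rewrites the head block in place, so it needs the pre-cheat head's hash and number in several rawdb calls; packing them into an op-node eth.BlockID once, as the diff does, avoids recomputing header.Hash() at every call site. A tiny illustrative helper showing only that convenience, not the full RunAndClose flow:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum-optimism/optimism/op-node/eth"
	"github.com/ethereum/go-ethereum/core/types"
)

// headerID captures the identifying fields of a header as an eth.BlockID.
func headerID(h *types.Header) eth.BlockID {
	return eth.BlockID{Hash: h.Hash(), Number: h.Number.Uint64()}
}

func main() {
	h := &types.Header{Number: big.NewInt(1)}
	fmt.Println(headerID(h)) // hash and number of the (mostly empty) header
}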