Commit fcd6c7f3 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into jg/ci

parents c58379bb 18747f11
---
'@eth-optimism/data-transport-layer': patch
---
Add better logging to DTL about shutoff block
---
'@eth-optimism/contracts-bedrock': patch
---
Makes the Proxy contract inheritable by making its functions `public virtual`.
...@@ -31,12 +31,13 @@ ...@@ -31,12 +31,13 @@
/op-proposer @ethereum-optimism/go-reviewers /op-proposer @ethereum-optimism/go-reviewers
/op-program @ethereum-optimism/go-reviewers /op-program @ethereum-optimism/go-reviewers
/op-service @ethereum-optimism/go-reviewers /op-service @ethereum-optimism/go-reviewers
/ops-bedrock @ethereum-optimism/go-reviewers
# Ops # Ops
/.circleci @ethereum-optimism/infra-reviewers /.circleci @ethereum-optimism/infra-reviewers
/.github @ethereum-optimism/infra-reviewers /.github @ethereum-optimism/infra-reviewers
/ops @ethereum-optimism/infra-reviewers /ops @ethereum-optimism/infra-reviewers
/ops-bedrock @ethereum-optimism/infra-reviewers
/op-signer @ethereum-optimism/infra-reviewers /op-signer @ethereum-optimism/infra-reviewers
# Misc # Misc
......
...@@ -29,6 +29,7 @@ pull_request_rules: ...@@ -29,6 +29,7 @@ pull_request_rules:
queue: queue:
name: default name: default
method: merge method: merge
merge_bot_account: OptimismBot
- name: Add merge train label - name: Add merge train label
conditions: conditions:
- "queue-position >= 0" - "queue-position >= 0"
......
[submodule "tests"] [submodule "tests"]
path = l2geth/tests/testdata path = l2geth/tests/testdata
url = https://github.com/ethereum/tests url = https://github.com/ethereum/tests
[submodule "packages/contracts-periphery/lib/multicall"]
path = packages/contracts-periphery/lib/multicall
url = https://github.com/mds1/multicall
[submodule "lib/multicall"]
branch = v3.1.0
...@@ -24,7 +24,7 @@ If you want to build Optimism, check out the [Protocol Specs](./specs/). ...@@ -24,7 +24,7 @@ If you want to build Optimism, check out the [Protocol Specs](./specs/).
## Community ## Community
General discussion happens most frequently on the [Optimism discord](https://discord-gateway.optimism.io). General discussion happens most frequently on the [Optimism discord](https://discord.gg/optimism).
Governance discussion can also be found on the [Optimism Governance Forum](https://gov.optimism.io/). Governance discussion can also be found on the [Optimism Governance Forum](https://gov.optimism.io/).
## Contributing ## Contributing
......
# The OP Stack Docs # The OP Stack Docs
[![Discord](https://img.shields.io/discord/667044843901681675.svg?color=768AD4&label=discord&logo=https%3A%2F%2Fdiscordapp.com%2Fassets%2F8c9701b98ad4372b58f13fd9f65f966e.svg)](https://discord-gateway.optimism.io) [![Discord](https://img.shields.io/discord/667044843901681675.svg?color=768AD4&label=discord&logo=https%3A%2F%2Fdiscordapp.com%2Fassets%2F8c9701b98ad4372b58f13fd9f65f966e.svg)](https://discord.gg/optimism)
[![Twitter Follow](https://img.shields.io/twitter/follow/optimismPBC.svg?label=optimismPBC&style=social)](https://twitter.com/optimismPBC) [![Twitter Follow](https://img.shields.io/twitter/follow/optimismPBC.svg?label=optimismPBC&style=social)](https://twitter.com/optimismPBC)
The OP Stack is an open, collectively maintained development stack for blockchain ecosystems. The OP Stack is an open, collectively maintained development stack for blockchain ecosystems.
......
...@@ -57,7 +57,7 @@ export default Vue.extend({ ...@@ -57,7 +57,7 @@ export default Vue.extend({
"Support" "Support"
]), ]),
h("div", { class: "anchor-support-links" }, [ h("div", { class: "anchor-support-links" }, [
h("a", { attrs: { href: "https://discord.optimism.io", target: "_blank" } }, [ h("a", { attrs: { href: "https://discord.gg/optimism", target: "_blank" } }, [
h("div", [ h("div", [
h("i", { attrs: { class: "fab fa-discord" } }), h("i", { attrs: { class: "fab fa-discord" } }),
" Discord community " " Discord community "
......
...@@ -30,4 +30,4 @@ If you’re looking for other ways to get involved, here are a few options: ...@@ -30,4 +30,4 @@ If you’re looking for other ways to get involved, here are a few options:
- Grab an idea from the [project ideas list](https://github.com/ethereum-optimism/optimism-project-ideas) and start building - Grab an idea from the [project ideas list](https://github.com/ethereum-optimism/optimism-project-ideas) and start building
- Suggest a new idea for the [project ideas list](https://github.com/ethereum-optimism/optimism-project-ideas) - Suggest a new idea for the [project ideas list](https://github.com/ethereum-optimism/optimism-project-ideas)
- Improve the [Optimism Community Hub](https://community.optimism.io/) [documentation](https://github.com/ethereum-optimism/community-hub) or [tutorials](https://github.com/ethereum-optimism/optimism-tutorial) - Improve the [Optimism Community Hub](https://community.optimism.io/) [documentation](https://github.com/ethereum-optimism/community-hub) or [tutorials](https://github.com/ethereum-optimism/optimism-tutorial)
- Become an Optimism Ambassador, Support Nerd, and more in the [Optimism Discord](https://discord-gateway.optimism.io/) - Become an Optimism Ambassador, Support Nerd, and more in the [Optimism Discord](https://discord.gg/optimism)
...@@ -6,7 +6,7 @@ require ( ...@@ -6,7 +6,7 @@ require (
github.com/btcsuite/btcd v0.23.3 github.com/btcsuite/btcd v0.23.3
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0
github.com/docker/docker v20.10.21+incompatible github.com/docker/docker v20.10.24+incompatible
github.com/docker/go-connections v0.4.0 github.com/docker/go-connections v0.4.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum/go-ethereum v1.11.5 github.com/ethereum/go-ethereum v1.11.5
......
...@@ -158,8 +158,8 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn ...@@ -158,8 +158,8 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn
github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog= github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
......
...@@ -51,9 +51,10 @@ func Main(version string, cliCtx *cli.Context) error { ...@@ -51,9 +51,10 @@ func Main(version string, cliCtx *cli.Context) error {
return err return err
} }
} }
defer batchSubmitter.StopIfRunning(context.Background())
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() // Stop pprof and metrics only after main loop returns
defer batchSubmitter.StopIfRunning(context.Background())
pprofConfig := cfg.PprofConfig pprofConfig := cfg.PprofConfig
if pprofConfig.Enabled { if pprofConfig.Enabled {
...@@ -106,7 +107,8 @@ func Main(version string, cliCtx *cli.Context) error { ...@@ -106,7 +107,8 @@ func Main(version string, cliCtx *cli.Context) error {
syscall.SIGQUIT, syscall.SIGQUIT,
}...) }...)
<-interruptChannel <-interruptChannel
cancel() if err := server.Stop(); err != nil {
_ = server.Stop() l.Error("Error shutting down http server", "err", err)
}
return nil return nil
} }
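For context on the reordering above: Go runs deferred calls in last-in-first-out order, so registering `StopIfRunning` after `defer cancel()` means the batch submitter is now stopped before the context is cancelled once `Main` returns. A minimal sketch of that LIFO ordering; the function and print statements are stand-ins, not the op-batcher code:

```go
package main

import (
	"context"
	"fmt"
)

func run() error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()                        // registered first, runs last
	defer fmt.Println("stop submitter")   // stand-in for batchSubmitter.StopIfRunning
	defer fmt.Println("stop http server") // registered last, runs first after run() returns

	_ = ctx // the real Main passes ctx into the services it starts
	return nil
}

func main() {
	_ = run()
	// Prints "stop http server", then "stop submitter"; cancel() runs last.
}
```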
...@@ -9,21 +9,25 @@ import ( ...@@ -9,21 +9,25 @@ import (
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-bindings/bindings"
legacy_bindings "github.com/ethereum-optimism/optimism/op-bindings/legacy-bindings" legacy_bindings "github.com/ethereum-optimism/optimism/op-bindings/legacy-bindings"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli/v2"
) )
func main() { func main() {
log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd()))))
app := cli.NewApp() app := cli.NewApp()
app.Name = "rollover" app.Name = "rollover"
app.Usage = "Commands for assisting in the rollover of the system" app.Usage = "Commands for assisting in the rollover of the system"
...@@ -149,6 +153,9 @@ func main() { ...@@ -149,6 +153,9 @@ func main() {
return err return err
} }
log.Info("Remaining deposits that must be submitted", "count", finalPending) log.Info("Remaining deposits that must be submitted", "count", finalPending)
if finalPending.Cmp(common.Big0) == 0 {
log.Info("All deposits have been batch submitted")
}
return nil return nil
}, },
}, },
...@@ -183,11 +190,11 @@ func main() { ...@@ -183,11 +190,11 @@ func main() {
log.Info("Waiting for CanonicalTransactionChain") log.Info("Waiting for CanonicalTransactionChain")
wg.Add(1) wg.Add(1)
go waitForTotalElements(&wg, ctc, clients.L2Client) go waitForTotalElements(&wg, ctc, clients.L2Client, "CanonicalTransactionChain")
log.Info("Waiting for StateCommitmentChain") log.Info("Waiting for StateCommitmentChain")
wg.Add(1) wg.Add(1)
go waitForTotalElements(&wg, scc, clients.L2Client) go waitForTotalElements(&wg, scc, clients.L2Client, "StateCommitmentChain")
wg.Wait() wg.Wait()
log.Info("All batches have been submitted") log.Info("All batches have been submitted")
...@@ -210,7 +217,7 @@ type RollupContract interface { ...@@ -210,7 +217,7 @@ type RollupContract interface {
} }
// waitForTotalElements polls until the contract's total element count matches the L2 block height. // waitForTotalElements polls until the contract's total element count matches the L2 block height.
func waitForTotalElements(wg *sync.WaitGroup, contract RollupContract, client *ethclient.Client) { func waitForTotalElements(wg *sync.WaitGroup, contract RollupContract, client *ethclient.Client, name string) {
defer wg.Done() defer wg.Done()
for { for {
...@@ -228,9 +235,16 @@ func waitForTotalElements(wg *sync.WaitGroup, contract RollupContract, client *e ...@@ -228,9 +235,16 @@ func waitForTotalElements(wg *sync.WaitGroup, contract RollupContract, client *e
} }
if totalElements.Uint64() == bn { if totalElements.Uint64() == bn {
log.Info("Total elements matches block number", "name", name, "count", bn)
return return
} }
log.Info("Waiting for elements to be submitted", "count", totalElements.Uint64()-bn, "height", bn, "total-elements", totalElements.Uint64()) log.Info(
"Waiting for elements to be submitted",
"name", name,
"count", totalElements.Uint64()-bn,
"height", bn,
"total-elements", totalElements.Uint64(),
)
time.Sleep(3 * time.Second) time.Sleep(3 * time.Second)
} }
......
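The rollover change above threads a contract name through `waitForTotalElements` so the progress logs from the two concurrent waiters can be told apart. A minimal sketch of that polling pattern, with hypothetical names standing in for the op-chain-ops types:

```go
package main

import (
	"log"
	"sync"
	"time"
)

// waitForTotalElements polls until the counter catches up to the target and
// tags every log line with the contract name it was given. snapshot is a
// stand-in for the contract call plus the eth_blockNumber lookup.
func waitForTotalElements(wg *sync.WaitGroup, name string, snapshot func() (uint64, uint64)) {
	defer wg.Done()
	for {
		total, height := snapshot()
		if total == height {
			log.Printf("Total elements matches block number name=%s count=%d", name, height)
			return
		}
		log.Printf("Waiting for elements to be submitted name=%s count=%d height=%d total-elements=%d",
			name, total-height, height, total)
		time.Sleep(3 * time.Second)
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(2)
	go waitForTotalElements(&wg, "CanonicalTransactionChain", func() (uint64, uint64) { return 5, 5 })
	go waitForTotalElements(&wg, "StateCommitmentChain", func() (uint64, uint64) { return 7, 7 })
	wg.Wait()
}
```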
...@@ -39,21 +39,21 @@ func NewClients(ctx *cli.Context) (*Clients, error) { ...@@ -39,21 +39,21 @@ func NewClients(ctx *cli.Context) (*Clients, error) {
l1RpcURL := ctx.String("l1-rpc-url") l1RpcURL := ctx.String("l1-rpc-url")
l1Client, err := ethclient.Dial(l1RpcURL) l1Client, err := ethclient.Dial(l1RpcURL)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("cannot dial L1: %w", err)
} }
l1ChainID, err := l1Client.ChainID(context.Background()) l1ChainID, err := l1Client.ChainID(context.Background())
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("cannot fetch L1 chainid: %w", err)
} }
l2RpcURL := ctx.String("l2-rpc-url") l2RpcURL := ctx.String("l2-rpc-url")
l2Client, err := ethclient.Dial(l2RpcURL) l2Client, err := ethclient.Dial(l2RpcURL)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("cannot dial L2: %w", err)
} }
l2ChainID, err := l2Client.ChainID(context.Background()) l2ChainID, err := l2Client.ChainID(context.Background())
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("cannot fetch L2 chainid: %w", err)
} }
l1RpcClient, err := rpc.DialContext(context.Background(), l1RpcURL) l1RpcClient, err := rpc.DialContext(context.Background(), l1RpcURL)
......
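The change above wraps each failure with `%w` so callers can see which endpoint failed without losing the underlying error. A small illustrative sketch of why wrapped errors beat returning `err` bare (the sentinel and helper below are hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// errDialFailed is a hypothetical sentinel standing in for a transport error.
var errDialFailed = errors.New("connection refused")

func dial(url string) error { return errDialFailed }

func dialL1(url string) error {
	if err := dial(url); err != nil {
		// %w keeps the original error in the chain while adding context.
		return fmt.Errorf("cannot dial L1: %w", err)
	}
	return nil
}

func main() {
	err := dialL1("http://localhost:8545")
	fmt.Println(err)                           // cannot dial L1: connection refused
	fmt.Println(errors.Is(err, errDialFailed)) // true: the cause is still programmatically visible
}
```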
...@@ -447,7 +447,7 @@ func TestBigL2Txs(gt *testing.T) { ...@@ -447,7 +447,7 @@ func TestBigL2Txs(gt *testing.T) {
require.NoError(t, err) require.NoError(t, err)
gas, err := core.IntrinsicGas(data, nil, false, true, true, false) gas, err := core.IntrinsicGas(data, nil, false, true, true, false)
require.NoError(t, err) require.NoError(t, err)
if gas > engine.l2GasPool.Gas() { if gas > engine.engineApi.RemainingBlockGas() {
break break
} }
tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{ tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
......
...@@ -3,12 +3,12 @@ package actions ...@@ -3,12 +3,12 @@ package actions
import ( import (
"errors" "errors"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-program/l2/engineapi"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
geth "github.com/ethereum/go-ethereum/eth" geth "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
...@@ -38,22 +38,10 @@ type L2Engine struct { ...@@ -38,22 +38,10 @@ type L2Engine struct {
rollupGenesis *rollup.Genesis rollupGenesis *rollup.Genesis
// L2 evm / chain // L2 evm / chain
l2Chain *core.BlockChain l2Chain *core.BlockChain
l2Database ethdb.Database l2Signer types.Signer
l2Cfg *core.Genesis
l2Signer types.Signer engineApi *engineapi.L2EngineAPI
// L2 block building data
l2BuildingHeader *types.Header // block header that we add txs to for block building
l2BuildingState *state.StateDB // state used for block building
l2GasPool *core.GasPool // track gas used of ongoing building
pendingIndices map[common.Address]uint64 // per account, how many txs from the pool were already included in the block, since the pool is lagging behind block mining.
l2Transactions []*types.Transaction // collects txs that were successfully included into current block build
l2Receipts []*types.Receipt // collect receipts of ongoing building
l2ForceEmpty bool // when no additional txs may be processed (i.e. when sequencer drift runs out)
l2TxFailed []*types.Transaction // log of failed transactions which could not be included
payloadID engine.PayloadID // ID of payload that is currently being built
failL2RPC error // mock error failL2RPC error // mock error
} }
...@@ -61,6 +49,38 @@ type L2Engine struct { ...@@ -61,6 +49,38 @@ type L2Engine struct {
type EngineOption func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error type EngineOption func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error
func NewL2Engine(t Testing, log log.Logger, genesis *core.Genesis, rollupGenesisL1 eth.BlockID, jwtPath string, options ...EngineOption) *L2Engine { func NewL2Engine(t Testing, log log.Logger, genesis *core.Genesis, rollupGenesisL1 eth.BlockID, jwtPath string, options ...EngineOption) *L2Engine {
n, ethBackend, apiBackend := newBackend(t, genesis, jwtPath, options)
engineApi := engineapi.NewL2EngineAPI(log, apiBackend)
chain := ethBackend.BlockChain()
genesisBlock := chain.Genesis()
eng := &L2Engine{
log: log,
node: n,
eth: ethBackend,
rollupGenesis: &rollup.Genesis{
L1: rollupGenesisL1,
L2: eth.BlockID{Hash: genesisBlock.Hash(), Number: genesisBlock.NumberU64()},
L2Time: genesis.Timestamp,
},
l2Chain: chain,
l2Signer: types.LatestSigner(genesis.Config),
engineApi: engineApi,
}
// register the custom engine API, so we can serve engine requests while having more control
// over sequencing of individual txs.
n.RegisterAPIs([]rpc.API{
{
Namespace: "engine",
Service: eng.engineApi,
Authenticated: true,
},
})
require.NoError(t, n.Start(), "failed to start L2 op-geth node")
return eng
}
func newBackend(t e2eutils.TestingBase, genesis *core.Genesis, jwtPath string, options []EngineOption) (*node.Node, *geth.Ethereum, *engineApiBackend) {
ethCfg := &ethconfig.Config{ ethCfg := &ethconfig.Config{
NetworkId: genesis.Config.ChainID.Uint64(), NetworkId: genesis.Config.ChainID.Uint64(),
Genesis: genesis, Genesis: genesis,
...@@ -89,33 +109,26 @@ func NewL2Engine(t Testing, log log.Logger, genesis *core.Genesis, rollupGenesis ...@@ -89,33 +109,26 @@ func NewL2Engine(t Testing, log log.Logger, genesis *core.Genesis, rollupGenesis
chain := backend.BlockChain() chain := backend.BlockChain()
db := backend.ChainDb() db := backend.ChainDb()
genesisBlock := chain.Genesis() apiBackend := &engineApiBackend{
eng := &L2Engine{ BlockChain: chain,
log: log, db: db,
node: n, genesis: genesis,
eth: backend,
rollupGenesis: &rollup.Genesis{
L1: rollupGenesisL1,
L2: eth.BlockID{Hash: genesisBlock.Hash(), Number: genesisBlock.NumberU64()},
L2Time: genesis.Timestamp,
},
l2Chain: chain,
l2Database: db,
l2Cfg: genesis,
l2Signer: types.LatestSigner(genesis.Config),
} }
// register the custom engine API, so we can serve engine requests while having more control return n, backend, apiBackend
// over sequencing of individual txs. }
n.RegisterAPIs([]rpc.API{
{
Namespace: "engine",
Service: (*L2EngineAPI)(eng),
Authenticated: true,
},
})
require.NoError(t, n.Start(), "failed to start L2 op-geth node")
return eng type engineApiBackend struct {
*core.BlockChain
db ethdb.Database
genesis *core.Genesis
}
func (e *engineApiBackend) Database() ethdb.Database {
return e.db
}
func (e *engineApiBackend) Genesis() *core.Genesis {
return e.genesis
} }
func (s *L2Engine) EthClient() *ethclient.Client { func (s *L2Engine) EthClient() *ethclient.Client {
...@@ -158,39 +171,25 @@ func (e *L2Engine) ActL2RPCFail(t Testing) { ...@@ -158,39 +171,25 @@ func (e *L2Engine) ActL2RPCFail(t Testing) {
// ActL2IncludeTx includes the next transaction from the given address in the block that is being built // ActL2IncludeTx includes the next transaction from the given address in the block that is being built
func (e *L2Engine) ActL2IncludeTx(from common.Address) Action { func (e *L2Engine) ActL2IncludeTx(from common.Address) Action {
return func(t Testing) { return func(t Testing) {
if e.l2BuildingHeader == nil { if e.engineApi.ForcedEmpty() {
t.InvalidAction("not currently building a block, cannot include tx from queue")
return
}
if e.l2ForceEmpty {
e.log.Info("Skipping including a transaction because e.L2ForceEmpty is true") e.log.Info("Skipping including a transaction because e.L2ForceEmpty is true")
// t.InvalidAction("cannot include any sequencer txs")
return return
} }
i := e.pendingIndices[from] i := e.engineApi.PendingIndices(from)
txs, q := e.eth.TxPool().ContentFrom(from) txs, q := e.eth.TxPool().ContentFrom(from)
if uint64(len(txs)) <= i { if uint64(len(txs)) <= i {
t.Fatalf("no pending txs from %s, and have %d unprocessable queued txs from this account", from, len(q)) t.Fatalf("no pending txs from %s, and have %d unprocessable queued txs from this account", from, len(q))
} }
tx := txs[i] tx := txs[i]
if tx.Gas() > e.l2BuildingHeader.GasLimit { err := e.engineApi.IncludeTx(tx, from)
t.Fatalf("tx consumes %d gas, more than available in L2 block %d", tx.Gas(), e.l2BuildingHeader.GasLimit) if errors.Is(err, engineapi.ErrNotBuildingBlock) {
} t.InvalidAction(err.Error())
if tx.Gas() > uint64(*e.l2GasPool) { } else if errors.Is(err, engineapi.ErrUsesTooMuchGas) {
t.InvalidAction("action takes too much gas: %d, only have %d", tx.Gas(), uint64(*e.l2GasPool)) t.InvalidAction("included tx uses too much gas: %v", err)
return } else if err != nil {
} t.Fatalf("include tx: %v", err)
e.pendingIndices[from] = i + 1 // won't retry the tx
e.l2BuildingState.SetTxContext(tx.Hash(), len(e.l2Transactions))
receipt, err := core.ApplyTransaction(e.l2Cfg.Config, e.l2Chain, &e.l2BuildingHeader.Coinbase,
e.l2GasPool, e.l2BuildingState, e.l2BuildingHeader, tx, &e.l2BuildingHeader.GasUsed, *e.l2Chain.GetVMConfig())
if err != nil {
e.l2TxFailed = append(e.l2TxFailed, tx)
t.Fatalf("failed to apply transaction to L2 block (tx %d): %v", len(e.l2Transactions), err)
} }
e.l2Receipts = append(e.l2Receipts, receipt)
e.l2Transactions = append(e.l2Transactions, tx)
} }
} }
......
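The refactor above moves all block-building state behind `engineapi.L2EngineAPI`, so `ActL2IncludeTx` no longer touches headers and gas pools directly; it calls `IncludeTx` and branches on sentinel errors. A minimal sketch of that sentinel-error pattern (only the two error names come from the diff, everything else is a placeholder):

```go
package enginesketch

import (
	"errors"
	"fmt"
)

var (
	ErrNotBuildingBlock = errors.New("not currently building a block")
	ErrUsesTooMuchGas   = errors.New("tx uses too much gas")
)

type engineAPI struct{ building bool }

func (e *engineAPI) IncludeTx() error {
	if !e.building {
		return ErrNotBuildingBlock
	}
	return nil
}

// includeNext shows how a caller can treat the two known failure modes as
// recoverable "invalid actions" while failing hard on anything unexpected.
func includeNext(e *engineAPI) error {
	switch err := e.IncludeTx(); {
	case err == nil:
		return nil
	case errors.Is(err, ErrNotBuildingBlock), errors.Is(err, ErrUsesTooMuchGas):
		return fmt.Errorf("invalid action: %w", err)
	default:
		return fmt.Errorf("include tx: %v", err)
	}
}
```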
...@@ -4,6 +4,8 @@ import ( ...@@ -4,6 +4,8 @@ import (
"math/big" "math/big"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-program/l2/engineapi"
"github.com/ethereum-optimism/optimism/op-program/l2/engineapi/test"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
...@@ -187,3 +189,15 @@ func TestL2EngineAPIFail(gt *testing.T) { ...@@ -187,3 +189,15 @@ func TestL2EngineAPIFail(gt *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Equal(gt, sd.L2Cfg.ToBlock().Hash(), head.Hash(), "expecting engine to start at genesis") require.Equal(gt, sd.L2Cfg.ToBlock().Hash(), head.Hash(), "expecting engine to start at genesis")
} }
func TestEngineAPITests(t *testing.T) {
test.RunEngineAPITests(t, func() engineapi.EngineBackend {
jwtPath := e2eutils.WriteDefaultJWT(t)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc)
n, _, apiBackend := newBackend(t, sd.L2Cfg, jwtPath, nil)
err := n.Start()
require.NoError(t, err)
return apiBackend
})
}
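`TestEngineAPITests` above plugs the e2e backend into a shared conformance suite by passing a factory that builds a fresh `EngineBackend` per run. A sketch of that factory-driven test pattern, using placeholder names rather than the op-program API:

```go
package enginesketch

import "testing"

// Backend is a placeholder for engineapi.EngineBackend.
type Backend interface{ HeadNumber() uint64 }

// RunBackendTests stands in for a shared suite such as test.RunEngineAPITests:
// it owns the assertions, while each caller supplies its own backend factory.
func RunBackendTests(t *testing.T, newBackend func(t *testing.T) Backend) {
	t.Run("starts at genesis", func(t *testing.T) {
		b := newBackend(t)
		if got := b.HeadNumber(); got != 0 {
			t.Fatalf("expected genesis head, got %d", got)
		}
	})
}
```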
...@@ -98,7 +98,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) { ...@@ -98,7 +98,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
// We passed the sequencer drift: we can still keep the old origin, but can't include any txs // We passed the sequencer drift: we can still keep the old origin, but can't include any txs
sequencer.ActL2KeepL1Origin(t) sequencer.ActL2KeepL1Origin(t)
sequencer.ActL2StartBlock(t) sequencer.ActL2StartBlock(t)
require.True(t, engine.l2ForceEmpty, "engine should not be allowed to include anything after sequencer drift is surpassed") require.True(t, engine.engineApi.ForcedEmpty(), "engine should not be allowed to include anything after sequencer drift is surpassed")
} }
// TestL2Sequencer_SequencerOnlyReorg regression-tests a Goerli halt where the sequencer // TestL2Sequencer_SequencerOnlyReorg regression-tests a Goerli halt where the sequencer
......
...@@ -202,11 +202,18 @@ type SyncClient struct { ...@@ -202,11 +202,18 @@ type SyncClient struct {
results chan syncResult results chan syncResult
receivePayload receivePayloadFn
// resource context: all peers and mainLoop tasks inherit this, and start shutting down once resCancel() is called.
resCtx context.Context resCtx context.Context
resCancel context.CancelFunc resCancel context.CancelFunc
receivePayload receivePayloadFn // wait group: wait for the resources to close. Adding to this is only safe if the peersLock is held.
wg sync.WaitGroup wg sync.WaitGroup
// Don't allow anything to be added to the wait-group while, or after, we are shutting down.
// This is protected by peersLock.
closingPeers bool
} }
func NewSyncClient(log log.Logger, cfg *rollup.Config, newStream newStreamFn, rcv receivePayloadFn, metrics SyncClientMetrics) *SyncClient { func NewSyncClient(log log.Logger, cfg *rollup.Config, newStream newStreamFn, rcv receivePayloadFn, metrics SyncClientMetrics) *SyncClient {
...@@ -239,7 +246,9 @@ func NewSyncClient(log log.Logger, cfg *rollup.Config, newStream newStreamFn, rc ...@@ -239,7 +246,9 @@ func NewSyncClient(log log.Logger, cfg *rollup.Config, newStream newStreamFn, rc
} }
func (s *SyncClient) Start() { func (s *SyncClient) Start() {
s.peersLock.Lock()
s.wg.Add(1) s.wg.Add(1)
s.peersLock.Unlock()
go s.mainLoop() go s.mainLoop()
} }
...@@ -250,6 +259,9 @@ func (s *SyncClient) AddPeer(id peer.ID) { ...@@ -250,6 +259,9 @@ func (s *SyncClient) AddPeer(id peer.ID) {
s.log.Warn("cannot register peer for sync duties, peer was already registered", "peer", id) s.log.Warn("cannot register peer for sync duties, peer was already registered", "peer", id)
return return
} }
if s.closingPeers {
return
}
s.wg.Add(1) s.wg.Add(1)
// add new peer routine // add new peer routine
ctx, cancel := context.WithCancel(s.resCtx) ctx, cancel := context.WithCancel(s.resCtx)
...@@ -269,7 +281,12 @@ func (s *SyncClient) RemovePeer(id peer.ID) { ...@@ -269,7 +281,12 @@ func (s *SyncClient) RemovePeer(id peer.ID) {
delete(s.peers, id) delete(s.peers, id)
} }
// Close will shut down the sync client and all attached work, and block until shutdown is complete.
// This will block if the Start() has not created the main background loop.
func (s *SyncClient) Close() error { func (s *SyncClient) Close() error {
s.peersLock.Lock()
s.closingPeers = true
s.peersLock.Unlock()
s.resCancel() s.resCancel()
s.wg.Wait() s.wg.Wait()
return nil return nil
......
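The `closingPeers` flag above closes a race between `Close()` and late `AddPeer` calls: every `wg.Add` happens under `peersLock`, and once the flag is set no new work can be registered, so `wg.Wait()` cannot miss a goroutine. A minimal sketch of that guard (type and method names are placeholders, not the op-node types):

```go
package syncsketch

import (
	"context"
	"sync"
)

type client struct {
	mu           sync.Mutex
	closingPeers bool
	wg           sync.WaitGroup
	resCtx       context.Context
	resCancel    context.CancelFunc
}

func newClient() *client {
	ctx, cancel := context.WithCancel(context.Background())
	return &client{resCtx: ctx, resCancel: cancel}
}

func (c *client) addPeer(run func(ctx context.Context)) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closingPeers { // shutdown already started: refuse new work
		return
	}
	c.wg.Add(1) // only ever called with the lock held
	go func() {
		defer c.wg.Done()
		run(c.resCtx)
	}()
}

func (c *client) close() {
	c.mu.Lock()
	c.closingPeers = true
	c.mu.Unlock()
	c.resCancel() // tell existing peer loops to stop
	c.wg.Wait()   // safe: no Add can race with this Wait
}
```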
...@@ -2,10 +2,8 @@ package p2p ...@@ -2,10 +2,8 @@ package p2p
import ( import (
"context" "context"
"math"
"math/big" "math/big"
"testing" "testing"
"time"
"github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/network"
...@@ -13,13 +11,14 @@ import ( ...@@ -13,13 +11,14 @@ import (
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/metrics" "github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
) )
type mockPayloadFn func(n uint64) (*eth.ExecutionPayload, error) type mockPayloadFn func(n uint64) (*eth.ExecutionPayload, error)
...@@ -120,18 +119,12 @@ func TestSinglePeerSync(t *testing.T) { ...@@ -120,18 +119,12 @@ func TestSinglePeerSync(t *testing.T) {
require.NoError(t, cl.RequestL2Range(ctx, l2Ref(10), l2Ref(20))) require.NoError(t, cl.RequestL2Range(ctx, l2Ref(10), l2Ref(20)))
// and wait for the sync results to come in (in reverse order) // and wait for the sync results to come in (in reverse order)
receiveCtx, receiveCancel := context.WithTimeout(ctx, time.Second*5)
defer receiveCancel()
for i := uint64(19); i > 10; i-- { for i := uint64(19); i > 10; i-- {
select { p := <-received
case p := <-received: require.Equal(t, uint64(p.BlockNumber), i, "expecting payloads in order")
require.Equal(t, uint64(p.BlockNumber), i, "expecting payloads in order") exp, ok := payloads[uint64(p.BlockNumber)]
exp, ok := payloads[uint64(p.BlockNumber)] require.True(t, ok, "expecting known payload")
require.True(t, ok, "expecting known payload") require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
case <-receiveCtx.Done():
t.Fatal("did not receive all expected payloads within expected time")
}
} }
} }
...@@ -202,34 +195,20 @@ func TestMultiPeerSync(t *testing.T) { ...@@ -202,34 +195,20 @@ func TestMultiPeerSync(t *testing.T) {
// With such large range to request we are going to hit the rate-limits of B and C, // With such large range to request we are going to hit the rate-limits of B and C,
// but that means we'll balance the work between the peers. // but that means we'll balance the work between the peers.
p := <-recvA
// wait for the results to come in, based on the expected rate limit, divided by 2 (because we have 2 servers), with a buffer of 2 seconds exp, ok := payloads[uint64(p.BlockNumber)]
receiveCtx, receiveCancel := context.WithTimeout(ctx, time.Second*time.Duration(math.Ceil(float64((89-10)/peerServerBlocksRateLimit)))/2+time.Second*2) require.True(t, ok, "expecting known payload")
defer receiveCancel() require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
for i := uint64(89); i > 10; i-- {
select {
case p := <-recvA:
exp, ok := payloads[uint64(p.BlockNumber)]
require.True(t, ok, "expecting known payload")
require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
case <-receiveCtx.Done():
t.Fatal("did not receive all expected payloads within expected time")
}
}
// now see if B can sync a range, and fill the gap with a re-request // now see if B can sync a range, and fill the gap with a re-request
bl25 := payloads[25] // temporarily remove it from the available payloads. This will create a gap bl25 := payloads[25] // temporarily remove it from the available payloads. This will create a gap
delete(payloads, uint64(25)) delete(payloads, uint64(25))
require.NoError(t, clB.RequestL2Range(ctx, l2Ref(20), l2Ref(30))) require.NoError(t, clB.RequestL2Range(ctx, l2Ref(20), l2Ref(30)))
for i := uint64(29); i > 25; i-- { for i := uint64(29); i > 25; i-- {
select { p := <-recvB
case p := <-recvB: exp, ok := payloads[uint64(p.BlockNumber)]
exp, ok := payloads[uint64(p.BlockNumber)] require.True(t, ok, "expecting known payload")
require.True(t, ok, "expecting known payload") require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
case <-receiveCtx.Done():
t.Fatal("did not receive all expected payloads within expected time")
}
} }
// the request for 25 should fail. See: // the request for 25 should fail. See:
// server: WARN peer requested unknown block by number num=25 // server: WARN peer requested unknown block by number num=25
...@@ -239,16 +218,11 @@ func TestMultiPeerSync(t *testing.T) { ...@@ -239,16 +218,11 @@ func TestMultiPeerSync(t *testing.T) {
payloads[25] = bl25 payloads[25] = bl25
// And request a range again, 25 is there now, and 21-24 should follow quickly (some may already have been fetched and wait in quarantine) // And request a range again, 25 is there now, and 21-24 should follow quickly (some may already have been fetched and wait in quarantine)
require.NoError(t, clB.RequestL2Range(ctx, l2Ref(20), l2Ref(26))) require.NoError(t, clB.RequestL2Range(ctx, l2Ref(20), l2Ref(26)))
receiveCtx, receiveCancel = context.WithTimeout(ctx, time.Second*10)
defer receiveCancel()
for i := uint64(25); i > 20; i-- { for i := uint64(25); i > 20; i-- {
select { p := <-recvB
case p := <-recvB: exp, ok := payloads[uint64(p.BlockNumber)]
exp, ok := payloads[uint64(p.BlockNumber)] require.True(t, ok, "expecting known payload")
require.True(t, ok, "expecting known payload") require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
case <-receiveCtx.Done():
t.Fatal("did not receive all expected payloads within expected time")
}
} }
} }
...@@ -52,7 +52,6 @@ func NewEventVec(factory Factory, ns string, name string, displayName string, la ...@@ -52,7 +52,6 @@ func NewEventVec(factory Factory, ns string, name string, displayName string, la
Namespace: ns, Namespace: ns,
Name: fmt.Sprintf("last_%s_unix", name), Name: fmt.Sprintf("last_%s_unix", name),
Help: fmt.Sprintf("Timestamp of last %s event", displayName), Help: fmt.Sprintf("Timestamp of last %s event", displayName),
}, }, labelNames),
labelNames),
} }
} }
...@@ -4,4 +4,9 @@ import "github.com/ethereum/go-ethereum/core/types" ...@@ -4,4 +4,9 @@ import "github.com/ethereum/go-ethereum/core/types"
type NoopTxMetrics struct{} type NoopTxMetrics struct{}
func (*NoopTxMetrics) RecordL1GasFee(*types.Receipt) {} func (*NoopTxMetrics) RecordNonce(uint64) {}
func (*NoopTxMetrics) RecordGasBumpCount(int) {}
func (*NoopTxMetrics) RecordTxConfirmationLatency(int64) {}
func (*NoopTxMetrics) TxConfirmed(*types.Receipt) {}
func (*NoopTxMetrics) TxPublished(string) {}
func (*NoopTxMetrics) RPCError() {}
...@@ -9,11 +9,34 @@ import ( ...@@ -9,11 +9,34 @@ import (
) )
type TxMetricer interface { type TxMetricer interface {
RecordL1GasFee(receipt *types.Receipt) RecordGasBumpCount(int)
RecordTxConfirmationLatency(int64)
RecordNonce(uint64)
TxConfirmed(*types.Receipt)
TxPublished(string)
RPCError()
} }
type TxMetrics struct { type TxMetrics struct {
TxL1GasFee prometheus.Gauge TxL1GasFee prometheus.Gauge
TxGasBump prometheus.Gauge
LatencyConfirmedTx prometheus.Gauge
currentNonce prometheus.Gauge
txPublishError *prometheus.CounterVec
publishEvent metrics.Event
confirmEvent metrics.EventVec
rpcError prometheus.Counter
}
func receiptStatusString(receipt *types.Receipt) string {
switch receipt.Status {
case types.ReceiptStatusSuccessful:
return "success"
case types.ReceiptStatusFailed:
return "failed"
default:
return "unknown_status"
}
} }
var _ TxMetricer = (*TxMetrics)(nil) var _ TxMetricer = (*TxMetrics)(nil)
...@@ -26,9 +49,67 @@ func MakeTxMetrics(ns string, factory metrics.Factory) TxMetrics { ...@@ -26,9 +49,67 @@ func MakeTxMetrics(ns string, factory metrics.Factory) TxMetrics {
Help: "L1 gas fee for transactions in GWEI", Help: "L1 gas fee for transactions in GWEI",
Subsystem: "txmgr", Subsystem: "txmgr",
}), }),
TxGasBump: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "tx_gas_bump",
Help: "Number of times a transaction gas needed to be bumped before it got included",
Subsystem: "txmgr",
}),
LatencyConfirmedTx: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "tx_confirmed_latency_ms",
Help: "Latency of a confirmed transaction in milliseconds",
Subsystem: "txmgr",
}),
currentNonce: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "current_nonce",
Help: "Current nonce of the from address",
Subsystem: "txmgr",
}),
txPublishError: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Name: "tx_publish_error_count",
Help: "Count of publish errors. Labells are sanitized error strings",
Subsystem: "txmgr",
}, []string{"error"}),
confirmEvent: metrics.NewEventVec(factory, ns, "confirm", "tx confirm", []string{"status"}),
publishEvent: metrics.NewEvent(factory, ns, "publish", "tx publish"),
rpcError: factory.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "rpc_error_count",
Help: "Temporrary: Count of RPC errors (like timeouts) that have occurrred",
Subsystem: "txmgr",
}),
} }
} }
func (t *TxMetrics) RecordL1GasFee(receipt *types.Receipt) { func (t *TxMetrics) RecordNonce(nonce uint64) {
t.currentNonce.Set(float64(nonce))
}
// TxConfirmed records the receipt status and L1 gas fee of the confirmed transaction
func (t *TxMetrics) TxConfirmed(receipt *types.Receipt) {
t.confirmEvent.Record(receiptStatusString(receipt))
t.TxL1GasFee.Set(float64(receipt.EffectiveGasPrice.Uint64() * receipt.GasUsed / params.GWei)) t.TxL1GasFee.Set(float64(receipt.EffectiveGasPrice.Uint64() * receipt.GasUsed / params.GWei))
} }
func (t *TxMetrics) RecordGasBumpCount(times int) {
t.TxGasBump.Set(float64(times))
}
func (t *TxMetrics) RecordTxConfirmationLatency(latency int64) {
t.LatencyConfirmedTx.Set(float64(latency))
}
func (t *TxMetrics) TxPublished(errString string) {
if errString != "" {
t.txPublishError.WithLabelValues(errString).Inc()
} else {
t.publishEvent.Record()
}
}
func (t *TxMetrics) RPCError() {
t.rpcError.Inc()
}
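For orientation, the widened `TxMetricer` interface above is what the transaction manager records against: the Prometheus-backed `TxMetrics` feeds the new gauges and counters, while `NoopTxMetrics` keeps tests quiet. A small sketch of how a caller consumes the interface; the manager type here is a stand-in, not `SimpleTxManager` itself:

```go
package txsketch

import "github.com/ethereum/go-ethereum/core/types"

// TxMetricer mirrors the interface in the diff above.
type TxMetricer interface {
	RecordGasBumpCount(int)
	RecordTxConfirmationLatency(int64)
	RecordNonce(uint64)
	TxConfirmed(*types.Receipt)
	TxPublished(string)
	RPCError()
}

// manager only depends on the interface, so production code can pass the
// Prometheus-backed TxMetrics and tests can pass NoopTxMetrics.
type manager struct{ metr TxMetricer }

func (m *manager) confirmed(receipt *types.Receipt, bumps int, latencyMS int64) {
	m.metr.RecordGasBumpCount(bumps)
	m.metr.RecordTxConfirmationLatency(latencyMS)
	m.metr.TxConfirmed(receipt)
}
```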
...@@ -148,6 +148,7 @@ func (m *SimpleTxManager) Send(ctx context.Context, candidate TxCandidate) (*typ ...@@ -148,6 +148,7 @@ func (m *SimpleTxManager) Send(ctx context.Context, candidate TxCandidate) (*typ
func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (*types.Transaction, error) { func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (*types.Transaction, error) {
gasTipCap, basefee, err := m.suggestGasPriceCaps(ctx) gasTipCap, basefee, err := m.suggestGasPriceCaps(ctx)
if err != nil { if err != nil {
m.metr.RPCError()
return nil, fmt.Errorf("failed to get gas price info: %w", err) return nil, fmt.Errorf("failed to get gas price info: %w", err)
} }
gasFeeCap := calcGasFeeCap(basefee, gasTipCap) gasFeeCap := calcGasFeeCap(basefee, gasTipCap)
...@@ -157,8 +158,10 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (* ...@@ -157,8 +158,10 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (*
defer cancel() defer cancel()
nonce, err := m.backend.NonceAt(childCtx, m.cfg.From, nil) nonce, err := m.backend.NonceAt(childCtx, m.cfg.From, nil)
if err != nil { if err != nil {
m.metr.RPCError()
return nil, fmt.Errorf("failed to get nonce: %w", err) return nil, fmt.Errorf("failed to get nonce: %w", err)
} }
m.metr.RecordNonce(nonce)
rawTx := &types.DynamicFeeTx{ rawTx := &types.DynamicFeeTx{
ChainID: m.chainID, ChainID: m.chainID,
...@@ -216,6 +219,7 @@ func (m *SimpleTxManager) send(ctx context.Context, tx *types.Transaction) (*typ ...@@ -216,6 +219,7 @@ func (m *SimpleTxManager) send(ctx context.Context, tx *types.Transaction) (*typ
ticker := time.NewTicker(m.cfg.ResubmissionTimeout) ticker := time.NewTicker(m.cfg.ResubmissionTimeout)
defer ticker.Stop() defer ticker.Stop()
bumpCounter := 0
for { for {
select { select {
case <-ticker.C: case <-ticker.C:
...@@ -231,12 +235,15 @@ func (m *SimpleTxManager) send(ctx context.Context, tx *types.Transaction) (*typ ...@@ -231,12 +235,15 @@ func (m *SimpleTxManager) send(ctx context.Context, tx *types.Transaction) (*typ
// Increase the gas price & submit the new transaction // Increase the gas price & submit the new transaction
tx = m.increaseGasPrice(ctx, tx) tx = m.increaseGasPrice(ctx, tx)
wg.Add(1) wg.Add(1)
bumpCounter += 1
go sendTxAsync(tx) go sendTxAsync(tx)
case <-ctx.Done(): case <-ctx.Done():
return nil, ctx.Err() return nil, ctx.Err()
case receipt := <-receiptChan: case receipt := <-receiptChan:
m.metr.RecordGasBumpCount(bumpCounter)
m.metr.TxConfirmed(receipt)
return receipt, nil return receipt, nil
} }
} }
...@@ -251,6 +258,7 @@ func (m *SimpleTxManager) publishAndWaitForTx(ctx context.Context, tx *types.Tra ...@@ -251,6 +258,7 @@ func (m *SimpleTxManager) publishAndWaitForTx(ctx context.Context, tx *types.Tra
cCtx, cancel := context.WithTimeout(ctx, m.cfg.NetworkTimeout) cCtx, cancel := context.WithTimeout(ctx, m.cfg.NetworkTimeout)
defer cancel() defer cancel()
t := time.Now()
err := m.backend.SendTransaction(cCtx, tx) err := m.backend.SendTransaction(cCtx, tx)
sendState.ProcessSendError(err) sendState.ProcessSendError(err)
...@@ -259,19 +267,28 @@ func (m *SimpleTxManager) publishAndWaitForTx(ctx context.Context, tx *types.Tra ...@@ -259,19 +267,28 @@ func (m *SimpleTxManager) publishAndWaitForTx(ctx context.Context, tx *types.Tra
switch { switch {
case errStringMatch(err, core.ErrNonceTooLow): case errStringMatch(err, core.ErrNonceTooLow):
log.Warn("nonce too low", "err", err) log.Warn("nonce too low", "err", err)
m.metr.TxPublished("nonce_to_low")
case errStringMatch(err, context.Canceled): case errStringMatch(err, context.Canceled):
m.metr.RPCError()
log.Warn("transaction send cancelled", "err", err) log.Warn("transaction send cancelled", "err", err)
m.metr.TxPublished("context_cancelled")
case errStringMatch(err, txpool.ErrAlreadyKnown): case errStringMatch(err, txpool.ErrAlreadyKnown):
log.Warn("resubmitted already known transaction", "err", err) log.Warn("resubmitted already known transaction", "err", err)
m.metr.TxPublished("tx_already_known")
case errStringMatch(err, txpool.ErrReplaceUnderpriced): case errStringMatch(err, txpool.ErrReplaceUnderpriced):
log.Warn("transaction replacement is underpriced", "err", err) log.Warn("transaction replacement is underpriced", "err", err)
m.metr.TxPublished("tx_replacement_underpriced")
case errStringMatch(err, txpool.ErrUnderpriced): case errStringMatch(err, txpool.ErrUnderpriced):
log.Warn("transaction is underpriced", "err", err) log.Warn("transaction is underpriced", "err", err)
m.metr.TxPublished("tx_underpriced")
default: default:
m.metr.RPCError()
log.Error("unable to publish transaction", "err", err) log.Error("unable to publish transaction", "err", err)
m.metr.TxPublished("unknown_error")
} }
return return
} }
m.metr.TxPublished("")
log.Info("Transaction successfully published") log.Info("Transaction successfully published")
// Poll for the transaction to be ready & then send the result to receiptChan // Poll for the transaction to be ready & then send the result to receiptChan
...@@ -282,7 +299,7 @@ func (m *SimpleTxManager) publishAndWaitForTx(ctx context.Context, tx *types.Tra ...@@ -282,7 +299,7 @@ func (m *SimpleTxManager) publishAndWaitForTx(ctx context.Context, tx *types.Tra
} }
select { select {
case receiptChan <- receipt: case receiptChan <- receipt:
m.metr.RecordL1GasFee(receipt) m.metr.RecordTxConfirmationLatency(time.Since(t).Milliseconds())
default: default:
} }
} }
...@@ -314,9 +331,11 @@ func (m *SimpleTxManager) queryReceipt(ctx context.Context, txHash common.Hash, ...@@ -314,9 +331,11 @@ func (m *SimpleTxManager) queryReceipt(ctx context.Context, txHash common.Hash,
m.l.Trace("Transaction not yet mined", "hash", txHash) m.l.Trace("Transaction not yet mined", "hash", txHash)
return nil return nil
} else if err != nil { } else if err != nil {
m.metr.RPCError()
m.l.Info("Receipt retrieval failed", "hash", txHash, "err", err) m.l.Info("Receipt retrieval failed", "hash", txHash, "err", err)
return nil return nil
} else if receipt == nil { } else if receipt == nil {
m.metr.RPCError()
m.l.Warn("Receipt and error are both nil", "hash", txHash) m.l.Warn("Receipt and error are both nil", "hash", txHash)
return nil return nil
} }
...@@ -400,6 +419,7 @@ func (m *SimpleTxManager) suggestGasPriceCaps(ctx context.Context) (*big.Int, *b ...@@ -400,6 +419,7 @@ func (m *SimpleTxManager) suggestGasPriceCaps(ctx context.Context) (*big.Int, *b
defer cancel() defer cancel()
tip, err := m.backend.SuggestGasTipCap(cCtx) tip, err := m.backend.SuggestGasTipCap(cCtx)
if err != nil { if err != nil {
m.metr.RPCError()
return nil, nil, fmt.Errorf("failed to fetch the suggested gas tip cap: %w", err) return nil, nil, fmt.Errorf("failed to fetch the suggested gas tip cap: %w", err)
} else if tip == nil { } else if tip == nil {
return nil, nil, errors.New("the suggested tip was nil") return nil, nil, errors.New("the suggested tip was nil")
...@@ -408,6 +428,7 @@ func (m *SimpleTxManager) suggestGasPriceCaps(ctx context.Context) (*big.Int, *b ...@@ -408,6 +428,7 @@ func (m *SimpleTxManager) suggestGasPriceCaps(ctx context.Context) (*big.Int, *b
defer cancel() defer cancel()
head, err := m.backend.HeaderByNumber(cCtx, nil) head, err := m.backend.HeaderByNumber(cCtx, nil)
if err != nil { if err != nil {
m.metr.RPCError()
return nil, nil, fmt.Errorf("failed to fetch the suggested basefee: %w", err) return nil, nil, fmt.Errorf("failed to fetch the suggested basefee: %w", err)
} else if head.BaseFee == nil { } else if head.BaseFee == nil {
return nil, nil, errors.New("txmgr does not support pre-london blocks that do not have a basefee") return nil, nil, errors.New("txmgr does not support pre-london blocks that do not have a basefee")
......
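The instrumentation above maps each publish failure onto a small, fixed set of label strings so `tx_publish_error_count` stays low-cardinality, and records a plain publish event on success (the empty-string case). A sketch of that classification; the sentinel errors and matcher are simplified stand-ins for `errStringMatch` and the txpool errors:

```go
package txsketch

import (
	"context"
	"errors"
	"strings"
)

var (
	errNonceTooLow        = errors.New("nonce too low")
	errAlreadyKnown       = errors.New("already known")
	errReplaceUnderpriced = errors.New("replacement transaction underpriced")
	errUnderpriced        = errors.New("transaction underpriced")
)

// publishLabel returns the sanitized label for a SendTransaction error; the
// empty string means the publish succeeded and is counted as an event instead.
func publishLabel(err error) string {
	match := func(target error) bool {
		return err != nil && strings.Contains(err.Error(), target.Error())
	}
	switch {
	case err == nil:
		return ""
	case match(errNonceTooLow):
		return "nonce_too_low"
	case errors.Is(err, context.Canceled):
		return "context_cancelled"
	case match(errAlreadyKnown):
		return "tx_already_known"
	case match(errReplaceUnderpriced):
		return "tx_replacement_underpriced"
	case match(errUnderpriced):
		return "tx_underpriced"
	default:
		return "unknown_error"
	}
}
```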
...@@ -691,6 +691,7 @@ func TestWaitMinedReturnsReceiptAfterFailure(t *testing.T) { ...@@ -691,6 +691,7 @@ func TestWaitMinedReturnsReceiptAfterFailure(t *testing.T) {
name: "TEST", name: "TEST",
backend: &borkedBackend, backend: &borkedBackend,
l: testlog.Logger(t, log.LvlCrit), l: testlog.Logger(t, log.LvlCrit),
metr: &metrics.NoopTxMetrics{},
} }
// Don't mine the tx with the default backend. The failingBackend will // Don't mine the tx with the default backend. The failingBackend will
...@@ -726,6 +727,7 @@ func doGasPriceIncrease(t *testing.T, txTipCap, txFeeCap, newTip, newBaseFee int ...@@ -726,6 +727,7 @@ func doGasPriceIncrease(t *testing.T, txTipCap, txFeeCap, newTip, newBaseFee int
name: "TEST", name: "TEST",
backend: &borkedBackend, backend: &borkedBackend,
l: testlog.Logger(t, log.LvlCrit), l: testlog.Logger(t, log.LvlCrit),
metr: &metrics.NoopTxMetrics{},
} }
tx := types.NewTx(&types.DynamicFeeTx{ tx := types.NewTx(&types.DynamicFeeTx{
...@@ -829,6 +831,7 @@ func TestIncreaseGasPriceNotExponential(t *testing.T) { ...@@ -829,6 +831,7 @@ func TestIncreaseGasPriceNotExponential(t *testing.T) {
name: "TEST", name: "TEST",
backend: &borkedBackend, backend: &borkedBackend,
l: testlog.Logger(t, log.LvlCrit), l: testlog.Logger(t, log.LvlCrit),
metr: &metrics.NoopTxMetrics{},
} }
tx := types.NewTx(&types.DynamicFeeTx{ tx := types.NewTx(&types.DynamicFeeTx{
GasTipCap: big.NewInt(10), GasTipCap: big.NewInt(10),
......
...@@ -63,7 +63,7 @@ Also some more hooks exported by the cli but these are likely the only ones you ...@@ -63,7 +63,7 @@ Also some more hooks exported by the cli but these are likely the only ones you
Please see our [contributing.md](https://github.com/ethereum-optimism/optimism/blob/develop/CONTRIBUTING.md). No contribution is too small. Please see our [contributing.md](https://github.com/ethereum-optimism/optimism/blob/develop/CONTRIBUTING.md). No contribution is too small.
Having your contribution denied feels bad. Having your contribution denied feels bad.
Please consider [opening an issue](https://github.com/ethereum-optimism/optimism/issues) before adding any new features or apis. Please consider [opening an issue](https://github.com/ethereum-optimism/optimism/issues) before adding any new features or apis.
...@@ -73,5 +73,5 @@ If you have any problems, these resources could help you: ...@@ -73,5 +73,5 @@ If you have any problems, these resources could help you:
- [sdk documentation](https://github.com/ethereum-optimism/optimism/blob/develop/packages/atst/docs/sdk.md) - [sdk documentation](https://github.com/ethereum-optimism/optimism/blob/develop/packages/atst/docs/sdk.md)
- [cli documentation](https://github.com/ethereum-optimism/optimism/blob/develop/packages/atst/docs/cli.md) - [cli documentation](https://github.com/ethereum-optimism/optimism/blob/develop/packages/atst/docs/cli.md)
- [Optimism Discord](https://discord-gateway.optimism.io/) - [Optimism Discord](https://discord.gg/optimism)
- [Telegram group](https://t.me/+zwpJ8Ohqgl8yNjNh) - [Telegram group](https://t.me/+zwpJ8Ohqgl8yNjNh)
...@@ -215,7 +215,7 @@ contract SystemDictator is OwnableUpgradeable { ...@@ -215,7 +215,7 @@ contract SystemDictator is OwnableUpgradeable {
/** /**
* @notice Configures the ProxyAdmin contract. * @notice Configures the ProxyAdmin contract.
*/ */
function step1() external onlyOwner step(1) { function step1() public onlyOwner step(1) {
// Set the AddressManager in the ProxyAdmin. // Set the AddressManager in the ProxyAdmin.
config.globalConfig.proxyAdmin.setAddressManager(config.globalConfig.addressManager); config.globalConfig.proxyAdmin.setAddressManager(config.globalConfig.addressManager);
...@@ -260,7 +260,7 @@ contract SystemDictator is OwnableUpgradeable { ...@@ -260,7 +260,7 @@ contract SystemDictator is OwnableUpgradeable {
* @notice Pauses the system by shutting down the L1CrossDomainMessenger and setting the * @notice Pauses the system by shutting down the L1CrossDomainMessenger and setting the
* deposit halt flag to tell the Sequencer's DTL to stop accepting deposits. * deposit halt flag to tell the Sequencer's DTL to stop accepting deposits.
*/ */
function step2() external onlyOwner step(2) { function step2() public onlyOwner step(2) {
// Store the address of the old L1CrossDomainMessenger implementation. We will need this // Store the address of the old L1CrossDomainMessenger implementation. We will need this
// address in the case that we have to exit early. // address in the case that we have to exit early.
oldL1CrossDomainMessenger = config.globalConfig.addressManager.getAddress( oldL1CrossDomainMessenger = config.globalConfig.addressManager.getAddress(
...@@ -410,6 +410,14 @@ contract SystemDictator is OwnableUpgradeable { ...@@ -410,6 +410,14 @@ contract SystemDictator is OwnableUpgradeable {
); );
} }
/**
* @notice Calls the first 2 steps of the migration process.
*/
function phase1() external onlyOwner {
step1();
step2();
}
/** /**
* @notice Transfers admin ownership to the final owner. * @notice Transfers admin ownership to the final owner.
*/ */
......
...@@ -84,7 +84,7 @@ contract Proxy { ...@@ -84,7 +84,7 @@ contract Proxy {
* *
* @param _implementation Address of the implementation contract. * @param _implementation Address of the implementation contract.
*/ */
function upgradeTo(address _implementation) external proxyCallIfNotAdmin { function upgradeTo(address _implementation) public virtual proxyCallIfNotAdmin {
_setImplementation(_implementation); _setImplementation(_implementation);
} }
...@@ -96,8 +96,9 @@ contract Proxy { ...@@ -96,8 +96,9 @@ contract Proxy {
* @param _data Calldata to delegatecall the new implementation with. * @param _data Calldata to delegatecall the new implementation with.
*/ */
function upgradeToAndCall(address _implementation, bytes calldata _data) function upgradeToAndCall(address _implementation, bytes calldata _data)
external public
payable payable
virtual
proxyCallIfNotAdmin proxyCallIfNotAdmin
returns (bytes memory) returns (bytes memory)
{ {
...@@ -112,7 +113,7 @@ contract Proxy { ...@@ -112,7 +113,7 @@ contract Proxy {
* *
* @param _admin New owner of the proxy contract. * @param _admin New owner of the proxy contract.
*/ */
function changeAdmin(address _admin) external proxyCallIfNotAdmin { function changeAdmin(address _admin) public virtual proxyCallIfNotAdmin {
_changeAdmin(_admin); _changeAdmin(_admin);
} }
...@@ -121,7 +122,7 @@ contract Proxy { ...@@ -121,7 +122,7 @@ contract Proxy {
* *
* @return Owner address. * @return Owner address.
*/ */
function admin() external proxyCallIfNotAdmin returns (address) { function admin() public virtual proxyCallIfNotAdmin returns (address) {
return _getAdmin(); return _getAdmin();
} }
...@@ -130,7 +131,7 @@ contract Proxy { ...@@ -130,7 +131,7 @@ contract Proxy {
* *
* @return Implementation address. * @return Implementation address.
*/ */
function implementation() external proxyCallIfNotAdmin returns (address) { function implementation() public virtual proxyCallIfNotAdmin returns (address) {
return _getImplementation(); return _getImplementation();
} }
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
"finalSystemOwner": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc", "finalSystemOwner": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
"portalGuardian": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc", "portalGuardian": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
"controller": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", "controller": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266",
"proxyAdminOwner": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
"l1StartingBlockTag": "earliest", "l1StartingBlockTag": "earliest",
"l1ChainID": 900, "l1ChainID": 900,
...@@ -16,12 +17,22 @@ ...@@ -16,12 +17,22 @@
"batchSenderAddress": "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC", "batchSenderAddress": "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC",
"l2OutputOracleSubmissionInterval": 6, "l2OutputOracleSubmissionInterval": 6,
"l2OutputOracleStartingTimestamp": -1, "l2OutputOracleStartingTimestamp": 0,
"l2OutputOracleStartingBlockNumber": 0,
"l2OutputOracleProposer": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8", "l2OutputOracleProposer": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8",
"l2OutputOracleChallenger": "0x6925B8704Ff96DEe942623d6FB5e946EF5884b63", "l2OutputOracleChallenger": "0x6925B8704Ff96DEe942623d6FB5e946EF5884b63",
"l2GenesisBlockBaseFeePerGas": "0x3B9ACA00", "l2GenesisBlockBaseFeePerGas": "0x3B9ACA00",
"baseFeeVaultRecipient": "0xBcd4042DE499D14e55001CcbB24a551F3b954096", "l2GenesisBlockGasLimit": "0x17D7840",
"baseFeeVaultRecipient": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
"l1FeeVaultRecipient": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
"sequencerFeeVaultRecipient": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
"governanceTokenName": "Optimism",
"governanceTokenSymbol": "OP",
"governanceTokenOwner": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
"l1FeeVaultRecipient": "0x71bE63f3384f5fb98995898A86B02Fb2426c5788", "l1FeeVaultRecipient": "0x71bE63f3384f5fb98995898A86B02Fb2426c5788",
"sequencerFeeVaultRecipient": "0xfabb0ac9d68b0b445fb7357272ff202c5651694a", "sequencerFeeVaultRecipient": "0xfabb0ac9d68b0b445fb7357272ff202c5651694a",
......
...@@ -11,10 +11,8 @@ import { ...@@ -11,10 +11,8 @@ import {
assertContractVariable, assertContractVariable,
getContractsFromArtifacts, getContractsFromArtifacts,
getDeploymentAddress, getDeploymentAddress,
doStep, doOwnershipTransfer,
jsonifyTransaction, doPhase,
getTenderlySimulationLink,
getCastCommand,
} from '../src/deploy-utils' } from '../src/deploy-utils'
const uint128Max = ethers.BigNumber.from('0xffffffffffffffffffffffffffffffff') const uint128Max = ethers.BigNumber.from('0xffffffffffffffffffffffffffffffff')
...@@ -73,10 +71,13 @@ const deployFn: DeployFunction = async (hre) => { ...@@ -73,10 +71,13 @@ const deployFn: DeployFunction = async (hre) => {
// Transfer ownership of the ProxyAdmin to the SystemDictator. // Transfer ownership of the ProxyAdmin to the SystemDictator.
if ((await ProxyAdmin.owner()) !== SystemDictator.address) { if ((await ProxyAdmin.owner()) !== SystemDictator.address) {
console.log(`Setting ProxyAdmin owner to MSD`) await doOwnershipTransfer({
await ProxyAdmin.transferOwnership(SystemDictator.address) isLiveDeployer,
} else { proxy: ProxyAdmin,
console.log(`Proxy admin already owned by MSD`) name: 'ProxyAdmin',
transferFunc: 'transferOwnership',
dictator: SystemDictator,
})
} }
// We don't need to transfer proxy addresses if we're already beyond the proxy transfer step. // We don't need to transfer proxy addresses if we're already beyond the proxy transfer step.
...@@ -89,31 +90,13 @@ const deployFn: DeployFunction = async (hre) => { ...@@ -89,31 +90,13 @@ const deployFn: DeployFunction = async (hre) => {
needsProxyTransfer && needsProxyTransfer &&
(await AddressManager.owner()) !== SystemDictator.address (await AddressManager.owner()) !== SystemDictator.address
) { ) {
if (isLiveDeployer) { await doOwnershipTransfer({
console.log(`Setting AddressManager owner to MSD`) isLiveDeployer,
await AddressManager.transferOwnership(SystemDictator.address) proxy: AddressManager,
} else { name: 'AddressManager',
const tx = await AddressManager.populateTransaction.transferOwnership( transferFunc: 'transferOwnership',
SystemDictator.address dictator: SystemDictator,
) })
console.log(`Please transfer AddressManager owner to MSD`)
console.log(`AddressManager address: ${AddressManager.address}`)
console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
console.log(getCastCommand(tx))
console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
}
// Wait for the ownership transfer to complete.
await awaitCondition(
async () => {
const owner = await AddressManager.owner()
return owner === SystemDictator.address
},
5000,
1000
)
} else { } else {
console.log(`AddressManager already owned by the SystemDictator`) console.log(`AddressManager already owned by the SystemDictator`)
} }
...@@ -125,35 +108,13 @@ const deployFn: DeployFunction = async (hre) => { ...@@ -125,35 +108,13 @@ const deployFn: DeployFunction = async (hre) => {
from: ethers.constants.AddressZero, from: ethers.constants.AddressZero,
})) !== SystemDictator.address })) !== SystemDictator.address
) { ) {
if (isLiveDeployer) { await doOwnershipTransfer({
console.log(`Setting L1StandardBridge owner to MSD`) isLiveDeployer,
await L1StandardBridgeProxyWithSigner.setOwner(SystemDictator.address) proxy: L1StandardBridgeProxyWithSigner,
} else { name: 'L1StandardBridgeProxy',
const tx = await L1StandardBridgeProxy.populateTransaction.setOwner( transferFunc: 'setOwner',
SystemDictator.address dictator: SystemDictator,
) })
console.log(`Please transfer L1StandardBridge (proxy) owner to MSD`)
console.log(
`L1StandardBridgeProxy address: ${L1StandardBridgeProxy.address}`
)
console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
console.log(getCastCommand(tx))
console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
}
// Wait for the ownership transfer to complete.
await awaitCondition(
async () => {
const owner = await L1StandardBridgeProxy.callStatic.getOwner({
from: ethers.constants.AddressZero,
})
return owner === SystemDictator.address
},
5000,
1000
)
} else { } else {
console.log(`L1StandardBridge already owned by MSD`) console.log(`L1StandardBridge already owned by MSD`)
} }
...@@ -165,47 +126,58 @@ const deployFn: DeployFunction = async (hre) => { ...@@ -165,47 +126,58 @@ const deployFn: DeployFunction = async (hre) => {
from: ethers.constants.AddressZero, from: ethers.constants.AddressZero,
})) !== SystemDictator.address })) !== SystemDictator.address
) { ) {
if (isLiveDeployer) { await doOwnershipTransfer({
console.log(`Setting L1ERC721Bridge owner to MSD`) isLiveDeployer,
await L1ERC721BridgeProxyWithSigner.changeAdmin(SystemDictator.address) proxy: L1ERC721BridgeProxyWithSigner,
} else { name: 'L1ERC721BridgeProxy',
const tx = await L1ERC721BridgeProxy.populateTransaction.changeAdmin( transferFunc: 'changeAdmin',
SystemDictator.address dictator: SystemDictator,
) })
console.log(`Please transfer L1ERC721Bridge (proxy) owner to MSD`)
console.log(`L1ERC721BridgeProxy address: ${L1ERC721BridgeProxy.address}`)
console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
console.log(getCastCommand(tx))
console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
}
// Wait for the ownership transfer to complete.
await awaitCondition(
async () => {
const owner = await L1ERC721BridgeProxy.callStatic.admin({
from: ethers.constants.AddressZero,
})
return owner === SystemDictator.address
},
5000,
1000
)
} else { } else {
console.log(`L1ERC721Bridge already owned by MSD`) console.log(`L1ERC721Bridge already owned by MSD`)
} }
// Step 1 is a freebie, it doesn't impact the system. // Wait for the ownership transfers to complete before continuing.
await doStep({ await awaitCondition(
async (): Promise<boolean> => {
const proxyAdminOwner = await ProxyAdmin.owner()
const addressManagerOwner = await AddressManager.owner()
const l1StandardBridgeOwner =
await L1StandardBridgeProxy.callStatic.getOwner({
from: ethers.constants.AddressZero,
})
const l1Erc721BridgeOwner = await L1ERC721BridgeProxy.callStatic.admin({
from: ethers.constants.AddressZero,
})
return (
proxyAdminOwner === SystemDictator.address &&
addressManagerOwner === SystemDictator.address &&
l1StandardBridgeOwner === SystemDictator.address &&
l1Erc721BridgeOwner === SystemDictator.address
)
},
5000,
1000
)
await doPhase({
isLiveDeployer, isLiveDeployer,
SystemDictator, SystemDictator,
step: 1, phase: 1,
message: ` message: `
Phase 1 includes the following steps:
Step 1 will configure the ProxyAdmin contract, you can safely execute this step at any time Step 1 will configure the ProxyAdmin contract, you can safely execute this step at any time
without impacting the functionality of the rest of the system. without impacting the functionality of the rest of the system.
Step 2 will stop deposits and withdrawals via the L1CrossDomainMessenger and will stop the
DTL from syncing new deposits via the CTC, effectively shutting down the legacy system. Once
this step has been executed, you should immediately begin the L2 migration process. If you
need to restart the system, run exit1() followed by finalize().
`, `,
checks: async () => { checks: async () => {
// Step 1 checks
await assertContractVariable( await assertContractVariable(
ProxyAdmin, ProxyAdmin,
'addressManager', 'addressManager',
...@@ -264,21 +236,8 @@ const deployFn: DeployFunction = async (hre) => { ...@@ -264,21 +236,8 @@ const deployFn: DeployFunction = async (hre) => {
assert(config.systemTxMaxGas === 1_000_000) assert(config.systemTxMaxGas === 1_000_000)
assert(ethers.utils.parseUnits('1', 'gwei').eq(config.minimumBaseFee)) assert(ethers.utils.parseUnits('1', 'gwei').eq(config.minimumBaseFee))
assert(config.maximumBaseFee.eq(uint128Max)) assert(config.maximumBaseFee.eq(uint128Max))
},
})
// Step 2 shuts down the system. // Step 2 checks
await doStep({
isLiveDeployer,
SystemDictator,
step: 2,
message: `
Step 2 will stop deposits and withdrawals via the L1CrossDomainMessenger and will stop the
DTL from syncing new deposits via the CTC, effectively shutting down the legacy system. Once
this step has been executed, you should immediately begin the L2 migration process. If you
need to restart the system, run exit1() followed by finalize().
`,
checks: async () => {
const messenger = await AddressManager.getAddress( const messenger = await AddressManager.getAddress(
'OVM_L1CrossDomainMessenger' 'OVM_L1CrossDomainMessenger'
) )
......
...@@ -10,11 +10,11 @@ import '@nomiclabs/hardhat-ethers' ...@@ -10,11 +10,11 @@ import '@nomiclabs/hardhat-ethers'
import { import {
assertContractVariable, assertContractVariable,
getContractsFromArtifacts, getContractsFromArtifacts,
jsonifyTransaction, printJsonTransaction,
isStep, isStep,
doStep, doStep,
getTenderlySimulationLink, printTenderlySimulationLink,
getCastCommand, printCastCommand,
} from '../src/deploy-utils' } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => { const deployFn: DeployFunction = async (hre) => {
...@@ -206,10 +206,9 @@ const deployFn: DeployFunction = async (hre) => { ...@@ -206,10 +206,9 @@ const deployFn: DeployFunction = async (hre) => {
) )
) )
console.log(`MSD address: ${SystemDictator.address}`) console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`) printJsonTransaction(tx)
console.log(jsonifyTransaction(tx)) printCastCommand(tx)
console.log(getCastCommand(tx)) await printTenderlySimulationLink(SystemDictator.provider, tx)
console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
} }
await awaitCondition( await awaitCondition(
...@@ -318,10 +317,9 @@ const deployFn: DeployFunction = async (hre) => { ...@@ -318,10 +317,9 @@ const deployFn: DeployFunction = async (hre) => {
const tx = await OptimismPortal.populateTransaction.unpause() const tx = await OptimismPortal.populateTransaction.unpause()
console.log(`Please unpause the OptimismPortal...`) console.log(`Please unpause the OptimismPortal...`)
console.log(`OptimismPortal address: ${OptimismPortal.address}`) console.log(`OptimismPortal address: ${OptimismPortal.address}`)
console.log(`JSON:`) printJsonTransaction(tx)
console.log(jsonifyTransaction(tx)) printCastCommand(tx)
console.log(getCastCommand(tx)) await printTenderlySimulationLink(SystemDictator.provider, tx)
console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
} }
await awaitCondition( await awaitCondition(
...@@ -348,10 +346,9 @@ const deployFn: DeployFunction = async (hre) => { ...@@ -348,10 +346,9 @@ const deployFn: DeployFunction = async (hre) => {
const tx = await SystemDictator.populateTransaction.finalize() const tx = await SystemDictator.populateTransaction.finalize()
console.log(`Please finalize deployment...`) console.log(`Please finalize deployment...`)
console.log(`MSD address: ${SystemDictator.address}`) console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`) printJsonTransaction(tx)
console.log(jsonifyTransaction(tx)) printCastCommand(tx)
console.log(getCastCommand(tx)) await printTenderlySimulationLink(SystemDictator.provider, tx)
console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
} }
await awaitCondition( await awaitCondition(
......
...@@ -3,7 +3,11 @@ ...@@ -3,7 +3,11 @@
rm -rf artifacts forge-artifacts rm -rf artifacts forge-artifacts
# See slither.config.json for slither settings # See slither.config.json for slither settings
if [ -n "$TRIAGE_MODE" ]; then if [[ -z "$TRIAGE_MODE" ]]; then
echo "Running slither"
slither .
else
echo "Running slither in triage mode"
# Slither's triage mode will run an 'interview' in the terminal, allowing you to review each of # Slither's triage mode will run an 'interview' in the terminal, allowing you to review each of
# its findings, and specify which should be ignored in future runs of slither. This will update # its findings, and specify which should be ignored in future runs of slither. This will update
# (or create) the slither.db.json file. This DB is a cleaner alternative to adding slither-disable # (or create) the slither.db.json file. This DB is a cleaner alternative to adding slither-disable
...@@ -20,6 +24,4 @@ if [ -n "$TRIAGE_MODE" ]; then ...@@ -20,6 +24,4 @@ if [ -n "$TRIAGE_MODE" ]; then
mv $DB $TEMP_DB mv $DB $TEMP_DB
jq 'walk(if type == "object" then del(.filename_absolute) else . end)' $TEMP_DB > $DB jq 'walk(if type == "object" then del(.filename_absolute) else . end)' $TEMP_DB > $DB
rm -f $TEMP_DB rm -f $TEMP_DB
else
slither .
fi fi
{ {
"detectors_to_exclude": "assembly-usage,block-timestamp,naming-convention,solc-version", "detectors_to_exclude": "assembly-usage,block-timestamp,naming-convention,solc-version,low-level-calls",
"exclude_informational": true, "exclude_informational": true,
"exclude_low": true, "exclude_low": true,
"exclude_medium": true, "exclude_medium": true,
......
...@@ -305,20 +305,56 @@ export const getDeploymentAddress = async ( ...@@ -305,20 +305,56 @@ export const getDeploymentAddress = async (
* @param tx Ethers transaction object. * @param tx Ethers transaction object.
* @returns JSON-ified transaction object. * @returns JSON-ified transaction object.
*/ */
export const jsonifyTransaction = (tx: ethers.PopulatedTransaction): string => { export const printJsonTransaction = (tx: ethers.PopulatedTransaction): void => {
return JSON.stringify( console.log(
{ 'JSON transaction parameters:\n' +
from: tx.from, JSON.stringify(
to: tx.to, {
data: tx.data, from: tx.from,
value: tx.value, to: tx.to,
chainId: tx.chainId, data: tx.data,
}, value: tx.value,
null, chainId: tx.chainId,
2 },
null,
2
)
) )
} }
/**
* Mini helper for transferring a Proxy to the MSD
*
 * @param opts Options for executing the ownership transfer.
 * @param opts.isLiveDeployer True if the deployer is live.
 * @param opts.proxy Proxy contract whose ownership is being transferred.
 * @param opts.name Name of the proxy contract, used for logging.
 * @param opts.transferFunc Name of the ownership transfer function to call on the proxy.
 * @param opts.dictator SystemDictator (MSD) contract that will receive ownership.
*/
export const doOwnershipTransfer = async (opts: {
isLiveDeployer?: boolean
proxy: ethers.Contract
name: string
transferFunc: string
dictator: ethers.Contract
}): Promise<void> => {
if (opts.isLiveDeployer) {
console.log(`Setting ${opts.name} owner to MSD`)
await opts.proxy[opts.transferFunc](opts.dictator.address)
} else {
const tx = await opts.proxy.populateTransaction[opts.transferFunc](
opts.dictator.address
)
console.log(`
Please transfer ${opts.name} (proxy) owner to MSD
- ${opts.name} address: ${opts.proxy.address}
- MSD address: ${opts.dictator.address}
`)
printJsonTransaction(tx)
printCastCommand(tx)
await printTenderlySimulationLink(opts.dictator.provider, tx)
}
}
/** /**
* Mini helper for checking if the current step is a target step. * Mini helper for checking if the current step is a target step.
* *
...@@ -333,6 +369,25 @@ export const isStep = async ( ...@@ -333,6 +369,25 @@ export const isStep = async (
return (await dictator.currentStep()) === step return (await dictator.currentStep()) === step
} }
/**
* Mini helper for checking if the current step is the first step in target phase.
*
* @param dictator SystemDictator contract.
* @param phase Target phase.
* @returns True if the current step is the first step in target phase.
*/
export const isStartOfPhase = async (
dictator: ethers.Contract,
phase: number
): Promise<boolean> => {
const phaseToStep = {
1: 1,
2: 3,
3: 6,
}
return (await dictator.currentStep()) === phaseToStep[phase]
}
/** /**
* Mini helper for executing a given step. * Mini helper for executing a given step.
* *
...@@ -350,7 +405,8 @@ export const doStep = async (opts: { ...@@ -350,7 +405,8 @@ export const doStep = async (opts: {
message: string message: string
checks: () => Promise<void> checks: () => Promise<void>
}): Promise<void> => { }): Promise<void> => {
if (!(await isStep(opts.SystemDictator, opts.step))) { const isStepVal = await isStep(opts.SystemDictator, opts.step)
if (!isStepVal) {
console.log(`Step already completed: ${opts.step}`) console.log(`Step already completed: ${opts.step}`)
return return
} }
...@@ -368,11 +424,8 @@ export const doStep = async (opts: { ...@@ -368,11 +424,8 @@ export const doStep = async (opts: {
]() ]()
console.log(`Please execute step ${opts.step}...`) console.log(`Please execute step ${opts.step}...`)
console.log(`MSD address: ${opts.SystemDictator.address}`) console.log(`MSD address: ${opts.SystemDictator.address}`)
console.log(`JSON:`) printJsonTransaction(tx)
console.log(jsonifyTransaction(tx)) await printTenderlySimulationLink(opts.SystemDictator.provider, tx)
console.log(
await getTenderlySimulationLink(opts.SystemDictator.provider, tx)
)
} }
// Wait for the step to complete. // Wait for the step to complete.
...@@ -389,36 +442,91 @@ export const doStep = async (opts: { ...@@ -389,36 +442,91 @@ export const doStep = async (opts: {
} }
/** /**
* Returns a direct link to a Tenderly simulation. * Mini helper for executing a given phase.
*
 * @param opts Options for executing the phase.
* @param opts.isLiveDeployer True if the deployer is live.
* @param opts.SystemDictator SystemDictator contract.
 * @param opts.phase Phase to execute.
 * @param opts.message Message to print before executing the phase.
 * @param opts.checks Checks to perform after executing the phase.
*/
export const doPhase = async (opts: {
isLiveDeployer?: boolean
SystemDictator: ethers.Contract
phase: number
message: string
checks: () => Promise<void>
}): Promise<void> => {
const isStart = await isStartOfPhase(opts.SystemDictator, opts.phase)
if (!isStart) {
console.log(`Start of phase ${opts.phase} already completed`)
return
}
// Extra message to help the user understand what's going on.
console.log(opts.message)
// Either automatically or manually execute the step.
if (opts.isLiveDeployer) {
console.log(`Executing phase ${opts.phase}...`)
await opts.SystemDictator[`phase${opts.phase}`]()
} else {
const tx = await opts.SystemDictator.populateTransaction[
`phase${opts.phase}`
]()
console.log(`Please execute phase ${opts.phase}...`)
console.log(`MSD address: ${opts.SystemDictator.address}`)
printJsonTransaction(tx)
await printTenderlySimulationLink(opts.SystemDictator.provider, tx)
}
  // Wait for the phase to complete.
await awaitCondition(
async () => {
return isStartOfPhase(opts.SystemDictator, opts.phase + 1)
},
30000,
1000
)
  // Perform post-phase checks.
await opts.checks()
}
/**
* Prints a direct link to a Tenderly simulation.
* *
* @param provider Ethers Provider. * @param provider Ethers Provider.
* @param tx Ethers transaction object. * @param tx Ethers transaction object.
* @returns the url of the tenderly simulation.
*/ */
export const getTenderlySimulationLink = async ( export const printTenderlySimulationLink = async (
provider: ethers.providers.Provider, provider: ethers.providers.Provider,
tx: ethers.PopulatedTransaction tx: ethers.PopulatedTransaction
): Promise<string> => { ): Promise<void> => {
if (process.env.TENDERLY_PROJECT && process.env.TENDERLY_USERNAME) { if (process.env.TENDERLY_PROJECT && process.env.TENDERLY_USERNAME) {
return `https://dashboard.tenderly.co/${process.env.TENDERLY_PROJECT}/${ console.log(
process.env.TENDERLY_USERNAME `https://dashboard.tenderly.co/${process.env.TENDERLY_PROJECT}/${
}/simulator/new?${new URLSearchParams({ process.env.TENDERLY_USERNAME
network: (await provider.getNetwork()).chainId.toString(), }/simulator/new?${new URLSearchParams({
contractAddress: tx.to, network: (await provider.getNetwork()).chainId.toString(),
rawFunctionInput: tx.data, contractAddress: tx.to,
from: tx.from, rawFunctionInput: tx.data,
}).toString()}` from: tx.from,
}).toString()}`
)
} }
} }
/** /**
 * Returns a cast command for submitting a given transaction. * Prints a cast command for submitting a given transaction.
* *
* @param tx Ethers transaction object. * @param tx Ethers transaction object.
* @returns the cast command
*/ */
export const getCastCommand = (tx: ethers.PopulatedTransaction): string => { export const printCastCommand = (tx: ethers.PopulatedTransaction): void => {
if (process.env.CAST_COMMANDS) { if (process.env.CAST_COMMANDS) {
return `cast send ${tx.to} ${tx.data} --from ${tx.from} --value ${tx.value}` console.log(
`cast send ${tx.to} ${tx.data} --from ${tx.from} --value ${tx.value}`
)
} }
} }
...@@ -6,6 +6,8 @@ const config: DeployConfig = { ...@@ -6,6 +6,8 @@ const config: DeployConfig = {
optimistName: '', optimistName: '',
optimistSymbol: '', optimistSymbol: '',
attestorAddress: '', attestorAddress: '',
optimistInviterName: '',
optimistInviterInviteGranter: '',
} }
export default config export default config
...@@ -6,6 +6,8 @@ const config: DeployConfig = { ...@@ -6,6 +6,8 @@ const config: DeployConfig = {
optimistName: 'Optimist', optimistName: 'Optimist',
optimistSymbol: 'OPTIMIST', optimistSymbol: 'OPTIMIST',
attestorAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3', attestorAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistInviterInviteGranter: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistInviterName: 'OptimistInviter',
} }
export default config export default config
...@@ -6,6 +6,8 @@ const config: DeployConfig = { ...@@ -6,6 +6,8 @@ const config: DeployConfig = {
optimistName: 'OP Citizenship', optimistName: 'OP Citizenship',
optimistSymbol: 'OPNFT', optimistSymbol: 'OPNFT',
attestorAddress: '0x70997970c51812dc3a010c7d01b50e0d17dc79c8', attestorAddress: '0x70997970c51812dc3a010c7d01b50e0d17dc79c8',
optimistInviterInviteGranter: '0x70997970c51812dc3a010c7d01b50e0d17dc79c8',
optimistInviterName: 'OptimistInviter',
} }
export default config export default config
...@@ -6,6 +6,8 @@ const config: DeployConfig = { ...@@ -6,6 +6,8 @@ const config: DeployConfig = {
optimistName: 'Optimist', optimistName: 'Optimist',
optimistSymbol: 'OPTIMIST', optimistSymbol: 'OPTIMIST',
attestorAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3', attestorAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistInviterInviteGranter: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistInviterName: 'OptimistInviter',
} }
export default config export default config
...@@ -6,6 +6,8 @@ const config: DeployConfig = { ...@@ -6,6 +6,8 @@ const config: DeployConfig = {
optimistName: 'Optimist', optimistName: 'Optimist',
optimistSymbol: 'OPTIMIST', optimistSymbol: 'OPTIMIST',
attestorAddress: '0x8F0EBDaA1cF7106bE861753B0f9F5c0250fE0819', attestorAddress: '0x8F0EBDaA1cF7106bE861753B0f9F5c0250fE0819',
optimistInviterInviteGranter: '0x8F0EBDaA1cF7106bE861753B0f9F5c0250fE0819',
optimistInviterName: 'OptimistInviter',
} }
export default config export default config
...@@ -6,6 +6,8 @@ const config: DeployConfig = { ...@@ -6,6 +6,8 @@ const config: DeployConfig = {
optimistName: 'Optimist', optimistName: 'Optimist',
optimistSymbol: 'OPTIMIST', optimistSymbol: 'OPTIMIST',
attestorAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3', attestorAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistInviterInviteGranter: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistInviterName: 'OptimistInviter',
} }
export default config export default config
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import { Multicall3 } from "multicall/src/Multicall3.sol";
/**
* Just exists so we can compile this contract.
*/
contract MulticallContractCompiler {
}
...@@ -6,40 +6,54 @@ import { ...@@ -6,40 +6,54 @@ import {
ERC721BurnableUpgradeable ERC721BurnableUpgradeable
} from "@openzeppelin/contracts-upgradeable/token/ERC721/extensions/ERC721BurnableUpgradeable.sol"; } from "@openzeppelin/contracts-upgradeable/token/ERC721/extensions/ERC721BurnableUpgradeable.sol";
import { AttestationStation } from "./AttestationStation.sol"; import { AttestationStation } from "./AttestationStation.sol";
import { OptimistAllowlist } from "./OptimistAllowlist.sol";
import { Strings } from "@openzeppelin/contracts/utils/Strings.sol"; import { Strings } from "@openzeppelin/contracts/utils/Strings.sol";
/** /**
* @author Optimism Collective * @author Optimism Collective
* @author Gitcoin * @author Gitcoin
* @title Optimist * @title Optimist
* @notice A Soul Bound Token for real humans only(tm). * @notice A Soul Bound Token for real humans only(tm).
*/ */
contract Optimist is ERC721BurnableUpgradeable, Semver { contract Optimist is ERC721BurnableUpgradeable, Semver {
/**
* @notice Attestation key used by the attestor to attest the baseURI.
*/
bytes32 public constant BASE_URI_ATTESTATION_KEY = bytes32("optimist.base-uri");
/**
* @notice Attestor who attests to baseURI.
*/
address public immutable BASE_URI_ATTESTOR;
/** /**
* @notice Address of the AttestationStation contract. * @notice Address of the AttestationStation contract.
*/ */
AttestationStation public immutable ATTESTATION_STATION; AttestationStation public immutable ATTESTATION_STATION;
/** /**
* @notice Attestor who attests to baseURI and allowlist. * @notice Address of the OptimistAllowlist contract.
*/ */
address public immutable ATTESTOR; OptimistAllowlist public immutable OPTIMIST_ALLOWLIST;
/** /**
* @custom:semver 1.0.0 * @custom:semver 2.0.0
* @param _name Token name. * @param _name Token name.
* @param _symbol Token symbol. * @param _symbol Token symbol.
* @param _attestor Address of the attestor. * @param _baseURIAttestor Address of the baseURI attestor.
* @param _attestationStation Address of the AttestationStation contract. * @param _attestationStation Address of the AttestationStation contract.
* @param _optimistAllowlist Address of the OptimistAllowlist contract
*/ */
constructor( constructor(
string memory _name, string memory _name,
string memory _symbol, string memory _symbol,
address _attestor, address _baseURIAttestor,
AttestationStation _attestationStation AttestationStation _attestationStation,
) Semver(1, 0, 0) { OptimistAllowlist _optimistAllowlist
ATTESTOR = _attestor; ) Semver(2, 0, 0) {
BASE_URI_ATTESTOR = _baseURIAttestor;
ATTESTATION_STATION = _attestationStation; ATTESTATION_STATION = _attestationStation;
OPTIMIST_ALLOWLIST = _optimistAllowlist;
initialize(_name, _symbol); initialize(_name, _symbol);
} }
...@@ -76,7 +90,7 @@ contract Optimist is ERC721BurnableUpgradeable, Semver { ...@@ -76,7 +90,7 @@ contract Optimist is ERC721BurnableUpgradeable, Semver {
string( string(
abi.encodePacked( abi.encodePacked(
ATTESTATION_STATION.attestations( ATTESTATION_STATION.attestations(
ATTESTOR, BASE_URI_ATTESTOR,
address(this), address(this),
bytes32("optimist.base-uri") bytes32("optimist.base-uri")
) )
...@@ -105,17 +119,15 @@ contract Optimist is ERC721BurnableUpgradeable, Semver { ...@@ -105,17 +119,15 @@ contract Optimist is ERC721BurnableUpgradeable, Semver {
} }
/** /**
* @notice Checks whether a given address is allowed to mint the Optimist NFT yet. Since the * @notice Checks OptimistAllowlist to determine whether a given address is allowed to mint
* Optimist NFT will also be used as part of the Citizens House, mints are currently * the Optimist NFT. Since the Optimist NFT will also be used as part of the
* restricted. Eventually anyone will be able to mint. * Citizens House, mints are currently restricted. Eventually anyone will be able
* to mint.
* *
* @return Whether or not the address is allowed to mint yet. * @return Whether or not the address is allowed to mint yet.
*/ */
function isOnAllowList(address _recipient) public view returns (bool) { function isOnAllowList(address _recipient) public view returns (bool) {
return return OPTIMIST_ALLOWLIST.isAllowedToMint(_recipient);
ATTESTATION_STATION
.attestations(ATTESTOR, _recipient, bytes32("optimist.can-mint"))
.length > 0;
} }
/** /**
......
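
As a side note on the tokenURI path above: the base URI is read back out of the AttestationStation. A rough off-chain equivalent, assuming the usual auto-generated getter for the public attestations mapping and placeholder addresses, could look like this (illustrative only, not part of this commit):

import { ethers } from 'ethers'

// Minimal ABI fragment for the AttestationStation attestations mapping getter.
const ATTESTATION_STATION_ABI = [
  'function attestations(address, address, bytes32) view returns (bytes)',
]

// Placeholder addresses; substitute the real deployments.
const attestationStationAddress = ethers.constants.AddressZero
const baseUriAttestor = ethers.constants.AddressZero
const optimistAddress = ethers.constants.AddressZero

const readBaseUri = async (
  provider: ethers.providers.Provider
): Promise<string> => {
  const station = new ethers.Contract(
    attestationStationAddress,
    ATTESTATION_STATION_ABI,
    provider
  )
  // Same key the contract uses: bytes32("optimist.base-uri").
  const raw: string = await station.attestations(
    baseUriAttestor,
    optimistAddress,
    ethers.utils.formatBytes32String('optimist.base-uri')
  )
  // The contract interprets the attestation bytes as a UTF-8 string.
  return ethers.utils.toUtf8String(raw)
}
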
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
import { Semver } from "@eth-optimism/contracts-bedrock/contracts/universal/Semver.sol";
import { AttestationStation } from "./AttestationStation.sol";
import { OptimistConstants } from "./libraries/OptimistConstants.sol";
/**
* @title OptimistAllowlist
* @notice Source of truth for whether an address is able to mint an Optimist NFT.
           The isAllowedToMint function checks various signals and returns a boolean
           indicating whether an address is eligible to mint.
*/
contract OptimistAllowlist is Semver {
/**
* @notice Attestation key used by the AllowlistAttestor to manually add addresses to the
* allowlist.
*/
bytes32 public constant OPTIMIST_CAN_MINT_ATTESTATION_KEY = bytes32("optimist.can-mint");
/**
* @notice Attestation key used by Coinbase to issue attestations for Quest participants.
*/
bytes32 public constant COINBASE_QUEST_ELIGIBLE_ATTESTATION_KEY =
bytes32("coinbase.quest-eligible");
/**
* @notice Address of the AttestationStation contract.
*/
AttestationStation public immutable ATTESTATION_STATION;
/**
* @notice Attestor that issues 'optimist.can-mint' attestations.
*/
address public immutable ALLOWLIST_ATTESTOR;
/**
* @notice Attestor that issues 'coinbase.quest-eligible' attestations.
*/
address public immutable COINBASE_QUEST_ATTESTOR;
/**
* @notice Address of OptimistInviter contract that issues 'optimist.can-mint-from-invite'
* attestations.
*/
address public immutable OPTIMIST_INVITER;
/**
* @custom:semver 1.0.0
*
* @param _attestationStation Address of the AttestationStation contract.
* @param _allowlistAttestor Address of the allowlist attestor.
* @param _coinbaseQuestAttestor Address of the Coinbase Quest attestor.
* @param _optimistInviter Address of the OptimistInviter contract.
*/
constructor(
AttestationStation _attestationStation,
address _allowlistAttestor,
address _coinbaseQuestAttestor,
address _optimistInviter
) Semver(1, 0, 0) {
ATTESTATION_STATION = _attestationStation;
ALLOWLIST_ATTESTOR = _allowlistAttestor;
COINBASE_QUEST_ATTESTOR = _coinbaseQuestAttestor;
OPTIMIST_INVITER = _optimistInviter;
}
/**
* @notice Checks whether a given address is allowed to mint the Optimist NFT yet. Since the
* Optimist NFT will also be used as part of the Citizens House, mints are currently
* restricted. Eventually anyone will be able to mint.
*
* Currently, address is allowed to mint if it satisfies any of the following:
* 1) Has a valid 'optimist.can-mint' attestation from the allowlist attestor.
* 2) Has a valid 'coinbase.quest-eligible' attestation from Coinbase Quest attestor
* 3) Has a valid 'optimist.can-mint-from-invite' attestation from the OptimistInviter
* contract.
*
* @param _claimer Address to check.
*
* @return Whether or not the address is allowed to mint yet.
*/
function isAllowedToMint(address _claimer) public view returns (bool) {
return
_hasAttestationFromAllowlistAttestor(_claimer) ||
_hasAttestationFromCoinbaseQuestAttestor(_claimer) ||
_hasAttestationFromOptimistInviter(_claimer);
}
/**
* @notice Checks whether an address has a valid 'optimist.can-mint' attestation from the
* allowlist attestor.
*
* @param _claimer Address to check.
*
* @return Whether or not the address has a valid attestation.
*/
function _hasAttestationFromAllowlistAttestor(address _claimer) internal view returns (bool) {
// Expected attestation value is bytes32("true")
return
_hasValidAttestation(ALLOWLIST_ATTESTOR, _claimer, OPTIMIST_CAN_MINT_ATTESTATION_KEY);
}
/**
* @notice Checks whether an address has a valid attestation from the Coinbase attestor.
*
* @param _claimer Address to check.
*
* @return Whether or not the address has a valid attestation.
*/
function _hasAttestationFromCoinbaseQuestAttestor(address _claimer)
internal
view
returns (bool)
{
// Expected attestation value is bytes32("true")
return
_hasValidAttestation(
COINBASE_QUEST_ATTESTOR,
_claimer,
COINBASE_QUEST_ELIGIBLE_ATTESTATION_KEY
);
}
/**
* @notice Checks whether an address has a valid attestation from the OptimistInviter contract.
*
* @param _claimer Address to check.
*
* @return Whether or not the address has a valid attestation.
*/
function _hasAttestationFromOptimistInviter(address _claimer) internal view returns (bool) {
// Expected attestation value is the inviter's address
return
_hasValidAttestation(
OPTIMIST_INVITER,
_claimer,
OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY
);
}
/**
* @notice Checks whether an address has a valid truthy attestation.
     *         Any attestation value other than bytes32("") is considered truthy.
*
* @param _creator Address that made the attestation.
* @param _about Address attestation is about.
* @param _key Key of the attestation.
*
* @return Whether or not the address has a valid truthy attestation.
*/
function _hasValidAttestation(
address _creator,
address _about,
bytes32 _key
) internal view returns (bool) {
return ATTESTATION_STATION.attestations(_creator, _about, _key).length > 0;
}
}
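
Off-chain code can sanity-check the new allowlist by calling isAllowedToMint directly (the Optimist NFT's isOnAllowList simply forwards to it). A minimal sketch, assuming a placeholder proxy address and a hand-written ABI fragment:

import { ethers } from 'ethers'

// Minimal ABI fragment for OptimistAllowlist.isAllowedToMint(address).
const OPTIMIST_ALLOWLIST_ABI = [
  'function isAllowedToMint(address _claimer) view returns (bool)',
]

// Placeholder address; substitute the deployed OptimistAllowlist (proxy).
const optimistAllowlistAddress = ethers.constants.AddressZero

const canMint = async (
  provider: ethers.providers.Provider,
  claimer: string
): Promise<boolean> => {
  const allowlist = new ethers.Contract(
    optimistAllowlistAddress,
    OPTIMIST_ALLOWLIST_ABI,
    provider
  )
  // True if any of the three attestation paths described in the contract is set.
  return allowlist.isAllowedToMint(claimer)
}
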
...@@ -61,9 +61,9 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => { ...@@ -61,9 +61,9 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
addr addr
) )
const implementation = await Proxy.callStatic.implementation({ const implementation = await Proxy.connect(
from: ethers.constants.AddressZero, ethers.constants.AddressZero
}) ).callStatic.implementation()
console.log(`implementation is set to ${implementation}`) console.log(`implementation is set to ${implementation}`)
if ( if (
getAddress(implementation) !== getAddress(implementation) !==
...@@ -82,9 +82,9 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => { ...@@ -82,9 +82,9 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
} }
const l2ProxyOwnerAddress = deployConfig.l2ProxyOwnerAddress const l2ProxyOwnerAddress = deployConfig.l2ProxyOwnerAddress
const admin = await Proxy.callStatic.admin({ const admin = await Proxy.connect(
from: ethers.constants.AddressZero, ethers.constants.AddressZero
}) ).callStatic.admin()
console.log(`admin is set to ${admin}`) console.log(`admin is set to ${admin}`)
if (getAddress(admin) !== getAddress(l2ProxyOwnerAddress)) { if (getAddress(admin) !== getAddress(l2ProxyOwnerAddress)) {
console.log('admin not set correctly') console.log('admin not set correctly')
...@@ -99,7 +99,7 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => { ...@@ -99,7 +99,7 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
console.log('Contract deployment complete') console.log('Contract deployment complete')
await assertContractVariable(Proxy, 'admin', l2ProxyOwnerAddress) await assertContractVariable(Proxy, 'admin', l2ProxyOwnerAddress)
await assertContractVariable(AttestationStation, 'version', '1.0.0') await assertContractVariable(AttestationStation, 'version', '1.1.0')
} }
deployFn.tags = ['AttestationStationProxy', 'OptimistEnvironment'] deployFn.tags = ['AttestationStationProxy', 'OptimistEnvironment']
......
/* Imports: External */
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { HardhatRuntimeEnvironment } from 'hardhat/types'
import '@nomiclabs/hardhat-ethers'
import '@eth-optimism/hardhat-deploy-config'
import 'hardhat-deploy'
import type { DeployConfig } from '../../src'
const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
const deployConfig = hre.deployConfig as DeployConfig
const { deployer } = await hre.getNamedAccounts()
console.log(`Deploying OptimistInviter implementation with ${deployer}`)
const Deployment__AttestationStation = await hre.deployments.get(
'AttestationStationProxy'
)
const attestationStationAddress = Deployment__AttestationStation.address
console.log(`Using ${attestationStationAddress} as the ATTESTATION_STATION`)
console.log(
`Using ${deployConfig.optimistInviterInviteGranter} as INVITE_GRANTER`
)
const { deploy } = await hre.deployments.deterministic('OptimistInviter', {
salt: hre.ethers.utils.solidityKeccak256(['string'], ['OptimistInviter']),
from: deployer,
args: [
deployConfig.optimistInviterInviteGranter,
attestationStationAddress,
],
log: true,
})
await deploy()
}
deployFn.tags = ['OptimistInviter', 'OptimistEnvironment']
deployFn.dependencies = ['AttestationStationProxy']
export default deployFn
/* Imports: External */
import assert from 'assert'
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { HardhatRuntimeEnvironment } from 'hardhat/types'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import 'hardhat-deploy'
import { assertContractVariable } from '@eth-optimism/contracts-bedrock/src/deploy-utils'
import { ethers, utils } from 'ethers'
import type { DeployConfig } from '../../src'
const { getAddress } = utils
// Required conditions before deploying - Specified in `deployFn.dependencies`
// - AttestationStationProxy is deployed and points to the correct implementation
// - OptimistInviterImpl is deployed
//
// Steps
// 1. Deploy OptimistInviterProxy
// 2. Point the newly deployed proxy to the implementation, if it hasn't been done already
// 3. Update the admin of the proxy to the l2ProxyOwnerAddress, if it hasn't been done already
// 4. Basic sanity checks for contract variables
const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
const deployConfig = hre.deployConfig as DeployConfig
// Deployer should be set in hardhat.config.ts
const { deployer } = await hre.getNamedAccounts()
// We want the ability to deploy to a deterministic address, so we need the init bytecode to be
// consistent across deployments. The ddd will quickly transfer the ownership of the Proxy to a
// multisig after deployment.
//
  // We need a consistent ddd, since the Proxy takes an `_admin` constructor argument, which
// affects the init bytecode and hence deployed address.
const ddd = deployConfig.ddd
if (getAddress(deployer) !== getAddress(ddd)) {
// Not a hard requirement. We can deploy with any account and just set the `_admin` to the
    // Not a hard requirement. We can deploy with any account and just set the `_admin` to the
    // ddd, but requiring that the deployer is the same as the ddd minimizes the number of hot wallets
// we need to keep track of during deployment.
throw new Error('Must deploy with the ddd')
}
// Get the up to date deployment of the OptimistInviter contract
const Deployment__OptimistInviterImpl = await hre.deployments.get(
'OptimistInviter'
)
console.log(`Deploying OptimistInviterProxy with ${deployer}`)
// Deploys the Proxy.sol contract with the `_admin` constructor param set to the ddd (=== deployer).
const { deploy } = await hre.deployments.deterministic(
'OptimistInviterProxy',
{
salt: hre.ethers.utils.solidityKeccak256(
['string'],
['OptimistInviterProxy']
),
contract: 'Proxy',
from: deployer,
args: [deployer],
log: true,
}
)
// Deploy the Proxy contract
await deploy()
const Deployment__OptimistInviterProxy = await hre.deployments.get(
'OptimistInviterProxy'
)
console.log(
    `OptimistInviterProxy deployed to ${Deployment__OptimistInviterProxy.address}`
)
// Deployed Proxy.sol contract
const Proxy = await hre.ethers.getContractAt(
'Proxy',
Deployment__OptimistInviterProxy.address
)
// Deployed Proxy.sol contract with the OptimistInviter interface
const OptimistInviter = await hre.ethers.getContractAt(
'OptimistInviter',
Deployment__OptimistInviterProxy.address
)
// Gets the current implementation address the proxy is pointing to.
// callStatic is used since the `Proxy.implementation()` is not a view function and ethers will
// try to make a transaction if we don't use callStatic. Using the zero address as `from` lets us
// call functions on the proxy and not trigger the delegatecall. See Proxy.sol proxyCallIfNotAdmin
// modifier for more details.
const implementation = await Proxy.connect(
ethers.constants.AddressZero
).callStatic.implementation()
console.log(`implementation set to ${implementation}`)
if (
getAddress(implementation) !==
getAddress(Deployment__OptimistInviterImpl.address)
) {
// If the proxy isn't pointing to the correct implementation, we need to set it to the correct
// one, then call initialize() in the proxy's context.
console.log(
'implementation not set to OptimistInviter implementation contract'
)
console.log(
`Setting implementation to ${Deployment__OptimistInviterImpl.address}`
)
const name = deployConfig.optimistInviterName
// Create the calldata for the call to `initialize()`
const calldata = OptimistInviter.interface.encodeFunctionData(
'initialize',
[name]
)
// ethers.Signer for the ddd
const dddSigner = await hre.ethers.provider.getSigner(deployer)
// Point the proxy to the deployed OptimistInviter implementation contract,
// and call `initialize()` in the proxy's context
const tx = await Proxy.connect(dddSigner).upgradeToAndCall(
Deployment__OptimistInviterImpl.address,
calldata
)
const receipt = await tx.wait()
console.log(`implementation set in ${receipt.transactionHash}`)
} else {
console.log(
'implementation already set to OptimistInviter implementation contract'
)
}
const l2ProxyOwnerAddress = deployConfig.l2ProxyOwnerAddress
// Get the current proxy admin address
const admin = await Proxy.connect(
ethers.constants.AddressZero
).callStatic.admin()
console.log(`admin currently set to ${admin}`)
if (getAddress(admin) !== getAddress(l2ProxyOwnerAddress)) {
// If the proxy admin isn't the l2ProxyOwnerAddress, we need to update it
// We're assuming that the proxy admin is the ddd right now.
console.log('admin is not set to the l2ProxyOwnerAddress')
console.log(`Setting admin to ${l2ProxyOwnerAddress}`)
// ethers.Signer for the ddd
const dddSigner = await hre.ethers.provider.getSigner(deployer)
// change admin to the l2ProxyOwnerAddress
const tx = await Proxy.connect(dddSigner).changeAdmin(l2ProxyOwnerAddress)
const receipt = await tx.wait()
console.log(`admin set in ${receipt.transactionHash}`)
} else {
console.log('admin already set to proxy owner address')
}
const Deployment__AttestationStation = await hre.deployments.get(
'AttestationStationProxy'
)
  assert(
getAddress(
await Proxy.connect(ethers.constants.AddressZero).callStatic.admin()
) === getAddress(l2ProxyOwnerAddress)
)
await assertContractVariable(OptimistInviter, 'version', '1.0.0')
await assertContractVariable(
OptimistInviter,
'INVITE_GRANTER',
deployConfig.optimistInviterInviteGranter
)
await assertContractVariable(
OptimistInviter,
'ATTESTATION_STATION',
Deployment__AttestationStation.address
)
await assertContractVariable(OptimistInviter, 'EIP712_VERSION', '1.0.0')
}
deployFn.tags = ['OptimistInviterProxy', 'OptimistEnvironment']
deployFn.dependencies = ['AttestationStationProxy', 'OptimistInviter']
export default deployFn
...@@ -48,9 +48,9 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => { ...@@ -48,9 +48,9 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
Deployment__OptimistProxy.address Deployment__OptimistProxy.address
) )
const implementation = await Proxy.callStatic.implementation({ const implementation = await Proxy.connect(
from: ethers.constants.AddressZero, ethers.constants.AddressZero
}) ).callStatic.implementation()
console.log(`implementation set to ${implementation}`) console.log(`implementation set to ${implementation}`)
if (getAddress(implementation) !== getAddress(Deployment__Optimist.address)) { if (getAddress(implementation) !== getAddress(Deployment__Optimist.address)) {
console.log('implementation not set to Optimist contract') console.log('implementation not set to Optimist contract')
...@@ -75,9 +75,9 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => { ...@@ -75,9 +75,9 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
} }
const l2ProxyOwnerAddress = deployConfig.l2ProxyOwnerAddress const l2ProxyOwnerAddress = deployConfig.l2ProxyOwnerAddress
const admin = await Proxy.callStatic.admin({ const admin = await Proxy.connect(
from: ethers.constants.AddressZero, ethers.constants.AddressZero
}) ).callStatic.admin()
console.log(`admin set to ${admin}`) console.log(`admin set to ${admin}`)
if (getAddress(admin) !== getAddress(l2ProxyOwnerAddress)) { if (getAddress(admin) !== getAddress(l2ProxyOwnerAddress)) {
console.log('detected admin is not set') console.log('detected admin is not set')
...@@ -96,7 +96,7 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => { ...@@ -96,7 +96,7 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
await assertContractVariable(Proxy, 'admin', l2ProxyOwnerAddress) await assertContractVariable(Proxy, 'admin', l2ProxyOwnerAddress)
await assertContractVariable(Optimist, 'name', deployConfig.optimistName) await assertContractVariable(Optimist, 'name', deployConfig.optimistName)
await assertContractVariable(Optimist, 'verson', '1.0.0') await assertContractVariable(Optimist, 'version', '1.0.0')
await assertContractVariable(Optimist, 'symbol', deployConfig.optimistSymbol) await assertContractVariable(Optimist, 'symbol', deployConfig.optimistSymbol)
await assertContractVariable( await assertContractVariable(
Optimist, Optimist,
......
...@@ -16,9 +16,13 @@ remappings = [ ...@@ -16,9 +16,13 @@ remappings = [
'@rari-capital/solmate/=node_modules/@rari-capital/solmate', '@rari-capital/solmate/=node_modules/@rari-capital/solmate',
'forge-std/=node_modules/forge-std/src', 'forge-std/=node_modules/forge-std/src',
'ds-test/=node_modules/ds-test/src', 'ds-test/=node_modules/ds-test/src',
'multicall/=lib/multicall',
'@openzeppelin/contracts/=node_modules/@openzeppelin/contracts/', '@openzeppelin/contracts/=node_modules/@openzeppelin/contracts/',
'@openzeppelin/contracts-upgradeable/=node_modules/@openzeppelin/contracts-upgradeable/', '@openzeppelin/contracts-upgradeable/=node_modules/@openzeppelin/contracts-upgradeable/',
'@eth-optimism/contracts-bedrock/=../../node_modules/@eth-optimism/contracts-bedrock', '@eth-optimism/contracts-bedrock/=../../node_modules/@eth-optimism/contracts-bedrock',
] ]
# The metadata hash can be removed from the bytecode by setting "none" # The metadata hash can be removed from the bytecode by setting "none"
bytecode_hash = "none" bytecode_hash = "none"
libs = ["node_modules", "lib"]
# Required to use `deployCode` to deploy the multicall contract, which has an incompatible Solidity version
fs_permissions = [{ access = "read", path = "./forge-artifacts/Multicall3.sol/Multicall3.json"}]
Subproject commit a1fa0644fa412cd3237ef7081458ecb2ffad7dbe
...@@ -29,10 +29,20 @@ export interface DeployConfig { ...@@ -29,10 +29,20 @@ export interface DeployConfig {
optimistSymbol: string optimistSymbol: string
/** /**
* Address of the priviledged attestor for the Optimist contract. * Address of the privileged attestor for the Optimist contract.
*/ */
attestorAddress: string attestorAddress: string
/**
* Address of the privileged account for the OptimistInviter contract that can grant invites.
*/
optimistInviterInviteGranter: string
/**
* Name of OptimistInviter contract, used for the EIP712 domain separator.
*/
optimistInviterName: string
/** /**
* Address of the owner of the proxies on L2. There will be a ProxyAdmin deployed as a predeploy * Address of the owner of the proxies on L2. There will be a ProxyAdmin deployed as a predeploy
* after bedrock, so the owner of proxies should be updated to that after the upgrade. * after bedrock, so the owner of proxies should be updated to that after the upgrade.
...@@ -63,6 +73,12 @@ export const configSpec: DeployConfigSpec<DeployConfig> = { ...@@ -63,6 +73,12 @@ export const configSpec: DeployConfigSpec<DeployConfig> = {
attestorAddress: { attestorAddress: {
type: 'address', type: 'address',
}, },
optimistInviterInviteGranter: {
type: 'address',
},
optimistInviterName: {
type: 'string',
},
l2ProxyOwnerAddress: { l2ProxyOwnerAddress: {
type: 'address', type: 'address',
}, },
......
...@@ -277,6 +277,12 @@ export class L1IngestionService extends BaseService<L1IngestionServiceOptions> { ...@@ -277,6 +277,12 @@ export class L1IngestionService extends BaseService<L1IngestionServiceOptions> {
depositTargetL1Block, depositTargetL1Block,
handleEventsTransactionEnqueued handleEventsTransactionEnqueued
) )
} else {
this.logger.info('Deposit shutoff reached', {
depositTargetL1Block,
highestSyncedL1Block,
depositShutoffBlock,
})
} }
await this._syncEvents( await this._syncEvents(
......
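
For context on the new log line: the surrounding condition (elided by the diff) decides whether to keep syncing enqueued deposits or to stop at the shutoff block. The sketch below is only an illustrative reconstruction using the field names from the log; the real guard lives in the elided code and may differ.

// Illustrative only, not the actual DTL source. Field names are taken from
// the log statement above; the exact comparison in the real code may differ.
const maybeSyncDeposits = async (opts: {
  depositTargetL1Block: number
  highestSyncedL1Block: number
  depositShutoffBlock: number
  syncEnqueued: (toBlock: number) => Promise<void>
  logger: { info: (msg: string, ctx?: Record<string, number>) => void }
}): Promise<void> => {
  const { depositTargetL1Block, highestSyncedL1Block, depositShutoffBlock } = opts
  if (depositShutoffBlock === 0 || depositTargetL1Block < depositShutoffBlock) {
    // Still before the shutoff block: keep syncing enqueued deposits.
    await opts.syncEnqueued(depositTargetL1Block)
  } else {
    // At or past the shutoff block: stop syncing deposits and say so loudly.
    opts.logger.info('Deposit shutoff reached', {
      depositTargetL1Block,
      highestSyncedL1Block,
      depositShutoffBlock,
    })
  }
}
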