Commit dee6046b authored by mergify[bot], committed by GitHub

Merge branch 'develop' into p2p-alt-sync

parents 8da91c8a 025e157a
---
'@eth-optimism/hardhat-deploy-config': patch
---
Add getter for other network's deploy config
......@@ -330,29 +330,70 @@ jobs:
patterns: contracts-bedrock,hardhat-deploy-config
- run:
name: lint
command: yarn lint:check
working_directory: packages/contracts-bedrock
- run:
name: slither
command: |
slither --version
yarn slither || exit 0
yarn lint:check || echo "export LINT_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: gas snapshot
command: |
forge --version
yarn gas-snapshot --check
yarn gas-snapshot --check || echo "export GAS_SNAPSHOT_STATUS=1" >> "$BASH_ENV"
environment:
FOUNDRY_PROFILE: ci
working_directory: packages/contracts-bedrock
- run:
name: storage snapshot
command: yarn storage-snapshot && git diff --exit-code .storage-layout
command: |
yarn storage-snapshot
git diff --exit-code .storage-layout || echo "export STORAGE_SNAPSHOT_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: invariant docs
command: yarn autogen:invariant-docs && git diff --exit-code ./invariant-docs/*.md
command: |
yarn autogen:invariant-docs
git diff --exit-code ./invariant-docs/*.md || echo "export INVARIANT_DOCS_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: check statuses
command: |
if [[ "$LINT_STATUS" -ne 0 ]]; then
FAILED=1
echo "Linting failed, see job output for details."
fi
if [[ "$GAS_SNAPSHOT_STATUS" -ne 0 ]]; then
FAILED=1
echo "Gas snapshot failed, see job output for details."
fi
if [[ "$STORAGE_SNAPSHOT_STATUS" -ne 0 ]]; then
echo "Storage snapshot failed, see job output for details."
FAILED=1
fi
if [[ "$INVARIANT_DOCS_STATUS" -ne 0 ]]; then
echo "Invariant docs failed, see job output for details."
FAILED=1
fi
if [[ "$FAILED" -ne 0 ]]; then
exit 1
fi
contracts-bedrock-slither:
docker:
- image: ethereumoptimism/ci-builder:latest
resource_class: xlarge
steps:
- checkout
- attach_workspace: { at: "." }
- restore_cache:
name: Restore Yarn Package Cache
keys:
- yarn-packages-v2-{{ checksum "yarn.lock" }}
- check-changed:
patterns: contracts-bedrock,hardhat-deploy-config
- run:
name: slither
command: |
slither --version
yarn slither
working_directory: packages/contracts-bedrock
contracts-bedrock-validate-spaces:
......@@ -968,6 +1009,9 @@ workflows:
- contracts-bedrock-checks:
requires:
- yarn-monorepo
- contracts-bedrock-slither:
requires:
- yarn-monorepo
- contracts-bedrock-validate-spaces:
requires:
- yarn-monorepo
......@@ -1096,6 +1140,9 @@ workflows:
- go-lint:
name: op-proposer-lint
module: op-proposer
- go-lint:
name: op-program-lint
module: op-program
- go-lint:
name: op-service-lint
module: op-service
......@@ -1118,6 +1165,9 @@ workflows:
- go-test:
name: op-proposer-tests
module: op-proposer
- go-test:
name: op-program-tests
module: op-program
- go-test:
name: op-service-tests
module: op-service
......@@ -1137,12 +1187,14 @@ workflows:
- op-e2e-lint
- op-node-lint
- op-proposer-lint
- op-program-lint
- op-service-lint
- op-batcher-tests
- op-bindings-tests
- op-chain-ops-tests
- op-node-tests
- op-proposer-tests
- op-program-tests
- op-service-tests
- op-e2e-WS-tests
- op-e2e-HTTP-tests
......
......@@ -29,6 +29,7 @@
/op-node @ethereum-optimism/go-reviewers
/op-node/rollup @protolambda @trianglesphere
/op-proposer @ethereum-optimism/go-reviewers
/op-program @ethereum-optimism/go-reviewers
/op-service @ethereum-optimism/go-reviewers
# Ops
......
......@@ -75,6 +75,7 @@ jobs:
uses: changesets/action@v1
id: changesets
with:
createGithubReleases: false
publish: yarn changeset publish --tag canary
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
......
......@@ -67,6 +67,7 @@ jobs:
uses: changesets/action@v1
id: changesets
with:
createGithubReleases: false
publish: yarn release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
......
......@@ -40,6 +40,10 @@ op-proposer:
make -C ./op-proposer op-proposer
.PHONY: op-proposer
op-program:
make -C ./op-program op-program
.PHONY: op-program
mod-tidy:
# Below GOPRIVATE line allows mod-tidy to be run immediately after
# releasing new versions. This bypasses the Go modules proxy, which
......
......@@ -61,7 +61,7 @@ def main():
addresses = read_json(addresses_json_path)
else:
log.info('Deploying contracts.')
run_command(['yarn', 'hardhat', '--network', 'devnetL1', 'deploy'], env={
run_command(['yarn', 'hardhat', '--network', 'devnetL1', 'deploy', '--tags', 'l1'], env={
'CHAIN_ID': '900',
'L1_RPC': 'http://localhost:8545',
'PRIVATE_KEY_DEPLOYER': 'ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80'
......
......@@ -26,7 +26,8 @@ type Config struct {
RollupNode *sources.RollupClient
TxManager txmgr.TxManager
PollInterval time.Duration
NetworkTimeout time.Duration
PollInterval time.Duration
// RollupConfig is queried at startup
Rollup *rollup.Config
......
......@@ -69,19 +69,19 @@ func NewBatchSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger, m metrics.Metri
return nil, fmt.Errorf("querying rollup config: %w", err)
}
txManagerConfig, err := txmgr.NewConfig(cfg.TxMgrConfig, l)
txManager, err := txmgr.NewSimpleTxManager("batcher", l, m, cfg.TxMgrConfig)
if err != nil {
return nil, err
}
txManager := txmgr.NewSimpleTxManager("batcher", l, txManagerConfig)
batcherCfg := Config{
L1Client: l1Client,
L2Client: l2Client,
RollupNode: rollupClient,
PollInterval: cfg.PollInterval,
TxManager: txManager,
Rollup: rcfg,
L1Client: l1Client,
L2Client: l2Client,
RollupNode: rollupClient,
PollInterval: cfg.PollInterval,
NetworkTimeout: cfg.TxMgrConfig.NetworkTimeout,
TxManager: txManager,
Rollup: rcfg,
Channel: ChannelConfig{
SeqWindowSize: rcfg.SeqWindowSize,
ChannelTimeout: rcfg.ChannelTimeout,
......@@ -223,7 +223,7 @@ func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context) {
// loadBlockIntoState fetches & stores a single block into `state`. It returns the block it loaded.
func (l *BatchSubmitter) loadBlockIntoState(ctx context.Context, blockNumber uint64) (*types.Block, error) {
ctx, cancel := context.WithTimeout(ctx, txManagerTimeout)
ctx, cancel := context.WithTimeout(ctx, l.NetworkTimeout)
defer cancel()
block, err := l.L2Client.BlockByNumber(ctx, new(big.Int).SetUint64(blockNumber))
if err != nil {
......@@ -241,9 +241,9 @@ func (l *BatchSubmitter) loadBlockIntoState(ctx context.Context, blockNumber uin
// calculateL2BlockRangeToStore determines the range (start,end] that should be loaded into the local state.
// It also takes care of initializing some local state (i.e. will modify l.lastStoredBlock in certain conditions)
func (l *BatchSubmitter) calculateL2BlockRangeToStore(ctx context.Context) (eth.BlockID, eth.BlockID, error) {
childCtx, cancel := context.WithTimeout(ctx, txManagerTimeout)
ctx, cancel := context.WithTimeout(ctx, l.NetworkTimeout)
defer cancel()
syncStatus, err := l.RollupNode.SyncStatus(childCtx)
syncStatus, err := l.RollupNode.SyncStatus(ctx)
// Ensure that we have the sync status
if err != nil {
return eth.BlockID{}, eth.BlockID{}, fmt.Errorf("failed to get sync status: %w", err)
......@@ -343,13 +343,6 @@ func (l *BatchSubmitter) publishStateToL1(ctx context.Context) {
}
}
const networkTimeout = 2 * time.Second // How long a single network request can take. TODO: put in a config somewhere
// fix(refcell):
// combined with above, these config variables should also be replicated in the op-proposer
// along with op-proposer changes to include the updated tx manager
const txManagerTimeout = 2 * time.Minute // How long the tx manager can take to send a transaction.
// sendTransaction creates & submits a transaction to the batch inbox address with the given `data`.
// It currently uses the underlying `txmgr` to handle transaction sending & price management.
// This is a blocking method. It should not be called concurrently.
......@@ -361,12 +354,9 @@ func (l *BatchSubmitter) sendTransaction(ctx context.Context, data []byte) (*typ
}
// Send the transaction through the txmgr
ctx, cancel := context.WithTimeout(ctx, txManagerTimeout)
defer cancel()
if receipt, err := l.txMgr.Send(ctx, txmgr.TxCandidate{
To: l.Rollup.BatchInboxAddress,
To: &l.Rollup.BatchInboxAddress,
TxData: data,
From: l.txMgr.From(),
GasLimit: intrinsicGas,
}); err != nil {
l.log.Warn("unable to publish tx", "err", err, "data_size", len(data))
......@@ -399,7 +389,7 @@ func (l *BatchSubmitter) recordConfirmedTx(id txID, receipt *types.Receipt) {
// l1Tip gets the current L1 tip as a L1BlockRef. The passed context is assumed
// to be a lifetime context, so it is internally wrapped with a network timeout.
func (l *BatchSubmitter) l1Tip(ctx context.Context) (eth.L1BlockRef, error) {
tctx, cancel := context.WithTimeout(ctx, networkTimeout)
tctx, cancel := context.WithTimeout(ctx, l.NetworkTimeout)
defer cancel()
head, err := l.L1Client.HeaderByNumber(tctx, nil)
if err != nil {
......
......@@ -11,6 +11,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics"
)
const Namespace = "op_batcher"
......@@ -22,6 +23,9 @@ type Metricer interface {
// Records all L1 and L2 block events
opmetrics.RefMetricer
// Record Tx metrics
txmetrics.TxMetricer
RecordLatestL1Block(l1ref eth.L1BlockRef)
RecordL2BlocksLoaded(l2ref eth.L2BlockRef)
RecordChannelOpened(id derive.ChannelID, numPendingBlocks int)
......@@ -43,6 +47,7 @@ type Metrics struct {
factory opmetrics.Factory
opmetrics.RefMetrics
txmetrics.TxMetrics
Info prometheus.GaugeVec
Up prometheus.Gauge
......@@ -80,6 +85,7 @@ func NewMetrics(procName string) *Metrics {
factory: factory,
RefMetrics: opmetrics.MakeRefMetrics(ns, factory),
TxMetrics: txmetrics.MakeTxMetrics(ns, factory),
Info: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
......
......@@ -4,9 +4,13 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics"
)
type noopMetrics struct{ opmetrics.NoopRefMetrics }
type noopMetrics struct {
opmetrics.NoopRefMetrics
txmetrics.NoopTxMetrics
}
var NoopMetrics Metricer = new(noopMetrics)
......
package main
import (
"context"
"errors"
"fmt"
"math/big"
"os"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
legacy_bindings "github.com/ethereum-optimism/optimism/op-bindings/legacy-bindings"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli/v2"
)
func main() {
app := cli.NewApp()
app.Name = "rollover"
app.Usage = "Commands for assisting in the rollover of the system"
var flags []cli.Flag
flags = append(flags, util.ClientsFlags...)
flags = append(flags, util.AddressesFlags...)
app.Commands = []*cli.Command{
{
Name: "deposits",
Usage: "Ensures that all deposits have been ingested into L2",
Flags: flags,
Action: func(cliCtx *cli.Context) error {
clients, err := util.NewClients(cliCtx)
if err != nil {
return err
}
addresses, err := util.NewAddresses(cliCtx)
if err != nil {
return err
}
log.Info("Connecting to AddressManager", "address", addresses.AddressManager)
addressManager, err := bindings.NewAddressManager(addresses.AddressManager, clients.L1Client)
if err != nil {
return err
}
for {
shutoffBlock, err := addressManager.GetAddress(&bind.CallOpts{}, "DTL_SHUTOFF_BLOCK")
if err != nil {
return err
}
if num := shutoffBlock.Big(); num.Cmp(common.Big0) != 0 {
log.Info("DTL_SHUTOFF_BLOCK is set", "number", num.Uint64())
break
}
log.Info("DTL_SHUTOFF_BLOCK not set yet")
time.Sleep(3 * time.Second)
}
log.Info("Connecting to CanonicalTransactionChain", "address", addresses.CanonicalTransactionChain)
ctc, err := legacy_bindings.NewCanonicalTransactionChain(addresses.CanonicalTransactionChain, clients.L1Client)
if err != nil {
return err
}
queueLength, err := ctc.GetQueueLength(&bind.CallOpts{})
if err != nil {
return err
}
totalElements, err := ctc.GetTotalElements(&bind.CallOpts{})
if err != nil {
return err
}
totalBatches, err := ctc.GetTotalBatches(&bind.CallOpts{})
if err != nil {
return err
}
pending, err := ctc.GetNumPendingQueueElements(&bind.CallOpts{})
if err != nil {
return err
}
log.Info(
"CanonicalTransactionChain",
"address", addresses.CanonicalTransactionChain,
"queue-length", queueLength,
"total-elements", totalElements,
"total-batches", totalBatches,
"pending", pending,
)
blockNumber, err := clients.L2Client.BlockNumber(context.Background())
if err != nil {
return err
}
log.Info("Searching backwards for final deposit", "start", blockNumber)
for {
bn := new(big.Int).SetUint64(blockNumber)
log.Info("Checking L2 block", "number", bn)
block, err := clients.L2Client.BlockByNumber(context.Background(), bn)
if err != nil {
return err
}
if length := len(block.Transactions()); length != 1 {
return fmt.Errorf("unexpected number of transactions in block: %d", length)
}
tx := block.Transactions()[0]
hash := tx.Hash()
json, err := legacyTransactionByHash(clients.L2RpcClient, hash)
if err != nil {
return err
}
if json.QueueOrigin == "l1" {
if json.QueueIndex == nil {
// This should never happen
return errors.New("queue index is nil")
}
queueIndex := uint64(*json.QueueIndex)
if queueIndex == queueLength.Uint64()-1 {
log.Info("Found final deposit in l2geth", "queue-index", queueIndex)
break
}
if queueIndex < queueLength.Uint64() {
return errors.New("missed final deposit")
}
}
blockNumber--
}
finalPending, err := ctc.GetNumPendingQueueElements(&bind.CallOpts{})
if err != nil {
return err
}
log.Info("Remaining deposits that must be submitted", "count", finalPending)
return nil
},
},
{
Name: "batches",
Usage: "Ensures that all batches have been submitted to L1",
Flags: flags,
Action: func(cliCtx *cli.Context) error {
clients, err := util.NewClients(cliCtx)
if err != nil {
return err
}
addresses, err := util.NewAddresses(cliCtx)
if err != nil {
return err
}
log.Info("Connecting to CanonicalTransactionChain", "address", addresses.CanonicalTransactionChain)
ctc, err := legacy_bindings.NewCanonicalTransactionChain(addresses.CanonicalTransactionChain, clients.L1Client)
if err != nil {
return err
}
log.Info("Connecting to StateCommitmentChain", "address", addresses.StateCommitmentChain)
scc, err := legacy_bindings.NewStateCommitmentChain(addresses.StateCommitmentChain, clients.L1Client)
if err != nil {
return err
}
var wg sync.WaitGroup
log.Info("Waiting for CanonicalTransactionChain")
wg.Add(1)
go waitForTotalElements(&wg, ctc, clients.L2Client)
log.Info("Waiting for StateCommitmentChain")
wg.Add(1)
go waitForTotalElements(&wg, scc, clients.L2Client)
wg.Wait()
log.Info("All batches have been submitted")
return nil
},
},
}
if err := app.Run(os.Args); err != nil {
log.Crit("Application failed", "message", err)
}
}
// RollupContract represents a legacy rollup contract interface that
// exposes the GetTotalElements function. Both the StateCommitmentChain
// and the CanonicalTransactionChain implement this interface.
type RollupContract interface {
GetTotalElements(opts *bind.CallOpts) (*big.Int, error)
}
// waitForTotalElements polls until the contract's total element count catches up to the current L2 block height.
func waitForTotalElements(wg *sync.WaitGroup, contract RollupContract, client *ethclient.Client) {
defer wg.Done()
for {
bn, err := client.BlockNumber(context.Background())
if err != nil {
log.Error("cannot fetch blocknumber", "error", err)
time.Sleep(3 * time.Second)
continue
}
totalElements, err := contract.GetTotalElements(&bind.CallOpts{})
if err != nil {
log.Error("cannot fetch total elements", "error", err)
time.Sleep(3 * time.Second)
continue
}
if totalElements.Uint64() == bn {
return
}
log.Info("Waiting for elements to be submitted", "count", totalElements.Uint64()-bn, "height", bn, "total-elements", totalElements.Uint64())
time.Sleep(3 * time.Second)
}
}
// legacyTransactionByHash will fetch a transaction by hash and be sure to decode
// the additional fields added to legacy transactions.
func legacyTransactionByHash(client *rpc.Client, hash common.Hash) (*RPCTransaction, error) {
var json *RPCTransaction
err := client.CallContext(context.Background(), &json, "eth_getTransactionByHash", hash)
if err != nil {
return nil, err
}
return json, nil
}
// RPCTransaction represents a transaction that will serialize to the RPC representation of a
// transaction. This handles the extra legacy fields added to transactions.
type RPCTransaction struct {
BlockHash *common.Hash `json:"blockHash"`
BlockNumber *hexutil.Big `json:"blockNumber"`
From common.Address `json:"from"`
Gas hexutil.Uint64 `json:"gas"`
GasPrice *hexutil.Big `json:"gasPrice"`
Hash common.Hash `json:"hash"`
Input hexutil.Bytes `json:"input"`
Nonce hexutil.Uint64 `json:"nonce"`
To *common.Address `json:"to"`
TransactionIndex *hexutil.Uint64 `json:"transactionIndex"`
Value *hexutil.Big `json:"value"`
V *hexutil.Big `json:"v"`
R *hexutil.Big `json:"r"`
S *hexutil.Big `json:"s"`
QueueOrigin string `json:"queueOrigin"`
L1TxOrigin *common.Address `json:"l1TxOrigin"`
L1BlockNumber *hexutil.Big `json:"l1BlockNumber"`
L1Timestamp hexutil.Uint64 `json:"l1Timestamp"`
Index *hexutil.Uint64 `json:"index"`
QueueIndex *hexutil.Uint64 `json:"queueIndex"`
RawTransaction hexutil.Bytes `json:"rawTransaction"`
}
......@@ -18,6 +18,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
......@@ -25,9 +26,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
)
// abiTrue represents the storage representation of the boolean
......@@ -115,7 +114,7 @@ func main() {
},
},
Action: func(ctx *cli.Context) error {
clients, err := newClients(ctx)
clients, err := util.NewClients(ctx)
if err != nil {
return err
}
......@@ -433,7 +432,7 @@ func main() {
}
// callTrace will call `debug_traceTransaction` on a remote node
func callTrace(c *clients, receipt *types.Receipt) (callFrame, error) {
func callTrace(c *util.Clients, receipt *types.Receipt) (callFrame, error) {
var finalizationTrace callFrame
tracer := "callTracer"
traceConfig := tracers.TraceConfig{
......@@ -558,7 +557,7 @@ func handleFinalizeERC20Withdrawal(args []any, receipt *types.Receipt, l1Standar
// proveWithdrawalTransaction will build the data required for proving a
// withdrawal and then send the transaction and make sure that it is included
// and successful and then wait for the finalization period to elapse.
func proveWithdrawalTransaction(c *contracts, cl *clients, opts *bind.TransactOpts, withdrawal *crossdomain.Withdrawal, bn, finalizationPeriod *big.Int) error {
func proveWithdrawalTransaction(c *contracts, cl *util.Clients, opts *bind.TransactOpts, withdrawal *crossdomain.Withdrawal, bn, finalizationPeriod *big.Int) error {
l2OutputIndex, outputRootProof, trieNodes, err := createOutput(withdrawal, c.L2OutputOracle, bn, cl)
if err != nil {
return err
......@@ -614,7 +613,7 @@ func proveWithdrawalTransaction(c *contracts, cl *clients, opts *bind.TransactOp
func finalizeWithdrawalTransaction(
c *contracts,
cl *clients,
cl *util.Clients,
opts *bind.TransactOpts,
wd *crossdomain.LegacyWithdrawal,
withdrawal *crossdomain.Withdrawal,
......@@ -699,67 +698,6 @@ func newContracts(ctx *cli.Context, l1Backend, l2Backend bind.ContractBackend) (
}, nil
}
// clients represents a set of initialized RPC clients
type clients struct {
L1Client *ethclient.Client
L2Client *ethclient.Client
L1RpcClient *rpc.Client
L2RpcClient *rpc.Client
L1GethClient *gethclient.Client
L2GethClient *gethclient.Client
}
// newClients will create new RPC clients
func newClients(ctx *cli.Context) (*clients, error) {
l1RpcURL := ctx.String("l1-rpc-url")
l1Client, err := ethclient.Dial(l1RpcURL)
if err != nil {
return nil, err
}
l1ChainID, err := l1Client.ChainID(context.Background())
if err != nil {
return nil, err
}
l2RpcURL := ctx.String("l2-rpc-url")
l2Client, err := ethclient.Dial(l2RpcURL)
if err != nil {
return nil, err
}
l2ChainID, err := l2Client.ChainID(context.Background())
if err != nil {
return nil, err
}
l1RpcClient, err := rpc.DialContext(context.Background(), l1RpcURL)
if err != nil {
return nil, err
}
l2RpcClient, err := rpc.DialContext(context.Background(), l2RpcURL)
if err != nil {
return nil, err
}
l1GethClient := gethclient.New(l1RpcClient)
l2GethClient := gethclient.New(l2RpcClient)
log.Info(
"Set up RPC clients",
"l1-chain-id", l1ChainID,
"l2-chain-id", l2ChainID,
)
return &clients{
L1Client: l1Client,
L2Client: l2Client,
L1RpcClient: l1RpcClient,
L2RpcClient: l2RpcClient,
L1GethClient: l1GethClient,
L2GethClient: l2GethClient,
}, nil
}
// newWithdrawals will create a set of legacy withdrawals
func newWithdrawals(ctx *cli.Context, l1ChainID *big.Int) ([]*crossdomain.LegacyWithdrawal, error) {
ovmMsgs := ctx.String("ovm-messages")
......@@ -849,7 +787,7 @@ func createOutput(
withdrawal *crossdomain.Withdrawal,
oracle *bindings.L2OutputOracle,
blockNumber *big.Int,
clients *clients,
clients *util.Clients,
) (*big.Int, bindings.TypesOutputRootProof, [][]byte, error) {
// compute the storage slot that the withdrawal is stored in
slot, err := withdrawal.StorageSlot()
......
package util
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli/v2"
)
func ProgressLogger(n int, msg string) func(...any) {
......@@ -15,3 +23,171 @@ func ProgressLogger(n int, msg string) func(...any) {
log.Info(msg, append([]any{"count", i}, args...)...)
}
}
// Clients represents a set of initialized RPC clients
type Clients struct {
L1Client *ethclient.Client
L2Client *ethclient.Client
L1RpcClient *rpc.Client
L2RpcClient *rpc.Client
L1GethClient *gethclient.Client
L2GethClient *gethclient.Client
}
// NewClients will create new RPC clients from a CLI context
func NewClients(ctx *cli.Context) (*Clients, error) {
l1RpcURL := ctx.String("l1-rpc-url")
l1Client, err := ethclient.Dial(l1RpcURL)
if err != nil {
return nil, err
}
l1ChainID, err := l1Client.ChainID(context.Background())
if err != nil {
return nil, err
}
l2RpcURL := ctx.String("l2-rpc-url")
l2Client, err := ethclient.Dial(l2RpcURL)
if err != nil {
return nil, err
}
l2ChainID, err := l2Client.ChainID(context.Background())
if err != nil {
return nil, err
}
l1RpcClient, err := rpc.DialContext(context.Background(), l1RpcURL)
if err != nil {
return nil, err
}
l2RpcClient, err := rpc.DialContext(context.Background(), l2RpcURL)
if err != nil {
return nil, err
}
l1GethClient := gethclient.New(l1RpcClient)
l2GethClient := gethclient.New(l2RpcClient)
log.Info(
"Set up RPC clients",
"l1-chain-id", l1ChainID,
"l2-chain-id", l2ChainID,
)
return &Clients{
L1Client: l1Client,
L2Client: l2Client,
L1RpcClient: l1RpcClient,
L2RpcClient: l2RpcClient,
L1GethClient: l1GethClient,
L2GethClient: l2GethClient,
}, nil
}
// ClientsFlags represent the flags associated with creating RPC clients.
var ClientsFlags = []cli.Flag{
&cli.StringFlag{
Name: "l1-rpc-url",
Required: true,
Usage: "L1 RPC URL",
EnvVars: []string{"L1_RPC_URL"},
},
&cli.StringFlag{
Name: "l2-rpc-url",
Required: true,
Usage: "L2 RPC URL",
EnvVars: []string{"L2_RPC_URL"},
},
}
// Addresses represents the address values of various contracts. The values can
// be easily populated via a [cli.Context].
type Addresses struct {
AddressManager common.Address
OptimismPortal common.Address
L1StandardBridge common.Address
L1CrossDomainMessenger common.Address
CanonicalTransactionChain common.Address
StateCommitmentChain common.Address
}
// AddressesFlags represent the flags associated with address parsing.
var AddressesFlags = []cli.Flag{
&cli.StringFlag{
Name: "address-manager-address",
Usage: "AddressManager address",
EnvVars: []string{"ADDRESS_MANAGER_ADDRESS"},
},
&cli.StringFlag{
Name: "optimism-portal-address",
Usage: "OptimismPortal address",
EnvVars: []string{"OPTIMISM_PORTAL_ADDRESS"},
},
&cli.StringFlag{
Name: "l1-standard-bridge-address",
Usage: "L1StandardBridge address",
EnvVars: []string{"L1_STANDARD_BRIDGE_ADDRESS"},
},
&cli.StringFlag{
Name: "l1-crossdomain-messenger-address",
Usage: "L1CrossDomainMessenger address",
EnvVars: []string{"L1_CROSSDOMAIN_MESSENGER_ADDRESS"},
},
&cli.StringFlag{
Name: "canonical-transaction-chain-address",
Usage: "CanonicalTransactionChain address",
EnvVars: []string{"CANONICAL_TRANSACTION_CHAIN_ADDRESS"},
},
&cli.StringFlag{
Name: "state-commitment-chain-address",
Usage: "StateCommitmentChain address",
EnvVars: []string{"STATE_COMMITMENT_CHAIN_ADDRESS"},
},
}
// NewAddresses populates an Addresses struct given a [cli.Context].
// This is useful for writing scripts that interact with smart contracts.
func NewAddresses(ctx *cli.Context) (*Addresses, error) {
var addresses Addresses
var err error
addresses.AddressManager, err = parseAddress(ctx, "address-manager-address")
if err != nil {
return nil, err
}
addresses.OptimismPortal, err = parseAddress(ctx, "optimism-portal-address")
if err != nil {
return nil, err
}
addresses.L1StandardBridge, err = parseAddress(ctx, "l1-standard-bridge-address")
if err != nil {
return nil, err
}
addresses.L1CrossDomainMessenger, err = parseAddress(ctx, "l1-crossdomain-messenger-address")
if err != nil {
return nil, err
}
addresses.CanonicalTransactionChain, err = parseAddress(ctx, "canonical-transaction-chain-address")
if err != nil {
return nil, err
}
addresses.StateCommitmentChain, err = parseAddress(ctx, "state-commitment-chain-address")
if err != nil {
return nil, err
}
return &addresses, nil
}
// parseAddress will parse a [common.Address] from a [cli.Context] and return
// an error if the configured address is not correct.
func parseAddress(ctx *cli.Context, name string) (common.Address, error) {
value := ctx.String(name)
if value == "" {
return common.Address{}, nil
}
if !common.IsHexAddress(value) {
return common.Address{}, fmt.Errorf("invalid address: %s", value)
}
return common.HexToAddress(value), nil
}
......@@ -53,6 +53,7 @@ func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Cl
proposerCfg := proposer.Config{
L2OutputOracleAddr: cfg.OutputOracleAddr,
PollInterval: time.Second,
NetworkTimeout: time.Second,
L1Client: l1,
RollupClient: rollupCl,
AllowNonFinalized: cfg.AllowNonFinalized,
......
......@@ -18,7 +18,6 @@ import (
proposermetrics "github.com/ethereum-optimism/optimism/op-proposer/metrics"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
......@@ -340,13 +339,7 @@ func TestMigration(t *testing.T) {
ApproxComprRatio: 0.4,
SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond,
TxMgrConfig: txmgr.CLIConfig{
L1RPCURL: forkedL1URL,
PrivateKey: hexPriv(secrets.Batcher),
NumConfirmations: 1,
ResubmissionTimeout: 5 * time.Second,
SafeAbortNonceTooLowCount: 3,
},
TxMgrConfig: newTxMgrConfig(forkedL1URL, secrets.Batcher),
LogConfig: oplog.CLIConfig{
Level: "info",
Format: "text",
......@@ -365,13 +358,7 @@ func TestMigration(t *testing.T) {
L2OOAddress: l2OS.Address.String(),
PollInterval: 50 * time.Millisecond,
AllowNonFinalized: true,
TxMgrConfig: txmgr.CLIConfig{
L1RPCURL: forkedL1URL,
PrivateKey: hexPriv(secrets.Proposer),
NumConfirmations: 1,
ResubmissionTimeout: 3 * time.Second,
SafeAbortNonceTooLowCount: 3,
},
TxMgrConfig: newTxMgrConfig(forkedL1URL, secrets.Proposer),
LogConfig: oplog.CLIConfig{
Level: "info",
Format: "text",
......
......@@ -47,6 +47,19 @@ var (
testingJWTSecret = [32]byte{123}
)
func newTxMgrConfig(l1Addr string, privKey *ecdsa.PrivateKey) txmgr.CLIConfig {
return txmgr.CLIConfig{
L1RPCURL: l1Addr,
PrivateKey: hexPriv(privKey),
NumConfirmations: 1,
SafeAbortNonceTooLowCount: 3,
ResubmissionTimeout: 3 * time.Second,
ReceiptQueryInterval: 50 * time.Millisecond,
NetworkTimeout: 2 * time.Second,
TxNotInMempoolTimeout: 2 * time.Minute,
}
}
func DefaultSystemConfig(t *testing.T) SystemConfig {
secrets, err := e2eutils.DefaultMnemonicConfig.Secrets()
require.NoError(t, err)
......@@ -555,18 +568,11 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
// L2Output Submitter
sys.L2OutputSubmitter, err = l2os.NewL2OutputSubmitterFromCLIConfig(l2os.CLIConfig{
L1EthRpc: sys.Nodes["l1"].WSEndpoint(),
RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(),
L2OOAddress: predeploys.DevL2OutputOracleAddr.String(),
PollInterval: 50 * time.Millisecond,
TxMgrConfig: txmgr.CLIConfig{
L1RPCURL: sys.Nodes["l1"].WSEndpoint(),
PrivateKey: hexPriv(cfg.Secrets.Proposer),
NumConfirmations: 1,
SafeAbortNonceTooLowCount: 3,
ResubmissionTimeout: 3 * time.Second,
ReceiptQueryInterval: 50 * time.Millisecond,
},
L1EthRpc: sys.Nodes["l1"].WSEndpoint(),
RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(),
L2OOAddress: predeploys.DevL2OutputOracleAddr.String(),
PollInterval: 50 * time.Millisecond,
TxMgrConfig: newTxMgrConfig(sys.Nodes["l1"].WSEndpoint(), cfg.Secrets.Proposer),
AllowNonFinalized: cfg.NonFinalizedProposals,
LogConfig: oplog.CLIConfig{
Level: "info",
......@@ -593,14 +599,7 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
ApproxComprRatio: 0.4,
SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond,
TxMgrConfig: txmgr.CLIConfig{
L1RPCURL: sys.Nodes["l1"].WSEndpoint(),
PrivateKey: hexPriv(cfg.Secrets.Batcher),
NumConfirmations: 1,
SafeAbortNonceTooLowCount: 3,
ResubmissionTimeout: 3 * time.Second,
ReceiptQueryInterval: 50 * time.Millisecond,
},
TxMgrConfig: newTxMgrConfig(sys.Nodes["l1"].WSEndpoint(), cfg.Secrets.Batcher),
LogConfig: oplog.CLIConfig{
Level: "info",
Format: "text",
......
......@@ -338,27 +338,15 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti
}
func verifyBlockSignature(log log.Logger, cfg *rollup.Config, runCfg GossipRuntimeConfig, id peer.ID, signatureBytes []byte, payloadBytes []byte) pubsub.ValidationResult {
result := verifyBlockSignatureWithHasher(nil, cfg, runCfg, id, signatureBytes, payloadBytes, LegacyBlockSigningHash)
if result != pubsub.ValidationAccept {
return verifyBlockSignatureWithHasher(log, cfg, runCfg, id, signatureBytes, payloadBytes, BlockSigningHash)
}
return result
}
func verifyBlockSignatureWithHasher(log log.Logger, cfg *rollup.Config, runCfg GossipRuntimeConfig, id peer.ID, signatureBytes []byte, payloadBytes []byte, hasher func(cfg *rollup.Config, payloadBytes []byte) (common.Hash, error)) pubsub.ValidationResult {
signingHash, err := hasher(cfg, payloadBytes)
signingHash, err := BlockSigningHash(cfg, payloadBytes)
if err != nil {
if log != nil {
log.Warn("failed to compute block signing hash", "err", err, "peer", id)
}
log.Warn("failed to compute block signing hash", "err", err, "peer", id)
return pubsub.ValidationReject
}
pub, err := crypto.SigToPub(signingHash[:], signatureBytes)
if err != nil {
if log != nil {
log.Warn("invalid block signature", "err", err, "peer", id)
}
log.Warn("invalid block signature", "err", err, "peer", id)
return pubsub.ValidationReject
}
addr := crypto.PubkeyToAddress(*pub)
......@@ -369,14 +357,10 @@ func verifyBlockSignatureWithHasher(log log.Logger, cfg *rollup.Config, runCfg G
// This means we may drop old payloads upon key rotation,
// but this can be recovered from like any other missed unsafe payload.
if expected := runCfg.P2PSequencerAddress(); expected == (common.Address{}) {
if log != nil {
log.Warn("no configured p2p sequencer address, ignoring gossiped block", "peer", id, "addr", addr)
}
log.Warn("no configured p2p sequencer address, ignoring gossiped block", "peer", id, "addr", addr)
return pubsub.ValidationIgnore
} else if addr != expected {
if log != nil {
log.Warn("unexpected block author", "err", err, "peer", id, "addr", addr, "expected", expected)
}
log.Warn("unexpected block author", "err", err, "peer", id, "addr", addr, "expected", expected)
return pubsub.ValidationReject
}
return pubsub.ValidationAccept
......
......@@ -2,7 +2,6 @@ package p2p
import (
"context"
"crypto/ecdsa"
"math/big"
"testing"
......@@ -42,21 +41,6 @@ func TestGuardGossipValidator(t *testing.T) {
}
func TestVerifyBlockSignature(t *testing.T) {
// Should accept signatures over both the legacy and updated signature hashes
tests := []struct {
name string
newSigner func(priv *ecdsa.PrivateKey) *LocalSigner
}{
{
name: "Legacy",
newSigner: newLegacyLocalSigner,
},
{
name: "Updated",
newSigner: NewLocalSigner,
},
}
logger := testlog.Logger(t, log.LvlCrit)
cfg := &rollup.Config{
L2ChainID: big.NewInt(100),
......@@ -66,43 +50,37 @@ func TestVerifyBlockSignature(t *testing.T) {
require.NoError(t, err)
msg := []byte("any msg")
for _, test := range tests {
t.Run("Valid "+test.name, func(t *testing.T) {
runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)}
signer := &PreparedSigner{Signer: test.newSigner(secrets.SequencerP2P)}
sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg)
require.NoError(t, err)
result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg)
require.Equal(t, pubsub.ValidationAccept, result)
})
t.Run("WrongSigner "+test.name, func(t *testing.T) {
runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: common.HexToAddress("0x1234")}
signer := &PreparedSigner{Signer: test.newSigner(secrets.SequencerP2P)}
sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg)
require.NoError(t, err)
result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg)
require.Equal(t, pubsub.ValidationReject, result)
})
t.Run("Valid", func(t *testing.T) {
runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)}
signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)}
sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg)
require.NoError(t, err)
result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg)
require.Equal(t, pubsub.ValidationAccept, result)
})
t.Run("InvalidSignature "+test.name, func(t *testing.T) {
runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)}
sig := make([]byte, 65)
result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig, msg)
require.Equal(t, pubsub.ValidationReject, result)
})
t.Run("WrongSigner", func(t *testing.T) {
runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: common.HexToAddress("0x1234")}
signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)}
sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg)
require.NoError(t, err)
result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg)
require.Equal(t, pubsub.ValidationReject, result)
})
t.Run("NoSequencer "+test.name, func(t *testing.T) {
runCfg := &testutils.MockRuntimeConfig{}
signer := &PreparedSigner{Signer: test.newSigner(secrets.SequencerP2P)}
sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg)
require.NoError(t, err)
result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg)
require.Equal(t, pubsub.ValidationIgnore, result)
})
}
}
t.Run("InvalidSignature", func(t *testing.T) {
runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)}
sig := make([]byte, 65)
result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig, msg)
require.Equal(t, pubsub.ValidationReject, result)
})
func newLegacyLocalSigner(priv *ecdsa.PrivateKey) *LocalSigner {
return &LocalSigner{priv: priv, hasher: LegacySigningHash}
t.Run("NoSequencer", func(t *testing.T) {
runCfg := &testutils.MockRuntimeConfig{}
signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)}
sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg)
require.NoError(t, err)
result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg)
require.Equal(t, pubsub.ValidationIgnore, result)
})
}
......@@ -20,21 +20,6 @@ type Signer interface {
io.Closer
}
func LegacySigningHash(domain [32]byte, chainID *big.Int, payloadBytes []byte) (common.Hash, error) {
var msgInput [32 + 32 + 32]byte
// domain: first 32 bytes
copy(msgInput[:32], domain[:])
// chain_id: second 32 bytes
if chainID.BitLen() > 256 {
return common.Hash{}, errors.New("chain_id is too large")
}
chainID.FillBytes(msgInput[32:64])
// payload_hash: third 32 bytes, hash of encoded payload
copy(msgInput[32:], crypto.Keccak256(payloadBytes))
return crypto.Keccak256Hash(msgInput[:]), nil
}
func SigningHash(domain [32]byte, chainID *big.Int, payloadBytes []byte) (common.Hash, error) {
var msgInput [32 + 32 + 32]byte
// domain: first 32 bytes
......@@ -54,10 +39,6 @@ func BlockSigningHash(cfg *rollup.Config, payloadBytes []byte) (common.Hash, err
return SigningHash(SigningDomainBlocksV1, cfg.L2ChainID, payloadBytes)
}
func LegacyBlockSigningHash(cfg *rollup.Config, payloadBytes []byte) (common.Hash, error) {
return LegacySigningHash(SigningDomainBlocksV1, cfg.L2ChainID, payloadBytes)
}
// LocalSigner is suitable for testing
type LocalSigner struct {
priv *ecdsa.PrivateKey
......
......@@ -36,7 +36,7 @@ func NewDataSourceFactory(log log.Logger, cfg *rollup.Config, fetcher L1Transact
return &DataSourceFactory{log: log, cfg: cfg, fetcher: fetcher}
}
// OpenData returns a CalldataSourceImpl. This struct implements the `Next` function.
// OpenData returns a DataIter. This struct implements the `Next` function.
func (ds *DataSourceFactory) OpenData(ctx context.Context, id eth.BlockID, batcherAddr common.Address) DataIter {
return NewDataSource(ctx, ds.log, ds.cfg, ds.fetcher, id, batcherAddr)
}
......
......@@ -469,7 +469,7 @@ func (eq *EngineQueue) consolidateNextSafeAttributes(ctx context.Context) error
return NewTemporaryError(fmt.Errorf("failed to get existing unsafe payload to compare against derived attributes from L1: %w", err))
}
if err := AttributesMatchBlock(eq.safeAttributes, eq.safeHead.Hash, payload, eq.log); err != nil {
eq.log.Warn("L2 reorg: existing unsafe block does not match derived attributes from L1", "err", err)
eq.log.Warn("L2 reorg: existing unsafe block does not match derived attributes from L1", "err", err, "unsafe", eq.unsafeHead, "safe", eq.safeHead)
// geth cannot wind back a chain without reorging to a new, previously non-canonical, block
return eq.forceNextSafeAttributes(ctx)
}
......
......@@ -70,13 +70,13 @@ func ProcessSystemConfigUpdateLogEvent(destSysCfg *eth.SystemConfig, ev *types.L
return fmt.Errorf("expected 3 event topics (event identity, indexed version, indexed updateType), got %d", len(ev.Topics))
}
if ev.Topics[0] != ConfigUpdateEventABIHash {
return fmt.Errorf("invalid deposit event selector: %s, expected %s", ev.Topics[0], DepositEventABIHash)
return fmt.Errorf("invalid SystemConfig update event: %s, expected %s", ev.Topics[0], ConfigUpdateEventABIHash)
}
// indexed 0
version := ev.Topics[1]
if version != ConfigUpdateEventVersion0 {
return fmt.Errorf("unrecognized L1 sysCfg update event version: %s", version)
return fmt.Errorf("unrecognized SystemConfig update event version: %s", version)
}
// indexed 1
updateType := ev.Topics[2]
......
MIT License
Copyright (c) 2023 Optimism
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
VERSION := v0.0.0
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
LDFLAGSSTRING +=-X main.GitDate=$(GITDATE)
LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-program/version.Version=$(VERSION)
LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-program/version.Meta=$(VERSION_META)
LDFLAGS := -ldflags "$(LDFLAGSSTRING)"
op-program:
env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/op-program ./cmd/main.go
clean:
rm -rf bin
test:
go test -v ./...
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint -e "errors.As" -e "errors.Is"
.PHONY: \
op-program \
clean \
test \
lint
# op-program
Implements a fault proof program that runs through the rollup state-transition to verify an L2 output from L1 inputs.
The verified output can then be used to resolve a disputed output on L1.
The program is designed to run deterministically: two invocations with the same input data produce not only the same
output, but the same program execution trace. This allows it to be run in an on-chain VM as part of the dispute
resolution process.
## Compiling
To build op-program, from within the `op-program` directory run:
```shell
make op-program
```
The resulting executable will be at `./bin/op-program`.
## Testing
To run op-program unit tests, from within the `op-program` directory run:
```shell
make test
```
## Lint
To run the linter, from within the `op-program` directory run:
```shell
make lint
```
This requires having `golangci-lint` installed.
## Running
From within the `op-program` directory, options can be reviewed with:
```shell
./bin/op-program --help
```
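As a minimal usage sketch (assuming the `--network` and `--log.level` flags added in this diff, and that the Goerli preset remains available):
```shell
# As of this diff, the program only parses its config and logs the
# selected rollup configuration before exiting.
./bin/op-program --network goerli --log.level debug
```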
package main
import (
"fmt"
"os"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-program/config"
"github.com/ethereum-optimism/optimism/op-program/flags"
"github.com/ethereum-optimism/optimism/op-program/version"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli"
)
var (
GitCommit = ""
GitDate = ""
)
// VersionWithMeta holds the textual version string including the metadata.
var VersionWithMeta = func() string {
v := version.Version
if GitCommit != "" {
v += "-" + GitCommit[:8]
}
if GitDate != "" {
v += "-" + GitDate
}
if version.Meta != "" {
v += "-" + version.Meta
}
return v
}()
func main() {
args := os.Args
err := run(args, FaultProofProgram)
if err != nil {
log.Crit("Application failed", "message", err)
}
}
type ConfigAction func(log log.Logger, config *config.Config) error
// run parses the supplied args to create a config.Config instance, sets up logging
// then calls the supplied ConfigAction.
// This allows testing the translation from CLI arguments to Config
func run(args []string, action ConfigAction) error {
// Set up logger with a default INFO level in case we fail to parse flags,
// otherwise the final critical log won't show what the parsing error was.
oplog.SetupDefaults()
app := cli.NewApp()
app.Version = VersionWithMeta
app.Flags = flags.Flags
app.Name = "op-program"
app.Usage = "Optimism Fault Proof Program"
app.Description = "The Optimism Fault Proof Program fault proof program that runs through the rollup state-transition to verify an L2 output from L1 inputs."
app.Action = func(ctx *cli.Context) error {
logger, err := setupLogging(ctx)
if err != nil {
return err
}
logger.Info("Starting fault proof program", "version", VersionWithMeta)
cfg, err := config.NewConfigFromCLI(ctx)
if err != nil {
return err
}
return action(logger, cfg)
}
return app.Run(args)
}
func setupLogging(ctx *cli.Context) (log.Logger, error) {
logCfg := oplog.ReadCLIConfig(ctx)
if err := logCfg.Check(); err != nil {
return nil, fmt.Errorf("log config error: %w", err)
}
logger := oplog.NewLogger(logCfg)
return logger, nil
}
// FaultProofProgram is the programmatic entry-point for the fault proof program
func FaultProofProgram(log log.Logger, cfg *config.Config) error {
cfg.Rollup.LogDescription(log, chaincfg.L2ChainIDToNetworkName)
return nil
}
package main
import (
"encoding/json"
"os"
"testing"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-program/config"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
func TestLogLevel(t *testing.T) {
t.Run("RejectInvalid", func(t *testing.T) {
verifyArgsInvalid(t, "unknown level: foo", addRequiredArgs("--log.level=foo"))
})
for _, lvl := range []string{"trace", "debug", "info", "error", "crit"} {
lvl := lvl
t.Run("AcceptValid_"+lvl, func(t *testing.T) {
logger, _, err := runWithArgs(addRequiredArgs("--log.level", lvl))
require.NoError(t, err)
require.NotNil(t, logger)
})
}
}
func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs())
require.Equal(t, config.NewConfig(&chaincfg.Goerli), cfg)
}
func TestNetwork(t *testing.T) {
t.Run("Unknown", func(t *testing.T) {
verifyArgsInvalid(t, "invalid network bar", replaceRequiredArg("--network", "bar"))
})
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag rollup.config or network is required", addRequiredArgsExcept("--network"))
})
t.Run("DisallowNetworkAndRollupConfig", func(t *testing.T) {
verifyArgsInvalid(t, "cannot specify both rollup.config and network", addRequiredArgs("--rollup.config=foo"))
})
t.Run("RollupConfig", func(t *testing.T) {
dir := t.TempDir()
configJson, err := json.Marshal(chaincfg.Goerli)
require.NoError(t, err)
configFile := dir + "/config.json"
err = os.WriteFile(configFile, configJson, os.ModePerm)
require.NoError(t, err)
cfg := configForArgs(t, addRequiredArgsExcept("--network", "--rollup.config", configFile))
require.Equal(t, chaincfg.Goerli, *cfg.Rollup)
})
for name, cfg := range chaincfg.NetworksByName {
name := name
expected := cfg
t.Run("Network_"+name, func(t *testing.T) {
cfg := configForArgs(t, replaceRequiredArg("--network", name))
require.Equal(t, expected, *cfg.Rollup)
})
}
}
func verifyArgsInvalid(t *testing.T, messageContains string, cliArgs []string) {
_, _, err := runWithArgs(cliArgs)
require.ErrorContains(t, err, messageContains)
}
func configForArgs(t *testing.T, cliArgs []string) *config.Config {
_, cfg, err := runWithArgs(cliArgs)
require.NoError(t, err)
return cfg
}
func runWithArgs(cliArgs []string) (log.Logger, *config.Config, error) {
var cfg *config.Config
var logger log.Logger
fullArgs := append([]string{"op-program"}, cliArgs...)
err := run(fullArgs, func(log log.Logger, config *config.Config) error {
logger = log
cfg = config
return nil
})
return logger, cfg, err
}
func addRequiredArgs(args ...string) []string {
req := requiredArgs()
combined := toArgList(req)
return append(combined, args...)
}
func addRequiredArgsExcept(name string, optionalArgs ...string) []string {
req := requiredArgs()
delete(req, name)
return append(toArgList(req), optionalArgs...)
}
func replaceRequiredArg(name string, value string) []string {
req := requiredArgs()
req[name] = value
return toArgList(req)
}
// requiredArgs returns a map of argument names to values which are the minimal arguments required
// to create a valid Config
func requiredArgs() map[string]string {
return map[string]string{
"--network": "goerli",
}
}
func toArgList(req map[string]string) []string {
var combined []string
for name, value := range req {
combined = append(combined, name)
combined = append(combined, value)
}
return combined
}
package config
import (
"errors"
opnode "github.com/ethereum-optimism/optimism/op-node"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-program/flags"
"github.com/urfave/cli"
)
var (
ErrMissingRollupConfig = errors.New("missing rollup config")
)
type Config struct {
Rollup *rollup.Config
}
func (c *Config) Check() error {
if c.Rollup == nil {
return ErrMissingRollupConfig
}
if err := c.Rollup.Check(); err != nil {
return err
}
return nil
}
// NewConfig creates a Config with all optional values set to the CLI default value
func NewConfig(rollupCfg *rollup.Config) *Config {
return &Config{
Rollup: rollupCfg,
}
}
func NewConfigFromCLI(ctx *cli.Context) (*Config, error) {
if err := flags.CheckRequired(ctx); err != nil {
return nil, err
}
rollupCfg, err := opnode.NewRollupConfig(ctx)
if err != nil {
return nil, err
}
return &Config{
Rollup: rollupCfg,
}, nil
}
package config
import (
"testing"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/stretchr/testify/require"
)
func TestDefaultConfigIsValid(t *testing.T) {
err := NewConfig(&chaincfg.Goerli).Check()
require.NoError(t, err)
}
func TestRollupConfig(t *testing.T) {
t.Run("Required", func(t *testing.T) {
err := NewConfig(nil).Check()
require.ErrorIs(t, err, ErrMissingRollupConfig)
})
t.Run("Valid", func(t *testing.T) {
err := NewConfig(&rollup.Config{}).Check()
require.ErrorIs(t, err, rollup.ErrBlockTimeZero)
})
}
package flags
import (
"fmt"
"strings"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
service "github.com/ethereum-optimism/optimism/op-service"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/urfave/cli"
)
const envVarPrefix = "OP_PROGRAM"
var (
RollupConfig = cli.StringFlag{
Name: "rollup.config",
Usage: "Rollup chain parameters",
EnvVar: service.PrefixEnvVar(envVarPrefix, "ROLLUP_CONFIG"),
}
Network = cli.StringFlag{
Name: "network",
Usage: fmt.Sprintf("Predefined network selection. Available networks: %s", strings.Join(chaincfg.AvailableNetworks(), ", ")),
EnvVar: service.PrefixEnvVar(envVarPrefix, "NETWORK"),
}
)
// Flags contains the list of configuration options available to the binary.
var Flags []cli.Flag
var programFlags = []cli.Flag{
RollupConfig,
Network,
}
func init() {
Flags = append(Flags, oplog.CLIFlags(envVarPrefix)...)
Flags = append(Flags, programFlags...)
}
func CheckRequired(ctx *cli.Context) error {
rollupConfig := ctx.GlobalString(RollupConfig.Name)
network := ctx.GlobalString(Network.Name)
if rollupConfig == "" && network == "" {
return fmt.Errorf("flag %s or %s is required", RollupConfig.Name, Network.Name)
}
if rollupConfig != "" && network != "" {
return fmt.Errorf("cannot specify both %s and %s", RollupConfig.Name, Network.Name)
}
return nil
}
package flags
import (
"reflect"
"strings"
"testing"
)
// TestUniqueFlags asserts that all flag names are unique, to avoid accidental conflicts between the many flags.
func TestUniqueFlags(t *testing.T) {
seenCLI := make(map[string]struct{})
for _, flag := range Flags {
name := flag.GetName()
if _, ok := seenCLI[name]; ok {
t.Errorf("duplicate flag %s", name)
continue
}
seenCLI[name] = struct{}{}
}
}
func TestCorrectEnvVarPrefix(t *testing.T) {
for _, flag := range Flags {
values := reflect.ValueOf(flag)
envVarValue := values.FieldByName("EnvVar")
if envVarValue == (reflect.Value{}) {
t.Errorf("Failed to find EnvVar for flag %v", flag.GetName())
continue
}
envVar := envVarValue.String()
if envVar[:len("OP_PROGRAM_")] != "OP_PROGRAM_" {
t.Errorf("Flag %v env var (%v) does not start with OP_PROGRAM_", flag.GetName(), envVar)
}
if strings.Contains(envVar, "__") {
t.Errorf("Flag %v env var (%v) has duplicate underscores", flag.GetName(), envVar)
}
}
}
package version
var (
Version = "v0.10.14"
Meta = "dev"
)
......@@ -11,6 +11,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics"
)
const Namespace = "op_proposer"
......@@ -22,6 +23,9 @@ type Metricer interface {
// Records all L1 and L2 block events
opmetrics.RefMetricer
// Record Tx metrics
txmetrics.TxMetricer
RecordL2BlocksProposed(l2ref eth.L2BlockRef)
}
......@@ -31,6 +35,7 @@ type Metrics struct {
factory opmetrics.Factory
opmetrics.RefMetrics
txmetrics.TxMetrics
Info prometheus.GaugeVec
Up prometheus.Gauge
......@@ -53,6 +58,7 @@ func NewMetrics(procName string) *Metrics {
factory: factory,
RefMetrics: opmetrics.MakeRefMetrics(ns, factory),
TxMetrics: txmetrics.MakeTxMetrics(ns, factory),
Info: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
......
......@@ -3,9 +3,13 @@ package metrics
import (
"github.com/ethereum-optimism/optimism/op-node/eth"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics"
)
type noopMetrics struct{ opmetrics.NoopRefMetrics }
type noopMetrics struct {
opmetrics.NoopRefMetrics
txmetrics.NoopTxMetrics
}
var NoopMetrics Metricer = new(noopMetrics)
......
......@@ -22,6 +22,7 @@ import (
type Config struct {
L2OutputOracleAddr common.Address
PollInterval time.Duration
NetworkTimeout time.Duration
TxManager txmgr.TxManager
L1Client *ethclient.Client
RollupClient *sources.RollupClient
......
......@@ -28,12 +28,6 @@ import (
"github.com/ethereum-optimism/optimism/op-service/txmgr"
)
const (
// defaultDialTimeout is default duration the service will wait on
// startup to make a connection to either the L1 or L2 backends.
defaultDialTimeout = 5 * time.Second
)
var supportedL2OutputVersion = eth.Bytes32{}
// Main is the entrypoint into the L2 Output Submitter. This method executes the
......@@ -48,7 +42,7 @@ func Main(version string, cliCtx *cli.Context) error {
m := metrics.NewMetrics("default")
l.Info("Initializing L2 Output Submitter")
proposerConfig, err := NewL2OutputSubmitterConfigFromCLIConfig(cfg, l)
proposerConfig, err := NewL2OutputSubmitterConfigFromCLIConfig(cfg, l, m)
if err != nil {
l.Error("Unable to create the L2 Output Submitter", "error", err)
return err
......@@ -138,12 +132,13 @@ type L2OutputSubmitter struct {
// This option is not necessary when higher proposal latency is acceptable and L1 is healthy.
allowNonFinalized bool
// How frequently to poll L2 for new finalized outputs
pollInterval time.Duration
pollInterval time.Duration
networkTimeout time.Duration
}
// NewL2OutputSubmitterFromCLIConfig creates a new L2 Output Submitter given the CLI Config
func NewL2OutputSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger, m metrics.Metricer) (*L2OutputSubmitter, error) {
proposerConfig, err := NewL2OutputSubmitterConfigFromCLIConfig(cfg, l)
proposerConfig, err := NewL2OutputSubmitterConfigFromCLIConfig(cfg, l, m)
if err != nil {
return nil, err
}
......@@ -151,17 +146,16 @@ func NewL2OutputSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger, m metrics.Me
}
// NewL2OutputSubmitterConfigFromCLIConfig creates the proposer config from the CLI config.
func NewL2OutputSubmitterConfigFromCLIConfig(cfg CLIConfig, l log.Logger) (*Config, error) {
func NewL2OutputSubmitterConfigFromCLIConfig(cfg CLIConfig, l log.Logger, m metrics.Metricer) (*Config, error) {
l2ooAddress, err := parseAddress(cfg.L2OOAddress)
if err != nil {
return nil, err
}
txManagerConfig, err := txmgr.NewConfig(cfg.TxMgrConfig, l)
txManager, err := txmgr.NewSimpleTxManager("proposer", l, m, cfg.TxMgrConfig)
if err != nil {
return nil, err
}
txManager := txmgr.NewSimpleTxManager("proposer", l, txManagerConfig)
// Connect to L1 and L2 providers. Perform these last since they are the most expensive.
ctx := context.Background()
......@@ -178,6 +172,7 @@ func NewL2OutputSubmitterConfigFromCLIConfig(cfg CLIConfig, l log.Logger) (*Conf
return &Config{
L2OutputOracleAddr: l2ooAddress,
PollInterval: cfg.PollInterval,
NetworkTimeout: cfg.TxMgrConfig.NetworkTimeout,
L1Client: l1Client,
RollupClient: rollupClient,
AllowNonFinalized: cfg.AllowNonFinalized,
......@@ -196,7 +191,7 @@ func NewL2OutputSubmitter(cfg Config, l log.Logger, m metrics.Metricer) (*L2Outp
return nil, err
}
cCtx, cCancel := context.WithTimeout(ctx, defaultDialTimeout)
cCtx, cCancel := context.WithTimeout(ctx, cfg.NetworkTimeout)
defer cCancel()
version, err := l2ooContract.Version(&bind.CallOpts{Context: cCtx})
if err != nil {
......@@ -227,6 +222,7 @@ func NewL2OutputSubmitter(cfg Config, l log.Logger, m metrics.Metricer) (*L2Outp
allowNonFinalized: cfg.AllowNonFinalized,
pollInterval: cfg.PollInterval,
networkTimeout: cfg.NetworkTimeout,
}, nil
}
......@@ -245,7 +241,7 @@ func (l *L2OutputSubmitter) Stop() {
// FetchNextOutputInfo gets the block number of the next proposal.
// It returns: the next block number, if the proposal should be made, error
func (l *L2OutputSubmitter) FetchNextOutputInfo(ctx context.Context) (*eth.OutputResponse, bool, error) {
cCtx, cancel := context.WithTimeout(ctx, defaultDialTimeout)
cCtx, cancel := context.WithTimeout(ctx, l.networkTimeout)
defer cancel()
callOpts := &bind.CallOpts{
From: l.txMgr.From(),
......@@ -257,7 +253,7 @@ func (l *L2OutputSubmitter) FetchNextOutputInfo(ctx context.Context) (*eth.Outpu
return nil, false, err
}
// Fetch the current L2 heads
cCtx, cancel = context.WithTimeout(ctx, defaultDialTimeout)
cCtx, cancel = context.WithTimeout(ctx, l.networkTimeout)
defer cancel()
status, err := l.rollupClient.SyncStatus(cCtx)
if err != nil {
......@@ -281,7 +277,7 @@ func (l *L2OutputSubmitter) FetchNextOutputInfo(ctx context.Context) (*eth.Outpu
}
func (l *L2OutputSubmitter) fetchOuput(ctx context.Context, block *big.Int) (*eth.OutputResponse, bool, error) {
ctx, cancel := context.WithTimeout(ctx, defaultDialTimeout)
ctx, cancel := context.WithTimeout(ctx, l.networkTimeout)
defer cancel()
output, err := l.rollupClient.OutputAtBlock(ctx, block.Uint64())
if err != nil {
......@@ -332,9 +328,8 @@ func (l *L2OutputSubmitter) sendTransaction(ctx context.Context, output *eth.Out
}
receipt, err := l.txMgr.Send(ctx, txmgr.TxCandidate{
TxData: data,
To: l.l2ooContractAddr,
To: &l.l2ooContractAddr,
GasLimit: 0,
From: l.txMgr.From(),
})
if err != nil {
return err
......
......@@ -3,6 +3,7 @@ package proposer
import (
"context"
"fmt"
"time"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/sources"
......@@ -11,11 +12,12 @@ import (
"github.com/ethereum/go-ethereum/rpc"
)
var defaultDialTimeout = 5 * time.Second
// dialEthClientWithTimeout attempts to dial the L1 provider using the provided
// URL. If the dial doesn't complete within defaultDialTimeout, this
// method will return an error.
func dialEthClientWithTimeout(ctx context.Context, url string) (*ethclient.Client, error) {
ctxt, cancel := context.WithTimeout(ctx, defaultDialTimeout)
defer cancel()
......
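The hunk above is truncated after the context setup. Below is a minimal sketch of how such a dial helper typically completes; the body after the timeout context is an assumption (a plain `ethclient.DialContext` call), not the code from this diff.

```go
package proposer

import (
	"context"
	"time"

	"github.com/ethereum/go-ethereum/ethclient"
)

var defaultDialTimeout = 5 * time.Second

// dialEthClientWithTimeout dials the provider at url, failing fast if the
// connection cannot be established within defaultDialTimeout.
func dialEthClientWithTimeout(ctx context.Context, url string) (*ethclient.Client, error) {
	ctxt, cancel := context.WithTimeout(ctx, defaultDialTimeout)
	defer cancel()
	// DialContext honors the deadline on ctxt, so a hung endpoint fails
	// after defaultDialTimeout instead of blocking startup indefinitely.
	return ethclient.DialContext(ctxt, url)
}
```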
......@@ -3,6 +3,7 @@ package txmgr
import (
"context"
"errors"
"fmt"
"math/big"
"time"
......@@ -22,10 +23,14 @@ const (
MnemonicFlagName = "mnemonic"
HDPathFlagName = "hd-path"
PrivateKeyFlagName = "private-key"
// Legacy TxMgr Flags
// TxMgr Flags (new + legacy + some shared flags)
NumConfirmationsFlagName = "num-confirmations"
SafeAbortNonceTooLowCountFlagName = "safe-abort-nonce-too-low-count"
ResubmissionTimeoutFlagName = "resubmission-timeout"
NetworkTimeoutFlagName = "network-timeout"
TxSendTimeoutFlagName = "txmgr.send-timeout"
TxNotInMempoolTimeoutFlagName = "txmgr.not-in-mempool-timeout"
ReceiptQueryIntervalFlagName = "txmgr.receipt-query-interval"
)
var (
......@@ -69,17 +74,41 @@ func CLIFlags(envPrefix string) []cli.Flag {
EnvVar: opservice.PrefixEnvVar(envPrefix, "NUM_CONFIRMATIONS"),
},
cli.Uint64Flag{
Name: "safe-abort-nonce-too-low-count",
Name: SafeAbortNonceTooLowCountFlagName,
Usage: "Number of ErrNonceTooLow observations required to give up on a tx at a particular nonce without receiving confirmation",
Value: 3,
EnvVar: opservice.PrefixEnvVar(envPrefix, "SAFE_ABORT_NONCE_TOO_LOW_COUNT"),
},
cli.DurationFlag{
Name: "resubmission-timeout",
Name: ResubmissionTimeoutFlagName,
Usage: "Duration we will wait before resubmitting a transaction to L1",
Value: 30 * time.Second,
Value: 48 * time.Second,
EnvVar: opservice.PrefixEnvVar(envPrefix, "RESUBMISSION_TIMEOUT"),
},
cli.DurationFlag{
Name: NetworkTimeoutFlagName,
Usage: "Timeout for all network operations",
Value: 2 * time.Second,
EnvVar: opservice.PrefixEnvVar(envPrefix, "NETWORK_TIMEOUT"),
},
cli.DurationFlag{
Name: TxSendTimeoutFlagName,
Usage: "Timeout for sending transactions. If 0 it is disabled.",
Value: 0,
EnvVar: opservice.PrefixEnvVar(envPrefix, "TXMGR_TX_SEND_TIMEOUT"),
},
cli.DurationFlag{
Name: TxNotInMempoolTimeoutFlagName,
Usage: "Timeout for aborting a tx send if the tx does not make it to the mempool.",
Value: 2 * time.Minute,
EnvVar: opservice.PrefixEnvVar(envPrefix, "TXMGR_TX_NOT_IN_MEMPOOL_TIMEOUT"),
},
cli.DurationFlag{
Name: ReceiptQueryIntervalFlagName,
Usage: "Frequency to poll for receipts",
Value: 12 * time.Second,
EnvVar: opservice.PrefixEnvVar(envPrefix, "TXMGR_RECEIPT_QUERY_INTERVAL"),
},
}, client.CLIFlags(envPrefix)...)
}
......@@ -95,6 +124,9 @@ type CLIConfig struct {
SafeAbortNonceTooLowCount uint64
ResubmissionTimeout time.Duration
ReceiptQueryInterval time.Duration
NetworkTimeout time.Duration
TxSendTimeout time.Duration
TxNotInMempoolTimeout time.Duration
}
func (m CLIConfig) Check() error {
......@@ -102,7 +134,22 @@ func (m CLIConfig) Check() error {
return errors.New("must provide a L1 RPC url")
}
if m.NumConfirmations == 0 {
return errors.New("num confirmations must not be 0")
return errors.New("NumConfirmations must not be 0")
}
if m.NetworkTimeout == 0 {
return errors.New("must provide NetworkTimeout")
}
if m.ResubmissionTimeout == 0 {
return errors.New("must provide ResubmissionTimeout")
}
if m.ReceiptQueryInterval == 0 {
return errors.New("must provide ReceiptQueryInterval")
}
if m.TxNotInMempoolTimeout == 0 {
return errors.New("must provide TxNotInMempoolTimeout")
}
if m.SafeAbortNonceTooLowCount == 0 {
return errors.New("SafeAbortNonceTooLowCount must not be 0")
}
if err := m.SignerCLIConfig.Check(); err != nil {
return err
......@@ -122,29 +169,33 @@ func ReadCLIConfig(ctx *cli.Context) CLIConfig {
NumConfirmations: ctx.GlobalUint64(NumConfirmationsFlagName),
SafeAbortNonceTooLowCount: ctx.GlobalUint64(SafeAbortNonceTooLowCountFlagName),
ResubmissionTimeout: ctx.GlobalDuration(ResubmissionTimeoutFlagName),
ReceiptQueryInterval: ctx.GlobalDuration(ReceiptQueryIntervalFlagName),
NetworkTimeout: ctx.GlobalDuration(NetworkTimeoutFlagName),
TxSendTimeout: ctx.GlobalDuration(TxSendTimeoutFlagName),
TxNotInMempoolTimeout: ctx.GlobalDuration(TxNotInMempoolTimeoutFlagName),
}
}
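For reference, a minimal sketch (not part of this diff) of building the expanded CLIConfig by hand and validating it with Check. The endpoint and values are illustrative defaults; key material and the signer sub-config are omitted, and whether the zero-value SignerCLIConfig passes Check is not shown in this hunk.

```go
package main

import (
	"log"
	"time"

	"github.com/ethereum-optimism/optimism/op-service/txmgr"
)

func main() {
	// Illustrative values; the required/optional split mirrors the Check()
	// rules in the hunk above (all durations except TxSendTimeout must be set).
	cfg := txmgr.CLIConfig{
		L1RPCURL:                  "http://localhost:8545", // hypothetical endpoint
		NumConfirmations:          10,
		SafeAbortNonceTooLowCount: 3,
		ResubmissionTimeout:       48 * time.Second,
		ReceiptQueryInterval:      12 * time.Second,
		NetworkTimeout:            2 * time.Second,
		TxNotInMempoolTimeout:     2 * time.Minute,
		// TxSendTimeout stays 0: the overall send is unbounded unless overridden.
		// PrivateKey / Mnemonic / SignerCLIConfig are omitted here; a real
		// deployment must supply key material as well.
	}
	if err := cfg.Check(); err != nil {
		log.Fatalf("invalid txmgr config: %v", err)
	}
}
```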
func NewConfig(cfg CLIConfig, l log.Logger) (Config, error) {
if err := cfg.Check(); err != nil {
return Config{}, err
return Config{}, fmt.Errorf("invalid config: %w", err)
}
networkTimeout := 2 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), networkTimeout)
ctx, cancel := context.WithTimeout(context.Background(), cfg.NetworkTimeout)
defer cancel()
l1, err := ethclient.DialContext(ctx, cfg.L1RPCURL)
if err != nil {
return Config{}, err
return Config{}, fmt.Errorf("could not dial eth client: %w", err)
}
ctx, cancel = context.WithTimeout(context.Background(), networkTimeout)
ctx, cancel = context.WithTimeout(context.Background(), cfg.NetworkTimeout)
defer cancel()
chainID, err := l1.ChainID(ctx)
if err != nil {
return Config{}, err
return Config{}, fmt.Errorf("could not dial fetch L1 chain ID: %w", err)
}
// Allow backwards compatible ways of specifying the HD path
hdPath := cfg.HDPath
if hdPath == "" && cfg.SequencerHDPath != "" {
hdPath = cfg.SequencerHDPath
......@@ -154,20 +205,17 @@ func NewConfig(cfg CLIConfig, l log.Logger) (Config, error) {
signerFactory, from, err := opcrypto.SignerFactoryFromConfig(l, cfg.PrivateKey, cfg.Mnemonic, hdPath, cfg.SignerCLIConfig)
if err != nil {
return Config{}, err
}
receiptQueryInterval := 30 * time.Second
if cfg.ReceiptQueryInterval != 0 {
receiptQueryInterval = cfg.ReceiptQueryInterval
return Config{}, fmt.Errorf("could not init signer: %w", err)
}
return Config{
Backend: l1,
ResubmissionTimeout: cfg.ResubmissionTimeout,
ChainID: chainID,
NetworkTimeout: networkTimeout,
ReceiptQueryInterval: receiptQueryInterval,
TxSendTimeout: cfg.TxSendTimeout,
TxNotInMempoolTimeout: cfg.TxNotInMempoolTimeout,
NetworkTimeout: cfg.NetworkTimeout,
ReceiptQueryInterval: cfg.ReceiptQueryInterval,
NumConfirmations: cfg.NumConfirmations,
SafeAbortNonceTooLowCount: cfg.SafeAbortNonceTooLowCount,
Signer: signerFactory(chainID),
......@@ -187,10 +235,16 @@ type Config struct {
// ChainID is the chain ID of the L1 chain.
ChainID *big.Int
// TxSendTimeout is how long to wait for sending a transaction.
// By default it is unbounded. If set, this is recommended to be at least 20 minutes.
TxSendTimeout time.Duration
// TxNotInMempoolTimeout is how long to wait before aborting a transaction send if the transaction does not
// make it to the mempool. If the tx is in the mempool, TxSendTimeout is used instead.
TxNotInMempoolTimeout time.Duration
// NetworkTimeout is the allowed duration for a single network request.
// This is intended to be used for network requests that can be replayed.
//
// If not set, this will default to 2 seconds.
NetworkTimeout time.Duration
// RequireQueryInterval is the interval at which the tx manager will
......
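The three duration fields documented above interact during a send. The sketch below is an illustrative guess at that layering (an optional outer deadline from TxSendTimeout, a per-RPC deadline from NetworkTimeout, and a mempool deadline that only matters until the first successful publication); it is not the txmgr's actual Send loop, and resubmission backoff is omitted.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// timeouts mirrors the three duration fields documented above; the field
// names are copied from Config, but the wiring below is an assumption.
type timeouts struct {
	TxSendTimeout         time.Duration // 0 = the overall send is unbounded
	TxNotInMempoolTimeout time.Duration // give up if nothing reaches the mempool
	NetworkTimeout        time.Duration // per-RPC budget for replayable requests
}

func send(ctx context.Context, t timeouts, publish func(context.Context) error) error {
	if t.TxSendTimeout != 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, t.TxSendTimeout)
		defer cancel()
	}
	mempoolDeadline := time.Now().Add(t.TxNotInMempoolTimeout)
	for {
		// Each publication attempt gets its own NetworkTimeout budget.
		callCtx, cancel := context.WithTimeout(ctx, t.NetworkTimeout)
		err := publish(callCtx)
		cancel()
		if err == nil {
			return nil // the tx reached the mempool
		}
		if time.Now().After(mempoolDeadline) {
			return errors.New("tx did not reach the mempool in time")
		}
		if ctx.Err() != nil {
			return ctx.Err() // overall TxSendTimeout (or caller cancel) hit
		}
	}
}

func main() {
	calls := 0
	err := send(context.Background(), timeouts{
		TxNotInMempoolTimeout: 2 * time.Minute,
		NetworkTimeout:        2 * time.Second,
	}, func(context.Context) error {
		calls++
		if calls < 3 {
			return errors.New("transient rpc failure")
		}
		return nil
	})
	fmt.Println("send result:", err) // send result: <nil>
}
```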
package metrics
import "github.com/ethereum/go-ethereum/core/types"
type NoopTxMetrics struct{}
func (*NoopTxMetrics) RecordL1GasFee(*types.Receipt) {}
package metrics
import (
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/prometheus/client_golang/prometheus"
)
type TxMetricer interface {
RecordL1GasFee(receipt *types.Receipt)
}
type TxMetrics struct {
TxL1GasFee prometheus.Gauge
}
var _ TxMetricer = (*TxMetrics)(nil)
func MakeTxMetrics(ns string, factory metrics.Factory) TxMetrics {
return TxMetrics{
TxL1GasFee: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "tx_fee_gwei",
Help: "L1 gas fee for transactions in GWEI",
Subsystem: "txmgr",
}),
}
}
func (t *TxMetrics) RecordL1GasFee(receipt *types.Receipt) {
t.TxL1GasFee.Set(float64(receipt.EffectiveGasPrice.Uint64() * receipt.GasUsed / params.GWei))
}
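The gauge above records the total L1 fee in gwei: effective gas price (in wei) times gas used, divided by params.GWei (1e9), with integer truncation before the float conversion. A small worked example with assumed receipt values:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Assumed receipt values: 30 gwei effective gas price, 21,000 gas used.
	effectiveGasPriceWei := uint64(30_000_000_000)
	gasUsed := uint64(21_000)
	// Same integer arithmetic as RecordL1GasFee: sub-gwei remainders truncate.
	feeGwei := float64(effectiveGasPriceWei * gasUsed / params.GWei)
	fmt.Println(feeGwei) // 630000 (gwei), i.e. 0.00063 ETH
}
```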
......@@ -3,6 +3,7 @@ package txmgr
import (
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
......@@ -12,48 +13,53 @@ import (
// this context, a txn may correspond to multiple different txn hashes due to
// varying gas prices, though we treat them all as the same logical txn. This
// struct is primarily used to determine whether or not the txmgr should abort a
// given txn and retry with a higher nonce.
// given txn.
type SendState struct {
minedTxs map[common.Hash]struct{}
nonceTooLowCount uint64
mu sync.RWMutex
minedTxs map[common.Hash]struct{}
mu sync.RWMutex
now func() time.Time
safeAbortNonceTooLowCount uint64
// Config
nonceTooLowCount uint64
txInMempoolDeadline time.Time // deadline to abort at if no transactions are in the mempool
// Counts of the different types of errors
successFullPublishCount uint64 // nil error => tx made it to the mempool
safeAbortNonceTooLowCount uint64 // nonce too low error
}
// NewSendState parameterizes a new SendState from the passed
// safeAbortNonceTooLowCount.
func NewSendState(safeAbortNonceTooLowCount uint64) *SendState {
// NewSendStateWithNow creates a new send state with the provided clock.
func NewSendStateWithNow(safeAbortNonceTooLowCount uint64, unableToSendTimeout time.Duration, now func() time.Time) *SendState {
if safeAbortNonceTooLowCount == 0 {
panic("txmgr: safeAbortNonceTooLowCount cannot be zero")
}
return &SendState{
minedTxs: make(map[common.Hash]struct{}),
nonceTooLowCount: 0,
safeAbortNonceTooLowCount: safeAbortNonceTooLowCount,
txInMempoolDeadline: now().Add(unableToSendTimeout),
now: now,
}
}
// NewSendState creates a new send state
func NewSendState(safeAbortNonceTooLowCount uint64, unableToSendTimeout time.Duration) *SendState {
return NewSendStateWithNow(safeAbortNonceTooLowCount, unableToSendTimeout, time.Now)
}
// ProcessSendError should be invoked with the error returned for each
// publication. It is safe to call this method with nil or arbitrary errors.
// Currently it only acts on errors containing the ErrNonceTooLow message.
func (s *SendState) ProcessSendError(err error) {
// Nothing to do.
if err == nil {
return
}
// Only concerned with ErrNonceTooLow.
if !strings.Contains(err.Error(), core.ErrNonceTooLow.Error()) {
return
}
s.mu.Lock()
defer s.mu.Unlock()
// Record this nonce too low observation.
s.nonceTooLowCount++
// Record the type of error
switch {
case err == nil:
s.successFullPublishCount++
case strings.Contains(err.Error(), core.ErrNonceTooLow.Error()):
s.nonceTooLowCount++
}
}
// TxMined records that the txn with txnHash has been mined and is await
......@@ -85,8 +91,9 @@ func (s *SendState) TxNotMined(txHash common.Hash) {
}
// ShouldAbortImmediately returns true if the txmgr should give up on trying a
// given txn with the target nonce. For now, this only happens if we see an
// extended period of getting ErrNonceTooLow without having a txn mined.
// given txn with the target nonce.
// This occurs when the set of errors recorded indicates that no further progress can be made
// on this transaction.
func (s *SendState) ShouldAbortImmediately() bool {
s.mu.RLock()
defer s.mu.RUnlock()
......@@ -96,9 +103,14 @@ func (s *SendState) ShouldAbortImmediately() bool {
return false
}
// Only abort if we've observed enough ErrNonceTooLow to meet our safe abort
// threshold.
return s.nonceTooLowCount >= s.safeAbortNonceTooLowCount
// If we have exceeded the nonce too low count, abort
if s.nonceTooLowCount >= s.safeAbortNonceTooLowCount ||
// If we have not published a transaction in the allotted time, abort
(s.successFullPublishCount == 0 && s.now().After(s.txInMempoolDeadline)) {
return true
}
return false
}
// IsWaitingForConfirmation returns true if we have at least one confirmation on
......
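A minimal sketch of the abort behaviour described above, using the exported SendState API from this diff: simulated nonce-too-low errors are fed into ProcessSendError until ShouldAbortImmediately flips. The threshold and timeout values are illustrative.

```go
package main

import (
	"fmt"
	"time"

	"github.com/ethereum-optimism/optimism/op-service/txmgr"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// Abort after 3 nonce-too-low observations, or after 2 minutes without
	// any transaction reaching the mempool.
	sendState := txmgr.NewSendState(3, 2*time.Minute)

	// Simulate three failed publications that all return ErrNonceTooLow.
	for i := 0; i < 3; i++ {
		sendState.ProcessSendError(core.ErrNonceTooLow)
		fmt.Println("abort?", sendState.ShouldAbortImmediately())
	}
	// Prints false, false, true: the third observation hits the threshold.
}
```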
......@@ -3,6 +3,7 @@ package txmgr_test
import (
"errors"
"testing"
"time"
"github.com/stretchr/testify/require"
......@@ -11,14 +12,18 @@ import (
"github.com/ethereum/go-ethereum/core"
)
const testSafeAbortNonceTooLowCount = 3
var (
testHash = common.HexToHash("0x01")
)
const testSafeAbortNonceTooLowCount = 3
func newSendState() *txmgr.SendState {
return txmgr.NewSendState(testSafeAbortNonceTooLowCount)
return newSendStateWithTimeout(time.Hour, time.Now)
}
func newSendStateWithTimeout(t time.Duration, now func() time.Time) *txmgr.SendState {
return txmgr.NewSendStateWithNow(testSafeAbortNonceTooLowCount, t, now)
}
func processNSendErrors(sendState *txmgr.SendState, err error, n int) {
......@@ -160,3 +165,27 @@ func TestSendStateIsNotWaitingForConfirmationAfterTxUnmined(t *testing.T) {
sendState.TxNotMined(testHash)
require.False(t, sendState.IsWaitingForConfirmation())
}
func stepClock(step time.Duration) func() time.Time {
i := 0
return func() time.Time {
var start time.Time
i += 1
return start.Add(time.Duration(i) * step)
}
}
// TestSendStateTimeoutAbort ensures that this will abort if it passes the tx pool timeout
// when no successful transactions have been recorded
func TestSendStateTimeoutAbort(t *testing.T) {
sendState := newSendStateWithTimeout(10*time.Millisecond, stepClock(20*time.Millisecond))
require.True(t, sendState.ShouldAbortImmediately(), "Should abort after timing out")
}
// TestSendStateNoTimeoutAbortIfPublishedTx ensures that this will not abort if there is
// a successful transaction send.
func TestSendStateNoTimeoutAbortIfPublishedTx(t *testing.T) {
sendState := newSendStateWithTimeout(10*time.Millisecond, stepClock(20*time.Millisecond))
sendState.ProcessSendError(nil)
require.False(t, sendState.ShouldAbortImmediately(), "Should not abort if the transaction was published successfully")
}
......@@ -3,6 +3,7 @@ package txmgr
import (
"context"
"errors"
"fmt"
"math/big"
"sync"
"testing"
......@@ -11,6 +12,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/txmgr/metrics"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
......@@ -20,6 +23,10 @@ import (
type sendTransactionFunc func(ctx context.Context, tx *types.Transaction) error
func testSendState() *SendState {
return NewSendState(100, time.Hour)
}
// testHarness houses the necessary resources to test the SimpleTxManager.
type testHarness struct {
cfg Config
......@@ -34,7 +41,14 @@ func newTestHarnessWithConfig(t *testing.T, cfg Config) *testHarness {
g := newGasPricer(3)
backend := newMockBackend(g)
cfg.Backend = backend
mgr := NewSimpleTxManager("TEST", testlog.Logger(t, log.LvlCrit), cfg)
mgr := &SimpleTxManager{
chainID: cfg.ChainID,
name: "TEST",
cfg: cfg,
backend: cfg.Backend,
l: testlog.Logger(t, log.LvlCrit),
metr: &metrics.NoopTxMetrics{},
}
return &testHarness{
cfg: cfg,
......@@ -53,11 +67,9 @@ func newTestHarness(t *testing.T) *testHarness {
// createTxCandidate creates a mock [TxCandidate].
func (h testHarness) createTxCandidate() TxCandidate {
inbox := common.HexToAddress("0x42000000000000000000000000000000000000ff")
sender := common.HexToAddress("0xdeadbeef")
return TxCandidate{
To: inbox,
To: &inbox,
TxData: []byte{0x00, 0x01, 0x02},
From: sender,
GasLimit: uint64(1337),
}
}
......@@ -68,6 +80,7 @@ func configWithNumConfs(numConfirmations uint64) Config {
ReceiptQueryInterval: 50 * time.Millisecond,
NumConfirmations: numConfirmations,
SafeAbortNonceTooLowCount: 3,
TxNotInMempoolTimeout: 1 * time.Hour,
Signer: func(ctx context.Context, from common.Address, tx *types.Transaction) (*types.Transaction, error) {
return tx, nil
},
......@@ -530,7 +543,7 @@ func TestWaitMinedReturnsReceiptOnFirstSuccess(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
receipt, err := h.mgr.waitMined(ctx, tx, nil)
receipt, err := h.mgr.waitMined(ctx, tx, testSendState())
require.Nil(t, err)
require.NotNil(t, receipt)
require.Equal(t, receipt.TxHash, txHash)
......@@ -549,7 +562,7 @@ func TestWaitMinedCanBeCanceled(t *testing.T) {
// Create an unimined tx.
tx := types.NewTx(&types.LegacyTx{})
receipt, err := h.mgr.waitMined(ctx, tx, nil)
receipt, err := h.mgr.waitMined(ctx, tx, NewSendState(10, time.Hour))
require.Equal(t, err, context.DeadlineExceeded)
require.Nil(t, receipt)
}
......@@ -570,7 +583,7 @@ func TestWaitMinedMultipleConfs(t *testing.T) {
txHash := tx.Hash()
h.backend.mine(&txHash, new(big.Int))
receipt, err := h.mgr.waitMined(ctx, tx, nil)
receipt, err := h.mgr.waitMined(ctx, tx, NewSendState(10, time.Hour))
require.Equal(t, err, context.DeadlineExceeded)
require.Nil(t, receipt)
......@@ -579,24 +592,19 @@ func TestWaitMinedMultipleConfs(t *testing.T) {
// Mine an empty block, tx should now be confirmed.
h.backend.mine(nil, nil)
receipt, err = h.mgr.waitMined(ctx, tx, nil)
receipt, err = h.mgr.waitMined(ctx, tx, NewSendState(10, time.Hour))
require.Nil(t, err)
require.NotNil(t, receipt)
require.Equal(t, txHash, receipt.TxHash)
}
// TestManagerPanicOnZeroConfs ensures that the NewSimpleTxManager will panic
// TestManagerErrsOnZeroConfs ensures that the NewSimpleTxManager will error
// when attempting to configure with NumConfirmations set to zero.
func TestManagerPanicOnZeroConfs(t *testing.T) {
func TestManagerErrsOnZeroConfs(t *testing.T) {
t.Parallel()
defer func() {
if r := recover(); r == nil {
t.Fatal("NewSimpleTxManager should panic when using zero conf")
}
}()
_ = newTestHarnessWithConfig(t, configWithNumConfs(0))
_, err := NewSimpleTxManager("TEST", testlog.Logger(t, log.LvlCrit), &metrics.NoopTxMetrics{}, CLIConfig{})
require.Error(t, err)
}
// failingBackend implements ReceiptSource, returning a failure on the
......@@ -692,7 +700,7 @@ func TestWaitMinedReturnsReceiptAfterFailure(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
receipt, err := mgr.waitMined(ctx, tx, nil)
receipt, err := mgr.waitMined(ctx, tx, testSendState())
require.Nil(t, err)
require.NotNil(t, receipt)
require.Equal(t, receipt.TxHash, txHash)
......@@ -724,8 +732,7 @@ func doGasPriceIncrease(t *testing.T, txTipCap, txFeeCap, newTip, newBaseFee int
GasTipCap: big.NewInt(txTipCap),
GasFeeCap: big.NewInt(txFeeCap),
})
newTx, err := mgr.IncreaseGasPrice(context.Background(), tx)
require.NoError(t, err)
newTx := mgr.increaseGasPrice(context.Background(), tx)
return tx, newTx
}
......@@ -831,11 +838,32 @@ func TestIncreaseGasPriceNotExponential(t *testing.T) {
// Run IncreaseGasPrice a bunch of times in a row to simulate a very fast resubmit loop.
for i := 0; i < 20; i++ {
ctx := context.Background()
newTx, err := mgr.IncreaseGasPrice(ctx, tx)
require.NoError(t, err)
newTx := mgr.increaseGasPrice(ctx, tx)
require.True(t, newTx.GasFeeCap().Cmp(feeCap) == 0, "new tx fee cap must be equal L1")
require.True(t, newTx.GasTipCap().Cmp(borkedBackend.gasTip) == 0, "new tx tip must be equal L1")
tx = newTx
}
}
func TestErrStringMatch(t *testing.T) {
tests := []struct {
err error
target error
match bool
}{
{err: nil, target: nil, match: true},
{err: errors.New("exists"), target: nil, match: false},
{err: nil, target: errors.New("exists"), match: false},
{err: errors.New("exact match"), target: errors.New("exact match"), match: true},
{err: errors.New("partial: match"), target: errors.New("match"), match: true},
}
for i, test := range tests {
i := i
test := test
t.Run(fmt.Sprint(i), func(t *testing.T) {
require.Equal(t, test.match, errStringMatch(test.err, test.target))
})
}
}
......@@ -123,7 +123,6 @@ services:
OP_BATCHER_L1_ETH_RPC: http://l1:8545
OP_BATCHER_L2_ETH_RPC: http://l2:8545
OP_BATCHER_ROLLUP_RPC: http://op-node:8545
TX_MANAGER_TIMEOUT: 10m
OFFLINE_GAS_ESTIMATION: false
OP_BATCHER_MAX_CHANNEL_DURATION: 1
OP_BATCHER_MAX_L1_TX_SIZE_BYTES: 120000
......
......@@ -418,7 +418,7 @@ SequencerFeeVault_Test:test_withdraw_succeeds() (gas: 163228)
SetPrevBaseFee_Test:test_setPrevBaseFee_succeeds() (gas: 11515)
StandardBridge_Stateless_Test:test_isCorrectTokenPair_succeeds() (gas: 49936)
StandardBridge_Stateless_Test:test_isOptimismMintableERC20_succeeds() (gas: 33072)
SystemConfig_Initialize_TestFail:test_initialize_lowGasLimit_reverts() (gas: 148858)
SystemConfig_Initialize_TestFail:test_initialize_lowGasLimit_reverts() (gas: 148848)
SystemConfig_Setters_TestFail:test_setBatcherHash_notOwner_reverts() (gas: 10546)
SystemConfig_Setters_TestFail:test_setGasConfig_notOwner_reverts() (gas: 10622)
SystemConfig_Setters_TestFail:test_setGasLimit_notOwner_reverts() (gas: 10615)
......@@ -427,6 +427,6 @@ SystemConfig_Setters_TestFail:test_setResourceConfig_badPrecision_reverts() (gas
SystemConfig_Setters_TestFail:test_setResourceConfig_lowGasLimit_reverts() (gas: 16082)
SystemConfig_Setters_TestFail:test_setResourceConfig_notOwner_reverts() (gas: 11790)
SystemConfig_Setters_TestFail:test_setResourceConfig_zeroDenominator_reverts() (gas: 13039)
SystemConfig_Setters_TestFail:test_setUnsafeBlockSigner_notOwner_reverts() (gas: 10639)
SystemConfig_Setters_TestFail:test_setUnsafeBlockSigner_notOwner_reverts() (gas: 10616)
TransferOnionTest:test_constructor_succeeds() (gas: 564855)
TransferOnionTest:test_unwrap_succeeds() (gas: 724958)
\ No newline at end of file
......@@ -92,8 +92,10 @@ yarn echidna:aliasing
#### Configuration
1. Create or modify a file `<network-name>.json` inside of the [`deploy-config`](./deploy-config/) folder.
1. Create or modify a file `<network-name>.ts` inside of the [`deploy-config`](./deploy-config/) folder.
2. Fill out this file according to the `deployConfigSpec` located inside of [`hardhat.config.ts`](./hardhat.config.ts)
3. Optionally, run `npx hardhat generate-deploy-config --network <network-name>` to generate the associated JSON
file. This step is required if using `op-chain-ops`.
#### Execution
......
......@@ -42,13 +42,6 @@ contract SystemConfig is OwnableUpgradeable, Semver {
*/
bytes32 public constant UNSAFE_BLOCK_SIGNER_SLOT = keccak256("systemconfig.unsafeblocksigner");
/**
* @notice Minimum gas limit. This should not be lower than the maximum deposit gas resource
* limit in the ResourceMetering contract used by OptimismPortal, to ensure the L2
* block always has sufficient gas to process deposits.
*/
uint64 public constant MINIMUM_GAS_LIMIT = 8_000_000;
/**
* @notice Fixed L2 gas overhead. Used as part of the L2 fee calculation.
*/
......@@ -87,7 +80,7 @@ contract SystemConfig is OwnableUpgradeable, Semver {
event ConfigUpdate(uint256 indexed version, UpdateType indexed updateType, bytes data);
/**
* @custom:semver 1.1.0
* @custom:semver 1.2.0
*
* @param _owner Initial owner of the contract.
* @param _overhead Initial overhead value.
......@@ -105,7 +98,7 @@ contract SystemConfig is OwnableUpgradeable, Semver {
uint64 _gasLimit,
address _unsafeBlockSigner,
ResourceMetering.ResourceConfig memory _config
) Semver(1, 1, 0) {
) Semver(1, 2, 0) {
initialize({
_owner: _owner,
_overhead: _overhead,
......
......@@ -12,9 +12,9 @@ import { FeeVault } from "../universal/FeeVault.sol";
*/
contract BaseFeeVault is FeeVault, Semver {
/**
* @custom:semver 1.0.0
* @custom:semver 1.1.0
*
* @param _recipient Address that will receive the accumulated fees.
*/
constructor(address _recipient) FeeVault(_recipient, 10 ether) Semver(1, 0, 0) {}
constructor(address _recipient) FeeVault(_recipient, 10 ether) Semver(1, 1, 0) {}
}
......@@ -12,9 +12,9 @@ import { FeeVault } from "../universal/FeeVault.sol";
*/
contract L1FeeVault is FeeVault, Semver {
/**
* @custom:semver 1.0.0
* @custom:semver 1.1.0
*
* @param _recipient Address that will receive the accumulated fees.
*/
constructor(address _recipient) FeeVault(_recipient, 10 ether) Semver(1, 0, 0) {}
constructor(address _recipient) FeeVault(_recipient, 10 ether) Semver(1, 1, 0) {}
}
......@@ -20,7 +20,7 @@ import { Semver } from "../universal/Semver.sol";
*/
contract L2ERC721Bridge is ERC721Bridge, Semver {
/**
* @custom:semver 1.0.0
* @custom:semver 1.1.0
*
* @param _messenger Address of the CrossDomainMessenger on this network.
* @param _otherBridge Address of the ERC721 bridge on the other network.
......
......@@ -13,11 +13,11 @@ import { FeeVault } from "../universal/FeeVault.sol";
*/
contract SequencerFeeVault is FeeVault, Semver {
/**
* @custom:semver 1.0.0
* @custom:semver 1.1.0
*
* @param _recipient Address that will receive the accumulated fees.
*/
constructor(address _recipient) FeeVault(_recipient, 10 ether) Semver(1, 0, 0) {}
constructor(address _recipient) FeeVault(_recipient, 10 ether) Semver(1, 1, 0) {}
/**
* @custom:legacy
......
......@@ -176,7 +176,7 @@ contract SystemConfig_Setters_Test is SystemConfig_Init {
}
function testFuzz_setGasLimit_succeeds(uint64 newGasLimit) external {
uint64 minimumGasLimit = sysConf.MINIMUM_GAS_LIMIT();
uint64 minimumGasLimit = sysConf.minimumGasLimit();
newGasLimit = uint64(
bound(uint256(newGasLimit), uint256(minimumGasLimit), uint256(type(uint64).max))
);
......
......@@ -42,6 +42,6 @@ contract SystemConfig_GasLimitLowerBound_Invariant is Test {
* than the hard-coded lower bound.
*/
function invariant_gasLimitLowerBound() external {
assertTrue(config.gasLimit() >= config.MINIMUM_GAS_LIMIT());
assertTrue(config.gasLimit() >= config.minimumGasLimit());
}
}
{
"finalSystemOwner": "0x9BA6e03D8B90dE867373Db8cF1A58d2F7F006b3A",
"controller": "0x78339d822c23d943e4a2d4c3dd5408f66e6d662d",
"portalGuardian": "0x78339d822c23d943e4a2d4c3dd5408f66e6d662d",
"l1StartingBlockTag": "0x126e52a0cc0ae18948f567ee9443f4a8f0db67c437706e35baee424eb314a0d0",
"l1ChainID": 1,
"l2ChainID": 10,
"l2BlockTime": 2,
"maxSequencerDrift": 600,
"sequencerWindowSize": 3600,
"channelTimeout": 300,
"p2pSequencerAddress": "0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65",
"batchInboxAddress": "0xff00000000000000000000000000000000000010",
"batchSenderAddress": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8",
"l2OutputOracleSubmissionInterval": 20,
"l2OutputOracleStartingTimestamp": 1679069195,
"l2OutputOracleStartingBlockNumber": 79149704,
"l2OutputOracleProposer": "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC",
"l2OutputOracleChallenger": "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC",
"finalizationPeriodSeconds": 2,
"proxyAdminOwner": "0x90F79bf6EB2c4f870365E785982E1f101E93b906",
"baseFeeVaultRecipient": "0x90F79bf6EB2c4f870365E785982E1f101E93b906",
"l1FeeVaultRecipient": "0x90F79bf6EB2c4f870365E785982E1f101E93b906",
"sequencerFeeVaultRecipient": "0x90F79bf6EB2c4f870365E785982E1f101E93b906",
"governanceTokenName": "Optimism",
"governanceTokenSymbol": "OP",
"governanceTokenOwner": "0x90F79bf6EB2c4f870365E785982E1f101E93b906",
"l2GenesisBlockGasLimit": "0x17D7840",
"l2GenesisBlockCoinbase": "0x4200000000000000000000000000000000000011",
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
"gasPriceOracleOverhead": 2100,
"gasPriceOracleScalar": 1000000,
"eip1559Denominator": 50,
"eip1559Elasticity": 10,
"l2GenesisRegolithTimeOffset": "0x0"
}
\ No newline at end of file
import { DeployConfig } from '../src/deploy-config'
// NOTE: The 'mainnet' network is currently being used for bedrock migration rehearsals.
// The system configured below is not yet live on mainnet, and many of the addresses used are
// unsafe for a production system.
const config: DeployConfig = {
finalSystemOwner: '0x9BA6e03D8B90dE867373Db8cF1A58d2F7F006b3A',
controller: '0x78339d822c23d943e4a2d4c3dd5408f66e6d662d',
portalGuardian: '0x78339d822c23d943e4a2d4c3dd5408f66e6d662d',
l1StartingBlockTag:
'0x126e52a0cc0ae18948f567ee9443f4a8f0db67c437706e35baee424eb314a0d0',
l1ChainID: 1,
l2ChainID: 10,
l2BlockTime: 2,
maxSequencerDrift: 600,
sequencerWindowSize: 3600,
channelTimeout: 300,
p2pSequencerAddress: '0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65',
batchInboxAddress: '0xff00000000000000000000000000000000000010',
batchSenderAddress: '0x70997970C51812dc3A010C7d01b50e0d17dc79C8',
l2OutputOracleSubmissionInterval: 20,
l2OutputOracleStartingTimestamp: 1679069195,
l2OutputOracleStartingBlockNumber: 79149704,
l2OutputOracleProposer: '0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC',
l2OutputOracleChallenger: '0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC',
finalizationPeriodSeconds: 2,
proxyAdminOwner: '0x90F79bf6EB2c4f870365E785982E1f101E93b906',
baseFeeVaultRecipient: '0x90F79bf6EB2c4f870365E785982E1f101E93b906',
l1FeeVaultRecipient: '0x90F79bf6EB2c4f870365E785982E1f101E93b906',
sequencerFeeVaultRecipient: '0x90F79bf6EB2c4f870365E785982E1f101E93b906',
governanceTokenName: 'Optimism',
governanceTokenSymbol: 'OP',
governanceTokenOwner: '0x90F79bf6EB2c4f870365E785982E1f101E93b906',
l2GenesisBlockGasLimit: '0x17D7840',
l2GenesisBlockCoinbase: '0x4200000000000000000000000000000000000011',
l2GenesisBlockBaseFeePerGas: '0x3b9aca00',
gasPriceOracleOverhead: 2100,
gasPriceOracleScalar: 1000000,
eip1559Denominator: 50,
eip1559Elasticity: 10,
l2GenesisRegolithTimeOffset: '0x0',
}
export default config
......@@ -17,6 +17,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['ProxyAdmin', 'setup']
deployFn.tags = ['ProxyAdmin', 'setup', 'l1']
export default deployFn
......@@ -17,6 +17,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['AddressManager', 'setup']
deployFn.tags = ['AddressManager', 'setup', 'l1']
export default deployFn
......@@ -16,6 +16,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['L1StandardBridgeProxy', 'setup']
deployFn.tags = ['L1StandardBridgeProxy', 'setup', 'l1']
export default deployFn
......@@ -20,6 +20,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['L2OutputOracleProxy', 'setup']
deployFn.tags = ['L2OutputOracleProxy', 'setup', 'l1']
export default deployFn
......@@ -13,6 +13,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['L1CrossDomainMessengerProxy', 'setup']
deployFn.tags = ['L1CrossDomainMessengerProxy', 'setup', 'l1']
export default deployFn
......@@ -20,6 +20,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['OptimismPortalProxy', 'setup']
deployFn.tags = ['OptimismPortalProxy', 'setup', 'l1']
export default deployFn
......@@ -20,6 +20,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['OptimismMintableERC20FactoryProxy', 'setup']
deployFn.tags = ['OptimismMintableERC20FactoryProxy', 'setup', 'l1']
export default deployFn
......@@ -16,6 +16,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['L1ERC721BridgeProxy', 'setup']
deployFn.tags = ['L1ERC721BridgeProxy', 'setup', 'l1']
export default deployFn
......@@ -20,6 +20,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['SystemConfigProxy', 'setup']
deployFn.tags = ['SystemConfigProxy', 'setup', 'l1']
export default deployFn
......@@ -16,6 +16,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['SystemDictatorProxy', 'setup']
deployFn.tags = ['SystemDictatorProxy', 'setup', 'l1']
export default deployFn
......@@ -26,6 +26,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['L1CrossDomainMessengerImpl', 'setup']
deployFn.tags = ['L1CrossDomainMessengerImpl', 'setup', 'l1']
export default deployFn
......@@ -32,6 +32,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['L1StandardBridgeImpl', 'setup']
deployFn.tags = ['L1StandardBridgeImpl', 'setup', 'l1']
export default deployFn
......@@ -60,6 +60,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['L2OutputOracleImpl', 'setup']
deployFn.tags = ['L2OutputOracleImpl', 'setup', 'l1']
export default deployFn
......@@ -67,6 +67,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['OptimismPortalImpl', 'setup']
deployFn.tags = ['OptimismPortalImpl', 'setup', 'l1']
export default deployFn
......@@ -26,6 +26,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['OptimismMintableERC20FactoryImpl', 'setup']
deployFn.tags = ['OptimismMintableERC20FactoryImpl', 'setup', 'l1']
export default deployFn
......@@ -27,6 +27,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['L1ERC721BridgeImpl', 'setup']
deployFn.tags = ['L1ERC721BridgeImpl', 'setup', 'l1']
export default deployFn
......@@ -26,6 +26,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['PortalSenderImpl', 'setup']
deployFn.tags = ['PortalSenderImpl', 'setup', 'l1']
export default deployFn
......@@ -66,6 +66,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['SystemConfigImpl', 'setup']
deployFn.tags = ['SystemConfigImpl', 'setup', 'l1']
export default deployFn
......@@ -12,6 +12,6 @@ const deployFn: DeployFunction = async (hre) => {
})
}
deployFn.tags = ['SystemDictatorImpl', 'setup']
deployFn.tags = ['SystemDictatorImpl', 'setup', 'l1']
export default deployFn
......@@ -200,6 +200,6 @@ const deployFn: DeployFunction = async (hre) => {
}
}
deployFn.tags = ['SystemDictatorImpl', 'setup']
deployFn.tags = ['SystemDictatorImpl', 'setup', 'l1']
export default deployFn
......@@ -279,14 +279,14 @@ const deployFn: DeployFunction = async (hre) => {
need to restart the system, run exit1() followed by finalize().
`,
checks: async () => {
assert(
(await AddressManager.getAddress('OVM_L1CrossDomainMessenger')) ===
ethers.constants.AddressZero
const messenger = await AddressManager.getAddress(
'OVM_L1CrossDomainMessenger'
)
assert(messenger === ethers.constants.AddressZero)
},
})
}
deployFn.tags = ['SystemDictatorSteps', 'phase1']
deployFn.tags = ['SystemDictatorSteps', 'phase1', 'l1']
export default deployFn
......@@ -116,10 +116,8 @@ const deployFn: DeployFunction = async (hre) => {
'BondManager',
]
for (const dead of deads) {
assert(
(await AddressManager.getAddress(dead)) ===
ethers.constants.AddressZero
)
const addr = await AddressManager.getAddress(dead)
assert(addr === ethers.constants.AddressZero)
}
},
})
......@@ -372,6 +370,6 @@ const deployFn: DeployFunction = async (hre) => {
}
}
deployFn.tags = ['SystemDictatorSteps', 'phase2']
deployFn.tags = ['SystemDictatorSteps', 'phase2', 'l1']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { ethers } from 'ethers'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
await deploy({
hre,
name: 'L1Block',
args: [],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'DEPOSITOR_ACCOUNT',
ethers.utils.getAddress('0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001')
)
},
})
}
deployFn.tags = ['L1BlockImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { ethers } from 'ethers'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
const Artifact__L1CrossDomainMessenger = await hre.companionNetworks[
'l1'
].deployments.get('L1CrossDomainMessengerProxy')
await deploy({
hre,
name: 'L2CrossDomainMessenger',
args: [Artifact__L1CrossDomainMessenger.address],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'OTHER_MESSENGER',
ethers.utils.getAddress(Artifact__L1CrossDomainMessenger.address)
)
},
})
}
deployFn.tags = ['L2CrossDomainMessengerImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { ethers } from 'ethers'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
const Artifact__L1StandardBridge = await hre.companionNetworks[
'l1'
].deployments.get('L1StandardBridgeProxy')
await deploy({
hre,
name: 'L2StandardBridge',
args: [Artifact__L1StandardBridge.address],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'OTHER_BRIDGE',
ethers.utils.getAddress(Artifact__L1StandardBridge.address)
)
},
})
}
deployFn.tags = ['L2StandardBridgeImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
await deploy({
hre,
name: 'L2ToL1MessagePasser',
args: [],
postDeployAction: async (contract) => {
await assertContractVariable(contract, 'MESSAGE_VERSION', 1)
},
})
}
deployFn.tags = ['L2ToL1MessagePasserImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { ethers } from 'ethers'
import { predeploys } from '../src/constants'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
const Artifact__L1ERC721Bridge = await hre.companionNetworks[
'l1'
].deployments.get('L1ERC721BridgeProxy')
await deploy({
hre,
name: 'L2ERC721Bridge',
args: [predeploys.L2CrossDomainMessenger, Artifact__L1ERC721Bridge.address],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'MESSENGER',
ethers.utils.getAddress(predeploys.L2CrossDomainMessenger)
)
await assertContractVariable(
contract,
'OTHER_BRIDGE',
ethers.utils.getAddress(Artifact__L1ERC721Bridge.address)
)
},
})
}
deployFn.tags = ['L2ERC721BridgeImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
await deploy({
hre,
name: 'GasPriceOracle',
args: [],
postDeployAction: async (contract) => {
await assertContractVariable(contract, 'DECIMALS', 6)
},
})
}
deployFn.tags = ['GasPriceOracle', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { ethers } from 'ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
const l1 = hre.network.companionNetworks['l1']
const deployConfig = hre.getDeployConfig(l1)
const sequencerFeeVaultRecipient = deployConfig.sequencerFeeVaultRecipient
if (sequencerFeeVaultRecipient === ethers.constants.AddressZero) {
throw new Error(`SequencerFeeVault RECIPIENT undefined`)
}
await deploy({
hre,
name: 'SequencerFeeVault',
args: [sequencerFeeVaultRecipient],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'RECIPIENT',
ethers.utils.getAddress(sequencerFeeVaultRecipient)
)
},
})
}
deployFn.tags = ['SequencerFeeVaultImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { ethers } from 'ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
const l1 = hre.network.companionNetworks['l1']
const deployConfig = hre.getDeployConfig(l1)
const baseFeeVaultRecipient = deployConfig.baseFeeVaultRecipient
if (baseFeeVaultRecipient === ethers.constants.AddressZero) {
throw new Error('BaseFeeVault RECIPIENT undefined')
}
await deploy({
hre,
name: 'BaseFeeVault',
args: [baseFeeVaultRecipient],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'RECIPIENT',
ethers.utils.getAddress(baseFeeVaultRecipient)
)
},
})
}
deployFn.tags = ['BaseFeeVaultImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { ethers } from 'ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
const l1 = hre.network.companionNetworks['l1']
const deployConfig = hre.getDeployConfig(l1)
const l1FeeVaultRecipient = deployConfig.l1FeeVaultRecipient
if (l1FeeVaultRecipient === ethers.constants.AddressZero) {
throw new Error('L1FeeVault RECIPIENT undefined')
}
await deploy({
hre,
name: 'L1FeeVault',
args: [l1FeeVaultRecipient],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'RECIPIENT',
ethers.utils.getAddress(l1FeeVaultRecipient)
)
},
})
}
deployFn.tags = ['L1FeeVaultImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { ethers } from 'ethers'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
import { predeploys } from '../src/constants'
const deployFn: DeployFunction = async (hre) => {
await deploy({
hre,
name: 'OptimismMintableERC20Factory',
args: [predeploys.L2StandardBridge],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'BRIDGE',
ethers.utils.getAddress(predeploys.L2StandardBridge)
)
},
})
}
deployFn.tags = ['OptimismMintableERC20FactoryImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { ethers } from 'ethers'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
import { predeploys } from '../src/constants'
const deployFn: DeployFunction = async (hre) => {
const OptimismMintableERC721Factory = await hre.ethers.getContractAt(
'OptimismMintableERC721Factory',
predeploys.OptimismMintableERC721Factory
)
const remoteChainId = await OptimismMintableERC721Factory.REMOTE_CHAIN_ID()
await deploy({
hre,
name: 'OptimismMintableERC721Factory',
args: [predeploys.L2StandardBridge, remoteChainId],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'BRIDGE',
ethers.utils.getAddress(predeploys.L2StandardBridge)
)
await assertContractVariable(contract, 'REMOTE_CHAIN_ID', remoteChainId)
},
})
}
deployFn.tags = ['OptimismMintableERC721FactoryImpl', 'l2']
export default deployFn