Commit 60d9e4f6 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into aj/fpp-l2-fetcher

parents eff12946 b9f8f3ce
---
'@eth-optimism/hardhat-deploy-config': patch
---
Add getter for another network's deploy config
......@@ -333,12 +333,6 @@ jobs:
command: |
yarn lint:check || echo "export LINT_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: slither
command: |
slither --version
yarn slither || exit 0
working_directory: packages/contracts-bedrock
- run:
name: gas snapshot
command: |
......@@ -382,6 +376,26 @@ jobs:
exit 1
fi
contracts-bedrock-slither:
docker:
- image: ethereumoptimism/ci-builder:latest
resource_class: xlarge
steps:
- checkout
- attach_workspace: { at: "." }
- restore_cache:
name: Restore Yarn Package Cache
keys:
- yarn-packages-v2-{{ checksum "yarn.lock" }}
- check-changed:
patterns: contracts-bedrock,hardhat-deploy-config
- run:
name: slither
command: |
slither --version
yarn slither
working_directory: packages/contracts-bedrock
contracts-bedrock-validate-spaces:
docker:
- image: ethereumoptimism/ci-builder:latest
......@@ -995,6 +1009,9 @@ workflows:
- contracts-bedrock-checks:
requires:
- yarn-monorepo
- contracts-bedrock-slither:
requires:
- yarn-monorepo
- contracts-bedrock-validate-spaces:
requires:
- yarn-monorepo
......
......@@ -75,6 +75,7 @@ jobs:
uses: changesets/action@v1
id: changesets
with:
createGithubReleases: false
publish: yarn changeset publish --tag canary
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
......
......@@ -67,6 +67,7 @@ jobs:
uses: changesets/action@v1
id: changesets
with:
createGithubReleases: false
publish: yarn release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
......
......@@ -16,6 +16,7 @@ require (
github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/hashicorp/golang-lru/v2 v2.0.1
github.com/holiman/uint256 v1.2.0
github.com/ipfs/go-datastore v0.6.0
github.com/ipfs/go-ds-leveldb v0.5.0
......@@ -86,7 +87,6 @@ require (
github.com/graph-gophers/graphql-go v1.3.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-bexpr v0.1.11 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/huin/goupnp v1.1.0 // indirect
github.com/influxdata/influxdb v1.8.3 // indirect
......
......@@ -69,18 +69,17 @@ func NewBatchSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger, m metrics.Metri
return nil, fmt.Errorf("querying rollup config: %w", err)
}
txManagerConfig, err := txmgr.NewConfig(cfg.TxMgrConfig, l)
txManager, err := txmgr.NewSimpleTxManager("batcher", l, m, cfg.TxMgrConfig)
if err != nil {
return nil, err
}
txManager := txmgr.NewSimpleTxManager("batcher", l, m, txManagerConfig)
batcherCfg := Config{
L1Client: l1Client,
L2Client: l2Client,
RollupNode: rollupClient,
PollInterval: cfg.PollInterval,
NetworkTimeout: txManagerConfig.NetworkTimeout,
NetworkTimeout: cfg.TxMgrConfig.NetworkTimeout,
TxManager: txManager,
Rollup: rcfg,
Channel: ChannelConfig{
......@@ -356,9 +355,8 @@ func (l *BatchSubmitter) sendTransaction(ctx context.Context, data []byte) (*typ
// Send the transaction through the txmgr
if receipt, err := l.txMgr.Send(ctx, txmgr.TxCandidate{
To: l.Rollup.BatchInboxAddress,
To: &l.Rollup.BatchInboxAddress,
TxData: data,
From: l.txMgr.From(),
GasLimit: intrinsicGas,
}); err != nil {
l.log.Warn("unable to publish tx", "err", err, "data_size", len(data))
......
package main
import (
"context"
"errors"
"fmt"
"math/big"
"os"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
legacy_bindings "github.com/ethereum-optimism/optimism/op-bindings/legacy-bindings"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli/v2"
)
func main() {
app := cli.NewApp()
app.Name = "rollover"
app.Usage = "Commands for assisting in the rollover of the system"
var flags []cli.Flag
flags = append(flags, util.ClientsFlags...)
flags = append(flags, util.AddressesFlags...)
app.Commands = []*cli.Command{
{
Name: "deposits",
Usage: "Ensures that all deposits have been ingested into L2",
Flags: flags,
Action: func(cliCtx *cli.Context) error {
clients, err := util.NewClients(cliCtx)
if err != nil {
return err
}
addresses, err := util.NewAddresses(cliCtx)
if err != nil {
return err
}
log.Info("Connecting to AddressManager", "address", addresses.AddressManager)
addressManager, err := bindings.NewAddressManager(addresses.AddressManager, clients.L1Client)
if err != nil {
return err
}
for {
shutoffBlock, err := addressManager.GetAddress(&bind.CallOpts{}, "DTL_SHUTOFF_BLOCK")
if err != nil {
return err
}
if num := shutoffBlock.Big(); num.Cmp(common.Big0) != 0 {
log.Info("DTL_SHUTOFF_BLOCK is set", "number", num.Uint64())
break
}
log.Info("DTL_SHUTOFF_BLOCK not set yet")
time.Sleep(3 * time.Second)
}
log.Info("Connecting to CanonicalTransactionChain", "address", addresses.CanonicalTransactionChain)
ctc, err := legacy_bindings.NewCanonicalTransactionChain(addresses.CanonicalTransactionChain, clients.L1Client)
if err != nil {
return err
}
queueLength, err := ctc.GetQueueLength(&bind.CallOpts{})
if err != nil {
return err
}
totalElements, err := ctc.GetTotalElements(&bind.CallOpts{})
if err != nil {
return err
}
totalBatches, err := ctc.GetTotalBatches(&bind.CallOpts{})
if err != nil {
return err
}
pending, err := ctc.GetNumPendingQueueElements(&bind.CallOpts{})
if err != nil {
return err
}
log.Info(
"CanonicalTransactionChain",
"address", addresses.CanonicalTransactionChain,
"queue-length", queueLength,
"total-elements", totalElements,
"total-batches", totalBatches,
"pending", pending,
)
blockNumber, err := clients.L2Client.BlockNumber(context.Background())
if err != nil {
return err
}
log.Info("Searching backwards for final deposit", "start", blockNumber)
for {
bn := new(big.Int).SetUint64(blockNumber)
log.Info("Checking L2 block", "number", bn)
block, err := clients.L2Client.BlockByNumber(context.Background(), bn)
if err != nil {
return err
}
if length := len(block.Transactions()); length != 1 {
return fmt.Errorf("unexpected number of transactions in block: %d", length)
}
tx := block.Transactions()[0]
hash := tx.Hash()
json, err := legacyTransactionByHash(clients.L2RpcClient, hash)
if err != nil {
return err
}
if json.QueueOrigin == "l1" {
if json.QueueIndex == nil {
// This should never happen
return errors.New("queue index is nil")
}
queueIndex := uint64(*json.QueueIndex)
if queueIndex == queueLength.Uint64()-1 {
log.Info("Found final deposit in l2geth", "queue-index", queueIndex)
break
}
if queueIndex < queueLength.Uint64() {
return errors.New("missed final deposit")
}
}
blockNumber--
}
finalPending, err := ctc.GetNumPendingQueueElements(&bind.CallOpts{})
if err != nil {
return err
}
log.Info("Remaining deposits that must be submitted", "count", finalPending)
return nil
},
},
{
Name: "batches",
Usage: "Ensures that all batches have been submitted to L1",
Flags: flags,
Action: func(cliCtx *cli.Context) error {
clients, err := util.NewClients(cliCtx)
if err != nil {
return err
}
addresses, err := util.NewAddresses(cliCtx)
if err != nil {
return err
}
log.Info("Connecting to CanonicalTransactionChain", "address", addresses.CanonicalTransactionChain)
ctc, err := legacy_bindings.NewCanonicalTransactionChain(addresses.CanonicalTransactionChain, clients.L1Client)
if err != nil {
return err
}
log.Info("Connecting to StateCommitmentChain", "address", addresses.StateCommitmentChain)
scc, err := legacy_bindings.NewStateCommitmentChain(addresses.StateCommitmentChain, clients.L1Client)
if err != nil {
return err
}
var wg sync.WaitGroup
log.Info("Waiting for CanonicalTransactionChain")
wg.Add(1)
go waitForTotalElements(&wg, ctc, clients.L2Client)
log.Info("Waiting for StateCommitmentChain")
wg.Add(1)
go waitForTotalElements(&wg, scc, clients.L2Client)
wg.Wait()
log.Info("All batches have been submitted")
return nil
},
},
}
if err := app.Run(os.Args); err != nil {
log.Crit("Application failed", "message", err)
}
}
// RollupContract represents a legacy rollup contract interface that
// exposes the GetTotalElements function. Both the StateCommitmentChain
// and the CanonicalTransactionChain implement this interface.
type RollupContract interface {
GetTotalElements(opts *bind.CallOpts) (*big.Int, error)
}
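The deposits and batches commands above hand the generated CanonicalTransactionChain and StateCommitmentChain bindings to waitForTotalElements, so both must satisfy this interface. A hypothetical compile-time check (not part of this commit), in the same style as the var _ L2Chain assertion in the p2p sync tests later in this merge, would be:
var (
	_ RollupContract = (*legacy_bindings.CanonicalTransactionChain)(nil)
	_ RollupContract = (*legacy_bindings.StateCommitmentChain)(nil)
)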
// waitForTotalElements will poll until the contract's total element count catches up to the current L2 block height.
func waitForTotalElements(wg *sync.WaitGroup, contract RollupContract, client *ethclient.Client) {
defer wg.Done()
for {
bn, err := client.BlockNumber(context.Background())
if err != nil {
log.Error("cannot fetch blocknumber", "error", err)
time.Sleep(3 * time.Second)
continue
}
totalElements, err := contract.GetTotalElements(&bind.CallOpts{})
if err != nil {
log.Error("cannot fetch total elements", "error", err)
time.Sleep(3 * time.Second)
continue
}
if totalElements.Uint64() == bn {
return
}
log.Info("Waiting for elements to be submitted", "count", totalElements.Uint64()-bn, "height", bn, "total-elements", totalElements.Uint64())
time.Sleep(3 * time.Second)
}
}
// legacyTransactionByHash will fetch a transaction by hash and be sure to decode
// the additional fields added to legacy transactions.
func legacyTransactionByHash(client *rpc.Client, hash common.Hash) (*RPCTransaction, error) {
var json *RPCTransaction
err := client.CallContext(context.Background(), &json, "eth_getTransactionByHash", hash)
if err != nil {
return nil, err
}
return json, nil
}
// RPCTransaction represents a transaction that will serialize to the RPC representation of a
// transaction. This handles the extra legacy fields added to transactions.
type RPCTransaction struct {
BlockHash *common.Hash `json:"blockHash"`
BlockNumber *hexutil.Big `json:"blockNumber"`
From common.Address `json:"from"`
Gas hexutil.Uint64 `json:"gas"`
GasPrice *hexutil.Big `json:"gasPrice"`
Hash common.Hash `json:"hash"`
Input hexutil.Bytes `json:"input"`
Nonce hexutil.Uint64 `json:"nonce"`
To *common.Address `json:"to"`
TransactionIndex *hexutil.Uint64 `json:"transactionIndex"`
Value *hexutil.Big `json:"value"`
V *hexutil.Big `json:"v"`
R *hexutil.Big `json:"r"`
S *hexutil.Big `json:"s"`
QueueOrigin string `json:"queueOrigin"`
L1TxOrigin *common.Address `json:"l1TxOrigin"`
L1BlockNumber *hexutil.Big `json:"l1BlockNumber"`
L1Timestamp hexutil.Uint64 `json:"l1Timestamp"`
Index *hexutil.Uint64 `json:"index"`
QueueIndex *hexutil.Uint64 `json:"queueIndex"`
RawTransaction hexutil.Bytes `json:"rawTransaction"`
}
......@@ -18,6 +18,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
......@@ -25,9 +26,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
)
// abiTrue represents the storage representation of the boolean
......@@ -115,7 +114,7 @@ func main() {
},
},
Action: func(ctx *cli.Context) error {
clients, err := newClients(ctx)
clients, err := util.NewClients(ctx)
if err != nil {
return err
}
......@@ -433,7 +432,7 @@ func main() {
}
// callTrace will call `debug_traceTransaction` on a remote node
func callTrace(c *clients, receipt *types.Receipt) (callFrame, error) {
func callTrace(c *util.Clients, receipt *types.Receipt) (callFrame, error) {
var finalizationTrace callFrame
tracer := "callTracer"
traceConfig := tracers.TraceConfig{
......@@ -558,7 +557,7 @@ func handleFinalizeERC20Withdrawal(args []any, receipt *types.Receipt, l1Standar
// proveWithdrawalTransaction will build the data required for proving a
// withdrawal and then send the transaction and make sure that it is included
// and successful and then wait for the finalization period to elapse.
func proveWithdrawalTransaction(c *contracts, cl *clients, opts *bind.TransactOpts, withdrawal *crossdomain.Withdrawal, bn, finalizationPeriod *big.Int) error {
func proveWithdrawalTransaction(c *contracts, cl *util.Clients, opts *bind.TransactOpts, withdrawal *crossdomain.Withdrawal, bn, finalizationPeriod *big.Int) error {
l2OutputIndex, outputRootProof, trieNodes, err := createOutput(withdrawal, c.L2OutputOracle, bn, cl)
if err != nil {
return err
......@@ -614,7 +613,7 @@ func proveWithdrawalTransaction(c *contracts, cl *clients, opts *bind.TransactOp
func finalizeWithdrawalTransaction(
c *contracts,
cl *clients,
cl *util.Clients,
opts *bind.TransactOpts,
wd *crossdomain.LegacyWithdrawal,
withdrawal *crossdomain.Withdrawal,
......@@ -699,67 +698,6 @@ func newContracts(ctx *cli.Context, l1Backend, l2Backend bind.ContractBackend) (
}, nil
}
// clients represents a set of initialized RPC clients
type clients struct {
L1Client *ethclient.Client
L2Client *ethclient.Client
L1RpcClient *rpc.Client
L2RpcClient *rpc.Client
L1GethClient *gethclient.Client
L2GethClient *gethclient.Client
}
// newClients will create new RPC clients
func newClients(ctx *cli.Context) (*clients, error) {
l1RpcURL := ctx.String("l1-rpc-url")
l1Client, err := ethclient.Dial(l1RpcURL)
if err != nil {
return nil, err
}
l1ChainID, err := l1Client.ChainID(context.Background())
if err != nil {
return nil, err
}
l2RpcURL := ctx.String("l2-rpc-url")
l2Client, err := ethclient.Dial(l2RpcURL)
if err != nil {
return nil, err
}
l2ChainID, err := l2Client.ChainID(context.Background())
if err != nil {
return nil, err
}
l1RpcClient, err := rpc.DialContext(context.Background(), l1RpcURL)
if err != nil {
return nil, err
}
l2RpcClient, err := rpc.DialContext(context.Background(), l2RpcURL)
if err != nil {
return nil, err
}
l1GethClient := gethclient.New(l1RpcClient)
l2GethClient := gethclient.New(l2RpcClient)
log.Info(
"Set up RPC clients",
"l1-chain-id", l1ChainID,
"l2-chain-id", l2ChainID,
)
return &clients{
L1Client: l1Client,
L2Client: l2Client,
L1RpcClient: l1RpcClient,
L2RpcClient: l2RpcClient,
L1GethClient: l1GethClient,
L2GethClient: l2GethClient,
}, nil
}
// newWithdrawals will create a set of legacy withdrawals
func newWithdrawals(ctx *cli.Context, l1ChainID *big.Int) ([]*crossdomain.LegacyWithdrawal, error) {
ovmMsgs := ctx.String("ovm-messages")
......@@ -849,7 +787,7 @@ func createOutput(
withdrawal *crossdomain.Withdrawal,
oracle *bindings.L2OutputOracle,
blockNumber *big.Int,
clients *clients,
clients *util.Clients,
) (*big.Int, bindings.TypesOutputRootProof, [][]byte, error) {
// compute the storage slot that the withdrawal is stored in
slot, err := withdrawal.StorageSlot()
......
package util
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli/v2"
)
func ProgressLogger(n int, msg string) func(...any) {
......@@ -15,3 +23,171 @@ func ProgressLogger(n int, msg string) func(...any) {
log.Info(msg, append([]any{"count", i}, args...)...)
}
}
// Clients represents a set of initialized RPC clients
type Clients struct {
L1Client *ethclient.Client
L2Client *ethclient.Client
L1RpcClient *rpc.Client
L2RpcClient *rpc.Client
L1GethClient *gethclient.Client
L2GethClient *gethclient.Client
}
// NewClients will create new RPC clients from a CLI context
func NewClients(ctx *cli.Context) (*Clients, error) {
l1RpcURL := ctx.String("l1-rpc-url")
l1Client, err := ethclient.Dial(l1RpcURL)
if err != nil {
return nil, err
}
l1ChainID, err := l1Client.ChainID(context.Background())
if err != nil {
return nil, err
}
l2RpcURL := ctx.String("l2-rpc-url")
l2Client, err := ethclient.Dial(l2RpcURL)
if err != nil {
return nil, err
}
l2ChainID, err := l2Client.ChainID(context.Background())
if err != nil {
return nil, err
}
l1RpcClient, err := rpc.DialContext(context.Background(), l1RpcURL)
if err != nil {
return nil, err
}
l2RpcClient, err := rpc.DialContext(context.Background(), l2RpcURL)
if err != nil {
return nil, err
}
l1GethClient := gethclient.New(l1RpcClient)
l2GethClient := gethclient.New(l2RpcClient)
log.Info(
"Set up RPC clients",
"l1-chain-id", l1ChainID,
"l2-chain-id", l2ChainID,
)
return &Clients{
L1Client: l1Client,
L2Client: l2Client,
L1RpcClient: l1RpcClient,
L2RpcClient: l2RpcClient,
L1GethClient: l1GethClient,
L2GethClient: l2GethClient,
}, nil
}
// ClientsFlags represent the flags associated with creating RPC clients.
var ClientsFlags = []cli.Flag{
&cli.StringFlag{
Name: "l1-rpc-url",
Required: true,
Usage: "L1 RPC URL",
EnvVars: []string{"L1_RPC_URL"},
},
&cli.StringFlag{
Name: "l2-rpc-url",
Required: true,
Usage: "L2 RPC URL",
EnvVars: []string{"L2_RPC_URL"},
},
}
// Addresses represents the address values of various contracts. The values can
// be easily populated via a [cli.Context].
type Addresses struct {
AddressManager common.Address
OptimismPortal common.Address
L1StandardBridge common.Address
L1CrossDomainMessenger common.Address
CanonicalTransactionChain common.Address
StateCommitmentChain common.Address
}
// AddressesFlags represent the flags associated with address parsing.
var AddressesFlags = []cli.Flag{
&cli.StringFlag{
Name: "address-manager-address",
Usage: "AddressManager address",
EnvVars: []string{"ADDRESS_MANAGER_ADDRESS"},
},
&cli.StringFlag{
Name: "optimism-portal-address",
Usage: "OptimismPortal address",
EnvVars: []string{"OPTIMISM_PORTAL_ADDRESS"},
},
&cli.StringFlag{
Name: "l1-standard-bridge-address",
Usage: "L1StandardBridge address",
EnvVars: []string{"L1_STANDARD_BRIDGE_ADDRESS"},
},
&cli.StringFlag{
Name: "l1-crossdomain-messenger-address",
Usage: "L1CrossDomainMessenger address",
EnvVars: []string{"L1_CROSSDOMAIN_MESSENGER_ADDRESS"},
},
&cli.StringFlag{
Name: "canonical-transaction-chain-address",
Usage: "CanonicalTransactionChain address",
EnvVars: []string{"CANONICAL_TRANSACTION_CHAIN_ADDRESS"},
},
&cli.StringFlag{
Name: "state-commitment-chain-address",
Usage: "StateCommitmentChain address",
EnvVars: []string{"STATE_COMMITMENT_CHAIN_ADDRESS"},
},
}
// NewAddresses populates an Addresses struct given a [cli.Context].
// This is useful for writing scripts that interact with smart contracts.
func NewAddresses(ctx *cli.Context) (*Addresses, error) {
var addresses Addresses
var err error
addresses.AddressManager, err = parseAddress(ctx, "address-manager-address")
if err != nil {
return nil, err
}
addresses.OptimismPortal, err = parseAddress(ctx, "optimism-portal-address")
if err != nil {
return nil, err
}
addresses.L1StandardBridge, err = parseAddress(ctx, "l1-standard-bridge-address")
if err != nil {
return nil, err
}
addresses.L1CrossDomainMessenger, err = parseAddress(ctx, "l1-crossdomain-messenger-address")
if err != nil {
return nil, err
}
addresses.CanonicalTransactionChain, err = parseAddress(ctx, "canonical-transaction-chain-address")
if err != nil {
return nil, err
}
addresses.StateCommitmentChain, err = parseAddress(ctx, "state-commitment-chain-address")
if err != nil {
return nil, err
}
return &addresses, nil
}
// parseAddress will parse a [common.Address] from a [cli.Context] and return
// an error if the configured address is malformed. An empty value is treated
// as unset and yields the zero address.
func parseAddress(ctx *cli.Context, name string) (common.Address, error) {
value := ctx.String(name)
if value == "" {
return common.Address{}, nil
}
if !common.IsHexAddress(value) {
return common.Address{}, fmt.Errorf("invalid address: %s", value)
}
return common.HexToAddress(value), nil
}
......@@ -143,6 +143,7 @@ func (s *L2Verifier) SyncStatus() *eth.SyncStatus {
UnsafeL2: s.L2Unsafe(),
SafeL2: s.L2Safe(),
FinalizedL2: s.L2Finalized(),
UnsafeL2SyncTarget: s.derivation.UnsafeL2SyncTarget(),
}
}
......
......@@ -18,7 +18,6 @@ import (
proposermetrics "github.com/ethereum-optimism/optimism/op-proposer/metrics"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
......@@ -340,14 +339,7 @@ func TestMigration(t *testing.T) {
ApproxComprRatio: 0.4,
SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond,
TxMgrConfig: txmgr.CLIConfig{
L1RPCURL: forkedL1URL,
PrivateKey: hexPriv(secrets.Batcher),
NumConfirmations: 1,
ResubmissionTimeout: 5 * time.Second,
SafeAbortNonceTooLowCount: 3,
TxNotInMempoolTimeout: 2 * time.Minute,
},
TxMgrConfig: newTxMgrConfig(forkedL1URL, secrets.Batcher),
LogConfig: oplog.CLIConfig{
Level: "info",
Format: "text",
......@@ -366,14 +358,7 @@ func TestMigration(t *testing.T) {
L2OOAddress: l2OS.Address.String(),
PollInterval: 50 * time.Millisecond,
AllowNonFinalized: true,
TxMgrConfig: txmgr.CLIConfig{
L1RPCURL: forkedL1URL,
PrivateKey: hexPriv(secrets.Proposer),
NumConfirmations: 1,
ResubmissionTimeout: 3 * time.Second,
SafeAbortNonceTooLowCount: 3,
TxNotInMempoolTimeout: 2 * time.Minute,
},
TxMgrConfig: newTxMgrConfig(forkedL1URL, secrets.Proposer),
LogConfig: oplog.CLIConfig{
Level: "info",
Format: "text",
......
......@@ -47,6 +47,19 @@ var (
testingJWTSecret = [32]byte{123}
)
func newTxMgrConfig(l1Addr string, privKey *ecdsa.PrivateKey) txmgr.CLIConfig {
return txmgr.CLIConfig{
L1RPCURL: l1Addr,
PrivateKey: hexPriv(privKey),
NumConfirmations: 1,
SafeAbortNonceTooLowCount: 3,
ResubmissionTimeout: 3 * time.Second,
ReceiptQueryInterval: 50 * time.Millisecond,
NetworkTimeout: 2 * time.Second,
TxNotInMempoolTimeout: 2 * time.Minute,
}
}
func DefaultSystemConfig(t *testing.T) SystemConfig {
secrets, err := e2eutils.DefaultMnemonicConfig.Secrets()
require.NoError(t, err)
......@@ -193,6 +206,9 @@ type SystemConfig struct {
// Any node name not in the topology will not have p2p enabled.
P2PTopology map[string][]string
// Enables req-resp sync in the P2P nodes
P2PReqRespSync bool
// If the proposer can make proposals for L2 blocks derived from L1 blocks which are not finalized on L1 yet.
NonFinalizedProposals bool
......@@ -205,6 +221,8 @@ type System struct {
RollupConfig *rollup.Config
L2GenesisCfg *core.Genesis
// Connections to running nodes
Nodes map[string]*node.Node
Backends map[string]*geth_eth.Ethereum
......@@ -316,6 +334,7 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
if err != nil {
return nil, err
}
sys.L2GenesisCfg = l2Genesis
for addr, amount := range cfg.Premine {
if existing, ok := l2Genesis.Alloc[addr]; ok {
l2Genesis.Alloc[addr] = core.GenesisAccount{
......@@ -398,30 +417,10 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
// Configure connections to L1 and L2 for rollup nodes.
// TODO: refactor testing to use in-process rpc connections instead of websockets.
l1EndpointConfig := l1Node.WSEndpoint()
useHTTP := os.Getenv("OP_E2E_USE_HTTP") == "true"
if useHTTP {
log.Info("using HTTP client")
l1EndpointConfig = l1Node.HTTPEndpoint()
}
for name, rollupCfg := range cfg.Nodes {
l2EndpointConfig := sys.Nodes[name].WSAuthEndpoint()
if useHTTP {
l2EndpointConfig = sys.Nodes[name].HTTPAuthEndpoint()
}
rollupCfg.L1 = &rollupNode.L1EndpointConfig{
L1NodeAddr: l1EndpointConfig,
L1TrustRPC: false,
L1RPCKind: sources.RPCKindBasic,
RateLimit: 0,
BatchSize: 20,
HttpPollInterval: time.Duration(cfg.DeployConfig.L1BlockTime) * time.Second / 10,
}
rollupCfg.L2 = &rollupNode.L2EndpointConfig{
L2EngineAddr: l2EndpointConfig,
L2EngineJWTSecret: cfg.JWTSecret,
}
configureL1(rollupCfg, l1Node)
configureL2(rollupCfg, sys.Nodes[name], cfg.JWTSecret)
rollupCfg.L2Sync = &rollupNode.PreparedL2SyncEndpoint{
Client: nil,
TrustRPC: false,
......@@ -476,6 +475,7 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
HostP2P: h,
LocalNode: nil,
UDPv5: nil,
EnableReqRespSync: cfg.P2PReqRespSync,
}
p2pNodes[name] = p
return p, nil
......@@ -572,16 +572,7 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(),
L2OOAddress: predeploys.DevL2OutputOracleAddr.String(),
PollInterval: 50 * time.Millisecond,
TxMgrConfig: txmgr.CLIConfig{
L1RPCURL: sys.Nodes["l1"].WSEndpoint(),
PrivateKey: hexPriv(cfg.Secrets.Proposer),
NumConfirmations: 1,
SafeAbortNonceTooLowCount: 3,
ResubmissionTimeout: 3 * time.Second,
ReceiptQueryInterval: 50 * time.Millisecond,
NetworkTimeout: 2 * time.Second,
TxNotInMempoolTimeout: 2 * time.Minute,
},
TxMgrConfig: newTxMgrConfig(sys.Nodes["l1"].WSEndpoint(), cfg.Secrets.Proposer),
AllowNonFinalized: cfg.NonFinalizedProposals,
LogConfig: oplog.CLIConfig{
Level: "info",
......@@ -608,16 +599,7 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
ApproxComprRatio: 0.4,
SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond,
TxMgrConfig: txmgr.CLIConfig{
L1RPCURL: sys.Nodes["l1"].WSEndpoint(),
PrivateKey: hexPriv(cfg.Secrets.Batcher),
NumConfirmations: 1,
SafeAbortNonceTooLowCount: 3,
ResubmissionTimeout: 3 * time.Second,
ReceiptQueryInterval: 50 * time.Millisecond,
NetworkTimeout: 2 * time.Second,
TxNotInMempoolTimeout: 2 * time.Minute,
},
TxMgrConfig: newTxMgrConfig(sys.Nodes["l1"].WSEndpoint(), cfg.Secrets.Batcher),
LogConfig: oplog.CLIConfig{
Level: "info",
Format: "text",
......@@ -637,6 +619,35 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
return sys, nil
}
func configureL1(rollupNodeCfg *rollupNode.Config, l1Node *node.Node) {
l1EndpointConfig := l1Node.WSEndpoint()
useHTTP := os.Getenv("OP_E2E_USE_HTTP") == "true"
if useHTTP {
log.Info("using HTTP client")
l1EndpointConfig = l1Node.HTTPEndpoint()
}
rollupNodeCfg.L1 = &rollupNode.L1EndpointConfig{
L1NodeAddr: l1EndpointConfig,
L1TrustRPC: false,
L1RPCKind: sources.RPCKindBasic,
RateLimit: 0,
BatchSize: 20,
HttpPollInterval: time.Millisecond * 100,
}
}
func configureL2(rollupNodeCfg *rollupNode.Config, l2Node *node.Node, jwtSecret [32]byte) {
useHTTP := os.Getenv("OP_E2E_USE_HTTP") == "true"
l2EndpointConfig := l2Node.WSAuthEndpoint()
if useHTTP {
l2EndpointConfig = l2Node.HTTPAuthEndpoint()
}
rollupNodeCfg.L2 = &rollupNode.L2EndpointConfig{
L2EngineAddr: l2EndpointConfig,
L2EngineJWTSecret: jwtSecret,
}
}
func (cfg SystemConfig) L1ChainIDBig() *big.Int {
return new(big.Int).SetUint64(cfg.DeployConfig.L1ChainID)
}
......
......@@ -28,6 +28,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/metrics"
rollupNode "github.com/ethereum-optimism/optimism/op-node/node"
"github.com/ethereum-optimism/optimism/op-node/p2p"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
......@@ -35,6 +36,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/withdrawals"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
)
var enableParallelTesting bool = true
......@@ -737,6 +739,159 @@ func TestSystemRPCAltSync(t *testing.T) {
require.ElementsMatch(t, received, published[:len(received)])
}
func TestSystemP2PAltSync(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
cfg := DefaultSystemConfig(t)
// remove default verifier node
delete(cfg.Nodes, "verifier")
// Add more verifier nodes
cfg.Nodes["alice"] = &rollupNode.Config{
Driver: driver.Config{
VerifierConfDepth: 0,
SequencerConfDepth: 0,
SequencerEnabled: false,
},
L1EpochPollInterval: time.Second * 4,
}
cfg.Nodes["bob"] = &rollupNode.Config{
Driver: driver.Config{
VerifierConfDepth: 0,
SequencerConfDepth: 0,
SequencerEnabled: false,
},
L1EpochPollInterval: time.Second * 4,
}
cfg.Loggers["alice"] = testlog.Logger(t, log.LvlInfo).New("role", "alice")
cfg.Loggers["bob"] = testlog.Logger(t, log.LvlInfo).New("role", "bob")
// connect the nodes
cfg.P2PTopology = map[string][]string{
"sequencer": {"alice", "bob"},
"alice": {"sequencer", "bob"},
"bob": {"alice", "sequencer"},
}
// Enable the P2P req-resp based sync
cfg.P2PReqRespSync = true
// Disable batcher, so there will not be any L1 data to sync from
cfg.DisableBatcher = true
var published []string
seqTracer := new(FnTracer)
// The sequencer reports every block it publishes to the tracer, even blocks that never reach a peer
seqTracer.OnPublishL2PayloadFn = func(ctx context.Context, payload *eth.ExecutionPayload) {
published = append(published, payload.ID().String())
}
// Blocks will be received by the syncer via the P2P req-resp based alt-sync method
cfg.Nodes["sequencer"].Tracer = seqTracer
sys, err := cfg.Start()
require.Nil(t, err, "Error starting up system")
defer sys.Close()
l2Seq := sys.Clients["sequencer"]
// Transactor Account
ethPrivKey := cfg.Secrets.Alice
// Submit a TX to L2 sequencer node
toAddr := common.Address{0xff, 0xff}
tx := types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
ChainID: cfg.L2ChainIDBig(),
Nonce: 0,
To: &toAddr,
Value: big.NewInt(1_000_000_000),
GasTipCap: big.NewInt(10),
GasFeeCap: big.NewInt(200),
Gas: 21000,
})
err = l2Seq.SendTransaction(context.Background(), tx)
require.Nil(t, err, "Sending L2 tx to sequencer")
// Wait for tx to be mined on the L2 sequencer chain
receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 6*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on sequencer")
// Gossip is able to respond to IWANT messages for the duration of heartbeat_time * message_window = 0.5 * 12 = 6 seconds.
// Wait until we are past that window, so that some blocks have been missed and can no longer be retrieved from gossip.
time.Sleep(time.Second * 10)
// set up our syncer node, connect it to alice/bob
cfg.Loggers["syncer"] = testlog.Logger(t, log.LvlInfo).New("role", "syncer")
snapLog := log.New()
snapLog.SetHandler(log.DiscardHandler())
// Create a peer, and hook up alice and bob
h, err := sys.Mocknet.GenPeer()
require.NoError(t, err)
_, err = sys.Mocknet.LinkPeers(sys.RollupNodes["alice"].P2P().Host().ID(), h.ID())
require.NoError(t, err)
_, err = sys.Mocknet.LinkPeers(sys.RollupNodes["bob"].P2P().Host().ID(), h.ID())
require.NoError(t, err)
// Configure the new rollup node that'll be syncing
var syncedPayloads []string
syncNodeCfg := &rollupNode.Config{
L2Sync: &rollupNode.PreparedL2SyncEndpoint{Client: nil},
Driver: driver.Config{VerifierConfDepth: 0},
Rollup: *sys.RollupConfig,
P2PSigner: nil,
RPC: rollupNode.RPCConfig{
ListenAddr: "127.0.0.1",
ListenPort: 0,
EnableAdmin: true,
},
P2P: &p2p.Prepared{HostP2P: h, EnableReqRespSync: true},
Metrics: rollupNode.MetricsConfig{Enabled: false}, // no metrics server
Pprof: oppprof.CLIConfig{},
L1EpochPollInterval: time.Second * 10,
Tracer: &FnTracer{
OnUnsafeL2PayloadFn: func(ctx context.Context, from peer.ID, payload *eth.ExecutionPayload) {
syncedPayloads = append(syncedPayloads, payload.ID().String())
},
},
}
configureL1(syncNodeCfg, sys.Nodes["l1"])
syncerL2Engine, _, err := initL2Geth("syncer", big.NewInt(int64(cfg.DeployConfig.L2ChainID)), sys.L2GenesisCfg, cfg.JWTFilePath)
require.NoError(t, err)
require.NoError(t, syncerL2Engine.Start())
configureL2(syncNodeCfg, syncerL2Engine, cfg.JWTSecret)
syncerNode, err := rollupNode.New(context.Background(), syncNodeCfg, cfg.Loggers["syncer"], snapLog, "", metrics.NewMetrics(""))
require.NoError(t, err)
err = syncerNode.Start(context.Background())
require.NoError(t, err)
// connect alice and bob to our new syncer node
_, err = sys.Mocknet.ConnectPeers(sys.RollupNodes["alice"].P2P().Host().ID(), syncerNode.P2P().Host().ID())
require.NoError(t, err)
_, err = sys.Mocknet.ConnectPeers(sys.RollupNodes["bob"].P2P().Host().ID(), syncerNode.P2P().Host().ID())
require.NoError(t, err)
rpc, err := syncerL2Engine.Attach()
require.NoError(t, err)
l2Verif := ethclient.NewClient(rpc)
// It may take a while to sync, but eventually we should see the sequenced data show up
receiptVerif, err := waitForTransaction(tx.Hash(), l2Verif, 100*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on verifier")
require.Equal(t, receiptSeq, receiptVerif)
// Verify that the tx was received via P2P sync
require.Contains(t, syncedPayloads, eth.BlockID{Hash: receiptVerif.BlockHash, Number: receiptVerif.BlockNumber.Uint64()}.String())
// Verify that everything that was received was published
require.GreaterOrEqual(t, len(published), len(syncedPayloads))
require.ElementsMatch(t, syncedPayloads, published[:len(syncedPayloads)])
}
// TestSystemDenseTopology sets up a dense p2p topology with 3 verifier nodes and 1 sequencer node.
func TestSystemDenseTopology(t *testing.T) {
t.Skip("Skipping dense topology test to avoid flakiness. @refcell address in p2p scoring pr.")
......
......@@ -32,4 +32,7 @@ type SyncStatus struct {
// FinalizedL2 points to the L2 block that was derived fully from
// finalized L1 information, thus irreversible.
FinalizedL2 L2BlockRef `json:"finalized_l2"`
// UnsafeL2SyncTarget points to the first unprocessed unsafe L2 block.
// It may be zeroed if there is no targeted block.
UnsafeL2SyncTarget L2BlockRef `json:"queued_unsafe_l2"`
}
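A minimal sketch (not part of this commit) of how the new field surfaces on the wire, assuming standard encoding/json handling of the struct:
status := eth.SyncStatus{UnsafeL2SyncTarget: target} // target: some eth.L2BlockRef; other fields elided
out, _ := json.Marshal(&status)
// out includes a "queued_unsafe_l2" object. The field carries no omitempty
// tag, so a zeroed sync target still marshals as an all-zero block reference.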
......@@ -276,6 +276,12 @@ var (
Hidden: true,
EnvVar: p2pEnv("GOSSIP_FLOOD_PUBLISH"),
}
SyncReqRespFlag = cli.BoolFlag{
Name: "p2p.sync.req-resp",
Usage: "Enables experimental P2P req-resp alternative sync method, on both server and client side.",
Required: false,
EnvVar: p2pEnv("SYNC_REQ_RESP"),
}
)
// None of these flags are strictly required.
......@@ -315,4 +321,5 @@ var p2pFlags = []cli.Flag{
GossipMeshDhiFlag,
GossipMeshDlazyFlag,
GossipFloodPublishFlag,
SyncReqRespFlag,
}
......@@ -66,6 +66,9 @@ type Metricer interface {
Document() []metrics.DocumentedMetric
// P2P Metrics
SetPeerScores(scores map[string]float64)
ClientPayloadByNumberEvent(num uint64, resultCode byte, duration time.Duration)
ServerPayloadByNumberEvent(num uint64, resultCode byte, duration time.Duration)
PayloadsQuarantineSize(n int)
}
// Metrics tracks all the metrics for the op-node.
......@@ -90,6 +93,12 @@ type Metrics struct {
SequencingErrors *EventMetrics
PublishingErrors *EventMetrics
P2PReqDurationSeconds *prometheus.HistogramVec
P2PReqTotal *prometheus.CounterVec
P2PPayloadByNumber *prometheus.GaugeVec
PayloadsQuarantineTotal prometheus.Gauge
SequencerInconsistentL1Origin *EventMetrics
SequencerResets *EventMetrics
......@@ -322,6 +331,44 @@ func NewMetrics(procName string) *Metrics {
"direction",
}),
P2PReqDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{
Namespace: ns,
Subsystem: "p2p",
Name: "req_duration_seconds",
Buckets: []float64{},
Help: "Duration of P2P requests",
}, []string{
"p2p_role", // "client" or "server"
"p2p_method",
"result_code",
}),
P2PReqTotal: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: "p2p",
Name: "req_total",
Help: "Number of P2P requests",
}, []string{
"p2p_role", // "client" or "server"
"p2p_method",
"result_code",
}),
P2PPayloadByNumber: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Subsystem: "p2p",
Name: "payload_by_number",
Help: "Payload by number requests",
}, []string{
"p2p_role", // "client" or "server"
}),
PayloadsQuarantineTotal: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Subsystem: "p2p",
Name: "payloads_quarantine_total",
Help: "number of unverified execution payloads buffered in quarantine",
}),
SequencerBuildingDiffDurationSeconds: factory.NewHistogram(prometheus.HistogramOpts{
Namespace: ns,
Name: "sequencer_building_diff_seconds",
......@@ -567,6 +614,27 @@ func (m *Metrics) Document() []metrics.DocumentedMetric {
return m.factory.Document()
}
func (m *Metrics) ClientPayloadByNumberEvent(num uint64, resultCode byte, duration time.Duration) {
if resultCode > 4 { // summarize all high codes to reduce metrics overhead
resultCode = 5
}
code := strconv.FormatUint(uint64(resultCode), 10)
m.P2PReqTotal.WithLabelValues("client", "payload_by_number", code).Inc()
m.P2PReqDurationSeconds.WithLabelValues("client", "payload_by_number", code).Observe(float64(duration) / float64(time.Second))
m.P2PPayloadByNumber.WithLabelValues("client").Set(float64(num))
}
func (m *Metrics) ServerPayloadByNumberEvent(num uint64, resultCode byte, duration time.Duration) {
code := strconv.FormatUint(uint64(resultCode), 10)
m.P2PReqTotal.WithLabelValues("server", "payload_by_number", code).Inc()
m.P2PReqDurationSeconds.WithLabelValues("server", "payload_by_number", code).Observe(float64(duration) / float64(time.Second))
m.P2PPayloadByNumber.WithLabelValues("server").Set(float64(num))
}
func (m *Metrics) PayloadsQuarantineSize(n int) {
m.PayloadsQuarantineTotal.Set(float64(n))
}
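To illustrate how these event helpers are driven, a minimal hypothetical sketch (the real call sites live in the req-resp sync client and server, which are not shown in this hunk):
func serveOne(m Metricer, num uint64) {
	start := time.Now()
	resultCode := byte(0) // assumption: 0 denotes a successfully served payload
	// ... serve the payload-by-number request, setting resultCode on failure ...
	m.ServerPayloadByNumberEvent(num, resultCode, time.Since(start))
}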
type noopMetricer struct{}
var NoopMetrics Metricer = new(noopMetricer)
......@@ -660,3 +728,12 @@ func (n *noopMetricer) RecordSequencerSealingTime(duration time.Duration) {
func (n *noopMetricer) Document() []metrics.DocumentedMetric {
return nil
}
func (n *noopMetricer) ClientPayloadByNumberEvent(num uint64, resultCode byte, duration time.Duration) {
}
func (n *noopMetricer) ServerPayloadByNumberEvent(num uint64, resultCode byte, duration time.Duration) {
}
func (n *noopMetricer) PayloadsQuarantineSize(int) {
}
......@@ -256,7 +256,7 @@ func (n *OpNode) initMetricsServer(ctx context.Context, cfg *Config) error {
func (n *OpNode) initP2P(ctx context.Context, cfg *Config) error {
if cfg.P2P != nil {
p2pNode, err := p2p.NewNodeP2P(n.resourcesCtx, &cfg.Rollup, n.log, cfg.P2P, n, n.runCfg, n.metrics)
p2pNode, err := p2p.NewNodeP2P(n.resourcesCtx, &cfg.Rollup, n.log, cfg.P2P, n, n.l2Source, n.runCfg, n.metrics)
if err != nil || p2pNode == nil {
return err
}
......@@ -373,11 +373,14 @@ func (n *OpNode) OnUnsafeL2Payload(ctx context.Context, from peer.ID, payload *e
return nil
}
func (n *OpNode) RequestL2Range(ctx context.Context, start, end uint64) error {
func (n *OpNode) RequestL2Range(ctx context.Context, start, end eth.L2BlockRef) error {
if n.rpcSync != nil {
return n.rpcSync.RequestL2Range(ctx, start, end)
}
n.log.Debug("ignoring request to sync L2 range, no sync method available")
if n.p2pNode != nil && n.p2pNode.AltSyncEnabled() {
return n.p2pNode.RequestL2Range(ctx, start, end)
}
n.log.Debug("ignoring request to sync L2 range, no sync method available", "start", start, "end", end)
return nil
}
......
......@@ -166,6 +166,7 @@ func randomSyncStatus(rng *rand.Rand) *eth.SyncStatus {
UnsafeL2: testutils.RandomL2BlockRef(rng),
SafeL2: testutils.RandomL2BlockRef(rng),
FinalizedL2: testutils.RandomL2BlockRef(rng),
UnsafeL2SyncTarget: testutils.RandomL2BlockRef(rng),
}
}
......
......@@ -73,6 +73,8 @@ func NewConfig(ctx *cli.Context, blockTime uint64) (*p2p.Config, error) {
conf.ConnGater = p2p.DefaultConnGater
conf.ConnMngr = p2p.DefaultConnManager
conf.EnableReqRespSync = ctx.GlobalBool(flags.SyncReqRespFlag.Name)
return conf, nil
}
......
......@@ -40,6 +40,7 @@ type SetupP2P interface {
Discovery(log log.Logger, rollupCfg *rollup.Config, tcpPort uint16) (*enode.LocalNode, *discover.UDPv5, error)
TargetPeers() uint
GossipSetupConfigurables
ReqRespSyncEnabled() bool
}
// Config sets up a p2p host and discv5 service from configuration.
......@@ -50,6 +51,9 @@ type Config struct {
DisableP2P bool
NoDiscovery bool
// Enable P2P-based alt-syncing method (req-resp protocol, not gossip)
AltSync bool
// Pubsub Scoring Parameters
PeerScoring pubsub.PeerScoreParams
TopicScoring pubsub.TopicScoreParams
......@@ -104,6 +108,8 @@ type Config struct {
ConnGater func(conf *Config) (connmgr.ConnectionGater, error)
ConnMngr func(conf *Config) (connmgr.ConnManager, error)
EnableReqRespSync bool
}
//go:generate mockery --name ConnectionGater
......@@ -166,6 +172,10 @@ func (conf *Config) TopicScoringParams() *pubsub.TopicScoreParams {
return &conf.TopicScoring
}
func (conf *Config) ReqRespSyncEnabled() bool {
return conf.EnableReqRespSync
}
const maxMeshParam = 1000
func (conf *Config) Check() error {
......
......@@ -338,27 +338,15 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti
}
func verifyBlockSignature(log log.Logger, cfg *rollup.Config, runCfg GossipRuntimeConfig, id peer.ID, signatureBytes []byte, payloadBytes []byte) pubsub.ValidationResult {
result := verifyBlockSignatureWithHasher(nil, cfg, runCfg, id, signatureBytes, payloadBytes, LegacyBlockSigningHash)
if result != pubsub.ValidationAccept {
return verifyBlockSignatureWithHasher(log, cfg, runCfg, id, signatureBytes, payloadBytes, BlockSigningHash)
}
return result
}
func verifyBlockSignatureWithHasher(log log.Logger, cfg *rollup.Config, runCfg GossipRuntimeConfig, id peer.ID, signatureBytes []byte, payloadBytes []byte, hasher func(cfg *rollup.Config, payloadBytes []byte) (common.Hash, error)) pubsub.ValidationResult {
signingHash, err := hasher(cfg, payloadBytes)
signingHash, err := BlockSigningHash(cfg, payloadBytes)
if err != nil {
if log != nil {
log.Warn("failed to compute block signing hash", "err", err, "peer", id)
}
return pubsub.ValidationReject
}
pub, err := crypto.SigToPub(signingHash[:], signatureBytes)
if err != nil {
if log != nil {
log.Warn("invalid block signature", "err", err, "peer", id)
}
return pubsub.ValidationReject
}
addr := crypto.PubkeyToAddress(*pub)
......@@ -369,14 +357,10 @@ func verifyBlockSignatureWithHasher(log log.Logger, cfg *rollup.Config, runCfg G
// This means we may drop old payloads upon key rotation,
// but this can be recovered from like any other missed unsafe payload.
if expected := runCfg.P2PSequencerAddress(); expected == (common.Address{}) {
if log != nil {
log.Warn("no configured p2p sequencer address, ignoring gossiped block", "peer", id, "addr", addr)
}
return pubsub.ValidationIgnore
} else if addr != expected {
if log != nil {
log.Warn("unexpected block author", "err", err, "peer", id, "addr", addr, "expected", expected)
}
return pubsub.ValidationReject
}
return pubsub.ValidationAccept
......
......@@ -2,7 +2,6 @@ package p2p
import (
"context"
"crypto/ecdsa"
"math/big"
"testing"
......@@ -42,21 +41,6 @@ func TestGuardGossipValidator(t *testing.T) {
}
func TestVerifyBlockSignature(t *testing.T) {
// Should accept signatures over both the legacy and updated signature hashes
tests := []struct {
name string
newSigner func(priv *ecdsa.PrivateKey) *LocalSigner
}{
{
name: "Legacy",
newSigner: newLegacyLocalSigner,
},
{
name: "Updated",
newSigner: NewLocalSigner,
},
}
logger := testlog.Logger(t, log.LvlCrit)
cfg := &rollup.Config{
L2ChainID: big.NewInt(100),
......@@ -66,43 +50,37 @@ func TestVerifyBlockSignature(t *testing.T) {
require.NoError(t, err)
msg := []byte("any msg")
for _, test := range tests {
t.Run("Valid "+test.name, func(t *testing.T) {
t.Run("Valid", func(t *testing.T) {
runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)}
signer := &PreparedSigner{Signer: test.newSigner(secrets.SequencerP2P)}
signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)}
sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg)
require.NoError(t, err)
result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg)
require.Equal(t, pubsub.ValidationAccept, result)
})
t.Run("WrongSigner "+test.name, func(t *testing.T) {
t.Run("WrongSigner", func(t *testing.T) {
runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: common.HexToAddress("0x1234")}
signer := &PreparedSigner{Signer: test.newSigner(secrets.SequencerP2P)}
signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)}
sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg)
require.NoError(t, err)
result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg)
require.Equal(t, pubsub.ValidationReject, result)
})
t.Run("InvalidSignature "+test.name, func(t *testing.T) {
t.Run("InvalidSignature", func(t *testing.T) {
runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)}
sig := make([]byte, 65)
result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig, msg)
require.Equal(t, pubsub.ValidationReject, result)
})
t.Run("NoSequencer "+test.name, func(t *testing.T) {
t.Run("NoSequencer", func(t *testing.T) {
runCfg := &testutils.MockRuntimeConfig{}
signer := &PreparedSigner{Signer: test.newSigner(secrets.SequencerP2P)}
signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)}
sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg)
require.NoError(t, err)
result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg)
require.Equal(t, pubsub.ValidationIgnore, result)
})
}
}
func newLegacyLocalSigner(priv *ecdsa.PrivateKey) *LocalSigner {
return &LocalSigner{priv: priv, hasher: LegacySigningHash}
}
......@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/testutils"
......@@ -125,7 +126,7 @@ func TestP2PFull(t *testing.T) {
runCfgB := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}}
logA := testlog.Logger(t, log.LvlError).New("host", "A")
nodeA, err := NewNodeP2P(context.Background(), &rollup.Config{}, logA, &confA, &mockGossipIn{}, runCfgA, nil)
nodeA, err := NewNodeP2P(context.Background(), &rollup.Config{}, logA, &confA, &mockGossipIn{}, nil, runCfgA, metrics.NoopMetrics)
require.NoError(t, err)
defer nodeA.Close()
......@@ -148,7 +149,7 @@ func TestP2PFull(t *testing.T) {
logB := testlog.Logger(t, log.LvlError).New("host", "B")
nodeB, err := NewNodeP2P(context.Background(), &rollup.Config{}, logB, &confB, &mockGossipIn{}, runCfgB, nil)
nodeB, err := NewNodeP2P(context.Background(), &rollup.Config{}, logB, &confB, &mockGossipIn{}, nil, runCfgB, metrics.NoopMetrics)
require.NoError(t, err)
defer nodeB.Close()
hostB := nodeB.Host()
......@@ -277,7 +278,7 @@ func TestDiscovery(t *testing.T) {
resourcesCtx, resourcesCancel := context.WithCancel(context.Background())
defer resourcesCancel()
nodeA, err := NewNodeP2P(context.Background(), rollupCfg, logA, &confA, &mockGossipIn{}, runCfgA, nil)
nodeA, err := NewNodeP2P(context.Background(), rollupCfg, logA, &confA, &mockGossipIn{}, nil, runCfgA, metrics.NoopMetrics)
require.NoError(t, err)
defer nodeA.Close()
hostA := nodeA.Host()
......@@ -292,7 +293,7 @@ func TestDiscovery(t *testing.T) {
confB.DiscoveryDB = discDBC
// Start B
nodeB, err := NewNodeP2P(context.Background(), rollupCfg, logB, &confB, &mockGossipIn{}, runCfgB, nil)
nodeB, err := NewNodeP2P(context.Background(), rollupCfg, logB, &confB, &mockGossipIn{}, nil, runCfgB, metrics.NoopMetrics)
require.NoError(t, err)
defer nodeB.Close()
hostB := nodeB.Host()
......@@ -307,7 +308,7 @@ func TestDiscovery(t *testing.T) {
}})
// Start C
nodeC, err := NewNodeP2P(context.Background(), rollupCfg, logC, &confC, &mockGossipIn{}, runCfgC, nil)
nodeC, err := NewNodeP2P(context.Background(), rollupCfg, logC, &confC, &mockGossipIn{}, nil, runCfgC, metrics.NoopMetrics)
require.NoError(t, err)
defer nodeC.Close()
hostC := nodeC.Host()
......
......@@ -11,8 +11,10 @@ import (
"github.com/libp2p/go-libp2p/core/connmgr"
"github.com/libp2p/go-libp2p/core/host"
p2pmetrics "github.com/libp2p/go-libp2p/core/metrics"
"github.com/libp2p/go-libp2p/core/network"
ma "github.com/multiformats/go-multiaddr"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum/go-ethereum/log"
......@@ -32,16 +34,18 @@ type NodeP2P struct {
dv5Udp *discover.UDPv5 // p2p discovery service
gs *pubsub.PubSub // p2p gossip router
gsOut GossipOut // p2p gossip application interface for publishing
syncCl *SyncClient
syncSrv *ReqRespServer
}
// NewNodeP2P creates a new p2p node and returns a reference to it. If p2p is disabled, it returns nil.
// If metrics are configured, a bandwidth monitor will be spawned in a goroutine.
func NewNodeP2P(resourcesCtx context.Context, rollupCfg *rollup.Config, log log.Logger, setup SetupP2P, gossipIn GossipIn, runCfg GossipRuntimeConfig, metrics metrics.Metricer) (*NodeP2P, error) {
func NewNodeP2P(resourcesCtx context.Context, rollupCfg *rollup.Config, log log.Logger, setup SetupP2P, gossipIn GossipIn, l2Chain L2Chain, runCfg GossipRuntimeConfig, metrics metrics.Metricer) (*NodeP2P, error) {
if setup == nil {
return nil, errors.New("p2p node cannot be created without setup")
}
var n NodeP2P
if err := n.init(resourcesCtx, rollupCfg, log, setup, gossipIn, runCfg, metrics); err != nil {
if err := n.init(resourcesCtx, rollupCfg, log, setup, gossipIn, l2Chain, runCfg, metrics); err != nil {
closeErr := n.Close()
if closeErr != nil {
log.Error("failed to close p2p after starting with err", "closeErr", closeErr, "err", err)
......@@ -54,7 +58,7 @@ func NewNodeP2P(resourcesCtx context.Context, rollupCfg *rollup.Config, log log.
return &n, nil
}
func (n *NodeP2P) init(resourcesCtx context.Context, rollupCfg *rollup.Config, log log.Logger, setup SetupP2P, gossipIn GossipIn, runCfg GossipRuntimeConfig, metrics metrics.Metricer) error {
func (n *NodeP2P) init(resourcesCtx context.Context, rollupCfg *rollup.Config, log log.Logger, setup SetupP2P, gossipIn GossipIn, l2Chain L2Chain, runCfg GossipRuntimeConfig, metrics metrics.Metricer) error {
bwc := p2pmetrics.NewBandwidthCounter()
var err error
......@@ -73,6 +77,29 @@ func (n *NodeP2P) init(resourcesCtx context.Context, rollupCfg *rollup.Config, l
n.gater = extra.ConnectionGater()
n.connMgr = extra.ConnectionManager()
}
// Activate the P2P req-resp sync if enabled by feature-flag.
if setup.ReqRespSyncEnabled() {
n.syncCl = NewSyncClient(log, rollupCfg, n.host.NewStream, gossipIn.OnUnsafeL2Payload, metrics)
n.host.Network().Notify(&network.NotifyBundle{
ConnectedF: func(nw network.Network, conn network.Conn) {
n.syncCl.AddPeer(conn.RemotePeer())
},
DisconnectedF: func(nw network.Network, conn network.Conn) {
n.syncCl.RemovePeer(conn.RemotePeer())
},
})
n.syncCl.Start()
// the host may already be connected to peers, add them all to the sync client
for _, peerID := range n.host.Network().Peers() {
n.syncCl.AddPeer(peerID)
}
if l2Chain != nil { // Only enable serving side of req-resp sync if we have a data-source, to make minimal P2P testing easy
n.syncSrv = NewReqRespServer(rollupCfg, l2Chain, metrics)
// register the sync protocol with libp2p host
payloadByNumber := MakeStreamHandler(resourcesCtx, log.New("serve", "payloads_by_number"), n.syncSrv.HandleSyncRequest)
n.host.SetStreamHandler(PayloadByNumberProtocolID(rollupCfg.L2ChainID), payloadByNumber)
}
}
// notify of any new connections/streams/etc.
n.host.Network().Notify(NewNetworkNotifier(log, metrics))
// note: the IDDelta functionality was removed from libP2P, and no longer needs to be explicitly disabled.
......@@ -104,6 +131,17 @@ func (n *NodeP2P) init(resourcesCtx context.Context, rollupCfg *rollup.Config, l
return nil
}
func (n *NodeP2P) AltSyncEnabled() bool {
return n.syncCl != nil
}
func (n *NodeP2P) RequestL2Range(ctx context.Context, start, end eth.L2BlockRef) error {
if !n.AltSyncEnabled() {
return fmt.Errorf("cannot request range %s - %s, req-resp sync is not enabled", start, end)
}
return n.syncCl.RequestL2Range(ctx, start, end)
}
func (n *NodeP2P) Host() host.Host {
return n.host
}
......@@ -146,6 +184,11 @@ func (n *NodeP2P) Close() error {
if err := n.host.Close(); err != nil {
result = multierror.Append(result, fmt.Errorf("failed to close p2p host cleanly: %w", err))
}
if n.syncCl != nil {
if err := n.syncCl.Close(); err != nil {
result = multierror.Append(result, fmt.Errorf("failed to close p2p sync client cleanly: %w", err))
}
}
}
return result.ErrorOrNil()
}
......
......@@ -22,6 +22,8 @@ type Prepared struct {
HostP2P host.Host
LocalNode *enode.LocalNode
UDPv5 *discover.UDPv5
EnableReqRespSync bool
}
var _ SetupP2P = (*Prepared)(nil)
......@@ -83,3 +85,7 @@ func (p *Prepared) TopicScoringParams() *pubsub.TopicScoreParams {
func (p *Prepared) Disabled() bool {
return false
}
func (p *Prepared) ReqRespSyncEnabled() bool {
return p.EnableReqRespSync
}
......@@ -20,21 +20,6 @@ type Signer interface {
io.Closer
}
func LegacySigningHash(domain [32]byte, chainID *big.Int, payloadBytes []byte) (common.Hash, error) {
var msgInput [32 + 32 + 32]byte
// domain: first 32 bytes
copy(msgInput[:32], domain[:])
// chain_id: second 32 bytes
if chainID.BitLen() > 256 {
return common.Hash{}, errors.New("chain_id is too large")
}
chainID.FillBytes(msgInput[32:64])
// payload_hash: third 32 bytes, hash of encoded payload
copy(msgInput[32:], crypto.Keccak256(payloadBytes))
return crypto.Keccak256Hash(msgInput[:]), nil
}
func SigningHash(domain [32]byte, chainID *big.Int, payloadBytes []byte) (common.Hash, error) {
var msgInput [32 + 32 + 32]byte
// domain: first 32 bytes
......@@ -54,10 +39,6 @@ func BlockSigningHash(cfg *rollup.Config, payloadBytes []byte) (common.Hash, err
return SigningHash(SigningDomainBlocksV1, cfg.L2ChainID, payloadBytes)
}
func LegacyBlockSigningHash(cfg *rollup.Config, payloadBytes []byte) (common.Hash, error) {
return LegacySigningHash(SigningDomainBlocksV1, cfg.L2ChainID, payloadBytes)
}
// LocalSigner is suitable for testing
type LocalSigner struct {
priv *ecdsa.PrivateKey
......
package p2p
import (
"context"
"math"
"math/big"
"testing"
"time"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
type mockPayloadFn func(n uint64) (*eth.ExecutionPayload, error)
func (fn mockPayloadFn) PayloadByNumber(_ context.Context, number uint64) (*eth.ExecutionPayload, error) {
return fn(number)
}
var _ L2Chain = mockPayloadFn(nil)
func setupSyncTestData(length uint64) (*rollup.Config, map[uint64]*eth.ExecutionPayload, func(i uint64) eth.L2BlockRef) {
// minimal rollup config to build mock blocks & verify their time.
cfg := &rollup.Config{
Genesis: rollup.Genesis{
L1: eth.BlockID{Hash: common.Hash{0xaa}},
L2: eth.BlockID{Hash: common.Hash{0xbb}},
L2Time: 9000,
},
BlockTime: 2,
L2ChainID: big.NewInt(1234),
}
// create some simple fake test blocks
payloads := make(map[uint64]*eth.ExecutionPayload)
payloads[0] = &eth.ExecutionPayload{
Timestamp: eth.Uint64Quantity(cfg.Genesis.L2Time),
}
payloads[0].BlockHash, _ = payloads[0].CheckBlockHash()
for i := uint64(1); i <= length; i++ {
payload := &eth.ExecutionPayload{
ParentHash: payloads[i-1].BlockHash,
BlockNumber: eth.Uint64Quantity(i),
Timestamp: eth.Uint64Quantity(cfg.Genesis.L2Time + i*cfg.BlockTime),
}
payload.BlockHash, _ = payload.CheckBlockHash()
payloads[i] = payload
}
l2Ref := func(i uint64) eth.L2BlockRef {
return eth.L2BlockRef{
Hash: payloads[i].BlockHash,
Number: uint64(payloads[i].BlockNumber),
ParentHash: payloads[i].ParentHash,
Time: uint64(payloads[i].Timestamp),
}
}
return cfg, payloads, l2Ref
}
func TestSinglePeerSync(t *testing.T) {
t.Parallel() // Takes a while, but can run in parallel
log := testlog.Logger(t, log.LvlError)
cfg, payloads, l2Ref := setupSyncTestData(25)
// Serving payloads: just load them from the map, if they exist
servePayload := mockPayloadFn(func(n uint64) (*eth.ExecutionPayload, error) {
p, ok := payloads[n]
if !ok {
return nil, ethereum.NotFound
}
return p, nil
})
// collect received payloads in a buffered channel, so we can verify we get everything
received := make(chan *eth.ExecutionPayload, 100)
receivePayload := receivePayloadFn(func(ctx context.Context, from peer.ID, payload *eth.ExecutionPayload) error {
received <- payload
return nil
})
// Setup 2 minimal test hosts to attach the sync protocol to
mnet, err := mocknet.FullMeshConnected(2)
require.NoError(t, err, "failed to setup mocknet")
defer mnet.Close()
hosts := mnet.Hosts()
hostA, hostB := hosts[0], hosts[1]
require.Equal(t, hostA.Network().Connectedness(hostB.ID()), network.Connected)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Setup host A as the server
srv := NewReqRespServer(cfg, servePayload, metrics.NoopMetrics)
payloadByNumber := MakeStreamHandler(ctx, log.New("role", "server"), srv.HandleSyncRequest)
hostA.SetStreamHandler(PayloadByNumberProtocolID(cfg.L2ChainID), payloadByNumber)
// Setup host B as the client
cl := NewSyncClient(log.New("role", "client"), cfg, hostB.NewStream, receivePayload, metrics.NoopMetrics)
// Setup host B (client) to sync from its peer Host A (server)
cl.AddPeer(hostA.ID())
cl.Start()
defer cl.Close()
// request to start syncing between 10 and 20
require.NoError(t, cl.RequestL2Range(ctx, l2Ref(10), l2Ref(20)))
// and wait for the sync results to come in (in reverse order)
receiveCtx, receiveCancel := context.WithTimeout(ctx, time.Second*5)
defer receiveCancel()
for i := uint64(19); i > 10; i-- {
select {
case p := <-received:
require.Equal(t, uint64(p.BlockNumber), i, "expecting payloads in order")
exp, ok := payloads[uint64(p.BlockNumber)]
require.True(t, ok, "expecting known payload")
require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
case <-receiveCtx.Done():
t.Fatal("did not receive all expected payloads within expected time")
}
}
}
func TestMultiPeerSync(t *testing.T) {
t.Parallel() // Takes a while, but can run in parallel
log := testlog.Logger(t, log.LvlError)
cfg, payloads, l2Ref := setupSyncTestData(100)
setupPeer := func(ctx context.Context, h host.Host) (*SyncClient, chan *eth.ExecutionPayload) {
// Serving payloads: just load them from the map, if they exist
servePayload := mockPayloadFn(func(n uint64) (*eth.ExecutionPayload, error) {
p, ok := payloads[n]
if !ok {
return nil, ethereum.NotFound
}
return p, nil
})
// collect received payloads in a buffered channel, so we can verify we get everything
received := make(chan *eth.ExecutionPayload, 100)
receivePayload := receivePayloadFn(func(ctx context.Context, from peer.ID, payload *eth.ExecutionPayload) error {
received <- payload
return nil
})
// Setup as server
srv := NewReqRespServer(cfg, servePayload, metrics.NoopMetrics)
payloadByNumber := MakeStreamHandler(ctx, log.New("serve", "payloads_by_number"), srv.HandleSyncRequest)
h.SetStreamHandler(PayloadByNumberProtocolID(cfg.L2ChainID), payloadByNumber)
cl := NewSyncClient(log.New("role", "client"), cfg, h.NewStream, receivePayload, metrics.NoopMetrics)
return cl, received
}
// Setup 3 minimal test hosts to attach the sync protocol to
mnet, err := mocknet.FullMeshConnected(3)
require.NoError(t, err, "failed to setup mocknet")
defer mnet.Close()
hosts := mnet.Hosts()
hostA, hostB, hostC := hosts[0], hosts[1], hosts[2]
require.Equal(t, hostA.Network().Connectedness(hostB.ID()), network.Connected)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
clA, recvA := setupPeer(ctx, hostA)
clB, recvB := setupPeer(ctx, hostB)
clC, _ := setupPeer(ctx, hostC)
// Make them all sync from each other
clA.AddPeer(hostB.ID())
clA.AddPeer(hostC.ID())
clA.Start()
defer clA.Close()
clB.AddPeer(hostA.ID())
clB.AddPeer(hostC.ID())
clB.Start()
defer clB.Close()
clC.AddPeer(hostA.ID())
clC.AddPeer(hostB.ID())
clC.Start()
defer clC.Close()
// request to start syncing between 10 and 90
require.NoError(t, clA.RequestL2Range(ctx, l2Ref(10), l2Ref(90)))
// With such a large range to request, we are going to hit the rate limits of B and C,
// but that means we'll balance the work between the peers.
// Wait for the results to come in, based on the expected rate limit divided by 2 (because we have 2 servers), plus a 2-second buffer.
receiveCtx, receiveCancel := context.WithTimeout(ctx, time.Second*time.Duration(math.Ceil(float64((89-10)/peerServerBlocksRateLimit)))/2+time.Second*2)
defer receiveCancel()
for i := uint64(89); i > 10; i-- {
select {
case p := <-recvA:
exp, ok := payloads[uint64(p.BlockNumber)]
require.True(t, ok, "expecting known payload")
require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
case <-receiveCtx.Done():
t.Fatal("did not receive all expected payloads within expected time")
}
}
// now see if B can sync a range, and fill the gap with a re-request
bl25 := payloads[25] // temporarily remove it from the available payloads. This will create a gap
delete(payloads, uint64(25))
require.NoError(t, clB.RequestL2Range(ctx, l2Ref(20), l2Ref(30)))
for i := uint64(29); i > 25; i-- {
select {
case p := <-recvB:
exp, ok := payloads[uint64(p.BlockNumber)]
require.True(t, ok, "expecting known payload")
require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
case <-receiveCtx.Done():
t.Fatal("did not receive all expected payloads within expected time")
}
}
// the request for 25 should fail. See:
// server: WARN peer requested unknown block by number num=25
// client: WARN failed p2p sync request num=25 err="peer failed to serve request with code 1"
require.Zero(t, len(recvB), "there is a gap, should not see other payloads yet")
// Add back the block
payloads[25] = bl25
// And request a range again: 25 is there now, and 21-24 should follow quickly (some may already have been fetched and be waiting in quarantine)
require.NoError(t, clB.RequestL2Range(ctx, l2Ref(20), l2Ref(26)))
receiveCtx, receiveCancel = context.WithTimeout(ctx, time.Second*10)
defer receiveCancel()
for i := uint64(25); i > 20; i-- {
select {
case p := <-recvB:
exp, ok := payloads[uint64(p.BlockNumber)]
require.True(t, ok, "expecting known payload")
require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
case <-receiveCtx.Done():
t.Fatal("did not receive all expected payloads within expected time")
}
}
}
......@@ -678,22 +678,15 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System
return io.EOF
}
// GetUnsafeQueueGap retrieves the current [start, end) range (incl. start, excl. end)
// of the gap between the tip of the unsafe priority queue and the unsafe head.
// If there is no gap, the difference between end and start will be 0.
func (eq *EngineQueue) GetUnsafeQueueGap(expectedNumber uint64) (start uint64, end uint64) {
// The start of the gap is always the unsafe head + 1
start = eq.unsafeHead.Number + 1
// If the priority queue is empty, the end is the first block number at the top of the priority queue
// Otherwise, the end is the expected block number
// UnsafeL2SyncTarget retrieves the first queued-up L2 unsafe payload, or a zeroed reference if there is none.
func (eq *EngineQueue) UnsafeL2SyncTarget() eth.L2BlockRef {
if first := eq.unsafePayloads.Peek(); first != nil {
// Don't include the payload we already have in the sync range
end = first.ID().Number
ref, err := PayloadToBlockRef(first, &eq.cfg.Genesis)
if err != nil {
return eth.L2BlockRef{}
}
return ref
} else {
// Include the expected payload in the sync range
end = expectedNumber + 1
return eth.L2BlockRef{}
}
return start, end
}
......@@ -51,7 +51,7 @@ type EngineQueueStage interface {
Finalize(l1Origin eth.L1BlockRef)
AddUnsafePayload(payload *eth.ExecutionPayload)
GetUnsafeQueueGap(expectedNumber uint64) (uint64, uint64)
UnsafeL2SyncTarget() eth.L2BlockRef
Step(context.Context) error
}
......@@ -167,10 +167,9 @@ func (dp *DerivationPipeline) AddUnsafePayload(payload *eth.ExecutionPayload) {
dp.eng.AddUnsafePayload(payload)
}
// GetUnsafeQueueGap retrieves the current [start, end] range of the gap between the tip of the unsafe priority queue and the unsafe head.
// If there is no gap, the start and end will be 0.
func (dp *DerivationPipeline) GetUnsafeQueueGap(expectedNumber uint64) (uint64, uint64) {
return dp.eng.GetUnsafeQueueGap(expectedNumber)
// UnsafeL2SyncTarget retrieves the first queued-up L2 unsafe payload, or a zeroed reference if there is none.
func (dp *DerivationPipeline) UnsafeL2SyncTarget() eth.L2BlockRef {
return dp.eng.UnsafeL2SyncTarget()
}
// Step tries to progress the buffer.
......
......@@ -48,7 +48,7 @@ type DerivationPipeline interface {
Reset()
Step(ctx context.Context) error
AddUnsafePayload(payload *eth.ExecutionPayload)
GetUnsafeQueueGap(expectedNumber uint64) (uint64, uint64)
UnsafeL2SyncTarget() eth.L2BlockRef
Finalize(ref eth.L1BlockRef)
FinalizedL1() eth.L1BlockRef
Finalized() eth.L2BlockRef
......@@ -84,12 +84,20 @@ type Network interface {
type AltSync interface {
// RequestL2Range informs the sync source that the given range of L2 blocks is missing,
// and should be retrieved from any available alternative syncing source.
// The start of the range is inclusive, the end is exclusive.
// The start and end of the range are exclusive:
// the start is the head we already have, the end is the first thing we have queued up.
// It's the task of the alt-sync mechanism to use this hint to fetch the right payloads.
// Note that the end and start may not be consistent: in this case the sync method should fetch older history
//
// If the end value is zeroed, then the sync-method may determine the end free of choice,
// e.g. sync till the chain head meets the wallclock time. This functionality is optional:
// a fixed target to sync towards may be determined by picking up payloads through P2P gossip or other sources.
//
// The sync results should be returned back to the driver via the OnUnsafeL2Payload(ctx, payload) method.
// The latest requested range should always take priority over previous requests.
// There may be overlaps in requested ranges.
// An error may be returned if the scheduling fails immediately, e.g. a context timeout.
RequestL2Range(ctx context.Context, start, end uint64) error
RequestL2Range(ctx context.Context, start, end eth.L2BlockRef) error
}
// NewDriver composes an events handler that tracks L1 state, triggers L2 derivation, and optionally sequences new L2 blocks.
......
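To make the exclusive-bounds contract above concrete, here is a minimal sketch of an AltSync stub that only records the hint; the type name and log wording are hypothetical, not part of the diff:
// loggingAltSync satisfies AltSync but performs no fetching.
type loggingAltSync struct {
	log log.Logger
}
func (s *loggingAltSync) RequestL2Range(ctx context.Context, start, end eth.L2BlockRef) error {
	if end == (eth.L2BlockRef{}) {
		// Zeroed end: the implementation may choose its own target,
		// e.g. sync until the chain head reaches the wallclock time.
		s.log.Debug("open-ended sync hint", "start", start.Number)
		return nil
	}
	// Exclusive on both sides: the missing blocks are start.Number+1 through end.Number-1.
	s.log.Debug("bounded sync hint", "start", start.Number, "end", end.Number)
	return nil
}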
......@@ -422,6 +422,7 @@ func (s *Driver) syncStatus() *eth.SyncStatus {
UnsafeL2: s.derivation.UnsafeL2Head(),
SafeL2: s.derivation.SafeL2Head(),
FinalizedL2: s.derivation.Finalized(),
UnsafeL2SyncTarget: s.derivation.UnsafeL2SyncTarget(),
}
}
......@@ -489,24 +490,14 @@ type hashAndErrorChannel struct {
// WARNING: This is only an outgoing signal, the blocks are not guaranteed to be retrieved.
// Results are received through OnUnsafeL2Payload.
func (s *Driver) checkForGapInUnsafeQueue(ctx context.Context) error {
// subtract genesis time from wall clock to get the time elapsed since genesis, and then divide that
// difference by the block time to get the expected L2 block number at the current time. If the
// unsafe head does not have this block number, then there is a gap in the queue.
wallClock := uint64(time.Now().Unix())
genesisTimestamp := s.config.Genesis.L2Time
if wallClock < genesisTimestamp {
s.log.Debug("nothing to sync, did not reach genesis L2 time yet", "genesis", genesisTimestamp)
return nil
}
wallClockGenesisDiff := wallClock - genesisTimestamp
// Note: round down, we should not request blocks into the future.
blocksSinceGenesis := wallClockGenesisDiff / s.config.BlockTime
expectedL2Block := s.config.Genesis.L2.Number + blocksSinceGenesis
start, end := s.derivation.GetUnsafeQueueGap(expectedL2Block)
// Check if there is a gap between the unsafe head and the expected L2 block number at the current time.
if end > start {
s.log.Debug("requesting missing unsafe L2 block range", "start", start, "end", end, "size", end-start)
start := s.derivation.UnsafeL2Head()
end := s.derivation.UnsafeL2SyncTarget()
// Check if we have missing blocks between the start and end. Request them if we do.
if end == (eth.L2BlockRef{}) {
s.log.Debug("requesting sync with open-end range", "start", start)
return s.altSync.RequestL2Range(ctx, start, eth.L2BlockRef{})
} else if end.Number > start.Number+1 {
s.log.Debug("requesting missing unsafe L2 block range", "start", start, "end", end, "size", end.Number-start.Number)
return s.altSync.RequestL2Range(ctx, start, end)
}
return nil
......
......@@ -116,6 +116,20 @@ func (cfg *Config) ValidateL2Config(ctx context.Context, client L2Client) error
return nil
}
func (cfg *Config) TargetBlockNumber(timestamp uint64) (num uint64, err error) {
// Subtract the genesis time from the given timestamp to get the time elapsed since genesis, then
// divide that difference by the block time to get the expected L2 block number at that timestamp.
genesisTimestamp := cfg.Genesis.L2Time
if timestamp < genesisTimestamp {
return 0, fmt.Errorf("did not reach genesis time (%d) yet", genesisTimestamp)
}
wallClockGenesisDiff := timestamp - genesisTimestamp
// Note: round down, we should not request blocks into the future.
blocksSinceGenesis := wallClockGenesisDiff / cfg.BlockTime
return cfg.Genesis.L2.Number + blocksSinceGenesis, nil
}
type L1Client interface {
ChainID(context.Context) (*big.Int, error)
L1BlockRefByNumber(context.Context, uint64) (eth.L1BlockRef, error)
......
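As a quick sanity check of TargetBlockNumber's rounding, using the mock values from the sync test above (L2Time 9000, BlockTime 2, genesis block number 0); a sketch, not part of the diff:
cfg := &rollup.Config{Genesis: rollup.Genesis{L2Time: 9000}, BlockTime: 2}
n, err := cfg.TargetBlockNumber(9005) // 5 seconds after genesis
// err == nil and n == 2: (9005-9000)/2 rounds down, since block 3 (t=9006) is still in the future.
_, err = cfg.TargetBlockNumber(8999) // before genesis
// err != nil: "did not reach genesis time (9000) yet"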
......@@ -32,9 +32,10 @@ type RPCSync interface {
// Start starts an additional worker syncing job
Start() error
// RequestL2Range signals that the given range should be fetched, implementing the alt-sync interface.
RequestL2Range(ctx context.Context, start, end uint64) error
RequestL2Range(ctx context.Context, start uint64, end eth.L2BlockRef) error
}
// SyncClient implements the driver AltSync interface, including support for fetching an open-ended chain of L2 blocks.
type SyncClient struct {
*L2Client
......@@ -88,7 +89,7 @@ func (s *SyncClient) Close() error {
return nil
}
func (s *SyncClient) RequestL2Range(ctx context.Context, start, end uint64) error {
func (s *SyncClient) RequestL2Range(ctx context.Context, start, end eth.L2BlockRef) error {
// Drain previous requests now that we have new information
for len(s.requests) > 0 {
select { // in case requests is being read at the same time, don't block on draining it.
......@@ -98,11 +99,23 @@ func (s *SyncClient) RequestL2Range(ctx context.Context, start, end uint64) erro
}
}
endNum := end.Number
if end == (eth.L2BlockRef{}) {
n, err := s.rollupCfg.TargetBlockNumber(uint64(time.Now().Unix()))
if err != nil {
return err
}
if n <= start.Number {
return nil
}
endNum = n
}
// TODO(CLI-3635): optimize the by-range fetching with the Engine API payloads-by-range method.
s.log.Info("Scheduling to fetch missing payloads from backup RPC", "start", start, "end", end, "size", end-start)
s.log.Info("Scheduling to fetch trailing missing payloads from backup RPC", "start", start, "end", endNum, "size", endNum-start.Number-1)
for i := start; i < end; i++ {
for i := start.Number + 1; i < endNum; i++ {
select {
case s.requests <- i:
case <-ctx.Done():
......
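Caller-side, the driver's gap check shown earlier feeds this method directly; a hedged sketch of that hand-off, with illustrative variable names:
// head is the current unsafe head (exclusive start); target is the first
// queued-up payload, or a zeroed ref if nothing is queued.
if err := syncClient.RequestL2Range(ctx, head, target); err != nil {
	log.Warn("failed to schedule backup-RPC sync", "err", err)
}
// A zeroed target makes RequestL2Range derive endNum from TargetBlockNumber
// at the current wallclock time, as the body above shows.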
......@@ -152,11 +152,10 @@ func NewL2OutputSubmitterConfigFromCLIConfig(cfg CLIConfig, l log.Logger, m metr
return nil, err
}
txManagerConfig, err := txmgr.NewConfig(cfg.TxMgrConfig, l)
txManager, err := txmgr.NewSimpleTxManager("proposer", l, m, cfg.TxMgrConfig)
if err != nil {
return nil, err
}
txManager := txmgr.NewSimpleTxManager("proposer", l, m, txManagerConfig)
// Connect to L1 and L2 providers. Perform these last since they are the most expensive.
ctx := context.Background()
......@@ -173,7 +172,7 @@ func NewL2OutputSubmitterConfigFromCLIConfig(cfg CLIConfig, l log.Logger, m metr
return &Config{
L2OutputOracleAddr: l2ooAddress,
PollInterval: cfg.PollInterval,
NetworkTimeout: txManagerConfig.NetworkTimeout,
NetworkTimeout: cfg.TxMgrConfig.NetworkTimeout,
L1Client: l1Client,
RollupClient: rollupClient,
AllowNonFinalized: cfg.AllowNonFinalized,
......@@ -329,9 +328,8 @@ func (l *L2OutputSubmitter) sendTransaction(ctx context.Context, output *eth.Out
}
receipt, err := l.txMgr.Send(ctx, txmgr.TxCandidate{
TxData: data,
To: l.l2ooContractAddr,
To: &l.l2ooContractAddr,
GasLimit: 0,
From: l.txMgr.From(),
})
if err != nil {
return err
......
......@@ -3,6 +3,7 @@ package txmgr
import (
"context"
"errors"
"fmt"
"math/big"
"time"
......@@ -81,7 +82,7 @@ func CLIFlags(envPrefix string) []cli.Flag {
cli.DurationFlag{
Name: ResubmissionTimeoutFlagName,
Usage: "Duration we will wait before resubmitting a transaction to L1",
Value: 30 * time.Second,
Value: 48 * time.Second,
EnvVar: opservice.PrefixEnvVar(envPrefix, "RESUBMISSION_TIMEOUT"),
},
cli.DurationFlag{
......@@ -105,7 +106,7 @@ func CLIFlags(envPrefix string) []cli.Flag {
cli.DurationFlag{
Name: ReceiptQueryIntervalFlagName,
Usage: "Frequency to poll for receipts",
Value: 30 * time.Second,
Value: 12 * time.Second,
EnvVar: opservice.PrefixEnvVar(envPrefix, "TXMGR_RECEIPT_QUERY_INTERVAL"),
},
}, client.CLIFlags(envPrefix)...)
......@@ -177,21 +178,21 @@ func ReadCLIConfig(ctx *cli.Context) CLIConfig {
func NewConfig(cfg CLIConfig, l log.Logger) (Config, error) {
if err := cfg.Check(); err != nil {
return Config{}, err
return Config{}, fmt.Errorf("invalid config: %w", err)
}
ctx, cancel := context.WithTimeout(context.Background(), cfg.NetworkTimeout)
defer cancel()
l1, err := ethclient.DialContext(ctx, cfg.L1RPCURL)
if err != nil {
return Config{}, err
return Config{}, fmt.Errorf("could not dial eth client: %w", err)
}
ctx, cancel = context.WithTimeout(context.Background(), cfg.NetworkTimeout)
defer cancel()
chainID, err := l1.ChainID(ctx)
if err != nil {
return Config{}, err
return Config{}, fmt.Errorf("could not dial fetch L1 chain ID: %w", err)
}
// Allow backwards compatible ways of specifying the HD path
......@@ -204,7 +205,7 @@ func NewConfig(cfg CLIConfig, l log.Logger) (Config, error) {
signerFactory, from, err := opcrypto.SignerFactoryFromConfig(l, cfg.PrivateKey, cfg.Mnemonic, hdPath, cfg.SignerCLIConfig)
if err != nil {
return Config{}, err
return Config{}, fmt.Errorf("could not init signer: %w", err)
}
return Config{
......
......@@ -87,22 +87,20 @@ type SimpleTxManager struct {
}
// NewSimpleTxManager initializes a new SimpleTxManager with the passed Config.
func NewSimpleTxManager(name string, l log.Logger, m metrics.TxMetricer, cfg Config) *SimpleTxManager {
if cfg.NumConfirmations == 0 {
panic("txmgr: NumConfirmations cannot be zero")
}
if cfg.NetworkTimeout == 0 {
cfg.NetworkTimeout = 2 * time.Second
func NewSimpleTxManager(name string, l log.Logger, m metrics.TxMetricer, cfg CLIConfig) (*SimpleTxManager, error) {
conf, err := NewConfig(cfg, l)
if err != nil {
return nil, err
}
return &SimpleTxManager{
chainID: cfg.ChainID,
chainID: conf.ChainID,
name: name,
cfg: cfg,
backend: cfg.Backend,
cfg: conf,
backend: conf.Backend,
l: l.New("service", name),
metr: m,
}
}, nil
}
func (m *SimpleTxManager) From() common.Address {
......@@ -114,12 +112,10 @@ func (m *SimpleTxManager) From() common.Address {
type TxCandidate struct {
// TxData is the transaction data to be used in the constructed tx.
TxData []byte
// To is the recipient of the constructed tx.
To common.Address
// To is the recipient of the constructed tx. Nil means contract creation.
To *common.Address
// GasLimit is the gas limit to be used in the constructed tx.
GasLimit uint64
// From is the sender (or `from`) of the constructed tx.
From common.Address
}
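With To now a pointer, the two candidate shapes look like this; the address, calldata, and initCode values are placeholders, not from the diff:
inbox := common.HexToAddress("0x42000000000000000000000000000000000000ff")
call := TxCandidate{To: &inbox, TxData: calldata, GasLimit: 100_000}
create := TxCandidate{To: nil, TxData: initCode} // nil To: contract creation; zero GasLimit triggers estimation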
// Send is used to publish a transaction with incrementally higher gas prices
......@@ -159,7 +155,7 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (*
// Fetch the sender's nonce from the latest known block (nil `blockNumber`)
childCtx, cancel := context.WithTimeout(ctx, m.cfg.NetworkTimeout)
defer cancel()
nonce, err := m.backend.NonceAt(childCtx, candidate.From, nil)
nonce, err := m.backend.NonceAt(childCtx, m.cfg.From, nil)
if err != nil {
return nil, fmt.Errorf("failed to get nonce: %w", err)
}
......@@ -167,13 +163,13 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (*
rawTx := &types.DynamicFeeTx{
ChainID: m.chainID,
Nonce: nonce,
To: &candidate.To,
To: candidate.To,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Data: candidate.TxData,
}
m.l.Info("creating tx", "to", rawTx.To, "from", candidate.From)
m.l.Info("creating tx", "to", rawTx.To, "from", m.cfg.From)
// If the gas limit is set, we can use that as the gas
if candidate.GasLimit != 0 {
......@@ -181,8 +177,8 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (*
} else {
// Calculate the intrinsic gas for the transaction
gas, err := m.backend.EstimateGas(ctx, ethereum.CallMsg{
From: candidate.From,
To: &candidate.To,
From: m.cfg.From,
To: candidate.To,
GasFeeCap: gasFeeCap,
GasTipCap: gasTipCap,
Data: rawTx.Data,
......@@ -195,7 +191,7 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (*
ctx, cancel = context.WithTimeout(ctx, m.cfg.NetworkTimeout)
defer cancel()
return m.cfg.Signer(ctx, candidate.From, types.NewTx(rawTx))
return m.cfg.Signer(ctx, m.cfg.From, types.NewTx(rawTx))
}
// send submits the same transaction several times with increasing gas prices as necessary.
......
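For callers, the constructor change collapses the old two-step setup (NewConfig, then NewSimpleTxManager) into a single fallible call; a sketch with illustrative names:
txMgr, err := txmgr.NewSimpleTxManager("example", l, m, cfg.TxMgrConfig)
if err != nil {
	return fmt.Errorf("failed to create tx manager: %w", err)
}
// txMgr is ready to use; candidate is a TxCandidate as sketched above.
receipt, err := txMgr.Send(ctx, candidate)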
......@@ -41,7 +41,14 @@ func newTestHarnessWithConfig(t *testing.T, cfg Config) *testHarness {
g := newGasPricer(3)
backend := newMockBackend(g)
cfg.Backend = backend
mgr := NewSimpleTxManager("TEST", testlog.Logger(t, log.LvlCrit), &metrics.NoopTxMetrics{}, cfg)
mgr := &SimpleTxManager{
chainID: cfg.ChainID,
name: "TEST",
cfg: cfg,
backend: cfg.Backend,
l: testlog.Logger(t, log.LvlCrit),
metr: &metrics.NoopTxMetrics{},
}
return &testHarness{
cfg: cfg,
......@@ -60,11 +67,9 @@ func newTestHarness(t *testing.T) *testHarness {
// createTxCandidate creates a mock [TxCandidate].
func (h testHarness) createTxCandidate() TxCandidate {
inbox := common.HexToAddress("0x42000000000000000000000000000000000000ff")
sender := common.HexToAddress("0xdeadbeef")
return TxCandidate{
To: inbox,
To: &inbox,
TxData: []byte{0x00, 0x01, 0x02},
From: sender,
GasLimit: uint64(1337),
}
}
......@@ -593,18 +598,13 @@ func TestWaitMinedMultipleConfs(t *testing.T) {
require.Equal(t, txHash, receipt.TxHash)
}
// TestManagerPanicOnZeroConfs ensures that the NewSimpleTxManager will panic
// TestManagerErrsOnZeroConfs ensures that the NewSimpleTxManager will error
// when attempting to configure with NumConfirmations set to zero.
func TestManagerPanicOnZeroConfs(t *testing.T) {
func TestManagerErrsOnZeroConfs(t *testing.T) {
t.Parallel()
defer func() {
if r := recover(); r == nil {
t.Fatal("NewSimpleTxManager should panic when using zero conf")
}
}()
_ = newTestHarnessWithConfig(t, configWithNumConfs(0))
_, err := NewSimpleTxManager("TEST", testlog.Logger(t, log.LvlCrit), &metrics.NoopTxMetrics{}, CLIConfig{})
require.Error(t, err)
}
// failingBackend implements ReceiptSource, returning a failure on the
......
......@@ -12,7 +12,7 @@ import { FeeVault } from "../universal/FeeVault.sol";
*/
contract L1FeeVault is FeeVault, Semver {
/**
* @custom:semver 1.0.0
* @custom:semver 1.1.0
*
* @param _recipient Address that will receive the accumulated fees.
*/
......
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { ethers } from 'ethers'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
await deploy({
hre,
name: 'L1Block',
args: [],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'DEPOSITOR_ACCOUNT',
ethers.utils.getAddress('0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001')
)
},
})
}
deployFn.tags = ['L1BlockImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { ethers } from 'ethers'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
const Artifact__L1CrossDomainMessenger = await hre.companionNetworks[
'l1'
].deployments.get('L1CrossDomainMessengerProxy')
await deploy({
hre,
name: 'L2CrossDomainMessenger',
args: [Artifact__L1CrossDomainMessenger.address],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'OTHER_MESSENGER',
ethers.utils.getAddress(Artifact__L1CrossDomainMessenger.address)
)
},
})
}
deployFn.tags = ['L2CrossDomainMessengerImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { ethers } from 'ethers'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
const Artifact__L1StandardBridge = await hre.companionNetworks[
'l1'
].deployments.get('L1StandardBridgeProxy')
await deploy({
hre,
name: 'L2StandardBridge',
args: [Artifact__L1StandardBridge.address],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'OTHER_BRIDGE',
ethers.utils.getAddress(Artifact__L1StandardBridge.address)
)
},
})
}
deployFn.tags = ['L2StandardBridgeImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
await deploy({
hre,
name: 'L2ToL1MessagePasser',
args: [],
postDeployAction: async (contract) => {
await assertContractVariable(contract, 'MESSAGE_VERSION', 1)
},
})
}
deployFn.tags = ['L2ToL1MessagePasserImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { ethers } from 'ethers'
import { predeploys } from '../src/constants'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
const Artifact__L1ERC721Bridge = await hre.companionNetworks[
'l1'
].deployments.get('L1ERC721BridgeProxy')
await deploy({
hre,
name: 'L2ERC721Bridge',
args: [predeploys.L2CrossDomainMessenger, Artifact__L1ERC721Bridge.address],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'MESSENGER',
ethers.utils.getAddress(predeploys.L2CrossDomainMessenger)
)
await assertContractVariable(
contract,
'OTHER_BRIDGE',
ethers.utils.getAddress(Artifact__L1ERC721Bridge.address)
)
},
})
}
deployFn.tags = ['L2ERC721BridgeImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
await deploy({
hre,
name: 'GasPriceOracle',
args: [],
postDeployAction: async (contract) => {
await assertContractVariable(contract, 'DECIMALS', 6)
},
})
}
deployFn.tags = ['GasPriceOracle', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { ethers } from 'ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
const l1 = hre.network.companionNetworks['l1']
const deployConfig = hre.getDeployConfig(l1)
const sequencerFeeVaultRecipient = deployConfig.sequencerFeeVaultRecipient
if (sequencerFeeVaultRecipient === ethers.constants.AddressZero) {
throw new Error(`SequencerFeeVault RECIPIENT undefined`)
}
await deploy({
hre,
name: 'SequencerFeeVault',
args: [sequencerFeeVaultRecipient],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'RECIPIENT',
ethers.utils.getAddress(sequencerFeeVaultRecipient)
)
},
})
}
deployFn.tags = ['SequencerFeeVaultImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { ethers } from 'ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
const l1 = hre.network.companionNetworks['l1']
const deployConfig = hre.getDeployConfig(l1)
const baseFeeVaultRecipient = deployConfig.baseFeeVaultRecipient
if (baseFeeVaultRecipient === ethers.constants.AddressZero) {
throw new Error('BaseFeeVault RECIPIENT undefined')
}
await deploy({
hre,
name: 'BaseFeeVault',
args: [baseFeeVaultRecipient],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'RECIPIENT',
ethers.utils.getAddress(baseFeeVaultRecipient)
)
},
})
}
deployFn.tags = ['BaseFeeVaultImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { ethers } from 'ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
const l1 = hre.network.companionNetworks['l1']
const deployConfig = hre.getDeployConfig(l1)
const l1FeeVaultRecipient = deployConfig.l1FeeVaultRecipient
if (l1FeeVaultRecipient === ethers.constants.AddressZero) {
throw new Error('L1FeeVault RECIPIENT undefined')
}
await deploy({
hre,
name: 'L1FeeVault',
args: [l1FeeVaultRecipient],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'RECIPIENT',
ethers.utils.getAddress(l1FeeVaultRecipient)
)
},
})
}
deployFn.tags = ['L1FeeVaultImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { ethers } from 'ethers'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
import { predeploys } from '../src/constants'
const deployFn: DeployFunction = async (hre) => {
await deploy({
hre,
name: 'OptimismMintableERC20Factory',
args: [predeploys.L2StandardBridge],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'BRIDGE',
ethers.utils.getAddress(predeploys.L2StandardBridge)
)
},
})
}
deployFn.tags = ['OptimismMintableERC20FactoryImpl', 'l2']
export default deployFn
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { ethers } from 'ethers'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import { assertContractVariable, deploy } from '../src/deploy-utils'
import { predeploys } from '../src/constants'
const deployFn: DeployFunction = async (hre) => {
const OptimismMintableERC721Factory = await hre.ethers.getContractAt(
'OptimismMintableERC721Factory',
predeploys.OptimismMintableERC721Factory
)
const remoteChainId = await OptimismMintableERC721Factory.REMOTE_CHAIN_ID()
await deploy({
hre,
name: 'OptimismMintableERC721Factory',
args: [predeploys.L2StandardBridge, remoteChainId],
postDeployAction: async (contract) => {
await assertContractVariable(
contract,
'BRIDGE',
ethers.utils.getAddress(predeploys.L2StandardBridge)
)
await assertContractVariable(contract, 'REMOTE_CHAIN_ID', remoteChainId)
},
})
}
deployFn.tags = ['OptimismMintableERC721FactoryImpl', 'l2']
export default deployFn
......@@ -32,6 +32,7 @@
"storage-snapshot": "./scripts/storage-snapshot.sh",
"validate-spacers": "hardhat compile && hardhat validate-spacers",
"slither": "./scripts/slither.sh",
"slither:triage": "TRIAGE_MODE=1 ./scripts/slither.sh",
"clean": "rm -rf ./dist ./artifacts ./forge-artifacts ./cache ./tsconfig.tsbuildinfo ./tsconfig.build.tsbuildinfo ./src/contract-artifacts.ts ./test-case-generator/fuzz",
"lint:ts:check": "eslint . --max-warnings=0",
"lint:forge-tests:check": "ts-node scripts/forge-test-names.ts",
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -8,6 +8,4 @@ import './solidity'
import './accounts'
import './check-l2'
import './update-dynamic-oracle-config'
import './wait-for-final-batch'
import './wait-for-final-deposit'
import './generate-deploy-config'
This diff is collapsed.