Commit cb21d3fa authored by Matthew Slipper, committed by GitHub

Merge pull request #3170 from ethereum-optimism/develop

Develop -> Master
parents 907ef2ee b6f4bfcf
---
'@eth-optimism/contracts-bedrock': patch
---
Fix slither script
---
'@eth-optimism/contracts-bedrock': patch
---
Update genesis-l2 task to set immutables in the bytecode
---
'@eth-optimism/contracts-bedrock': patch
---
Fix build for smaller package
---
'@eth-optimism/contracts-bedrock': patch
---
Use the same initializable across codebase
---
'@eth-optimism/contracts-bedrock': patch
---
Update @foundry-rs/hardhat-forge@0.1.16
---
'@eth-optimism/contracts-bedrock': patch
---
Fix build on latest foundry
---
'@eth-optimism/core-utils': patch
---
Implement basic OpNodeProvider
---
'@eth-optimism/hardhat-deploy-config': patch
---
Support JSON-formatted deploy configs
---
'@eth-optimism/proxyd': minor
---
Add frontend rate limiting
---
'@eth-optimism/contracts-bedrock': patch
---
Emit an extra event when withdrawals are initiated to make chainops easier
---
'@eth-optimism/proxyd': patch
---
Unwrap single RPC batches
---
'@eth-optimism/contracts-bedrock': patch
---
Fix portal deployment to have L2OutputOracle proxy address
---
'@eth-optimism/contracts-bedrock': patch
---
Add watch task
---
'@eth-optimism/contracts-bedrock': patch
---
Add hardhat forge contract verification support
---
'@eth-optimism/proxyd': patch
---
Parameterize full RPC request logging
---
'@eth-optimism/contracts-bedrock': patch
---
Update hardhat-forge dep, remove dead deps
---
'@eth-optimism/contracts-bedrock': patch
---
bedrock-goerli-96f44f79 deployment
---
'@eth-optimism/foundry': patch
---
Update to 64fe4acc97e6d76551cea7598c201f05ecd65639
---
'@eth-optimism/contracts-bedrock': patch
'@eth-optimism/core-utils': patch
---
Update deposit transaction type
---
'@eth-optimism/integration-tests': patch
---
Modularize the integration tests so they no longer depend on the messenger API
---
'@eth-optimism/contracts-bedrock': patch
---
Fix typechain exports
---
'@eth-optimism/sdk': minor
---
Add wstETH to sdk
---
'@eth-optimism/sdk': minor
'@eth-optimism/contracts-bedrock': patch
'@eth-optimism/core-utils': patch
---
Updates the SDK to be compatible with Bedrock (via the "bedrock: true" constructor param). Updates the build pipeline for contracts-bedrock to export a properly formatted dist folder that matches our other packages.
......@@ -15,7 +15,7 @@ jobs:
name: Check if we should run
command: |
shopt -s inherit_errexit
CHANGED=$(check-changed "packages/")
CHANGED=$(check-changed "(op-bindings|packages/)")
if [[ "$CHANGED" = "FALSE" ]]; then
circleci step halt
fi
......@@ -141,7 +141,7 @@ jobs:
name: gas snapshot
command: |
forge --version
forge snapshot --check || exit 0
forge snapshot --check
environment:
FOUNDRY_PROFILE: ci
working_directory: packages/contracts-bedrock
......@@ -277,14 +277,19 @@ jobs:
image: ubuntu-2004:202111-02
steps:
- checkout
- run:
name: yarn dev deps # todo: what's the best way to pull in the dependencies for linting? yarn install above is using production env without dev dependencies
command: yarn install --production=false
- run:
name: specs toc
command: yarn lint:specs:toc && git diff --exit-code
- run:
name: markdown lint
command: |
docker run -v `pwd`:/workdir davidanson/markdownlint-cli2:0.4.0 "op-node/README.md" "./specs/**/*.md" "#**/node_modules"
command: yarn lint:specs:check
- run:
name: link lint
command: |
docker run --init -it -v `pwd`:/input lycheeverse/lychee --verbose --no-progress --exclude-loopback --exclude twitter.com --exclude-mail /input/README.md "/input/specs/**/*.md" "/input/meta/**/*.md" "/input/op-node/**/*.md" || exit 0
docker run --init -it -v `pwd`:/input lycheeverse/lychee --verbose --no-progress --exclude-loopback --exclude twitter.com --exclude-mail /input/README.md "/input/specs/**/*.md"
fuzz-op-node:
docker:
......@@ -449,17 +454,25 @@ jobs:
make devnet-up
- run:
name: Do a deposit
no_output_timeout: 5m
command: |
npx hardhat compile
npx hardhat deposit \
timeout 5m npx hardhat deposit \
--to 0xB79f76EF2c5F0286176833E7B2eEe103b1CC3244 \
--amount-eth 1 \
--private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 \
--network devnetL1
working_directory: packages/contracts-bedrock/contracts-bedrock
working_directory: packages/contracts-bedrock
- run:
name: Deposit ERC20 through the bridge
command: timeout 5m npx hardhat deposit --network devnetL1
working_directory: packages/sdk
- run:
name: Check the status
command: npx hardhat check-op-node
working_directory: packages/contracts-bedrock
- run:
command: echo "Deposit complete."
name: Check L2 Config
command: npx hardhat check-l2-config
working_directory: packages/contracts-bedrock
integration-tests:
machine:
......@@ -691,20 +704,6 @@ workflows:
docker_context: ops/docker/hardhat
context:
- optimism
- docker-publish:
name: go-builder-release
docker_file: ops/docker/go-builder/Dockerfile
docker_tags: ethereumoptimism/go-builder:nightly
docker_context: .
context:
- optimism
- docker-publish:
name: js-builder-release
docker_file: ops/docker/js-builder/Dockerfile
docker_tags: ethereumoptimism/js-builder:nightly
docker_context: .
context:
- optimism
- docker-publish:
name: proxyd-release
docker_file: proxyd/Dockerfile
......
......@@ -93,3 +93,7 @@ semgrep:
$(eval DEV_REF := $(shell git rev-parse develop))
SEMGREP_REPO_NAME=ethereum-optimism/optimism semgrep ci --baseline-commit=$(DEV_REF)
.PHONY: semgrep
clean-node-modules:
rm -rf node_modules
rm -rf packages/**/node_modules
......@@ -58,7 +58,7 @@ root
├── <a href="./teleportr">teleportr</a>: Bridge for teleporting ETH between L1 and L2 at low cost
~~ BEDROCK upgrade - Not production-ready yet, part of next major upgrade ~~
├── <a href="./contracts-bedrock">contracts-bedrock</a>: Bedrock smart contracts. To be merged with ./packages/contracts.
├── <a href="./packages/contracts-bedrock">packages/contracts-bedrock</a>: Bedrock smart contracts. To be merged with ./packages/contracts.
├── <a href="./op-bindings">op-bindings</a>: Go bindings for Bedrock smart contracts.
├── <a href="./op-batcher">op-batcher</a>: L2-Batch Submitter, submits bundles of batches to L1
├── <a href="./op-e2e">op-e2e</a>: End-to-End testing of all bedrock components in Go
......
steps:
- name: 'gcr.io/kaniko-project/executor:latest'
args:
- --destination=us-central1-docker.pkg.dev/$PROJECT_ID/images/op-node:$_TAG
- --destination=us-central1-docker.pkg.dev/$PROJECT_ID/images/op-node:$COMMIT_SHA
- --dockerfile=op-node/Dockerfile
- --cache=true
- --cache-ttl=48h
waitFor: ['-']
- name: 'gcr.io/kaniko-project/executor:latest'
args:
- --destination=us-central1-docker.pkg.dev/$PROJECT_ID/images/op-batcher:$_TAG
- --destination=us-central1-docker.pkg.dev/$PROJECT_ID/images/op-batcher:$COMMIT_SHA
- --dockerfile=./op-batcher/Dockerfile
- --cache=true
- --cache-ttl=48h
waitFor: ['-']
- name: 'gcr.io/kaniko-project/executor:latest'
args:
- --destination=us-central1-docker.pkg.dev/$PROJECT_ID/images/op-proposer:$_TAG
- --destination=us-central1-docker.pkg.dev/$PROJECT_ID/images/op-proposer:$COMMIT_SHA
- --dockerfile=./op-proposer/Dockerfile
- --cache=true
- --cache-ttl=48h
waitFor: ['-']
- name: 'gcr.io/kaniko-project/executor:latest'
args:
- --destination=us-central1-docker.pkg.dev/$PROJECT_ID/images/deployer-bedrock:$_TAG
- --destination=us-central1-docker.pkg.dev/$PROJECT_ID/images/deployer-bedrock:$COMMIT_SHA
- --dockerfile=./ops/docker/Dockerfile.packages
- --target=deployer-bedrock
- --cache=true
- --cache-ttl=48h
waitFor: ['-']
options:
machineType: N1_HIGHCPU_32
\ No newline at end of file
......@@ -18,7 +18,7 @@ use (
./state-surgery
)
replace github.com/ethereum/go-ethereum v1.10.20 => github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d
replace github.com/ethereum/go-ethereum v1.10.21 => github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e
// For local debugging:
//replace github.com/ethereum/go-ethereum v1.10.20 => ../go-ethereum
//replace github.com/ethereum/go-ethereum v1.10.21 => ../go-ethereum
......@@ -15,6 +15,7 @@ cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJW
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
......
......@@ -2,6 +2,7 @@
import { providers } from 'ethers'
import { applyL1ToL2Alias } from '@eth-optimism/core-utils'
import { asL2Provider } from '@eth-optimism/sdk'
import { getContractInterface } from '@eth-optimism/contracts'
/* Imports: External */
import { expect } from './shared/setup'
......@@ -47,11 +48,9 @@ describe('Queue Ingestion', () => {
receipt.remoteTx.hash
)) as any
const params =
env.messenger.contracts.l2.L2CrossDomainMessenger.interface.decodeFunctionData(
'relayMessage',
l2Tx.data
)
const params = getContractInterface(
'L2CrossDomainMessenger'
).decodeFunctionData('relayMessage', l2Tx.data)
expect(params._sender.toLowerCase()).to.equal(
env.l1Wallet.address.toLowerCase()
......
......@@ -3,11 +3,17 @@ package op_batcher
import (
"bytes"
"context"
"crypto/ecdsa"
"errors"
"fmt"
"io"
"math/big"
"net"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
......@@ -76,6 +82,26 @@ func Main(version string) func(ctx *cli.Context) error {
defer batchSubmitter.Stop()
l.Info("Batch Submitter started")
if cfg.PprofEnabled {
var srv http.Server
srv.Addr = net.JoinHostPort(cfg.PprofAddr, cfg.PprofPort)
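// No Handler is set on srv, so it falls back to http.DefaultServeMux, where the
// blank `net/http/pprof` import above registers the /debug/pprof/ handlers.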
// Start pprof server + register its shutdown
go func() {
l.Info("pprof server started", "addr", srv.Addr)
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
l.Error("error in pprof server", "err", err)
} else {
l.Info("pprof server shutting down")
}
}()
defer func() {
shutCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
err := srv.Shutdown(shutCtx)
l.Info("pprof server shut down", "err", err)
}()
}
interruptChannel := make(chan os.Signal, 1)
signal.Notify(interruptChannel, []os.Signal{
......@@ -112,6 +138,15 @@ type BatchSubmitter struct {
func NewBatchSubmitter(cfg Config, l log.Logger) (*BatchSubmitter, error) {
ctx := context.Background()
var err error
var sequencerPrivKey *ecdsa.PrivateKey
var addr common.Address
if cfg.PrivateKey != "" && cfg.Mnemonic != "" {
return nil, errors.New("cannot specify both a private key and a mnemonic")
}
if cfg.PrivateKey == "" {
// Parse wallet private key that will be used to submit L2 txs to the batch
// inbox address.
wallet, err := hdwallet.NewFromMnemonic(cfg.Mnemonic)
......@@ -124,15 +159,23 @@ func NewBatchSubmitter(cfg Config, l log.Logger) (*BatchSubmitter, error) {
Path: cfg.SequencerHDPath,
},
}
addr, err := wallet.Address(acc)
addr, err = wallet.Address(acc)
if err != nil {
return nil, err
}
sequencerPrivKey, err := wallet.PrivateKey(acc)
sequencerPrivKey, err = wallet.PrivateKey(acc)
if err != nil {
return nil, err
}
} else {
sequencerPrivKey, err = crypto.HexToECDSA(strings.TrimPrefix(cfg.PrivateKey, "0x"))
if err != nil {
return nil, err
}
addr = crypto.PubkeyToAddress(sequencerPrivKey.PublicKey)
}
batchInboxAddress, err := parseAddress(cfg.SequencerBatchInboxAddress)
if err != nil {
......
......@@ -56,6 +56,9 @@ type Config struct {
// batched submission of sequencer transactions.
SequencerHDPath string
// PrivateKey is the private key used to submit sequencer transactions.
PrivateKey string
// SequencerBatchInboxAddress is the address in which to send batch
// transactions.
SequencerBatchInboxAddress string
......@@ -68,6 +71,11 @@ type Config struct {
// LogTerminal if true, will log to stdout in terminal format. Otherwise the
// output will be in JSON format.
LogTerminal bool
// Flags for the pprof server
PprofEnabled bool
PprofAddr string
PprofPort string
}
// NewConfig parses the Config from the provided flags or environment variables.
......@@ -86,9 +94,13 @@ func NewConfig(ctx *cli.Context) Config {
ResubmissionTimeout: ctx.GlobalDuration(flags.ResubmissionTimeoutFlag.Name),
Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name),
SequencerHDPath: ctx.GlobalString(flags.SequencerHDPathFlag.Name),
PrivateKey: ctx.GlobalString(flags.PrivateKeyFlag.Name),
SequencerBatchInboxAddress: ctx.GlobalString(flags.SequencerBatchInboxAddressFlag.Name),
/* Optional Flags */
LogLevel: ctx.GlobalString(flags.LogLevelFlag.Name),
LogTerminal: ctx.GlobalBool(flags.LogTerminalFlag.Name),
PprofEnabled: ctx.GlobalBool(flags.PprofEnabledFlag.Name),
PprofAddr: ctx.GlobalString(flags.PprofAddrFlag.Name),
PprofPort: ctx.GlobalString(flags.PprofPortFlag.Name),
}
}
......@@ -82,16 +82,19 @@ var (
Name: "mnemonic",
Usage: "The mnemonic used to derive the wallets for either the " +
"sequencer or the l2output",
Required: true,
EnvVar: prefixEnvVar("MNEMONIC"),
}
SequencerHDPathFlag = cli.StringFlag{
Name: "sequencer-hd-path",
Usage: "The HD path used to derive the sequencer wallet from the " +
"mnemonic. The mnemonic flag must also be set.",
Required: true,
EnvVar: prefixEnvVar("SEQUENCER_HD_PATH"),
}
PrivateKeyFlag = cli.StringFlag{
Name: "private-key",
Usage: "The private key to use with the l2output wallet. Must not be used with mnemonic.",
EnvVar: prefixEnvVar("PRIVATE_KEY"),
}
SequencerBatchInboxAddressFlag = cli.StringFlag{
Name: "sequencer-batch-inbox-address",
Usage: "L1 Address to receive batch transactions",
......@@ -113,6 +116,23 @@ var (
"in JSON format.",
EnvVar: prefixEnvVar("LOG_TERMINAL"),
}
PprofEnabledFlag = cli.BoolFlag{
Name: "pprof.enabled",
Usage: "Enable the pprof server",
EnvVar: prefixEnvVar("PPROF_ENABLED"),
}
PprofAddrFlag = cli.StringFlag{
Name: "pprof.addr",
Usage: "pprof listening address",
Value: "0.0.0.0",
EnvVar: prefixEnvVar("PPROF_ADDR"),
}
PprofPortFlag = cli.IntFlag{
Name: "pprof.port",
Usage: "pprof listening port",
Value: 6060,
EnvVar: prefixEnvVar("PPROF_PORT"),
}
)
var requiredFlags = []cli.Flag{
......@@ -126,14 +146,18 @@ var requiredFlags = []cli.Flag{
NumConfirmationsFlag,
SafeAbortNonceTooLowCountFlag,
ResubmissionTimeoutFlag,
MnemonicFlag,
SequencerHDPathFlag,
SequencerBatchInboxAddressFlag,
}
var optionalFlags = []cli.Flag{
MnemonicFlag,
SequencerHDPathFlag,
PrivateKeyFlag,
LogLevelFlag,
LogTerminalFlag,
PprofEnabledFlag,
PprofAddrFlag,
PprofPortFlag,
}
// Flags contains the list of configuration options available to the binary.
......
......@@ -5,7 +5,7 @@ go 1.18
require (
github.com/ethereum-optimism/optimism/op-node v0.3.0
github.com/ethereum-optimism/optimism/op-proposer v0.3.0
github.com/ethereum/go-ethereum v1.10.20
github.com/ethereum/go-ethereum v1.10.21
github.com/miguelmota/go-ethereum-hdwallet v0.1.1
github.com/urfave/cli v1.22.5
)
......@@ -54,4 +54,4 @@ require (
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
)
replace github.com/ethereum/go-ethereum v1.10.20 => github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d
replace github.com/ethereum/go-ethereum v1.10.21 => github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e
......@@ -153,8 +153,8 @@ github.com/ethereum-optimism/optimism/op-node v0.3.0 h1:jep/cbIbP7fjBSAR48yk5NJV
github.com/ethereum-optimism/optimism/op-node v0.3.0/go.mod h1:iF9AhYjr8jNeoCDNP/Vs/ywQ2USZU5L66AxZbSAUi0E=
github.com/ethereum-optimism/optimism/op-proposer v0.3.0 h1:K1ipZt3TLD0BJi7tKOmx8tCLXj9i4f4baBIhbPmUxk4=
github.com/ethereum-optimism/optimism/op-proposer v0.3.0/go.mod h1:GcQ9VCWz2zEVexecq5IYo/2eadK/y7IBOEfx4YV1QJk=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d h1:w0DBXhp0sv0bWRDOCA/Y6yHOALU7qLLLf5/kE3YfFr4=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d/go.mod h1:m2m08SAQ8XB0VcVBoDg9n74Dw5PUMl3hzv1NXVBFPfg=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e h1:hz+iywXjnqz6xA3lTLvtNL9OZyX76pS5SER4kZBmQLs=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/ethereum/go-ethereum v1.10.4/go.mod h1:nEE0TP5MtxGzOMd7egIrbPJMQBnhVU3ELNxhBglIzhg=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
......
......@@ -18,22 +18,22 @@ gas-price-oracle-deployed: gas-price-oracle-bindings
./gen_deployed_bytecode.sh GasPriceOracle bindings
l1block-bindings:
./gen_bindings.sh L1Block bindings
./gen_bindings.sh contracts/L2/L1Block.sol:L1Block bindings
l2-to-l1-message-passer-bindings:
./gen_bindings.sh L2ToL1MessagePasser bindings
./gen_bindings.sh contracts/L2/L2ToL1MessagePasser.sol:L2ToL1MessagePasser bindings
optimism-portal-bindings:
./gen_bindings.sh OptimismPortal bindings
./gen_bindings.sh contracts/L1/OptimismPortal.sol:OptimismPortal bindings
l2-output-oracle-bindings:
./gen_bindings.sh L2OutputOracle bindings
./gen_bindings.sh contracts/L1/L2OutputOracle.sol:L2OutputOracle bindings
gas-price-oracle-bindings:
./gen_bindings.sh GasPriceOracle bindings
./gen_bindings.sh contracts/L2/GasPriceOracle.sol:GasPriceOracle bindings
address-manager-bindings:
./gen_bindings.sh AddressManager bindings
./gen_bindings.sh contracts/legacy/AddressManager.sol:AddressManager bindings
mkdir:
mkdir -p bin bindings
......
......@@ -2,4 +2,4 @@
// This file is a generated binding and any manual changes will be lost.
package bindings
var L2ToL1MessagePasserDeployedBin = "0x60806040526004361061005e5760003560e01c806382e3702d1161004357806382e3702d146100c7578063affed0e014610107578063c2b3e5ac1461012b57600080fd5b806344df8e701461008757806354fd4d501461009c57600080fd5b366100825761008033620186a060405180602001604052806000815250610139565b005b600080fd5b34801561009357600080fd5b50610080610242565b3480156100a857600080fd5b506100b161027a565b6040516100be919061055c565b60405180910390f35b3480156100d357600080fd5b506100f76100e2366004610576565b60006020819052908152604090205460ff1681565b60405190151581526020016100be565b34801561011357600080fd5b5061011d60015481565b6040519081526020016100be565b6100806101393660046105be565b600061019e6040518060c0016040528060015481526020013373ffffffffffffffffffffffffffffffffffffffff1681526020018673ffffffffffffffffffffffffffffffffffffffff1681526020013481526020018581526020018481525061031d565b6000818152602081905260409081902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600190811790915554905191925073ffffffffffffffffffffffffffffffffffffffff8616913391907f87bf7b546c8de873abb0db5b579ec131f8d0cf5b14f39933551cf9ced23a61369061022c903490899089906106c2565b60405180910390a4505060018054810190555050565b4761024c8161036a565b60405181907f7967de617a5ac1cc7eba2d6f37570a0135afa950d8bb77cdd35f0d0b4e85a16f90600090a250565b60606102a57f0000000000000000000000000000000000000000000000000000000000000000610399565b6102ce7f0000000000000000000000000000000000000000000000000000000000000000610399565b6102f77f0000000000000000000000000000000000000000000000000000000000000000610399565b604051602001610309939291906106ea565b604051602081830303815290604052905090565b80516020808301516040808501516060860151608087015160a0880151935160009761034d979096959101610760565b604051602081830303815290604052805190602001209050919050565b80604051610377906104d6565b6040518091039082f0905080158015610394573d6000803e3d6000fd5b505050565b6060816000036103dc57505060408051808201909152600181527f3000000000000000000000000000000000000000000000000000000000000000602082015290565b8160005b811561040657806103f0816107e6565b91506103ff9050600a8361084d565b91506103e0565b60008167ffffffffffffffff8111156104215761042161058f565b6040519080825280601f01601f19166020018201604052801561044b576020820181803683370190505b5090505b84156104ce57610460600183610861565b915061046d600a86610878565b61047890603061088c565b60f81b81838151811061048d5761048d6108a4565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053506104c7600a8661084d565b945061044f565b949350505050565b6008806108d483390190565b60005b838110156104fd5781810151838201526020016104e5565b8381111561050c576000848401525b50505050565b6000815180845261052a8160208601602086016104e2565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60208152600061056f6020830184610512565b9392505050565b60006020828403121561058857600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000806000606084860312156105d357600080fd5b833573ffffffffffffffffffffffffffffffffffffffff811681146105f757600080fd5b925060208401359150604084013567ffffffffffffffff8082111561061b57600080fd5b818601915086601f83011261062f57600080fd5b8135818111156106415761064161058f565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156106875761068761058f565b816040528281528960208487010111156106a057600080fd5b8260208601602083013760006020848301015280955050505050509250925092565b8381528260208201526060604082015260006106e160608301846
10512565b95945050505050565b600084516106fc8184602089016104e2565b80830190507f2e000000000000000000000000000000000000000000000000000000000000008082528551610738816001850160208a016104e2565b600192019182015283516107538160028401602088016104e2565b0160020195945050505050565b868152600073ffffffffffffffffffffffffffffffffffffffff808816602084015280871660408401525084606083015283608083015260c060a08301526107ab60c0830184610512565b98975050505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610817576108176107b7565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b60008261085c5761085c61081e565b500490565b600082821015610873576108736107b7565b500390565b6000826108875761088761081e565b500690565b6000821982111561089f5761089f6107b7565b500190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fdfe608060405230fffea164736f6c634300080f000a"
var L2ToL1MessagePasserDeployedBin = "0x60806040526004361061005e5760003560e01c806382e3702d1161004357806382e3702d146100c7578063affed0e014610107578063c2b3e5ac1461012b57600080fd5b806344df8e701461008757806354fd4d501461009c57600080fd5b366100825761008033620186a060405180602001604052806000815250610139565b005b600080fd5b34801561009357600080fd5b5061008061026d565b3480156100a857600080fd5b506100b16102a5565b6040516100be9190610587565b60405180910390f35b3480156100d357600080fd5b506100f76100e23660046105a1565b60006020819052908152604090205460ff1681565b60405190151581526020016100be565b34801561011357600080fd5b5061011d60015481565b6040519081526020016100be565b6100806101393660046105e9565b600061019e6040518060c0016040528060015481526020013373ffffffffffffffffffffffffffffffffffffffff1681526020018673ffffffffffffffffffffffffffffffffffffffff16815260200134815260200185815260200184815250610348565b6000818152602081905260409081902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600190811790915554905191925073ffffffffffffffffffffffffffffffffffffffff8616913391907f87bf7b546c8de873abb0db5b579ec131f8d0cf5b14f39933551cf9ced23a61369061022c903490899089906106ed565b60405180910390a460405181907f2ef6ceb1668fdd882b1f89ddd53a666b0c1113d14cf90c0fbf97c7b1ad880fbb90600090a2505060018054810190555050565b4761027781610395565b60405181907f7967de617a5ac1cc7eba2d6f37570a0135afa950d8bb77cdd35f0d0b4e85a16f90600090a250565b60606102d07f00000000000000000000000000000000000000000000000000000000000000006103c4565b6102f97f00000000000000000000000000000000000000000000000000000000000000006103c4565b6103227f00000000000000000000000000000000000000000000000000000000000000006103c4565b60405160200161033493929190610715565b604051602081830303815290604052905090565b80516020808301516040808501516060860151608087015160a0880151935160009761037897909695910161078b565b604051602081830303815290604052805190602001209050919050565b806040516103a290610501565b6040518091039082f09050801580156103bf573d6000803e3d6000fd5b505050565b60608160000361040757505060408051808201909152600181527f3000000000000000000000000000000000000000000000000000000000000000602082015290565b8160005b8115610431578061041b81610811565b915061042a9050600a83610878565b915061040b565b60008167ffffffffffffffff81111561044c5761044c6105ba565b6040519080825280601f01601f191660200182016040528015610476576020820181803683370190505b5090505b84156104f95761048b60018361088c565b9150610498600a866108a3565b6104a39060306108b7565b60f81b8183815181106104b8576104b86108cf565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053506104f2600a86610878565b945061047a565b949350505050565b6008806108ff83390190565b60005b83811015610528578181015183820152602001610510565b83811115610537576000848401525b50505050565b6000815180845261055581602086016020860161050d565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60208152600061059a602083018461053d565b9392505050565b6000602082840312156105b357600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000806000606084860312156105fe57600080fd5b833573ffffffffffffffffffffffffffffffffffffffff8116811461062257600080fd5b925060208401359150604084013567ffffffffffffffff8082111561064657600080fd5b818601915086601f83011261065a57600080fd5b81358181111561066c5761066c6105ba565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156106b2576106b26105ba565b816040528281528960208487010111156106cb57600080fd5b82602086016020830137600060208483010
15280955050505050509250925092565b83815282602082015260606040820152600061070c606083018461053d565b95945050505050565b6000845161072781846020890161050d565b80830190507f2e000000000000000000000000000000000000000000000000000000000000008082528551610763816001850160208a0161050d565b6001920191820152835161077e81600284016020880161050d565b0160020195945050505050565b868152600073ffffffffffffffffffffffffffffffffffffffff808816602084015280871660408401525084606083015283608083015260c060a08301526107d660c083018461053d565b98975050505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610842576108426107e2565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b60008261088757610887610849565b500490565b60008282101561089e5761089e6107e2565b500390565b6000826108b2576108b2610849565b500690565b600082198211156108ca576108ca6107e2565b500190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fdfe608060405230fffea164736f6c634300080f000a"
......@@ -19,8 +19,11 @@ need_cmd() {
need_cmd forge
need_cmd abigen
TYPE=$1
NAME=$1
# This can handle both fully qualified syntax or just
# the name of the contract.
# Fully qualified: path-to-contract-file:contract-name
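# For example, both of these work (cf. the Makefile targets above):
#   ./gen_bindings.sh contracts/L2/L1Block.sol:L1Block bindings
#   ./gen_bindings.sh L1Block bindings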
TYPE=$(echo "$NAME" | cut -d ':' -f2)
PACKAGE=$2
# Convert to lower case to respect golang package naming conventions
......@@ -35,9 +38,9 @@ CWD=$(pwd)
# Build contracts
cd ${CONTRACTS_PATH}
forge build
forge inspect ${TYPE} abi > ${TEMP}/${TYPE}.abi
forge inspect ${TYPE} bytecode > ${TEMP}/${TYPE}.bin
forge inspect ${TYPE} deployedBytecode > ${CWD}/bin/${TYPE_LOWER}_deployed.hex
forge inspect ${NAME} abi > ${TEMP}/${TYPE}.abi
forge inspect ${NAME} bytecode > ${TEMP}/${TYPE}.bin
forge inspect ${NAME} deployedBytecode > ${CWD}/bin/${TYPE_LOWER}_deployed.hex
# Run ABIGEN
cd ${CWD}
......
......@@ -3,7 +3,7 @@ module github.com/ethereum-optimism/optimism/op-bindings
go 1.18
require (
github.com/ethereum/go-ethereum v1.10.20
github.com/ethereum/go-ethereum v1.10.21
github.com/stretchr/testify v1.7.2
)
......@@ -41,4 +41,4 @@ require (
gopkg.in/yaml.v3 v3.0.1 // indirect
)
replace github.com/ethereum/go-ethereum v1.10.20 => github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d
replace github.com/ethereum/go-ethereum v1.10.21 => github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e
......@@ -28,8 +28,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d h1:w0DBXhp0sv0bWRDOCA/Y6yHOALU7qLLLf5/kE3YfFr4=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d/go.mod h1:m2m08SAQ8XB0VcVBoDg9n74Dw5PUMl3hzv1NXVBFPfg=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e h1:hz+iywXjnqz6xA3lTLvtNL9OZyX76pS5SER4kZBmQLs=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
......
......@@ -7,7 +7,7 @@ require (
github.com/ethereum-optimism/optimism/op-bindings v0.3.0
github.com/ethereum-optimism/optimism/op-node v0.3.0
github.com/ethereum-optimism/optimism/op-proposer v0.3.0
github.com/ethereum/go-ethereum v1.10.20
github.com/ethereum/go-ethereum v1.10.21
github.com/libp2p/go-libp2p v0.18.1
github.com/libp2p/go-libp2p-core v0.15.0
github.com/miguelmota/go-ethereum-hdwallet v0.1.1
......@@ -161,4 +161,4 @@ require (
gopkg.in/yaml.v3 v3.0.1 // indirect
)
replace github.com/ethereum/go-ethereum v1.10.20 => github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d
replace github.com/ethereum/go-ethereum v1.10.21 => github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e
......@@ -245,8 +245,8 @@ github.com/ethereum-optimism/optimism/op-node v0.3.0 h1:jep/cbIbP7fjBSAR48yk5NJV
github.com/ethereum-optimism/optimism/op-node v0.3.0/go.mod h1:iF9AhYjr8jNeoCDNP/Vs/ywQ2USZU5L66AxZbSAUi0E=
github.com/ethereum-optimism/optimism/op-proposer v0.3.0 h1:K1ipZt3TLD0BJi7tKOmx8tCLXj9i4f4baBIhbPmUxk4=
github.com/ethereum-optimism/optimism/op-proposer v0.3.0/go.mod h1:GcQ9VCWz2zEVexecq5IYo/2eadK/y7IBOEfx4YV1QJk=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d h1:w0DBXhp0sv0bWRDOCA/Y6yHOALU7qLLLf5/kE3YfFr4=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d/go.mod h1:m2m08SAQ8XB0VcVBoDg9n74Dw5PUMl3hzv1NXVBFPfg=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e h1:hz+iywXjnqz6xA3lTLvtNL9OZyX76pS5SER4kZBmQLs=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/ethereum/go-ethereum v1.10.4/go.mod h1:nEE0TP5MtxGzOMd7egIrbPJMQBnhVU3ELNxhBglIzhg=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
......
......@@ -2,9 +2,14 @@ package main
import (
"context"
"net"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"github.com/ethereum-optimism/optimism/op-node/cmd/p2p"
"github.com/ethereum-optimism/optimism/op-node/metrics"
......@@ -50,13 +55,19 @@ func main() {
)
app := cli.NewApp()
app.Flags = flags.Flags
app.Version = VersionWithMeta
app.Name = "opnode"
app.Flags = flags.Flags
app.Name = "op-node"
app.Usage = "Optimism Rollup Node"
app.Description = "The deposit only rollup node drives the L2 execution engine based on L1 deposits."
app.Description = "The Optimism Rollup Node derives L2 block inputs from L1 data and drives an external L2 Execution Engine to build a L2 chain."
app.Action = RollupNodeMain
app.Commands = []cli.Command{
{
Name: "p2p",
Subcommands: p2p.Subcommands,
},
}
err := app.Run(os.Args)
if err != nil {
log.Crit("Application failed", "message", err)
......@@ -101,6 +112,27 @@ func RollupNodeMain(ctx *cli.Context) error {
m.RecordUp()
log.Info("Rollup node started")
if cfg.Pprof.Enabled {
var srv http.Server
srv.Addr = net.JoinHostPort(cfg.Pprof.ListenAddr, cfg.Pprof.ListenPort)
// Start pprof server + register its shutdown
go func() {
log.Info("pprof server started", "addr", srv.Addr)
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
log.Error("error in pprof server", "err", err)
} else {
log.Info("pprof server shutting down")
}
}()
defer func() {
shutCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
err := srv.Shutdown(shutCtx)
log.Info("pprof server shut down", "err", err)
}()
}
interruptChannel := make(chan os.Signal, 1)
signal.Notify(interruptChannel, []os.Signal{
os.Interrupt,
......
package p2p
import (
"crypto/rand"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/urfave/cli"
)
func Priv2PeerID(r io.Reader) (string, error) {
b, err := readHexData(r)
if err != nil {
return "", nil
}
p, err := crypto.UnmarshalSecp256k1PrivateKey(b)
if err != nil {
return "", fmt.Errorf("failed to parse priv key from %d bytes: %w", len(b), err)
}
pid, err := peer.IDFromPrivateKey(p)
if err != nil {
return "", fmt.Errorf("failed to parse peer ID from private key: %w", err)
}
return pid.String(), nil
}
func Pub2PeerID(r io.Reader) (string, error) {
b, err := readHexData(r)
if err != nil {
return "", nil
}
p, err := crypto.UnmarshalSecp256k1PublicKey(b)
if err != nil {
return "", fmt.Errorf("failed to parse pub key from %d bytes: %w", len(b), err)
}
pid, err := peer.IDFromPublicKey(p)
if err != nil {
return "", fmt.Errorf("failed to parse peer ID from public key: %w", err)
}
return pid.String(), nil
}
func readHexData(r io.Reader) ([]byte, error) {
data, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
rawStr := strings.TrimSpace(string(data))
rawStr = strings.TrimPrefix(rawStr, "0x")
b, err := hex.DecodeString(rawStr)
if err != nil {
return nil, fmt.Errorf("p2p key is not formatted in hex chars: %w", err)
}
return b, nil
}
var Subcommands = cli.Commands{
{
Name: "priv2id",
Usage: "Reads a private key from STDIN, and returns a peer ID",
Action: func(ctx *cli.Context) error {
key, err := Priv2PeerID(os.Stdin)
if err != nil {
return err
}
fmt.Println(key)
return nil
},
},
{
Name: "pub2id",
Usage: "Reads a public key from STDIN, and returns a peer ID",
Action: func(ctx *cli.Context) error {
key, err := Pub2PeerID(os.Stdin)
if err != nil {
return err
}
fmt.Println(key)
return nil
},
},
{
Name: "genkey",
Usage: "Generates a private key",
Action: func(ctx *cli.Context) error {
buf := make([]byte, 32)
if _, err := rand.Read(buf); err != nil {
return fmt.Errorf("failed to get entropy: %w", err)
}
fmt.Println(hex.EncodeToString(buf))
return nil
},
},
}
package p2p
import (
"bytes"
"encoding/hex"
"testing"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/stretchr/testify/require"
)
func TestPrivPub2PeerID(t *testing.T) {
priv, pub, err := crypto.GenerateKeyPair(crypto.Secp256k1, 32)
require.NoError(t, err)
privRaw, err := priv.Raw()
require.NoError(t, err)
pubRaw, err := pub.Raw()
require.NoError(t, err)
t.Run("with a private key", func(t *testing.T) {
privPidLib, err := peer.IDFromPrivateKey(priv)
require.NoError(t, err)
privPidImpl, err := Priv2PeerID(bytes.NewReader([]byte(hex.EncodeToString(privRaw))))
require.NoError(t, err)
require.Equal(t, privPidLib.String(), privPidImpl)
})
t.Run("with a public key", func(t *testing.T) {
pubPidLib, err := peer.IDFromPublicKey(pub)
require.NoError(t, err)
pubPidImpl, err := Pub2PeerID(bytes.NewReader([]byte(hex.EncodeToString(pubRaw))))
require.NoError(t, err)
require.Equal(t, pubPidLib.String(), pubPidImpl)
})
}
package flags
import "github.com/urfave/cli"
import (
"fmt"
"github.com/urfave/cli"
)
// Flags
......@@ -15,32 +19,27 @@ var (
L1NodeAddr = cli.StringFlag{
Name: "l1",
Usage: "Address of L1 User JSON-RPC endpoint to use (eth namespace required)",
Required: true,
Value: "http://127.0.0.1:8545",
EnvVar: prefixEnvVar("L1_ETH_RPC"),
}
L2EngineAddr = cli.StringFlag{
Name: "l2",
Usage: "Address of L2 Engine JSON-RPC endpoints to use (engine and eth namespace required)",
Required: true,
EnvVar: prefixEnvVar("L2_ENGINE_RPC"),
}
RollupConfig = cli.StringFlag{
Name: "rollup.config",
Usage: "Rollup chain parameters",
Required: true,
EnvVar: prefixEnvVar("ROLLUP_CONFIG"),
}
RPCListenAddr = cli.StringFlag{
Name: "rpc.addr",
Usage: "RPC listening address",
Required: true,
EnvVar: prefixEnvVar("RPC_ADDR"),
}
RPCListenPort = cli.IntFlag{
Name: "rpc.port",
Usage: "RPC listening port",
Required: true,
EnvVar: prefixEnvVar("RPC_PORT"),
}
......@@ -111,6 +110,23 @@ var (
Value: 7300,
EnvVar: prefixEnvVar("METRICS_PORT"),
}
PprofEnabledFlag = cli.BoolFlag{
Name: "pprof.enabled",
Usage: "Enable the pprof server",
EnvVar: prefixEnvVar("PPROF_ENABLED"),
}
PprofAddrFlag = cli.StringFlag{
Name: "pprof.addr",
Usage: "pprof listening address",
Value: "0.0.0.0",
EnvVar: prefixEnvVar("PPROF_ADDR"),
}
PprofPortFlag = cli.IntFlag{
Name: "pprof.port",
Usage: "pprof listening port",
Value: 6060,
EnvVar: prefixEnvVar("PPROF_PORT"),
}
SnapshotLog = cli.StringFlag{
Name: "snapshotlog.file",
......@@ -139,8 +155,34 @@ var optionalFlags = append([]cli.Flag{
MetricsEnabledFlag,
MetricsAddrFlag,
MetricsPortFlag,
PprofEnabledFlag,
PprofAddrFlag,
PprofPortFlag,
SnapshotLog,
}, p2pFlags...)
// Flags contains the list of configuration options available to the binary.
var Flags = append(requiredFlags, optionalFlags...)
func CheckRequired(ctx *cli.Context) error {
l1NodeAddr := ctx.GlobalString(L1NodeAddr.Name)
if l1NodeAddr == "" {
return fmt.Errorf("flag %s is required", L1NodeAddr.Name)
}
l2EngineAddr := ctx.GlobalString(L2EngineAddr.Name)
if l2EngineAddr == "" {
return fmt.Errorf("flag %s is required", L2EngineAddr.Name)
}
rollupConfig := ctx.GlobalString(RollupConfig.Name)
if rollupConfig == "" {
return fmt.Errorf("flag %s is required", RollupConfig.Name)
}
rpcListenAddr := ctx.GlobalString(RPCListenAddr.Name)
if rpcListenAddr == "" {
return fmt.Errorf("flag %s is required", RPCListenAddr.Name)
}
if !ctx.GlobalIsSet(RPCListenPort.Name) {
return fmt.Errorf("flag %s is required", RPCListenPort.Name)
}
return nil
}
......@@ -7,16 +7,6 @@ import (
"github.com/urfave/cli"
)
// TestRequiredFlagsSetRequired asserts that all flags deemed required properly
// have the Required field set to true.
func TestRequiredFlagsSetRequired(t *testing.T) {
for _, flag := range requiredFlags {
reqFlag, ok := flag.(cli.RequiredFlag)
require.True(t, ok)
require.True(t, reqFlag.IsRequired())
}
}
// TestOptionalFlagsDontSetRequired asserts that all flags deemed optional set
// the Required field to false.
func TestOptionalFlagsDontSetRequired(t *testing.T) {
......
......@@ -4,7 +4,7 @@ go 1.18
require (
github.com/ethereum-optimism/optimism/op-bindings v0.3.0
github.com/ethereum/go-ethereum v1.10.20
github.com/ethereum/go-ethereum v1.10.21
github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.5.8
github.com/hashicorp/go-multierror v1.1.1
......@@ -173,7 +173,4 @@ require (
gopkg.in/yaml.v3 v3.0.1 // indirect
)
replace github.com/ethereum/go-ethereum v1.10.20 => github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d
// For local debugging:
// replace github.com/ethereum/go-ethereum v1.10.17 => ../go-ethereum
replace github.com/ethereum/go-ethereum v1.10.21 => github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e
......@@ -193,8 +193,8 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum-optimism/optimism/op-bindings v0.3.0 h1:d2Mwb8FzR2zuhW0sS5xFKWz/6VFPTEIE+XINqZj0Rv4=
github.com/ethereum-optimism/optimism/op-bindings v0.3.0/go.mod h1:CrvUVIISKcyJ7o27ub/HY4Kq9wEJQxrGmWthTqxPSGo=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d h1:w0DBXhp0sv0bWRDOCA/Y6yHOALU7qLLLf5/kE3YfFr4=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d/go.mod h1:m2m08SAQ8XB0VcVBoDg9n74Dw5PUMl3hzv1NXVBFPfg=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e h1:hz+iywXjnqz6xA3lTLvtNL9OZyX76pS5SER4kZBmQLs=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.1 h1:+zhkb+dhUgx0/e+M8sF0QqiouvMQUiKR+QYvdxIOKcQ=
github.com/fjl/memsize v0.0.1/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
......
......@@ -7,6 +7,7 @@ import (
"net"
"net/http"
"strconv"
"time"
"github.com/ethereum/go-ethereum"
"github.com/prometheus/client_golang/prometheus/collectors"
......@@ -29,12 +30,22 @@ const (
type Metrics struct {
Info *prometheus.GaugeVec
Up prometheus.Gauge
RPCServerRequestsTotal *prometheus.CounterVec
RPCServerRequestDurationSeconds *prometheus.HistogramVec
RPCClientRequestsTotal *prometheus.CounterVec
RPCClientRequestDurationSeconds *prometheus.HistogramVec
RPCClientResponsesTotal *prometheus.CounterVec
DerivationIdle prometheus.Gauge
PipelineResetsTotal prometheus.Counter
LastPipelineResetUnix prometheus.Gauge
UnsafePayloadsTotal prometheus.Counter
DerivationErrorsTotal prometheus.Counter
Heads *prometheus.GaugeVec
TransactionsSequencedTotal prometheus.Counter
registry *prometheus.Registry
}
......@@ -60,6 +71,7 @@ func NewMetrics(procName string) *Metrics {
Name: "up",
Help: "1 if the op node has finished starting up",
}),
RPCServerRequestsTotal: promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: RPCServerSubsystem,
......@@ -103,6 +115,46 @@ func NewMetrics(procName string) *Metrics {
"method",
"error",
}),
DerivationIdle: promauto.With(registry).NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "derivation_idle",
Help: "1 if the derivation pipeline is idle",
}),
PipelineResetsTotal: promauto.With(registry).NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "pipeline_resets_total",
Help: "Count of derivation pipeline resets",
}),
LastPipelineResetUnix: promauto.With(registry).NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "last_pipeline_reset_unix",
Help: "Timestamp of last pipeline reset",
}),
UnsafePayloadsTotal: promauto.With(registry).NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "unsafe_payloads_total",
Help: "Count of unsafe payloads received via p2p",
}),
DerivationErrorsTotal: promauto.With(registry).NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "derivation_errors_total",
Help: "Count of total derivation errors",
}),
Heads: promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "heads",
Help: "Gauge representing the different L1/L2 heads",
}, []string{
"type",
}),
TransactionsSequencedTotal: promauto.With(registry).NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "transactions_sequenced_total",
Help: "Count of total transactions sequenced",
}),
registry: registry,
}
}
......@@ -166,6 +218,24 @@ func (m *Metrics) RecordRPCClientResponse(method string, err error) {
m.RPCClientResponsesTotal.WithLabelValues(method, errStr).Inc()
}
func (m *Metrics) SetDerivationIdle(status bool) {
var val float64
if status {
val = 1
}
m.DerivationIdle.Set(val)
}
func (m *Metrics) SetHead(kind string, num uint64) {
m.Heads.WithLabelValues(kind).Set(float64(num))
}
func (m *Metrics) RecordPipelineReset() {
m.PipelineResetsTotal.Inc()
m.DerivationErrorsTotal.Inc()
m.LastPipelineResetUnix.Set(float64(time.Now().Unix()))
}
// Serve starts the metrics server on the given hostname and port.
// The server will be closed when the passed-in context is cancelled.
func (m *Metrics) Serve(ctx context.Context, hostname string, port int) error {
......
......@@ -28,6 +28,8 @@ type Config struct {
Metrics MetricsConfig
Pprof PprofConfig
// Optional
Tracer Tracer
}
......@@ -59,6 +61,16 @@ func (m MetricsConfig) Check() error {
return nil
}
type PprofConfig struct {
Enabled bool
ListenAddr string
ListenPort string
}
func (p PprofConfig) Check() error {
return nil
}
// Check verifies that the given configuration makes sense
func (cfg *Config) Check() error {
if err := cfg.L2.Check(); err != nil {
......@@ -70,6 +82,9 @@ func (cfg *Config) Check() error {
if err := cfg.Metrics.Check(); err != nil {
return fmt.Errorf("metrics config error: %w", err)
}
if err := cfg.Pprof.Check(); err != nil {
return fmt.Errorf("pprof config error: %w", err)
}
if cfg.P2P != nil {
if err := cfg.P2P.Check(); err != nil {
return fmt.Errorf("p2p config error: %w", err)
......
......@@ -144,7 +144,7 @@ func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger
return err
}
n.l2Engine = driver.NewDriver(&cfg.Driver, &cfg.Rollup, source, n.l1Source, n, n.log, snapshotLog)
n.l2Engine = driver.NewDriver(&cfg.Driver, &cfg.Rollup, source, n.l1Source, n, n.log, snapshotLog, n.metrics)
return nil
}
......
......@@ -22,7 +22,7 @@ type L1ReceiptsFetcher interface {
// by setting NoTxPool=false as sequencer, or by appending batch transactions as verifier.
// The severity of the error is returned; a crit=false error means there was a temporary issue, like a failed RPC or time-out.
// A crit=true error means the input arguments are inconsistent or invalid.
func PreparePayloadAttributes(ctx context.Context, cfg *rollup.Config, dl L1ReceiptsFetcher, l2Parent eth.L2BlockRef, epoch eth.BlockID) (attrs *eth.PayloadAttributes, crit bool, err error) {
func PreparePayloadAttributes(ctx context.Context, cfg *rollup.Config, dl L1ReceiptsFetcher, l2Parent eth.L2BlockRef, timestamp uint64, epoch eth.BlockID) (attrs *eth.PayloadAttributes, crit bool, err error) {
var l1Info eth.L1Info
var depositTxs []hexutil.Bytes
var seqNumber uint64
......@@ -68,7 +68,7 @@ func PreparePayloadAttributes(ctx context.Context, cfg *rollup.Config, dl L1Rece
txs = append(txs, depositTxs...)
return &eth.PayloadAttributes{
Timestamp: hexutil.Uint64(l2Parent.Time + cfg.BlockTime),
Timestamp: hexutil.Uint64(timestamp),
PrevRandao: eth.Bytes32(l1Info.MixDigest()),
SuggestedFeeRecipient: cfg.FeeRecipientAddress,
Transactions: txs,
......
......@@ -55,7 +55,7 @@ func (aq *AttributesQueue) Step(ctx context.Context, outer Progress) error {
fetchCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
attrs, crit, err := PreparePayloadAttributes(fetchCtx, aq.config, aq.dl, aq.next.SafeL2Head(), batch.Epoch())
attrs, crit, err := PreparePayloadAttributes(fetchCtx, aq.config, aq.dl, aq.next.SafeL2Head(), batch.Timestamp, batch.Epoch())
if err != nil {
if crit {
return fmt.Errorf("failed to prepare payload attributes for batch: %v", err)
......
......@@ -70,7 +70,7 @@ func TestAttributesQueue_Step(t *testing.T) {
batch := &BatchData{BatchV1{
EpochNum: rollup.Epoch(l1Info.InfoNum),
EpochHash: l1Info.InfoHash,
Timestamp: 12345,
Timestamp: safeHead.Time + cfg.BlockTime,
Transactions: []eth.Data{eth.Data("foobar"), eth.Data("example")},
}}
......
......@@ -31,11 +31,12 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1Fetcher := &testutils.MockL1Source{}
defer l1Fetcher.AssertExpectations(t)
l2Parent := testutils.RandomL2BlockRef(rng)
l2Time := l2Parent.Time + cfg.BlockTime
l1Info := testutils.RandomL1Info(rng)
l1Info.InfoNum = l2Parent.L1Origin.Number + 1
epoch := l1Info.ID()
l1Fetcher.ExpectFetch(epoch.Hash, l1Info, nil, nil, nil)
_, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, epoch)
_, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
require.NotNil(t, err, "inconsistent L1 origin error expected")
require.True(t, crit, "inconsistent L1 origin transition must be handled like a critical error with reorg")
})
......@@ -44,10 +45,11 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1Fetcher := &testutils.MockL1Source{}
defer l1Fetcher.AssertExpectations(t)
l2Parent := testutils.RandomL2BlockRef(rng)
l2Time := l2Parent.Time + cfg.BlockTime
l1Info := testutils.RandomL1Info(rng)
l1Info.InfoNum = l2Parent.L1Origin.Number
epoch := l1Info.ID()
_, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, epoch)
_, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
require.NotNil(t, err, "inconsistent L1 origin error expected")
require.True(t, crit, "inconsistent L1 origin transition must be handled like a critical error with reorg")
})
......@@ -56,11 +58,12 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1Fetcher := &testutils.MockL1Source{}
defer l1Fetcher.AssertExpectations(t)
l2Parent := testutils.RandomL2BlockRef(rng)
l2Time := l2Parent.Time + cfg.BlockTime
epoch := l2Parent.L1Origin
epoch.Number += 1
mockRPCErr := errors.New("mock rpc error")
l1Fetcher.ExpectFetch(epoch.Hash, nil, nil, nil, mockRPCErr)
_, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, epoch)
_, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
require.ErrorIs(t, err, mockRPCErr, "mock rpc error expected")
require.False(t, crit, "rpc errors should not be critical, it is not necessary to reorg")
})
......@@ -69,10 +72,11 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1Fetcher := &testutils.MockL1Source{}
defer l1Fetcher.AssertExpectations(t)
l2Parent := testutils.RandomL2BlockRef(rng)
l2Time := l2Parent.Time + cfg.BlockTime
epoch := l2Parent.L1Origin
mockRPCErr := errors.New("mock rpc error")
l1Fetcher.ExpectInfoByHash(epoch.Hash, nil, mockRPCErr)
_, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, epoch)
_, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
require.ErrorIs(t, err, mockRPCErr, "mock rpc error expected")
require.False(t, crit, "rpc errors should not be critical, it is not necessary to reorg")
})
......@@ -81,6 +85,7 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1Fetcher := &testutils.MockL1Source{}
defer l1Fetcher.AssertExpectations(t)
l2Parent := testutils.RandomL2BlockRef(rng)
l2Time := l2Parent.Time + cfg.BlockTime
l1Info := testutils.RandomL1Info(rng)
l1Info.InfoParentHash = l2Parent.L1Origin.Hash
l1Info.InfoNum = l2Parent.L1Origin.Number + 1
......@@ -88,7 +93,7 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1InfoTx, err := L1InfoDepositBytes(0, l1Info)
require.NoError(t, err)
l1Fetcher.ExpectFetch(epoch.Hash, l1Info, nil, nil, nil)
attrs, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, epoch)
attrs, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
require.NoError(t, err)
require.False(t, crit)
require.NotNil(t, attrs)
......@@ -104,6 +109,7 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1Fetcher := &testutils.MockL1Source{}
defer l1Fetcher.AssertExpectations(t)
l2Parent := testutils.RandomL2BlockRef(rng)
l2Time := l2Parent.Time + cfg.BlockTime
l1Info := testutils.RandomL1Info(rng)
l1Info.InfoParentHash = l2Parent.L1Origin.Hash
l1Info.InfoNum = l2Parent.L1Origin.Number + 1
......@@ -126,7 +132,7 @@ func TestPreparePayloadAttributes(t *testing.T) {
// txs are ignored, API is a bit bloated to previous approach. Only l1Info and receipts matter.
l1Txs := make(types.Transactions, len(receipts))
l1Fetcher.ExpectFetch(epoch.Hash, l1Info, l1Txs, receipts, nil)
attrs, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, epoch)
attrs, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
require.NoError(t, err)
require.False(t, crit)
require.NotNil(t, attrs)
......@@ -142,6 +148,7 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1Fetcher := &testutils.MockL1Source{}
defer l1Fetcher.AssertExpectations(t)
l2Parent := testutils.RandomL2BlockRef(rng)
l2Time := l2Parent.Time + cfg.BlockTime
l1Info := testutils.RandomL1Info(rng)
l1Info.InfoHash = l2Parent.L1Origin.Hash
l1Info.InfoNum = l2Parent.L1Origin.Number
......@@ -151,7 +158,7 @@ func TestPreparePayloadAttributes(t *testing.T) {
require.NoError(t, err)
l1Fetcher.ExpectInfoByHash(epoch.Hash, l1Info, nil)
attrs, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, epoch)
attrs, crit, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l2Parent, l2Time, epoch)
require.NoError(t, err)
require.False(t, crit)
require.NotNil(t, attrs)
......
package derive
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"io"
......@@ -81,85 +81,72 @@ func (ib *ChannelBank) IngestData(data []byte) error {
if data[0] != DerivationVersion0 {
return fmt.Errorf("unrecognized derivation version: %d", data)
}
buf := bytes.NewBuffer(data[1:])
ib.prune()
offset := 1
if len(data[offset:]) < minimumFrameSize {
if buf.Len() < minimumFrameSize {
return fmt.Errorf("data must be at least have one frame")
}
// Iterate over all frames. They may have different channel IDs to indicate that the stream consumer should reset.
for {
if len(data) < offset+ChannelIDDataSize+1 {
// Don't try to unmarshal from an empty buffer.
// The if done checks should catch most/all of this case though.
if buf.Len() < ChannelIDDataSize+1 {
return nil
}
var chID ChannelID
copy(chID.Data[:], data[offset:])
offset += ChannelIDDataSize
chIDTime, n := binary.Uvarint(data[offset:])
if n <= 0 {
return fmt.Errorf("failed to read frame number")
done := false
var f Frame
if err := (&f).UnmarshalBinary(buf); err == io.EOF {
done = true
} else if err != nil {
return fmt.Errorf("failed to unmarshal a frame: %w", err)
}
offset += n
chID.Time = chIDTime
// stop reading and ignore remaining data if we encounter a zeroed ID
if chID == (ChannelID{}) {
if f.ID == (ChannelID{}) {
ib.log.Info("empty channel ID")
return nil
}
frameNumber, n := binary.Uvarint(data[offset:])
if n <= 0 {
return fmt.Errorf("failed to read frame number")
}
offset += n
frameLength, n := binary.Uvarint(data[offset:])
if n <= 0 {
return fmt.Errorf("failed to read frame length")
}
offset += n
if remaining := uint64(len(data) - offset); remaining < frameLength {
return fmt.Errorf("not enough data left for frame: %d < %d", remaining, frameLength)
}
frameData := data[offset : uint64(offset)+frameLength]
offset += int(frameLength)
if offset >= len(data) {
return fmt.Errorf("failed to read frame end byte, no data left, offset past length %d", len(data))
}
isLastNum := data[offset]
if isLastNum > 1 {
return fmt.Errorf("invalid isLast bool value: %d", data[offset])
}
isLast := isLastNum == 1
offset += 1
// check if the channel is not timed out
if chID.Time+ib.cfg.ChannelTimeout < ib.progress.Origin.Time {
ib.log.Info("channel is timed out, ignore frame", "channel", chID, "id_time", chID.Time, "frame", frameNumber)
if f.ID.Time+ib.cfg.ChannelTimeout < ib.progress.Origin.Time {
ib.log.Info("channel is timed out, ignore frame", "channel", f.ID, "id_time", f.ID.Time, "frame", f.FrameNumber)
if done {
return nil
}
continue
}
// check if the channel is not included too soon (otherwise timeouts wouldn't be effective)
if chID.Time > ib.progress.Origin.Time {
ib.log.Info("channel claims to be from the future, ignore frame", "channel", chID, "id_time", chID.Time, "frame", frameNumber)
if f.ID.Time > ib.progress.Origin.Time {
ib.log.Info("channel claims to be from the future, ignore frame", "channel", f.ID, "id_time", f.ID.Time, "frame", f.FrameNumber)
if done {
return nil
}
continue
}
currentCh, ok := ib.channels[chID]
currentCh, ok := ib.channels[f.ID]
if !ok { // create new channel if it doesn't exist yet
currentCh = &ChannelIn{id: chID}
ib.channels[chID] = currentCh
ib.channelQueue = append(ib.channelQueue, chID)
currentCh = &ChannelIn{id: f.ID}
ib.channels[f.ID] = currentCh
ib.channelQueue = append(ib.channelQueue, f.ID)
}
ib.log.Debug("ingesting frame", "channel", chID, "frame_number", frameNumber, "length", len(frameData))
if err := currentCh.IngestData(frameNumber, isLast, frameData); err != nil {
ib.log.Debug("failed to ingest frame into channel", "channel", chID, "frame_number", frameNumber, "err", err)
ib.log.Debug("ingesting frame", "channel", f.ID, "frame_number", f.FrameNumber, "length", len(f.Data))
if err := currentCh.IngestData(f.FrameNumber, f.IsLast, f.Data); err != nil {
ib.log.Debug("failed to ingest frame into channel", "channel", f.ID, "frame_number", f.FrameNumber, "err", err)
if done {
return nil
}
continue
}
if done {
return nil
}
}
}
......
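The rewritten IngestData above no longer hand-parses the channel ID, frame number, length and is_last byte; it strips the version byte, wraps the rest in a bytes.Buffer, and lets Frame.UnmarshalBinary do the decoding, treating io.EOF (a zeroed channel ID) as the end-of-stream marker. A minimal sketch of that read loop in isolation, assuming the package's Frame, ChannelID, ChannelIDDataSize and DerivationVersion0 definitions; readFrames is a hypothetical helper, not part of this diff:

package derive

import (
	"bytes"
	"fmt"
	"io"
)

// readFrames strips the version byte and decodes frames until the buffer is
// exhausted or the end-of-stream marker is reached.
func readFrames(data []byte) ([]Frame, error) {
	if len(data) == 0 || data[0] != DerivationVersion0 {
		return nil, fmt.Errorf("unrecognized derivation version")
	}
	buf := bytes.NewBuffer(data[1:])
	var frames []Frame
	for buf.Len() >= ChannelIDDataSize+1 {
		var f Frame
		if err := (&f).UnmarshalBinary(buf); err == io.EOF {
			// Either a zeroed channel ID or the buffer ended exactly at this frame.
			if f.ID != (ChannelID{}) {
				frames = append(frames, f)
			}
			break
		} else if err != nil {
			return frames, fmt.Errorf("failed to unmarshal a frame: %w", err)
		}
		frames = append(frames, f)
	}
	return frames, nil
}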
package derive
import (
"encoding/binary"
"errors"
"fmt"
"io"
)
// Frames cannot be larger than 1 MB.
// Data transactions that carry frames are generally not larger than 128 KB due to L1 network conditions,
// but we leave space to grow larger anyway (gas limit allows for more data).
const MaxFrameLen = 1_000_000
var ErrNotEnoughFrameBytes = errors.New("not enough available bytes for the frame")
// Data Format
//
// frame = channel_id ++ frame_number ++ frame_data_length ++ frame_data ++ is_last
//
// channel_id = random ++ timestamp
// random = bytes32
// timestamp = uvarint
// frame_number = uvarint
// frame_data_length = uvarint
// frame_data = bytes
// is_last = bool
type Frame struct {
ID ChannelID
FrameNumber uint64
Data []byte
IsLast bool
}
// MarshalBinary writes the frame to `w`.
// It returns the number of bytes written as well as any
// error encountered while writing.
func (f *Frame) MarshalBinary(w io.Writer) (int, error) {
n, err := w.Write(f.ID.Data[:])
if err != nil {
return n, err
}
l, err := w.Write(makeUVarint(f.ID.Time))
n += l
if err != nil {
return n, err
}
l, err = w.Write(makeUVarint(f.FrameNumber))
n += l
if err != nil {
return n, err
}
l, err = w.Write(makeUVarint(uint64(len(f.Data))))
n += l
if err != nil {
return n, err
}
l, err = w.Write(f.Data)
n += l
if err != nil {
return n, err
}
if f.IsLast {
l, err = w.Write([]byte{1})
n += l
if err != nil {
return n, err
}
} else {
l, err = w.Write([]byte{0})
n += l
if err != nil {
return n, err
}
}
return n, nil
}
type ByteReader interface {
io.Reader
io.ByteReader
}
// UnmarshalBinary consumes a full frame from the reader.
// If `r` fails a read, it returns the error from the reader.
// The reader will be left in a partially read state.
func (f *Frame) UnmarshalBinary(r ByteReader) error {
_, err := io.ReadFull(r, f.ID.Data[:])
if err != nil {
return fmt.Errorf("error reading ID: %w", err)
}
f.ID.Time, err = binary.ReadUvarint(r)
if err != nil {
return fmt.Errorf("error reading ID.Time: %w", err)
}
// stop reading and ignore remaining data if we encounter a zeroed ID
if f.ID == (ChannelID{}) {
return io.EOF
}
f.FrameNumber, err = binary.ReadUvarint(r)
if err != nil {
return fmt.Errorf("error reading frame number: %w", err)
}
frameLength, err := binary.ReadUvarint(r)
if err != nil {
return fmt.Errorf("error reading frame length: %w", err)
}
// Cap frame length to MaxFrameLen (currently 1MB)
if frameLength > MaxFrameLen {
return fmt.Errorf("frameLength is too large: %d", frameLength)
}
f.Data = make([]byte, int(frameLength))
if _, err := io.ReadFull(r, f.Data); err != nil {
return fmt.Errorf("error reading frame data: %w", err)
}
isLastByte, err := r.ReadByte()
if err != nil && err != io.EOF {
return fmt.Errorf("error reading final byte: %w", err)
}
if isLastByte == 0 {
f.IsLast = false
} else if isLastByte == 1 {
f.IsLast = true
} else {
return errors.New("invalid byte as is_last")
}
return err
}
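MarshalBinary and UnmarshalBinary are symmetric over the format documented above, so a frame can be round-tripped through a bytes.Buffer, which satisfies the ByteReader interface. A small illustrative sketch assuming the Frame and ChannelID types from this package; roundTripFrame is hypothetical:

package derive

import (
	"bytes"
	"fmt"
)

// roundTripFrame marshals a frame and immediately unmarshals it again,
// returning the decoded copy.
func roundTripFrame() (Frame, error) {
	in := Frame{FrameNumber: 7, Data: []byte("hello"), IsLast: true}
	in.ID.Data[0] = 0xff // non-zero, so the decoder does not treat it as end-of-stream
	in.ID.Time = 1_650_000_000

	var buf bytes.Buffer
	if _, err := in.MarshalBinary(&buf); err != nil {
		return Frame{}, fmt.Errorf("marshal: %w", err)
	}
	var out Frame
	if err := (&out).UnmarshalBinary(&buf); err != nil {
		return Frame{}, fmt.Errorf("unmarshal: %w", err)
	}
	return out, nil
}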
......@@ -6,7 +6,6 @@ import (
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"io"
"github.com/ethereum-optimism/optimism/op-node/rollup"
......@@ -114,41 +113,38 @@ func (co *ChannelOut) Close() error {
// Returns nil if there is still more buffered data.
// Returns an error if it runs into one during processing.
func (co *ChannelOut) OutputFrame(w *bytes.Buffer, maxSize uint64) error {
w.Write(co.id.Data[:])
w.Write(makeUVarint(co.id.Time))
w.Write(makeUVarint(co.frame))
f := Frame{
ID: co.id,
FrameNumber: co.frame,
}
// Copy data from the local buffer into the frame data buffer
// Don't go past the maxSize even with the max possible uvarints
// +1 for single byte of frame content, +1 for lastFrame bool
if uint64(w.Len())+2 > maxSize {
return fmt.Errorf("no more space: %d > %d", w.Len(), maxSize)
// +24 for maximum uvarints
// +32 for the channel ID data
maxDataSize := maxSize - 32 - 24 - 1 - 1
if maxDataSize >= uint64(co.buf.Len()) {
maxDataSize = uint64(co.buf.Len())
// If we are closed & will not spill past the current frame, end it.
if co.closed {
f.IsLast = true
}
}
f.Data = make([]byte, maxDataSize)
remaining := maxSize - uint64(w.Len())
maxFrameLen := remaining - 1 // -1 for the bool at the end
// estimate how many bytes we lose with encoding the length of the frame
// by encoding the max length (larger uvarints take more space)
maxFrameLen -= uint64(len(makeUVarint(maxFrameLen)))
// Pull the data into a temporary buffer b/c we use uvarints to record the length
// Could theoretically use the min of co.buf.Len() & maxFrameLen
co.scratch.Reset()
_, err := io.CopyN(&co.scratch, &co.buf, int64(maxFrameLen))
if err != nil && err != io.EOF {
if _, err := io.ReadFull(&co.buf, f.Data); err != nil {
return err
}
frameLen := uint64(co.scratch.Len())
co.offset += frameLen
w.Write(makeUVarint(frameLen))
if _, err := w.ReadFrom(&co.scratch); err != nil {
if _, err := f.MarshalBinary(w); err != nil {
return err
}
co.frame += 1
// Only mark as closed if the channel is closed & there is no more data available
if co.closed && err == io.EOF {
w.WriteByte(1)
if f.IsLast {
return io.EOF
} else {
w.WriteByte(0)
return nil
}
}
......
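The new OutputFrame reserves a fixed header budget before filling the frame with channel data: 32 bytes of channel ID data, 24 bytes for the three uvarints (timestamp, frame number, data length, at the 8-byte bound the code assumes), 1 byte for is_last, and 1 more byte, matching the earlier "+1 for single byte of frame content" comment. A hedged helper making that arithmetic explicit; frameDataBudget is illustrative, not part of the diff:

package derive

// frameDataBudget mirrors the arithmetic in OutputFrame: the caller-supplied
// maxSize is reduced by the fixed per-frame overhead.
// For example, frameDataBudget(1000) == 942.
func frameDataBudget(maxSize uint64) uint64 {
	const overhead = 32 + 24 + 1 + 1 // channel ID data + uvarints + is_last + 1 spare byte
	if maxSize <= overhead {
		return 0
	}
	return maxSize - overhead
}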
......@@ -77,6 +77,7 @@ func UnmarshalDepositLogEvent(ev *types.Log) (*types.DepositTx, error) {
}
dep.SourceHash = source.SourceHash()
dep.From = from
dep.IsSystemTransaction = false
var err error
switch version {
......
......@@ -102,16 +102,16 @@ func L1InfoDeposit(seqNumber uint64, block eth.L1Info) (*types.DepositTx, error)
L1BlockHash: block.Hash(),
SeqNumber: seqNumber,
}
// Uses ~30k normal case
// Uses ~70k on first transaction
// Round up to 75k to ensure that we always have enough gas.
// Set a very large gas limit with `IsSystemTransaction` to ensure
// that the L1 Attributes Transaction does not run out of gas.
return &types.DepositTx{
SourceHash: source.SourceHash(),
From: L1InfoDepositerAddress,
To: &L1BlockAddress,
Mint: nil,
Value: big.NewInt(0),
Gas: 150_000, // TODO: temporary work around. Block 1 seems to require more gas than specced.
Gas: 150_000_000,
IsSystemTransaction: true,
Data: data,
}, nil
}
......
......@@ -4,6 +4,8 @@ import (
"context"
"math/big"
"github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/l1"
"github.com/ethereum-optimism/optimism/op-node/l2"
......@@ -56,7 +58,7 @@ type Network interface {
PublishL2Payload(ctx context.Context, payload *eth.ExecutionPayload) error
}
func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 *l2.Source, l1 *l1.Source, network Network, log log.Logger, snapshotLog log.Logger) *Driver {
func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 *l2.Source, l1 *l1.Source, network Network, log log.Logger, snapshotLog log.Logger, metrics *metrics.Metrics) *Driver {
output := &outputImpl{
Config: cfg,
dl: l1,
......@@ -67,7 +69,7 @@ func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 *l2.Source, l1 *l1.Sour
var state *state
verifConfDepth := NewConfDepth(driverCfg.VerifierConfDepth, func() eth.L1BlockRef { return state.l1Head }, l1)
derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l2)
state = NewState(driverCfg, log, snapshotLog, cfg, l1, l2, output, derivationPipeline, network)
state = NewState(driverCfg, log, snapshotLog, cfg, l1, l2, output, derivationPipeline, network, metrics)
return &Driver{s: state}
}
......
......@@ -9,6 +9,7 @@ import (
"time"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum/go-ethereum/log"
)
......@@ -67,6 +68,7 @@ type state struct {
output outputInterface
network Network // may be nil, networking is optional
metrics *metrics.Metrics
log log.Logger
snapshotLog log.Logger
done chan struct{}
......@@ -77,7 +79,7 @@ type state struct {
// NewState creates a new driver state. State changes take effect through
// the given output, derivation pipeline and network interfaces.
func NewState(driverCfg *Config, log log.Logger, snapshotLog log.Logger, config *rollup.Config, l1Chain L1Chain, l2Chain L2Chain,
output outputInterface, derivationPipeline DerivationPipeline, network Network) *state {
output outputInterface, derivationPipeline DerivationPipeline, network Network, metrics *metrics.Metrics) *state {
return &state{
derivation: derivationPipeline,
idleDerivation: false,
......@@ -91,6 +93,7 @@ func NewState(driverCfg *Config, log log.Logger, snapshotLog log.Logger, config
l2: l2Chain,
output: output,
network: network,
metrics: metrics,
l1Heads: make(chan eth.L1BlockRef, 10),
unsafeL2Payloads: make(chan *eth.ExecutionPayload, 10),
}
......@@ -105,6 +108,8 @@ func (s *state) Start(ctx context.Context) error {
}
s.l1Head = l1Head
s.l2Head, _ = s.l2.L2BlockRefByNumber(ctx, nil)
s.metrics.SetHead("l1", s.l1Head.Number)
s.metrics.SetHead("l2_unsafe", s.l2Head.Number)
s.derivation.Reset()
......@@ -151,6 +156,7 @@ func (s *state) handleNewL1Block(newL1Head eth.L1BlockRef) {
// This could either be a long L1 extension, or a reorg. Both can be handled the same way.
s.log.Warn("L1 Head signal indicates an L1 re-org", "old_l1_head", s.l1Head, "new_l1_head_parent", newL1Head.ParentHash, "new_l1_head", newL1Head)
}
s.metrics.SetHead("l1", newL1Head.Number)
s.l1Head = newL1Head
}
......@@ -238,6 +244,7 @@ func (s *state) createNewL2Block(ctx context.Context) error {
s.l2Head = newUnsafeL2Head
s.log.Info("Sequenced new l2 block", "l2Head", s.l2Head, "l1Origin", s.l2Head.L1Origin, "txs", len(payload.Transactions), "time", s.l2Head.Time)
s.metrics.TransactionsSequencedTotal.Add(float64(len(payload.Transactions)))
if s.network != nil {
if err := s.network.PublishL2Payload(ctx, payload); err != nil {
......@@ -315,6 +322,7 @@ func (s *state) eventLoop() {
cancel()
if err != nil {
s.log.Error("Error creating new L2 block", "err", err)
s.metrics.DerivationErrorsTotal.Inc()
}
// We need to catch up to the next origin as quickly as possible. We can do this by
......@@ -330,6 +338,7 @@ func (s *state) eventLoop() {
s.snapshot("New unsafe payload")
s.log.Info("Optimistically queueing unsafe L2 execution payload", "id", payload.ID())
s.derivation.AddUnsafePayload(payload)
s.metrics.UnsafePayloadsTotal.Inc()
reqStep()
case newL1Head := <-s.l1Heads:
......@@ -338,6 +347,7 @@ func (s *state) eventLoop() {
s.handleNewL1Block(newL1Head)
reqStep() // a new L1 head may mean we have the data to not get an EOF again.
case <-stepReqCh:
s.metrics.SetDerivationIdle(false)
s.idleDerivation = false
s.log.Debug("Derivation process step", "onto_origin", s.derivation.Progress().Origin, "onto_closed", s.derivation.Progress().Closed)
stepCtx, cancel := context.WithTimeout(ctx, time.Second*10) // TODO pick a timeout for executing a single step
......@@ -346,16 +356,21 @@ func (s *state) eventLoop() {
if err == io.EOF {
s.log.Debug("Derivation process went idle", "progress", s.derivation.Progress().Origin)
s.idleDerivation = true
s.metrics.SetDerivationIdle(true)
continue
} else if err != nil {
// If the pipeline corrupts, e.g. due to a reorg, simply reset it
s.log.Warn("Derivation pipeline is reset", "err", err)
s.derivation.Reset()
s.metrics.RecordPipelineReset()
} else {
finalized, safe, unsafe := s.derivation.Finalized(), s.derivation.SafeL2Head(), s.derivation.UnsafeL2Head()
// log sync progress when it changes
if s.l2Finalized != finalized || s.l2SafeHead != safe || s.l2Head != unsafe {
s.log.Info("Sync progress", "finalized", finalized, "safe", safe, "unsafe", unsafe)
s.metrics.SetHead("l2_finalized", finalized.Number)
s.metrics.SetHead("l2_safe", safe.Number)
s.metrics.SetHead("l2_unsafe", unsafe.Number)
}
// update the heads
s.l2Finalized = finalized
......
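The metrics.Metrics type that the driver now threads through NewDriver and NewState is not shown in this diff; the call sites above (SetHead, SetDerivationIdle, RecordPipelineReset, and the *Total counters) suggest a thin wrapper around Prometheus collectors. A hedged sketch of such a wrapper using prometheus/client_golang; the metric names, labels, and registration details are assumptions, not the actual op-node implementation:

package metrics

import "github.com/prometheus/client_golang/prometheus"

// Metrics is a hypothetical reconstruction of the surface the driver uses.
type Metrics struct {
	Heads                      *prometheus.GaugeVec
	DerivationIdle             prometheus.Gauge
	PipelineResetsTotal        prometheus.Counter
	UnsafePayloadsTotal        prometheus.Counter
	DerivationErrorsTotal      prometheus.Counter
	TransactionsSequencedTotal prometheus.Counter
}

func NewMetrics(registry *prometheus.Registry) *Metrics {
	m := &Metrics{
		Heads: prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Name: "op_node_head",
			Help: "Latest block number per head type (l1, l2_unsafe, l2_safe, l2_finalized).",
		}, []string{"type"}),
		DerivationIdle: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "op_node_derivation_idle",
			Help: "1 when the derivation pipeline is idle.",
		}),
		PipelineResetsTotal: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "op_node_pipeline_resets_total",
			Help: "Count of derivation pipeline resets.",
		}),
		UnsafePayloadsTotal: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "op_node_unsafe_payloads_total",
			Help: "Count of unsafe payloads received from the network.",
		}),
		DerivationErrorsTotal: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "op_node_derivation_errors_total",
			Help: "Count of errors hit while deriving or sequencing blocks.",
		}),
		TransactionsSequencedTotal: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "op_node_transactions_sequenced_total",
			Help: "Count of transactions included in sequenced blocks.",
		}),
	}
	registry.MustRegister(m.Heads, m.DerivationIdle, m.PipelineResetsTotal,
		m.UnsafePayloadsTotal, m.DerivationErrorsTotal, m.TransactionsSequencedTotal)
	return m
}

// SetHead records the latest block number for a given head type.
func (m *Metrics) SetHead(kind string, num uint64) {
	m.Heads.WithLabelValues(kind).Set(float64(num))
}

// SetDerivationIdle flags whether the derivation pipeline is currently idle.
func (m *Metrics) SetDerivationIdle(idle bool) {
	v := 0.0
	if idle {
		v = 1.0
	}
	m.DerivationIdle.Set(v)
}

// RecordPipelineReset counts a reset of the derivation pipeline.
func (m *Metrics) RecordPipelineReset() { m.PipelineResetsTotal.Inc() }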
......@@ -25,7 +25,7 @@ func (d *outputImpl) createNewBlock(ctx context.Context, l2Head eth.L2BlockRef,
fetchCtx, cancel := context.WithTimeout(ctx, time.Second*20)
defer cancel()
attrs, _, err := derive.PreparePayloadAttributes(fetchCtx, d.Config, d.dl, l2Head, l1Origin.ID())
attrs, _, err := derive.PreparePayloadAttributes(fetchCtx, d.Config, d.dl, l2Head, l2Head.Time+d.Config.BlockTime, l1Origin.ID())
if err != nil {
return l2Head, nil, err
}
......
......@@ -24,6 +24,10 @@ import (
// NewConfig creates a Config from the provided flags or environment variables.
func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
if err := flags.CheckRequired(ctx); err != nil {
return nil, err
}
rollupConfig, err := NewRollupConfig(ctx)
if err != nil {
return nil, err
......@@ -68,6 +72,11 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
ListenAddr: ctx.GlobalString(flags.MetricsAddrFlag.Name),
ListenPort: ctx.GlobalInt(flags.MetricsPortFlag.Name),
},
Pprof: node.PprofConfig{
Enabled: ctx.GlobalBool(flags.PprofEnabledFlag.Name),
ListenAddr: ctx.GlobalString(flags.PprofAddrFlag.Name),
ListenPort: ctx.GlobalString(flags.PprofPortFlag.Name),
},
P2P: p2pConfig,
P2PSigner: p2pSignerSetup,
}
......
......@@ -32,6 +32,7 @@ func GenerateDeposit(sourceHash common.Hash, rng *rand.Rand) *types.DepositTx {
Gas: uint64(rng.Int63n(10 * 1e6)), // 10 M gas max
Data: data,
Mint: mint,
IsSystemTransaction: false,
}
return dep
}
......
package withdrawals
import (
"bytes"
"context"
"errors"
"fmt"
......@@ -172,8 +173,15 @@ func FinalizeWithdrawalParameters(ctx context.Context, l2client ProofClient, txH
if err != nil {
return FinalizedWithdrawalParameters{}, err
}
ev1, err := ParseWithdrawalInitiatedExtension1(receipt)
if err != nil {
return FinalizedWithdrawalParameters{}, err
}
// Generate then verify the withdrawal proof
withdrawalHash, err := WithdrawalHash(ev)
if err != nil {
return FinalizedWithdrawalParameters{}, err
}
if !bytes.Equal(withdrawalHash[:], ev1.Hash[:]) {
return FinalizedWithdrawalParameters{}, errors.New("computed withdrawal hash does not match the hash emitted in WithdrawalInitiatedExtension1")
}
......@@ -255,14 +263,52 @@ func ParseWithdrawalInitiated(receipt *types.Receipt) (*bindings.L2ToL1MessagePa
if err != nil {
return nil, err
}
if len(receipt.Logs) != 1 {
return nil, errors.New("invalid length of logs")
abi, err := bindings.L2ToL1MessagePasserMetaData.GetAbi()
if err != nil {
return nil, err
}
for _, log := range receipt.Logs {
event, err := abi.EventByID(log.Topics[0])
if err != nil {
return nil, err
}
ev, err := contract.ParseWithdrawalInitiated(*receipt.Logs[0])
if event.Name == "WithdrawalInitiated" {
ev, err := contract.ParseWithdrawalInitiated(*log)
if err != nil {
return nil, fmt.Errorf("failed to parse log: %w", err)
}
return ev, nil
}
}
return nil, errors.New("Unable to find WithdrawalInitiated event")
}
// ParseWithdrawalInitiatedExtension1 parses the WithdrawalInitiatedExtension1 event from the logs of a receipt.
func ParseWithdrawalInitiatedExtension1(receipt *types.Receipt) (*bindings.L2ToL1MessagePasserWithdrawalInitiatedExtension1, error) {
contract, err := bindings.NewL2ToL1MessagePasser(common.Address{}, nil)
if err != nil {
return nil, err
}
abi, err := bindings.L2ToL1MessagePasserMetaData.GetAbi()
if err != nil {
return nil, err
}
for _, log := range receipt.Logs {
event, err := abi.EventByID(log.Topics[0])
if err != nil {
return nil, err
}
if event.Name == "WithdrawalInitiatedExtension1" {
ev, err := contract.ParseWithdrawalInitiatedExtension1(*log)
if err != nil {
return nil, fmt.Errorf("failed to parse log: %w", err)
}
return ev, nil
}
}
return nil, errors.New("Unable to find WithdrawalInitiatedExtension1 event")
}
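Both parsers scan every log in the receipt and match on the event signature via abi.EventByID instead of assuming a fixed log index, so receipts with additional logs are handled. A hedged sketch of how FinalizeWithdrawalParameters above ties the two events together; checkWithdrawalReceipt is a hypothetical helper, while WithdrawalHash is the existing package helper:

package withdrawals

import (
	"bytes"
	"errors"

	"github.com/ethereum/go-ethereum/core/types"
)

// checkWithdrawalReceipt parses both withdrawal events from a receipt and
// verifies that the locally computed withdrawal hash matches the hash
// committed to by the WithdrawalInitiatedExtension1 event.
func checkWithdrawalReceipt(receipt *types.Receipt) error {
	ev, err := ParseWithdrawalInitiated(receipt)
	if err != nil {
		return err
	}
	ev1, err := ParseWithdrawalInitiatedExtension1(receipt)
	if err != nil {
		return err
	}
	hash, err := WithdrawalHash(ev)
	if err != nil {
		return err
	}
	if !bytes.Equal(hash[:], ev1.Hash[:]) {
		return errors.New("computed withdrawal hash does not match emitted hash")
	}
	return nil
}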
// StorageSlotOfWithdrawalHash determines the storage slot of the Withdrawer contract to look at
......
......@@ -49,6 +49,9 @@ type Config struct {
// the l2output transactions.
L2OutputHDPath string
// PrivateKey is the private key used for l2output transactions.
PrivateKey string
/* Optional Params */
// LogLevel is the lowest log level that will be output.
......@@ -57,6 +60,11 @@ type Config struct {
// LogTerminal if true, will log to stdout in terminal format. Otherwise the
// output will be in JSON format.
LogTerminal bool
// Flags for the pprof server
PprofEnabled bool
PprofAddr string
PprofPort string
}
// NewConfig parses the Config from the provided flags or environment variables.
......@@ -73,8 +81,12 @@ func NewConfig(ctx *cli.Context) Config {
ResubmissionTimeout: ctx.GlobalDuration(flags.ResubmissionTimeoutFlag.Name),
Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name),
L2OutputHDPath: ctx.GlobalString(flags.L2OutputHDPathFlag.Name),
PrivateKey: ctx.GlobalString(flags.PrivateKeyFlag.Name),
/* Optional Flags */
LogLevel: ctx.GlobalString(flags.LogLevelFlag.Name),
LogTerminal: ctx.GlobalBool(flags.LogTerminalFlag.Name),
PprofEnabled: ctx.GlobalBool(flags.PprofEnabledFlag.Name),
PprofAddr: ctx.GlobalString(flags.PprofAddrFlag.Name),
PprofPort: ctx.GlobalString(flags.PprofPortFlag.Name),
}
}
......@@ -70,16 +70,19 @@ var (
Name: "mnemonic",
Usage: "The mnemonic used to derive the wallets for either the " +
"sequencer or the l2output",
Required: true,
EnvVar: prefixEnvVar("MNEMONIC"),
}
L2OutputHDPathFlag = cli.StringFlag{
Name: "l2-output-hd-path",
Usage: "The HD path used to derive the l2output wallet from the " +
"mnemonic. The mnemonic flag must also be set.",
Required: true,
EnvVar: prefixEnvVar("L2_OUTPUT_HD_PATH"),
}
PrivateKeyFlag = cli.StringFlag{
Name: "private-key",
Usage: "The private key to use with the l2output wallet. Must not be used with mnemonic.",
EnvVar: prefixEnvVar("PRIVATE_KEY"),
}
/* Optional Flags */
......@@ -95,6 +98,23 @@ var (
"in JSON format.",
EnvVar: prefixEnvVar("LOG_TERMINAL"),
}
PprofEnabledFlag = cli.BoolFlag{
Name: "pprof.enabled",
Usage: "Enable the pprof server",
EnvVar: prefixEnvVar("PPROF_ENABLED"),
}
PprofAddrFlag = cli.StringFlag{
Name: "pprof.addr",
Usage: "pprof listening address",
Value: "0.0.0.0",
EnvVar: prefixEnvVar("PPROF_ADDR"),
}
PprofPortFlag = cli.IntFlag{
Name: "pprof.port",
Usage: "pprof listening port",
Value: 6060,
EnvVar: prefixEnvVar("PPROF_PORT"),
}
)
var requiredFlags = []cli.Flag{
......@@ -106,13 +126,17 @@ var requiredFlags = []cli.Flag{
NumConfirmationsFlag,
SafeAbortNonceTooLowCountFlag,
ResubmissionTimeoutFlag,
MnemonicFlag,
L2OutputHDPathFlag,
}
var optionalFlags = []cli.Flag{
MnemonicFlag,
L2OutputHDPathFlag,
PrivateKeyFlag,
LogLevelFlag,
LogTerminalFlag,
PprofEnabledFlag,
PprofAddrFlag,
PprofPortFlag,
}
// Flags contains the list of configuration options available to the binary.
......
......@@ -5,7 +5,7 @@ go 1.18
require (
github.com/ethereum-optimism/optimism/op-bindings v0.3.0
github.com/ethereum-optimism/optimism/op-node v0.3.0
github.com/ethereum/go-ethereum v1.10.20
github.com/ethereum/go-ethereum v1.10.21
github.com/miguelmota/go-ethereum-hdwallet v0.1.1
github.com/stretchr/testify v1.8.0
github.com/urfave/cli v1.22.5
......@@ -60,4 +60,4 @@ require (
gopkg.in/yaml.v3 v3.0.1 // indirect
)
replace github.com/ethereum/go-ethereum v1.10.20 => github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d
replace github.com/ethereum/go-ethereum v1.10.21 => github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e
......@@ -181,8 +181,8 @@ github.com/ethereum-optimism/optimism/op-bindings v0.3.0 h1:d2Mwb8FzR2zuhW0sS5xF
github.com/ethereum-optimism/optimism/op-bindings v0.3.0/go.mod h1:CrvUVIISKcyJ7o27ub/HY4Kq9wEJQxrGmWthTqxPSGo=
github.com/ethereum-optimism/optimism/op-node v0.3.0 h1:jep/cbIbP7fjBSAR48yk5NJVEoGYvoNlYI00KpBI6Mw=
github.com/ethereum-optimism/optimism/op-node v0.3.0/go.mod h1:iF9AhYjr8jNeoCDNP/Vs/ywQ2USZU5L66AxZbSAUi0E=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d h1:w0DBXhp0sv0bWRDOCA/Y6yHOALU7qLLLf5/kE3YfFr4=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220715235548-70b02481016d/go.mod h1:m2m08SAQ8XB0VcVBoDg9n74Dw5PUMl3hzv1NXVBFPfg=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e h1:hz+iywXjnqz6xA3lTLvtNL9OZyX76pS5SER4kZBmQLs=
github.com/ethereum-optimism/reference-optimistic-geth v0.0.0-20220803173305-1c9d4cc76a6e/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/ethereum/go-ethereum v1.10.4/go.mod h1:nEE0TP5MtxGzOMd7egIrbPJMQBnhVU3ELNxhBglIzhg=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
......
......@@ -2,12 +2,20 @@ package op_proposer
import (
"context"
"crypto/ecdsa"
"errors"
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum-optimism/optimism/op-proposer/drivers/l2output"
"github.com/ethereum-optimism/optimism/op-proposer/rollupclient"
"github.com/ethereum-optimism/optimism/op-proposer/txmgr"
......@@ -68,6 +76,27 @@ func Main(version string) func(ctx *cli.Context) error {
l.Info("L2 Output Submitter started")
if cfg.PprofEnabled {
var srv http.Server
srv.Addr = net.JoinHostPort(cfg.PprofAddr, cfg.PprofPort)
// Start pprof server + register its shutdown
go func() {
l.Info("pprof server started", "addr", srv.Addr)
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
l.Error("error in pprof server", "err", err)
} else {
l.Info("pprof server shutting down")
}
}()
defer func() {
shutCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
err := srv.Shutdown(shutCtx)
l.Info("pprof server shut down", "err", err)
}()
}
interruptChannel := make(chan os.Signal, 1)
signal.Notify(interruptChannel, []os.Signal{
os.Interrupt,
......@@ -97,14 +126,21 @@ func NewL2OutputSubmitter(
) (*L2OutputSubmitter, error) {
ctx := context.Background()
var l2OutputPrivKey *ecdsa.PrivateKey
var err error
if cfg.PrivateKey != "" && cfg.Mnemonic != "" {
return nil, errors.New("cannot specify both a private key and a mnemonic")
}
if cfg.PrivateKey == "" {
// Parse l2output wallet private key and L2OO contract address.
wallet, err := hdwallet.NewFromMnemonic(cfg.Mnemonic)
if err != nil {
return nil, err
}
l2OutputPrivKey, err := wallet.PrivateKey(accounts.Account{
l2OutputPrivKey, err = wallet.PrivateKey(accounts.Account{
URL: accounts.URL{
Path: cfg.L2OutputHDPath,
},
......@@ -112,6 +148,12 @@ func NewL2OutputSubmitter(
if err != nil {
return nil, err
}
} else {
l2OutputPrivKey, err = crypto.HexToECDSA(strings.TrimPrefix(cfg.PrivateKey, "0x"))
if err != nil {
return nil, err
}
}
l2ooAddress, err := parseAddress(cfg.L2OOAddress)
if err != nil {
......
......@@ -58,9 +58,9 @@ function wait_up {
mkdir -p ./.devnet
if [ ! -f ./.devnet/rollup.json ]; then
GENESIS_TIMESTAMP=$(date +%s | xargs printf "0x%x")
L1_GENESIS_TIMESTAMP=$(date +%s | xargs printf "0x%x")
else
GENESIS_TIMESTAMP=$(jq '.timestamp' < .devnet/genesis-l1.json)
L1_GENESIS_TIMESTAMP=$(jq '.timestamp' < .devnet/genesis-l1.json)
fi
# Regenerate the L1 genesis file if necessary. The existence of the genesis
......@@ -69,7 +69,7 @@ if [ ! -f ./.devnet/genesis-l1.json ]; then
echo "Regenerating L1 genesis."
(
cd $CONTRACTS_BEDROCK
L2OO_STARTING_BLOCK_TIMESTAMP=$GENESIS_TIMESTAMP npx hardhat genesis-l1 \
L1_GENESIS_TIMESTAMP=$L1_GENESIS_TIMESTAMP npx hardhat genesis-l1 \
--outfile genesis-l1.json
mv genesis-l1.json ../../.devnet/genesis-l1.json
)
......@@ -89,7 +89,7 @@ if [ ! -d $CONTRACTS_BEDROCK/deployments/$NETWORK ]; then
(
echo "Deploying contracts."
cd $CONTRACTS_BEDROCK
L2OO_STARTING_BLOCK_TIMESTAMP=$GENESIS_TIMESTAMP yarn hardhat --network $NETWORK deploy
L1_GENESIS_TIMESTAMP=$L1_GENESIS_TIMESTAMP yarn hardhat --network $NETWORK deploy
)
else
echo "Contracts already deployed, skipping."
......@@ -99,7 +99,7 @@ if [ ! -f ./.devnet/genesis-l2.json ]; then
(
echo "Creating L2 genesis file."
cd $CONTRACTS_BEDROCK
L2OO_STARTING_BLOCK_TIMESTAMP=$GENESIS_TIMESTAMP npx hardhat --network $NETWORK genesis-l2
L1_GENESIS_TIMESTAMP=$L1_GENESIS_TIMESTAMP npx hardhat --network $NETWORK genesis-l2
mv genesis.json ../../.devnet/genesis-l2.json
echo "Created L2 genesis."
)
......@@ -120,7 +120,7 @@ if [ ! -f ./.devnet/rollup.json ]; then
(
echo "Building rollup config..."
cd $CONTRACTS_BEDROCK
L2OO_STARTING_BLOCK_TIMESTAMP=$GENESIS_TIMESTAMP npx hardhat rollup-config --network $NETWORK
L1_GENESIS_TIMESTAMP=$L1_GENESIS_TIMESTAMP npx hardhat --network $NETWORK rollup-config
mv rollup.json ../../.devnet/rollup.json
)
else
......
......@@ -63,10 +63,12 @@ services:
--metrics.enabled
--metrics.addr=0.0.0.0
--metrics.port=7300
--pprof.enabled
ports:
- "7545:8545"
- "9003:9003"
- "7300:7300"
- "6060:6060"
volumes:
- ${PWD}/p2p-sequencer-key.txt:/config/p2p-sequencer-key.txt
- ${PWD}/p2p-node-key.txt:/config/p2p-node-key.txt
......@@ -82,11 +84,13 @@ services:
build:
context: ../
dockerfile: ./op-proposer/Dockerfile
ports:
- "6062:6060"
environment:
L1_ETH_RPC: http://l1:8545
L2_ETH_RPC: http://l2:8545
ROLLUP_RPC: http://op-node:8545
OUTPUT_SUBMITTER_POLL_INTERVAL: 10s
OUTPUT_SUBMITTER_POLL_INTERVAL: 1s
OUTPUT_SUBMITTER_NUM_CONFIRMATIONS: 1
OUTPUT_SUBMITTER_SAFE_ABORT_NONCE_TOO_LOW_COUNT: 3
OUTPUT_SUBMITTER_RESUBMISSION_TIMEOUT: 30s
......@@ -94,6 +98,7 @@ services:
OUTPUT_SUBMITTER_L2_OUTPUT_HD_PATH: "m/44'/60'/0'/0/1"
OUTPUT_SUBMITTER_LOG_TERMINAL: "true"
L2OO_ADDRESS: "${L2OO_ADDRESS}"
OUTPUT_SUBMITTER_PPROF_ENABLED: "true"
op-batcher:
depends_on:
......@@ -103,6 +108,8 @@ services:
build:
context: ../
dockerfile: ./op-batcher/Dockerfile
ports:
- "6061:6060"
environment:
L1_ETH_RPC: http://l1:8545
L2_ETH_RPC: http://l2:8545
......@@ -116,10 +123,10 @@ services:
BATCH_SUBMITTER_RESUBMISSION_TIMEOUT: 30s
BATCH_SUBMITTER_MNEMONIC: test test test test test test test test test test test junk
BATCH_SUBMITTER_SEQUENCER_HD_PATH: "m/44'/60'/0'/0/2"
BATCH_SUBMITTER_SEQUENCER_HISTORY_DB_FILENAME: "history_db.json"
BATCH_SUBMITTER_SEQUENCER_GENESIS_HASH: "${SEQUENCER_GENESIS_HASH}"
BATCH_SUBMITTER_SEQUENCER_BATCH_INBOX_ADDRESS: "${SEQUENCER_BATCH_INBOX_ADDRESS}"
BATCH_SUBMITTER_LOG_TERMINAL: "true"
BATCH_SUBMITTER_PPROF_ENABLED: "true"
stateviz:
build:
......
......@@ -9,7 +9,7 @@ WORKDIR /opt/foundry
# Only diff from upstream docker image is this clone instead
# of COPY. We select a specific commit to use.
RUN git clone https://github.com/foundry-rs/foundry.git . \
&& git checkout 3c49efe58ca4bdeec4729490501da06914446405
&& git checkout 64fe4acc97e6d76551cea7598c201f05ecd65639
RUN source $HOME/.profile && cargo build --release \
&& strip /opt/foundry/target/release/forge \
......
......@@ -46,6 +46,9 @@
"lint:ts:check": "yarn lerna run lint:ts:check",
"lint:check": "yarn lerna run lint:check",
"lint:fix": "yarn lerna run lint:fix --parallel",
"lint:specs:fix": "yarn run markdownlint-cli2-fix \"./specs/**/*.md\"",
"lint:specs:check": "yarn run markdownlint-cli2 \"./specs/**/*.md\"",
"lint:specs:toc": "yarn run doctoc '--title=**Table of Contents**' ./specs",
"postinstall": "patch-package",
"ready": "yarn lint && yarn test",
"prepare": "husky install",
......@@ -63,7 +66,7 @@
"chai": "^4.2.0",
"copyfiles": "^2.3.0",
"depcheck": "^1.4.3",
"doctoc": "2.1.0",
"doctoc": "^2.2.0",
"eslint": "^8.16.0",
"eslint-config-prettier": "^8.3.0",
"eslint-config-standard": "^16.0.3",
......@@ -79,7 +82,7 @@
"lerna": "^4.0.0",
"lint-staged": "11.0.0",
"markdownlint": "^0.24.0",
"markdownlint-cli2": "^0.3.2",
"markdownlint-cli2": "0.4.0",
"mkdirp": "^1.0.4",
"mocha": "^8.4.0",
"nyc": "^15.1.0",
......
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable.sol";
import { Initializable } from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol";
import { ExcessivelySafeCall } from "excessively-safe-call/src/ExcessivelySafeCall.sol";
import { L2OutputOracle } from "./L2OutputOracle.sol";
import { Types } from "../libraries/Types.sol";
......@@ -136,6 +136,8 @@ contract OptimismPortal is Initializable, ResourceMetering, Semver {
// Prevent users from creating a deposit transaction where this address is the message
// sender on L2.
// In the context of the proxy delegate calling to this implementation,
// address(this) will return the address of the proxy.
require(
_tx.target != address(this),
"OptimismPortal: you cannot send messages to the portal contract"
......
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable.sol";
import { Initializable } from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol";
import { Math } from "@openzeppelin/contracts/utils/math/Math.sol";
import { SignedMath } from "@openzeppelin/contracts/utils/math/SignedMath.sol";
import { FixedPointMathLib } from "@rari-capital/solmate/src/utils/FixedPointMathLib.sol";
......
......@@ -49,6 +49,14 @@ contract L2ToL1MessagePasser is Semver {
bytes data
);
/**
* @notice Emitted any time a withdrawal is initiated. An extension to
* WithdrawalInitiated so that the interface is maintained.
*
* @param hash The hash of the withdrawal
*/
event WithdrawalInitiatedExtension1(bytes32 indexed hash);
/**
* @notice Emitted when the balance of this contract is burned.
*
......@@ -106,6 +114,8 @@ contract L2ToL1MessagePasser is Semver {
sentMessages[withdrawalHash] = true;
emit WithdrawalInitiated(nonce, msg.sender, _target, msg.value, _gasLimit, _data);
emit WithdrawalInitiatedExtension1(withdrawalHash);
unchecked {
++nonce;
}
......
......@@ -13,6 +13,8 @@ library Encoding {
/**
* @notice RLP encodes the L2 transaction that would be generated when a given deposit is sent
* to the L2 system. Useful for searching for a deposit in the L2 system.
* This currently only supports user deposits and not system
* transactions.
*
* @param _tx User deposit transaction to encode.
*
......@@ -24,14 +26,15 @@ library Encoding {
returns (bytes memory)
{
bytes32 source = Hashing.hashDepositSource(_tx.l1BlockHash, _tx.logIndex);
bytes[] memory raw = new bytes[](7);
bytes[] memory raw = new bytes[](8);
raw[0] = RLPWriter.writeBytes(abi.encodePacked(source));
raw[1] = RLPWriter.writeAddress(_tx.from);
raw[2] = _tx.isCreation ? RLPWriter.writeBytes("") : RLPWriter.writeAddress(_tx.to);
raw[3] = RLPWriter.writeUint(_tx.mint);
raw[4] = RLPWriter.writeUint(_tx.value);
raw[5] = RLPWriter.writeUint(uint256(_tx.gasLimit));
raw[6] = RLPWriter.writeBytes(_tx.data);
raw[6] = RLPWriter.writeBool(false);
raw[7] = RLPWriter.writeBytes(_tx.data);
return abi.encodePacked(uint8(0x7e), RLPWriter.writeList(raw));
}
......
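The Solidity change mirrors the updated deposit transaction type on the Go side: the 0x7e-prefixed payload now RLP-encodes eight fields, with the new isSystemTx flag hard-coded to false as the seventh field, since user deposits are never system transactions. A hedged Go sketch of the same encoding for a non-creation deposit, using go-ethereum's rlp package; encodeUserDeposit is an illustrative helper, not part of this diff:

package encoding

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

// encodeUserDeposit reproduces the field order used by Encoding.encodeDepositTransaction
// for a non-creation user deposit:
// 0x7e || rlp([sourceHash, from, to, mint, value, gasLimit, isSystemTx=false, data]).
func encodeUserDeposit(
	sourceHash common.Hash,
	from, to common.Address,
	mint, value *big.Int,
	gasLimit uint64,
	data []byte,
) ([]byte, error) {
	fields := []interface{}{
		sourceHash,
		from,
		to,
		mint,
		value,
		gasLimit,
		false, // isSystemTx is always false for user deposits
		data,
	}
	body, err := rlp.EncodeToBytes(fields)
	if err != nil {
		return nil, err
	}
	return append([]byte{0x7e}, body...), nil
}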
......@@ -34,10 +34,10 @@ library Types {
struct UserDepositTransaction {
address from;
address to;
bool isCreation;
uint256 value;
uint256 mint;
uint64 gasLimit;
bool isCreation;
bytes data;
bytes32 l1BlockHash;
uint256 logIndex;
......
......@@ -15,9 +15,10 @@ import { L2CrossDomainMessenger } from "../L2/L2CrossDomainMessenger.sol";
import { AddressAliasHelper } from "../vendor/AddressAliasHelper.sol";
import { LegacyERC20ETH } from "../legacy/LegacyERC20ETH.sol";
import { Predeploys } from "../libraries/Predeploys.sol";
import { Types } from "../libraries/Types.sol";
import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol";
import { Proxy } from "../universal/Proxy.sol";
import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable.sol";
import { Initializable } from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol";
import { ResolvedDelegateProxy } from "../legacy/ResolvedDelegateProxy.sol";
import { AddressManager } from "../legacy/AddressManager.sol";
import { L1ChugSplashProxy } from "../legacy/L1ChugSplashProxy.sol";
......@@ -78,7 +79,6 @@ contract CommonTest is Test {
abi.encodePacked(_mint, _value, _gasLimit, _isCreation, _data)
);
}
}
contract L2OutputOracle_Initializer is CommonTest {
......@@ -457,24 +457,26 @@ contract Bridge_Initializer is Messenger_Initializer {
}
contract FFIInterface is Test {
function getFinalizeWithdrawalTransactionInputs(
uint256 _nonce,
address _sender,
address _target,
uint64 _value,
uint256 _gasLimit,
bytes memory _data
) external returns (bytes32, bytes32, bytes32, bytes32, bytes memory) {
function getFinalizeWithdrawalTransactionInputs(Types.WithdrawalTransaction memory _tx)
external
returns (
bytes32,
bytes32,
bytes32,
bytes32,
bytes memory
)
{
string[] memory cmds = new string[](9);
cmds[0] = "node";
cmds[1] = "dist/scripts/differential-testing.js";
cmds[2] = "getFinalizeWithdrawalTransactionInputs";
cmds[3] = vm.toString(_nonce);
cmds[4] = vm.toString(_sender);
cmds[5] = vm.toString(_target);
cmds[6] = vm.toString(_value);
cmds[7] = vm.toString(_gasLimit);
cmds[8] = vm.toString(_data);
cmds[3] = vm.toString(_tx.nonce);
cmds[4] = vm.toString(_tx.sender);
cmds[5] = vm.toString(_tx.target);
cmds[6] = vm.toString(_tx.value);
cmds[7] = vm.toString(_tx.gasLimit);
cmds[8] = vm.toString(_tx.data);
bytes memory result = vm.ffi(cmds);
(
......@@ -574,11 +576,32 @@ contract FFIInterface is Test {
cmds[8] = vm.toString(_value);
cmds[9] = vm.toString(_gas);
cmds[10] = vm.toString(_data);
bytes memory result = vm.ffi(cmds);
bytes memory result = vm.ffi(cmds);
return abi.decode(result, (bytes32));
}
function encodeDepositTransaction(
Types.UserDepositTransaction calldata txn
) external returns (bytes memory) {
string[] memory cmds = new string[](12);
cmds[0] = "node";
cmds[1] = "dist/scripts/differential-testing.js";
cmds[2] = "encodeDepositTransaction";
cmds[3] = vm.toString(txn.from);
cmds[4] = vm.toString(txn.to);
cmds[5] = vm.toString(txn.value);
cmds[6] = vm.toString(txn.mint);
cmds[7] = vm.toString(txn.gasLimit);
cmds[8] = vm.toString(txn.isCreation);
cmds[9] = vm.toString(txn.data);
cmds[10] = vm.toString(txn.l1BlockHash);
cmds[11] = vm.toString(txn.logIndex);
bytes memory result = vm.ffi(cmds);
return abi.decode(result, (bytes));
}
function encodeCrossDomainMessage(
uint256 _nonce,
address _sender,
......
......@@ -65,4 +65,35 @@ contract Encoding_Test is CommonTest {
assertEq(encoding, _encoding);
}
function test_encodeDepositTransaction_differential(
address _from,
address _to,
uint256 _mint,
uint256 _value,
uint64 _gas,
bool isCreate,
bytes memory _data,
uint256 _logIndex
) external {
Types.UserDepositTransaction memory t = Types.UserDepositTransaction(
_from,
_to,
isCreate,
_value,
_mint,
_gas,
_data,
bytes32(uint256(0)),
_logIndex
);
bytes memory txn = Encoding.encodeDepositTransaction(t);
bytes memory _txn = ffi.encodeDepositTransaction(t);
assertEq(
txn,
_txn
);
}
}
......@@ -127,10 +127,10 @@ contract Hashing_Test is CommonTest {
Types.UserDepositTransaction(
_from,
_to,
false, // isCreate
_value,
_mint,
_gas,
false, // isCreate
_data,
bytes32(uint256(0)),
_logIndex
......
......@@ -18,6 +18,8 @@ contract L2ToL1MessagePasserTest is CommonTest {
bytes data
);
event WithdrawalInitiatedExtension1(bytes32 indexed hash);
event WithdrawerBalanceBurnt(uint256 indexed amount);
function setUp() virtual public {
......@@ -36,6 +38,20 @@ contract L2ToL1MessagePasserTest is CommonTest {
hex""
);
bytes32 withdrawalHash = Hashing.hashWithdrawal(
Types.WithdrawalTransaction(
messagePasser.nonce(),
address(this),
address(4),
100,
64000,
hex""
)
);
vm.expectEmit(true, true, true, true);
emit WithdrawalInitiatedExtension1(withdrawalHash);
vm.deal(address(this), 2**64);
messagePasser.initiateWithdrawal{ value: 100 }(
address(4),
......
......@@ -2,19 +2,20 @@ import { ethers } from 'ethers'
const { env } = process
const startingTimestamp =
typeof env.L2OO_STARTING_BLOCK_TIMESTAMP === 'string'
? ethers.BigNumber.from(env.L2OO_STARTING_BLOCK_TIMESTAMP).toNumber()
const l1GenesisTimestamp =
typeof env.L1_GENESIS_TIMESTAMP === 'string'
? ethers.BigNumber.from(env.L1_GENESIS_TIMESTAMP).toNumber()
: Math.floor(Date.now() / 1000)
const config = {
submissionInterval: 6,
submissionInterval: 20,
genesisOutput: ethers.constants.HashZero,
historicalBlocks: 0,
l1StartingBlockTag: 'earliest',
startingBlockNumber: 0,
l2BlockTime: 2,
startingTimestamp,
l1GenesisTimestamp,
sequencerAddress: '0x70997970C51812dc3A010C7d01b50e0d17dc79C8',
l2CrossDomainMessengerOwner: ethers.constants.AddressZero,
......@@ -52,7 +53,7 @@ const config = {
outputOracleOwner: '0x70997970C51812dc3A010C7d01b50e0d17dc79C8',
optimismL2FeeRecipient: '0xd9c09e21b57c98e58a80552c170989b426766aa7',
batchSenderAddress: '0xDe3829A23DF1479438622a08a116E8Eb3f620BB5',
batchSenderAddress: '0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC',
}
export default config
import { ethers } from 'ethers'
const sequencerAddress = '0x0631f9bccb86548dc4a574c730a46d6ca283a338'
const startingTimestamp = 1656654016
const sequencerAddress = '0x6c23a0dcdfc44b7a57bed148de598895e398d984'
const l1StartingBlockTag =
'0xafce66a0a2446856112e4069b275ad32b1f4a607888f9c4c59eddf9be81f8670'
const config = {
submissionInterval: 6,
......@@ -9,7 +10,7 @@ const config = {
historicalBlocks: 0,
startingBlockNumber: 0,
l2BlockTime: 2,
startingTimestamp,
l1StartingBlockTag,
sequencerAddress,
l2CrossDomainMessengerOwner: ethers.constants.AddressZero,
......@@ -41,12 +42,12 @@ const config = {
sequencerWindowSize: 120,
channelTimeout: 120,
proxyAdmin: '0x05e22b779967b86fb9572e8292090be2d5c1cab7',
optimismBaseFeeRecipient: '0xec4f588262821a7c1f722e5bc40dc5332335c47f',
optimismL1FeeRecipient: '0x8fd8d6b9e556cf4791ff9c99a56420ac2fdd2b59',
optimismL2FeeRecipient: '0x7890eee9efd42496c63f3ec71bf61bf96af088d0',
outputOracleOwner: '0x0f01ce071078396040a4a0de613aa024aba2d18f',
batchSenderAddress: '0x32b317fc8d35e015cd9942bc9c7cecaf7f651838',
proxyAdmin: '0xe584e1b833ca80020130b1b69f84f90479076168',
optimismBaseFeeRecipient: '0xf116a24056b647e3211d095c667e951536cdebaa',
optimismL1FeeRecipient: '0xc731837b696ca3d9720d23336925368ceaa58f83',
optimismL2FeeRecipient: '0x26862c200bd48c19f39d9e1cd88a3b439611d911',
outputOracleOwner: '0x6925b8704ff96dee942623d6fb5e946ef5884b63',
batchSenderAddress: '0xa11d2b908470e17923fff184d48269bebbd9b2a5',
}
export default config
......@@ -2,9 +2,9 @@ import { ethers } from 'ethers'
const { env } = process
const startingTimestamp =
typeof env.L2OO_STARTING_BLOCK_TIMESTAMP === 'string'
? ethers.BigNumber.from(env.L2OO_STARTING_BLOCK_TIMESTAMP).toNumber()
const l1GenesisTimestamp =
typeof env.L1_GENESIS_TIMESTAMP === 'string'
? ethers.BigNumber.from(env.L1_GENESIS_TIMESTAMP).toNumber()
: Math.floor(Date.now() / 1000)
const config = {
......@@ -12,8 +12,9 @@ const config = {
genesisOutput: ethers.constants.HashZero,
historicalBlocks: 0,
startingBlockNumber: 0,
l1StartingBlockTag: 'earliest',
l2BlockTime: 2,
startingTimestamp,
l1GenesisTimestamp,
sequencerAddress: '0x70997970C51812dc3A010C7d01b50e0d17dc79C8',
maxSequencerDrift: 10,
sequencerWindowSize: 4,
......
......@@ -10,13 +10,10 @@ const deployFn: DeployFunction = async (hre) => {
const { deployer } = await hre.getNamedAccounts()
const { deployConfig } = hre
if (
typeof deployConfig.startingTimestamp !== 'number' ||
isNaN(deployConfig.startingTimestamp)
) {
throw new Error(
'Cannot deploy L2OutputOracle without specifying a valid startingTimestamp.'
)
const l1 = hre.ethers.provider
const l1StartingBlock = await l1.getBlock(deployConfig.l1StartingBlockTag)
if (l1StartingBlock === null) {
throw new Error(`Cannot fetch block tag ${deployConfig.l1StartingBlockTag}`)
}
await deploy('L2OutputOracleProxy', {
......@@ -34,7 +31,7 @@ const deployFn: DeployFunction = async (hre) => {
deployConfig.genesisOutput,
deployConfig.historicalBlocks,
deployConfig.startingBlockNumber,
deployConfig.startingTimestamp,
l1StartingBlock.timestamp,
deployConfig.l2BlockTime,
deployConfig.sequencerAddress,
deployConfig.outputOracleOwner,
......@@ -84,7 +81,7 @@ const deployFn: DeployFunction = async (hre) => {
}
const startingTimestamp = await L2OutputOracle.STARTING_TIMESTAMP()
if (!startingTimestamp.eq(BigNumber.from(deployConfig.startingTimestamp))) {
if (!startingTimestamp.eq(BigNumber.from(l1StartingBlock.timestamp))) {
throw new Error('starting timestamp misconfigured')
}
const l2BlockTime = await L2OutputOracle.L2_BLOCK_TIME()
......
......@@ -17,7 +17,7 @@ const deployFn: DeployFunction = async (hre) => {
waitConfirmations: deployConfig.deploymentWaitConfirmations,
})
const oracle = await get('L2OutputOracle')
const oracle = await get('L2OutputOracleProxy')
await deploy('OptimismPortal', {
from: deployer,
......