Commit f32e9225 authored by Mark Tyneway, committed by GitHub

Merge pull request #1652 from ethereum-optimism/develop

Develop -> Master PR
parents 25c79ce4 240e62e9
---
'@eth-optimism/integration-tests': patch
'@eth-optimism/batch-submitter': patch
'@eth-optimism/common-ts': patch
'@eth-optimism/contracts': patch
'@eth-optimism/core-utils': patch
'@eth-optimism/data-transport-layer': patch
'@eth-optimism/hardhat-ovm': patch
'@eth-optimism/message-relayer': patch
'@eth-optimism/replica-healthcheck': patch
'@eth-optimism/specs': patch
---
Update dependencies
---
'@eth-optimism/rpc-proxy': patch
---
Initial rpc-proxy package
---
'@eth-optimism/core-utils': patch
---
Parse the L1 timestamp in `injectContext`
name: batch-submitter unit tests
on:
push:
paths:
- 'go/batch-submitter/**'
branches:
- 'master'
- 'develop'
- '*rc'
- 'regenesis/*'
pull_request:
paths:
- 'go/batch-submitter/**'
branches:
- 'master'
- 'develop'
- '*rc'
- 'regenesis/*'
workflow_dispatch:
defaults:
run:
working-directory: './go/batch-submitter'
jobs:
tests:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install
run: make
- name: Test
run: make test
......@@ -5,6 +5,11 @@ on: workflow_dispatch
jobs:
integration:
runs-on: ubuntu-latest
services:
registry:
image: registry:2
ports:
- 5000:5000
env:
DOCKER_BUILDKIT: 1
COMPOSE_DOCKER_CLI_BUILD: 1
......@@ -28,9 +33,23 @@ jobs:
restore-keys: |
${{ runner.os }}-yarn-
- uses: actions/cache@v2
name: Set up layer cache
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-1-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-1-
- uses: docker/setup-buildx-action@master
name: Set up Docker Buildx
id: buildx
with:
version: latest
driver-opts: image=moby/buildkit:master,network=host
- name: Build the services
run: ./ops/scripts/build-ci.sh
- name: Bring the stack up
working-directory: ./ops
......@@ -63,3 +82,8 @@ jobs:
with:
name: logs.tgz
path: ./logs.tgz
- name: Move cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
......@@ -3,6 +3,7 @@ on:
push:
paths:
- 'go/gas-oracle/**'
- 'go/batch-submitter/**'
branches:
- 'master'
- 'develop'
......@@ -11,6 +12,7 @@ on:
pull_request:
paths:
- 'go/gas-oracle/**'
- 'go/batch-submitter/**'
branches:
- 'master'
- 'develop'
......@@ -22,8 +24,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: golangci-lint
- name: golangci-lint gas-oracle
uses: golangci/golangci-lint-action@v2
with:
version: v1.29
working-directory: go/gas-oracle
- name: golangci-lint batch-submitter
uses: golangci/golangci-lint-action@v2
with:
version: v1.29
working-directory: go/batch-submitter
......@@ -13,6 +13,11 @@ on:
jobs:
integration:
runs-on: ubuntu-latest
services:
registry:
image: registry:2
ports:
- 5000:5000
env:
DOCKER_BUILDKIT: 1
COMPOSE_DOCKER_CLI_BUILD: 1
......@@ -32,9 +37,23 @@ jobs:
restore-keys: |
${{ runner.os }}-yarn-
- uses: actions/cache@v2
name: Set up layer cache
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-1-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-1-
- uses: docker/setup-buildx-action@master
name: Set up Docker Buildx
id: buildx
with:
version: latest
driver-opts: image=moby/buildkit:master,network=host
- name: Build the services
run: ./ops/scripts/build-ci.sh
- name: Bring the stack up
working-directory: ./ops
......@@ -67,3 +86,11 @@ jobs:
with:
name: logs.tgz
path: ./logs.tgz
# Needed to address the following bugs:
# https://github.com/docker/build-push-action/issues/252
# https://github.com/moby/buildkit/issues/1896
- name: Move cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
......@@ -23,6 +23,8 @@ jobs:
contracts: ${{ steps.packages.outputs.contracts }}
replica-healthcheck: ${{ steps.packages.outputs.replica-healthcheck }}
canary-docker-tag: ${{ steps.docker-image-name.outputs.canary-docker-tag }}
proxyd: ${{ steps.canary-publish.outputs.proxyd }}
rpc-proxy: ${{ steps.canary-publish.outputs.rpc-proxy }}
steps:
- name: Check out source code
......@@ -308,3 +310,62 @@ jobs:
file: ./ops/docker/Dockerfile.replica-healthcheck
push: true
tags: ethereumoptimism/replica-healthcheck:${{ needs.builder.outputs.canary-docker-tag }}
proxyd:
name: Publish proxyd Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.proxyd != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Set env
run: |
echo "GITDATE=$(date)" >> $GITHUB_ENV"
- name: Build and push
uses: docker/build-push-action@v2
with:
context: ./go/proxyd
file: ./go/proxyd/Dockerfile
push: true
tags: ethereumoptimism/proxyd:${{ needs.canary-publish.outputs.proxyd }}
build-args: |
GITCOMMIT=${{ github.sha }}
GITDATE=${{ env.GITDATE }}
rpc-proxy:
name: Publish rpc-proxy Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
needs: canary-publish
if: needs.canary-publish.outputs.rpc-proxy != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.rpc-proxy
push: true
tags: ethereumoptimism/rpc-proxy:${{ needs.canary-publish.outputs.rpc-proxy }}
......@@ -19,6 +19,8 @@ jobs:
contracts: ${{ steps.packages.outputs.contracts }}
gas-oracle: ${{ steps.packages.outputs.gas-oracle }}
replica-healthcheck: ${{ steps.packages.outputs.replica-healthcheck }}
proxyd: ${{ steps.packages.outputs.proxyd }}
rpc-proxy: ${{ steps.packages.outputs.rpc-proxy }}
steps:
- name: Checkout Repo
......@@ -326,3 +328,66 @@ jobs:
file: ./ops/docker/Dockerfile.replica-healthcheck
push: true
tags: ethereumoptimism/replica-healthcheck:${{ needs.builder.outputs.replica-healthcheck }},ethereumoptimism/replica-healthcheck:latest
proxyd:
name: Publish proxyd Version ${{ needs.release.outputs.proxyd }}
needs: release
if: needs.release.outputs.proxyd != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Set env
run: |
echo "GITDATE=$(date)" >> $GITHUB_ENV"
- name: Build and push
uses: docker/build-push-action@v2
with:
context: ./go/proxyd
file: ./go/proxyd/Dockerfile
push: true
tags: ethereumoptimism/proxyd:${{ needs.release.outputs.proxyd }}
build-args: |
GITCOMMIT=${{ github.sha }}
GITDATE=${{ env.GITDATE }}
rpc-proxy:
name: Publish rpc-proxy Version ${{ needs.release.outputs.rpc-proxy }}
needs: release
if: needs.release.outputs.rpc-proxy != ''
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
- name: Set env
run: |
echo "GITDATE=$(date)" >> $GITHUB_ENV"
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
file: ./ops/docker/Dockerfile.rpc-proxy
push: true
tags: ethereumoptimism/rpc-proxy:${{ needs.release.outputs.rpc-proxy }}
......@@ -5,6 +5,11 @@ on: workflow_dispatch
jobs:
integration-sync-test:
runs-on: ubuntu-latest
services:
registry:
image: registry:2
ports:
- 5000:5000
env:
DOCKER_BUILDKIT: 1
COMPOSE_DOCKER_CLI_BUILD: 1
......@@ -22,9 +27,24 @@ jobs:
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-
- uses: actions/cache@v2
name: Set up layer cache
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-1-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-1-
- uses: docker/setup-buildx-action@master
name: Set up Docker Buildx
id: buildx
with:
version: latest
driver-opts: image=moby/buildkit:master,network=host
- name: Build the services
run: ./ops/scripts/build-ci.sh
- name: Bring the stack up
working-directory: ./ops
......@@ -54,3 +74,8 @@ jobs:
with:
name: logs.tgz
path: ./logs.tgz
- name: Move cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
GITVERSION := $(shell cat package.json | jq .version)
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
LDFLAGSSTRING +=-X main.GitDate=$(GITDATE)
LDFLAGSSTRING +=-X main.GitVersion=$(GITVERSION)
LDFLAGS := -ldflags "$(LDFLAGSSTRING)"
batch-submitter:
env GO111MODULE=on go build -v $(LDFLAGS) ./cmd/batch-submitter
clean:
rm batch-submitter
test:
go test -v ./...
lint:
golangci-lint run ./...
.PHONY: \
batch-submitter \
clean \
test \
lint
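The `-X` flags in this Makefile overwrite package-level string variables in `main` at link time; the matching variables are declared in `cmd/batch-submitter/main.go` below. A minimal standalone sketch of the mechanism:

```go
package main

import "fmt"

// Empty by default; populated at link time, e.g.:
//   go build -ldflags "-X main.GitCommit=abc123"
var GitCommit = ""

func main() {
	// Prints "commit: abc123" when built with the ldflags above.
	fmt.Println("commit:", GitCommit)
}
```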
package batchsubmitter
import (
"context"
"crypto/ecdsa"
"fmt"
"net/http"
"os"
"strconv"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/getsentry/sentry-go"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/urfave/cli"
)
const (
// defaultDialTimeout is the default duration the service will wait on
// startup to make a connection to either the L1 or L2 backends.
defaultDialTimeout = 5 * time.Second
)
// Main is the entrypoint into the batch submitter service. This method returns
// a closure that executes the service and blocks until the service exits. The
// use of a closure allows the parameters bound to the top-level main package,
// e.g. GitVersion, to be captured and used once the function is executed.
func Main(gitVersion string) func(ctx *cli.Context) error {
return func(ctx *cli.Context) error {
cfg, err := NewConfig(ctx)
if err != nil {
return err
}
// The call to defer is done here so that any errors logged from
// this point on are posted to Sentry before exiting.
if cfg.SentryEnable {
defer sentry.Flush(2 * time.Second)
}
_, err = NewBatchSubmitter(cfg, gitVersion)
if err != nil {
log.Error("Unable to create batch submitter", "error", err)
return err
}
return nil
}
}
// BatchSubmitter is a service that configures the necessary resources for
// running the TxBatchSubmitter and StateBatchSubmitter sub-services.
type BatchSubmitter struct {
ctx context.Context
cfg Config
l1Client *ethclient.Client
l2Client *ethclient.Client
sequencerPrivKey *ecdsa.PrivateKey
proposerPrivKey *ecdsa.PrivateKey
ctcAddress common.Address
sccAddress common.Address
}
// NewBatchSubmitter initializes the BatchSubmitter, gathering any resources
// that will be needed by the TxBatchSubmitter and StateBatchSubmitter
// sub-services.
func NewBatchSubmitter(cfg Config, gitVersion string) (*BatchSubmitter, error) {
ctx := context.Background()
// Set up our logging. If Sentry is enabled, we will use our custom
// log handler that logs to stdout and forwards any error messages to
// Sentry for collection. Otherwise, logs will only be posted to stdout.
var logHandler log.Handler
if cfg.SentryEnable {
err := sentry.Init(sentry.ClientOptions{
Dsn: cfg.SentryDsn,
Environment: cfg.EthNetworkName,
Release: "batch-submitter@" + gitVersion,
TracesSampleRate: traceRateToFloat64(cfg.SentryTraceRate),
Debug: false,
})
if err != nil {
return nil, err
}
logHandler = SentryStreamHandler(os.Stdout, log.TerminalFormat(true))
} else {
logHandler = log.StreamHandler(os.Stdout, log.TerminalFormat(true))
}
logLevel, err := log.LvlFromString(cfg.LogLevel)
if err != nil {
return nil, err
}
log.Root().SetHandler(log.LvlFilterHandler(logLevel, logHandler))
log.Info("Config", "config", fmt.Sprintf("%#v", cfg))
// Parse sequencer private key and CTC contract address.
sequencerPrivKey, ctcAddress, err := parseWalletPrivKeyAndContractAddr(
"Sequencer", cfg.Mnemonic, cfg.SequencerHDPath,
cfg.SequencerPrivateKey, cfg.CTCAddress,
)
if err != nil {
return nil, err
}
// Parse proposer private key and SCC contract address.
proposerPrivKey, sccAddress, err := parseWalletPrivKeyAndContractAddr(
"Proposer", cfg.Mnemonic, cfg.ProposerHDPath,
cfg.ProposerPrivateKey, cfg.SCCAddress,
)
if err != nil {
return nil, err
}
// Connect to L1 and L2 providers. Perform these last since they are the
// most expensive.
l1Client, err := dialEthClientWithTimeout(ctx, cfg.L1EthRpc)
if err != nil {
return nil, err
}
l2Client, err := dialEthClientWithTimeout(ctx, cfg.L2EthRpc)
if err != nil {
return nil, err
}
if cfg.MetricsServerEnable {
go runMetricsServer(cfg.MetricsHostname, cfg.MetricsPort)
}
return &BatchSubmitter{
ctx: ctx,
cfg: cfg,
l1Client: l1Client,
l2Client: l2Client,
sequencerPrivKey: sequencerPrivKey,
proposerPrivKey: proposerPrivKey,
ctcAddress: ctcAddress,
sccAddress: sccAddress,
}, nil
}
// parseWalletPrivKeyAndContractAddr returns the wallet private key to use for
// sending transactions as well as the contract address to send to for a
// particular sub-service.
func parseWalletPrivKeyAndContractAddr(
name string,
mnemonic string,
hdPath string,
privKeyStr string,
contractAddrStr string,
) (*ecdsa.PrivateKey, common.Address, error) {
// Parse wallet private key from either privkey string or BIP39 mnemonic
// and BIP32 HD derivation path.
privKey, err := GetConfiguredPrivateKey(mnemonic, hdPath, privKeyStr)
if err != nil {
return nil, common.Address{}, err
}
// Parse the target contract address the wallet will send to.
contractAddress, err := ParseAddress(contractAddrStr)
if err != nil {
return nil, common.Address{}, err
}
// Log wallet address rather than private key...
walletAddress := crypto.PubkeyToAddress(privKey.PublicKey)
log.Info(name+" wallet params parsed successfully", "wallet_address",
walletAddress, "contract_address", contractAddress)
return privKey, contractAddress, nil
}
// runMetricsServer spins up a prometheus metrics server at the provided
// hostname and port.
//
// NOTE: This method MUST be run as a goroutine.
func runMetricsServer(hostname string, port uint64) {
metricsPortStr := strconv.FormatUint(port, 10)
metricsAddr := fmt.Sprintf("%s:%s", hostname, metricsPortStr)
http.Handle("/metrics", promhttp.Handler())
_ = http.ListenAndServe(metricsAddr, nil)
}
// dialEthClientWithTimeout attempts to dial the L1 or L2 provider using the
// provided URL. If the dial doesn't complete within defaultDialTimeout,
// this method will return an error.
func dialEthClientWithTimeout(ctx context.Context, url string) (
*ethclient.Client, error) {
ctxt, cancel := context.WithTimeout(ctx, defaultDialTimeout)
defer cancel()
return ethclient.DialContext(ctxt, url)
}
// traceRateToFloat64 converts a time.Duration into a valid float64 for the
// Sentry client. The client only accepts values between 0.0 and 1.0, so this
// method clamps anything greater than 1 second to 1.0.
func traceRateToFloat64(rate time.Duration) float64 {
rate64 := float64(rate) / float64(time.Second)
if rate64 > 1.0 {
rate64 = 1.0
}
return rate64
}
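As a quick illustration of the clamping described above (a standalone sketch, not part of this package): a 500ms trace rate maps to 0.5, while anything at or above one second maps to 1.0.

```go
package main

import (
	"fmt"
	"time"
)

// Mirrors the helper above: express a duration as a fraction of one
// second, clamping anything over one second to 1.0.
func traceRateToFloat64(rate time.Duration) float64 {
	rate64 := float64(rate) / float64(time.Second)
	if rate64 > 1.0 {
		rate64 = 1.0
	}
	return rate64
}

func main() {
	fmt.Println(traceRateToFloat64(500 * time.Millisecond)) // 0.5
	fmt.Println(traceRateToFloat64(time.Second))            // 1
	fmt.Println(traceRateToFloat64(2 * time.Second))        // 1 (clamped)
}
```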
package main
import (
"fmt"
"os"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli"
batchsubmitter "github.com/ethereum-optimism/go/batch-submitter"
"github.com/ethereum-optimism/go/batch-submitter/flags"
)
var (
GitVersion = ""
GitCommit = ""
GitDate = ""
)
func main() {
// Set up logger with a default INFO level in case we fail to parse flags.
// Otherwise the final critical log won't show what the parsing error was.
log.Root().SetHandler(
log.LvlFilterHandler(
log.LvlInfo,
log.StreamHandler(os.Stdout, log.TerminalFormat(true)),
),
)
app := cli.NewApp()
app.Flags = flags.Flags
app.Version = fmt.Sprintf("%s-%s", GitVersion, params.VersionWithCommit(GitCommit, GitDate))
app.Name = "batch-submitter"
app.Usage = "Batch Submitter Service"
app.Description = "Service for generating and submitting batched transactions " +
"that synchronize L2 state to L1 contracts"
app.Action = batchsubmitter.Main(GitVersion)
err := app.Run(os.Args)
if err != nil {
log.Crit("Application failed", "message", err)
}
}
package batchsubmitter
import (
"errors"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli"
"github.com/ethereum-optimism/go/batch-submitter/flags"
)
var (
// ErrSequencerPrivKeyOrMnemonic signals that the user tried to set both
// sequencer wallet derivation methods or neither of them.
ErrSequencerPrivKeyOrMnemonic = errors.New("either sequencer-private-key " +
"or mnemonic + sequencer-hd-path must be set")
// ErrProposerPrivKeyOrMnemonic signals that the user tried to set
// both proposer wallet derivation methods or neither of them.
ErrProposerPrivKeyOrMnemonic = errors.New("either proposer-private-key " +
"or mnemonic + proposer-hd-path must be set")
// ErrSameSequencerAndProposerHDPath signals that the user specified the
// same sequencer and proposer derivation paths, which otherwise would
// lead to the two using the same wallet.
ErrSameSequencerAndProposerHDPath = errors.New("sequencer-hd-path and " +
"proposer-hd-path must be distinct when using mnemonic")
// ErrSameSequencerAndProposerPrivKey signals that the user specified
// the same sequencer and proposer private keys, which otherwise would
// lead to the two using the same wallet.
ErrSameSequencerAndProposerPrivKey = errors.New("sequencer-priv-key and " +
"proposer-priv-key must be distinct")
// ErrSentryDSNNotSet signals that no Data Source Name was provided
// with which to configure Sentry logging.
ErrSentryDSNNotSet = errors.New("sentry-dsn must be set if use-sentry " +
"is true")
)
type Config struct {
/* Required Params */
// BuildEnv identifies the environment this binary is intended for, i.e.
// production, development, etc.
BuildEnv string
// EthNetworkName identifies the intended Ethereum network.
EthNetworkName string
// L1EthRpc is the HTTP provider URL for L1.
L1EthRpc string
// L2EthRpc is the HTTP provider URL for L2.
L2EthRpc string
// CTCAddress is the CTC contract address.
CTCAddress string
// SCCAddress is the SCC contract address.
SCCAddress string
// MinL1TxSize is the minimum size in bytes of any L1 transactions generated
// by the batch submitter.
MinL1TxSize uint64
// MaxL1TxSize is the maximum size in bytes of any L1 transactions generated
// by the batch submitter.
MaxL1TxSize uint64
// MaxTxBatchCount is the maximum number of L2 transactions that can ever be
// in a batch.
MaxTxBatchCount uint64
// MaxStateBatchCount is the maximum number of L2 state roots that can ever
// be in a batch.
MaxStateBatchCount uint64
// MaxBatchSubmissionTime is the maximum amount of time that we will
// wait before submitting an under-sized batch.
MaxBatchSubmissionTime time.Duration
// PollInterval is the delay between querying L2 for more transactions
// and creating a new batch.
PollInterval time.Duration
// NumConfirmations is the number of confirmations which we will wait after
// appending new batches.
NumConfirmations uint64
// ResubmissionTimeout is the time we will wait before resubmitting a
// transaction.
ResubmissionTimeout time.Duration
// FinalityConfirmations is the number of confirmations that we should wait
// before submitting state roots for CTC elements.
FinalityConfirmations uint64
// RunTxBatchSubmitter determines whether or not to run the tx batch
// submitter.
RunTxBatchSubmitter bool
// RunStateBatchSubmitter determines whether or not to run the state batch
// submitter.
RunStateBatchSubmitter bool
// SafeMinimumEtherBalance is the safe minimum amount of ether the batch
// submitter key should hold before it starts to log errors.
SafeMinimumEtherBalance uint64
// ClearPendingTxs is a boolean to clear the pending transactions in the
// mempool on startup.
ClearPendingTxs bool
/* Optional Params */
// LogLevel is the lowest log level that will be output.
LogLevel string
// SentryEnable if true, logs any error messages to sentry. SentryDsn
// must also be set if SentryEnable is true.
SentryEnable bool
// SentryDsn is the sentry Data Source Name.
SentryDsn string
// SentryTraceRate the frequency with which Sentry should flush buffered
// events.
SentryTraceRate time.Duration
// BlockOffset is the offset between the CTC contract start and the L2 geth
// blocks.
BlockOffset uint64
// MaxGasPriceInGwei is the maximum gas price in gwei we will allow in order
// to confirm a transaction.
MaxGasPriceInGwei uint64
// GasRetryIncrement is the step size (in gwei) by which we will ratchet the
// gas price in order to get a transaction confirmed.
GasRetryIncrement uint64
// SequencerPrivateKey is the private key of the wallet used to submit
// transactions to the CTC contract.
SequencerPrivateKey string
// ProposerPrivateKey is the private key of the wallet used to submit
// transactions to the SCC contract.
ProposerPrivateKey string
// Mnemonic is the HD seed used to derive the wallet private keys for both
// the sequencer and proposer. Must be used in conjunction with
// SequencerHDPath and ProposerHDPath.
Mnemonic string
// SequencerHDPath is the derivation path used to obtain the private key for
// the sequencer transactions.
SequencerHDPath string
// ProposerHDPath is the derivation path used to obtain the private key for
// the proposer transactions.
ProposerHDPath string
// MetricsServerEnable if true, will create a metrics client and log to
// Prometheus.
MetricsServerEnable bool
// MetricsHostname is the hostname at which the metrics server is running.
MetricsHostname string
// MetricsPort is the port at which the metrics server is running.
MetricsPort uint64
}
// NewConfig parses the Config from the provided flags or environment variables.
// This method fails if ValidateConfig deems the configuration to be malformed.
func NewConfig(ctx *cli.Context) (Config, error) {
cfg := Config{
/* Required Flags */
BuildEnv: ctx.GlobalString(flags.BuildEnvFlag.Name),
EthNetworkName: ctx.GlobalString(flags.EthNetworkNameFlag.Name),
L1EthRpc: ctx.GlobalString(flags.L1EthRpcFlag.Name),
L2EthRpc: ctx.GlobalString(flags.L2EthRpcFlag.Name),
CTCAddress: ctx.GlobalString(flags.CTCAddressFlag.Name),
SCCAddress: ctx.GlobalString(flags.SCCAddressFlag.Name),
MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeFlag.Name),
MaxBatchSubmissionTime: ctx.GlobalDuration(flags.MaxBatchSubmissionTimeFlag.Name),
PollInterval: ctx.GlobalDuration(flags.PollIntervalFlag.Name),
NumConfirmations: ctx.GlobalUint64(flags.NumConfirmationsFlag.Name),
ResubmissionTimeout: ctx.GlobalDuration(flags.ResubmissionTimeoutFlag.Name),
FinalityConfirmations: ctx.GlobalUint64(flags.FinalityConfirmationsFlag.Name),
RunTxBatchSubmitter: ctx.GlobalBool(flags.RunTxBatchSubmitterFlag.Name),
RunStateBatchSubmitter: ctx.GlobalBool(flags.RunStateBatchSubmitterFlag.Name),
SafeMinimumEtherBalance: ctx.GlobalUint64(flags.SafeMinimumEtherBalanceFlag.Name),
ClearPendingTxs: ctx.GlobalBool(flags.ClearPendingTxsFlag.Name),
/* Optional Flags */
SentryEnable: ctx.GlobalBool(flags.SentryEnableFlag.Name),
SentryDsn: ctx.GlobalString(flags.SentryDsnFlag.Name),
SentryTraceRate: ctx.GlobalDuration(flags.SentryTraceRateFlag.Name),
BlockOffset: ctx.GlobalUint64(flags.BlockOffsetFlag.Name),
MaxGasPriceInGwei: ctx.GlobalUint64(flags.MaxGasPriceInGweiFlag.Name),
GasRetryIncrement: ctx.GlobalUint64(flags.GasRetryIncrementFlag.Name),
SequencerPrivateKey: ctx.GlobalString(flags.SequencerPrivateKeyFlag.Name),
ProposerPrivateKey: ctx.GlobalString(flags.ProposerPrivateKeyFlag.Name),
Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name),
SequencerHDPath: ctx.GlobalString(flags.SequencerHDPathFlag.Name),
ProposerHDPath: ctx.GlobalString(flags.ProposerHDPathFlag.Name),
MetricsServerEnable: ctx.GlobalBool(flags.MetricsServerEnableFlag.Name),
MetricsHostname: ctx.GlobalString(flags.MetricsHostnameFlag.Name),
MetricsPort: ctx.GlobalUint64(flags.MetricsPortFlag.Name),
}
err := ValidateConfig(&cfg)
if err != nil {
return Config{}, err
}
return cfg, nil
}
// ValidateConfig ensures additional constraints on the parsed configuration to
// ensure that it is well-formed.
func ValidateConfig(cfg *Config) error {
// Sanity check log level.
_, err := log.LvlFromString(cfg.LogLevel)
if err != nil {
return err
}
// Enforce that either sequencer-private-key or mnemonic + sequencer-hd-path
// is enabled, but not both or neither.
usingSequencerPrivateKey := cfg.SequencerPrivateKey != ""
usingSequencerHDPath := cfg.Mnemonic != "" && cfg.SequencerHDPath != ""
if usingSequencerPrivateKey == usingSequencerHDPath {
return ErrSequencerPrivKeyOrMnemonic
}
// Enforce that either proposer-private-key or mnemonic + proposer-hd-path
// is enabled, but not both or neither.
usingProposerPrivateKey := cfg.ProposerPrivateKey != ""
usingProposerHDPath := cfg.Mnemonic != "" && cfg.ProposerHDPath != ""
if usingProposerPrivateKey == usingProposerHDPath {
return ErrProposerPrivKeyOrMnemonic
}
// If mnemonic is used, the sequencer-hd-path and proposer-hd-path must
// differ to avoid reusing the same wallet for both.
if cfg.Mnemonic != "" && cfg.SequencerHDPath == cfg.ProposerHDPath {
return ErrSameSequencerAndProposerHDPath
}
// If private keys are used, ensure the keys are different to avoid reusing
// the same wallet for both.
if usingSequencerPrivateKey && usingProposerPrivateKey &&
cfg.SequencerPrivateKey == cfg.ProposerPrivateKey {
return ErrSameSequencerAndProposerPrivKey
}
// Ensure the Sentry Data Source Name is set when using Sentry.
if cfg.SentryEnable && cfg.SentryDsn == "" {
return ErrSentryDSNNotSet
}
return nil
}
package batchsubmitter_test
import (
"fmt"
"testing"
batchsubmitter "github.com/ethereum-optimism/go/batch-submitter"
"github.com/stretchr/testify/require"
)
var validateConfigTests = []struct {
name string
cfg batchsubmitter.Config
expErr error
}{
{
name: "bad log level",
cfg: batchsubmitter.Config{
LogLevel: "unknown",
},
expErr: fmt.Errorf("unknown level: unknown"),
},
{
name: "sequencer priv key or mnemonic none set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "",
Mnemonic: "",
SequencerHDPath: "",
},
expErr: batchsubmitter.ErrSequencerPrivKeyOrMnemonic,
},
{
name: "sequencer priv key or mnemonic both set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
Mnemonic: "mnemonic",
SequencerHDPath: "sequencer-path",
},
expErr: batchsubmitter.ErrSequencerPrivKeyOrMnemonic,
},
{
name: "sequencer priv key or mnemonic only mnemonic set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "",
Mnemonic: "mnemonic",
SequencerHDPath: "",
},
expErr: batchsubmitter.ErrSequencerPrivKeyOrMnemonic,
},
{
name: "sequencer priv key or mnemonic only hdpath set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "",
Mnemonic: "",
SequencerHDPath: "sequencer-path",
},
expErr: batchsubmitter.ErrSequencerPrivKeyOrMnemonic,
},
{
name: "proposer priv key or mnemonic none set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
ProposerPrivateKey: "",
Mnemonic: "",
ProposerHDPath: "",
},
expErr: batchsubmitter.ErrProposerPrivKeyOrMnemonic,
},
{
name: "proposer priv key or mnemonic both set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
ProposerPrivateKey: "proposer-privkey",
Mnemonic: "mnemonic",
ProposerHDPath: "proposer-path",
},
expErr: batchsubmitter.ErrProposerPrivKeyOrMnemonic,
},
{
name: "proposer priv key or mnemonic only mnemonic set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
ProposerPrivateKey: "",
Mnemonic: "mnemonic",
ProposerHDPath: "",
},
expErr: batchsubmitter.ErrProposerPrivKeyOrMnemonic,
},
{
name: "proposer priv key or mnemonic only hdpath set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
ProposerPrivateKey: "",
Mnemonic: "",
ProposerHDPath: "proposer-path",
},
expErr: batchsubmitter.ErrProposerPrivKeyOrMnemonic,
},
{
name: "same sequencer and proposer hd path",
cfg: batchsubmitter.Config{
LogLevel: "info",
Mnemonic: "mnemonic",
SequencerHDPath: "path",
ProposerHDPath: "path",
},
expErr: batchsubmitter.ErrSameSequencerAndProposerHDPath,
},
{
name: "same sequencer and proposer privkey",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "privkey",
ProposerPrivateKey: "privkey",
},
expErr: batchsubmitter.ErrSameSequencerAndProposerPrivKey,
},
{
name: "sentry-dsn not set when sentry-enable is true",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
ProposerPrivateKey: "proposer-privkey",
SentryEnable: true,
SentryDsn: "",
},
expErr: batchsubmitter.ErrSentryDSNNotSet,
},
// Valid configs
{
name: "valid config with privkeys and no sentry",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
ProposerPrivateKey: "proposer-privkey",
SentryEnable: false,
SentryDsn: "",
},
expErr: nil,
},
{
name: "valid config with mnemonic and no sentry",
cfg: batchsubmitter.Config{
LogLevel: "info",
Mnemonic: "mnemonic",
SequencerHDPath: "sequencer-path",
ProposerHDPath: "proposer-path",
SentryEnable: false,
SentryDsn: "",
},
expErr: nil,
},
{
name: "valid config with privkeys and sentry",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
ProposerPrivateKey: "proposer-privkey",
SentryEnable: true,
SentryDsn: "batch-submitter",
},
expErr: nil,
},
{
name: "valid config with mnemonic and sentry",
cfg: batchsubmitter.Config{
LogLevel: "info",
Mnemonic: "mnemonic",
SequencerHDPath: "sequencer-path",
ProposerHDPath: "proposer-path",
SentryEnable: true,
SentryDsn: "batch-submitter",
},
expErr: nil,
},
}
// TestValidateConfig asserts the behavior of ValidateConfig by testing expected
// error and success configurations.
func TestValidateConfig(t *testing.T) {
for _, test := range validateConfigTests {
t.Run(test.name, func(t *testing.T) {
err := batchsubmitter.ValidateConfig(&test.cfg)
require.Equal(t, err, test.expErr)
})
}
}
package batchsubmitter
import (
"crypto/ecdsa"
"errors"
"fmt"
"strings"
"github.com/decred/dcrd/hdkeychain/v3"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/tyler-smith/go-bip39"
)
var (
// ErrCannotGetPrivateKey signals that both or neither of the
// mnemonic+hdpath and private key string options were set in the configuration.
ErrCannotGetPrivateKey = errors.New("invalid combination of privkey " +
"or mnemonic+hdpath")
)
// ParseAddress parses an ETH address from a hex string. This method will fail if
// the address is not a valid hexadecimal address.
func ParseAddress(address string) (common.Address, error) {
if common.IsHexAddress(address) {
return common.HexToAddress(address), nil
}
return common.Address{}, fmt.Errorf("invalid address: %v", address)
}
// GetConfiguredPrivateKey computes the private key for our configured services.
// The two supported methods are:
// - Derived from BIP39 mnemonic and BIP32 HD derivation path.
// - Directly from a serialized private key.
func GetConfiguredPrivateKey(mnemonic, hdPath, privKeyStr string) (
*ecdsa.PrivateKey, error) {
useMnemonic := mnemonic != "" && hdPath != ""
usePrivKeyStr := privKeyStr != ""
switch {
case useMnemonic && !usePrivKeyStr:
return DerivePrivateKey(mnemonic, hdPath)
case usePrivKeyStr && !useMnemonic:
return ParsePrivateKeyStr(privKeyStr)
default:
return nil, ErrCannotGetPrivateKey
}
}
// fakeNetworkParams implements the hdkeychain.NetworkParams interface. These
// methods are unused in the child derivation, and only needed for serializing
// xpubs/xprivs which we don't rely on.
type fakeNetworkParams struct{}
func (f fakeNetworkParams) HDPrivKeyVersion() [4]byte {
return [4]byte{}
}
func (f fakeNetworkParams) HDPubKeyVersion() [4]byte {
return [4]byte{}
}
// DerivePrivateKey derives the private key from a given mnemonic and BIP32
// derivation path.
func DerivePrivateKey(mnemonic, hdPath string) (*ecdsa.PrivateKey, error) {
// Parse the seed string into the master BIP32 key.
seed, err := bip39.NewSeedWithErrorChecking(mnemonic, "")
if err != nil {
return nil, err
}
privKey, err := hdkeychain.NewMaster(seed, fakeNetworkParams{})
if err != nil {
return nil, err
}
// Parse the derivation path and derive a child for each level of the
// BIP32 derivation path.
derivationPath, err := accounts.ParseDerivationPath(hdPath)
if err != nil {
return nil, err
}
for _, child := range derivationPath {
privKey, err = privKey.Child(child)
if err != nil {
return nil, err
}
}
rawPrivKey, err := privKey.SerializedPrivKey()
if err != nil {
return nil, err
}
return crypto.ToECDSA(rawPrivKey)
}
// ParsePrivateKeyStr parses a hexadecimal-encoded private key; the encoding may
// optionally have an "0x" prefix.
func ParsePrivateKeyStr(privKeyStr string) (*ecdsa.PrivateKey, error) {
hex := strings.TrimPrefix(privKeyStr, "0x")
return crypto.HexToECDSA(hex)
}
package batchsubmitter_test
import (
"bytes"
"errors"
"strings"
"testing"
batchsubmitter "github.com/ethereum-optimism/go/batch-submitter"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/require"
)
var (
validMnemonic = strings.Join([]string{
"abandon", "abandon", "abandon", "abandon",
"abandon", "abandon", "abandon", "abandon",
"abandon", "abandon", "abandon", "about",
}, " ")
validHDPath = "m/44'/60'/0'/128"
// validPrivKeyStr is the private key string for the child derived at
// validHDPath from validMnemonic.
validPrivKeyStr = "69d3a0e79bf039ca788924cb98b6b60c5f5aaa5e770aef09b4b15fdb59944d02"
// validPrivKeyBytes is the raw private key bytes for the child derived
// at validHDPath from validMnemonic.
validPrivKeyBytes = []byte{
0x69, 0xd3, 0xa0, 0xe7, 0x9b, 0xf0, 0x39, 0xca,
0x78, 0x89, 0x24, 0xcb, 0x98, 0xb6, 0xb6, 0x0c,
0x5f, 0x5a, 0xaa, 0x5e, 0x77, 0x0a, 0xef, 0x09,
0xb4, 0xb1, 0x5f, 0xdb, 0x59, 0x94, 0x4d, 0x02,
}
// invalidMnemonic has an invalid checksum.
invalidMnemonic = strings.Join([]string{
"abandon", "abandon", "abandon", "abandon",
"abandon", "abandon", "abandon", "abandon",
"abandon", "abandon", "abandon", "abandon",
}, " ")
)
// TestParseAddress asserts that ParseAddress correctly parses 40-character
// hexadecimal strings with optional 0x prefix into valid 20-byte addresses.
func TestParseAddress(t *testing.T) {
tests := []struct {
name string
addr string
expErr error
expAddr common.Address
}{
{
name: "empty address",
addr: "",
expErr: errors.New("invalid address: "),
},
{
name: "only 0x",
addr: "0x",
expErr: errors.New("invalid address: 0x"),
},
{
name: "non hex character",
addr: "0xaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
expErr: errors.New("invalid address: 0xaaaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
{
name: "valid address",
addr: "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
expErr: nil,
expAddr: common.BytesToAddress(bytes.Repeat([]byte{170}, 20)),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
addr, err := batchsubmitter.ParseAddress(test.addr)
require.Equal(t, err, test.expErr)
if test.expErr != nil {
return
}
require.Equal(t, addr, test.expAddr)
})
}
}
// TestDerivePrivateKey asserts that DerivePrivateKey properly parses a BIP39
// mnemonic and BIP32 HD path, and derives the corresponding private key.
func TestDerivePrivateKey(t *testing.T) {
tests := []struct {
name string
mnemonic string
hdPath string
expErr error
expPrivKey []byte
}{
{
name: "invalid mnemonic",
mnemonic: invalidMnemonic,
hdPath: validHDPath,
expErr: errors.New("Checksum incorrect"),
},
{
name: "valid mnemonic invalid hdpath",
mnemonic: validMnemonic,
hdPath: "",
expErr: errors.New("ambiguous path: use 'm/' prefix for absolute " +
"paths, or no leading '/' for relative ones"),
},
{
name: "valid mnemonic invalid hdpath",
mnemonic: validMnemonic,
hdPath: "m/",
expErr: errors.New("invalid component: "),
},
{
name: "valid mnemonic valid hdpath no components",
mnemonic: validMnemonic,
hdPath: "m/0",
expPrivKey: []byte{
0xba, 0xa8, 0x9a, 0x8b, 0xdd, 0x61, 0xc5, 0xe2,
0x2b, 0x9f, 0x10, 0x60, 0x1d, 0x87, 0x91, 0xc9,
0xf8, 0xfc, 0x4b, 0x2f, 0xa6, 0xdf, 0x9d, 0x68,
0xd3, 0x36, 0xf0, 0xeb, 0x03, 0xb0, 0x6e, 0xb6,
},
},
{
name: "valid mnemonic valid hdpath full path",
mnemonic: validMnemonic,
hdPath: validHDPath,
expPrivKey: []byte{
0x69, 0xd3, 0xa0, 0xe7, 0x9b, 0xf0, 0x39, 0xca,
0x78, 0x89, 0x24, 0xcb, 0x98, 0xb6, 0xb6, 0x0c,
0x5f, 0x5a, 0xaa, 0x5e, 0x77, 0x0a, 0xef, 0x09,
0xb4, 0xb1, 0x5f, 0xdb, 0x59, 0x94, 0x4d, 0x02,
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
privKey, err := batchsubmitter.DerivePrivateKey(test.mnemonic, test.hdPath)
require.Equal(t, err, test.expErr)
if test.expErr != nil {
return
}
expPrivKey, err := crypto.ToECDSA(test.expPrivKey)
require.Nil(t, err)
require.Equal(t, privKey, expPrivKey)
})
}
}
// TestParsePrivateKeyStr asserts that ParsePrivateKeyStr properly parses
// 64-character hexadecimal strings with optional 0x prefix into valid ECDSA
// private keys.
func TestParsePrivateKeyStr(t *testing.T) {
tests := []struct {
name string
privKeyStr string
expErr error
expPrivKey []byte
}{
{
name: "empty privkey string",
privKeyStr: "",
expErr: errors.New("invalid length, need 256 bits"),
},
{
name: "privkey string only 0x",
privKeyStr: "0x",
expErr: errors.New("invalid length, need 256 bits"),
},
{
name: "non hex privkey string",
privKeyStr: "0xaaaazaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
expErr: errors.New("invalid hex character 'z' in private key"),
},
{
name: "valid privkey string",
privKeyStr: validPrivKeyStr,
expPrivKey: validPrivKeyBytes,
},
{
name: "valid privkey string with 0x",
privKeyStr: "0x" + validPrivKeyStr,
expPrivKey: validPrivKeyBytes,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
privKey, err := batchsubmitter.ParsePrivateKeyStr(test.privKeyStr)
require.Equal(t, err, test.expErr)
if test.expErr != nil {
return
}
expPrivKey, err := crypto.ToECDSA(test.expPrivKey)
require.Nil(t, err)
require.Equal(t, privKey, expPrivKey)
})
}
}
// TestGetConfiguredPrivateKey asserts that GetConfiguredPrivateKey either:
// 1) Derives the correct private key assuming the BIP39 mnemonic and BIP32
// derivation path are both present and the private key string is omitted.
// 2) Parses the correct private key assuming only the private key string is
// present, but the BIP39 mnemonic and BIP32 derivation path are omitted.
func TestGetConfiguredPrivateKey(t *testing.T) {
tests := []struct {
name string
mnemonic string
hdPath string
privKeyStr string
expErr error
expPrivKey []byte
}{
{
name: "valid mnemonic+hdpath",
mnemonic: validMnemonic,
hdPath: validHDPath,
privKeyStr: "",
expPrivKey: validPrivKeyBytes,
},
{
name: "valid privkey",
mnemonic: "",
hdPath: "",
privKeyStr: validPrivKeyStr,
expPrivKey: validPrivKeyBytes,
},
{
name: "valid privkey with 0x",
mnemonic: "",
hdPath: "",
privKeyStr: "0x" + validPrivKeyStr,
expPrivKey: validPrivKeyBytes,
},
{
name: "valid menmonic+hdpath and privkey",
mnemonic: validMnemonic,
hdPath: validHDPath,
privKeyStr: validPrivKeyStr,
expErr: batchsubmitter.ErrCannotGetPrivateKey,
},
{
name: "neither menmonic+hdpath or privkey",
mnemonic: "",
hdPath: "",
privKeyStr: "",
expErr: batchsubmitter.ErrCannotGetPrivateKey,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
privKey, err := batchsubmitter.GetConfiguredPrivateKey(
test.mnemonic, test.hdPath, test.privKeyStr,
)
require.Equal(t, err, test.expErr)
if test.expErr != nil {
return
}
expPrivKey, err := crypto.ToECDSA(test.expPrivKey)
require.Nil(t, err)
require.Equal(t, privKey, expPrivKey)
})
}
}
package flags
import (
"time"
"github.com/urfave/cli"
)
const envVarPrefix = "BATCH_SUBMITTER_"
// prefixEnvVar prepends the batch-submitter namespace to a raw env var
// name, e.g. prefixEnvVar("LOG_LEVEL") yields "BATCH_SUBMITTER_LOG_LEVEL".
func prefixEnvVar(name string) string {
return envVarPrefix + name
}
var (
/* Required Flags */
BuildEnvFlag = cli.StringFlag{
Name: "build-env",
Usage: "Build environment for which the binary is produced, " +
"e.g. production or development",
Required: true,
EnvVar: "BUILD_ENV",
}
EthNetworkNameFlag = cli.StringFlag{
Name: "eth-network-name",
Usage: "Ethereum network name",
Required: true,
EnvVar: "ETH_NETWORK_NAME",
}
L1EthRpcFlag = cli.StringFlag{
Name: "l1-eth-rpc",
Usage: "HTTP provider URL for L1",
Required: true,
EnvVar: "L1_ETH_RPC",
}
L2EthRpcFlag = cli.StringFlag{
Name: "l2-eth-rpc",
Usage: "HTTP provider URL for L2",
Required: true,
EnvVar: "L2_ETH_RPC",
}
CTCAddressFlag = cli.StringFlag{
Name: "ctc-address",
Usage: "Address of the CTC contract",
Required: true,
EnvVar: "CTC_ADDRESS",
}
SCCAddressFlag = cli.StringFlag{
Name: "scc-address",
Usage: "Address of the SCC contract",
Required: true,
EnvVar: "SCC_ADDRESS",
}
MaxL1TxSizeFlag = cli.Uint64Flag{
Name: "max-l1-tx-size",
Usage: "Maximum size in bytes of any L1 transaction that gets " +
"generated by the batch submitter",
Required: true,
EnvVar: prefixEnvVar("MAX_L1_TX_SIZE"),
}
MaxBatchSubmissionTimeFlag = cli.DurationFlag{
Name: "max-batch-submission-time",
Usage: "Maximum amount of time that we will wait before " +
"submitting an under-sized batch",
Required: true,
EnvVar: prefixEnvVar("MAX_BATCH_SUBMISSION_TIME"),
}
PollIntervalFlag = cli.DurationFlag{
Name: "poll-interval",
Usage: "Delay between querying L2 for more transactions and " +
"creating a new batch",
Required: true,
EnvVar: prefixEnvVar("POLL_INTERVAL"),
}
NumConfirmationsFlag = cli.Uint64Flag{
Name: "num-confirmations",
Usage: "Number of confirmations which we will wait after " +
"appending a new batch",
Required: true,
EnvVar: prefixEnvVar("NUM_CONFIRMATIONS"),
}
ResubmissionTimeoutFlag = cli.DurationFlag{
Name: "resubmission-timeout",
Usage: "Duration we will wait before resubmitting a " +
"transaction to L1",
Required: true,
EnvVar: prefixEnvVar("RESUBMISSION_TIMEOUT"),
}
FinalityConfirmationsFlag = cli.Uint64Flag{
Name: "finality-confirmations",
Usage: "Number of confirmations that we should wait before " +
"submitting state roots for CTC elements",
Required: true,
EnvVar: prefixEnvVar("FINALITY_CONFIRMATIONS"),
}
RunTxBatchSubmitterFlag = cli.BoolFlag{
Name: "run-tx-batch-submitter",
Usage: "Determines whether or not to run the tx batch submitter",
Required: true,
EnvVar: prefixEnvVar("RUN_TX_BATCH_SUBMITTER"),
}
RunStateBatchSubmitterFlag = cli.BoolFlag{
Name: "run-state-batch-submitter",
Usage: "Determines whether or not to run the state batch submitter",
Required: true,
EnvVar: prefixEnvVar("RUN_STATE_BATCH_SUBMITTER"),
}
SafeMinimumEtherBalanceFlag = cli.Uint64Flag{
Name: "safe-minimum-ether-balance",
Usage: "Safe minimum amount of ether the batch submitter key " +
"should hold before it starts to log errors",
Required: true,
EnvVar: prefixEnvVar("SAFE_MINIMUM_ETHER_BALANCE"),
}
ClearPendingTxsFlag = cli.BoolFlag{
Name: "clear-pending-txs",
Usage: "Whether or not to clear pending transaction in the " +
"mempool on startup",
Required: true,
EnvVar: prefixEnvVar("CLEAR_PENDING_TXS"),
}
/* Optional Flags */
LogLevelFlag = cli.StringFlag{
Name: "log-level",
Usage: "The lowest log level that will be output",
Value: "info",
EnvVar: prefixEnvVar("LOG_LEVEL"),
}
SentryEnableFlag = cli.BoolFlag{
Name: "sentry-enable",
Usage: "Whether or not to enable Sentry. If true, sentry-dsn must also be set",
EnvVar: prefixEnvVar("SENTRY_ENABLE"),
}
SentryDsnFlag = cli.StringFlag{
Name: "sentry-dsn",
Usage: "Sentry data source name",
EnvVar: prefixEnvVar("SENTRY_DSN"),
}
SentryTraceRateFlag = cli.DurationFlag{
Name: "sentry-trace-rate",
Usage: "Sentry trace rate",
Value: 50 * time.Millisecond,
EnvVar: prefixEnvVar("SENTRY_TRACE_RATE"),
}
BlockOffsetFlag = cli.Uint64Flag{
Name: "block-offset",
Usage: "The offset between the CTC contract start and the L2 geth blocks",
Value: 1,
EnvVar: prefixEnvVar("BLOCK_OFFSET"),
}
MaxGasPriceInGweiFlag = cli.Uint64Flag{
Name: "max-gas-price-in-gwei",
Usage: "Maximum gas price the batch submitter can use for transactions",
Value: 100,
EnvVar: prefixEnvVar("MAX_GAS_PRICE_IN_GWEI"),
}
GasRetryIncrementFlag = cli.Uint64Flag{
Name: "gas-retry-increment",
Usage: "Default step by which to increment gas price bumps",
Value: 5,
EnvVar: prefixEnvVar("GAS_RETRY_INCREMENT_FLAG"),
}
SequencerPrivateKeyFlag = cli.StringFlag{
Name: "sequencer-private-key",
Usage: "The private key to use for sending to the sequencer contract",
EnvVar: prefixEnvVar("SEQUENCER_PRIVATE_KEY"),
}
ProposerPrivateKeyFlag = cli.StringFlag{
Name: "proposer-private-key",
Usage: "The private key to use for sending to the proposer contract",
EnvVar: prefixEnvVar("PROPOSER_PRIVATE_KEY"),
}
MnemonicFlag = cli.StringFlag{
Name: "mnemonic",
Usage: "The mnemonic used to derive the wallets for either the " +
"sequencer or the proposer",
EnvVar: prefixEnvVar("MNEMONIC"),
}
SequencerHDPathFlag = cli.StringFlag{
Name: "sequencer-hd-path",
Usage: "The HD path used to derive the sequencer wallet from the " +
"mnemonic. The mnemonic flag must also be set.",
EnvVar: prefixEnvVar("SEQUENCER_HD_PATH"),
}
ProposerHDPathFlag = cli.StringFlag{
Name: "proposer-hd-path",
Usage: "The HD path used to derive the proposer wallet from the " +
"mnemonic. The mnemonic flag must also be set.",
EnvVar: prefixEnvVar("PROPOSER_HD_PATH"),
}
MetricsServerEnableFlag = cli.BoolFlag{
Name: "metrics-server-enable",
Usage: "Whether or not to run the embedded metrics server",
EnvVar: prefixEnvVar("METRICS_SERVER_ENABLE"),
}
MetricsHostnameFlag = cli.StringFlag{
Name: "metrics-hostname",
Usage: "The hostname of the metrics server",
Value: "127.0.0.1",
EnvVar: prefixEnvVar("METRICS_HOSTNAME"),
}
MetricsPortFlag = cli.Uint64Flag{
Name: "metrics-port",
Usage: "The port of the metrics server",
Value: 7300,
EnvVar: prefixEnvVar("METRICS_PORT"),
}
)
var requiredFlags = []cli.Flag{
BuildEnvFlag,
EthNetworkNameFlag,
L1EthRpcFlag,
L2EthRpcFlag,
CTCAddressFlag,
SCCAddressFlag,
MaxL1TxSizeFlag,
MaxBatchSubmissionTimeFlag,
PollIntervalFlag,
NumConfirmationsFlag,
ResubmissionTimeoutFlag,
FinalityConfirmationsFlag,
RunTxBatchSubmitterFlag,
RunStateBatchSubmitterFlag,
SafeMinimumEtherBalanceFlag,
ClearPendingTxsFlag,
}
var optionalFlags = []cli.Flag{
LogLevelFlag,
SentryEnableFlag,
SentryDsnFlag,
SentryTraceRateFlag,
BlockOffsetFlag,
MaxGasPriceInGweiFlag,
GasRetryIncrementFlag,
SequencerPrivateKeyFlag,
ProposerPrivateKeyFlag,
MnemonicFlag,
SequencerHDPathFlag,
ProposerHDPathFlag,
MetricsServerEnableFlag,
MetricsHostnameFlag,
MetricsPortFlag,
}
// Flags contains the list of configuration options available to the binary.
var Flags = append(requiredFlags, optionalFlags...)
package flags
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/urfave/cli"
)
// TestRequiredFlagsSetRequired asserts that all flags deemed required properly
// have the Required field set to true.
func TestRequiredFlagsSetRequired(t *testing.T) {
for _, flag := range requiredFlags {
reqFlag, ok := flag.(cli.RequiredFlag)
require.True(t, ok)
require.True(t, reqFlag.IsRequired())
}
}
// TestOptionalFlagsDontSetRequired asserts that all flags deemed optional set
// the Required field to false.
func TestOptionalFlagsDontSetRequired(t *testing.T) {
for _, flag := range optionalFlags {
reqFlag, ok := flag.(cli.RequiredFlag)
require.True(t, ok)
require.False(t, reqFlag.IsRequired())
}
}
module github.com/ethereum-optimism/go/batch-submitter
go 1.16
require (
github.com/decred/dcrd/hdkeychain/v3 v3.0.0
github.com/ethereum/go-ethereum v1.10.11
github.com/getsentry/sentry-go v0.11.0
github.com/prometheus/client_golang v1.0.0
github.com/stretchr/testify v1.7.0
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef
github.com/urfave/cli v1.22.5
)
{
"name": "@eth-optimism/batch-submitter-service",
"version": "0.0.1",
"private": true,
"devDependencies": {}
}
package batchsubmitter
import (
"errors"
"io"
"github.com/ethereum/go-ethereum/log"
"github.com/getsentry/sentry-go"
)
var jsonFmt = log.JSONFormat()
// SentryStreamHandler creates a log.Handler that behaves similarly to
// log.StreamHandler, however it writes any log with severity greater than or
// equal to log.LvlError to Sentry. In that case, the passed log.Record is
// encoded using JSON rather than the default terminal output, so that it can be
// captured for debugging in the Sentry dashboard.
func SentryStreamHandler(wr io.Writer, fmtr log.Format) log.Handler {
h := log.FuncHandler(func(r *log.Record) error {
_, err := wr.Write(fmtr.Format(r))
// If this record's severity is log.LvlError or higher,
// serialize the record using JSON and write it to Sentry. We
// also capture the error message separately so that it's easy
// to parse what the error is in the dashboard.
//
// NOTE: The log.Lvl* constants are defined in reverse order of
// their severity, i.e. zero (log.LvlCrit) is the highest
// severity.
if r.Lvl <= log.LvlError {
sentry.WithScope(func(scope *sentry.Scope) {
scope.SetExtra("context", jsonFmt.Format(r))
sentry.CaptureException(errors.New(r.Msg))
})
}
return err
})
return log.LazyHandler(log.SyncHandler(h))
}
package txmgr
import (
"context"
"errors"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// ErrPublishTimeout signals that the tx manager did not receive a confirmation
// for a given tx after publishing with the maximum gas price and waiting out a
// resubmission timeout.
var ErrPublishTimeout = errors.New("failed to publish tx with max gas price")
// SendTxFunc defines a function signature for publishing a desired tx with a
// specific gas price. Implementations of this signature should also return
// promptly when the context is canceled.
type SendTxFunc = func(
ctx context.Context, gasPrice *big.Int) (*types.Transaction, error)
// Config houses parameters for altering the behavior of a SimpleTxManager.
type Config struct {
// MinGasPrice is the minimum gas price (in gwei). This is used for the
// initial publication attempt.
MinGasPrice *big.Int
// MaxGasPrice is the maximum gas price (in gwei). This is used to clamp
// the upper end of the range that the TxManager will ever publish when
// attempting to confirm a transaction.
MaxGasPrice *big.Int
// GasRetryIncrement is the additive gas price (in gwei) that will be
// used to bump each successive tx after a ResubmissionTimeout has
// elapsed.
GasRetryIncrement *big.Int
// ResubmissionTimeout is the interval at which, if no previously
// published transaction has been mined, the new tx with a bumped gas
// price will be published. Only one publication at MaxGasPrice will be
// attempted.
ResubmissionTimeout time.Duration
// ReceiptQueryInterval is the interval at which the tx manager will
// query the backend to check for confirmations after a tx at a
// specific gas price has been published.
ReceiptQueryInterval time.Duration
}
// TxManager is an interface that allows callers to reliably publish txs,
// bumping the gas price if needed, and obtain the receipt of the resulting tx.
type TxManager interface {
// Send is used to publish a transaction with incrementally higher gas
// prices until the transaction eventually confirms. This method blocks
// until an invocation of sendTx returns (called with differing gas
// prices). The method may be canceled using the passed context.
//
// NOTE: Send should be called by AT MOST one caller at a time.
Send(ctx context.Context, sendTx SendTxFunc) (*types.Receipt, error)
}
// ReceiptSource is a minimal function signature used to detect the confirmation
// of published txs.
//
// NOTE: This is a subset of bind.DeployBackend.
type ReceiptSource interface {
// TransactionReceipt queries the backend for a receipt associated with
// txHash. If lookup does not fail, but the transaction is not found,
// nil should be returned for both values.
TransactionReceipt(
ctx context.Context, txHash common.Hash) (*types.Receipt, error)
}
// SimpleTxManager is an implementation of TxManager that performs linear fee
// bumping of a tx until it confirms.
type SimpleTxManager struct {
cfg Config
backend ReceiptSource
}
// NewSimpleTxManager initializes a new SimpleTxManager with the passed Config.
func NewSimpleTxManager(cfg Config, backend ReceiptSource) *SimpleTxManager {
return &SimpleTxManager{
cfg: cfg,
backend: backend,
}
}
// Send is used to publish a transaction with incrementally higher gas prices
// until the transaction eventually confirms. This method blocks until an
// invocation of sendTx returns (called with differing gas prices). The method
// may be canceled using the passed context.
//
// NOTE: Send should be called by AT MOST one caller at a time.
func (m *SimpleTxManager) Send(
ctx context.Context, sendTx SendTxFunc) (*types.Receipt, error) {
// Initialize a wait group to track any spawned goroutines, and ensure
// we properly clean up any dangling resources this method generates.
// We assert that this is the case thoroughly in our unit tests.
var wg sync.WaitGroup
defer wg.Wait()
// Initialize a subcontext for the goroutines spawned in this process.
// The defer to cancel is done here (in reverse order of Wait) so that
// the goroutines can exit before blocking on the wait group.
ctxc, cancel := context.WithCancel(ctx)
defer cancel()
// Create a closure that will block on the passed sendTx function in the
// background, returning the first successfully mined receipt back to
// the main event loop via receiptChan.
receiptChan := make(chan *types.Receipt, 1)
sendTxAsync := func(gasPrice *big.Int) {
defer wg.Done()
// Sign and publish transaction with current gas price.
tx, err := sendTx(ctxc, gasPrice)
if err != nil {
log.Error("Unable to publish transaction",
"gas_price", gasPrice, "err", err)
// TODO(conner): add retry?
return
}
txHash := tx.Hash()
log.Info("Transaction published successfully", "hash", txHash,
"gas_price", gasPrice)
// Wait for the transaction to be mined, reporting the receipt
// back to the main event loop if found.
receipt, err := WaitMined(
ctxc, m.backend, tx, m.cfg.ReceiptQueryInterval,
)
if err != nil {
log.Trace("Send tx failed", "hash", txHash,
"gas_price", gasPrice, "err", err)
}
if receipt != nil {
// Use a non-blocking select to ensure the function can exit
// if more than one receipt is discovered.
select {
case receiptChan <- receipt:
log.Trace("Send tx succeeded", "hash", txHash,
"gas_price", gasPrice)
default:
}
}
}
// Initialize our initial gas price to the configured minimum.
curGasPrice := new(big.Int).Set(m.cfg.MinGasPrice)
// Submit and wait for the receipt at our first gas price in the
// background, before entering the event loop and waiting out the
// resubmission timeout.
wg.Add(1)
go sendTxAsync(curGasPrice)
for {
select {
// Whenever a resubmission timeout has elapsed, bump the gas
// price and publish a new transaction.
case <-time.After(m.cfg.ResubmissionTimeout):
// If our last attempt published at the max gas price,
// return an error as we are unlikely to succeed in
// publishing. This also indicates that the max gas
// price should likely be adjusted higher for the
// daemon.
if curGasPrice.Cmp(m.cfg.MaxGasPrice) >= 0 {
return nil, ErrPublishTimeout
}
// Bump the gas price using linear gas price increments.
curGasPrice = NextGasPrice(
curGasPrice, m.cfg.GasRetryIncrement,
m.cfg.MaxGasPrice,
)
// Submit and wait for the bumped transaction to confirm.
wg.Add(1)
go sendTxAsync(curGasPrice)
// The passed context has been canceled, i.e. in the event of a
// shutdown.
case <-ctxc.Done():
return nil, ctxc.Err()
// The transaction has confirmed.
case receipt := <-receiptChan:
return receipt, nil
}
}
}
// WaitMined blocks until the backend indicates confirmation of tx and returns
// the tx receipt. Queries are made every queryInterval, regardless of whether
// the backend returns an error. This method can be canceled using the passed
// context.
func WaitMined(
ctx context.Context,
backend ReceiptSource,
tx *types.Transaction,
queryInterval time.Duration,
) (*types.Receipt, error) {
queryTicker := time.NewTicker(queryInterval)
defer queryTicker.Stop()
txHash := tx.Hash()
for {
receipt, err := backend.TransactionReceipt(ctx, txHash)
if receipt != nil {
return receipt, nil
}
if err != nil {
log.Trace("Receipt retrievel failed", "hash", txHash,
"err", err)
} else {
log.Trace("Transaction not yet mined", "hash", txHash)
}
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-queryTicker.C:
}
}
}
// NextGasPrice bumps the current gas price using an additive gasRetryIncrement,
// clamping the resulting value to maxGasPrice.
//
// NOTE: This function does not mutate curGasPrice, but instead returns a
// copy. This removes the possibility of races occurring from goroutines
// sharing access to the same underlying big.Int.
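//
// For example, with a curGasPrice of 5 gwei, a gasRetryIncrement of 2 gwei,
// and a maxGasPrice of 10 gwei, successive calls yield 7, 9, and then
// 10 gwei (clamped at the max).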
func NextGasPrice(curGasPrice, gasRetryIncrement, maxGasPrice *big.Int) *big.Int {
nextGasPrice := new(big.Int).Set(curGasPrice)
nextGasPrice.Add(nextGasPrice, gasRetryIncrement)
if nextGasPrice.Cmp(maxGasPrice) > 0 {
nextGasPrice.Set(maxGasPrice)
}
return nextGasPrice
}
FROM golang:1.17.2-alpine3.14 AS builder
ARG GITCOMMIT=docker
ARG GITDATE=docker
ARG GITVERSION=docker
RUN apk add make jq && \
mkdir -p /app
WORKDIR /app
COPY go.mod /app
COPY go.sum /app
COPY cmd /app/cmd
COPY *.go /app/
COPY package.json /app
COPY Makefile /app
RUN make proxyd GITCOMMIT=$GITCOMMIT GITDATE=$GITDATE
FROM alpine:3.14.2
EXPOSE 8080
VOLUME /etc/proxyd.toml
COPY --from=builder /app/bin/proxyd /bin/proxyd
CMD ["/bin/proxyd", "/etc/proxyd.toml"]
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
GITVERSION := $(shell jq -r .version package.json)
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
LDFLAGSSTRING +=-X main.GitDate=$(GITDATE)
LDFLAGSSTRING +=-X main.GitVersion=$(GITVERSION)
LDFLAGS := -ldflags "$(LDFLAGSSTRING)"
proxyd:
go build -v $(LDFLAGS) -o ./bin/proxyd ./cmd/proxyd
.PHONY: proxyd
fmt:
go mod tidy
gofmt -w .
.PHONY: fmt
# rpc-proxy
This tool implements `proxyd`, an RPC request router and proxy. It does the following things:
1. Whitelists RPC methods.
2. Routes RPC methods to groups of backend services.
3. Automatically retries failed backend requests.
4. Provides metrics that measure request latency, error rates, and the like.
## Usage
Run `make proxyd` to build the binary. No additional dependencies are necessary.
To configure `proxyd` for use, you'll need to create a configuration file to define your proxy backends and routing rules. An example config that routes `eth_chainId` between Infura and Alchemy is below:
```toml
[backends]
[backends.infura]
base_url = "url-here"
[backends.alchemy]
base_url = "url-here"
[backend_groups]
[backend_groups.main]
backends = ["infura", "alchemy"]
[method_mappings]
eth_chainId = "main"
```
Check out [example.config.toml](./example.config.toml) for a full list of all options with commentary.
Once you have a config file, start the daemon via `proxyd <path-to-config>.toml`.
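Once the proxy is up, clients interact with it over plain JSON-RPC `POST` requests. A minimal client sketch in Go (assuming the proxy is listening on port 8080, as in the Dockerfile and example config):

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	req := []byte(`{"jsonrpc": "2.0", "method": "eth_chainId", "params": [], "id": 1}`)
	res, err := http.Post("http://localhost:8080", "application/json", bytes.NewReader(req))
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	body, _ := ioutil.ReadAll(res.Body)
	fmt.Println(string(body)) // e.g. {"jsonrpc":"2.0","result":"0xa","id":1}
}
```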
## Metrics
The following Prometheus metrics are exported:
| Name | Description | Flags |
|------------------------------------------------|-------------------------------------------------------------------------------------------------|----------------------------------------|
| `proxyd_backend_requests_total` | Count of all successful requests to a backend. | name: The name of the backend. |
| `proxyd_backend_errors_total` | Count of all backend errors. | name: The name of the backend. |
| `proxyd_http_requests_total` | Count of all HTTP requests, successful or not. | |
| `proxyd_http_request_duration_histogram_seconds` | Histogram of HTTP request durations. | |
| `proxyd_rpc_requests_total` | Count of all RPC requests. | method_name: The RPC method requested. |
| `proxyd_blocked_rpc_requests_total` | Count of all RPC requests with a blacklisted method. | method_name: The RPC method requested. |
| `proxyd_rpc_errors_total` | Count of all RPC errors. **NOTE:** Does not include errors sent from the backend to the client. | error_code: The JSON-RPC error code. |
The metrics port is configurable via the `metrics.port` and `metrics.host` keys in the config.
## Errata
- RPC errors originating from the backend (e.g., any backend response containing an `error` key) are passed on to the client directly. This simplifies the code and avoids having to marshal/unmarshal the backend's response JSON.
- Requests are distributed round-robin between backends in a group.
\ No newline at end of file
package proxyd
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"io"
"io/ioutil"
"math"
"math/rand"
"net/http"
"sync/atomic"
"time"
)
const (
JSONRPCVersion = "2.0"
)
var (
ErrNoBackend = errors.New("no backend available for method")
ErrBackendsInconsistent = errors.New("backends inconsistent, try again")
ErrBackendOffline = errors.New("backend offline")
backendRequestsCtr = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "proxyd",
Name: "backend_requests_total",
Help: "Count of backend requests.",
}, []string{
"name",
})
backendErrorsCtr = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "proxyd",
Name: "backend_errors_total",
Help: "Count of backend errors.",
}, []string{
"name",
})
backendPermanentErrorsCtr = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "proxyd",
Name: "backend_permanent_errors_total",
Help: "Count of backend errors that mark a backend as offline.",
}, []string{
"name",
})
)
type Backend struct {
Name string
authUsername string
authPassword string
baseURL string
client *http.Client
maxRetries int
maxResponseSize int64
lastPermError int64
unhealthyRetryInterval int64
}
type BackendOpt func(b *Backend)
func WithBasicAuth(username, password string) BackendOpt {
return func(b *Backend) {
b.authUsername = username
b.authPassword = password
}
}
func WithTimeout(timeout time.Duration) BackendOpt {
return func(b *Backend) {
b.client.Timeout = timeout
}
}
func WithMaxRetries(retries int) BackendOpt {
return func(b *Backend) {
b.maxRetries = retries
}
}
func WithMaxResponseSize(size int64) BackendOpt {
return func(b *Backend) {
b.maxResponseSize = size
}
}
func WithUnhealthyRetryInterval(interval int64) BackendOpt {
return func(b *Backend) {
b.unhealthyRetryInterval = interval
}
}
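// NewBackend creates a Backend with default settings (5 second timeout,
// effectively unlimited response size) that can be tuned with the functional
// options above, e.g. (the URL is a placeholder):
//
//	b := NewBackend("infura", "https://example.com/rpc",
//		WithTimeout(10*time.Second), WithMaxRetries(2))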
func NewBackend(name, baseURL string, opts ...BackendOpt) *Backend {
backend := &Backend{
Name: name,
baseURL: baseURL,
maxResponseSize: math.MaxInt64,
client: &http.Client{
Timeout: 5 * time.Second,
},
}
for _, opt := range opts {
opt(backend)
}
return backend
}
func (b *Backend) Forward(body []byte) (*RPCRes, error) {
if time.Now().Unix()-atomic.LoadInt64(&b.lastPermError) < b.unhealthyRetryInterval {
return nil, ErrBackendOffline
}
var lastError error
// <= to account for the first attempt not technically being
// a retry
for i := 0; i <= b.maxRetries; i++ {
resB, err := b.doForward(body)
if err != nil {
lastError = err
log.Warn("backend request failed, trying again", "err", err, "name", b.Name)
time.Sleep(calcBackoff(i))
continue
}
res := new(RPCRes)
// Don't mark the backend down if it gives us a bad response body.
if err := json.Unmarshal(resB, res); err != nil {
return nil, wrapErr(err, "error unmarshaling JSON")
}
return res, nil
}
atomic.StoreInt64(&b.lastPermError, time.Now().Unix())
backendPermanentErrorsCtr.WithLabelValues(b.Name).Inc()
return nil, wrapErr(lastError, "permanent error forwarding request")
}
func (b *Backend) doForward(body []byte) ([]byte, error) {
req, err := http.NewRequest("POST", b.baseURL, bytes.NewReader(body))
if err != nil {
backendErrorsCtr.WithLabelValues(b.Name).Inc()
return nil, wrapErr(err, "error creating backend request")
}
if b.authPassword != "" {
req.SetBasicAuth(b.authUsername, b.authPassword)
}
res, err := b.client.Do(req)
if err != nil {
backendErrorsCtr.WithLabelValues(b.Name).Inc()
return nil, wrapErr(err, "error in backend request")
}
if res.StatusCode != 200 {
backendErrorsCtr.WithLabelValues(b.Name).Inc()
return nil, fmt.Errorf("response code %d", res.StatusCode)
}
defer res.Body.Close()
resB, err := ioutil.ReadAll(io.LimitReader(res.Body, b.maxResponseSize))
if err != nil {
backendErrorsCtr.WithLabelValues(b.Name).Inc()
return nil, wrapErr(err, "error reading response body")
}
backendRequestsCtr.WithLabelValues(b.Name).Inc()
return resB, nil
}
type BackendGroup struct {
Name string
backends []*Backend
i int64
}
func (b *BackendGroup) Forward(body []byte) (*RPCRes, error) {
	// Rotate the starting index on each call so that requests are
	// distributed round-robin between the backends in the group, falling
	// through to the next backend on error.
	start := int(atomic.AddInt64(&b.i, 1) % int64(len(b.backends)))
	for n := range b.backends {
		back := b.backends[(start+n)%len(b.backends)]
		res, err := back.Forward(body)
		if err == ErrBackendOffline {
			log.Debug("skipping offline backend", "name", back.Name)
			continue
		}
		if err != nil {
			log.Error("error forwarding request to backend", "err", err, "name", back.Name)
			continue
		}
		return res, nil
	}
	return nil, errors.New("no backends available")
}
type MethodMapping struct {
methods map[string]*BackendGroup
}
func NewMethodMapping(methods map[string]*BackendGroup) *MethodMapping {
return &MethodMapping{methods: methods}
}
func (m *MethodMapping) BackendGroupFor(method string) (*BackendGroup, error) {
group := m.methods[method]
if group == nil {
return nil, ErrNoBackend
}
return group, nil
}
func calcBackoff(i int) time.Duration {
jitter := float64(rand.Int63n(250))
ms := math.Min(math.Pow(2, float64(i))*1000+jitter, 10000)
return time.Duration(ms) * time.Millisecond
}
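To make the retry behavior concrete, here is a hypothetical test sketch using `net/http/httptest`: the first attempt fails with a 500, the single retry succeeds, and `Forward` returns the decoded response. Between attempts, `calcBackoff` sleeps for roughly 1s, 2s, 4s, and so on (plus up to 250ms of jitter), capped at 10s.

```go
package proxyd

import (
	"net/http"
	"net/http/httptest"
	"testing"
	"time"
)

func TestForwardRetries(t *testing.T) {
	var calls int
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		calls++
		if calls == 1 {
			// First attempt: simulate a transient backend failure.
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Write([]byte(`{"jsonrpc": "2.0", "result": "0xa", "id": 1}`))
	}))
	defer ts.Close()

	b := NewBackend("test", ts.URL, WithMaxRetries(1), WithTimeout(time.Second))
	res, err := b.Forward([]byte(`{"jsonrpc": "2.0", "method": "eth_chainId", "id": 1}`))
	if err != nil {
		t.Fatal(err)
	}
	if res.Result != "0xa" {
		t.Fatalf("unexpected result: %v", res.Result)
	}
}
```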
package main
import (
"github.com/BurntSushi/toml"
"github.com/ethereum-optimism/optimism/go/proxyd"
"github.com/ethereum/go-ethereum/log"
"os"
)
var (
GitVersion = ""
GitCommit = ""
GitDate = ""
)
func main() {
// Set up logger with a default INFO level in case we fail to parse flags.
// Otherwise the final critical log won't show what the parsing error was.
log.Root().SetHandler(
log.LvlFilterHandler(
log.LvlInfo,
log.StreamHandler(os.Stdout, log.TerminalFormat(true)),
),
)
log.Info("starting proxyd", "version", GitVersion, "commit", GitCommit, "date", GitDate)
if len(os.Args) < 2 {
log.Crit("must specify a config file on the command line")
}
config := new(proxyd.Config)
if _, err := toml.DecodeFile(os.Args[1], config); err != nil {
log.Crit("error reading config file", "err", err)
}
if err := proxyd.Start(config); err != nil {
log.Crit("error starting proxyd", "err", err)
}
}
package proxyd
type ServerConfig struct {
Host string `toml:"host"`
Port int `toml:"port"`
MaxBodySizeBytes int64 `toml:"max_body_size_bytes"`
}
type MetricsConfig struct {
Enabled bool `toml:"enabled"`
Host string `toml:"host"`
Port int `toml:"port"`
}
type BackendOptions struct {
ResponseTimeoutSeconds int `toml:"response_timeout_seconds"`
MaxResponseSizeBytes int64 `toml:"max_response_size_bytes"`
MaxRetries int `toml:"backend_retries"`
UnhealthyBackendRetryIntervalSeconds int64 `toml:"unhealthy_backend_retry_interval_seconds"`
}
type BackendConfig struct {
Username string `toml:"username"`
Password string `toml:"password"`
BaseURL string `toml:"base_url"`
}
type BackendsConfig map[string]*BackendConfig
type BackendGroupConfig struct {
Backends []string
}
type BackendGroupsConfig map[string]*BackendGroupConfig
type MethodMappingsConfig map[string]string
type Config struct {
Server *ServerConfig `toml:"server"`
Metrics *MetricsConfig `toml:"metrics"`
BackendOptions *BackendOptions `toml:"backend"`
Backends BackendsConfig `toml:"backends"`
BackendGroups BackendGroupsConfig `toml:"backend_groups"`
MethodMappings MethodMappingsConfig `toml:"method_mappings"`
}
package proxyd
import "fmt"
func wrapErr(err error, msg string) error {
return fmt.Errorf("%s: %w", msg, err)
}
[server]
# Host for the proxyd server to listen on.
host = "0.0.0.0"
# Port for the above.
port = 8080
# Maximum client body size, in bytes, that the server will accept.
max_body_size_bytes = 10485760
[metrics]
# Whether or not to enable Prometheus metrics.
enabled = true
# Host for the Prometheus metrics endpoint to listen on.
host = "0.0.0.0"
# Port for the above.
port = 9761
[backend]
# How long proxyd should wait for a backend response before timing out.
response_timeout_seconds = 5
# Maximum response size, in bytes, that proxyd will accept from a backend.
max_response_size_bytes = 5242880
# Maximum number of times proxyd will retry a backend request before giving up.
backend_retries = 0
# Number of seconds to wait before trying an unhealthy backend again.
unhealthy_backend_retry_interval_seconds = 600
[backends]
# A map of backends by name.
[backends.infura]
# The URL to contact the backend at.
base_url = "url-here"
# HTTP basic auth username to use with the backend.
username = ""
# HTTP basic auth password to use with the backend.
password = ""
[backend_groups]
# A map of backend groups by name.
[backend_groups.main]
# A list of backend names to place in the group.
backends = ["infura", "alchemy"]
[method_mappings]
# A mapping between RPC methods and the backend groups that should serve them.
eth_call = "main"
eth_chainId = "main"
# other mappings go here
module github.com/ethereum-optimism/optimism/go/proxyd
go 1.16
require (
github.com/BurntSushi/toml v0.4.1
github.com/ethereum/go-ethereum v1.10.11
github.com/gorilla/mux v1.8.0
github.com/prometheus/client_golang v1.11.0
)
{
"name": "@eth-optimism/proxyd",
"version": "0.0.1",
"private": true,
"dependencies": {}
}
package proxyd
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus/promhttp"
"net/http"
"os"
"os/signal"
"syscall"
"time"
)
func Start(config *Config) error {
backendsByName := make(map[string]*Backend)
groupsByName := make(map[string]*BackendGroup)
if len(config.Backends) == 0 {
return errors.New("must define at least one backend")
}
if len(config.BackendGroups) == 0 {
return errors.New("must define at least one backend group")
}
if len(config.MethodMappings) == 0 {
return errors.New("must define at least one method mapping")
}
for name, cfg := range config.Backends {
opts := make([]BackendOpt, 0)
if cfg.BaseURL == "" {
return fmt.Errorf("must define a base URL for backend %s", name)
}
if config.BackendOptions.ResponseTimeoutSeconds != 0 {
timeout := time.Duration(config.BackendOptions.ResponseTimeoutSeconds) * time.Second
opts = append(opts, WithTimeout(timeout))
}
if config.BackendOptions.MaxRetries != 0 {
opts = append(opts, WithMaxRetries(config.BackendOptions.MaxRetries))
}
if config.BackendOptions.MaxResponseSizeBytes != 0 {
opts = append(opts, WithMaxResponseSize(config.BackendOptions.MaxResponseSizeBytes))
}
if config.BackendOptions.UnhealthyBackendRetryIntervalSeconds != 0 {
opts = append(opts, WithUnhealthyRetryInterval(config.BackendOptions.UnhealthyBackendRetryIntervalSeconds))
}
if cfg.Password != "" {
opts = append(opts, WithBasicAuth(cfg.Username, cfg.Password))
}
backendsByName[name] = NewBackend(name, cfg.BaseURL, opts...)
log.Info("configured backend", "name", name, "base_url", cfg.BaseURL)
}
for groupName, cfg := range config.BackendGroups {
	backs := make([]*Backend, 0)
	for _, backName := range cfg.Backends {
		if backendsByName[backName] == nil {
			return fmt.Errorf("undefined backend %s", backName)
		}
		backs = append(backs, backendsByName[backName])
	}
	groupsByName[groupName] = &BackendGroup{
		Name:     groupName,
		backends: backs,
	}
	log.Info("configured backend group", "name", groupName)
}
mappings := make(map[string]*BackendGroup)
for method, groupName := range config.MethodMappings {
if groupsByName[groupName] == nil {
return fmt.Errorf("undefined backend group %s", groupName)
}
mappings[method] = groupsByName[groupName]
}
methodMappings := NewMethodMapping(mappings)
srv := NewServer(methodMappings, config.Server.MaxBodySizeBytes)
if config.Metrics.Enabled {
addr := fmt.Sprintf("%s:%d", config.Metrics.Host, config.Metrics.Port)
log.Info("starting metrics server", "addr", addr)
go http.ListenAndServe(addr, promhttp.Handler())
}
go func() {
if err := srv.ListenAndServe(config.Server.Host, config.Server.Port); err != nil {
log.Crit("error starting server", "err", err)
}
}()
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
recvSig := <-sig
log.Info("caught signal, shutting down", "signal", recvSig)
return nil
}
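For completeness, the proxy can also be started programmatically rather than from a TOML file. A minimal sketch (note that `Start` dereferences `Server`, `Metrics`, and `BackendOptions` without nil checks, so all three must be set; the backend URL below is a placeholder):

```go
package main

import "github.com/ethereum-optimism/optimism/go/proxyd"

func main() {
	cfg := &proxyd.Config{
		Server:         &proxyd.ServerConfig{Host: "0.0.0.0", Port: 8080, MaxBodySizeBytes: 1 << 20},
		Metrics:        &proxyd.MetricsConfig{Enabled: false},
		BackendOptions: &proxyd.BackendOptions{},
		Backends:       proxyd.BackendsConfig{"local": {BaseURL: "http://localhost:8545"}},
		BackendGroups:  proxyd.BackendGroupsConfig{"main": {Backends: []string{"local"}}},
		MethodMappings: proxyd.MethodMappingsConfig{"eth_chainId": "main"},
	}
	// Blocks until SIGINT/SIGTERM is received.
	if err := proxyd.Start(cfg); err != nil {
		panic(err)
	}
}
```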
package proxyd
import (
"encoding/json"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/log"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"io"
"io/ioutil"
"net/http"
"time"
)
var (
httpRequestsCtr = promauto.NewCounter(prometheus.CounterOpts{
Namespace: "proxyd",
Name: "http_requests_total",
Help: "Count of total HTTP requests.",
})
httpRequestDurationHisto = promauto.NewHistogram(prometheus.HistogramOpts{
Namespace: "proxyd",
Name: "http_request_duration_histogram_seconds",
Help: "Histogram of HTTP request durations.",
Buckets: []float64{
0,
0.1,
0.25,
0.75,
1,
},
})
rpcRequestsCtr = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "proxyd",
Name: "rpc_requests_total",
Help: "Count of RPC requests.",
}, []string{
"method_name",
})
blockedRPCsCtr = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "proxyd",
Name: "blocked_rpc_requests_total",
Help: "Count of blocked RPC requests.",
}, []string{
"method_name",
})
rpcErrorsCtr = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "proxyd",
Name: "rpc_errors_total",
Help: "Count of RPC errors.",
}, []string{
"error_code",
})
)
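// RPCReq models a single JSON-RPC 2.0 request, e.g. (illustrative):
//
//	{"jsonrpc": "2.0", "method": "eth_chainId", "params": [], "id": 1}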
type RPCReq struct {
JSONRPC string `json:"jsonrpc"`
Method string `json:"method"`
Params json.RawMessage `json:"params"`
ID *int `json:"id"`
}
type RPCRes struct {
JSONRPC string `json:"jsonrpc"`
Result interface{} `json:"result,omitempty"`
Error *RPCErr `json:"error,omitempty"`
ID *int `json:"id"`
}
type RPCErr struct {
Code int `json:"code"`
Message string `json:"message"`
}
type Server struct {
mappings *MethodMapping
maxBodySize int64
}
func NewServer(mappings *MethodMapping, maxBodySize int64) *Server {
return &Server{
mappings: mappings,
maxBodySize: maxBodySize,
}
}
func (s *Server) ListenAndServe(host string, port int) error {
hdlr := mux.NewRouter()
hdlr.HandleFunc("/healthz", s.HandleHealthz).Methods("GET")
hdlr.HandleFunc("/", s.HandleRPC).Methods("POST")
addr := fmt.Sprintf("%s:%d", host, port)
server := &http.Server{
Handler: instrumentedHdlr(hdlr),
Addr: addr,
}
log.Info("starting HTTP server", "addr", addr)
return server.ListenAndServe()
}
func (s *Server) HandleHealthz(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("OK"))
}
func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(io.LimitReader(r.Body, s.maxBodySize))
if err != nil {
log.Error("error reading request body", "err", err)
rpcErrorsCtr.WithLabelValues("-32700").Inc()
writeRPCError(w, nil, -32700, "could not read request body")
return
}
req := new(RPCReq)
if err := json.Unmarshal(body, req); err != nil {
rpcErrorsCtr.WithLabelValues("-32700").Inc()
writeRPCError(w, nil, -32700, "invalid JSON")
return
}
if req.JSONRPC != JSONRPCVersion {
rpcErrorsCtr.WithLabelValues("-32600").Inc()
writeRPCError(w, nil, -32600, "invalid json-rpc version")
return
}
group, err := s.mappings.BackendGroupFor(req.Method)
if err != nil {
rpcErrorsCtr.WithLabelValues("-32601").Inc()
blockedRPCsCtr.WithLabelValues(req.Method).Inc()
log.Info("blocked request for non-whitelisted method", "method", req.Method)
writeRPCError(w, req.ID, -32601, "method not found")
return
}
backendRes, err := group.Forward(body)
if err != nil {
log.Error("error forwarding RPC request", "group", group.Name, "method", req.Method, "err", err)
rpcErrorsCtr.WithLabelValues("-32603").Inc()
msg := "error fetching data from upstream"
if errors.Is(err, ErrBackendsInconsistent) {
msg = ErrBackendsInconsistent.Error()
}
writeRPCError(w, req.ID, -32603, msg)
return
}
enc := json.NewEncoder(w)
if err := enc.Encode(backendRes); err != nil {
log.Error("error encoding response", "err", err)
return
}
rpcRequestsCtr.WithLabelValues(req.Method).Inc()
log.Debug("forwarded RPC method", "method", req.Method, "group", group.Name)
}
func writeRPCError(w http.ResponseWriter, id *int, code int, msg string) {
enc := json.NewEncoder(w)
w.WriteHeader(200)
body := &RPCRes{
ID: id,
Error: &RPCErr{
Code: code,
Message: msg,
},
}
if err := enc.Encode(body); err != nil {
log.Error("error writing RPC error", "err", err)
}
}
func instrumentedHdlr(h http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
httpRequestsCtr.Inc()
start := time.Now()
h.ServeHTTP(w, r)
dur := time.Since(start)
httpRequestDurationHisto.Observe(dur.Seconds())
}
}
......@@ -22,17 +22,17 @@
"@eth-optimism/core-utils": "^0.6.0",
"@eth-optimism/hardhat-ovm": "^0.2.3",
"@eth-optimism/message-relayer": "^0.1.13",
"@ethersproject/providers": "^5.4.4",
"@ethersproject/providers": "^5.4.5",
"@nomiclabs/hardhat-ethers": "^2.0.2",
"@nomiclabs/hardhat-waffle": "^2.0.1",
"@types/chai": "^4.2.17",
"@types/chai-as-promised": "^7.1.3",
"@types/chai": "^4.2.18",
"@types/chai-as-promised": "^7.1.4",
"@types/mocha": "^8.2.2",
"@types/rimraf": "^3.0.0",
"@types/shelljs": "^0.8.8",
"@typescript-eslint/eslint-plugin": "^4.26.0",
"@typescript-eslint/parser": "^4.26.0",
"chai": "^4.3.3",
"chai": "^4.3.4",
"chai-as-promised": "^7.1.1",
"docker-compose": "^0.23.8",
"dotenv": "^10.0.0",
......@@ -49,11 +49,11 @@
"eslint-plugin-unicorn": "^32.0.1",
"ethereum-waffle": "^3.3.0",
"ethers": "^5.4.5",
"hardhat": "^2.2.1",
"hardhat": "^2.3.0",
"hardhat-gas-reporter": "^1.0.4",
"mocha": "^8.3.1",
"mocha": "^8.4.0",
"rimraf": "^3.0.2",
"shelljs": "^0.8.4",
"typescript": "^4.2.3"
"typescript": "^4.3.5"
}
}
FROM ethereumoptimism/builder AS builder
ARG LOCAL_REGISTRY=docker.io
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder AS builder
FROM node:14-alpine
......
FROM ethereumoptimism/builder AS builder
ARG LOCAL_REGISTRY=docker.io
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder AS builder
FROM node:14-alpine
......
FROM ethereumoptimism/builder AS builder
ARG LOCAL_REGISTRY=docker.io
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder AS builder
FROM node:14-alpine
......
FROM ethereumoptimism/builder AS builder
ARG LOCAL_REGISTRY=docker.io
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder AS builder
FROM node:14-alpine
......
FROM ethereumoptimism/builder AS builder
ARG LOCAL_REGISTRY=docker.io
FROM ${LOCAL_REGISTRY}/ethereumoptimism/builder AS builder
FROM node:14-alpine
......
......@@ -69,7 +69,9 @@ http {
}
location / {
set $jsonrpc_whitelist {{env.Getenv "ETH_CALLS_ALLOWED"}};
if ($request_method = POST) {
access_by_lua_file 'eth-jsonrpc-access.lua';
}
proxy_pass http://sequencer;
}
}
......
{
"name": "@eth-optimism/rpc-proxy",
"version": "0.0.1",
"private": true,
"devDependencies": {}
}
# build in 2 steps
function build_images() {
docker-compose build --parallel -- builder l2geth l1_chain
docker-compose build --parallel -- deployer dtl batch_submitter relayer integration_tests
# Builds an image using Buildx. Usage:
# build <name> <tag> <dockerfile> <context>
function build() {
echo "Building $1."
echo "Tag: $2"
echo "Dockerfile: $3"
echo "Context: $4"
docker buildx build \
--tag "$2" \
--build-arg LOCAL_REGISTRY=localhost:5000 \
--cache-from "type=local,src=/tmp/.buildx-cache/$1" \
--cache-to="type=local,dest=/tmp/.buildx-cache-new/$1" \
--file "$3" \
--load "$4" \
&
}
function build_dependencies() {
yarn
yarn build
}
# Split across two build stages:
#
# 1. Build the builder and everything that doesn't depend on it, then
# 2. Build everything else.
#
# Each individual build is executed in parallel, so we use wait to block until
# all builds in each stage are complete.
mkdir -p /tmp/.buildx-cache-new
docker buildx build --tag "localhost:5000/ethereumoptimism/builder:latest" --cache-from "type=local,src=/tmp/.buildx-cache/builder" --cache-to="type=local,mode=max,dest=/tmp/.buildx-cache-new/builder" --file "./ops/docker/Dockerfile.monorepo" --push . &
build l2geth "ethereumoptimism/l2geth:latest" "./ops/docker/Dockerfile.geth" .
build l1chain "ethereumoptimism/hardhat:latest" "./ops/docker/hardhat/Dockerfile" ./ops/docker/hardhat
wait
# BuildX builds everything in a container when docker-container is selected as
# the backend. Unfortunately, this means that the built image must be pushed
# then re-pulled in order to make the container accessible to the Docker daemon.
# We have to use the docker-container backend since the docker backend does
# not support cache-from and cache-to.
docker pull localhost:5000/ethereumoptimism/builder:latest
# Re-tag the local registry version of the builder so that docker-compose and
# friends can see it.
docker tag localhost:5000/ethereumoptimism/builder:latest ethereumoptimism/builder:latest
build_images &
build_dependencies &
build deployer "ethereumoptimism/deployer:latest" "./ops/docker/Dockerfile.deployer" .
build dtl "ethereumoptimism/data-transport-layer:latest" "./ops/docker/Dockerfile.data-transport-layer" .
build batch_submitter "ethereumoptimism/batch-submitter:latest" "./ops/docker/Dockerfile.batch-submitter" .
build relayer "ethereumoptimism/message-relayer:latest" "./ops/docker/Dockerfile.message-relayer" .
build integration-tests "ethereumoptimism/integration-tests:latest" "./ops/docker/Dockerfile.integration-tests" .
wait
......@@ -9,7 +9,10 @@
"l2geth",
"integration-tests",
"specs",
"go/gas-oracle"
"go/gas-oracle",
"go/batch-submitter",
"go/proxyd",
"ops/docker/rpc-proxy"
],
"nohoist": [
"examples/*"
......
......@@ -33,35 +33,35 @@
"url": "https://github.com/ethereum-optimism/optimism-monorepo.git"
},
"dependencies": {
"@eth-optimism/common-ts": "^0.1.5",
"@eth-optimism/common-ts": "0.1.5",
"@eth-optimism/contracts": "^0.4.13",
"@eth-optimism/core-utils": "^0.6.0",
"@eth-optimism/ynatm": "^0.2.2",
"@ethersproject/abstract-provider": "^5.4.1",
"@ethersproject/providers": "^5.4.4",
"@sentry/node": "^6.2.5",
"@ethersproject/providers": "^5.4.5",
"@sentry/node": "^6.3.1",
"bcfg": "^0.1.6",
"bluebird": "^3.7.2",
"dotenv": "^8.2.0",
"dotenv": "^10.0.0",
"ethers": "^5.4.5",
"old-contracts": "npm:@eth-optimism/contracts@^0.0.2-alpha.7",
"prom-client": "^13.1.0"
},
"devDependencies": {
"@eth-optimism/smock": "^1.1.9",
"@eth-optimism/smock": "^1.1.10",
"@nomiclabs/hardhat-ethers": "^2.0.2",
"@nomiclabs/hardhat-waffle": "^2.0.1",
"@types/bluebird": "^3.5.34",
"@types/chai": "^4.1.7",
"@types/mocha": "^5.2.6",
"@types/node": "^11.11.3",
"@types/prettier": "^1.19.1",
"@types/chai": "^4.2.18",
"@types/mocha": "^8.2.2",
"@types/node": "^15.12.2",
"@types/prettier": "^2.2.3",
"@types/rimraf": "^3.0.0",
"@types/sinon": "^9.0.10",
"@types/sinon-chai": "^3.2.5",
"@typescript-eslint/eslint-plugin": "^4.26.0",
"@typescript-eslint/parser": "^4.26.0",
"chai": "^4.2.0",
"chai": "^4.3.4",
"babel-eslint": "^10.1.0",
"eslint": "^7.27.0",
"eslint-plugin-prettier": "^3.4.0",
......@@ -72,16 +72,16 @@
"eslint-plugin-prefer-arrow": "^1.2.3",
"eslint-plugin-react": "^7.24.0",
"eslint-plugin-unicorn": "^32.0.1",
"ethereum-waffle": "3.0.0",
"ethereum-waffle": "^3.3.0",
"ganache-core": "^2.13.2",
"hardhat": "^2.2.1",
"mocha": "^6.1.4",
"prettier": "^2.2.1",
"hardhat": "^2.3.0",
"mocha": "^8.4.0",
"prettier": "^2.3.1",
"lint-staged": "11.0.0",
"rimraf": "^2.6.3",
"rimraf": "^3.0.2",
"sinon": "^9.2.4",
"sinon-chai": "^3.5.0",
"typescript": "^4.2.3"
"typescript": "^4.3.5"
},
"resolutions": {
"ganache-core": "^2.13.2",
......
......@@ -229,7 +229,7 @@ describe('BatchSubmitter', () => {
txBatchTxSubmitter,
1,
new Logger({ name: TX_BATCH_SUBMITTER_LOG_TAG }),
testMetrics,
testMetrics
)
}
......
......@@ -20,7 +20,7 @@
},
"devDependencies": {
"@types/chai": "^4.2.18",
"@types/express": "^4.17.11",
"@types/express": "^4.17.12",
"@types/mocha": "^8.2.2",
"@types/pino": "^6.3.6",
"@types/pino-multi-stream": "^5.1.1",
......@@ -39,13 +39,13 @@
"eslint-plugin-react": "^7.24.0",
"eslint-plugin-unicorn": "^32.0.1",
"mocha": "^8.4.0",
"prettier": "^2.2.1",
"supertest": "^6.1.3",
"prettier": "^2.3.1",
"supertest": "^6.1.4",
"ts-mocha": "^8.0.0",
"typescript": "^4.2.3"
"typescript": "^4.3.5"
},
"dependencies": {
"@sentry/node": "^6.2.5",
"@sentry/node": "^6.3.1",
"express": "^4.17.1",
"pino": "^6.11.3",
"pino-multi-stream": "^5.3.0",
......
......@@ -21,14 +21,14 @@
"test:coverage": "nyc ts-mocha test/**/*.spec.ts && nyc merge .nyc_output coverage.json"
},
"devDependencies": {
"@types/chai": "^4.2.17",
"@types/chai": "^4.2.18",
"@types/lodash": "^4.14.168",
"@types/mocha": "^8.2.2",
"@types/prettier": "^2.2.3",
"@typescript-eslint/eslint-plugin": "^4.26.0",
"@typescript-eslint/parser": "^4.26.0",
"babel-eslint": "^10.1.0",
"chai": "^4.3.0",
"chai": "^4.3.4",
"eslint": "^7.27.0",
"eslint-config-prettier": "^8.3.0",
"eslint-plugin-ban": "^1.5.2",
......@@ -39,11 +39,11 @@
"eslint-plugin-react": "^7.24.0",
"eslint-plugin-unicorn": "^32.0.1",
"lint-staged": "11.0.0",
"mocha": "^8.3.0",
"mocha": "^8.4.0",
"nyc": "^15.1.0",
"prettier": "^2.2.1",
"prettier": "^2.3.1",
"ts-mocha": "^8.0.0",
"typescript": "^4.2.3"
"typescript": "^4.3.5"
},
"dependencies": {
"@ethersproject/abstract-provider": "^5.4.1",
......
import cloneDeep from 'lodash/cloneDeep'
import { providers } from 'ethers'
const parseNumber = (n: string | number): number => {
if (typeof n === 'string' && n.startsWith('0x')) {
return parseInt(n, 16)
}
if (typeof n === 'number') {
return n
}
return parseInt(n, 10)
}
/**
* Helper for adding additional L2 context to transactions
*/
......@@ -25,9 +35,14 @@ export const injectL2Context = (l1Provider: providers.JsonRpcProvider) => {
for (let i = 0; i < b.transactions.length; i++) {
b.transactions[i].l1BlockNumber = block.transactions[i].l1BlockNumber
if (b.transactions[i].l1BlockNumber != null) {
b.transactions[i].l1BlockNumber = parseInt(
b.transactions[i].l1BlockNumber,
16
b.transactions[i].l1BlockNumber = parseNumber(
b.transactions[i].l1BlockNumber
)
}
b.transactions[i].l1Timestamp = block.transactions[i].l1Timestamp
if (b.transactions[i].l1Timestamp != null) {
b.transactions[i].l1Timestamp = parseNumber(
b.transactions[i].l1Timestamp
)
}
b.transactions[i].l1TxOrigin = block.transactions[i].l1TxOrigin
......
......@@ -26,7 +26,7 @@
"supertest": "^6.1.4",
"ts-mocha": "^8.0.0",
"ts-node": "^10.0.0",
"typescript": "^4.3.2"
"typescript": "^4.3.5"
},
"dependencies": {
"@eth-optimism/common-ts": "0.1.5",
......