Commit 2833f2b8 authored by Matthew Slipper, committed by GitHub

Merge pull request #2264 from ethereum-optimism/develop

Develop -> Master
parents 03f1cfda 82465db9
---
'@eth-optimism/integration-tests': patch
'@eth-optimism/l2geth': patch
'@eth-optimism/contracts': patch
---
Add support for system addresses
---
'@eth-optimism/replica-healthcheck': patch
---
Fix bug in replica healthcheck dockerfile
---
'@eth-optimism/contracts': patch
---
Add a fetch batches hardhat task
---
'@eth-optimism/integration-tests': patch
---
Add test coverage for zlib compressed batches
---
'@eth-optimism/data-transport-layer': patch
---
Enable typed batch support
---
'@eth-optimism/batch-submitter': patch
---
Update to allow for zlib compressed batches
---
'@eth-optimism/contracts': patch
---
Remove yargs as a contracts dependency (unused)
---
'@eth-optimism/sdk': patch
---
Add a function for waiting for a particular message status
---
'@eth-optimism/batch-submitter-service': patch
---
Move L2 dial logic out of bss-core to avoid l2geth dependency
---
'@eth-optimism/integration-tests': patch
---
Replaces contract references in integration tests with SDK CrossChainMessenger objects.
---
'@eth-optimism/core-utils': patch
---
Add toJSON methods to the batch primitives
---
'@eth-optimism/batch-submitter-service': patch
---
Enable the usage of typed batches and type 0 zlib compressed batches
---
'@eth-optimism/core-utils': patch
---
Update batch serialization with typed batches and zlib compression
...@@ -2,7 +2,7 @@ version: 2.1 ...@@ -2,7 +2,7 @@ version: 2.1
orbs: orbs:
gcp-gke: circleci/gcp-gke@1.3.0 gcp-gke: circleci/gcp-gke@1.3.0
slack: circleci/slack@4.5.1 slack: circleci/slack@4.5.1
slack-fail-post-step: &slack-fail-post-step slack-nightly-build-fail-post-step: &slack-nightly-build-fail-post-step
post-steps: post-steps:
- slack/notify: - slack/notify:
channel: $SLACK_DEFAULT_CHANNEL channel: $SLACK_DEFAULT_CHANNEL
...@@ -154,7 +154,7 @@ jobs: ...@@ -154,7 +154,7 @@ jobs:
--env PRIVATE_KEY=$NIGHTLY_ITESTS_PRIVKEY \ --env PRIVATE_KEY=$NIGHTLY_ITESTS_PRIVKEY \
--env L1_URL=https://nightly-l1.optimism-stacks.net \ --env L1_URL=https://nightly-l1.optimism-stacks.net \
--env L2_URL=https://nightly-l2.optimism-stacks.net \ --env L2_URL=https://nightly-l2.optimism-stacks.net \
--env ADDRESS_MANAGER=0x22D4E211ef8704f2ca2d6dfdB32125E2530ACE3e \ --env ADDRESS_MANAGER=0xfcA6De8Db94C4d99bD5a7f5De1bb7A039265Ac42 \
--env L2_CHAINID=69 \ --env L2_CHAINID=69 \
--env MOCHA_BAIL=true \ --env MOCHA_BAIL=true \
--env MOCHA_TIMEOUT=300000 \ --env MOCHA_TIMEOUT=300000 \
...@@ -190,6 +190,66 @@ workflows: ...@@ -190,6 +190,66 @@ workflows:
- run-itests-nightly: - run-itests-nightly:
context: context:
- optimism - optimism
post-steps:
- slack/notify:
channel: $SLACK_DEFAULT_CHANNEL
event: fail
custom: |
{
"text": "",
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "🔴 Nightly integration tests failed!"
}
},
{
"type": "actions",
"elements": [
{
"type": "button",
"text": {
"type": "plain_text",
"text": "View Job"
},
"url": "${CIRCLE_BUILD_URL}"
}
]
}
]
}
- slack/notify:
channel: $SLACK_DEFAULT_CHANNEL
event: pass
custom: |
{
"text": "",
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "✅ Nightly integration tests passed."
}
},
{
"type": "actions",
"elements": [
{
"type": "button",
"text": {
"type": "plain_text",
"text": "View Job"
},
"url": "${CIRCLE_BUILD_URL}"
}
]
}
]
}
nightly: nightly:
triggers: triggers:
- schedule: - schedule:
...@@ -203,47 +263,47 @@ workflows: ...@@ -203,47 +263,47 @@ workflows:
context: context:
- optimism - optimism
- slack - slack
<<: *slack-fail-post-step <<: *slack-nightly-build-fail-post-step
- build-batch-submitter: - build-batch-submitter:
context: context:
- optimism - optimism
- slack - slack
<<: *slack-fail-post-step <<: *slack-nightly-build-fail-post-step
- build-deployer: - build-deployer:
context: context:
- optimism - optimism
- slack - slack
<<: *slack-fail-post-step <<: *slack-nightly-build-fail-post-step
- build-l2geth: - build-l2geth:
context: context:
- optimism - optimism
- slack - slack
<<: *slack-fail-post-step <<: *slack-nightly-build-fail-post-step
- build-gas-oracle: - build-gas-oracle:
context: context:
- optimism - optimism
- slack - slack
<<: *slack-fail-post-step <<: *slack-nightly-build-fail-post-step
- build-integration-tests: - build-integration-tests:
context: context:
- optimism - optimism
- slack - slack
<<: *slack-fail-post-step <<: *slack-nightly-build-fail-post-step
- build-go-batch-submitter: - build-go-batch-submitter:
context: context:
- optimism - optimism
- slack - slack
<<: *slack-fail-post-step <<: *slack-nightly-build-fail-post-step
- build-proxyd: - build-proxyd:
context: context:
- optimism - optimism
- slack - slack
<<: *slack-fail-post-step <<: *slack-nightly-build-fail-post-step
- deploy-nightly: - deploy-nightly:
context: context:
- optimism - optimism
- slack - slack
<<: *slack-fail-post-step <<: *slack-nightly-build-fail-post-step
requires: requires:
- build-dtl - build-dtl
- build-batch-submitter - build-batch-submitter
......
...@@ -14,3 +14,4 @@ l2geth/signer/fourbyte ...@@ -14,3 +14,4 @@ l2geth/signer/fourbyte
l2geth/cmd/puppeth l2geth/cmd/puppeth
l2geth/cmd/clef l2geth/cmd/clef
go/gas-oracle/gas-oracle go/gas-oracle/gas-oracle
go/batch-submitter/batch-submitter
...@@ -10,8 +10,8 @@ on: ...@@ -10,8 +10,8 @@ on:
- '*rc' - '*rc'
- 'regenesis/*' - 'regenesis/*'
pull_request: pull_request:
paths: branches:
- 'go/batch-submitter/**' - '*'
workflow_dispatch: workflow_dispatch:
defaults: defaults:
......
...@@ -10,8 +10,8 @@ on: ...@@ -10,8 +10,8 @@ on:
- '*rc' - '*rc'
- 'regenesis/*' - 'regenesis/*'
pull_request: pull_request:
paths: branches:
- 'go/bss-core/**' - '*'
workflow_dispatch: workflow_dispatch:
defaults: defaults:
......
...@@ -10,8 +10,8 @@ on: ...@@ -10,8 +10,8 @@ on:
- '*rc' - '*rc'
- 'regenesis/*' - 'regenesis/*'
pull_request: pull_request:
paths: branches:
- 'go/gas-oracle/**' - '*'
workflow_dispatch: workflow_dispatch:
defaults: defaults:
......
...@@ -4,15 +4,16 @@ on: ...@@ -4,15 +4,16 @@ on:
paths: paths:
- 'go/gas-oracle/**' - 'go/gas-oracle/**'
- 'go/batch-submitter/**' - 'go/batch-submitter/**'
- 'go/bss-core/**'
- 'go/teleportr/**'
branches: branches:
- 'master' - 'master'
- 'develop' - 'develop'
- '*rc' - '*rc'
- 'regenesis/*' - 'regenesis/*'
pull_request: pull_request:
paths: branches:
- 'go/gas-oracle/**' - '*'
- 'go/batch-submitter/**'
jobs: jobs:
golangci: golangci:
name: lint name: lint
...@@ -34,3 +35,8 @@ jobs: ...@@ -34,3 +35,8 @@ jobs:
with: with:
version: v1.29 version: v1.29
working-directory: go/bss-core working-directory: go/bss-core
- name: golangci-lint teleportr
uses: golangci/golangci-lint-action@v2
with:
version: v1.29
working-directory: go/teleportr
...@@ -20,7 +20,9 @@ jobs: ...@@ -20,7 +20,9 @@ jobs:
- 5000:5000 - 5000:5000
strategy: strategy:
matrix: matrix:
batch-submitter: [ts-batch-submitter, go-batch-submitter] batch-type:
- zlib
- legacy
env: env:
DOCKER_BUILDKIT: 1 DOCKER_BUILDKIT: 1
COMPOSE_DOCKER_CLI_BUILD: 1 COMPOSE_DOCKER_CLI_BUILD: 1
...@@ -40,11 +42,15 @@ jobs: ...@@ -40,11 +42,15 @@ jobs:
restore-keys: | restore-keys: |
${{ runner.os }}-yarn- ${{ runner.os }}-yarn-
- name: Set conditional env vars
run: |
echo "BATCH_SUBMITTER_SEQUENCER_BATCH_TYPE=${{ matrix.batch-type }}" >> $GITHUB_ENV
- name: Bring the stack up - name: Bring the stack up
working-directory: ./ops working-directory: ./ops
run: | run: |
./scripts/stats.sh & ./scripts/stats.sh &
docker-compose -f docker-compose.yml -f docker-compose.${{ matrix.batch-submitter }}.yml up -d docker-compose -f docker-compose.yml up -d
- name: Wait for the Sequencer node - name: Wait for the Sequencer node
working-directory: ./ops working-directory: ./ops
......
...@@ -6,8 +6,8 @@ on: ...@@ -6,8 +6,8 @@ on:
- 'master' - 'master'
- 'develop' - 'develop'
pull_request: pull_request:
paths: branches:
- 'go/proxyd/**' - '*'
workflow_dispatch: workflow_dispatch:
defaults: defaults:
......
...@@ -345,7 +345,7 @@ jobs: ...@@ -345,7 +345,7 @@ jobs:
build-args: BUILDER_TAG=${{ needs.builder.outputs.canary-docker-tag }} build-args: BUILDER_TAG=${{ needs.builder.outputs.canary-docker-tag }}
replica-healthcheck: replica-healthcheck:
name: Publish Data Transport Layer Version ${{ needs.builder.outputs.canary-docker-tag }} name: Publish Replica Healthcheck Version ${{ needs.builder.outputs.canary-docker-tag }}
needs: builder needs: builder
if: needs.builder.outputs.replica-healthcheck != '' if: needs.builder.outputs.replica-healthcheck != ''
runs-on: ubuntu-latest runs-on: ubuntu-latest
......
...@@ -10,8 +10,8 @@ on: ...@@ -10,8 +10,8 @@ on:
- '*rc' - '*rc'
- 'regenesis/*' - 'regenesis/*'
pull_request: pull_request:
paths: branches:
- 'go/teleportr/**' - '*'
workflow_dispatch: workflow_dispatch:
defaults: defaults:
...@@ -25,8 +25,8 @@ jobs: ...@@ -25,8 +25,8 @@ jobs:
postgres: postgres:
image: postgres image: postgres
env: env:
POSTGRES_USER=postgres POSTGRES_USER: postgres
POSTGRES_PASSWORD=password POSTGRES_PASSWORD: password
ports: ports:
- 5432:5432 - 5432:5432
steps: steps:
......
...@@ -113,12 +113,6 @@ jobs: ...@@ -113,12 +113,6 @@ jobs:
fail_ci_if_error: false fail_ci_if_error: false
verbose: true verbose: true
flags: core-utils flags: core-utils
- uses: codecov/codecov-action@v1
with:
files: ./packages/batch-submitter/coverage.json
fail_ci_if_error: false
verbose: true
flags: batch-submitter
- uses: codecov/codecov-action@v1 - uses: codecov/codecov-action@v1
with: with:
files: ./packages/data-transport-layer/coverage.json files: ./packages/data-transport-layer/coverage.json
...@@ -170,10 +164,6 @@ jobs: ...@@ -170,10 +164,6 @@ jobs:
# if: steps.yarn-cache.outputs.cache-hit != 'true' # if: steps.yarn-cache.outputs.cache-hit != 'true'
run: yarn install run: yarn install
- name: Check packages/batch-submitter
working-directory: ./packages/batch-submitter
run: npx depcheck
- name: Check packages/contracts - name: Check packages/contracts
working-directory: ./packages/contracts working-directory: ./packages/contracts
run: npx depcheck run: npx depcheck
......
...@@ -20,7 +20,7 @@ packages/contracts/hardhat* ...@@ -20,7 +20,7 @@ packages/contracts/hardhat*
packages/data-transport-layer/db packages/data-transport-layer/db
# vim # vim
*.swp *.sw*
.env .env
.env* .env*
......
...@@ -116,7 +116,7 @@ This will build the following containers: ...@@ -116,7 +116,7 @@ This will build the following containers:
* [`l2geth`](https://hub.docker.com/r/ethereumoptimism/l2geth): L2 geth node running in Sequencer mode * [`l2geth`](https://hub.docker.com/r/ethereumoptimism/l2geth): L2 geth node running in Sequencer mode
* [`verifier`](https://hub.docker.com/r/ethereumoptimism/go-ethereum): L2 geth node running in Verifier mode * [`verifier`](https://hub.docker.com/r/ethereumoptimism/go-ethereum): L2 geth node running in Verifier mode
* [`relayer`](https://hub.docker.com/r/ethereumoptimism/message-relayer): helper process that relays messages between L1 and L2 * [`relayer`](https://hub.docker.com/r/ethereumoptimism/message-relayer): helper process that relays messages between L1 and L2
* [`batch_submitter`](https://hub.docker.com/r/ethereumoptimism/batch-submitter): service that submits batches of Sequencer transactions to the L1 chain * [`batch_submitter`](https://hub.docker.com/r/ethereumoptimism/batch-submitter-service): service that submits batches of Sequencer transactions to the L1 chain
* [`integration_tests`](https://hub.docker.com/r/ethereumoptimism/integration-tests): integration tests in a box * [`integration_tests`](https://hub.docker.com/r/ethereumoptimism/integration-tests): integration tests in a box
If you want to make a change to a container, you'll need to take it down and rebuild it. If you want to make a change to a container, you'll need to take it down and rebuild it.
......
...@@ -35,7 +35,6 @@ root ...@@ -35,7 +35,6 @@ root
│ ├── <a href="./packages/core-utils">core-utils</a>: Low-level utilities that make building Optimism easier │ ├── <a href="./packages/core-utils">core-utils</a>: Low-level utilities that make building Optimism easier
│ ├── <a href="./packages/common-ts">common-ts</a>: Common tools for building apps in TypeScript │ ├── <a href="./packages/common-ts">common-ts</a>: Common tools for building apps in TypeScript
│ ├── <a href="./packages/data-transport-layer">data-transport-layer</a>: Service for indexing Optimism-related L1 data │ ├── <a href="./packages/data-transport-layer">data-transport-layer</a>: Service for indexing Optimism-related L1 data
│ ├── <a href="./packages/batch-submitter">batch-submitter</a>: Service for submitting batches of transactions and results to L1
│ ├── <a href="./packages/message-relayer">message-relayer</a>: Tool for automatically relaying L1<>L2 messages in development │ ├── <a href="./packages/message-relayer">message-relayer</a>: Tool for automatically relaying L1<>L2 messages in development
│ └── <a href="./packages/replica-healthcheck">replica-healthcheck</a>: Service for monitoring the health of a replica node │ └── <a href="./packages/replica-healthcheck">replica-healthcheck</a>: Service for monitoring the health of a replica node
├── <a href="./go">go</a> ├── <a href="./go">go</a>
...@@ -83,6 +82,12 @@ Some exceptions to this rule exist for cases in which we absolutely must deploy ...@@ -83,6 +82,12 @@ Some exceptions to this rule exist for cases in which we absolutely must deploy
If you're changing or adding a contract and you're unsure about which branch to make a PR into, default to using the latest release candidate branch. If you're changing or adding a contract and you're unsure about which branch to make a PR into, default to using the latest release candidate branch.
See below for info about release candidate branches. See below for info about release candidate branches.
### Release new versions
Developers release new versions of the software by adding changesets to their pull requests with `yarn changeset`. Changesets accumulate on the `develop` branch over time without triggering proposed version bumps. Once changesets are merged into `master`, the Changesets bot opens a pull request called "Version Packages" that bumps the package versions. To trigger a release, retarget that pull request's base branch to `develop` and merge it, then open a new pull request merging `develop` into `master`. The `release` workflow then publishes to `npm` and Docker Hub.
Do not merge other pull requests into `develop` while partway through the release process. This can confuse Changesets releases and will require manual intervention to fix.
### Release candidate branches ### Release candidate branches
Branches marked `regenesis/X.X.X` are **release candidate branches**. Branches marked `regenesis/X.X.X` are **release candidate branches**.
......
...@@ -93,7 +93,7 @@ func Main(gitVersion string) func(ctx *cli.Context) error { ...@@ -93,7 +93,7 @@ func Main(gitVersion string) func(ctx *cli.Context) error {
return err return err
} }
l2Client, err := dial.L2EthClientWithTimeout(ctx, cfg.L2EthRpc, cfg.DisableHTTP2) l2Client, err := DialL2EthClientWithTimeout(ctx, cfg.L2EthRpc, cfg.DisableHTTP2)
if err != nil { if err != nil {
return err return err
} }
...@@ -125,6 +125,7 @@ func Main(gitVersion string) func(ctx *cli.Context) error { ...@@ -125,6 +125,7 @@ func Main(gitVersion string) func(ctx *cli.Context) error {
CTCAddr: ctcAddress, CTCAddr: ctcAddress,
ChainID: chainID, ChainID: chainID,
PrivKey: sequencerPrivKey, PrivKey: sequencerPrivKey,
BatchType: sequencer.BatchTypeFromString(cfg.SequencerBatchType),
}) })
if err != nil { if err != nil {
return err return err
......
...@@ -33,6 +33,11 @@ var ( ...@@ -33,6 +33,11 @@ var (
ErrSameSequencerAndProposerPrivKey = errors.New("sequencer-priv-key and " + ErrSameSequencerAndProposerPrivKey = errors.New("sequencer-priv-key and " +
"proposer-priv-key must be distinct") "proposer-priv-key must be distinct")
// ErrInvalidBatchType signals that an unsupported batch type is being
// configured. The default is "legacy" and the options are "legacy" or
// "zlib"
ErrInvalidBatchType = errors.New("invalid batch type")
// ErrSentryDSNNotSet signals that no Data Source Name was provided // ErrSentryDSNNotSet signals that no Data Source Name was provided
// with which to configure Sentry logging. // with which to configure Sentry logging.
ErrSentryDSNNotSet = errors.New("sentry-dsn must be set if use-sentry " + ErrSentryDSNNotSet = errors.New("sentry-dsn must be set if use-sentry " +
...@@ -164,6 +169,9 @@ type Config struct { ...@@ -164,6 +169,9 @@ type Config struct {
// the proposer transactions. // the proposer transactions.
ProposerHDPath string ProposerHDPath string
// SequencerBatchType represents the type of batch the sequencer submits.
SequencerBatchType string
// MetricsServerEnable if true, will create a metrics client and log to // MetricsServerEnable if true, will create a metrics client and log to
// Prometheus. // Prometheus.
MetricsServerEnable bool MetricsServerEnable bool
...@@ -212,6 +220,7 @@ func NewConfig(ctx *cli.Context) (Config, error) { ...@@ -212,6 +220,7 @@ func NewConfig(ctx *cli.Context) (Config, error) {
Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name), Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name),
SequencerHDPath: ctx.GlobalString(flags.SequencerHDPathFlag.Name), SequencerHDPath: ctx.GlobalString(flags.SequencerHDPathFlag.Name),
ProposerHDPath: ctx.GlobalString(flags.ProposerHDPathFlag.Name), ProposerHDPath: ctx.GlobalString(flags.ProposerHDPathFlag.Name),
SequencerBatchType: ctx.GlobalString(flags.SequencerBatchType.Name),
MetricsServerEnable: ctx.GlobalBool(flags.MetricsServerEnableFlag.Name), MetricsServerEnable: ctx.GlobalBool(flags.MetricsServerEnableFlag.Name),
MetricsHostname: ctx.GlobalString(flags.MetricsHostnameFlag.Name), MetricsHostname: ctx.GlobalString(flags.MetricsHostnameFlag.Name),
MetricsPort: ctx.GlobalUint64(flags.MetricsPortFlag.Name), MetricsPort: ctx.GlobalUint64(flags.MetricsPortFlag.Name),
...@@ -265,6 +274,12 @@ func ValidateConfig(cfg *Config) error { ...@@ -265,6 +274,12 @@ func ValidateConfig(cfg *Config) error {
return ErrSameSequencerAndProposerPrivKey return ErrSameSequencerAndProposerPrivKey
} }
usingTypedBatches := cfg.SequencerBatchType != ""
validBatchType := cfg.SequencerBatchType == "legacy" || cfg.SequencerBatchType == "zlib"
if usingTypedBatches && !validBatchType {
return ErrInvalidBatchType
}
// Ensure the Sentry Data Source Name is set when using Sentry. // Ensure the Sentry Data Source Name is set when using Sentry.
if cfg.SentryEnable && cfg.SentryDsn == "" { if cfg.SentryEnable && cfg.SentryDsn == "" {
return ErrSentryDSNNotSet return ErrSentryDSNNotSet
......
package dial package batchsubmitter
import ( import (
"context" "context"
...@@ -6,18 +6,19 @@ import ( ...@@ -6,18 +6,19 @@ import (
"net/http" "net/http"
"strings" "strings"
"github.com/ethereum-optimism/optimism/go/bss-core/dial"
"github.com/ethereum-optimism/optimism/l2geth/ethclient" "github.com/ethereum-optimism/optimism/l2geth/ethclient"
"github.com/ethereum-optimism/optimism/l2geth/log" "github.com/ethereum-optimism/optimism/l2geth/log"
"github.com/ethereum-optimism/optimism/l2geth/rpc" "github.com/ethereum-optimism/optimism/l2geth/rpc"
) )
// L2EthClientWithTimeout attempts to dial the L2 provider using the // DialL2EthClientWithTimeout attempts to dial the L2 provider using the
// provided URL. If the dial doesn't complete within defaultDialTimeout seconds, // provided URL. If the dial doesn't complete within dial.DefaultTimeout,
// this method will return an error. // this method will return an error.
func L2EthClientWithTimeout(ctx context.Context, url string, disableHTTP2 bool) ( func DialL2EthClientWithTimeout(ctx context.Context, url string, disableHTTP2 bool) (
*ethclient.Client, error) { *ethclient.Client, error) {
ctxt, cancel := context.WithTimeout(ctx, defaultDialTimeout) ctxt, cancel := context.WithTimeout(ctx, dial.DefaultTimeout)
defer cancel() defer cancel()
if strings.HasPrefix(url, "http") { if strings.HasPrefix(url, "http") {
......
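For illustration, a hypothetical caller of the renamed dialer (the URL here is an assumption) would look like this; the dial is bounded by dial.DefaultTimeout through context.WithTimeout, so an unreachable L2 endpoint fails fast instead of hanging startup:

    client, err := batchsubmitter.DialL2EthClientWithTimeout(
        context.Background(), "http://localhost:8545", false,
    )
    if err != nil {
        log.Crit("cannot reach L2", "err", err)
    }
    defer client.Close()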
...@@ -78,6 +78,7 @@ func GenSequencerBatchParams( ...@@ -78,6 +78,7 @@ func GenSequencerBatchParams(
shouldStartAtElement uint64, shouldStartAtElement uint64,
blockOffset uint64, blockOffset uint64,
batch []BatchElement, batch []BatchElement,
batchType BatchType,
) (*AppendSequencerBatchParams, error) { ) (*AppendSequencerBatchParams, error) {
var ( var (
...@@ -188,5 +189,6 @@ func GenSequencerBatchParams( ...@@ -188,5 +189,6 @@ func GenSequencerBatchParams(
TotalElementsToAppend: uint64(len(batch)), TotalElementsToAppend: uint64(len(batch)),
Contexts: contexts, Contexts: contexts,
Txs: txs, Txs: txs,
Type: batchType,
}, nil }, nil
} }
...@@ -36,6 +36,7 @@ type Config struct { ...@@ -36,6 +36,7 @@ type Config struct {
CTCAddr common.Address CTCAddr common.Address
ChainID *big.Int ChainID *big.Int
PrivKey *ecdsa.PrivateKey PrivKey *ecdsa.PrivateKey
BatchType BatchType
} }
type Driver struct { type Driver struct {
...@@ -160,7 +161,7 @@ func (d *Driver) CraftBatchTx( ...@@ -160,7 +161,7 @@ func (d *Driver) CraftBatchTx(
name := d.cfg.Name name := d.cfg.Name
log.Info(name+" crafting batch tx", "start", start, "end", end, log.Info(name+" crafting batch tx", "start", start, "end", end,
"nonce", nonce) "nonce", nonce, "type", d.cfg.BatchType.String())
var ( var (
batchElements []BatchElement batchElements []BatchElement
...@@ -195,7 +196,7 @@ func (d *Driver) CraftBatchTx( ...@@ -195,7 +196,7 @@ func (d *Driver) CraftBatchTx(
var pruneCount int var pruneCount int
for { for {
batchParams, err := GenSequencerBatchParams( batchParams, err := GenSequencerBatchParams(
shouldStartAt, d.cfg.BlockOffset, batchElements, shouldStartAt, d.cfg.BlockOffset, batchElements, d.cfg.BatchType,
) )
if err != nil { if err != nil {
return nil, err return nil, err
......
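To see the wiring end to end, a sketch using only names from this diff: the CLI string becomes a sequencer.BatchType in Main, lands on the driver Config, and is forwarded into batch construction:

    // "legacy" or "zlib" (validated in ValidateConfig) -> typed enum
    batchType := sequencer.BatchTypeFromString(cfg.SequencerBatchType)
    driverCfg := sequencer.Config{BatchType: batchType /* other fields elided */}
    // inside CraftBatchTx, per the hunk above:
    //   GenSequencerBatchParams(shouldStartAt, d.cfg.BlockOffset, batchElements, d.cfg.BatchType)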
package sequencer package sequencer
import ( import (
"bufio"
"bytes" "bytes"
"compress/zlib"
"encoding/binary" "encoding/binary"
"errors"
"fmt" "fmt"
"io" "io"
"math" "math"
...@@ -17,7 +20,13 @@ const ( ...@@ -17,7 +20,13 @@ const (
TxLenSize = 3 TxLenSize = 3
) )
var byteOrder = binary.BigEndian var (
// byteOrder represents the endianness used for batch serialization
byteOrder = binary.BigEndian
// ErrMalformedBatch represents a batch that is not well formed
// according to the protocol specification
ErrMalformedBatch = errors.New("malformed batch")
)
// BatchContext denotes a range of transactions that belong to the same batch. It // BatchContext denotes a range of transactions that belong to the same batch. It
// is used to compress shared fields that would otherwise be repeated for each // is used to compress shared fields that would otherwise be repeated for each
...@@ -44,11 +53,14 @@ type BatchContext struct { ...@@ -44,11 +53,14 @@ type BatchContext struct {
// - num_subsequent_queue_txs: 3 bytes // - num_subsequent_queue_txs: 3 bytes
// - timestamp: 5 bytes // - timestamp: 5 bytes
// - block_number: 5 bytes // - block_number: 5 bytes
//
// Note that writing to a bytes.Buffer cannot
// error, so errors are ignored here
func (c *BatchContext) Write(w *bytes.Buffer) { func (c *BatchContext) Write(w *bytes.Buffer) {
writeUint64(w, c.NumSequencedTxs, 3) _ = writeUint64(w, c.NumSequencedTxs, 3)
writeUint64(w, c.NumSubsequentQueueTxs, 3) _ = writeUint64(w, c.NumSubsequentQueueTxs, 3)
writeUint64(w, c.Timestamp, 5) _ = writeUint64(w, c.Timestamp, 5)
writeUint64(w, c.BlockNumber, 5) _ = writeUint64(w, c.BlockNumber, 5)
} }
// Read decodes the BatchContext from the passed reader. If fewer than 16-bytes // Read decodes the BatchContext from the passed reader. If fewer than 16-bytes
...@@ -71,6 +83,45 @@ func (c *BatchContext) Read(r io.Reader) error { ...@@ -71,6 +83,45 @@ func (c *BatchContext) Read(r io.Reader) error {
return readUint64(r, &c.BlockNumber, 5) return readUint64(r, &c.BlockNumber, 5)
} }
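As a concrete check on the layout documented above, a BatchContext always serializes to exactly 16 bytes (3 + 3 + 5 + 5, big-endian); a minimal sketch using the test vector values from further below:

    var buf bytes.Buffer
    bctx := sequencer.BatchContext{
        NumSequencedTxs:       0x000102,
        NumSubsequentQueueTxs: 0x030405,
        Timestamp:             0x060708090a,
        BlockNumber:           0x0b0c0d0e0f,
    }
    bctx.Write(&buf)
    // hex.EncodeToString(buf.Bytes()) == "000102030405060708090a0b0c0d0e0f"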
// BatchType represents the type of batch being
// submitted. When the first context in the batch
// has a timestamp of 0, the blocknumber is interpreted
// as an enum that represents the type
type BatchType int8
// String implements the Stringer interface for BatchType
func (b BatchType) String() string {
switch b {
case BatchTypeLegacy:
return "LEGACY"
case BatchTypeZlib:
return "ZLIB"
default:
return ""
}
}
// BatchTypeFromString returns the BatchType
// enum based on a human readable string
func BatchTypeFromString(s string) BatchType {
switch s {
case "zlib", "ZLIB":
return BatchTypeZlib
case "legacy", "LEGACY":
return BatchTypeLegacy
default:
return BatchTypeLegacy
}
}
const (
// BatchTypeLegacy represents the legacy batch type
BatchTypeLegacy BatchType = -1
// BatchTypeZlib represents a batch type where the
// transaction data is compressed using zlib
BatchTypeZlib BatchType = 0
)
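A small sketch of the parsing behavior: BatchTypeFromString is case-tolerant and falls back to the legacy type for unknown strings rather than erroring (invalid flag values are instead rejected up front by ValidateConfig):

    sequencer.BatchTypeFromString("zlib")   // BatchTypeZlib
    sequencer.BatchTypeFromString("LEGACY") // BatchTypeLegacy
    sequencer.BatchTypeFromString("???")    // BatchTypeLegacy (the default)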
// AppendSequencerBatchParams holds the raw data required to submit a batch of // AppendSequencerBatchParams holds the raw data required to submit a batch of
// L2 txs to the L1 CTC contract. Rather than encoding the objects using the // L2 txs to the L1 CTC contract. Rather than encoding the objects using the
// standard ABI encoding, a custom encoding is used and provided in the call data to // standard ABI encoding, a custom encoding is used and provided in the call data to
...@@ -95,6 +146,9 @@ type AppendSequencerBatchParams struct { ...@@ -95,6 +146,9 @@ type AppendSequencerBatchParams struct {
// Txs contains all sequencer txs that will be recorded in the L1 CTC // Txs contains all sequencer txs that will be recorded in the L1 CTC
// contract. // contract.
Txs []*CachedTx Txs []*CachedTx
// The type of the batch
Type BatchType
} }
// Write encodes the AppendSequencerBatchParams using the following format: // Write encodes the AppendSequencerBatchParams using the following format:
...@@ -105,21 +159,74 @@ type AppendSequencerBatchParams struct { ...@@ -105,21 +159,74 @@ type AppendSequencerBatchParams struct {
// - [num txs omitted] // - [num txs omitted]
// - tx_len: 3 bytes // - tx_len: 3 bytes
// - tx_bytes: tx_len bytes // - tx_bytes: tx_len bytes
//
// Typed batches include a dummy context as the first context
// where the timestamp is 0. The blocknumber is interpreted
// as an enum that defines the type. It is impossible to have
// a timestamp of 0 in practice, so this can safely indicate
// that the batch is typed.
// Type 0 batches have a dummy context where the blocknumber is
// set to 0. The transaction data is compressed with zlib before
// submitting the transaction to the chain. The fields should_start_at_element,
// total_elements_to_append, num_contexts and the contexts themselves
// are not altered.
//
// Note that writing to a bytes.Buffer cannot
// error, so errors are ignored here
func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error { func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error {
writeUint64(w, p.ShouldStartAtElement, 5) _ = writeUint64(w, p.ShouldStartAtElement, 5)
writeUint64(w, p.TotalElementsToAppend, 3) _ = writeUint64(w, p.TotalElementsToAppend, 3)
// There must be contexts if there are transactions
if len(p.Contexts) == 0 && len(p.Txs) != 0 {
return ErrMalformedBatch
}
// There must be transactions if there are contexts
if len(p.Txs) == 0 && len(p.Contexts) != 0 {
return ErrMalformedBatch
}
// copy the contexts so as not to malleate the struct
// when it is a typed batch
contexts := make([]BatchContext, 0, len(p.Contexts)+1)
if p.Type == BatchTypeZlib {
// All zero values for the single batch context
// is desired here as blocknumber 0 means it is a zlib batch
contexts = append(contexts, BatchContext{})
}
contexts = append(contexts, p.Contexts...)
// Write number of contexts followed by each fixed-size BatchContext. // Write number of contexts followed by each fixed-size BatchContext.
writeUint64(w, uint64(len(p.Contexts)), 3) _ = writeUint64(w, uint64(len(contexts)), 3)
for _, context := range p.Contexts { for _, context := range contexts {
context.Write(w) context.Write(w)
} }
switch p.Type {
case BatchTypeLegacy:
// Write each length-prefixed tx. // Write each length-prefixed tx.
for _, tx := range p.Txs { for _, tx := range p.Txs {
writeUint64(w, uint64(tx.Size()), TxLenSize) _ = writeUint64(w, uint64(tx.Size()), TxLenSize)
_, _ = w.Write(tx.RawTx()) // can't fail for bytes.Buffer _, _ = w.Write(tx.RawTx()) // can't fail for bytes.Buffer
} }
case BatchTypeZlib:
zw := zlib.NewWriter(w)
for _, tx := range p.Txs {
if err := writeUint64(zw, uint64(tx.Size()), TxLenSize); err != nil {
return err
}
if _, err := zw.Write(tx.RawTx()); err != nil {
return err
}
}
if err := zw.Close(); err != nil {
return err
}
default:
return fmt.Errorf("Unknown batch type: %s", p.Type)
}
return nil return nil
} }
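Putting the write path together, a sketch of producing a zlib batch (tx is assumed to be some signed L2 transaction; Serialize is the wrapper exercised by the tests below):

    params := sequencer.AppendSequencerBatchParams{
        ShouldStartAtElement:  1,
        TotalElementsToAppend: 1,
        Contexts: []sequencer.BatchContext{
            {NumSequencedTxs: 1, Timestamp: 5, BlockNumber: 10},
        },
        Txs:  []*sequencer.CachedTx{sequencer.NewCachedTx(tx)},
        Type: sequencer.BatchTypeZlib,
    }
    raw, err := params.Serialize()
    // raw: should_start_at || total_elements || num_contexts ||
    //      all-zero marker context || real context || zlib(len-prefixed txs)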
...@@ -159,6 +266,8 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error { ...@@ -159,6 +266,8 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
return err return err
} }
// Ensure that contexts is never nil
p.Contexts = make([]BatchContext, 0)
for i := uint64(0); i < numContexts; i++ { for i := uint64(0); i < numContexts; i++ {
var batchContext BatchContext var batchContext BatchContext
if err := batchContext.Read(r); err != nil { if err := batchContext.Read(r); err != nil {
...@@ -168,14 +277,44 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error { ...@@ -168,14 +277,44 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
p.Contexts = append(p.Contexts, batchContext) p.Contexts = append(p.Contexts, batchContext)
} }
// Assume that it is a legacy batch at first
p.Type = BatchTypeLegacy
// Handle backwards compatible batch types
if len(p.Contexts) > 0 && p.Contexts[0].Timestamp == 0 {
switch p.Contexts[0].BlockNumber {
case 0:
// zlib compressed transaction data
p.Type = BatchTypeZlib
// remove the first dummy context
p.Contexts = p.Contexts[1:]
numContexts--
zr, err := zlib.NewReader(r)
if err != nil {
return err
}
defer zr.Close()
r = bufio.NewReader(zr)
}
}
// Deserialize any transactions. Since the number of txs is omitted // Deserialize any transactions. Since the number of txs is omitted
// from the encoding, loop until the stream is consumed. // from the encoding, loop until the stream is consumed.
for { for {
var txLen uint64 var txLen uint64
err := readUint64(r, &txLen, TxLenSize) err := readUint64(r, &txLen, TxLenSize)
// Getting an EOF when reading the txLen expected for a cleanly // Getting an EOF when reading the txLen expected for a cleanly
// encoded object. Silece the error and return success. // encoded object. Silence the error and return success if
// the batch is well formed.
if err == io.EOF { if err == io.EOF {
if len(p.Contexts) == 0 && len(p.Txs) != 0 {
return ErrMalformedBatch
}
if len(p.Txs) == 0 && len(p.Contexts) != 0 {
return ErrMalformedBatch
}
return nil return nil
} else if err != nil { } else if err != nil {
return err return err
...@@ -188,10 +327,11 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error { ...@@ -188,10 +327,11 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
p.Txs = append(p.Txs, NewCachedTx(tx)) p.Txs = append(p.Txs, NewCachedTx(tx))
} }
} }
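And the matching read path, as a sketch: Read assumes a legacy batch, then switches to zlib only when the first context has a zero timestamp and zero block number, stripping that marker context back out of p.Contexts:

    var decoded sequencer.AppendSequencerBatchParams
    if err := decoded.Read(bytes.NewReader(raw)); err != nil {
        // truncated or inconsistent payloads surface here,
        // including ErrMalformedBatch
    }
    // decoded.Type == sequencer.BatchTypeZlib for the payload built above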
// writeUint64 writes the bottom `n` bytes of `val` to `w`. // writeUint64 writes the bottom `n` bytes of `val` to `w`.
func writeUint64(w *bytes.Buffer, val uint64, n uint) { func writeUint64(w io.Writer, val uint64, n uint) error {
if n < 1 || n > 8 { if n < 1 || n > 8 {
panic(fmt.Sprintf("invalid number of bytes %d must be 1-8", n)) panic(fmt.Sprintf("invalid number of bytes %d must be 1-8", n))
} }
...@@ -204,7 +344,8 @@ func writeUint64(w *bytes.Buffer, val uint64, n uint) { ...@@ -204,7 +344,8 @@ func writeUint64(w *bytes.Buffer, val uint64, n uint) {
var buf [8]byte var buf [8]byte
byteOrder.PutUint64(buf[:], val) byteOrder.PutUint64(buf[:], val)
_, _ = w.Write(buf[8-n:]) // can't fail for bytes.Buffer _, err := w.Write(buf[8-n:])
return err
} }
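For example, within the package, writeUint64 emits only the low n bytes of val in big-endian order:

    var buf bytes.Buffer
    _ = writeUint64(&buf, 0x0b0c0d0e0f, 5) // cannot fail for a bytes.Buffer
    // buf now holds the five bytes 0b 0c 0d 0e 0f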
// readUint64 reads `n` bytes from `r` and returns them in the lower `n` bytes // readUint64 reads `n` bytes from `r` and returns them in the lower `n` bytes
......
...@@ -65,167 +65,21 @@ type AppendSequencerBatchParamsTest struct { ...@@ -65,167 +65,21 @@ type AppendSequencerBatchParamsTest struct {
TotalElementsToAppend uint64 `json:"total_elements_to_append"` TotalElementsToAppend uint64 `json:"total_elements_to_append"`
Contexts []sequencer.BatchContext `json:"contexts"` Contexts []sequencer.BatchContext `json:"contexts"`
Txs []string `json:"txs"` Txs []string `json:"txs"`
Error bool `json:"error"`
} }
var appendSequencerBatchParamTests = AppendSequencerBatchParamsTestCases{ var appendSequencerBatchParamTests = AppendSequencerBatchParamsTestCases{}
Tests: []AppendSequencerBatchParamsTest{
{
Name: "empty batch",
HexEncoding: "0000000000000000" +
"000000",
ShouldStartAtElement: 0,
TotalElementsToAppend: 0,
Contexts: nil,
Txs: nil,
},
{
Name: "single tx",
HexEncoding: "0000000001000001" +
"000000" +
"00000ac9808080808080808080",
ShouldStartAtElement: 1,
TotalElementsToAppend: 1,
Contexts: nil,
Txs: []string{
"c9808080808080808080",
},
},
{
Name: "multiple txs",
HexEncoding: "0000000001000004" +
"000000" +
"00000ac9808080808080808080" +
"00000ac9808080808080808080" +
"00000ac9808080808080808080" +
"00000ac9808080808080808080",
ShouldStartAtElement: 1,
TotalElementsToAppend: 4,
Contexts: nil,
Txs: []string{
"c9808080808080808080",
"c9808080808080808080",
"c9808080808080808080",
"c9808080808080808080",
},
},
{
Name: "single context",
HexEncoding: "0000000001000000" +
"000001" +
"000102030405060708090a0b0c0d0e0f",
ShouldStartAtElement: 1,
TotalElementsToAppend: 0,
Contexts: []sequencer.BatchContext{
{
NumSequencedTxs: 0x000102,
NumSubsequentQueueTxs: 0x030405,
Timestamp: 0x060708090a,
BlockNumber: 0x0b0c0d0e0f,
},
},
Txs: nil,
},
{
Name: "multiple contexts",
HexEncoding: "0000000001000000" +
"000004" +
"000102030405060708090a0b0c0d0e0f" +
"000102030405060708090a0b0c0d0e0f" +
"000102030405060708090a0b0c0d0e0f" +
"000102030405060708090a0b0c0d0e0f",
ShouldStartAtElement: 1,
TotalElementsToAppend: 0,
Contexts: []sequencer.BatchContext{
{
NumSequencedTxs: 0x000102,
NumSubsequentQueueTxs: 0x030405,
Timestamp: 0x060708090a,
BlockNumber: 0x0b0c0d0e0f,
},
{
NumSequencedTxs: 0x000102,
NumSubsequentQueueTxs: 0x030405,
Timestamp: 0x060708090a,
BlockNumber: 0x0b0c0d0e0f,
},
{
NumSequencedTxs: 0x000102,
NumSubsequentQueueTxs: 0x030405,
Timestamp: 0x060708090a,
BlockNumber: 0x0b0c0d0e0f,
},
{
NumSequencedTxs: 0x000102,
NumSubsequentQueueTxs: 0x030405,
Timestamp: 0x060708090a,
BlockNumber: 0x0b0c0d0e0f,
},
},
Txs: nil,
},
{
Name: "complex",
HexEncoding: "0102030405060708" +
"000004" +
"000102030405060708090a0b0c0d0e0f" +
"000102030405060708090a0b0c0d0e0f" +
"000102030405060708090a0b0c0d0e0f" +
"000102030405060708090a0b0c0d0e0f" +
"00000ac9808080808080808080" +
"00000ac9808080808080808080" +
"00000ac9808080808080808080" +
"00000ac9808080808080808080",
ShouldStartAtElement: 0x0102030405,
TotalElementsToAppend: 0x060708,
Contexts: []sequencer.BatchContext{
{
NumSequencedTxs: 0x000102,
NumSubsequentQueueTxs: 0x030405,
Timestamp: 0x060708090a,
BlockNumber: 0x0b0c0d0e0f,
},
{
NumSequencedTxs: 0x000102,
NumSubsequentQueueTxs: 0x030405,
Timestamp: 0x060708090a,
BlockNumber: 0x0b0c0d0e0f,
},
{
NumSequencedTxs: 0x000102,
NumSubsequentQueueTxs: 0x030405,
Timestamp: 0x060708090a,
BlockNumber: 0x0b0c0d0e0f,
},
{
NumSequencedTxs: 0x000102,
NumSubsequentQueueTxs: 0x030405,
Timestamp: 0x060708090a,
BlockNumber: 0x0b0c0d0e0f,
},
},
Txs: []string{
"c9808080808080808080",
"c9808080808080808080",
"c9808080808080808080",
"c9808080808080808080",
},
},
},
}
// TestAppendSequencerBatchParamsEncodeDecodeMatchesJSON ensures that the
// in-memory test vectors for valid encode/decode stay in sync with the JSON
// version.
func TestAppendSequencerBatchParamsEncodeDecodeMatchesJSON(t *testing.T) {
t.Parallel()
jsonBytes, err := json.MarshalIndent(appendSequencerBatchParamTests, "", "\t")
require.Nil(t, err)
func init() {
data, err := os.ReadFile("./testdata/valid_append_sequencer_batch_params.json") data, err := os.ReadFile("./testdata/valid_append_sequencer_batch_params.json")
require.Nil(t, err) if err != nil {
panic(err)
}
require.Equal(t, jsonBytes, data) err = json.Unmarshal(data, &appendSequencerBatchParamTests)
if err != nil {
panic(err)
}
} }
// TestAppendSequencerBatchParamsEncodeDecode asserts the proper encoding and // TestAppendSequencerBatchParamsEncodeDecode asserts the proper encoding and
...@@ -265,6 +119,7 @@ func testAppendSequencerBatchParamsEncodeDecode( ...@@ -265,6 +119,7 @@ func testAppendSequencerBatchParamsEncodeDecode(
TotalElementsToAppend: test.TotalElementsToAppend, TotalElementsToAppend: test.TotalElementsToAppend,
Contexts: test.Contexts, Contexts: test.Contexts,
Txs: nil, Txs: nil,
Type: sequencer.BatchTypeLegacy,
} }
// Decode the batch from the test string. // Decode the batch from the test string.
...@@ -273,7 +128,12 @@ func testAppendSequencerBatchParamsEncodeDecode( ...@@ -273,7 +128,12 @@ func testAppendSequencerBatchParamsEncodeDecode(
var params sequencer.AppendSequencerBatchParams var params sequencer.AppendSequencerBatchParams
err = params.Read(bytes.NewReader(rawBytes)) err = params.Read(bytes.NewReader(rawBytes))
if test.Error {
require.ErrorIs(t, err, sequencer.ErrMalformedBatch)
} else {
require.Nil(t, err) require.Nil(t, err)
}
require.Equal(t, params.Type, sequencer.BatchTypeLegacy)
// Assert that the decoded params match the expected params. The // Assert that the decoded params match the expected params. The
// transactions are compared separately (via hash), since the internal // transactions are compared separately (via hash), since the internal
...@@ -290,8 +150,34 @@ func testAppendSequencerBatchParamsEncodeDecode( ...@@ -290,8 +150,34 @@ func testAppendSequencerBatchParamsEncodeDecode(
// Finally, encode the decoded object and assert it matches the original // Finally, encode the decoded object and assert it matches the original
// hex string. // hex string.
paramsBytes, err := params.Serialize() paramsBytes, err := params.Serialize()
// Return early when testing error cases; no need to re-serialize
if test.Error {
require.ErrorIs(t, err, sequencer.ErrMalformedBatch)
return
}
require.Nil(t, err) require.Nil(t, err)
require.Equal(t, test.HexEncoding, hex.EncodeToString(paramsBytes)) require.Equal(t, test.HexEncoding, hex.EncodeToString(paramsBytes))
// Serialize the batches in compressed form
params.Type = sequencer.BatchTypeZlib
compressedParamsBytes, err := params.Serialize()
require.Nil(t, err)
// Deserialize the compressed batch
var paramsCompressed sequencer.AppendSequencerBatchParams
err = paramsCompressed.Read(bytes.NewReader(compressedParamsBytes))
require.Nil(t, err)
require.Equal(t, paramsCompressed.Type, sequencer.BatchTypeZlib)
expParams.Type = sequencer.BatchTypeZlib
decompressedTxs := paramsCompressed.Txs
paramsCompressed.Txs = nil
require.Equal(t, expParams, paramsCompressed)
compareTxs(t, expTxs, decompressedTxs)
paramsCompressed.Txs = decompressedTxs
} }
// compareTxs compares a list of two transactions, testing each pair by tx hash. // compareTxs compares a list of two transactions, testing each pair by tx hash.
......
...@@ -194,6 +194,12 @@ var ( ...@@ -194,6 +194,12 @@ var (
"mnemonic. The mnemonic flag must also be set.", "mnemonic. The mnemonic flag must also be set.",
EnvVar: prefixEnvVar("PROPOSER_HD_PATH"), EnvVar: prefixEnvVar("PROPOSER_HD_PATH"),
} }
SequencerBatchType = cli.StringFlag{
Name: "sequencer-batch-type",
Usage: "The type of sequencer batch to be submitted. Valid arguments are legacy or zlib.",
Value: "legacy",
EnvVar: prefixEnvVar("SEQUENCER_BATCH_TYPE"),
}
MetricsServerEnableFlag = cli.BoolFlag{ MetricsServerEnableFlag = cli.BoolFlag{
Name: "metrics-server-enable", Name: "metrics-server-enable",
Usage: "Whether or not to run the embedded metrics server", Usage: "Whether or not to run the embedded metrics server",
...@@ -245,6 +251,7 @@ var optionalFlags = []cli.Flag{ ...@@ -245,6 +251,7 @@ var optionalFlags = []cli.Flag{
SentryDsnFlag, SentryDsnFlag,
SentryTraceRateFlag, SentryTraceRateFlag,
BlockOffsetFlag, BlockOffsetFlag,
SequencerBatchType,
SequencerPrivateKeyFlag, SequencerPrivateKeyFlag,
ProposerPrivateKeyFlag, ProposerPrivateKeyFlag,
MnemonicFlag, MnemonicFlag,
......
...@@ -3,7 +3,7 @@ package dial ...@@ -3,7 +3,7 @@ package dial
import "time" import "time"
const ( const (
// defaultDialTimeout is default duration the service will wait on // DefaultTimeout is the default duration the service will wait on startup to
// startup to make a connection to either the L1 or L2 backends. // make a connection to either the L1 or L2 backends.
defaultDialTimeout = 5 * time.Second DefaultTimeout = 5 * time.Second
) )
...@@ -12,12 +12,12 @@ import ( ...@@ -12,12 +12,12 @@ import (
) )
// L1EthClientWithTimeout attempts to dial the L1 provider using the // L1EthClientWithTimeout attempts to dial the L1 provider using the
// provided URL. If the dial doesn't complete within defaultDialTimeout seconds, // provided URL. If the dial doesn't complete within DefaultTimeout,
// this method will return an error. // this method will return an error.
func L1EthClientWithTimeout(ctx context.Context, url string, disableHTTP2 bool) ( func L1EthClientWithTimeout(ctx context.Context, url string, disableHTTP2 bool) (
*ethclient.Client, error) { *ethclient.Client, error) {
ctxt, cancel := context.WithTimeout(ctx, defaultDialTimeout) ctxt, cancel := context.WithTimeout(ctx, DefaultTimeout)
defer cancel() defer cancel()
if strings.HasPrefix(url, "http") { if strings.HasPrefix(url, "http") {
......
...@@ -3,13 +3,11 @@ module github.com/ethereum-optimism/optimism/go/bss-core ...@@ -3,13 +3,11 @@ module github.com/ethereum-optimism/optimism/go/bss-core
go 1.16 go 1.16
require ( require (
github.com/btcsuite/btcd v0.22.0-beta // indirect
github.com/decred/dcrd/hdkeychain/v3 v3.0.0 github.com/decred/dcrd/hdkeychain/v3 v3.0.0
github.com/ethereum-optimism/optimism/l2geth v1.0.0
github.com/ethereum/go-ethereum v1.10.12 github.com/ethereum/go-ethereum v1.10.12
github.com/getsentry/sentry-go v0.11.0 github.com/getsentry/sentry-go v0.11.0
github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_golang v1.11.0
github.com/stretchr/testify v1.7.0 github.com/stretchr/testify v1.7.0
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef
) )
replace github.com/ethereum-optimism/optimism/l2geth => ../../l2geth
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
GITVERSION := $(shell cat package.json | jq .version)
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
LDFLAGSSTRING +=-X main.GitDate=$(GITDATE)
LDFLAGSSTRING +=-X main.GitVersion=$(GITVERSION)
LDFLAGS := -ldflags "$(LDFLAGSSTRING)"
DEPOSIT_ARTIFACT := ../../packages/contracts/artifacts/contracts/L1/teleportr/TeleportrDeposit.sol/TeleportrDeposit.json
DISBURSER_ARTIFACT := ../../packages/contracts/artifacts/contracts/L2/teleportr/TeleportrDisburser.sol/TeleportrDisburser.json
teleportr:
env GO111MODULE=on go build -v $(LDFLAGS) ./cmd/teleportr
clean:
rm teleportr
test:
go test -v ./...
lint:
golangci-lint run ./...
bindings: bindings-deposit bindings-disburser
bindings-deposit:
$(eval temp := $(shell mktemp))
cat $(DEPOSIT_ARTIFACT) | jq -r .bytecode > $(temp)
cat $(DEPOSIT_ARTIFACT) | jq .abi | \
abigen \
--pkg deposit \
--abi - \
--out bindings/deposit/teleportr_deposit.go \
--type TeleportrDeposit \
--bin $(temp)
bindings-disburser:
$(eval temp := $(shell mktemp))
cat $(DISBURSER_ARTIFACT) | jq -r .bytecode > $(temp)
cat $(DISBURSER_ARTIFACT) | jq .abi | \
abigen \
--pkg disburse \
--abi - \
--out bindings/disburse/teleportr_disburser.go \
--type TeleportrDisburser \
--bin $(temp)
.PHONY: \
teleportr \
bindings \
bindings-deposit \
bindings-disburser \
clean \
test \
lint
package main
import (
"fmt"
"os"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/go/teleportr"
"github.com/ethereum-optimism/optimism/go/teleportr/flags"
)
var (
GitVersion = ""
GitCommit = ""
GitDate = ""
)
func main() {
// Set up logger with a default INFO level in case we fail to parse flags.
// Otherwise the final critical log won't show what the parsing error was.
log.Root().SetHandler(
log.LvlFilterHandler(
log.LvlInfo,
log.StreamHandler(os.Stdout, log.TerminalFormat(true)),
),
)
app := cli.NewApp()
app.Flags = flags.Flags
app.Version = fmt.Sprintf("%s-%s-%s", GitVersion, GitCommit, GitDate)
app.Name = "teleportr"
app.Usage = "Teleportr"
app.Description = "Teleportr bridge from L1 to L2"
app.Action = teleportr.Main(GitVersion)
err := app.Run(os.Args)
if err != nil {
log.Crit("Application failed", "message", err)
}
}
package teleportr
import (
"time"
"github.com/ethereum-optimism/optimism/go/teleportr/flags"
"github.com/urfave/cli"
)
type Config struct {
/* Required Params */
// BuildEnv identifies the environment this binary is intended for, i.e.
// production, development, etc.
BuildEnv string
// EthNetworkName identifies the intended Ethereum network.
EthNetworkName string
// L1EthRpc is the HTTP provider URL for L1.
L1EthRpc string
// L2EthRpc is the HTTP provider URL for L2.
L2EthRpc string
// DepositAddress is the TeleportrDeposit contract address.
DepositAddress string
// DepositDeployBlockNumber is the deployment block number of the
// TeleportrDeposit contract.
DepositDeployBlockNumber uint64
// FilterQueryMaxBlocks is the maximum range of a filter query in blocks.
FilterQueryMaxBlocks uint64
// DisburserAddress is the TeleportrDisburser contract address.
DisburserAddress string
// MaxL2TxSize is the maximum size in bytes of any L2 transactions generated
// for teleportr disbursements.
MaxL2TxSize uint64
// NumDepositConfirmations is the number of confirmations required before a
// deposit is considered confirmed.
NumDepositConfirmations uint64
// PollInterval is the delay between querying L2 for more transactions
// and creating a new batch.
PollInterval time.Duration
// SafeAbortNonceTooLowCount is the number of ErrNonceTooLow observations
// required to give up on a tx at a particular nonce without receiving
// confirmation.
SafeAbortNonceTooLowCount uint64
// ResubmissionTimeout is the time we will wait before resubmitting a
// transaction.
ResubmissionTimeout time.Duration
// PostgresHost is the host of the teleportr postgres instance.
PostgresHost string
// PostgresPort is the port of the teleportr postgres instance.
PostgresPort uint16
// PostgresUser is the username for the teleportr postgres instance.
PostgresUser string
// PostgresPassword is the password for the teleportr postgres instance.
PostgresPassword string
// PostgresDBName is the database name of the teleportr postgres instance.
PostgresDBName string
// PostgresEnableSSL determines whether or not to enable SSL on connections
// to the teleportr postgres instance.
PostgresEnableSSL bool
/* Optional Params */
// LogLevel is the lowest log level that will be output.
LogLevel string
// LogTerminal if true, prints to stdout in terminal format, otherwise
// prints using JSON. If SentryEnable is true this flag is ignored, and logs
// are printed using JSON.
LogTerminal bool
// DisburserPrivKey the private key of the wallet used to submit
// transactions to the TeleportrDisburser contract.
DisburserPrivKey string
// Mnemonic is the HD seed used to derive the wallet private key for
// submitting to the TeleportrDisburser. Must be used in conjunction with
// DisburserHDPath.
Mnemonic string
// DisburserHDPath is the derivation path used to obtain the private key for
// the disburser transactions.
DisburserHDPath string
// MetricsServerEnable if true, will create a metrics client and log to
// Prometheus.
MetricsServerEnable bool
// MetricsHostname is the hostname at which the metrics server is running.
MetricsHostname string
// MetricsPort is the port at which the metrics server is running.
MetricsPort uint64
// DisableHTTP2 disables HTTP2 support.
DisableHTTP2 bool
}
func NewConfig(ctx *cli.Context) (Config, error) {
return Config{
/* Required Flags */
BuildEnv: ctx.GlobalString(flags.BuildEnvFlag.Name),
EthNetworkName: ctx.GlobalString(flags.EthNetworkNameFlag.Name),
L1EthRpc: ctx.GlobalString(flags.L1EthRpcFlag.Name),
L2EthRpc: ctx.GlobalString(flags.L2EthRpcFlag.Name),
DepositAddress: ctx.GlobalString(flags.DepositAddressFlag.Name),
DepositDeployBlockNumber: ctx.GlobalUint64(flags.DepositDeployBlockNumberFlag.Name),
DisburserAddress: ctx.GlobalString(flags.DisburserAddressFlag.Name),
MaxL2TxSize: ctx.GlobalUint64(flags.MaxL2TxSizeFlag.Name),
NumDepositConfirmations: ctx.GlobalUint64(flags.NumDepositConfirmationsFlag.Name),
FilterQueryMaxBlocks: ctx.GlobalUint64(flags.FilterQueryMaxBlocksFlag.Name),
PollInterval: ctx.GlobalDuration(flags.PollIntervalFlag.Name),
SafeAbortNonceTooLowCount: ctx.GlobalUint64(flags.SafeAbortNonceTooLowCountFlag.Name),
ResubmissionTimeout: ctx.GlobalDuration(flags.ResubmissionTimeoutFlag.Name),
PostgresHost: ctx.GlobalString(flags.PostgresHostFlag.Name),
PostgresPort: uint16(ctx.GlobalUint64(flags.PostgresPortFlag.Name)),
PostgresUser: ctx.GlobalString(flags.PostgresUserFlag.Name),
PostgresPassword: ctx.GlobalString(flags.PostgresPasswordFlag.Name),
PostgresDBName: ctx.GlobalString(flags.PostgresDBNameFlag.Name),
PostgresEnableSSL: ctx.GlobalBool(flags.PostgresEnableSSLFlag.Name),
/* Optional flags */
LogLevel: ctx.GlobalString(flags.LogLevelFlag.Name),
LogTerminal: ctx.GlobalBool(flags.LogTerminalFlag.Name),
DisburserPrivKey: ctx.GlobalString(flags.DisburserPrivateKeyFlag.Name),
Mnemonic: ctx.GlobalString(flags.MnemonicFlag.Name),
DisburserHDPath: ctx.GlobalString(flags.DisburserHDPathFlag.Name),
MetricsServerEnable: ctx.GlobalBool(flags.MetricsServerEnableFlag.Name),
MetricsHostname: ctx.GlobalString(flags.MetricsHostnameFlag.Name),
MetricsPort: ctx.GlobalUint64(flags.MetricsPortFlag.Name),
DisableHTTP2: ctx.GlobalBool(flags.HTTP2DisableFlag.Name),
}, nil
}
...@@ -24,9 +24,9 @@ var ( ...@@ -24,9 +24,9 @@ var (
// Deposit represents an event emitted from the TeleportrDeposit contract on L1, // Deposit represents an event emitted from the TeleportrDeposit contract on L1,
// along with additional info about the tx that generated the event. // along with additional info about the tx that generated the event.
type Deposit struct { type Deposit struct {
ID int64 ID uint64
TxnHash common.Hash TxnHash common.Hash
BlockNumber int64 BlockNumber uint64
BlockTimestamp time.Time BlockTimestamp time.Time
Address common.Address Address common.Address
Amount *big.Int Amount *big.Int
...@@ -35,16 +35,17 @@ type Deposit struct { ...@@ -35,16 +35,17 @@ type Deposit struct {
// ConfirmationInfo holds metadata about a tx on either the L1 or L2 chain. // ConfirmationInfo holds metadata about a tx on either the L1 or L2 chain.
type ConfirmationInfo struct { type ConfirmationInfo struct {
TxnHash common.Hash TxnHash common.Hash
BlockNumber int64 BlockNumber uint64
BlockTimestamp time.Time BlockTimestamp time.Time
} }
// CompletedTeleport represents an L1 deposit that has been disbursed on L2. The // CompletedTeleport represents an L1 deposit that has been disbursed on L2. The
// struct also holds info about the L1 and L2 txns involved. // struct also holds info about the L1 and L2 txns involved.
type CompletedTeleport struct { type CompletedTeleport struct {
ID int64 ID uint64
Address common.Address Address common.Address
Amount *big.Int Amount *big.Int
Success bool
Deposit ConfirmationInfo Deposit ConfirmationInfo
Disbursement ConfirmationInfo Disbursement ConfirmationInfo
} }
...@@ -65,13 +66,32 @@ CREATE TABLE IF NOT EXISTS disbursements ( ...@@ -65,13 +66,32 @@ CREATE TABLE IF NOT EXISTS disbursements (
id INT8 NOT NULL PRIMARY KEY REFERENCES deposits(id), id INT8 NOT NULL PRIMARY KEY REFERENCES deposits(id),
txn_hash VARCHAR NOT NULL, txn_hash VARCHAR NOT NULL,
block_number INT8 NOT NULL, block_number INT8 NOT NULL,
block_timestamp TIMESTAMPTZ NOT NULL block_timestamp TIMESTAMPTZ NOT NULL,
success BOOL NOT NULL
);
`
const lastProcessedBlockTable = `
CREATE TABLE IF NOT EXISTS last_processed_block (
id BOOL PRIMARY KEY DEFAULT TRUE,
value INT8 NOT NULL,
CONSTRAINT id CHECK (id)
);
`
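The BOOL primary key defaulting to TRUE plus CONSTRAINT id CHECK (id) caps this table at a single row, so the upsert defined further down (upsertLastProcessedBlock) always targets that one row; a sketch of the effect, assuming a connected *Database d:

    // First call inserts the row; later calls conflict on id (always TRUE)
    // and fall through to the UPDATE arm, overwriting value in place.
    _, err := d.conn.Exec(upsertLastProcessedBlock, blockNumber)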
const pendingTxTable = `
CREATE TABLE IF NOT EXISTS pending_txs (
txn_hash VARCHAR NOT NULL PRIMARY KEY,
start_id INT8 NOT NULL,
end_id INT8 NOT NULL
); );
` `
var migrations = []string{ var migrations = []string{
createDepositsTable, createDepositsTable,
createDisbursementsTable, createDisbursementsTable,
lastProcessedBlockTable,
pendingTxTable,
} }
// Config houses the data required to connect to a Postgres backend. // Config houses the data required to connect to a Postgres backend.
...@@ -155,6 +175,13 @@ func (d *Database) Close() error { ...@@ -155,6 +175,13 @@ func (d *Database) Close() error {
return d.conn.Close() return d.conn.Close()
} }
const upsertLastProcessedBlock = `
INSERT INTO last_processed_block (value)
VALUES ($1)
ON CONFLICT (id) DO UPDATE
SET value = $1
`
const upsertDepositStatement = ` const upsertDepositStatement = `
INSERT INTO deposits (id, txn_hash, block_number, block_timestamp, address, amount) INSERT INTO deposits (id, txn_hash, block_number, block_timestamp, address, amount)
VALUES ($1, $2, $3, $4, $5, $6) VALUES ($1, $2, $3, $4, $5, $6)
...@@ -164,10 +191,10 @@ SET (txn_hash, block_number, block_timestamp, address, amount) = ($2, $3, $4, $5 ...@@ -164,10 +191,10 @@ SET (txn_hash, block_number, block_timestamp, address, amount) = ($2, $3, $4, $5
// UpsertDeposits inserts a list of deposits into the database, or updates an // UpsertDeposits inserts a list of deposits into the database, or updates an
// existing deposit in place if the same ID is found. // existing deposit in place if the same ID is found.
func (d *Database) UpsertDeposits(deposits []Deposit) error { func (d *Database) UpsertDeposits(
if len(deposits) == 0 { deposits []Deposit,
return nil lastProcessedBlock uint64,
} ) error {
// Sanity check deposits. // Sanity check deposits.
for _, deposit := range deposits { for _, deposit := range deposits {
...@@ -180,10 +207,11 @@ func (d *Database) UpsertDeposits(deposits []Deposit) error { ...@@ -180,10 +207,11 @@ func (d *Database) UpsertDeposits(deposits []Deposit) error {
if err != nil { if err != nil {
return err return err
} }
defer tx.Rollback() defer func() {
_ = tx.Rollback()
}()
for _, deposit := range deposits { for _, deposit := range deposits {
_, err = tx.Exec( _, err = tx.Exec(
upsertDepositStatement, upsertDepositStatement,
deposit.ID, deposit.ID,
...@@ -198,29 +226,30 @@ func (d *Database) UpsertDeposits(deposits []Deposit) error { ...@@ -198,29 +226,30 @@ func (d *Database) UpsertDeposits(deposits []Deposit) error {
} }
} }
_, err = tx.Exec(upsertLastProcessedBlock, lastProcessedBlock)
if err != nil {
return err
}
return tx.Commit() return tx.Commit()
} }
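A design note on the change above: deposits and the last-processed-block cursor now commit in one database transaction, so a crash cannot record deposits without advancing the cursor, or advance the cursor past unrecorded deposits. A sketch of a hypothetical caller (the header variable is an assumption):

    // Persist a scanned block range atomically with its cursor update.
    if err := db.UpsertDeposits(deposits, header.Number.Uint64()); err != nil {
        return err
    }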
const latestDepositQuery = ` const lastProcessedBlockQuery = `
SELECT block_number FROM deposits SELECT value FROM last_processed_block
ORDER BY block_number DESC
LIMIT 1
` `
// LatestDeposit returns the block number of the latest deposit known to the func (d *Database) LastProcessedBlock() (*uint64, error) {
// database. row := d.conn.QueryRow(lastProcessedBlockQuery)
func (d *Database) LatestDeposit() (*int64, error) {
row := d.conn.QueryRow(latestDepositQuery)
var latestTransfer int64 var lastProcessedBlock uint64
err := row.Scan(&latestTransfer) err := row.Scan(&lastProcessedBlock)
if err == sql.ErrNoRows { if err == sql.ErrNoRows {
return nil, nil return nil, nil
} else if err != nil { } else if err != nil {
return nil, err return nil, err
} }
return &latestTransfer, nil return &lastProcessedBlock, nil
} }
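A nil result with a nil error now means "nothing processed yet", as distinct from a real block number; a hypothetical caller can fall back to the contract's deployment height:

    last, err := db.LastProcessedBlock()
    if err != nil {
        return err
    }
    start := cfg.DepositDeployBlockNumber // assumed fallback
    if last != nil {
        start = *last + 1
    }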
const confirmedDepositsQuery = `
@@ -233,7 +262,7 @@ ORDER BY dep.id ASC

// ConfirmedDeposits returns the set of all deposits that have sufficient
// confirmation, but do not have a recorded disbursement.
func (d *Database) ConfirmedDeposits(blockNumber, confirmations uint64) ([]Deposit, error) {
	rows, err := d.conn.Query(confirmedDepositsQuery, confirmations, blockNumber)
	if err != nil {
		return nil, err
@@ -275,20 +304,43 @@ func (d *Database) ConfirmedDeposits(blockNumber, confirmations int64) ([]Deposit, error) {
	return deposits, nil
}
const latestDisbursementIDQuery = `
SELECT id FROM disbursements
ORDER BY id DESC
LIMIT 1
`
// LatestDisbursementID returns the latest deposit id known to the database that
// has a recorded disbursement.
func (d *Database) LatestDisbursementID() (*uint64, error) {
row := d.conn.QueryRow(latestDisbursementIDQuery)
var latestDisbursementID uint64
err := row.Scan(&latestDisbursementID)
if err == sql.ErrNoRows {
return nil, nil
} else if err != nil {
return nil, err
}
return &latestDisbursementID, nil
}
const markDisbursedStatement = `
INSERT INTO disbursements (id, txn_hash, block_number, block_timestamp, success)
VALUES ($1, $2, $3, $4, $5)
ON CONFLICT (id) DO UPDATE
SET (txn_hash, block_number, block_timestamp, success) = ($2, $3, $4, $5)
`

// UpsertDisbursement inserts a disbursement, or updates an existing record
// in-place if the ID already exists.
func (d *Database) UpsertDisbursement(
	id uint64,
	txnHash common.Hash,
	blockNumber uint64,
	blockTimestamp time.Time,
	success bool,
) error {
	if blockTimestamp.IsZero() {
		return ErrZeroTimestamp
@@ -300,6 +352,7 @@ func (d *Database) UpsertDisbursement(
		txnHash.String(),
		blockNumber,
		blockTimestamp,
		success,
	)
	if err != nil {
		if strings.Contains(err.Error(), "violates foreign key constraint") {
@@ -320,7 +373,7 @@ func (d *Database) UpsertDisbursement(

const completedTeleportsQuery = `
SELECT
	dep.id, dep.address, dep.amount, dis.success,
	dep.txn_hash, dep.block_number, dep.block_timestamp,
	dis.txn_hash, dis.block_number, dis.block_timestamp
FROM deposits AS dep, disbursements AS dis
@@ -348,6 +401,7 @@ func (d *Database) CompletedTeleports() ([]CompletedTeleport, error) {
		&teleport.ID,
		&addressStr,
		&amountStr,
		&teleport.Success,
		&depTxnHashStr,
		&teleport.Deposit.BlockNumber,
		&teleport.Deposit.BlockTimestamp,
@@ -377,3 +431,88 @@ func (d *Database) CompletedTeleports() ([]CompletedTeleport, error) {
	return teleports, nil
}
// PendingTx encapsulates the metadata stored about published disbursement txs.
type PendingTx struct {
// TxHash is the tx hash of the disbursement tx.
TxHash common.Hash
// StartID is the deposit id of the first disbursement, inclusive.
StartID uint64
// EndID is the deposit id of the last disbursement, exclusive.
EndID uint64
}
const upsertPendingTxStatement = `
INSERT INTO pending_txs (txn_hash, start_id, end_id)
VALUES ($1, $2, $3)
ON CONFLICT (txn_hash) DO UPDATE
SET (start_id, end_id) = ($2, $3)
`
// UpsertPendingTx inserts a pending tx, or updates the entry if the TxHash
// already exists.
func (d *Database) UpsertPendingTx(pendingTx PendingTx) error {
_, err := d.conn.Exec(
upsertPendingTxStatement,
pendingTx.TxHash.String(),
pendingTx.StartID,
pendingTx.EndID,
)
return err
}
const listPendingTxsQuery = `
SELECT txn_hash, start_id, end_id
FROM pending_txs
ORDER BY start_id DESC, end_id DESC, txn_hash ASC
`
// ListPendingTxs returns all pending txs stored in the database.
func (d *Database) ListPendingTxs() ([]PendingTx, error) {
rows, err := d.conn.Query(listPendingTxsQuery)
if err != nil {
return nil, err
}
defer rows.Close()
var pendingTxs []PendingTx
for rows.Next() {
var pendingTx PendingTx
var txHashStr string
err = rows.Scan(
&txHashStr,
&pendingTx.StartID,
&pendingTx.EndID,
)
if err != nil {
return nil, err
}
pendingTx.TxHash = common.HexToHash(txHashStr)
pendingTxs = append(pendingTxs, pendingTx)
}
if err := rows.Err(); err != nil {
return nil, err
}
return pendingTxs, nil
}
const deletePendingTxsStatement = `
DELETE FROM pending_txs
WHERE start_id = $1 AND end_id = $2
`
// DeletePendingTx removes any pending txs with matching start and end ids. This
// allows the caller to remove any logically-conflicting pending txs from the
// database after successfully processing the outcomes.
func (d *Database) DeletePendingTx(startID, endID uint64) error {
_, err := d.conn.Exec(
deletePendingTxsStatement,
startID,
endID,
)
return err
}
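The pending_txs table exists so the driver can recover disbursements that were published but whose outcomes were never recorded, e.g. across a crash. A minimal sketch of that lifecycle follows; the broadcast callback is a hypothetical stand-in, not part of this package.

// publishWithRecovery is an illustrative sketch, not driver code.
func publishWithRecovery(
	db *Database,
	tx PendingTx,
	broadcast func(PendingTx) error,
) error {
	// Record the tx before broadcasting so a crash leaves evidence of the
	// in-flight id range [StartID, EndID).
	if err := db.UpsertPendingTx(tx); err != nil {
		return err
	}
	if err := broadcast(tx); err != nil {
		return err
	}
	// Once the outcome has been processed and disbursements recorded,
	// clear the pending entry for this id range.
	return db.DeletePendingTx(tx.StartID, tx.EndID)
}

// On restart, any rows returned by db.ListPendingTxs() identify id ranges
// whose outcomes still need to be resolved before new batches are built.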
package disburser
// FilterStartBlockNumberParams holds the arguments passed to
// FindFilterStartBlockNumber.
type FilterStartBlockNumberParams struct {
// BlockNumber is the current block height of the chain.
BlockNumber uint64
// NumConfirmations is the number of confirmations required to consider a
// block final.
NumConfirmations uint64
// DeployBlockNumber is the deployment height of the Deposit contract.
DeployBlockNumber uint64
// LastProcessedBlockNumber is the height of the last processed block.
//
// NOTE: This will be nil on the first invocation, before blocks have been
// ingested.
LastProcessedBlockNumber *uint64
}
// unconfirmed returns true if the block at height blockNumber has fewer than
// NumConfirmations confirmations. At chain tip BlockNumber, that block has
// BlockNumber-blockNumber+1 confirmations, so it is unconfirmed exactly when
// BlockNumber+1 < blockNumber+NumConfirmations.
func (p *FilterStartBlockNumberParams) unconfirmed(blockNumber uint64) bool {
return p.BlockNumber+1 < blockNumber+p.NumConfirmations
}
// FindFilterStartBlockNumber returns the block height from which to begin
// filtering logs based on the relative heights of the chain, the contract
// deployment, and the last block that was processed.
func FindFilterStartBlockNumber(params FilterStartBlockNumberParams) uint64 {
// On initialization, always start at the deploy height.
if params.LastProcessedBlockNumber == nil {
return params.DeployBlockNumber
}
// If the deployment height has not exited the confirmation window, we can
// still begin our search from the deployment height.
if params.unconfirmed(params.DeployBlockNumber) {
return params.DeployBlockNumber
}
// Otherwise, start from the block immediately following the last
// processed block. If that height still hasn't fully confirmed, fall back
// to the height of the last confirmed block. For example, with
// BlockNumber=47, NumConfirmations=5, and last processed block 43, the
// candidate 44 is unconfirmed (48 < 49), so we start from 47+1-5 = 43.
var filterStartBlockNumber = *params.LastProcessedBlockNumber + 1
if params.unconfirmed(filterStartBlockNumber) {
filterStartBlockNumber = params.BlockNumber + 1 - params.NumConfirmations
}
return filterStartBlockNumber
}
package disburser_test
import (
"testing"
"github.com/ethereum-optimism/optimism/go/teleportr/drivers/disburser"
"github.com/stretchr/testify/require"
)
func uint64Ptr(x uint64) *uint64 {
return &x
}
type filterStartBlockNumberTestCase struct {
name string
params disburser.FilterStartBlockNumberParams
expStartBlockNumber uint64
}
// TestFindFilterStartBlockNumber exhaustively tests the behavior of
// FindFilterStartBlockNumber and its edge cases.
func TestFindFilterStartBlockNumber(t *testing.T) {
tests := []filterStartBlockNumberTestCase{
// Deploy number should be returned if LastProcessedBlockNumber is nil.
{
name: "init returns deploy block number",
params: disburser.FilterStartBlockNumberParams{
BlockNumber: 10,
NumConfirmations: 5,
DeployBlockNumber: 42,
LastProcessedBlockNumber: nil,
},
expStartBlockNumber: 42,
},
// Deploy number should be returned if the deploy number is still in our
// confirmation window.
{
name: "conf lookback before deploy number",
params: disburser.FilterStartBlockNumberParams{
BlockNumber: 43,
NumConfirmations: 5,
DeployBlockNumber: 42,
LastProcessedBlockNumber: uint64Ptr(43),
},
expStartBlockNumber: 42,
},
// Deploy number should be returned if the deploy number is still in our
// confirmation window, even when the window exceeds the chain height.
{
	name: "conf lookback exceeds chain height",
params: disburser.FilterStartBlockNumberParams{
BlockNumber: 43,
NumConfirmations: 44,
DeployBlockNumber: 42,
LastProcessedBlockNumber: uint64Ptr(43),
},
expStartBlockNumber: 42,
},
// If our confirmation window is ahead of the last deposit + 1, expect
// last deposit + 1.
{
name: "conf lookback gt last deposit plus one",
params: disburser.FilterStartBlockNumberParams{
BlockNumber: 100,
NumConfirmations: 5,
DeployBlockNumber: 42,
LastProcessedBlockNumber: uint64Ptr(43),
},
expStartBlockNumber: 44,
},
// If our confirmation window is equal to last deposit + 1, expect last
// deposit + 1.
{
name: "conf lookback eq last deposit plus one",
params: disburser.FilterStartBlockNumberParams{
BlockNumber: 48,
NumConfirmations: 5,
DeployBlockNumber: 42,
LastProcessedBlockNumber: uint64Ptr(43),
},
expStartBlockNumber: 44,
},
// If our confirmation window starts before last deposit + 1, expect
// block number - num confs + 1.
{
name: "conf lookback lt last deposit plus one",
params: disburser.FilterStartBlockNumberParams{
BlockNumber: 47,
NumConfirmations: 5,
DeployBlockNumber: 42,
LastProcessedBlockNumber: uint64Ptr(43),
},
expStartBlockNumber: 43,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testFindFilterStartBlockNumber(t, test)
})
}
}
func testFindFilterStartBlockNumber(
t *testing.T,
test filterStartBlockNumberTestCase,
) {
startBlockNumber := disburser.FindFilterStartBlockNumber(test.params)
require.Equal(t, test.expStartBlockNumber, startBlockNumber)
}
package disburser
import (
"github.com/ethereum-optimism/optimism/go/bss-core/metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
const methodLabel = "method"
var (
// DBMethodUpsertDeposits is a label for UpsertDeposits db method.
DBMethodUpsertDeposits = prometheus.Labels{methodLabel: "upsert_deposits"}
// DBMethodConfirmedDeposits is a label for ConfirmedDeposits db method.
DBMethodConfirmedDeposits = prometheus.Labels{methodLabel: "confirmed_deposits"}
// DBMethodLastProcessedBlock is a label for LastProcessedBlock db method.
DBMethodLastProcessedBlock = prometheus.Labels{methodLabel: "last_processed_block"}
// DBMethodUpsertPendingTx is a label for UpsertPendingTx db method.
DBMethodUpsertPendingTx = prometheus.Labels{methodLabel: "upsert_pending_tx"}
// DBMethodListPendingTxs is a label for ListPendingTxs db method.
DBMethodListPendingTxs = prometheus.Labels{methodLabel: "list_pending_txs"}
// DBMethodUpsertDisbursement is a label for UpsertDisbursement db method.
DBMethodUpsertDisbursement = prometheus.Labels{methodLabel: "upsert_disbursement"}
// DBMethodLatestDisbursementID is a label for LatestDisbursementID db method.
DBMethodLatestDisbursementID = prometheus.Labels{methodLabel: "latest_disbursement_id"}
// DBMethodDeletePendingTx is a label for DeletePendingTx db method.
DBMethodDeletePendingTx = prometheus.Labels{methodLabel: "delete_pending_tx"}
)
// Metrics extends the BSS core metrics with additional metrics tracked by the
// sequencer driver.
type Metrics struct {
*metrics.Base
// FailedDatabaseMethods tracks the number of database failures for each
// known database method.
FailedDatabaseMethods *prometheus.GaugeVec
	// DepositIDMismatch tracks whether or not our database is in sync with the
	// disburser contract. 1 means out of sync, 0 means in sync.
	DepositIDMismatch prometheus.Gauge
	// MissingDisbursements tracks the number of deposits below our expected
	// next deposit id that are missing a disbursement.
MissingDisbursements prometheus.Gauge
// SuccessfulDisbursements tracks the number of disbursements that emit a
// success event from a given tx.
SuccessfulDisbursements prometheus.Gauge
// FailedDisbursements tracks the number of disbursements that emit a failed
// event from a given tx.
FailedDisbursements prometheus.Gauge
// PostgresLastDisbursedID tracks the latest disbursement id in postgres.
PostgresLastDisbursedID prometheus.Gauge
// ContractNextDisbursementID tracks the next disbursement id expected by
// the disburser contract.
ContractNextDisbursementID prometheus.Gauge
}
// NewMetrics initializes a new, extended metrics object.
func NewMetrics(subsystem string) *Metrics {
base := metrics.NewBase(subsystem, "")
return &Metrics{
Base: base,
FailedDatabaseMethods: promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "failed_database_operations",
Help: "Tracks the number of database failures",
Subsystem: base.SubsystemName(),
}, []string{methodLabel}),
DepositIDMismatch: promauto.NewGauge(prometheus.GaugeOpts{
Name: "deposit_id_mismatch",
Help: "Set to 1 when the postgres and the disrburser contract " +
"disagree on the next deposit id, and 0 otherwise",
Subsystem: base.SubsystemName(),
}),
MissingDisbursements: promauto.NewGauge(prometheus.GaugeOpts{
Name: "missing_disbursements",
Help: "Number of deposits that are missing disbursements in " +
"postgres below our supposed next deposit id",
Subsystem: base.SubsystemName(),
}),
SuccessfulDisbursements: promauto.NewGauge(prometheus.GaugeOpts{
Name: "successful_disbursements",
Help: "Number of disbursements that emit a success event " +
"from a given tx",
Subsystem: base.SubsystemName(),
}),
FailedDisbursements: promauto.NewGauge(prometheus.GaugeOpts{
Name: "failed_disbursements",
Help: "Number of disbursements that emit a failed event " +
"from a given tx",
Subsystem: base.SubsystemName(),
}),
PostgresLastDisbursedID: promauto.NewGauge(prometheus.GaugeOpts{
Name: "postgres_last_disbursed_id",
Help: "Latest recorded disbursement id in postgres",
Subsystem: base.SubsystemName(),
}),
ContractNextDisbursementID: promauto.NewGauge(prometheus.GaugeOpts{
Name: "contract_next_disbursement_id",
Help: "Next disbursement id expected by the disburser contract",
Subsystem: base.SubsystemName(),
}),
}
}
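As an illustration of how these gauges might be consumed, a database wrapper could bump the labeled failure gauge whenever a call errors. This is a hedged sketch; the actual driver's instrumentation may differ.

// recordDBError increments the failure gauge for the given method label when
// err is non-nil. Illustrative only.
func recordDBError(m *Metrics, label prometheus.Labels, err error) {
	if err != nil {
		m.FailedDatabaseMethods.With(label).Inc()
	}
}

// Example (hypothetical call site):
//   err := db.UpsertDeposits(deposits, lastProcessedBlock)
//   recordDBError(m, DBMethodUpsertDeposits, err)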
package flags
import "github.com/urfave/cli"
const envVarPrefix = "TELEPORTR_"
func prefixEnvVar(name string) string {
return envVarPrefix + name
}
var (
/* Required Flags */
BuildEnvFlag = cli.StringFlag{
Name: "build-env",
Usage: "Build environment for which the binary is produced, " +
"e.g. production or development",
Required: true,
EnvVar: "BUILD_ENV",
}
EthNetworkNameFlag = cli.StringFlag{
Name: "eth-network-name",
Usage: "Ethereum network name",
Required: true,
EnvVar: "ETH_NETWORK_NAME",
}
L1EthRpcFlag = cli.StringFlag{
Name: "l1-eth-rpc",
Usage: "HTTP provider URL for L1",
Required: true,
EnvVar: "L1_ETH_RPC",
}
L2EthRpcFlag = cli.StringFlag{
Name: "l2-eth-rpc",
Usage: "HTTP provider URL for L2",
Required: true,
EnvVar: "L2_ETH_RPC",
}
DepositAddressFlag = cli.StringFlag{
Name: "deposit-address",
Usage: "Address of the TeleportrDeposit contract",
Required: true,
EnvVar: prefixEnvVar("DEPOSIT_ADDRESS"),
}
DepositDeployBlockNumberFlag = cli.Uint64Flag{
Name: "deposit-deploy-block-number",
Usage: "Deployment block number of the TeleportrDeposit contract",
Required: true,
EnvVar: prefixEnvVar("DEPOSIT_DEPLOY_BLOCK_NUMBER"),
}
DisburserAddressFlag = cli.StringFlag{
Name: "disburser-address",
Usage: "Address of the TeleportrDisburser contract",
Required: true,
EnvVar: prefixEnvVar("DISBURSER_ADDRESS"),
}
MaxL2TxSizeFlag = cli.Uint64Flag{
Name: "max-l2-tx-size",
Usage: "Maximum size in bytes of any L2 transaction that gets " +
"sent for disbursement",
Required: true,
EnvVar: prefixEnvVar("MAX_L2_TX_SIZE"),
}
NumDepositConfirmationsFlag = cli.Uint64Flag{
Name: "num-deposit-confirmations",
Usage: "Number of confirmations before deposits are considered " +
"confirmed",
Required: true,
EnvVar: prefixEnvVar("NUM_DEPOSIT_CONFIRMATIONS"),
}
FilterQueryMaxBlocksFlag = cli.Uint64Flag{
Name: "filter-query-max-blocks",
Usage: "Maximum range of a filter query in blocks",
Required: true,
EnvVar: prefixEnvVar("FILTER_QUERY_MAX_BLOCKS"),
}
PollIntervalFlag = cli.DurationFlag{
Name: "poll-interval",
Usage: "Delay between querying L1 for more transactions and " +
"creating a new disbursement batch",
Required: true,
EnvVar: prefixEnvVar("POLL_INTERVAL"),
}
SafeAbortNonceTooLowCountFlag = cli.Uint64Flag{
Name: "safe-abort-nonce-too-low-count",
Usage: "Number of ErrNonceTooLow observations required to " +
"give up on a tx at a particular nonce without receiving " +
"confirmation",
Required: true,
EnvVar: prefixEnvVar("SAFE_ABORT_NONCE_TOO_LOW_COUNT"),
}
ResubmissionTimeoutFlag = cli.DurationFlag{
Name: "resubmission-timeout",
Usage: "Duration we will wait before resubmitting a " +
"transaction to L2",
Required: true,
EnvVar: prefixEnvVar("RESUBMISSION_TIMEOUT"),
}
PostgresHostFlag = cli.StringFlag{
Name: "postgres-host",
Usage: "Host of the teleportr postgres instance",
Required: true,
EnvVar: prefixEnvVar("POSTGRES_HOST"),
}
PostgresPortFlag = cli.Uint64Flag{
Name: "postgres-port",
Usage: "Port of the teleportr postgres instance",
Required: true,
EnvVar: prefixEnvVar("POSTGRES_PORT"),
}
PostgresUserFlag = cli.StringFlag{
Name: "postgres-user",
Usage: "Username of the teleportr postgres instance",
Required: true,
EnvVar: prefixEnvVar("POSTGRES_USER"),
}
PostgresPasswordFlag = cli.StringFlag{
Name: "postgres-password",
Usage: "Password of the teleportr postgres instance",
Required: true,
EnvVar: prefixEnvVar("POSTGRES_PASSWORD"),
}
PostgresDBNameFlag = cli.StringFlag{
Name: "postgres-db-name",
Usage: "Database name of the teleportr postgres instance",
Required: true,
EnvVar: prefixEnvVar("POSTGRES_DB_NAME"),
}
PostgresEnableSSLFlag = cli.BoolFlag{
Name: "postgres-enable-ssl",
Usage: "Whether or not to enable SSL on connections to " +
"teleportr postgres instance",
Required: true,
EnvVar: prefixEnvVar("POSTGRES_ENABLE_SSL"),
}
/* Optional Flags */
LogLevelFlag = cli.StringFlag{
Name: "log-level",
Usage: "The lowest log level that will be output",
Value: "info",
EnvVar: prefixEnvVar("LOG_LEVEL"),
}
LogTerminalFlag = cli.BoolFlag{
Name: "log-terminal",
Usage: "If true, outputs logs in terminal format, otherwise prints " +
"in JSON format. If SENTRY_ENABLE is set to true, this flag is " +
"ignored and logs are printed using JSON",
EnvVar: prefixEnvVar("LOG_TERMINAL"),
}
DisburserPrivateKeyFlag = cli.StringFlag{
Name: "disburser-private-key",
Usage: "The private key to use for sending to the disburser contract",
EnvVar: prefixEnvVar("DISBURSER_PRIVATE_KEY"),
}
MnemonicFlag = cli.StringFlag{
Name: "mnemonic",
Usage: "The mnemonic used to derive the wallet for the disburser",
EnvVar: prefixEnvVar("MNEMONIC"),
}
DisburserHDPathFlag = cli.StringFlag{
Name: "disburser-hd-path",
Usage: "The HD path used to derive the disburser wallet from the " +
"mnemonic. The mnemonic flag must also be set.",
EnvVar: prefixEnvVar("DISBURSER_HD_PATH"),
}
MetricsServerEnableFlag = cli.BoolFlag{
Name: "metrics-server-enable",
Usage: "Whether or not to run the embedded metrics server",
EnvVar: prefixEnvVar("METRICS_SERVER_ENABLE"),
}
MetricsHostnameFlag = cli.StringFlag{
Name: "metrics-hostname",
Usage: "The hostname of the metrics server",
Value: "127.0.0.1",
EnvVar: prefixEnvVar("METRICS_HOSTNAME"),
}
MetricsPortFlag = cli.Uint64Flag{
Name: "metrics-port",
Usage: "The port of the metrics server",
Value: 7300,
EnvVar: prefixEnvVar("METRICS_PORT"),
}
HTTP2DisableFlag = cli.BoolFlag{
Name: "http2-disable",
Usage: "Whether or not to disable HTTP/2 support.",
EnvVar: prefixEnvVar("HTTP2_DISABLE"),
}
)
var requiredFlags = []cli.Flag{
BuildEnvFlag,
EthNetworkNameFlag,
L1EthRpcFlag,
L2EthRpcFlag,
DepositAddressFlag,
DepositDeployBlockNumberFlag,
DisburserAddressFlag,
MaxL2TxSizeFlag,
NumDepositConfirmationsFlag,
FilterQueryMaxBlocksFlag,
PollIntervalFlag,
SafeAbortNonceTooLowCountFlag,
ResubmissionTimeoutFlag,
PostgresHostFlag,
PostgresPortFlag,
PostgresUserFlag,
PostgresPasswordFlag,
PostgresDBNameFlag,
PostgresEnableSSLFlag,
}
var optionalFlags = []cli.Flag{
LogLevelFlag,
LogTerminalFlag,
DisburserPrivateKeyFlag,
MnemonicFlag,
DisburserHDPathFlag,
MetricsServerEnableFlag,
MetricsHostnameFlag,
MetricsPortFlag,
HTTP2DisableFlag,
}
// Flags contains the list of configuration options available to the binary.
var Flags = append(requiredFlags, optionalFlags...)
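For reference, these flags would typically be wired into a urfave/cli app roughly as follows. This is a hedged sketch: the import path for this flags package and the app body are assumptions, not the actual teleportr entrypoint.

package main

import (
	"log"
	"os"

	// Assumed import path for the flags package defined above.
	"github.com/ethereum-optimism/optimism/go/teleportr/flags"
	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "teleportr"
	// Flags marked Required are enforced by urfave/cli before Action runs.
	app.Flags = flags.Flags
	app.Action = func(ctx *cli.Context) error {
		log.Printf("L1 RPC: %s", ctx.GlobalString(flags.L1EthRpcFlag.Name))
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatalf("teleportr failed: %v", err)
	}
}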
package flags
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/urfave/cli"
)
// TestRequiredFlagsSetRequired asserts that all flags deemed required properly
// have the Required field set to true.
func TestRequiredFlagsSetRequired(t *testing.T) {
for _, flag := range requiredFlags {
reqFlag, ok := flag.(cli.RequiredFlag)
require.True(t, ok)
require.True(t, reqFlag.IsRequired())
}
}
// TestOptionalFlagsDontSetRequired asserts that all flags deemed optional set
// the Required field to false.
func TestOptionalFlagsDontSetRequired(t *testing.T) {
for _, flag := range optionalFlags {
reqFlag, ok := flag.(cli.RequiredFlag)
require.True(t, ok)
require.False(t, reqFlag.IsRequired())
}
}
{
"name": "@eth-optimism/teleportr",
"version": "0.0.0",
"private": true,
"devDependencies": {}
}
@@ -74,11 +74,10 @@ describe('Basic L1<>L2 Communication', async () => {
      }
    )

    await env.messenger.waitForMessageStatus(
      transaction,
      MessageStatus.READY_FOR_RELAY
    )

    await env.messenger.finalizeMessage(transaction)
    await env.messenger.waitForMessageReceipt(transaction)
...
/* Imports: External */
import { BigNumber, Contract, ContractFactory, utils, Wallet } from 'ethers'
import { ethers } from 'hardhat'
import { UniswapV3Deployer } from 'uniswap-v3-deploy-plugin/dist/deployer/UniswapV3Deployer'
@@ -5,8 +6,9 @@ import { FeeAmount, TICK_SPACINGS } from '@uniswap/v3-sdk'
import { abi as NFTABI } from '@uniswap/v3-periphery/artifacts/contracts/NonfungiblePositionManager.sol/NonfungiblePositionManager.json'
import { abi as RouterABI } from '@uniswap/v3-periphery/artifacts/contracts/SwapRouter.sol/SwapRouter.json'

/* Imports: Internal */
import { expect } from './shared/setup'
import { OptimismEnv } from './shared/env'

// Below methods taken from the Uniswap test suite, see
// https://github.com/Uniswap/v3-periphery/blob/main/test/shared/ticks.ts
...
/* Imports: External */
import { Contract, BigNumber } from 'ethers'
import { ethers } from 'hardhat'

/* Imports: Internal */
import { expect } from './shared/setup'
import { OptimismEnv } from './shared/env'
...
module.exports = {
extends: '../../.eslintrc.js',
}