Commit 6395c76d authored by Mark Tyneway

cleanup: delete legacy batch submitter

The batch submitter uses `l2geth` as a dep, so
delete the batch submitter first
parent 33741760
@@ -1253,11 +1253,6 @@ workflows:
       - devnet:
           name: devnet (with genesis contracts)
           deploy: false
-      - go-lint-test-build:
-          name: batch-submitter-tests
-          binary_name: batch-submitter
-          working_directory: batch-submitter
-          dependencies: bss-core
       - go-lint-test-build:
           name: proxyd-tests
           binary_name: proxyd
...
@@ -15,5 +15,3 @@ tests/testdata
 l2geth/signer/fourbyte
 l2geth/cmd/puppeth
 l2geth/cmd/clef
-go/gas-oracle/gas-oracle
-go/batch-submitter/batch-submitter
 # Legacy codebases
-/batch-submitter @ethereum-optimism/legacy-reviewers
 /bss-core @ethereum-optimism/legacy-reviewers
 /gas-oracle @ethereum-optimism/legacy-reviewers
 /l2geth @ethereum-optimism/legacy-reviewers
...
@@ -29,7 +29,6 @@ jobs:
       hardhat-node: ${{ steps.packages.outputs.hardhat-node }}
       canary-docker-tag: ${{ steps.docker-image-name.outputs.canary-docker-tag }}
       op-exporter: ${{ steps.packages.outputs.op-exporter }}
-      batch-submitter-service: ${{ steps.packages.outputs.batch-submitter-service }}
       endpoint-monitor: ${{ steps.packages.outputs.endpoint-monitor }}
     steps:
@@ -457,32 +456,6 @@ jobs:
            GITCOMMIT=${{ steps.build_args.outputs.GITCOMMIT }}
            GITVERSION=${{ steps.build_args.outputs.GITVERSION }}
-  batch-submitter-service:
-    name: Publish batch-submitter-service Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
-    needs: canary-publish
-    if: needs.canary-publish.outputs.batch-submitter-service != ''
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      - name: Login to Docker Hub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
-      - name: Build and push
-        uses: docker/build-push-action@v2
-        with:
-          context: .
-          file: ./batch-submitter/Dockerfile
-          push: true
-          tags: ethereumoptimism/batch-submitter-service:${{ needs.canary-publish.outputs.batch-submitter-service }}
   endpoint-monitor:
     name: Publish endpoint-monitor Version ${{ needs.canary-publish.outputs.canary-docker-tag }}
     needs: canary-publish
...
@@ -28,7 +28,6 @@ jobs:
       replica-healthcheck: ${{ steps.packages.outputs.replica-healthcheck }}
       hardhat-node: ${{ steps.packages.outputs.hardhat-node }}
       op-exporter: ${{ steps.packages.outputs.op-exporter }}
-      batch-submitter-service: ${{ steps.packages.outputs.batch-submitter-service }}
       foundry: ${{ steps.packages.outputs.foundry }}
       endpoint-monitor: ${{ steps.packages.outputs.endpoint-monitor }}
@@ -496,32 +495,6 @@ jobs:
           push: true
           tags: ethereumoptimism/replica-healthcheck:${{ needs.release.outputs.replica-healthcheck }},ethereumoptimism/replica-healthcheck:latest
-  batch-submitter-service:
-    name: Publish batch-submitter-service Version ${{ needs.release.outputs.batch-submitter-service }}
-    needs: release
-    if: needs.release.outputs.batch-submitter-service != ''
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
-      - name: Login to Docker Hub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }}
-      - name: Build and push
-        uses: docker/build-push-action@v2
-        with:
-          context: .
-          file: ./batch-submitter/Dockerfile
-          push: true
-          tags: ethereumoptimism/batch-submitter-service:${{ needs.release.outputs.batch-submitter-service }},ethereumoptimism/batch-submitter-service:latest
   endpoint-monitor:
     name: Publish endpoint-monitor Version ${{ needs.release.outputs.endpoint-monitor}}
     needs: release
...
@@ -28,10 +28,6 @@
       "directory": "packages/chain-mon",
       "changeProcessCWD": true
     },
-    {
-      "directory": "packages/batch-submitter",
-      "changeProcessCWD": true
-    },
     {
       "directory": "packages/message-relayer",
       "changeProcessCWD": true
...
@@ -128,7 +128,6 @@ This will build the following containers:
 * [`l2geth`](https://hub.docker.com/r/ethereumoptimism/l2geth): L2 geth node running in Sequencer mode
 * [`verifier`](https://hub.docker.com/r/ethereumoptimism/go-ethereum): L2 geth node running in Verifier mode
 * [`relayer`](https://hub.docker.com/r/ethereumoptimism/message-relayer): helper process that relays messages between L1 and L2
-* [`batch_submitter`](https://hub.docker.com/r/ethereumoptimism/batch-submitter-service): service that submits batches of Sequencer transactions to the L1 chain
 
 If you want to make a change to a container, you'll need to take it down and rebuild it.
 For example, if you make a change in l2geth:
...
@@ -90,7 +90,6 @@ Refer to the Directory Structure section below to understand which packages are
 │ ├── <a href="./packages/message-relayer">message-relayer</a>: Tool for automatically relaying L1<>L2 messages in development
 │ ├── <a href="./packages/replica-healthcheck">replica-healthcheck</a>: Service for monitoring the health of a replica node
 │ └── <a href="./packages/sdk">sdk</a>: provides a set of tools for interacting with Optimism
-├── <a href="./batch-submitter">batch-submitter</a>: Service for submitting batches of transactions and results to L1
 ├── <a href="./bss-core">bss-core</a>: Core batch-submitter logic and utilities
 ├── <a href="./gas-oracle">gas-oracle</a>: Service for updating L1 gas prices on L2
 ├── <a href="./indexer">indexer</a>: indexes and syncs transactions
...
# @eth-optimism/batch-submitter-service
## 0.1.16
### Patch Changes
- 32bd79ec9: Allow deposit only batches
- da79ef441: fix flag name for MaxStateRootElements in batch-submitter
fix log package for proposer
## 0.1.15
### Patch Changes
- 1d8d50c42: build(deps): bump golang.org/x/crypto from 0.0.0-20220307211146-efcb8507fb70 to 0.1.0 in /batch-submitter
## 0.1.14
### Patch Changes
- 72fa86bff: chore(deps): bump github.com/prometheus/client_golang from 1.11.0 to 1.11.1 in /batch-submitter
## 0.1.13
### Patch Changes
- 7a8812d14: Update go-ethereum to v1.10.26
## 0.1.12
### Patch Changes
- 6f458607: Bump go-ethereum to 1.10.17
## 0.1.11
### Patch Changes
- b2aa08e0: Add MAX_PLAINTEXT_BATCH_SIZE parameter to max out compression
## 0.1.10
### Patch Changes
- 526eac8d: feat: bss less strict min-tx-size
## 0.1.9
### Patch Changes
- 160f4c3d: Update docker image to use golang 1.18.0
- 0c4d4e08: l2geth: Revert transaction pubsub feature
## 0.1.8
### Patch Changes
- 88601cb7: Refactored Dockerfiles
- 6856b215: Count reverted transactions in failed_submissions
- 9678b357: Add Min/MaxStateRootElements configuration
- f8348862: l2geth: Sync from Backend Queue
- 727b0582: Enforce min/max tx size on plaintext batch encoding
## 0.1.7
### Patch Changes
- aca0684e: Add 20% buffer to gas estimation on tx-batch submission to prevent OOG reverts
- 75040ca5: Adds MIN_L1_TX_SIZE configuration
## 0.1.6
### Patch Changes
- 6af67df5: Move L2 dial logic out of bss-core to avoid l2geth dependency
- fe680568: Enable the usage of typed batches and type 0 zlib compressed batches
## 0.1.5
### Patch Changes
- 6f2ea193: Update to go-ethereum v1.10.16
- 87359fd2: Refactors the bss-core service to use a metrics interface to allow
driver-specific metric extensions
## 0.1.4
### Patch Changes
- bcbde5f3: Fixes a bug that causes the txmgr to not wait for the configured numConfirmations
## 0.1.3
### Patch Changes
- 69118ac3: Switch num_elements_per_batch from Histogram to Summary
- df98d134: Remove extra space in metric names
- 3ec06301: Default to JSON logs, add LOG_TERMINAL flag for debugging
- fe321618: Unify metric name format
- 93a26819: Fixes a bug where clearing txs are rejected on startup due to missing gas limit
## 0.1.2
### Patch Changes
- c775ffbe: fix BSS log-level flag parsing
- d093a6bb: Adds a fix for the BSS to account for the new timestamp logic in L2Geth
- d4c2e01b: Restructure to use bss-core package
## 0.1.1
### Patch Changes
- 5905f3dc: Update golang version to support HTTP/2
- c1eba2e6: use EIP-1559 txns for tx/state batches
## 0.1.0
### Minor Changes
- 356b7271: Add multi-tx support, clear pending txs on startup
### Patch Changes
- 85aa148d: Adds confirmation depth awareness to txmgr
## 0.0.2
### Patch Changes
- d6e0de5a: Fix metrics server
FROM golang:1.18.0-alpine3.15 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
COPY ./batch-submitter /go/batch-submitter
COPY ./bss-core /go/bss-core
COPY ./l2geth /go/l2geth
COPY ./batch-submitter/docker.go.work /go/go.work
WORKDIR /go/batch-submitter
RUN make
FROM alpine:3.15
RUN apk add --no-cache ca-certificates jq curl
COPY --from=builder /go/batch-submitter/batch-submitter /usr/local/bin/
WORKDIR /usr/local/bin
COPY ./ops/scripts/batch-submitter.sh .
ENTRYPOINT ["batch-submitter"]
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
GITVERSION := $(shell cat package.json | jq .version)
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
LDFLAGSSTRING +=-X main.GitDate=$(GITDATE)
LDFLAGSSTRING +=-X main.GitVersion=$(GITVERSION)
LDFLAGS := -ldflags "$(LDFLAGSSTRING)"
CTC_ABI_ARTIFACT := ../../packages/contracts/artifacts/contracts/L1/rollup/CanonicalTransactionChain.sol/CanonicalTransactionChain.json
SCC_ABI_ARTIFACT := ../../packages/contracts/artifacts/contracts/L1/rollup/StateCommitmentChain.sol/StateCommitmentChain.json
batch-submitter:
env GO111MODULE=on go build -v $(LDFLAGS) ./cmd/batch-submitter
clean:
rm batch-submitter
test:
go test -v ./...
lint:
golangci-lint run ./...
bindings: bindings-ctc bindings-scc
bindings-ctc:
$(eval temp := $(shell mktemp))
cat $(CTC_ABI_ARTIFACT) \
| jq -r .bytecode > $(temp)
cat $(CTC_ABI_ARTIFACT) \
| jq .abi \
| abigen --pkg ctc \
--abi - \
--out bindings/ctc/canonical_transaction_chain.go \
--type CanonicalTransactionChain \
--bin $(temp)
rm $(temp)
bindings-scc:
$(eval temp := $(shell mktemp))
cat $(SCC_ABI_ARTIFACT) \
| jq -r .bytecode > $(temp)
cat $(SCC_ABI_ARTIFACT) \
| jq .abi \
| abigen --pkg scc \
--abi - \
--out bindings/scc/state_commitment_chain.go \
--type StateCommitmentChain \
--bin $(temp)
rm $(temp)
.PHONY: \
batch-submitter \
bindings \
bindings-ctc \
bindings-scc \
clean \
test \
lint
package batchsubmitter
import (
"context"
"os"
"time"
"github.com/ethereum-optimism/optimism/batch-submitter/drivers/proposer"
"github.com/ethereum-optimism/optimism/batch-submitter/drivers/sequencer"
bsscore "github.com/ethereum-optimism/optimism/bss-core"
"github.com/ethereum-optimism/optimism/bss-core/dial"
"github.com/ethereum-optimism/optimism/bss-core/metrics"
"github.com/ethereum-optimism/optimism/bss-core/txmgr"
"github.com/ethereum/go-ethereum/log"
"github.com/getsentry/sentry-go"
"github.com/urfave/cli"
)
// Main is the entrypoint into the batch submitter service. This method returns
// a closure that executes the service and blocks until the service exits. The
// use of a closure allows the parameters bound to the top-level main package,
// e.g. GitVersion, to be captured and used once the function is executed.
func Main(gitVersion string) func(ctx *cli.Context) error {
return func(cliCtx *cli.Context) error {
cfg, err := NewConfig(cliCtx)
if err != nil {
return err
}
log.Info("Config parsed",
"min_tx_size", cfg.MinL1TxSize,
"max_tx_size", cfg.MaxL1TxSize)
// The call to defer is done here so that any errors logged from
// this point on are posted to Sentry before exiting.
if cfg.SentryEnable {
defer sentry.Flush(2 * time.Second)
}
log.Info("Initializing batch submitter")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Set up our logging. If Sentry is enabled, we will use our custom log
// handler that logs to stdout and forwards any error messages to Sentry
// for collection. Otherwise, logs will only be posted to stdout.
var logHandler log.Handler
if cfg.SentryEnable {
err := sentry.Init(sentry.ClientOptions{
Dsn: cfg.SentryDsn,
Environment: cfg.EthNetworkName,
Release: "batch-submitter@" + gitVersion,
TracesSampleRate: bsscore.TraceRateToFloat64(cfg.SentryTraceRate),
Debug: false,
})
if err != nil {
return err
}
logHandler = bsscore.SentryStreamHandler(os.Stdout, log.JSONFormat())
} else if cfg.LogTerminal {
logHandler = log.StreamHandler(os.Stdout, log.TerminalFormat(true))
} else {
logHandler = log.StreamHandler(os.Stdout, log.JSONFormat())
}
logLevel, err := log.LvlFromString(cfg.LogLevel)
if err != nil {
return err
}
log.Root().SetHandler(log.LvlFilterHandler(logLevel, logHandler))
// Parse sequencer private key and CTC contract address.
sequencerPrivKey, ctcAddress, err := bsscore.ParseWalletPrivKeyAndContractAddr(
"Sequencer", cfg.Mnemonic, cfg.SequencerHDPath,
cfg.SequencerPrivateKey, cfg.CTCAddress,
)
if err != nil {
return err
}
// Parse proposer private key and SCC contract address.
proposerPrivKey, sccAddress, err := bsscore.ParseWalletPrivKeyAndContractAddr(
"Proposer", cfg.Mnemonic, cfg.ProposerHDPath,
cfg.ProposerPrivateKey, cfg.SCCAddress,
)
if err != nil {
return err
}
// Connect to L1 and L2 providers. Perform these last since they are the
// most expensive.
l1Client, err := dial.L1EthClientWithTimeout(ctx, cfg.L1EthRpc, cfg.DisableHTTP2)
if err != nil {
return err
}
l2Client, err := DialL2EthClientWithTimeout(ctx, cfg.L2EthRpc, cfg.DisableHTTP2)
if err != nil {
return err
}
if cfg.MetricsServerEnable {
go metrics.RunServer(cfg.MetricsHostname, cfg.MetricsPort)
}
chainID, err := l1Client.ChainID(ctx)
if err != nil {
return err
}
txManagerConfig := txmgr.Config{
ResubmissionTimeout: cfg.ResubmissionTimeout,
ReceiptQueryInterval: time.Second,
NumConfirmations: cfg.NumConfirmations,
SafeAbortNonceTooLowCount: cfg.SafeAbortNonceTooLowCount,
}
var services []*bsscore.Service
if cfg.RunTxBatchSubmitter {
batchTxDriver, err := sequencer.NewDriver(sequencer.Config{
Name: "Sequencer",
L1Client: l1Client,
L2Client: l2Client,
BlockOffset: cfg.BlockOffset,
MinTxSize: cfg.MinL1TxSize,
MaxTxSize: cfg.MaxL1TxSize,
MaxPlaintextBatchSize: cfg.MaxPlaintextBatchSize,
CTCAddr: ctcAddress,
ChainID: chainID,
PrivKey: sequencerPrivKey,
BatchType: sequencer.BatchTypeFromString(cfg.SequencerBatchType),
})
if err != nil {
return err
}
services = append(services, bsscore.NewService(bsscore.ServiceConfig{
Context: ctx,
Driver: batchTxDriver,
PollInterval: cfg.PollInterval,
ClearPendingTx: cfg.ClearPendingTxs,
L1Client: l1Client,
TxManagerConfig: txManagerConfig,
}))
}
if cfg.RunStateBatchSubmitter {
batchStateDriver, err := proposer.NewDriver(proposer.Config{
Name: "Proposer",
L1Client: l1Client,
L2Client: l2Client,
BlockOffset: cfg.BlockOffset,
MinStateRootElements: cfg.MinStateRootElements,
MaxStateRootElements: cfg.MaxStateRootElements,
SCCAddr: sccAddress,
CTCAddr: ctcAddress,
ChainID: chainID,
PrivKey: proposerPrivKey,
})
if err != nil {
return err
}
services = append(services, bsscore.NewService(bsscore.ServiceConfig{
Context: ctx,
Driver: batchStateDriver,
PollInterval: cfg.PollInterval,
ClearPendingTx: cfg.ClearPendingTxs,
L1Client: l1Client,
TxManagerConfig: txManagerConfig,
}))
}
batchSubmitter, err := bsscore.NewBatchSubmitter(ctx, cancel, services)
if err != nil {
log.Error("Unable to create batch submitter", "error", err)
return err
}
log.Info("Starting batch submitter")
if err := batchSubmitter.Start(); err != nil {
return err
}
defer batchSubmitter.Stop()
log.Info("Batch submitter started")
<-(chan struct{})(nil)
return nil
}
}
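One detail in `Main` worth calling out: the closing `<-(chan struct{})(nil)` receives from a nil channel, which blocks forever, so the closure never returns on its own once the services are running. A minimal standalone sketch of that idiom (illustrative only, not part of the deleted file):

package main

import (
	"fmt"
	"time"
)

func main() {
	// A nil channel: a receive on it never completes, which is how Main
	// above parks the calling goroutine after the services have started.
	var never chan struct{}

	select {
	case <-never:
		fmt.Println("unreachable: a receive on a nil channel never completes")
	case <-time.After(100 * time.Millisecond):
		// Bounded here purely so the demo exits.
		fmt.Println("still blocked on the nil-channel receive after 100ms")
	}
}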
package main
import (
"fmt"
"os"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli"
batchsubmitter "github.com/ethereum-optimism/optimism/batch-submitter"
"github.com/ethereum-optimism/optimism/batch-submitter/flags"
)
var (
GitVersion = ""
GitCommit = ""
GitDate = ""
)
func main() {
// Set up logger with a default INFO level in case we fail to parse flags.
// Otherwise the final critical log won't show what the parsing error was.
log.Root().SetHandler(
log.LvlFilterHandler(
log.LvlInfo,
log.StreamHandler(os.Stdout, log.TerminalFormat(true)),
),
)
app := cli.NewApp()
app.Flags = flags.Flags
app.Version = fmt.Sprintf("%s-%s", GitVersion, params.VersionWithCommit(GitCommit, GitDate))
app.Name = "batch-submitter"
app.Usage = "Batch Submitter Service"
app.Description = "Service for generating and submitting batched transactions " +
"that synchronize L2 state to L1 contracts"
app.Action = batchsubmitter.Main(GitVersion)
err := app.Run(os.Args)
if err != nil {
log.Crit("Application failed", "message", err)
}
}
package batchsubmitter_test
import (
"fmt"
"testing"
batchsubmitter "github.com/ethereum-optimism/optimism/batch-submitter"
"github.com/stretchr/testify/require"
)
var validateConfigTests = []struct {
name string
cfg batchsubmitter.Config
expErr error
}{
{
name: "bad log level",
cfg: batchsubmitter.Config{
LogLevel: "unknown",
},
expErr: fmt.Errorf("unknown level: unknown"),
},
{
name: "sequencer priv key or mnemonic none set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "",
Mnemonic: "",
SequencerHDPath: "",
},
expErr: batchsubmitter.ErrSequencerPrivKeyOrMnemonic,
},
{
name: "sequencer priv key or mnemonic both set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
Mnemonic: "mnemonic",
SequencerHDPath: "sequencer-path",
},
expErr: batchsubmitter.ErrSequencerPrivKeyOrMnemonic,
},
{
name: "sequencer priv key or mnemonic only mnemonic set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "",
Mnemonic: "mnemonic",
SequencerHDPath: "",
},
expErr: batchsubmitter.ErrSequencerPrivKeyOrMnemonic,
},
{
name: "sequencer priv key or mnemonic only hdpath set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "",
Mnemonic: "",
SequencerHDPath: "sequencer-path",
},
expErr: batchsubmitter.ErrSequencerPrivKeyOrMnemonic,
},
{
name: "proposer priv key or mnemonic none set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
ProposerPrivateKey: "",
Mnemonic: "",
ProposerHDPath: "",
},
expErr: batchsubmitter.ErrProposerPrivKeyOrMnemonic,
},
{
name: "proposer priv key or mnemonic both set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
ProposerPrivateKey: "proposer-privkey",
Mnemonic: "mnemonic",
ProposerHDPath: "proposer-path",
},
expErr: batchsubmitter.ErrProposerPrivKeyOrMnemonic,
},
{
name: "proposer priv key or mnemonic only mnemonic set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
ProposerPrivateKey: "",
Mnemonic: "mnemonic",
ProposerHDPath: "",
},
expErr: batchsubmitter.ErrProposerPrivKeyOrMnemonic,
},
{
name: "proposer priv key or mnemonic only hdpath set",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
ProposerPrivateKey: "",
Mnemonic: "",
ProposerHDPath: "proposer-path",
},
expErr: batchsubmitter.ErrProposerPrivKeyOrMnemonic,
},
{
name: "same sequencer and proposer hd path",
cfg: batchsubmitter.Config{
LogLevel: "info",
Mnemonic: "mnemonic",
SequencerHDPath: "path",
ProposerHDPath: "path",
},
expErr: batchsubmitter.ErrSameSequencerAndProposerHDPath,
},
{
name: "same sequencer and proposer privkey",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "privkey",
ProposerPrivateKey: "privkey",
},
expErr: batchsubmitter.ErrSameSequencerAndProposerPrivKey,
},
{
name: "sentry-dsn not set when sentry-enable is true",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
ProposerPrivateKey: "proposer-privkey",
SentryEnable: true,
SentryDsn: "",
},
expErr: batchsubmitter.ErrSentryDSNNotSet,
},
// Valid configs
{
name: "valid config with privkeys and no sentry",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
ProposerPrivateKey: "proposer-privkey",
SentryEnable: false,
SentryDsn: "",
},
expErr: nil,
},
{
name: "valid config with mnemonic and no sentry",
cfg: batchsubmitter.Config{
LogLevel: "info",
Mnemonic: "mnemonic",
SequencerHDPath: "sequencer-path",
ProposerHDPath: "proposer-path",
SentryEnable: false,
SentryDsn: "",
},
expErr: nil,
},
{
name: "valid config with privkeys and sentry",
cfg: batchsubmitter.Config{
LogLevel: "info",
SequencerPrivateKey: "sequencer-privkey",
ProposerPrivateKey: "proposer-privkey",
SentryEnable: true,
SentryDsn: "batch-submitter",
},
expErr: nil,
},
{
name: "valid config with mnemonic and sentry",
cfg: batchsubmitter.Config{
LogLevel: "info",
Mnemonic: "mnemonic",
SequencerHDPath: "sequencer-path",
ProposerHDPath: "proposer-path",
SentryEnable: true,
SentryDsn: "batch-submitter",
},
expErr: nil,
},
}
// TestValidateConfig asserts the behavior of ValidateConfig by testing expected
// error and success configurations.
func TestValidateConfig(t *testing.T) {
for _, test := range validateConfigTests {
t.Run(test.name, func(t *testing.T) {
err := batchsubmitter.ValidateConfig(&test.cfg)
require.Equal(t, err, test.expErr)
})
}
}
package batchsubmitter
import (
"context"
"crypto/tls"
"net/http"
"strings"
"github.com/ethereum-optimism/optimism/bss-core/dial"
"github.com/ethereum-optimism/optimism/l2geth/ethclient"
"github.com/ethereum-optimism/optimism/l2geth/log"
"github.com/ethereum-optimism/optimism/l2geth/rpc"
)
// DialL2EthClientWithTimeout attempts to dial the L2 provider using the
// provided URL. If the dial doesn't complete within dial.DefaultTimeout,
// this method will return an error.
func DialL2EthClientWithTimeout(ctx context.Context, url string, disableHTTP2 bool) (
*ethclient.Client, error) {
ctxt, cancel := context.WithTimeout(ctx, dial.DefaultTimeout)
defer cancel()
if strings.HasPrefix(url, "http") {
httpClient := new(http.Client)
if disableHTTP2 {
log.Info("Disabled HTTP/2 support in L2 eth client")
httpClient.Transport = &http.Transport{
TLSNextProto: make(map[string]func(authority string, c *tls.Conn) http.RoundTripper),
}
}
rpcClient, err := rpc.DialHTTPWithClient(url, httpClient)
if err != nil {
return nil, err
}
return ethclient.NewClient(rpcClient), nil
}
return ethclient.DialContext(ctxt, url)
}
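For context, this is roughly how `DialL2EthClientWithTimeout` would be called; the endpoint URL and flag values below are illustrative assumptions, not taken from the deleted code:

package main

import (
	"context"
	"log"

	batchsubmitter "github.com/ethereum-optimism/optimism/batch-submitter"
)

func main() {
	ctx := context.Background()

	// Hypothetical local L2 endpoint; passing true for disableHTTP2 mirrors
	// the DisableHTTP2 config flag used in Main.
	l2Client, err := batchsubmitter.DialL2EthClientWithTimeout(ctx, "http://localhost:8545", true)
	if err != nil {
		log.Fatalf("failed to dial L2: %v", err)
	}
	defer l2Client.Close()

	chainID, err := l2Client.ChainID(ctx)
	if err != nil {
		log.Fatalf("failed to query L2 chain ID: %v", err)
	}
	log.Printf("connected to L2 chain %s", chainID)
}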
go 1.18
use (
./batch-submitter
./bss-core
./l2geth
)
package proposer
import (
"context"
"crypto/ecdsa"
"fmt"
"math/big"
"strings"
"github.com/ethereum-optimism/optimism/batch-submitter/bindings/ctc"
"github.com/ethereum-optimism/optimism/batch-submitter/bindings/scc"
"github.com/ethereum-optimism/optimism/bss-core/drivers"
"github.com/ethereum-optimism/optimism/bss-core/metrics"
"github.com/ethereum-optimism/optimism/bss-core/txmgr"
l2ethclient "github.com/ethereum-optimism/optimism/l2geth/ethclient"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
)
// stateRootSize is the size in bytes of a state root.
const stateRootSize = 32
var bigOne = new(big.Int).SetUint64(1) //nolint:unused
type Config struct {
Name string
L1Client *ethclient.Client
L2Client *l2ethclient.Client
BlockOffset uint64
MaxStateRootElements uint64
MinStateRootElements uint64
SCCAddr common.Address
CTCAddr common.Address
ChainID *big.Int
PrivKey *ecdsa.PrivateKey
}
type Driver struct {
cfg Config
sccContract *scc.StateCommitmentChain
rawSccContract *bind.BoundContract
ctcContract *ctc.CanonicalTransactionChain
walletAddr common.Address
metrics *metrics.Base
}
func NewDriver(cfg Config) (*Driver, error) {
sccContract, err := scc.NewStateCommitmentChain(
cfg.SCCAddr, cfg.L1Client,
)
if err != nil {
return nil, err
}
ctcContract, err := ctc.NewCanonicalTransactionChain(
cfg.CTCAddr, cfg.L1Client,
)
if err != nil {
return nil, err
}
parsed, err := abi.JSON(strings.NewReader(
scc.StateCommitmentChainABI,
))
if err != nil {
return nil, err
}
rawSccContract := bind.NewBoundContract(
cfg.SCCAddr, parsed, cfg.L1Client, cfg.L1Client, cfg.L1Client,
)
walletAddr := crypto.PubkeyToAddress(cfg.PrivKey.PublicKey)
return &Driver{
cfg: cfg,
sccContract: sccContract,
rawSccContract: rawSccContract,
ctcContract: ctcContract,
walletAddr: walletAddr,
metrics: metrics.NewBase("batch_submitter", cfg.Name),
}, nil
}
// Name is an identifier used to prefix logs for a particular service.
func (d *Driver) Name() string {
return d.cfg.Name
}
// WalletAddr is the wallet address used to pay for batch transaction fees.
func (d *Driver) WalletAddr() common.Address {
return d.walletAddr
}
// Metrics returns the subservice telemetry object.
func (d *Driver) Metrics() metrics.Metrics {
return d.metrics
}
// ClearPendingTx publishes a transaction at the next available nonce in order
// to clear any transactions in the mempool left over from a prior running
// instance of the batch submitter.
func (d *Driver) ClearPendingTx(
ctx context.Context,
txMgr txmgr.TxManager,
l1Client *ethclient.Client,
) error {
return drivers.ClearPendingTx(
d.cfg.Name, ctx, txMgr, l1Client, d.walletAddr, d.cfg.PrivKey,
d.cfg.ChainID,
)
}
// GetBatchBlockRange returns the start and end L2 block heights that need to be
// processed. Note that the end value is *exclusive*, therefore if the returned
// values are identical nothing needs to be processed.
func (d *Driver) GetBatchBlockRange(
ctx context.Context) (*big.Int, *big.Int, error) {
blockOffset := new(big.Int).SetUint64(d.cfg.BlockOffset)
start, err := d.sccContract.GetTotalElements(&bind.CallOpts{
Pending: false,
Context: ctx,
})
if err != nil {
return nil, nil, err
}
start.Add(start, blockOffset)
end, err := d.ctcContract.GetTotalElements(&bind.CallOpts{
Pending: false,
Context: ctx,
})
if err != nil {
return nil, nil, err
}
end.Add(end, blockOffset)
if start.Cmp(end) > 0 {
return nil, nil, fmt.Errorf("invalid range, "+
"end(%v) < start(%v)", end, start)
}
return start, end, nil
}
// CraftBatchTx transforms the L2 blocks between start and end into a batch
// transaction using the given nonce. A dummy gas price is used in the resulting
// transaction for size estimation.
//
// NOTE: This method SHOULD NOT publish the resulting transaction.
func (d *Driver) CraftBatchTx(
ctx context.Context,
start, end, nonce *big.Int,
) (*types.Transaction, error) {
name := d.cfg.Name
log.Info(name+" crafting batch tx", "start", start, "end", end,
"nonce", nonce)
var stateRoots [][stateRootSize]byte
for i := new(big.Int).Set(start); i.Cmp(end) < 0; i.Add(i, bigOne) {
// Consume state roots until we reach our maximum tx size.
if uint64(len(stateRoots)) > d.cfg.MaxStateRootElements {
break
}
block, err := d.cfg.L2Client.BlockByNumber(ctx, i)
if err != nil {
return nil, err
}
stateRoots = append(stateRoots, block.Root())
}
// Abort if we don't have enough state roots to meet our minimum
// requirement.
if uint64(len(stateRoots)) < d.cfg.MinStateRootElements {
log.Info(name+" number of state roots below minimum",
"num_state_roots", len(stateRoots),
"min_state_roots", d.cfg.MinStateRootElements)
return nil, nil
}
d.metrics.NumElementsPerBatch().Observe(float64(len(stateRoots)))
log.Info(name+" batch constructed", "num_state_roots", len(stateRoots))
opts, err := bind.NewKeyedTransactorWithChainID(
d.cfg.PrivKey, d.cfg.ChainID,
)
if err != nil {
return nil, err
}
opts.Context = ctx
opts.Nonce = nonce
opts.NoSend = true
blockOffset := new(big.Int).SetUint64(d.cfg.BlockOffset)
offsetStartsAtIndex := new(big.Int).Sub(start, blockOffset)
tx, err := d.sccContract.AppendStateBatch(
opts, stateRoots, offsetStartsAtIndex,
)
switch {
case err == nil:
return tx, nil
// If the transaction failed because the backend does not support
// eth_maxPriorityFeePerGas, fallback to using the default constant.
// Currently Alchemy is the only backend provider that exposes this method,
// so in the event their API is unreachable we can fallback to a degraded
// mode of operation. This also applies to our test environments, as hardhat
// doesn't support the query either.
case drivers.IsMaxPriorityFeePerGasNotFoundError(err):
log.Warn(d.cfg.Name + " eth_maxPriorityFeePerGas is unsupported " +
"by current backend, using fallback gasTipCap")
opts.GasTipCap = drivers.FallbackGasTipCap
return d.sccContract.AppendStateBatch(
opts, stateRoots, offsetStartsAtIndex,
)
default:
return nil, err
}
}
// UpdateGasPrice signs an otherwise identical txn to the one provided but with
// updated gas prices sampled from the existing network conditions.
//
// NOTE: This method SHOULD NOT publish the resulting transaction.
func (d *Driver) UpdateGasPrice(
ctx context.Context,
tx *types.Transaction,
) (*types.Transaction, error) {
opts, err := bind.NewKeyedTransactorWithChainID(
d.cfg.PrivKey, d.cfg.ChainID,
)
if err != nil {
return nil, err
}
opts.Context = ctx
opts.Nonce = new(big.Int).SetUint64(tx.Nonce())
opts.NoSend = true
finalTx, err := d.rawSccContract.RawTransact(opts, tx.Data())
switch {
case err == nil:
return finalTx, nil
// If the transaction failed because the backend does not support
// eth_maxPriorityFeePerGas, fallback to using the default constant.
// Currently Alchemy is the only backend provider that exposes this method,
// so in the event their API is unreachable we can fallback to a degraded
// mode of operation. This also applies to our test environments, as hardhat
// doesn't support the query either.
case drivers.IsMaxPriorityFeePerGasNotFoundError(err):
log.Warn(d.cfg.Name + " eth_maxPriorityFeePerGas is unsupported " +
"by current backend, using fallback gasTipCap")
opts.GasTipCap = drivers.FallbackGasTipCap
return d.rawSccContract.RawTransact(opts, tx.Data())
default:
return nil, err
}
}
// SendTransaction injects a signed transaction into the pending pool for
// execution.
func (d *Driver) SendTransaction(
ctx context.Context,
tx *types.Transaction,
) error {
return d.cfg.L1Client.SendTransaction(ctx, tx)
}
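The proposer `Driver` is consumed by a bss-core poll loop that is not part of this commit: fetch the open block range, craft the batch tx, re-price it, then publish. A simplified sketch of one polling tick, with the loop structure and nonce handling assumed for illustration only:

package proposerpoll

import (
	"context"
	"math/big"

	"github.com/ethereum-optimism/optimism/batch-submitter/drivers/proposer"
	"github.com/ethereum/go-ethereum/log"
)

// pollOnce is a hypothetical single tick of a bss-core-style service loop.
func pollOnce(ctx context.Context, d *proposer.Driver, nonce *big.Int) error {
	start, end, err := d.GetBatchBlockRange(ctx)
	if err != nil {
		return err
	}
	// The end of the range is exclusive, so equal values mean there is
	// nothing new to propose on this tick.
	if start.Cmp(end) == 0 {
		log.Info("no new state roots to submit", "start", start, "end", end)
		return nil
	}
	// Craft (but do not send) the batch tx, then re-price and publish it.
	tx, err := d.CraftBatchTx(ctx, start, end, nonce)
	if err != nil {
		return err
	}
	if tx == nil {
		// The minimum state-root count was not met; try again next tick.
		return nil
	}
	tx, err = d.UpdateGasPrice(ctx, tx)
	if err != nil {
		return err
	}
	return d.SendTransaction(ctx, tx)
}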
package sequencer
import (
"errors"
"fmt"
l2types "github.com/ethereum-optimism/optimism/l2geth/core/types"
)
var (
// ErrBlockWithInvalidContext signals an attempt to generate a
// BatchContext that specifies a total of zero txs.
ErrBlockWithInvalidContext = errors.New("attempted to generate batch " +
"context with 0 queued and 0 sequenced txs")
)
// BatchElement reflects the contents of an atomic update to the L2 state.
// Currently, each BatchElement is constructed from a single block containing
// exactly one tx.
type BatchElement struct {
// Timestamp is the L1 timestamp of the batch.
Timestamp uint64
// BlockNumber is the L1 BlockNumber of the batch.
BlockNumber uint64
// Tx is the optional transaction that was applied in this batch.
//
// NOTE: This field will only be populated for sequencer txs.
Tx *CachedTx
}
// IsSequencerTx returns true if this batch contains a tx that needs to be
// posted to the L1 CTC contract.
func (b *BatchElement) IsSequencerTx() bool {
return b.Tx != nil
}
// BatchElementFromBlock constructs a BatchElement from a single L2 block. This
// method expects that there is exactly ONE tx per block. The returned
// BatchElement will reflect whether or not the lone tx is a sequencer tx or a
// queued tx.
func BatchElementFromBlock(block *l2types.Block) BatchElement {
txs := block.Transactions()
if len(txs) != 1 {
panic(fmt.Sprintf("attempting to create batch element from block %d, "+
"found %d txs instead of 1", block.Number(), len(txs)))
}
tx := txs[0]
// Extract L2 metadata.
l1BlockNumber := tx.L1BlockNumber().Uint64()
isSequencerTx := tx.QueueOrigin() == l2types.QueueOriginSequencer
// Only include sequencer txs in the returned BatchElement.
var cachedTx *CachedTx
if isSequencerTx {
cachedTx = NewCachedTx(tx)
}
return BatchElement{
Timestamp: block.Time(),
BlockNumber: l1BlockNumber,
Tx: cachedTx,
}
}
type groupedBlock struct {
sequenced []BatchElement
queued []BatchElement
}
// GenSequencerBatchParams generates a valid AppendSequencerBatchParams from a
// list of BatchElements. The BatchElements are assumed to be ordered in
// ascending order by L2 block height.
func GenSequencerBatchParams(
shouldStartAtElement uint64,
blockOffset uint64,
batch []BatchElement,
) (*AppendSequencerBatchParams, error) {
var (
contexts []BatchContext
groupedBlocks []groupedBlock
txs []*CachedTx
lastBlockIsSequencerTx bool
lastTimestamp uint64
lastBlockNumber uint64
)
// Iterate over the batch elements, grouping the elements according to
// the following criteria:
// - All txs in the same group must have the same timestamp.
// - All sequencer txs in the same group must have the same block number.
// - If sequencer txs exist in a group, they must come before all
// queued txs.
//
// Assuming the block and timestamp criteria for sequencer txs are
// respected within each group, the following are examples of groupings:
// - [s] // sequencer can exist by itself
// - [q] // queued tx can exist by itself
// - [s] [s] // differing sequencer tx timestamp/blocknumber
// - [s q] [s] // sequencer tx must precede queued tx in group
// - [q] [q s] // INVALID: consecutive queued txs are split
// - [q q] [s] // correct split for preceding case
// - [s q] [s q] // alternating sequencer tx interleaved with queued
for _, el := range batch {
// To enforce the above groupings, the following condition is
// used to determine when to start a new group:
// - On the first pass, or
// - The preceding tx has a different timestamp, or
// - Whenever a sequencer tx is observed, and:
// - The preceding tx was a queued tx, or
// - The preceding sequencer tx has a different block number.
// Note that a sequencer tx is usually required to create a new group,
// so a queued tx may ONLY exist as the first element in a group if it
// is the very first element or it has a different timestamp from the
// preceding tx.
needsNewGroupOnSequencerTx := !lastBlockIsSequencerTx ||
el.BlockNumber != lastBlockNumber
if len(groupedBlocks) == 0 ||
el.Timestamp != lastTimestamp ||
(el.IsSequencerTx() && needsNewGroupOnSequencerTx) {
groupedBlocks = append(groupedBlocks, groupedBlock{})
}
// Append the tx to either the sequenced or queued txs,
// depending on its type.
cur := len(groupedBlocks) - 1
if el.IsSequencerTx() {
groupedBlocks[cur].sequenced =
append(groupedBlocks[cur].sequenced, el)
// Gather all sequencer txs, as these will be encoded in
// the calldata of the batch tx submitted to the L1 CTC
// contract.
txs = append(txs, el.Tx)
} else {
groupedBlocks[cur].queued =
append(groupedBlocks[cur].queued, el)
}
lastBlockIsSequencerTx = el.IsSequencerTx()
lastTimestamp = el.Timestamp
lastBlockNumber = el.BlockNumber
}
// For each group, construct the resulting BatchContext.
for _, block := range groupedBlocks {
numSequencedTxs := uint64(len(block.sequenced))
numSubsequentQueueTxs := uint64(len(block.queued))
// Ensure at least one tx was included in this group.
if numSequencedTxs == 0 && numSubsequentQueueTxs == 0 {
return nil, ErrBlockWithInvalidContext
}
// Compute the timestamp and block number for the batch
// using either the earliest sequenced tx or the earliest queued
// tx. If a batch has a sequencer tx it is given preference,
// since it is guaranteed to be the earliest item in the group.
// Otherwise, we fallback to the earliest queued tx since it was
// the very first item.
var (
timestamp uint64
blockNumber uint64
)
if numSequencedTxs > 0 {
timestamp = block.sequenced[0].Timestamp
blockNumber = block.sequenced[0].BlockNumber
} else {
timestamp = block.queued[0].Timestamp
blockNumber = block.queued[0].BlockNumber
}
contexts = append(contexts, BatchContext{
NumSequencedTxs: numSequencedTxs,
NumSubsequentQueueTxs: numSubsequentQueueTxs,
Timestamp: timestamp,
BlockNumber: blockNumber,
})
}
return &AppendSequencerBatchParams{
ShouldStartAtElement: shouldStartAtElement - blockOffset,
TotalElementsToAppend: uint64(len(batch)),
Contexts: contexts,
Txs: txs,
}, nil
}
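A small worked example of the grouping rules documented above, showing the "[s q] [s]" split when a sequencer tx follows a queued tx with a different block number (the values and tx construction here are illustrative only):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum-optimism/optimism/batch-submitter/drivers/sequencer"
	l2common "github.com/ethereum-optimism/optimism/l2geth/common"
	l2types "github.com/ethereum-optimism/optimism/l2geth/core/types"
)

func main() {
	// Two distinct sequencer txs; their contents don't affect the grouping.
	seqTx1 := sequencer.NewCachedTx(l2types.NewTransaction(0, l2common.Address{}, big.NewInt(0), 0, big.NewInt(0), []byte{}))
	seqTx2 := sequencer.NewCachedTx(l2types.NewTransaction(1, l2common.Address{}, big.NewInt(0), 0, big.NewInt(0), []byte{}))

	batch := []sequencer.BatchElement{
		{Timestamp: 10, BlockNumber: 100, Tx: seqTx1}, // s
		{Timestamp: 10, BlockNumber: 100},             // q: same timestamp, joins the group above
		{Timestamp: 10, BlockNumber: 101, Tx: seqTx2}, // s: new group, block number changed
	}

	params, err := sequencer.GenSequencerBatchParams(1, 1, batch)
	if err != nil {
		panic(err)
	}
	// Expect the "[s q] [s]" split: 2 contexts, 2 sequencer txs in calldata.
	fmt.Println(len(params.Contexts), len(params.Txs)) // prints: 2 2
}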
package sequencer_test
import (
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/batch-submitter/drivers/sequencer"
l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum-optimism/optimism/l2geth/core/types"
l2types "github.com/ethereum-optimism/optimism/l2geth/core/types"
"github.com/stretchr/testify/require"
)
func TestBatchElementFromBlock(t *testing.T) {
expTime := uint64(42)
expBlockNumber := uint64(43)
header := &l2types.Header{
Time: expTime,
}
expTx := l2types.NewTransaction(
1, l2common.Address{}, new(big.Int).SetUint64(2), 3,
new(big.Int).SetUint64(4), []byte{},
)
expTx.SetL1BlockNumber(expBlockNumber)
txs := []*l2types.Transaction{expTx}
block := l2types.NewBlock(header, txs, nil, nil)
element := sequencer.BatchElementFromBlock(block)
require.Equal(t, element.Timestamp, expTime)
require.Equal(t, element.BlockNumber, expBlockNumber)
require.True(t, element.IsSequencerTx())
require.Equal(t, element.Tx.Tx(), expTx)
queueMeta := l2types.NewTransactionMeta(
new(big.Int).SetUint64(expBlockNumber), 0, nil,
l2types.QueueOriginL1ToL2, nil, nil, nil,
)
expTx.SetTransactionMeta(queueMeta)
element = sequencer.BatchElementFromBlock(block)
require.Equal(t, element.Timestamp, expTime)
require.Equal(t, element.BlockNumber, expBlockNumber)
require.False(t, element.IsSequencerTx())
require.Nil(t, element.Tx)
}
func TestGenSequencerParams(t *testing.T) {
tx := types.NewTransaction(0, l2common.Address{}, big.NewInt(0), 0, big.NewInt(0), []byte{})
shouldStartAtElement := uint64(1)
blockOffset := uint64(1)
batches := []sequencer.BatchElement{
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 1, BlockNumber: 1, Tx: sequencer.NewCachedTx(tx)},
}
params, err := sequencer.GenSequencerBatchParams(shouldStartAtElement, blockOffset, batches)
require.NoError(t, err)
require.Equal(t, uint64(0), params.ShouldStartAtElement)
require.Equal(t, uint64(len(batches)), params.TotalElementsToAppend)
require.Equal(t, len(batches), len(params.Contexts))
// There is only 1 sequencer tx
require.Equal(t, 1, len(params.Txs))
// There are 2 contexts
// The first context contains the deposit
context1 := params.Contexts[0]
require.Equal(t, uint64(0), context1.NumSequencedTxs)
require.Equal(t, uint64(1), context1.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context1.Timestamp)
require.Equal(t, uint64(1), context1.BlockNumber)
// The second context contains the sequencer tx
context2 := params.Contexts[1]
require.Equal(t, uint64(1), context2.NumSequencedTxs)
require.Equal(t, uint64(0), context2.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context2.Timestamp)
require.Equal(t, uint64(1), context2.BlockNumber)
}
func TestGenSequencerParamsOnlyDeposits(t *testing.T) {
shouldStartAtElement := uint64(1)
blockOffset := uint64(1)
batches := []sequencer.BatchElement{
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 2, BlockNumber: 2},
}
params, err := sequencer.GenSequencerBatchParams(shouldStartAtElement, blockOffset, batches)
require.NoError(t, err)
// The batches will pack deposits into the same context when their
// timestamps and blocknumbers are the same
require.Equal(t, uint64(0), params.ShouldStartAtElement)
require.Equal(t, uint64(len(batches)), params.TotalElementsToAppend)
// 2 deposits have the same timestamp + blocknumber, they go in the
// same context. 1 deposit has a different timestamp + blocknumber,
// it goes into a different context. Therefore there are 2 contexts
require.Equal(t, 2, len(params.Contexts))
// No sequencer txs
require.Equal(t, 0, len(params.Txs))
// There are 2 contexts
// The first context contains the deposit
context1 := params.Contexts[0]
require.Equal(t, uint64(0), context1.NumSequencedTxs)
require.Equal(t, uint64(2), context1.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context1.Timestamp)
require.Equal(t, uint64(1), context1.BlockNumber)
context2 := params.Contexts[1]
require.Equal(t, uint64(0), context2.NumSequencedTxs)
require.Equal(t, uint64(1), context2.NumSubsequentQueueTxs)
require.Equal(t, uint64(2), context2.Timestamp)
require.Equal(t, uint64(2), context2.BlockNumber)
}
package sequencer
import (
"bytes"
"fmt"
l2types "github.com/ethereum-optimism/optimism/l2geth/core/types"
)
type CachedTx struct {
tx *l2types.Transaction
rawTx []byte
}
func NewCachedTx(tx *l2types.Transaction) *CachedTx {
var txBuf bytes.Buffer
if err := tx.EncodeRLP(&txBuf); err != nil {
panic(fmt.Sprintf("Unable to encode tx: %v", err))
}
return &CachedTx{
tx: tx,
rawTx: txBuf.Bytes(),
}
}
func (t *CachedTx) Tx() *l2types.Transaction {
return t.tx
}
func (t *CachedTx) Size() int {
return len(t.rawTx)
}
func (t *CachedTx) RawTx() []byte {
return t.rawTx
}
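Caching the RLP encoding means callers can budget batch calldata without re-serializing each tx. A hypothetical helper sketching that use of `Size()` (the pruning policy here is an assumption, not the deleted driver's logic):

package batchsizing

import "github.com/ethereum-optimism/optimism/batch-submitter/drivers/sequencer"

// pruneToMaxSize drops txs from the tail of the batch until the summed
// cached RLP size fits within maxBytes.
func pruneToMaxSize(txs []*sequencer.CachedTx, maxBytes int) []*sequencer.CachedTx {
	total := 0
	for i, tx := range txs {
		total += tx.Size()
		if total > maxBytes {
			return txs[:i]
		}
	}
	return txs
}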
package sequencer
import (
"github.com/ethereum-optimism/optimism/bss-core/metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
// Metrics extends the BSS core metrics with additional metrics tracked by the
// sequencer driver.
type Metrics struct {
*metrics.Base
// BatchPruneCount tracks the number of times a batch of sequencer
// transactions is pruned in order to meet the desired size requirements.
BatchPruneCount prometheus.Gauge
}
// NewMetrics initializes a new, extended metrics object.
func NewMetrics(subsystem string) *Metrics {
base := metrics.NewBase("batch_submitter", subsystem)
return &Metrics{
Base: base,
BatchPruneCount: promauto.NewGauge(prometheus.GaugeOpts{
Name: "batch_prune_count",
Help: "Number of times a batch is pruned",
Subsystem: base.SubsystemName(),
}),
}
}
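Usage sketch for the extended metrics type: the embedded `*metrics.Base` keeps the shared bss-core metrics available while `BatchPruneCount` adds the sequencer-specific gauge (the values below are illustrative):

package main

import "github.com/ethereum-optimism/optimism/batch-submitter/drivers/sequencer"

func main() {
	// Construct the extended metrics; this registers the collectors with the
	// default Prometheus registry.
	m := sequencer.NewMetrics("sequencer")

	// Driver-specific gauge added on top of the shared bss-core metrics.
	m.BatchPruneCount.Inc()

	// Methods promoted from the embedded *metrics.Base remain available.
	m.NumElementsPerBatch().Observe(42)
}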
package flags
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/urfave/cli"
)
// TestRequiredFlagsSetRequired asserts that all flags deemed required properly
// have the Required field set to true.
func TestRequiredFlagsSetRequired(t *testing.T) {
for _, flag := range requiredFlags {
reqFlag, ok := flag.(cli.RequiredFlag)
require.True(t, ok)
require.True(t, reqFlag.IsRequired())
}
}
// TestOptionalFlagsDontSetRequired asserts that all flags deemed optional set
// the Required field to false.
func TestOptionalFlagsDontSetRequired(t *testing.T) {
for _, flag := range optionalFlags {
reqFlag, ok := flag.(cli.RequiredFlag)
require.True(t, ok)
require.False(t, reqFlag.IsRequired())
}
}