Commit cd8f6294 authored by Adrian Sutton

Merge branch 'develop' into aj/multicaller

parents bb475da9 1bc3691b
...@@ -25,9 +25,10 @@ ...@@ -25,9 +25,10 @@
/ops-bedrock @ethereum-optimism/go-reviewers /ops-bedrock @ethereum-optimism/go-reviewers
# Ops # Ops
/.circleci @ethereum-optimism/infra-reviewers /.circleci @ethereum-optimism/infra-reviewers
/.github @ethereum-optimism/infra-reviewers /.github @ethereum-optimism/infra-reviewers
/ops @ethereum-optimism/infra-reviewers /ops @ethereum-optimism/infra-reviewers
/docker-bake.hcl @ethereum-optimism/infra-reviewers
# Misc # Misc
/proxyd @ethereum-optimism/infra-reviewers /proxyd @ethereum-optimism/infra-reviewers
......
...@@ -17,6 +17,3 @@ ...@@ -17,6 +17,3 @@
path = packages/contracts-bedrock/lib/safe-contracts path = packages/contracts-bedrock/lib/safe-contracts
url = https://github.com/safe-global/safe-contracts url = https://github.com/safe-global/safe-contracts
branch = v1.4.0 branch = v1.4.0
...@@ -181,9 +181,9 @@ You must have Python 3.x installed to run `slither`. ...@@ -181,9 +181,9 @@ You must have Python 3.x installed to run `slither`.
To run `slither` locally, do: To run `slither` locally, do:
```bash ```bash
cd packages/contracts cd packages/contracts-bedrock
pip3 install slither-analyzer pip3 install slither-analyzer
pnpm test:slither pnpm slither
``` ```
## Labels ## Labels
......
COMPOSEFLAGS=-d COMPOSEFLAGS=-d
ITESTS_L2_HOST=http://localhost:9545 ITESTS_L2_HOST=http://localhost:9545
BEDROCK_TAGS_REMOTE?=origin BEDROCK_TAGS_REMOTE?=origin
OP_STACK_GO_BUILDER?=us-docker.pkg.dev/oplabs-tools-artifacts/images/op_stack_go:latest OP_STACK_GO_BUILDER?=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
# Requires at least Python v3.9; specify a minor version below if needed
PYTHON?=python3
build: build-go build-ts build: build-go build-ts
.PHONY: build .PHONY: build
...@@ -25,10 +28,15 @@ ci-builder: ...@@ -25,10 +28,15 @@ ci-builder:
docker build -t ci-builder -f ops/docker/ci-builder/Dockerfile . docker build -t ci-builder -f ops/docker/ci-builder/Dockerfile .
golang-docker: golang-docker:
DOCKER_BUILDKIT=1 docker build -t op-stack-go \ # We don't use a buildx builder here, and just load directly into regular docker, for convenience.
--build-arg GIT_COMMIT=$$(git rev-parse HEAD) \ GIT_COMMIT=$$(git rev-parse HEAD) \
--build-arg GIT_DATE=$$(git show -s --format='%ct') \ GIT_DATE=$$(git show -s --format='%ct') \
-f ops/docker/op-stack-go/Dockerfile . IMAGE_TAGS=$$GIT_COMMIT,latest \
docker buildx bake \
--progress plain \
--load \
-f docker-bake.hcl \
op-node op-batcher op-proposer op-challenger
.PHONY: golang-docker .PHONY: golang-docker
submodules: submodules:
...@@ -109,14 +117,14 @@ pre-devnet: ...@@ -109,14 +117,14 @@ pre-devnet:
devnet-up: pre-devnet devnet-up: pre-devnet
./ops/scripts/newer-file.sh .devnet/allocs-l1.json ./packages/contracts-bedrock \ ./ops/scripts/newer-file.sh .devnet/allocs-l1.json ./packages/contracts-bedrock \
|| make devnet-allocs || make devnet-allocs
PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. PYTHONPATH=./bedrock-devnet $(PYTHON) ./bedrock-devnet/main.py --monorepo-dir=.
.PHONY: devnet-up .PHONY: devnet-up
# alias for devnet-up # alias for devnet-up
devnet-up-deploy: devnet-up devnet-up-deploy: devnet-up
devnet-test: pre-devnet devnet-test: pre-devnet
PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. --test PYTHONPATH=./bedrock-devnet $(PYTHON) ./bedrock-devnet/main.py --monorepo-dir=. --test
.PHONY: devnet-test .PHONY: devnet-test
devnet-down: devnet-down:
...@@ -132,7 +140,7 @@ devnet-clean: ...@@ -132,7 +140,7 @@ devnet-clean:
.PHONY: devnet-clean .PHONY: devnet-clean
devnet-allocs: pre-devnet devnet-allocs: pre-devnet
PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. --allocs PYTHONPATH=./bedrock-devnet $(PYTHON) ./bedrock-devnet/main.py --monorepo-dir=. --allocs
devnet-logs: devnet-logs:
@(cd ./ops-bedrock && docker compose logs -f) @(cd ./ops-bedrock && docker compose logs -f)
......
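For reference, a minimal sketch of how the updated Makefile targets might be driven locally, assuming `docker buildx` and a Python 3.9+ interpreter are available (the `python3.11` name is only an example of the new `PYTHON` override):

```bash
# Build the Go images via the new bake-based target, tagged with the current
# commit and `latest` (this simply invokes the golang-docker recipe above).
make golang-docker

# Run the devnet with an explicit interpreter; PYTHON defaults to python3.
make devnet-up PYTHON=python3.11
```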
...@@ -10,6 +10,9 @@ import time ...@@ -10,6 +10,9 @@ import time
import shutil import shutil
import http.client import http.client
from multiprocessing import Process, Queue from multiprocessing import Process, Queue
import concurrent.futures
from collections import namedtuple
import devnet.log_setup import devnet.log_setup
...@@ -94,12 +97,21 @@ def main(): ...@@ -94,12 +97,21 @@ def main():
devnet_l1_genesis(paths) devnet_l1_genesis(paths)
return return
log.info('Building docker images') git_commit = subprocess.run(['git', 'rev-parse', 'HEAD'], capture_output=True, text=True).stdout.strip()
run_command(['docker', 'compose', 'build', '--progress', 'plain'], cwd=paths.ops_bedrock_dir, env={ git_date = subprocess.run(['git', 'show', '-s', "--format=%ct"], capture_output=True, text=True).stdout.strip()
'PWD': paths.ops_bedrock_dir,
'DOCKER_BUILDKIT': '1', # (should be available by default in later versions, but explicitly enable it anyway) # CI loads the images from workspace, and does not otherwise know the images are good as-is
'COMPOSE_DOCKER_CLI_BUILD': '1' # use the docker cache if os.getenv('DEVNET_NO_BUILD') == "true":
}) log.info('Skipping docker images build')
else:
log.info(f'Building docker images for git commit {git_commit} ({git_date})')
run_command(['docker', 'compose', 'build', '--progress', 'plain',
'--build-arg', f'GIT_COMMIT={git_commit}', '--build-arg', f'GIT_DATE={git_date}'],
cwd=paths.ops_bedrock_dir, env={
'PWD': paths.ops_bedrock_dir,
'DOCKER_BUILDKIT': '1', # (should be available by default in later versions, but explicitly enable it anyway)
'COMPOSE_DOCKER_CLI_BUILD': '1' # use the docker cache
})
log.info('Devnet starting') log.info('Devnet starting')
devnet_deploy(paths) devnet_deploy(paths)
...@@ -292,6 +304,10 @@ def wait_for_rpc_server(url): ...@@ -292,6 +304,10 @@ def wait_for_rpc_server(url):
log.info(f'Waiting for RPC server at {url}') log.info(f'Waiting for RPC server at {url}')
time.sleep(1) time.sleep(1)
CommandPreset = namedtuple('Command', ['name', 'args', 'cwd', 'timeout'])
def devnet_test(paths): def devnet_test(paths):
# Check the L2 config # Check the L2 config
run_command( run_command(
...@@ -299,17 +315,57 @@ def devnet_test(paths): ...@@ -299,17 +315,57 @@ def devnet_test(paths):
cwd=paths.ops_chain_ops, cwd=paths.ops_chain_ops,
) )
run_command( # Run the two commands with different signers, so the ethereum nonce management does not conflict
['npx', 'hardhat', 'deposit-erc20', '--network', 'devnetL1', '--l1-contracts-json-path', paths.addresses_json_path], # And do not use devnet system addresses, to avoid breaking fee-estimation or nonce values.
cwd=paths.sdk_dir, run_commands([
timeout=8*60, CommandPreset('erc20-test',
) ['npx', 'hardhat', 'deposit-erc20', '--network', 'devnetL1',
'--l1-contracts-json-path', paths.addresses_json_path, '--signer-index', '14'],
cwd=paths.sdk_dir, timeout=8*60),
CommandPreset('eth-test',
['npx', 'hardhat', 'deposit-eth', '--network', 'devnetL1',
'--l1-contracts-json-path', paths.addresses_json_path, '--signer-index', '15'],
cwd=paths.sdk_dir, timeout=8*60)
], max_workers=2)
def run_commands(commands: list[CommandPreset], max_workers=2):
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
futures = [executor.submit(run_command_preset, cmd) for cmd in commands]
for future in concurrent.futures.as_completed(futures):
result = future.result()
if result:
print(result.stdout)
def run_command_preset(command: CommandPreset):
with subprocess.Popen(command.args, cwd=command.cwd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) as proc:
try:
# Live output processing
for line in proc.stdout:
# Annotate and print the line with timestamp and command name
timestamp = datetime.datetime.utcnow().strftime('%H:%M:%S.%f')
print(f"[{timestamp}][{command.name}] {line}", end='')
stdout, stderr = proc.communicate(timeout=command.timeout)
if proc.returncode != 0:
raise RuntimeError(f"Command '{' '.join(command.args)}' failed with return code {proc.returncode}: {stderr}")
except subprocess.TimeoutExpired:
raise RuntimeError(f"Command '{' '.join(command.args)}' timed out!")
except Exception as e:
raise RuntimeError(f"Error executing '{' '.join(command.args)}': {e}")
finally:
# Ensure process is terminated
proc.kill()
return proc.returncode
run_command(
['npx', 'hardhat', 'deposit-eth', '--network', 'devnetL1', '--l1-contracts-json-path', paths.addresses_json_path],
cwd=paths.sdk_dir,
timeout=8*60,
)
def run_command(args, check=True, shell=False, cwd=None, env=None, timeout=None): def run_command(args, check=True, shell=False, cwd=None, env=None, timeout=None):
env = env if env else {} env = env if env else {}
......
comment: false codecov:
require_ci_to_pass: false
comment:
layout: "diff, flags, files"
behavior: default
require_changes: false
flags:
- contracts-bedrock-tests
ignore: ignore:
- "op-e2e" - "op-e2e"
- "**/*.t.sol"
- "op-bindings/bindings/*.go" - "op-bindings/bindings/*.go"
- "**/*.t.sol"
- "packages/contracts-bedrock/test/**/*.sol"
- "packages/contracts-bedrock/contracts/vendor/WETH9.sol" - "packages/contracts-bedrock/contracts/vendor/WETH9.sol"
- 'packages/contracts-bedrock/contracts/EAS/**/*.sol' - 'packages/contracts-bedrock/contracts/EAS/**/*.sol'
coverage: coverage:
...@@ -13,6 +22,7 @@ coverage: ...@@ -13,6 +22,7 @@ coverage:
threshold: 0% # coverage is not allowed to reduce vs. the PR base threshold: 0% # coverage is not allowed to reduce vs. the PR base
base: auto base: auto
informational: true informational: true
enabled: true
project: project:
default: default:
informational: true informational: true
...@@ -22,7 +32,7 @@ flag_management: ...@@ -22,7 +32,7 @@ flag_management:
individual_flags: individual_flags:
- name: contracts-bedrock-tests - name: contracts-bedrock-tests
paths: paths:
- packages/contracts-bedrock/contracts - packages/contracts-bedrock
statuses: statuses:
- type: patch - type: patch
target: 100% target: 100%
......
variable "REGISTRY" {
default = "us-docker.pkg.dev"
}
variable "REPOSITORY" {
default = "oplabs-tools-artifacts/images"
}
variable "GIT_COMMIT" {
default = "dev"
}
variable "GIT_DATE" {
default = "0"
}
variable "GIT_VERSION" {
default = "docker" // original default as set in proxyd file, not used by full go stack, yet
}
variable "IMAGE_TAGS" {
default = "${GIT_COMMIT}" // split by ","
}
variable "PLATFORMS" {
// You can override this as "linux/amd64,linux/arm64".
// Only specify a single platform when `--load`ing into docker.
// Multi-platform is supported when outputting to disk or pushing to a registry.
// Multi-platform builds can be tested locally with: --set="*.output=type=image,push=false"
default = "linux/amd64"
}
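As a sketch of the override behavior described in the comments above (the variable names come from this file; the chosen target and tag values are illustrative):

```bash
# Default: single-platform build loaded straight into the local docker daemon.
docker buildx bake -f docker-bake.hcl --load op-node

# Multi-platform test build: override PLATFORMS via the environment and keep the
# result as an image without pushing, per the comment above.
PLATFORMS="linux/amd64,linux/arm64" IMAGE_TAGS="dev" \
  docker buildx bake -f docker-bake.hcl --set="*.output=type=image,push=false" op-node
```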
target "op-stack-go" {
dockerfile = "ops/docker/op-stack-go/Dockerfile"
context = "."
args = {
GIT_COMMIT = "${GIT_COMMIT}"
GIT_DATE = "${GIT_DATE}"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-stack-go:${tag}"]
}
target "op-node" {
dockerfile = "Dockerfile"
context = "./op-node"
args = {
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-node:${tag}"]
}
target "op-batcher" {
dockerfile = "Dockerfile"
context = "./op-batcher"
args = {
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-batcher:${tag}"]
}
target "op-proposer" {
dockerfile = "Dockerfile"
context = "./op-proposer"
args = {
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-proposer:${tag}"]
}
target "op-challenger" {
dockerfile = "Dockerfile"
context = "./op-challenger"
args = {
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-challenger:${tag}"]
}
target "op-heartbeat" {
dockerfile = "Dockerfile"
context = "./op-heartbeat"
args = {
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-heartbeat:${tag}"]
}
target "op-program" {
dockerfile = "Dockerfile"
context = "./op-program"
args = {
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-program:${tag}"]
}
target "proxyd" {
dockerfile = "Dockerfile"
context = "./proxyd"
args = {
// proxyd dockerfile has no _ in the args
GITCOMMIT = "${GIT_COMMIT}"
GITDATE = "${GIT_DATE}"
GITVERSION = "${GIT_VERSION}"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/proxyd:${tag}"]
}
target "indexer" {
dockerfile = "./indexer/Dockerfile"
context = "./"
args = {
// as with proxyd, this dockerfile's args have no _ in them
GITCOMMIT = "${GIT_COMMIT}"
GITDATE = "${GIT_DATE}"
GITVERSION = "${GIT_VERSION}"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/indexer:${tag}"]
}
target "ufm-metamask" {
dockerfile = "Dockerfile"
context = "./ufm-test-services/metamask"
args = {
// as with proxyd, this dockerfile's args have no _ in them
GITCOMMIT = "${GIT_COMMIT}"
GITDATE = "${GIT_DATE}"
GITVERSION = "${GIT_VERSION}"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/ufm-metamask:${tag}"]
}
target "chain-mon" {
dockerfile = "./ops/docker/Dockerfile.packages"
context = "."
args = {
// as with proxyd, this dockerfile's args have no _ in them
GITCOMMIT = "${GIT_COMMIT}"
GITDATE = "${GIT_DATE}"
GITVERSION = "${GIT_VERSION}"
}
// this is a multi-stage build, where each stage is a possible output target, but wd-mon is all we publish
target = "wd-mon"
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/chain-mon:${tag}"]
}
target "ci-builder" {
dockerfile = "./ops/docker/ci-builder/Dockerfile"
context = "."
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/ci-builder:${tag}"]
}
package main package main
import ( import (
"fmt"
"os" "os"
opservice "github.com/ethereum-optimism/optimism/op-service"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
...@@ -22,7 +22,7 @@ func main() { ...@@ -22,7 +22,7 @@ func main() {
app := cli.NewApp() app := cli.NewApp()
app.Flags = endpointMonitor.CLIFlags("ENDPOINT_MONITOR") app.Flags = endpointMonitor.CLIFlags("ENDPOINT_MONITOR")
app.Version = fmt.Sprintf("%s-%s-%s", Version, GitCommit, GitDate) app.Version = opservice.FormatVersion(Version, GitCommit, GitDate, "")
app.Name = "endpoint-monitor" app.Name = "endpoint-monitor"
app.Usage = "Endpoint Monitoring Service" app.Usage = "Endpoint Monitoring Service"
app.Description = "" app.Description = ""
......
...@@ -8,15 +8,15 @@ require ( ...@@ -8,15 +8,15 @@ require (
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231018202221-fdba3d104171 github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231026175037-2cff0d130e74
github.com/ethereum/go-ethereum v1.13.1 github.com/ethereum/go-ethereum v1.13.1
github.com/fsnotify/fsnotify v1.6.0 github.com/fsnotify/fsnotify v1.7.0
github.com/go-chi/chi/v5 v5.0.10 github.com/go-chi/chi/v5 v5.0.10
github.com/go-chi/docgen v1.2.0 github.com/go-chi/docgen v1.2.0
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/go-cmp v0.6.0 github.com/google/go-cmp v0.6.0
github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8 github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8
github.com/google/uuid v1.3.1 github.com/google/uuid v1.4.0
github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru/v2 v2.0.5 github.com/hashicorp/golang-lru/v2 v2.0.5
github.com/holiman/uint256 v1.2.3 github.com/holiman/uint256 v1.2.3
...@@ -33,7 +33,7 @@ require ( ...@@ -33,7 +33,7 @@ require (
github.com/multiformats/go-multiaddr v0.12.0 github.com/multiformats/go-multiaddr v0.12.0
github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/olekukonko/tablewriter v0.0.5 github.com/olekukonko/tablewriter v0.0.5
github.com/onsi/gomega v1.28.0 github.com/onsi/gomega v1.29.0
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.7.0 github.com/pkg/profile v1.7.0
github.com/prometheus/client_golang v1.17.0 github.com/prometheus/client_golang v1.17.0
...@@ -44,14 +44,14 @@ require ( ...@@ -44,14 +44,14 @@ require (
golang.org/x/sync v0.4.0 golang.org/x/sync v0.4.0
golang.org/x/term v0.13.0 golang.org/x/term v0.13.0
golang.org/x/time v0.3.0 golang.org/x/time v0.3.0
gorm.io/driver/postgres v1.5.3 gorm.io/driver/postgres v1.5.4
gorm.io/gorm v1.25.5 gorm.io/gorm v1.25.5
) )
require ( require (
github.com/DataDog/zstd v1.5.2 // indirect github.com/DataDog/zstd v1.5.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/VictoriaMetrics/fastcache v1.10.0 // indirect github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/allegro/bigcache v1.2.1 // indirect github.com/allegro/bigcache v1.2.1 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
...@@ -60,13 +60,13 @@ require ( ...@@ -60,13 +60,13 @@ require (
github.com/btcsuite/btcd/btcutil v1.1.0 // indirect github.com/btcsuite/btcd/btcutil v1.1.0 // indirect
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/errors v1.11.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect github.com/cockroachdb/pebble v0.0.0-20231018212520-f6cde3fc2fa4 // indirect
github.com/cockroachdb/redact v1.1.3 // indirect github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.0 // indirect github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
...@@ -82,7 +82,7 @@ require ( ...@@ -82,7 +82,7 @@ require (
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect
github.com/elastic/gosigar v0.14.2 // indirect github.com/elastic/gosigar v0.14.2 // indirect
github.com/ethereum/c-kzg-4844 v0.3.1 // indirect github.com/ethereum/c-kzg-4844 v0.3.1 // indirect
github.com/fatih/color v1.7.0 // indirect github.com/fatih/color v1.13.0 // indirect
github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/fgprof v0.9.3 // indirect
github.com/fjl/memsize v0.0.1 // indirect github.com/fjl/memsize v0.0.1 // indirect
github.com/flynn/noise v1.0.0 // indirect github.com/flynn/noise v1.0.0 // indirect
...@@ -97,7 +97,7 @@ require ( ...@@ -97,7 +97,7 @@ require (
github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/mock v1.6.0 // indirect github.com/golang/mock v1.6.0 // indirect
github.com/golang/protobuf v1.5.3 // indirect github.com/golang/protobuf v1.5.3 // indirect
github.com/google/gopacket v1.1.19 // indirect github.com/google/gopacket v1.1.19 // indirect
...@@ -162,7 +162,7 @@ require ( ...@@ -162,7 +162,7 @@ require (
github.com/multiformats/go-varint v0.0.7 // indirect github.com/multiformats/go-varint v0.0.7 // indirect
github.com/naoina/go-stringutil v0.1.0 // indirect github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 // indirect github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 // indirect
github.com/onsi/ginkgo/v2 v2.12.0 // indirect github.com/onsi/ginkgo/v2 v2.13.0 // indirect
github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
...@@ -209,7 +209,7 @@ require ( ...@@ -209,7 +209,7 @@ require (
rsc.io/tmplfunc v0.0.3 // indirect rsc.io/tmplfunc v0.0.3 // indirect
) )
replace github.com/ethereum/go-ethereum v1.13.1 => github.com/ethereum-optimism/op-geth v1.101301.2-0.20231018201518-63125bd85c80 replace github.com/ethereum/go-ethereum v1.13.1 => github.com/ethereum-optimism/op-geth v1.101303.0-rc.2.0.20231026180835-94fbbd04522e
//replace github.com/ethereum-optimism/superchain-registry/superchain => ../superchain-registry/superchain //replace github.com/ethereum-optimism/superchain-registry/superchain => ../superchain-registry/superchain
//replace github.com/ethereum/go-ethereum v1.13.1 => ../go-ethereum //replace github.com/ethereum/go-ethereum v1.13.1 => ../go-ethereum
...@@ -16,6 +16,7 @@ func newWithdrawalResponse(withdrawals *database.L2BridgeWithdrawalsResponse) mo ...@@ -16,6 +16,7 @@ func newWithdrawalResponse(withdrawals *database.L2BridgeWithdrawalsResponse) mo
item := models.WithdrawalItem{ item := models.WithdrawalItem{
Guid: withdrawal.L2BridgeWithdrawal.TransactionWithdrawalHash.String(), Guid: withdrawal.L2BridgeWithdrawal.TransactionWithdrawalHash.String(),
L2BlockHash: withdrawal.L2BlockHash.String(), L2BlockHash: withdrawal.L2BlockHash.String(),
Timestamp: withdrawal.L2BridgeWithdrawal.Tx.Timestamp,
From: withdrawal.L2BridgeWithdrawal.Tx.FromAddress.String(), From: withdrawal.L2BridgeWithdrawal.Tx.FromAddress.String(),
To: withdrawal.L2BridgeWithdrawal.Tx.ToAddress.String(), To: withdrawal.L2BridgeWithdrawal.Tx.ToAddress.String(),
TransactionHash: withdrawal.L2TransactionHash.String(), TransactionHash: withdrawal.L2TransactionHash.String(),
......
...@@ -153,11 +153,18 @@ func LoadConfig(log log.Logger, path string) (Config, error) { ...@@ -153,11 +153,18 @@ func LoadConfig(log log.Logger, path string) (Config, error) {
data = []byte(os.ExpandEnv(string(data))) data = []byte(os.ExpandEnv(string(data)))
log.Debug("parsed config file", "data", string(data)) log.Debug("parsed config file", "data", string(data))
if _, err := toml.Decode(string(data), &cfg); err != nil { md, err := toml.Decode(string(data), &cfg)
if err != nil {
log.Error("failed to decode config file", "err", err) log.Error("failed to decode config file", "err", err)
return cfg, err return cfg, err
} }
if len(md.Undecoded()) > 0 {
log.Error("unknown fields in config file", "fields", md.Undecoded())
err = fmt.Errorf("unknown fields in config file: %v", md.Undecoded())
return cfg, err
}
if cfg.Chain.Preset == DevnetPresetId { if cfg.Chain.Preset == DevnetPresetId {
preset, err := DevnetPreset() preset, err := DevnetPreset()
if err != nil { if err != nil {
......
...@@ -257,3 +257,49 @@ func TestLocalDevnet(t *testing.T) { ...@@ -257,3 +257,49 @@ func TestLocalDevnet(t *testing.T) {
require.Equal(t, devnetPreset.ChainConfig.L1Contracts, conf.Chain.L1Contracts) require.Equal(t, devnetPreset.ChainConfig.L1Contracts, conf.Chain.L1Contracts)
} }
func TestThrowsOnUnknownKeys(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
tmpfile, err := os.CreateTemp("", "test.toml")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
defer tmpfile.Close()
testData := `
[chain]
unknown_key = 420
preset = 420
[rpcs]
l1-rpc = "https://l1.example.com"
l2-rpc = "https://l2.example.com"
[db]
another_unknownKey = 420
host = "127.0.0.1"
port = 5432
user = "postgres"
password = "postgres"
name = "indexer"
[http]
host = "127.0.0.1"
port = 8080
[metrics]
host = "127.0.0.1"
port = 7300
`
data := []byte(testData)
err = os.WriteFile(tmpfile.Name(), data, 0644)
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
err = tmpfile.Close()
require.NoError(t, err)
_, err = LoadConfig(logger, tmpfile.Name())
require.Error(t, err)
require.Contains(t, err.Error(), "unknown fields in config file")
}
...@@ -186,7 +186,6 @@ func (db *blocksDB) LatestObservedEpoch(fromL1Height *big.Int, maxL1Range uint64 ...@@ -186,7 +186,6 @@ func (db *blocksDB) LatestObservedEpoch(fromL1Height *big.Int, maxL1Range uint64
var header L1BlockHeader var header L1BlockHeader
if fromL1Height != nil { if fromL1Height != nil {
result := db.gorm.Where("number = ?", fromL1Height).Take(&header) result := db.gorm.Where("number = ?", fromL1Height).Take(&header)
// TODO - Embed logging to db
if result.Error != nil { if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) { if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil return nil, nil
...@@ -196,7 +195,8 @@ func (db *blocksDB) LatestObservedEpoch(fromL1Height *big.Int, maxL1Range uint64 ...@@ -196,7 +195,8 @@ func (db *blocksDB) LatestObservedEpoch(fromL1Height *big.Int, maxL1Range uint64
fromTimestamp = header.Timestamp fromTimestamp = header.Timestamp
} else { } else {
result := db.gorm.Order("number desc").Take(&header) // Take the lowest indexed L1 block to compute the lower bound
result := db.gorm.Order("number ASC").Take(&header)
if result.Error != nil { if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) { if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil return nil, nil
...@@ -205,6 +205,7 @@ func (db *blocksDB) LatestObservedEpoch(fromL1Height *big.Int, maxL1Range uint64 ...@@ -205,6 +205,7 @@ func (db *blocksDB) LatestObservedEpoch(fromL1Height *big.Int, maxL1Range uint64
} }
fromL1Height = header.Number fromL1Height = header.Number
fromTimestamp = header.Timestamp
} }
// Upper Bound (lowest timestamp indexed between L1/L2 bounded by `maxL1Range`) // Upper Bound (lowest timestamp indexed between L1/L2 bounded by `maxL1Range`)
......
...@@ -154,12 +154,11 @@ func (db *bridgeTransfersDB) L1BridgeDepositsByAddress(address common.Address, c ...@@ -154,12 +154,11 @@ func (db *bridgeTransfersDB) L1BridgeDepositsByAddress(address common.Address, c
cursorClause = fmt.Sprintf("l1_transaction_deposits.timestamp <= %d", txDeposit.Tx.Timestamp) cursorClause = fmt.Sprintf("l1_transaction_deposits.timestamp <= %d", txDeposit.Tx.Timestamp)
} }
// TODO join with l1_bridged_tokens and l2_bridged_tokens
ethAddressString := predeploys.LegacyERC20ETHAddr.String() ethAddressString := predeploys.LegacyERC20ETHAddr.String()
// Coalesce l1 transaction deposits that are simply ETH sends // Coalesce l1 transaction deposits that are simply ETH sends
ethTransactionDeposits := db.gorm.Model(&L1TransactionDeposit{}) ethTransactionDeposits := db.gorm.Model(&L1TransactionDeposit{})
ethTransactionDeposits = ethTransactionDeposits.Where(&Transaction{FromAddress: address}).Where("data = '0x' AND amount > 0") ethTransactionDeposits = ethTransactionDeposits.Where(&Transaction{FromAddress: address}).Where("amount > 0")
ethTransactionDeposits = ethTransactionDeposits.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = initiated_l1_event_guid") ethTransactionDeposits = ethTransactionDeposits.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = initiated_l1_event_guid")
ethTransactionDeposits = ethTransactionDeposits.Select(` ethTransactionDeposits = ethTransactionDeposits.Select(`
from_address, to_address, amount, data, source_hash AS transaction_source_hash, from_address, to_address, amount, data, source_hash AS transaction_source_hash,
...@@ -283,7 +282,7 @@ func (db *bridgeTransfersDB) L2BridgeWithdrawalsByAddress(address common.Address ...@@ -283,7 +282,7 @@ func (db *bridgeTransfersDB) L2BridgeWithdrawalsByAddress(address common.Address
// Coalesce l2 transaction withdrawals that are simply ETH sends // Coalesce l2 transaction withdrawals that are simply ETH sends
ethTransactionWithdrawals := db.gorm.Model(&L2TransactionWithdrawal{}) ethTransactionWithdrawals := db.gorm.Model(&L2TransactionWithdrawal{})
ethTransactionWithdrawals = ethTransactionWithdrawals.Where(&Transaction{FromAddress: address}).Where(`data = '0x' AND amount > 0`) ethTransactionWithdrawals = ethTransactionWithdrawals.Where(&Transaction{FromAddress: address}).Where("amount > 0")
ethTransactionWithdrawals = ethTransactionWithdrawals.Joins("INNER JOIN l2_contract_events ON l2_contract_events.guid = l2_transaction_withdrawals.initiated_l2_event_guid") ethTransactionWithdrawals = ethTransactionWithdrawals.Joins("INNER JOIN l2_contract_events ON l2_contract_events.guid = l2_transaction_withdrawals.initiated_l2_event_guid")
ethTransactionWithdrawals = ethTransactionWithdrawals.Joins("LEFT JOIN l1_contract_events AS proven_l1_events ON proven_l1_events.guid = l2_transaction_withdrawals.proven_l1_event_guid") ethTransactionWithdrawals = ethTransactionWithdrawals.Joins("LEFT JOIN l1_contract_events AS proven_l1_events ON proven_l1_events.guid = l2_transaction_withdrawals.proven_l1_event_guid")
ethTransactionWithdrawals = ethTransactionWithdrawals.Joins("LEFT JOIN l1_contract_events AS finalized_l1_events ON finalized_l1_events.guid = l2_transaction_withdrawals.finalized_l1_event_guid") ethTransactionWithdrawals = ethTransactionWithdrawals.Joins("LEFT JOIN l1_contract_events AS finalized_l1_events ON finalized_l1_events.guid = l2_transaction_withdrawals.finalized_l1_event_guid")
......
...@@ -58,7 +58,6 @@ services: ...@@ -58,7 +58,6 @@ services:
depends_on: depends_on:
postgres: postgres:
condition: service_healthy condition: service_healthy
depends_on:
migrations: migrations:
condition: service_started condition: service_started
......
...@@ -3,6 +3,7 @@ package etl ...@@ -3,6 +3,7 @@ package etl
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"math/big" "math/big"
"time" "time"
...@@ -105,22 +106,33 @@ func (etl *ETL) processBatch(headers []types.Header) error { ...@@ -105,22 +106,33 @@ func (etl *ETL) processBatch(headers []types.Header) error {
} }
headersWithLog := make(map[common.Hash]bool, len(headers)) headersWithLog := make(map[common.Hash]bool, len(headers))
logs, err := etl.EthClient.FilterLogs(ethereum.FilterQuery{FromBlock: firstHeader.Number, ToBlock: lastHeader.Number, Addresses: etl.contracts}) filterQuery := ethereum.FilterQuery{FromBlock: firstHeader.Number, ToBlock: lastHeader.Number, Addresses: etl.contracts}
logs, err := etl.EthClient.FilterLogs(filterQuery)
if err != nil { if err != nil {
batchLog.Info("failed to extract logs", "err", err) batchLog.Info("failed to extract logs", "err", err)
return err return err
} }
if len(logs) > 0 {
batchLog.Info("detected logs", "size", len(logs)) if logs.ToBlockHeader.Number.Cmp(lastHeader.Number) != 0 {
// Warn and simply wait for the provider to synchronize state
batchLog.Warn("mismatch in FilterLog#ToBlock number", "queried_to_block_number", lastHeader.Number, "reported_to_block_number", logs.ToBlockHeader.Number)
return fmt.Errorf("mismatch in FilterLog#ToBlock number")
} else if logs.ToBlockHeader.Hash() != lastHeader.Hash() {
batchLog.Error("mismatch in FitlerLog#ToBlock block hash!!!", "queried_to_block_hash", lastHeader.Hash().String(), "reported_to_block_hash", logs.ToBlockHeader.Hash().String())
return fmt.Errorf("mismatch in FitlerLog#ToBlock block hash!!!")
}
if len(logs.Logs) > 0 {
batchLog.Info("detected logs", "size", len(logs.Logs))
} }
for i := range logs { for i := range logs.Logs {
log := logs[i] log := logs.Logs[i]
if _, ok := headerMap[log.BlockHash]; !ok { if _, ok := headerMap[log.BlockHash]; !ok {
// NOTE. Definitely an error state if none of the headers were re-orged out in between
// the blocks and logs retrieval operations. Unlikely as long as the confirmation depth has
// been appropriately set or when we get to natively handling reorgs.
batchLog.Error("log found with block hash not in the batch", "block_hash", logs[i].BlockHash, "log_index", logs[i].Index) batchLog.Error("log found with block hash not in the batch", "block_hash", logs.Logs[i].BlockHash, "log_index", logs.Logs[i].Index)
return errors.New("parsed log with a block hash not in the batch") return errors.New("parsed log with a block hash not in the batch")
} }
...@@ -130,6 +142,6 @@ func (etl *ETL) processBatch(headers []types.Header) error { ...@@ -130,6 +142,6 @@ func (etl *ETL) processBatch(headers []types.Header) error {
// ensure we use unique downstream references for the etl batch // ensure we use unique downstream references for the etl batch
headersRef := headers headersRef := headers
etl.etlBatches <- ETLBatch{Logger: batchLog, Headers: headersRef, HeaderMap: headerMap, Logs: logs, HeadersWithLog: headersWithLog} etl.etlBatches <- ETLBatch{Logger: batchLog, Headers: headersRef, HeaderMap: headerMap, Logs: logs.Logs, HeadersWithLog: headersWithLog}
return nil return nil
} }
...@@ -177,26 +177,6 @@ CREATE INDEX IF NOT EXISTS l2_bridge_messages_transaction_withdrawal_hash ON l2_ ...@@ -177,26 +177,6 @@ CREATE INDEX IF NOT EXISTS l2_bridge_messages_transaction_withdrawal_hash ON l2_
CREATE INDEX IF NOT EXISTS l2_bridge_messages_from_address ON l2_bridge_messages(from_address); CREATE INDEX IF NOT EXISTS l2_bridge_messages_from_address ON l2_bridge_messages(from_address);
-- StandardBridge -- StandardBridge
CREATE TABLE IF NOT EXISTS l1_bridged_tokens (
address VARCHAR PRIMARY KEY,
bridge_address VARCHAR NOT NULL,
name VARCHAR NOT NULL,
symbol VARCHAR NOT NULL,
decimals INTEGER NOT NULL CHECK (decimals >= 0 AND decimals <= 18)
);
CREATE TABLE IF NOT EXISTS l2_bridged_tokens (
address VARCHAR PRIMARY KEY,
bridge_address VARCHAR NOT NULL,
-- L1-L2 relationship is 1 to many so this is not necessarily unique
l1_token_address VARCHAR REFERENCES l1_bridged_tokens(address) ON DELETE CASCADE,
name VARCHAR NOT NULL,
symbol VARCHAR NOT NULL,
decimals INTEGER NOT NULL CHECK (decimals >= 0 AND decimals <= 18)
);
CREATE TABLE IF NOT EXISTS l1_bridge_deposits ( CREATE TABLE IF NOT EXISTS l1_bridge_deposits (
transaction_source_hash VARCHAR PRIMARY KEY REFERENCES l1_transaction_deposits(source_hash) ON DELETE CASCADE, transaction_source_hash VARCHAR PRIMARY KEY REFERENCES l1_transaction_deposits(source_hash) ON DELETE CASCADE,
cross_domain_message_hash VARCHAR NOT NULL UNIQUE REFERENCES l1_bridge_messages(message_hash) ON DELETE CASCADE, cross_domain_message_hash VARCHAR NOT NULL UNIQUE REFERENCES l1_bridge_messages(message_hash) ON DELETE CASCADE,
...@@ -204,8 +184,8 @@ CREATE TABLE IF NOT EXISTS l1_bridge_deposits ( ...@@ -204,8 +184,8 @@ CREATE TABLE IF NOT EXISTS l1_bridge_deposits (
-- Deposit information -- Deposit information
from_address VARCHAR NOT NULL, from_address VARCHAR NOT NULL,
to_address VARCHAR NOT NULL, to_address VARCHAR NOT NULL,
local_token_address VARCHAR NOT NULL, -- REFERENCES l1_bridged_tokens(address), uncomment me in future pr local_token_address VARCHAR NOT NULL,
remote_token_address VARCHAR NOT NULL, -- REFERENCES l2_bridged_tokens(address), uncomment me in future pr remote_token_address VARCHAR NOT NULL,
amount UINT256 NOT NULL, amount UINT256 NOT NULL,
data VARCHAR NOT NULL, data VARCHAR NOT NULL,
timestamp INTEGER NOT NULL CHECK (timestamp > 0) timestamp INTEGER NOT NULL CHECK (timestamp > 0)
...@@ -221,8 +201,8 @@ CREATE TABLE IF NOT EXISTS l2_bridge_withdrawals ( ...@@ -221,8 +201,8 @@ CREATE TABLE IF NOT EXISTS l2_bridge_withdrawals (
-- Withdrawal information -- Withdrawal information
from_address VARCHAR NOT NULL, from_address VARCHAR NOT NULL,
to_address VARCHAR NOT NULL, to_address VARCHAR NOT NULL,
local_token_address VARCHAR NOT NULL, -- REFERENCES l2_bridged_tokens(address), uncomment me in future pr local_token_address VARCHAR NOT NULL,
remote_token_address VARCHAR NOT NULL, -- REFERENCES l1_bridged_tokens(address), uncomment me in future pr remote_token_address VARCHAR NOT NULL,
amount UINT256 NOT NULL, amount UINT256 NOT NULL,
data VARCHAR NOT NULL, data VARCHAR NOT NULL,
timestamp INTEGER NOT NULL CHECK (timestamp > 0) timestamp INTEGER NOT NULL CHECK (timestamp > 0)
......
...@@ -39,7 +39,7 @@ type EthClient interface { ...@@ -39,7 +39,7 @@ type EthClient interface {
TxByHash(common.Hash) (*types.Transaction, error) TxByHash(common.Hash) (*types.Transaction, error)
StorageHash(common.Address, *big.Int) (common.Hash, error) StorageHash(common.Address, *big.Int) (common.Hash, error)
FilterLogs(ethereum.FilterQuery) ([]types.Log, error) FilterLogs(ethereum.FilterQuery) (Logs, error)
} }
type clnt struct { type clnt struct {
...@@ -122,15 +122,12 @@ func (c *clnt) BlockHeadersByRange(startHeight, endHeight *big.Int) ([]types.Hea ...@@ -122,15 +122,12 @@ func (c *clnt) BlockHeadersByRange(startHeight, endHeight *big.Int) ([]types.Hea
} }
count := new(big.Int).Sub(endHeight, startHeight).Uint64() + 1 count := new(big.Int).Sub(endHeight, startHeight).Uint64() + 1
headers := make([]types.Header, count)
batchElems := make([]rpc.BatchElem, count) batchElems := make([]rpc.BatchElem, count)
for i := uint64(0); i < count; i++ { for i := uint64(0); i < count; i++ {
height := new(big.Int).Add(startHeight, new(big.Int).SetUint64(i)) height := new(big.Int).Add(startHeight, new(big.Int).SetUint64(i))
batchElems[i] = rpc.BatchElem{ batchElems[i] = rpc.BatchElem{Method: "eth_getBlockByNumber", Args: []interface{}{toBlockNumArg(height), false}, Result: &headers[i]}
Method: "eth_getBlockByNumber",
Args: []interface{}{toBlockNumArg(height), false},
Result: new(types.Header),
Error: nil,
}
} }
ctxwt, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout) ctxwt, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout)
...@@ -144,23 +141,21 @@ func (c *clnt) BlockHeadersByRange(startHeight, endHeight *big.Int) ([]types.Hea ...@@ -144,23 +141,21 @@ func (c *clnt) BlockHeadersByRange(startHeight, endHeight *big.Int) ([]types.Hea
// - Ensure integrity that they build on top of each other // - Ensure integrity that they build on top of each other
// - Truncate out headers that do not exist (endHeight > "latest") // - Truncate out headers that do not exist (endHeight > "latest")
size := 0 size := 0
headers := make([]types.Header, count)
for i, batchElem := range batchElems { for i, batchElem := range batchElems {
if batchElem.Error != nil { if batchElem.Error != nil {
return nil, batchElem.Error if size == 0 {
return nil, batchElem.Error
} else {
break // try to return whatever headers are available
}
} else if batchElem.Result == nil { } else if batchElem.Result == nil {
break break
} }
header, ok := batchElem.Result.(*types.Header) if i > 0 && headers[i].ParentHash != headers[i-1].Hash() {
if !ok { return nil, fmt.Errorf("queried header %s does not follow parent %s", headers[i].Hash(), headers[i-1].Hash())
return nil, fmt.Errorf("unable to transform rpc response %v into types.Header", batchElem.Result)
}
if i > 0 && header.ParentHash != headers[i-1].Hash() {
return nil, fmt.Errorf("queried header %s does not follow parent %s", header.Hash(), headers[i-1].Hash())
} }
headers[i] = *header
size = size + 1 size = size + 1
} }
...@@ -197,19 +192,43 @@ func (c *clnt) StorageHash(address common.Address, blockNumber *big.Int) (common ...@@ -197,19 +192,43 @@ func (c *clnt) StorageHash(address common.Address, blockNumber *big.Int) (common
return proof.StorageHash, nil return proof.StorageHash, nil
} }
// FilterLogs returns logs that fit the query parameters type Logs struct {
func (c *clnt) FilterLogs(query ethereum.FilterQuery) ([]types.Log, error) { Logs []types.Log
ctxwt, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout) ToBlockHeader *types.Header
defer cancel() }
var result []types.Log // FilterLogs returns logs that fit the query parameters. The underlying request is a batch
// request including `eth_getBlockByNumber` to allow the caller to check that the connected
// node has the state necessary to fulfill this request
func (c *clnt) FilterLogs(query ethereum.FilterQuery) (Logs, error) {
arg, err := toFilterArg(query) arg, err := toFilterArg(query)
if err != nil { if err != nil {
return nil, err return Logs{}, err
} }
err = c.rpc.CallContext(ctxwt, &result, "eth_getLogs", arg) var logs []types.Log
return result, err var header types.Header
batchElems := make([]rpc.BatchElem, 2)
batchElems[0] = rpc.BatchElem{Method: "eth_getBlockByNumber", Args: []interface{}{toBlockNumArg(query.ToBlock), false}, Result: &header}
batchElems[1] = rpc.BatchElem{Method: "eth_getLogs", Args: []interface{}{arg}, Result: &logs}
ctxwt, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout)
defer cancel()
err = c.rpc.BatchCallContext(ctxwt, batchElems)
if err != nil {
return Logs{}, err
}
if batchElems[0].Error != nil {
return Logs{}, fmt.Errorf("unable to query for the `FilterQuery#ToBlock` header: %w", batchElems[0].Error)
}
if batchElems[1].Error != nil {
return Logs{}, fmt.Errorf("unable to query logs: %w", batchElems[1].Error)
}
return Logs{Logs: logs, ToBlockHeader: &header}, nil
} }
// Modeled off op-service/client.go. We can refactor this once the client/metrics portion // Modeled off op-service/client.go. We can refactor this once the client/metrics portion
...@@ -262,10 +281,7 @@ func toBlockNumArg(number *big.Int) string { ...@@ -262,10 +281,7 @@ func toBlockNumArg(number *big.Int) string {
} }
func toFilterArg(q ethereum.FilterQuery) (interface{}, error) { func toFilterArg(q ethereum.FilterQuery) (interface{}, error) {
arg := map[string]interface{}{ arg := map[string]interface{}{"address": q.Addresses, "topics": q.Topics}
"address": q.Addresses,
"topics": q.Topics,
}
if q.BlockHash != nil { if q.BlockHash != nil {
arg["blockHash"] = *q.BlockHash arg["blockHash"] = *q.BlockHash
if q.FromBlock != nil || q.ToBlock != nil { if q.FromBlock != nil || q.ToBlock != nil {
......
...@@ -41,7 +41,7 @@ func (m *MockEthClient) StorageHash(address common.Address, blockNumber *big.Int ...@@ -41,7 +41,7 @@ func (m *MockEthClient) StorageHash(address common.Address, blockNumber *big.Int
return args.Get(0).(common.Hash), args.Error(1) return args.Get(0).(common.Hash), args.Error(1)
} }
func (m *MockEthClient) FilterLogs(query ethereum.FilterQuery) ([]types.Log, error) { func (m *MockEthClient) FilterLogs(query ethereum.FilterQuery) (Logs, error) {
args := m.Called(query) args := m.Called(query)
return args.Get(0).([]types.Log), args.Error(1) return args.Get(0).(Logs), args.Error(1)
} }
...@@ -29,7 +29,7 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L1M ...@@ -29,7 +29,7 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L1M
log.Info("detected transaction deposits", "size", len(optimismPortalTxDeposits)) log.Info("detected transaction deposits", "size", len(optimismPortalTxDeposits))
} }
var mintedGWEI = bigint.Zero mintedGWEI := bigint.Zero
portalDeposits := make(map[logKey]*contracts.OptimismPortalTransactionDepositEvent, len(optimismPortalTxDeposits)) portalDeposits := make(map[logKey]*contracts.OptimismPortalTransactionDepositEvent, len(optimismPortalTxDeposits))
transactionDeposits := make([]database.L1TransactionDeposit, len(optimismPortalTxDeposits)) transactionDeposits := make([]database.L1TransactionDeposit, len(optimismPortalTxDeposits))
for i := range optimismPortalTxDeposits { for i := range optimismPortalTxDeposits {
...@@ -44,7 +44,6 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L1M ...@@ -44,7 +44,6 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L1M
GasLimit: depositTx.GasLimit, GasLimit: depositTx.GasLimit,
Tx: depositTx.Tx, Tx: depositTx.Tx,
} }
} }
if len(transactionDeposits) > 0 { if len(transactionDeposits) > 0 {
...@@ -125,6 +124,7 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L1M ...@@ -125,6 +124,7 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L1M
} }
bridgedTokens[initiatedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++ bridgedTokens[initiatedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++
initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash
bridgeDeposits[i] = database.L1BridgeDeposit{ bridgeDeposits[i] = database.L1BridgeDeposit{
TransactionSourceHash: portalDeposit.DepositTx.SourceHash, TransactionSourceHash: portalDeposit.DepositTx.SourceHash,
...@@ -214,10 +214,8 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metrics L1M ...@@ -214,10 +214,8 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metrics L1M
log.Info("detected relayed messages", "size", len(crossDomainRelayedMessages)) log.Info("detected relayed messages", "size", len(crossDomainRelayedMessages))
} }
relayedMessages := make(map[logKey]*contracts.CrossDomainMessengerRelayedMessageEvent, len(crossDomainRelayedMessages))
for i := range crossDomainRelayedMessages { for i := range crossDomainRelayedMessages {
relayed := crossDomainRelayedMessages[i] relayed := crossDomainRelayedMessages[i]
relayedMessages[logKey{BlockHash: relayed.Event.BlockHash, LogIndex: relayed.Event.LogIndex}] = &relayed
message, err := db.BridgeMessages.L2BridgeMessage(relayed.MessageHash) message, err := db.BridgeMessages.L2BridgeMessage(relayed.MessageHash)
if err != nil { if err != nil {
return err return err
...@@ -236,40 +234,21 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metrics L1M ...@@ -236,40 +234,21 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metrics L1M
} }
// (4) L1StandardBridge // (4) L1StandardBridge
// - Nothing actionable on the database. Since the StandardBridge is layered ontop of the
// CrossDomainMessenger, there's no need for any sanity or invariant checks as the previous step
// ensures a relayed message (finalized bridge) can be linked with a sent message (initiated bridge).
finalizedBridges, err := contracts.StandardBridgeFinalizedEvents("l1", l1Contracts.L1StandardBridgeProxy, db, fromHeight, toHeight) finalizedBridges, err := contracts.StandardBridgeFinalizedEvents("l1", l1Contracts.L1StandardBridgeProxy, db, fromHeight, toHeight)
if err != nil { if err != nil {
return err return err
} }
if len(finalizedBridges) > 0 {
log.Info("detected finalized bridge withdrawals", "size", len(finalizedBridges))
}
finalizedTokens := make(map[common.Address]int) finalizedTokens := make(map[common.Address]int)
for i := range finalizedBridges { for i := range finalizedBridges {
// Nothing actionable on the database. However, we can treat the relayed message
// as an invariant by ensuring we can query for a deposit by the same hash
finalizedBridge := finalizedBridges[i] finalizedBridge := finalizedBridges[i]
relayedMessage, ok := relayedMessages[logKey{finalizedBridge.Event.BlockHash, finalizedBridge.Event.LogIndex + 1}]
if !ok {
log.Error("expected RelayedMessage following BridgeFinalized event", "tx_hash", finalizedBridge.Event.TransactionHash.String())
return fmt.Errorf("expected RelayedMessage following BridgeFinalized event. tx_hash = %s", finalizedBridge.Event.TransactionHash.String())
} else if relayedMessage.Event.TransactionHash != finalizedBridge.Event.TransactionHash {
log.Error("correlated events tx hash mismatch", "message_tx_hash", relayedMessage.Event.TransactionHash.String(), "bridge_tx_hash", finalizedBridge.Event.TransactionHash.String())
return fmt.Errorf("correlated events tx hash mismatch")
}
// Since the message hash is computed from the relayed message, this ensures the deposit fields must match
withdrawal, err := db.BridgeTransfers.L2BridgeWithdrawalWithFilter(database.BridgeTransfer{CrossDomainMessageHash: &relayedMessage.MessageHash})
if err != nil {
return err
} else if withdrawal == nil {
log.Error("missing L2StandardBridge withdrawal on L1 finalization", "tx_hash", finalizedBridge.Event.TransactionHash.String())
return fmt.Errorf("missing L2StandardBridge withdrawal on L1 finalization. tx_hash: %s", finalizedBridge.Event.TransactionHash.String())
}
finalizedTokens[finalizedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++ finalizedTokens[finalizedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++
} }
if len(finalizedBridges) > 0 { if len(finalizedBridges) > 0 {
log.Info("detected finalized bridge withdrawals", "size", len(finalizedBridges))
for tokenAddr, size := range finalizedTokens { for tokenAddr, size := range finalizedTokens {
metrics.RecordL1FinalizedBridgeTransfers(tokenAddr, size) metrics.RecordL1FinalizedBridgeTransfers(tokenAddr, size)
} }
......
package bridge package bridge
import ( import (
"errors"
"fmt" "fmt"
"math/big" "math/big"
...@@ -29,7 +28,7 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L2M ...@@ -29,7 +28,7 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L2M
log.Info("detected transaction withdrawals", "size", len(l2ToL1MPMessagesPassed)) log.Info("detected transaction withdrawals", "size", len(l2ToL1MPMessagesPassed))
} }
var withdrawnWEI = bigint.Zero withdrawnWEI := bigint.Zero
messagesPassed := make(map[logKey]*contracts.L2ToL1MessagePasserMessagePassed, len(l2ToL1MPMessagesPassed)) messagesPassed := make(map[logKey]*contracts.L2ToL1MessagePasserMessagePassed, len(l2ToL1MPMessagesPassed))
transactionWithdrawals := make([]database.L2TransactionWithdrawal, len(l2ToL1MPMessagesPassed)) transactionWithdrawals := make([]database.L2TransactionWithdrawal, len(l2ToL1MPMessagesPassed))
for i := range l2ToL1MPMessagesPassed { for i := range l2ToL1MPMessagesPassed {
...@@ -122,8 +121,9 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L2M ...@@ -122,8 +121,9 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L2M
return fmt.Errorf("correlated events tx hash mismatch") return fmt.Errorf("correlated events tx hash mismatch")
} }
initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash
bridgedTokens[initiatedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++ bridgedTokens[initiatedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++
initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash
bridgeWithdrawals[i] = database.L2BridgeWithdrawal{ bridgeWithdrawals[i] = database.L2BridgeWithdrawal{
TransactionWithdrawalHash: messagePassed.WithdrawalHash, TransactionWithdrawalHash: messagePassed.WithdrawalHash,
BridgeTransfer: initiatedBridge.BridgeTransfer, BridgeTransfer: initiatedBridge.BridgeTransfer,
...@@ -158,10 +158,8 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metrics L2M ...@@ -158,10 +158,8 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metrics L2M
log.Info("detected relayed messages", "size", len(crossDomainRelayedMessages)) log.Info("detected relayed messages", "size", len(crossDomainRelayedMessages))
} }
relayedMessages := make(map[logKey]*contracts.CrossDomainMessengerRelayedMessageEvent, len(crossDomainRelayedMessages))
for i := range crossDomainRelayedMessages { for i := range crossDomainRelayedMessages {
relayed := crossDomainRelayedMessages[i] relayed := crossDomainRelayedMessages[i]
relayedMessages[logKey{BlockHash: relayed.Event.BlockHash, LogIndex: relayed.Event.LogIndex}] = &relayed
message, err := db.BridgeMessages.L1BridgeMessage(relayed.MessageHash) message, err := db.BridgeMessages.L1BridgeMessage(relayed.MessageHash)
if err != nil { if err != nil {
return err return err
...@@ -175,45 +173,26 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metrics L2M ...@@ -175,45 +173,26 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metrics L2M
return err return err
} }
} }
if len(relayedMessages) > 0 { if len(crossDomainRelayedMessages) > 0 {
metrics.RecordL2CrossDomainRelayedMessages(len(relayedMessages)) metrics.RecordL2CrossDomainRelayedMessages(len(crossDomainRelayedMessages))
} }
// (2) L2StandardBridge // (2) L2StandardBridge
// - Nothing actionable on the database. Since the StandardBridge is layered ontop of the
// CrossDomainMessenger, there's no need for any sanity or invariant checks as the previous step
// ensures a relayed message (finalized bridge) can be linked with a sent message (initiated bridge).
finalizedBridges, err := contracts.StandardBridgeFinalizedEvents("l2", l2Contracts.L2StandardBridge, db, fromHeight, toHeight) finalizedBridges, err := contracts.StandardBridgeFinalizedEvents("l2", l2Contracts.L2StandardBridge, db, fromHeight, toHeight)
if err != nil { if err != nil {
return err return err
} }
if len(finalizedBridges) > 0 {
log.Info("detected finalized bridge deposits", "size", len(finalizedBridges))
}
finalizedTokens := make(map[common.Address]int) finalizedTokens := make(map[common.Address]int)
for i := range finalizedBridges { for i := range finalizedBridges {
// Nothing actionable on the database. However, we can treat the relayed message
// as an invariant by ensuring we can query for a deposit by the same hash
finalizedBridge := finalizedBridges[i] finalizedBridge := finalizedBridges[i]
relayedMessage, ok := relayedMessages[logKey{finalizedBridge.Event.BlockHash, finalizedBridge.Event.LogIndex + 1}]
if !ok {
log.Error("expected RelayedMessage following BridgeFinalized event", "tx_hash", finalizedBridge.Event.TransactionHash.String())
return fmt.Errorf("expected RelayedMessage following BridgeFinalized event. tx_hash = %s", finalizedBridge.Event.TransactionHash.String())
} else if relayedMessage.Event.TransactionHash != finalizedBridge.Event.TransactionHash {
log.Error("correlated events tx hash mismatch", "message_tx_hash", relayedMessage.Event.TransactionHash.String(), "bridge_tx_hash", finalizedBridge.Event.TransactionHash.String())
return fmt.Errorf("correlated events tx hash mismatch")
}
// Since the message hash is computed from the relayed message, this ensures the withdrawal fields must match
deposit, err := db.BridgeTransfers.L1BridgeDepositWithFilter(database.BridgeTransfer{CrossDomainMessageHash: &relayedMessage.MessageHash})
if err != nil {
return err
} else if deposit == nil {
log.Error("missing L1StandardBridge deposit on L2 finalization", "tx_hash", finalizedBridge.Event.TransactionHash.String())
return errors.New("missing L1StandardBridge deposit on L2 finalization")
}
finalizedTokens[finalizedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++ finalizedTokens[finalizedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++
} }
if len(finalizedBridges) > 0 { if len(finalizedBridges) > 0 {
log.Info("detected finalized bridge deposits", "size", len(finalizedBridges))
for tokenAddr, size := range finalizedTokens { for tokenAddr, size := range finalizedTokens {
metrics.RecordL2FinalizedBridgeTransfers(tokenAddr, size) metrics.RecordL2FinalizedBridgeTransfers(tokenAddr, size)
} }
......
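For readers unfamiliar with the correlation the removed check performed: RelayedMessage events are keyed by (block hash, log index), and the BridgeFinalized event a standard-bridge transfer emits is expected to sit at the immediately preceding log index in the same transaction. Below is a small, self-contained sketch of that pattern; the types and values are illustrative stand-ins, not the indexer's real contracts/database packages.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// logKey mirrors the (block hash, log index) key used by the indexer above.
type logKey struct {
	BlockHash common.Hash
	LogIndex  uint64
}

// event is an illustrative stand-in for the contracts event wrappers.
type event struct {
	BlockHash common.Hash
	LogIndex  uint64
	TxHash    common.Hash
}

func main() {
	relayed := []event{{BlockHash: common.HexToHash("0x01"), LogIndex: 7, TxHash: common.HexToHash("0xaa")}}

	// Index RelayedMessage events by their position in the block.
	byKey := make(map[logKey]event, len(relayed))
	for _, e := range relayed {
		byKey[logKey{e.BlockHash, e.LogIndex}] = e
	}

	// A BridgeFinalized event is expected to be directly followed by the RelayedMessage
	// it corresponds to (same block hash, log index + 1), from the same transaction.
	finalized := event{BlockHash: common.HexToHash("0x01"), LogIndex: 6, TxHash: common.HexToHash("0xaa")}
	match, ok := byKey[logKey{finalized.BlockHash, finalized.LogIndex + 1}]
	if ok && match.TxHash == finalized.TxHash {
		fmt.Println("correlated BridgeFinalized with RelayedMessage")
	}
}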
...@@ -5,7 +5,6 @@ import ( ...@@ -5,7 +5,6 @@ import (
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/indexer/bigint" "github.com/ethereum-optimism/optimism/indexer/bigint"
...@@ -40,8 +39,8 @@ func LegacyL1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri ...@@ -40,8 +39,8 @@ func LegacyL1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri
ctcTxDeposits[logKey{deposit.Event.BlockHash, deposit.Event.LogIndex}] = &deposit ctcTxDeposits[logKey{deposit.Event.BlockHash, deposit.Event.LogIndex}] = &deposit
mintedWEI = new(big.Int).Add(mintedWEI, deposit.Tx.Amount) mintedWEI = new(big.Int).Add(mintedWEI, deposit.Tx.Amount)
// We re-use the L2 Transaction hash as the source hash to remain consistent in the schema.
transactionDeposits[i] = database.L1TransactionDeposit{ transactionDeposits[i] = database.L1TransactionDeposit{
// We re-use the L2 Transaction hash as the source hash to remain consistent in the schema.
SourceHash: deposit.TxHash, SourceHash: deposit.TxHash,
L2TransactionHash: deposit.TxHash, L2TransactionHash: deposit.TxHash,
InitiatedL1EventGUID: deposit.Event.GUID, InitiatedL1EventGUID: deposit.Event.GUID,
...@@ -162,20 +161,22 @@ func LegacyL2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri ...@@ -162,20 +161,22 @@ func LegacyL2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri
log.Info("detected legacy transaction withdrawals (via L2CrossDomainMessenger)", "size", len(crossDomainSentMessages)) log.Info("detected legacy transaction withdrawals (via L2CrossDomainMessenger)", "size", len(crossDomainSentMessages))
} }
type sentMessageEvent struct {
*contracts.CrossDomainMessengerSentMessageEvent
WithdrawalHash common.Hash
}
withdrawnWEI := bigint.Zero withdrawnWEI := bigint.Zero
sentMessages := make(map[logKey]*contracts.CrossDomainMessengerSentMessageEvent, len(crossDomainSentMessages)) sentMessages := make(map[logKey]sentMessageEvent, len(crossDomainSentMessages))
bridgeMessages := make([]database.L2BridgeMessage, len(crossDomainSentMessages)) bridgeMessages := make([]database.L2BridgeMessage, len(crossDomainSentMessages))
transactionWithdrawals := make([]database.L2TransactionWithdrawal, len(crossDomainSentMessages)) transactionWithdrawals := make([]database.L2TransactionWithdrawal, len(crossDomainSentMessages))
for i := range crossDomainSentMessages { for i := range crossDomainSentMessages {
sentMessage := crossDomainSentMessages[i] sentMessage := crossDomainSentMessages[i]
sentMessages[logKey{sentMessage.Event.BlockHash, sentMessage.Event.LogIndex}] = &sentMessage
withdrawnWEI = new(big.Int).Add(withdrawnWEI, sentMessage.BridgeMessage.Tx.Amount) withdrawnWEI = new(big.Int).Add(withdrawnWEI, sentMessage.BridgeMessage.Tx.Amount)
// To ensure consistency in the schema, we duplicate this as the "root" transaction withdrawal. The storage key in the message // We re-use the L2CrossDomainMessenger message hash as the withdrawal hash to remain consistent in the schema.
// passer contract is sha3(calldata + sender). The sender always being the L2CrossDomainMessenger pre-bedrock.
withdrawalHash := crypto.Keccak256Hash(append(sentMessage.MessageCalldata, l2Contracts.L2CrossDomainMessenger[:]...))
transactionWithdrawals[i] = database.L2TransactionWithdrawal{ transactionWithdrawals[i] = database.L2TransactionWithdrawal{
WithdrawalHash: withdrawalHash, WithdrawalHash: sentMessage.BridgeMessage.MessageHash,
InitiatedL2EventGUID: sentMessage.Event.GUID, InitiatedL2EventGUID: sentMessage.Event.GUID,
Nonce: sentMessage.BridgeMessage.Nonce, Nonce: sentMessage.BridgeMessage.Nonce,
GasLimit: sentMessage.BridgeMessage.GasLimit, GasLimit: sentMessage.BridgeMessage.GasLimit,
...@@ -188,8 +189,9 @@ func LegacyL2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri ...@@ -188,8 +189,9 @@ func LegacyL2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri
}, },
} }
sentMessages[logKey{sentMessage.Event.BlockHash, sentMessage.Event.LogIndex}] = sentMessageEvent{&sentMessage, sentMessage.BridgeMessage.MessageHash}
bridgeMessages[i] = database.L2BridgeMessage{ bridgeMessages[i] = database.L2BridgeMessage{
TransactionWithdrawalHash: withdrawalHash, TransactionWithdrawalHash: sentMessage.BridgeMessage.MessageHash,
BridgeMessage: sentMessage.BridgeMessage, BridgeMessage: sentMessage.BridgeMessage,
} }
} }
...@@ -235,7 +237,7 @@ func LegacyL2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri ...@@ -235,7 +237,7 @@ func LegacyL2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri
bridgedTokens[initiatedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++ bridgedTokens[initiatedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++
initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash
l2BridgeWithdrawals[i] = database.L2BridgeWithdrawal{ l2BridgeWithdrawals[i] = database.L2BridgeWithdrawal{
TransactionWithdrawalHash: sentMessage.BridgeMessage.MessageHash, TransactionWithdrawalHash: sentMessage.WithdrawalHash,
BridgeTransfer: initiatedBridge.BridgeTransfer, BridgeTransfer: initiatedBridge.BridgeTransfer,
} }
} }
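For context on the hash change above: pre-bedrock, the withdrawal hash was derived as the legacy message passer storage key, keccak256(relayMessage calldata ++ L2CrossDomainMessenger address); the diff replaces that with the cross-domain message hash so the L2TransactionWithdrawal and L2BridgeMessage rows share one key. A hedged sketch of the two derivations follows; the calldata is illustrative and the message hash here is a stand-in for sentMessage.BridgeMessage.MessageHash.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Illustrative values only. 0x42...07 is the L2CrossDomainMessenger predeploy address.
	messenger := common.HexToAddress("0x4200000000000000000000000000000000000007")
	relayCalldata := []byte{0xde, 0xad, 0xbe, 0xef} // stand-in for sentMessage.MessageCalldata

	// Old derivation (removed above): the legacy message passer storage key,
	// keccak256(calldata ++ sender), the sender being the L2CrossDomainMessenger pre-bedrock.
	legacyWithdrawalHash := crypto.Keccak256Hash(append(relayCalldata, messenger[:]...))

	// New derivation: re-use the cross-domain message hash (a stand-in here) so the
	// withdrawal row and the bridge message row are keyed consistently.
	messageHash := crypto.Keccak256Hash(relayCalldata)

	fmt.Println("legacy withdrawal hash:", legacyWithdrawalHash)
	fmt.Println("schema withdrawal hash:", messageHash)
}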
...@@ -280,7 +282,8 @@ func LegacyL1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metri ...@@ -280,7 +282,8 @@ func LegacyL1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metri
// for OP-Mainnet pre-regenesis withdrawals that no longer exist on L2. // for OP-Mainnet pre-regenesis withdrawals that no longer exist on L2.
tx, err := l1Client.TxByHash(relayedMessage.Event.TransactionHash) tx, err := l1Client.TxByHash(relayedMessage.Event.TransactionHash)
if err != nil { if err != nil {
return err log.Error("unable to query legacy relayed tx", "tx_hash", relayedMessage.Event.TransactionHash.String(), "err", err)
return fmt.Errorf("unable to query legacy relayed tx_hash = %s: %w", relayedMessage.Event.TransactionHash.String(), err)
} else if tx == nil { } else if tx == nil {
log.Error("missing tx for relayed message", "tx_hash", relayedMessage.Event.TransactionHash.String()) log.Error("missing tx for relayed message", "tx_hash", relayedMessage.Event.TransactionHash.String())
return fmt.Errorf("missing tx for relayed message. tx_hash = %s", relayedMessage.Event.TransactionHash.String()) return fmt.Errorf("missing tx for relayed message. tx_hash = %s", relayedMessage.Event.TransactionHash.String())
...@@ -306,7 +309,7 @@ func LegacyL1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metri ...@@ -306,7 +309,7 @@ func LegacyL1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metri
} }
} }
// Mark the associated tx withdrawal as proven/finalized with the same event // Mark the associated tx withdrawal as proven/finalized with the same event. The message hash is also the transaction withdrawal hash
if err := db.BridgeTransactions.MarkL2TransactionWithdrawalProvenEvent(relayedMessage.MessageHash, relayedMessage.Event.GUID); err != nil { if err := db.BridgeTransactions.MarkL2TransactionWithdrawalProvenEvent(relayedMessage.MessageHash, relayedMessage.Event.GUID); err != nil {
log.Error("failed to mark withdrawal as proven", "err", err) log.Error("failed to mark withdrawal as proven", "err", err)
return err return err
...@@ -330,10 +333,12 @@ func LegacyL1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metri ...@@ -330,10 +333,12 @@ func LegacyL1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metri
log.Warn("skipped pre-regensis relayed L2CrossDomainMessenger withdrawals", "size", skippedPreRegenesisMessages) log.Warn("skipped pre-regensis relayed L2CrossDomainMessenger withdrawals", "size", skippedPreRegenesisMessages)
} }
// (2) L2StandardBridge -- no-op for now as there's nothing actionable to do here besides // (2) L1StandardBridge
// sanity checks which are not important for legacy code. Not worth extra code pre-bedrock. // - Nothing actionable on the database. Since the StandardBridge is layered on top of the
// The message status is already tracked via the relayed bridge message through the cross domain messenger. // CrossDomainMessenger, there's no need for any sanity or invariant checks as the previous step
// - NOTE: This means we don't increment metrics for finalized bridge transfers // ensures a relayed message (finalized bridge) can be linked with a sent message (initiated bridge).
// - NOTE: Ignoring metrics for pre-bedrock transfers
// a-ok! // a-ok!
return nil return nil
...@@ -372,10 +377,12 @@ func LegacyL2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metri ...@@ -372,10 +377,12 @@ func LegacyL2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metri
metrics.RecordL2CrossDomainRelayedMessages(len(crossDomainRelayedMessages)) metrics.RecordL2CrossDomainRelayedMessages(len(crossDomainRelayedMessages))
} }
// (2) L2StandardBridge -- no-op for now as there's nothing actionable to do here besides // (2) L2StandardBridge
// sanity checks which are not important for legacy code. Not worth the extra code pre-bedrock. // - Nothing actionable on the database. Since the StandardBridge is layered on top of the
// The message status is already tracked via the relayed bridge message through the cross domain messenger. // CrossDomainMessenger, there's no need for any sanity or invariant checks as the previous step
// - NOTE: This means we don't increment metrics for finalized bridge transfers // ensures a relayed message (finalized bridge) can be linked with a sent message (initiated bridge).
// - NOTE: Ignoring metrics for pre-bedrock transfers
// a-ok! // a-ok!
return nil return nil
......
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op_stack_go:latest ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go # See "make golang-docker" and /ops/docker/op-stack-go
......
...@@ -3,110 +3,32 @@ package batcher ...@@ -3,110 +3,32 @@ package batcher
import ( import (
"context" "context"
"fmt" "fmt"
_ "net/http/pprof"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-batcher/flags" "github.com/ethereum-optimism/optimism/op-batcher/flags"
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-batcher/rpc"
opservice "github.com/ethereum-optimism/optimism/op-service" opservice "github.com/ethereum-optimism/optimism/op-service"
"github.com/ethereum-optimism/optimism/op-service/cliapp"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/opio"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
) )
// Main is the entrypoint into the Batch Submitter. This method returns a // Main is the entrypoint into the Batch Submitter.
// closure that executes the service and blocks until the service exits. The use // This method returns a cliapp.LifecycleAction that creates an op-service CLI-lifecycle-managed batch submitter.
// of a closure allows the parameters bound to the top-level main package, e.g. func Main(version string) cliapp.LifecycleAction {
// GitVersion, to be captured and used once the function is executed. return func(cliCtx *cli.Context, closeApp context.CancelCauseFunc) (cliapp.Lifecycle, error) {
func Main(version string, cliCtx *cli.Context) error { if err := flags.CheckRequired(cliCtx); err != nil {
if err := flags.CheckRequired(cliCtx); err != nil { return nil, err
return err
}
cfg := NewConfig(cliCtx)
if err := cfg.Check(); err != nil {
return fmt.Errorf("invalid CLI flags: %w", err)
}
l := oplog.NewLogger(oplog.AppOut(cliCtx), cfg.LogConfig)
oplog.SetGlobalLogHandler(l.GetHandler())
opservice.ValidateEnvVars(flags.EnvVarPrefix, flags.Flags, l)
procName := "default"
m := metrics.NewMetrics(procName)
l.Info("Initializing Batch Submitter")
batchSubmitter, err := NewBatchSubmitterFromCLIConfig(cfg, l, m)
if err != nil {
l.Error("Unable to create Batch Submitter", "error", err)
return err
}
if !cfg.Stopped {
if err := batchSubmitter.Start(); err != nil {
l.Error("Unable to start Batch Submitter", "error", err)
return err
}
}
defer batchSubmitter.StopIfRunning(context.Background())
pprofConfig := cfg.PprofConfig
if pprofConfig.Enabled {
l.Debug("starting pprof", "addr", pprofConfig.ListenAddr, "port", pprofConfig.ListenPort)
pprofSrv, err := oppprof.StartServer(pprofConfig.ListenAddr, pprofConfig.ListenPort)
if err != nil {
l.Error("failed to start pprof server", "err", err)
return err
} }
l.Info("started pprof server", "addr", pprofSrv.Addr()) cfg := NewConfig(cliCtx)
defer func() { if err := cfg.Check(); err != nil {
if err := pprofSrv.Stop(context.Background()); err != nil { return nil, fmt.Errorf("invalid CLI flags: %w", err)
l.Error("failed to stop pprof server", "err", err)
}
}()
}
metricsCfg := cfg.MetricsConfig
if metricsCfg.Enabled {
l.Debug("starting metrics server", "addr", metricsCfg.ListenAddr, "port", metricsCfg.ListenPort)
metricsSrv, err := m.Start(metricsCfg.ListenAddr, metricsCfg.ListenPort)
if err != nil {
return fmt.Errorf("failed to start metrics server: %w", err)
} }
l.Info("started metrics server", "addr", metricsSrv.Addr())
defer func() {
if err := metricsSrv.Stop(context.Background()); err != nil {
l.Error("failed to stop pprof server", "err", err)
}
}()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
m.StartBalanceMetrics(ctx, l, batchSubmitter.L1Client, batchSubmitter.TxManager.From())
}
server := oprpc.NewServer(
cfg.RPCFlag.ListenAddr,
cfg.RPCFlag.ListenPort,
version,
oprpc.WithLogger(l),
)
if cfg.RPCFlag.EnableAdmin {
adminAPI := rpc.NewAdminAPI(batchSubmitter, &m.RPCMetrics, l)
server.AddAPI(rpc.GetAdminAPI(adminAPI))
l.Info("Admin RPC enabled")
}
if err := server.Start(); err != nil {
return fmt.Errorf("error starting RPC server: %w", err)
}
m.RecordInfo(version) l := oplog.NewLogger(oplog.AppOut(cliCtx), cfg.LogConfig)
m.RecordUp() oplog.SetGlobalLogHandler(l.GetHandler())
opservice.ValidateEnvVars(flags.EnvVarPrefix, flags.Flags, l)
opio.BlockOnInterrupts() l.Info("Initializing Batch Submitter")
if err := server.Stop(); err != nil { return BatcherServiceFromCLIConfig(cliCtx.Context, version, cfg, l)
l.Error("Error shutting down http server: %w", err)
} }
return nil
} }
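The refactor above changes the batcher entrypoint from a blocking Main into a constructor: Main now returns a cliapp.LifecycleAction, and (as the op-batcher main.go change further down shows) cliapp.LifecycleCmd drives start, stop, and signal handling. A rough, self-contained sketch of the shape of that pattern follows; the interface and types are illustrative stand-ins, not the real op-service/cliapp definitions.

package main

import (
	"context"
	"fmt"
)

// lifecycle is an illustrative stand-in for cliapp.Lifecycle: a service that is
// constructed first and then started/stopped by the CLI framework.
type lifecycle interface {
	Start(ctx context.Context) error
	Stop(ctx context.Context) error
}

type batcherService struct{}

func (b *batcherService) Start(ctx context.Context) error { fmt.Println("batcher started"); return nil }
func (b *batcherService) Stop(ctx context.Context) error  { fmt.Println("batcher stopped"); return nil }

// lifecycleAction mirrors the shape of cliapp.LifecycleAction: given a way to shut
// the app down, return a service to run, instead of blocking until exit.
type lifecycleAction func(closeApp context.CancelCauseFunc) (lifecycle, error)

func main() {
	action := lifecycleAction(func(closeApp context.CancelCauseFunc) (lifecycle, error) {
		return &batcherService{}, nil
	})

	// Roughly what the CLI wrapper (cliapp.LifecycleCmd in the real code) does:
	ctx, cancel := context.WithCancelCause(context.Background())
	defer cancel(nil)
	svc, err := action(cancel)
	if err != nil {
		panic(err)
	}
	_ = svc.Start(ctx)
	// ... block until an interrupt or closeApp is called ...
	_ = svc.Stop(context.Background())
}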
...@@ -3,52 +3,17 @@ package batcher ...@@ -3,52 +3,17 @@ package batcher
import ( import (
"time" "time"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-batcher/compressor" "github.com/ethereum-optimism/optimism/op-batcher/compressor"
"github.com/ethereum-optimism/optimism/op-batcher/flags" "github.com/ethereum-optimism/optimism/op-batcher/flags"
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof" oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum-optimism/optimism/op-service/txmgr"
) )
type Config struct {
log log.Logger
metr metrics.Metricer
L1Client *ethclient.Client
L2Client *ethclient.Client
RollupNode *sources.RollupClient
TxManager txmgr.TxManager
NetworkTimeout time.Duration
PollInterval time.Duration
MaxPendingTransactions uint64
// RollupConfig is queried at startup
Rollup *rollup.Config
// Channel builder parameters
Channel ChannelConfig
}
// Check ensures that the [Config] is valid.
func (c *Config) Check() error {
if err := c.Rollup.Check(); err != nil {
return err
}
if err := c.Channel.Check(); err != nil {
return err
}
return nil
}
type CLIConfig struct { type CLIConfig struct {
// L1EthRpc is the HTTP provider URL for L1. // L1EthRpc is the HTTP provider URL for L1.
L1EthRpc string L1EthRpc string
...@@ -92,11 +57,11 @@ type CLIConfig struct { ...@@ -92,11 +57,11 @@ type CLIConfig struct {
MetricsConfig opmetrics.CLIConfig MetricsConfig opmetrics.CLIConfig
PprofConfig oppprof.CLIConfig PprofConfig oppprof.CLIConfig
CompressorConfig compressor.CLIConfig CompressorConfig compressor.CLIConfig
RPCFlag oprpc.CLIConfig RPC oprpc.CLIConfig
} }
func (c CLIConfig) Check() error { func (c *CLIConfig) Check() error {
// TODO: check the sanity of flags loaded directly https://github.com/ethereum-optimism/optimism/issues/7512 // TODO(7512): check the sanity of flags loaded directly https://github.com/ethereum-optimism/optimism/issues/7512
if err := c.MetricsConfig.Check(); err != nil { if err := c.MetricsConfig.Check(); err != nil {
return err return err
...@@ -107,15 +72,15 @@ func (c CLIConfig) Check() error { ...@@ -107,15 +72,15 @@ func (c CLIConfig) Check() error {
if err := c.TxMgrConfig.Check(); err != nil { if err := c.TxMgrConfig.Check(); err != nil {
return err return err
} }
if err := c.RPCFlag.Check(); err != nil { if err := c.RPC.Check(); err != nil {
return err return err
} }
return nil return nil
} }
// NewConfig parses the Config from the provided flags or environment variables. // NewConfig parses the Config from the provided flags or environment variables.
func NewConfig(ctx *cli.Context) CLIConfig { func NewConfig(ctx *cli.Context) *CLIConfig {
return CLIConfig{ return &CLIConfig{
/* Required Flags */ /* Required Flags */
L1EthRpc: ctx.String(flags.L1EthRpcFlag.Name), L1EthRpc: ctx.String(flags.L1EthRpcFlag.Name),
L2EthRpc: ctx.String(flags.L2EthRpcFlag.Name), L2EthRpc: ctx.String(flags.L2EthRpcFlag.Name),
...@@ -133,6 +98,6 @@ func NewConfig(ctx *cli.Context) CLIConfig { ...@@ -133,6 +98,6 @@ func NewConfig(ctx *cli.Context) CLIConfig {
MetricsConfig: opmetrics.ReadCLIConfig(ctx), MetricsConfig: opmetrics.ReadCLIConfig(ctx),
PprofConfig: oppprof.ReadCLIConfig(ctx), PprofConfig: oppprof.ReadCLIConfig(ctx),
CompressorConfig: compressor.ReadCLIConfig(ctx), CompressorConfig: compressor.ReadCLIConfig(ctx),
RPCFlag: oprpc.ReadCLIConfig(ctx), RPC: oprpc.ReadCLIConfig(ctx),
} }
} }
package main package main
import ( import (
"fmt"
"os" "os"
opservice "github.com/ethereum-optimism/optimism/op-service"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-batcher/batcher" "github.com/ethereum-optimism/optimism/op-batcher/batcher"
...@@ -26,11 +26,11 @@ func main() { ...@@ -26,11 +26,11 @@ func main() {
app := cli.NewApp() app := cli.NewApp()
app.Flags = cliapp.ProtectFlags(flags.Flags) app.Flags = cliapp.ProtectFlags(flags.Flags)
app.Version = fmt.Sprintf("%s-%s-%s", Version, GitCommit, GitDate) app.Version = opservice.FormatVersion(Version, GitCommit, GitDate, "")
app.Name = "op-batcher" app.Name = "op-batcher"
app.Usage = "Batch Submitter Service" app.Usage = "Batch Submitter Service"
app.Description = "Service for generating and submitting L2 tx batches to L1" app.Description = "Service for generating and submitting L2 tx batches to L1"
app.Action = curryMain(Version) app.Action = cliapp.LifecycleCmd(batcher.Main(Version))
app.Commands = []*cli.Command{ app.Commands = []*cli.Command{
{ {
Name: "doc", Name: "doc",
...@@ -43,11 +43,3 @@ func main() { ...@@ -43,11 +43,3 @@ func main() {
log.Crit("Application failed", "message", err) log.Crit("Application failed", "message", err)
} }
} }
// curryMain transforms the batcher.Main function into an app.Action
// This is done to capture the Version of the batcher.
func curryMain(version string) func(ctx *cli.Context) error {
return func(ctx *cli.Context) error {
return batcher.Main(version, ctx)
}
}
package metrics package metrics
import ( import (
"context" "io"
"github.com/prometheus/client_golang/prometheus"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/httputil"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics" txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics"
) )
...@@ -28,6 +28,10 @@ type Metricer interface { ...@@ -28,6 +28,10 @@ type Metricer interface {
// Record Tx metrics // Record Tx metrics
txmetrics.TxMetricer txmetrics.TxMetricer
opmetrics.RPCMetricer
StartBalanceMetrics(l log.Logger, client *ethclient.Client, account common.Address) io.Closer
RecordLatestL1Block(l1ref eth.L1BlockRef) RecordLatestL1Block(l1ref eth.L1BlockRef)
RecordL2BlocksLoaded(l2ref eth.L2BlockRef) RecordL2BlocksLoaded(l2ref eth.L2BlockRef)
RecordChannelOpened(id derive.ChannelID, numPendingBlocks int) RecordChannelOpened(id derive.ChannelID, numPendingBlocks int)
...@@ -79,6 +83,9 @@ type Metrics struct { ...@@ -79,6 +83,9 @@ type Metrics struct {
var _ Metricer = (*Metrics)(nil) var _ Metricer = (*Metrics)(nil)
// implements the Registry getter, for metrics HTTP server to hook into
var _ opmetrics.RegistryMetricer = (*Metrics)(nil)
func NewMetrics(procName string) *Metrics { func NewMetrics(procName string) *Metrics {
if procName == "" { if procName == "" {
procName = "default" procName = "default"
...@@ -179,17 +186,16 @@ func NewMetrics(procName string) *Metrics { ...@@ -179,17 +186,16 @@ func NewMetrics(procName string) *Metrics {
} }
} }
func (m *Metrics) Start(host string, port int) (*httputil.HTTPServer, error) { func (m *Metrics) Registry() *prometheus.Registry {
return opmetrics.StartServer(m.registry, host, port) return m.registry
} }
func (m *Metrics) Document() []opmetrics.DocumentedMetric { func (m *Metrics) Document() []opmetrics.DocumentedMetric {
return m.factory.Document() return m.factory.Document()
} }
func (m *Metrics) StartBalanceMetrics(ctx context.Context, func (m *Metrics) StartBalanceMetrics(l log.Logger, client *ethclient.Client, account common.Address) io.Closer {
l log.Logger, client *ethclient.Client, account common.Address) { return opmetrics.LaunchBalanceMetrics(l, m.registry, m.ns, client, account)
opmetrics.LaunchBalanceMetrics(ctx, l, m.registry, m.ns, client, account)
} }
// RecordInfo sets a pseudo-metric that contains versioning and // RecordInfo sets a pseudo-metric that contains versioning and
......
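With the signature change above, balance metrics are no longer tied to a context passed at start: the launcher hands back an io.Closer that the service closes on shutdown (the op-proposer shim later in this diff bridges the old context-based call to the new closer). A minimal sketch of the caller side follows, with a stand-in closer since the real one comes from opmetrics.LaunchBalanceMetrics.

package main

import (
	"fmt"
	"io"
)

// fakeBalanceMetric stands in for whatever the balance-metric launcher returns;
// the only contract callers rely on now is io.Closer.
type fakeBalanceMetric struct{}

func (fakeBalanceMetric) Close() error {
	fmt.Println("balance poller stopped")
	return nil
}

func startBalanceMetrics() io.Closer { return fakeBalanceMetric{} }

func main() {
	balanceMetric := startBalanceMetrics()
	// Close the poller explicitly on shutdown instead of cancelling a shared context.
	defer func() { _ = balanceMetric.Close() }()
	fmt.Println("service running")
}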
package metrics package metrics
import ( import (
"io"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics" txmetrics "github.com/ethereum-optimism/optimism/op-service/txmgr/metrics"
"github.com/ethereum/go-ethereum/core/types"
) )
type noopMetrics struct { type noopMetrics struct {
opmetrics.NoopRefMetrics opmetrics.NoopRefMetrics
txmetrics.NoopTxMetrics txmetrics.NoopTxMetrics
opmetrics.NoopRPCMetrics
} }
var NoopMetrics Metricer = new(noopMetrics) var NoopMetrics Metricer = new(noopMetrics)
...@@ -35,3 +42,6 @@ func (*noopMetrics) RecordChannelTimedOut(derive.ChannelID) {} ...@@ -35,3 +42,6 @@ func (*noopMetrics) RecordChannelTimedOut(derive.ChannelID) {}
func (*noopMetrics) RecordBatchTxSubmitted() {} func (*noopMetrics) RecordBatchTxSubmitted() {}
func (*noopMetrics) RecordBatchTxSuccess() {} func (*noopMetrics) RecordBatchTxSuccess() {}
func (*noopMetrics) RecordBatchTxFailed() {} func (*noopMetrics) RecordBatchTxFailed() {}
func (*noopMetrics) StartBalanceMetrics(log.Logger, *ethclient.Client, common.Address) io.Closer {
return nil
}
...@@ -10,17 +10,17 @@ import ( ...@@ -10,17 +10,17 @@ import (
"github.com/ethereum-optimism/optimism/op-service/rpc" "github.com/ethereum-optimism/optimism/op-service/rpc"
) )
type batcherClient interface { type BatcherDriver interface {
Start() error StartBatchSubmitting() error
Stop(ctx context.Context) error StopBatchSubmitting(ctx context.Context) error
} }
type adminAPI struct { type adminAPI struct {
*rpc.CommonAdminAPI *rpc.CommonAdminAPI
b batcherClient b BatcherDriver
} }
func NewAdminAPI(dr batcherClient, m metrics.RPCMetricer, log log.Logger) *adminAPI { func NewAdminAPI(dr BatcherDriver, m metrics.RPCMetricer, log log.Logger) *adminAPI {
return &adminAPI{ return &adminAPI{
CommonAdminAPI: rpc.NewCommonAdminAPI(m, log), CommonAdminAPI: rpc.NewCommonAdminAPI(m, log),
b: dr, b: dr,
...@@ -35,9 +35,9 @@ func GetAdminAPI(api *adminAPI) gethrpc.API { ...@@ -35,9 +35,9 @@ func GetAdminAPI(api *adminAPI) gethrpc.API {
} }
func (a *adminAPI) StartBatcher(_ context.Context) error { func (a *adminAPI) StartBatcher(_ context.Context) error {
return a.b.Start() return a.b.StartBatchSubmitting()
} }
func (a *adminAPI) StopBatcher(ctx context.Context) error { func (a *adminAPI) StopBatcher(ctx context.Context) error {
return a.b.Stop(ctx) return a.b.StopBatchSubmitting(ctx)
} }
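Because the admin API now depends on the small BatcherDriver interface above rather than a concrete client, anything with StartBatchSubmitting/StopBatchSubmitting can back it, for example a stub in tests. A hedged sketch; the stub type is invented purely for illustration.

package main

import (
	"context"
	"fmt"
)

// BatcherDriver matches the interface consumed by the admin API above.
type BatcherDriver interface {
	StartBatchSubmitting() error
	StopBatchSubmitting(ctx context.Context) error
}

// stubDriver is an illustrative implementation, e.g. for exercising the RPC in tests.
type stubDriver struct{ running bool }

func (s *stubDriver) StartBatchSubmitting() error { s.running = true; return nil }

func (s *stubDriver) StopBatchSubmitting(ctx context.Context) error {
	s.running = false
	return nil
}

func main() {
	var d BatcherDriver = &stubDriver{}
	_ = d.StartBatchSubmitting()
	fmt.Println("running:", d.(*stubDriver).running)
	_ = d.StopBatchSubmitting(context.Background())
	fmt.Println("running:", d.(*stubDriver).running)
}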
...@@ -185,6 +185,8 @@ type DeployConfig struct { ...@@ -185,6 +185,8 @@ type DeployConfig struct {
EIP1559Elasticity uint64 `json:"eip1559Elasticity"` EIP1559Elasticity uint64 `json:"eip1559Elasticity"`
// EIP1559Denominator is the denominator of EIP1559 base fee market. // EIP1559Denominator is the denominator of EIP1559 base fee market.
EIP1559Denominator uint64 `json:"eip1559Denominator"` EIP1559Denominator uint64 `json:"eip1559Denominator"`
// EIP1559DenominatorCanyon is the denominator of EIP1559 base fee market when Canyon is active.
EIP1559DenominatorCanyon uint64 `json:"eip1559DenominatorCanyon"`
// SystemConfigStartBlock represents the block at which the op-node should start syncing // SystemConfigStartBlock represents the block at which the op-node should start syncing
// from. It is an override to set this value on legacy networks where it is not set by // from. It is an override to set this value on legacy networks where it is not set by
// default. It can be removed once all networks have this value set in their storage. // default. It can be removed once all networks have this value set in their storage.
...@@ -318,6 +320,9 @@ func (d *DeployConfig) Check() error { ...@@ -318,6 +320,9 @@ func (d *DeployConfig) Check() error {
if d.EIP1559Denominator == 0 { if d.EIP1559Denominator == 0 {
return fmt.Errorf("%w: EIP1559Denominator cannot be 0", ErrInvalidDeployConfig) return fmt.Errorf("%w: EIP1559Denominator cannot be 0", ErrInvalidDeployConfig)
} }
if d.L2GenesisCanyonTimeOffset != nil && d.EIP1559DenominatorCanyon == 0 {
return fmt.Errorf("%w: EIP1559DenominatorCanyon cannot be 0 if Canyon is activated", ErrInvalidDeployConfig)
}
if d.EIP1559Elasticity == 0 { if d.EIP1559Elasticity == 0 {
return fmt.Errorf("%w: EIP1559Elasticity cannot be 0", ErrInvalidDeployConfig) return fmt.Errorf("%w: EIP1559Elasticity cannot be 0", ErrInvalidDeployConfig)
} }
......
...@@ -31,6 +31,10 @@ func NewL2Genesis(config *DeployConfig, block *types.Block) (*core.Genesis, erro ...@@ -31,6 +31,10 @@ func NewL2Genesis(config *DeployConfig, block *types.Block) (*core.Genesis, erro
if eip1559Denom == 0 { if eip1559Denom == 0 {
eip1559Denom = 50 eip1559Denom = 50
} }
eip1559DenomCanyon := config.EIP1559DenominatorCanyon
if eip1559DenomCanyon == 0 {
eip1559DenomCanyon = 250
}
eip1559Elasticity := config.EIP1559Elasticity eip1559Elasticity := config.EIP1559Elasticity
if eip1559Elasticity == 0 { if eip1559Elasticity == 0 {
eip1559Elasticity = 10 eip1559Elasticity = 10
...@@ -61,8 +65,9 @@ func NewL2Genesis(config *DeployConfig, block *types.Block) (*core.Genesis, erro ...@@ -61,8 +65,9 @@ func NewL2Genesis(config *DeployConfig, block *types.Block) (*core.Genesis, erro
CanyonTime: config.CanyonTime(block.Time()), CanyonTime: config.CanyonTime(block.Time()),
ShanghaiTime: config.CanyonTime(block.Time()), ShanghaiTime: config.CanyonTime(block.Time()),
Optimism: &params.OptimismConfig{ Optimism: &params.OptimismConfig{
EIP1559Denominator: eip1559Denom, EIP1559Denominator: eip1559Denom,
EIP1559Elasticity: eip1559Elasticity, EIP1559Elasticity: eip1559Elasticity,
EIP1559DenominatorCanyon: eip1559DenomCanyon,
}, },
} }
......
...@@ -62,6 +62,7 @@ ...@@ -62,6 +62,7 @@
"governanceTokenOwner": "0x0000000000000000000000000000000000000333", "governanceTokenOwner": "0x0000000000000000000000000000000000000333",
"deploymentWaitConfirmations": 1, "deploymentWaitConfirmations": 1,
"eip1559Denominator": 8, "eip1559Denominator": 8,
"eip1559DenominatorCanyon": 12,
"eip1559Elasticity": 2, "eip1559Elasticity": 2,
"fundDevAccounts": true, "fundDevAccounts": true,
"faultGameAbsolutePrestate": "0x0000000000000000000000000000000000000000000000000000000000000000", "faultGameAbsolutePrestate": "0x0000000000000000000000000000000000000000000000000000000000000000",
......
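For intuition on the new eip1559DenominatorCanyon value: under the EIP-1559 update rule, a completely full block (gasUsed = elasticity * gasTarget) moves the base fee by roughly parentBaseFee * (elasticity - 1) / denominator, so with the devnet elasticity of 2 shown above the maximum per-block swing drops from 1/50 (2%) to 1/250 (0.4%) once Canyon activates. Below is a back-of-the-envelope sketch; it is deliberately simplified, and the real calculation is go-ethereum's eip1559.CalcBaseFee, referenced elsewhere in this diff.

package main

import "fmt"

// maxFullBlockDelta returns the largest single-block base fee increase under the
// simplified EIP-1559 rule parentBaseFee * (gasUsed - gasTarget) / gasTarget / denominator,
// evaluated at a completely full block (gasUsed = elasticity * gasTarget).
func maxFullBlockDelta(parentBaseFee, elasticity, denominator uint64) uint64 {
	return parentBaseFee * (elasticity - 1) / denominator
}

func main() {
	parent := uint64(1_000_000_000) // 1 gwei parent base fee
	fmt.Println("pre-Canyon  (denominator 50): ", maxFullBlockDelta(parent, 2, 50))  // 20000000 wei, i.e. 2%
	fmt.Println("with Canyon (denominator 250):", maxFullBlockDelta(parent, 2, 250)) // 4000000 wei, i.e. 0.4%
}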
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op_stack_go:latest ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go # See "make golang-docker" and /ops/docker/op-stack-go
......
...@@ -5,6 +5,7 @@ import ( ...@@ -5,6 +5,7 @@ import (
"os" "os"
op_challenger "github.com/ethereum-optimism/optimism/op-challenger" op_challenger "github.com/ethereum-optimism/optimism/op-challenger"
opservice "github.com/ethereum-optimism/optimism/op-service"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
...@@ -21,19 +22,7 @@ var ( ...@@ -21,19 +22,7 @@ var (
) )
// VersionWithMeta holds the textual version string including the metadata. // VersionWithMeta holds the textual version string including the metadata.
var VersionWithMeta = func() string { var VersionWithMeta = opservice.FormatVersion(version.Version, GitCommit, GitDate, version.Meta)
v := version.Version
if GitCommit != "" {
v += "-" + GitCommit[:8]
}
if GitDate != "" {
v += "-" + GitDate
}
if version.Meta != "" {
v += "-" + version.Meta
}
return v
}()
func main() { func main() {
args := os.Args args := os.Args
......
...@@ -34,7 +34,7 @@ type OutputTraceProvider struct { ...@@ -34,7 +34,7 @@ type OutputTraceProvider struct {
} }
func NewTraceProvider(ctx context.Context, logger log.Logger, rollupRpc string, gameDepth, prestateBlock, poststateBlock uint64) (*OutputTraceProvider, error) { func NewTraceProvider(ctx context.Context, logger log.Logger, rollupRpc string, gameDepth, prestateBlock, poststateBlock uint64) (*OutputTraceProvider, error) {
rollupClient, err := dial.DialRollupClientWithTimeout(dial.DefaultDialTimeout, logger, rollupRpc) rollupClient, err := dial.DialRollupClientWithTimeout(ctx, dial.DefaultDialTimeout, logger, rollupRpc)
if err != nil { if err != nil {
return nil, err return nil, err
} }
......
...@@ -141,6 +141,7 @@ func (m *gameMonitor) MonitorGames(ctx context.Context) error { ...@@ -141,6 +141,7 @@ func (m *gameMonitor) MonitorGames(ctx context.Context) error {
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
m.l1HeadsSub.Unsubscribe()
return nil return nil
case err, ok := <-m.l1HeadsSub.Err(): case err, ok := <-m.l1HeadsSub.Err():
if !ok { if !ok {
......
...@@ -100,7 +100,8 @@ func TestMonitorGames(t *testing.T) { ...@@ -100,7 +100,8 @@ func TestMonitorGames(t *testing.T) {
defer cancel() defer cancel()
go func() { go func() {
waitErr := wait.For(context.Background(), 100*time.Millisecond, func() (bool, error) { // Wait for the subscription to be created
waitErr := wait.For(context.Background(), 5*time.Second, func() (bool, error) {
return mockHeadSource.sub != nil, nil return mockHeadSource.sub != nil, nil
}) })
require.NoError(t, waitErr) require.NoError(t, waitErr)
......
...@@ -55,7 +55,7 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*Se ...@@ -55,7 +55,7 @@ func NewService(ctx context.Context, logger log.Logger, cfg *config.Config) (*Se
return nil, fmt.Errorf("failed to create the transaction manager: %w", err) return nil, fmt.Errorf("failed to create the transaction manager: %w", err)
} }
l1Client, err := dial.DialEthClientWithTimeout(dial.DefaultDialTimeout, logger, cfg.L1EthRpc) l1Client, err := dial.DialEthClientWithTimeout(ctx, dial.DefaultDialTimeout, logger, cfg.L1EthRpc)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to dial L1: %w", err) return nil, fmt.Errorf("failed to dial L1: %w", err)
} }
......
...@@ -133,7 +133,12 @@ func (m *Metrics) StartBalanceMetrics( ...@@ -133,7 +133,12 @@ func (m *Metrics) StartBalanceMetrics(
client *ethclient.Client, client *ethclient.Client,
account common.Address, account common.Address,
) { ) {
opmetrics.LaunchBalanceMetrics(ctx, l, m.registry, m.ns, client, account) // TODO(7684): util was refactored to close, but ctx is still being used by caller for shutdown
balanceMetric := opmetrics.LaunchBalanceMetrics(l, m.registry, m.ns, client, account)
go func() {
<-ctx.Done()
_ = balanceMetric.Close()
}()
} }
// RecordInfo sets a pseudo-metric that contains versioning and // RecordInfo sets a pseudo-metric that contains versioning and
......
...@@ -67,7 +67,7 @@ func (s *L1Miner) ActL1StartBlock(timeDelta uint64) Action { ...@@ -67,7 +67,7 @@ func (s *L1Miner) ActL1StartBlock(timeDelta uint64) Action {
MixDigest: common.Hash{}, // TODO: maybe randomize this (prev-randao value) MixDigest: common.Hash{}, // TODO: maybe randomize this (prev-randao value)
} }
if s.l1Cfg.Config.IsLondon(header.Number) { if s.l1Cfg.Config.IsLondon(header.Number) {
header.BaseFee = eip1559.CalcBaseFee(s.l1Cfg.Config, parent) header.BaseFee = eip1559.CalcBaseFee(s.l1Cfg.Config, parent, header.Time)
// At the transition, double the gas limit so the gas target is equal to the old gas limit. // At the transition, double the gas limit so the gas target is equal to the old gas limit.
if !s.l1Cfg.Config.IsLondon(parent.Number) { if !s.l1Cfg.Config.IsLondon(parent.Number) {
header.GasLimit = parent.GasLimit * s.l1Cfg.Config.ElasticityMultiplier() header.GasLimit = parent.GasLimit * s.l1Cfg.Config.ElasticityMultiplier()
...@@ -95,14 +95,12 @@ func (s *L1Miner) ActL1IncludeTx(from common.Address) Action { ...@@ -95,14 +95,12 @@ func (s *L1Miner) ActL1IncludeTx(from common.Address) Action {
t.InvalidAction("no tx inclusion when not building l1 block") t.InvalidAction("no tx inclusion when not building l1 block")
return return
} }
i := s.pendingIndices[from] getPendingIndex := func(from common.Address) uint64 {
txs, q := s.eth.TxPool().ContentFrom(from) return s.pendingIndices[from]
if uint64(len(txs)) <= i {
t.Fatalf("no pending txs from %s, and have %d unprocessable queued txs from this account", from, len(q))
} }
tx := txs[i] tx := firstValidTx(t, from, getPendingIndex, s.eth.TxPool().ContentFrom, s.EthClient().NonceAt)
s.IncludeTx(t, tx) s.IncludeTx(t, tx)
s.pendingIndices[from] = i + 1 // won't retry the tx s.pendingIndices[from] = s.pendingIndices[from] + 1 // won't retry the tx
} }
} }
......
package actions package actions
import ( import (
"context"
"errors" "errors"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi" "github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -179,22 +176,8 @@ func (e *L2Engine) ActL2IncludeTx(from common.Address) Action { ...@@ -179,22 +176,8 @@ func (e *L2Engine) ActL2IncludeTx(from common.Address) Action {
return return
} }
var i uint64 tx := firstValidTx(t, from, e.engineApi.PendingIndices, e.eth.TxPool().ContentFrom, e.EthClient().NonceAt)
var txs []*types.Transaction err := e.engineApi.IncludeTx(tx, from)
var q []*types.Transaction
// Wait for the tx to be in the pending tx queue
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err := wait.For(ctx, time.Second, func() (bool, error) {
i = e.engineApi.PendingIndices(from)
txs, q = e.eth.TxPool().ContentFrom(from)
return uint64(len(txs)) > i, nil
})
require.NoError(t, err,
"no pending txs from %s, and have %d unprocessable queued txs from this account: %w", from, len(q), err)
tx := txs[i]
err = e.engineApi.IncludeTx(tx, from)
if errors.Is(err, engineapi.ErrNotBuildingBlock) { if errors.Is(err, engineapi.ErrNotBuildingBlock) {
t.InvalidAction(err.Error()) t.InvalidAction(err.Error())
} else if errors.Is(err, engineapi.ErrUsesTooMuchGas) { } else if errors.Is(err, engineapi.ErrUsesTooMuchGas) {
......
...@@ -127,9 +127,9 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) { ...@@ -127,9 +127,9 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
nextBlockTime := eth.Uint64Quantity(parent.Time) + 2 nextBlockTime := eth.Uint64Quantity(parent.Time) + 2
var w *eth.Withdrawals var w *types.Withdrawals
if sd.RollupCfg.IsCanyon(uint64(nextBlockTime)) { if sd.RollupCfg.IsCanyon(uint64(nextBlockTime)) {
w = &eth.Withdrawals{} w = &types.Withdrawals{}
} }
// Now let's ask the engine to build a block // Now let's ask the engine to build a block
......
package actions
import (
"context"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/require"
)
// firstValidTx finds the first transaction that is valid for inclusion from the specified address.
// It uses a waiter and filtering of already included transactions to avoid race conditions with the async
// updates to the transaction pool.
func firstValidTx(
t Testing,
from common.Address,
pendingIndices func(common.Address) uint64,
contentFrom func(common.Address) ([]*types.Transaction, []*types.Transaction),
nonceAt func(context.Context, common.Address, *big.Int) (uint64, error),
) *types.Transaction {
var i uint64
var txs []*types.Transaction
var q []*types.Transaction
// Wait for the tx to be in the pending tx queue
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err := wait.For(ctx, time.Second, func() (bool, error) {
i = pendingIndices(from)
txs, q = contentFrom(from)
// Remove any transactions that have already been included in the head block
// The tx pool only prunes included transactions async so they may still be in the list
nonce, err := nonceAt(ctx, from, nil)
if err != nil {
return false, err
}
for len(txs) > 0 && txs[0].Nonce() < nonce {
t.Logf("Removing already included transaction from list of length %v", len(txs))
txs = txs[1:]
}
return uint64(len(txs)) > i, nil
})
require.NoError(t, err,
"no pending txs from %s, and have %d unprocessable queued txs from this account: %w", from, len(q), err)
return txs[i]
}
...@@ -3,6 +3,7 @@ package disputegame ...@@ -3,6 +3,7 @@ package disputegame
import ( import (
"context" "context"
"errors" "errors"
"time"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -96,7 +97,9 @@ func (d *DishonestHelper) ExhaustDishonestClaims(ctx context.Context) { ...@@ -96,7 +97,9 @@ func (d *DishonestHelper) ExhaustDishonestClaims(ctx context.Context) {
var numClaimsSeen int64 var numClaimsSeen int64
for { for {
newCount, err := d.WaitForNewClaim(ctx, numClaimsSeen) // Use a short timeout since we don't know whether the challenger will respond,
// and this is only designed for the alphabet game where the response should be fast.
newCount, err := d.waitForNewClaim(ctx, numClaimsSeen, 30*time.Second)
if errors.Is(err, context.DeadlineExceeded) { if errors.Is(err, context.DeadlineExceeded) {
// we assume that the honest challenger has stopped responding // we assume that the honest challenger has stopped responding
// There's nothing to respond to. // There's nothing to respond to.
......
...@@ -18,6 +18,8 @@ import ( ...@@ -18,6 +18,8 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
const defaultTimeout = 5 * time.Minute
type FaultGameHelper struct { type FaultGameHelper struct {
t *testing.T t *testing.T
require *require.Assertions require *require.Assertions
...@@ -42,7 +44,7 @@ func (g *FaultGameHelper) GameDuration(ctx context.Context) time.Duration { ...@@ -42,7 +44,7 @@ func (g *FaultGameHelper) GameDuration(ctx context.Context) time.Duration {
// This does not check that the number of claims is exactly the specified count to avoid intermittent failures // This does not check that the number of claims is exactly the specified count to avoid intermittent failures
// where a challenger posts an additional claim before this method sees the number of claims it was waiting for. // where a challenger posts an additional claim before this method sees the number of claims it was waiting for.
func (g *FaultGameHelper) WaitForClaimCount(ctx context.Context, count int64) { func (g *FaultGameHelper) WaitForClaimCount(ctx context.Context, count int64) {
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute) ctx, cancel := context.WithTimeout(ctx, defaultTimeout)
defer cancel() defer cancel()
err := wait.For(ctx, time.Second, func() (bool, error) { err := wait.For(ctx, time.Second, func() (bool, error) {
actual, err := g.game.ClaimDataLen(&bind.CallOpts{Context: ctx}) actual, err := g.game.ClaimDataLen(&bind.CallOpts{Context: ctx})
...@@ -70,7 +72,7 @@ func (g *FaultGameHelper) MaxDepth(ctx context.Context) int64 { ...@@ -70,7 +72,7 @@ func (g *FaultGameHelper) MaxDepth(ctx context.Context) int64 {
} }
func (g *FaultGameHelper) waitForClaim(ctx context.Context, errorMsg string, predicate func(claim ContractClaim) bool) { func (g *FaultGameHelper) waitForClaim(ctx context.Context, errorMsg string, predicate func(claim ContractClaim) bool) {
timedCtx, cancel := context.WithTimeout(ctx, 2*time.Minute) timedCtx, cancel := context.WithTimeout(ctx, defaultTimeout)
defer cancel() defer cancel()
err := wait.For(timedCtx, time.Second, func() (bool, error) { err := wait.For(timedCtx, time.Second, func() (bool, error) {
count, err := g.game.ClaimDataLen(&bind.CallOpts{Context: timedCtx}) count, err := g.game.ClaimDataLen(&bind.CallOpts{Context: timedCtx})
...@@ -95,7 +97,7 @@ func (g *FaultGameHelper) waitForClaim(ctx context.Context, errorMsg string, pre ...@@ -95,7 +97,7 @@ func (g *FaultGameHelper) waitForClaim(ctx context.Context, errorMsg string, pre
} }
func (g *FaultGameHelper) waitForNoClaim(ctx context.Context, errorMsg string, predicate func(claim ContractClaim) bool) { func (g *FaultGameHelper) waitForNoClaim(ctx context.Context, errorMsg string, predicate func(claim ContractClaim) bool) {
timedCtx, cancel := context.WithTimeout(ctx, 3*time.Minute) timedCtx, cancel := context.WithTimeout(ctx, defaultTimeout)
defer cancel() defer cancel()
err := wait.For(timedCtx, time.Second, func() (bool, error) { err := wait.For(timedCtx, time.Second, func() (bool, error) {
count, err := g.game.ClaimDataLen(&bind.CallOpts{Context: timedCtx}) count, err := g.game.ClaimDataLen(&bind.CallOpts{Context: timedCtx})
...@@ -193,7 +195,7 @@ func (g *FaultGameHelper) Status(ctx context.Context) Status { ...@@ -193,7 +195,7 @@ func (g *FaultGameHelper) Status(ctx context.Context) Status {
func (g *FaultGameHelper) WaitForGameStatus(ctx context.Context, expected Status) { func (g *FaultGameHelper) WaitForGameStatus(ctx context.Context, expected Status) {
g.t.Logf("Waiting for game %v to have status %v", g.addr, expected) g.t.Logf("Waiting for game %v to have status %v", g.addr, expected)
timedCtx, cancel := context.WithTimeout(ctx, time.Minute) timedCtx, cancel := context.WithTimeout(ctx, defaultTimeout)
defer cancel() defer cancel()
err := wait.For(timedCtx, time.Second, func() (bool, error) { err := wait.For(timedCtx, time.Second, func() (bool, error) {
ctx, cancel := context.WithTimeout(timedCtx, 30*time.Second) ctx, cancel := context.WithTimeout(timedCtx, 30*time.Second)
...@@ -302,7 +304,10 @@ func (g *FaultGameHelper) ChallengeRootClaim(ctx context.Context, performMove Mo ...@@ -302,7 +304,10 @@ func (g *FaultGameHelper) ChallengeRootClaim(ctx context.Context, performMove Mo
} }
func (g *FaultGameHelper) WaitForNewClaim(ctx context.Context, checkPoint int64) (int64, error) { func (g *FaultGameHelper) WaitForNewClaim(ctx context.Context, checkPoint int64) (int64, error) {
timedCtx, cancel := context.WithTimeout(ctx, 2*time.Minute) return g.waitForNewClaim(ctx, checkPoint, defaultTimeout)
}
func (g *FaultGameHelper) waitForNewClaim(ctx context.Context, checkPoint int64, timeout time.Duration) (int64, error) {
timedCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel() defer cancel()
var newClaimLen int64 var newClaimLen int64
err := wait.For(timedCtx, time.Second, func() (bool, error) { err := wait.For(timedCtx, time.Second, func() (bool, error) {
......
package geth package geth
import ( import (
"math/rand"
"time" "time"
"github.com/ethereum-optimism/optimism/op-service/clock"
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -12,6 +12,9 @@ import ( ...@@ -12,6 +12,9 @@ import (
"github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/clock"
"github.com/ethereum-optimism/optimism/op-service/testutils"
) )
// fakePoS is a testing-only utility to attach to Geth, // fakePoS is a testing-only utility to attach to Geth,
...@@ -22,6 +25,8 @@ type fakePoS struct { ...@@ -22,6 +25,8 @@ type fakePoS struct {
log log.Logger log log.Logger
blockTime uint64 blockTime uint64
withdrawalsIndex uint64
finalizedDistance uint64 finalizedDistance uint64
safeDistance uint64 safeDistance uint64
...@@ -33,6 +38,7 @@ func (f *fakePoS) Start() error { ...@@ -33,6 +38,7 @@ func (f *fakePoS) Start() error {
if advancing, ok := f.clock.(*clock.AdvancingClock); ok { if advancing, ok := f.clock.(*clock.AdvancingClock); ok {
advancing.Start() advancing.Start()
} }
withdrawalsRNG := rand.New(rand.NewSource(450368975843)) // avoid generating the same address as any test
f.sub = event.NewSubscription(func(quit <-chan struct{}) error { f.sub = event.NewSubscription(func(quit <-chan struct{}) error {
// poll every half a second: enough to catch up with any block time when ticks are missed // poll every half a second: enough to catch up with any block time when ticks are missed
t := f.clock.NewTicker(time.Second / 2) t := f.clock.NewTicker(time.Second / 2)
...@@ -64,6 +70,17 @@ func (f *fakePoS) Start() error { ...@@ -64,6 +70,17 @@ func (f *fakePoS) Start() error {
// We're a long way behind, let's skip some blocks... // We're a long way behind, let's skip some blocks...
newBlockTime = uint64(f.clock.Now().Unix()) newBlockTime = uint64(f.clock.Now().Unix())
} }
// create some random withdrawals
withdrawals := make([]*types.Withdrawal, withdrawalsRNG.Intn(4))
for i := 0; i < len(withdrawals); i++ {
withdrawals[i] = &types.Withdrawal{
Index: f.withdrawalsIndex + uint64(i),
Validator: withdrawalsRNG.Uint64() % 100_000_000, // 100 million fake validators
Address: testutils.RandomAddress(withdrawalsRNG),
// in gwei, consensus-layer quirk. withdraw non-zero value up to 50 ETH
Amount: uint64(withdrawalsRNG.Intn(50_000_000_000) + 1),
}
}
res, err := f.engineAPI.ForkchoiceUpdatedV2(engine.ForkchoiceStateV1{ res, err := f.engineAPI.ForkchoiceUpdatedV2(engine.ForkchoiceStateV1{
HeadBlockHash: head.Hash(), HeadBlockHash: head.Hash(),
SafeBlockHash: safe.Hash(), SafeBlockHash: safe.Hash(),
...@@ -72,7 +89,7 @@ func (f *fakePoS) Start() error { ...@@ -72,7 +89,7 @@ func (f *fakePoS) Start() error {
Timestamp: newBlockTime, Timestamp: newBlockTime,
Random: common.Hash{}, Random: common.Hash{},
SuggestedFeeRecipient: head.Coinbase, SuggestedFeeRecipient: head.Coinbase,
Withdrawals: make([]*types.Withdrawal, 0), Withdrawals: withdrawals,
}) })
if err != nil { if err != nil {
f.log.Error("failed to start building L1 block", "err", err) f.log.Error("failed to start building L1 block", "err", err)
...@@ -109,6 +126,10 @@ func (f *fakePoS) Start() error { ...@@ -109,6 +126,10 @@ func (f *fakePoS) Start() error {
f.log.Error("failed to make built L1 block canonical", "err", err) f.log.Error("failed to make built L1 block canonical", "err", err)
continue continue
} }
// Increment global withdrawals index in the CL.
// The EL doesn't really care about the value,
// but it's nice to mock something consistent with the CL specs.
f.withdrawalsIndex += uint64(len(withdrawals))
case <-quit: case <-quit:
return nil return nil
} }
......
...@@ -2,6 +2,7 @@ package op_e2e ...@@ -2,6 +2,7 @@ package op_e2e
import ( import (
"encoding/json" "encoding/json"
"errors"
"math/big" "math/big"
"os" "os"
"os/exec" "os/exec"
...@@ -51,6 +52,11 @@ func (eec *ExternalEthClient) Close() error { ...@@ -51,6 +52,11 @@ func (eec *ExternalEthClient) Close() error {
select { select {
case <-time.After(5 * time.Second): case <-time.After(5 * time.Second):
eec.Session.Kill() eec.Session.Kill()
select {
case <-time.After(30 * time.Second):
return errors.New("external client failed to terminate")
case <-eec.Session.Exited:
}
case <-eec.Session.Exited: case <-eec.Session.Exited:
} }
return nil return nil
......
...@@ -18,7 +18,7 @@ import ( ...@@ -18,7 +18,7 @@ import (
) )
func TestMultipleCannonGames(t *testing.T) { func TestMultipleCannonGames(t *testing.T) {
InitParallel(t) InitParallel(t, UsesCannon)
ctx := context.Background() ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t) sys, l1Client := startFaultDisputeSystem(t)
...@@ -78,7 +78,7 @@ func TestMultipleCannonGames(t *testing.T) { ...@@ -78,7 +78,7 @@ func TestMultipleCannonGames(t *testing.T) {
} }
func TestMultipleGameTypes(t *testing.T) { func TestMultipleGameTypes(t *testing.T) {
InitParallel(t) InitParallel(t, UsesCannon)
ctx := context.Background() ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t) sys, l1Client := startFaultDisputeSystem(t)
...@@ -277,7 +277,7 @@ func TestChallengerCompleteExhaustiveDisputeGame(t *testing.T) { ...@@ -277,7 +277,7 @@ func TestChallengerCompleteExhaustiveDisputeGame(t *testing.T) {
} }
func TestCannonDisputeGame(t *testing.T) { func TestCannonDisputeGame(t *testing.T) {
InitParallel(t) InitParallel(t, UsesCannon)
tests := []struct { tests := []struct {
name string name string
...@@ -328,7 +328,7 @@ func TestCannonDisputeGame(t *testing.T) { ...@@ -328,7 +328,7 @@ func TestCannonDisputeGame(t *testing.T) {
} }
func TestCannonDefendStep(t *testing.T) { func TestCannonDefendStep(t *testing.T) {
InitParallel(t) InitParallel(t, UsesCannon)
ctx := context.Background() ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t) sys, l1Client := startFaultDisputeSystem(t)
...@@ -370,7 +370,7 @@ func TestCannonDefendStep(t *testing.T) { ...@@ -370,7 +370,7 @@ func TestCannonDefendStep(t *testing.T) {
} }
func TestCannonProposedOutputRootInvalid(t *testing.T) { func TestCannonProposedOutputRootInvalid(t *testing.T) {
InitParallel(t) InitParallel(t, UsesCannon)
// honestStepsFail attempts to perform both an attack and defend step using the correct trace. // honestStepsFail attempts to perform both an attack and defend step using the correct trace.
honestStepsFail := func(ctx context.Context, game *disputegame.CannonGameHelper, correctTrace *disputegame.HonestHelper, parentClaimIdx int64) { honestStepsFail := func(ctx context.Context, game *disputegame.CannonGameHelper, correctTrace *disputegame.HonestHelper, parentClaimIdx int64) {
// Attack step should fail // Attack step should fail
...@@ -448,7 +448,7 @@ func TestCannonProposedOutputRootInvalid(t *testing.T) { ...@@ -448,7 +448,7 @@ func TestCannonProposedOutputRootInvalid(t *testing.T) {
} }
func TestCannonPoisonedPostState(t *testing.T) { func TestCannonPoisonedPostState(t *testing.T) {
InitParallel(t) InitParallel(t, UsesCannon)
ctx := context.Background() ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t) sys, l1Client := startFaultDisputeSystem(t)
...@@ -558,7 +558,7 @@ func setupDisputeGameForInvalidOutputRoot(t *testing.T, outputRoot common.Hash) ...@@ -558,7 +558,7 @@ func setupDisputeGameForInvalidOutputRoot(t *testing.T, outputRoot common.Hash)
} }
func TestCannonChallengeWithCorrectRoot(t *testing.T) { func TestCannonChallengeWithCorrectRoot(t *testing.T) {
InitParallel(t) InitParallel(t, UsesCannon)
ctx := context.Background() ctx := context.Background()
sys, l1Client := startFaultDisputeSystem(t) sys, l1Client := startFaultDisputeSystem(t)
......
...@@ -7,9 +7,18 @@ import ( ...@@ -7,9 +7,18 @@ import (
var enableParallelTesting bool = os.Getenv("OP_E2E_DISABLE_PARALLEL") != "true" var enableParallelTesting bool = os.Getenv("OP_E2E_DISABLE_PARALLEL") != "true"
func InitParallel(t *testing.T) { func InitParallel(t *testing.T, opts ...func(t *testing.T)) {
t.Helper() t.Helper()
if enableParallelTesting { if enableParallelTesting {
t.Parallel() t.Parallel()
} }
for _, opt := range opts {
opt(t)
}
}
func UsesCannon(t *testing.T) {
if os.Getenv("OP_E2E_CANNON_ENABLED") == "false" {
t.Skip("Skipping cannon test")
}
} }
...@@ -102,7 +102,7 @@ func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, e ...@@ -102,7 +102,7 @@ func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, e
) )
require.Nil(t, err) require.Nil(t, err)
l2Client, err := ethclient.Dial(node.HTTPEndpoint()) l2Client, err := ethclient.Dial(selectEndpoint(node))
require.Nil(t, err) require.Nil(t, err)
genesisPayload, err := eth.BlockAsPayload(l2GenesisBlock, cfg.DeployConfig.CanyonTime(l2GenesisBlock.Time())) genesisPayload, err := eth.BlockAsPayload(l2GenesisBlock, cfg.DeployConfig.CanyonTime(l2GenesisBlock.Time()))
...@@ -210,9 +210,9 @@ func (d *OpGeth) CreatePayloadAttributes(txs ...*types.Transaction) (*eth.Payloa ...@@ -210,9 +210,9 @@ func (d *OpGeth) CreatePayloadAttributes(txs ...*types.Transaction) (*eth.Payloa
txBytes = append(txBytes, bin) txBytes = append(txBytes, bin)
} }
var withdrawals *eth.Withdrawals var withdrawals *types.Withdrawals
if d.L2ChainConfig.IsCanyon(uint64(timestamp)) { if d.L2ChainConfig.IsCanyon(uint64(timestamp)) {
withdrawals = &eth.Withdrawals{} withdrawals = &types.Withdrawals{}
} }
attrs := eth.PayloadAttributes{ attrs := eth.PayloadAttributes{
......
...@@ -823,7 +823,7 @@ func TestCanyon(t *testing.T) { ...@@ -823,7 +823,7 @@ func TestCanyon(t *testing.T) {
b, err := opGeth.AddL2Block(ctx) b, err := opGeth.AddL2Block(ctx)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, *b.Withdrawals, eth.Withdrawals{}) assert.Equal(t, *b.Withdrawals, types.Withdrawals{})
l1Block, err := opGeth.L2Client.BlockByNumber(ctx, nil) l1Block, err := opGeth.L2Client.BlockByNumber(ctx, nil)
require.Nil(t, err) require.Nil(t, err)
......
...@@ -4,6 +4,7 @@ import ( ...@@ -4,6 +4,7 @@ import (
"context" "context"
"crypto/ecdsa" "crypto/ecdsa"
"crypto/rand" "crypto/rand"
"errors"
"fmt" "fmt"
"math/big" "math/big"
"net" "net"
...@@ -37,7 +38,6 @@ import ( ...@@ -37,7 +38,6 @@ import (
bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" bss "github.com/ethereum-optimism/optimism/op-batcher/batcher"
"github.com/ethereum-optimism/optimism/op-batcher/compressor" "github.com/ethereum-optimism/optimism/op-batcher/compressor"
batchermetrics "github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys" "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-e2e/config" "github.com/ethereum-optimism/optimism/op-e2e/config"
...@@ -254,7 +254,7 @@ type System struct { ...@@ -254,7 +254,7 @@ type System struct {
RawClients map[string]*rpc.Client RawClients map[string]*rpc.Client
RollupNodes map[string]*rollupNode.OpNode RollupNodes map[string]*rollupNode.OpNode
L2OutputSubmitter *l2os.L2OutputSubmitter L2OutputSubmitter *l2os.L2OutputSubmitter
BatchSubmitter *bss.BatchSubmitter BatchSubmitter *bss.BatcherService
Mocknet mocknet.Mocknet Mocknet mocknet.Mocknet
// TimeTravelClock is nil unless SystemConfig.SupportL1TimeTravel was set to true // TimeTravelClock is nil unless SystemConfig.SupportL1TimeTravel was set to true
...@@ -270,18 +270,16 @@ func (sys *System) NodeEndpoint(name string) string { ...@@ -270,18 +270,16 @@ func (sys *System) NodeEndpoint(name string) string {
} }
func (sys *System) Close() { func (sys *System) Close() {
postCtx, postCancel := context.WithCancel(context.Background())
postCancel() // immediate shutdown, no allowance for idling
if sys.L2OutputSubmitter != nil { if sys.L2OutputSubmitter != nil {
sys.L2OutputSubmitter.Stop() sys.L2OutputSubmitter.Stop()
} }
if sys.BatchSubmitter != nil { if sys.BatchSubmitter != nil {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) _ = sys.BatchSubmitter.Kill()
defer cancel()
sys.BatchSubmitter.StopIfRunning(ctx)
} }
postCtx, postCancel := context.WithCancel(context.Background())
postCancel() // immediate shutdown, no allowance for idling
for _, node := range sys.RollupNodes { for _, node := range sys.RollupNodes {
_ = node.Stop(postCtx) _ = node.Stop(postCtx)
} }
...@@ -681,8 +679,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste ...@@ -681,8 +679,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
return nil, fmt.Errorf("unable to start l2 output submitter: %w", err) return nil, fmt.Errorf("unable to start l2 output submitter: %w", err)
} }
// Batch Submitter batcherCLIConfig := &bss.CLIConfig{
sys.BatchSubmitter, err = bss.NewBatchSubmitterFromCLIConfig(bss.CLIConfig{
L1EthRpc: sys.EthInstances["l1"].WSEndpoint(), L1EthRpc: sys.EthInstances["l1"].WSEndpoint(),
L2EthRpc: sys.EthInstances["sequencer"].WSEndpoint(), L2EthRpc: sys.EthInstances["sequencer"].WSEndpoint(),
RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(), RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(),
...@@ -701,17 +698,17 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste ...@@ -701,17 +698,17 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
Level: log.LvlInfo, Level: log.LvlInfo,
Format: oplog.FormatText, Format: oplog.FormatText,
}, },
}, sys.cfg.Loggers["batcher"], batchermetrics.NoopMetrics) Stopped: sys.cfg.DisableBatcher, // Batch submitter may be enabled later
}
// Batch Submitter
batcher, err := bss.BatcherServiceFromCLIConfig(context.Background(), "0.0.1", batcherCLIConfig, sys.cfg.Loggers["batcher"])
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to setup batch submitter: %w", err) return nil, fmt.Errorf("failed to setup batch submitter: %w", err)
} }
if err := batcher.Start(context.Background()); err != nil {
// Batcher may be enabled later return nil, errors.Join(fmt.Errorf("failed to start batch submitter: %w", err), batcher.Stop(context.Background()))
if !sys.cfg.DisableBatcher {
if err := sys.BatchSubmitter.Start(); err != nil {
return nil, fmt.Errorf("unable to start batch submitter: %w", err)
}
} }
sys.BatchSubmitter = batcher
return sys, nil return sys, nil
} }
...@@ -761,9 +758,12 @@ func (sys *System) newMockNetPeer() (host.Host, error) { ...@@ -761,9 +758,12 @@ func (sys *System) newMockNetPeer() (host.Host, error) {
return sys.Mocknet.AddPeerWithPeerstore(p, eps) return sys.Mocknet.AddPeerWithPeerstore(p, eps)
} }
func UseHTTP() bool {
return os.Getenv("OP_E2E_USE_HTTP") == "true"
}
func selectEndpoint(node EthInstance) string { func selectEndpoint(node EthInstance) string {
useHTTP := os.Getenv("OP_E2E_USE_HTTP") == "true" if UseHTTP() {
if useHTTP {
log.Info("using HTTP client") log.Info("using HTTP client")
return node.HTTPEndpoint() return node.HTTPEndpoint()
} }
...@@ -788,9 +788,8 @@ type WSOrHTTPEndpoint interface { ...@@ -788,9 +788,8 @@ type WSOrHTTPEndpoint interface {
} }
func configureL2(rollupNodeCfg *rollupNode.Config, l2Node WSOrHTTPEndpoint, jwtSecret [32]byte) { func configureL2(rollupNodeCfg *rollupNode.Config, l2Node WSOrHTTPEndpoint, jwtSecret [32]byte) {
useHTTP := os.Getenv("OP_E2E_USE_HTTP") == "true"
l2EndpointConfig := l2Node.WSAuthEndpoint() l2EndpointConfig := l2Node.WSAuthEndpoint()
if useHTTP { if UseHTTP() {
l2EndpointConfig = l2Node.HTTPAuthEndpoint() l2EndpointConfig = l2Node.HTTPAuthEndpoint()
} }
......
...@@ -93,7 +93,7 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) { ...@@ -93,7 +93,7 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) {
l2OutputRoot := agreedL2Output.OutputRoot l2OutputRoot := agreedL2Output.OutputRoot
t.Log("=====Stopping batch submitter=====") t.Log("=====Stopping batch submitter=====")
err = sys.BatchSubmitter.Stop(ctx) err = sys.BatchSubmitter.Driver().StopBatchSubmitting(ctx)
require.NoError(t, err, "could not stop batch submitter") require.NoError(t, err, "could not stop batch submitter")
// Wait for the sequencer to catch up with the current L1 head so we know all submitted batches are processed // Wait for the sequencer to catch up with the current L1 head so we know all submitted batches are processed
...@@ -121,7 +121,7 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) { ...@@ -121,7 +121,7 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) {
l2Claim := l2Output.OutputRoot l2Claim := l2Output.OutputRoot
t.Log("=====Restarting batch submitter=====") t.Log("=====Restarting batch submitter=====")
err = sys.BatchSubmitter.Start() err = sys.BatchSubmitter.Driver().StartBatchSubmitting()
require.NoError(t, err, "could not start batch submitter") require.NoError(t, err, "could not start batch submitter")
t.Log("Add a transaction to the next batch after sequence of empty blocks") t.Log("Add a transaction to the next batch after sequence of empty blocks")
...@@ -258,7 +258,7 @@ func testFaultProofProgramScenario(t *testing.T, ctx context.Context, sys *Syste ...@@ -258,7 +258,7 @@ func testFaultProofProgramScenario(t *testing.T, ctx context.Context, sys *Syste
t.Log("Shutting down network") t.Log("Shutting down network")
// Shutdown the nodes from the actual chain. Should now be able to run using only the pre-fetched data. // Shutdown the nodes from the actual chain. Should now be able to run using only the pre-fetched data.
sys.BatchSubmitter.StopIfRunning(context.Background()) require.NoError(t, sys.BatchSubmitter.Kill())
sys.L2OutputSubmitter.Stop() sys.L2OutputSubmitter.Stop()
sys.L2OutputSubmitter = nil sys.L2OutputSubmitter = nil
for _, node := range sys.EthInstances { for _, node := range sys.EthInstances {
......
...@@ -1266,7 +1266,7 @@ func TestStopStartBatcher(t *testing.T) { ...@@ -1266,7 +1266,7 @@ func TestStopStartBatcher(t *testing.T) {
require.Greater(t, newSeqStatus.SafeL2.Number, seqStatus.SafeL2.Number, "Safe chain did not advance") require.Greater(t, newSeqStatus.SafeL2.Number, seqStatus.SafeL2.Number, "Safe chain did not advance")
// stop the batch submission // stop the batch submission
err = sys.BatchSubmitter.Stop(context.Background()) err = sys.BatchSubmitter.Driver().StopBatchSubmitting(context.Background())
require.Nil(t, err) require.Nil(t, err)
// wait for any old safe blocks being submitted / derived // wait for any old safe blocks being submitted / derived
...@@ -1286,7 +1286,7 @@ func TestStopStartBatcher(t *testing.T) { ...@@ -1286,7 +1286,7 @@ func TestStopStartBatcher(t *testing.T) {
require.Equal(t, newSeqStatus.SafeL2.Number, seqStatus.SafeL2.Number, "Safe chain advanced while batcher was stopped") require.Equal(t, newSeqStatus.SafeL2.Number, seqStatus.SafeL2.Number, "Safe chain advanced while batcher was stopped")
// start the batch submission // start the batch submission
err = sys.BatchSubmitter.Start() err = sys.BatchSubmitter.Driver().StartBatchSubmitting()
require.Nil(t, err) require.Nil(t, err)
time.Sleep(safeBlockInclusionDuration) time.Sleep(safeBlockInclusionDuration)
...@@ -1325,7 +1325,7 @@ func TestBatcherMultiTx(t *testing.T) { ...@@ -1325,7 +1325,7 @@ func TestBatcherMultiTx(t *testing.T) {
require.Nil(t, err) require.Nil(t, err)
// start batch submission // start batch submission
err = sys.BatchSubmitter.Start() err = sys.BatchSubmitter.Driver().StartBatchSubmitting()
require.Nil(t, err) require.Nil(t, err)
totalTxCount := 0 totalTxCount := 0
......
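`System.BatchSubmitter` is now a `*bss.BatcherService`, so tests pause and resume batch submission through the service's `Driver()` rather than calling `Start`/`Stop` on the submitter directly, and `System.Close` uses `Kill()` for an immediate shutdown. A sketch of the new call sites, assuming a running `sys` returned by `SystemConfig.Start`:

```go
// Sketch: controlling batch submission through the BatcherService API.
ctx := context.Background()

// Pause batch submission; the batcher service itself keeps running.
require.NoError(t, sys.BatchSubmitter.Driver().StopBatchSubmitting(ctx))

// ... assert that the safe chain stops advancing while paused ...

// Resume batch submission.
require.NoError(t, sys.BatchSubmitter.Driver().StartBatchSubmitting())

// Hard stop, as used by System.Close: no allowance for idling.
_ = sys.BatchSubmitter.Kill()
```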
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op_stack_go:latest ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go # See "make golang-docker" and /ops/docker/op-stack-go
......
package main package main
import ( import (
"fmt"
"os" "os"
heartbeat "github.com/ethereum-optimism/optimism/op-heartbeat" heartbeat "github.com/ethereum-optimism/optimism/op-heartbeat"
"github.com/ethereum-optimism/optimism/op-heartbeat/flags" "github.com/ethereum-optimism/optimism/op-heartbeat/flags"
opservice "github.com/ethereum-optimism/optimism/op-service"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
...@@ -22,7 +22,7 @@ func main() { ...@@ -22,7 +22,7 @@ func main() {
app := cli.NewApp() app := cli.NewApp()
app.Flags = flags.Flags app.Flags = flags.Flags
app.Version = fmt.Sprintf("%s-%s-%s", Version, GitCommit, GitDate) app.Version = opservice.FormatVersion(Version, GitCommit, GitDate, "")
app.Name = "op-heartbeat" app.Name = "op-heartbeat"
app.Usage = "Heartbeat recorder" app.Usage = "Heartbeat recorder"
app.Description = "Service that records opt-in heartbeats from op nodes" app.Description = "Service that records opt-in heartbeats from op nodes"
......
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op_stack_go:latest ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go # See "make golang-docker" and /ops/docker/op-stack-go
......
...@@ -29,19 +29,7 @@ var ( ...@@ -29,19 +29,7 @@ var (
) )
// VersionWithMeta holds the textual version string including the metadata. // VersionWithMeta holds the textual version string including the metadata.
var VersionWithMeta = func() string { var VersionWithMeta = opservice.FormatVersion(version.Version, GitCommit, GitDate, version.Meta)
v := version.Version
if GitCommit != "" {
v += "-" + GitCommit[:8]
}
if GitDate != "" {
v += "-" + GitDate
}
if version.Meta != "" {
v += "-" + version.Meta
}
return v
}()
func main() { func main() {
// Set up logger with a default INFO level in case we fail to parse flags, // Set up logger with a default INFO level in case we fail to parse flags,
......
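The hand-rolled version string closures in op-node (and op-program further down) are replaced with `opservice.FormatVersion`. Judging from the removed code, it presumably appends a shortened commit hash, the build date, and optional metadata to the base version; a sketch of that behavior, not the actual op-service implementation:

```go
// Sketch of the expected FormatVersion behavior, inferred from the removed closure.
func formatVersion(version, gitCommit, gitDate, meta string) string {
	v := version
	if gitCommit != "" {
		v += "-" + gitCommit[:8] // assumes a full-length commit hash
	}
	if gitDate != "" {
		v += "-" + gitDate
	}
	if meta != "" {
		v += "-" + meta
	}
	return v
}
```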
...@@ -82,6 +82,12 @@ var ( ...@@ -82,6 +82,12 @@ var (
return &out return &out
}(), }(),
} }
L1RethDBPath = &cli.StringFlag{
Name: "l1.rethdb",
Usage: "The L1 RethDB path, used to fetch receipts for L1 blocks. Only applicable when using the `reth_db` RPC kind with `l1.rpckind`.",
EnvVars: prefixEnvVars("L1_RETHDB"),
Required: false,
}
L1RPCRateLimit = &cli.Float64Flag{ L1RPCRateLimit = &cli.Float64Flag{
Name: "l1.rpc-rate-limit", Name: "l1.rpc-rate-limit",
Usage: "Optional self-imposed global rate-limit on L1 RPC requests, specified in requests / second. Disabled if set to 0.", Usage: "Optional self-imposed global rate-limit on L1 RPC requests, specified in requests / second. Disabled if set to 0.",
...@@ -254,9 +260,10 @@ var ( ...@@ -254,9 +260,10 @@ var (
EnvVars: prefixEnvVars("ROLLUP_LOAD_PROTOCOL_VERSIONS"), EnvVars: prefixEnvVars("ROLLUP_LOAD_PROTOCOL_VERSIONS"),
} }
CanyonOverrideFlag = &cli.Uint64Flag{ CanyonOverrideFlag = &cli.Uint64Flag{
Name: "override.canyon", Name: "override.canyon",
Usage: "Manually specify the Canyon fork timestamp, overriding the bundled setting", Usage: "Manually specify the Canyon fork timestamp, overriding the bundled setting",
Hidden: true, EnvVars: prefixEnvVars("OVERRIDE_CANYON"),
Hidden: false,
} }
) )
...@@ -303,6 +310,7 @@ var optionalFlags = []cli.Flag{ ...@@ -303,6 +310,7 @@ var optionalFlags = []cli.Flag{
RollupHalt, RollupHalt,
RollupLoadProtocolVersions, RollupLoadProtocolVersions,
CanyonOverrideFlag, CanyonOverrideFlag,
L1RethDBPath,
} }
// Flags contains the list of configuration options available to the binary. // Flags contains the list of configuration options available to the binary.
......
...@@ -604,7 +604,9 @@ func (m *Metrics) ReportProtocolVersions(local, engine, recommended, required pa ...@@ -604,7 +604,9 @@ func (m *Metrics) ReportProtocolVersions(local, engine, recommended, required pa
m.ProtocolVersions.WithLabelValues(local.String(), engine.String(), recommended.String(), required.String()).Set(1) m.ProtocolVersions.WithLabelValues(local.String(), engine.String(), recommended.String(), required.String()).Set(1)
} }
type noopMetricer struct{} type noopMetricer struct {
metrics.NoopRPCMetrics
}
var NoopMetrics Metricer = new(noopMetricer) var NoopMetrics Metricer = new(noopMetricer)
...@@ -614,17 +616,6 @@ func (n *noopMetricer) RecordInfo(version string) { ...@@ -614,17 +616,6 @@ func (n *noopMetricer) RecordInfo(version string) {
func (n *noopMetricer) RecordUp() { func (n *noopMetricer) RecordUp() {
} }
func (n *noopMetricer) RecordRPCServerRequest(method string) func() {
return func() {}
}
func (n *noopMetricer) RecordRPCClientRequest(method string) func(err error) {
return func(err error) {}
}
func (n *noopMetricer) RecordRPCClientResponse(method string, err error) {
}
func (n *noopMetricer) SetDerivationIdle(status bool) { func (n *noopMetricer) SetDerivationIdle(status bool) {
} }
......
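Embedding `metrics.NoopRPCMetrics` lets `noopMetricer` inherit the RPC no-op methods instead of redeclaring them here. The general pattern, with illustrative names (the real interface and embedded type live in op-service/metrics):

```go
// Illustrative sketch: satisfying part of a larger interface by embedding a no-op base.
type RPCMetricer interface {
	RecordRPCServerRequest(method string) func()
	RecordRPCClientRequest(method string) func(err error)
}

// NoopRPCMetrics implements the RPC metrics methods once, centrally.
type NoopRPCMetrics struct{}

func (NoopRPCMetrics) RecordRPCServerRequest(method string) func() {
	return func() {}
}

func (NoopRPCMetrics) RecordRPCClientRequest(method string) func(err error) {
	return func(err error) {}
}

// A larger no-op metricer now satisfies RPCMetricer simply by embedding it.
type noopMetricer struct {
	NoopRPCMetrics
}
```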
...@@ -60,6 +60,9 @@ type Config struct { ...@@ -60,6 +60,9 @@ type Config struct {
// Cancel to request a premature shutdown of the node itself, e.g. when halting. This may be nil. // Cancel to request a premature shutdown of the node itself, e.g. when halting. This may be nil.
Cancel context.CancelCauseFunc Cancel context.CancelCauseFunc
// [OPTIONAL] The reth DB path to read receipts from
RethDBPath string
} }
type RPCConfig struct { type RPCConfig struct {
......
...@@ -156,6 +156,9 @@ func (n *OpNode) initL1(ctx context.Context, cfg *Config) error { ...@@ -156,6 +156,9 @@ func (n *OpNode) initL1(ctx context.Context, cfg *Config) error {
return fmt.Errorf("failed to get L1 RPC client: %w", err) return fmt.Errorf("failed to get L1 RPC client: %w", err)
} }
// Set the RethDB path in the EthClientConfig, if there is one configured.
rpcCfg.EthClientConfig.RethDBPath = cfg.RethDBPath
n.l1Source, err = sources.NewL1Client( n.l1Source, err = sources.NewL1Client(
client.NewInstrumentedRPC(l1Node, n.metrics), n.log, n.metrics.L1SourceCache, rpcCfg) client.NewInstrumentedRPC(l1Node, n.metrics), n.log, n.metrics.L1SourceCache, rpcCfg)
if err != nil { if err != nil {
......
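The new reth DB support is plumbed in two small steps that both appear in this diff: the `--l1.rethdb` flag value is stored on the node `Config`, and `initL1` copies it onto the L1 `EthClientConfig` so receipts can be read from a local reth database when the `reth_db` RPC kind is in use. Condensed from those hunks (not new code):

```go
// CLI flag -> node config
cfg.RethDBPath = ctx.String(flags.L1RethDBPath.Name)

// node config -> L1 client config, inside initL1
rpcCfg.EthClientConfig.RethDBPath = cfg.RethDBPath
```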
...@@ -2,6 +2,7 @@ package store ...@@ -2,6 +2,7 @@ package store
import ( import (
"context" "context"
"sync/atomic"
"time" "time"
"github.com/ethereum-optimism/optimism/op-service/clock" "github.com/ethereum-optimism/optimism/op-service/clock"
...@@ -17,17 +18,18 @@ const ( ...@@ -17,17 +18,18 @@ const (
var scoresBase = ds.NewKey("/peers/scores") var scoresBase = ds.NewKey("/peers/scores")
// LastUpdate requires atomic update operations. Use the helper functions SetLastUpdated and LastUpdated to modify and access this field.
type scoreRecord struct { type scoreRecord struct {
PeerScores PeerScores `json:"peerScores"`
LastUpdate int64 `json:"lastUpdate"` // unix timestamp in seconds LastUpdate int64 `json:"lastUpdate"` // unix timestamp in seconds
PeerScores PeerScores `json:"peerScores"`
} }
func (s *scoreRecord) SetLastUpdated(t time.Time) { func (s *scoreRecord) SetLastUpdated(t time.Time) {
s.LastUpdate = t.Unix() atomic.StoreInt64(&s.LastUpdate, t.Unix())
} }
func (s *scoreRecord) LastUpdated() time.Time { func (s *scoreRecord) LastUpdated() time.Time {
return time.Unix(s.LastUpdate, 0) return time.Unix(atomic.LoadInt64(&s.LastUpdate), 0)
} }
func (s *scoreRecord) MarshalBinary() (data []byte, err error) { func (s *scoreRecord) MarshalBinary() (data []byte, err error) {
......
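`scoreRecord.LastUpdate` is now read and written through `sync/atomic`, and the field moves to the front of the struct, presumably to guarantee the 64-bit alignment that 32-bit platforms require for atomic int64 access. The same access pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type record struct {
	lastUpdate int64 // unix seconds; first field, accessed only via atomic ops
}

func (r *record) SetLastUpdated(t time.Time) {
	atomic.StoreInt64(&r.lastUpdate, t.Unix())
}

func (r *record) LastUpdated() time.Time {
	return time.Unix(atomic.LoadInt64(&r.lastUpdate), 0)
}

func main() {
	r := &record{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			r.SetLastUpdated(time.Now()) // concurrent writers, safe under -race
		}()
	}
	wg.Wait()
	fmt.Println(r.LastUpdated())
}
```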
...@@ -109,9 +109,9 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex ...@@ -109,9 +109,9 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex
txs = append(txs, l1InfoTx) txs = append(txs, l1InfoTx)
txs = append(txs, depositTxs...) txs = append(txs, depositTxs...)
var withdrawals *eth.Withdrawals var withdrawals *types.Withdrawals
if ba.cfg.IsCanyon(nextL2Time) { if ba.cfg.IsCanyon(nextL2Time) {
withdrawals = &eth.Withdrawals{} withdrawals = &types.Withdrawals{}
} }
return &eth.PayloadAttributes{ return &eth.PayloadAttributes{
......
...@@ -47,7 +47,7 @@ func AttributesMatchBlock(attrs *eth.PayloadAttributes, parentHash common.Hash, ...@@ -47,7 +47,7 @@ func AttributesMatchBlock(attrs *eth.PayloadAttributes, parentHash common.Hash,
return nil return nil
} }
func checkWithdrawalsMatch(attrWithdrawals *eth.Withdrawals, blockWithdrawals *eth.Withdrawals) error { func checkWithdrawalsMatch(attrWithdrawals *types.Withdrawals, blockWithdrawals *types.Withdrawals) error {
if attrWithdrawals == nil && blockWithdrawals == nil { if attrWithdrawals == nil && blockWithdrawals == nil {
return nil return nil
} }
...@@ -67,7 +67,7 @@ func checkWithdrawalsMatch(attrWithdrawals *eth.Withdrawals, blockWithdrawals *e ...@@ -67,7 +67,7 @@ func checkWithdrawalsMatch(attrWithdrawals *eth.Withdrawals, blockWithdrawals *e
for idx, expected := range *attrWithdrawals { for idx, expected := range *attrWithdrawals {
actual := (*blockWithdrawals)[idx] actual := (*blockWithdrawals)[idx]
if expected != actual { if *expected != *actual {
return fmt.Errorf("expected withdrawal %d to be %v, actual %v", idx, expected, actual) return fmt.Errorf("expected withdrawal %d to be %v, actual %v", idx, expected, actual)
} }
} }
......
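go-ethereum's `types.Withdrawals` is a slice of `*types.Withdrawal`, so the loop above has to compare the dereferenced values; comparing the pointers themselves would flag a mismatch even when the withdrawals are identical. A small standalone illustration:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	a := &types.Withdrawal{Index: 1}
	b := &types.Withdrawal{Index: 1}

	fmt.Println(a == b)   // false: distinct pointers
	fmt.Println(*a == *b) // true: equal values, which is what checkWithdrawalsMatch needs
}
```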
...@@ -3,14 +3,15 @@ package derive ...@@ -3,14 +3,15 @@ package derive
import ( import (
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/core/types"
) )
func TestWithdrawalsMatch(t *testing.T) { func TestWithdrawalsMatch(t *testing.T) {
tests := []struct { tests := []struct {
attrs *eth.Withdrawals attrs *types.Withdrawals
block *eth.Withdrawals block *types.Withdrawals
shouldMatch bool shouldMatch bool
}{ }{
{ {
...@@ -19,36 +20,36 @@ func TestWithdrawalsMatch(t *testing.T) { ...@@ -19,36 +20,36 @@ func TestWithdrawalsMatch(t *testing.T) {
shouldMatch: true, shouldMatch: true,
}, },
{ {
attrs: &eth.Withdrawals{}, attrs: &types.Withdrawals{},
block: nil, block: nil,
shouldMatch: false, shouldMatch: false,
}, },
{ {
attrs: nil, attrs: nil,
block: &eth.Withdrawals{}, block: &types.Withdrawals{},
shouldMatch: false, shouldMatch: false,
}, },
{ {
attrs: &eth.Withdrawals{}, attrs: &types.Withdrawals{},
block: &eth.Withdrawals{}, block: &types.Withdrawals{},
shouldMatch: true, shouldMatch: true,
}, },
{ {
attrs: &eth.Withdrawals{ attrs: &types.Withdrawals{
{ {
Index: 1, Index: 1,
}, },
}, },
block: &eth.Withdrawals{}, block: &types.Withdrawals{},
shouldMatch: false, shouldMatch: false,
}, },
{ {
attrs: &eth.Withdrawals{ attrs: &types.Withdrawals{
{ {
Index: 1, Index: 1,
}, },
}, },
block: &eth.Withdrawals{ block: &types.Withdrawals{
{ {
Index: 2, Index: 2,
}, },
......
...@@ -12,7 +12,7 @@ import ( ...@@ -12,7 +12,7 @@ import (
"github.com/ethereum-optimism/superchain-registry/superchain" "github.com/ethereum-optimism/superchain-registry/superchain"
) )
var OPStackSupport = params.ProtocolVersionV0{Build: [8]byte{}, Major: 3, Minor: 1, Patch: 0, PreRelease: 1}.Encode() var OPStackSupport = params.ProtocolVersionV0{Build: [8]byte{}, Major: 4, Minor: 0, Patch: 0, PreRelease: 1}.Encode()
const ( const (
opMainnet = 10 opMainnet = 10
...@@ -98,6 +98,7 @@ func LoadOPStackRollupConfig(chainID uint64) (*Config, error) { ...@@ -98,6 +98,7 @@ func LoadOPStackRollupConfig(chainID uint64) (*Config, error) {
L1ChainID: new(big.Int).SetUint64(superChain.Config.L1.ChainID), L1ChainID: new(big.Int).SetUint64(superChain.Config.L1.ChainID),
L2ChainID: new(big.Int).SetUint64(chConfig.ChainID), L2ChainID: new(big.Int).SetUint64(chConfig.ChainID),
RegolithTime: &regolithTime, RegolithTime: &regolithTime,
CanyonTime: superChain.Config.CanyonTime,
BatchInboxAddress: common.Address(chConfig.BatchInboxAddr), BatchInboxAddress: common.Address(chConfig.BatchInboxAddr),
DepositContractAddress: depositContractAddress, DepositContractAddress: depositContractAddress,
L1SystemConfigAddress: common.Address(chConfig.SystemConfigAddr), L1SystemConfigAddress: common.Address(chConfig.SystemConfigAddr),
......
...@@ -104,6 +104,7 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { ...@@ -104,6 +104,7 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
ConfigPersistence: configPersistence, ConfigPersistence: configPersistence,
Sync: *syncConfig, Sync: *syncConfig,
RollupHalt: haltOption, RollupHalt: haltOption,
RethDBPath: ctx.String(flags.L1RethDBPath.Name),
} }
if err := cfg.LoadPersisted(log); err != nil { if err := cfg.LoadPersisted(log); err != nil {
......
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op_stack_go:latest ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go # See "make golang-docker" and /ops/docker/op-stack-go
......
...@@ -8,6 +8,8 @@ LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-program/version.Vers ...@@ -8,6 +8,8 @@ LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-program/version.Vers
LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-program/version.Meta=$(VERSION_META) LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-program/version.Meta=$(VERSION_META)
LDFLAGS := -ldflags "$(LDFLAGSSTRING)" LDFLAGS := -ldflags "$(LDFLAGSSTRING)"
COMPAT_DIR := temp/compat
op-program: \ op-program: \
op-program-host \ op-program-host \
op-program-client \ op-program-client \
...@@ -25,13 +27,26 @@ op-program-client-mips: ...@@ -25,13 +27,26 @@ op-program-client-mips:
# result is mips32, big endian, R3000 # result is mips32, big endian, R3000
clean: clean:
rm -rf bin rm -rf bin "$(COMPAT_DIR)"
test: test:
go test -v ./... go test -v ./...
verify-goerli: op-program-host op-program-client verify-goerli: op-program-host op-program-client
env GO111MODULE=on go run ./verify/cmd/goerli.go $$L1URL $$L2URL env GO111MODULE=on go run ./verify/cmd/goerli.go --l1 $$L1URL --l2 $$L2URL
capture-goerli-verify: op-program-host op-program-client
rm -rf "$(COMPAT_DIR)/goerli" "$(COMPAT_DIR)/goerli.tar.bz"
env GO111MODULE=on go run ./verify/cmd/goerli.go --l1 $$L1URL --l2 $$L2URL --datadir "$(COMPAT_DIR)/goerli"
tar jcf "$(COMPAT_DIR)/goerli.tar.bz" -C "$(COMPAT_DIR)" goerli
capture-chain-test-data: capture-goerli-verify
run-goerli-verify: op-program-host op-program-client
mkdir -p "$(COMPAT_DIR)"
curl -L -o "$(COMPAT_DIR)/goerli.tar.bz" https://github.com/ethereum-optimism/chain-test-data/releases/download/2023-10-11/goerli.tar.bz
tar jxf "$(COMPAT_DIR)/goerli.tar.bz" -C "$(COMPAT_DIR)"
./bin/op-program `cat "$(COMPAT_DIR)/goerli/args.txt"`
.PHONY: \ .PHONY: \
op-program \ op-program \
......
...@@ -68,7 +68,7 @@ func NewBlockProcessorFromHeader(provider BlockDataProvider, h *types.Header) (* ...@@ -68,7 +68,7 @@ func NewBlockProcessorFromHeader(provider BlockDataProvider, h *types.Header) (*
return nil, fmt.Errorf("get parent state: %w", err) return nil, fmt.Errorf("get parent state: %w", err)
} }
header.Number = new(big.Int).Add(parentHeader.Number, common.Big1) header.Number = new(big.Int).Add(parentHeader.Number, common.Big1)
header.BaseFee = eip1559.CalcBaseFee(provider.Config(), parentHeader) header.BaseFee = eip1559.CalcBaseFee(provider.Config(), parentHeader, header.Time)
header.GasUsed = 0 header.GasUsed = 0
gasPool := new(core.GasPool).AddGas(header.GasLimit) gasPool := new(core.GasPool).AddGas(header.GasLimit)
return &BlockProcessor{ return &BlockProcessor{
......
...@@ -57,9 +57,9 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi. ...@@ -57,9 +57,9 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.
nextBlockTime := eth.Uint64Quantity(genesis.Time + 1) nextBlockTime := eth.Uint64Quantity(genesis.Time + 1)
var w *eth.Withdrawals var w *types.Withdrawals
if api.backend.Config().IsCanyon(uint64(nextBlockTime)) { if api.backend.Config().IsCanyon(uint64(nextBlockTime)) {
w = &eth.Withdrawals{} w = &types.Withdrawals{}
} }
result, err := api.engine.ForkchoiceUpdatedV2(api.ctx, &eth.ForkchoiceState{ result, err := api.engine.ForkchoiceUpdatedV2(api.ctx, &eth.ForkchoiceState{
...@@ -111,9 +111,9 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi. ...@@ -111,9 +111,9 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.
t.Run("RejectInvalidBlockHash", func(t *testing.T) { t.Run("RejectInvalidBlockHash", func(t *testing.T) {
api := newTestHelper(t, createBackend) api := newTestHelper(t, createBackend)
var w *eth.Withdrawals var w *types.Withdrawals
if api.backend.Config().IsCanyon(uint64(0)) { if api.backend.Config().IsCanyon(uint64(0)) {
w = &eth.Withdrawals{} w = &types.Withdrawals{}
} }
// Invalid because BlockHash won't be correct (among many other reasons) // Invalid because BlockHash won't be correct (among many other reasons)
...@@ -385,9 +385,9 @@ func (h *testHelper) startBlockBuilding(head *types.Header, newBlockTimestamp et ...@@ -385,9 +385,9 @@ func (h *testHelper) startBlockBuilding(head *types.Header, newBlockTimestamp et
} }
canyonTime := h.backend.Config().CanyonTime canyonTime := h.backend.Config().CanyonTime
var w *eth.Withdrawals var w *types.Withdrawals
if canyonTime != nil && *canyonTime <= uint64(newBlockTimestamp) { if canyonTime != nil && *canyonTime <= uint64(newBlockTimestamp) {
w = &eth.Withdrawals{} w = &types.Withdrawals{}
} }
result, err := h.engine.ForkchoiceUpdatedV2(h.ctx, &eth.ForkchoiceState{ result, err := h.engine.ForkchoiceUpdatedV2(h.ctx, &eth.ForkchoiceState{
......
...@@ -7,6 +7,7 @@ import ( ...@@ -7,6 +7,7 @@ import (
"github.com/ethereum-optimism/optimism/op-program/host/config" "github.com/ethereum-optimism/optimism/op-program/host/config"
"github.com/ethereum-optimism/optimism/op-program/host/flags" "github.com/ethereum-optimism/optimism/op-program/host/flags"
"github.com/ethereum-optimism/optimism/op-program/host/version" "github.com/ethereum-optimism/optimism/op-program/host/version"
opservice "github.com/ethereum-optimism/optimism/op-service"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
...@@ -18,19 +19,7 @@ var ( ...@@ -18,19 +19,7 @@ var (
) )
// VersionWithMeta holds the textual version string including the metadata. // VersionWithMeta holds the textual version string including the metadata.
var VersionWithMeta = func() string { var VersionWithMeta = opservice.FormatVersion(version.Version, GitCommit, GitDate, version.Meta)
v := version.Version
if GitCommit != "" {
v += "-" + GitCommit[:8]
}
if GitDate != "" {
v += "-" + GitDate
}
if version.Meta != "" {
v += "-" + version.Meta
}
return v
}()
func main() { func main() {
args := os.Args args := os.Args
......
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op_stack_go:latest ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
FROM $OP_STACK_GO_BUILDER as builder FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go # See "make golang-docker" and /ops/docker/op-stack-go
......
package main package main
import ( import (
"fmt"
"os" "os"
opservice "github.com/ethereum-optimism/optimism/op-service"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"github.com/ethereum-optimism/optimism/op-proposer/flags" "github.com/ethereum-optimism/optimism/op-proposer/flags"
...@@ -26,7 +26,7 @@ func main() { ...@@ -26,7 +26,7 @@ func main() {
app := cli.NewApp() app := cli.NewApp()
app.Flags = cliapp.ProtectFlags(flags.Flags) app.Flags = cliapp.ProtectFlags(flags.Flags)
app.Version = fmt.Sprintf("%s-%s-%s", Version, GitCommit, GitDate) app.Version = opservice.FormatVersion(Version, GitCommit, GitDate, "")
app.Name = "op-proposer" app.Name = "op-proposer"
app.Usage = "L2Output Submitter" app.Usage = "L2Output Submitter"
app.Description = "Service for generating and submitting L2 Output checkpoints to the L2OutputOracle contract" app.Description = "Service for generating and submitting L2 Output checkpoints to the L2OutputOracle contract"
......
...@@ -84,7 +84,12 @@ func (m *Metrics) Start(host string, port int) (*httputil.HTTPServer, error) { ...@@ -84,7 +84,12 @@ func (m *Metrics) Start(host string, port int) (*httputil.HTTPServer, error) {
func (m *Metrics) StartBalanceMetrics(ctx context.Context, func (m *Metrics) StartBalanceMetrics(ctx context.Context,
l log.Logger, client *ethclient.Client, account common.Address) { l log.Logger, client *ethclient.Client, account common.Address) {
opmetrics.LaunchBalanceMetrics(ctx, l, m.registry, m.ns, client, account) // TODO(7684): util was refactored to close, but ctx is still being used by caller for shutdown
balanceMetric := opmetrics.LaunchBalanceMetrics(l, m.registry, m.ns, client, account)
go func() {
<-ctx.Done()
_ = balanceMetric.Close()
}()
} }
// RecordInfo sets a pseudo-metric that contains versioning and // RecordInfo sets a pseudo-metric that contains versioning and
......
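Per the TODO above, `LaunchBalanceMetrics` now returns a closer instead of taking a context, so the caller bridges its existing context to the new lifecycle with a small goroutine. The same bridge in isolation (the helper name is illustrative, not part of the repo):

```go
// closeOnDone closes an io.Closer-style resource once the caller's context is done.
func closeOnDone(ctx context.Context, c io.Closer) {
	go func() {
		<-ctx.Done()
		_ = c.Close() // best-effort cleanup on shutdown
	}()
}
```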
...@@ -172,12 +172,12 @@ func NewL2OutputSubmitterConfigFromCLIConfig(cfg CLIConfig, l log.Logger, m metr ...@@ -172,12 +172,12 @@ func NewL2OutputSubmitterConfigFromCLIConfig(cfg CLIConfig, l log.Logger, m metr
} }
// Connect to L1 and L2 providers. Perform these last since they are the most expensive. // Connect to L1 and L2 providers. Perform these last since they are the most expensive.
l1Client, err := dial.DialEthClientWithTimeout(dial.DefaultDialTimeout, l, cfg.L1EthRpc) l1Client, err := dial.DialEthClientWithTimeout(context.Background(), dial.DefaultDialTimeout, l, cfg.L1EthRpc)
if err != nil { if err != nil {
return nil, err return nil, err
} }
rollupClient, err := dial.DialRollupClientWithTimeout(dial.DefaultDialTimeout, l, cfg.RollupRpc) rollupClient, err := dial.DialRollupClientWithTimeout(context.Background(), dial.DefaultDialTimeout, l, cfg.RollupRpc)
if err != nil { if err != nil {
return nil, err return nil, err
} }
......
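`DialEthClientWithTimeout` and `DialRollupClientWithTimeout` now take a context as their first argument; passing `context.Background()` preserves the previous behavior, while callers with a lifecycle context can thread it through for cancellation. A hedged usage sketch:

```go
// Sketch: threading a cancellable context through the dial helpers.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

l1Client, err := dial.DialEthClientWithTimeout(ctx, dial.DefaultDialTimeout, l, cfg.L1EthRpc)
if err != nil {
	return nil, err
}
```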
package clock
import (
"context"
"sync"
"time"
)
// LoopFn is a simple ticker-loop with io.Closer support.
// Note that ticks adapt; slow function calls may result in lost ticks.
type LoopFn struct {
ctx context.Context
cancel context.CancelFunc
ticker Ticker
fn func(ctx context.Context)
onClose func() error
wg sync.WaitGroup
}
// Close cancels the context of the ongoing function call, waits for the call to complete, and cancels further calls.
// Close is safe to call again or concurrently. The onClose callback will be called for each Close call.
func (lf *LoopFn) Close() error {
lf.cancel() // stop any ongoing function call, and close the main loop
lf.wg.Wait() // wait for completion
if lf.onClose != nil {
return lf.onClose() // optional: user can specify function to close resources with
}
return nil
}
func (lf *LoopFn) work() {
defer lf.wg.Done()
defer lf.ticker.Stop() // clean up the timer
for {
select {
case <-lf.ctx.Done():
return
case <-lf.ticker.Ch():
ctx, cancel := context.WithCancel(lf.ctx)
func() {
defer cancel()
lf.fn(ctx)
}()
}
}
}
// NewLoopFn creates a periodic function call, which can be closed,
// with an optional onClose callback to clean up resources.
func NewLoopFn(clock Clock, fn func(ctx context.Context), onClose func() error, interval time.Duration) *LoopFn {
ctx, cancel := context.WithCancel(context.Background())
lf := &LoopFn{
ctx: ctx,
cancel: cancel,
fn: fn,
ticker: clock.NewTicker(interval),
onClose: onClose,
}
lf.wg.Add(1)
go lf.work()
return lf
}
package clock
import (
"context"
"errors"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestLoopFn(t *testing.T) {
cl := NewDeterministicClock(time.Now())
calls := make(chan struct{}, 10)
testErr := errors.New("test close error")
loopFn := NewLoopFn(cl, func(ctx context.Context) {
calls <- struct{}{}
}, func() error {
close(calls)
return testErr
}, time.Second*10)
cl.AdvanceTime(time.Second * 15)
<-calls
cl.AdvanceTime(time.Second * 10)
<-calls
select {
case <-calls:
t.Fatal("more calls than expected")
default:
}
require.ErrorIs(t, loopFn.Close(), testErr)
}
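A minimal usage sketch for the new `LoopFn` helper, assuming the package's real-time `SystemClock`; the worker body and interval are illustrative:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/ethereum-optimism/optimism/op-service/clock"
)

func main() {
	// Periodic background work that can be stopped cleanly.
	loop := clock.NewLoopFn(clock.SystemClock, func(ctx context.Context) {
		log.Println("tick") // ctx is cancelled when the loop is closed
	}, nil, 30*time.Second) // nil onClose: nothing extra to clean up

	time.Sleep(2 * time.Minute)
	_ = loop.Close() // cancels any in-flight call and waits for it to return
}
```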
...@@ -11,3 +11,7 @@ func NewRegistry() *prometheus.Registry { ...@@ -11,3 +11,7 @@ func NewRegistry() *prometheus.Registry {
registry.MustRegister(collectors.NewGoCollector()) registry.MustRegister(collectors.NewGoCollector())
return registry return registry
} }
type RegistryMetricer interface {
Registry() *prometheus.Registry
}
# Target
target/
# Bindings
rdb.h