Commit c963cd95 authored by Ethen Pociask

Merge branch 'develop' of https://github.com/epociask/optimism into indexer.api.supply_view

parents 95dd5008 88eee4fb
3b1129b5bc43ba22a9bcf4e4323c5a9df0023140 d85718785859dc0b5a095d2302d1a20ec06ab77a
v1.13.4
@@ -16,7 +16,7 @@
 /op-exporter @ethereum-optimism/go-reviewers
 /op-heartbeat @ethereum-optimism/go-reviewers
 /op-node @ethereum-optimism/go-reviewers
-/op-node/rollup @protolambda @trianglesphere
+/op-node/rollup @protolambda @trianglesphere @ajsutton
 /op-preimage @ethereum-optimism/go-reviewers
 /op-program @ethereum-optimism/go-reviewers
 /op-proposer @ethereum-optimism/go-reviewers
@@ -25,9 +25,10 @@
 /ops-bedrock @ethereum-optimism/go-reviewers
 # Ops
 /.circleci @ethereum-optimism/infra-reviewers
 /.github @ethereum-optimism/infra-reviewers
 /ops @ethereum-optimism/infra-reviewers
+/docker-bake.hcl @ethereum-optimism/infra-reviewers
 # Misc
 /proxyd @ethereum-optimism/infra-reviewers
...
@@ -17,6 +17,3 @@
 path = packages/contracts-bedrock/lib/safe-contracts
 url = https://github.com/safe-global/safe-contracts
 branch = v1.4.0
@@ -181,9 +181,9 @@ You must have Python 3.x installed to run `slither`.
 To run `slither` locally, do:
 ```bash
-cd packages/contracts
+cd packages/contracts-bedrock
 pip3 install slither-analyzer
-pnpm test:slither
+pnpm slither
 ```
 ## Labels
...
 COMPOSEFLAGS=-d
 ITESTS_L2_HOST=http://localhost:9545
 BEDROCK_TAGS_REMOTE?=origin
+OP_STACK_GO_BUILDER?=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest
+
+# Requires at least Python v3.9; specify a minor version below if needed
+PYTHON?=python3
 build: build-go build-ts
 .PHONY: build
@@ -8,6 +12,10 @@ build: build-go build-ts
 build-go: submodules op-node op-proposer op-batcher
 .PHONY: build-go
+lint-go:
+	golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
+.PHONY: lint-go
 build-ts: submodules
 	if [ -n "$$NVM_DIR" ]; then \
 		. $$NVM_DIR/nvm.sh && nvm use; \
@@ -19,6 +27,18 @@ build-ts: submodules
 ci-builder:
 	docker build -t ci-builder -f ops/docker/ci-builder/Dockerfile .
+golang-docker:
+	# We don't use a buildx builder here, and just load directly into regular docker, for convenience.
+	GIT_COMMIT=$$(git rev-parse HEAD) \
+	GIT_DATE=$$(git show -s --format='%ct') \
+	IMAGE_TAGS=$$(git rev-parse HEAD),latest \
+	docker buildx bake \
+		--progress plain \
+		--load \
+		-f docker-bake.hcl \
+		op-node op-batcher op-proposer op-challenger
+.PHONY: golang-docker
 submodules:
 	# CI will checkout submodules on its own (and fails on these commands)
 	if [ -z "$$GITHUB_ENV" ]; then \
@@ -97,14 +117,14 @@ pre-devnet:
 devnet-up: pre-devnet
 	./ops/scripts/newer-file.sh .devnet/allocs-l1.json ./packages/contracts-bedrock \
 		|| make devnet-allocs
-	PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=.
+	PYTHONPATH=./bedrock-devnet $(PYTHON) ./bedrock-devnet/main.py --monorepo-dir=.
 .PHONY: devnet-up
 # alias for devnet-up
 devnet-up-deploy: devnet-up
 devnet-test: pre-devnet
-	PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. --test
+	PYTHONPATH=./bedrock-devnet $(PYTHON) ./bedrock-devnet/main.py --monorepo-dir=. --test
 .PHONY: devnet-test
 devnet-down:
@@ -120,7 +140,7 @@ devnet-clean:
 .PHONY: devnet-clean
 devnet-allocs: pre-devnet
-	PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. --allocs
+	PYTHONPATH=./bedrock-devnet $(PYTHON) ./bedrock-devnet/main.py --monorepo-dir=. --allocs
 devnet-logs:
 	@(cd ./ops-bedrock && docker compose logs -f)
@@ -163,4 +183,9 @@ bedrock-markdown-links:
 		--exclude-mail /input/README.md "/input/specs/**/*.md"
 install-geth:
-	go install github.com/ethereum/go-ethereum/cmd/geth@v1.12.0
+	./ops/scripts/geth-version-checker.sh && \
+		(echo "Geth versions match, not installing geth..."; true) || \
+		(echo "Versions do not match, installing geth!"; \
+			go install -v github.com/ethereum/go-ethereum/cmd/geth@$(shell cat .gethrc); \
+			echo "Installed geth!"; true)
+.PHONY: install-geth
...
@@ -10,6 +10,9 @@ import time
 import shutil
 import http.client
 from multiprocessing import Process, Queue
+import concurrent.futures
+from collections import namedtuple
 import devnet.log_setup
@@ -94,10 +97,21 @@ def main():
         devnet_l1_genesis(paths)
         return
-    log.info('Building docker images')
-    run_command(['docker', 'compose', 'build', '--progress', 'plain'], cwd=paths.ops_bedrock_dir, env={
-        'PWD': paths.ops_bedrock_dir
-    })
+    git_commit = subprocess.run(['git', 'rev-parse', 'HEAD'], capture_output=True, text=True).stdout.strip()
+    git_date = subprocess.run(['git', 'show', '-s', "--format=%ct"], capture_output=True, text=True).stdout.strip()
+
+    # CI loads the images from workspace, and does not otherwise know the images are good as-is
+    if os.getenv('DEVNET_NO_BUILD') == "true":
+        log.info('Skipping docker images build')
+    else:
+        log.info(f'Building docker images for git commit {git_commit} ({git_date})')
+        run_command(['docker', 'compose', 'build', '--progress', 'plain',
+                     '--build-arg', f'GIT_COMMIT={git_commit}', '--build-arg', f'GIT_DATE={git_date}'],
+                    cwd=paths.ops_bedrock_dir, env={
+            'PWD': paths.ops_bedrock_dir,
+            'DOCKER_BUILDKIT': '1', # (should be available by default in later versions, but explicitly enable it anyway)
+            'COMPOSE_DOCKER_CLI_BUILD': '1' # use the docker cache
+        })
     log.info('Devnet starting')
     devnet_deploy(paths)
@@ -188,13 +202,12 @@ def devnet_deploy(paths):
     # If someone reads this comment and understands why this is being done, please
     # update this comment to explain.
     init_devnet_l1_deploy_config(paths, update_timestamp=True)
-    outfile_l1 = pjoin(paths.devnet_dir, 'genesis-l1.json')
     run_command([
         'go', 'run', 'cmd/main.go', 'genesis', 'l1',
         '--deploy-config', paths.devnet_config_path,
         '--l1-allocs', paths.allocs_path,
        '--l1-deployments', paths.addresses_json_path,
-        '--outfile.l1', outfile_l1,
+        '--outfile.l1', paths.genesis_l1_path,
     ], cwd=paths.op_node_dir)
     log.info('Starting L1.')
@@ -213,8 +226,8 @@ def devnet_deploy(paths):
         '--l1-rpc', 'http://localhost:8545',
         '--deploy-config', paths.devnet_config_path,
         '--deployment-dir', paths.deployment_dir,
-        '--outfile.l2', pjoin(paths.devnet_dir, 'genesis-l2.json'),
-        '--outfile.rollup', pjoin(paths.devnet_dir, 'rollup.json')
+        '--outfile.l2', paths.genesis_l2_path,
+        '--outfile.rollup', paths.rollup_config_path
     ], cwd=paths.op_node_dir)
     rollup_config = read_json(paths.rollup_config_path)
@@ -274,21 +287,27 @@ def debug_dumpBlock(url):
 def wait_for_rpc_server(url):
     log.info(f'Waiting for RPC server at {url}')
-    conn = http.client.HTTPConnection(url)
     headers = {'Content-type': 'application/json'}
     body = '{"id":1, "jsonrpc":"2.0", "method": "eth_chainId", "params":[]}'
     while True:
         try:
+            conn = http.client.HTTPConnection(url)
            conn.request('POST', '/', body, headers)
            response = conn.getresponse()
-            conn.close()
            if response.status < 300:
                log.info(f'RPC server at {url} ready')
                return
         except Exception as e:
            log.info(f'Waiting for RPC server at {url}')
            time.sleep(1)
+        finally:
+            if conn:
+                conn.close()
+
+CommandPreset = namedtuple('Command', ['name', 'args', 'cwd', 'timeout'])
 def devnet_test(paths):
     # Check the L2 config
@@ -297,17 +316,57 @@ def devnet_test(paths):
         cwd=paths.ops_chain_ops,
     )
-    run_command(
-        ['npx', 'hardhat', 'deposit-erc20', '--network', 'devnetL1', '--l1-contracts-json-path', paths.addresses_json_path],
-        cwd=paths.sdk_dir,
-        timeout=8*60,
-    )
+    # Run the two commands with different signers, so the ethereum nonce management does not conflict
+    # And do not use devnet system addresses, to avoid breaking fee-estimation or nonce values.
+    run_commands([
+        CommandPreset('erc20-test',
+          ['npx', 'hardhat', 'deposit-erc20', '--network', 'devnetL1',
+           '--l1-contracts-json-path', paths.addresses_json_path, '--signer-index', '14'],
+          cwd=paths.sdk_dir, timeout=8*60),
+        CommandPreset('eth-test',
+          ['npx', 'hardhat', 'deposit-eth', '--network', 'devnetL1',
+           '--l1-contracts-json-path', paths.addresses_json_path, '--signer-index', '15'],
+          cwd=paths.sdk_dir, timeout=8*60)
+    ], max_workers=2)
+
+def run_commands(commands: list[CommandPreset], max_workers=2):
+    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+        futures = [executor.submit(run_command_preset, cmd) for cmd in commands]
+        for future in concurrent.futures.as_completed(futures):
+            result = future.result()
+            if result:
+                print(result.stdout)
+
+def run_command_preset(command: CommandPreset):
+    with subprocess.Popen(command.args, cwd=command.cwd,
+                          stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) as proc:
+        try:
+            # Live output processing
+            for line in proc.stdout:
+                # Annotate and print the line with timestamp and command name
+                timestamp = datetime.datetime.utcnow().strftime('%H:%M:%S.%f')
+                # Annotate and print the line with the timestamp
+                print(f"[{timestamp}][{command.name}] {line}", end='')
+            stdout, stderr = proc.communicate(timeout=command.timeout)
+            if proc.returncode != 0:
+                raise RuntimeError(f"Command '{' '.join(command.args)}' failed with return code {proc.returncode}: {stderr}")
+        except subprocess.TimeoutExpired:
+            raise RuntimeError(f"Command '{' '.join(command.args)}' timed out!")
+        except Exception as e:
+            raise RuntimeError(f"Error executing '{' '.join(command.args)}': {e}")
+        finally:
+            # Ensure process is terminated
+            proc.kill()
+    return proc.returncode
-    run_command(
-        ['npx', 'hardhat', 'deposit-eth', '--network', 'devnetL1', '--l1-contracts-json-path', paths.addresses_json_path],
-        cwd=paths.sdk_dir,
-        timeout=8*60,
-    )
 def run_command(args, check=True, shell=False, cwd=None, env=None, timeout=None):
     env = env if env else {}
...
-GITCOMMIT := $(shell git rev-parse HEAD)
-GITDATE := $(shell git show -s --format='%ct')
+GITCOMMIT ?= $(shell git rev-parse HEAD)
+GITDATE ?= $(shell git show -s --format='%ct')
 VERSION := v0.0.0
 LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
@@ -20,9 +20,6 @@ elf:
 test: elf
 	go test -v ./...
-lint:
-	golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is"
 fuzz:
 	go test -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallBrk ./mipsevm
 	go test -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallClone ./mipsevm
...
@@ -94,9 +94,6 @@ type Proof struct {
 	OracleKey    hexutil.Bytes `json:"oracle-key,omitempty"`
 	OracleValue  hexutil.Bytes `json:"oracle-value,omitempty"`
 	OracleOffset uint32        `json:"oracle-offset,omitempty"`
-	StepInput   hexutil.Bytes `json:"step-input"`
-	OracleInput hexutil.Bytes `json:"oracle-input"`
 }
 type rawHint string
@@ -348,14 +345,8 @@ func Run(ctx *cli.Context) error {
 		Post:      postStateHash,
 		StateData: witness.State,
 		ProofData: witness.MemProof,
-		StepInput: witness.EncodeStepInput(),
 	}
 	if witness.HasPreimage() {
-		inp, err := witness.EncodePreimageOracleInput()
-		if err != nil {
-			return fmt.Errorf("failed to encode pre-image oracle input: %w", err)
-		}
-		proof.OracleInput = inp
 		proof.OracleKey = witness.PreimageKey[:]
 		proof.OracleValue = witness.PreimageValue
 		proof.OracleOffset = witness.PreimageOffset
...
@@ -16,7 +16,6 @@ import (
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
-	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum-optimism/optimism/op-bindings/bindings"
@@ -24,11 +23,26 @@ import (
 )
 var (
-	StepBytes4                      = crypto.Keccak256([]byte("step(bytes,bytes)"))[:4]
-	LoadKeccak256PreimagePartBytes4 = crypto.Keccak256([]byte("loadKeccak256PreimagePart(uint256,bytes)"))[:4]
-	LoadLocalDataBytes4             = crypto.Keccak256([]byte("loadLocalData(uint256,bytes32,uint256,uint256)"))[:4]
+	StepBytes4                      []byte
+	LoadKeccak256PreimagePartBytes4 []byte
+	LoadLocalDataBytes4             []byte
 )
+func init() {
+	mipsAbi, err := bindings.MIPSMetaData.GetAbi()
+	if err != nil {
+		panic(fmt.Errorf("failed to load MIPS ABI: %w", err))
+	}
+	StepBytes4 = mipsAbi.Methods["step"].ID[:4]
+
+	preimageAbi, err := bindings.PreimageOracleMetaData.GetAbi()
+	if err != nil {
+		panic(fmt.Errorf("failed to load pre-image oracle ABI: %w", err))
+	}
+	LoadKeccak256PreimagePartBytes4 = preimageAbi.Methods["loadKeccak256PreimagePart"].ID[:4]
+	LoadLocalDataBytes4 = preimageAbi.Methods["loadLocalData"].ID[:4]
+}
 // LoadContracts loads the Cannon contracts, from op-bindings package
 func LoadContracts() (*Contracts, error) {
 	var mips, oracle Contract
...
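As a side note on the selector change above: a hand-rolled `crypto.Keccak256` selector and an ABI-derived method ID are two routes to the same four bytes, so they can be cross-checked. Below is a minimal sketch, assuming the generated `bindings` package from the diff and go-ethereum's `abi`/`crypto` helpers; the printed `step` signature is whatever the current bindings report, not something asserted here:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum-optimism/optimism/op-bindings/bindings"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	mipsAbi, err := bindings.MIPSMetaData.GetAbi()
	if err != nil {
		panic(err)
	}
	step := mipsAbi.Methods["step"]
	// A method ID is defined as the first four bytes of keccak256 over the
	// canonical signature string, so the two derivations must agree.
	manual := crypto.Keccak256([]byte(step.Sig))[:4]
	fmt.Printf("step signature: %s\n", step.Sig)
	fmt.Printf("selectors match: %v\n", bytes.Equal(manual, step.ID[:4]))
}
```

Deriving the selectors from the bindings means the constants stay in sync if the on-chain method signatures change again, which is the point of the `init()` block above.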
@@ -76,13 +76,13 @@ func (m *MIPSEVM) Step(t *testing.T, stepWitness *StepWitness) []byte {
 	if stepWitness.HasPreimage() {
 		t.Logf("reading preimage key %x at offset %d", stepWitness.PreimageKey, stepWitness.PreimageOffset)
-		poInput, err := stepWitness.EncodePreimageOracleInput()
+		poInput, err := stepWitness.EncodePreimageOracleInput(0)
 		require.NoError(t, err, "encode preimage oracle input")
 		_, leftOverGas, err := m.env.Call(vm.AccountRef(sender), m.addrs.Oracle, poInput, startingGas, big.NewInt(0))
 		require.NoErrorf(t, err, "evm should not fail, took %d gas", startingGas-leftOverGas)
 	}
-	input := stepWitness.EncodeStepInput()
+	input := stepWitness.EncodeStepInput(0)
 	ret, leftOverGas, err := m.env.Call(vm.AccountRef(sender), m.addrs.MIPS, input, startingGas, big.NewInt(0))
 	require.NoError(t, err, "evm should not fail")
 	require.Len(t, ret, 32, "expecting 32-byte state hash")
@@ -241,7 +241,7 @@ func TestEVMFault(t *testing.T) {
 			State:    initialState.EncodeWitness(),
 			MemProof: insnProof[:],
 		}
-		input := stepWitness.EncodeStepInput()
+		input := stepWitness.EncodeStepInput(0)
 		startingGas := uint64(30_000_000)
 		_, _, err := env.Call(vm.AccountRef(sender), addrs.MIPS, input, startingGas, big.NewInt(0))
...
@@ -8,6 +8,8 @@ import (
 	preimage "github.com/ethereum-optimism/optimism/op-preimage"
 )
+type LocalContext uint64
 type StepWitness struct {
 	// encoded state witness
 	State []byte
@@ -25,7 +27,13 @@ func uint32ToBytes32(v uint32) []byte {
 	return out[:]
 }
-func (wit *StepWitness) EncodeStepInput() []byte {
+func uint64ToBytes32(v uint64) []byte {
+	var out [32]byte
+	binary.BigEndian.PutUint64(out[32-8:], v)
+	return out[:]
+}
+
+func (wit *StepWitness) EncodeStepInput(localContext LocalContext) []byte {
 	abiStateLen := len(wit.State)
 	if abiStateLen%32 != 0 {
 		abiStateLen += 32 - (abiStateLen % 32)
@@ -36,8 +44,9 @@ func (wit *StepWitness) EncodeStepInput() []byte {
 	var input []byte
 	input = append(input, StepBytes4...)
-	input = append(input, uint32ToBytes32(32*2)...)                          // state data offset in bytes
-	input = append(input, uint32ToBytes32(32*2+32+uint32(len(abiState)))...) // proof data offset in bytes
+	input = append(input, uint32ToBytes32(32*3)...)                          // state data offset in bytes
+	input = append(input, uint32ToBytes32(32*3+32+uint32(len(abiState)))...) // proof data offset in bytes
+	input = append(input, uint64ToBytes32(uint64(localContext))...)          // local context in bytes
 	input = append(input, uint32ToBytes32(uint32(len(wit.State)))...) // state data length in bytes
 	input = append(input, abiState[:]...)
@@ -50,7 +59,7 @@ func (wit *StepWitness) HasPreimage() bool {
 	return wit.PreimageKey != ([32]byte{})
 }
-func (wit *StepWitness) EncodePreimageOracleInput() ([]byte, error) {
+func (wit *StepWitness) EncodePreimageOracleInput(localContext LocalContext) ([]byte, error) {
 	if wit.PreimageKey == ([32]byte{}) {
 		return nil, errors.New("cannot encode pre-image oracle input, witness has no pre-image to proof")
 	}
@@ -63,6 +72,7 @@ func (wit *StepWitness) EncodePreimageOracleInput() ([]byte, error) {
 	var input []byte
 	input = append(input, LoadLocalDataBytes4...)
 	input = append(input, wit.PreimageKey[:]...)
+	input = append(input, uint64ToBytes32(uint64(localContext))...) // local context in bytes
 	preimagePart := wit.PreimageValue[8:]
 	var tmp [32]byte
...
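To make the new calldata shape concrete: the head of the `step` call now carries three 32-byte words (state offset, proof offset, local context) before the dynamic sections, which is why the offsets move from `32*2` to `32*3` (0x60). The sketch below is a hedged usage example; the `cannon/mipsevm` package path and the zero `LocalContext` mirror how the EVM tests in this diff call the encoders, while the wrapping helper itself is hypothetical:

```go
package example

import "github.com/ethereum-optimism/optimism/cannon/mipsevm"

// encodeStepCall is a hypothetical helper showing how a caller drives the new
// encoders; a real caller would obtain the StepWitness from the VM step itself.
func encodeStepCall(wit *mipsevm.StepWitness) (stepInput, oracleInput []byte, err error) {
	lctx := mipsevm.LocalContext(0) // zero context, as in the in-repo EVM tests

	// Layout: selector | state offset (0x60) | proof offset | local context |
	//         state length | padded state | proof data
	stepInput = wit.EncodeStepInput(lctx)

	// Only witnesses that touched a pre-image can be encoded for the oracle call.
	if wit.HasPreimage() {
		oracleInput, err = wit.EncodePreimageOracleInput(lctx)
	}
	return stepInput, oracleInput, err
}
```

Because the local context is re-supplied at encode time, it no longer needs to be serialized into the proof JSON, which is what the `StepInput`/`OracleInput` removal earlier in this commit reflects.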
-comment: false
+codecov:
+  require_ci_to_pass: false
+
+comment:
+  layout: "diff, flags, files"
+  behavior: default
+  require_changes: true
+  flags:
+    - contracts-bedrock-tests
 ignore:
   - "op-e2e"
-  - "**/*.t.sol"
   - "op-bindings/bindings/*.go"
+  - "**/*.t.sol"
+  - "packages/contracts-bedrock/test/**/*.sol"
+  - "packages/contracts-bedrock/scripts/**/*.sol"
   - "packages/contracts-bedrock/contracts/vendor/WETH9.sol"
   - 'packages/contracts-bedrock/contracts/EAS/**/*.sol'
 coverage:
@@ -13,6 +23,7 @@ coverage:
       threshold: 0% # coverage is not allowed to reduce vs. the PR base
       base: auto
       informational: true
+      enabled: true
     project:
       default:
         informational: true
@@ -22,7 +33,7 @@ flag_management:
   individual_flags:
     - name: contracts-bedrock-tests
       paths:
-        - packages/contracts-bedrock/contracts
+        - packages/contracts-bedrock
       statuses:
         - type: patch
           target: 100%
...
variable "REGISTRY" {
default = "us-docker.pkg.dev"
}
variable "REPOSITORY" {
default = "oplabs-tools-artifacts/images"
}
variable "GIT_COMMIT" {
default = "dev"
}
variable "GIT_DATE" {
default = "0"
}
variable "GIT_VERSION" {
default = "docker" // original default as set in proxyd file, not used by full go stack, yet
}
variable "IMAGE_TAGS" {
default = "${GIT_COMMIT}" // split by ","
}
variable "PLATFORMS" {
// You can override this as "linux/amd64,linux/arm64".
// Only specify a single platform when `--load`ing into docker.
// Multi-platform is supported when outputting to disk or pushing to a registry.
// Multi-platform builds can be tested locally with: --set="*.output=type=image,push=false"
default = "linux/amd64"
}
target "op-stack-go" {
dockerfile = "ops/docker/op-stack-go/Dockerfile"
context = "."
args = {
GIT_COMMIT = "${GIT_COMMIT}"
GIT_DATE = "${GIT_DATE}"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-stack-go:${tag}"]
}
target "op-node" {
dockerfile = "Dockerfile"
context = "./op-node"
args = {
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-node:${tag}"]
}
target "op-batcher" {
dockerfile = "Dockerfile"
context = "./op-batcher"
args = {
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-batcher:${tag}"]
}
target "op-proposer" {
dockerfile = "Dockerfile"
context = "./op-proposer"
args = {
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-proposer:${tag}"]
}
target "op-challenger" {
dockerfile = "Dockerfile"
context = "./op-challenger"
args = {
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-challenger:${tag}"]
}
target "op-heartbeat" {
dockerfile = "Dockerfile"
context = "./op-heartbeat"
args = {
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-heartbeat:${tag}"]
}
target "op-program" {
dockerfile = "Dockerfile"
context = "./op-program"
args = {
OP_STACK_GO_BUILDER = "op-stack-go"
}
contexts = {
op-stack-go: "target:op-stack-go"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-program:${tag}"]
}
target "proxyd" {
dockerfile = "Dockerfile"
context = "./proxyd"
args = {
// proxyd dockerfile has no _ in the args
GITCOMMIT = "${GIT_COMMIT}"
GITDATE = "${GIT_DATE}"
GITVERSION = "${GIT_VERSION}"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/proxyd:${tag}"]
}
target "indexer" {
dockerfile = "./indexer/Dockerfile"
context = "./"
args = {
// proxyd dockerfile has no _ in the args
GITCOMMIT = "${GIT_COMMIT}"
GITDATE = "${GIT_DATE}"
GITVERSION = "${GIT_VERSION}"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/indexer:${tag}"]
}
target "ufm-metamask" {
dockerfile = "Dockerfile"
context = "./ufm-test-services/metamask"
args = {
// proxyd dockerfile has no _ in the args
GITCOMMIT = "${GIT_COMMIT}"
GITDATE = "${GIT_DATE}"
GITVERSION = "${GIT_VERSION}"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/ufm-metamask:${tag}"]
}
target "chain-mon" {
dockerfile = "./ops/docker/Dockerfile.packages"
context = "."
args = {
// proxyd dockerfile has no _ in the args
GITCOMMIT = "${GIT_COMMIT}"
GITDATE = "${GIT_DATE}"
GITVERSION = "${GIT_VERSION}"
}
// this is a multi-stage build, where each stage is a possible output target, but wd-mon is all we publish
target = "wd-mon"
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/chain-mon:${tag}"]
}
target "ci-builder" {
dockerfile = "./ops/docker/ci-builder/Dockerfile"
context = "."
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/ci-builder:${tag}"]
}
@@ -6,6 +6,9 @@ During this early phase of ongoing development, we invite security researchers a
 The current system is not production ready, however the core infrastructure for creating an instruction trace ([Cannon][cannon] + the [`op-program`][op-program]), the off-chain challenge agent ([`op-challenger`][op-challenger]),
 and the on-chain infrastructure for the [Dispute Game][dispute-game] are all in place.
+For the Fault Proof Alpha security review, we've pinned `546fb2c7a5796b7fe50b0b7edc7666d3bd281d6f` as the commit hash in the monorepo. This commit hash was the head of the `develop` branch at the time of the alpha's launch. All
+security reviews and PoCs should be derived from this commit hash, as the contracts and off-chain agents are being updated frequently at this stage of development.
 ### Resources
 > **Note**
...
@@ -229,7 +229,7 @@ of checking email at the start of their day. This caused some delay in the initi
 Early in the process, the existence of the issue was openly discussed in a public slack channel,
 although the details of the vulnerability and exploit path were not described. This violates the
-[principle of least priviledge](https://en.wikipedia.org/wiki/Principle_of_least_privilege), as well
+[principle of least privilege](https://en.wikipedia.org/wiki/Principle_of_least_privilege), as well
 as our already existing incident response protocols
 **Action taken:**
...
-GITCOMMIT := $(shell git rev-parse HEAD)
-GITDATE := $(shell git show -s --format='%ct')
+GITCOMMIT ?= $(shell git rev-parse HEAD)
+GITDATE ?= $(shell git show -s --format='%ct')
 VERSION := v0.0.0
 LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
...
 package main
 import (
-	"fmt"
 	"os"
+	opservice "github.com/ethereum-optimism/optimism/op-service"
 	oplog "github.com/ethereum-optimism/optimism/op-service/log"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/urfave/cli/v2"
@@ -22,7 +22,7 @@ func main() {
 	app := cli.NewApp()
 	app.Flags = endpointMonitor.CLIFlags("ENDPOINT_MONITOR")
-	app.Version = fmt.Sprintf("%s-%s-%s", Version, GitCommit, GitDate)
+	app.Version = opservice.FormatVersion(Version, GitCommit, GitDate, "")
 	app.Name = "endpoint-monitor"
 	app.Usage = "Endpoint Monitoring Service"
 	app.Description = ""
...
@@ -8,32 +8,32 @@ require (
 	github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0
 	github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
-	github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231001123245-7b48d3818686
+	github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231030223232-e16eae11e492
 	github.com/ethereum/go-ethereum v1.13.1
-	github.com/fsnotify/fsnotify v1.6.0
+	github.com/fsnotify/fsnotify v1.7.0
 	github.com/go-chi/chi/v5 v5.0.10
 	github.com/go-chi/docgen v1.2.0
 	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
 	github.com/google/go-cmp v0.6.0
 	github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8
-	github.com/google/uuid v1.3.1
+	github.com/google/uuid v1.4.0
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/hashicorp/golang-lru/v2 v2.0.5
 	github.com/holiman/uint256 v1.2.3
 	github.com/ipfs/go-datastore v0.6.0
 	github.com/ipfs/go-ds-leveldb v0.5.0
 	github.com/jackc/pgtype v1.14.0
-	github.com/jackc/pgx/v5 v5.4.3
+	github.com/jackc/pgx/v5 v5.5.0
 	github.com/libp2p/go-libp2p v0.31.0
 	github.com/libp2p/go-libp2p-mplex v0.9.0
 	github.com/libp2p/go-libp2p-pubsub v0.9.3
 	github.com/libp2p/go-libp2p-testing v0.12.0
-	github.com/mattn/go-isatty v0.0.19
+	github.com/mattn/go-isatty v0.0.20
 	github.com/multiformats/go-base32 v0.1.0
 	github.com/multiformats/go-multiaddr v0.12.0
 	github.com/multiformats/go-multiaddr-dns v0.3.1
 	github.com/olekukonko/tablewriter v0.0.5
-	github.com/onsi/gomega v1.28.0
+	github.com/onsi/gomega v1.29.0
 	github.com/pkg/errors v0.9.1
 	github.com/pkg/profile v1.7.0
 	github.com/prometheus/client_golang v1.17.0
@@ -41,17 +41,17 @@ require (
 	github.com/urfave/cli/v2 v2.25.7
 	golang.org/x/crypto v0.14.0
 	golang.org/x/exp v0.0.0-20230905200255-921286631fa9
-	golang.org/x/sync v0.4.0
+	golang.org/x/sync v0.5.0
 	golang.org/x/term v0.13.0
-	golang.org/x/time v0.3.0
-	gorm.io/driver/postgres v1.5.3
+	golang.org/x/time v0.4.0
+	gorm.io/driver/postgres v1.5.4
 	gorm.io/gorm v1.25.5
 )
 require (
 	github.com/DataDog/zstd v1.5.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
-	github.com/VictoriaMetrics/fastcache v1.10.0 // indirect
+	github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
 	github.com/allegro/bigcache v1.2.1 // indirect
 	github.com/benbjohnson/clock v1.3.5 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
@@ -60,12 +60,13 @@ require (
 	github.com/btcsuite/btcd/btcutil v1.1.0 // indirect
 	github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/cockroachdb/errors v1.9.1 // indirect
+	github.com/cockroachdb/errors v1.11.1 // indirect
 	github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
-	github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 // indirect
-	github.com/cockroachdb/redact v1.1.3 // indirect
+	github.com/cockroachdb/pebble v0.0.0-20231018212520-f6cde3fc2fa4 // indirect
+	github.com/cockroachdb/redact v1.1.5 // indirect
+	github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
 	github.com/consensys/bavard v0.1.13 // indirect
-	github.com/consensys/gnark-crypto v0.12.0 // indirect
+	github.com/consensys/gnark-crypto v0.12.1 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -81,7 +82,7 @@ require (
 	github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect
 	github.com/elastic/gosigar v0.14.2 // indirect
 	github.com/ethereum/c-kzg-4844 v0.3.1 // indirect
-	github.com/fatih/color v1.7.0 // indirect
+	github.com/fatih/color v1.13.0 // indirect
 	github.com/felixge/fgprof v0.9.3 // indirect
 	github.com/fjl/memsize v0.0.1 // indirect
 	github.com/flynn/noise v1.0.0 // indirect
@@ -96,7 +97,7 @@ require (
 	github.com/godbus/dbus/v5 v5.1.0 // indirect
 	github.com/gofrs/flock v0.8.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
+	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
 	github.com/golang/mock v1.6.0 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/gopacket v1.1.19 // indirect
@@ -117,6 +118,7 @@ require (
 	github.com/jackc/pgio v1.0.0 // indirect
 	github.com/jackc/pgpassfile v1.0.0 // indirect
 	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
+	github.com/jackc/puddle/v2 v2.2.1 // indirect
 	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
 	github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
 	github.com/jbenet/goprocess v0.1.4 // indirect
@@ -161,7 +163,7 @@ require (
 	github.com/multiformats/go-varint v0.0.7 // indirect
 	github.com/naoina/go-stringutil v0.1.0 // indirect
 	github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 // indirect
-	github.com/onsi/ginkgo/v2 v2.12.0 // indirect
+	github.com/onsi/ginkgo/v2 v2.13.0 // indirect
 	github.com/opencontainers/runtime-spec v1.1.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
@@ -208,7 +210,7 @@ require (
 	rsc.io/tmplfunc v0.0.3 // indirect
 )
-replace github.com/ethereum/go-ethereum v1.13.1 => github.com/ethereum-optimism/op-geth v1.101301.0-rc.2.0.20231002141926-1e6910b91798
+replace github.com/ethereum/go-ethereum v1.13.1 => github.com/ethereum-optimism/op-geth v1.101304.0-rc.2.0.20231030225546-cd491fa3b588
 //replace github.com/ethereum-optimism/superchain-registry/superchain => ../superchain-registry/superchain
 //replace github.com/ethereum/go-ethereum v1.13.1 => ../go-ethereum
@@ -34,12 +34,12 @@ export interface WithdrawalItem {
   from: string;
   to: string;
   transactionHash: string;
-  messageHash: string;
+  crossDomainMessageHash: string;
   timestamp: number /* uint64 */;
   l2BlockHash: string;
   amount: string;
-  proofTransactionHash: string;
-  claimTransactionHash: string;
+  l1ProvenTxHash: string;
+  l1FinalizedTxHash: string;
   l1TokenAddress: string;
   l2TokenAddress: string;
 }
...
@@ -6,33 +6,25 @@ import (
 	"fmt"
 	"net"
 	"net/http"
-	"runtime/debug"
 	"strconv"
-	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-chi/chi/v5"
+	"github.com/go-chi/chi/v5/middleware"
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/ethereum/go-ethereum/log"
 
 	"github.com/ethereum-optimism/optimism/indexer/api/routes"
 	"github.com/ethereum-optimism/optimism/indexer/config"
 	"github.com/ethereum-optimism/optimism/indexer/database"
 	"github.com/ethereum-optimism/optimism/op-service/httputil"
 	"github.com/ethereum-optimism/optimism/op-service/metrics"
-	"github.com/ethereum/go-ethereum/log"
-	"github.com/go-chi/chi/v5"
-	"github.com/go-chi/chi/v5/middleware"
-	"github.com/prometheus/client_golang/prometheus"
 )
 
 const ethereumAddressRegex = `^0x[a-fA-F0-9]{40}$`
 
-// Api ... Indexer API struct
-// TODO : Structured error responses
-type API struct {
-	log             log.Logger
-	router          *chi.Mux
-	serverConfig    config.ServerConfig
-	metricsConfig   config.ServerConfig
-	metricsRegistry *prometheus.Registry
-}
-
 const (
 	MetricsNamespace = "op_indexer_api"
 	addressParam     = "{address:%s}"
@@ -47,6 +39,23 @@
 	SupplyPath = "/api/v0/supply"
 )
 
+// Api ... Indexer API struct
+// TODO : Structured error responses
+type APIService struct {
+	log    log.Logger
+	router *chi.Mux
+
+	bv      database.BridgeTransfersView
+	dbClose func() error
+
+	metricsRegistry *prometheus.Registry
+
+	apiServer     *httputil.HTTPServer
+	metricsServer *httputil.HTTPServer
+
+	stopped atomic.Bool
+}
+
 // chiMetricsMiddleware ... Injects a metrics recorder into request processing middleware
 func chiMetricsMiddleware(rec metrics.HTTPRecorder) func(http.Handler) http.Handler {
 	return func(next http.Handler) http.Handler {
@@ -55,114 +64,116 @@ func chiMetricsMiddleware(rec metrics.HTTPRecorder) func(http.Handler) http.Handler {
 }
 
 // NewApi ... Construct a new api instance
-func NewApi(logger log.Logger, bv database.BridgeTransfersView, serverConfig config.ServerConfig, metricsConfig config.ServerConfig) *API {
-	// (1) Initialize dependencies
-	apiRouter := chi.NewRouter()
-	h := routes.NewRoutes(logger, bv, apiRouter)
-
-	mr := metrics.NewRegistry()
-	promRecorder := metrics.NewPromHTTPRecorder(mr, MetricsNamespace)
-
-	// (2) Inject routing middleware
-	apiRouter.Use(chiMetricsMiddleware(promRecorder))
-	apiRouter.Use(middleware.Recoverer)
-	apiRouter.Use(middleware.Heartbeat(HealthPath))
-
-	// (3) Set GET routes
-	apiRouter.Get(fmt.Sprintf(DepositsPath+addressParam, ethereumAddressRegex), h.L1DepositsHandler)
-	apiRouter.Get(fmt.Sprintf(WithdrawalsPath+addressParam, ethereumAddressRegex), h.L2WithdrawalsHandler)
-	apiRouter.Get(SupplyPath, h.SupplyView)
-
-	return &API{log: logger, router: apiRouter, metricsRegistry: mr, serverConfig: serverConfig, metricsConfig: metricsConfig}
-}
-
-// Run ... Runs the API server routines
-func (a *API) Run(ctx context.Context) error {
-	var wg sync.WaitGroup
-	errCh := make(chan error, 2)
-
-	// (1) Construct an inner function that will start a goroutine
-	//     and handle any panics that occur on a shared error channel
-	processCtx, processCancel := context.WithCancel(ctx)
-	runProcess := func(start func(ctx context.Context) error) {
-		wg.Add(1)
-		go func() {
-			defer func() {
-				if err := recover(); err != nil {
-					a.log.Error("halting api on panic", "err", err)
-					debug.PrintStack()
-					errCh <- fmt.Errorf("panic: %v", err)
-				}
-				processCancel()
-				wg.Done()
-			}()
-			errCh <- start(processCtx)
-		}()
-	}
-
-	// (2) Start the API and metrics servers
-	runProcess(a.startServer)
-	runProcess(a.startMetricsServer)
-
-	// (3) Wait for all processes to complete
-	wg.Wait()
-
-	err := <-errCh
-	if err != nil {
-		a.log.Error("api stopped", "err", err)
-	} else {
-		a.log.Info("api stopped")
-	}
-
-	return err
-}
-
-// Port ... Returns the the port that server is listening on
-func (a *API) Port() int {
-	return a.serverConfig.Port
-}
+func NewApi(ctx context.Context, log log.Logger, cfg *Config) (*APIService, error) {
+	out := &APIService{log: log, metricsRegistry: metrics.NewRegistry()}
+	if err := out.initFromConfig(ctx, cfg); err != nil {
+		return nil, errors.Join(err, out.Stop(ctx)) // close any resources we may have opened already
+	}
+	return out, nil
+}
+
+func (a *APIService) initFromConfig(ctx context.Context, cfg *Config) error {
+	if err := a.initDB(ctx, cfg.DB); err != nil {
+		return fmt.Errorf("failed to init DB: %w", err)
+	}
+	if err := a.startMetricsServer(cfg.MetricsServer); err != nil {
+		return fmt.Errorf("failed to start metrics server: %w", err)
+	}
+	a.initRouter(cfg.HTTPServer)
+	if err := a.startServer(cfg.HTTPServer); err != nil {
+		return fmt.Errorf("failed to start API server: %w", err)
+	}
+	return nil
+}
+
+func (a *APIService) Start(ctx context.Context) error {
+	// Completed all setup-up jobs at init-time already,
+	// and the API service does not have any other special starting routines or background-jobs to start.
+	return nil
+}
+
+func (a *APIService) Stop(ctx context.Context) error {
+	var result error
+	if a.apiServer != nil {
+		if err := a.apiServer.Stop(ctx); err != nil {
+			result = errors.Join(result, fmt.Errorf("failed to stop API server: %w", err))
+		}
+	}
+	if a.metricsServer != nil {
+		if err := a.metricsServer.Stop(ctx); err != nil {
+			result = errors.Join(result, fmt.Errorf("failed to stop metrics server: %w", err))
+		}
+	}
+	if a.dbClose != nil {
+		if err := a.dbClose(); err != nil {
+			result = errors.Join(result, fmt.Errorf("failed to close DB: %w", err))
+		}
+	}
+	a.stopped.Store(true)
+	a.log.Info("API service shutdown complete")
+	return result
+}
+
+func (a *APIService) Stopped() bool {
+	return a.stopped.Load()
+}
+
+// Addr ... returns the address that the HTTP server is listening on (excl. http:// prefix, just the host and port)
+func (a *APIService) Addr() string {
+	if a.apiServer == nil {
+		return ""
+	}
+	return a.apiServer.Addr().String()
+}
+
+func (a *APIService) initDB(ctx context.Context, connector DBConnector) error {
+	db, err := connector.OpenDB(ctx, a.log)
+	if err != nil {
+		return fmt.Errorf("failed to connect to databse: %w", err)
+	}
+	a.dbClose = db.Closer
+	a.bv = db.BridgeTransfers
+	return nil
+}
+
+func (a *APIService) initRouter(apiConfig config.ServerConfig) {
+	apiRouter := chi.NewRouter()
+	h := routes.NewRoutes(a.log, a.bv, apiRouter)
+
+	promRecorder := metrics.NewPromHTTPRecorder(a.metricsRegistry, MetricsNamespace)
+
+	apiRouter.Use(chiMetricsMiddleware(promRecorder))
+	apiRouter.Use(middleware.Timeout(time.Duration(apiConfig.WriteTimeout) * time.Second))
+	apiRouter.Use(middleware.Recoverer)
+	apiRouter.Use(middleware.Heartbeat(HealthPath))
+
+	apiRouter.Get(fmt.Sprintf(DepositsPath+addressParam, ethereumAddressRegex), h.L1DepositsHandler)
+	apiRouter.Get(fmt.Sprintf(WithdrawalsPath+addressParam, ethereumAddressRegex), h.L2WithdrawalsHandler)
+	apiRouter.Get(SupplyPath, h.SupplyView)
+	a.router = apiRouter
+}
 
 // startServer ... Starts the API server
-func (a *API) startServer(ctx context.Context) error {
-	a.log.Debug("api server listening...", "port", a.serverConfig.Port)
-	addr := net.JoinHostPort(a.serverConfig.Host, strconv.Itoa(a.serverConfig.Port))
+func (a *APIService) startServer(serverConfig config.ServerConfig) error {
+	a.log.Debug("API server listening...", "port", serverConfig.Port)
+	addr := net.JoinHostPort(serverConfig.Host, strconv.Itoa(serverConfig.Port))
 	srv, err := httputil.StartHTTPServer(addr, a.router)
 	if err != nil {
 		return fmt.Errorf("failed to start API server: %w", err)
 	}
-
-	host, portStr, err := net.SplitHostPort(srv.Addr().String())
-	if err != nil {
-		return errors.Join(err, srv.Close())
-	}
-	port, err := strconv.Atoi(portStr)
-	if err != nil {
-		return errors.Join(err, srv.Close())
-	}
-
-	// Update the port in the config in case the OS chose a different port
-	// than the one we requested (e.g. using port 0 to fetch a random open port)
-	a.serverConfig.Host = host
-	a.serverConfig.Port = port
-
-	<-ctx.Done()
-	if err := srv.Stop(context.Background()); err != nil {
-		return fmt.Errorf("failed to shutdown api server: %w", err)
-	}
+	a.log.Info("API server started", "addr", srv.Addr().String())
+	a.apiServer = srv
 	return nil
 }
 
 // startMetricsServer ... Starts the metrics server
-func (a *API) startMetricsServer(ctx context.Context) error {
-	a.log.Debug("starting metrics server...", "port", a.metricsConfig.Port)
-	srv, err := metrics.StartServer(a.metricsRegistry, a.metricsConfig.Host, a.metricsConfig.Port)
+func (a *APIService) startMetricsServer(metricsConfig config.ServerConfig) error {
+	a.log.Debug("starting metrics server...", "port", metricsConfig.Port)
+	srv, err := metrics.StartServer(a.metricsRegistry, metricsConfig.Host, metricsConfig.Port)
 	if err != nil {
 		return fmt.Errorf("failed to start metrics server: %w", err)
 	}
-	<-ctx.Done()
-	defer a.log.Info("metrics server stopped")
-	return srv.Stop(context.Background())
+	a.log.Info("Metrics server started", "addr", srv.Addr().String())
+	a.metricsServer = srv
+	return nil
 }
 package api
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"net/http"
@@ -24,11 +25,12 @@ var mockAddress = "0x4204204204204204204204204204204204204204"
 var apiConfig = config.ServerConfig{
 	Host: "localhost",
-	Port: 8080,
+	Port: 0, // random port, to allow parallel tests
 }
 var metricsConfig = config.ServerConfig{
 	Host: "localhost",
-	Port: 7300,
+	Port: 0, // random port, to allow parallel tests
 }
 var (
@@ -100,8 +102,14 @@ func (mbv *MockBridgeTransfersView) L1BridgeDepositSum() (float64, error) {
 func TestHealthz(t *testing.T) {
 	logger := testlog.Logger(t, log.LvlInfo)
-	api := NewApi(logger, &MockBridgeTransfersView{}, apiConfig, metricsConfig)
-	request, err := http.NewRequest("GET", "/healthz", nil)
+	cfg := &Config{
+		DB:            &TestDBConnector{BridgeTransfers: &MockBridgeTransfersView{}},
+		HTTPServer:    apiConfig,
+		MetricsServer: metricsConfig,
+	}
+	api, err := NewApi(context.Background(), logger, cfg)
+	require.NoError(t, err)
+	request, err := http.NewRequest("GET", "http://"+api.Addr()+"/healthz", nil)
 	assert.Nil(t, err)
 	responseRecorder := httptest.NewRecorder()
@@ -112,8 +120,14 @@ func TestHealthz(t *testing.T) {
 func TestL1BridgeDepositsHandler(t *testing.T) {
 	logger := testlog.Logger(t, log.LvlInfo)
-	api := NewApi(logger, &MockBridgeTransfersView{}, apiConfig, metricsConfig)
-	request, err := http.NewRequest("GET", fmt.Sprintf("/api/v0/deposits/%s", mockAddress), nil)
+	cfg := &Config{
+		DB:            &TestDBConnector{BridgeTransfers: &MockBridgeTransfersView{}},
+		HTTPServer:    apiConfig,
+		MetricsServer: metricsConfig,
+	}
+	api, err := NewApi(context.Background(), logger, cfg)
+	require.NoError(t, err)
+	request, err := http.NewRequest("GET", fmt.Sprintf("http://"+api.Addr()+"/api/v0/deposits/%s", mockAddress), nil)
 	assert.Nil(t, err)
 	responseRecorder := httptest.NewRecorder()
@@ -135,8 +149,14 @@ func TestL1BridgeDepositsHandler(t *testing.T) {
 func TestL2BridgeWithdrawalsByAddressHandler(t *testing.T) {
 	logger := testlog.Logger(t, log.LvlInfo)
-	api := NewApi(logger, &MockBridgeTransfersView{}, apiConfig, metricsConfig)
-	request, err := http.NewRequest("GET", fmt.Sprintf("/api/v0/withdrawals/%s", mockAddress), nil)
+	cfg := &Config{
+		DB:            &TestDBConnector{BridgeTransfers: &MockBridgeTransfersView{}},
+		HTTPServer:    apiConfig,
+		MetricsServer: metricsConfig,
+	}
+	api, err := NewApi(context.Background(), logger, cfg)
+	require.NoError(t, err)
+	request, err := http.NewRequest("GET", fmt.Sprintf("http://"+api.Addr()+"/api/v0/withdrawals/%s", mockAddress), nil)
 	assert.Nil(t, err)
 	responseRecorder := httptest.NewRecorder()
@@ -154,8 +174,8 @@ func TestL2BridgeWithdrawalsByAddressHandler(t *testing.T) {
 	assert.Equal(t, resp.Items[0].To, withdrawal.Tx.ToAddress.String())
 	assert.Equal(t, resp.Items[0].TransactionHash, common.HexToHash("0x789").String())
 	assert.Equal(t, resp.Items[0].Amount, withdrawal.Tx.Amount.String())
-	assert.Equal(t, resp.Items[0].ProofTransactionHash, common.HexToHash("0x123").String())
-	assert.Equal(t, resp.Items[0].ClaimTransactionHash, common.HexToHash("0x123").String())
+	assert.Equal(t, resp.Items[0].L1ProvenTxHash, common.HexToHash("0x123").String())
+	assert.Equal(t, resp.Items[0].L1FinalizedTxHash, common.HexToHash("0x123").String())
 	assert.Equal(t, resp.Items[0].L1TokenAddress, withdrawal.TokenPair.RemoteTokenAddress.String())
 	assert.Equal(t, resp.Items[0].L2TokenAddress, withdrawal.TokenPair.LocalTokenAddress.String())
 	assert.Equal(t, resp.Items[0].Timestamp, withdrawal.Tx.Timestamp)
...
package api
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
)
// DB represents the abstract DB access the API has.
type DB struct {
BridgeTransfers database.BridgeTransfersView
Closer func() error
}
// DBConfigConnector implements a fully config based DBConnector
type DBConfigConnector struct {
config.DBConfig
}
func (cfg *DBConfigConnector) OpenDB(ctx context.Context, log log.Logger) (*DB, error) {
db, err := database.NewDB(ctx, log, cfg.DBConfig)
if err != nil {
return nil, fmt.Errorf("failed to connect to database: %w", err)
}
return &DB{
BridgeTransfers: db.BridgeTransfers,
Closer: db.Close,
}, nil
}
type TestDBConnector struct {
BridgeTransfers database.BridgeTransfersView
}
func (tdb *TestDBConnector) OpenDB(ctx context.Context, log log.Logger) (*DB, error) {
return &DB{
BridgeTransfers: tdb.BridgeTransfers,
Closer: func() error {
log.Info("API service closed test DB view")
return nil
},
}, nil
}
// DBConnector is an interface: the config may provide different ways to access the DB.
// This is implemented in tests to provide custom DB views, or share the DB with other services.
type DBConnector interface {
OpenDB(ctx context.Context, log log.Logger) (*DB, error)
}
// Config for the API service
type Config struct {
DB DBConnector
HTTPServer config.ServerConfig
MetricsServer config.ServerConfig
}
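
For illustration, a minimal sketch of how the two connectors above plug into the same `Config`. The helper name and the argument values are hypothetical; only the `DBConfigConnector`, `TestDBConnector`, and `Config` types come from this package.

```go
package api

import (
	"github.com/ethereum-optimism/optimism/indexer/config"
	"github.com/ethereum-optimism/optimism/indexer/database"
)

// exampleConfigs (hypothetical helper) wires the API Config for production and for tests.
func exampleConfigs(dbCfg config.DBConfig, server config.ServerConfig, view database.BridgeTransfersView) (prod, test *Config) {
	// Production: the connector dials a fresh database from the TOML-derived DBConfig.
	prod = &Config{
		DB:            &DBConfigConnector{DBConfig: dbCfg},
		HTTPServer:    server,
		MetricsServer: server,
	}
	// Tests: reuse an existing BridgeTransfersView without opening a real connection.
	test = &Config{
		DB:            &TestDBConnector{BridgeTransfers: view},
		HTTPServer:    server,
		MetricsServer: server,
	}
	return prod, test
}
```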
package models package models
import (
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum/go-ethereum/common"
)
// DepositItem ... Deposit item model for API responses // DepositItem ... Deposit item model for API responses
type DepositItem struct { type DepositItem struct {
Guid string `json:"guid"` Guid string `json:"guid"`
...@@ -23,18 +28,18 @@ type DepositResponse struct { ...@@ -23,18 +28,18 @@ type DepositResponse struct {
// WithdrawalItem ... Data model for API JSON response // WithdrawalItem ... Data model for API JSON response
type WithdrawalItem struct { type WithdrawalItem struct {
Guid string `json:"guid"` Guid string `json:"guid"`
From string `json:"from"` From string `json:"from"`
To string `json:"to"` To string `json:"to"`
TransactionHash string `json:"transactionHash"` TransactionHash string `json:"transactionHash"`
MessageHash string `json:"messageHash"` CrossDomainMessageHash string `json:"crossDomainMessageHash"`
Timestamp uint64 `json:"timestamp"` Timestamp uint64 `json:"timestamp"`
L2BlockHash string `json:"l2BlockHash"` L2BlockHash string `json:"l2BlockHash"`
Amount string `json:"amount"` Amount string `json:"amount"`
ProofTransactionHash string `json:"proofTransactionHash"` L1ProvenTxHash string `json:"l1ProvenTxHash"`
ClaimTransactionHash string `json:"claimTransactionHash"` L1FinalizedTxHash string `json:"l1FinalizedTxHash"`
L1TokenAddress string `json:"l1TokenAddress"` L1TokenAddress string `json:"l1TokenAddress"`
L2TokenAddress string `json:"l2TokenAddress"` L2TokenAddress string `json:"l2TokenAddress"`
} }
// WithdrawalResponse ... Data model for API JSON response // WithdrawalResponse ... Data model for API JSON response
...@@ -47,3 +52,38 @@ type WithdrawalResponse struct { ...@@ -47,3 +52,38 @@ type WithdrawalResponse struct {
type BridgeSupplyView struct { type BridgeSupplyView struct {
L1DepositSum float64 `json:"l1DepositSum"` L1DepositSum float64 `json:"l1DepositSum"`
} }
// CreateWithdrawalResponse ... Converts a database.L2BridgeWithdrawalsResponse to an api.WithdrawalResponse
func CreateWithdrawalResponse(withdrawals *database.L2BridgeWithdrawalsResponse) WithdrawalResponse {
items := make([]WithdrawalItem, len(withdrawals.Withdrawals))
for i, withdrawal := range withdrawals.Withdrawals {
cdh := withdrawal.L2BridgeWithdrawal.CrossDomainMessageHash
if cdh == nil { // Zero value indicates that the withdrawal didn't have a cross domain message
cdh = &common.Hash{0}
}
item := WithdrawalItem{
Guid: withdrawal.L2BridgeWithdrawal.TransactionWithdrawalHash.String(),
L2BlockHash: withdrawal.L2BlockHash.String(),
Timestamp: withdrawal.L2BridgeWithdrawal.Tx.Timestamp,
From: withdrawal.L2BridgeWithdrawal.Tx.FromAddress.String(),
To: withdrawal.L2BridgeWithdrawal.Tx.ToAddress.String(),
TransactionHash: withdrawal.L2TransactionHash.String(),
Amount: withdrawal.L2BridgeWithdrawal.Tx.Amount.String(),
CrossDomainMessageHash: cdh.String(),
L1ProvenTxHash: withdrawal.ProvenL1TransactionHash.String(),
L1FinalizedTxHash: withdrawal.FinalizedL1TransactionHash.String(),
L1TokenAddress: withdrawal.L2BridgeWithdrawal.TokenPair.RemoteTokenAddress.String(),
L2TokenAddress: withdrawal.L2BridgeWithdrawal.TokenPair.LocalTokenAddress.String(),
}
items[i] = item
}
return WithdrawalResponse{
Cursor: withdrawals.Cursor,
HasNextPage: withdrawals.HasNextPage,
Items: items,
}
}
package models_test
import (
"fmt"
"reflect"
"testing"
"github.com/ethereum-optimism/optimism/indexer/api/models"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
func TestCreateWithdrawal(t *testing.T) {
// (1) Create a dummy database response object
cdh := common.HexToHash("0x2")
dbWithdrawals := &database.L2BridgeWithdrawalsResponse{
Withdrawals: []database.L2BridgeWithdrawalWithTransactionHashes{
{
L2BridgeWithdrawal: database.L2BridgeWithdrawal{
TransactionWithdrawalHash: common.HexToHash("0x1"),
BridgeTransfer: database.BridgeTransfer{
CrossDomainMessageHash: &cdh,
Tx: database.Transaction{
FromAddress: common.HexToAddress("0x3"),
ToAddress: common.HexToAddress("0x4"),
Timestamp: 5,
},
TokenPair: database.TokenPair{
LocalTokenAddress: common.HexToAddress("0x6"),
RemoteTokenAddress: common.HexToAddress("0x7"),
},
},
},
},
},
}
// (2) Create and validate response object
response := models.CreateWithdrawalResponse(dbWithdrawals)
require.NotEmpty(t, response.Items)
require.Len(t, response.Items, 1)
// (3) Use reflection to check that all fields in WithdrawalItem are populated correctly
item := response.Items[0]
structType := reflect.TypeOf(item)
structVal := reflect.ValueOf(item)
fieldNum := structVal.NumField()
for i := 0; i < fieldNum; i++ {
field := structVal.Field(i)
fieldName := structType.Field(i).Name
isSet := field.IsValid() && !field.IsZero()
require.True(t, isSet, fmt.Sprintf("%s is not set", fieldName))
}
}
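
The per-field loop above could also be factored into a small reusable helper; a sketch under the assumption that every exported field of the item must be non-zero (hypothetical helper, relying on the `reflect`, `testing`, and `require` imports already present in this test file):

```go
// requireAllFieldsSet fails the test if any exported struct field is left at its zero value.
func requireAllFieldsSet(t *testing.T, v interface{}) {
	t.Helper()
	val := reflect.ValueOf(v)
	typ := reflect.TypeOf(v)
	for i := 0; i < val.NumField(); i++ {
		require.False(t, val.Field(i).IsZero(), "%s is not set", typ.Field(i).Name)
	}
}
```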
...@@ -4,37 +4,9 @@ import ( ...@@ -4,37 +4,9 @@ import (
"net/http" "net/http"
"github.com/ethereum-optimism/optimism/indexer/api/models" "github.com/ethereum-optimism/optimism/indexer/api/models"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5"
) )
// FIXME make a pure function that returns a struct instead of newWithdrawalResponse
// newWithdrawalResponse ... Converts a database.L2BridgeWithdrawalsResponse to an api.WithdrawalResponse
func newWithdrawalResponse(withdrawals *database.L2BridgeWithdrawalsResponse) models.WithdrawalResponse {
items := make([]models.WithdrawalItem, len(withdrawals.Withdrawals))
for i, withdrawal := range withdrawals.Withdrawals {
item := models.WithdrawalItem{
Guid: withdrawal.L2BridgeWithdrawal.TransactionWithdrawalHash.String(),
L2BlockHash: withdrawal.L2BlockHash.String(),
From: withdrawal.L2BridgeWithdrawal.Tx.FromAddress.String(),
To: withdrawal.L2BridgeWithdrawal.Tx.ToAddress.String(),
TransactionHash: withdrawal.L2TransactionHash.String(),
Amount: withdrawal.L2BridgeWithdrawal.Tx.Amount.String(),
ProofTransactionHash: withdrawal.ProvenL1TransactionHash.String(),
ClaimTransactionHash: withdrawal.FinalizedL1TransactionHash.String(),
L1TokenAddress: withdrawal.L2BridgeWithdrawal.TokenPair.RemoteTokenAddress.String(),
L2TokenAddress: withdrawal.L2BridgeWithdrawal.TokenPair.LocalTokenAddress.String(),
}
items[i] = item
}
return models.WithdrawalResponse{
Cursor: withdrawals.Cursor,
HasNextPage: withdrawals.HasNextPage,
Items: items,
}
}
// L2WithdrawalsHandler ... Handles /api/v0/withdrawals/{address} GET requests // L2WithdrawalsHandler ... Handles /api/v0/withdrawals/{address} GET requests
func (h Routes) L2WithdrawalsHandler(w http.ResponseWriter, r *http.Request) { func (h Routes) L2WithdrawalsHandler(w http.ResponseWriter, r *http.Request) {
addressValue := chi.URLParam(r, "address") addressValue := chi.URLParam(r, "address")
...@@ -68,7 +40,7 @@ func (h Routes) L2WithdrawalsHandler(w http.ResponseWriter, r *http.Request) { ...@@ -68,7 +40,7 @@ func (h Routes) L2WithdrawalsHandler(w http.ResponseWriter, r *http.Request) {
h.logger.Error("Unable to read withdrawals from DB", "err", err.Error()) h.logger.Error("Unable to read withdrawals from DB", "err", err.Error())
return return
} }
response := newWithdrawalResponse(withdrawals) response := models.CreateWithdrawalResponse(withdrawals)
err = jsonResponse(w, response, http.StatusOK) err = jsonResponse(w, response, http.StatusOK)
if err != nil { if err != nil {
......
...@@ -25,3 +25,9 @@ func Clamp(start, end *big.Int, size uint64) *big.Int { ...@@ -25,3 +25,9 @@ func Clamp(start, end *big.Int, size uint64) *big.Int {
func Matcher(num int64) func(*big.Int) bool { func Matcher(num int64) func(*big.Int) bool {
return func(bi *big.Int) bool { return bi.Int64() == num } return func(bi *big.Int) bool { return bi.Int64() == num }
} }
func WeiToETH(wei *big.Int) *big.Float {
f := new(big.Float)
f.SetString(wei.String())
return f.Quo(f, big.NewFloat(1e18))
}
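
A minimal usage sketch for the helper above, assuming it lives in the indexer's `bigint` package:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum-optimism/optimism/indexer/bigint"
)

func main() {
	wei := big.NewInt(1_500_000_000_000_000_000) // 1.5 ETH expressed in wei
	eth := bigint.WeiToETH(wei)
	fmt.Println(eth.Text('f', 4)) // prints 1.5000
}
```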
package main package main
import ( import (
"context"
"github.com/urfave/cli/v2"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum-optimism/optimism/indexer" "github.com/ethereum-optimism/optimism/indexer"
"github.com/ethereum-optimism/optimism/indexer/api" "github.com/ethereum-optimism/optimism/indexer/api"
"github.com/ethereum-optimism/optimism/indexer/config" "github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database" "github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-service/cliapp"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-service/opio"
"github.com/urfave/cli/v2"
) )
var ( var (
...@@ -27,68 +32,62 @@ var ( ...@@ -27,68 +32,62 @@ var (
} }
) )
func runIndexer(ctx *cli.Context) error { func runIndexer(ctx *cli.Context, shutdown context.CancelCauseFunc) (cliapp.Lifecycle, error) {
log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "indexer") log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "indexer")
oplog.SetGlobalLogHandler(log.GetHandler()) oplog.SetGlobalLogHandler(log.GetHandler())
log.Info("running indexer...")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name)) cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil { if err != nil {
log.Error("failed to load config", "err", err) log.Error("failed to load config", "err", err)
return err return nil, err
}
db, err := database.NewDB(log, cfg.DB)
if err != nil {
log.Error("failed to connect to database", "err", err)
return err
} }
defer db.Close()
indexer, err := indexer.NewIndexer(log, db, cfg.Chain, cfg.RPCs, cfg.HTTPServer, cfg.MetricsServer) return indexer.NewIndexer(ctx.Context, log, &cfg, shutdown)
if err != nil {
log.Error("failed to create indexer", "err", err)
return err
}
return indexer.Run(ctx.Context)
} }
func runApi(ctx *cli.Context) error { func runApi(ctx *cli.Context, _ context.CancelCauseFunc) (cliapp.Lifecycle, error) {
log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "api") log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "api")
oplog.SetGlobalLogHandler(log.GetHandler()) oplog.SetGlobalLogHandler(log.GetHandler())
log.Info("running api...")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name)) cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil { if err != nil {
log.Error("failed to load config", "err", err) log.Error("failed to load config", "err", err)
return err return nil, err
} }
db, err := database.NewDB(log, cfg.DB) apiCfg := &api.Config{
if err != nil { DB: &api.DBConfigConnector{DBConfig: cfg.DB},
log.Error("failed to connect to database", "err", err) HTTPServer: cfg.HTTPServer,
return err MetricsServer: cfg.MetricsServer,
} }
defer db.Close()
api := api.NewApi(log, db.BridgeTransfers, cfg.HTTPServer, cfg.MetricsServer) return api.NewApi(ctx.Context, log, apiCfg)
return api.Run(ctx.Context)
} }
func runMigrations(ctx *cli.Context) error { func runMigrations(ctx *cli.Context) error {
log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "api") // We don't maintain a complicated lifecycle here, just interrupt to shut down.
ctx.Context = opio.CancelOnInterrupt(ctx.Context)
log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "migrations")
oplog.SetGlobalLogHandler(log.GetHandler()) oplog.SetGlobalLogHandler(log.GetHandler())
log.Info("running migrations...")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name)) cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
migrationsDir := ctx.String(MigrationsFlag.Name)
if err != nil { if err != nil {
log.Error("failed to load config", "err", err) log.Error("failed to load config", "err", err)
return err return err
} }
db, err := database.NewDB(log, cfg.DB) db, err := database.NewDB(ctx.Context, log, cfg.DB)
if err != nil { if err != nil {
log.Error("failed to connect to database", "err", err) log.Error("failed to connect to database", "err", err)
return err return err
} }
defer db.Close() defer db.Close()
migrationsDir := ctx.String(MigrationsFlag.Name)
return db.ExecuteSQLMigration(migrationsDir) return db.ExecuteSQLMigration(migrationsDir)
} }
...@@ -106,13 +105,13 @@ func newCli(GitCommit string, GitDate string) *cli.App { ...@@ -106,13 +105,13 @@ func newCli(GitCommit string, GitDate string) *cli.App {
Name: "api", Name: "api",
Flags: flags, Flags: flags,
Description: "Runs the api service", Description: "Runs the api service",
Action: runApi, Action: cliapp.LifecycleCmd(runApi),
}, },
{ {
Name: "index", Name: "index",
Flags: flags, Flags: flags,
Description: "Runs the indexing service", Description: "Runs the indexing service",
Action: runIndexer, Action: cliapp.LifecycleCmd(runIndexer),
}, },
{ {
Name: "migrate", Name: "migrate",
......
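
`cliapp.LifecycleCmd` wraps an action that returns a long-running service instead of blocking until completion. A rough sketch of the shape such a service takes; only the `Start`/`Stop` methods exercised by the e2e tests further down are shown, and the real `cliapp.Lifecycle` interface may require more:

```go
package service

import "context"

// exampleService is a hypothetical lifecycle-managed service.
type exampleService struct{}

// Start launches background work (servers, pollers, ...) and returns promptly.
func (s *exampleService) Start(ctx context.Context) error {
	return nil
}

// Stop tears the service down; invoked on interrupt or on a critical error.
func (s *exampleService) Stop(ctx context.Context) error {
	return nil
}
```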
...@@ -4,9 +4,10 @@ import ( ...@@ -4,9 +4,10 @@ import (
"context" "context"
"os" "os"
"github.com/ethereum/go-ethereum/log"
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/opio" "github.com/ethereum-optimism/optimism/op-service/opio"
"github.com/ethereum/go-ethereum/log"
) )
var ( var (
...@@ -15,17 +16,12 @@ var ( ...@@ -15,17 +16,12 @@ var (
) )
func main() { func main() {
// This is the most root context, used to propagate
// cancellations to all spawned application-level goroutines
ctx, cancel := context.WithCancel(context.Background())
go func() {
opio.BlockOnInterrupts()
cancel()
}()
oplog.SetupDefaults() oplog.SetupDefaults()
app := newCli(GitCommit, GitDate) app := newCli(GitCommit, GitDate)
// sub-commands set up their individual interrupt lifecycles, which can block on the given interrupt as needed.
ctx := opio.WithInterruptBlocker(context.Background())
if err := app.RunContext(ctx, os.Args); err != nil { if err := app.RunContext(ctx, os.Args); err != nil {
log.Error("application failed", "err", err) log.Error("application failed", "err", err)
os.Exit(1)
} }
} }
...@@ -134,10 +134,11 @@ type DBConfig struct { ...@@ -134,10 +134,11 @@ type DBConfig struct {
Password string `toml:"password"` Password string `toml:"password"`
} }
// Configures the a server // Configures the server
type ServerConfig struct { type ServerConfig struct {
Host string `toml:"host"` Host string `toml:"host"`
Port int `toml:"port"` Port int `toml:"port"`
WriteTimeout int `toml:"timeout"`
} }
// LoadConfig loads the `indexer.toml` config file from a given path // LoadConfig loads the `indexer.toml` config file from a given path
...@@ -153,18 +154,25 @@ func LoadConfig(log log.Logger, path string) (Config, error) { ...@@ -153,18 +154,25 @@ func LoadConfig(log log.Logger, path string) (Config, error) {
data = []byte(os.ExpandEnv(string(data))) data = []byte(os.ExpandEnv(string(data)))
log.Debug("parsed config file", "data", string(data)) log.Debug("parsed config file", "data", string(data))
if _, err := toml.Decode(string(data), &cfg); err != nil { md, err := toml.Decode(string(data), &cfg)
if err != nil {
log.Error("failed to decode config file", "err", err) log.Error("failed to decode config file", "err", err)
return cfg, err return cfg, err
} }
if len(md.Undecoded()) > 0 {
log.Error("unknown fields in config file", "fields", md.Undecoded())
err = fmt.Errorf("unknown fields in config file: %v", md.Undecoded())
return cfg, err
}
if cfg.Chain.Preset == DevnetPresetId { if cfg.Chain.Preset == DevnetPresetId {
preset, err := DevnetPreset() preset, err := DevnetPreset()
if err != nil { if err != nil {
return cfg, err return cfg, err
} }
log.Info("loaded local devnet preset") log.Info("detected preset", "preset", DevnetPresetId, "name", preset.Name)
cfg.Chain = preset.ChainConfig cfg.Chain = preset.ChainConfig
} else if cfg.Chain.Preset != 0 { } else if cfg.Chain.Preset != 0 {
preset, ok := Presets[cfg.Chain.Preset] preset, ok := Presets[cfg.Chain.Preset]
...@@ -191,25 +199,21 @@ func LoadConfig(log log.Logger, path string) (Config, error) { ...@@ -191,25 +199,21 @@ func LoadConfig(log log.Logger, path string) (Config, error) {
// Defaults for any unset options // Defaults for any unset options
if cfg.Chain.L1PollingInterval == 0 { if cfg.Chain.L1PollingInterval == 0 {
log.Info("setting default L1 polling interval", "interval", defaultLoopInterval)
cfg.Chain.L1PollingInterval = defaultLoopInterval cfg.Chain.L1PollingInterval = defaultLoopInterval
} }
if cfg.Chain.L2PollingInterval == 0 { if cfg.Chain.L2PollingInterval == 0 {
log.Info("setting default L2 polling interval", "interval", defaultLoopInterval)
cfg.Chain.L2PollingInterval = defaultLoopInterval cfg.Chain.L2PollingInterval = defaultLoopInterval
} }
if cfg.Chain.L1HeaderBufferSize == 0 { if cfg.Chain.L1HeaderBufferSize == 0 {
log.Info("setting default L1 header buffer", "size", defaultHeaderBufferSize)
cfg.Chain.L1HeaderBufferSize = defaultHeaderBufferSize cfg.Chain.L1HeaderBufferSize = defaultHeaderBufferSize
} }
if cfg.Chain.L2HeaderBufferSize == 0 { if cfg.Chain.L2HeaderBufferSize == 0 {
log.Info("setting default L2 header buffer", "size", defaultHeaderBufferSize)
cfg.Chain.L2HeaderBufferSize = defaultHeaderBufferSize cfg.Chain.L2HeaderBufferSize = defaultHeaderBufferSize
} }
log.Info("loaded config", "config", cfg.Chain) log.Info("loaded chain config", "config", cfg.Chain)
return cfg, nil return cfg, nil
} }
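
The unknown-key guard relies on the decoder's metadata. A standalone sketch, assuming the `github.com/BurntSushi/toml` package that provides `toml.Decode` and `MetaData.Undecoded`:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type serverConfig struct {
	Host string `toml:"host"`
	Port int    `toml:"port"`
}

func main() {
	doc := `
host = "127.0.0.1"
port = 8080
unknown_key = 420
`
	var cfg serverConfig
	md, err := toml.Decode(doc, &cfg)
	if err != nil {
		panic(err)
	}
	// Undecoded lists keys present in the document that did not map to any struct field.
	if keys := md.Undecoded(); len(keys) > 0 {
		fmt.Printf("unknown fields in config file: %v\n", keys)
	}
}
```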
...@@ -257,3 +257,49 @@ func TestLocalDevnet(t *testing.T) { ...@@ -257,3 +257,49 @@ func TestLocalDevnet(t *testing.T) {
require.Equal(t, devnetPreset.ChainConfig.L1Contracts, conf.Chain.L1Contracts) require.Equal(t, devnetPreset.ChainConfig.L1Contracts, conf.Chain.L1Contracts)
} }
func TestThrowsOnUnknownKeys(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
tmpfile, err := os.CreateTemp("", "test.toml")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
defer tmpfile.Close()
testData := `
[chain]
unknown_key = 420
preset = 420
[rpcs]
l1-rpc = "https://l1.example.com"
l2-rpc = "https://l2.example.com"
[db]
another_unknownKey = 420
host = "127.0.0.1"
port = 5432
user = "postgres"
password = "postgres"
name = "indexer"
[http]
host = "127.0.0.1"
port = 8080
[metrics]
host = "127.0.0.1"
port = 7300
`
data := []byte(testData)
err = os.WriteFile(tmpfile.Name(), data, 0644)
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
err = tmpfile.Close()
require.NoError(t, err)
_, err = LoadConfig(logger, tmpfile.Name())
require.Error(t, err)
require.Contains(t, err.Error(), "unknown fields in config file")
}
...@@ -7,8 +7,10 @@ import ( ...@@ -7,8 +7,10 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
"gorm.io/gorm/clause"
) )
/** /**
...@@ -67,17 +69,23 @@ type BlocksDB interface { ...@@ -67,17 +69,23 @@ type BlocksDB interface {
*/ */
type blocksDB struct { type blocksDB struct {
log log.Logger
gorm *gorm.DB gorm *gorm.DB
} }
func newBlocksDB(db *gorm.DB) BlocksDB { func newBlocksDB(log log.Logger, db *gorm.DB) BlocksDB {
return &blocksDB{gorm: db} return &blocksDB{log: log.New("table", "blocks"), gorm: db}
} }
// L1 // L1
func (db *blocksDB) StoreL1BlockHeaders(headers []L1BlockHeader) error { func (db *blocksDB) StoreL1BlockHeaders(headers []L1BlockHeader) error {
result := db.gorm.CreateInBatches(&headers, batchInsertSize) deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "hash"}}, DoNothing: true})
result := deduped.Create(&headers)
if result.Error == nil && int(result.RowsAffected) < len(headers) {
db.log.Warn("ignored L1 block duplicates", "duplicates", len(headers)-int(result.RowsAffected))
}
return result.Error return result.Error
} }
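
The insert-or-ignore pattern above recurs for every table touched in this diff. A generic sketch of the GORM pieces involved (the helper and its generic wrapper are hypothetical; the clause usage mirrors the code above):

```go
// storeDeduped inserts rows, silently skipping any whose conflict column already
// exists, and logs how many duplicates were ignored.
func storeDeduped[T any](log log.Logger, db *gorm.DB, conflictColumn string, rows []T) error {
	deduped := db.Clauses(clause.OnConflict{
		Columns:   []clause.Column{{Name: conflictColumn}},
		DoNothing: true,
	})
	result := deduped.Create(&rows)
	if result.Error == nil && int(result.RowsAffected) < len(rows) {
		log.Warn("ignored duplicates", "duplicates", len(rows)-int(result.RowsAffected))
	}
	return result.Error
}
```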
...@@ -115,7 +123,12 @@ func (db *blocksDB) L1LatestBlockHeader() (*L1BlockHeader, error) { ...@@ -115,7 +123,12 @@ func (db *blocksDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
// L2 // L2
func (db *blocksDB) StoreL2BlockHeaders(headers []L2BlockHeader) error { func (db *blocksDB) StoreL2BlockHeaders(headers []L2BlockHeader) error {
result := db.gorm.CreateInBatches(&headers, batchInsertSize) deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "hash"}}, DoNothing: true})
result := deduped.Create(&headers)
if result.Error == nil && int(result.RowsAffected) < len(headers) {
db.log.Warn("ignored L2 block duplicates", "duplicates", len(headers)-int(result.RowsAffected))
}
return result.Error return result.Error
} }
...@@ -173,7 +186,6 @@ func (db *blocksDB) LatestObservedEpoch(fromL1Height *big.Int, maxL1Range uint64 ...@@ -173,7 +186,6 @@ func (db *blocksDB) LatestObservedEpoch(fromL1Height *big.Int, maxL1Range uint64
var header L1BlockHeader var header L1BlockHeader
if fromL1Height != nil { if fromL1Height != nil {
result := db.gorm.Where("number = ?", fromL1Height).Take(&header) result := db.gorm.Where("number = ?", fromL1Height).Take(&header)
// TODO - Embed logging to db
if result.Error != nil { if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) { if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil return nil, nil
...@@ -183,7 +195,8 @@ func (db *blocksDB) LatestObservedEpoch(fromL1Height *big.Int, maxL1Range uint64 ...@@ -183,7 +195,8 @@ func (db *blocksDB) LatestObservedEpoch(fromL1Height *big.Int, maxL1Range uint64
fromTimestamp = header.Timestamp fromTimestamp = header.Timestamp
} else { } else {
result := db.gorm.Order("number desc").Take(&header) // Take the lowest indexed L1 block to compute the lower bound
result := db.gorm.Order("number ASC").Take(&header)
if result.Error != nil { if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) { if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil return nil, nil
...@@ -192,6 +205,7 @@ func (db *blocksDB) LatestObservedEpoch(fromL1Height *big.Int, maxL1Range uint64 ...@@ -192,6 +205,7 @@ func (db *blocksDB) LatestObservedEpoch(fromL1Height *big.Int, maxL1Range uint64
} }
fromL1Height = header.Number fromL1Height = header.Number
fromTimestamp = header.Timestamp
} }
// Upper Bound (lowest timestamp indexed between L1/L2 bounded by `maxL1Range`) // Upper Bound (lowest timestamp indexed between L1/L2 bounded by `maxL1Range`)
......
...@@ -6,8 +6,10 @@ import ( ...@@ -6,8 +6,10 @@ import (
"math/big" "math/big"
"gorm.io/gorm" "gorm.io/gorm"
"gorm.io/gorm/clause"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/google/uuid" "github.com/google/uuid"
) )
...@@ -60,11 +62,12 @@ type BridgeMessagesDB interface { ...@@ -60,11 +62,12 @@ type BridgeMessagesDB interface {
*/ */
type bridgeMessagesDB struct { type bridgeMessagesDB struct {
log log.Logger
gorm *gorm.DB gorm *gorm.DB
} }
func newBridgeMessagesDB(db *gorm.DB) BridgeMessagesDB { func newBridgeMessagesDB(log log.Logger, db *gorm.DB) BridgeMessagesDB {
return &bridgeMessagesDB{gorm: db} return &bridgeMessagesDB{log: log.New("table", "bridge_messages"), gorm: db}
} }
/** /**
...@@ -72,7 +75,12 @@ func newBridgeMessagesDB(db *gorm.DB) BridgeMessagesDB { ...@@ -72,7 +75,12 @@ func newBridgeMessagesDB(db *gorm.DB) BridgeMessagesDB {
*/ */
func (db bridgeMessagesDB) StoreL1BridgeMessages(messages []L1BridgeMessage) error { func (db bridgeMessagesDB) StoreL1BridgeMessages(messages []L1BridgeMessage) error {
result := db.gorm.CreateInBatches(&messages, batchInsertSize) deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "message_hash"}}, DoNothing: true})
result := deduped.Create(&messages)
if result.Error == nil && int(result.RowsAffected) < len(messages) {
db.log.Warn("ignored L1 bridge message duplicates", "duplicates", len(messages)-int(result.RowsAffected))
}
return result.Error return result.Error
} }
...@@ -98,7 +106,13 @@ func (db bridgeMessagesDB) MarkRelayedL1BridgeMessage(messageHash common.Hash, r ...@@ -98,7 +106,13 @@ func (db bridgeMessagesDB) MarkRelayedL1BridgeMessage(messageHash common.Hash, r
if err != nil { if err != nil {
return err return err
} else if message == nil { } else if message == nil {
return fmt.Errorf("L1BridgeMessage with message hash %s not found", messageHash) return fmt.Errorf("L1BridgeMessage %s not found", messageHash)
}
if message.RelayedMessageEventGUID != nil && message.RelayedMessageEventGUID.ID() == relayEvent.ID() {
return nil
} else if message.RelayedMessageEventGUID != nil {
return fmt.Errorf("relayed message %s re-relayed with a different event %s", messageHash, relayEvent)
} }
message.RelayedMessageEventGUID = &relayEvent message.RelayedMessageEventGUID = &relayEvent
...@@ -111,7 +125,12 @@ func (db bridgeMessagesDB) MarkRelayedL1BridgeMessage(messageHash common.Hash, r ...@@ -111,7 +125,12 @@ func (db bridgeMessagesDB) MarkRelayedL1BridgeMessage(messageHash common.Hash, r
*/ */
func (db bridgeMessagesDB) StoreL2BridgeMessages(messages []L2BridgeMessage) error { func (db bridgeMessagesDB) StoreL2BridgeMessages(messages []L2BridgeMessage) error {
result := db.gorm.CreateInBatches(&messages, batchInsertSize) deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "message_hash"}}, DoNothing: true})
result := deduped.Create(&messages)
if result.Error == nil && int(result.RowsAffected) < len(messages) {
db.log.Warn("ignored L2 bridge message duplicates", "duplicates", len(messages)-int(result.RowsAffected))
}
return result.Error return result.Error
} }
...@@ -137,7 +156,13 @@ func (db bridgeMessagesDB) MarkRelayedL2BridgeMessage(messageHash common.Hash, r ...@@ -137,7 +156,13 @@ func (db bridgeMessagesDB) MarkRelayedL2BridgeMessage(messageHash common.Hash, r
if err != nil { if err != nil {
return err return err
} else if message == nil { } else if message == nil {
return fmt.Errorf("L2BridgeMessage with message hash %s not found", messageHash) return fmt.Errorf("L2BridgeMessage %s not found", messageHash)
}
if message.RelayedMessageEventGUID != nil && message.RelayedMessageEventGUID.ID() == relayEvent.ID() {
return nil
} else if message.RelayedMessageEventGUID != nil {
return fmt.Errorf("relayed message %s re-relayed with a different event %s", messageHash, relayEvent)
} }
message.RelayedMessageEventGUID = &relayEvent message.RelayedMessageEventGUID = &relayEvent
......
...@@ -7,8 +7,10 @@ import ( ...@@ -7,8 +7,10 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
"gorm.io/gorm" "gorm.io/gorm"
"gorm.io/gorm/clause"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
) )
/** /**
...@@ -68,11 +70,12 @@ type BridgeTransactionsDB interface { ...@@ -68,11 +70,12 @@ type BridgeTransactionsDB interface {
*/ */
type bridgeTransactionsDB struct { type bridgeTransactionsDB struct {
log log.Logger
gorm *gorm.DB gorm *gorm.DB
} }
func newBridgeTransactionsDB(db *gorm.DB) BridgeTransactionsDB { func newBridgeTransactionsDB(log log.Logger, db *gorm.DB) BridgeTransactionsDB {
return &bridgeTransactionsDB{gorm: db} return &bridgeTransactionsDB{log: log.New("table", "bridge_transactions"), gorm: db}
} }
/** /**
...@@ -80,7 +83,12 @@ func newBridgeTransactionsDB(db *gorm.DB) BridgeTransactionsDB { ...@@ -80,7 +83,12 @@ func newBridgeTransactionsDB(db *gorm.DB) BridgeTransactionsDB {
*/ */
func (db *bridgeTransactionsDB) StoreL1TransactionDeposits(deposits []L1TransactionDeposit) error { func (db *bridgeTransactionsDB) StoreL1TransactionDeposits(deposits []L1TransactionDeposit) error {
result := db.gorm.CreateInBatches(&deposits, batchInsertSize) deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "source_hash"}}, DoNothing: true})
result := deduped.Create(&deposits)
if result.Error == nil && int(result.RowsAffected) < len(deposits) {
db.log.Warn("ignored L1 tx deposit duplicates", "duplicates", len(deposits)-int(result.RowsAffected))
}
return result.Error return result.Error
} }
...@@ -133,7 +141,12 @@ func (db *bridgeTransactionsDB) L1LatestBlockHeader() (*L1BlockHeader, error) { ...@@ -133,7 +141,12 @@ func (db *bridgeTransactionsDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
*/ */
func (db *bridgeTransactionsDB) StoreL2TransactionWithdrawals(withdrawals []L2TransactionWithdrawal) error { func (db *bridgeTransactionsDB) StoreL2TransactionWithdrawals(withdrawals []L2TransactionWithdrawal) error {
result := db.gorm.CreateInBatches(&withdrawals, batchInsertSize) deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "withdrawal_hash"}}, DoNothing: true})
result := deduped.Create(&withdrawals)
if result.Error == nil && int(result.RowsAffected) < len(withdrawals) {
db.log.Warn("ignored L2 tx withdrawal duplicates", "duplicates", len(withdrawals)-int(result.RowsAffected))
}
return result.Error return result.Error
} }
...@@ -155,11 +168,16 @@ func (db *bridgeTransactionsDB) MarkL2TransactionWithdrawalProvenEvent(withdrawa ...@@ -155,11 +168,16 @@ func (db *bridgeTransactionsDB) MarkL2TransactionWithdrawalProvenEvent(withdrawa
withdrawal, err := db.L2TransactionWithdrawal(withdrawalHash) withdrawal, err := db.L2TransactionWithdrawal(withdrawalHash)
if err != nil { if err != nil {
return err return err
} } else if withdrawal == nil {
if withdrawal == nil {
return fmt.Errorf("transaction withdrawal hash %s not found", withdrawalHash) return fmt.Errorf("transaction withdrawal hash %s not found", withdrawalHash)
} }
if withdrawal.ProvenL1EventGUID != nil && withdrawal.ProvenL1EventGUID.ID() == provenL1EventGuid.ID() {
return nil
} else if withdrawal.ProvenL1EventGUID != nil {
return fmt.Errorf("proven withdrawal %s re-proven with a different event %s", withdrawalHash, provenL1EventGuid)
}
withdrawal.ProvenL1EventGUID = &provenL1EventGuid withdrawal.ProvenL1EventGUID = &provenL1EventGuid
result := db.gorm.Save(&withdrawal) result := db.gorm.Save(&withdrawal)
return result.Error return result.Error
...@@ -170,14 +188,18 @@ func (db *bridgeTransactionsDB) MarkL2TransactionWithdrawalFinalizedEvent(withdr ...@@ -170,14 +188,18 @@ func (db *bridgeTransactionsDB) MarkL2TransactionWithdrawalFinalizedEvent(withdr
withdrawal, err := db.L2TransactionWithdrawal(withdrawalHash) withdrawal, err := db.L2TransactionWithdrawal(withdrawalHash)
if err != nil { if err != nil {
return err return err
} } else if withdrawal == nil {
if withdrawal == nil {
return fmt.Errorf("transaction withdrawal hash %s not found", withdrawalHash) return fmt.Errorf("transaction withdrawal hash %s not found", withdrawalHash)
} } else if withdrawal.ProvenL1EventGUID == nil {
if withdrawal.ProvenL1EventGUID == nil {
return fmt.Errorf("cannot mark unproven withdrawal hash %s as finalized", withdrawal.WithdrawalHash) return fmt.Errorf("cannot mark unproven withdrawal hash %s as finalized", withdrawal.WithdrawalHash)
} }
if withdrawal.FinalizedL1EventGUID != nil && withdrawal.FinalizedL1EventGUID.ID() == finalizedL1EventGuid.ID() {
return nil
} else if withdrawal.FinalizedL1EventGUID != nil {
return fmt.Errorf("finalized withdrawal %s re-finalized with a different event %s", withdrawalHash, finalizedL1EventGuid)
}
withdrawal.FinalizedL1EventGUID = &finalizedL1EventGuid withdrawal.FinalizedL1EventGUID = &finalizedL1EventGuid
withdrawal.Succeeded = &succeeded withdrawal.Succeeded = &succeeded
result := db.gorm.Save(&withdrawal) result := db.gorm.Save(&withdrawal)
......
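
Both Mark* helpers above enforce the same idempotency rule: re-marking with the same event GUID is a no-op, while a different event for an already-marked withdrawal is rejected. A condensed sketch (hypothetical helper, reusing the types already defined in this package):

```go
// markProven applies the idempotency guard before persisting the proven event.
func markProven(withdrawal *L2TransactionWithdrawal, provenEvent uuid.UUID) error {
	if withdrawal.ProvenL1EventGUID != nil {
		if withdrawal.ProvenL1EventGUID.ID() == provenEvent.ID() {
			return nil // already recorded by this exact event
		}
		return fmt.Errorf("withdrawal %s re-proven with a different event %s", withdrawal.WithdrawalHash, provenEvent)
	}
	withdrawal.ProvenL1EventGUID = &provenEvent
	return nil
}
```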
...@@ -5,9 +5,11 @@ import ( ...@@ -5,9 +5,11 @@ import (
"fmt" "fmt"
"gorm.io/gorm" "gorm.io/gorm"
"gorm.io/gorm/clause"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys" "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
) )
var ( var (
...@@ -80,11 +82,12 @@ type BridgeTransfersDB interface { ...@@ -80,11 +82,12 @@ type BridgeTransfersDB interface {
*/ */
type bridgeTransfersDB struct { type bridgeTransfersDB struct {
log log.Logger
gorm *gorm.DB gorm *gorm.DB
} }
func newBridgeTransfersDB(db *gorm.DB) BridgeTransfersDB { func newBridgeTransfersDB(log log.Logger, db *gorm.DB) BridgeTransfersDB {
return &bridgeTransfersDB{gorm: db} return &bridgeTransfersDB{log: log.New("table", "bridge_transfers"), gorm: db}
} }
/** /**
...@@ -92,7 +95,12 @@ func newBridgeTransfersDB(db *gorm.DB) BridgeTransfersDB { ...@@ -92,7 +95,12 @@ func newBridgeTransfersDB(db *gorm.DB) BridgeTransfersDB {
*/ */
func (db *bridgeTransfersDB) StoreL1BridgeDeposits(deposits []L1BridgeDeposit) error { func (db *bridgeTransfersDB) StoreL1BridgeDeposits(deposits []L1BridgeDeposit) error {
result := db.gorm.CreateInBatches(&deposits, batchInsertSize) deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "transaction_source_hash"}}, DoNothing: true})
result := deduped.Create(&deposits)
if result.Error == nil && int(result.RowsAffected) < len(deposits) {
db.log.Warn("ignored L1 bridge transfer duplicates", "duplicates", len(deposits)-int(result.RowsAffected))
}
return result.Error return result.Error
} }
...@@ -159,12 +167,11 @@ func (db *bridgeTransfersDB) L1BridgeDepositsByAddress(address common.Address, c ...@@ -159,12 +167,11 @@ func (db *bridgeTransfersDB) L1BridgeDepositsByAddress(address common.Address, c
cursorClause = fmt.Sprintf("l1_transaction_deposits.timestamp <= %d", txDeposit.Tx.Timestamp) cursorClause = fmt.Sprintf("l1_transaction_deposits.timestamp <= %d", txDeposit.Tx.Timestamp)
} }
// TODO join with l1_bridged_tokens and l2_bridged_tokens
ethAddressString := predeploys.LegacyERC20ETHAddr.String() ethAddressString := predeploys.LegacyERC20ETHAddr.String()
// Coalesce l1 transaction deposits that are simply ETH sends // Coalesce l1 transaction deposits that are simply ETH sends
ethTransactionDeposits := db.gorm.Model(&L1TransactionDeposit{}) ethTransactionDeposits := db.gorm.Model(&L1TransactionDeposit{})
ethTransactionDeposits = ethTransactionDeposits.Where(&Transaction{FromAddress: address}).Where("data = '0x' AND amount > 0") ethTransactionDeposits = ethTransactionDeposits.Where(&Transaction{FromAddress: address}).Where("amount > 0")
ethTransactionDeposits = ethTransactionDeposits.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = initiated_l1_event_guid") ethTransactionDeposits = ethTransactionDeposits.Joins("INNER JOIN l1_contract_events ON l1_contract_events.guid = initiated_l1_event_guid")
ethTransactionDeposits = ethTransactionDeposits.Select(` ethTransactionDeposits = ethTransactionDeposits.Select(`
from_address, to_address, amount, data, source_hash AS transaction_source_hash, from_address, to_address, amount, data, source_hash AS transaction_source_hash,
...@@ -217,7 +224,12 @@ l1_bridge_deposits.timestamp, cross_domain_message_hash, local_token_address, re ...@@ -217,7 +224,12 @@ l1_bridge_deposits.timestamp, cross_domain_message_hash, local_token_address, re
*/ */
func (db *bridgeTransfersDB) StoreL2BridgeWithdrawals(withdrawals []L2BridgeWithdrawal) error { func (db *bridgeTransfersDB) StoreL2BridgeWithdrawals(withdrawals []L2BridgeWithdrawal) error {
result := db.gorm.CreateInBatches(&withdrawals, batchInsertSize) deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "transaction_withdrawal_hash"}}, DoNothing: true})
result := deduped.Create(&withdrawals)
if result.Error == nil && int(result.RowsAffected) < len(withdrawals) {
db.log.Warn("ignored L2 bridge transfer duplicates", "duplicates", len(withdrawals)-int(result.RowsAffected))
}
return result.Error return result.Error
} }
...@@ -283,7 +295,7 @@ func (db *bridgeTransfersDB) L2BridgeWithdrawalsByAddress(address common.Address ...@@ -283,7 +295,7 @@ func (db *bridgeTransfersDB) L2BridgeWithdrawalsByAddress(address common.Address
// Coalesce l2 transaction withdrawals that are simply ETH sends // Coalesce l2 transaction withdrawals that are simply ETH sends
ethTransactionWithdrawals := db.gorm.Model(&L2TransactionWithdrawal{}) ethTransactionWithdrawals := db.gorm.Model(&L2TransactionWithdrawal{})
ethTransactionWithdrawals = ethTransactionWithdrawals.Where(&Transaction{FromAddress: address}).Where(`data = '0x' AND amount > 0`) ethTransactionWithdrawals = ethTransactionWithdrawals.Where(&Transaction{FromAddress: address}).Where("amount > 0")
ethTransactionWithdrawals = ethTransactionWithdrawals.Joins("INNER JOIN l2_contract_events ON l2_contract_events.guid = l2_transaction_withdrawals.initiated_l2_event_guid") ethTransactionWithdrawals = ethTransactionWithdrawals.Joins("INNER JOIN l2_contract_events ON l2_contract_events.guid = l2_transaction_withdrawals.initiated_l2_event_guid")
ethTransactionWithdrawals = ethTransactionWithdrawals.Joins("LEFT JOIN l1_contract_events AS proven_l1_events ON proven_l1_events.guid = l2_transaction_withdrawals.proven_l1_event_guid") ethTransactionWithdrawals = ethTransactionWithdrawals.Joins("LEFT JOIN l1_contract_events AS proven_l1_events ON proven_l1_events.guid = l2_transaction_withdrawals.proven_l1_event_guid")
ethTransactionWithdrawals = ethTransactionWithdrawals.Joins("LEFT JOIN l1_contract_events AS finalized_l1_events ON finalized_l1_events.guid = l2_transaction_withdrawals.finalized_l1_event_guid") ethTransactionWithdrawals = ethTransactionWithdrawals.Joins("LEFT JOIN l1_contract_events AS finalized_l1_events ON finalized_l1_events.guid = l2_transaction_withdrawals.finalized_l1_event_guid")
......
...@@ -6,9 +6,11 @@ import ( ...@@ -6,9 +6,11 @@ import (
"math/big" "math/big"
"gorm.io/gorm" "gorm.io/gorm"
"gorm.io/gorm/clause"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/google/uuid" "github.com/google/uuid"
) )
...@@ -99,17 +101,25 @@ type ContractEventsDB interface { ...@@ -99,17 +101,25 @@ type ContractEventsDB interface {
*/ */
type contractEventsDB struct { type contractEventsDB struct {
log log.Logger
gorm *gorm.DB gorm *gorm.DB
} }
func newContractEventsDB(db *gorm.DB) ContractEventsDB { func newContractEventsDB(log log.Logger, db *gorm.DB) ContractEventsDB {
return &contractEventsDB{gorm: db} return &contractEventsDB{log: log.New("table", "events"), gorm: db}
} }
// L1 // L1
func (db *contractEventsDB) StoreL1ContractEvents(events []L1ContractEvent) error { func (db *contractEventsDB) StoreL1ContractEvents(events []L1ContractEvent) error {
result := db.gorm.CreateInBatches(&events, batchInsertSize) // Since the block hash refers back to L1, we don't necessarily have to check
// that the RLP bytes match when doing conflict resolution.
deduped := db.gorm.Clauses(clause.OnConflict{OnConstraint: "l1_contract_events_block_hash_log_index_key", DoNothing: true})
result := deduped.Create(&events)
if result.Error == nil && int(result.RowsAffected) < len(events) {
db.log.Warn("ignored L1 contract event duplicates", "duplicates", len(events)-int(result.RowsAffected))
}
return result.Error return result.Error
} }
...@@ -144,7 +154,7 @@ func (db *contractEventsDB) L1ContractEventsWithFilter(filter ContractEvent, fro ...@@ -144,7 +154,7 @@ func (db *contractEventsDB) L1ContractEventsWithFilter(filter ContractEvent, fro
query := db.gorm.Table("l1_contract_events").Where(&filter) query := db.gorm.Table("l1_contract_events").Where(&filter)
query = query.Joins("INNER JOIN l1_block_headers ON l1_contract_events.block_hash = l1_block_headers.hash") query = query.Joins("INNER JOIN l1_block_headers ON l1_contract_events.block_hash = l1_block_headers.hash")
query = query.Where("l1_block_headers.number >= ? AND l1_block_headers.number <= ?", fromHeight, toHeight) query = query.Where("l1_block_headers.number >= ? AND l1_block_headers.number <= ?", fromHeight, toHeight)
query = query.Order("l1_block_headers.number ASC").Select("l1_contract_events.*") query = query.Order("l1_block_headers.number ASC, l1_contract_events.log_index ASC").Select("l1_contract_events.*")
// NOTE: We use `Find` here instead of `Scan` since `Scan` doesn't support // NOTE: We use `Find` here instead of `Scan` since `Scan` doesn't support
// model hooks like `ContractEvent#AfterFind`. Functionally they are the same // model hooks like `ContractEvent#AfterFind`. Functionally they are the same
...@@ -176,7 +186,14 @@ func (db *contractEventsDB) L1LatestContractEventWithFilter(filter ContractEvent ...@@ -176,7 +186,14 @@ func (db *contractEventsDB) L1LatestContractEventWithFilter(filter ContractEvent
// L2 // L2
func (db *contractEventsDB) StoreL2ContractEvents(events []L2ContractEvent) error { func (db *contractEventsDB) StoreL2ContractEvents(events []L2ContractEvent) error {
result := db.gorm.CreateInBatches(&events, batchInsertSize) // Since the block hash refers back to L2, we don't necessarily have to check
// that the RLP bytes match when doing conflict resolution.
deduped := db.gorm.Clauses(clause.OnConflict{OnConstraint: "l2_contract_events_block_hash_log_index_key", DoNothing: true})
result := deduped.Create(&events)
if result.Error == nil && int(result.RowsAffected) < len(events) {
db.log.Warn("ignored L2 contract event duplicates", "duplicates", len(events)-int(result.RowsAffected))
}
return result.Error return result.Error
} }
...@@ -211,7 +228,7 @@ func (db *contractEventsDB) L2ContractEventsWithFilter(filter ContractEvent, fro ...@@ -211,7 +228,7 @@ func (db *contractEventsDB) L2ContractEventsWithFilter(filter ContractEvent, fro
query := db.gorm.Table("l2_contract_events").Where(&filter) query := db.gorm.Table("l2_contract_events").Where(&filter)
query = query.Joins("INNER JOIN l2_block_headers ON l2_contract_events.block_hash = l2_block_headers.hash") query = query.Joins("INNER JOIN l2_block_headers ON l2_contract_events.block_hash = l2_block_headers.hash")
query = query.Where("l2_block_headers.number >= ? AND l2_block_headers.number <= ?", fromHeight, toHeight) query = query.Where("l2_block_headers.number >= ? AND l2_block_headers.number <= ?", fromHeight, toHeight)
query = query.Order("l2_block_headers.number ASC").Select("l2_contract_events.*") query = query.Order("l2_block_headers.number ASC, l2_contract_events.log_index ASC").Select("l2_contract_events.*")
// NOTE: We use `Find` here instead of `Scan` since `Scan` doesn't support // NOTE: We use `Find` here instead of `Scan` since `Scan` doesn't support
// model hooks like `ContractEvent#AfterFind`. Functionally they are the same // model hooks like `ContractEvent#AfterFind`. Functionally they are the same
......
...@@ -10,6 +10,7 @@ import ( ...@@ -10,6 +10,7 @@ import (
"github.com/ethereum-optimism/optimism/indexer/config" "github.com/ethereum-optimism/optimism/indexer/config"
_ "github.com/ethereum-optimism/optimism/indexer/database/serializers" _ "github.com/ethereum-optimism/optimism/indexer/database/serializers"
"github.com/ethereum-optimism/optimism/op-service/retry" "github.com/ethereum-optimism/optimism/op-service/retry"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
...@@ -18,16 +19,9 @@ import ( ...@@ -18,16 +19,9 @@ import (
"gorm.io/gorm" "gorm.io/gorm"
) )
var (
// The postgres parameter counter for a given query is stored via a uint16,
// resulting in a parameter limit of 65535. In order to avoid reaching this limit
// we'll utilize a batch size of 3k for inserts, well below the limit as long as the number
// of columns < 20.
batchInsertSize int = 3_000
)
type DB struct { type DB struct {
gorm *gorm.DB gorm *gorm.DB
log log.Logger
Blocks BlocksDB Blocks BlocksDB
ContractEvents ContractEventsDB ContractEvents ContractEventsDB
...@@ -36,8 +30,10 @@ type DB struct { ...@@ -36,8 +30,10 @@ type DB struct {
BridgeTransactions BridgeTransactionsDB BridgeTransactions BridgeTransactionsDB
} }
func NewDB(log log.Logger, dbConfig config.DBConfig) (*DB, error) { // NewDB connects to the configured DB, and provides client-bindings to it.
retryStrategy := &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250} // The initial connection may fail, or the dial may be cancelled with the provided context.
func NewDB(ctx context.Context, log log.Logger, dbConfig config.DBConfig) (*DB, error) {
log = log.New("module", "db")
dsn := fmt.Sprintf("host=%s dbname=%s sslmode=disable", dbConfig.Host, dbConfig.Name) dsn := fmt.Sprintf("host=%s dbname=%s sslmode=disable", dbConfig.Host, dbConfig.Name)
if dbConfig.Port != 0 { if dbConfig.Port != 0 {
...@@ -51,11 +47,19 @@ func NewDB(log log.Logger, dbConfig config.DBConfig) (*DB, error) { ...@@ -51,11 +47,19 @@ func NewDB(log log.Logger, dbConfig config.DBConfig) (*DB, error) {
} }
gormConfig := gorm.Config{ gormConfig := gorm.Config{
Logger: newLogger(log),
// The indexer will explicitly manage the transactions // The indexer will explicitly manage the transactions
SkipDefaultTransaction: true, SkipDefaultTransaction: true,
Logger: newLogger(log),
// The postgres parameter counter for a given query is represented with uint16,
// resulting in a parameter limit of 65535. In order to avoid reaching this limit
// we'll utilize a batch size of 3k for inserts, well below the limit as long as
// the number of columns < 20.
CreateBatchSize: 3_000,
} }
retryStrategy := &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250}
gorm, err := retry.Do[*gorm.DB](context.Background(), 10, retryStrategy, func() (*gorm.DB, error) { gorm, err := retry.Do[*gorm.DB](ctx, 10, retryStrategy, func() (*gorm.DB, error) {
gorm, err := gorm.Open(postgres.Open(dsn), &gormConfig) gorm, err := gorm.Open(postgres.Open(dsn), &gormConfig)
if err != nil { if err != nil {
...@@ -66,16 +70,17 @@ func NewDB(log log.Logger, dbConfig config.DBConfig) (*DB, error) { ...@@ -66,16 +70,17 @@ func NewDB(log log.Logger, dbConfig config.DBConfig) (*DB, error) {
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to connect to database after multiple retries: %w", err) return nil, err
} }
db := &DB{ db := &DB{
gorm: gorm, gorm: gorm,
Blocks: newBlocksDB(gorm), log: log,
ContractEvents: newContractEventsDB(gorm), Blocks: newBlocksDB(log, gorm),
BridgeTransfers: newBridgeTransfersDB(gorm), ContractEvents: newContractEventsDB(log, gorm),
BridgeMessages: newBridgeMessagesDB(gorm), BridgeTransfers: newBridgeTransfersDB(log, gorm),
BridgeTransactions: newBridgeTransactionsDB(gorm), BridgeMessages: newBridgeMessagesDB(log, gorm),
BridgeTransactions: newBridgeTransactionsDB(log, gorm),
} }
return db, nil return db, nil
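
For context on the `CreateBatchSize` setting: with it configured, a single `Create` over a large slice is executed as several INSERT statements of at most 3k rows each, keeping the bound-parameter count well under postgres' uint16 (65535) limit. A hypothetical illustration using the package's `L1BlockHeader` model:

```go
// exampleBulkInsert relies on CreateBatchSize from gorm.Config: a slice larger than
// 3,000 rows is split into multiple INSERTs of at most 3,000 rows each.
func exampleBulkInsert(db *gorm.DB, headers []L1BlockHeader) error {
	return db.Create(&headers).Error
}
```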
...@@ -85,11 +90,21 @@ func NewDB(log log.Logger, dbConfig config.DBConfig) (*DB, error) { ...@@ -85,11 +90,21 @@ func NewDB(log log.Logger, dbConfig config.DBConfig) (*DB, error) {
// transaction. If the supplied function errors, the transaction is rolled back. // transaction. If the supplied function errors, the transaction is rolled back.
func (db *DB) Transaction(fn func(db *DB) error) error { func (db *DB) Transaction(fn func(db *DB) error) error {
return db.gorm.Transaction(func(tx *gorm.DB) error { return db.gorm.Transaction(func(tx *gorm.DB) error {
return fn(dbFromGormTx(tx)) txDB := &DB{
gorm: tx,
Blocks: newBlocksDB(db.log, tx),
ContractEvents: newContractEventsDB(db.log, tx),
BridgeTransfers: newBridgeTransfersDB(db.log, tx),
BridgeMessages: newBridgeMessagesDB(db.log, tx),
BridgeTransactions: newBridgeTransactionsDB(db.log, tx),
}
return fn(txDB)
}) })
} }
func (db *DB) Close() error { func (db *DB) Close() error {
db.log.Info("closing database")
sql, err := db.gorm.DB() sql, err := db.gorm.DB()
if err != nil { if err != nil {
return err return err
...@@ -98,17 +113,6 @@ func (db *DB) Close() error { ...@@ -98,17 +113,6 @@ func (db *DB) Close() error {
return sql.Close() return sql.Close()
} }
func dbFromGormTx(tx *gorm.DB) *DB {
return &DB{
gorm: tx,
Blocks: newBlocksDB(tx),
ContractEvents: newContractEventsDB(tx),
BridgeTransfers: newBridgeTransfersDB(tx),
BridgeMessages: newBridgeMessagesDB(tx),
BridgeTransactions: newBridgeTransactionsDB(tx),
}
}
func (db *DB) ExecuteSQLMigration(migrationsFolder string) error { func (db *DB) ExecuteSQLMigration(migrationsFolder string) error {
err := filepath.Walk(migrationsFolder, func(path string, info os.FileInfo, err error) error { err := filepath.Walk(migrationsFolder, func(path string, info os.FileInfo, err error) error {
// Check for any walking error // Check for any walking error
...@@ -122,12 +126,14 @@ func (db *DB) ExecuteSQLMigration(migrationsFolder string) error { ...@@ -122,12 +126,14 @@ func (db *DB) ExecuteSQLMigration(migrationsFolder string) error {
} }
// Read the migration file content // Read the migration file content
db.log.Info("reading sql file", "path", path)
fileContent, readErr := os.ReadFile(path) fileContent, readErr := os.ReadFile(path)
if readErr != nil { if readErr != nil {
return errors.Wrap(readErr, fmt.Sprintf("Error reading SQL file: %s", path)) return errors.Wrap(readErr, fmt.Sprintf("Error reading SQL file: %s", path))
} }
// Execute the migration // Execute the migration
db.log.Info("executing sql file", "path", path)
execErr := db.gorm.Exec(string(fileContent)).Error execErr := db.gorm.Exec(string(fileContent)).Error
if execErr != nil { if execErr != nil {
return errors.Wrap(execErr, fmt.Sprintf("Error executing SQL script: %s", path)) return errors.Wrap(execErr, fmt.Sprintf("Error executing SQL script: %s", path))
...@@ -136,5 +142,6 @@ func (db *DB) ExecuteSQLMigration(migrationsFolder string) error { ...@@ -136,5 +142,6 @@ func (db *DB) ExecuteSQLMigration(migrationsFolder string) error {
return nil return nil
}) })
db.log.Info("finished migrations")
return err return err
} }
...@@ -14,7 +14,7 @@ import ( ...@@ -14,7 +14,7 @@ import (
var ( var (
_ logger.Interface = Logger{} _ logger.Interface = Logger{}
SlowThresholdMilliseconds = 200 SlowThresholdMilliseconds int64 = 500
) )
type Logger struct { type Logger struct {
...@@ -22,7 +22,7 @@ type Logger struct { ...@@ -22,7 +22,7 @@ type Logger struct {
} }
func newLogger(log log.Logger) Logger { func newLogger(log log.Logger) Logger {
return Logger{log.New("module", "db")} return Logger{log}
} }
func (l Logger) LogMode(lvl logger.LogLevel) logger.Interface { func (l Logger) LogMode(lvl logger.LogLevel) logger.Interface {
...@@ -50,7 +50,7 @@ func (l Logger) Trace(ctx context.Context, begin time.Time, fc func() (sql strin ...@@ -50,7 +50,7 @@ func (l Logger) Trace(ctx context.Context, begin time.Time, fc func() (sql strin
sql = fmt.Sprintf("%sVALUES (...)", sql[:i]) sql = fmt.Sprintf("%sVALUES (...)", sql[:i])
} }
if elapsedMs < 200 { if elapsedMs < SlowThresholdMilliseconds {
l.log.Debug("database operation", "duration_ms", elapsedMs, "rows_affected", rows, "sql", sql) l.log.Debug("database operation", "duration_ms", elapsedMs, "rows_affected", rows, "sql", sql)
} else { } else {
l.log.Warn("database operation", "duration_ms", elapsedMs, "rows_affected", rows, "sql", sql) l.log.Warn("database operation", "duration_ms", elapsedMs, "rows_affected", rows, "sql", sql)
......
...@@ -4,6 +4,7 @@ import ( ...@@ -4,6 +4,7 @@ import (
"context" "context"
"fmt" "fmt"
"reflect" "reflect"
"strings"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"gorm.io/gorm/schema" "gorm.io/gorm/schema"
...@@ -70,5 +71,5 @@ func (BytesSerializer) Value(ctx context.Context, field *schema.Field, dst refle ...@@ -70,5 +71,5 @@ func (BytesSerializer) Value(ctx context.Context, field *schema.Field, dst refle
} }
hexStr := hexutil.Encode(fieldBytes.Bytes()) hexStr := hexutil.Encode(fieldBytes.Bytes())
return hexStr, nil return strings.ToLower(hexStr), nil
} }
...@@ -4,6 +4,7 @@ import ( ...@@ -4,6 +4,7 @@ import (
"context" "context"
"fmt" "fmt"
"reflect" "reflect"
"strings"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
...@@ -52,5 +53,5 @@ func (RLPSerializer) Value(ctx context.Context, field *schema.Field, dst reflect ...@@ -52,5 +53,5 @@ func (RLPSerializer) Value(ctx context.Context, field *schema.Field, dst reflect
} }
hexStr := hexutil.Encode(rlpBytes) hexStr := hexutil.Encode(rlpBytes)
return hexStr, nil return strings.ToLower(hexStr), nil
} }
...@@ -58,7 +58,6 @@ services: ...@@ -58,7 +58,6 @@ services:
depends_on: depends_on:
postgres: postgres:
condition: service_healthy condition: service_healthy
depends_on:
migrations: migrations:
condition: service_started condition: service_started
...@@ -155,7 +154,7 @@ services: ...@@ -155,7 +154,7 @@ services:
- MAINNET_BEDROCK=true - MAINNET_BEDROCK=true
- TRM_API_KEY=$TRM_API_KEY - TRM_API_KEY=$TRM_API_KEY
- GOOGLE_CLOUD_STORAGE_BUCKET_NAME=oplabs-dev-web-content - GOOGLE_CLOUD_STORAGE_BUCKET_NAME=oplabs-dev-web-content
# Recommened to uncomment for local dev unless you need it # Recommended to uncomment for local dev unless you need it
#- BYPASS_EVENT_LOG_POLLER_BOOTSTRAP=true #- BYPASS_EVENT_LOG_POLLER_BOOTSTRAP=true
ports: ports:
- 7422:7300 - 7422:7300
......
...@@ -34,7 +34,7 @@ type E2ETestSuite struct { ...@@ -34,7 +34,7 @@ type E2ETestSuite struct {
// API // API
Client *client.Client Client *client.Client
API *api.API API *api.APIService
// Indexer // Indexer
DB *database.DB DB *database.DB
...@@ -73,7 +73,7 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite { ...@@ -73,7 +73,7 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
t.Cleanup(func() { opSys.Close() }) t.Cleanup(func() { opSys.Close() })
// Indexer Configuration and Start // Indexer Configuration and Start
indexerCfg := config.Config{ indexerCfg := &config.Config{
DB: config.DBConfig{ DB: config.DBConfig{
Host: "127.0.0.1", Host: "127.0.0.1",
Port: 5432, Port: 5432,
...@@ -106,51 +106,40 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite { ...@@ -106,51 +106,40 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
// the system is running, mark this test for Parallel execution // the system is running, mark this test for Parallel execution
t.Parallel() t.Parallel()
// provide a DB for the unit test. disable logging
silentLog := testlog.Logger(t, log.LvlInfo)
silentLog.SetHandler(log.DiscardHandler())
db, err := database.NewDB(silentLog, indexerCfg.DB)
require.NoError(t, err)
t.Cleanup(func() { db.Close() })
indexerLog := testlog.Logger(t, log.LvlInfo).New("role", "indexer") indexerLog := testlog.Logger(t, log.LvlInfo).New("role", "indexer")
indexer, err := indexer.NewIndexer(indexerLog, db, indexerCfg.Chain, indexerCfg.RPCs, indexerCfg.HTTPServer, indexerCfg.MetricsServer) ix, err := indexer.NewIndexer(context.Background(), indexerLog, indexerCfg, func(cause error) {
if cause != nil {
t.Fatalf("indexer shut down with critical error: %v", cause)
}
})
require.NoError(t, err) require.NoError(t, err)
indexerCtx, indexerStop := context.WithCancel(context.Background()) require.NoError(t, ix.Start(context.Background()), "cleanly start indexer")
go func() {
err := indexer.Run(indexerCtx)
if err != nil { // panicking here ensures that the test will exit
// during service failure. Using t.Fail() wouldn't be caught
// until all awaiting routines finish which would never happen.
panic(err)
}
}()
apiLog := testlog.Logger(t, log.LvlInfo).New("role", "indexer_api") t.Cleanup(func() {
require.NoError(t, ix.Stop(context.Background()), "cleanly shut down indexer")
})
apiCfg := config.ServerConfig{ apiLog := testlog.Logger(t, log.LvlInfo).New("role", "indexer_api")
Host: "127.0.0.1",
Port: 0,
}
mCfg := config.ServerConfig{ apiCfg := &api.Config{
Host: "127.0.0.1", DB: &api.TestDBConnector{BridgeTransfers: ix.DB.BridgeTransfers}, // reuse the same DB
Port: 0, HTTPServer: config.ServerConfig{
Host: "127.0.0.1",
Port: 0,
},
MetricsServer: config.ServerConfig{
Host: "127.0.0.1",
Port: 0,
},
} }
api := api.NewApi(apiLog, db.BridgeTransfers, apiCfg, mCfg) apiService, err := api.NewApi(context.Background(), apiLog, apiCfg)
apiCtx, apiStop := context.WithCancel(context.Background()) require.NoError(t, err, "create indexer API service")
go func() {
err := api.Run(apiCtx)
if err != nil {
panic(err)
}
}()
require.NoError(t, apiService.Start(context.Background()), "start indexer API service")
t.Cleanup(func() { t.Cleanup(func() {
apiStop() require.NoError(t, apiService.Stop(context.Background()), "cleanly shut down indexer")
indexerStop()
}) })
// Wait for the API to start listening // Wait for the API to start listening
...@@ -158,16 +147,15 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite { ...@@ -158,16 +147,15 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
client, err := client.NewClient(&client.Config{ client, err := client.NewClient(&client.Config{
PaginationLimit: 100, PaginationLimit: 100,
BaseURL: fmt.Sprintf("http://%s:%d", apiCfg.Host, api.Port()), BaseURL: "http://" + apiService.Addr(),
}) })
require.NoError(t, err, "must open indexer API client")
require.NoError(t, err)
return E2ETestSuite{ return E2ETestSuite{
t: t, t: t,
Client: client, Client: client,
DB: db, DB: ix.DB,
Indexer: indexer, Indexer: ix,
OpCfg: &opCfg, OpCfg: &opCfg,
OpSys: opSys, OpSys: opSys,
L1Client: opSys.Clients["l1"], L1Client: opSys.Clients["l1"],
...@@ -203,7 +191,7 @@ func setupTestDatabase(t *testing.T) string { ...@@ -203,7 +191,7 @@ func setupTestDatabase(t *testing.T) string {
silentLog := log.New() silentLog := log.New()
silentLog.SetHandler(log.DiscardHandler()) silentLog.SetHandler(log.DiscardHandler())
db, err := database.NewDB(silentLog, dbConfig) db, err := database.NewDB(context.Background(), silentLog, dbConfig)
require.NoError(t, err) require.NoError(t, err)
defer db.Close() defer db.Close()
......
...@@ -3,14 +3,17 @@ package etl ...@@ -3,14 +3,17 @@ package etl
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"math/big" "math/big"
"time" "time"
"github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum-optimism/optimism/op-service/clock"
) )
type Config struct { type Config struct {
...@@ -30,9 +33,15 @@ type ETL struct { ...@@ -30,9 +33,15 @@ type ETL struct {
headerTraversal *node.HeaderTraversal headerTraversal *node.HeaderTraversal
contracts []common.Address contracts []common.Address
etlBatches chan ETLBatch etlBatches chan *ETLBatch
EthClient node.EthClient EthClient node.EthClient
// A reference that'll stay populated between intervals
// in the event of failures in order to retry.
headers []types.Header
worker *clock.LoopFn
} }
type ETLBatch struct { type ETLBatch struct {
...@@ -45,47 +54,54 @@ type ETLBatch struct { ...@@ -45,47 +54,54 @@ type ETLBatch struct {
HeadersWithLog map[common.Hash]bool HeadersWithLog map[common.Hash]bool
} }
func (etl *ETL) Start(ctx context.Context) error { // Start starts the ETL polling routine. The ETL work should be stopped with Close().
done := ctx.Done() func (etl *ETL) Start() error {
pollTicker := time.NewTicker(etl.loopInterval) if etl.worker != nil {
defer pollTicker.Stop() return errors.New("already started")
}
etl.log.Info("starting etl...")
etl.worker = clock.NewLoopFn(clock.SystemClock, etl.tick, func() error {
close(etl.etlBatches) // can close the channel now, to signal to the consumer that we're done
etl.log.Info("stopped etl worker loop")
return nil
}, etl.loopInterval)
return nil
}
// A reference that'll stay populated between intervals func (etl *ETL) Close() error {
// in the event of failures in order to retry. if etl.worker == nil {
var headers []types.Header return nil // worker was not running
}
return etl.worker.Close()
}
etl.log.Info("starting etl...") func (etl *ETL) tick(_ context.Context) {
for { done := etl.metrics.RecordInterval()
select { if len(etl.headers) > 0 {
case <-done: etl.log.Info("retrying previous batch")
etl.log.Info("stopping etl") } else {
return nil newHeaders, err := etl.headerTraversal.NextHeaders(etl.headerBufferSize)
if err != nil {
case <-pollTicker.C: etl.log.Error("error querying for headers", "err", err)
done := etl.metrics.RecordInterval() } else if len(newHeaders) == 0 {
if len(headers) > 0 { etl.log.Warn("no new headers. etl at head?")
etl.log.Info("retrying previous batch") } else {
} else { etl.headers = newHeaders
newHeaders, err := etl.headerTraversal.NextFinalizedHeaders(etl.headerBufferSize)
if err != nil {
etl.log.Error("error querying for headers", "err", err)
} else if len(newHeaders) == 0 {
etl.log.Warn("no new headers. processor unexpectedly at head...")
} else {
headers = newHeaders
etl.metrics.RecordBatchHeaders(len(newHeaders))
}
}
// only clear the reference if we were able to process this batch
err := etl.processBatch(headers)
if err == nil {
headers = nil
}
done(err)
} }
latestHeader := etl.headerTraversal.LatestHeader()
if latestHeader != nil {
etl.metrics.RecordLatestHeight(latestHeader.Number)
}
}
// only clear the reference if we were able to process this batch
err := etl.processBatch(etl.headers)
if err == nil {
etl.headers = nil
} }
done(err)
} }
func (etl *ETL) processBatch(headers []types.Header) error { func (etl *ETL) processBatch(headers []types.Header) error {
...@@ -97,7 +113,6 @@ func (etl *ETL) processBatch(headers []types.Header) error { ...@@ -97,7 +113,6 @@ func (etl *ETL) processBatch(headers []types.Header) error {
batchLog := etl.log.New("batch_start_block_number", firstHeader.Number, "batch_end_block_number", lastHeader.Number) batchLog := etl.log.New("batch_start_block_number", firstHeader.Number, "batch_end_block_number", lastHeader.Number)
batchLog.Info("extracting batch", "size", len(headers)) batchLog.Info("extracting batch", "size", len(headers))
etl.metrics.RecordBatchLatestHeight(lastHeader.Number)
headerMap := make(map[common.Hash]*types.Header, len(headers)) headerMap := make(map[common.Hash]*types.Header, len(headers))
for i := range headers { for i := range headers {
header := headers[i] header := headers[i]
...@@ -105,31 +120,40 @@ func (etl *ETL) processBatch(headers []types.Header) error { ...@@ -105,31 +120,40 @@ func (etl *ETL) processBatch(headers []types.Header) error {
} }
headersWithLog := make(map[common.Hash]bool, len(headers)) headersWithLog := make(map[common.Hash]bool, len(headers))
logs, err := etl.EthClient.FilterLogs(ethereum.FilterQuery{FromBlock: firstHeader.Number, ToBlock: lastHeader.Number, Addresses: etl.contracts}) filterQuery := ethereum.FilterQuery{FromBlock: firstHeader.Number, ToBlock: lastHeader.Number, Addresses: etl.contracts}
logs, err := etl.EthClient.FilterLogs(filterQuery)
if err != nil { if err != nil {
batchLog.Info("failed to extract logs", "err", err) batchLog.Info("failed to extract logs", "err", err)
return err return err
} }
if len(logs) > 0 {
batchLog.Info("detected logs", "size", len(logs)) if logs.ToBlockHeader.Number.Cmp(lastHeader.Number) != 0 {
// Warn and simply wait for the provider to synchronize state
batchLog.Warn("mismatch in FilterLog#ToBlock number", "queried_to_block_number", lastHeader.Number, "reported_to_block_number", logs.ToBlockHeader.Number)
return fmt.Errorf("mismatch in FilterLog#ToBlock number")
} else if logs.ToBlockHeader.Hash() != lastHeader.Hash() {
batchLog.Error("mismatch in FitlerLog#ToBlock block hash!!!", "queried_to_block_hash", lastHeader.Hash().String(), "reported_to_block_hash", logs.ToBlockHeader.Hash().String())
return fmt.Errorf("mismatch in FitlerLog#ToBlock block hash!!!")
}
if len(logs.Logs) > 0 {
batchLog.Info("detected logs", "size", len(logs.Logs))
} }
for i := range logs { for i := range logs.Logs {
log := logs[i] log := logs.Logs[i]
headersWithLog[log.BlockHash] = true
if _, ok := headerMap[log.BlockHash]; !ok { if _, ok := headerMap[log.BlockHash]; !ok {
// NOTE. Definitely an error state if the none of the headers were re-orged out in between // NOTE. Definitely an error state if the none of the headers were re-orged out in between
// the blocks and logs retrieval operations. Unlikely as long as the confirmation depth has // the blocks and logs retrieval operations. Unlikely as long as the confirmation depth has
// been appropriately set or when we get to natively handling reorgs. // been appropriately set or when we get to natively handling reorgs.
batchLog.Error("log found with block hash not in the batch", "block_hash", logs[i].BlockHash, "log_index", logs[i].Index) batchLog.Error("log found with block hash not in the batch", "block_hash", logs.Logs[i].BlockHash, "log_index", logs.Logs[i].Index)
return errors.New("parsed log with a block hash not in the batch") return errors.New("parsed log with a block hash not in the batch")
} }
etl.metrics.RecordBatchLog(log.Address)
headersWithLog[log.BlockHash] = true
} }
// ensure we use unique downstream references for the etl batch // ensure we use unique downstream references for the etl batch
headersRef := headers headersRef := headers
etl.etlBatches <- ETLBatch{Logger: batchLog, Headers: headersRef, HeaderMap: headerMap, Logs: logs, HeadersWithLog: headersWithLog} etl.etlBatches <- &ETLBatch{Logger: batchLog, Headers: headersRef, HeaderMap: headerMap, Logs: logs.Logs, HeadersWithLog: headersWithLog}
return nil return nil
} }
...@@ -8,26 +8,37 @@ import ( ...@@ -8,26 +8,37 @@ import (
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/indexer/config" "github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database" "github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/node" "github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum-optimism/optimism/op-service/retry" "github.com/ethereum-optimism/optimism/op-service/retry"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum-optimism/optimism/op-service/tasks"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
) )
type L1ETL struct { type L1ETL struct {
ETL ETL
db *database.DB // the batch handler may do work that we can interrupt on shutdown
mu *sync.Mutex resourceCtx context.Context
resourceCancel context.CancelFunc
tasks tasks.Group
db *database.DB
mu sync.Mutex
listeners []chan interface{} listeners []chan interface{}
} }
// NewL1ETL creates a new L1ETL instance that will start indexing from different starting points // NewL1ETL creates a new L1ETL instance that will start indexing from different starting points
// depending on the state of the database and the supplied start height. // depending on the state of the database and the supplied start height.
func NewL1ETL(cfg Config, log log.Logger, db *database.DB, metrics Metricer, client node.EthClient, contracts config.L1Contracts) (*L1ETL, error) { func NewL1ETL(cfg Config, log log.Logger, db *database.DB, metrics Metricer, client node.EthClient,
contracts config.L1Contracts, shutdown context.CancelCauseFunc) (*L1ETL, error) {
log = log.New("etl", "l1") log = log.New("etl", "l1")
zeroAddr := common.Address{} zeroAddr := common.Address{}
...@@ -71,8 +82,10 @@ func NewL1ETL(cfg Config, log log.Logger, db *database.DB, metrics Metricer, cli ...@@ -71,8 +82,10 @@ func NewL1ETL(cfg Config, log log.Logger, db *database.DB, metrics Metricer, cli
} }
// NOTE - The use of un-buffered channel here assumes that downstream consumers // NOTE - The use of un-buffered channel here assumes that downstream consumers
// will be able to keep up with the rate of incoming batches // will be able to keep up with the rate of incoming batches.
etlBatches := make(chan ETLBatch) // When the producer closes the channel we stop consuming from it.
etlBatches := make(chan *ETLBatch)
etl := ETL{ etl := ETL{
loopInterval: time.Duration(cfg.LoopIntervalMsec) * time.Millisecond, loopInterval: time.Duration(cfg.LoopIntervalMsec) * time.Millisecond,
headerBufferSize: uint64(cfg.HeaderBufferSize), headerBufferSize: uint64(cfg.HeaderBufferSize),
...@@ -86,81 +99,115 @@ func NewL1ETL(cfg Config, log log.Logger, db *database.DB, metrics Metricer, cli ...@@ -86,81 +99,115 @@ func NewL1ETL(cfg Config, log log.Logger, db *database.DB, metrics Metricer, cli
EthClient: client, EthClient: client,
} }
return &L1ETL{ETL: etl, db: db, mu: new(sync.Mutex)}, nil resCtx, resCancel := context.WithCancel(context.Background())
return &L1ETL{
ETL: etl,
db: db,
resourceCtx: resCtx,
resourceCancel: resCancel,
tasks: tasks.Group{HandleCrit: func(err error) {
shutdown(fmt.Errorf("critical error in L1 ETL: %w", err))
}},
}, nil
} }
func (l1Etl *L1ETL) Start(ctx context.Context) error { func (l1Etl *L1ETL) Close() error {
errCh := make(chan error, 1) var result error
go func() { // close the producer
errCh <- l1Etl.ETL.Start(ctx) if err := l1Etl.ETL.Close(); err != nil {
}() result = errors.Join(result, fmt.Errorf("failed to close internal ETL: %w", err))
}
// tell the consumer it can stop what it's doing
l1Etl.resourceCancel()
// wait for consumer to pick up on closure of producer
if err := l1Etl.tasks.Wait(); err != nil {
result = errors.Join(result, fmt.Errorf("failed to await batch handler completion: %w", err))
}
return result
}
for { func (l1Etl *L1ETL) Start() error {
select { // start ETL batch producer
case err := <-errCh: if err := l1Etl.ETL.Start(); err != nil {
return err return fmt.Errorf("failed to start internal ETL: %w", err)
}
// Index incoming batches (only L1 blocks that have an emitted log) // start ETL batch consumer
case batch := <-l1Etl.etlBatches: l1Etl.tasks.Go(func() error {
l1BlockHeaders := make([]database.L1BlockHeader, 0, len(batch.Headers)) for {
for i := range batch.Headers { // Index incoming batches (only L1 blocks that have an emitted log)
if _, ok := batch.HeadersWithLog[batch.Headers[i].Hash()]; ok { batch, ok := <-l1Etl.etlBatches
l1BlockHeaders = append(l1BlockHeaders, database.L1BlockHeader{BlockHeader: database.BlockHeaderFromHeader(&batch.Headers[i])}) if !ok {
} l1Etl.log.Info("No more batches, shutting down L1 batch handler")
return nil
} }
if len(l1BlockHeaders) == 0 { if err := l1Etl.handleBatch(batch); err != nil {
batch.Logger.Info("no l1 blocks with logs in batch") return fmt.Errorf("failed to handle batch, stopping L2 ETL: %w", err)
continue
} }
}
})
return nil
}
l1ContractEvents := make([]database.L1ContractEvent, len(batch.Logs)) func (l1Etl *L1ETL) handleBatch(batch *ETLBatch) error {
for i := range batch.Logs { l1BlockHeaders := make([]database.L1BlockHeader, 0, len(batch.Headers))
timestamp := batch.HeaderMap[batch.Logs[i].BlockHash].Time for i := range batch.Headers {
l1ContractEvents[i] = database.L1ContractEvent{ContractEvent: database.ContractEventFromLog(&batch.Logs[i], timestamp)} if _, ok := batch.HeadersWithLog[batch.Headers[i].Hash()]; ok {
} l1BlockHeaders = append(l1BlockHeaders, database.L1BlockHeader{BlockHeader: database.BlockHeaderFromHeader(&batch.Headers[i])})
}
}
if len(l1BlockHeaders) == 0 {
batch.Logger.Info("no l1 blocks with logs in batch")
return nil
}
l1ContractEvents := make([]database.L1ContractEvent, len(batch.Logs))
for i := range batch.Logs {
timestamp := batch.HeaderMap[batch.Logs[i].BlockHash].Time
l1ContractEvents[i] = database.L1ContractEvent{ContractEvent: database.ContractEventFromLog(&batch.Logs[i], timestamp)}
l1Etl.ETL.metrics.RecordIndexedLog(batch.Logs[i].Address)
}
// Continually try to persist this batch. If it fails after 10 attempts, we simply error out // Continually try to persist this batch. If it fails after 10 attempts, we simply error out
retryStrategy := &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250} retryStrategy := &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250}
if _, err := retry.Do[interface{}](ctx, 10, retryStrategy, func() (interface{}, error) { if _, err := retry.Do[interface{}](l1Etl.resourceCtx, 10, retryStrategy, func() (interface{}, error) {
if err := l1Etl.db.Transaction(func(tx *database.DB) error { if err := l1Etl.db.Transaction(func(tx *database.DB) error {
if err := tx.Blocks.StoreL1BlockHeaders(l1BlockHeaders); err != nil { if err := tx.Blocks.StoreL1BlockHeaders(l1BlockHeaders); err != nil {
return err
}
// we must have logs if we have l1 blocks
if err := tx.ContractEvents.StoreL1ContractEvents(l1ContractEvents); err != nil {
return err
}
return nil
}); err != nil {
batch.Logger.Error("unable to persist batch", "err", err)
return nil, err
}
l1Etl.ETL.metrics.RecordIndexedHeaders(len(l1BlockHeaders))
l1Etl.ETL.metrics.RecordIndexedLatestHeight(l1BlockHeaders[len(l1BlockHeaders)-1].Number)
l1Etl.ETL.metrics.RecordIndexedLogs(len(l1ContractEvents))
// a-ok!
return nil, nil
}); err != nil {
return err return err
} }
// we must have logs if we have l1 blocks
batch.Logger.Info("indexed batch") if err := tx.ContractEvents.StoreL1ContractEvents(l1ContractEvents); err != nil {
return err
// Notify Listeners
l1Etl.mu.Lock()
for i := range l1Etl.listeners {
select {
case l1Etl.listeners[i] <- struct{}{}:
default:
// do nothing if the listener hasn't picked
// up the previous notif
}
} }
l1Etl.mu.Unlock() return nil
}); err != nil {
batch.Logger.Error("unable to persist batch", "err", err)
return nil, fmt.Errorf("unable to persist batch: %w", err)
}
l1Etl.ETL.metrics.RecordIndexedHeaders(len(l1BlockHeaders))
l1Etl.ETL.metrics.RecordIndexedLatestHeight(l1BlockHeaders[len(l1BlockHeaders)-1].Number)
// a-ok!
return nil, nil
}); err != nil {
return err
}
batch.Logger.Info("indexed batch")
// Notify Listeners
l1Etl.mu.Lock()
for i := range l1Etl.listeners {
select {
case l1Etl.listeners[i] <- struct{}{}:
default:
// do nothing if the listener hasn't picked
// up the previous notif
} }
} }
l1Etl.mu.Unlock()
return nil
} }
// Notify returns a channel that'll receive a value every time new data has // Notify returns a channel that'll receive a value every time new data has
......
...@@ -62,7 +62,7 @@ func TestL1ETLConstruction(t *testing.T) { ...@@ -62,7 +62,7 @@ func TestL1ETLConstruction(t *testing.T) {
}, },
assertion: func(etl *L1ETL, err error) { assertion: func(etl *L1ETL, err error) {
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, etl.headerTraversal.LastHeader().ParentHash, common.HexToHash("0x69")) require.Equal(t, etl.headerTraversal.LastTraversedHeader().ParentHash, common.HexToHash("0x69"))
}, },
}, },
{ {
...@@ -94,7 +94,7 @@ func TestL1ETLConstruction(t *testing.T) { ...@@ -94,7 +94,7 @@ func TestL1ETLConstruction(t *testing.T) {
}, },
assertion: func(etl *L1ETL, err error) { assertion: func(etl *L1ETL, err error) {
require.NoError(t, err) require.NoError(t, err)
header := etl.headerTraversal.LastHeader() header := etl.headerTraversal.LastTraversedHeader()
require.True(t, header.Number.Cmp(big.NewInt(69)) == 0) require.True(t, header.Number.Cmp(big.NewInt(69)) == 0)
}, },
...@@ -108,7 +108,9 @@ func TestL1ETLConstruction(t *testing.T) { ...@@ -108,7 +108,9 @@ func TestL1ETLConstruction(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LvlInfo)
cfg := Config{StartHeight: ts.start} cfg := Config{StartHeight: ts.start}
etl, err := NewL1ETL(cfg, logger, ts.db.DB, etlMetrics, ts.client, ts.contracts) etl, err := NewL1ETL(cfg, logger, ts.db.DB, etlMetrics, ts.client, ts.contracts, func(cause error) {
t.Fatalf("crit error: %v", cause)
})
test.assertion(etl, err) test.assertion(etl, err)
}) })
} }
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -29,6 +29,7 @@ name = "$INDEXER_DB_NAME" ...@@ -29,6 +29,7 @@ name = "$INDEXER_DB_NAME"
[http] [http]
host = "127.0.0.1" host = "127.0.0.1"
port = 8080 port = 8080
timeout = 10
[metrics] [metrics]
host = "127.0.0.1" host = "127.0.0.1"
......
This diff is collapsed.
package node
import (
"context"
"fmt"
"net"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
func TestDialEthClientUnavailable(t *testing.T) {
listener, err := net.Listen("tcp4", ":0")
require.NoError(t, err)
defer listener.Close()
a := listener.Addr().String()
parts := strings.Split(a, ":")
addr := fmt.Sprintf("http://localhost:%s", parts[1])
metrics := &clientMetrics{}
// available
_, err = DialEthClient(context.Background(), addr, metrics)
require.NoError(t, err)
// :0 requests a new unbound port
_, err = DialEthClient(context.Background(), "http://localhost:0", metrics)
require.Error(t, err)
// Fail open if we don't recognize the scheme
_, err = DialEthClient(context.Background(), "mailto://example.com", metrics)
require.Error(t, err)
}
This diff is collapsed.
This diff is collapsed.
...@@ -12,7 +12,7 @@ import ( ...@@ -12,7 +12,7 @@ import (
) )
var ( var (
MetricsNamespace = "rpc" MetricsNamespace = "op_indexer_rpc"
batchMethod = "<batch>" batchMethod = "<batch>"
) )
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment