Commit 0737ebb2 authored by Florian, committed by GitHub

Merge branch 'ethereum-optimism:develop' into op-service/http-util-cleanup

parents 0f1633ec 95fe7e47
---
'@eth-optimism/core-utils': patch
---
Upgraded npm dependencies to latest
---
'@eth-optimism/chain-mon': patch
---
Upgraded npm dependencies to latest
......@@ -6,7 +6,7 @@ updates:
interval: daily
open-pull-requests-limit: 10
labels:
- dependabot
- M-dependabot
- package-ecosystem: github-actions
directory: "/"
......@@ -14,7 +14,7 @@ updates:
interval: daily
open-pull-requests-limit: 10
labels:
- dependabot
- M-dependabot
- package-ecosystem: npm
directory: "/"
......@@ -25,7 +25,7 @@ updates:
open-pull-requests-limit: 10
versioning-strategy: auto
labels:
- dependabot
- M-dependabot
- package-ecosystem: gomod
directory: "/"
......@@ -33,4 +33,4 @@ updates:
interval: daily
open-pull-requests-limit: 10
labels:
- dependabot
- M-dependabot
......@@ -12,6 +12,9 @@ pull_request_rules:
- "label!=do-not-merge"
- "label!=multiple-reviewers"
- "label!=mergify-ignore"
- "label!=M-do-not-merge"
- "label!=M-multiple-reviewers"
- "label!=M-mergify-ignore"
- "base=develop"
actions:
queue:
......@@ -27,14 +30,14 @@ pull_request_rules:
This PR has been added to the merge queue, and will be merged soon.
label:
add:
- on-merge-train
- S-on-merge-train
- name: Remove merge train label
conditions:
- "queue-position = -1"
actions:
label:
remove:
- on-merge-train
- S-on-merge-train
- name: Ask to resolve conflict
conditions:
- conflict
......@@ -43,14 +46,14 @@ pull_request_rules:
message: Hey @{{author}}! This PR has merge conflicts. Please fix them before continuing review.
label:
add:
- conflict
- S-conflict
- name: Remove conflicts label when conflicts gone
conditions:
- -conflict
actions:
label:
remove:
- conflict
- S-conflict
- name: Notify author when added to merge queue
conditions:
- "check-pending=Queue: Embarked in merge train"
......@@ -71,33 +74,250 @@ pull_request_rules:
More details can be found on the `Queue: Embarked in merge train`
check-run.
- name: Add indexer tag and ecopod reviewers
- name: Add A-cannon label
conditions:
- 'files~=^cannon/'
actions:
label:
add:
- A-cannon
- name: Add A-indexer label and ecopod reviewers
conditions:
- 'files~=^indexer/'
- '#label<5'
actions:
label:
add:
- indexer
- A-indexer
request_reviews:
users:
- roninjin10
- name: Add sdk tag and ecopod reviewers
- name: Add A-op-batcher label
conditions:
- 'files~=^packages/sdk/'
- 'files~=^op-batcher/'
actions:
label:
add:
- A-op-batcher
- name: Add A-op-bindings label
conditions:
- 'files~=^op-bindings/'
actions:
label:
add:
- A-op-bindings
- name: Add A-op-bootnode label
conditions:
- 'files~=^op-bootnode/'
- '#label<5'
actions:
label:
add:
- A-op-bootnode
- name: Add A-op-chain-ops label
conditions:
- 'files~=^op-chain-ops/'
actions:
label:
add:
- A-op-chain-ops
- name: Add A-op-challenger label
conditions:
- 'files~=^op-challenger/'
actions:
label:
add:
- A-op-challenger
- name: Add A-op-e2e label
conditions:
- 'files~=^op-e2e/'
- '#label<5'
actions:
label:
add:
- A-op-e2e
- name: Add A-op-exporter label
conditions:
- 'files~=^op-exporter/'
- '#label<5'
actions:
label:
add:
- A-op-exporter
- name: Add A-op-heartbeat label
conditions:
- 'files~=^op-heartbeat/'
- '#label<5'
actions:
label:
add:
- A-op-heartbeat
- name: Add A-op-node label
conditions:
- 'files~=^op-node/'
actions:
label:
add:
- A-op-node
- name: Add A-op-preimage label
conditions:
- 'files~=^op-preimage/'
- '#label<5'
actions:
label:
add:
- A-op-preimage
- name: Add A-op-program label
conditions:
- 'files~=^op-program/'
actions:
label:
add:
- A-op-program
- name: Add A-op-proposer label
conditions:
- 'files~=^op-proposer/'
actions:
label:
add:
- A-op-proposer
- name: Add A-op-service label
conditions:
- 'files~=^op-service/'
- '#label<5'
actions:
label:
add:
- A-op-service
- name: Add A-op-signer label
conditions:
- 'files~=^op-signer/'
- '#label<5'
actions:
label:
add:
- sdk
- A-op-signer
- name: Add A-op-wheel label
conditions:
- 'files~=^op-wheel/'
- '#label<5'
actions:
label:
add:
- A-op-wheel
- name: Add A-ops-bedrock label
conditions:
- 'files~=^ops-bedrock/'
- '#label<5'
actions:
label:
add:
- A-ops-bedrock
- name: Add A-ops label
conditions:
- 'files~=^ops/'
- '#label<5'
actions:
label:
add:
- A-ops
- name: Add A-pkg-chain-mon label
conditions:
- 'files~=^packages/chain-mon/'
- '#label<5'
actions:
label:
add:
- A-pkg-chain-mon
- name: Add A-pkg-common-ts label and ecopod reviewers
conditions:
- 'files~=^packages/common-ts/'
- '#label<5'
actions:
label:
add:
- A-pkg-common-ts
request_reviews:
users:
- roninjin10
- name: Add common-ts tag and ecopod reviewers
- name: Add A-pkg-contracts-bedrock label
conditions:
- 'files~=^packages/common-ts/'
- 'files~=^packages/contracts-bedrock/'
actions:
label:
add:
- A-pkg-contracts-bedrock
- name: Add A-pkg-contracts-ts label
conditions:
- 'files~=^packages/contracts-ts/'
- '#label<5'
actions:
label:
add:
- A-pkg-contracts-ts
- name: Add A-pkg-core-utils label
conditions:
- 'files~=^packages/core-utils/'
- '#label<5'
actions:
label:
add:
- common-ts
- A-pkg-core-utils
- name: Add A-pkg-fee-estimation label
conditions:
- 'files~=^packages/fee-estimation/'
- '#label<5'
actions:
label:
add:
- A-pkg-fee-estimation
- name: Add A-pkg-sdk label and ecopod reviewers
conditions:
- 'files~=^packages/sdk/'
- '#label<5'
actions:
label:
add:
- A-pkg-sdk
request_reviews:
users:
- roninjin10
- name: Add A-pkg-web3js-plugin label
conditions:
- 'files~=^packages/web3js-plugin/'
- '#label<5'
actions:
label:
add:
- A-pkg-web3js-plugin
- name: Add A-proxyd label
conditions:
- 'files~=^proxyd/'
- '#label<5'
actions:
label:
add:
- A-proxyd
- name: Add M-docs label
conditions:
- 'files~=^(docs|specs)\/'
- '#label<5'
actions:
label:
add:
- M-docs
- name: Add M-deletion label when files are removed
conditions:
- 'removed-files~=^/'
actions:
label:
add:
- M-deletion
- name: Add M-ci label when ci files are modified
conditions:
- 'files~=^\.(github|circleci|husky)\/'
- '#label<5'
actions:
label:
add:
- M-ci
......@@ -10,6 +10,7 @@ jobs:
- uses: actions/stale@v4
with:
stale-pr-message: 'This PR is stale because it has been open 14 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
stale-issue-label: 'S-stale'
exempt-pr-labels: exempt-stale
days-before-issue-stale: 999
days-before-pr-stale: 14
......
......@@ -119,7 +119,7 @@ Note that these environment variables significantly speed up build time.
cd ops-bedrock
export COMPOSE_DOCKER_CLI_BUILD=1
export DOCKER_BUILDKIT=1
docker-compose build
docker compose build
```
Source code changes can have an impact on more than one container.
......@@ -127,9 +127,9 @@ Source code changes can have an impact on more than one container.
```bash
cd ops-bedrock
docker-compose down
docker-compose build
docker-compose up
docker compose down
docker compose build
docker compose up
```
**If a node process exits with exit code 137**, you may need to increase the default memory limit of docker containers
......@@ -141,18 +141,18 @@ cd optimism
pnpm clean
pnpm build
cd ops
docker-compose down -v
docker-compose build
docker-compose up
docker compose down -v
docker compose build
docker compose up
```
#### Viewing docker container logs
By default, the `docker-compose up` command will show logs from all services, and that
By default, the `docker compose up` command will show logs from all services, and that
can be hard to filter through. In order to view the logs from a specific service, you can run:
```bash
docker-compose logs --follow <service name>
docker compose logs --follow <service name>
```
### Running tests
......@@ -185,3 +185,44 @@ cd packages/contracts
pip3 install slither-analyzer
pnpm test:slither
```
## Labels
Labels are divided into categories with their descriptions annotated as `<Category Name>: <description>`.
The following is a comprehensive list of label categories.
- **Area labels** ([`A-`][area]): Denote the general area for the related issue or PR changes.
- **Category labels** ([`C-`][category]): Contextualize the type of issue or change.
- **Meta labels** ([`M-`][meta]): These add context to the issues or PRs themselves, primarily relating to process.
- **Difficulty labels** ([`D-`][difficulty]): Describe the associated implementation's difficulty level.
- **Status labels** ([`S-`][status]): Specify the status of an issue or PR.
Labels also provide a versatile filter for finding tickets that need help or are open for assignment.
This makes them a great tool for contributors!
[area]: https://github.com/ethereum-optimism/optimism/labels?q=a-
[category]: https://github.com/ethereum-optimism/optimism/labels?q=c-
[meta]: https://github.com/ethereum-optimism/optimism/labels?q=m-
[difficulty]: https://github.com/ethereum-optimism/optimism/labels?q=d-
[status]: https://github.com/ethereum-optimism/optimism/labels?q=s-
#### Filtering for Work
To find tickets available for external contribution, take a look at the https://github.com/ethereum-optimism/optimism/labels/M-community label.
You can filter by the https://github.com/ethereum-optimism/optimism/labels/D-good-first-issue
label to find issues that are intended to be easy to implement or fix.
Also, all labels can be seen by visiting the [labels page][labels].
[labels]: https://github.com/ethereum-optimism/optimism/labels
#### Modifying Labels
When altering label names or deleting labels, there are a few things you must be aware of.
- This may affect the mergify bot's use of labels. See the [mergify config](.github/mergify.yml).
- If the https://github.com/ethereum-optimism/optimism/labels/S-stale label is altered, the [close-stale](.github/workflows/close-stale.yml) workflow should be updated.
- If the https://github.com/ethereum-optimism/optimism/labels/M-dependabot label is altered, the [dependabot config](.github/dependabot.yml) file should be adjusted.
- Saved label filters for project boards will not automatically update. These should be updated if label names change.
......@@ -86,6 +86,9 @@ nuke: clean devnet-clean
.PHONY: nuke
devnet-up:
@if [ ! -e op-program/bin ]; then \
make cannon-prestate; \
fi
$(shell ./ops/scripts/newer-file.sh .devnet/allocs-l1.json ./packages/contracts-bedrock)
if [ $(.SHELLSTATUS) -ne 0 ]; then \
make devnet-allocs; \
......@@ -96,14 +99,18 @@ devnet-up:
# alias for devnet-up
devnet-up-deploy: devnet-up
devnet-test:
PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. --test
.PHONY: devnet-test
devnet-down:
@(cd ./ops-bedrock && GENESIS_TIMESTAMP=$(shell date +%s) docker-compose stop)
@(cd ./ops-bedrock && GENESIS_TIMESTAMP=$(shell date +%s) docker compose stop)
.PHONY: devnet-down
devnet-clean:
rm -rf ./packages/contracts-bedrock/deployments/devnetL1
rm -rf ./.devnet
cd ./ops-bedrock && docker-compose down
cd ./ops-bedrock && docker compose down
docker image ls 'ops-bedrock*' --format='{{.Repository}}' | xargs -r docker rmi
docker volume ls --filter name=ops-bedrock --format='{{.Name}}' | xargs -r docker volume rm
.PHONY: devnet-clean
......@@ -112,7 +119,7 @@ devnet-allocs:
PYTHONPATH=./bedrock-devnet python3 ./bedrock-devnet/main.py --monorepo-dir=. --allocs
devnet-logs:
@(cd ./ops-bedrock && docker-compose logs -f)
@(cd ./ops-bedrock && docker compose logs -f)
.PHONY: devnet-logs
test-unit:
......
<div align="center">
<br />
<br />
......@@ -48,13 +47,7 @@ Refer to the Directory Structure section below to understand which packages are
## Directory Structure
<pre>
~~ Production ~~
├── <a href="./packages">packages</a>
│ ├── <a href="./packages/common-ts">common-ts</a>: Common tools for building apps in TypeScript
│ ├── <a href="./packages/contracts-bedrock">contracts-bedrock</a>: Bedrock smart contracts.
│ ├── <a href="./packages/core-utils">core-utils</a>: Low-level utilities that make building Optimism easier
│ ├── <a href="./packages/chain-mon">chain-mon</a>: Chain monitoring services
│ └── <a href="./packages/sdk">sdk</a>: provides a set of tools for interacting with Optimism
├── <a href="./docs">docs</a>: A collection of documents including audits and post-mortems
├── <a href="./op-bindings">op-bindings</a>: Go bindings for Bedrock smart contracts.
├── <a href="./op-batcher">op-batcher</a>: L2-Batch Submitter, submits bundles of batches to L1
├── <a href="./op-bootnode">op-bootnode</a>: Standalone op-node discovery bootnode
......@@ -70,19 +63,15 @@ Refer to the Directory Structure section below to understand which packages are
├── <a href="./op-signer">op-signer</a>: Client signer
├── <a href="./op-wheel">op-wheel</a>: Database utilities
├── <a href="./ops-bedrock">ops-bedrock</a>: Bedrock devnet work
├── <a href="./proxyd">proxyd</a>: Configurable RPC request router and proxy
└── <a href="./specs">specs</a>: Specs of the rollup starting at the Bedrock upgrade
~~ Pre-BEDROCK ~~
├── <a href="./packages">packages</a>
│ ├── <a href="./packages/chain-mon">chain-mon</a>: Chain monitoring services
│ ├── <a href="./packages/common-ts">common-ts</a>: Common tools for building apps in TypeScript
│ ├── <a href="./packages/contracts-ts">contracts-ts</a>: ABI and Address constants
│ ├── <a href="./packages/contracts-bedrock">contracts-bedrock</a>: Bedrock smart contracts
│ ├── <a href="./packages/core-utils">core-utils</a>: Low-level utilities that make building Optimism easier
│ ├── <a href="./packages/chain-mon">chain-mon</a>: Chain monitoring services
│ └── <a href="./packages/sdk">sdk</a>: provides a set of tools for interacting with Optimism
├── <a href="./indexer">indexer</a>: indexes and syncs transactions
├── <a href="./op-exporter">op-exporter</a>: A prometheus exporter to collect/serve metrics from an Optimism node
├── <a href="./proxyd">proxyd</a>: Configurable RPC request router and proxy
└── <a href="./technical-documents">technical-documents</a>: audits and post-mortem documents
└── <a href="./specs">specs</a>: Specs of the rollup starting at the Bedrock upgrade
</pre>
## Branching Model
......
......@@ -18,6 +18,7 @@ pjoin = os.path.join
parser = argparse.ArgumentParser(description='Bedrock devnet launcher')
parser.add_argument('--monorepo-dir', help='Directory of the monorepo', default=os.getcwd())
parser.add_argument('--allocs', help='Only create the allocs and exit', type=bool, action=argparse.BooleanOptionalAction)
parser.add_argument('--test', help='Tests the deployment, must already be deployed', type=bool, action=argparse.BooleanOptionalAction)
log = logging.getLogger()
......@@ -57,6 +58,8 @@ def main():
ops_bedrock_dir = pjoin(monorepo_dir, 'ops-bedrock')
deploy_config_dir = pjoin(contracts_bedrock_dir, 'deploy-config'),
devnet_config_path = pjoin(contracts_bedrock_dir, 'deploy-config', 'devnetL1.json')
ops_chain_ops = pjoin(monorepo_dir, 'op-chain-ops')
sdk_dir = pjoin(monorepo_dir, 'packages', 'sdk')
paths = Bunch(
mono_repo_dir=monorepo_dir,
......@@ -68,6 +71,8 @@ def main():
devnet_config_path=devnet_config_path,
op_node_dir=op_node_dir,
ops_bedrock_dir=ops_bedrock_dir,
ops_chain_ops=ops_chain_ops,
sdk_dir=sdk_dir,
genesis_l1_path=pjoin(devnet_dir, 'genesis-l1.json'),
genesis_l2_path=pjoin(devnet_dir, 'genesis-l2.json'),
allocs_path=pjoin(devnet_dir, 'allocs-l1.json'),
......@@ -76,6 +81,11 @@ def main():
rollup_config_path=pjoin(devnet_dir, 'rollup.json')
)
if args.test:
log.info('Testing deployed devnet')
devnet_test(paths)
return
os.makedirs(devnet_dir, exist_ok=True)
if args.allocs:
......@@ -83,7 +93,7 @@ def main():
return
log.info('Building docker images')
run_command(['docker-compose', 'build', '--progress', 'plain'], cwd=paths.ops_bedrock_dir, env={
run_command(['docker', 'compose', 'build', '--progress', 'plain'], cwd=paths.ops_bedrock_dir, env={
'PWD': paths.ops_bedrock_dir
})
......@@ -163,7 +173,7 @@ def devnet_deploy(paths):
], cwd=paths.op_node_dir)
log.info('Starting L1.')
run_command(['docker-compose', 'up', '-d', 'l1'], cwd=paths.ops_bedrock_dir, env={
run_command(['docker', 'compose', 'up', '-d', 'l1'], cwd=paths.ops_bedrock_dir, env={
'PWD': paths.ops_bedrock_dir
})
wait_up(8545)
......@@ -186,7 +196,7 @@ def devnet_deploy(paths):
addresses = read_json(paths.addresses_json_path)
log.info('Bringing up L2.')
run_command(['docker-compose', 'up', '-d', 'l2'], cwd=paths.ops_bedrock_dir, env={
run_command(['docker', 'compose', 'up', '-d', 'l2'], cwd=paths.ops_bedrock_dir, env={
'PWD': paths.ops_bedrock_dir
})
wait_up(9545)
......@@ -198,7 +208,7 @@ def devnet_deploy(paths):
log.info(f'Using batch inbox {batch_inbox_address}')
log.info('Bringing up everything else.')
run_command(['docker-compose', 'up', '-d', 'op-node', 'op-proposer', 'op-batcher'], cwd=paths.ops_bedrock_dir, env={
run_command(['docker', 'compose', 'up', '-d', 'op-node', 'op-proposer', 'op-batcher'], cwd=paths.ops_bedrock_dir, env={
'PWD': paths.ops_bedrock_dir,
'L2OO_ADDRESS': l2_output_oracle,
'SEQUENCER_BATCH_INBOX_ADDRESS': batch_inbox_address
......@@ -250,8 +260,26 @@ def wait_for_rpc_server(url):
log.info(f'Waiting for RPC server at {url}')
time.sleep(1)
def devnet_test(paths):
# Check the L2 config
run_command(
['go', 'run', 'cmd/check-l2/main.go', '--l2-rpc-url', 'http://localhost:9545', '--l1-rpc-url', 'http://localhost:8545'],
cwd=paths.ops_chain_ops,
)
run_command(
['npx', 'hardhat', 'deposit-erc20', '--network', 'devnetL1', '--l1-contracts-json-path', paths.addresses_json_path],
cwd=paths.sdk_dir,
timeout=8*60,
)
run_command(
['npx', 'hardhat', 'deposit-eth', '--network', 'devnetL1', '--l1-contracts-json-path', paths.addresses_json_path],
cwd=paths.sdk_dir,
timeout=8*60,
)
def run_command(args, check=True, shell=False, cwd=None, env=None):
def run_command(args, check=True, shell=False, cwd=None, env=None, timeout=None):
env = env if env else {}
return subprocess.run(
args,
......@@ -261,7 +289,8 @@ def run_command(args, check=True, shell=False, cwd=None, env=None):
**os.environ,
**env
},
cwd=cwd
cwd=cwd,
timeout=timeout
)
......
package cmd
import (
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"strings"
"github.com/ethereum-optimism/optimism/op-service/ioutil"
)
func loadJSON[X any](inputPath string) (*X, error) {
......@@ -15,18 +15,11 @@ func loadJSON[X any](inputPath string) (*X, error) {
return nil, errors.New("no path specified")
}
var f io.ReadCloser
f, err := os.OpenFile(inputPath, os.O_RDONLY, 0)
f, err := ioutil.OpenDecompressed(inputPath)
if err != nil {
return nil, fmt.Errorf("failed to open file %q: %w", inputPath, err)
}
defer f.Close()
if isGzip(inputPath) {
f, err = gzip.NewReader(f)
if err != nil {
return nil, fmt.Errorf("create gzip reader: %w", err)
}
defer f.Close()
}
var state X
if err := json.NewDecoder(f).Decode(&state); err != nil {
return nil, fmt.Errorf("failed to decode file %q: %w", inputPath, err)
......@@ -37,17 +30,12 @@ func loadJSON[X any](inputPath string) (*X, error) {
func writeJSON[X any](outputPath string, value X, outIfEmpty bool) error {
var out io.Writer
if outputPath != "" {
f, err := os.OpenFile(outputPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
f, err := ioutil.OpenCompressed(outputPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
if err != nil {
return fmt.Errorf("failed to open output file: %w", err)
}
defer f.Close()
out = f
if isGzip(outputPath) {
g := gzip.NewWriter(f)
defer g.Close()
out = g
}
} else if outIfEmpty {
out = os.Stdout
} else {
......@@ -63,7 +51,3 @@ func writeJSON[X any](outputPath string, value X, outIfEmpty bool) error {
}
return nil
}
func isGzip(path string) bool {
return strings.HasSuffix(path, ".gz")
}
package cmd
import (
"context"
"fmt"
"os"
"os/exec"
......@@ -115,8 +116,12 @@ type ProcessPreimageOracle struct {
pCl *preimage.OracleClient
hCl *preimage.HintWriter
cmd *exec.Cmd
waitErr chan error
cancelIO context.CancelCauseFunc
}
const clientPollTimeout = time.Second * 15
func NewProcessPreimageOracle(name string, args []string) (*ProcessPreimageOracle, error) {
if name == "" {
return &ProcessPreimageOracle{}, nil
......@@ -140,10 +145,18 @@ func NewProcessPreimageOracle(name string, args []string) (*ProcessPreimageOracl
pOracleRW.Reader(),
pOracleRW.Writer(),
}
// Note that the client file descriptors are not closed when the pre-image server exits.
// So we use the FilePoller to ensure that we don't get stuck in a blocking read/write.
ctx, cancelIO := context.WithCancelCause(context.Background())
preimageClientIO := preimage.NewFilePoller(ctx, pClientRW, clientPollTimeout)
hostClientIO := preimage.NewFilePoller(ctx, hClientRW, clientPollTimeout)
out := &ProcessPreimageOracle{
pCl: preimage.NewOracleClient(pClientRW),
hCl: preimage.NewHintWriter(hClientRW),
pCl: preimage.NewOracleClient(preimageClientIO),
hCl: preimage.NewHintWriter(hostClientIO),
cmd: cmd,
waitErr: make(chan error),
cancelIO: cancelIO,
}
return out, nil
}
......@@ -166,23 +179,30 @@ func (p *ProcessPreimageOracle) Start() error {
if p.cmd == nil {
return nil
}
return p.cmd.Start()
err := p.cmd.Start()
go p.wait()
return err
}
func (p *ProcessPreimageOracle) Close() error {
if p.cmd == nil {
return nil
}
// Give the pre-image server time to exit cleanly before killing it.
time.Sleep(time.Second * 1)
_ = p.cmd.Process.Signal(os.Interrupt)
// Go 1.20 feature, to introduce later
//p.cmd.WaitDelay = time.Second * 10
return <-p.waitErr
}
func (p *ProcessPreimageOracle) wait() {
err := p.cmd.Wait()
if err, ok := err.(*exec.ExitError); ok {
if err.Success() {
return nil
var waitErr error
if err, ok := err.(*exec.ExitError); !ok || !err.Success() {
waitErr = err
}
}
return err
p.cancelIO(fmt.Errorf("%w: pre-image server has exited", waitErr))
p.waitErr <- waitErr
close(p.waitErr)
}
type StepFn func(proof bool) (*mipsevm.StepWitness, error)
......
......@@ -395,12 +395,10 @@ func (m *InstrumentedState) mipsStep() error {
func execute(insn uint32, rs uint32, rt uint32, mem uint32) uint32 {
opcode := insn >> 26 // 6-bits
fun := insn & 0x3f // 6-bits
if opcode < 0x20 {
// transform ArithLogI
// TODO(CLI-4136): replace with table
if opcode >= 8 && opcode < 0xF {
if opcode == 0 || (opcode >= 8 && opcode < 0xF) {
fun := insn & 0x3f // 6-bits
// transform ArithLogI to SPECIAL
switch opcode {
case 8:
fun = 0x20 // addi
......@@ -417,65 +415,90 @@ func execute(insn uint32, rs uint32, rt uint32, mem uint32) uint32 {
case 0xE:
fun = 0x26 // xori
}
opcode = 0
}
// 0 is opcode SPECIAL
if opcode == 0 {
shamt := (insn >> 6) & 0x1F
if fun < 0x20 {
switch {
case fun >= 0x08:
return rs // jr/jalr/div + others
case fun == 0x00:
return rt << shamt // sll
case fun == 0x02:
return rt >> shamt // srl
case fun == 0x03:
return SE(rt>>shamt, 32-shamt) // sra
case fun == 0x04:
return rt << (rs & 0x1F) // sllv
case fun == 0x06:
return rt >> (rs & 0x1F) // srlv
case fun == 0x07:
return SE(rt>>rs, 32-rs) // srav
}
}
// 0x10-0x13 = mfhi, mthi, mflo, mtlo
// R-type (ArithLog)
switch fun {
case 0x20, 0x21:
return rs + rt // add or addu
case 0x22, 0x23:
return rs - rt // sub or subu
case 0x24:
return rs & rt // and
case 0x25:
return rs | rt // or
case 0x26:
return rs ^ rt // xor
case 0x27:
return ^(rs | rt) // nor
case 0x2A:
case 0x00: // sll
return rt << ((insn >> 6) & 0x1F)
case 0x02: // srl
return rt >> ((insn >> 6) & 0x1F)
case 0x03: // sra
shamt := (insn >> 6) & 0x1F
return SE(rt>>shamt, 32-shamt)
case 0x04: // sllv
return rt << (rs & 0x1F)
case 0x06: // srlv
return rt >> (rs & 0x1F)
case 0x07: // srav
return SE(rt>>rs, 32-rs)
// functs in range [0x8, 0x1b] are handled specially by other functions
case 0x08: // jr
return rs
case 0x09: // jalr
return rs
case 0x0a: // movz
return rs
case 0x0b: // movn
return rs
case 0x0c: // syscall
return rs
// 0x0d - break not supported
case 0x0f: // sync
return rs
case 0x10: // mfhi
return rs
case 0x11: // mthi
return rs
case 0x12: // mflo
return rs
case 0x13: // mtlo
return rs
case 0x18: // mult
return rs
case 0x19: // multu
return rs
case 0x1a: // div
return rs
case 0x1b: // divu
return rs
// The rest includes transformed R-type arith imm instructions
case 0x20: // add
return rs + rt
case 0x21: // addu
return rs + rt
case 0x22: // sub
return rs - rt
case 0x23: // subu
return rs - rt
case 0x24: // and
return rs & rt
case 0x25: // or
return rs | rt
case 0x26: // xor
return rs ^ rt
case 0x27: // nor
return ^(rs | rt)
case 0x2a: // slti
if int32(rs) < int32(rt) {
return 1 // slt
} else {
return 0
return 1
}
case 0x2B:
if rs < rt {
return 1 // sltu
} else {
return 0
case 0x2b: // sltiu
if rs < rt {
return 1
}
return 0
default:
panic("invalid instruction")
}
} else if opcode == 0xF {
return rt << 16 // lui
} else if opcode == 0x1C { // SPECIAL2
if fun == 2 { // mul
} else {
switch opcode {
// SPECIAL2
case 0x1C:
fun := insn & 0x3f // 6-bits
switch fun {
case 0x2: // mul
return uint32(int32(rs) * int32(rt))
}
if fun == 0x20 || fun == 0x21 { // clo
case 0x20, 0x21: // clo
if fun == 0x20 {
rs = ^rs
}
......@@ -485,9 +508,8 @@ func execute(insn uint32, rs uint32, rt uint32, mem uint32) uint32 {
}
return i
}
}
} else if opcode < 0x28 {
switch opcode {
case 0x0F: // lui
return rt << 16
case 0x20: // lb
return SE((mem>>(24-(rs&3)*8))&0xFF, 8)
case 0x21: // lh
......@@ -506,31 +528,32 @@ func execute(insn uint32, rs uint32, rt uint32, mem uint32) uint32 {
val := mem >> (24 - (rs&3)*8)
mask := uint32(0xFFFFFFFF) >> (24 - (rs&3)*8)
return (rt & ^mask) | val
}
} else if opcode == 0x28 { // sb
case 0x28: // sb
val := (rt & 0xFF) << (24 - (rs&3)*8)
mask := 0xFFFFFFFF ^ uint32(0xFF<<(24-(rs&3)*8))
return (mem & mask) | val
} else if opcode == 0x29 { // sh
case 0x29: // sh
val := (rt & 0xFFFF) << (16 - (rs&2)*8)
mask := 0xFFFFFFFF ^ uint32(0xFFFF<<(16-(rs&2)*8))
return (mem & mask) | val
} else if opcode == 0x2a { // swl
case 0x2a: // swl
val := rt >> ((rs & 3) * 8)
mask := uint32(0xFFFFFFFF) >> ((rs & 3) * 8)
return (mem & ^mask) | val
} else if opcode == 0x2b { // sw
case 0x2b: // sw
return rt
} else if opcode == 0x2e { // swr
case 0x2e: // swr
val := rt << (24 - (rs&3)*8)
mask := uint32(0xFFFFFFFF) << (24 - (rs&3)*8)
return (mem & ^mask) | val
} else if opcode == 0x30 {
return mem // ll
} else if opcode == 0x38 {
return rt // sc
case 0x30: // ll
return mem
case 0x38: // sc
return rt
default:
panic("invalid instruction")
}
}
panic("invalid instruction")
}
......
## Optimism Monorepo Documentation
The `docs/` directory contains Optimism documentation closely tied to the implementation details of the monorepo (https://github.com/ethereum-optimism/optimism).
The directory layout is divided into the following sub-directories.
- [`postmortems/`](./postmortems/): Timestamped post-mortem documents.
- [`security-reviews`](./security-reviews/): Audit summaries and other security review documents.
FROM golang:1.19.9-alpine3.16 as builder
FROM golang:1.20.7-alpine3.18 as builder
RUN apk --no-cache add make jq bash git alpine-sdk
......@@ -16,7 +16,7 @@ RUN go mod download
RUN make build
FROM alpine:3.16
FROM alpine:3.18
RUN apk --no-cache add ca-certificates
RUN addgroup -S app && adduser -S app -G app
......
module github.com/ethereum-optimism/optimism
go 1.19
go 1.20
require (
github.com/BurntSushi/toml v1.3.2
......@@ -8,6 +8,7 @@ require (
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20230817174831-5d3ca1966435
github.com/ethereum/go-ethereum v1.12.0
github.com/fsnotify/fsnotify v1.6.0
github.com/go-chi/chi/v5 v5.0.10
......@@ -15,16 +16,14 @@ require (
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/go-cmp v0.5.9
github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8
github.com/google/uuid v1.3.0
github.com/google/uuid v1.3.1
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru v1.0.2
github.com/hashicorp/golang-lru/v2 v2.0.2
github.com/holiman/uint256 v1.2.3
github.com/ipfs/go-datastore v0.6.0
github.com/ipfs/go-ds-leveldb v0.5.0
github.com/jackc/pgtype v1.14.0
github.com/jackc/pgx/v5 v5.4.3
github.com/joho/godotenv v1.5.1
github.com/libp2p/go-libp2p v0.27.8
github.com/libp2p/go-libp2p-pubsub v0.9.3
github.com/libp2p/go-libp2p-testing v0.12.0
......@@ -33,6 +32,7 @@ require (
github.com/multiformats/go-multiaddr v0.10.1
github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/olekukonko/tablewriter v0.0.5
github.com/onsi/gomega v1.27.10
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.7.0
github.com/prometheus/client_golang v1.14.0
......@@ -44,7 +44,7 @@ require (
golang.org/x/term v0.11.0
golang.org/x/time v0.3.0
gorm.io/driver/postgres v1.5.2
gorm.io/gorm v1.25.3
gorm.io/gorm v1.25.4
)
require (
......@@ -67,23 +67,29 @@ require (
github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7 // indirect
github.com/crate-crypto/go-kzg-4844 v0.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
github.com/docker/docker v20.10.24+incompatible // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7 // indirect
github.com/elastic/gosigar v0.14.2 // indirect
github.com/ethereum/c-kzg-4844 v0.2.0 // indirect
github.com/fatih/color v1.7.0 // indirect
github.com/felixge/fgprof v0.9.3 // indirect
github.com/fjl/memsize v0.0.1 // indirect
github.com/flynn/noise v1.0.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
......@@ -111,8 +117,10 @@ require (
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/karalabe/usb v0.0.2 // indirect
github.com/klauspost/compress v1.16.4 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/koron/go-ssdp v0.0.4 // indirect
......@@ -148,7 +156,9 @@ require (
github.com/multiformats/go-multihash v0.2.1 // indirect
github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/onsi/ginkgo/v2 v2.9.2 // indirect
github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 // indirect
github.com/onsi/ginkgo/v2 v2.11.0 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
......@@ -184,10 +194,10 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/tools v0.7.0 // indirect
golang.org/x/tools v0.9.3 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
......@@ -198,6 +208,6 @@ require (
rsc.io/tmplfunc v0.0.3 // indirect
)
replace github.com/ethereum/go-ethereum v1.12.0 => github.com/ethereum-optimism/op-geth v1.101106.1-0.20230724181546-b9c6d36ae9b8
replace github.com/ethereum/go-ethereum v1.12.0 => github.com/ethereum-optimism/op-geth v1.101200.0-rc.1.0.20230818191139-f7376a28049b
//replace github.com/ethereum/go-ethereum v1.12.0 => ../go-ethereum
docker-compose.dev.yml
.env
indexer
/indexer
FROM --platform=$BUILDPLATFORM golang:1.19.9-alpine3.16 as builder
FROM --platform=$BUILDPLATFORM golang:1.20.7-alpine3.18 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
......@@ -18,8 +18,8 @@ RUN go mod download
RUN make indexer
FROM alpine:3.16
FROM alpine:3.18
COPY --from=builder /app/indexer/indexer /usr/local/bin
CMD ["indexer"]
CMD ["indexer", "all", "--config", "/app/indexer/indexer.toml"]
......@@ -2,12 +2,24 @@
## Getting started
### Setup env
The `indexer.toml` stores a set of preset environment variables that can be used to run the indexer, with the exception of the network-specific `l1-rpc` and `l2-rpc` variables. The `indexer.toml` file can be used as a default config; otherwise, a custom `.toml` config can be provided via the `--config` flag when running the application. An optional `l1-starting-height` value can be provided to the indexer to specify the L1 starting block height to begin indexing from. Ideally, this should be an L1 block that holds a correlated L2 genesis commitment. Furthermore, this value must be less than the current L1 block height to pass validation. If no starting height value is provided and the database is empty, the indexer will begin sequentially processing from L1 genesis.
### Setup polling intervals
The indexer polls and processes batches from the L1 and L2 chains at a configurable interval and batch size. The default polling interval is 5 seconds for both chains, with a default batch header size of 500. The polling frequency can be changed by setting the `l1-polling-interval` and `l2-polling-interval` values in the `indexer.toml` file. The batch header size can be changed by setting the `l1-batch-size` and `l2-batch-size` values in the `indexer.toml` file.
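For reference, a minimal Go sketch of how these values surface at runtime, assuming the `indexer/config` package from this commit and an `indexer.toml` in the working directory; values left unset fall back to the defaults applied inside `LoadConfig` (5000 ms polling interval, 500 header buffer):
```go
package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/indexer/config"
	"github.com/ethereum/go-ethereum/log"
)

func main() {
	logger := log.New()

	// Load ./indexer.toml; polling and buffer values that are left unset
	// fall back to the defaults applied inside LoadConfig.
	cfg, err := config.LoadConfig(logger, "./indexer.toml")
	if err != nil {
		panic(err)
	}

	fmt.Println("l1 polling interval (ms):", cfg.Chain.L1PollingInterval)
	fmt.Println("l2 polling interval (ms):", cfg.Chain.L2PollingInterval)
	fmt.Println("l1 header buffer size:", cfg.Chain.L1HeaderBufferSize)
	fmt.Println("l2 header buffer size:", cfg.Chain.L2HeaderBufferSize)
}
```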
### Testing
All tests can be run with `make test` from the `/indexer` directory. This will run all unit and e2e tests.
**NOTE:** Successfully running the E2E tests requires spinning up a local L1 geth node and pre-populating it with necessary bedrock genesis state. This can be done by calling `make devnet-allocs` from the root of the optimism monorepo before running the indexer tests. More information on this can be found in the [op-e2e README](../op-e2e/README.md).
### Run indexer vs goerli
- install docker
- `cp example.env .env`
- fill in .env
- run `docker-compose up` to start the indexer vs optimism goerli network
- run `docker compose up` to start the indexer vs optimism goerli network
### Run indexer with go
......@@ -21,3 +33,28 @@ TODO add indexer to the optimism devnet compose file (previously removed for bre
`docker-compose.dev.yml` is git ignored. Fill in your own docker-compose file here.
## Architecture
![Architectural Diagram](./assets/architecture.png)
The indexer application consists of two separate services that operate together:
**Indexer API** - Provides a lightweight API service that supports paginated lookups for bridge events.
**Indexer Service** - A polling-based service that continuously reads and persists OP Stack chain data (i.e., block metadata, system contract events, synchronized bridge events) from an L1 and L2 chain.
### Indexer API
TBD
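While this section is marked TBD, the routes registered in this commit (`/healthz`, `/api/v0/deposits/{address}`, `/api/v0/withdrawals/{address}`, each accepting optional `cursor` and `limit` query parameters) hint at the API's shape. A minimal, hypothetical client sketch follows; the host and port are assumptions and in practice come from the `api` section of `indexer.toml`:
```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumed local address; the real host/port come from indexer.toml.
	base := "http://localhost:8080"

	// Liveness check.
	resp, err := http.Get(base + "/healthz")
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	// Paginated deposit lookup for an address; cursor and limit are optional.
	addr := "0x4204204204204204204204204204204204204204"
	resp, err = http.Get(fmt.Sprintf("%s/api/v0/deposits/%s?limit=10", base, addr))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```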
### Indexer Service
![Service Component Diagram](./assets/indexer-service.png)
The indexer service is responsible for polling and processing real-time batches of L1 and L2 chain data. The indexer service is currently composed of the following key components (a conceptual sketch of the polling/insertion hand-off follows the list):
- **Poller Routines** - Individually polls the L1/L2 chain for new blocks and OP Stack system contract events.
- **Insertion Routines** - Awaits new batches from the poller routines and inserts them into the database upon retrieval.
- **Bridge Routine** - Polls the database directly for new L1 blocks and bridge events. Upon retrieval, the bridge routine will:
* Process and persist new bridge events
* Synchronize L1 proven/finalized withdrawals with their L2 initialization counterparts
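As a conceptual illustration of the poller/insertion hand-off described above (not the actual implementation), the sketch below uses a hypothetical `Batch` type and `fetchNewBlocks`/`insertBatch` placeholders for the real RPC reads and database writes:
```go
package main

import (
	"context"
	"time"
)

// Batch is a hypothetical stand-in for a polled set of headers and contract events.
type Batch struct{ Blocks []uint64 }

// poller periodically fetches new chain data and hands batches to the inserter.
func poller(ctx context.Context, interval time.Duration, out chan<- Batch) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			close(out)
			return
		case <-ticker.C:
			out <- Batch{Blocks: fetchNewBlocks()} // hypothetical RPC read
		}
	}
}

// inserter awaits batches from the poller and persists them upon retrieval.
func inserter(in <-chan Batch) {
	for batch := range in {
		insertBatch(batch) // hypothetical database write
	}
}

func fetchNewBlocks() []uint64 { return nil }
func insertBatch(Batch)        {}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	time.AfterFunc(15*time.Second, cancel) // stop after a few polls, for the sake of the example
	ch := make(chan Batch, 1)
	go poller(ctx, 5*time.Second, ch)
	inserter(ch)
}
```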
### Database
The indexer service currently supports a Postgres database for storing L1/L2 OP Stack chain data. The most up-to-date database schemas can be found in the `./migrations` directory.
**NOTE:** The indexer service implementation currently does not natively support database migration. Because of this, a database must be manually updated to ensure forward compatibility with the latest indexer service implementation.
\ No newline at end of file
package api
import (
"context"
"fmt"
"net/http"
"github.com/ethereum-optimism/optimism/indexer/api/routes"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-service/httputil"
"github.com/ethereum/go-ethereum/log"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
)
const ethereumAddressRegex = `^0x[a-fA-F0-9]{40}$`
type Api struct {
log log.Logger
Router *chi.Mux
}
func NewApi(bv database.BridgeTransfersView, logger log.Logger) *Api {
logger.Info("Initializing API...")
func NewApi(logger log.Logger, bv database.BridgeTransfersView) *Api {
r := chi.NewRouter()
h := routes.NewRoutes(logger, bv, r)
api := &Api{Router: r}
r.Use(middleware.Heartbeat("/healthz"))
r.Get("/healthz", h.HealthzHandler)
r.Get(fmt.Sprintf("/api/v0/deposits/{address:%s}", ethereumAddressRegex), h.L1DepositsHandler)
r.Get(fmt.Sprintf("/api/v0/withdrawals/{address:%s}", ethereumAddressRegex), h.L2WithdrawalsHandler)
return api
return &Api{log: logger, Router: r}
}
func (a *Api) Listen(port string) error {
return http.ListenAndServe(port, a.Router)
func (a *Api) Listen(ctx context.Context, port int) error {
a.log.Info("api server listening...", "port", port)
server := http.Server{Addr: fmt.Sprintf(":%d", port), Handler: a.Router}
err := httputil.ListenAndServeContext(ctx, &server)
if err != nil {
a.log.Error("api server stopped", "err", err)
} else {
a.log.Info("api server stopped")
}
return err
}
......@@ -2,7 +2,6 @@ package api
import (
"fmt"
"math/big"
"net/http"
"net/http/httptest"
"testing"
......@@ -22,16 +21,20 @@ var mockAddress = "0x4204204204204204204204204204204204204204"
var (
deposit = database.L1BridgeDeposit{
TransactionSourceHash: common.HexToHash("abc"),
CrossDomainMessengerNonce: &database.U256{Int: big.NewInt(0)},
BridgeTransfer: database.BridgeTransfer{
CrossDomainMessageHash: &common.Hash{},
Tx: database.Transaction{},
TokenPair: database.TokenPair{},
},
}
withdrawal = database.L2BridgeWithdrawal{
TransactionWithdrawalHash: common.HexToHash("0x420"),
CrossDomainMessengerNonce: &database.U256{Int: big.NewInt(0)},
BridgeTransfer: database.BridgeTransfer{
CrossDomainMessageHash: &common.Hash{},
Tx: database.Transaction{},
TokenPair: database.TokenPair{},
},
}
)
......@@ -39,39 +42,42 @@ func (mbv *MockBridgeTransfersView) L1BridgeDeposit(hash common.Hash) (*database
return &deposit, nil
}
func (mbv *MockBridgeTransfersView) L1BridgeDepositByCrossDomainMessengerNonce(nonce *big.Int) (*database.L1BridgeDeposit, error) {
func (mbv *MockBridgeTransfersView) L1BridgeDepositWithFilter(filter database.BridgeTransfer) (*database.L1BridgeDeposit, error) {
return &deposit, nil
}
func (mbv *MockBridgeTransfersView) L1BridgeDepositsByAddress(address common.Address) ([]*database.L1BridgeDepositWithTransactionHashes, error) {
return []*database.L1BridgeDepositWithTransactionHashes{
{
L1BridgeDeposit: deposit,
L1TransactionHash: common.HexToHash("0x123"),
},
}, nil
}
func (mbv *MockBridgeTransfersView) L2BridgeWithdrawal(address common.Hash) (*database.L2BridgeWithdrawal, error) {
return &withdrawal, nil
}
func (mbv *MockBridgeTransfersView) L2BridgeWithdrawalByCrossDomainMessengerNonce(nonce *big.Int) (*database.L2BridgeWithdrawal, error) {
func (mbv *MockBridgeTransfersView) L2BridgeWithdrawalWithFilter(filter database.BridgeTransfer) (*database.L2BridgeWithdrawal, error) {
return &withdrawal, nil
}
func (mbv *MockBridgeTransfersView) L2BridgeWithdrawalsByAddress(address common.Address) ([]*database.L2BridgeWithdrawalWithTransactionHashes, error) {
return []*database.L2BridgeWithdrawalWithTransactionHashes{
func (mbv *MockBridgeTransfersView) L1BridgeDepositsByAddress(address common.Address, cursor string, limit int) (*database.L1BridgeDepositsResponse, error) {
return &database.L1BridgeDepositsResponse{
Deposits: []database.L1BridgeDepositWithTransactionHashes{
{
L1BridgeDeposit: deposit,
L1TransactionHash: common.HexToHash("0x123"),
},
},
}, nil
}
func (mbv *MockBridgeTransfersView) L2BridgeWithdrawalsByAddress(address common.Address, cursor string, limit int) (*database.L2BridgeWithdrawalsResponse, error) {
return &database.L2BridgeWithdrawalsResponse{
Withdrawals: []database.L2BridgeWithdrawalWithTransactionHashes{
{
L2BridgeWithdrawal: withdrawal,
L2TransactionHash: common.HexToHash("0x789"),
},
},
}, nil
}
func TestHealthz(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
api := NewApi(&MockBridgeTransfersView{}, logger)
api := NewApi(logger, &MockBridgeTransfersView{})
request, err := http.NewRequest("GET", "/healthz", nil)
assert.Nil(t, err)
......@@ -83,7 +89,7 @@ func TestHealthz(t *testing.T) {
func TestL1BridgeDepositsHandler(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
api := NewApi(&MockBridgeTransfersView{}, logger)
api := NewApi(logger, &MockBridgeTransfersView{})
request, err := http.NewRequest("GET", fmt.Sprintf("/api/v0/deposits/%s", mockAddress), nil)
assert.Nil(t, err)
......@@ -95,7 +101,7 @@ func TestL1BridgeDepositsHandler(t *testing.T) {
func TestL2BridgeWithdrawalsByAddressHandler(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
api := NewApi(&MockBridgeTransfersView{}, logger)
api := NewApi(logger, &MockBridgeTransfersView{})
request, err := http.NewRequest("GET", fmt.Sprintf("/api/v0/withdrawals/%s", mockAddress), nil)
assert.Nil(t, err)
......
......@@ -2,6 +2,7 @@ package routes
import (
"net/http"
"strconv"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum/go-ethereum/common"
......@@ -29,9 +30,9 @@ type DepositResponse struct {
// TODO this is original spec but maybe include the l2 block info too for the relayed tx
// FIXME make a pure function that returns a struct instead of newWithdrawalResponse
func newDepositResponse(deposits []*database.L1BridgeDepositWithTransactionHashes) DepositResponse {
items := make([]DepositItem, len(deposits))
for _, deposit := range deposits {
func newDepositResponse(deposits *database.L1BridgeDepositsResponse) DepositResponse {
items := make([]DepositItem, len(deposits.Deposits))
for _, deposit := range deposits.Deposits {
item := DepositItem{
Guid: deposit.L1BridgeDeposit.TransactionSourceHash.String(),
Block: Block{
......@@ -45,10 +46,10 @@ func newDepositResponse(deposits []*database.L1BridgeDepositWithTransactionHashe
},
From: deposit.L1BridgeDeposit.Tx.FromAddress.String(),
To: deposit.L1BridgeDeposit.Tx.ToAddress.String(),
Amount: deposit.L1BridgeDeposit.Tx.Amount.Int.String(),
Amount: deposit.L1BridgeDeposit.Tx.Amount.String(),
L1Token: TokenInfo{
ChainId: 1,
Address: deposit.L1BridgeDeposit.TokenPair.L1TokenAddress.String(),
Address: deposit.L1BridgeDeposit.TokenPair.LocalTokenAddress.String(),
Name: "TODO",
Symbol: "TODO",
Decimals: 420,
......@@ -58,7 +59,7 @@ func newDepositResponse(deposits []*database.L1BridgeDepositWithTransactionHashe
},
L2Token: TokenInfo{
ChainId: 10,
Address: deposit.L1BridgeDeposit.TokenPair.L2TokenAddress.String(),
Address: deposit.L1BridgeDeposit.TokenPair.RemoteTokenAddress.String(),
Name: "TODO",
Symbol: "TODO",
Decimals: 420,
......@@ -71,16 +72,30 @@ func newDepositResponse(deposits []*database.L1BridgeDepositWithTransactionHashe
}
return DepositResponse{
Cursor: "42042042-4204-4204-4204-420420420420", // TODO
HasNextPage: false, // TODO
Cursor: deposits.Cursor,
HasNextPage: deposits.HasNextPage,
Items: items,
}
}
func (h Routes) L1DepositsHandler(w http.ResponseWriter, r *http.Request) {
address := common.HexToAddress(chi.URLParam(r, "address"))
cursor := r.URL.Query().Get("cursor")
limitQuery := r.URL.Query().Get("limit")
deposits, err := h.BridgeTransfersView.L1BridgeDepositsByAddress(address)
defaultLimit := 100
limit := defaultLimit
if limitQuery != "" {
parsedLimit, err := strconv.Atoi(limitQuery)
if err != nil {
http.Error(w, "Limit could not be parsed into a number", http.StatusBadRequest)
h.Logger.Error("Invalid limit")
h.Logger.Error(err.Error())
}
limit = parsedLimit
}
deposits, err := h.BridgeTransfersView.L1BridgeDepositsByAddress(address, cursor, limit)
if err != nil {
http.Error(w, "Internal server error reading deposits", http.StatusInternalServerError)
h.Logger.Error("Unable to read deposits from DB")
......
package routes
import (
"net/http"
)
func (h Routes) HealthzHandler(w http.ResponseWriter, r *http.Request) {
jsonResponse(w, h.Logger, "ok", http.StatusOK)
}
......@@ -2,6 +2,7 @@ package routes
import (
"net/http"
"strconv"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum/go-ethereum/common"
......@@ -42,9 +43,9 @@ type WithdrawalResponse struct {
}
// FIXME make a pure function that returns a struct instead of newWithdrawalResponse
func newWithdrawalResponse(withdrawals []*database.L2BridgeWithdrawalWithTransactionHashes) WithdrawalResponse {
items := make([]WithdrawalItem, len(withdrawals))
for _, withdrawal := range withdrawals {
func newWithdrawalResponse(withdrawals *database.L2BridgeWithdrawalsResponse) WithdrawalResponse {
items := make([]WithdrawalItem, len(withdrawals.Withdrawals))
for _, withdrawal := range withdrawals.Withdrawals {
item := WithdrawalItem{
Guid: withdrawal.L2BridgeWithdrawal.TransactionWithdrawalHash.String(),
Block: Block{
......@@ -59,7 +60,7 @@ func newWithdrawalResponse(withdrawals []*database.L2BridgeWithdrawalWithTransac
From: withdrawal.L2BridgeWithdrawal.Tx.FromAddress.String(),
To: withdrawal.L2BridgeWithdrawal.Tx.ToAddress.String(),
TransactionHash: withdrawal.L2TransactionHash.String(),
Amount: withdrawal.L2BridgeWithdrawal.Tx.Amount.Int.String(),
Amount: withdrawal.L2BridgeWithdrawal.Tx.Amount.String(),
Proof: Proof{
TransactionHash: withdrawal.ProvenL1TransactionHash.String(),
BlockTimestamp: withdrawal.L2BridgeWithdrawal.Tx.Timestamp,
......@@ -73,7 +74,7 @@ func newWithdrawalResponse(withdrawals []*database.L2BridgeWithdrawalWithTransac
WithdrawalState: "COMPLETE", // TODO
L1Token: TokenInfo{
ChainId: 1,
Address: withdrawal.L2BridgeWithdrawal.TokenPair.L1TokenAddress.String(),
Address: withdrawal.L2BridgeWithdrawal.TokenPair.RemoteTokenAddress.String(),
Name: "Example", // TODO
Symbol: "EXAMPLE", // TODO
Decimals: 18, // TODO
......@@ -83,7 +84,7 @@ func newWithdrawalResponse(withdrawals []*database.L2BridgeWithdrawalWithTransac
},
L2Token: TokenInfo{
ChainId: 10,
Address: withdrawal.L2BridgeWithdrawal.TokenPair.L2TokenAddress.String(),
Address: withdrawal.L2BridgeWithdrawal.TokenPair.LocalTokenAddress.String(),
Name: "Example", // TODO
Symbol: "EXAMPLE", // TODO
Decimals: 18, // TODO
......@@ -96,23 +97,34 @@ func newWithdrawalResponse(withdrawals []*database.L2BridgeWithdrawalWithTransac
}
return WithdrawalResponse{
Cursor: "42042042-0420-4204-2042-420420420420", // TODO
HasNextPage: true, // TODO
Cursor: withdrawals.Cursor,
HasNextPage: withdrawals.HasNextPage,
Items: items,
}
}
func (h Routes) L2WithdrawalsHandler(w http.ResponseWriter, r *http.Request) {
address := common.HexToAddress(chi.URLParam(r, "address"))
cursor := r.URL.Query().Get("cursor")
limitQuery := r.URL.Query().Get("limit")
withdrawals, err := h.BridgeTransfersView.L2BridgeWithdrawalsByAddress(address)
defaultLimit := 100
limit := defaultLimit
if limitQuery != "" {
parsedLimit, err := strconv.Atoi(limitQuery)
if err != nil {
http.Error(w, "Internal server error fetching withdrawals", http.StatusInternalServerError)
h.Logger.Error("Unable to read deposits from DB")
http.Error(w, "Limit could not be parsed into a number", http.StatusBadRequest)
h.Logger.Error("Invalid limit")
h.Logger.Error(err.Error())
}
limit = parsedLimit
}
withdrawals, err := h.BridgeTransfersView.L2BridgeWithdrawalsByAddress(address, cursor, limit)
if err != nil {
http.Error(w, "Internal server error reading withdrawals", http.StatusInternalServerError)
h.Logger.Error("Unable to read withdrawals from DB")
h.Logger.Error(err.Error())
return
}
response := newWithdrawalResponse(withdrawals)
jsonResponse(w, h.Logger, response, http.StatusOK)
......
package cli
package main
import (
"context"
"fmt"
"strconv"
"sync"
"github.com/ethereum-optimism/optimism/indexer"
"github.com/ethereum-optimism/optimism/indexer/api"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/opio"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
)
type Cli struct {
GitVersion string
GitCommit string
GitDate string
app *cli.App
Flags []cli.Flag
}
var (
ConfigFlag = &cli.StringFlag{
Name: "config",
Value: "./indexer.toml",
Aliases: []string{"c"},
Usage: "path to config file",
EnvVars: []string{"INDEXER_CONFIG"},
}
)
func runIndexer(ctx *cli.Context) error {
logger := log.NewLogger(log.ReadCLIConfig(ctx))
configPath := ctx.String(ConfigFlag.Name)
cfg, err := config.LoadConfig(logger, configPath)
log := log.NewLogger(log.ReadCLIConfig(ctx)).New("role", "indexer")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil {
logger.Error("failed to load config", "err", err)
log.Error("failed to load config", "err", err)
return err
}
db, err := database.NewDB(cfg.DB)
if err != nil {
log.Error("failed to connect to database", "err", err)
return err
}
defer db.Close()
indexer, err := indexer.NewIndexer(cfg.Chain, cfg.RPCs, db, logger)
indexer, err := indexer.NewIndexer(log, db, cfg.Chain, cfg.RPCs, cfg.Metrics)
if err != nil {
log.Error("failed to create indexer", "err", err)
return err
}
indexerCtx, indexerCancel := context.WithCancel(context.Background())
go func() {
opio.BlockOnInterrupts()
indexerCancel()
}()
return indexer.Run(indexerCtx)
return indexer.Run(ctx.Context)
}
func runApi(ctx *cli.Context) error {
logger := log.NewLogger(log.ReadCLIConfig(ctx))
configPath := ctx.String(ConfigFlag.Name)
cfg, err := config.LoadConfig(logger, configPath)
log := log.NewLogger(log.ReadCLIConfig(ctx)).New("role", "api")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil {
logger.Error("failed to load config", "err", err)
log.Error("failed to load config", "err", err)
return err
}
db, err := database.NewDB(cfg.DB)
if err != nil {
logger.Crit("Failed to connect to database", "err", err)
log.Error("failed to connect to database", "err", err)
return err
}
defer db.Close()
server := api.NewApi(db.BridgeTransfers, logger)
return server.Listen(strconv.Itoa(cfg.API.Port))
api := api.NewApi(log, db.BridgeTransfers)
return api.Listen(ctx.Context, cfg.API.Port)
}
var (
ConfigFlag = &cli.StringFlag{
Name: "config",
Value: "./indexer.toml",
Aliases: []string{"c"},
Usage: "path to config file",
EnvVars: []string{"INDEXER_CONFIG"},
func runAll(ctx *cli.Context) error {
log := log.NewLogger(log.ReadCLIConfig(ctx))
// Ensure both processes complete before returning.
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
err := runApi(ctx)
if err != nil {
log.Error("api process non-zero exit", "err", err)
}
)
}()
go func() {
defer wg.Done()
err := runIndexer(ctx)
if err != nil {
log.Error("indexer process non-zero exit", "err", err)
}
}()
// make a instance method on Cli called Run that runs cli
// and returns an error
func (c *Cli) Run(args []string) error {
return c.app.Run(args)
// We purposefully return no error since the indexer and api
// have no inter-dependencies. We simply rely on the logs to
// report a non-zero exit for either process.
wg.Wait()
return nil
}
func NewCli(GitVersion string, GitCommit string, GitDate string) *Cli {
func newCli(GitCommit string, GitDate string) *cli.App {
flags := []cli.Flag{ConfigFlag}
flags = append(flags, log.CLIFlags("INDEXER")...)
app := &cli.App{
Version: fmt.Sprintf("%s-%s", GitVersion, params.VersionWithCommit(GitCommit, GitDate)),
return &cli.App{
Version: params.VersionWithCommit(GitCommit, GitDate),
Description: "An indexer of all optimism events with a serving api layer",
EnableBashCompletion: true,
Commands: []*cli.Command{
{
Name: "api",
......@@ -105,16 +110,25 @@ func NewCli(GitVersion string, GitCommit string, GitDate string) *Cli {
Action: runApi,
},
{
Name: "indexer",
Name: "index",
Flags: flags,
Description: "Runs the indexing service",
Action: runIndexer,
},
},
}
return &Cli{
app: app,
{
Name: "all",
Flags: flags,
Description: "Runs both the api service and the indexing service",
Action: runAll,
},
{
Name: "version",
Description: "print version",
Action: func(ctx *cli.Context) error {
cli.ShowVersion(ctx)
return nil
},
},
},
}
}
package main
import (
"context"
"os"
"github.com/ethereum-optimism/optimism/indexer/cli"
"github.com/ethereum-optimism/optimism/op-service/opio"
"github.com/ethereum/go-ethereum/log"
)
var (
GitVersion = ""
GitCommit = ""
GitDate = ""
)
func main() {
app := cli.NewCli(GitVersion, GitCommit, GitDate)
// This is the most root context, used to propagate
// cancellations to all spawned application-level goroutines
ctx, cancel := context.WithCancel(context.Background())
go func() {
opio.BlockOnInterrupts()
cancel()
}()
if err := app.Run(os.Args); err != nil {
log.Crit("Application failed", "message", err)
app := newCli(GitCommit, GitDate)
if err := app.RunContext(ctx, os.Args); err != nil {
log.Error("application failed", "err", err)
}
}
......@@ -8,53 +8,77 @@ import (
"github.com/BurntSushi/toml"
"github.com/ethereum/go-ethereum/common"
geth_log "github.com/ethereum/go-ethereum/log"
"github.com/joho/godotenv"
)
const (
// default to 5 seconds
defaultLoopInterval = 5000
defaultHeaderBufferSize = 500
)
// in future presets can just be onchain config and fetched on initialization
// Config represents the `indexer.toml` file used to configure the indexer
type Config struct {
Chain ChainConfig
Chain ChainConfig `toml:"chain"`
RPCs RPCsConfig `toml:"rpcs"`
DB DBConfig
API APIConfig
Metrics MetricsConfig
DB DBConfig `toml:"db"`
API APIConfig `toml:"api"`
Metrics MetricsConfig `toml:"metrics"`
}
// fetch this via onchain config from RPCsConfig and remove from config in future
type L1Contracts struct {
OptimismPortal common.Address
L2OutputOracle common.Address
L1CrossDomainMessenger common.Address
L1StandardBridge common.Address
L1ERC721Bridge common.Address
OptimismPortalProxy common.Address `toml:"optimism-portal"`
L2OutputOracleProxy common.Address `toml:"l2-output-oracle"`
L1CrossDomainMessengerProxy common.Address `toml:"l1-cross-domain-messenger"`
L1StandardBridgeProxy common.Address `toml:"l1-standard-bridge"`
// Some more contracts -- ProxyAdmin, SystemConfig, etcc
// Some more contracts -- L1ERC721Bridge, ProxyAdmin, SystemConfig, etc
// Ignore the auxiliary contracts?
// Legacy contracts? We'll add this in to index the legacy chain.
// Remove afterwards?
}
func (c L1Contracts) ToSlice() []common.Address {
fields := reflect.VisibleFields(reflect.TypeOf(c))
v := reflect.ValueOf(c)
contracts := make([]common.Address, len(fields))
// converts the struct to a slice of addresses for easy iteration
// also validates that all fields are addresses
func (c *L1Contracts) AsSlice() ([]common.Address, error) {
clone := *c
contractValue := reflect.ValueOf(clone)
fields := reflect.VisibleFields(reflect.TypeOf(clone))
l1Contracts := make([]common.Address, len(fields))
for i, field := range fields {
contracts[i] = (v.FieldByName(field.Name).Interface()).(common.Address)
// ruleid: unsafe-reflect-by-name
addr, ok := (contractValue.FieldByName(field.Name).Interface()).(common.Address)
if !ok {
return nil, fmt.Errorf("non-address found in L1Contracts: %s", field.Name)
}
l1Contracts[i] = addr
}
return contracts
return l1Contracts, nil
}
// ChainConfig configures of the chain being indexed
type ChainConfig struct {
// Configure known chains with the l2 chain id
Preset int
// Configure custom chains via providing the L1Contract addresses
L1Contracts L1Contracts
L1Contracts L1Contracts `toml:"l1-contracts"`
L1StartingHeight uint `toml:"l1-starting-height"`
// These configuration options will be removed once
// native reorg handling is implemented
L1ConfirmationDepth uint `toml:"l1-confirmation-depth"`
L2ConfirmationDepth uint `toml:"l2-confirmation-depth"`
L1PollingInterval uint `toml:"l1-polling-interval"`
L2PollingInterval uint `toml:"l2-polling-interval"`
L1HeaderBufferSize uint `toml:"l1-header-buffer-size"`
L2HeaderBufferSize uint `toml:"l2-header-buffer-size"`
}
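Putting the tags above together, a [chain] block for a custom (non-preset) deployment might look like the following sketch, embedded as a Go raw string in the style of the tests later in this diff; the addresses are the example values reused throughout, and omitted polling/buffer keys fall back to the defaults applied in LoadConfig.

// illustrative indexer.toml fragment for a custom deployment (no preset)
const exampleChainTOML = `
[chain]
l1-starting-height = 0
l1-confirmation-depth = 10
l2-confirmation-depth = 0
l1-polling-interval = 5000
l2-polling-interval = 5000
l1-header-buffer-size = 500
l2-header-buffer-size = 500

[chain.l1-contracts]
optimism-portal = "0x4205Fc579115071764c7423A4f12eDde41f106Ed"
l2-output-oracle = "0x42097868233d1aa22e815a266982f2cf17685a27"
l1-cross-domain-messenger = "0x420ce71c97B33Cc4729CF772ae268934F7ab5fA1"
l1-standard-bridge = "0x4209fc46f92E8a1c0deC1b1747d010903E884bE1"
`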
// RPCsConfig configures the RPC urls
......@@ -65,45 +89,39 @@ type RPCsConfig struct {
// DBConfig configures the postgres database
type DBConfig struct {
Host string
Port int
Name string
User string
Password string
Host string `toml:"host"`
Port int `toml:"port"`
Name string `toml:"name"`
User string `toml:"user"`
Password string `toml:"password"`
}
// APIConfig configures the API server
type APIConfig struct {
Host string
Port int
Host string `toml:"host"`
Port int `toml:"port"`
}
// MetricsConfig configures the metrics server
type MetricsConfig struct {
Host string
Port int
Host string `toml:"host"`
Port int `toml:"port"`
}
// LoadConfig loads the `indexer.toml` config file from a given path
func LoadConfig(logger geth_log.Logger, path string) (Config, error) {
if err := godotenv.Load(); err != nil {
logger.Warn("Unable to load .env file", err)
logger.Info("Continuing without .env file")
} else {
logger.Info("Loaded .env file")
}
logger.Debug("loading config", "path", path)
var conf Config
data, err := os.ReadFile(path)
if err != nil {
return conf, err
}
data = []byte(os.ExpandEnv(string(data)))
logger.Debug("parsed config file", "data", string(data))
if _, err := toml.Decode(string(data), &conf); err != nil {
logger.Info("Failed to decode config file", "message", err)
logger.Info("failed to decode config file", "err", err)
return conf, err
}
......@@ -116,7 +134,27 @@ func LoadConfig(logger geth_log.Logger, path string) (Config, error) {
}
}
logger.Debug("Loaded config file", conf)
// Set polling defaults if not set
if conf.Chain.L1PollingInterval == 0 {
logger.Info("setting default L1 polling interval", "interval", defaultLoopInterval)
conf.Chain.L1PollingInterval = defaultLoopInterval
}
if conf.Chain.L2PollingInterval == 0 {
logger.Info("setting default L2 polling interval", "interval", defaultLoopInterval)
conf.Chain.L2PollingInterval = defaultLoopInterval
}
if conf.Chain.L1HeaderBufferSize == 0 {
logger.Info("setting default L1 header buffer", "size", defaultHeaderBufferSize)
conf.Chain.L1HeaderBufferSize = defaultHeaderBufferSize
}
if conf.Chain.L2HeaderBufferSize == 0 {
logger.Info("setting default L2 header buffer", "size", defaultHeaderBufferSize)
conf.Chain.L2HeaderBufferSize = defaultHeaderBufferSize
}
logger.Info("loaded config")
return conf, nil
}
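A short usage sketch of LoadConfig as it stands after this change, assuming the indexer/config import path shown elsewhere in this diff. Note that ${VAR} references inside the TOML are expanded from the environment via os.ExpandEnv before decoding, and the polling/buffer defaults above kick in for any omitted values.

package main

import (
	"os"

	"github.com/ethereum-optimism/optimism/indexer/config"
	"github.com/ethereum/go-ethereum/log"
)

func main() {
	logger := log.New()

	// values referenced as ${INDEXER_RPC_URL_L1} in indexer.toml are substituted from the environment
	os.Setenv("INDEXER_RPC_URL_L1", "https://l1.example.com")

	cfg, err := config.LoadConfig(logger, "./indexer.toml")
	if err != nil {
		logger.Crit("failed to load config", "err", err)
	}
	logger.Info("loaded", "l1_polling_interval", cfg.Chain.L1PollingInterval, "preset", cfg.Chain.Preset)
}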
......@@ -54,11 +54,10 @@ func TestLoadConfig(t *testing.T) {
require.NoError(t, err)
require.Equal(t, conf.Chain.Preset, 420)
require.Equal(t, conf.Chain.L1Contracts.OptimismPortal.String(), presetL1Contracts[420].OptimismPortal.String())
require.Equal(t, conf.Chain.L1Contracts.L1CrossDomainMessenger.String(), presetL1Contracts[420].L1CrossDomainMessenger.String())
require.Equal(t, conf.Chain.L1Contracts.L1ERC721Bridge.String(), presetL1Contracts[420].L1ERC721Bridge.String())
require.Equal(t, conf.Chain.L1Contracts.L1StandardBridge.String(), presetL1Contracts[420].L1StandardBridge.String())
require.Equal(t, conf.Chain.L1Contracts.L2OutputOracle.String(), presetL1Contracts[420].L2OutputOracle.String())
require.Equal(t, conf.Chain.L1Contracts.OptimismPortalProxy.String(), presetL1Contracts[420].OptimismPortalProxy.String())
require.Equal(t, conf.Chain.L1Contracts.L1CrossDomainMessengerProxy.String(), presetL1Contracts[420].L1CrossDomainMessengerProxy.String())
require.Equal(t, conf.Chain.L1Contracts.L1StandardBridgeProxy.String(), presetL1Contracts[420].L1StandardBridgeProxy.String())
require.Equal(t, conf.Chain.L1Contracts.L2OutputOracleProxy.String(), presetL1Contracts[420].L2OutputOracleProxy.String())
require.Equal(t, conf.RPCs.L1RPC, "https://l1.example.com")
require.Equal(t, conf.RPCs.L2RPC, "https://l2.example.com")
require.Equal(t, conf.DB.Host, "127.0.0.1")
......@@ -80,7 +79,12 @@ func TestLoadConfig_WithoutPreset(t *testing.T) {
testData := `
[chain]
l1contracts = { OptimismPortal = "0x4205Fc579115071764c7423A4f12eDde41f106Ed", L2OutputOracle = "0x42097868233d1aa22e815a266982f2cf17685a27", L1CrossDomainMessenger = "0x420ce71c97B33Cc4729CF772ae268934F7ab5fA1", L1StandardBridge = "0x4209fc46f92E8a1c0deC1b1747d010903E884bE1", L1ERC721Bridge ="0x420749f83b81B301cAb5f48EB8516B986DAef23D" }
[chain.l1-contracts]
optimism-portal = "0x4205Fc579115071764c7423A4f12eDde41f106Ed"
l2-output-oracle = "0x42097868233d1aa22e815a266982f2cf17685a27"
l1-cross-domain-messenger = "0x420ce71c97B33Cc4729CF772ae268934F7ab5fA1"
l1-standard-bridge = "0x4209fc46f92E8a1c0deC1b1747d010903E884bE1"
[rpcs]
l1-rpc = "https://l1.example.com"
......@@ -99,12 +103,18 @@ func TestLoadConfig_WithoutPreset(t *testing.T) {
conf, err := LoadConfig(logger, tmpfile.Name())
require.NoError(t, err)
require.Equal(t, conf.Chain.L1Contracts.OptimismPortal.String(), common.HexToAddress("0x4205Fc579115071764c7423A4f12eDde41f106Ed").String())
require.Equal(t, conf.Chain.L1Contracts.L2OutputOracle.String(), common.HexToAddress("0x42097868233d1aa22e815a266982f2cf17685a27").String())
require.Equal(t, conf.Chain.L1Contracts.L1CrossDomainMessenger.String(), common.HexToAddress("0x420ce71c97B33Cc4729CF772ae268934F7ab5fA1").String())
require.Equal(t, conf.Chain.L1Contracts.L1StandardBridge.String(), common.HexToAddress("0x4209fc46f92E8a1c0deC1b1747d010903E884bE1").String())
require.Equal(t, conf.Chain.L1Contracts.L1ERC721Bridge.String(), common.HexToAddress("0x420749f83b81B301cAb5f48EB8516B986DAef23D").String())
// Enforce default values
require.Equal(t, conf.Chain.L1Contracts.OptimismPortalProxy.String(), common.HexToAddress("0x4205Fc579115071764c7423A4f12eDde41f106Ed").String())
require.Equal(t, conf.Chain.L1Contracts.L2OutputOracleProxy.String(), common.HexToAddress("0x42097868233d1aa22e815a266982f2cf17685a27").String())
require.Equal(t, conf.Chain.L1Contracts.L1CrossDomainMessengerProxy.String(), common.HexToAddress("0x420ce71c97B33Cc4729CF772ae268934F7ab5fA1").String())
require.Equal(t, conf.Chain.L1Contracts.L1StandardBridgeProxy.String(), common.HexToAddress("0x4209fc46f92E8a1c0deC1b1747d010903E884bE1").String())
require.Equal(t, conf.Chain.Preset, 0)
// Enforce polling default values
require.Equal(t, conf.Chain.L1PollingInterval, uint(5000))
require.Equal(t, conf.Chain.L2PollingInterval, uint(5000))
require.Equal(t, conf.Chain.L1HeaderBufferSize, uint(500))
require.Equal(t, conf.Chain.L2HeaderBufferSize, uint(500))
}
func TestLoadConfig_WithUnknownPreset(t *testing.T) {
......@@ -137,3 +147,54 @@ func TestLoadConfig_WithUnknownPreset(t *testing.T) {
require.Error(t, err)
require.Equal(t, fmt.Sprintf("unknown preset: %d", faultyPreset), err.Error())
}
func Test_LoadConfig_PollingValues(t *testing.T) {
tmpfile, err := os.CreateTemp("", "test_user_values.toml")
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
defer tmpfile.Close()
testData := `
[chain]
l1-polling-interval = 1000
l2-polling-interval = 1005
l1-header-buffer-size = 100
l2-header-buffer-size = 105`
data := []byte(testData)
err = os.WriteFile(tmpfile.Name(), data, 0644)
require.NoError(t, err)
defer os.Remove(tmpfile.Name())
err = tmpfile.Close()
require.NoError(t, err)
logger := testlog.Logger(t, log.LvlInfo)
conf, err := LoadConfig(logger, tmpfile.Name())
require.NoError(t, err)
require.Equal(t, conf.Chain.L1PollingInterval, uint(1000))
require.Equal(t, conf.Chain.L2PollingInterval, uint(1005))
require.Equal(t, conf.Chain.L1HeaderBufferSize, uint(100))
require.Equal(t, conf.Chain.L2HeaderBufferSize, uint(105))
}
func Test_AsSliceSuccess(t *testing.T) {
// error cases are intentionally not tested since they can only occur
// when a developer modifies the L1Contracts struct to hold a non-address field
testCfg := &L1Contracts{
OptimismPortalProxy: common.HexToAddress("0x4205Fc579115071764c7423A4f12eDde41f106Ed"),
L2OutputOracleProxy: common.HexToAddress("0x42097868233d1aa22e815a266982f2cf17685a27"),
L1CrossDomainMessengerProxy: common.HexToAddress("0x420ce71c97B33Cc4729CF772ae268934F7ab5fA1"),
L1StandardBridgeProxy: common.HexToAddress("0x4209fc46f92E8a1c0deC1b1747d010903E884bE1"),
}
slice, err := testCfg.AsSlice()
require.NoError(t, err)
require.Equal(t, len(slice), 4)
require.Equal(t, slice[0].String(), testCfg.OptimismPortalProxy.String())
require.Equal(t, slice[1].String(), testCfg.L2OutputOracleProxy.String())
require.Equal(t, slice[2].String(), testCfg.L1CrossDomainMessengerProxy.String())
require.Equal(t, slice[3].String(), testCfg.L1StandardBridgeProxy.String())
}
......@@ -10,54 +10,44 @@ import (
var presetL1Contracts = map[int]L1Contracts{
// OP Mainnet
10: {
OptimismPortal: common.HexToAddress("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"),
L2OutputOracle: common.HexToAddress("0xdfe97868233d1aa22e815a266982f2cf17685a27"),
L1CrossDomainMessenger: common.HexToAddress("0x25ace71c97B33Cc4729CF772ae268934F7ab5fA1"),
L1StandardBridge: common.HexToAddress("0x99C9fc46f92E8a1c0deC1b1747d010903E884bE1"),
L1ERC721Bridge: common.HexToAddress("0x5a7749f83b81B301cAb5f48EB8516B986DAef23D"),
OptimismPortalProxy: common.HexToAddress("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"),
L2OutputOracleProxy: common.HexToAddress("0xdfe97868233d1aa22e815a266982f2cf17685a27"),
L1CrossDomainMessengerProxy: common.HexToAddress("0x25ace71c97B33Cc4729CF772ae268934F7ab5fA1"),
L1StandardBridgeProxy: common.HexToAddress("0x99C9fc46f92E8a1c0deC1b1747d010903E884bE1"),
},
// OP Goerli
420: {
OptimismPortal: common.HexToAddress("0x5b47E1A08Ea6d985D6649300584e6722Ec4B1383"),
L2OutputOracle: common.HexToAddress("0xE6Dfba0953616Bacab0c9A8ecb3a9BBa77FC15c0"),
L1CrossDomainMessenger: common.HexToAddress("0x5086d1eEF304eb5284A0f6720f79403b4e9bE294"),
L1StandardBridge: common.HexToAddress("0x636Af16bf2f682dD3109e60102b8E1A089FedAa8"),
L1ERC721Bridge: common.HexToAddress("0x8DD330DdE8D9898d43b4dc840Da27A07dF91b3c9"),
OptimismPortalProxy: common.HexToAddress("0x5b47E1A08Ea6d985D6649300584e6722Ec4B1383"),
L2OutputOracleProxy: common.HexToAddress("0xE6Dfba0953616Bacab0c9A8ecb3a9BBa77FC15c0"),
L1CrossDomainMessengerProxy: common.HexToAddress("0x5086d1eEF304eb5284A0f6720f79403b4e9bE294"),
L1StandardBridgeProxy: common.HexToAddress("0x636Af16bf2f682dD3109e60102b8E1A089FedAa8"),
},
// Base Mainnet
8453: {
OptimismPortal: common.HexToAddress("0x49048044D57e1C92A77f79988d21Fa8fAF74E97e"),
L2OutputOracle: common.HexToAddress("0x56315b90c40730925ec5485cf004d835058518A0"),
L1CrossDomainMessenger: common.HexToAddress("0x866E82a600A1414e583f7F13623F1aC5d58b0Afa"),
L1StandardBridge: common.HexToAddress("0x3154Cf16ccdb4C6d922629664174b904d80F2C35"),
// FIXME update this to the correct address
L1ERC721Bridge: common.HexToAddress("0x0000000000000000000000000000000000000000"),
OptimismPortalProxy: common.HexToAddress("0x49048044D57e1C92A77f79988d21Fa8fAF74E97e"),
L2OutputOracleProxy: common.HexToAddress("0x56315b90c40730925ec5485cf004d835058518A0"),
L1CrossDomainMessengerProxy: common.HexToAddress("0x866E82a600A1414e583f7F13623F1aC5d58b0Afa"),
L1StandardBridgeProxy: common.HexToAddress("0x3154Cf16ccdb4C6d922629664174b904d80F2C35"),
},
// Base Goerli
84531: {
OptimismPortal: common.HexToAddress("0xe93c8cD0D409341205A592f8c4Ac1A5fe5585cfA"),
L2OutputOracle: common.HexToAddress("0x2A35891ff30313CcFa6CE88dcf3858bb075A2298"),
L1CrossDomainMessenger: common.HexToAddress("0x8e5693140eA606bcEB98761d9beB1BC87383706D"),
L1StandardBridge: common.HexToAddress("0xfA6D8Ee5BE770F84FC001D098C4bD604Fe01284a"),
// FIXME update this to the correct address
L1ERC721Bridge: common.HexToAddress("0x0000000000000000000000000000000000000000"),
OptimismPortalProxy: common.HexToAddress("0xe93c8cD0D409341205A592f8c4Ac1A5fe5585cfA"),
L2OutputOracleProxy: common.HexToAddress("0x2A35891ff30313CcFa6CE88dcf3858bb075A2298"),
L1CrossDomainMessengerProxy: common.HexToAddress("0x8e5693140eA606bcEB98761d9beB1BC87383706D"),
L1StandardBridgeProxy: common.HexToAddress("0xfA6D8Ee5BE770F84FC001D098C4bD604Fe01284a"),
},
// Zora mainnet
7777777: {
OptimismPortal: common.HexToAddress("0x1a0ad011913A150f69f6A19DF447A0CfD9551054"),
L2OutputOracle: common.HexToAddress("0x9E6204F750cD866b299594e2aC9eA824E2e5f95c"),
L1CrossDomainMessenger: common.HexToAddress("0xdC40a14d9abd6F410226f1E6de71aE03441ca506"),
L1StandardBridge: common.HexToAddress("0x3e2Ea9B92B7E48A52296fD261dc26fd995284631"),
// FIXME update this to the correct address
L1ERC721Bridge: common.HexToAddress("0x0000000000000000000000000000000000000000"),
OptimismPortalProxy: common.HexToAddress("0x1a0ad011913A150f69f6A19DF447A0CfD9551054"),
L2OutputOracleProxy: common.HexToAddress("0x9E6204F750cD866b299594e2aC9eA824E2e5f95c"),
L1CrossDomainMessengerProxy: common.HexToAddress("0xdC40a14d9abd6F410226f1E6de71aE03441ca506"),
L1StandardBridgeProxy: common.HexToAddress("0x3e2Ea9B92B7E48A52296fD261dc26fd995284631"),
},
// Zora goerli
999: {
OptimismPortal: common.HexToAddress("0xDb9F51790365e7dc196e7D072728df39Be958ACe"),
L2OutputOracle: common.HexToAddress("0xdD292C9eEd00f6A32Ff5245d0BCd7f2a15f24e00"),
L1CrossDomainMessenger: common.HexToAddress("0xD87342e16352D33170557A7dA1e5fB966a60FafC"),
L1StandardBridge: common.HexToAddress("0x7CC09AC2452D6555d5e0C213Ab9E2d44eFbFc956"),
// FIXME update this to the correct address
L1ERC721Bridge: common.HexToAddress("0x0000000000000000000000000000000000000000"),
OptimismPortalProxy: common.HexToAddress("0xDb9F51790365e7dc196e7D072728df39Be958ACe"),
L2OutputOracleProxy: common.HexToAddress("0xdD292C9eEd00f6A32Ff5245d0BCd7f2a15f24e00"),
L1CrossDomainMessengerProxy: common.HexToAddress("0xD87342e16352D33170557A7dA1e5fB966a60FafC"),
L1StandardBridgeProxy: common.HexToAddress("0x7CC09AC2452D6555d5e0C213Ab9E2d44eFbFc956"),
},
}
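The code that applies these presets inside LoadConfig is not shown in the hunks above, but based on the tests in this diff (preset 420 copies the known addresses; an unknown preset yields "unknown preset: %d"), its behavior is presumably along these lines. This is a standalone sketch with trimmed-down stand-in types, not the actual code.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// stand-in for the real L1Contracts type, just to illustrate preset resolution
type l1Contracts struct{ OptimismPortalProxy common.Address }

var presets = map[int]l1Contracts{
	420: {OptimismPortalProxy: common.HexToAddress("0x5b47E1A08Ea6d985D6649300584e6722Ec4B1383")},
}

func resolvePreset(preset int, custom l1Contracts) (l1Contracts, error) {
	if preset == 0 {
		return custom, nil // no preset: use the explicitly configured addresses
	}
	known, ok := presets[preset]
	if !ok {
		return l1Contracts{}, fmt.Errorf("unknown preset: %d", preset)
	}
	return known, nil
}

func main() {
	contracts, err := resolvePreset(420, l1Contracts{})
	fmt.Println(contracts.OptimismPortalProxy, err)
}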
......@@ -18,9 +18,9 @@ import (
*/
type BlockHeader struct {
Hash common.Hash `gorm:"primaryKey;serializer:json"`
ParentHash common.Hash `gorm:"serializer:json"`
Number U256
Hash common.Hash `gorm:"primaryKey;serializer:bytes"`
ParentHash common.Hash `gorm:"serializer:bytes"`
Number *big.Int `gorm:"serializer:u256"`
Timestamp uint64
RLPHeader *RLPHeader `gorm:"serializer:rlp;column:rlp_bytes"`
......@@ -30,7 +30,7 @@ func BlockHeaderFromHeader(header *types.Header) BlockHeader {
return BlockHeader{
Hash: header.Hash(),
ParentHash: header.ParentHash,
Number: U256{Int: header.Number},
Number: header.Number,
Timestamp: header.Time,
RLPHeader: (*RLPHeader)(header),
......@@ -38,11 +38,11 @@ func BlockHeaderFromHeader(header *types.Header) BlockHeader {
}
type L1BlockHeader struct {
BlockHeader
BlockHeader `gorm:"embedded"`
}
type L2BlockHeader struct {
BlockHeader
BlockHeader `gorm:"embedded"`
}
type LegacyStateBatch struct {
......@@ -50,39 +50,43 @@ type LegacyStateBatch struct {
// violating the primary key constraint.
Index uint64 `gorm:"primaryKey;default:0"`
Root common.Hash `gorm:"serializer:json"`
Root common.Hash `gorm:"serializer:bytes"`
Size uint64
PrevTotal uint64
L1ContractEventGUID uuid.UUID
}
type OutputProposal struct {
OutputRoot common.Hash `gorm:"primaryKey;serializer:json"`
L2OutputIndex U256
L2BlockNumber U256
OutputRoot common.Hash `gorm:"primaryKey;serializer:bytes"`
L2OutputIndex *big.Int `gorm:"serializer:u256"`
L2BlockNumber *big.Int `gorm:"serializer:u256"`
L1ContractEventGUID uuid.UUID
}
type BlocksView interface {
L1BlockHeader(*big.Int) (*L1BlockHeader, error)
LatestL1BlockHeader() (*L1BlockHeader, error)
L1BlockHeader(common.Hash) (*L1BlockHeader, error)
L1BlockHeaderWithFilter(BlockHeader) (*L1BlockHeader, error)
L1LatestBlockHeader() (*L1BlockHeader, error)
L2BlockHeader(common.Hash) (*L2BlockHeader, error)
L2BlockHeaderWithFilter(BlockHeader) (*L2BlockHeader, error)
L2LatestBlockHeader() (*L2BlockHeader, error)
LatestCheckpointedOutput() (*OutputProposal, error)
OutputProposal(index *big.Int) (*OutputProposal, error)
L2BlockHeader(*big.Int) (*L2BlockHeader, error)
LatestL2BlockHeader() (*L2BlockHeader, error)
LatestEpoch() (*Epoch, error)
}
type BlocksDB interface {
BlocksView
StoreL1BlockHeaders([]*L1BlockHeader) error
StoreL2BlockHeaders([]*L2BlockHeader) error
StoreL1BlockHeaders([]L1BlockHeader) error
StoreL2BlockHeaders([]L2BlockHeader) error
StoreLegacyStateBatches([]*LegacyStateBatch) error
StoreOutputProposals([]*OutputProposal) error
StoreLegacyStateBatches([]LegacyStateBatch) error
StoreOutputProposals([]OutputProposal) error
}
/**
......@@ -99,36 +103,39 @@ func newBlocksDB(db *gorm.DB) BlocksDB {
// L1
func (db *blocksDB) StoreL1BlockHeaders(headers []*L1BlockHeader) error {
func (db *blocksDB) StoreL1BlockHeaders(headers []L1BlockHeader) error {
result := db.gorm.Create(&headers)
return result.Error
}
func (db *blocksDB) StoreLegacyStateBatches(stateBatches []*LegacyStateBatch) error {
func (db *blocksDB) StoreLegacyStateBatches(stateBatches []LegacyStateBatch) error {
result := db.gorm.Create(stateBatches)
return result.Error
}
func (db *blocksDB) StoreOutputProposals(outputs []*OutputProposal) error {
func (db *blocksDB) StoreOutputProposals(outputs []OutputProposal) error {
result := db.gorm.Create(outputs)
return result.Error
}
func (db *blocksDB) L1BlockHeader(height *big.Int) (*L1BlockHeader, error) {
func (db *blocksDB) L1BlockHeader(hash common.Hash) (*L1BlockHeader, error) {
return db.L1BlockHeaderWithFilter(BlockHeader{Hash: hash})
}
func (db *blocksDB) L1BlockHeaderWithFilter(filter BlockHeader) (*L1BlockHeader, error) {
var l1Header L1BlockHeader
result := db.gorm.Where(&BlockHeader{Number: U256{Int: height}}).Take(&l1Header)
result := db.gorm.Where(&filter).Take(&l1Header)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l1Header, nil
}
func (db *blocksDB) LatestL1BlockHeader() (*L1BlockHeader, error) {
func (db *blocksDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
var l1Header L1BlockHeader
result := db.gorm.Order("number DESC").Take(&l1Header)
if result.Error != nil {
......@@ -158,7 +165,7 @@ func (db *blocksDB) LatestCheckpointedOutput() (*OutputProposal, error) {
func (db *blocksDB) OutputProposal(index *big.Int) (*OutputProposal, error) {
var outputProposal OutputProposal
result := db.gorm.Where(&OutputProposal{L2OutputIndex: U256{Int: index}}).Take(&outputProposal)
result := db.gorm.Where(&OutputProposal{L2OutputIndex: index}).Take(&outputProposal)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......@@ -172,36 +179,71 @@ func (db *blocksDB) OutputProposal(index *big.Int) (*OutputProposal, error) {
// L2
func (db *blocksDB) StoreL2BlockHeaders(headers []*L2BlockHeader) error {
func (db *blocksDB) StoreL2BlockHeaders(headers []L2BlockHeader) error {
result := db.gorm.Create(&headers)
return result.Error
}
func (db *blocksDB) L2BlockHeader(height *big.Int) (*L2BlockHeader, error) {
func (db *blocksDB) L2BlockHeader(hash common.Hash) (*L2BlockHeader, error) {
return db.L2BlockHeaderWithFilter(BlockHeader{Hash: hash})
}
func (db *blocksDB) L2BlockHeaderWithFilter(filter BlockHeader) (*L2BlockHeader, error) {
var l2Header L2BlockHeader
result := db.gorm.Where(&BlockHeader{Number: U256{Int: height}}).Take(&l2Header)
result := db.gorm.Where(&filter).Take(&l2Header)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l2Header, nil
}
func (db *blocksDB) LatestL2BlockHeader() (*L2BlockHeader, error) {
func (db *blocksDB) L2LatestBlockHeader() (*L2BlockHeader, error) {
var l2Header L2BlockHeader
result := db.gorm.Order("number DESC").Take(&l2Header)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
result.Logger.Info(context.Background(), "number ", l2Header.Number)
return &l2Header, nil
}
// Auxiliary Methods on both L1 & L2
type Epoch struct {
L1BlockHeader L1BlockHeader `gorm:"embedded"`
L2BlockHeader L2BlockHeader `gorm:"embedded"`
}
// LatestEpoch returns the latest epoch, as seen on both L1 & L2. In other words
// this returns the latest indexed L1 block that has a corresponding
// indexed L2 block with a matching L1Origin (equal timestamps).
//
// For more, see the protocol spec:
// - https://github.com/ethereum-optimism/optimism/blob/develop/specs/derivation.md
func (db *blocksDB) LatestEpoch() (*Epoch, error) {
// Since L1 blocks occur less frequently than L2, we do an INNER JOIN from L1 on
// L2 for a faster query. Per the protocol, the L2 block that starts a new epoch
// will have a matching timestamp with the L1 origin.
query := db.gorm.Table("l1_block_headers").Order("l1_block_headers.timestamp DESC")
query = query.Joins("INNER JOIN l2_block_headers ON l1_block_headers.timestamp = l2_block_headers.timestamp")
query = query.Select("*")
var epoch Epoch
result := query.Take(&epoch)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &epoch, nil
}
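For readers less familiar with GORM's builder, the chained calls above translate to roughly the following SQL. This is a sketch of what GORM generates, assuming its default snake_case pluralized table names; Take contributes the LIMIT 1.

// approximate SQL produced by the LatestEpoch query above
const latestEpochSQL = `
SELECT *
FROM l1_block_headers
INNER JOIN l2_block_headers ON l1_block_headers.timestamp = l2_block_headers.timestamp
ORDER BY l1_block_headers.timestamp DESC
LIMIT 1`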
......@@ -17,43 +17,41 @@ import (
*/
type BridgeMessage struct {
Nonce U256 `gorm:"primaryKey"`
MessageHash common.Hash `gorm:"serializer:json"`
MessageHash common.Hash `gorm:"primaryKey;serializer:bytes"`
Nonce *big.Int `gorm:"serializer:u256"`
SentMessageEventGUID uuid.UUID
RelayedMessageEventGUID *uuid.UUID
Tx Transaction `gorm:"embedded"`
GasLimit U256
GasLimit *big.Int `gorm:"serializer:u256"`
}
type L1BridgeMessage struct {
BridgeMessage `gorm:"embedded"`
TransactionSourceHash common.Hash `gorm:"serializer:json"`
TransactionSourceHash common.Hash `gorm:"serializer:bytes"`
}
type L2BridgeMessage struct {
BridgeMessage `gorm:"embedded"`
TransactionWithdrawalHash common.Hash `gorm:"serializer:json"`
TransactionWithdrawalHash common.Hash `gorm:"serializer:bytes"`
}
type BridgeMessagesView interface {
L1BridgeMessage(*big.Int) (*L1BridgeMessage, error)
L1BridgeMessageByHash(common.Hash) (*L1BridgeMessage, error)
LatestL1BridgeMessageNonce() (*big.Int, error)
L1BridgeMessage(common.Hash) (*L1BridgeMessage, error)
L1BridgeMessageWithFilter(BridgeMessage) (*L1BridgeMessage, error)
L2BridgeMessage(*big.Int) (*L2BridgeMessage, error)
L2BridgeMessageByHash(common.Hash) (*L2BridgeMessage, error)
LatestL2BridgeMessageNonce() (*big.Int, error)
L2BridgeMessage(common.Hash) (*L2BridgeMessage, error)
L2BridgeMessageWithFilter(BridgeMessage) (*L2BridgeMessage, error)
}
type BridgeMessagesDB interface {
BridgeMessagesView
StoreL1BridgeMessages([]*L1BridgeMessage) error
StoreL1BridgeMessages([]L1BridgeMessage) error
MarkRelayedL1BridgeMessage(common.Hash, uuid.UUID) error
StoreL2BridgeMessages([]*L2BridgeMessage) error
StoreL2BridgeMessages([]L2BridgeMessage) error
MarkRelayedL2BridgeMessage(common.Hash, uuid.UUID) error
}
......@@ -73,27 +71,18 @@ func newBridgeMessagesDB(db *gorm.DB) BridgeMessagesDB {
* Arbitrary Messages Sent from L1
*/
func (db bridgeMessagesDB) StoreL1BridgeMessages(messages []*L1BridgeMessage) error {
func (db bridgeMessagesDB) StoreL1BridgeMessages(messages []L1BridgeMessage) error {
result := db.gorm.Create(&messages)
return result.Error
}
func (db bridgeMessagesDB) L1BridgeMessage(nonce *big.Int) (*L1BridgeMessage, error) {
var sentMessage L1BridgeMessage
result := db.gorm.Where(&BridgeMessage{Nonce: U256{Int: nonce}}).Take(&sentMessage)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &sentMessage, nil
func (db bridgeMessagesDB) L1BridgeMessage(msgHash common.Hash) (*L1BridgeMessage, error) {
return db.L1BridgeMessageWithFilter(BridgeMessage{MessageHash: msgHash})
}
func (db bridgeMessagesDB) L1BridgeMessageByHash(messageHash common.Hash) (*L1BridgeMessage, error) {
func (db bridgeMessagesDB) L1BridgeMessageWithFilter(filter BridgeMessage) (*L1BridgeMessage, error) {
var sentMessage L1BridgeMessage
result := db.gorm.Where(&BridgeMessage{MessageHash: messageHash}).Take(&sentMessage)
result := db.gorm.Where(&filter).Take(&sentMessage)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......@@ -104,25 +93,8 @@ func (db bridgeMessagesDB) L1BridgeMessageByHash(messageHash common.Hash) (*L1Br
return &sentMessage, nil
}
func (db bridgeMessagesDB) LatestL1BridgeMessageNonce() (*big.Int, error) {
var sentMessage L1BridgeMessage
result := db.gorm.Order("nonce DESC").Take(&sentMessage)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return sentMessage.Nonce.Int, nil
}
/**
* Arbitrary Messages Sent from L2
*/
func (db bridgeMessagesDB) MarkRelayedL1BridgeMessage(messageHash common.Hash, relayEvent uuid.UUID) error {
message, err := db.L1BridgeMessageByHash(messageHash)
message, err := db.L1BridgeMessage(messageHash)
if err != nil {
return err
} else if message == nil {
......@@ -134,27 +106,22 @@ func (db bridgeMessagesDB) MarkRelayedL1BridgeMessage(messageHash common.Hash, r
return result.Error
}
func (db bridgeMessagesDB) StoreL2BridgeMessages(messages []*L2BridgeMessage) error {
/**
* Arbitrary Messages Sent from L2
*/
func (db bridgeMessagesDB) StoreL2BridgeMessages(messages []L2BridgeMessage) error {
result := db.gorm.Create(&messages)
return result.Error
}
func (db bridgeMessagesDB) L2BridgeMessage(nonce *big.Int) (*L2BridgeMessage, error) {
var sentMessage L2BridgeMessage
result := db.gorm.Where(&BridgeMessage{Nonce: U256{Int: nonce}}).Take(&sentMessage)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &sentMessage, nil
func (db bridgeMessagesDB) L2BridgeMessage(msgHash common.Hash) (*L2BridgeMessage, error) {
return db.L2BridgeMessageWithFilter(BridgeMessage{MessageHash: msgHash})
}
func (db bridgeMessagesDB) L2BridgeMessageByHash(messageHash common.Hash) (*L2BridgeMessage, error) {
func (db bridgeMessagesDB) L2BridgeMessageWithFilter(filter BridgeMessage) (*L2BridgeMessage, error) {
var sentMessage L2BridgeMessage
result := db.gorm.Where(&BridgeMessage{MessageHash: messageHash}).Take(&sentMessage)
result := db.gorm.Where(&filter).Take(&sentMessage)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
......@@ -165,21 +132,8 @@ func (db bridgeMessagesDB) L2BridgeMessageByHash(messageHash common.Hash) (*L2Br
return &sentMessage, nil
}
func (db bridgeMessagesDB) LatestL2BridgeMessageNonce() (*big.Int, error) {
var sentMessage L2BridgeMessage
result := db.gorm.Order("nonce DESC").Take(&sentMessage)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return sentMessage.Nonce.Int, nil
}
func (db bridgeMessagesDB) MarkRelayedL2BridgeMessage(messageHash common.Hash, relayEvent uuid.UUID) error {
message, err := db.L2BridgeMessageByHash(messageHash)
message, err := db.L2BridgeMessage(messageHash)
if err != nil {
return err
} else if message == nil {
......
......@@ -3,12 +3,12 @@ package database
import (
"errors"
"fmt"
"math/big"
"github.com/google/uuid"
"gorm.io/gorm"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
)
/**
......@@ -16,30 +16,25 @@ import (
*/
type Transaction struct {
FromAddress common.Address `gorm:"serializer:json"`
ToAddress common.Address `gorm:"serializer:json"`
Amount U256
Data hexutil.Bytes `gorm:"serializer:json"`
FromAddress common.Address `gorm:"serializer:bytes"`
ToAddress common.Address `gorm:"serializer:bytes"`
Amount *big.Int `gorm:"serializer:u256"`
Data Bytes `gorm:"serializer:bytes"`
Timestamp uint64
}
type L1TransactionDeposit struct {
SourceHash common.Hash `gorm:"serializer:json;primaryKey"`
L2TransactionHash common.Hash `gorm:"serializer:json"`
SourceHash common.Hash `gorm:"serializer:bytes;primaryKey"`
L2TransactionHash common.Hash `gorm:"serializer:bytes"`
InitiatedL1EventGUID uuid.UUID
Version U256
OpaqueData hexutil.Bytes `gorm:"serializer:json"`
Tx Transaction `gorm:"embedded"`
GasLimit U256
GasLimit *big.Int `gorm:"serializer:u256"`
}
type L2TransactionWithdrawal struct {
WithdrawalHash common.Hash `gorm:"serializer:json;primaryKey"`
Nonce U256
WithdrawalHash common.Hash `gorm:"serializer:bytes;primaryKey"`
Nonce *big.Int `gorm:"serializer:u256"`
InitiatedL2EventGUID uuid.UUID
ProvenL1EventGUID *uuid.UUID
......@@ -47,7 +42,7 @@ type L2TransactionWithdrawal struct {
Succeeded *bool
Tx Transaction `gorm:"embedded"`
GasLimit U256
GasLimit *big.Int `gorm:"serializer:u256"`
}
type BridgeTransactionsView interface {
......@@ -58,9 +53,9 @@ type BridgeTransactionsView interface {
type BridgeTransactionsDB interface {
BridgeTransactionsView
StoreL1TransactionDeposits([]*L1TransactionDeposit) error
StoreL1TransactionDeposits([]L1TransactionDeposit) error
StoreL2TransactionWithdrawals([]*L2TransactionWithdrawal) error
StoreL2TransactionWithdrawals([]L2TransactionWithdrawal) error
MarkL2TransactionWithdrawalProvenEvent(common.Hash, uuid.UUID) error
MarkL2TransactionWithdrawalFinalizedEvent(common.Hash, uuid.UUID, bool) error
}
......@@ -81,7 +76,7 @@ func newBridgeTransactionsDB(db *gorm.DB) BridgeTransactionsDB {
* Transactions deposited from L1
*/
func (db *bridgeTransactionsDB) StoreL1TransactionDeposits(deposits []*L1TransactionDeposit) error {
func (db *bridgeTransactionsDB) StoreL1TransactionDeposits(deposits []L1TransactionDeposit) error {
result := db.gorm.Create(&deposits)
return result.Error
}
......@@ -103,7 +98,7 @@ func (db *bridgeTransactionsDB) L1TransactionDeposit(sourceHash common.Hash) (*L
* Transactions withdrawn from L2
*/
func (db *bridgeTransactionsDB) StoreL2TransactionWithdrawals(withdrawals []*L2TransactionWithdrawal) error {
func (db *bridgeTransactionsDB) StoreL2TransactionWithdrawals(withdrawals []L2TransactionWithdrawal) error {
result := db.gorm.Create(&withdrawals)
return result.Error
}
......
This diff is collapsed.
......@@ -2,6 +2,7 @@ package database
import (
"errors"
"fmt"
"math/big"
"gorm.io/gorm"
......@@ -20,12 +21,12 @@ type ContractEvent struct {
GUID uuid.UUID `gorm:"primaryKey"`
// Some useful derived fields
BlockHash common.Hash `gorm:"serializer:json"`
ContractAddress common.Address `gorm:"serializer:json"`
TransactionHash common.Hash `gorm:"serializer:json"`
BlockHash common.Hash `gorm:"serializer:bytes"`
ContractAddress common.Address `gorm:"serializer:bytes"`
TransactionHash common.Hash `gorm:"serializer:bytes"`
LogIndex uint64
EventSignature common.Hash `gorm:"serializer:json"`
EventSignature common.Hash `gorm:"serializer:bytes"`
Timestamp uint64
// NOTE: NOT ALL THE DERIVED FIELDS ON `types.Log` ARE
......@@ -74,19 +75,23 @@ type L2ContractEvent struct {
type ContractEventsView interface {
L1ContractEvent(uuid.UUID) (*L1ContractEvent, error)
L1ContractEventByTxLogIndex(common.Hash, uint64) (*L1ContractEvent, error)
L1ContractEventWithFilter(ContractEvent) (*L1ContractEvent, error)
L1ContractEventsWithFilter(ContractEvent, *big.Int, *big.Int) ([]L1ContractEvent, error)
L1LatestContractEventWithFilter(ContractEvent) (*L1ContractEvent, error)
L2ContractEvent(uuid.UUID) (*L2ContractEvent, error)
L2ContractEventByTxLogIndex(common.Hash, uint64) (*L2ContractEvent, error)
L2ContractEventWithFilter(ContractEvent) (*L2ContractEvent, error)
L2ContractEventsWithFilter(ContractEvent, *big.Int, *big.Int) ([]L2ContractEvent, error)
L2LatestContractEventWithFilter(ContractEvent) (*L2ContractEvent, error)
ContractEventsWithFilter(ContractEvent, string, *big.Int, *big.Int) ([]ContractEvent, error)
}
type ContractEventsDB interface {
ContractEventsView
StoreL1ContractEvents([]*L1ContractEvent) error
StoreL2ContractEvents([]*L2ContractEvent) error
StoreL1ContractEvents([]L1ContractEvent) error
StoreL2ContractEvents([]L2ContractEvent) error
}
/**
......@@ -103,33 +108,22 @@ func newContractEventsDB(db *gorm.DB) ContractEventsDB {
// L1
func (db *contractEventsDB) StoreL1ContractEvents(events []*L1ContractEvent) error {
func (db *contractEventsDB) StoreL1ContractEvents(events []L1ContractEvent) error {
result := db.gorm.Create(&events)
return result.Error
}
func (db *contractEventsDB) L1ContractEvent(uuid uuid.UUID) (*L1ContractEvent, error) {
var l1ContractEvent L1ContractEvent
result := db.gorm.Where(&ContractEvent{GUID: uuid}).Take(&l1ContractEvent)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l1ContractEvent, nil
return db.L1ContractEventWithFilter(ContractEvent{GUID: uuid})
}
func (db *contractEventsDB) L1ContractEventByTxLogIndex(txHash common.Hash, logIndex uint64) (*L1ContractEvent, error) {
func (db *contractEventsDB) L1ContractEventWithFilter(filter ContractEvent) (*L1ContractEvent, error) {
var l1ContractEvent L1ContractEvent
result := db.gorm.Where(&ContractEvent{TransactionHash: txHash, LogIndex: logIndex}).Take(&l1ContractEvent)
result := db.gorm.Where(&filter).Take(&l1ContractEvent)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
......@@ -140,6 +134,12 @@ func (db *contractEventsDB) L1ContractEventsWithFilter(filter ContractEvent, fro
if fromHeight == nil {
fromHeight = big.NewInt(0)
}
if toHeight == nil {
return nil, errors.New("end height unspecified")
}
if fromHeight.Cmp(toHeight) > 0 {
return nil, fmt.Errorf("fromHeight %d is greater than toHeight %d", fromHeight, toHeight)
}
query := db.gorm.Table("l1_contract_events").Where(&filter)
query = query.Joins("INNER JOIN l1_block_headers ON l1_contract_events.block_hash = l1_block_headers.hash")
......@@ -160,35 +160,37 @@ func (db *contractEventsDB) L1ContractEventsWithFilter(filter ContractEvent, fro
return events, nil
}
// L2
func (db *contractEventsDB) StoreL2ContractEvents(events []*L2ContractEvent) error {
result := db.gorm.Create(&events)
return result.Error
}
func (db *contractEventsDB) L2ContractEvent(uuid uuid.UUID) (*L2ContractEvent, error) {
var l2ContractEvent L2ContractEvent
result := db.gorm.Where(&ContractEvent{GUID: uuid}).Take(&l2ContractEvent)
func (db *contractEventsDB) L1LatestContractEventWithFilter(filter ContractEvent) (*L1ContractEvent, error) {
var l1ContractEvent L1ContractEvent
result := db.gorm.Where(&filter).Order("timestamp DESC").Take(&l1ContractEvent)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l2ContractEvent, nil
return &l1ContractEvent, nil
}
func (db *contractEventsDB) L2ContractEventByTxLogIndex(txHash common.Hash, logIndex uint64) (*L2ContractEvent, error) {
// L2
func (db *contractEventsDB) StoreL2ContractEvents(events []L2ContractEvent) error {
result := db.gorm.Create(&events)
return result.Error
}
func (db *contractEventsDB) L2ContractEvent(uuid uuid.UUID) (*L2ContractEvent, error) {
return db.L2ContractEventWithFilter(ContractEvent{GUID: uuid})
}
func (db *contractEventsDB) L2ContractEventWithFilter(filter ContractEvent) (*L2ContractEvent, error) {
var l2ContractEvent L2ContractEvent
result := db.gorm.Where(&ContractEvent{TransactionHash: txHash, LogIndex: logIndex}).Take(&l2ContractEvent)
result := db.gorm.Where(&filter).Take(&l2ContractEvent)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
......@@ -199,6 +201,12 @@ func (db *contractEventsDB) L2ContractEventsWithFilter(filter ContractEvent, fro
if fromHeight == nil {
fromHeight = big.NewInt(0)
}
if toHeight == nil {
return nil, errors.New("end height unspecified")
}
if fromHeight.Cmp(toHeight) > 0 {
return nil, fmt.Errorf("fromHeight %d is greater than toHeight %d", fromHeight, toHeight)
}
query := db.gorm.Table("l2_contract_events").Where(&filter)
query = query.Joins("INNER JOIN l2_block_headers ON l2_contract_events.block_hash = l2_block_headers.hash")
......@@ -218,3 +226,46 @@ func (db *contractEventsDB) L2ContractEventsWithFilter(filter ContractEvent, fro
return events, nil
}
func (db *contractEventsDB) L2LatestContractEventWithFilter(filter ContractEvent) (*L2ContractEvent, error) {
var l2ContractEvent L2ContractEvent
result := db.gorm.Where(&filter).Order("timestamp DESC").Take(&l2ContractEvent)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
return &l2ContractEvent, nil
}
// Auxiliary methods for both L1 and L2
// ContractEventsWithFilter will retrieve contract events within the specified range according to the `chainSelector`.
func (db *contractEventsDB) ContractEventsWithFilter(filter ContractEvent, chainSelector string, fromHeight, toHeight *big.Int) ([]ContractEvent, error) {
switch chainSelector {
case "l1":
l1Events, err := db.L1ContractEventsWithFilter(filter, fromHeight, toHeight)
if err != nil {
return nil, err
}
events := make([]ContractEvent, len(l1Events))
for i := range l1Events {
events[i] = l1Events[i].ContractEvent
}
return events, nil
case "l2":
l2Events, err := db.L2ContractEventsWithFilter(filter, fromHeight, toHeight)
if err != nil {
return nil, err
}
events := make([]ContractEvent, len(l2Events))
for i := range l2Events {
events[i] = l2Events[i].ContractEvent
}
return events, nil
default:
return nil, errors.New("expected 'l1' or 'l2' for chain selection")
}
}
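A caller-side sketch of the chain-selector helper above. The accessor `db.ContractEvents` and the `portalAddress` variable are assumptions for illustration; the height bounds follow the rule enforced above that toHeight must be non-nil and not less than fromHeight.

// sketch: fetch OptimismPortal events from a 100-block L1 range
filter := database.ContractEvent{ContractAddress: portalAddress}
events, err := db.ContractEvents.ContractEventsWithFilter(filter, "l1", big.NewInt(17_000_000), big.NewInt(17_000_100))
if err != nil {
	return err
}
for _, event := range events {
	fmt.Println(event.TransactionHash, event.LogIndex)
}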
......@@ -5,6 +5,8 @@ import (
"fmt"
"github.com/ethereum-optimism/optimism/indexer/config"
_ "github.com/ethereum-optimism/optimism/indexer/database/serializers"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
......
package database
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/mock"
)
type MockBlocksView struct {
mock.Mock
}
func (m *MockBlocksView) L1BlockHeader(common.Hash) (*L1BlockHeader, error) {
args := m.Called()
header, ok := args.Get(0).(*L1BlockHeader)
if !ok {
header = nil
}
return header, args.Error(1)
}
func (m *MockBlocksView) L1BlockHeaderWithFilter(BlockHeader) (*L1BlockHeader, error) {
args := m.Called()
return args.Get(0).(*L1BlockHeader), args.Error(1)
}
func (m *MockBlocksView) L1LatestBlockHeader() (*L1BlockHeader, error) {
args := m.Called()
header, ok := args.Get(0).(*L1BlockHeader)
if !ok {
header = nil
}
return header, args.Error(1)
}
func (m *MockBlocksView) L2BlockHeader(common.Hash) (*L2BlockHeader, error) {
args := m.Called()
return args.Get(0).(*L2BlockHeader), args.Error(1)
}
func (m *MockBlocksView) L2BlockHeaderWithFilter(BlockHeader) (*L2BlockHeader, error) {
args := m.Called()
return args.Get(0).(*L2BlockHeader), args.Error(1)
}
func (m *MockBlocksView) L2LatestBlockHeader() (*L2BlockHeader, error) {
args := m.Called()
return args.Get(0).(*L2BlockHeader), args.Error(1)
}
func (m *MockBlocksView) LatestCheckpointedOutput() (*OutputProposal, error) {
args := m.Called()
return args.Get(0).(*OutputProposal), args.Error(1)
}
func (m *MockBlocksView) OutputProposal(index *big.Int) (*OutputProposal, error) {
args := m.Called()
return args.Get(0).(*OutputProposal), args.Error(1)
}
func (m *MockBlocksView) LatestEpoch() (*Epoch, error) {
args := m.Called()
return args.Get(0).(*Epoch), args.Error(1)
}
type MockBlocksDB struct {
MockBlocksView
}
func (m *MockBlocksDB) StoreL1BlockHeaders(headers []L1BlockHeader) error {
args := m.Called(headers)
return args.Error(1)
}
func (m *MockBlocksDB) StoreL2BlockHeaders(headers []L2BlockHeader) error {
args := m.Called(headers)
return args.Error(1)
}
func (m *MockBlocksDB) StoreLegacyStateBatches(headers []LegacyStateBatch) error {
args := m.Called(headers)
return args.Error(1)
}
func (m *MockBlocksDB) StoreOutputProposals(headers []OutputProposal) error {
args := m.Called(headers)
return args.Error(1)
}
// MockDB is a mock database that can be used for testing
type MockDB struct {
MockBlocks *MockBlocksDB
DB *DB
}
func NewMockDB() *MockDB {
// This is currently just mocking the BlocksDB interface
// but can be expanded to mock other inner DB interfaces
// as well
mockBlocks := new(MockBlocksDB)
db := &DB{Blocks: mockBlocks}
return &MockDB{MockBlocks: mockBlocks, DB: db}
}
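A hedged sketch of how NewMockDB could be wired into a unit test, using testify's expectation API on the embedded mock. The test name, asserted values, and the indexer/database import path are illustrative assumptions.

package indexer_test

import (
	"testing"

	"github.com/ethereum-optimism/optimism/indexer/database"
	"github.com/stretchr/testify/require"
)

func TestUsesMockDB(t *testing.T) {
	mockDB := database.NewMockDB()

	// program the mock: the latest L1 header lookup returns an empty header with no error
	mockDB.MockBlocks.On("L1LatestBlockHeader").Return(&database.L1BlockHeader{}, nil)

	header, err := mockDB.DB.Blocks.L1LatestBlockHeader()
	require.NoError(t, err)
	require.NotNil(t, header)
	mockDB.MockBlocks.AssertExpectations(t)
}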
package serializers
import (
"context"
"fmt"
"reflect"
"github.com/ethereum/go-ethereum/common/hexutil"
"gorm.io/gorm/schema"
)
type BytesSerializer struct{}
type BytesInterface interface{ Bytes() []byte }
type SetBytesInterface interface{ SetBytes([]byte) }
func init() {
schema.RegisterSerializer("bytes", BytesSerializer{})
}
func (BytesSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
if dbValue == nil {
return nil
}
hexStr, ok := dbValue.(string)
if !ok {
return fmt.Errorf("expected hex string as the database value: %T", dbValue)
}
b, err := hexutil.Decode(hexStr)
if err != nil {
return fmt.Errorf("failed to decode database value: %w", err)
}
fieldValue := reflect.New(field.FieldType)
fieldInterface := fieldValue.Interface()
// Detect if we're deserializing into a pointer. If so, we'll need to
// also allocate memory to where the allocated pointer should point to
if field.FieldType.Kind() == reflect.Pointer {
nestedField := fieldValue.Elem()
if nestedField.Elem().Kind() == reflect.Pointer {
return fmt.Errorf("double pointers are the max depth supported: %T", fieldValue)
}
// We'll want to call `SetBytes` on the pointer to
// the allocated memory and not the double pointer
nestedField.Set(reflect.New(field.FieldType.Elem()))
fieldInterface = nestedField.Interface()
}
fieldSetBytes, ok := fieldInterface.(SetBytesInterface)
if !ok {
return fmt.Errorf("field does not satisfy the `SetBytes([]byte)` interface: %T", fieldInterface)
}
fieldSetBytes.SetBytes(b)
field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
return nil
}
func (BytesSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
if fieldValue == nil || (field.FieldType.Kind() == reflect.Pointer && reflect.ValueOf(fieldValue).IsNil()) {
return nil, nil
}
fieldBytes, ok := fieldValue.(BytesInterface)
if !ok {
return nil, fmt.Errorf("field does not satisfy the `Bytes() []byte` interface")
}
hexStr := hexutil.Encode(fieldBytes.Bytes())
return hexStr, nil
}
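To make the storage format concrete: the `bytes` serializer accepts any field type exposing Bytes()/SetBytes([]byte) — common.Hash, common.Address, or the Bytes alias defined later in this diff — and persists it as a 0x-prefixed hex string. A standalone sketch of the round trip, using go-ethereum's hexutil directly rather than going through GORM:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	// Value(): what gets written to the column for a common.Hash field
	h := common.HexToHash("0x01")
	stored := hexutil.Encode(h.Bytes())
	fmt.Println(stored) // 0x0000...0001 (0x prefix + 64 hex chars)

	// Scan(): decode the hex string and SetBytes back into the field
	raw, err := hexutil.Decode(stored)
	if err != nil {
		panic(err)
	}
	var roundTripped common.Hash
	roundTripped.SetBytes(raw)
	fmt.Println(roundTripped == h) // true
}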
package database
package serializers
import (
"context"
......@@ -13,38 +13,28 @@ import (
type RLPSerializer struct{}
type RLPInterface interface {
rlp.Encoder
rlp.Decoder
}
func init() {
schema.RegisterSerializer("rlp", RLPSerializer{})
}
func (RLPSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
fieldValue := reflect.New(field.FieldType)
if dbValue != nil {
var bytes []byte
switch v := dbValue.(type) {
case []byte:
bytes = v
case string:
b, err := hexutil.Decode(v)
if err != nil {
return err
if dbValue == nil {
return nil
}
bytes = b
default:
return fmt.Errorf("unrecognized RLP bytes: %#v", dbValue)
hexStr, ok := dbValue.(string)
if !ok {
return fmt.Errorf("expected hex string as the database value: %T", dbValue)
}
if len(bytes) > 0 {
err := rlp.DecodeBytes(bytes, fieldValue.Interface())
b, err := hexutil.Decode(hexStr)
if err != nil {
return err
}
return fmt.Errorf("failed to decode database value: %w", err)
}
fieldValue := reflect.New(field.FieldType)
if err := rlp.DecodeBytes(b, fieldValue.Interface()); err != nil {
return fmt.Errorf("failed to decode rlp bytes: %w", err)
}
field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
......@@ -52,18 +42,15 @@ func (RLPSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.
}
func (RLPSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
// Even though rlp.Encode takes an interface and will error out if the passed interface does not
// satisfy the interface, we check here since we also want to make sure this type satisfies the
// rlp.Decoder interface as well
i := reflect.TypeOf(new(RLPInterface)).Elem()
if !reflect.TypeOf(fieldValue).Implements(i) {
return nil, fmt.Errorf("%T does not satisfy RLP Encoder & Decoder interface", fieldValue)
if fieldValue == nil || (field.FieldType.Kind() == reflect.Pointer && reflect.ValueOf(fieldValue).IsNil()) {
return nil, nil
}
rlpBytes, err := rlp.EncodeToBytes(fieldValue)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to encode rlp bytes: %w", err)
}
return hexutil.Bytes(rlpBytes).MarshalText()
hexStr := hexutil.Encode(rlpBytes)
return hexStr, nil
}
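Similarly, the rlp serializer stores hex-encoded RLP bytes and decodes them back into the field on read. A standalone sanity-check sketch using go-ethereum directly, rather than the indexer's RLPHeader wrapper:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	header := &types.Header{Number: big.NewInt(100), Time: 1690000000}

	// Value(): rlp-encode the field and store it as a hex string in the rlp_bytes column
	enc, err := rlp.EncodeToBytes(header)
	if err != nil {
		panic(err)
	}
	stored := hexutil.Encode(enc)

	// Scan(): decode the hex string, then rlp-decode into a freshly allocated header
	raw, err := hexutil.Decode(stored)
	if err != nil {
		panic(err)
	}
	decoded := new(types.Header)
	if err := rlp.DecodeBytes(raw, decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Number, decoded.Time) // 100 1690000000
}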
package serializers
import (
"context"
"fmt"
"math/big"
"reflect"
"github.com/jackc/pgtype"
"gorm.io/gorm/schema"
)
var (
big10 = big.NewInt(10)
u256BigIntOverflow = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
)
type U256Serializer struct{}
func init() {
schema.RegisterSerializer("u256", U256Serializer{})
}
func (U256Serializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
if dbValue == nil {
return nil
} else if field.FieldType != reflect.TypeOf((*big.Int)(nil)) {
return fmt.Errorf("can only deserialize into a *big.Int: %T", field.FieldType)
}
numeric := new(pgtype.Numeric)
err := numeric.Scan(dbValue)
if err != nil {
return err
}
bigInt := numeric.Int
if numeric.Exp > 0 {
factor := new(big.Int).Exp(big10, big.NewInt(int64(numeric.Exp)), nil)
bigInt.Mul(bigInt, factor)
}
if bigInt.Cmp(u256BigIntOverflow) >= 0 {
return fmt.Errorf("deserialized number larger than u256 can hold: %s", bigInt)
}
field.ReflectValueOf(ctx, dst).Set(reflect.ValueOf(bigInt))
return nil
}
func (U256Serializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
if fieldValue == nil || (field.FieldType.Kind() == reflect.Pointer && reflect.ValueOf(fieldValue).IsNil()) {
return nil, nil
} else if field.FieldType != reflect.TypeOf((*big.Int)(nil)) {
return nil, fmt.Errorf("can only serialize a *big.Int: %T", field.FieldType)
}
numeric := pgtype.Numeric{Int: fieldValue.(*big.Int), Status: pgtype.Present}
return numeric.Value()
}
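The u256 serializer's Scan logic above reconstructs the big.Int from pgtype.Numeric's (Int, Exp) pair — the stored value is Int * 10^Exp — and rejects anything that does not fit in 256 bits. A small standalone sketch of that arithmetic:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	overflow := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)

	// pgtype.Numeric stores e.g. 15000 as Int=15, Exp=3; scale it back up
	numInt, exp := big.NewInt(15), int64(3)
	value := new(big.Int).Mul(numInt, new(big.Int).Exp(big.NewInt(10), big.NewInt(exp), nil))

	fmt.Println(value)                   // 15000
	fmt.Println(value.Cmp(overflow) < 0) // true: fits within u256
}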
package database
import (
"database/sql/driver"
"errors"
"io"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/jackc/pgtype"
)
var u256BigIntOverflow = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
var big10 = big.NewInt(10)
var ErrU256Overflow = errors.New("number exceeds u256")
var ErrU256ContainsDecimal = errors.New("number contains fractional digits")
var ErrU256Null = errors.New("number cannot be null")
// U256 is a wrapper over big.Int that conforms to the database U256 numeric domain type
type U256 struct {
Int *big.Int
}
// Scan implements the database/sql Scanner interface.
func (u256 *U256) Scan(src interface{}) error {
// deserialize as a numeric
var numeric pgtype.Numeric
err := numeric.Scan(src)
if err != nil {
return err
} else if numeric.Exp < 0 {
return ErrU256ContainsDecimal
} else if numeric.Status == pgtype.Null {
return ErrU256Null
}
// factor in the powers of 10
num := numeric.Int
if numeric.Exp > 0 {
factor := new(big.Int).Exp(big10, big.NewInt(int64(numeric.Exp)), nil)
num.Mul(num, factor)
}
// check bounds before setting the u256
if num.Cmp(u256BigIntOverflow) >= 0 {
return ErrU256Overflow
} else {
u256.Int = num
}
return nil
}
// Value implements the database/sql/driver Valuer interface.
func (u256 U256) Value() (driver.Value, error) {
// check bounds
if u256.Int == nil {
return nil, ErrU256Null
} else if u256.Int.Cmp(u256BigIntOverflow) >= 0 {
return nil, ErrU256Overflow
}
// simply encode as a numeric with no Exp set (non-decimal)
numeric := pgtype.Numeric{Int: u256.Int, Status: pgtype.Present}
return numeric.Value()
}
// Wrapper over types.Header such that we can get an RLP
// encoder over it via a `types.Block` wrapper
type RLPHeader types.Header
......@@ -93,3 +36,15 @@ func (h *RLPHeader) Header() *types.Header {
func (h *RLPHeader) Hash() common.Hash {
return h.Header().Hash()
}
// Type definition for []byte to conform to the
// interface expected by the `bytes` serializer
type Bytes []byte
func (b Bytes) Bytes() []byte {
return b[:]
}
func (b *Bytes) SetBytes(bytes []byte) {
*b = bytes
}
......@@ -15,26 +15,19 @@ services:
- "5434:5432"
volumes:
- postgres_data:/data/postgres
- ./migrations:/docker-entrypoint-initdb.d/
indexer:
build:
context: ..
dockerfile: indexer/Dockerfile.refresh
command: ["indexer-refresh", "processor"]
# healthcheck:
# Add healthcheck once figure out good way how
# maybe after we add metrics?
ports:
- 8080:8080
dockerfile: indexer/Dockerfile
command: ["indexer", "index"]
environment:
- INDEXER_DB_PORT=5432
- INDEXER_DB_USER=db_username
- INDEXER_DB_PASSWORD=db_password
- INDEXER_DB_NAME=db_name
- INDEXER_DB_HOST=postgres
- INDEXER_CONFIG=/configs/indexer.toml
- INDEXER_RPC_URL_L1=$INDEXER_RPC_URL_L1
- INDEXER_RPC_URL_L2=$INDEXER_RPC_URL_L2
- INDEXER_CONFIG=/indexer/indexer.toml
volumes:
- ./indexer.toml:/configs/indexer.toml
- ./indexer.toml:/indexer/indexer.toml
depends_on:
postgres:
condition: service_healthy
......@@ -43,27 +36,17 @@ services:
build:
context: ..
dockerfile: indexer/Dockerfile
command: ["indexer", "api"]
healthcheck:
test: wget localhost:8080/healthz -q -O - > /dev/null 2>&1
environment:
# Note that you must index goerli with INDEXER_BEDROCK=false first, then
# reindex with INDEXER_BEDROCK=true or seed the database
- INDEXER_BEDROCK=${INDEXER_BEDROCK_GOERLI:-true}
- INDEXER_BUILD_ENV=${INDEXER_BUILD_ENV:-development}
- INDEXER_DB_PORT=${INDEXER_DB_PORT:-5432}
- INDEXER_DB_USER=${INDEXER_DB_USER:-db_username}
- INDEXER_DB_PASSWORD=${INDEXER_DB_PASSWORD:-db_password}
- INDEXER_DB_NAME=${INDEXER_DB_NAME:-db_name}
- INDEXER_DB_HOST=${INDEXER_DB_HOST:-postgres}
- INDEXER_CHAIN_ID=${INDEXER_CHAIN_ID:-5}
- INDEXER_L1_ETH_RPC=$INDEXER_L1_ETH_RPC
- INDEXER_L2_ETH_RPC=$INDEXER_L2_ETH_RPC
- INDEXER_REST_HOSTNAME=0.0.0.0
- INDEXER_REST_PORT=8080
- INDEXER_BEDROCK_L1_STANDARD_BRIDGE=0
- INDEXER_BEDROCK_L1_STANDARD_BRIDGE=${INDEXER_BEDROCK_L1_STANDARD_BRIDGE:-0x636Af16bf2f682dD3109e60102b8E1A089FedAa8}
- INDEXER_BEDROCK_OPTIMISM_PORTAL=${INDEXER_BEDROCK_OPTIMISM_PORTAL:-0xB7040fd32359688346A3D1395a42114cf8E3b9b2}
- INDEXER_L1_ADDRESS_MANAGER_ADDRESS=${INDEXER_L1_ADDRESS_MANAGER_ADDRESS:-0xdE1FCfB0851916CA5101820A69b13a4E276bd81F}
- INDEXER_RPC_URL_L1=$INDEXER_RPC_URL_L1
- INDEXER_RPC_URL_L2=$INDEXER_RPC_URL_L2
- INDEXER_CONFIG=/indexer/indexer.toml
volumes:
- ./indexer.toml:/indexer/indexer.toml
ports:
- 8080:8080
depends_on:
......@@ -98,77 +81,6 @@ services:
postgres:
condition: service_healthy
gateway-frontend:
command: pnpm nx start @gateway/frontend --host 0.0.0.0 --port 5173
# Change tag to `latest` after https://github.com/ethereum-optimism/gateway/pull/2541 merges
image: ethereumoptimism/gateway-frontend:latest
ports:
- 5173:5173
healthcheck:
test: curl http://0.0.0.0:5173
environment:
- VITE_GROWTHBOOK=${VITE_GROWTHBOOK:-https://cdn.growthbook.io/api/features/dev_iGoAbSwtGOtEJONeHdVTosV0BD3TvTPttAccGyRxqsk}
- VITE_ENABLE_DEVNET=true
- VITE_RPC_URL_ETHEREUM_MAINNET=$VITE_RPC_URL_ETHEREUM_MAINNET
- VITE_RPC_URL_ETHEREUM_OPTIMISM_MAINNET=$VITE_RPC_URL_OPTIMISM_MAINNET
- VITE_RPC_URL_ETHEREUM_GOERLI=$VITE_RPC_URL_ETHEREUM_GOERLI
- VITE_RPC_URL_ETHEREUM_OPTIMISM_GOERLI=$VITE_RPC_URL_OPTIMISM_GOERLI
- VITE_BACKEND_URL_MAINNET=http://localhost:7421
- VITE_BACKEND_URL_GOERLI=http://localhost:7422
- VITE_ENABLE_ALL_FEATURES=true
backend-mainnet:
image: ethereumoptimism/gateway-backend:latest
environment:
# this enables the backend to proxy history requests to the indexer
- BRIDGE_INDEXER_URI=http://api
- HOST=0.0.0.0
- PORT=7300
- MIGRATE_APP_DB_USER=${MIGRATE_APP_DB_USER:-postgres}
- MIGRATE_APP_DB_PASSWORD=${MIGRATE_APP_DB_PASSWORD:-db_password}
- APP_DB_HOST=${APP_DB_HOST:-postgres-app}
- APP_DB_USER=${APP_DB_USER:-gateway-backend-mainnet@oplabs-local-web.iam}
- APP_DB_NAME=${APP_DB_NAME:-gateway}
- APP_DB_PORT=${APP_DB_PORT:-5432}
# This is for the legacy indexer which won't be used but the env variable is still required
- INDEXER_DB_HOST=postgres-mainnet
# This is for the legacy indexer which won't be used but the env variable is still required
- INDEXER_DB_USER=db_username
# This is for the legacy indexer which won't be used but the env variable is still required
- INDEXER_DB_PASS=db_password
# This is for the legacy indexer which won't be used but the env variable is still required
- INDEXER_DB_NAME=db_name
# This is for the legacy indexer which won't be used but the env variable is still required
- INDEXER_DB_PORT=5432
# This is for the legacy indexer which won't be used but the env variable is still required
- DATABASE_URL=postgres://db_username:db_password@postgres-mainnet:5432/db_name
- JSON_RPC_URLS_L1=$JSON_RPC_URLS_L1_MAINNET
- JSON_RPC_URLS_L2=$JSON_RPC_URLS_L2_MAINNET
- JSON_RPC_URLS_L2_GOERLI=$JSON_RPC_URLS_L2_GOERLI
# anvil[0] private key as placeholder
- FAUCET_AUTH_ADMIN_WALLET_PRIVATE_KEY=${FAUCET_AUTH_ADMIN_WALLET_PRIVATE_KEY:-0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80}
- IRON_SESSION_SECRET=${IRON_SESSION_SECRET:-UNKNOWN_IRON_SESSION_PASSWORD_32}
- CHAIN_ID_L1=1
- CHAIN_ID_L2=10
- FLEEK_BUCKET_ADDRESS=34a609661-6774-441f-9fdb-453fdbb89931-bucket
- FLEEK_API_SECRET=$FLEEK_API_SECRET
- FLEEK_API_KEY=$FLEEK_API_KEY
- MOCK_MERKLE_PROOF=true
- LOOP_INTERVAL_MINUTES=.1
- GITHUB_CLIENT_ID=$GITHUB_CLIENT_ID
- GITHUB_SECRET=$GITHUB_SECRET
- MAINNET_BEDROCK=$MAINNET_BEDROCK
- TRM_API_KEY=$TRM_API_KEY
- GOOGLE_CLOUD_STORAGE_BUCKET_NAME=oplabs-dev-web-content
# Recommended to uncomment for local dev unless you need it
#- BYPASS_EVENT_LOG_POLLER_BOOTSTRAP=true
ports:
- 7421:7300
# overrides command in Dockerfile so we can hot reload the server in docker while developing
#command: ['pnpm', 'nx', 'run', '@gateway/backend:docker:watch']
healthcheck:
test: curl http://0.0.0.0:7300/api/v0/healthz
backend-goerli:
image: ethereumoptimism/gateway-backend:latest
environment:
......
......@@ -32,9 +32,6 @@ func TestE2EBridgeL1CrossDomainMessenger(t *testing.T) {
require.NoError(t, err)
l1Opts.Value = big.NewInt(params.Ether)
// Pause the processor to track relayed event
testSuite.Indexer.L2Processor.PauseForTest()
// (1) Send the Message
sentMsgTx, err := l1CrossDomainMessenger.SendMessage(l1Opts, aliceAddr, calldata, 100_000)
require.NoError(t, err)
......@@ -46,7 +43,7 @@ func TestE2EBridgeL1CrossDomainMessenger(t *testing.T) {
// wait for processor catchup
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
l1Header := testSuite.Indexer.BridgeProcessor.LatestL1Header
return l1Header != nil && l1Header.Number.Uint64() >= sentMsgReceipt.BlockNumber.Uint64(), nil
}))
......@@ -57,33 +54,34 @@ func TestE2EBridgeL1CrossDomainMessenger(t *testing.T) {
nonceBytes := [31]byte{0: byte(1)}
nonce := new(big.Int).SetBytes(nonceBytes[:])
sentMessage, err := testSuite.DB.BridgeMessages.L1BridgeMessage(nonce)
sentMessage, err := testSuite.DB.BridgeMessages.L1BridgeMessage(parsedMessage.MessageHash)
require.NoError(t, err)
require.NotNil(t, sentMessage)
require.NotNil(t, sentMessage.SentMessageEventGUID)
require.Equal(t, depositInfo.DepositTx.SourceHash, sentMessage.TransactionSourceHash)
require.Equal(t, parsedMessage.MessageHash, sentMessage.MessageHash)
require.Equal(t, uint64(100_000), sentMessage.GasLimit.Int.Uint64())
require.Equal(t, big.NewInt(params.Ether), sentMessage.Tx.Amount.Int)
require.Equal(t, nonce.Uint64(), sentMessage.Nonce.Uint64())
require.Equal(t, uint64(100_000), sentMessage.GasLimit.Uint64())
require.Equal(t, uint64(params.Ether), sentMessage.Tx.Amount.Uint64())
require.Equal(t, aliceAddr, sentMessage.Tx.FromAddress)
require.Equal(t, aliceAddr, sentMessage.Tx.ToAddress)
require.ElementsMatch(t, calldata, sentMessage.Tx.Data)
// (2) Process RelayedMesssage on inclusion
require.Nil(t, sentMessage.RelayedMessageEventGUID)
testSuite.Indexer.L2Processor.ResumeForTest()
// - We don't assert that `RelayedMessageEventGUID` is nil prior to inclusion since there isn't
// a straightforward way of pausing/resuming the processors at the right time. The codepath is the
// same for L2->L1 messages, which does check for this, so we are still covered
transaction, err := testSuite.DB.BridgeTransactions.L1TransactionDeposit(sentMessage.TransactionSourceHash)
require.NoError(t, err)
// wait for processor catchup
depositReceipt, err := wait.ForReceiptOK(context.Background(), testSuite.L2Client, transaction.L2TransactionHash)
l2DepositReceipt, err := wait.ForReceiptOK(context.Background(), testSuite.L2Client, transaction.L2TransactionHash)
require.NoError(t, err)
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
return l2Header != nil && l2Header.Number.Uint64() >= depositReceipt.BlockNumber.Uint64(), nil
l2Header := testSuite.Indexer.BridgeProcessor.LatestL2Header
return l2Header != nil && l2Header.Number.Uint64() >= l2DepositReceipt.BlockNumber.Uint64(), nil
}))
sentMessage, err = testSuite.DB.BridgeMessages.L1BridgeMessage(nonce)
sentMessage, err = testSuite.DB.BridgeMessages.L1BridgeMessage(parsedMessage.MessageHash)
require.NoError(t, err)
require.NotNil(t, sentMessage)
require.NotNil(t, sentMessage.RelayedMessageEventGUID)
......@@ -132,7 +130,7 @@ func TestE2EBridgeL2CrossDomainMessenger(t *testing.T) {
// wait for processor catchup
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
l2Header := testSuite.Indexer.BridgeProcessor.LatestL2Header
return l2Header != nil && l2Header.Number.Uint64() >= sentMsgReceipt.BlockNumber.Uint64(), nil
}))
......@@ -143,30 +141,30 @@ func TestE2EBridgeL2CrossDomainMessenger(t *testing.T) {
nonceBytes := [31]byte{0: byte(1)}
nonce := new(big.Int).SetBytes(nonceBytes[:])
sentMessage, err := testSuite.DB.BridgeMessages.L2BridgeMessage(nonce)
sentMessage, err := testSuite.DB.BridgeMessages.L2BridgeMessage(parsedMessage.MessageHash)
require.NoError(t, err)
require.NotNil(t, sentMessage)
require.NotNil(t, sentMessage.SentMessageEventGUID)
require.Equal(t, withdrawalHash, sentMessage.TransactionWithdrawalHash)
require.Equal(t, parsedMessage.MessageHash, sentMessage.MessageHash)
require.Equal(t, uint64(100_000), sentMessage.GasLimit.Int.Uint64())
require.Equal(t, big.NewInt(params.Ether), sentMessage.Tx.Amount.Int)
require.Equal(t, nonce.Uint64(), sentMessage.Nonce.Uint64())
require.Equal(t, uint64(100_000), sentMessage.GasLimit.Uint64())
require.Equal(t, uint64(params.Ether), sentMessage.Tx.Amount.Uint64())
require.Equal(t, aliceAddr, sentMessage.Tx.FromAddress)
require.Equal(t, aliceAddr, sentMessage.Tx.ToAddress)
require.ElementsMatch(t, calldata, sentMessage.Tx.Data)
// (2) Process RelayedMessage on withdrawal finalization
require.Nil(t, sentMessage.RelayedMessageEventGUID)
_, finalizedReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, sentMsgReceipt)
_, finalizedReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.EthInstances["sequencer"], testSuite.OpCfg.Secrets.Alice, sentMsgReceipt)
// wait for processor catchup
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
l1Header := testSuite.Indexer.BridgeProcessor.LatestL1Header
return l1Header != nil && l1Header.Number.Uint64() >= finalizedReceipt.BlockNumber.Uint64(), nil
}))
// message is marked as relayed
sentMessage, err = testSuite.DB.BridgeMessages.L2BridgeMessage(nonce)
sentMessage, err = testSuite.DB.BridgeMessages.L2BridgeMessage(parsedMessage.MessageHash)
require.NoError(t, err)
require.NotNil(t, sentMessage)
require.NotNil(t, sentMessage.RelayedMessageEventGUID)
......
......@@ -46,22 +46,20 @@ func TestE2EBridgeTransactionsOptimismPortalDeposits(t *testing.T) {
// wait for processor catchup
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
l1Header := testSuite.Indexer.BridgeProcessor.LatestL1Header
return l1Header != nil && l1Header.Number.Uint64() >= depositReceipt.BlockNumber.Uint64(), nil
}))
deposit, err := testSuite.DB.BridgeTransactions.L1TransactionDeposit(depositInfo.DepositTx.SourceHash)
require.NoError(t, err)
require.NotNil(t, deposit)
require.Equal(t, depositL2TxHash, deposit.L2TransactionHash)
require.Equal(t, big.NewInt(100_000), deposit.GasLimit.Int)
require.Equal(t, big.NewInt(params.Ether), deposit.Tx.Amount.Int)
require.Equal(t, uint64(100_000), deposit.GasLimit.Uint64())
require.Equal(t, uint64(params.Ether), deposit.Tx.Amount.Uint64())
require.Equal(t, aliceAddr, deposit.Tx.FromAddress)
require.Equal(t, aliceAddr, deposit.Tx.ToAddress)
require.ElementsMatch(t, calldata, deposit.Tx.Data)
require.Equal(t, depositInfo.Version.Uint64(), deposit.Version.Int.Uint64())
require.ElementsMatch(t, depositInfo.OpaqueData, deposit.OpaqueData)
event, err := testSuite.DB.ContractEvents.L1ContractEvent(deposit.InitiatedL1EventGUID)
require.NoError(t, err)
require.NotNil(t, event)
......@@ -103,7 +101,7 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserWithdrawal(t *testing.T) {
// wait for processor catchup
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
l2Header := testSuite.Indexer.BridgeProcessor.LatestL2Header
return l2Header != nil && l2Header.Number.Uint64() >= withdrawReceipt.BlockNumber.Uint64(), nil
}))
......@@ -114,9 +112,10 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserWithdrawal(t *testing.T) {
withdraw, err := testSuite.DB.BridgeTransactions.L2TransactionWithdrawal(withdrawalHash)
require.NoError(t, err)
require.Equal(t, msgPassed.Nonce.Uint64(), withdraw.Nonce.Int.Uint64())
require.Equal(t, big.NewInt(100_000), withdraw.GasLimit.Int)
require.Equal(t, big.NewInt(params.Ether), withdraw.Tx.Amount.Int)
require.NotNil(t, withdraw)
require.Equal(t, msgPassed.Nonce.Uint64(), withdraw.Nonce.Uint64())
require.Equal(t, uint64(100_000), withdraw.GasLimit.Uint64())
require.Equal(t, uint64(params.Ether), withdraw.Tx.Amount.Uint64())
require.Equal(t, aliceAddr, withdraw.Tx.FromAddress)
require.Equal(t, aliceAddr, withdraw.Tx.ToAddress)
require.ElementsMatch(t, calldata, withdraw.Tx.Data)
......@@ -130,9 +129,9 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserWithdrawal(t *testing.T) {
require.Nil(t, withdraw.ProvenL1EventGUID)
require.Nil(t, withdraw.FinalizedL1EventGUID)
withdrawParams, proveReceipt := op_e2e.ProveWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt)
withdrawParams, proveReceipt := op_e2e.ProveWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.EthInstances["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt)
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
l1Header := testSuite.Indexer.BridgeProcessor.LatestL1Header
return l1Header != nil && l1Header.Number.Uint64() >= proveReceipt.BlockNumber.Uint64(), nil
}))
......@@ -150,7 +149,7 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserWithdrawal(t *testing.T) {
finalizeReceipt := op_e2e.FinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpCfg.Secrets.Alice, proveReceipt, withdrawParams)
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
l1Header := testSuite.Indexer.BridgeProcessor.LatestL1Header
return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil
}))
......@@ -190,9 +189,9 @@ func TestE2EBridgeTransactionsL2ToL1MessagePasserFailedWithdrawal(t *testing.T)
require.NoError(t, err)
// Prove&Finalize withdrawal
_, finalizeReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.Nodes["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt)
_, finalizeReceipt := op_e2e.ProveAndFinalizeWithdrawal(t, *testSuite.OpCfg, testSuite.L1Client, testSuite.OpSys.EthInstances["sequencer"], testSuite.OpCfg.Secrets.Alice, withdrawReceipt)
require.NoError(t, wait.For(context.Background(), 500*time.Millisecond, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
l1Header := testSuite.Indexer.BridgeProcessor.LatestL1Header
return l1Header != nil && l1Header.Number.Uint64() >= finalizeReceipt.BlockNumber.Uint64(), nil
}))
......
......@@ -6,51 +6,53 @@ import (
"testing"
"time"
"github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/require"
)
func TestE2EBlockHeaders(t *testing.T) {
func TestE2EETL(t *testing.T) {
testSuite := createE2ETestSuite(t)
l2OutputOracle, err := bindings.NewL2OutputOracle(testSuite.OpCfg.L1Deployments.L2OutputOracleProxy, testSuite.L1Client)
require.NoError(t, err)
// wait for at least 10 L2 blocks to be created & posted on L1
// wait for at least 10 L2 blocks posted on L1
require.NoError(t, wait.For(context.Background(), time.Second, func() (bool, error) {
l2Height, err := l2OutputOracle.LatestBlockNumber(&bind.CallOpts{Context: context.Background()})
return l2Height != nil && l2Height.Uint64() >= 9, err
}))
// ensure the processors are caught up to this state
// ensure we've indexed up to this state
l1Height, err := testSuite.L1Client.BlockNumber(context.Background())
require.NoError(t, err)
require.NoError(t, wait.For(context.Background(), time.Second, func() (bool, error) {
l1Header := testSuite.Indexer.L1Processor.LatestProcessedHeader()
l2Header := testSuite.Indexer.L2Processor.LatestProcessedHeader()
require.NoError(t, wait.For(context.Background(), 100*time.Millisecond, func() (bool, error) {
l1Header, err := testSuite.DB.Blocks.L1LatestBlockHeader()
require.NoError(t, err)
l2Header, err := testSuite.DB.Blocks.L2LatestBlockHeader()
require.NoError(t, err)
return (l1Header != nil && l1Header.Number.Uint64() >= l1Height) && (l2Header != nil && l2Header.Number.Uint64() >= 9), nil
}))
t.Run("indexes L2 blocks", func(t *testing.T) {
latestL2Header, err := testSuite.DB.Blocks.LatestL2BlockHeader()
t.Run("indexes all L2 blocks", func(t *testing.T) {
latestL2Header, err := testSuite.DB.Blocks.L2LatestBlockHeader()
require.NoError(t, err)
require.NotNil(t, latestL2Header)
require.True(t, latestL2Header.Number.Int.Uint64() >= 9)
require.True(t, latestL2Header.Number.Uint64() >= 9)
for i := int64(0); i < 10; i++ {
height := big.NewInt(i)
indexedHeader, err := testSuite.DB.Blocks.L2BlockHeader(height)
indexedHeader, err := testSuite.DB.Blocks.L2BlockHeaderWithFilter(database.BlockHeader{Number: height})
require.NoError(t, err)
require.NotNil(t, indexedHeader)
......@@ -58,23 +60,26 @@ func TestE2EBlockHeaders(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, indexedHeader)
require.Equal(t, header.Number.Int64(), indexedHeader.Number.Int.Int64())
require.Equal(t, header.Number.Int64(), indexedHeader.Number.Int64())
require.Equal(t, header.Hash(), indexedHeader.Hash)
require.Equal(t, header.ParentHash, indexedHeader.ParentHash)
require.Equal(t, header.Time, indexedHeader.Timestamp)
// ensure the right rlp encoding is stored. checking the hashes sufficies
// ensure the right rlp encoding is stored. checking the hashes
// suffices as it is based on the rlp bytes of the header
require.Equal(t, header.Hash(), indexedHeader.RLPHeader.Hash())
}
})
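Comparing hashes suffices here because a go-ethereum header hash is the keccak256 of the header's RLP encoding; if the stored RLP bytes hash to the same value, they encode the same header. A small illustrative check (not part of the test suite):

package rlpsketch

import (
	"bytes"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

// sameHeader reports whether storedRLP is an RLP encoding of h by comparing
// keccak256 digests, mirroring the header.Hash() == RLPHeader.Hash() assertion above.
func sameHeader(h *types.Header, storedRLP []byte) bool {
	enc, err := rlp.EncodeToBytes(h)
	if err != nil {
		return false
	}
	return bytes.Equal(crypto.Keccak256(enc), crypto.Keccak256(storedRLP))
}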
/*
TODO: ADD THIS BACK IN WHEN THESE MARKERS ARE INDEXED
t.Run("indexes L2 checkpoints", func(t *testing.T) {
latestOutput, err := testSuite.DB.Blocks.LatestCheckpointedOutput()
require.NoError(t, err)
require.NotNil(t, latestOutput)
require.GreaterOrEqual(t, latestOutput.L2BlockNumber.Int.Uint64(), uint64(9))
l2EthClient, err := node.DialEthClient(testSuite.OpSys.Nodes["sequencer"].HTTPEndpoint())
l2EthClient, err := node.DialEthClient(testSuite.OpSys.EthInstances["sequencer"].HTTPEndpoint())
require.NoError(t, err)
submissionInterval := testSuite.OpCfg.DeployConfig.L2OutputOracleSubmissionInterval
......@@ -103,19 +108,18 @@ func TestE2EBlockHeaders(t *testing.T) {
require.Equal(t, crypto.Keccak256Hash(outputRootPreImage[:]), output.OutputRoot)
}
})
*/
t.Run("indexes L1 logs and associated blocks", func(t *testing.T) {
testCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
t.Run("indexes L1 blocks with accompanying contract event", func(t *testing.T) {
l1Contracts := []common.Address{}
testSuite.OpCfg.L1Deployments.ForEach(func(name string, addr common.Address) { l1Contracts = append(l1Contracts, addr) })
logFilter := ethereum.FilterQuery{FromBlock: big.NewInt(0), ToBlock: big.NewInt(int64(l1Height)), Addresses: l1Contracts}
logs, err := testSuite.L1Client.FilterLogs(testCtx, logFilter) // []types.Log
logs, err := testSuite.L1Client.FilterLogs(context.Background(), logFilter) // []types.Log
require.NoError(t, err)
for _, log := range logs {
contractEvent, err := testSuite.DB.ContractEvents.L1ContractEventByTxLogIndex(log.TxHash, uint64(log.Index))
for i := range logs {
log := logs[i]
contractEvent, err := testSuite.DB.ContractEvents.L1ContractEventWithFilter(database.ContractEvent{TransactionHash: log.TxHash, LogIndex: uint64(log.Index)})
require.NoError(t, err)
require.Equal(t, log.Topics[0], contractEvent.EventSignature)
require.Equal(t, log.BlockHash, contractEvent.BlockHash)
......@@ -131,16 +135,16 @@ func TestE2EBlockHeaders(t *testing.T) {
require.ElementsMatch(t, logRlp, contractEventRlp)
// ensure the block is also indexed
block, err := testSuite.L1Client.BlockByNumber(testCtx, big.NewInt(int64(log.BlockNumber)))
block, err := testSuite.L1Client.BlockByNumber(context.Background(), big.NewInt(int64(log.BlockNumber)))
require.NoError(t, err)
require.Equal(t, block.Time(), contractEvent.Timestamp)
require.Equal(t, block.Hash(), contractEvent.BlockHash)
l1BlockHeader, err := testSuite.DB.Blocks.L1BlockHeader(block.Number())
l1BlockHeader, err := testSuite.DB.Blocks.L1BlockHeader(block.Hash())
require.NoError(t, err)
require.Equal(t, block.Hash(), l1BlockHeader.Hash)
require.Equal(t, block.ParentHash(), l1BlockHeader.ParentHash)
require.Equal(t, block.Number(), l1BlockHeader.Number.Int)
require.Equal(t, block.Number().Uint64(), l1BlockHeader.Number.Uint64())
require.Equal(t, block.Time(), l1BlockHeader.Timestamp)
// ensure the right rlp encoding is stored. checking the hashes
......
......@@ -43,14 +43,16 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
dbUser := os.Getenv("DB_USER")
dbName := setupTestDatabase(t)
// Replace the handler of the global logger with the testlog
logger := testlog.Logger(t, log.LvlInfo)
log.Root().SetHandler(logger.GetHandler())
// Discard the Global Logger as each component
// has its own configured logger
log.Root().SetHandler(log.DiscardHandler())
// Rollup System Configuration and Start
opCfg := op_e2e.DefaultSystemConfig(t)
opSys, err := opCfg.Start()
opCfg.DeployConfig.FinalizationPeriodSeconds = 2
opSys, err := opCfg.Start(t)
require.NoError(t, err)
t.Cleanup(func() { opSys.Close() })
// E2E tests can run on the order of minutes. Once
// the system is running, mark this test for parallel execution
......@@ -58,7 +60,6 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
// Indexer Configuration and Start
indexerCfg := config.Config{
DB: config.DBConfig{
Host: "127.0.0.1",
Port: 5432,
......@@ -66,47 +67,42 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
User: dbUser,
},
RPCs: config.RPCsConfig{
L1RPC: opSys.Nodes["l1"].HTTPEndpoint(),
L2RPC: opSys.Nodes["sequencer"].HTTPEndpoint(),
L1RPC: opSys.EthInstances["l1"].HTTPEndpoint(),
L2RPC: opSys.EthInstances["sequencer"].HTTPEndpoint(),
},
Chain: config.ChainConfig{
L1PollingInterval: uint(opCfg.DeployConfig.L1BlockTime) * 1000,
L1ConfirmationDepth: 0,
L2PollingInterval: uint(opCfg.DeployConfig.L2BlockTime) * 1000,
L2ConfirmationDepth: 0,
L1Contracts: config.L1Contracts{
OptimismPortal: opCfg.L1Deployments.OptimismPortalProxy,
L2OutputOracle: opCfg.L1Deployments.L2OutputOracleProxy,
L1CrossDomainMessenger: opCfg.L1Deployments.L1CrossDomainMessengerProxy,
L1StandardBridge: opCfg.L1Deployments.L1StandardBridgeProxy,
L1ERC721Bridge: opCfg.L1Deployments.L1ERC721BridgeProxy,
OptimismPortalProxy: opCfg.L1Deployments.OptimismPortalProxy,
L2OutputOracleProxy: opCfg.L1Deployments.L2OutputOracleProxy,
L1CrossDomainMessengerProxy: opCfg.L1Deployments.L1CrossDomainMessengerProxy,
L1StandardBridgeProxy: opCfg.L1Deployments.L1StandardBridgeProxy,
},
},
Metrics: config.MetricsConfig{
Host: "127.0.0.1",
Port: 0,
},
}
db, err := database.NewDB(indexerCfg.DB)
require.NoError(t, err)
indexer, err := indexer.NewIndexer(
indexerCfg.Chain,
indexerCfg.RPCs,
db,
logger,
)
t.Cleanup(func() { db.Close() })
indexerLog := testlog.Logger(t, log.LvlInfo).New("role", "indexer")
indexer, err := indexer.NewIndexer(indexerLog, db, indexerCfg.Chain, indexerCfg.RPCs, indexerCfg.Metrics)
require.NoError(t, err)
indexerStoppedCh := make(chan interface{}, 1)
indexerCtx, indexerStop := context.WithCancel(context.Background())
t.Cleanup(func() { indexerStop() })
go func() {
err := indexer.Run(indexerCtx)
require.NoError(t, err)
indexerStoppedCh <- nil
}()
t.Cleanup(func() {
indexerStop()
<-indexerStoppedCh
indexer.Cleanup()
db.Close()
opSys.Close()
})
return E2ETestSuite{
t: t,
DB: db,
......
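The revised harness starts the indexer in a background goroutine and tears it down via t.Cleanup: cancel the context, wait for Run to return, then release resources in reverse order of acquisition. A generic sketch of that lifecycle, using a hypothetical runner interface rather than the real indexer type:

package e2esketch

import (
	"context"
	"testing"
)

// runner stands in for any long-lived component with a blocking Run method,
// such as the indexer started above. It is illustrative only.
type runner interface {
	Run(ctx context.Context) error
}

// startBackground runs r until the test ends, then shuts it down in order:
// cancel the context, wait for Run to exit, invoke the caller-provided cleanup.
func startBackground(t *testing.T, r runner, cleanup func()) {
	ctx, stop := context.WithCancel(context.Background())
	stopped := make(chan struct{})
	go func() {
		defer close(stopped)
		if err := r.Run(ctx); err != nil && ctx.Err() == nil {
			t.Errorf("background runner exited with error: %v", err)
		}
	}()
	t.Cleanup(func() {
		stop()
		<-stopped
		if cleanup != nil {
			cleanup()
		}
	})
}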
......@@ -4,7 +4,7 @@ import (
"errors"
"math/big"
"github.com/ethereum-optimism/optimism/indexer/processor"
"github.com/ethereum-optimism/optimism/indexer/processors/contracts"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum/go-ethereum/common"
......@@ -57,5 +57,5 @@ func CrossDomainMessengerSentMessageHash(sentMessage *bindings.CrossDomainMessen
return common.Hash{}, err
}
return processor.CrossDomainMessageHash(abi, sentMessage, value)
return contracts.CrossDomainMessageHash(abi, sentMessage, value)
}
This diff is collapsed.
# Chain configures L1 chain addresses
# They can be configured manually, or via a preset L2 chain ID for known chains, including OP Mainnet, OP Goerli, Base, Base Goerli, Zora, and Zora Goerli
[chain]
# OP Goerli
preset = 420
# L1 Config
l1-polling-interval = 0
l1-header-buffer-size = 0
l1-confirmation-depth = 0
l1-starting-height = 0
# L2 Config
l2-polling-interval = 0
l2-header-buffer-size = 0
l2-confirmation-depth = 0
[rpcs]
l1-rpc = "${INDEXER_RPC_URL_L1}"
l2-rpc = "${INDEXER_RPC_URL_L2}"
[db]
host = "127.0.0.1"
host = "postgres"
port = 5432
user = "postgres"
password = "postgres"
name = "indexer"
user = "db_username"
password = "db_password"
name = "db_name"
[api]
host = "127.0.0.1"
......
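The ${INDEXER_RPC_URL_L1} / ${INDEXER_RPC_URL_L2} values above are environment-variable placeholders. One plausible way to resolve them before decoding the TOML is sketched below; this is an assumption for illustration, not necessarily how the indexer loads its config, and the struct is trimmed to the rpcs table only:

package configsketch

import (
	"os"

	"github.com/BurntSushi/toml"
)

type RPCsConfig struct {
	L1RPC string `toml:"l1-rpc"`
	L2RPC string `toml:"l2-rpc"`
}

type Config struct {
	RPCs RPCsConfig `toml:"rpcs"`
}

// LoadConfig reads the TOML file, expands ${VAR} references from the
// environment, and decodes the result.
func LoadConfig(path string) (Config, error) {
	var cfg Config
	data, err := os.ReadFile(path)
	if err != nil {
		return cfg, err
	}
	if _, err := toml.Decode(os.ExpandEnv(string(data)), &cfg); err != nil {
		return cfg, err
	}
	return cfg, nil
}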
......@@ -19,3 +19,8 @@ func clampBigInt(start, end *big.Int, size uint64) *big.Int {
temp.Add(start, big.NewInt(int64(size-1)))
return temp
}
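Only the tail of clampBigInt is visible in this hunk. Assuming the elided head returns end unchanged whenever the inclusive range [start, end] already fits within size headers, the whole helper would look roughly like this sketch:

// clampBigInt (reconstructed sketch, under the stated assumption): keep end
// if the range spans at most size values, otherwise clamp to start+size-1.
func clampBigInt(start, end *big.Int, size uint64) *big.Int {
	temp := new(big.Int)
	count := temp.Sub(end, start).Uint64() + 1
	if count <= size {
		return end
	}
	// reuse temp as the clamped end value
	temp.Add(start, big.NewInt(int64(size-1)))
	return temp
}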
// BigIntMatcher returns a comparison function that reports whether a big.Int equals the given value
func BigIntMatcher(num int64) func(*big.Int) bool {
return func(bi *big.Int) bool { return bi.Int64() == num }
}
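Because BigIntMatcher returns a predicate rather than a boolean, it slots directly into argument matchers such as testify's mock.MatchedBy. A hedged usage sketch; the mocked headerDB below is hypothetical and exists only to show the wiring:

package matchersketch

import (
	"math/big"
	"testing"

	"github.com/stretchr/testify/mock"
)

// headerDB is a hypothetical mocked dependency, not a type from the indexer.
type headerDB struct{ mock.Mock }

func (m *headerDB) L2BlockHeader(height *big.Int) error {
	args := m.Called(height)
	return args.Error(0)
}

// BigIntMatcher mirrors the helper added in the hunk above.
func BigIntMatcher(num int64) func(*big.Int) bool {
	return func(bi *big.Int) bool { return bi.Int64() == num }
}

func TestMatcherUsage(t *testing.T) {
	db := new(headerDB)
	// Expect a call whose *big.Int argument equals 9, regardless of pointer identity.
	db.On("L2BlockHeader", mock.MatchedBy(BigIntMatcher(9))).Return(nil)
	_ = db.L2BlockHeader(big.NewInt(9))
	db.AssertExpectations(t)
}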
......@@ -7,17 +7,13 @@ import (
"github.com/stretchr/testify/assert"
)
func bigIntMatcher(num int64) func(*big.Int) bool {
return func(bi *big.Int) bool { return bi.Int64() == num }
}
func TestClampBigInt(t *testing.T) {
assert.True(t, true)
start := big.NewInt(1)
end := big.NewInt(10)
// When the (start, end) boudnds are within range
// When the (start, end) bounds are within range
// the same end pointer should be returned
// larger range
......
This diff is collapsed.