Commit e61a5cd3 authored by Matthew Slipper, committed by GitHub

indexer: Remove (#10784)

parent 06d4d206
......@@ -1085,69 +1085,6 @@ jobs:
command: make <<parameters.binary_name>>
working_directory: <<parameters.working_directory>>
indexer-tests:
parameters:
variant:
type: string
default: ''
environment:
DEVNET_L2OO: 'false'
OP_E2E_USE_L2OO: 'false'
docker:
- image: <<pipeline.parameters.ci_builder_image>>
- image: cimg/postgres:14.1
resource_class: xlarge
steps:
- checkout
- when:
condition:
equal: ['l2oo', <<parameters.variant>>]
steps:
- run:
name: Set DEVNET_L2OO = true
command: echo 'export DEVNET_L2OO=true' >> $BASH_ENV
- run:
name: Set OP_E2E_USE_L2OO = true
command: echo 'export OP_E2E_USE_L2OO=true' >> $BASH_ENV
- check-changed:
patterns: indexer
- run:
name: Lint
command: golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 4m -e "errors.As" -e "errors.Is" ./...
working_directory: indexer
- run:
name: git submodules
command: make submodules
- run:
name: generate cannon prestate
command: make cannon-prestate
- run:
name: generate L1 state
command: make devnet-allocs
- run:
name: Test
command: |
mkdir -p /test-results
DB_USER=postgres gotestsum --format=standard-verbose --junitfile=/test-results/indexer_tests.xml -- -parallel=4 ./...
working_directory: indexer
- store_test_results:
path: /test-results
- run:
name: Build
command: make indexer
working_directory: indexer
- run:
name: Install node_modules
command: pnpm install:ci
- run:
name: Install tygo
command: go install github.com/gzuidhof/tygo@latest
working_directory: indexer/api-ts
- run:
name: Check generated code
command: npm run generate && git diff --exit-code
working_directory: indexer/api-ts
cannon-prestate:
docker:
- image: <<pipeline.parameters.ci_builder_image>>
......@@ -1702,11 +1639,6 @@ workflows:
name: proxyd-tests
binary_name: proxyd
working_directory: proxyd
- indexer-tests:
name: indexer-tests<< matrix.variant >>
matrix:
parameters:
variant: ["", "-l2oo"]
- semgrep-scan
- go-mod-download
- fuzz-golang:
......@@ -1900,10 +1832,6 @@ workflows:
- op-challenger-docker-build
- da-server-docker-build
- cannon-prestate
- docker-build:
name: indexer-docker-build
docker_name: indexer
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
- check-generated-mocks-op-node
- check-generated-mocks-op-service
- cannon-go-lint-and-test:
......@@ -1926,7 +1854,7 @@ workflows:
type: approval
filters:
tags:
only: /^(da-server|proxyd|chain-mon|indexer|ci-builder(-rust)?|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/
only: /^(da-server|proxyd|chain-mon|ci-builder(-rust)?|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/
branches:
ignore: /.*/
- docker-build:
......@@ -2119,21 +2047,6 @@ workflows:
- oplabs-gcr-release
requires:
- hold
- docker-build:
name: indexer-docker-release
filters:
tags:
only: /^indexer\/v.*/
branches:
ignore: /.*/
docker_name: indexer
docker_tags: <<pipeline.git.revision>>
publish: true
release: true
context:
- oplabs-gcr-release
requires:
- hold
- docker-build:
name: chain-mon-docker-release
filters:
......@@ -2340,15 +2253,6 @@ workflows:
context:
- oplabs-gcr
- slack
- docker-build:
name: indexer-docker-publish
docker_name: indexer
docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>>
publish: true
context:
- oplabs-gcr
- slack
platforms: "linux/amd64,linux/arm64"
- docker-build:
name: chain-mon-docker-publish
docker_name: chain-mon
......
......@@ -33,7 +33,6 @@
# Misc
/proxyd @ethereum-optimism/infra-reviewers
/indexer @ethereum-optimism/devxpod
/infra @ethereum-optimism/infra-reviewers
/specs @ethereum-optimism/contract-reviewers @ethereum-optimism/go-reviewers
......
......@@ -89,17 +89,6 @@ pull_request_rules:
label:
add:
- A-cannon
- name: Add A-indexer label and ecopod reviewers
conditions:
- 'files~=^indexer/'
- '#label<5'
actions:
label:
add:
- A-indexer
request_reviews:
users:
- roninjin10
- name: Add A-op-batcher label
conditions:
- 'files~=^op-batcher/'
......
......@@ -20,7 +20,6 @@ on:
options:
- ci-builder
- ci-builder-rust
- indexer
- op-heartbeat
- chain-mon
- op-node
......
......@@ -117,7 +117,6 @@ The full set of components that have releases are:
- `chain-mon`
- `ci-builder`
- `ci-builder`
- `indexer`
- `op-batcher`
- `op-contracts`
- `op-challenger`
......
......@@ -212,19 +212,6 @@ target "proxyd" {
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/proxyd:${tag}"]
}
target "indexer" {
dockerfile = "./indexer/Dockerfile"
context = "./"
args = {
// proxyd dockerfile has no _ in the args
GITCOMMIT = "${GIT_COMMIT}"
GITDATE = "${GIT_DATE}"
GITVERSION = "${GIT_VERSION}"
}
platforms = split(",", PLATFORMS)
tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/indexer:${tag}"]
}
target "chain-mon" {
dockerfile = "./ops/docker/Dockerfile.packages"
context = "."
......
......@@ -3,7 +3,6 @@ module github.com/ethereum-optimism/optimism
go 1.21
require (
github.com/BurntSushi/toml v1.3.2
github.com/DataDog/zstd v1.5.2
github.com/andybalholm/brotli v1.1.0
github.com/btcsuite/btcd v0.24.0
......@@ -16,12 +15,9 @@ require (
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240603085035-9c8f6081266e
github.com/ethereum/go-ethereum v1.13.15
github.com/fsnotify/fsnotify v1.7.0
github.com/go-chi/chi/v5 v5.0.12
github.com/go-chi/docgen v1.2.0
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/go-cmp v0.6.0
github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8
github.com/google/uuid v1.6.0
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/hashicorp/raft v1.6.1
......@@ -29,8 +25,6 @@ require (
github.com/holiman/uint256 v1.2.4
github.com/ipfs/go-datastore v0.6.0
github.com/ipfs/go-ds-leveldb v0.5.0
github.com/jackc/pgtype v1.14.3
github.com/jackc/pgx/v5 v5.6.0
github.com/libp2p/go-libp2p v0.35.0
github.com/libp2p/go-libp2p-mplex v0.9.0
github.com/libp2p/go-libp2p-pubsub v0.11.0
......@@ -52,8 +46,6 @@ require (
golang.org/x/sync v0.7.0
golang.org/x/term v0.21.0
golang.org/x/time v0.5.0
gorm.io/driver/postgres v1.5.7
gorm.io/gorm v1.25.10
)
require (
......@@ -110,6 +102,7 @@ require (
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
......@@ -128,23 +121,16 @@ require (
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/ipfs/go-cid v0.4.1 // indirect
github.com/ipfs/go-log/v2 v2.5.1 // indirect
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/koron/go-ssdp v0.0.4 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
......
docker-compose.dev.yml
.env
/indexer
api-ts/yarn.lock
api-ts/package-lock.json
\ No newline at end of file
FROM --platform=$BUILDPLATFORM golang:1.21.1-alpine3.18 as builder
RUN apk add --no-cache make ca-certificates gcc musl-dev linux-headers git jq bash
COPY ./go.mod /app/go.mod
COPY ./go.sum /app/go.sum
WORKDIR /app
RUN go mod download
# build indexer with the shared go.mod & go.sum files
COPY ./indexer /app/indexer
COPY ./op-service /app/op-service
COPY ./op-node /app/op-node
COPY ./op-plasma /app/op-plasma
COPY ./op-chain-ops /app/op-chain-ops
COPY ./go.mod /app/go.mod
COPY ./go.sum /app/go.sum
WORKDIR /app/indexer
RUN make indexer
FROM alpine:3.18
COPY --from=builder /app/indexer/indexer /usr/local/bin
COPY --from=builder /app/indexer/migrations /app/indexer/migrations
WORKDIR /app/indexer
CMD ["indexer"]
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
LDFLAGSSTRING +=-X main.GitDate=$(GITDATE)
LDFLAGS := -ldflags "$(LDFLAGSSTRING)"
indexer:
env GO111MODULE=on go build -v $(LDFLAGS) ./cmd/indexer
up:
docker-compose up --build
clean:
rm indexer
test:
go test -v ./...
lint:
golangci-lint run ./...
.PHONY: \
indexer \
bindings \
bindings-scc \
clean \
test \
lint
# @eth-optimism/indexer
## Getting started
### Configuration
The `indexer.toml` contains configuration for the indexer. The file is templated for the local devnet; however, presets are available for [known networks](https://github.com/ethereum-optimism/optimism/blob/develop/indexer/config/presets.go). The file also templates the keys needed for custom networks, such as the rollup contract addresses and the `l1-starting-height` for the deployment height.
Required configuration includes the network-specific `(l1|l2)-rpc` URLs, which must point to archival nodes, as well as the `(l1|l2)-polling-interval` & `(l1|l2)-header-buffer-size` keys, which control the rate of data retrieval from these nodes.
### Testing
All tests can be run with `make test` from the `/indexer` directory. This runs all unit and e2e tests.
> **NOTE:** Successfully running the E2E tests requires spinning up a local devnet via [op-e2e](https://github.com/ethereum-optimism/optimism/tree/develop/op-e2e) and pre-populating it with the necessary bedrock genesis state. This genesis state is generated by invoking the `make devnet-allocs` target from the root of the optimism monorepo before running the indexer tests. More information on this can be found in the [op-e2e README](../op-e2e/README.md). A Postgres database on port 5432, accessible as the password-less user given by the `$DB_USER` env variable, must be available as well.
### Run the Indexer (docker-compose)
The local [docker-compose.yml](https://github.com/ethereum-optimism/optimism/blob/develop/indexer/docker-compose.yml) file spins up the **index, api, postgres, prometheus and grafana** services. The `indexer.toml` file is set up for the local devnet. To run against a live network, update `indexer.toml` with the desired configuration.
> The API, Postgres, and Grafana services are the only ones with ports mapped externally. The Postgres database is mapped to port 5433 to avoid conflicting with any instance already running on the default port.
1. Install deps (Docker) and generate the genesis state: `make devnet-allocs`
2. Start the devnet: `make devnet up`. Otherwise, update `indexer.toml` to the desired network config.
3. Start Indexer: `cd indexer && docker-compose up`
4. View the Grafana dashboard at http://localhost:3000
- **User**: admin
- **Password**: optimism
### Run the Indexer (Go Binary or Dockerfile)
1. Prepare the `indexer.toml` file
2. **Run database migrations**: `indexer migrate --config <indexer.toml>`
3. Run index service, cmd: `indexer index --config <indexer.toml>`
4. Run the api service, cmd: `indexer api --config <indexer.toml>`
> Both the index and api services listen on an HTTP and metrics port. Migrations should **always** be run prior to starting the indexer to ensure the latest schemas are in place.
## Architecture
![Architectural Diagram](./ops/assets/architecture.png)
The indexer application is composed of two separate services that operate together:
**Indexer API** - Provides a lightweight API service that supports paginated lookups for bridge data.
**Indexer Service** - A polling-based service that continuously reads and persists OP Stack chain data (i.e., block metadata, system contract events, synchronized bridge events) from an L1 and L2 chain.
### Indexer API
See `api/api.go` & `api/routes/` for the available API endpoints for paginated retrieval of bridge data. Full API docs are **TBD**.
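A thin Go client for these endpoints lives in `api/client.go`. Below is a minimal usage sketch for paginated deposit retrieval against a running API; the base URL, port, and example address are placeholder assumptions and depend on your `indexer.toml`.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum-optimism/optimism/indexer/api"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Base URL and address below are placeholders; point them at a running indexer API.
	client, err := api.NewClient(&api.ClientConfig{
		BaseURL:         "http://localhost:8080",
		PaginationLimit: 100,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Confirm the API is reachable via the /healthz endpoint.
	if err := client.HealthCheck(); err != nil {
		log.Fatal(err)
	}

	// GetAllDepositsByAddress follows the response cursor until every page is consumed.
	addr := common.HexToAddress("0x4204204204204204204204204204204204204204")
	deposits, err := client.GetAllDepositsByAddress(addr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deposits indexed for address:", len(deposits))
}
```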
### Indexer Service
![Service Component Diagram](./ops/assets/indexer-service.png)
The indexer service is responsible for polling and processing real-time batches of L1 and L2 chain data. The indexer service is currently composed of the following key components:
- **Poller Routines** - Individually polls the L1/L2 chain for new blocks and OP Stack contract events.
- **Insertion Routines** - Awaits new batches from the poller routines and inserts them into the database upon retrieval.
- **Bridge Routine** - Polls the database directly for new L1 blocks and bridge events. Upon retrieval, the bridge routine will:
* Process and persist new bridge events
* Synchronize L1 proven/finalized withdrawals with their L2 initialization counterparts
#### L1 Poller
L1 blocks are only indexed if they contain L1 contract events. This is done to reduce the amount of unnecessary data that is indexed. Because of this, the `l1_block_headers` table will not contain every L1 block header, unlike its L2 counterpart.
An **exception** to this is when no log activity has been observed over the specified `ETLAllowedInactivityWindowSeconds` value in the [chain config](https://github.com/ethereum-optimism/optimism/blob/develop/indexer/config/config.go) -- disabled by default with a zero value. Past this duration, the L1 ETL will index the latest observed L1 header.
#### Database
The indexer service currently supports a Postgres database for storing L1/L2 OP Stack chain data. The most up-to-date database schemas can be found in the `./migrations` directory. **Run the idempotent migrations prior to starting the indexer.**
#### HTTP
The indexer service runs a lightweight health server alongside the main service. The health server exposes a single endpoint, `/healthz`, that can be used to check the health of the indexer service. The health assessment doesn't check dependency health (i.e. the database) but rather the health of the indexer service itself.
#### Metrics
The indexer service exposes a set of Prometheus metrics that can be used to monitor the health of the service. The metrics are exposed via the `/metrics` endpoint on the health server.
## Security
All security-related issues should be filed via GitHub issues and will be triaged by the team. The following are some security considerations to take into account when running the service:
- Since the Indexer API only performs read operations on the database, access to the database for any API instances should be restricted to read-only operations.
- The API has no rate limiting or authentication/authorization mechanisms. It is recommended to place the API behind a reverse proxy that can provide these features.
- Postgres connection timeouts are unenforced in the services. It is recommended to configure the database to enforce connection timeouts to prevent connection exhaustion attacks.
- Setting confirmation count values too low can result in indexing failures due to chain reorgs.
## Troubleshooting
Please consult the [troubleshooting](./ops/docs/troubleshooting.md) guide for common failure scenarios and how to resolve them.
# @eth-optimism/indexer-api
## 0.0.5
### Patch Changes
- [#9964](https://github.com/ethereum-optimism/optimism/pull/9964) [`8241220898128e1f61064f22dcb6fdd0a5f043c3`](https://github.com/ethereum-optimism/optimism/commit/8241220898128e1f61064f22dcb6fdd0a5f043c3) Thanks [@roninjin10](https://github.com/roninjin10)! - Removed only-allow command from package.json
## 0.0.4
### Patch Changes
- [#7450](https://github.com/ethereum-optimism/optimism/pull/7450) [`ac90e16a7`](https://github.com/ethereum-optimism/optimism/commit/ac90e16a7f85c4f73661ae6023135c3d00421c1e) Thanks [@roninjin10](https://github.com/roninjin10)! - Updated dev dependencies related to testing that is causing audit tooling to report failures
MIT License
Copyright (c) 2022 Optimism
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Generated TypeScript types for https://github.com/ethereum-optimism/optimism/tree/develop/indexer
// Code generated by tygo. DO NOT EDIT.
//////////
// source: models.go
export interface QueryParams {
Address: any /* common.Address */;
Limit: number /* int */;
Cursor: string;
}
/**
* DepositItem ... Deposit item model for API responses
*/
export interface DepositItem {
guid: string;
from: string;
to: string;
timestamp: number /* uint64 */;
l1BlockHash: string;
l1TxHash: string;
l2TxHash: string;
amount: string;
l1TokenAddress: string;
l2TokenAddress: string;
}
/**
* DepositResponse ... Data model for API JSON response
*/
export interface DepositResponse {
cursor: string;
hasNextPage: boolean;
items: DepositItem[];
}
/**
* WithdrawalItem ... Data model for API JSON response
*/
export interface WithdrawalItem {
guid: string;
from: string;
to: string;
transactionHash: string;
crossDomainMessageHash: string;
timestamp: number /* uint64 */;
l2BlockHash: string;
amount: string;
l1ProvenTxHash: string;
l1FinalizedTxHash: string;
l1TokenAddress: string;
l2TokenAddress: string;
}
/**
* WithdrawalResponse ... Data model for API JSON response
*/
export interface WithdrawalResponse {
cursor: string;
hasNextPage: boolean;
items: WithdrawalItem[];
}
export interface BridgeSupplyView {
l1DepositSum: number /* float64 */;
l2WithdrawalSum: number /* float64 */;
provenSum: number /* float64 */;
finalizedSum: number /* float64 */;
}
"use strict";
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __export = (target, all) => {
for (var name in all)
__defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
if (from && typeof from === "object" || typeof from === "function") {
for (let key of __getOwnPropNames(from))
if (!__hasOwnProp.call(to, key) && key !== except)
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
}
return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
// indexer.ts
var indexer_exports = {};
__export(indexer_exports, {
depositEndpoint: () => depositEndpoint,
withdrawalEndoint: () => withdrawalEndoint
});
module.exports = __toCommonJS(indexer_exports);
var createQueryString = ({ cursor, limit }) => {
if (cursor === void 0 && limit === void 0) {
return "";
}
const queries = [];
if (cursor) {
queries.push(`cursor=${cursor}`);
}
if (limit) {
queries.push(`limit=${limit}`);
}
return `?${queries.join("&")}`;
};
var depositEndpoint = ({ baseUrl = "", address, cursor, limit }) => {
return [baseUrl, "deposits", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
var withdrawalEndoint = ({ baseUrl = "", address, cursor, limit }) => {
return [baseUrl, "withdrawals", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
depositEndpoint,
withdrawalEndoint
});
//# sourceMappingURL=indexer.cjs.map
\ No newline at end of file
{"version":3,"sources":["indexer.ts"],"sourcesContent":["export * from './generated'\n\ntype PaginationOptions = {\n limit?: number\n cursor?: string\n}\n\ntype Options = {\n baseUrl?: string\n address: `0x${string}`\n} & PaginationOptions\n\nconst createQueryString = ({ cursor, limit }: PaginationOptions): string => {\n if (cursor === undefined && limit === undefined) {\n return ''\n }\n const queries: string[] = []\n if (cursor) {\n queries.push(`cursor=${cursor}`)\n }\n if (limit) {\n queries.push(`limit=${limit}`)\n }\n return `?${queries.join('&')}`\n}\n\nexport const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'deposits', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\nexport const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'withdrawals', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAYA,IAAM,oBAAoB,CAAC,EAAE,QAAQ,MAAM,MAAiC;AAC1E,MAAI,WAAW,UAAa,UAAU,QAAW;AAC/C,WAAO;AAAA,EACT;AACA,QAAM,UAAoB,CAAC;AAC3B,MAAI,QAAQ;AACV,YAAQ,KAAK,UAAU,MAAM,EAAE;AAAA,EACjC;AACA,MAAI,OAAO;AACT,YAAQ,KAAK,SAAS,KAAK,EAAE;AAAA,EAC/B;AACA,SAAO,IAAI,QAAQ,KAAK,GAAG,CAAC;AAC9B;AAEO,IAAM,kBAAkB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC5F,SAAO,CAAC,SAAS,YAAY,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC5F;AAEO,IAAM,oBAAoB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC9F,SAAO,CAAC,SAAS,eAAe,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC/F;","names":[]}
\ No newline at end of file
// indexer.ts
var createQueryString = ({ cursor, limit }) => {
if (cursor === void 0 && limit === void 0) {
return "";
}
const queries = [];
if (cursor) {
queries.push(`cursor=${cursor}`);
}
if (limit) {
queries.push(`limit=${limit}`);
}
return `?${queries.join("&")}`;
};
var depositEndpoint = ({ baseUrl = "", address, cursor, limit }) => {
return [baseUrl, "deposits", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
var withdrawalEndoint = ({ baseUrl = "", address, cursor, limit }) => {
return [baseUrl, "withdrawals", `${address}${createQueryString({ cursor, limit })}`].join("/");
};
export {
depositEndpoint,
withdrawalEndoint
};
//# sourceMappingURL=indexer.js.map
\ No newline at end of file
{"version":3,"sources":["indexer.ts"],"sourcesContent":["export * from './generated'\n\ntype PaginationOptions = {\n limit?: number\n cursor?: string\n}\n\ntype Options = {\n baseUrl?: string\n address: `0x${string}`\n} & PaginationOptions\n\nconst createQueryString = ({ cursor, limit }: PaginationOptions): string => {\n if (cursor === undefined && limit === undefined) {\n return ''\n }\n const queries: string[] = []\n if (cursor) {\n queries.push(`cursor=${cursor}`)\n }\n if (limit) {\n queries.push(`limit=${limit}`)\n }\n return `?${queries.join('&')}`\n}\n\nexport const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'deposits', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\nexport const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {\n return [baseUrl, 'withdrawals', `${address}${createQueryString({ cursor, limit })}`].join('/')\n}\n\n"],"mappings":";AAYA,IAAM,oBAAoB,CAAC,EAAE,QAAQ,MAAM,MAAiC;AAC1E,MAAI,WAAW,UAAa,UAAU,QAAW;AAC/C,WAAO;AAAA,EACT;AACA,QAAM,UAAoB,CAAC;AAC3B,MAAI,QAAQ;AACV,YAAQ,KAAK,UAAU,MAAM,EAAE;AAAA,EACjC;AACA,MAAI,OAAO;AACT,YAAQ,KAAK,SAAS,KAAK,EAAE;AAAA,EAC/B;AACA,SAAO,IAAI,QAAQ,KAAK,GAAG,CAAC;AAC9B;AAEO,IAAM,kBAAkB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC5F,SAAO,CAAC,SAAS,YAAY,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC5F;AAEO,IAAM,oBAAoB,CAAC,EAAE,UAAU,IAAI,SAAS,QAAQ,MAAM,MAAuB;AAC9F,SAAO,CAAC,SAAS,eAAe,GAAG,OAAO,GAAG,kBAAkB,EAAE,QAAQ,MAAM,CAAC,CAAC,EAAE,EAAE,KAAK,GAAG;AAC/F;","names":[]}
\ No newline at end of file
import { test, expect } from 'vitest'
import { depositEndpoint, withdrawalEndoint } from './indexer.ts'
test(depositEndpoint.name, () => {
expect(depositEndpoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234', cursor: '0x1235', limit: 10 })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/deposits/0x1234?cursor=0x1235&limit=10"')
expect(depositEndpoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234' })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/deposits/0x1234"')
})
test(withdrawalEndoint.name, () => {
expect(withdrawalEndoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234', cursor: '0x1235', limit: 10 })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/withdrawals/0x1234?cursor=0x1235&limit=10"')
expect(withdrawalEndoint({ baseUrl: 'http://localhost:8080/api/v0', address: '0x1234' })).toMatchInlineSnapshot('"http://localhost:8080/api/v0/withdrawals/0x1234"')
})
export * from './generated'
type PaginationOptions = {
limit?: number
cursor?: string
}
type Options = {
baseUrl?: string
address: `0x${string}`
} & PaginationOptions
const createQueryString = ({ cursor, limit }: PaginationOptions): string => {
if (cursor === undefined && limit === undefined) {
return ''
}
const queries: string[] = []
if (cursor) {
queries.push(`cursor=${cursor}`)
}
if (limit) {
queries.push(`limit=${limit}`)
}
return `?${queries.join('&')}`
}
export const depositEndpoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {
return [baseUrl, 'deposits', `${address}${createQueryString({ cursor, limit })}`].join('/')
}
export const withdrawalEndoint = ({ baseUrl = '', address, cursor, limit }: Options): string => {
return [baseUrl, 'withdrawals', `${address}${createQueryString({ cursor, limit })}`].join('/')
}
{
"name": "@eth-optimism/indexer-api",
"version": "0.0.5",
"description": "[Optimism] typescript types for the indexer service",
"main": "indexer.cjs",
"module": "indexer.js",
"types": "indexer.ts",
"type": "module",
"files": [
"*.ts",
"*.ts",
"*.js",
"*.js.map",
"*.cjs",
"*.cjs.map",
"LICENSE"
],
"scripts": {
"generate:clean": "rm -rf generated.ts indexer.cjs indexer.js",
"generate": "npm run generate:clean && tygo generate && mv ../api/models/index.ts generated.ts && tsup",
"test": "vitest"
},
"keywords": [
"optimism",
"ethereum",
"indexer"
],
"homepage": "https://github.com/ethereum-optimism/optimism/tree/develop/indexer#readme",
"license": "MIT",
"author": "Optimism PBC",
"repository": {
"type": "git",
"url": "https://github.com/ethereum-optimism/optimism.git"
},
"devDependencies": {
"tsup": "^8.0.1",
"vitest": "^1.2.2"
}
}
{
"compilerOptions": {
"outDir": "dist",
"strict": true,
"skipLibCheck": true,
"module": "ESNext",
"moduleResolution": "NodeNext",
"jsx": "react",
"target": "ESNext",
"noEmit": true
},
"include": ["."]
}
import packageJson from './package.json'
export default {
name: packageJson.name,
entry: ['indexer.ts'],
outDir: '.',
format: ['esm', 'cjs'],
splitting: false,
sourcemap: true,
clean: false,
}
packages:
- path: "github.com/ethereum-optimism/optimism/indexer/api/models"
package api
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"strconv"
"sync/atomic"
"time"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/prometheus/client_golang/prometheus"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/indexer/api/routes"
"github.com/ethereum-optimism/optimism/indexer/api/service"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-service/httputil"
"github.com/ethereum-optimism/optimism/op-service/metrics"
)
const ethereumAddressRegex = `^0x[a-fA-F0-9]{40}$`
const (
MetricsNamespace = "op_indexer_api"
addressParam = "{address:%s}"
// Endpoint paths
DocsPath = "/docs"
HealthPath = "/healthz"
DepositsPath = "/api/v0/deposits/"
WithdrawalsPath = "/api/v0/withdrawals/"
SupplyPath = "/api/v0/supply"
)
// APIService ... Indexer API service struct
// TODO : Structured error responses
type APIService struct {
log log.Logger
router *chi.Mux
bv database.BridgeTransfersView
dbClose func() error
metricsRegistry *prometheus.Registry
apiServer *httputil.HTTPServer
metricsServer *httputil.HTTPServer
stopped atomic.Bool
}
// chiMetricsMiddleware ... Injects a metrics recorder into request processing middleware
func chiMetricsMiddleware(rec metrics.HTTPRecorder) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return metrics.NewHTTPRecordingMiddleware(rec, next)
}
}
// NewApi ... Construct a new api instance
func NewApi(ctx context.Context, log log.Logger, cfg *Config) (*APIService, error) {
out := &APIService{log: log, metricsRegistry: metrics.NewRegistry()}
if err := out.initFromConfig(ctx, cfg); err != nil {
return nil, errors.Join(err, out.Stop(ctx)) // close any resources we may have opened already
}
return out, nil
}
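// initFromConfig ... Initializes the DB, metrics server, router, and API server from the given config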
func (a *APIService) initFromConfig(ctx context.Context, cfg *Config) error {
if err := a.initDB(ctx, cfg.DB); err != nil {
return fmt.Errorf("failed to init DB: %w", err)
}
if err := a.startMetricsServer(cfg.MetricsServer); err != nil {
return fmt.Errorf("failed to start metrics server: %w", err)
}
a.initRouter(cfg.HTTPServer)
if err := a.startServer(cfg.HTTPServer); err != nil {
return fmt.Errorf("failed to start API server: %w", err)
}
return nil
}
func (a *APIService) Start(ctx context.Context) error {
// Completed all setup-up jobs at init-time already,
// and the API service does not have any other special starting routines or background-jobs to start.
return nil
}
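// Stop ... Stops the API server, metrics server, and DB connection, returning any accumulated errors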
func (a *APIService) Stop(ctx context.Context) error {
var result error
if a.apiServer != nil {
if err := a.apiServer.Stop(ctx); err != nil {
result = errors.Join(result, fmt.Errorf("failed to stop API server: %w", err))
}
}
if a.metricsServer != nil {
if err := a.metricsServer.Stop(ctx); err != nil {
result = errors.Join(result, fmt.Errorf("failed to stop metrics server: %w", err))
}
}
if a.dbClose != nil {
if err := a.dbClose(); err != nil {
result = errors.Join(result, fmt.Errorf("failed to close DB: %w", err))
}
}
a.stopped.Store(true)
a.log.Info("API service shutdown complete")
return result
}
func (a *APIService) Stopped() bool {
return a.stopped.Load()
}
// Addr ... returns the address that the HTTP server is listening on (excl. http:// prefix, just the host and port)
func (a *APIService) Addr() string {
if a.apiServer == nil {
return ""
}
return a.apiServer.Addr().String()
}
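// initDB ... Opens the database via the configured connector and retains the bridge transfers view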
func (a *APIService) initDB(ctx context.Context, connector DBConnector) error {
db, err := connector.OpenDB(ctx, a.log)
if err != nil {
return fmt.Errorf("failed to connect to database: %w", err)
}
a.dbClose = db.Closer
a.bv = db.BridgeTransfers
return nil
}
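// initRouter ... Wires up the chi router with logging, timeout, recovery, heartbeat, and metrics middleware plus the API routes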
func (a *APIService) initRouter(apiConfig config.ServerConfig) {
v := new(service.Validator)
svc := service.New(v, a.bv, a.log)
apiRouter := chi.NewRouter()
h := routes.NewRoutes(a.log, apiRouter, svc)
apiRouter.Use(middleware.Logger)
apiRouter.Use(middleware.Timeout(time.Duration(apiConfig.WriteTimeout) * time.Second))
apiRouter.Use(middleware.Recoverer)
apiRouter.Use(middleware.Heartbeat(HealthPath))
apiRouter.Use(chiMetricsMiddleware(metrics.NewPromHTTPRecorder(a.metricsRegistry, MetricsNamespace)))
apiRouter.Get(fmt.Sprintf(DepositsPath+addressParam, ethereumAddressRegex), h.L1DepositsHandler)
apiRouter.Get(fmt.Sprintf(WithdrawalsPath+addressParam, ethereumAddressRegex), h.L2WithdrawalsHandler)
apiRouter.Get(SupplyPath, h.SupplyView)
apiRouter.Get(DocsPath, h.DocsHandler)
a.router = apiRouter
}
// startServer ... Starts the API server
func (a *APIService) startServer(serverConfig config.ServerConfig) error {
a.log.Debug("API server listening...", "port", serverConfig.Port)
addr := net.JoinHostPort(serverConfig.Host, strconv.Itoa(serverConfig.Port))
srv, err := httputil.StartHTTPServer(addr, a.router)
if err != nil {
return fmt.Errorf("failed to start API server: %w", err)
}
a.log.Info("API server started", "addr", srv.Addr().String())
a.apiServer = srv
return nil
}
// startMetricsServer ... Starts the metrics server
func (a *APIService) startMetricsServer(metricsConfig config.ServerConfig) error {
a.log.Debug("starting metrics server...", "port", metricsConfig.Port)
srv, err := metrics.StartServer(a.metricsRegistry, metricsConfig.Host, metricsConfig.Port)
if err != nil {
return fmt.Errorf("failed to start metrics server: %w", err)
}
a.log.Info("Metrics server started", "addr", srv.Addr().String())
a.metricsServer = srv
return nil
}
package api
import (
"fmt"
"io"
"net/http"
"time"
"encoding/json"
"github.com/ethereum-optimism/optimism/indexer/api/models"
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum/go-ethereum/common"
)
const (
urlParams = "?cursor=%s&limit=%d"
defaultPagingLimit = 100
// method names
healthz = "get_health"
deposits = "get_deposits"
withdrawals = "get_withdrawals"
sum = "get_sum"
)
// Option ... Provides configuration through callback injection
type Option func(*Client) error
// WithMetrics ... Triggers metric optionality
func WithMetrics(m metrics.RPCClientMetricer) Option {
return func(c *Client) error {
c.metrics = m
return nil
}
}
// WithTimeout ... Embeds a timeout limit to request
func WithTimeout(t time.Duration) Option {
return func(c *Client) error {
c.c.Timeout = t
return nil
}
}
// Config ... Indexer client config struct
type ClientConfig struct {
PaginationLimit int
BaseURL string
}
// Client ... Indexer client struct
type Client struct {
cfg *ClientConfig
c *http.Client
metrics metrics.RPCClientMetricer
}
// NewClient ... Construct a new indexer client
func NewClient(cfg *ClientConfig, opts ...Option) (*Client, error) {
if cfg.PaginationLimit <= 0 {
cfg.PaginationLimit = defaultPagingLimit
}
c := &http.Client{}
client := &Client{cfg: cfg, c: c}
for _, opt := range opts {
err := opt(client)
if err != nil {
return nil, err
}
}
return client, nil
}
// doRecordRequest ... Performs a read request on a provided endpoint w/ telemetry
func (c *Client) doRecordRequest(method string, endpoint string) ([]byte, error) {
var recordRequest func(error) = nil
if c.metrics != nil {
recordRequest = c.metrics.RecordRPCClientRequest(method)
}
resp, err := c.c.Get(endpoint)
if recordRequest != nil {
recordRequest(err)
}
if err != nil {
return nil, err
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response body: %w", err)
}
if err = resp.Body.Close(); err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("endpoint failed with status code %d", resp.StatusCode)
}
return body, nil
}
// HealthCheck ... Checks the health of the indexer API
func (c *Client) HealthCheck() error {
_, err := c.doRecordRequest(healthz, c.cfg.BaseURL+HealthPath)
return err
}
// GetDepositsByAddress ... Gets a deposit response object provided an L1 address and cursor
func (c *Client) GetDepositsByAddress(l1Address common.Address, cursor string) (*models.DepositResponse, error) {
var response models.DepositResponse
url := c.cfg.BaseURL + DepositsPath + l1Address.String() + urlParams
endpoint := fmt.Sprintf(url, cursor, c.cfg.PaginationLimit)
resp, err := c.doRecordRequest(deposits, endpoint)
if err != nil {
return nil, err
}
if err := json.Unmarshal(resp, &response); err != nil {
return nil, err
}
return &response, nil
}
// GetAllDepositsByAddress ... Gets all deposits provided a L1 address
func (c *Client) GetAllDepositsByAddress(l1Address common.Address) ([]models.DepositItem, error) {
var deposits []models.DepositItem
cursor := ""
for {
dResponse, err := c.GetDepositsByAddress(l1Address, cursor)
if err != nil {
return nil, err
}
deposits = append(deposits, dResponse.Items...)
if !dResponse.HasNextPage {
break
}
cursor = dResponse.Cursor
}
return deposits, nil
}
// GetSupplyAssessment ... Returns an assessment of the current supply
// on both L1 and L2. This includes the individual sums of
// (L1/L2) deposits and withdrawals
func (c *Client) GetSupplyAssessment() (*models.BridgeSupplyView, error) {
url := c.cfg.BaseURL + SupplyPath
resp, err := c.doRecordRequest(sum, url)
if err != nil {
return nil, err
}
var bsv *models.BridgeSupplyView
if err := json.Unmarshal(resp, &bsv); err != nil {
return nil, err
}
return bsv, nil
}
// GetAllWithdrawalsByAddress ... Gets all withdrawals provided a L2 address
func (c *Client) GetAllWithdrawalsByAddress(l2Address common.Address) ([]models.WithdrawalItem, error) {
var withdrawals []models.WithdrawalItem
cursor := ""
for {
wResponse, err := c.GetWithdrawalsByAddress(l2Address, cursor)
if err != nil {
return nil, err
}
withdrawals = append(withdrawals, wResponse.Items...)
if !wResponse.HasNextPage {
break
}
cursor = wResponse.Cursor
}
return withdrawals, nil
}
// GetWithdrawalsByAddress ... Gets a withdrawal response object provided an L2 address and cursor
func (c *Client) GetWithdrawalsByAddress(l2Address common.Address, cursor string) (*models.WithdrawalResponse, error) {
var wResponse *models.WithdrawalResponse
url := c.cfg.BaseURL + WithdrawalsPath + l2Address.String() + urlParams
endpoint := fmt.Sprintf(url, cursor, c.cfg.PaginationLimit)
resp, err := c.doRecordRequest(withdrawals, endpoint)
if err != nil {
return nil, err
}
if err := json.Unmarshal(resp, &wResponse); err != nil {
return nil, err
}
return wResponse, nil
}
package api
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/ethereum-optimism/optimism/indexer/api/models"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// MockBridgeTransfersView mocks the BridgeTransfersView interface
type MockBridgeTransfersView struct{}
var mockAddress = "0x4204204204204204204204204204204204204204"
var apiConfig = config.ServerConfig{
Host: "localhost",
Port: 0, // random port, to allow parallel tests
}
var metricsConfig = config.ServerConfig{
Host: "localhost",
Port: 0, // random port, to allow parallel tests
}
var (
deposit = database.L1BridgeDeposit{
TransactionSourceHash: common.HexToHash("abc"),
BridgeTransfer: database.BridgeTransfer{
CrossDomainMessageHash: &common.Hash{},
Tx: database.Transaction{},
TokenPair: database.TokenPair{},
},
}
withdrawal = database.L2BridgeWithdrawal{
TransactionWithdrawalHash: common.HexToHash("0x420"),
BridgeTransfer: database.BridgeTransfer{
CrossDomainMessageHash: &common.Hash{},
Tx: database.Transaction{},
TokenPair: database.TokenPair{},
},
}
)
func (mbv *MockBridgeTransfersView) L1BridgeDeposit(hash common.Hash) (*database.L1BridgeDeposit, error) {
return &deposit, nil
}
func (mbv *MockBridgeTransfersView) L1BridgeDepositWithFilter(filter database.BridgeTransfer) (*database.L1BridgeDeposit, error) {
return &deposit, nil
}
func (mbv *MockBridgeTransfersView) L2BridgeWithdrawal(address common.Hash) (*database.L2BridgeWithdrawal, error) {
return &withdrawal, nil
}
func (mbv *MockBridgeTransfersView) L2BridgeWithdrawalWithFilter(filter database.BridgeTransfer) (*database.L2BridgeWithdrawal, error) {
return &withdrawal, nil
}
func (mbv *MockBridgeTransfersView) L1BridgeDepositsByAddress(address common.Address, cursor string, limit int) (*database.L1BridgeDepositsResponse, error) {
return &database.L1BridgeDepositsResponse{
Deposits: []database.L1BridgeDepositWithTransactionHashes{
{
L1BridgeDeposit: deposit,
L1TransactionHash: common.HexToHash("0x123"),
L2TransactionHash: common.HexToHash("0x555"),
L1BlockHash: common.HexToHash("0x456"),
},
},
}, nil
}
func (mbv *MockBridgeTransfersView) L2BridgeWithdrawalsByAddress(address common.Address, cursor string, limit int) (*database.L2BridgeWithdrawalsResponse, error) {
return &database.L2BridgeWithdrawalsResponse{
Withdrawals: []database.L2BridgeWithdrawalWithTransactionHashes{
{
L2BridgeWithdrawal: withdrawal,
L2TransactionHash: common.HexToHash("0x789"),
L2BlockHash: common.HexToHash("0x456"),
ProvenL1TransactionHash: common.HexToHash("0x123"),
FinalizedL1TransactionHash: common.HexToHash("0x123"),
},
},
}, nil
}
func (mbv *MockBridgeTransfersView) L1TxDepositSum() (float64, error) {
return 69, nil
}
func (mbv *MockBridgeTransfersView) L2BridgeWithdrawalSum(database.WithdrawFilter) (float64, error) {
return 420, nil
}
func TestHealthz(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
cfg := &Config{
DB: &TestDBConnector{BridgeTransfers: &MockBridgeTransfersView{}},
HTTPServer: apiConfig,
MetricsServer: metricsConfig,
}
api, err := NewApi(context.Background(), logger, cfg)
require.NoError(t, err)
request, err := http.NewRequest("GET", "http://"+api.Addr()+"/healthz", nil)
assert.Nil(t, err)
responseRecorder := httptest.NewRecorder()
api.router.ServeHTTP(responseRecorder, request)
assert.Equal(t, http.StatusOK, responseRecorder.Code)
}
func TestL1BridgeDepositsHandler(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
cfg := &Config{
DB: &TestDBConnector{BridgeTransfers: &MockBridgeTransfersView{}},
HTTPServer: apiConfig,
MetricsServer: metricsConfig,
}
api, err := NewApi(context.Background(), logger, cfg)
require.NoError(t, err)
request, err := http.NewRequest("GET", fmt.Sprintf("http://"+api.Addr()+"/api/v0/deposits/%s", mockAddress), nil)
assert.Nil(t, err)
responseRecorder := httptest.NewRecorder()
api.router.ServeHTTP(responseRecorder, request)
assert.Equal(t, http.StatusOK, responseRecorder.Code)
var resp models.DepositResponse
err = json.Unmarshal(responseRecorder.Body.Bytes(), &resp)
assert.Nil(t, err)
require.Len(t, resp.Items, 1)
assert.Equal(t, resp.Items[0].L1BlockHash, common.HexToHash("0x456").String())
assert.Equal(t, resp.Items[0].L1TxHash, common.HexToHash("0x123").String())
assert.Equal(t, resp.Items[0].Timestamp, deposit.Tx.Timestamp)
assert.Equal(t, resp.Items[0].L2TxHash, common.HexToHash("555").String())
}
func TestL2BridgeWithdrawalsByAddressHandler(t *testing.T) {
logger := testlog.Logger(t, log.LevelInfo)
cfg := &Config{
DB: &TestDBConnector{BridgeTransfers: &MockBridgeTransfersView{}},
HTTPServer: apiConfig,
MetricsServer: metricsConfig,
}
api, err := NewApi(context.Background(), logger, cfg)
require.NoError(t, err)
request, err := http.NewRequest("GET", fmt.Sprintf("http://"+api.Addr()+"/api/v0/withdrawals/%s", mockAddress), nil)
assert.Nil(t, err)
responseRecorder := httptest.NewRecorder()
api.router.ServeHTTP(responseRecorder, request)
var resp models.WithdrawalResponse
err = json.Unmarshal(responseRecorder.Body.Bytes(), &resp)
assert.Nil(t, err)
require.Len(t, resp.Items, 1)
assert.Equal(t, resp.Items[0].Guid, withdrawal.TransactionWithdrawalHash.String())
assert.Equal(t, resp.Items[0].L2BlockHash, common.HexToHash("0x456").String())
assert.Equal(t, resp.Items[0].From, withdrawal.Tx.FromAddress.String())
assert.Equal(t, resp.Items[0].To, withdrawal.Tx.ToAddress.String())
assert.Equal(t, resp.Items[0].TransactionHash, common.HexToHash("0x789").String())
assert.Equal(t, resp.Items[0].Amount, withdrawal.Tx.Amount.String())
assert.Equal(t, resp.Items[0].L1ProvenTxHash, common.HexToHash("0x123").String())
assert.Equal(t, resp.Items[0].L1FinalizedTxHash, common.HexToHash("0x123").String())
assert.Equal(t, resp.Items[0].L1TokenAddress, withdrawal.TokenPair.RemoteTokenAddress.String())
assert.Equal(t, resp.Items[0].L2TokenAddress, withdrawal.TokenPair.LocalTokenAddress.String())
assert.Equal(t, resp.Items[0].Timestamp, withdrawal.Tx.Timestamp)
}
package api
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
)
// DB represents the abstract DB access the API has.
type DB struct {
BridgeTransfers database.BridgeTransfersView
Closer func() error
}
// DBConfigConnector implements a fully config based DBConnector
type DBConfigConnector struct {
config.DBConfig
}
func (cfg *DBConfigConnector) OpenDB(ctx context.Context, log log.Logger) (*DB, error) {
db, err := database.NewDB(ctx, log, cfg.DBConfig)
if err != nil {
return nil, fmt.Errorf("failed to connect to database: %w", err)
}
return &DB{
BridgeTransfers: db.BridgeTransfers,
Closer: db.Close,
}, nil
}
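// TestDBConnector ... DBConnector that returns a pre-built bridge transfers view, used by tests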
type TestDBConnector struct {
BridgeTransfers database.BridgeTransfersView
}
func (tdb *TestDBConnector) OpenDB(ctx context.Context, log log.Logger) (*DB, error) {
return &DB{
BridgeTransfers: tdb.BridgeTransfers,
Closer: func() error {
log.Info("API service closed test DB view")
return nil
},
}, nil
}
// DBConnector is an interface: the config may provide different ways to access the DB.
// This is implemented in tests to provide custom DB views, or share the DB with other services.
type DBConnector interface {
OpenDB(ctx context.Context, log log.Logger) (*DB, error)
}
// Config for the API service
type Config struct {
DB DBConnector
HTTPServer config.ServerConfig
MetricsServer config.ServerConfig
}
package models
import (
"github.com/ethereum/go-ethereum/common"
)
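// QueryParams ... Validated query params for paginated bridge transfer lookups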
type QueryParams struct {
Address common.Address
Limit int
Cursor string
}
// DepositItem ... Deposit item model for API responses
type DepositItem struct {
Guid string `json:"guid"`
From string `json:"from"`
To string `json:"to"`
Timestamp uint64 `json:"timestamp"`
L1BlockHash string `json:"l1BlockHash"`
L1TxHash string `json:"l1TxHash"`
L2TxHash string `json:"l2TxHash"`
Amount string `json:"amount"`
L1TokenAddress string `json:"l1TokenAddress"`
L2TokenAddress string `json:"l2TokenAddress"`
}
// DepositResponse ... Data model for API JSON response
type DepositResponse struct {
Cursor string `json:"cursor"`
HasNextPage bool `json:"hasNextPage"`
Items []DepositItem `json:"items"`
}
// WithdrawalItem ... Data model for API JSON response
type WithdrawalItem struct {
Guid string `json:"guid"`
From string `json:"from"`
To string `json:"to"`
TransactionHash string `json:"transactionHash"`
CrossDomainMessageHash string `json:"crossDomainMessageHash"`
Timestamp uint64 `json:"timestamp"`
L2BlockHash string `json:"l2BlockHash"`
Amount string `json:"amount"`
L1ProvenTxHash string `json:"l1ProvenTxHash"`
L1FinalizedTxHash string `json:"l1FinalizedTxHash"`
L1TokenAddress string `json:"l1TokenAddress"`
L2TokenAddress string `json:"l2TokenAddress"`
}
// WithdrawalResponse ... Data model for API JSON response
type WithdrawalResponse struct {
Cursor string `json:"cursor"`
HasNextPage bool `json:"hasNextPage"`
Items []WithdrawalItem `json:"items"`
}
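// BridgeSupplyView ... Data model for the bridge supply API JSON response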
type BridgeSupplyView struct {
L1DepositSum float64 `json:"l1DepositSum"`
InitWithdrawalSum float64 `json:"l2WithdrawalSum"`
ProvenWithdrawSum float64 `json:"provenSum"`
FinalizedWithdrawSum float64 `json:"finalizedSum"`
}
package routes
import (
"encoding/json"
"net/http"
)
const (
InternalServerError = "Internal server error"
)
// jsonResponse ... Marshals and writes a JSON response provided arbitrary data
func jsonResponse(w http.ResponseWriter, data interface{}, statusCode int) error {
w.Header().Set("Content-Type", "application/json")
jsonData, err := json.Marshal(data)
if err != nil {
http.Error(w, InternalServerError, http.StatusInternalServerError)
return err
}
w.WriteHeader(statusCode)
_, err = w.Write(jsonData)
if err != nil {
http.Error(w, InternalServerError, http.StatusInternalServerError)
return err
}
return nil
}
package routes
import (
"net/http"
"github.com/go-chi/chi/v5"
)
// L1DepositsHandler ... Handles /api/v0/deposits/{address} GET requests
func (h Routes) L1DepositsHandler(w http.ResponseWriter, r *http.Request) {
address := chi.URLParam(r, "address")
cursor := r.URL.Query().Get("cursor")
limit := r.URL.Query().Get("limit")
params, err := h.svc.QueryParams(address, cursor, limit)
if err != nil {
http.Error(w, "invalid query params", http.StatusBadRequest)
h.logger.Error("error reading request params", "err", err.Error())
return
}
deposits, err := h.svc.GetDeposits(params)
if err != nil {
http.Error(w, "internal server error", http.StatusInternalServerError)
h.logger.Error("error fetching deposits", "err", err.Error())
return
}
resp := h.svc.DepositResponse(deposits)
err = jsonResponse(w, resp, http.StatusOK)
if err != nil {
h.logger.Error("error writing response", "err", err)
}
}
package routes
import (
"net/http"
"github.com/go-chi/docgen"
)
func (h Routes) DocsHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
docs := docgen.MarkdownRoutesDoc(h.router, docgen.MarkdownOpts{
ProjectPath: "github.com/ethereum-optimism/optimism/indexer",
// Intro text included at the top of the generated markdown file.
Intro: "Generated documentation for Optimism indexer",
})
_, err := w.Write([]byte(docs))
if err != nil {
h.logger.Error("error writing docs", "err", err)
http.Error(w, "Internal server error fetching docs", http.StatusInternalServerError)
}
}
package routes
import (
"github.com/ethereum-optimism/optimism/indexer/api/service"
"github.com/ethereum/go-ethereum/log"
"github.com/go-chi/chi/v5"
)
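// Routes ... Route handler struct holding the logger, router, and backing service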
type Routes struct {
logger log.Logger
router *chi.Mux
svc service.Service
}
// NewRoutes ... Construct a new route handler instance
func NewRoutes(l log.Logger, r *chi.Mux, svc service.Service) Routes {
return Routes{
logger: l,
router: r,
svc: svc,
}
}
package routes
import (
"net/http"
)
// SupplyView ... Handles /api/v0/supply GET requests
func (h Routes) SupplyView(w http.ResponseWriter, r *http.Request) {
view, err := h.svc.GetSupplyInfo()
if err != nil {
http.Error(w, "Internal server error", http.StatusInternalServerError)
h.logger.Error("error getting supply info", "err", err)
return
}
err = jsonResponse(w, view, http.StatusOK)
if err != nil {
h.logger.Error("error writing response", "err", err)
}
}
package routes
import (
"net/http"
"github.com/go-chi/chi/v5"
)
// L2WithdrawalsHandler ... Handles /api/v0/withdrawals/{address} GET requests
func (h Routes) L2WithdrawalsHandler(w http.ResponseWriter, r *http.Request) {
address := chi.URLParam(r, "address")
cursor := r.URL.Query().Get("cursor")
limit := r.URL.Query().Get("limit")
params, err := h.svc.QueryParams(address, cursor, limit)
if err != nil {
http.Error(w, "Invalid query params", http.StatusBadRequest)
h.logger.Error("Invalid query params", "err", err.Error())
return
}
withdrawals, err := h.svc.GetWithdrawals(params)
if err != nil {
http.Error(w, "Internal server error", http.StatusInternalServerError)
h.logger.Error("Error getting withdrawals", "err", err.Error())
return
}
resp := h.svc.WithdrawResponse(withdrawals)
err = jsonResponse(w, resp, http.StatusOK)
if err != nil {
h.logger.Error("Error writing response", "err", err.Error())
}
}
package service
import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/indexer/api/models"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum/go-ethereum/common"
)
type Service interface {
GetDeposits(*models.QueryParams) (*database.L1BridgeDepositsResponse, error)
DepositResponse(*database.L1BridgeDepositsResponse) models.DepositResponse
GetWithdrawals(params *models.QueryParams) (*database.L2BridgeWithdrawalsResponse, error)
WithdrawResponse(*database.L2BridgeWithdrawalsResponse) models.WithdrawalResponse
GetSupplyInfo() (*models.BridgeSupplyView, error)
QueryParams(address, cursor, limit string) (*models.QueryParams, error)
}
type HandlerSvc struct {
v *Validator
db database.BridgeTransfersView
logger log.Logger
}
func New(v *Validator, db database.BridgeTransfersView, l log.Logger) Service {
return &HandlerSvc{
logger: l,
v: v,
db: db,
}
}
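// QueryParams ... Validates the raw address, cursor, and limit params and assembles them into a QueryParams model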
func (svc *HandlerSvc) QueryParams(a, c, l string) (*models.QueryParams, error) {
address, err := svc.v.ParseValidateAddress(a)
if err != nil {
svc.logger.Error("invalid address param", "param", a, "err", err)
return nil, err
}
err = svc.v.ValidateCursor(c)
if err != nil {
svc.logger.Error("invalid cursor param", "cursor", c, "err", err)
return nil, err
}
limit, err := svc.v.ParseValidateLimit(l)
if err != nil {
svc.logger.Error("invalid query param", "cursor", c, "err", err)
return nil, err
}
return &models.QueryParams{
Address: address,
Cursor: c,
Limit: limit,
}, nil
}
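// GetWithdrawals ... Fetches paginated withdrawals for the given address from the database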
func (svc *HandlerSvc) GetWithdrawals(params *models.QueryParams) (*database.L2BridgeWithdrawalsResponse, error) {
withdrawals, err := svc.db.L2BridgeWithdrawalsByAddress(params.Address, params.Cursor, params.Limit)
if err != nil {
svc.logger.Error("error getting withdrawals", "err", err.Error(), "address", params.Address.String())
return nil, err
}
svc.logger.Debug("read withdrawals from db", "count", len(withdrawals.Withdrawals), "address", params.Address.String())
return withdrawals, nil
}
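// WithdrawResponse ... Converts a database.L2BridgeWithdrawalsResponse to a models.WithdrawalResponse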
func (svc *HandlerSvc) WithdrawResponse(withdrawals *database.L2BridgeWithdrawalsResponse) models.WithdrawalResponse {
items := make([]models.WithdrawalItem, len(withdrawals.Withdrawals))
for i, withdrawal := range withdrawals.Withdrawals {
cdh := withdrawal.L2BridgeWithdrawal.CrossDomainMessageHash
if cdh == nil { // Zero value indicates that the withdrawal didn't have a cross domain message
cdh = &common.Hash{0}
}
item := models.WithdrawalItem{
Guid: withdrawal.L2BridgeWithdrawal.TransactionWithdrawalHash.String(),
L2BlockHash: withdrawal.L2BlockHash.String(),
Timestamp: withdrawal.L2BridgeWithdrawal.Tx.Timestamp,
From: withdrawal.L2BridgeWithdrawal.Tx.FromAddress.String(),
To: withdrawal.L2BridgeWithdrawal.Tx.ToAddress.String(),
TransactionHash: withdrawal.L2TransactionHash.String(),
Amount: withdrawal.L2BridgeWithdrawal.Tx.Amount.String(),
CrossDomainMessageHash: cdh.String(),
L1ProvenTxHash: withdrawal.ProvenL1TransactionHash.String(),
L1FinalizedTxHash: withdrawal.FinalizedL1TransactionHash.String(),
L1TokenAddress: withdrawal.L2BridgeWithdrawal.TokenPair.RemoteTokenAddress.String(),
L2TokenAddress: withdrawal.L2BridgeWithdrawal.TokenPair.LocalTokenAddress.String(),
}
items[i] = item
}
return models.WithdrawalResponse{
Cursor: withdrawals.Cursor,
HasNextPage: withdrawals.HasNextPage,
Items: items,
}
}
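// GetDeposits ... Fetches paginated deposits for the given address from the database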
func (svc *HandlerSvc) GetDeposits(params *models.QueryParams) (*database.L1BridgeDepositsResponse, error) {
deposits, err := svc.db.L1BridgeDepositsByAddress(params.Address, params.Cursor, params.Limit)
if err != nil {
svc.logger.Error("error getting deposits", "err", err.Error(), "address", params.Address.String())
return nil, err
}
svc.logger.Debug("read deposits from db", "count", len(deposits.Deposits), "address", params.Address.String())
return deposits, nil
}
// DepositResponse ... Converts a database.L1BridgeDepositsResponse to an api.DepositResponse
func (svc *HandlerSvc) DepositResponse(deposits *database.L1BridgeDepositsResponse) models.DepositResponse {
items := make([]models.DepositItem, len(deposits.Deposits))
for i, deposit := range deposits.Deposits {
item := models.DepositItem{
Guid: deposit.L1BridgeDeposit.TransactionSourceHash.String(),
L1BlockHash: deposit.L1BlockHash.String(),
Timestamp: deposit.L1BridgeDeposit.Tx.Timestamp,
L1TxHash: deposit.L1TransactionHash.String(),
L2TxHash: deposit.L2TransactionHash.String(),
From: deposit.L1BridgeDeposit.Tx.FromAddress.String(),
To: deposit.L1BridgeDeposit.Tx.ToAddress.String(),
Amount: deposit.L1BridgeDeposit.Tx.Amount.String(),
L1TokenAddress: deposit.L1BridgeDeposit.TokenPair.LocalTokenAddress.String(),
L2TokenAddress: deposit.L1BridgeDeposit.TokenPair.RemoteTokenAddress.String(),
}
items[i] = item
}
return models.DepositResponse{
Cursor: deposits.Cursor,
HasNextPage: deposits.HasNextPage,
Items: items,
}
}
// GetSupplyInfo ... Fetch native bridge supply info
func (svc *HandlerSvc) GetSupplyInfo() (*models.BridgeSupplyView, error) {
depositSum, err := svc.db.L1TxDepositSum()
if err != nil {
svc.logger.Error("error getting deposit sum", "err", err)
return nil, err
}
initSum, err := svc.db.L2BridgeWithdrawalSum(database.All)
if err != nil {
svc.logger.Error("error getting init sum", "err", err)
return nil, err
}
provenSum, err := svc.db.L2BridgeWithdrawalSum(database.Proven)
if err != nil {
svc.logger.Error("error getting proven sum", "err", err)
return nil, err
}
finalizedSum, err := svc.db.L2BridgeWithdrawalSum(database.Finalized)
if err != nil {
svc.logger.Error("error getting finalized sum", "err", err)
return nil, err
}
return &models.BridgeSupplyView{
L1DepositSum: depositSum,
InitWithdrawalSum: initSum,
ProvenWithdrawSum: provenSum,
FinalizedWithdrawSum: finalizedSum,
}, nil
}
package service_test
import (
"fmt"
"reflect"
"testing"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/indexer/api/service"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
func assertFieldsAreSet(t *testing.T, item any) {
structType := reflect.TypeOf(item)
structVal := reflect.ValueOf(item)
fieldNum := structVal.NumField()
for i := 0; i < fieldNum; i++ {
field := structVal.Field(i)
fieldName := structType.Field(i).Name
isSet := field.IsValid() && !field.IsZero()
require.True(t, isSet, fmt.Sprintf("%s is not set", fieldName))
}
}
func TestWithdrawalResponse(t *testing.T) {
svc := service.New(nil, nil, nil)
cdh := common.HexToHash("0x2")
withdraws := &database.L2BridgeWithdrawalsResponse{
Withdrawals: []database.L2BridgeWithdrawalWithTransactionHashes{
{
L2BridgeWithdrawal: database.L2BridgeWithdrawal{
TransactionWithdrawalHash: common.HexToHash("0x1"),
BridgeTransfer: database.BridgeTransfer{
CrossDomainMessageHash: &cdh,
Tx: database.Transaction{
FromAddress: common.HexToAddress("0x3"),
ToAddress: common.HexToAddress("0x4"),
Timestamp: 5,
},
TokenPair: database.TokenPair{
LocalTokenAddress: common.HexToAddress("0x6"),
RemoteTokenAddress: common.HexToAddress("0x7"),
},
},
},
},
},
}
response := svc.WithdrawResponse(withdraws)
require.NotEmpty(t, response.Items)
require.Len(t, response.Items, 1)
assertFieldsAreSet(t, response.Items[0])
}
func TestDepositResponse(t *testing.T) {
cdh := common.HexToHash("0x2")
svc := service.New(nil, nil, nil)
deposits := &database.L1BridgeDepositsResponse{
Deposits: []database.L1BridgeDepositWithTransactionHashes{
{
L1BridgeDeposit: database.L1BridgeDeposit{
BridgeTransfer: database.BridgeTransfer{
CrossDomainMessageHash: &cdh,
Tx: database.Transaction{
FromAddress: common.HexToAddress("0x3"),
ToAddress: common.HexToAddress("0x4"),
Timestamp: 5,
},
TokenPair: database.TokenPair{
LocalTokenAddress: common.HexToAddress("0x6"),
RemoteTokenAddress: common.HexToAddress("0x7"),
},
},
},
},
},
}
response := svc.DepositResponse(deposits)
require.NotEmpty(t, response.Items)
require.Len(t, response.Items, 1)
assertFieldsAreSet(t, response.Items[0])
}
func TestQueryParams(t *testing.T) {
var tests = []struct {
name string
test func(*testing.T, service.Service)
}{
{
name: "empty params",
test: func(t *testing.T, svc service.Service) {
params, err := svc.QueryParams("", "", "")
require.Error(t, err)
require.Nil(t, params)
},
},
{
name: "empty params except address",
test: func(t *testing.T, svc service.Service) {
addr := common.HexToAddress("0x420")
params, err := svc.QueryParams(addr.String(), "", "")
require.NoError(t, err)
require.NotNil(t, params)
require.Equal(t, addr, params.Address)
require.Equal(t, 100, params.Limit)
require.Equal(t, "", params.Cursor)
},
},
}
v := new(service.Validator)
svc := service.New(v, nil, log.New())
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.test(t, svc)
})
}
}
package service
import (
"errors"
"strconv"
"github.com/ethereum/go-ethereum/common"
)
// Validator ... Validates API user request parameters
type Validator struct{}
// ParseValidateAddress ... Validates and parses the address query parameter
func (v *Validator) ParseValidateAddress(addr string) (common.Address, error) {
if !common.IsHexAddress(addr) {
return common.Address{}, errors.New("address must be represented as a valid hexadecimal string")
}
parsedAddr := common.HexToAddress(addr)
if parsedAddr == common.HexToAddress("0x0") {
return common.Address{}, errors.New("address cannot be the zero address")
}
return parsedAddr, nil
}
// ValidateCursor ... Validates and parses the cursor query parameter
func (v *Validator) ValidateCursor(cursor string) error {
if cursor == "" {
return nil
}
if len(cursor) != 66 { // 0x + 64 chars
return errors.New("cursor must be a 32 byte hex string")
}
if cursor[:2] != "0x" {
return errors.New("cursor must begin with 0x")
}
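// Note: only the length and the 0x prefix are checked here; the remaining
// characters are not themselves validated as hexadecimal.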
return nil
}
// ParseValidateLimit ... Validates and parses the limit query parameter
func (v *Validator) ParseValidateLimit(limit string) (int, error) {
if limit == "" {
return 100, nil
}
val, err := strconv.Atoi(limit)
if err != nil {
return 0, errors.New("limit must be an integer value")
}
if val <= 0 {
return 0, errors.New("limit must be greater than 0")
}
// TODO - Add a check against a max limit value
return val, nil
}
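// parseQuerySketch is an illustrative helper (not part of the original file) showing
// how the three validators above might be chained when parsing request query
// parameters; its name and return shape are assumptions, not the service's actual
// QueryParams implementation.
func (v *Validator) parseQuerySketch(address, cursor, limit string) (common.Address, string, int, error) {
	addr, err := v.ParseValidateAddress(address)
	if err != nil {
		return common.Address{}, "", 0, err
	}
	if err := v.ValidateCursor(cursor); err != nil {
		return common.Address{}, "", 0, err
	}
	lim, err := v.ParseValidateLimit(limit)
	if err != nil {
		return common.Address{}, "", 0, err
	}
	return addr, cursor, lim, nil
}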
package service
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestParseValidateLimit(t *testing.T) {
v := Validator{}
// (1) Happy case
limit := "100"
_, err := v.ParseValidateLimit(limit)
require.NoError(t, err, "limit should be valid")
// (2) Boundary validation
limit = "0"
_, err = v.ParseValidateLimit(limit)
require.Error(t, err, "limit must be greater than 0")
// (3) Type validation
limit = "abc"
_, err = v.ParseValidateLimit(limit)
require.Error(t, err, "limit must be an integer value")
}
func TestParseValidateAddress(t *testing.T) {
v := Validator{}
// (1) Happy case
addr := "0x95222290DD7278Aa3Ddd389Cc1E1d165CC4BAfe5"
_, err := v.ParseValidateAddress(addr)
require.NoError(t, err, "address should pass")
// (2) Invalid hex
addr = "🫡"
_, err = v.ParseValidateAddress(addr)
require.Error(t, err, "address must be represented as a valid hexadecimal string")
// (3) Zero address
addr = "0x0000000000000000000000000000000000000000"
_, err = v.ParseValidateAddress(addr)
require.Error(t, err, "address cannot be the zero address")
}
func Test_ParseValidateCursor(t *testing.T) {
v := Validator{}
// (1) Happy case
cursor := "0xf3fd2eb696dab4263550b938726f9b3606e334cce6ebe27446bc26cb700b94e0"
err := v.ValidateCursor(cursor)
require.NoError(t, err, "cursor should pass")
// (2) Invalid length
cursor = "0x000"
err = v.ValidateCursor(cursor)
require.Error(t, err, "cursor must be a 32 byte hex string")
// (3) Invalid hex
cursor = "0🫡"
err = v.ValidateCursor(cursor)
require.Error(t, err, "invalid cursor should be rejected")
}
package bigint
import "math/big"
var (
Zero = big.NewInt(0)
One = big.NewInt(1)
)
// Clamp bounds the inclusive range (start, end) to at most `size` elements: it returns `end` unchanged
// when the range already fits, and otherwise returns a new big.Int equal to `start + size - 1`.
// @note This function assumes that `start` is not greater than `end`.
func Clamp(start, end *big.Int, size uint64) *big.Int {
temp := new(big.Int)
count := temp.Sub(end, start).Uint64() + 1
if count <= size {
return end
}
// we re-use the allocated temp as the new end
temp.Add(start, big.NewInt(int64(size-1)))
return temp
}
// Matcher returns a comparison function that reports whether a big.Int equals `num`
func Matcher(num int64) func(*big.Int) bool {
return func(bi *big.Int) bool { return bi.Int64() == num }
}
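// WeiToETH converts a wei-denominated amount into an ETH-denominated big.Float (dividing by 1e18)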
func WeiToETH(wei *big.Int) *big.Float {
f := new(big.Float)
f.SetString(wei.String())
return f.Quo(f, big.NewFloat(1e18))
}
package bigint
import (
"math/big"
"testing"
"github.com/stretchr/testify/require"
)
func TestClamp(t *testing.T) {
start := big.NewInt(1)
end := big.NewInt(10)
// When the inclusive (start, end) range fits within the size,
// the same end pointer should be returned
// larger range
result := Clamp(start, end, 20)
require.True(t, end == result)
// exact range
result = Clamp(start, end, 10)
require.True(t, end == result)
// smaller range
result = Clamp(start, end, 5)
require.False(t, end == result)
require.Equal(t, uint64(5), result.Uint64())
}
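// TestClampBatchSketch is a usage sketch (not part of the original commit) showing
// how Clamp bounds an inclusive block range to a fixed batch size; the range and
// size values below are illustrative.
func TestClampBatchSketch(t *testing.T) {
	start, end := big.NewInt(0), big.NewInt(1000)
	// 1001 blocks do not fit in a batch of 100, so the clamped end is start + 100 - 1
	clamped := Clamp(start, end, 100)
	require.Equal(t, uint64(99), clamped.Uint64())
}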
This diff is collapsed.
package main
import (
"context"
"fmt"
"math/big"
"github.com/urfave/cli/v2"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum-optimism/optimism/indexer"
"github.com/ethereum-optimism/optimism/indexer/api"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-service/cliapp"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/opio"
)
var (
ConfigFlag = &cli.StringFlag{
Name: "config",
Value: "./indexer.toml",
Aliases: []string{"c"},
Usage: "path to config file",
EnvVars: []string{"INDEXER_CONFIG"},
}
MigrationsFlag = &cli.StringFlag{
Name: "migrations-dir",
Value: "./migrations",
Usage: "path to migrations folder",
EnvVars: []string{"INDEXER_MIGRATIONS_DIR"},
}
ReorgFlag = &cli.Uint64Flag{
Name: "l1-height",
Aliases: []string{"height"},
Usage: `the lowest l1 height that has been reorg'd. All L1 data and derived L2 state will be deleted. Since not all L1 blocks are
indexed, this will find the maximum indexed height <= the marker, which may result in slightly more deleted state.`,
Required: true,
}
)
func runIndexer(ctx *cli.Context, shutdown context.CancelCauseFunc) (cliapp.Lifecycle, error) {
log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "indexer")
oplog.SetGlobalLogHandler(log.Handler())
log.Info("running indexer...")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil {
log.Error("failed to load config", "err", err)
return nil, err
}
return indexer.NewIndexer(ctx.Context, log, &cfg, shutdown)
}
func runApi(ctx *cli.Context, _ context.CancelCauseFunc) (cliapp.Lifecycle, error) {
log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "api")
oplog.SetGlobalLogHandler(log.Handler())
log.Info("running api...")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil {
log.Error("failed to load config", "err", err)
return nil, err
}
apiCfg := &api.Config{
DB: &api.DBConfigConnector{DBConfig: cfg.DB},
HTTPServer: cfg.HTTPServer,
MetricsServer: cfg.MetricsServer,
}
return api.NewApi(ctx.Context, log, apiCfg)
}
func runMigrations(ctx *cli.Context) error {
// We don't maintain a complicated lifecycle here, just interrupt to shut down.
ctx.Context = opio.CancelOnInterrupt(ctx.Context)
log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "migrations")
oplog.SetGlobalLogHandler(log.Handler())
log.Info("running migrations...")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil {
log.Error("failed to load config", "err", err)
return err
}
db, err := database.NewDB(ctx.Context, log, cfg.DB)
if err != nil {
log.Error("failed to connect to database", "err", err)
return err
}
defer db.Close()
migrationsDir := ctx.String(MigrationsFlag.Name)
return db.ExecuteSQLMigration(migrationsDir)
}
func runReorgDeletion(ctx *cli.Context) error {
fromL1Height := ctx.Uint64(ReorgFlag.Name)
log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "reorg-deletion")
oplog.SetGlobalLogHandler(log.Handler())
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil {
return fmt.Errorf("failed to load config: %w", err)
}
l1Clnt, err := ethclient.DialContext(ctx.Context, cfg.RPCs.L1RPC)
if err != nil {
return fmt.Errorf("failed to dial L1 client: %w", err)
}
l1Header, err := l1Clnt.HeaderByNumber(ctx.Context, big.NewInt(int64(fromL1Height)))
if err != nil {
return fmt.Errorf("failed to query L1 header at height: %w", err)
} else if l1Header == nil {
return fmt.Errorf("no header found at height")
}
db, err := database.NewDB(ctx.Context, log, cfg.DB)
if err != nil {
return fmt.Errorf("failed to connect to database: %w", err)
}
defer db.Close()
return db.Transaction(func(db *database.DB) error {
return db.Blocks.DeleteReorgedState(l1Header.Time)
})
}
func newCli(GitCommit string, GitDate string) *cli.App {
flags := append([]cli.Flag{ConfigFlag}, oplog.CLIFlags("INDEXER")...)
return &cli.App{
Version: params.VersionWithCommit(GitCommit, GitDate),
Description: "An indexer of all Optimism events with a serving API layer",
EnableBashCompletion: true,
Commands: []*cli.Command{
{
Name: "api",
Flags: flags,
Description: "Runs the api service",
Action: cliapp.LifecycleCmd(runApi),
},
{
Name: "index",
Flags: flags,
Description: "Runs the indexing service",
Action: cliapp.LifecycleCmd(runIndexer),
},
{
Name: "migrate",
Flags: append(flags, MigrationsFlag),
Description: "Runs the database migrations",
Action: runMigrations,
},
{
Name: "reorg-delete",
Aliases: []string{"reorg"},
Flags: append(flags, ReorgFlag),
Description: "Deletes data that has been reorg'ed out of the canonical L1 chain",
Action: runReorgDeletion,
},
{
Name: "version",
Description: "print version",
Action: func(ctx *cli.Context) error {
cli.ShowVersion(ctx)
return nil
},
},
},
}
}
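// Usage sketch (illustrative; the binary name, paths, and height are assumptions, not from the original file):
//
//	indexer index --config ./indexer.toml
//	indexer api --config ./indexer.toml
//	indexer migrate --config ./indexer.toml --migrations-dir ./migrations
//	indexer reorg-delete --config ./indexer.toml --l1-height 19000000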
package main
import (
"context"
"os"
"github.com/ethereum/go-ethereum/log"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/opio"
)
var (
GitCommit = ""
GitDate = ""
)
func main() {
oplog.SetupDefaults()
app := newCli(GitCommit, GitDate)
// sub-commands set up their individual interrupt lifecycles, which can block on the given interrupt as needed.
ctx := opio.WithInterruptBlocker(context.Background())
if err := app.RunContext(ctx, os.Args); err != nil {
log.Error("application failed", "err", err)
os.Exit(1)
}
}
This diff is collapsed.
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
options:
path: /var/lib/grafana/dashboards
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
uid: '6R74VAnVz'
access: proxy
url: http://prometheus:9090
isDefault: true
global:
scrape_interval: 5s
evaluation_interval: 5s
scrape_configs:
- job_name: 'indexer'
static_configs:
- targets: ['index:7300']
This diff is collapsed.