Commit 1ca98c02 authored by Ethen Pociask's avatar Ethen Pociask

Merge branch 'develop' of https://github.com/epociask/optimism into indexer.client

parents a5436689 adf55b3a
---
'@eth-optimism/chain-mon': minor
---
Use node.js v18
---
'@eth-optimism/sdk': patch
---
Adds Sepolia & OP Sepolia support to SDK
---
'@eth-optimism/contracts-bedrock': patch
---
bumps sdk version to have access to sepolia deployments
version: 2.1 version: 2.1
orbs: orbs:
go: circleci/go@1.5.0 go: circleci/go@1.8.0
gcp-cli: circleci/gcp-cli@3.0.1 gcp-cli: circleci/gcp-cli@3.0.1
slack: circleci/slack@4.10.1 slack: circleci/slack@4.10.1
commands: commands:
...@@ -737,7 +737,7 @@ jobs: ...@@ -737,7 +737,7 @@ jobs:
description: Go Module Name description: Go Module Name
type: string type: string
docker: docker:
- image: cimg/go:1.20 - image: cimg/go:1.21
steps: steps:
- checkout - checkout
- run: - run:
...@@ -912,9 +912,9 @@ jobs: ...@@ -912,9 +912,9 @@ jobs:
- run: - run:
name: Install latest golang name: Install latest golang
command: | command: |
wget https://go.dev/dl/go1.20.linux-amd64.tar.gz wget https://go.dev/dl/go1.21.1.linux-amd64.tar.gz
sudo rm -rf /usr/local/go sudo rm -rf /usr/local/go
sudo tar -C /usr/local -xzf go1.20.linux-amd64.tar.gz sudo tar -C /usr/local -xzf go1.21.1.linux-amd64.tar.gz
export PATH=$PATH:/usr/local/go/bin export PATH=$PATH:/usr/local/go/bin
go version go version
- run: - run:
...@@ -1048,14 +1048,14 @@ jobs: ...@@ -1048,14 +1048,14 @@ jobs:
bedrock-go-tests: bedrock-go-tests:
docker: docker:
- image: cimg/go:1.20 - image: cimg/go:1.21
resource_class: medium resource_class: medium
steps: steps:
- run: echo Done - run: echo Done
fpp-verify: fpp-verify:
docker: docker:
- image: cimg/go:1.20 - image: cimg/go:1.21
steps: steps:
- checkout - checkout
- run: - run:
......
module claim module claim
go 1.20 go 1.21
toolchain go1.21.1
require github.com/ethereum-optimism/optimism v0.0.0 require github.com/ethereum-optimism/optimism v0.0.0
......
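Go 1.21 introduced the `toolchain` directive used above: the `go 1.21` line declares the minimum language version, while `toolchain go1.21.1` pins the exact Go toolchain used to build the module.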
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
# `mipsevm`

Supported 55 instructions:

| Category | Instruction | Description |
|----------------------|---------------|----------------------------------------------|
| `Arithmetic` | `addi` | Add immediate (with sign-extension). |
| `Arithmetic` | `addiu` | Add immediate unsigned (no overflow). |
| `Arithmetic` | `addu` | Add unsigned (no overflow). |
| `Logical` | `and` | Bitwise AND. |
| `Logical` | `andi` | Bitwise AND immediate. |
| `Branch` | `b` | Unconditional branch. |
| `Conditional Branch` | `beq` | Branch on equal. |
| `Conditional Branch` | `beqz` | Branch if equal to zero. |
| `Conditional Branch` | `bgez` | Branch on greater than or equal to zero. |
| `Conditional Branch` | `bgtz` | Branch on greater than zero. |
| `Conditional Branch` | `blez` | Branch on less than or equal to zero. |
| `Conditional Branch` | `bltz` | Branch on less than zero. |
| `Conditional Branch` | `bne` | Branch on not equal. |
| `Conditional Branch` | `bnez` | Branch if not equal to zero. |
| `Logical` | `clz` | Count leading zeros. |
| `Arithmetic` | `divu` | Divide unsigned. |
| `Unconditional Jump` | `j` | Jump. |
| `Unconditional Jump` | `jal` | Jump and link. |
| `Unconditional Jump` | `jalr` | Jump and link register. |
| `Unconditional Jump` | `jr` | Jump register. |
| `Data Transfer` | `lb` | Load byte. |
| `Data Transfer` | `lbu` | Load byte unsigned. |
| `Data Transfer` | `lui` | Load upper immediate. |
| `Data Transfer` | `lw` | Load word. |
| `Data Transfer` | `lwr` | Load word right. |
| `Data Transfer` | `mfhi` | Move from HI register. |
| `Data Transfer` | `mflo` | Move from LO register. |
| `Data Transfer` | `move` | Move between registers. |
| `Data Transfer` | `movn` | Move conditional on not zero. |
| `Data Transfer` | `movz` | Move conditional on zero. |
| `Data Transfer` | `mtlo` | Move to LO register. |
| `Arithmetic` | `mul` | Multiply (to produce a word result). |
| `Arithmetic` | `multu` | Multiply unsigned. |
| `Arithmetic` | `negu` | Negate unsigned. |
| `No Op` | `nop` | No operation. |
| `Logical` | `not` | Bitwise NOT (pseudo-instruction in MIPS). |
| `Logical` | `or` | Bitwise OR. |
| `Logical` | `ori` | Bitwise OR immediate. |
| `Data Transfer` | `sb` | Store byte. |
| `Logical` | `sll` | Shift left logical. |
| `Logical` | `sllv` | Shift left logical variable. |
| `Comparison` | `slt` | Set on less than (signed). |
| `Comparison` | `slti` | Set on less than immediate. |
| `Comparison` | `sltiu` | Set on less than immediate unsigned. |
| `Comparison` | `sltu` | Set on less than unsigned. |
| `Logical` | `sra` | Shift right arithmetic. |
| `Logical` | `srl` | Shift right logical. |
| `Logical` | `srlv` | Shift right logical variable. |
| `Arithmetic` | `subu` | Subtract unsigned. |
| `Data Transfer` | `sw` | Store word. |
| `Data Transfer` | `swr` | Store word right. |
| `Serialization` | `sync` | Synchronize shared memory. |
| `System Calls` | `syscall` | System call. |
| `Logical` | `xor` | Bitwise XOR. |
| `Logical` | `xori` | Bitwise XOR immediate. |
To run:
1. Load a program into a state, e.g. using `LoadELF` (see the sketch below).
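A minimal, hedged sketch of step 1; only `LoadELF` is named in the README, and the import path and exact signature here are assumptions that may differ at this commit:

```go
// Hypothetical sketch — only LoadELF is named in the README above;
// the import path and signature are assumed.
package main

import (
	"debug/elf"
	"log"

	"github.com/ethereum-optimism/optimism/cannon/mipsevm" // import path assumed
)

func main() {
	f, err := elf.Open("program.elf") // a MIPS32 ELF built for mipsevm
	if err != nil {
		log.Fatal(err)
	}
	state, err := mipsevm.LoadELF(f)
	if err != nil {
		log.Fatal(err)
	}
	_ = state // the remaining steps (collapsed above) continue from this state
}
```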
......
FROM golang:1.20.7-alpine3.18 as builder FROM golang:1.21.1-alpine3.18 as builder
RUN apk --no-cache add make jq bash git alpine-sdk RUN apk --no-cache add make jq bash git alpine-sdk
......
module github.com/ethereum-optimism/optimism module github.com/ethereum-optimism/optimism
go 1.20 go 1.21
require ( require (
github.com/BurntSushi/toml v1.3.2 github.com/BurntSushi/toml v1.3.2
github.com/btcsuite/btcd v0.23.3 github.com/btcsuite/btcd v0.23.3
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20230817174831-5d3ca1966435 github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20230920213331-413695cf7906
github.com/ethereum/go-ethereum v1.12.0 github.com/ethereum/go-ethereum v1.12.0
github.com/fsnotify/fsnotify v1.6.0 github.com/fsnotify/fsnotify v1.6.0
github.com/go-chi/chi/v5 v5.0.10 github.com/go-chi/chi/v5 v5.0.10
...@@ -18,18 +18,19 @@ require ( ...@@ -18,18 +18,19 @@ require (
github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8 github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8
github.com/google/uuid v1.3.1 github.com/google/uuid v1.3.1
github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru/v2 v2.0.2 github.com/hashicorp/golang-lru/v2 v2.0.5
github.com/holiman/uint256 v1.2.3 github.com/holiman/uint256 v1.2.3
github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-datastore v0.6.0
github.com/ipfs/go-ds-leveldb v0.5.0 github.com/ipfs/go-ds-leveldb v0.5.0
github.com/jackc/pgtype v1.14.0 github.com/jackc/pgtype v1.14.0
github.com/jackc/pgx/v5 v5.4.3 github.com/jackc/pgx/v5 v5.4.3
github.com/libp2p/go-libp2p v0.27.8 github.com/libp2p/go-libp2p v0.31.0
github.com/libp2p/go-libp2p-mplex v0.9.0
github.com/libp2p/go-libp2p-pubsub v0.9.3 github.com/libp2p/go-libp2p-pubsub v0.9.3
github.com/libp2p/go-libp2p-testing v0.12.0 github.com/libp2p/go-libp2p-testing v0.12.0
github.com/mattn/go-isatty v0.0.19 github.com/mattn/go-isatty v0.0.19
github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-base32 v0.1.0
github.com/multiformats/go-multiaddr v0.10.1 github.com/multiformats/go-multiaddr v0.11.0
github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/olekukonko/tablewriter v0.0.5 github.com/olekukonko/tablewriter v0.0.5
github.com/onsi/gomega v1.27.10 github.com/onsi/gomega v1.27.10
...@@ -39,7 +40,7 @@ require ( ...@@ -39,7 +40,7 @@ require (
github.com/stretchr/testify v1.8.4 github.com/stretchr/testify v1.8.4
github.com/urfave/cli/v2 v2.25.7 github.com/urfave/cli/v2 v2.25.7
golang.org/x/crypto v0.13.0 golang.org/x/crypto v0.13.0
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df golang.org/x/exp v0.0.0-20230905200255-921286631fa9
golang.org/x/sync v0.3.0 golang.org/x/sync v0.3.0
golang.org/x/term v0.12.0 golang.org/x/term v0.12.0
golang.org/x/time v0.3.0 golang.org/x/time v0.3.0
...@@ -51,7 +52,7 @@ require ( ...@@ -51,7 +52,7 @@ require (
github.com/DataDog/zstd v1.5.2 // indirect github.com/DataDog/zstd v1.5.2 // indirect
github.com/VictoriaMetrics/fastcache v1.10.0 // indirect github.com/VictoriaMetrics/fastcache v1.10.0 // indirect
github.com/allegro/bigcache v1.2.1 // indirect github.com/allegro/bigcache v1.2.1 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.7.0 // indirect github.com/bits-and-blooms/bitset v1.7.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
...@@ -72,7 +73,7 @@ require ( ...@@ -72,7 +73,7 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/deckarep/golang-set/v2 v2.1.0 // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect
github.com/docker/docker v20.10.24+incompatible // indirect github.com/docker/docker v20.10.24+incompatible // indirect
...@@ -99,13 +100,14 @@ require ( ...@@ -99,13 +100,14 @@ require (
github.com/golang/mock v1.6.0 // indirect github.com/golang/mock v1.6.0 // indirect
github.com/golang/protobuf v1.5.3 // indirect github.com/golang/protobuf v1.5.3 // indirect
github.com/google/gopacket v1.1.19 // indirect github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect
github.com/gorilla/websocket v1.5.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect github.com/graph-gophers/graphql-go v1.3.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-bexpr v0.1.11 // indirect github.com/hashicorp/go-bexpr v0.1.11 // indirect
github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/huin/goupnp v1.1.0 // indirect github.com/huin/goupnp v1.2.0 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
...@@ -121,8 +123,8 @@ require ( ...@@ -121,8 +123,8 @@ require (
github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect github.com/jinzhu/now v1.1.5 // indirect
github.com/karalabe/usb v0.0.2 // indirect github.com/karalabe/usb v0.0.2 // indirect
github.com/klauspost/compress v1.16.4 // indirect github.com/klauspost/compress v1.16.7 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/koron/go-ssdp v0.0.4 // indirect github.com/koron/go-ssdp v0.0.4 // indirect
github.com/kr/pretty v0.3.1 // indirect github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect github.com/kr/text v0.2.0 // indirect
...@@ -133,18 +135,18 @@ require ( ...@@ -133,18 +135,18 @@ require (
github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect
github.com/libp2p/go-mplex v0.7.0 // indirect github.com/libp2p/go-mplex v0.7.0 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-nat v0.1.0 // indirect github.com/libp2p/go-nat v0.2.0 // indirect
github.com/libp2p/go-netroute v0.2.1 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect
github.com/libp2p/go-reuseport v0.2.0 // indirect github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v4 v4.0.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/dns v1.1.53 // indirect github.com/miekg/dns v1.1.55 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.0 // indirect github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.1 // indirect github.com/mitchellh/pointerstructure v1.2.1 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect
...@@ -152,26 +154,25 @@ require ( ...@@ -152,26 +154,25 @@ require (
github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.8.1 // indirect github.com/multiformats/go-multicodec v0.9.0 // indirect
github.com/multiformats/go-multihash v0.2.1 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect github.com/multiformats/go-varint v0.0.7 // indirect
github.com/naoina/go-stringutil v0.1.0 // indirect github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 // indirect github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 // indirect
github.com/onsi/ginkgo/v2 v2.11.0 // indirect github.com/onsi/ginkgo/v2 v2.11.0 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-19 v0.3.3 // indirect github.com/quic-go/qtls-go1-20 v0.3.3 // indirect
github.com/quic-go/qtls-go1-20 v0.2.3 // indirect github.com/quic-go/quic-go v0.38.1 // indirect
github.com/quic-go/quic-go v0.33.0 // indirect github.com/quic-go/webtransport-go v0.5.3 // indirect
github.com/quic-go/webtransport-go v0.5.2 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/rivo/uniseg v0.4.3 // indirect github.com/rivo/uniseg v0.4.3 // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect
...@@ -188,23 +189,21 @@ require ( ...@@ -188,23 +189,21 @@ require (
github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect
go.uber.org/atomic v1.10.0 // indirect go.uber.org/dig v1.17.0 // indirect
go.uber.org/dig v1.16.1 // indirect go.uber.org/fx v1.20.0 // indirect
go.uber.org/fx v1.19.2 // indirect
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.24.0 // indirect go.uber.org/zap v1.25.0 // indirect
golang.org/x/mod v0.11.0 // indirect golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.12.0 // indirect golang.org/x/net v0.15.0 // indirect
golang.org/x/sys v0.12.0 // indirect golang.org/x/sys v0.12.0 // indirect
golang.org/x/text v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect
golang.org/x/tools v0.9.3 // indirect golang.org/x/tools v0.13.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.1.7 // indirect lukechampine.com/blake3 v1.2.1 // indirect
nhooyr.io/websocket v1.8.7 // indirect
rsc.io/tmplfunc v0.0.3 // indirect rsc.io/tmplfunc v0.0.3 // indirect
) )
......
FROM --platform=$BUILDPLATFORM golang:1.20.7-alpine3.18 as builder FROM --platform=$BUILDPLATFORM golang:1.21.1-alpine3.18 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
...@@ -22,5 +22,11 @@ RUN make indexer ...@@ -22,5 +22,11 @@ RUN make indexer
FROM alpine:3.18 FROM alpine:3.18
COPY --from=builder /app/indexer/indexer /usr/local/bin COPY --from=builder /app/indexer/indexer /usr/local/bin
COPY --from=builder /app/indexer/indexer.toml /app/indexer/indexer.toml
COPY --from=builder /app/indexer/migrations /app/indexer/migrations
CMD ["indexer", "all", "--config", "/app/indexer/indexer.toml"] WORKDIR /app
ENV INDEXER_MIGRATIONS_DIR="/app/indexer/migrations"
CMD ["indexer", "index", "--config", "/app/indexer/indexer.toml"]
package main package main
import ( import (
"sync"
"github.com/ethereum-optimism/optimism/indexer" "github.com/ethereum-optimism/optimism/indexer"
"github.com/ethereum-optimism/optimism/indexer/api" "github.com/ethereum-optimism/optimism/indexer/api"
"github.com/ethereum-optimism/optimism/indexer/config" "github.com/ethereum-optimism/optimism/indexer/config"
...@@ -21,6 +19,12 @@ var ( ...@@ -21,6 +19,12 @@ var (
Usage: "path to config file", Usage: "path to config file",
EnvVars: []string{"INDEXER_CONFIG"}, EnvVars: []string{"INDEXER_CONFIG"},
} }
MigrationsFlag = &cli.StringFlag{
Name: "migrations-dir",
Value: "./migrations",
Usage: "path to migrations folder",
EnvVars: []string{"INDEXER_MIGRATIONS_DIR"},
}
) )
func runIndexer(ctx *cli.Context) error { func runIndexer(ctx *cli.Context) error {
...@@ -66,38 +70,30 @@ func runApi(ctx *cli.Context) error { ...@@ -66,38 +70,30 @@ func runApi(ctx *cli.Context) error {
return api.Start(ctx.Context) return api.Start(ctx.Context)
} }
-func runAll(ctx *cli.Context) error {
-	log := log.NewLogger(log.ReadCLIConfig(ctx))
-
-	// Ensure both processes complete before returning.
-	var wg sync.WaitGroup
-	wg.Add(2)
-
-	go func() {
-		defer wg.Done()
-		err := runApi(ctx)
-		if err != nil {
-			log.Error("api process non-zero exit", "err", err)
-		}
-	}()
-	go func() {
-		defer wg.Done()
-		err := runIndexer(ctx)
-		if err != nil {
-			log.Error("indexer process non-zero exit", "err", err)
-		}
-	}()
-
-	// We purposefully return no error since the indexer and api
-	// have no inter-dependencies. We simply rely on the logs to
-	// report a non-zero exit for either process.
-	wg.Wait()
-	return nil
-}
+func runMigrations(ctx *cli.Context) error {
+	log := log.NewLogger(log.ReadCLIConfig(ctx)).New("role", "api")
+	cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
+	migrationsDir := ctx.String(MigrationsFlag.Name)
+	if err != nil {
+		log.Error("failed to load config", "err", err)
+		return err
+	}
+
+	db, err := database.NewDB(cfg.DB)
+	if err != nil {
+		log.Error("failed to connect to database", "err", err)
+		return err
+	}
+	defer db.Close()
+
+	return db.ExecuteSQLMigration(migrationsDir)
+}
func newCli(GitCommit string, GitDate string) *cli.App { func newCli(GitCommit string, GitDate string) *cli.App {
flags := []cli.Flag{ConfigFlag} flags := []cli.Flag{ConfigFlag}
flags = append(flags, log.CLIFlags("INDEXER")...) flags = append(flags, log.CLIFlags("INDEXER")...)
migrationFlags := []cli.Flag{MigrationsFlag, ConfigFlag}
migrationFlags = append(migrationFlags, log.CLIFlags("INDEXER")...)
return &cli.App{ return &cli.App{
Version: params.VersionWithCommit(GitCommit, GitDate), Version: params.VersionWithCommit(GitCommit, GitDate),
Description: "An indexer of all optimism events with a serving api layer", Description: "An indexer of all optimism events with a serving api layer",
...@@ -116,10 +112,10 @@ func newCli(GitCommit string, GitDate string) *cli.App { ...@@ -116,10 +112,10 @@ func newCli(GitCommit string, GitDate string) *cli.App {
Action: runIndexer, Action: runIndexer,
}, },
{ {
Name: "all", Name: "migrate",
Flags: flags, Flags: migrationFlags,
Description: "Runs both the api service and the indexing service", Description: "Runs the database migrations",
Action: runAll, Action: runMigrations,
}, },
{ {
Name: "version", Name: "version",
......
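With these flags wired up, the new command can be invoked directly, e.g. `indexer migrate --migrations-dir ./migrations --config /app/indexer/indexer.toml` (the flag names come from `MigrationsFlag` above and the Dockerfile `CMD`), or configured via the `INDEXER_MIGRATIONS_DIR` and `INDEXER_CONFIG` environment variables as the compose file does.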
...@@ -157,18 +157,20 @@ func LoadConfig(log log.Logger, path string) (Config, error) { ...@@ -157,18 +157,20 @@ func LoadConfig(log log.Logger, path string) (Config, error) {
return conf, err return conf, err
} }
-	if conf.Chain.Preset != 0 {
+	if conf.Chain.Preset == DEVNET_L2_CHAIN_ID {
+		preset, err := GetDevnetPreset()
+		if err != nil {
+			return conf, err
+		}
+		conf.Chain = preset.ChainConfig
+	} else if conf.Chain.Preset != 0 {
 		preset, ok := Presets[conf.Chain.Preset]
 		if !ok {
 			return conf, fmt.Errorf("unknown preset: %d", conf.Chain.Preset)
 		}
 		log.Info("detected preset", "preset", conf.Chain.Preset, "name", preset.Name)
 		log.Info("setting L1 information from preset")
-		conf.Chain.L1Contracts = preset.ChainConfig.L1Contracts
-		conf.Chain.L1StartingHeight = preset.ChainConfig.L1StartingHeight
-		conf.Chain.L1BedrockStartingHeight = preset.ChainConfig.L1BedrockStartingHeight
-		conf.Chain.L2BedrockStartingHeight = preset.ChainConfig.L1BedrockStartingHeight
+		conf.Chain = preset.ChainConfig
 	}

 	// Setup L2Contracts from predeploys
......
package config
import (
"encoding/json"
"errors"
"io/fs"
"os"
)
var (
filePath = "../.devnet/addresses.json"
DEVNET_L2_CHAIN_ID = 901
)
func GetDevnetPreset() (*Preset, error) {
if _, err := os.Stat(filePath); errors.Is(err, fs.ErrNotExist) {
return nil, err
}
content, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
var l1Contracts L1Contracts
if err := json.Unmarshal(content, &l1Contracts); err != nil {
return nil, err
}
if err != nil {
return nil, err
}
return &Preset{
Name: "devnet",
ChainConfig: ChainConfig{
Preset: DEVNET_L2_CHAIN_ID,
L1Contracts: l1Contracts,
},
}, nil
}
...@@ -16,6 +16,7 @@ var Presets = map[int]Preset{ ...@@ -16,6 +16,7 @@ var Presets = map[int]Preset{
10: { 10: {
Name: "Optimism", Name: "Optimism",
ChainConfig: ChainConfig{ ChainConfig: ChainConfig{
Preset: 10,
L1Contracts: L1Contracts{ L1Contracts: L1Contracts{
AddressManager: common.HexToAddress("0xdE1FCfB0851916CA5101820A69b13a4E276bd81F"), AddressManager: common.HexToAddress("0xdE1FCfB0851916CA5101820A69b13a4E276bd81F"),
SystemConfigProxy: common.HexToAddress("0x229047fed2591dbec1eF1118d64F7aF3dB9EB290"), SystemConfigProxy: common.HexToAddress("0x229047fed2591dbec1eF1118d64F7aF3dB9EB290"),
...@@ -37,6 +38,7 @@ var Presets = map[int]Preset{ ...@@ -37,6 +38,7 @@ var Presets = map[int]Preset{
420: { 420: {
Name: "Optimism Goerli", Name: "Optimism Goerli",
ChainConfig: ChainConfig{ ChainConfig: ChainConfig{
Preset: 420,
L1Contracts: L1Contracts{ L1Contracts: L1Contracts{
AddressManager: common.HexToAddress("0xa6f73589243a6A7a9023b1Fa0651b1d89c177111"), AddressManager: common.HexToAddress("0xa6f73589243a6A7a9023b1Fa0651b1d89c177111"),
SystemConfigProxy: common.HexToAddress("0xAe851f927Ee40dE99aaBb7461C00f9622ab91d60"), SystemConfigProxy: common.HexToAddress("0xAe851f927Ee40dE99aaBb7461C00f9622ab91d60"),
...@@ -58,6 +60,7 @@ var Presets = map[int]Preset{ ...@@ -58,6 +60,7 @@ var Presets = map[int]Preset{
8453: { 8453: {
Name: "Base", Name: "Base",
ChainConfig: ChainConfig{ ChainConfig: ChainConfig{
Preset: 8453,
L1Contracts: L1Contracts{ L1Contracts: L1Contracts{
AddressManager: common.HexToAddress("0x8EfB6B5c4767B09Dc9AA6Af4eAA89F749522BaE2"), AddressManager: common.HexToAddress("0x8EfB6B5c4767B09Dc9AA6Af4eAA89F749522BaE2"),
SystemConfigProxy: common.HexToAddress("0x73a79Fab69143498Ed3712e519A88a918e1f4072"), SystemConfigProxy: common.HexToAddress("0x73a79Fab69143498Ed3712e519A88a918e1f4072"),
...@@ -73,6 +76,7 @@ var Presets = map[int]Preset{ ...@@ -73,6 +76,7 @@ var Presets = map[int]Preset{
84531: { 84531: {
Name: "Base Goerli", Name: "Base Goerli",
ChainConfig: ChainConfig{ ChainConfig: ChainConfig{
Preset: 84531,
L1Contracts: L1Contracts{ L1Contracts: L1Contracts{
AddressManager: common.HexToAddress("0x4Cf6b56b14c6CFcB72A75611080514F94624c54e"), AddressManager: common.HexToAddress("0x4Cf6b56b14c6CFcB72A75611080514F94624c54e"),
SystemConfigProxy: common.HexToAddress("0xb15eea247eCE011C68a614e4a77AD648ff495bc1"), SystemConfigProxy: common.HexToAddress("0xb15eea247eCE011C68a614e4a77AD648ff495bc1"),
...@@ -88,6 +92,7 @@ var Presets = map[int]Preset{ ...@@ -88,6 +92,7 @@ var Presets = map[int]Preset{
7777777: { 7777777: {
Name: "Zora", Name: "Zora",
ChainConfig: ChainConfig{ ChainConfig: ChainConfig{
Preset: 7777777,
L1Contracts: L1Contracts{ L1Contracts: L1Contracts{
AddressManager: common.HexToAddress("0xEF8115F2733fb2033a7c756402Fc1deaa56550Ef"), AddressManager: common.HexToAddress("0xEF8115F2733fb2033a7c756402Fc1deaa56550Ef"),
SystemConfigProxy: common.HexToAddress("0xA3cAB0126d5F504B071b81a3e8A2BBBF17930d86"), SystemConfigProxy: common.HexToAddress("0xA3cAB0126d5F504B071b81a3e8A2BBBF17930d86"),
...@@ -103,6 +108,7 @@ var Presets = map[int]Preset{ ...@@ -103,6 +108,7 @@ var Presets = map[int]Preset{
999: { 999: {
Name: "Zora Goerli", Name: "Zora Goerli",
ChainConfig: ChainConfig{ ChainConfig: ChainConfig{
Preset: 999,
L1Contracts: L1Contracts{ L1Contracts: L1Contracts{
AddressManager: common.HexToAddress("0x54f4676203dEDA6C08E0D40557A119c602bFA246"), AddressManager: common.HexToAddress("0x54f4676203dEDA6C08E0D40557A119c602bFA246"),
SystemConfigProxy: common.HexToAddress("0xF66C9A5E4fE1A8a9bc44a4aF80505a4C3620Ee64"), SystemConfigProxy: common.HexToAddress("0xF66C9A5E4fE1A8a9bc44a4aF80505a4C3620Ee64"),
......
...@@ -4,6 +4,8 @@ package database ...@@ -4,6 +4,8 @@ package database
import ( import (
"context" "context"
"fmt" "fmt"
"os"
"path/filepath"
"github.com/ethereum-optimism/optimism/indexer/config" "github.com/ethereum-optimism/optimism/indexer/config"
_ "github.com/ethereum-optimism/optimism/indexer/database/serializers" _ "github.com/ethereum-optimism/optimism/indexer/database/serializers"
...@@ -62,7 +64,6 @@ func NewDB(dbConfig config.DBConfig) (*DB, error) { ...@@ -62,7 +64,6 @@ func NewDB(dbConfig config.DBConfig) (*DB, error) {
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to connect to database after multiple retries") return nil, errors.Wrap(err, "failed to connect to database after multiple retries")
} }
db := &DB{ db := &DB{
gorm: gorm, gorm: gorm,
Blocks: newBlocksDB(gorm), Blocks: newBlocksDB(gorm),
...@@ -71,7 +72,6 @@ func NewDB(dbConfig config.DBConfig) (*DB, error) { ...@@ -71,7 +72,6 @@ func NewDB(dbConfig config.DBConfig) (*DB, error) {
BridgeMessages: newBridgeMessagesDB(gorm), BridgeMessages: newBridgeMessagesDB(gorm),
BridgeTransactions: newBridgeTransactionsDB(gorm), BridgeTransactions: newBridgeTransactionsDB(gorm),
} }
return db, nil return db, nil
} }
...@@ -102,3 +102,33 @@ func dbFromGormTx(tx *gorm.DB) *DB { ...@@ -102,3 +102,33 @@ func dbFromGormTx(tx *gorm.DB) *DB {
BridgeTransactions: newBridgeTransactionsDB(tx), BridgeTransactions: newBridgeTransactionsDB(tx),
} }
} }
func (db *DB) ExecuteSQLMigration(migrationsFolder string) error {
err := filepath.Walk(migrationsFolder, func(path string, info os.FileInfo, err error) error {
// Check for any walking error
if err != nil {
return errors.Wrap(err, fmt.Sprintf("Failed to process migration file: %s", path))
}
// Skip directories
if info.IsDir() {
return nil
}
// Read the migration file content
fileContent, readErr := os.ReadFile(path)
if readErr != nil {
return errors.Wrap(readErr, fmt.Sprintf("Error reading SQL file: %s", path))
}
// Execute the migration
execErr := db.gorm.Exec(string(fileContent)).Error
if execErr != nil {
return errors.Wrap(execErr, fmt.Sprintf("Error executing SQL script: %s", path))
}
return nil
})
return err
}
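A minimal usage sketch of the new helper, assuming a reachable local Postgres; the connection values below are placeholders, while `config.DBConfig`, `database.NewDB`, and `ExecuteSQLMigration` are taken from the diff above:

```go
// Hypothetical usage sketch; connection values are placeholders, not from the PR.
package main

import (
	"log"

	"github.com/ethereum-optimism/optimism/indexer/config"
	"github.com/ethereum-optimism/optimism/indexer/database"
)

func main() {
	db, err := database.NewDB(config.DBConfig{
		Host: "127.0.0.1", // placeholder
		Port: 5432,
		Name: "indexer",
		User: "postgres",
	})
	if err != nil {
		log.Fatalf("failed to connect to database: %v", err)
	}
	defer db.Close()

	// filepath.Walk visits files in lexical order, so migrations named
	// e.g. 0001_..., 0002_... are applied sequentially.
	if err := db.ExecuteSQLMigration("./migrations"); err != nil {
		log.Fatalf("failed to run migrations: %v", err)
	}
}
```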
...@@ -17,6 +17,27 @@ services: ...@@ -17,6 +17,27 @@ services:
- postgres_data:/data/postgres - postgres_data:/data/postgres
- ./migrations:/docker-entrypoint-initdb.d/ - ./migrations:/docker-entrypoint-initdb.d/
migrations:
build:
context: ..
dockerfile: indexer/Dockerfile
command: ["indexer", "migrate"]
environment:
- INDEXER_RPC_URL_L1=$INDEXER_RPC_URL_L1
- INDEXER_RPC_URL_L2=$INDEXER_RPC_URL_L2
- INDEXER_CONFIG=/indexer/indexer.toml
- INDEXER_CHAIN_PRESET=$INDEXER_CHAIN_PRESET
- INDEXER_DB_PORT=5432
- INDEXER_DB_HOST=postgres
- INDEXER_DB_USER=db_username
- INDEXER_DB_PASS=db_password
- INDEXER_DB_NAME=db_name
volumes:
- ./indexer.toml:/indexer/indexer.toml
depends_on:
postgres:
condition: service_healthy
indexer: indexer:
build: build:
context: .. context: ..
...@@ -26,11 +47,21 @@ services: ...@@ -26,11 +47,21 @@ services:
- INDEXER_RPC_URL_L1=$INDEXER_RPC_URL_L1 - INDEXER_RPC_URL_L1=$INDEXER_RPC_URL_L1
- INDEXER_RPC_URL_L2=$INDEXER_RPC_URL_L2 - INDEXER_RPC_URL_L2=$INDEXER_RPC_URL_L2
- INDEXER_CONFIG=/indexer/indexer.toml - INDEXER_CONFIG=/indexer/indexer.toml
- INDEXER_CHAIN_PRESET=$INDEXER_CHAIN_PRESET
- INDEXER_DB_PORT=5432
- INDEXER_DB_HOST=postgres
- INDEXER_DB_USER=db_username
- INDEXER_DB_PASS=db_password
- INDEXER_DB_NAME=db_name
volumes: volumes:
- ./indexer.toml:/indexer/indexer.toml - ./indexer.toml:/indexer/indexer.toml
    depends_on:
      postgres:
        condition: service_healthy
      migrations:
        condition: service_started
api: api:
build: build:
...@@ -45,6 +76,12 @@ services: ...@@ -45,6 +76,12 @@ services:
- INDEXER_RPC_URL_L1=$INDEXER_RPC_URL_L1 - INDEXER_RPC_URL_L1=$INDEXER_RPC_URL_L1
- INDEXER_RPC_URL_L2=$INDEXER_RPC_URL_L2 - INDEXER_RPC_URL_L2=$INDEXER_RPC_URL_L2
- INDEXER_CONFIG=/indexer/indexer.toml - INDEXER_CONFIG=/indexer/indexer.toml
- INDEXER_CHAIN_PRESET=$INDEXER_CHAIN_PRESET
- INDEXER_DB_HOST=postgres
- INDEXER_DB_PORT=5432
- INDEXER_DB_USER=db_username
- INDEXER_DB_PASS=db_password
- INDEXER_DB_NAME=db_name
volumes: volumes:
- ./indexer.toml:/indexer/indexer.toml - ./indexer.toml:/indexer/indexer.toml
ports: ports:
...@@ -83,6 +120,7 @@ services: ...@@ -83,6 +120,7 @@ services:
backend-goerli: backend-goerli:
image: ethereumoptimism/gateway-backend:latest image: ethereumoptimism/gateway-backend:latest
platform: linux/amd64
environment: environment:
# this enables the backend to proxy history requests to the indexer # this enables the backend to proxy history requests to the indexer
- BRIDGE_INDEXER_URI=http://api - BRIDGE_INDEXER_URI=http://api
......
...@@ -4,9 +4,7 @@ import ( ...@@ -4,9 +4,7 @@ import (
"context" "context"
"database/sql" "database/sql"
"fmt" "fmt"
"io/fs"
"os" "os"
"path/filepath"
"testing" "testing"
"time" "time"
...@@ -160,31 +158,20 @@ func setupTestDatabase(t *testing.T) string { ...@@ -160,31 +158,20 @@ func setupTestDatabase(t *testing.T) string {
pg.Close() pg.Close()
}) })
-	// setup schema, migration files ware walked in lexical order
-	t.Logf("created database %s", dbName)
-	db, err := sql.Open("pgx", fmt.Sprintf("postgres://%s@localhost:5432/%s?sslmode=disable", user, dbName))
+	dbConfig := config.DBConfig{
+		Host:     "127.0.0.1",
+		Port:     5432,
+		Name:     dbName,
+		User:     user,
+		Password: "",
+	}
+
+	// NewDB will create the database schema
+	db, err := database.NewDB(dbConfig)
 	require.NoError(t, err)
-	require.NoError(t, db.Ping())
 	defer db.Close()

-	t.Logf("running schema migrations...")
-	require.NoError(t, filepath.Walk("../migrations", func(path string, info fs.FileInfo, err error) error {
-		if err != nil {
-			return err
-		} else if info.IsDir() {
-			return nil
-		}
-		t.Logf("running schema migration: %s", path)
-		data, err := os.ReadFile(path)
-		if err != nil {
-			return err
-		}
-		_, err = db.Exec(string(data))
-		return err
-	}))
-	t.Logf("schema loaded")
+	err = db.ExecuteSQLMigration("../migrations")
+	require.NoError(t, err)
+
+	t.Logf("database %s setup and migrations executed", dbName)
 	return dbName
 }
FROM --platform=$BUILDPLATFORM golang:1.20.7-alpine3.18 as builder FROM --platform=$BUILDPLATFORM golang:1.21.1-alpine3.18 as builder
ARG VERSION=v0.0.0 ARG VERSION=v0.0.0
......
FROM --platform=$BUILDPLATFORM golang:1.20.7-alpine3.18 as builder FROM --platform=$BUILDPLATFORM golang:1.21.1-alpine3.18 as builder
ARG VERSION=v0.0.0 ARG VERSION=v0.0.0
......
...@@ -30,6 +30,7 @@ type ClaimLoader interface { ...@@ -30,6 +30,7 @@ type ClaimLoader interface {
type Agent struct { type Agent struct {
metrics metrics.Metricer metrics metrics.Metricer
fdgAddr common.Address
solver *solver.GameSolver solver *solver.GameSolver
loader ClaimLoader loader ClaimLoader
responder Responder responder Responder
...@@ -39,9 +40,10 @@ type Agent struct { ...@@ -39,9 +40,10 @@ type Agent struct {
log log.Logger log log.Logger
} }
func NewAgent(m metrics.Metricer, loader ClaimLoader, maxDepth int, trace types.TraceProvider, responder Responder, updater types.OracleUpdater, agreeWithProposedOutput bool, log log.Logger) *Agent { func NewAgent(m metrics.Metricer, addr common.Address, loader ClaimLoader, maxDepth int, trace types.TraceProvider, responder Responder, updater types.OracleUpdater, agreeWithProposedOutput bool, log log.Logger) *Agent {
return &Agent{ return &Agent{
metrics: m, metrics: m,
fdgAddr: addr,
solver: solver.NewGameSolver(maxDepth, trace), solver: solver.NewGameSolver(maxDepth, trace),
loader: loader, loader: loader,
responder: responder, responder: responder,
...@@ -196,6 +198,7 @@ func (a *Agent) newGameFromContracts(ctx context.Context) (types.Game, error) { ...@@ -196,6 +198,7 @@ func (a *Agent) newGameFromContracts(ctx context.Context) (types.Game, error) {
if len(claims) == 0 { if len(claims) == 0 {
return nil, errors.New("no claims") return nil, errors.New("no claims")
} }
a.metrics.RecordGameClaimCount(a.fdgAddr.String(), len(claims))
game := types.NewGameState(a.agreeWithProposedOutput, claims[0], uint64(a.maxDepth)) game := types.NewGameState(a.agreeWithProposedOutput, claims[0], uint64(a.maxDepth))
if err := game.PutAll(claims[1:]); err != nil { if err := game.PutAll(claims[1:]); err != nil {
return nil, fmt.Errorf("failed to load claims into the local state: %w", err) return nil, fmt.Errorf("failed to load claims into the local state: %w", err)
......
...@@ -10,10 +10,11 @@ import ( ...@@ -10,10 +10,11 @@ import (
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types"
"github.com/ethereum-optimism/optimism/op-challenger/metrics" "github.com/ethereum-optimism/optimism/op-challenger/metrics"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/testlog"
) )
// TestShouldResolve tests the resolution logic. // TestShouldResolve tests the resolution logic.
...@@ -110,11 +111,12 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) { ...@@ -110,11 +111,12 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) {
func setupTestAgent(t *testing.T, agreeWithProposedOutput bool) (*Agent, *stubClaimLoader, *stubResponder) { func setupTestAgent(t *testing.T, agreeWithProposedOutput bool) (*Agent, *stubClaimLoader, *stubResponder) {
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LvlInfo)
claimLoader := &stubClaimLoader{} claimLoader := &stubClaimLoader{}
addr := common.HexToAddress("0x1234")
depth := 4 depth := 4
trace := alphabet.NewTraceProvider("abcd", uint64(depth)) trace := alphabet.NewTraceProvider("abcd", uint64(depth))
responder := &stubResponder{} responder := &stubResponder{}
updater := &stubUpdater{} updater := &stubUpdater{}
agent := NewAgent(metrics.NoopMetrics, claimLoader, depth, trace, responder, updater, agreeWithProposedOutput, logger) agent := NewAgent(metrics.NoopMetrics, addr, claimLoader, depth, trace, responder, updater, agreeWithProposedOutput, logger)
return agent, claimLoader, responder return agent, claimLoader, responder
} }
......
...@@ -106,7 +106,7 @@ func NewGamePlayer( ...@@ -106,7 +106,7 @@ func NewGamePlayer(
} }
return &GamePlayer{ return &GamePlayer{
act: NewAgent(m, loader, int(gameDepth), provider, responder, updater, cfg.AgreeWithProposedOutput, logger).Act, act: NewAgent(m, addr, loader, int(gameDepth), provider, responder, updater, cfg.AgreeWithProposedOutput, logger).Act,
agreeWithProposedOutput: cfg.AgreeWithProposedOutput, agreeWithProposedOutput: cfg.AgreeWithProposedOutput,
loader: loader, loader: loader,
logger: logger, logger: logger,
...@@ -114,6 +114,10 @@ func NewGamePlayer( ...@@ -114,6 +114,10 @@ func NewGamePlayer(
}, nil }, nil
} }
func (g *GamePlayer) Status() gameTypes.GameStatus {
return g.status
}
func (g *GamePlayer) ProgressGame(ctx context.Context) gameTypes.GameStatus { func (g *GamePlayer) ProgressGame(ctx context.Context) gameTypes.GameStatus {
if g.status != gameTypes.GameStatusInProgress { if g.status != gameTypes.GameStatusInProgress {
// Game is already complete so don't try to perform further actions. // Game is already complete so don't try to perform further actions.
......
...@@ -81,7 +81,7 @@ func (c *coordinator) schedule(ctx context.Context, games []common.Address) erro ...@@ -81,7 +81,7 @@ func (c *coordinator) schedule(ctx context.Context, games []common.Address) erro
c.logger.Warn("Game not found in states map", "game", addr) c.logger.Warn("Game not found in states map", "game", addr)
} }
} }
c.m.RecordGamesStatus(gamesInProgress, gamesChallengerWon, gamesDefenderWon) c.m.RecordGamesStatus(gamesInProgress, gamesDefenderWon, gamesChallengerWon)
// Finally, enqueue the jobs // Finally, enqueue the jobs
for _, j := range jobs { for _, j := range jobs {
...@@ -109,9 +109,14 @@ func (c *coordinator) createJob(game common.Address) (*job, error) { ...@@ -109,9 +109,14 @@ func (c *coordinator) createJob(game common.Address) (*job, error) {
return nil, fmt.Errorf("failed to create game player: %w", err) return nil, fmt.Errorf("failed to create game player: %w", err)
} }
state.player = player state.player = player
state.status = player.Status()
} }
state.inflight = true state.inflight = true
return &job{addr: game, player: state.player}, nil if state.status != types.GameStatusInProgress {
c.logger.Debug("Not rescheduling resolved game", "game", game, "status", state.status)
return nil, nil
}
return &job{addr: game, player: state.player, status: state.status}, nil
} }
func (c *coordinator) enqueueJob(ctx context.Context, j job) error { func (c *coordinator) enqueueJob(ctx context.Context, j job) error {
......
...@@ -150,7 +150,10 @@ func TestDeleteDataForResolvedGames(t *testing.T) { ...@@ -150,7 +150,10 @@ func TestDeleteDataForResolvedGames(t *testing.T) {
gameAddrs := []common.Address{gameAddr1, gameAddr2, gameAddr3} gameAddrs := []common.Address{gameAddr1, gameAddr2, gameAddr3}
require.NoError(t, c.schedule(ctx, gameAddrs)) require.NoError(t, c.schedule(ctx, gameAddrs))
require.Len(t, workQueue, len(gameAddrs), "should schedule all games") // The work queue should only contain jobs for games 1 and 2
// A resolved game should not be scheduled for an update.
// This makes the inflight game metric more robust.
require.Len(t, workQueue, 2, "should schedule all games")
// Game 1 progresses and is still in progress // Game 1 progresses and is still in progress
// Game 2 progresses and is now resolved // Game 2 progresses and is now resolved
...@@ -249,6 +252,10 @@ func (g *stubGame) ProgressGame(_ context.Context) types.GameStatus { ...@@ -249,6 +252,10 @@ func (g *stubGame) ProgressGame(_ context.Context) types.GameStatus {
return g.status return g.status
} }
func (g *stubGame) Status() types.GameStatus {
return g.status
}
type createdGames struct { type createdGames struct {
t *testing.T t *testing.T
createCompleted common.Address createCompleted common.Address
......
...@@ -15,11 +15,16 @@ type SchedulerMetricer interface { ...@@ -15,11 +15,16 @@ type SchedulerMetricer interface {
RecordGamesStatus(inProgress, defenderWon, challengerWon int) RecordGamesStatus(inProgress, defenderWon, challengerWon int)
RecordGameUpdateScheduled() RecordGameUpdateScheduled()
RecordGameUpdateCompleted() RecordGameUpdateCompleted()
IncActiveExecutors()
DecActiveExecutors()
IncIdleExecutors()
DecIdleExecutors()
} }
type Scheduler struct { type Scheduler struct {
logger log.Logger logger log.Logger
coordinator *coordinator coordinator *coordinator
m SchedulerMetricer
maxConcurrency uint maxConcurrency uint
scheduleQueue chan []common.Address scheduleQueue chan []common.Address
jobQueue chan job jobQueue chan job
...@@ -40,6 +45,7 @@ func NewScheduler(logger log.Logger, m SchedulerMetricer, disk DiskManager, maxC ...@@ -40,6 +45,7 @@ func NewScheduler(logger log.Logger, m SchedulerMetricer, disk DiskManager, maxC
return &Scheduler{ return &Scheduler{
logger: logger, logger: logger,
m: m,
coordinator: newCoordinator(logger, m, jobQueue, resultQueue, createPlayer, disk), coordinator: newCoordinator(logger, m, jobQueue, resultQueue, createPlayer, disk),
maxConcurrency: maxConcurrency, maxConcurrency: maxConcurrency,
scheduleQueue: scheduleQueue, scheduleQueue: scheduleQueue,
...@@ -48,13 +54,24 @@ func NewScheduler(logger log.Logger, m SchedulerMetricer, disk DiskManager, maxC ...@@ -48,13 +54,24 @@ func NewScheduler(logger log.Logger, m SchedulerMetricer, disk DiskManager, maxC
} }
} }
func (s *Scheduler) ThreadActive() {
s.m.IncActiveExecutors()
s.m.DecIdleExecutors()
}
func (s *Scheduler) ThreadIdle() {
s.m.IncIdleExecutors()
s.m.DecActiveExecutors()
}
func (s *Scheduler) Start(ctx context.Context) { func (s *Scheduler) Start(ctx context.Context) {
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
s.cancel = cancel s.cancel = cancel
for i := uint(0); i < s.maxConcurrency; i++ { for i := uint(0); i < s.maxConcurrency; i++ {
s.m.IncIdleExecutors()
s.wg.Add(1) s.wg.Add(1)
go progressGames(ctx, s.jobQueue, s.resultQueue, &s.wg) go progressGames(ctx, s.jobQueue, s.resultQueue, &s.wg, s.ThreadActive, s.ThreadIdle)
} }
s.wg.Add(1) s.wg.Add(1)
......
...@@ -10,6 +10,7 @@ import ( ...@@ -10,6 +10,7 @@ import (
type GamePlayer interface { type GamePlayer interface {
ProgressGame(ctx context.Context) types.GameStatus ProgressGame(ctx context.Context) types.GameStatus
Status() types.GameStatus
} }
type DiskManager interface { type DiskManager interface {
......
...@@ -8,15 +8,17 @@ import ( ...@@ -8,15 +8,17 @@ import (
// progressGames accepts jobs from in channel, calls ProgressGame on the job.player and returns the job // progressGames accepts jobs from in channel, calls ProgressGame on the job.player and returns the job
// with updated job.resolved via the out channel. // with updated job.resolved via the out channel.
// The loop exits when the ctx is done. wg.Done() is called when the function returns. // The loop exits when the ctx is done. wg.Done() is called when the function returns.
func progressGames(ctx context.Context, in <-chan job, out chan<- job, wg *sync.WaitGroup) { func progressGames(ctx context.Context, in <-chan job, out chan<- job, wg *sync.WaitGroup, threadActive, threadIdle func()) {
defer wg.Done() defer wg.Done()
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return return
case j := <-in: case j := <-in:
threadActive()
j.status = j.player.ProgressGame(ctx) j.status = j.player.ProgressGame(ctx)
out <- j out <- j
threadIdle()
} }
} }
} }
...@@ -7,6 +7,7 @@ import ( ...@@ -7,6 +7,7 @@ import (
"time" "time"
"github.com/ethereum-optimism/optimism/op-challenger/game/types" "github.com/ethereum-optimism/optimism/op-challenger/game/types"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
...@@ -15,18 +16,32 @@ func TestWorkerShouldProcessJobsUntilContextDone(t *testing.T) { ...@@ -15,18 +16,32 @@ func TestWorkerShouldProcessJobsUntilContextDone(t *testing.T) {
in := make(chan job, 2) in := make(chan job, 2)
out := make(chan job, 2) out := make(chan job, 2)
ms := &metricSink{}
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(1) wg.Add(1)
go progressGames(ctx, in, out, &wg) go progressGames(ctx, in, out, &wg, ms.ThreadActive, ms.ThreadIdle)
in <- job{ in <- job{
player: &stubPlayer{status: types.GameStatusInProgress}, player: &stubPlayer{status: types.GameStatusInProgress},
} }
waitErr := wait.For(context.Background(), 100*time.Millisecond, func() (bool, error) {
return ms.activeCalls >= 1, nil
})
require.NoError(t, waitErr)
require.Equal(t, ms.activeCalls, 1)
require.Equal(t, ms.idleCalls, 1)
in <- job{ in <- job{
player: &stubPlayer{status: types.GameStatusDefenderWon}, player: &stubPlayer{status: types.GameStatusDefenderWon},
} }
waitErr = wait.For(context.Background(), 100*time.Millisecond, func() (bool, error) {
return ms.activeCalls >= 2, nil
})
require.NoError(t, waitErr)
require.Equal(t, ms.activeCalls, 2)
require.Equal(t, ms.idleCalls, 2)
result1 := readWithTimeout(t, out) result1 := readWithTimeout(t, out)
result2 := readWithTimeout(t, out) result2 := readWithTimeout(t, out)
...@@ -39,6 +54,19 @@ func TestWorkerShouldProcessJobsUntilContextDone(t *testing.T) { ...@@ -39,6 +54,19 @@ func TestWorkerShouldProcessJobsUntilContextDone(t *testing.T) {
wg.Wait() wg.Wait()
} }
type metricSink struct {
activeCalls int
idleCalls int
}
func (m *metricSink) ThreadActive() {
m.activeCalls++
}
func (m *metricSink) ThreadIdle() {
m.idleCalls++
}
type stubPlayer struct { type stubPlayer struct {
status types.GameStatus status types.GameStatus
} }
...@@ -47,6 +75,10 @@ func (s *stubPlayer) ProgressGame(ctx context.Context) types.GameStatus { ...@@ -47,6 +75,10 @@ func (s *stubPlayer) ProgressGame(ctx context.Context) types.GameStatus {
return s.status return s.status
} }
func (s *stubPlayer) Status() types.GameStatus {
return s.status
}
func readWithTimeout[T any](t *testing.T, ch <-chan T) T { func readWithTimeout[T any](t *testing.T, ch <-chan T) T {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel() defer cancel()
......
...@@ -25,10 +25,17 @@ type Metricer interface { ...@@ -25,10 +25,17 @@ type Metricer interface {
RecordGameMove() RecordGameMove()
RecordCannonExecutionTime(t float64) RecordCannonExecutionTime(t float64)
RecordGameClaimCount(addr string, count int)
RecordGamesStatus(inProgress, defenderWon, challengerWon int) RecordGamesStatus(inProgress, defenderWon, challengerWon int)
RecordGameUpdateScheduled() RecordGameUpdateScheduled()
RecordGameUpdateCompleted() RecordGameUpdateCompleted()
IncActiveExecutors()
DecActiveExecutors()
IncIdleExecutors()
DecIdleExecutors()
} }
type Metrics struct { type Metrics struct {
...@@ -41,11 +48,15 @@ type Metrics struct { ...@@ -41,11 +48,15 @@ type Metrics struct {
info prometheus.GaugeVec info prometheus.GaugeVec
up prometheus.Gauge up prometheus.Gauge
executors prometheus.GaugeVec
moves prometheus.Counter moves prometheus.Counter
steps prometheus.Counter steps prometheus.Counter
cannonExecutionTime prometheus.Histogram cannonExecutionTime prometheus.Histogram
gameClaimCount prometheus.GaugeVec
trackedGames prometheus.GaugeVec trackedGames prometheus.GaugeVec
inflightGames prometheus.Gauge inflightGames prometheus.Gauge
} }
...@@ -75,6 +86,13 @@ func NewMetrics() *Metrics { ...@@ -75,6 +86,13 @@ func NewMetrics() *Metrics {
Name: "up", Name: "up",
Help: "1 if the op-challenger has finished starting up", Help: "1 if the op-challenger has finished starting up",
}), }),
executors: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: Namespace,
Name: "executors",
Help: "Number of active and idle executors",
}, []string{
"status",
}),
moves: factory.NewCounter(prometheus.CounterOpts{ moves: factory.NewCounter(prometheus.CounterOpts{
Namespace: Namespace, Namespace: Namespace,
Name: "moves", Name: "moves",
...@@ -93,6 +111,13 @@ func NewMetrics() *Metrics { ...@@ -93,6 +111,13 @@ func NewMetrics() *Metrics {
[]float64{1.0, 10.0}, []float64{1.0, 10.0},
prometheus.ExponentialBuckets(30.0, 2.0, 14)...), prometheus.ExponentialBuckets(30.0, 2.0, 14)...),
}), }),
gameClaimCount: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: Namespace,
Name: "game_claim_count",
Help: "Number of claims in the game",
}, []string{
"game_address",
}),
trackedGames: *factory.NewGaugeVec(prometheus.GaugeOpts{ trackedGames: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: Namespace, Namespace: Namespace,
Name: "tracked_games", Name: "tracked_games",
...@@ -149,6 +174,26 @@ func (m *Metrics) RecordCannonExecutionTime(t float64) { ...@@ -149,6 +174,26 @@ func (m *Metrics) RecordCannonExecutionTime(t float64) {
m.cannonExecutionTime.Observe(t) m.cannonExecutionTime.Observe(t)
} }
func (m *Metrics) IncActiveExecutors() {
m.executors.WithLabelValues("active").Inc()
}
func (m *Metrics) DecActiveExecutors() {
m.executors.WithLabelValues("active").Dec()
}
func (m *Metrics) IncIdleExecutors() {
m.executors.WithLabelValues("idle").Inc()
}
func (m *Metrics) DecIdleExecutors() {
m.executors.WithLabelValues("idle").Dec()
}
func (m *Metrics) RecordGameClaimCount(addr string, count int) {
m.gameClaimCount.With(prometheus.Labels{"game_address": addr}).Set(float64(count))
}
func (m *Metrics) RecordGamesStatus(inProgress, defenderWon, challengerWon int) { func (m *Metrics) RecordGamesStatus(inProgress, defenderWon, challengerWon int) {
m.trackedGames.WithLabelValues("in_progress").Set(float64(inProgress)) m.trackedGames.WithLabelValues("in_progress").Set(float64(inProgress))
m.trackedGames.WithLabelValues("defender_won").Set(float64(defenderWon)) m.trackedGames.WithLabelValues("defender_won").Set(float64(defenderWon))
......
...@@ -22,3 +22,10 @@ func (*NoopMetricsImpl) RecordGamesStatus(inProgress, defenderWon, challengerWon ...@@ -22,3 +22,10 @@ func (*NoopMetricsImpl) RecordGamesStatus(inProgress, defenderWon, challengerWon
func (*NoopMetricsImpl) RecordGameUpdateScheduled() {} func (*NoopMetricsImpl) RecordGameUpdateScheduled() {}
func (*NoopMetricsImpl) RecordGameUpdateCompleted() {} func (*NoopMetricsImpl) RecordGameUpdateCompleted() {}
func (*NoopMetricsImpl) IncActiveExecutors() {}
func (*NoopMetricsImpl) DecActiveExecutors() {}
func (*NoopMetricsImpl) IncIdleExecutors() {}
func (*NoopMetricsImpl) DecIdleExecutors() {}
func (*NoopMetricsImpl) RecordGameClaimCount(addr string, count int) {}
...@@ -492,7 +492,7 @@ func TestSystemMockP2P(t *testing.T) { ...@@ -492,7 +492,7 @@ func TestSystemMockP2P(t *testing.T) {
verifierPeerID := sys.RollupNodes["verifier"].P2P().Host().ID() verifierPeerID := sys.RollupNodes["verifier"].P2P().Host().ID()
check := func() bool { check := func() bool {
sequencerBlocksTopicPeers := sys.RollupNodes["sequencer"].P2P().GossipOut().BlocksTopicPeers() sequencerBlocksTopicPeers := sys.RollupNodes["sequencer"].P2P().GossipOut().BlocksTopicPeers()
return slices.Contains[peer.ID](sequencerBlocksTopicPeers, verifierPeerID) return slices.Contains[[]peer.ID](sequencerBlocksTopicPeers, verifierPeerID)
} }
// poll to see if the verifier node is connected & meshed on gossip. // poll to see if the verifier node is connected & meshed on gossip.
......
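A minimal sketch of why the explicit type parameter changes, assuming `golang.org/x/exp/slices` after its signature moved to `Contains[S ~[]E, E comparable]`; the names here are illustrative, not from the PR:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	peers := []string{"alice", "bob"}
	// With Contains[S ~[]E, E comparable], the first explicit type argument
	// is the slice type, not the element type; E is inferred from S.
	fmt.Println(slices.Contains[[]string](peers, "alice"))
}
```

The test change above follows the same pattern with `S = []peer.ID`.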
FROM golang:1.20.7-alpine3.18 as builder FROM golang:1.21.1-alpine3.18 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
......
FROM --platform=$BUILDPLATFORM golang:1.20.7-alpine3.18 as builder FROM --platform=$BUILDPLATFORM golang:1.21.1-alpine3.18 as builder
ARG VERSION=v0.0.0 ARG VERSION=v0.0.0
......
...@@ -256,6 +256,11 @@ var ( ...@@ -256,6 +256,11 @@ var (
EnvVars: prefixEnvVars("BETA_ROLLUP_LOAD_PROTOCOL_VERSIONS"), EnvVars: prefixEnvVars("BETA_ROLLUP_LOAD_PROTOCOL_VERSIONS"),
Hidden: true, Hidden: true,
} }
CanyonOverrideFlag = &cli.Uint64Flag{
Name: "override.canyon",
Usage: "Manually specify the Canyon fork timestamp, overriding the bundled setting",
Hidden: true,
}
) )
var requiredFlags = []cli.Flag{ var requiredFlags = []cli.Flag{
...@@ -300,6 +305,7 @@ var optionalFlags = []cli.Flag{ ...@@ -300,6 +305,7 @@ var optionalFlags = []cli.Flag{
BetaExtraNetworks, BetaExtraNetworks,
BetaRollupHalt, BetaRollupHalt,
BetaRollupLoadProtocolVersions, BetaRollupLoadProtocolVersions,
CanyonOverrideFlag,
} }
// Flags contains the list of configuration options available to the binary. // Flags contains the list of configuration options available to the binary.
......
...@@ -7,7 +7,11 @@ import ( ...@@ -7,7 +7,11 @@ import (
"sync" "sync"
"time" "time"
//nolint:all
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
libp2p "github.com/libp2p/go-libp2p" libp2p "github.com/libp2p/go-libp2p"
mplex "github.com/libp2p/go-libp2p-mplex"
lconf "github.com/libp2p/go-libp2p/config" lconf "github.com/libp2p/go-libp2p/config"
"github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/connmgr"
"github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/host"
...@@ -16,8 +20,6 @@ import ( ...@@ -16,8 +20,6 @@ import (
"github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/sec/insecure" "github.com/libp2p/go-libp2p/core/sec/insecure"
basichost "github.com/libp2p/go-libp2p/p2p/host/basic" basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
"github.com/libp2p/go-libp2p/p2p/muxer/mplex"
"github.com/libp2p/go-libp2p/p2p/muxer/yamux" "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
"github.com/libp2p/go-libp2p/p2p/security/noise" "github.com/libp2p/go-libp2p/p2p/security/noise"
tls "github.com/libp2p/go-libp2p/p2p/security/tls" tls "github.com/libp2p/go-libp2p/p2p/security/tls"
......
...@@ -10,6 +10,9 @@ import ( ...@@ -10,6 +10,9 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
//nolint:all
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
p2pMocks "github.com/ethereum-optimism/optimism/op-node/p2p/mocks" p2pMocks "github.com/ethereum-optimism/optimism/op-node/p2p/mocks"
"github.com/ethereum-optimism/optimism/op-node/p2p/store" "github.com/ethereum-optimism/optimism/op-node/p2p/store"
testlog "github.com/ethereum-optimism/optimism/op-node/testlog" testlog "github.com/ethereum-optimism/optimism/op-node/testlog"
...@@ -23,7 +26,6 @@ import ( ...@@ -23,7 +26,6 @@ import (
peer "github.com/libp2p/go-libp2p/core/peer" peer "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/peerstore"
bhost "github.com/libp2p/go-libp2p/p2p/host/blank" bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
tswarm "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" tswarm "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
"github.com/stretchr/testify/mock" "github.com/stretchr/testify/mock"
......
...@@ -6,13 +6,15 @@ import ( ...@@ -6,13 +6,15 @@ import (
"testing" "testing"
"time" "time"
//nolint:all
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/clock" "github.com/ethereum-optimism/optimism/op-service/clock"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
ds "github.com/ipfs/go-datastore" ds "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/sync" "github.com/ipfs/go-datastore/sync"
"github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
......
...@@ -75,6 +75,10 @@ type Config struct { ...@@ -75,6 +75,10 @@ type Config struct {
// Active if RegolithTime != nil && L2 block timestamp >= *RegolithTime, inactive otherwise. // Active if RegolithTime != nil && L2 block timestamp >= *RegolithTime, inactive otherwise.
RegolithTime *uint64 `json:"regolith_time,omitempty"` RegolithTime *uint64 `json:"regolith_time,omitempty"`
// CanyonTime sets the activation time of the next network upgrade.
// Active if CanyonTime != nil && L2 block timestamp >= *CanyonTime, inactive otherwise.
CanyonTime *uint64 `json:"canyon_time,omitempty"`
// Note: below addresses are part of the block-derivation process, // Note: below addresses are part of the block-derivation process,
// and required to be the same network-wide to stay in consensus. // and required to be the same network-wide to stay in consensus.
...@@ -259,6 +263,11 @@ func (c *Config) IsRegolith(timestamp uint64) bool { ...@@ -259,6 +263,11 @@ func (c *Config) IsRegolith(timestamp uint64) bool {
return c.RegolithTime != nil && timestamp >= *c.RegolithTime return c.RegolithTime != nil && timestamp >= *c.RegolithTime
} }
// IsCanyon returns true if the Canyon hardfork is active at or past the given timestamp.
func (c *Config) IsCanyon(timestamp uint64) bool {
return c.CanyonTime != nil && timestamp >= *c.CanyonTime
}
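`IsCanyon` follows the same nil-check-plus-timestamp pattern as `IsRegolith`: the fork is inactive until a time is configured, and active for any block at or after that time. A minimal sketch of how calling code might gate behaviour on it; the cut-down `Config` and the returned strings are placeholders for illustration, not the actual derivation logic:

```go
package rollupdemo

// Config is a cut-down stand-in for the rollup config; only CanyonTime and
// the IsCanyon check mirror the change above.
type Config struct {
	CanyonTime *uint64
}

// IsCanyon reports whether the Canyon fork is active at the given timestamp.
func (c *Config) IsCanyon(timestamp uint64) bool {
	return c.CanyonTime != nil && timestamp >= *c.CanyonTime
}

// payloadVariant shows a typical fork-gating call site; the returned strings
// are placeholders for fork-specific behaviour.
func payloadVariant(cfg *Config, l2Timestamp uint64) string {
	if cfg.IsCanyon(l2Timestamp) {
		return "canyon"
	}
	return "pre-canyon"
}
```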
// Description outputs a banner describing the important parts of rollup configuration in a human-readable form. // Description outputs a banner describing the important parts of rollup configuration in a human-readable form.
// Optionally provide a mapping of L2 chain IDs to network names to label the L2 chain with if not unknown. // Optionally provide a mapping of L2 chain IDs to network names to label the L2 chain with if not unknown.
// The config should be config.Check()-ed before creating a description. // The config should be config.Check()-ed before creating a description.
...@@ -286,6 +295,7 @@ func (c *Config) Description(l2Chains map[string]string) string { ...@@ -286,6 +295,7 @@ func (c *Config) Description(l2Chains map[string]string) string {
// Report the upgrade configuration // Report the upgrade configuration
banner += "Post-Bedrock Network Upgrades (timestamp based):\n" banner += "Post-Bedrock Network Upgrades (timestamp based):\n"
banner += fmt.Sprintf(" - Regolith: %s\n", fmtForkTimeOrUnset(c.RegolithTime)) banner += fmt.Sprintf(" - Regolith: %s\n", fmtForkTimeOrUnset(c.RegolithTime))
banner += fmt.Sprintf(" - Canyon: %s\n", fmtForkTimeOrUnset(c.CanyonTime))
// Report the protocol version // Report the protocol version
banner += fmt.Sprintf("Node supports up to OP-Stack Protocol Version: %s\n", OPStackSupport) banner += fmt.Sprintf("Node supports up to OP-Stack Protocol Version: %s\n", OPStackSupport)
return banner return banner
...@@ -310,7 +320,8 @@ func (c *Config) LogDescription(log log.Logger, l2Chains map[string]string) { ...@@ -310,7 +320,8 @@ func (c *Config) LogDescription(log log.Logger, l2Chains map[string]string) {
log.Info("Rollup Config", "l2_chain_id", c.L2ChainID, "l2_network", networkL2, "l1_chain_id", c.L1ChainID, log.Info("Rollup Config", "l2_chain_id", c.L2ChainID, "l2_network", networkL2, "l1_chain_id", c.L1ChainID,
"l1_network", networkL1, "l2_start_time", c.Genesis.L2Time, "l2_block_hash", c.Genesis.L2.Hash.String(), "l1_network", networkL1, "l2_start_time", c.Genesis.L2Time, "l2_block_hash", c.Genesis.L2.Hash.String(),
"l2_block_number", c.Genesis.L2.Number, "l1_block_hash", c.Genesis.L1.Hash.String(), "l2_block_number", c.Genesis.L2.Number, "l1_block_hash", c.Genesis.L1.Hash.String(),
"l1_block_number", c.Genesis.L1.Number, "regolith_time", fmtForkTimeOrUnset(c.RegolithTime)) "l1_block_number", c.Genesis.L1.Number, "regolith_time", fmtForkTimeOrUnset(c.RegolithTime),
"canyon_time", fmtForkTimeOrUnset(c.CanyonTime))
} }
func fmtForkTimeOrUnset(v *uint64) string { func fmtForkTimeOrUnset(v *uint64) string {
......
...@@ -202,6 +202,10 @@ Conflicting configuration is deprecated, and will stop the op-node from starting ...@@ -202,6 +202,10 @@ Conflicting configuration is deprecated, and will stop the op-node from starting
if err != nil { if err != nil {
return nil, err return nil, err
} }
if ctx.IsSet(flags.CanyonOverrideFlag.Name) {
canyon := ctx.Uint64(flags.CanyonOverrideFlag.Name)
config.CanyonTime = &canyon
}
return config, nil return config, nil
} }
...@@ -216,6 +220,10 @@ Conflicting configuration is deprecated, and will stop the op-node from starting ...@@ -216,6 +220,10 @@ Conflicting configuration is deprecated, and will stop the op-node from starting
if err := json.NewDecoder(file).Decode(&rollupConfig); err != nil { if err := json.NewDecoder(file).Decode(&rollupConfig); err != nil {
return nil, fmt.Errorf("failed to decode rollup config: %w", err) return nil, fmt.Errorf("failed to decode rollup config: %w", err)
} }
if ctx.IsSet(flags.CanyonOverrideFlag.Name) {
canyon := ctx.Uint64(flags.CanyonOverrideFlag.Name)
rollupConfig.CanyonTime = &canyon
}
return &rollupConfig, nil return &rollupConfig, nil
} }
......
FROM --platform=$BUILDPLATFORM golang:1.20.7-alpine3.18 as builder FROM --platform=$BUILDPLATFORM golang:1.21.1-alpine3.18 as builder
ARG VERSION=v0.0.0 ARG VERSION=v0.0.0
......
FROM --platform=$BUILDPLATFORM golang:1.20.7-alpine3.18 as builder FROM --platform=$BUILDPLATFORM golang:1.21.1-alpine3.18 as builder
ARG VERSION=v0.0.0 ARG VERSION=v0.0.0
......
FROM golang:1.20.4-alpine3.18 as builder FROM golang:1.21.1-alpine3.18 as builder
ARG GITCOMMIT=docker ARG GITCOMMIT=docker
ARG GITDATE=docker ARG GITDATE=docker
......
FROM golang:1.20.7-alpine3.18 as builder FROM golang:1.21.1-alpine3.18 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers RUN apk add --no-cache make gcc musl-dev linux-headers
......
...@@ -56,11 +56,7 @@ log = logging.getLogger(__name__) ...@@ -56,11 +56,7 @@ log = logging.getLogger(__name__)
def main(): def main():
patterns = sys.argv[1].split(',') patterns = sys.argv[1].split(',')
patterns = patterns + REBUILD_ALL_PATTERNS
# temporarily only run indexer tests if indexer is changed because the tests are flaky
if len(patterns) != 1 or patterns[0] != "indexer":
patterns = patterns + REBUILD_ALL_PATTERNS
fp = os.path.realpath(__file__) fp = os.path.realpath(__file__)
monorepo_path = os.path.realpath(os.path.join(fp, '..', '..')) monorepo_path = os.path.realpath(os.path.join(fp, '..', '..'))
......
...@@ -11,8 +11,6 @@ RUN apt-get update && \ ...@@ -11,8 +11,6 @@ RUN apt-get update && \
chmod +x ./rustup.sh && \ chmod +x ./rustup.sh && \
./rustup.sh -y ./rustup.sh -y
COPY ./.abigenrc ./.abigenrc
# Only diff from upstream docker image is this clone instead # Only diff from upstream docker image is this clone instead
# of COPY. We select a specific commit to use. # of COPY. We select a specific commit to use.
COPY ./.foundryrc ./.foundryrc COPY ./.foundryrc ./.foundryrc
...@@ -27,8 +25,6 @@ RUN source $HOME/.profile && \ ...@@ -27,8 +25,6 @@ RUN source $HOME/.profile && \
strip /opt/foundry/target/release/cast && \ strip /opt/foundry/target/release/cast && \
strip /opt/foundry/target/release/anvil strip /opt/foundry/target/release/anvil
FROM ethereum/client-go:alltools-v1.10.25 as geth
FROM ghcr.io/crytic/echidna/echidna:v2.0.4 as echidna-test FROM ghcr.io/crytic/echidna/echidna:v2.0.4 as echidna-test
FROM python:3.11.4-slim-bullseye FROM python:3.11.4-slim-bullseye
...@@ -45,8 +41,8 @@ COPY --from=echidna-test /usr/local/bin/echidna-test /usr/local/bin/echidna-test ...@@ -45,8 +41,8 @@ COPY --from=echidna-test /usr/local/bin/echidna-test /usr/local/bin/echidna-test
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y bash curl openssh-client git build-essential ca-certificates jq musl gnupg coreutils && \ apt-get install -y bash curl openssh-client git build-essential ca-certificates jq musl gnupg coreutils && \
curl -sL https://deb.nodesource.com/setup_16.x -o nodesource_setup.sh && \ curl -sL https://deb.nodesource.com/setup_16.x -o nodesource_setup.sh && \
curl -sL https://go.dev/dl/go1.20.linux-amd64.tar.gz -o go1.20.linux-amd64.tar.gz && \ curl -sL https://go.dev/dl/go1.21.1.linux-amd64.tar.gz -o go1.21.1.linux-amd64.tar.gz && \
tar -C /usr/local/ -xzvf go1.20.linux-amd64.tar.gz && \ tar -C /usr/local/ -xzvf go1.21.1.linux-amd64.tar.gz && \
ln -s /usr/local/go/bin/gofmt /usr/local/bin/gofmt && \ ln -s /usr/local/go/bin/gofmt /usr/local/bin/gofmt && \
bash nodesource_setup.sh && \ bash nodesource_setup.sh && \
apt-get install -y nodejs && \ apt-get install -y nodejs && \
...@@ -56,6 +52,8 @@ RUN apt-get update && \ ...@@ -56,6 +52,8 @@ RUN apt-get update && \
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.53.3 && \ curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.53.3 && \
curl -fLSs https://raw.githubusercontent.com/CircleCI-Public/circleci-cli/master/install.sh | bash curl -fLSs https://raw.githubusercontent.com/CircleCI-Public/circleci-cli/master/install.sh | bash
COPY ./.abigenrc ./.abigenrc
# Install the specific version of abigen from .abigenrc # Install the specific version of abigen from .abigenrc
RUN go install github.com/ethereum/go-ethereum/cmd/abigen@$(cat .abigenrc) RUN go install github.com/ethereum/go-ethereum/cmd/abigen@$(cat .abigenrc)
......
# @eth-optimism/drippie-mon # @eth-optimism/drippie-mon
## 0.5.0
### Minor Changes
- [#7178](https://github.com/ethereum-optimism/optimism/pull/7178) [`85d1622df`](https://github.com/ethereum-optimism/optimism/commit/85d1622dfdc16f220f7df0be42ba8cbc5dea31c5) Thanks [@tynes](https://github.com/tynes)! - Use node.js v18
### Patch Changes
- Updated dependencies [[`210b2c81d`](https://github.com/ethereum-optimism/optimism/commit/210b2c81dd383bad93480aa876b283d9a0c991c2), [`679207751`](https://github.com/ethereum-optimism/optimism/commit/6792077510fd76553c179d8b8d068262cda18db6), [`2440f5e7a`](https://github.com/ethereum-optimism/optimism/commit/2440f5e7ab6577f2d2e9c8b0c78c014290dde8e7)]:
- @eth-optimism/core-utils@0.13.0
- @eth-optimism/sdk@3.1.3
- @eth-optimism/contracts-bedrock@0.16.1
- @eth-optimism/common-ts@0.8.6
## 0.4.4 ## 0.4.4
### Patch Changes ### Patch Changes
......
{ {
"private": true, "private": true,
"name": "@eth-optimism/chain-mon", "name": "@eth-optimism/chain-mon",
"version": "0.4.4", "version": "0.5.0",
"description": "[Optimism] Chain monitoring services", "description": "[Optimism] Chain monitoring services",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
......
# @eth-optimism/common-ts # @eth-optimism/common-ts
## 0.8.6
### Patch Changes
- Updated dependencies [[`210b2c81d`](https://github.com/ethereum-optimism/optimism/commit/210b2c81dd383bad93480aa876b283d9a0c991c2)]:
- @eth-optimism/core-utils@0.13.0
## 0.8.5 ## 0.8.5
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/common-ts", "name": "@eth-optimism/common-ts",
"version": "0.8.5", "version": "0.8.6",
"description": "[Optimism] Advanced typescript tooling used by various services", "description": "[Optimism] Advanced typescript tooling used by various services",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
"body-parser": "^1.20.2", "body-parser": "^1.20.2",
"commander": "^11.0.0", "commander": "^11.0.0",
"dotenv": "^16.3.1", "dotenv": "^16.3.1",
"envalid": "^7.3.1", "envalid": "^8.0.0",
"ethers": "^5.7.2", "ethers": "^5.7.2",
"express": "^4.18.2", "express": "^4.18.2",
"express-prom-bundle": "^6.6.0", "express-prom-bundle": "^6.6.0",
......
# @eth-optimism/contracts-bedrock # @eth-optimism/contracts-bedrock
## 0.16.1
### Patch Changes
- [#7244](https://github.com/ethereum-optimism/optimism/pull/7244) [`2440f5e7a`](https://github.com/ethereum-optimism/optimism/commit/2440f5e7ab6577f2d2e9c8b0c78c014290dde8e7) Thanks [@nitaliano](https://github.com/nitaliano)! - bumps sdk version to have access to sepolia deployments
## 0.16.0 ## 0.16.0
### Minor Changes ### Minor Changes
......
...@@ -38,5 +38,7 @@ ...@@ -38,5 +38,7 @@
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00", "l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
"eip1559Denominator": 50, "eip1559Denominator": 50,
"eip1559Elasticity": 10, "eip1559Elasticity": 10,
"systemConfigStartBlock": 8300214 "systemConfigStartBlock": 8300214,
"requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
"recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000"
} }
{ {
"finalSystemOwner": "0x858F0751ef8B4067f0d2668C076BDB50a8549fbF", "finalSystemOwner": "0x858F0751ef8B4067f0d2668C076BDB50a8549fbF",
"portalGuardian": "0x858F0751ef8B4067f0d2668C076BDB50a8549fbF", "portalGuardian": "0x858F0751ef8B4067f0d2668C076BDB50a8549fbF",
"l1StartingBlockTag": "0x19c7e6b18fe156e45f4cfef707294fd8f079fa9c30a7b7cd6ec1ce3682ec6a2e", "l1StartingBlockTag": "0x19c7e6b18fe156e45f4cfef707294fd8f079fa9c30a7b7cd6ec1ce3682ec6a2e",
"l1ChainID": 5, "l1ChainID": 5,
"l2ChainID": 998, "l2ChainID": 998,
"l2BlockTime": 2, "l2BlockTime": 2,
"l1BlockTime": 12, "l1BlockTime": 12,
"maxSequencerDrift": 1200, "maxSequencerDrift": 1200,
"sequencerWindowSize": 3600, "sequencerWindowSize": 3600,
"channelTimeout": 120, "channelTimeout": 120,
"p2pSequencerAddress": "0xf1a4a22a65Ff01EBB23A580146a3ED49D70c8932", "p2pSequencerAddress": "0xf1a4a22a65Ff01EBB23A580146a3ED49D70c8932",
"batchInboxAddress": "0xff00000000000000000000000000000000000998", "batchInboxAddress": "0xff00000000000000000000000000000000000998",
"batchSenderAddress": "0xE0Fa1Cc7a0FD5bD82b9A06b08FD6C4563E6635C2", "batchSenderAddress": "0xE0Fa1Cc7a0FD5bD82b9A06b08FD6C4563E6635C2",
...@@ -21,32 +18,27 @@ ...@@ -21,32 +18,27 @@
"l2OutputOracleProposer": "0xE06d39D4B8DC05E562353F060DED346AC4acC077", "l2OutputOracleProposer": "0xE06d39D4B8DC05E562353F060DED346AC4acC077",
"l2OutputOracleChallenger": "0xE06d39D4B8DC05E562353F060DED346AC4acC077", "l2OutputOracleChallenger": "0xE06d39D4B8DC05E562353F060DED346AC4acC077",
"finalizationPeriodSeconds": 12, "finalizationPeriodSeconds": 12,
"proxyAdminOwner": "0x858F0751ef8B4067f0d2668C076BDB50a8549fbF", "proxyAdminOwner": "0x858F0751ef8B4067f0d2668C076BDB50a8549fbF",
"baseFeeVaultRecipient": "0x858F0751ef8B4067f0d2668C076BDB50a8549fbF", "baseFeeVaultRecipient": "0x858F0751ef8B4067f0d2668C076BDB50a8549fbF",
"l1FeeVaultRecipient": "0x858F0751ef8B4067f0d2668C076BDB50a8549fbF", "l1FeeVaultRecipient": "0x858F0751ef8B4067f0d2668C076BDB50a8549fbF",
"sequencerFeeVaultRecipient": "0x858F0751ef8B4067f0d2668C076BDB50a8549fbF", "sequencerFeeVaultRecipient": "0x858F0751ef8B4067f0d2668C076BDB50a8549fbF",
"baseFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", "baseFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000",
"l1FeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", "l1FeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000",
"sequencerFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", "sequencerFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000",
"baseFeeVaultWithdrawalNetwork": 0, "baseFeeVaultWithdrawalNetwork": 0,
"l1FeeVaultWithdrawalNetwork": 0, "l1FeeVaultWithdrawalNetwork": 0,
"sequencerFeeVaultWithdrawalNetwork": 0, "sequencerFeeVaultWithdrawalNetwork": 0,
"enableGovernance": true, "enableGovernance": true,
"governanceTokenName": "Optimism", "governanceTokenName": "Optimism",
"governanceTokenSymbol": "OP", "governanceTokenSymbol": "OP",
"governanceTokenOwner": "0x858F0751ef8B4067f0d2668C076BDB50a8549fbF", "governanceTokenOwner": "0x858F0751ef8B4067f0d2668C076BDB50a8549fbF",
"l2GenesisBlockGasLimit": "0x17D7840", "l2GenesisBlockGasLimit": "0x17D7840",
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00", "l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
"gasPriceOracleOverhead": 2100, "gasPriceOracleOverhead": 2100,
"gasPriceOracleScalar": 1000000, "gasPriceOracleScalar": 1000000,
"eip1559Denominator": 50, "eip1559Denominator": 50,
"eip1559Elasticity": 10, "eip1559Elasticity": 10,
"systemConfigStartBlock": 8364212,
"systemConfigStartBlock": 8364212 "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
"recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000"
} }
{
"l1StartingBlockTag": "0x10da9fe",
"l1ChainID": 1,
"l2ChainID": 424,
"l2BlockTime": 2,
"finalizationPeriodSeconds": 604800,
"finalSystemOwner": "0x4a4962275DF8C60a80d3a25faEc5AA7De116A746",
"baseFeeVaultRecipient": "0xefCf0c8faFB425997870f845e26fC6cA6EE6dD5C",
"l1FeeVaultRecipient": "0xefCf0c8faFB425997870f845e26fC6cA6EE6dD5C",
"sequencerFeeVaultRecipient": "0xefCf0c8faFB425997870f845e26fC6cA6EE6dD5C",
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
"governanceTokenOwner": "0x4a4962275DF8C60a80d3a25faEc5AA7De116A746",
"governanceTokenSymbol": "OP",
"governanceTokenName": "Optimism",
"maxSequencerDrift": 600,
"sequencerWindowSize": 3600,
"channelTimeout": 300,
"p2pSequencerAddress": "0x7916D35b57c2E4AffCaB46C3E5C8d76D3b44d1F7",
"batchInboxAddress": "0xC1B90E1e459aBBDcEc4DCF90dA45ba077d83BFc5",
"batchSenderAddress": "0x99526b0e49A95833E734EB556A6aBaFFAb0Ee167",
"l2GenesisRegolithTimeOffset": "0x0",
"portalGuardian": "0x39E13D1AB040F6EA58CE19998edCe01B3C365f84",
"l2OutputOracleSubmissionInterval": 1800,
"l2OutputOracleStartingTimestamp": -1,
"l2OutputOracleProposer": "0x69968Ce0E92d9c101BAd81de55EFbcb69603cFe3",
"l2GenesisBlockGasLimit": "0x1c9c380",
"fundDevAccounts": false,
"gasPriceOracleOverhead": 188,
"gasPriceOracleScalar": 684000,
"eip1559Denominator": 50,
"eip1559Elasticity": 6,
"proxyAdminOwner": "0xefCf0c8faFB425997870f845e26fC6cA6EE6dD5C",
"l2OutputOracleChallenger": "0x39E13D1AB040F6EA58CE19998edCe01B3C365f84",
"baseFeeVaultWithdrawalNetwork": 0,
"l1FeeVaultWithdrawalNetwork": 0,
"sequencerFeeVaultWithdrawalNetwork": 0,
"l1BlockTime": 12
}
{
"l1StartingBlockTag": "0x10aa183",
"l1ChainID": 1,
"l2ChainID": 7777777,
"l2BlockTime": 2,
"finalizationPeriodSeconds": 604800,
"finalSystemOwner": "0xC72aE5c7cc9a332699305E29F68Be66c73b60542",
"baseFeeVaultRecipient": "0xe900b3Edc1BA0430CFa9a204A1027B90825ac951",
"l1FeeVaultRecipient": "0xe900b3Edc1BA0430CFa9a204A1027B90825ac951",
"sequencerFeeVaultRecipient": "0xe900b3Edc1BA0430CFa9a204A1027B90825ac951",
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
"governanceTokenOwner": "0xC72aE5c7cc9a332699305E29F68Be66c73b60542",
"governanceTokenSymbol": "OP",
"governanceTokenName": "Optimism",
"maxSequencerDrift": 600,
"sequencerWindowSize": 3600,
"channelTimeout": 300,
"p2pSequencerAddress": "0x3Dc8Dfd0709C835cAd15a6A27e089FF4cF4C9228",
"batchInboxAddress": "0x6F54Ca6F6EdE96662024Ffd61BFd18f3f4e34DFf",
"batchSenderAddress": "0x625726c858dBF78c0125436C943Bf4b4bE9d9033",
"l2GenesisRegolithTimeOffset": "0x0",
"portalGuardian": "0xC72aE5c7cc9a332699305E29F68Be66c73b60542",
"l2OutputOracleSubmissionInterval": 1800,
"l2OutputOracleStartingTimestamp": -1,
"l2OutputOracleProposer": "0x48247032092e7b0ecf5dEF611ad89eaf3fC888Dd",
"l2GenesisBlockGasLimit": "0x1c9c380",
"fundDevAccounts": false,
"gasPriceOracleOverhead": 188,
"gasPriceOracleScalar": 684000,
"eip1559Denominator": 50,
"eip1559Elasticity": 6,
"proxyAdminOwner": "0xb0cCdbD6fe09D2199171BE19450aF249250518A0",
"l2OutputOracleChallenger": "0xcA4571b1ecBeC86Ea2E660d242c1c29FcB55Dc72",
"baseFeeVaultWithdrawalNetwork": 0,
"l1FeeVaultWithdrawalNetwork": 0,
"sequencerFeeVaultWithdrawalNetwork": 0,
"l1BlockTime": 12
}
...@@ -30,6 +30,7 @@ fs_permissions = [ ...@@ -30,6 +30,7 @@ fs_permissions = [
{ access='read-write', path='./.resource-metering.csv' }, { access='read-write', path='./.resource-metering.csv' },
{ access='read-write', path='./deployments/' }, { access='read-write', path='./deployments/' },
{ access='read', path='./deploy-config/' }, { access='read', path='./deploy-config/' },
{ access='read', path='./periphery-deploy-config/' },
{ access='read', path='./broadcast/' }, { access='read', path='./broadcast/' },
{ access='read', path = './forge-artifacts/' }, { access='read', path = './forge-artifacts/' },
{ access='write', path='./semver-lock.json' }, { access='write', path='./semver-lock.json' },
......
{ {
"name": "@eth-optimism/contracts-bedrock", "name": "@eth-optimism/contracts-bedrock",
"version": "0.16.0", "version": "0.16.1",
"description": "Contracts for Optimism Specs", "description": "Contracts for Optimism Specs",
"license": "MIT", "license": "MIT",
"files": [ "files": [
......
{
"faucetAdmin": "0xf2C22a95bBA6F35545269183D8d1751a27F047F6"
}
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import { console2 as console } from "forge-std/console2.sol";
import { Deployer } from "./Deployer.sol";
import { PeripheryDeployConfig } from "./PeripheryDeployConfig.s.sol";
import { ProxyAdmin } from "src/universal/ProxyAdmin.sol";
import { Proxy } from "src/universal/Proxy.sol";
import { Faucet } from "src/periphery/faucet/Faucet.sol";
/// @title DeployPeriphery
/// @notice Script used to deploy periphery contracts.
contract DeployPeriphery is Deployer {
PeripheryDeployConfig cfg;
/// @notice The name of the script, used to ensure the right deploy artifacts
/// are used.
function name() public pure override returns (string memory) {
return "DeployPeriphery";
}
function setUp() public override {
super.setUp();
string memory path = string.concat(vm.projectRoot(), "/periphery-deploy-config/", deploymentContext, ".json");
cfg = new PeripheryDeployConfig(path);
console.log("Deploying from %s", deployScript);
console.log("Deployment context: %s", deploymentContext);
}
/// @notice Deploy all of the periphery contracts
function run() public {
console.log("Deploying all periphery contracts");
deployProxies();
deployImplementations();
initializeFaucet();
}
/// @notice Deploy all of the proxies
function deployProxies() public {
deployProxyAdmin();
deployFaucetProxy();
}
/// @notice Deploy all of the implementations
function deployImplementations() public {
deployFaucet();
}
/// @notice Modifier that wraps a function in broadcasting.
modifier broadcast() {
vm.startBroadcast();
_;
vm.stopBroadcast();
}
/// @notice Deploy the ProxyAdmin
function deployProxyAdmin() public broadcast returns (address addr_) {
ProxyAdmin admin = new ProxyAdmin{ salt: keccak256(bytes("ProxyAdmin")) }({
_owner: msg.sender
});
require(admin.owner() == msg.sender);
save("ProxyAdmin", address(admin));
console.log("ProxyAdmin deployed at %s", address(admin));
addr_ = address(admin);
}
/// @notice Deploy the FaucetProxy
function deployFaucetProxy() public broadcast returns (address addr_) {
address proxyAdmin = mustGetAddress("ProxyAdmin");
Proxy proxy = new Proxy{ salt: keccak256(bytes("FaucetProxy")) }({
_admin: proxyAdmin
});
address admin = address(uint160(uint256(vm.load(address(proxy), OWNER_KEY))));
require(admin == proxyAdmin);
save("FaucetProxy", address(proxy));
console.log("FaucetProxy deployed at %s", address(proxy));
addr_ = address(proxy);
}
/// @notice Deploy the faucet contract.
function deployFaucet() public broadcast returns (address) {
Faucet faucet = new Faucet{ salt: keccak256(bytes("Faucet")) }(cfg.faucetAdmin());
require(faucet.ADMIN() == cfg.faucetAdmin());
save("Faucet", address(faucet));
console.log("Faucet deployed at %s", address(faucet));
return address(faucet);
}
/// @notice Initialize the Faucet
function initializeFaucet() public broadcast {
ProxyAdmin proxyAdmin = ProxyAdmin(mustGetAddress("ProxyAdmin"));
address faucetProxy = mustGetAddress("FaucetProxy");
address faucet = mustGetAddress("Faucet");
proxyAdmin.upgrade({ _proxy: payable(faucetProxy), _implementation: faucet });
require(Faucet(payable(faucetProxy)).ADMIN() == Faucet(payable(faucet)).ADMIN());
}
}
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import { Script } from "forge-std/Script.sol";
import { console2 as console } from "forge-std/console2.sol";
import { stdJson } from "forge-std/StdJson.sol";
/// @title PeripheryDeployConfig
/// @notice Represents the configuration required to deploy the periphery contracts. It is expected
/// to be read from a JSON file. A future improvement would be to provide fallback
/// values for keys that are not defined in the JSON itself.
contract PeripheryDeployConfig is Script {
string internal _json;
address public faucetAdmin;
constructor(string memory _path) {
console.log("PeripheryDeployConfig: reading file %s", _path);
try vm.readFile(_path) returns (string memory data) {
_json = data;
} catch {
console.log("Warning: unable to read config. Do not deploy unless you intend to run without a config.");
return;
}
faucetAdmin = stdJson.readAddress(_json, "$.faucetAdmin");
}
}
# @eth-optimism/core-utils # @eth-optimism/core-utils
## 0.13.0
### Minor Changes
- [#7336](https://github.com/ethereum-optimism/optimism/pull/7336) [`210b2c81d`](https://github.com/ethereum-optimism/optimism/commit/210b2c81dd383bad93480aa876b283d9a0c991c2) Thanks [@tynes](https://github.com/tynes)! - Delete unmaintained geth types
## 0.12.3 ## 0.12.3
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/core-utils", "name": "@eth-optimism/core-utils",
"version": "0.12.3", "version": "0.13.0",
"description": "[Optimism] Core typescript utilities", "description": "[Optimism] Core typescript utilities",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
......
// Types explicitly related to dealing with Geth.
/**
* Represents the Ethereum state, in the format that Geth expects it.
*/
export interface State {
[address: string]: {
nonce?: string
balance?: string
codeHash?: string
root?: string
code?: string
storage?: {
[key: string]: string
}
secretKey?: string
}
}
/**
* Represents Geth's ChainConfig
*/
export interface ChainConfig {
chainId: number
homesteadBlock: number
eip150Block: number
eip150Hash?: string
eip155Block: number
eip158Block: number
byzantiumBlock: number
constantinopleBlock: number
petersburgBlock: number
istanbulBlock: number
muirGlacierBlock: number
berlinBlock: number
londonBlock?: number
arrowGlacierBlock?: number
grayGlacierBlock?: number
mergeNetsplitBlock?: number
terminalTotalDifficulty?: number
clique?: {
period: number
epoch: number
}
ethash?: {}
}
/**
* Represents Geth's genesis file format.
*/
export interface Genesis {
config: ChainConfig
nonce?: string
timestamp?: string
difficulty: string
mixHash?: string
coinbase?: string
number?: string
gasLimit: string
gasUsed?: string
parentHash?: string
extraData: string
baseFeePerGas?: string
alloc: State
}
/**
* Represents the chain config for an Optimism chain
*/
export interface OptimismChainConfig extends ChainConfig {
optimism: {
baseFeeRecipient: string
l1FeeRecipient: string
}
}
/**
* Represents the Genesis file format for an Optimism chain
*/
export interface OptimismGenesis extends Genesis {
config: OptimismChainConfig
}
/**
* Utilities related to go-ethereum (Geth)
*/
export * from './geth-types'
...@@ -4,4 +4,3 @@ ...@@ -4,4 +4,3 @@
export * from './bcfg' export * from './bcfg'
export * from './ethers' export * from './ethers'
export * from './geth'
# @eth-optimism/sdk # @eth-optimism/sdk
## 3.1.3
### Patch Changes
- [#7244](https://github.com/ethereum-optimism/optimism/pull/7244) [`679207751`](https://github.com/ethereum-optimism/optimism/commit/6792077510fd76553c179d8b8d068262cda18db6) Thanks [@nitaliano](https://github.com/nitaliano)! - Adds Sepolia & OP Sepolia support to SDK
- Updated dependencies [[`210b2c81d`](https://github.com/ethereum-optimism/optimism/commit/210b2c81dd383bad93480aa876b283d9a0c991c2), [`2440f5e7a`](https://github.com/ethereum-optimism/optimism/commit/2440f5e7ab6577f2d2e9c8b0c78c014290dde8e7)]:
- @eth-optimism/core-utils@0.13.0
- @eth-optimism/contracts-bedrock@0.16.1
## 3.1.2 ## 3.1.2
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/sdk", "name": "@eth-optimism/sdk",
"version": "3.1.2", "version": "3.1.3",
"description": "[Optimism] Tools for working with Optimism", "description": "[Optimism] Tools for working with Optimism",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
......
# Needs to point to docker, otherwise you'll get the error: exec: "docker": executable file not found in $PATH
PATH=/
# Runs every minute
# * * * * * /usr/local/bin/docker-compose -f /path/to/docker-compose.yml --profile 1minute up -d
# Runs every 5 minutes
*/5 * * * * /usr/local/bin/docker-compose -f /path/to/docker-compose.yml --profile 5minute up -d
...@@ -3,6 +3,7 @@ version: "3" ...@@ -3,6 +3,7 @@ version: "3"
services: services:
pushgateway: pushgateway:
image: prom/pushgateway image: prom/pushgateway
container_name: pushgateway
ports: ports:
- "9091:9091" - "9091:9091"
restart: unless-stopped restart: unless-stopped
...@@ -35,7 +36,8 @@ services: ...@@ -35,7 +36,8 @@ services:
environment: environment:
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PWD} - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PWD}
volumes: volumes:
- ./datasources.yml:/etc/grafana/provisioning/datasources/datasources.yaml - ./grafana/provisioning:/etc/grafana/provisioning
- ./grafana/dashboards:/var/lib/grafana/dashboards
security_opt: security_opt:
- "no-new-privileges:true" - "no-new-privileges:true"
......
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": null
},
{
"color": "yellow",
"value": 1
},
{
"color": "green",
"value": 4
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 5,
"w": 6,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
},
"pluginVersion": "10.1.2",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
"editorMode": "builder",
"expr": "metamask_tx_success",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A",
"useBackend": false
}
],
"title": "Successful Transaction Since Last Failure",
"type": "gauge"
},
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "text",
"value": null
},
{
"color": "red",
"value": 1
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 5,
"w": 6,
"x": 6,
"y": 0
},
"id": 2,
"options": {
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
},
"pluginVersion": "10.1.2",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "PBFA97CFB590B2093"
},
"disableTextWrap": false,
"editorMode": "builder",
"expr": "metamask_tx_failure",
"fullMetaSearch": false,
"includeNullMetadata": true,
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A",
"useBackend": false
}
],
"title": "Failed Transactions Since Last Success",
"type": "gauge"
}
],
"refresh": "5s",
"schemaVersion": 38,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "UFM: Metamask",
"uid": "f66f7076-c724-4f81-8ff9-58d6d99f2716",
"version": 1,
"weekStart": ""
}
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
options:
path: /var/lib/grafana/dashboards
# User Facing Monitoring - Metamask Tests
## Running Locally
### Building Docker Image
```bash
docker build -t ufm-test-service-metamask .
```
### Running the Docker Container on macOS
The following steps were taken from [here](https://www.oddbird.net/2022/11/30/headed-playwright-in-docker/#macos)
Apple’s operating system doesn’t include a built-in X server, but we can use [XQuartz](https://www.xquartz.org/) to provide one:
1. Install XQuartz: `brew install --cask xquartz`
2. Open XQuartz, go to `Preferences -> Security`, and check `Allow connections from network clients`
3. Restart your computer (restarting XQuartz might not be enough)
4. Start XQuartz by executing `xhost +localhost` in your terminal
5. Open Docker Desktop and edit settings to give access to `/tmp/.X11-unix` in `Preferences -> Resources -> File sharing`
Once XQuartz is running with the right permissions, you can run the container with the `DISPLAY` environment variable set and the X11 socket mounted:
```bash
docker run --rm -it \
-e DISPLAY=host.docker.internal:0 \
-v /tmp/.X11-unix:/tmp/.X11-unix \
ufm-test-service-metamask
```
...@@ -20,10 +20,14 @@ ...@@ -20,10 +20,14 @@
"devDependencies": { "devDependencies": {
"@metamask/test-dapp": "^7.1.0", "@metamask/test-dapp": "^7.1.0",
"@playwright/test": "1.37.1", "@playwright/test": "1.37.1",
"@synthetixio/synpress": "3.7.2-beta.5", "@synthetixio/synpress": "3.7.2-beta.7",
"dotenv": "^16.3.1", "dotenv": "^16.3.1",
"static-server": "^2.2.1", "static-server": "^2.2.1",
"typescript": "^5.1.6", "typescript": "^5.1.6",
"viem": "^1.10.8" "viem": "^1.10.8"
},
"dependencies": {
"prom-client": "^14.2.0",
"zod": "^3.22.2"
} }
} }
import 'dotenv/config' import 'dotenv/config'
import { z } from 'zod'
import metamask from '@synthetixio/synpress/commands/metamask.js' import metamask from '@synthetixio/synpress/commands/metamask.js'
import { expect, test, type Page } from '@playwright/test' import { expect, test, type Page } from '@playwright/test'
import { mnemonicToAccount, privateKeyToAccount } from 'viem/accounts' import { mnemonicToAccount, privateKeyToAccount } from 'viem/accounts'
import { testWithSynpress } from './testWithSynpressUtil' import { testWithSynpress } from './testWithSynpressUtil'
import {
getMetamaskTxCounterValue,
incrementMetamaskTxCounter,
setMetamaskTxCounter,
} from './prometheusUtils'
const env = z.object({
METAMASK_SECRET_WORDS_OR_PRIVATEKEY: z.string(),
OP_GOERLI_RPC_URL: z.string().url(),
METAMASK_DAPP_URL: z.string().url()
}).parse(process.env)
const expectedSender = const expectedSender =
process.env.METAMASK_SECRET_WORDS_OR_PRIVATEKEY?.startsWith('0x') env.METAMASK_SECRET_WORDS_OR_PRIVATEKEY?.startsWith('0x')
? privateKeyToAccount( ? privateKeyToAccount(
process.env.METAMASK_SECRET_WORDS_OR_PRIVATEKEY as `0x${string}` env.METAMASK_SECRET_WORDS_OR_PRIVATEKEY as `0x${string}`
).address.toLowerCase() ).address.toLowerCase()
: mnemonicToAccount( : mnemonicToAccount(
process.env.METAMASK_SECRET_WORDS_OR_PRIVATEKEY as string env.METAMASK_SECRET_WORDS_OR_PRIVATEKEY as string
).address.toLowerCase() ).address.toLowerCase()
const expectedRecipient = '0x8fcfbe8953433fd1f2e8375ee99057833e4e1e9e' const expectedRecipient = '0x8fcfbe8953433fd1f2e8375ee99057833e4e1e9e'
...@@ -35,7 +47,7 @@ testWithSynpress('Add OP Goerli network', async () => { ...@@ -35,7 +47,7 @@ testWithSynpress('Add OP Goerli network', async () => {
name: 'op-goerli', name: 'op-goerli',
rpcUrls: { rpcUrls: {
default: { default: {
http: [process.env.OP_GOERLI_RPC_URL], http: [env.OP_GOERLI_RPC_URL],
}, },
}, },
id: '420', id: '420',
...@@ -49,13 +61,26 @@ testWithSynpress('Add OP Goerli network', async () => { ...@@ -49,13 +61,26 @@ testWithSynpress('Add OP Goerli network', async () => {
}, },
}) })
await expect(sharedPage.locator('#chainId')).toHaveText(expectedChainId) try {
await expect(sharedPage.locator('#chainId')).toHaveText(expectedChainId)
} catch (error) {
await setMetamaskTxCounter(true, 0)
await incrementMetamaskTxCounter(false)
throw error
}
}) })
test(`Connect wallet with ${expectedSender}`, async () => { test(`Connect wallet with ${expectedSender}`, async () => {
await sharedPage.click('#connectButton') await sharedPage.click('#connectButton')
await metamask.acceptAccess() await metamask.acceptAccess()
await expect(sharedPage.locator('#accounts')).toHaveText(expectedSender)
try {
await expect(sharedPage.locator('#accounts')).toHaveText(expectedSender)
} catch (error) {
await setMetamaskTxCounter(true, 0)
await incrementMetamaskTxCounter(false)
throw error
}
}) })
test('Send an EIP-1559 transaction and verify success', async () => { test('Send an EIP-1559 transaction and verify success', async () => {
...@@ -76,17 +101,14 @@ test('Send an EIP-1559 transaction and verify success', async () => { ...@@ -76,17 +101,14 @@ test('Send an EIP-1559 transaction and verify success', async () => {
}) })
}) })
await metamask.confirmTransaction() await metamask.confirmTransactionAndWaitForMining()
const txHash = await txHashPromise const txHash = await txHashPromise
// Waiting for Infura (Metamask given provider) to index our transaction
await sharedPage.waitForTimeout(10_000)
// Metamask test dApp allows us access to the Metamask RPC provider via loading this URL. // Metamask test dApp allows us access to the Metamask RPC provider via loading this URL.
// The RPC response will be populated onto the page that's loaded. // The RPC response will be populated onto the page that's loaded.
// More info here: https://github.com/MetaMask/test-dapp/tree/main#usage // More info here: https://github.com/MetaMask/test-dapp/tree/main#usage
await sharedPage.goto( await sharedPage.goto(
`${process.env.METAMASK_DAPP_URL}/request.html?method=eth_getTransactionReceipt&params=["${txHash}"]` `${env.METAMASK_DAPP_URL}/request.html?method=eth_getTransactionReceipt&params=["${txHash}"]`
) )
// Waiting for RPC response to be populated on the page // Waiting for RPC response to be populated on the page
...@@ -98,5 +120,14 @@ test('Send an EIP-1559 transaciton and verfiy success', async () => { ...@@ -98,5 +120,14 @@ test('Send an EIP-1559 transaciton and verfiy success', async () => {
'' ''
) )
) )
expect(transaction.status).toBe('0x1')
try {
expect(transaction.status).toBe('0x1')
await setMetamaskTxCounter(false, 0)
await incrementMetamaskTxCounter(true)
} catch (error) {
await setMetamaskTxCounter(true, 0)
await incrementMetamaskTxCounter(false)
throw error
}
}) })
import 'dotenv/config'
import { z } from 'zod'
import { Counter, Pushgateway } from 'prom-client'
const env = z
.object({
PROMETHEUS_SERVER_URL: z.string().url(),
PROMETHEUS_PUSHGATEWAY_URL: z.string().url(),
})
.parse(process.env)
const txSuccessMetricName = 'metamask_tx_success'
const txFailureMetricName = 'metamask_tx_failure'
const txSuccessCounter = new Counter({
name: txSuccessMetricName,
help: 'A counter signifying the number of successful transactions sent with Metamask since last failure',
})
const txFailureCounter = new Counter({
name: txFailureMetricName,
help: 'A counter signifying the number of failed transactions sent with Metamask since last successful transaction',
})
export const getMetamaskTxCounterValue = async (isSuccess: boolean) => {
const metricName = isSuccess ? txSuccessMetricName : txFailureMetricName
const prometheusMetricQuery = `${env.PROMETHEUS_SERVER_URL}/api/v1/query?query=${metricName}`
const response = await fetch(prometheusMetricQuery)
if (!response.ok) {
console.error(response.status)
console.error(response.statusText)
throw new Error(`Failed to fetch metric from: ${prometheusMetricQuery}`)
}
// The following is an example of the expected response from prometheusMetricQuery
// for response.json().data.result[0]:
// [
// {
// metric: {
// __name__: 'metamask_tx_success',
// exported_job: 'metamask_tx_count',
// instance: 'pushgateway:9091',
// job: 'pushgateway'
// },
// value: [ 1695250414.474, '0' ]
// }
// ]
try {
const responseJson = z
.object({
data: z.object({
result: z.array(
z.object({
value: z.tuple([
z.number(),
z.number().or(z.string().transform((value) => parseInt(value))),
]),
})
),
}),
})
.parse(await response.json())
return responseJson.data.result[0].value[1]
} catch (error) {
if (
error.message === "Cannot read properties of undefined (reading 'value')"
) {
console.warn(`No data found for metric ${metricName} in Prometheus`)
return undefined
}
throw error
}
}
export const setMetamaskTxCounter = async (
isSuccess: boolean,
valueToSetTo: number
) => {
const metricName = isSuccess ? txSuccessMetricName : txFailureMetricName
const txCounter = isSuccess ? txSuccessCounter : txFailureCounter
txCounter.reset()
console.log(`Setting ${metricName} to ${valueToSetTo}`)
txCounter.inc(valueToSetTo)
const pushGateway = new Pushgateway(env.PROMETHEUS_PUSHGATEWAY_URL)
await pushGateway.pushAdd({ jobName: 'metamask_tx_count' })
}
export const incrementMetamaskTxCounter = async (isSuccess: boolean) => {
const metricName = isSuccess ? txSuccessMetricName : txFailureMetricName
const currentMetricValue = (await getMetamaskTxCounterValue(isSuccess)) ?? 0
console.log(
`Current value of ${metricName} is ${currentMetricValue}, incrementing to ${
currentMetricValue + 1
}`
)
await setMetamaskTxCounter(isSuccess, currentMetricValue + 1)
}
global: global:
scrape_interval: 5s scrape_interval: 2s
scrape_configs: scrape_configs:
- job_name: 'pushgateway' - job_name: 'pushgateway'
......