Commit 5e42fbdb authored by Sabnock, committed by GitHub

Merge branch 'develop' into issue-6731

parents 0a94e45e 068c89b3
......@@ -53,7 +53,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
......@@ -80,7 +80,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
......@@ -107,7 +107,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
......@@ -134,7 +134,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
......@@ -161,7 +161,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
......@@ -188,7 +188,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
......@@ -226,7 +226,7 @@ jobs:
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
......
......@@ -78,7 +78,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
......@@ -115,7 +115,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
......@@ -142,7 +142,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
......@@ -169,7 +169,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
......@@ -196,7 +196,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
......@@ -223,7 +223,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
......@@ -250,7 +250,7 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
......
name: 'UFM Test Service: 1 hour'
on:
schedule:
# Run every hour
- cron: '0 * * * *'
jobs:
ufm_test_service_metamask:
name: 'UFM Test Service: Metamask'
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Run Docker Compose
run: docker-compose run metamask
env:
CI: ${{ secrets.CI }}
PROMETHEUS_SERVER_URL: ${{ secrets.PROMETHEUS_SERVER_URL }}
PROMETHEUS_PUSHGATEWAY_URL: ${{ secrets.PROMETHEUS_PUSHGATEWAY_URL }}
METAMASK_SECRET_WORDS_OR_PRIVATEKEY: ${{ secrets.METAMASK_SECRET_WORDS_OR_PRIVATEKEY }}
METAMASK_NETWORK: ${{ secrets.METAMASK_NETWORK || 'goerli' }}
METAMASK_PASSWORD: ${{ secrets.METAMASK_PASSWORD || 'T3st_P@ssw0rd!' }}
METAMASK_DAPP_URL: ${{ secrets.METAMASK_DAPP_URL || 'http://localhost:9011' }}
METAMASK_OP_GOERLI_RPC_URL: ${{ secrets.METAMASK_OP_GOERLI_RPC_URL }}
......@@ -26,6 +26,7 @@ packages/contracts-bedrock/deployments/anvil
# jetbrains
.idea/
.secrets
.env
.env*
!.env.example
......
......@@ -23,7 +23,7 @@ submodules:
# CI will checkout submodules on its own (and fails on these commands)
if [ -z "$$GITHUB_ENV" ]; then \
git submodule init; \
git submodule update; \
git submodule update --recursive; \
fi
.PHONY: submodules
......@@ -86,6 +86,9 @@ nuke: clean devnet-clean
.PHONY: nuke
devnet-up:
@if ! [ -x "$(command -v geth)" ]; then \
make install-geth; \
fi
@if [ ! -e op-program/bin ]; then \
make cannon-prestate; \
fi
......
......@@ -156,6 +156,7 @@ def devnet_l1_genesis(paths):
'--rpc.allow-unprotected-txs'
])
try:
forge = ChildProcess(deploy_contracts, paths)
forge.start()
forge.join()
......@@ -168,6 +169,7 @@ def devnet_l1_genesis(paths):
allocs = response['result']
write_json(paths.allocs_path, allocs)
finally:
geth.terminate()
......
......@@ -25,7 +25,6 @@ import (
var (
StepBytes4 = crypto.Keccak256([]byte("step(bytes,bytes)"))[:4]
CheatBytes4 = crypto.Keccak256([]byte("cheat(uint256,bytes32,bytes32,uint256)"))[:4]
LoadKeccak256PreimagePartBytes4 = crypto.Keccak256([]byte("loadKeccak256PreimagePart(uint256,bytes)"))[:4]
LoadLocalDataBytes4 = crypto.Keccak256([]byte("loadLocalData(uint256,bytes32,uint256,uint256)"))[:4]
)
......
......@@ -4,7 +4,6 @@ ignore:
- "**/*.t.sol"
- "op-bindings/bindings/*.go"
- "packages/contracts-bedrock/contracts/vendor/WETH9.sol"
- "packages/contracts-bedrock/contracts/cannon" # tested through Go tests
- 'packages/contracts-bedrock/contracts/EAS/**/*.sol'
coverage:
status:
......@@ -31,6 +30,5 @@ flag_management:
- name: common-ts-tests
- name: contracts-tests
- name: core-utils-tests
- name: dtl-tests
- name: chain-mon-tests
- name: sdk-tests
# @eth-optimism/endpoint-monitor
## 1.0.3
### Patch Changes
- [#7450](https://github.com/ethereum-optimism/optimism/pull/7450) [`ac90e16a7`](https://github.com/ethereum-optimism/optimism/commit/ac90e16a7f85c4f73661ae6023135c3d00421c1e) Thanks [@roninjin10](https://github.com/roninjin10)! - Updated dev dependencies related to testing that were causing audit tooling to report failures
## 1.0.2
### Patch Changes
......
......@@ -65,9 +65,6 @@ func (c Config) Check() error {
if c.CheckDuration >= c.CheckInterval {
return fmt.Errorf("%s must be less than %s", CheckDurationFlagName, CheckIntervalFlagName)
}
if err := c.LogConfig.Check(); err != nil {
return err
}
if err := c.MetricsConfig.Check(); err != nil {
return err
}
......
......@@ -33,7 +33,8 @@ func Main(version string) func(cliCtx *cli.Context) error {
return fmt.Errorf("invalid CLI flags: %w", err)
}
l := oplog.NewLogger(cfg.LogConfig)
l := oplog.NewLogger(oplog.AppOut(cliCtx), cfg.LogConfig)
oplog.SetGlobalLogHandler(l.GetHandler())
endpointMonitor := NewEndpointMonitor(cfg, l)
l.Info(fmt.Sprintf("starting endpoint monitor with checkInterval=%s checkDuration=%s", cfg.CheckInterval, cfg.CheckDuration))
......
{
"name": "@eth-optimism/endpoint-monitor",
"version": "1.0.2",
"version": "1.0.3",
"private": true,
"dependencies": {}
}
......@@ -9,7 +9,7 @@ require (
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20230921190252-f29074de9e36
github.com/ethereum/go-ethereum v1.12.2
github.com/ethereum/go-ethereum v1.13.1
github.com/fsnotify/fsnotify v1.6.0
github.com/go-chi/chi/v5 v5.0.10
github.com/go-chi/docgen v1.2.0
......@@ -36,7 +36,7 @@ require (
github.com/onsi/gomega v1.27.10
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.7.0
github.com/prometheus/client_golang v1.14.0
github.com/prometheus/client_golang v1.17.0
github.com/stretchr/testify v1.8.4
github.com/urfave/cli/v2 v2.25.7
golang.org/x/crypto v0.13.0
......@@ -50,6 +50,7 @@ require (
require (
github.com/DataDog/zstd v1.5.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/VictoriaMetrics/fastcache v1.10.0 // indirect
github.com/allegro/bigcache v1.2.1 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
......@@ -61,7 +62,7 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cockroachdb/errors v1.9.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect
github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 // indirect
github.com/cockroachdb/redact v1.1.3 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.10.0 // indirect
......@@ -76,9 +77,8 @@ require (
github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
github.com/docker/docker v20.10.24+incompatible // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 // indirect
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect
github.com/elastic/gosigar v0.14.2 // indirect
github.com/ethereum/c-kzg-4844 v0.3.1 // indirect
github.com/fatih/color v1.7.0 // indirect
......@@ -108,7 +108,7 @@ require (
github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/huin/goupnp v1.2.0 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
......@@ -120,7 +120,7 @@ require (
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e // indirect
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c // indirect
......@@ -167,16 +167,16 @@ require (
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-20 v0.3.3 // indirect
github.com/quic-go/quic-go v0.38.1 // indirect
github.com/quic-go/webtransport-go v0.5.3 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/rs/cors v1.9.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
......@@ -185,8 +185,8 @@ require (
github.com/stretchr/objx v0.5.0 // indirect
github.com/supranational/blst v0.3.11 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.5.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
......@@ -200,15 +200,15 @@ require (
golang.org/x/sys v0.12.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/tools v0.13.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)
replace github.com/ethereum/go-ethereum v1.12.2 => github.com/ethereum-optimism/op-geth v1.101200.2-rc.1.0.20230922185314-7997a6fed17c
replace github.com/ethereum/go-ethereum v1.13.1 => github.com/ethereum-optimism/op-geth v1.101301.0-rc.2
//replace github.com/ethereum/go-ethereum v1.12.2 => ../go-ethereum
//replace github.com/ethereum-optimism/superchain-registry/superchain => ../superchain-registry/superchain
//replace github.com/ethereum/go-ethereum v1.13.1 => ../go-ethereum
This diff is collapsed.
# @eth-optimism/indexer-api
## 0.0.4
### Patch Changes
- [#7450](https://github.com/ethereum-optimism/optimism/pull/7450) [`ac90e16a7`](https://github.com/ethereum-optimism/optimism/commit/ac90e16a7f85c4f73661ae6023135c3d00421c1e) Thanks [@roninjin10](https://github.com/roninjin10)! - Updated dev dependencies related to testing that were causing audit tooling to report failures
{
"name": "@eth-optimism/indexer-api",
"version": "0.0.3",
"version": "0.0.4",
"description": "[Optimism] typescript types for the indexer service",
"main": "indexer.cjs",
"module": "indexer.js",
......
......@@ -10,7 +10,7 @@ import (
"github.com/ethereum-optimism/optimism/indexer/api/routes"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/assert"
......
......@@ -25,35 +25,3 @@ func Clamp(start, end *big.Int, size uint64) *big.Int {
func Matcher(num int64) func(*big.Int) bool {
return func(bi *big.Int) bool { return bi.Int64() == num }
}
type Range struct {
Start *big.Int
End *big.Int
}
// Grouped will return a slice of inclusive ranges from (start, end),
// capped to the supplied size from `(start, end)`.
func Grouped(start, end *big.Int, size uint64) []Range {
if end.Cmp(start) < 0 || size == 0 {
return nil
}
bigMaxDiff := big.NewInt(int64(size - 1))
groups := []Range{}
for start.Cmp(end) <= 0 {
diff := new(big.Int).Sub(end, start)
switch {
case diff.Uint64()+1 <= size:
// re-use allocated diff as the next start
groups = append(groups, Range{start, end})
start = diff.Add(end, One)
default:
// re-use allocated diff as the next start
end := new(big.Int).Add(start, bigMaxDiff)
groups = append(groups, Range{start, end})
start = diff.Add(end, One)
}
}
return groups
}
......@@ -27,46 +27,3 @@ func TestClamp(t *testing.T) {
require.False(t, end == result)
require.Equal(t, uint64(5), result.Uint64())
}
func TestGrouped(t *testing.T) {
// base cases
require.Nil(t, Grouped(One, Zero, 1))
require.Nil(t, Grouped(Zero, One, 0))
// Same Start/End
group := Grouped(One, One, 1)
require.Len(t, group, 1)
require.Equal(t, One, group[0].Start)
require.Equal(t, One, group[0].End)
Three, Five := big.NewInt(3), big.NewInt(5)
// One at a time
group = Grouped(One, Three, 1)
require.Equal(t, One, group[0].End)
require.Equal(t, int64(1), group[0].End.Int64())
require.Equal(t, int64(2), group[1].Start.Int64())
require.Equal(t, int64(2), group[1].End.Int64())
require.Equal(t, int64(3), group[2].Start.Int64())
require.Equal(t, int64(3), group[2].End.Int64())
// Split groups
group = Grouped(One, Five, 3)
require.Len(t, group, 2)
require.Equal(t, One, group[0].Start)
require.Equal(t, int64(3), group[0].End.Int64())
require.Equal(t, int64(4), group[1].Start.Int64())
require.Equal(t, Five, group[1].End)
// Encompasses the range
group = Grouped(One, Five, 5)
require.Len(t, group, 1)
require.Equal(t, One, group[0].Start, Zero)
require.Equal(t, Five, group[0].End)
// Size larger than the entire range
group = Grouped(One, Five, 100)
require.Len(t, group, 1)
require.Equal(t, One, group[0].Start, Zero)
require.Equal(t, Five, group[0].End)
}
......@@ -5,7 +5,7 @@ import (
"github.com/ethereum-optimism/optimism/indexer/api"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/op-service/log"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
......@@ -28,14 +28,15 @@ var (
)
func runIndexer(ctx *cli.Context) error {
log := log.NewLogger(log.ReadCLIConfig(ctx)).New("role", "indexer")
log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "indexer")
oplog.SetGlobalLogHandler(log.GetHandler())
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil {
log.Error("failed to load config", "err", err)
return err
}
db, err := database.NewDB(cfg.DB)
db, err := database.NewDB(log, cfg.DB)
if err != nil {
log.Error("failed to connect to database", "err", err)
return err
......@@ -52,14 +53,15 @@ func runIndexer(ctx *cli.Context) error {
}
func runApi(ctx *cli.Context) error {
log := log.NewLogger(log.ReadCLIConfig(ctx)).New("role", "api")
log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "api")
oplog.SetGlobalLogHandler(log.GetHandler())
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil {
log.Error("failed to load config", "err", err)
return err
}
db, err := database.NewDB(cfg.DB)
db, err := database.NewDB(log, cfg.DB)
if err != nil {
log.Error("failed to connect to database", "err", err)
return err
......@@ -71,7 +73,8 @@ func runApi(ctx *cli.Context) error {
}
func runMigrations(ctx *cli.Context) error {
log := log.NewLogger(log.ReadCLIConfig(ctx)).New("role", "api")
log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "api")
oplog.SetGlobalLogHandler(log.GetHandler())
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
migrationsDir := ctx.String(MigrationsFlag.Name)
if err != nil {
......@@ -79,7 +82,7 @@ func runMigrations(ctx *cli.Context) error {
return err
}
db, err := database.NewDB(cfg.DB)
db, err := database.NewDB(log, cfg.DB)
if err != nil {
log.Error("failed to connect to database", "err", err)
return err
......@@ -91,9 +94,9 @@ func runMigrations(ctx *cli.Context) error {
func newCli(GitCommit string, GitDate string) *cli.App {
flags := []cli.Flag{ConfigFlag}
flags = append(flags, log.CLIFlags("INDEXER")...)
flags = append(flags, oplog.CLIFlags("INDEXER")...)
migrationFlags := []cli.Flag{MigrationsFlag, ConfigFlag}
migrationFlags = append(migrationFlags, log.CLIFlags("INDEXER")...)
migrationFlags = append(migrationFlags, oplog.CLIFlags("INDEXER")...)
return &cli.App{
Version: params.VersionWithCommit(GitCommit, GitDate),
Description: "An indexer of all optimism events with a serving api layer",
......
......@@ -5,7 +5,7 @@ import (
"os"
"testing"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
......
......@@ -121,4 +121,52 @@ var Presets = map[int]Preset{
L1StartingHeight: 8942381,
},
},
11155420: {
Name: "OP Sepolia",
ChainConfig: ChainConfig{
Preset: 11155420,
L1Contracts: L1Contracts{
AddressManager: common.HexToAddress("0x9bFE9c5609311DF1c011c47642253B78a4f33F4B"),
SystemConfigProxy: common.HexToAddress("0x034edD2A225f7f429A63E0f1D2084B9E0A93b538"),
OptimismPortalProxy: common.HexToAddress("0x16Fc5058F25648194471939df75CF27A2fdC48BC"),
L2OutputOracleProxy: common.HexToAddress("0x90E9c4f8a994a250F6aEfd61CAFb4F2e895D458F"),
L1CrossDomainMessengerProxy: common.HexToAddress("0x58Cc85b8D04EA49cC6DBd3CbFFd00B4B8D6cb3ef"),
L1StandardBridgeProxy: common.HexToAddress("0xFBb0621E0B23b5478B630BD55a5f21f67730B0F1"),
L1ERC721BridgeProxy: common.HexToAddress("0xd83e03D576d23C9AEab8cC44Fa98d058D2176D1f"),
},
L1StartingHeight: 4071408,
},
},
424: {
Name: "PGN",
ChainConfig: ChainConfig{
Preset: 424,
L1Contracts: L1Contracts{
AddressManager: common.HexToAddress("0x09d5DbA52F0ee2C4A5E94FD5C802bD74Ca9cAD3e"),
SystemConfigProxy: common.HexToAddress("0x7Df716EAD1d83a2BF35B416B7BC84bd0700357C9"),
OptimismPortalProxy: common.HexToAddress("0xb26Fd985c5959bBB382BAFdD0b879E149e48116c"),
L2OutputOracleProxy: common.HexToAddress("0xA38d0c4E6319F9045F20318BA5f04CDe94208608"),
L1CrossDomainMessengerProxy: common.HexToAddress("0x97BAf688E5d0465E149d1d5B497Ca99392a6760e"),
L1StandardBridgeProxy: common.HexToAddress("0xD0204B9527C1bA7bD765Fa5CCD9355d38338272b"),
L1ERC721BridgeProxy: common.HexToAddress("0xaFF0F8aaB6Cc9108D34b3B8423C76d2AF434d115"),
},
L1StartingHeight: 17672702,
},
},
58008: {
Name: "PGN Sepolia",
ChainConfig: ChainConfig{
Preset: 58008,
L1Contracts: L1Contracts{
AddressManager: common.HexToAddress("0x0Ad91488288BBe60ff38258785568A6D1EB3B983"),
SystemConfigProxy: common.HexToAddress("0x4BCCC52151f0ad7C62D45Ce0aA77d9d8ffCE534e"),
OptimismPortalProxy: common.HexToAddress("0xF04BdD5353Bb0EFF6CA60CfcC78594278eBfE179"),
L2OutputOracleProxy: common.HexToAddress("0xD5bAc3152ffC25318F848B3DD5dA6C85171BaEEe"),
L1CrossDomainMessengerProxy: common.HexToAddress("0x97f3558Ce48FE71B8CeFA5497708A49531D5A8E1"),
L1StandardBridgeProxy: common.HexToAddress("0xFaE6abCAF30D23e233AC7faF747F2fC3a5a6Bfa3"),
L1ERC721BridgeProxy: common.HexToAddress("0xBA8397B6f255618D5985d0fB427D8c0496F3a5FA"),
},
L1StartingHeight: 17672702,
},
},
}
......@@ -2,8 +2,10 @@ package database
import (
"errors"
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/indexer/bigint"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
......@@ -51,7 +53,7 @@ type BlocksView interface {
L2BlockHeaderWithFilter(BlockHeader) (*L2BlockHeader, error)
L2LatestBlockHeader() (*L2BlockHeader, error)
LatestEpoch() (*Epoch, error)
LatestObservedEpoch(*big.Int, uint64) (*Epoch, error)
}
type BlocksDB interface {
......@@ -155,36 +157,74 @@ type Epoch struct {
L2BlockHeader L2BlockHeader `gorm:"embedded"`
}
// LatestEpoch return the latest epoch, seen on L1 & L2. In other words
// this returns the latest indexed L1 block that has a corresponding
// indexed L2 block with a matching L1Origin (equal timestamps).
// LatestObservedEpoch returns the marker for the latest epoch, observed on L1 & L2, within
// the specified bounds. In other words, this returns the latest indexed L1 block that has
// a corresponding indexed L2 block with a matching L1Origin (equal timestamps).
//
// If `fromL1Height` (inclusive) is not specified, the search will start from genesis and
// continue all the way to latest indexed heights if `maxL1Range == 0`.
//
// For more, see the protocol spec:
// - https://github.com/ethereum-optimism/optimism/blob/develop/specs/derivation.md
func (db *blocksDB) LatestEpoch() (*Epoch, error) {
latestL1Header, err := db.L1LatestBlockHeader()
if err != nil {
return nil, err
} else if latestL1Header == nil {
func (db *blocksDB) LatestObservedEpoch(fromL1Height *big.Int, maxL1Range uint64) (*Epoch, error) {
// We use timestamps since that translates to both L1 & L2
var fromTimestamp, toTimestamp uint64
if fromL1Height == nil {
fromL1Height = bigint.Zero
}
// Lower Bound (the default `fromTimestamp = 0` suffices to represent genesis)
if fromL1Height.BitLen() > 0 {
var header L1BlockHeader
result := db.gorm.Where("number = ?", fromL1Height).Take(&header)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
fromTimestamp = header.Timestamp
}
latestL2Header, err := db.L2LatestBlockHeader()
if err != nil {
return nil, err
} else if latestL2Header == nil {
// Upper Bound (lowest timestamp indexed between L1/L2 bounded by `maxL1Range`)
{
l1QueryFilter := fmt.Sprintf("timestamp >= %d", fromTimestamp)
if maxL1Range > 0 {
maxHeight := new(big.Int).Add(fromL1Height, big.NewInt(int64(maxL1Range)))
l1QueryFilter = fmt.Sprintf("%s AND number <= %d", l1QueryFilter, maxHeight)
}
var l1Header L1BlockHeader
result := db.gorm.Where(l1QueryFilter).Order("timestamp DESC").Take(&l1Header)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
toTimestamp = l1Header.Timestamp
minTime := latestL1Header.Timestamp
if latestL2Header.Timestamp < minTime {
minTime = latestL2Header.Timestamp
var l2Header L2BlockHeader
result = db.gorm.Where("timestamp <= ?", toTimestamp).Order("timestamp DESC").Take(&l2Header)
if result.Error != nil {
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, result.Error
}
if l2Header.Timestamp < toTimestamp {
toTimestamp = l2Header.Timestamp
}
}
// This is a faster query than doing an INNER JOIN between l1_block_headers and l2_block_headers
// which requires a full table scan to compute the resulting table.
l1Query := db.gorm.Table("l1_block_headers").Where("timestamp <= ?", minTime)
l2Query := db.gorm.Table("l2_block_headers").Where("timestamp <= ?", minTime)
// Search for the latest indexed epoch within range. This is a faster query than doing an INNER JOIN between
// l1_block_headers and l2_block_headers which requires a full table scan to compute the resulting table.
l1Query := db.gorm.Table("l1_block_headers").Where("timestamp >= ? AND timestamp <= ?", fromTimestamp, toTimestamp)
l2Query := db.gorm.Table("l2_block_headers").Where("timestamp >= ? AND timestamp <= ?", fromTimestamp, toTimestamp)
query := db.gorm.Raw(`SELECT * FROM (?) AS l1_block_headers, (?) AS l2_block_headers
WHERE l1_block_headers.timestamp = l2_block_headers.timestamp
ORDER BY l2_block_headers.number DESC LIMIT 1`, l1Query, l2Query)
......
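For orientation, here is a minimal sketch of how a caller might drive the bounded LatestObservedEpoch signature introduced above; the Epoch fields and the paging loop are illustrative assumptions, not code from this commit.

package sketch

import (
	"fmt"
	"math/big"
)

// Epoch is a pared-down stand-in for the indexer's database.Epoch; only the
// fields needed for this illustration are assumed.
type Epoch struct {
	L1BlockNumber *big.Int
	L2BlockNumber *big.Int
}

// BlocksView mirrors the LatestObservedEpoch signature introduced above.
type BlocksView interface {
	LatestObservedEpoch(fromL1Height *big.Int, maxL1Range uint64) (*Epoch, error)
}

// walkObservedEpochs pages through observed epochs in bounded L1 windows,
// advancing the lower bound past each result until nothing new is indexed.
func walkObservedEpochs(db BlocksView, start *big.Int, window uint64) error {
	from := new(big.Int).Set(start)
	for {
		epoch, err := db.LatestObservedEpoch(from, window)
		if err != nil {
			return err
		}
		if epoch == nil {
			return nil // no matching L1/L2 pair within the current window
		}
		fmt.Printf("observed epoch: l1=%s l2=%s\n", epoch.L1BlockNumber, epoch.L2BlockNumber)
		from = new(big.Int).Add(epoch.L1BlockNumber, big.NewInt(1))
	}
}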
......@@ -12,9 +12,10 @@ import (
"github.com/ethereum-optimism/optimism/op-service/retry"
"github.com/pkg/errors"
"github.com/ethereum/go-ethereum/log"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
)
var (
......@@ -35,7 +36,7 @@ type DB struct {
BridgeTransactions BridgeTransactionsDB
}
func NewDB(dbConfig config.DBConfig) (*DB, error) {
func NewDB(log log.Logger, dbConfig config.DBConfig) (*DB, error) {
retryStrategy := &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250}
dsn := fmt.Sprintf("host=%s dbname=%s sslmode=disable", dbConfig.Host, dbConfig.Name)
......@@ -52,21 +53,22 @@ func NewDB(dbConfig config.DBConfig) (*DB, error) {
gormConfig := gorm.Config{
// The indexer will explicitly manage the transactions
SkipDefaultTransaction: true,
Logger: logger.Default.LogMode(logger.Silent),
Logger: newLogger(log),
}
gorm, err := retry.Do[*gorm.DB](context.Background(), 10, retryStrategy, func() (*gorm.DB, error) {
gorm, err := gorm.Open(postgres.Open(dsn), &gormConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to connect to database")
return nil, fmt.Errorf("failed to connect to database: %w", err)
}
return gorm, nil
})
if err != nil {
return nil, errors.Wrap(err, "failed to connect to database after multiple retries")
return nil, fmt.Errorf("failed to connect to database after multiple retries: %w", err)
}
db := &DB{
gorm: gorm,
Blocks: newBlocksDB(gorm),
......@@ -75,6 +77,7 @@ func NewDB(dbConfig config.DBConfig) (*DB, error) {
BridgeMessages: newBridgeMessagesDB(gorm),
BridgeTransactions: newBridgeTransactionsDB(gorm),
}
return db, nil
}
......
package database
import (
"context"
"fmt"
"strings"
"time"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm/logger"
)
var (
_ logger.Interface = Logger{}
SlowThresholdMilliseconds = 200
)
type Logger struct {
log log.Logger
}
func newLogger(log log.Logger) Logger {
return Logger{log.New("module", "db")}
}
func (l Logger) LogMode(lvl logger.LogLevel) logger.Interface {
return l
}
func (l Logger) Info(ctx context.Context, msg string, data ...interface{}) {
l.log.Info(fmt.Sprintf(msg, data...))
}
func (l Logger) Warn(ctx context.Context, msg string, data ...interface{}) {
l.log.Warn(fmt.Sprintf(msg, data...))
}
func (l Logger) Error(ctx context.Context, msg string, data ...interface{}) {
l.log.Error(fmt.Sprintf(msg, data...))
}
func (l Logger) Trace(ctx context.Context, begin time.Time, fc func() (sql string, rowsAffected int64), err error) {
elapsedMs := time.Since(begin).Milliseconds()
// omit any values for batch inserts as they can be very long
sql, rows := fc()
if i := strings.Index(strings.ToLower(sql), "values"); i > 0 {
sql = fmt.Sprintf("%sVALUES (...)", sql[:i])
}
if elapsedMs < 200 {
l.log.Debug("database operation", "duration_ms", elapsedMs, "rows_affected", rows, "sql", sql)
} else {
l.log.Warn("database operation", "duration_ms", elapsedMs, "rows_affected", rows, "sql", sql)
}
}
package database
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/mock"
......@@ -51,7 +53,7 @@ func (m *MockBlocksView) L2LatestBlockHeader() (*L2BlockHeader, error) {
return args.Get(0).(*L2BlockHeader), args.Error(1)
}
func (m *MockBlocksView) LatestEpoch() (*Epoch, error) {
func (m *MockBlocksView) LatestObservedEpoch(*big.Int, uint64) (*Epoch, error) {
args := m.Called()
return args.Get(0).(*Epoch), args.Error(1)
}
......
......@@ -13,7 +13,7 @@ import (
"github.com/ethereum-optimism/optimism/indexer/database"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
......@@ -41,21 +41,24 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
dbUser := os.Getenv("DB_USER")
dbName := setupTestDatabase(t)
// Discard the Global Logger as each component
// has its own configured logger
// Rollup System Configuration. Unless specified,
// omit logs emitted by the various components. Maybe
// we can eventually dump these logs to a temp file
log.Root().SetHandler(log.DiscardHandler())
// Rollup System Configuration and Start
opCfg := op_e2e.DefaultSystemConfig(t)
opCfg.DeployConfig.FinalizationPeriodSeconds = 2
if len(os.Getenv("ENABLE_ROLLUP_LOGS")) == 0 {
t.Log("set env 'ENABLE_ROLLUP_LOGS' to show rollup logs")
for name, logger := range opCfg.Loggers {
t.Logf("discarding logs for %s", name)
logger.SetHandler(log.DiscardHandler())
}
}
// Rollup Start
opSys, err := opCfg.Start(t)
require.NoError(t, err)
t.Cleanup(func() { opSys.Close() })
// E2E tests can run on the order of magnitude of minutes. Once
// the system is running, mark this test for Parallel execution
t.Parallel()
// Indexer Configuration and Start
indexerCfg := config.Config{
DB: config.DBConfig{
......@@ -86,7 +89,14 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
MetricsServer: config.ServerConfig{Host: "127.0.0.1", Port: 0},
}
db, err := database.NewDB(indexerCfg.DB)
// E2E tests can run on the order of magnitude of minutes. Once
// the system is running, mark this test for Parallel execution
t.Parallel()
// provide a DB for the unit test. disable logging
silentLog := testlog.Logger(t, log.LvlInfo)
silentLog.SetHandler(log.DiscardHandler())
db, err := database.NewDB(silentLog, indexerCfg.DB)
require.NoError(t, err)
t.Cleanup(func() { db.Close() })
......@@ -137,10 +147,13 @@ func setupTestDatabase(t *testing.T) string {
User: user,
Password: "",
}
// NewDB will create the database schema
db, err := database.NewDB(dbConfig)
silentLog := log.New()
silentLog.SetHandler(log.DiscardHandler())
db, err := database.NewDB(silentLog, dbConfig)
require.NoError(t, err)
defer db.Close()
err = db.ExecuteSQLMigration("../migrations")
require.NoError(t, err)
......
......@@ -71,11 +71,11 @@ func (etl *ETL) Start(ctx context.Context) error {
etl.log.Error("error querying for headers", "err", err)
} else if len(newHeaders) == 0 {
etl.log.Warn("no new headers. processor unexpectedly at head...")
}
} else {
headers = newHeaders
etl.metrics.RecordBatchHeaders(len(newHeaders))
}
}
// only clear the reference if we were able to process this batch
err := etl.processBatch(headers)
......@@ -107,7 +107,7 @@ func (etl *ETL) processBatch(headers []types.Header) error {
headersWithLog := make(map[common.Hash]bool, len(headers))
logs, err := etl.EthClient.FilterLogs(ethereum.FilterQuery{FromBlock: firstHeader.Number, ToBlock: lastHeader.Number, Addresses: etl.contracts})
if err != nil {
batchLog.Info("unable to extract logs", "err", err)
batchLog.Info("failed to extract logs", "err", err)
return err
}
if len(logs) > 0 {
......@@ -118,7 +118,8 @@ func (etl *ETL) processBatch(headers []types.Header) error {
log := logs[i]
if _, ok := headerMap[log.BlockHash]; !ok {
// NOTE. Definitely an error state if none of the headers were re-orged out in between
// the blocks and logs retrieval operations. However, we need to gracefully handle reorgs
// the blocks and logs retrieval operations. Unlikely as long as the confirmation depth has
// been appropriately set or when we get to natively handling reorgs.
batchLog.Error("log found with block hash not in the batch", "block_hash", logs[i].BlockHash, "log_index", logs[i].Index)
return errors.New("parsed log with a block hash not in the batch")
}
......
......@@ -2,20 +2,21 @@ package etl
import (
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/indexer/bigint"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/node"
"testing"
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum-optimism/optimism/op-service/testlog"
)
func TestL1ETLConstruction(t *testing.T) {
......@@ -104,7 +105,7 @@ func TestL1ETLConstruction(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
ts := test.construction()
logger := log.NewLogger(log.DefaultCLIConfig())
logger := testlog.Logger(t, log.LvlInfo)
cfg := Config{StartHeight: ts.start}
etl, err := NewL1ETL(cfg, logger, ts.db.DB, etlMetrics, ts.client, ts.contracts)
......
......@@ -16,7 +16,6 @@ type Metricer interface {
RecordInterval() (done func(err error))
// Batch Extraction
RecordBatchFailure()
RecordBatchLatestHeight(height *big.Int)
RecordBatchHeaders(size int)
RecordBatchLog(contractAddress common.Address)
......@@ -108,17 +107,13 @@ func (m *etlMetrics) RecordInterval() func(error) {
timer := prometheus.NewTimer(m.intervalDuration)
return func(err error) {
if err != nil {
m.RecordBatchFailure()
m.batchFailures.Inc()
}
timer.ObserveDuration()
}
}
func (m *etlMetrics) RecordBatchFailure() {
m.batchFailures.Inc()
}
func (m *etlMetrics) RecordBatchLatestHeight(height *big.Int) {
m.batchLatestHeight.Set(float64(height.Uint64()))
}
......
......@@ -20,6 +20,7 @@ import (
"github.com/ethereum-optimism/optimism/indexer/etl"
"github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum-optimism/optimism/indexer/processors"
"github.com/ethereum-optimism/optimism/indexer/processors/bridge"
"github.com/ethereum-optimism/optimism/op-service/httputil"
"github.com/ethereum-optimism/optimism/op-service/metrics"
)
......@@ -82,7 +83,7 @@ func NewIndexer(
}
// Bridge
bridgeProcessor, err := processors.NewBridgeProcessor(log, db, l1Etl, chainConfig)
bridgeProcessor, err := processors.NewBridgeProcessor(log, db, bridge.NewMetrics(metricsRegistry), l1Etl, chainConfig)
if err != nil {
return nil, err
}
......
# Chain configures l1 chain addresses
# Can configure them manually or use a preset l2 ChainId for known chains including OP Mainnet, OP Goerli, Base, Base Goerli, Zora, and Zora goerli
# Can configure them manually or use a preset L2 ChainId for known chains
# - e.g. OP Mainnet, OP Goerli, Base, Base Goerli, Zora, Zora Goerli, etc.
[chain]
preset = $INDEXER_CHAIN_PRESET
# L1 Config
......@@ -15,17 +15,12 @@ l2-polling-interval = 0
l2-header-buffer-size = 0
l2-confirmation-depth = 0
[rpcs]
l1-rpc = "${INDEXER_RPC_URL_L1}"
l2-rpc = "${INDEXER_RPC_URL_L2}"
[db]
host = "$INDEXER_DB_HOST"
# this port may be problematic once we deploy
# the DATABASE_URL looks like this for previous services and didn't include a port
# DATABASE_URL="postgresql://${INDEXER_DB_USER}:${INDEXER_DB_PASS}@localhost/${INDEXER_DB_NAME}?host=${INDEXER_DB_HOST}"
# If not problematic delete these comments
port = $INDEXER_DB_PORT
user = "$INDEXER_DB_USER"
password = "$INDEXER_DB_PASS"
......
......@@ -7,6 +7,7 @@ import (
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/rpc"
"github.com/prometheus/client_golang/prometheus"
)
......
This diff is collapsed.
......@@ -8,6 +8,7 @@ import (
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/processors/contracts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
......@@ -17,7 +18,7 @@ import (
// 1. OptimismPortal
// 2. L1CrossDomainMessenger
// 3. L1StandardBridge
func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l1Contracts config.L1Contracts, fromHeight, toHeight *big.Int) error {
func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L1Metricer, l1Contracts config.L1Contracts, fromHeight, toHeight *big.Int) error {
// (1) OptimismPortal
optimismPortalTxDeposits, err := contracts.OptimismPortalTransactionDepositEvents(l1Contracts.OptimismPortalProxy, db, fromHeight, toHeight)
if err != nil {
......@@ -44,6 +45,7 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
if err := db.BridgeTransactions.StoreL1TransactionDeposits(transactionDeposits); err != nil {
return err
}
metrics.RecordL1TransactionDeposits(len(transactionDeposits))
}
// (2) L1CrossDomainMessenger
......@@ -56,7 +58,7 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
}
sentMessages := make(map[logKey]*contracts.CrossDomainMessengerSentMessageEvent, len(crossDomainSentMessages))
l1BridgeMessages := make([]database.L1BridgeMessage, len(crossDomainSentMessages))
bridgeMessages := make([]database.L1BridgeMessage, len(crossDomainSentMessages))
for i := range crossDomainSentMessages {
sentMessage := crossDomainSentMessages[i]
sentMessages[logKey{sentMessage.Event.BlockHash, sentMessage.Event.LogIndex}] = &sentMessage
......@@ -68,12 +70,13 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
return fmt.Errorf("expected TransactionDeposit preceding SentMessage event. tx_hash = %s", sentMessage.Event.TransactionHash.String())
}
l1BridgeMessages[i] = database.L1BridgeMessage{TransactionSourceHash: portalDeposit.DepositTx.SourceHash, BridgeMessage: sentMessage.BridgeMessage}
bridgeMessages[i] = database.L1BridgeMessage{TransactionSourceHash: portalDeposit.DepositTx.SourceHash, BridgeMessage: sentMessage.BridgeMessage}
}
if len(l1BridgeMessages) > 0 {
if err := db.BridgeMessages.StoreL1BridgeMessages(l1BridgeMessages); err != nil {
if len(bridgeMessages) > 0 {
if err := db.BridgeMessages.StoreL1BridgeMessages(bridgeMessages); err != nil {
return err
}
metrics.RecordL1CrossDomainSentMessages(len(bridgeMessages))
}
// (3) L1StandardBridge
......@@ -85,7 +88,8 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
log.Info("detected bridge deposits", "size", len(initiatedBridges))
}
l1BridgeDeposits := make([]database.L1BridgeDeposit, len(initiatedBridges))
bridgedTokens := make(map[common.Address]int)
bridgeDeposits := make([]database.L1BridgeDeposit, len(initiatedBridges))
for i := range initiatedBridges {
initiatedBridge := initiatedBridges[i]
......@@ -102,15 +106,19 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
}
initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash
l1BridgeDeposits[i] = database.L1BridgeDeposit{
bridgedTokens[initiatedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++
bridgeDeposits[i] = database.L1BridgeDeposit{
TransactionSourceHash: portalDeposit.DepositTx.SourceHash,
BridgeTransfer: initiatedBridge.BridgeTransfer,
}
}
if len(l1BridgeDeposits) > 0 {
if err := db.BridgeTransfers.StoreL1BridgeDeposits(l1BridgeDeposits); err != nil {
if len(bridgeDeposits) > 0 {
if err := db.BridgeTransfers.StoreL1BridgeDeposits(bridgeDeposits); err != nil {
return err
}
for tokenAddr, size := range bridgedTokens {
metrics.RecordL1InitiatedBridgeTransfers(tokenAddr, size)
}
}
return nil
......@@ -121,7 +129,7 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
// 1. OptimismPortal (Bedrock prove & finalize steps)
// 2. L1CrossDomainMessenger (relayMessage marker)
// 3. L1StandardBridge (no-op, since this is simply a wrapper over the L1CrossDomainMessenger)
func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l1Contracts config.L1Contracts, fromHeight, toHeight *big.Int) error {
func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metrics L1Metricer, l1Contracts config.L1Contracts, fromHeight, toHeight *big.Int) error {
// (1) OptimismPortal (proven withdrawals)
provenWithdrawals, err := contracts.OptimismPortalWithdrawalProvenEvents(l1Contracts.OptimismPortalProxy, db, fromHeight, toHeight)
if err != nil {
......@@ -146,6 +154,9 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
return err
}
}
if len(provenWithdrawals) > 0 {
metrics.RecordL1ProvenWithdrawals(len(provenWithdrawals))
}
// (2) OptimismPortal (finalized withdrawals)
finalizedWithdrawals, err := contracts.OptimismPortalWithdrawalFinalizedEvents(l1Contracts.OptimismPortalProxy, db, fromHeight, toHeight)
......@@ -171,6 +182,9 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
return err
}
}
if len(finalizedWithdrawals) > 0 {
metrics.RecordL1FinalizedWithdrawals(len(finalizedWithdrawals))
}
// (3) L1CrossDomainMessenger
crossDomainRelayedMessages, err := contracts.CrossDomainMessengerRelayedMessageEvents("l1", l1Contracts.L1CrossDomainMessengerProxy, db, fromHeight, toHeight)
......@@ -198,6 +212,9 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
return err
}
}
if len(crossDomainRelayedMessages) > 0 {
metrics.RecordL1CrossDomainRelayedMessages(len(crossDomainRelayedMessages))
}
// (4) L1StandardBridge
finalizedBridges, err := contracts.StandardBridgeFinalizedEvents("l1", l1Contracts.L1StandardBridgeProxy, db, fromHeight, toHeight)
......@@ -208,6 +225,7 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
log.Info("detected finalized bridge withdrawals", "size", len(finalizedBridges))
}
finalizedTokens := make(map[common.Address]int)
for i := range finalizedBridges {
// Nothing actionable on the database. However, we can treat the relayed message
// as an invariant by ensuring we can query for a deposit by the same hash
......@@ -218,8 +236,7 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
return fmt.Errorf("expected RelayedMessage following BridgeFinalized event. tx_hash = %s", finalizedBridge.Event.TransactionHash.String())
}
// Since the message hash is computed from the relayed message, this ensures the deposit fields must match. For good measure,
// we may choose to make sure `withdrawal.BridgeTransfer` matches with the finalized bridge
// Since the message hash is computed from the relayed message, this ensures the deposit fields must match
withdrawal, err := db.BridgeTransfers.L2BridgeWithdrawalWithFilter(database.BridgeTransfer{CrossDomainMessageHash: &relayedMessage.MessageHash})
if err != nil {
return err
......@@ -227,6 +244,13 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
log.Error("missing L2StandardBridge withdrawal on L1 finalization", "tx_hash", finalizedBridge.Event.TransactionHash.String())
return fmt.Errorf("missing L2StandardBridge withdrawal on L1 finalization. tx_hash: %s", finalizedBridge.Event.TransactionHash.String())
}
finalizedTokens[finalizedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++
}
if len(finalizedBridges) > 0 {
for tokenAddr, size := range finalizedTokens {
metrics.RecordL1FinalizedBridgeTransfers(tokenAddr, size)
}
}
// a-ok!
......
......@@ -9,6 +9,7 @@ import (
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/processors/contracts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
......@@ -17,7 +18,7 @@ import (
// 1. OptimismPortal
// 2. L2CrossDomainMessenger
// 3. L2StandardBridge
func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l2Contracts config.L2Contracts, fromHeight, toHeight *big.Int) error {
func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L2Metricer, l2Contracts config.L2Contracts, fromHeight, toHeight *big.Int) error {
// (1) L2ToL1MessagePasser
l2ToL1MPMessagesPassed, err := contracts.L2ToL1MessagePasserMessagePassedEvents(l2Contracts.L2ToL1MessagePasser, db, fromHeight, toHeight)
if err != nil {
......@@ -44,6 +45,7 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
if err := db.BridgeTransactions.StoreL2TransactionWithdrawals(transactionWithdrawals); err != nil {
return err
}
metrics.RecordL2TransactionWithdrawals(len(transactionWithdrawals))
}
// (2) L2CrossDomainMessenger
......@@ -56,7 +58,7 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
}
sentMessages := make(map[logKey]*contracts.CrossDomainMessengerSentMessageEvent, len(crossDomainSentMessages))
l2BridgeMessages := make([]database.L2BridgeMessage, len(crossDomainSentMessages))
bridgeMessages := make([]database.L2BridgeMessage, len(crossDomainSentMessages))
for i := range crossDomainSentMessages {
sentMessage := crossDomainSentMessages[i]
sentMessages[logKey{sentMessage.Event.BlockHash, sentMessage.Event.LogIndex}] = &sentMessage
......@@ -68,13 +70,13 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
return fmt.Errorf("expected MessagePassedEvent preceding SentMessage. tx_hash = %s", sentMessage.Event.TransactionHash.String())
}
l2BridgeMessages[i] = database.L2BridgeMessage{TransactionWithdrawalHash: messagePassed.WithdrawalHash, BridgeMessage: sentMessage.BridgeMessage}
bridgeMessages[i] = database.L2BridgeMessage{TransactionWithdrawalHash: messagePassed.WithdrawalHash, BridgeMessage: sentMessage.BridgeMessage}
}
if len(l2BridgeMessages) > 0 {
if err := db.BridgeMessages.StoreL2BridgeMessages(l2BridgeMessages); err != nil {
if len(bridgeMessages) > 0 {
if err := db.BridgeMessages.StoreL2BridgeMessages(bridgeMessages); err != nil {
return err
}
metrics.RecordL2CrossDomainSentMessages(len(bridgeMessages))
}
// (3) L2StandardBridge
......@@ -86,7 +88,8 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
log.Info("detected bridge withdrawals", "size", len(initiatedBridges))
}
l2BridgeWithdrawals := make([]database.L2BridgeWithdrawal, len(initiatedBridges))
bridgedTokens := make(map[common.Address]int)
bridgeWithdrawals := make([]database.L2BridgeWithdrawal, len(initiatedBridges))
for i := range initiatedBridges {
initiatedBridge := initiatedBridges[i]
......@@ -103,13 +106,19 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
}
initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash
l2BridgeWithdrawals[i] = database.L2BridgeWithdrawal{TransactionWithdrawalHash: messagePassed.WithdrawalHash, BridgeTransfer: initiatedBridge.BridgeTransfer}
bridgedTokens[initiatedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++
bridgeWithdrawals[i] = database.L2BridgeWithdrawal{
TransactionWithdrawalHash: messagePassed.WithdrawalHash,
BridgeTransfer: initiatedBridge.BridgeTransfer,
}
if len(l2BridgeWithdrawals) > 0 {
if err := db.BridgeTransfers.StoreL2BridgeWithdrawals(l2BridgeWithdrawals); err != nil {
}
if len(bridgeWithdrawals) > 0 {
if err := db.BridgeTransfers.StoreL2BridgeWithdrawals(bridgeWithdrawals); err != nil {
return err
}
for tokenAddr, size := range bridgedTokens {
metrics.RecordL2InitiatedBridgeTransfers(tokenAddr, size)
}
}
// a-ok!
......@@ -122,7 +131,7 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
// 2. L2StandardBridge (no-op, since this is simply a wrapper over the L2CrossDomainMessenger)
//
// NOTE: Unlike L1, there's no L2ToL1MessagePasser stage since transaction deposits are a part of the block derivation process.
func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l2Contracts config.L2Contracts, fromHeight, toHeight *big.Int) error {
func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metrics L2Metricer, l2Contracts config.L2Contracts, fromHeight, toHeight *big.Int) error {
// (1) L2CrossDomainMessenger
crossDomainRelayedMessages, err := contracts.CrossDomainMessengerRelayedMessageEvents("l2", l2Contracts.L2CrossDomainMessenger, db, fromHeight, toHeight)
if err != nil {
......@@ -149,6 +158,9 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
return err
}
}
if len(relayedMessages) > 0 {
metrics.RecordL2CrossDomainRelayedMessages(len(relayedMessages))
}
// (2) L2StandardBridge
finalizedBridges, err := contracts.StandardBridgeFinalizedEvents("l2", l2Contracts.L2StandardBridge, db, fromHeight, toHeight)
......@@ -159,6 +171,7 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
log.Info("detected finalized bridge deposits", "size", len(finalizedBridges))
}
finalizedTokens := make(map[common.Address]int)
for i := range finalizedBridges {
// Nothing actionable on the database. However, we can treat the relayed message
// as an invariant by ensuring we can query for a deposit by the same hash
......@@ -169,8 +182,7 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
return fmt.Errorf("expected RelayedMessage following BridgeFinalized event. tx_hash = %s", finalizedBridge.Event.TransactionHash.String())
}
// Since the message hash is computed from the relayed message, this ensures the withdrawal fields must match. For good measure,
// we may choose to make sure `deposit.BridgeTransfer` matches with the finalized bridge
// Since the message hash is computed from the relayed message, this ensures the withdrawal fields must match
deposit, err := db.BridgeTransfers.L1BridgeDepositWithFilter(database.BridgeTransfer{CrossDomainMessageHash: &relayedMessage.MessageHash})
if err != nil {
return err
......@@ -178,6 +190,13 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
log.Error("missing L1StandardBridge deposit on L2 finalization", "tx_hash", finalizedBridge.Event.TransactionHash.String())
return errors.New("missing L1StandardBridge deposit on L2 finalization")
}
finalizedTokens[finalizedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++
}
if len(finalizedBridges) > 0 {
for tokenAddr, size := range finalizedTokens {
metrics.RecordL2FinalizedBridgeTransfers(tokenAddr, size)
}
}
// a-ok!
......
package bridge
import (
"math/big"
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum/go-ethereum/common"
"github.com/prometheus/client_golang/prometheus"
)
var (
MetricsNamespace string = "bridge"
)
type L1Metricer interface {
RecordLatestIndexedL1Height(height *big.Int)
RecordL1TransactionDeposits(size int)
RecordL1ProvenWithdrawals(size int)
RecordL1FinalizedWithdrawals(size int)
RecordL1CrossDomainSentMessages(size int)
RecordL1CrossDomainRelayedMessages(size int)
RecordL1InitiatedBridgeTransfers(token common.Address, size int)
RecordL1FinalizedBridgeTransfers(token common.Address, size int)
}
type L2Metricer interface {
RecordLatestIndexedL2Height(height *big.Int)
RecordL2TransactionWithdrawals(size int)
RecordL2CrossDomainSentMessages(size int)
RecordL2CrossDomainRelayedMessages(size int)
RecordL2InitiatedBridgeTransfers(token common.Address, size int)
RecordL2FinalizedBridgeTransfers(token common.Address, size int)
}
type Metricer interface {
L1Metricer
L2Metricer
RecordInterval() (done func(err error))
}
type bridgeMetrics struct {
intervalTick prometheus.Counter
intervalDuration prometheus.Histogram
intervalFailures prometheus.Counter
latestL1Height prometheus.Gauge
latestL2Height prometheus.Gauge
txDeposits prometheus.Counter
txWithdrawals prometheus.Counter
provenWithdrawals prometheus.Counter
finalizedWithdrawals prometheus.Counter
sentMessages *prometheus.CounterVec
relayedMessages *prometheus.CounterVec
initiatedBridgeTransfers *prometheus.CounterVec
finalizedBridgeTransfers *prometheus.CounterVec
}
func NewMetrics(registry *prometheus.Registry) Metricer {
factory := metrics.With(registry)
return &bridgeMetrics{
intervalTick: factory.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "intervals_total",
Help: "number of times processing loop has run",
}),
intervalDuration: factory.NewHistogram(prometheus.HistogramOpts{
Namespace: MetricsNamespace,
Name: "interval_seconds",
Help: "duration elapsed in the processing loop",
}),
intervalFailures: factory.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "failures_total",
Help: "number of failures encountered",
}),
latestL1Height: factory.NewGauge(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Subsystem: "l1",
Name: "height",
Help: "the latest processed l1 block height",
}),
latestL2Height: factory.NewGauge(prometheus.GaugeOpts{
Namespace: MetricsNamespace,
Subsystem: "l2",
Name: "height",
Help: "the latest processed l2 block height",
}),
txDeposits: factory.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "tx_deposits",
Help: "number of processed transactions deposited from l1",
}),
txWithdrawals: factory.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "tx_withdrawals",
Help: "number of processed transactions withdrawn from l2",
}),
provenWithdrawals: factory.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "proven_withdrawals",
Help: "number of proven tx withdrawals on l1",
}),
finalizedWithdrawals: factory.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "finalized_withdrawals",
Help: "number of finalized tx withdrawals on l1",
}),
sentMessages: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "sent_messages",
Help: "number of bridged messages between l1 and l2",
}, []string{
"chain",
}),
relayedMessages: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "relayed_messages",
Help: "number of relayed messages between l1 and l2",
}, []string{
"chain",
}),
initiatedBridgeTransfers: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "initiated_token_transfers",
Help: "number of bridged tokens between l1 and l2",
}, []string{
"chain",
"token_address",
}),
finalizedBridgeTransfers: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "finalized_token_transfers",
Help: "number of finalized token transfers between l1 and l2",
}, []string{
"chain",
"token_address",
}),
}
}
func (m *bridgeMetrics) RecordInterval() func(error) {
m.intervalTick.Inc()
timer := prometheus.NewTimer(m.intervalDuration)
return func(err error) {
timer.ObserveDuration()
if err != nil {
m.intervalFailures.Inc()
}
}
}
// L1Metricer
func (m *bridgeMetrics) RecordLatestIndexedL1Height(height *big.Int) {
m.latestL1Height.Set(float64(height.Uint64()))
}
func (m *bridgeMetrics) RecordL1TransactionDeposits(size int) {
m.txDeposits.Add(float64(size))
}
func (m *bridgeMetrics) RecordL1ProvenWithdrawals(size int) {
m.provenWithdrawals.Add(float64(size))
}
func (m *bridgeMetrics) RecordL1FinalizedWithdrawals(size int) {
m.finalizedWithdrawals.Add(float64(size))
}
func (m *bridgeMetrics) RecordL1CrossDomainSentMessages(size int) {
m.sentMessages.WithLabelValues("l1").Add(float64(size))
}
func (m *bridgeMetrics) RecordL1CrossDomainRelayedMessages(size int) {
m.relayedMessages.WithLabelValues("l1").Add(float64(size))
}
func (m *bridgeMetrics) RecordL1InitiatedBridgeTransfers(tokenAddr common.Address, size int) {
m.initiatedBridgeTransfers.WithLabelValues("l1", tokenAddr.String()).Add(float64(size))
}
func (m *bridgeMetrics) RecordL1FinalizedBridgeTransfers(tokenAddr common.Address, size int) {
m.finalizedBridgeTransfers.WithLabelValues("l1", tokenAddr.String()).Add(float64(size))
}
// L2Metricer
func (m *bridgeMetrics) RecordLatestIndexedL2Height(height *big.Int) {
m.latestL2Height.Set(float64(height.Uint64()))
}
func (m *bridgeMetrics) RecordL2TransactionWithdrawals(size int) {
m.txWithdrawals.Add(float64(size))
}
func (m *bridgeMetrics) RecordL2CrossDomainSentMessages(size int) {
m.sentMessages.WithLabelValues("l2").Add(float64(size))
}
func (m *bridgeMetrics) RecordL2CrossDomainRelayedMessages(size int) {
m.relayedMessages.WithLabelValues("l2").Add(float64(size))
}
func (m *bridgeMetrics) RecordL2InitiatedBridgeTransfers(tokenAddr common.Address, size int) {
m.initiatedBridgeTransfers.WithLabelValues("l2", tokenAddr.String()).Add(float64(size))
}
func (m *bridgeMetrics) RecordL2FinalizedBridgeTransfers(tokenAddr common.Address, size int) {
m.finalizedBridgeTransfers.WithLabelValues("l2", tokenAddr.String()).Add(float64(size))
}
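For orientation, the following is a minimal sketch (not part of this diff) of how an indexer processing loop might drive the Metricer constructed above; `runETL` and its return values are placeholders invented for illustration, and the sketch assumes it lives in the same package as the metrics code.

```go
// Hypothetical sketch, assuming the same package as the Metricer above.
// runETL stands in for the indexer's real unit of work.
func runETL() (deposits int, err error) { return 0, nil }

func runLoop(m Metricer) {
	for {
		done := m.RecordInterval() // bumps intervals_total and starts the duration timer
		n, err := runETL()
		if err == nil {
			m.RecordL1TransactionDeposits(n)
		}
		done(err) // observes interval_seconds; bumps failures_total when err != nil
	}
}
```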
......@@ -15,21 +15,23 @@ type LegacyBridgeEvent struct {
}
func L1StandardBridgeLegacyDepositInitiatedEvents(contractAddress common.Address, db *database.DB, fromHeight, toHeight *big.Int) ([]LegacyBridgeEvent, error) {
// The L1StandardBridge ABI contains the legacy events
l1StandardBridgeAbi, err := bindings.L1StandardBridgeMetaData.GetAbi()
if err != nil {
return nil, err
}
// The L1StandardBridge contains the legacy events
ethDepositEventAbi := l1StandardBridgeAbi.Events["ETHDepositInitiated"]
erc20DepositEventAbi := l1StandardBridgeAbi.Events["ERC20DepositInitiated"]
// Grab both ETH & ERC20 Events
ethDepositEvents, err := db.ContractEvents.L1ContractEventsWithFilter(database.ContractEvent{ContractAddress: contractAddress, EventSignature: ethDepositEventAbi.ID}, fromHeight, toHeight)
contractEventFilter := database.ContractEvent{ContractAddress: contractAddress, EventSignature: ethDepositEventAbi.ID}
ethDepositEvents, err := db.ContractEvents.L1ContractEventsWithFilter(contractEventFilter, fromHeight, toHeight)
if err != nil {
return nil, err
}
erc20DepositEvents, err := db.ContractEvents.L1ContractEventsWithFilter(database.ContractEvent{ContractAddress: contractAddress, EventSignature: erc20DepositEventAbi.ID}, fromHeight, toHeight)
contractEventFilter.EventSignature = erc20DepositEventAbi.ID
erc20DepositEvents, err := db.ContractEvents.L1ContractEventsWithFilter(contractEventFilter, fromHeight, toHeight)
if err != nil {
return nil, err
}
......@@ -81,13 +83,15 @@ func L1StandardBridgeLegacyDepositInitiatedEvents(contractAddress common.Address
}
func L2StandardBridgeLegacyWithdrawalInitiatedEvents(contractAddress common.Address, db *database.DB, fromHeight, toHeight *big.Int) ([]LegacyBridgeEvent, error) {
// The L2StandardBridge ABI contains the legacy events
l2StandardBridgeAbi, err := bindings.L2StandardBridgeMetaData.GetAbi()
if err != nil {
return nil, err
}
withdrawalInitiatedEventAbi := l2StandardBridgeAbi.Events["WithdrawalInitiated"]
withdrawalEvents, err := db.ContractEvents.L2ContractEventsWithFilter(database.ContractEvent{ContractAddress: contractAddress, EventSignature: withdrawalInitiatedEventAbi.ID}, fromHeight, toHeight)
contractEventFilter := database.ContractEvent{ContractAddress: contractAddress, EventSignature: withdrawalInitiatedEventAbi.ID}
withdrawalEvents, err := db.ContractEvents.L2ContractEventsWithFilter(contractEventFilter, fromHeight, toHeight)
if err != nil {
return nil, err
}
......
{
"npmClient": "pnpm",
"useWorkspaces": true,
"version": "independent",
"packages": [
"packages/*"
]
}
......@@ -27,11 +27,9 @@ func Main(version string, cliCtx *cli.Context) error {
return err
}
cfg := NewConfig(cliCtx)
if err := cfg.Check(); err != nil {
return fmt.Errorf("invalid CLI flags: %w", err)
}
l := oplog.NewLogger(cfg.LogConfig)
l := oplog.NewLogger(oplog.AppOut(cliCtx), cfg.LogConfig)
oplog.SetGlobalLogHandler(l.GetHandler())
opservice.ValidateEnvVars(flags.EnvVarPrefix, flags.Flags, l)
m := metrics.NewMetrics("default")
l.Info("Initializing Batch Submitter")
......
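The same logger wiring shows up in several entrypoints touched by this commit (op-batcher above, the bootnode below). A hedged sketch of the pattern follows, with the import paths and urfave/cli version assumed rather than taken from this diff:

```go
// Hypothetical sketch of the logger wiring used across this commit:
// read the log CLI config, build a logger on the app's output stream,
// and install it as the global log handler. Import paths are assumptions.
package example

import (
	"github.com/ethereum/go-ethereum/log"
	"github.com/urfave/cli/v2"

	oplog "github.com/ethereum-optimism/optimism/op-service/log"
)

func setupLogger(cliCtx *cli.Context) log.Logger {
	logCfg := oplog.ReadCLIConfig(cliCtx)
	l := oplog.NewLogger(oplog.AppOut(cliCtx), logCfg)
	oplog.SetGlobalLogHandler(l.GetHandler())
	return l
}
```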
......@@ -11,8 +11,8 @@ import (
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
derivetest "github.com/ethereum-optimism/optimism/op-node/rollup/derive/test"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
......
......@@ -6,8 +6,8 @@ import (
"github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
......
......@@ -99,9 +99,6 @@ func (c CLIConfig) Check() error {
if err := c.RPCConfig.Check(); err != nil {
return err
}
if err := c.LogConfig.Check(); err != nil {
return err
}
if err := c.MetricsConfig.Check(); err != nil {
return err
}
......
......@@ -39,5 +39,6 @@
"ProtocolVersions",
"Safe",
"SafeProxyFactory",
"DelayedVetoable"
"DelayedVetoable",
"ISemver"
]
......@@ -13,7 +13,7 @@ const DeployerWhitelistStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"cont
var DeployerWhitelistStorageLayout = new(solc.StorageLayout)
var DeployerWhitelistDeployedBin = "0x608060405234801561001057600080fd5b506004361061007d5760003560e01c80638da5cb5b1161005b5780638da5cb5b146100c85780639b19251a1461010d578063b1540a0114610140578063bdc7b54f1461015357600080fd5b806308fd63221461008257806313af40351461009757806354fd4d50146100aa575b600080fd5b61009561009036600461088a565b61015b565b005b6100956100a53660046108c6565b6102bb565b6100b26104ec565b6040516100bf9190610918565b60405180910390f35b6000546100e89073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100bf565b61013061011b3660046108c6565b60016020526000908152604090205460ff1681565b60405190151581526020016100bf565b61013061014e3660046108c6565b61058f565b6100956105e0565b60005473ffffffffffffffffffffffffffffffffffffffff16331461022d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604c60248201527f4465706c6f79657257686974656c6973743a2066756e6374696f6e2063616e2060448201527f6f6e6c792062652063616c6c656420627920746865206f776e6572206f66207460648201527f68697320636f6e74726163740000000000000000000000000000000000000000608482015260a4015b60405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff821660008181526001602090815260409182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00168515159081179091558251938452908301527f8daaf060c3306c38e068a75c054bf96ecd85a3db1252712c4d93632744c42e0d910160405180910390a15050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610388576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604c60248201527f4465706c6f79657257686974656c6973743a2066756e6374696f6e2063616e2060448201527f6f6e6c792062652063616c6c656420627920746865206f776e6572206f66207460648201527f68697320636f6e74726163740000000000000000000000000000000000000000608482015260a401610224565b73ffffffffffffffffffffffffffffffffffffffff8116610451576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604d60248201527f4465706c6f79657257686974656c6973743a2063616e206f6e6c79206265206460448201527f697361626c65642076696120656e61626c65417262697472617279436f6e747260648201527f6163744465706c6f796d656e7400000000000000000000000000000000000000608482015260a401610224565b6000546040805173ffffffffffffffffffffffffffffffffffffffff928316815291831660208301527fb532073b38c83145e3e5135377a08bf9aab55bc0fd7c1179cd4fb995d2a5159c910160405180910390a1600080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60606105177f0000000000000000000000000000000000000000000000000000000000000000610724565b6105407f0000000000000000000000000000000000000000000000000000000000000000610724565b6105697f0000000000000000000000000000000000000000000000000000000000000000610724565b60405160200161057b93929190610969565b604051602081830303815290604052905090565b6000805473ffffffffffffffffffffffffffffffffffffffff1615806105da575073ffffffffffffffffffffffffffffffffffffffff821660009081526001602052604090205460ff165b92915050565b60005473ffffffffffffffffffffffffffffffffffffffff1633146106ad576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604c60248201527f4465706c6f79657257686974656c6973743a2066756e6374696f6e2063616e2060448201527f6f6e6c792062652063616c6c656420627920746865206f776e6572206f66207460648201527f68697320636f6e74726163740000000000000000000000000000000000000000608482015260a401610224565b60005460405173ffffffffffffffffffffffffffffffffffffffff90911681527fc0e106cf568e50698fdbde1eff56f5a
5c966cc7958e37e276918e9e4ccdf8cd49060200160405180910390a1600080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169055565b60608160000361076757505060408051808201909152600181527f3000000000000000000000000000000000000000000000000000000000000000602082015290565b8160005b8115610791578061077b81610a0e565b915061078a9050600a83610a75565b915061076b565b60008167ffffffffffffffff8111156107ac576107ac610a89565b6040519080825280601f01601f1916602001820160405280156107d6576020820181803683370190505b5090505b8415610859576107eb600183610ab8565b91506107f8600a86610acf565b610803906030610ae3565b60f81b81838151811061081857610818610afb565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a905350610852600a86610a75565b94506107da565b949350505050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461088557600080fd5b919050565b6000806040838503121561089d57600080fd5b6108a683610861565b9150602083013580151581146108bb57600080fd5b809150509250929050565b6000602082840312156108d857600080fd5b6108e182610861565b9392505050565b60005b838110156109035781810151838201526020016108eb565b83811115610912576000848401525b50505050565b60208152600082518060208401526109378160408501602087016108e8565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169190910160400192915050565b6000845161097b8184602089016108e8565b80830190507f2e0000000000000000000000000000000000000000000000000000000000000080825285516109b7816001850160208a016108e8565b600192019182015283516109d28160028401602088016108e8565b0160020195945050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610a3f57610a3f6109df565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b600082610a8457610a84610a46565b500490565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600082821015610aca57610aca6109df565b500390565b600082610ade57610ade610a46565b500690565b60008219821115610af657610af66109df565b500190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fdfea164736f6c634300080f000a"
var DeployerWhitelistDeployedBin = "0x608060405234801561001057600080fd5b506004361061007d5760003560e01c80638da5cb5b1161005b5780638da5cb5b146100fc5780639b19251a14610141578063b1540a0114610174578063bdc7b54f1461018757600080fd5b806308fd63221461008257806313af40351461009757806354fd4d50146100aa575b600080fd5b6100956100903660046106de565b61018f565b005b6100956100a536600461071a565b6102ef565b6100e66040518060400160405280600581526020017f312e312e3000000000000000000000000000000000000000000000000000000081525081565b6040516100f3919061073c565b60405180910390f35b60005461011c9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100f3565b61016461014f36600461071a565b60016020526000908152604090205460ff1681565b60405190151581526020016100f3565b61016461018236600461071a565b610520565b610095610571565b60005473ffffffffffffffffffffffffffffffffffffffff163314610261576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604c60248201527f4465706c6f79657257686974656c6973743a2066756e6374696f6e2063616e2060448201527f6f6e6c792062652063616c6c656420627920746865206f776e6572206f66207460648201527f68697320636f6e74726163740000000000000000000000000000000000000000608482015260a4015b60405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff821660008181526001602090815260409182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00168515159081179091558251938452908301527f8daaf060c3306c38e068a75c054bf96ecd85a3db1252712c4d93632744c42e0d910160405180910390a15050565b60005473ffffffffffffffffffffffffffffffffffffffff1633146103bc576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604c60248201527f4465706c6f79657257686974656c6973743a2066756e6374696f6e2063616e2060448201527f6f6e6c792062652063616c6c656420627920746865206f776e6572206f66207460648201527f68697320636f6e74726163740000000000000000000000000000000000000000608482015260a401610258565b73ffffffffffffffffffffffffffffffffffffffff8116610485576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604d60248201527f4465706c6f79657257686974656c6973743a2063616e206f6e6c79206265206460448201527f697361626c65642076696120656e61626c65417262697472617279436f6e747260648201527f6163744465706c6f796d656e7400000000000000000000000000000000000000608482015260a401610258565b6000546040805173ffffffffffffffffffffffffffffffffffffffff928316815291831660208301527fb532073b38c83145e3e5135377a08bf9aab55bc0fd7c1179cd4fb995d2a5159c910160405180910390a1600080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b6000805473ffffffffffffffffffffffffffffffffffffffff16158061056b575073ffffffffffffffffffffffffffffffffffffffff821660009081526001602052604090205460ff165b92915050565b60005473ffffffffffffffffffffffffffffffffffffffff16331461063e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604c60248201527f4465706c6f79657257686974656c6973743a2066756e6374696f6e2063616e2060448201527f6f6e6c792062652063616c6c656420627920746865206f776e6572206f66207460648201527f68697320636f6e74726163740000000000000000000000000000000000000000608482015260a401610258565b60005460405173ffffffffffffffffffffffffffffffffffffffff90911681527fc0e106cf568e50698fdbde1eff56f5a5c966cc7958e37e276918e9e4ccdf8cd49060200160405180910390a1600080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169055565b803573ffffffffffffffffffffffffffffffffffffffff811681146106d957600080fd5b919050565
b600080604083850312156106f157600080fd5b6106fa836106b5565b91506020830135801515811461070f57600080fd5b809150509250929050565b60006020828403121561072c57600080fd5b610735826106b5565b9392505050565b600060208083528351808285015260005b818110156107695785810183015185820160400152820161074d565b8181111561077b576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01692909201604001939250505056fea164736f6c634300080f000a"
func init() {
if err := json.Unmarshal([]byte(DeployerWhitelistStorageLayoutJSON), DeployerWhitelistStorageLayout); err != nil {
......
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package bindings
import (
"errors"
"math/big"
"strings"
ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
)
// Reference imports to suppress errors if they are not otherwise used.
var (
_ = errors.New
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
_ = event.NewSubscription
)
// ISemverMetaData contains all meta data concerning the ISemver contract.
var ISemverMetaData = &bind.MetaData{
ABI: "[{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
}
// ISemverABI is the input ABI used to generate the binding from.
// Deprecated: Use ISemverMetaData.ABI instead.
var ISemverABI = ISemverMetaData.ABI
// ISemver is an auto generated Go binding around an Ethereum contract.
type ISemver struct {
ISemverCaller // Read-only binding to the contract
ISemverTransactor // Write-only binding to the contract
ISemverFilterer // Log filterer for contract events
}
// ISemverCaller is an auto generated read-only Go binding around an Ethereum contract.
type ISemverCaller struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// ISemverTransactor is an auto generated write-only Go binding around an Ethereum contract.
type ISemverTransactor struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// ISemverFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
type ISemverFilterer struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// ISemverSession is an auto generated Go binding around an Ethereum contract,
// with pre-set call and transact options.
type ISemverSession struct {
Contract *ISemver // Generic contract binding to set the session for
CallOpts bind.CallOpts // Call options to use throughout this session
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
}
// ISemverCallerSession is an auto generated read-only Go binding around an Ethereum contract,
// with pre-set call options.
type ISemverCallerSession struct {
Contract *ISemverCaller // Generic contract caller binding to set the session for
CallOpts bind.CallOpts // Call options to use throughout this session
}
// ISemverTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
// with pre-set transact options.
type ISemverTransactorSession struct {
Contract *ISemverTransactor // Generic contract transactor binding to set the session for
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
}
// ISemverRaw is an auto generated low-level Go binding around an Ethereum contract.
type ISemverRaw struct {
Contract *ISemver // Generic contract binding to access the raw methods on
}
// ISemverCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
type ISemverCallerRaw struct {
Contract *ISemverCaller // Generic read-only contract binding to access the raw methods on
}
// ISemverTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
type ISemverTransactorRaw struct {
Contract *ISemverTransactor // Generic write-only contract binding to access the raw methods on
}
// NewISemver creates a new instance of ISemver, bound to a specific deployed contract.
func NewISemver(address common.Address, backend bind.ContractBackend) (*ISemver, error) {
contract, err := bindISemver(address, backend, backend, backend)
if err != nil {
return nil, err
}
return &ISemver{ISemverCaller: ISemverCaller{contract: contract}, ISemverTransactor: ISemverTransactor{contract: contract}, ISemverFilterer: ISemverFilterer{contract: contract}}, nil
}
// NewISemverCaller creates a new read-only instance of ISemver, bound to a specific deployed contract.
func NewISemverCaller(address common.Address, caller bind.ContractCaller) (*ISemverCaller, error) {
contract, err := bindISemver(address, caller, nil, nil)
if err != nil {
return nil, err
}
return &ISemverCaller{contract: contract}, nil
}
// NewISemverTransactor creates a new write-only instance of ISemver, bound to a specific deployed contract.
func NewISemverTransactor(address common.Address, transactor bind.ContractTransactor) (*ISemverTransactor, error) {
contract, err := bindISemver(address, nil, transactor, nil)
if err != nil {
return nil, err
}
return &ISemverTransactor{contract: contract}, nil
}
// NewISemverFilterer creates a new log filterer instance of ISemver, bound to a specific deployed contract.
func NewISemverFilterer(address common.Address, filterer bind.ContractFilterer) (*ISemverFilterer, error) {
contract, err := bindISemver(address, nil, nil, filterer)
if err != nil {
return nil, err
}
return &ISemverFilterer{contract: contract}, nil
}
// bindISemver binds a generic wrapper to an already deployed contract.
func bindISemver(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
parsed, err := abi.JSON(strings.NewReader(ISemverABI))
if err != nil {
return nil, err
}
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (_ISemver *ISemverRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
return _ISemver.Contract.ISemverCaller.contract.Call(opts, result, method, params...)
}
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (_ISemver *ISemverRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
return _ISemver.Contract.ISemverTransactor.contract.Transfer(opts)
}
// Transact invokes the (paid) contract method with params as input values.
func (_ISemver *ISemverRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
return _ISemver.Contract.ISemverTransactor.contract.Transact(opts, method, params...)
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (_ISemver *ISemverCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
return _ISemver.Contract.contract.Call(opts, result, method, params...)
}
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (_ISemver *ISemverTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
return _ISemver.Contract.contract.Transfer(opts)
}
// Transact invokes the (paid) contract method with params as input values.
func (_ISemver *ISemverTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
return _ISemver.Contract.contract.Transact(opts, method, params...)
}
// Version is a free data retrieval call binding the contract method 0x54fd4d50.
//
// Solidity: function version() view returns(string)
func (_ISemver *ISemverCaller) Version(opts *bind.CallOpts) (string, error) {
var out []interface{}
err := _ISemver.contract.Call(opts, &out, "version")
if err != nil {
return *new(string), err
}
out0 := *abi.ConvertType(out[0], new(string)).(*string)
return out0, err
}
// Version is a free data retrieval call binding the contract method 0x54fd4d50.
//
// Solidity: function version() view returns(string)
func (_ISemver *ISemverSession) Version() (string, error) {
return _ISemver.Contract.Version(&_ISemver.CallOpts)
}
// Version is a free data retrieval call binding the contract method 0x54fd4d50.
//
// Solidity: function version() view returns(string)
func (_ISemver *ISemverCallerSession) Version() (string, error) {
return _ISemver.Contract.Version(&_ISemver.CallOpts)
}
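As a usage illustration only, here is a hedged sketch of reading version() through this generated binding; the RPC endpoint and contract address are placeholders, not values from this commit:

```go
// Hypothetical usage sketch for the generated ISemver binding.
// The RPC endpoint and contract address are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/ethereum-optimism/optimism/op-bindings/bindings"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	addr := common.HexToAddress("0x0000000000000000000000000000000000000000") // placeholder address
	semver, err := bindings.NewISemverCaller(addr, client)
	if err != nil {
		log.Fatal(err)
	}
	version, err := semver.Version(nil) // nil CallOpts queries the latest block
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("contract version:", version)
}
```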
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package bindings
import (
"encoding/json"
"github.com/ethereum-optimism/optimism/op-bindings/solc"
)
const ISemverStorageLayoutJSON = "{\"storage\":null,\"types\":{}}"
var ISemverStorageLayout = new(solc.StorageLayout)
var ISemverDeployedBin = "0x"
func init() {
if err := json.Unmarshal([]byte(ISemverStorageLayoutJSON), ISemverStorageLayout); err != nil {
panic(err)
}
layouts["ISemver"] = ISemverStorageLayout
deployedBytecodes["ISemver"] = ISemverDeployedBin
}
......@@ -30,8 +30,8 @@ var (
// L1BlockNumberMetaData contains all meta data concerning the L1BlockNumber contract.
var L1BlockNumberMetaData = &bind.MetaData{
ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"getL1BlockNumber\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]",
Bin: "0x60e060405234801561001057600080fd5b506001608052600060a052600260c05260805160a05160c05161059461004f600039600061018d015260006101640152600061013b01526105946000f3fe60806040526004361061002d5760003560e01c806354fd4d5014610052578063b9b3efe91461007d57610048565b3661004857600061003c6100a0565b90508060005260206000f35b600061003c6100a0565b34801561005e57600080fd5b50610067610134565b6040516100749190610344565b60405180910390f35b34801561008957600080fd5b506100926100a0565b604051908152602001610074565b600073420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff16638381f58a6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610101573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906101259190610395565b67ffffffffffffffff16905090565b606061015f7f00000000000000000000000000000000000000000000000000000000000000006101d7565b6101887f00000000000000000000000000000000000000000000000000000000000000006101d7565b6101b17f00000000000000000000000000000000000000000000000000000000000000006101d7565b6040516020016101c3939291906103c6565b604051602081830303815290604052905090565b60608160000361021a57505060408051808201909152600181527f3000000000000000000000000000000000000000000000000000000000000000602082015290565b8160005b8115610244578061022e8161046b565b915061023d9050600a836104d2565b915061021e565b60008167ffffffffffffffff81111561025f5761025f6104e6565b6040519080825280601f01601f191660200182016040528015610289576020820181803683370190505b5090505b841561030c5761029e600183610515565b91506102ab600a8661052c565b6102b6906030610540565b60f81b8183815181106102cb576102cb610558565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a905350610305600a866104d2565b945061028d565b949350505050565b60005b8381101561032f578181015183820152602001610317565b8381111561033e576000848401525b50505050565b6020815260008251806020840152610363816040850160208701610314565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169190910160400192915050565b6000602082840312156103a757600080fd5b815167ffffffffffffffff811681146103bf57600080fd5b9392505050565b600084516103d8818460208901610314565b80830190507f2e000000000000000000000000000000000000000000000000000000000000008082528551610414816001850160208a01610314565b6001920191820152835161042f816002840160208801610314565b0160020195945050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361049c5761049c61043c565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b6000826104e1576104e16104a3565b500490565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000828210156105275761052761043c565b500390565b60008261053b5761053b6104a3565b500690565b600082198211156105535761055361043c565b500190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fdfea164736f6c634300080f000a",
ABI: "[{\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"getL1BlockNumber\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]",
Bin: "0x608060405234801561001057600080fd5b50610219806100206000396000f3fe60806040526004361061002d5760003560e01c806354fd4d5014610052578063b9b3efe9146100b157610048565b3661004857600061003c6100d4565b90508060005260206000f35b600061003c6100d4565b34801561005e57600080fd5b5061009b6040518060400160405280600581526020017f312e312e3000000000000000000000000000000000000000000000000000000081525081565b6040516100a89190610168565b60405180910390f35b3480156100bd57600080fd5b506100c66100d4565b6040519081526020016100a8565b600073420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff16638381f58a6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610135573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061015991906101db565b67ffffffffffffffff16905090565b600060208083528351808285015260005b8181101561019557858101830151858201604001528201610179565b818111156101a7576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b6000602082840312156101ed57600080fd5b815167ffffffffffffffff8116811461020557600080fd5b939250505056fea164736f6c634300080f000a",
}
// L1BlockNumberABI is the input ABI used to generate the binding from.
......
......@@ -13,7 +13,7 @@ const L1BlockNumberStorageLayoutJSON = "{\"storage\":null,\"types\":{}}"
var L1BlockNumberStorageLayout = new(solc.StorageLayout)
var L1BlockNumberDeployedBin = "0x60806040526004361061002d5760003560e01c806354fd4d5014610052578063b9b3efe91461007d57610048565b3661004857600061003c6100a0565b90508060005260206000f35b600061003c6100a0565b34801561005e57600080fd5b50610067610134565b6040516100749190610344565b60405180910390f35b34801561008957600080fd5b506100926100a0565b604051908152602001610074565b600073420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff16638381f58a6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610101573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906101259190610395565b67ffffffffffffffff16905090565b606061015f7f00000000000000000000000000000000000000000000000000000000000000006101d7565b6101887f00000000000000000000000000000000000000000000000000000000000000006101d7565b6101b17f00000000000000000000000000000000000000000000000000000000000000006101d7565b6040516020016101c3939291906103c6565b604051602081830303815290604052905090565b60608160000361021a57505060408051808201909152600181527f3000000000000000000000000000000000000000000000000000000000000000602082015290565b8160005b8115610244578061022e8161046b565b915061023d9050600a836104d2565b915061021e565b60008167ffffffffffffffff81111561025f5761025f6104e6565b6040519080825280601f01601f191660200182016040528015610289576020820181803683370190505b5090505b841561030c5761029e600183610515565b91506102ab600a8661052c565b6102b6906030610540565b60f81b8183815181106102cb576102cb610558565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a905350610305600a866104d2565b945061028d565b949350505050565b60005b8381101561032f578181015183820152602001610317565b8381111561033e576000848401525b50505050565b6020815260008251806020840152610363816040850160208701610314565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169190910160400192915050565b6000602082840312156103a757600080fd5b815167ffffffffffffffff811681146103bf57600080fd5b9392505050565b600084516103d8818460208901610314565b80830190507f2e000000000000000000000000000000000000000000000000000000000000008082528551610414816001850160208a01610314565b6001920191820152835161042f816002840160208801610314565b0160020195945050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361049c5761049c61043c565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b6000826104e1576104e16104a3565b500490565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000828210156105275761052761043c565b500390565b60008261053b5761053b6104a3565b500690565b600082198211156105535761055361043c565b500190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fdfea164736f6c634300080f000a"
var L1BlockNumberDeployedBin = "0x60806040526004361061002d5760003560e01c806354fd4d5014610052578063b9b3efe9146100b157610048565b3661004857600061003c6100d4565b90508060005260206000f35b600061003c6100d4565b34801561005e57600080fd5b5061009b6040518060400160405280600581526020017f312e312e3000000000000000000000000000000000000000000000000000000081525081565b6040516100a89190610168565b60405180910390f35b3480156100bd57600080fd5b506100c66100d4565b6040519081526020016100a8565b600073420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff16638381f58a6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610135573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061015991906101db565b67ffffffffffffffff16905090565b600060208083528351808285015260005b8181101561019557858101830151858201604001528201610179565b818111156101a7576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b6000602082840312156101ed57600080fd5b815167ffffffffffffffff8116811461020557600080fd5b939250505056fea164736f6c634300080f000a"
func init() {
if err := json.Unmarshal([]byte(L1BlockNumberStorageLayoutJSON), L1BlockNumberStorageLayout); err != nil {
......
......@@ -43,7 +43,8 @@ func (l *l2Chain) PayloadByNumber(_ context.Context, _ uint64) (*eth.ExecutionPa
func Main(cliCtx *cli.Context) error {
log.Info("Initializing bootnode")
logCfg := oplog.ReadCLIConfig(cliCtx)
logger := oplog.NewLogger(logCfg)
logger := oplog.NewLogger(oplog.AppOut(cliCtx), logCfg)
oplog.SetGlobalLogHandler(logger.GetHandler())
m := metrics.NewMetrics("default")
ctx := context.Background()
......
......@@ -20,11 +20,16 @@ type Clients struct {
L2GethClient *gethclient.Client
}
// NewClients will create new RPC clients from a CLI context
func NewClients(ctx *cli.Context) (*Clients, error) {
// NewClientsFromContext will create new RPC clients from a CLI context
func NewClientsFromContext(ctx *cli.Context) (*Clients, error) {
return NewClients(ctx.String("l1-rpc-url"), ctx.String("l2-rpc-url"))
}
// NewClients will create new RPC clients from given URLs.
func NewClients(l1RpcURL, l2RpcURL string) (*Clients, error) {
clients := Clients{}
if l1RpcURL := ctx.String("l1-rpc-url"); l1RpcURL != "" {
if l1RpcURL != "" {
l1Client, l1RpcClient, l1GethClient, err := newClients(l1RpcURL)
if err != nil {
return nil, err
......@@ -34,7 +39,7 @@ func NewClients(ctx *cli.Context) (*Clients, error) {
clients.L1GethClient = l1GethClient
}
if l2RpcURL := ctx.String("l2-rpc-url"); l2RpcURL != "" {
if l2RpcURL != "" {
l2Client, l2RpcClient, l2GethClient, err := newClients(l2RpcURL)
if err != nil {
return nil, err
......
......@@ -58,7 +58,7 @@ func main() {
// entrypoint is the entrypoint for the check-l2 script
func entrypoint(ctx *cli.Context) error {
clients, err := clients.NewClients(ctx)
clients, err := clients.NewClientsFromContext(ctx)
if err != nil {
return err
}
......
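For illustration, a hedged sketch of the new URL-based constructor used outside a CLI context; the import path and endpoints are assumptions:

```go
// Hypothetical sketch: dialing both chains directly by URL via the new
// NewClients, bypassing the CLI context. Import path and URLs are assumptions.
package main

import (
	"log"

	"github.com/ethereum-optimism/optimism/op-chain-ops/clients"
)

func main() {
	c, err := clients.NewClients("http://localhost:8545", "http://localhost:9545")
	if err != nil {
		log.Fatal(err)
	}
	// Per the diff, per-chain clients are only populated when the URL is non-empty.
	_ = c.L1GethClient
	_ = c.L2GethClient
}
```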
# op-upgrade
A CLI tool for building Safe bundles that can upgrade many chains
at the same time. It outputs a JSON file that is compatible with
the Safe UI. It is assumed that the implementations being upgraded
to have already been deployed and that their contract addresses are
present in the `superchain-registry` repository. It is also assumed
that the semantic version file in the `superchain-registry` has been
updated. The tool uses semantic versioning to determine which contract
versions to upgrade to and then builds all of the calldata.
### Configuration
#### L1 RPC URL
The L1 RPC URL is used to determine which superchain to target. Any
L2 that is not based on the L1 chain corresponding to this RPC URL is
filtered out of the bundle. The URL is also used to double-check that
the data in the `superchain-registry` is correct.
#### Chain IDs
A list of L2 chain IDs can be passed to filter which L2 chains have
upgrades included in the bundle transaction. Omitting this argument
includes every chain in the superchain.
#### Deploy Config
The path to the `deploy-config` directory in the contracts package.
Since multiple L2 networks may be included in the bundle, the whole
`deploy-config` directory must be passed; the individual deploy config
files are then read out of the directory as needed.
#### Outfile
The file that the bundle should be written to. If omitted, the bundle
is written to stdout.
......@@ -21,7 +21,7 @@ var (
uint128Max = new(big.Int)
// The default values for the ResourceConfig, used as part of
// an EIP-1559 curve for deposit gas.
defaultResourceConfig = bindings.ResourceMeteringResourceConfig{
DefaultResourceConfig = bindings.ResourceMeteringResourceConfig{
MaxResourceLimit: 20_000_000,
ElasticityMultiplier: 10,
BaseFeeMaxChangeDenominator: 8,
......@@ -37,7 +37,7 @@ func init() {
panic("bad uint128Max")
}
// Set the maximum base fee on the default config.
defaultResourceConfig.MaximumBaseFee = uint128Max
DefaultResourceConfig.MaximumBaseFee = uint128Max
}
// BuildL1DeveloperGenesis will create a L1 genesis block after creating
......
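Because this change exports the default resource config, other packages can now copy and tweak it. A hedged sketch follows, with the import paths assumed and the override value purely illustrative:

```go
// Hypothetical sketch: copying the now-exported DefaultResourceConfig and
// overriding a single field. Import paths are assumptions; the override
// value is illustrative, not a recommendation.
package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-bindings/bindings"
	"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
)

func customResourceConfig() bindings.ResourceMeteringResourceConfig {
	cfg := genesis.DefaultResourceConfig // value copy of the exported default
	cfg.ElasticityMultiplier = 4         // illustrative override only
	return cfg
}

func main() {
	fmt.Println(customResourceConfig().MaxResourceLimit) // 20_000_000 per the diff above
}
```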
......@@ -5,7 +5,7 @@ import (
"testing"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
......
......@@ -9,7 +9,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum"
......
......@@ -5,7 +5,7 @@ import (
"testing"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
......