Commit 15e868a6 authored by Sebastian Stammler, committed by GitHub

Update op-geth dependency to upstream geth v1.13.8 and migrate to slog (#8917)

* Update op-geth dependency (v1.13.8) & migrate to slog

* op-e2e: format system_test.go

* op-chain-ops/genesis: Ignore nil addresses in BuildL1DeveloperGenesis

* go: Update to latest op-geth

commit fb90ca39bc5c4f45e99ef320abfab85eeb56c561

* update latest op-geth dependency

* op-program,op-wheel: Use new StateDB.OpenStorageTrie

* all: fix more slog stuff after merging

* proxyd: update geth 1.13.8 & migrate to slog

* op-ufm: update monorepo dependency to prev commit

* testlog: Return pointer with FindLog

* genesis: Parse addresses from dump string keys in BuildL1DeveloperGenesis

* op-ufm: go mod tidy

* update to latest op-geth

* genesis: Update ForgeDump.UnmarshalJSON to latest geth types

* eth: Use hexutil.U256 instead of uint256.Int as type in ExecutionPayload

This fixes JSON marshaling.

* op-e2e: fix usage of legacy geth levels

* go: update latest op-geth dependency

* check-ecotone: adapt to field type change

* Resolve remaining TODOs

* op-program: remove json-pretty formatting option from test

* go: update to latest op-geth v1.101308.0-rc.1

* op-dispute-mon: Fix logger setup

* log: improve LevelFromString docs

* op-e2e/config: treat EthNodeVerbosity as legacy log level

* all: fix order of imports
parent c5df4bb1
@@ -3,14 +3,11 @@ package cmd
  import (
      "io"
+     "golang.org/x/exp/slog"
      "github.com/ethereum/go-ethereum/log"
  )
- func Logger(w io.Writer, lvl log.Lvl) log.Logger {
-     h := log.StreamHandler(w, log.LogfmtFormat())
-     h = log.SyncHandler(h)
-     h = log.LvlFilterHandler(lvl, h)
-     l := log.New()
-     l.SetHandler(h)
-     return l
+ func Logger(w io.Writer, lvl slog.Level) log.Logger {
+     return log.NewLogger(log.LogfmtHandlerWithLevel(w, lvl))
  }
@@ -239,7 +239,7 @@ func Run(ctx *cli.Context) error {
          return err
      }
-     l := Logger(os.Stderr, log.LvlInfo)
+     l := Logger(os.Stderr, log.LevelInfo)
      outLog := &mipsevm.LoggingWriter{Name: "program std-out", Log: l}
      errLog := &mipsevm.LoggingWriter{Name: "program std-err", Log: l}
...
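The cannon change above is the template for the rest of the migration: the old log15-style handler chain (StreamHandler, SyncHandler, LvlFilterHandler, SetHandler) collapses into a single slog handler passed to log.NewLogger, and the log.Lvl* constants give way to the slog-backed log.Level* constants. A minimal, self-contained sketch of the new construction, using only geth v1.13.8 APIs that appear in this diff:

    package main

    import (
        "os"

        "github.com/ethereum/go-ethereum/log"
    )

    func main() {
        // One handler now carries both the output format (logfmt) and the level
        // filter that previously required StreamHandler + LvlFilterHandler.
        logger := log.NewLogger(log.LogfmtHandlerWithLevel(os.Stderr, log.LevelInfo))
        logger.Info("cannon program started", "stream", "std-out")
        logger.Debug("filtered out by the handler level") // below LevelInfo, so not written
    }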
@@ -36,7 +36,7 @@ func Main(version string) func(cliCtx *cli.Context) error {
      }
      l := oplog.NewLogger(oplog.AppOut(cliCtx), cfg.LogConfig)
-     oplog.SetGlobalLogHandler(l.GetHandler())
+     oplog.SetGlobalLogHandler(l.Handler())
      endpointMonitor := NewEndpointMonitor(cfg, l)
      l.Info(fmt.Sprintf("starting endpoint monitor with checkInterval=%s checkDuration=%s", cfg.CheckInterval, cfg.CheckDuration))
...
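The oplog.SetGlobalLogHandler calls throughout this commit switch from the removed GetHandler() to Handler(), which in geth v1.13.8 exposes the logger's underlying slog.Handler. A small sketch of that part of the pattern against the geth API alone (oplog is the monorepo's op-service/log package; per this diff, its SetGlobalLogHandler is assumed to accept the slog handler):

    package main

    import (
        "os"

        "golang.org/x/exp/slog"

        "github.com/ethereum/go-ethereum/log"
    )

    func main() {
        l := log.NewLogger(log.LogfmtHandlerWithLevel(os.Stdout, log.LevelInfo))
        // Handler() replaces the removed GetHandler()/SetHandler pair and returns the
        // logger's underlying slog.Handler; the services above forward exactly this
        // value to oplog.SetGlobalLogHandler.
        var h slog.Handler = l.Handler()
        log.NewLogger(h).Info("derived logger shares the same handler and level filter")
    }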
@@ -11,7 +11,7 @@ require (
      github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0
      github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
      github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240131175747-1300b1825140
-     github.com/ethereum/go-ethereum v1.13.5
+     github.com/ethereum/go-ethereum v1.13.8
      github.com/fsnotify/fsnotify v1.7.0
      github.com/go-chi/chi/v5 v5.0.11
      github.com/go-chi/docgen v1.2.0
@@ -23,7 +23,7 @@ require (
      github.com/hashicorp/golang-lru/v2 v2.0.5
      github.com/hashicorp/raft v1.6.0
      github.com/hashicorp/raft-boltdb v0.0.0-20231211162105-6c830fa4535e
-     github.com/holiman/uint256 v1.2.3
+     github.com/holiman/uint256 v1.2.4
      github.com/ipfs/go-datastore v0.6.0
      github.com/ipfs/go-ds-leveldb v0.5.0
      github.com/jackc/pgtype v1.14.1
@@ -60,7 +60,7 @@ require (
      github.com/armon/go-metrics v0.4.1 // indirect
      github.com/benbjohnson/clock v1.3.5 // indirect
      github.com/beorn7/perks v1.0.1 // indirect
-     github.com/bits-and-blooms/bitset v1.7.0 // indirect
+     github.com/bits-and-blooms/bitset v1.10.0 // indirect
      github.com/boltdb/bolt v1.3.1 // indirect
      github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
      github.com/btcsuite/btcd/btcutil v1.1.5 // indirect
@@ -75,7 +75,7 @@ require (
      github.com/containerd/cgroups v1.1.0 // indirect
      github.com/coreos/go-systemd/v22 v22.5.0 // indirect
      github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
-     github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 // indirect
+     github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect
      github.com/davecgh/go-spew v1.1.1 // indirect
      github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
      github.com/deckarep/golang-set/v2 v2.1.0 // indirect
@@ -92,11 +92,10 @@ require (
      github.com/flynn/noise v1.0.0 // indirect
      github.com/francoispqt/gojay v1.2.13 // indirect
      github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
-     github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b // indirect
+     github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect
      github.com/getsentry/sentry-go v0.18.0 // indirect
      github.com/go-ole/go-ole v1.2.6 // indirect
      github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
-     github.com/go-stack/stack v1.8.1 // indirect
      github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
      github.com/godbus/dbus/v5 v5.1.0 // indirect
      github.com/gofrs/flock v0.8.1 // indirect
@@ -219,7 +218,7 @@ require (
      rsc.io/tmplfunc v0.0.3 // indirect
  )
- replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101305.3-rc.5
+ replace github.com/ethereum/go-ethereum v1.13.8 => github.com/ethereum-optimism/op-geth v1.101308.0-rc.1
  //replace github.com/ethereum-optimism/superchain-registry/superchain => ../superchain-registry/superchain
  //replace github.com/ethereum/go-ethereum v1.13.5 => ../go-ethereum
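Note that the replace directive is now pinned to the upstream version it overrides. In Go modules, a replace whose left-hand side carries a version applies only to that exact version of the module; other versions of github.com/ethereum/go-ethereum are not redirected. A minimal go.mod illustration with a hypothetical consumer module (module path invented for the example):

    module example.com/op-consumer // hypothetical module path, for illustration only

    go 1.21

    require github.com/ethereum/go-ethereum v1.13.8

    // Only v1.13.8 is redirected to op-geth; any other required go-ethereum
    // version would resolve to upstream instead.
    replace github.com/ethereum/go-ethereum v1.13.8 => github.com/ethereum-optimism/op-geth v1.101308.0-rc.1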
@@ -42,8 +42,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
  github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
  github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
  github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
- github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=
- github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+ github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88=
+ github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
  github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
  github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
  github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
@@ -122,8 +122,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
  github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
  github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
  github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
- github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 h1:DuBDHVjgGMPki7bAyh91+3cF1Vh34sAEdH8JQgbc2R0=
- github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI=
+ github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ=
+ github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs=
  github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
  github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
  github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@@ -170,8 +170,8 @@ github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/
  github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
  github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc=
  github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
- github.com/ethereum-optimism/op-geth v1.101305.3-rc.5 h1:ZDsZt9TWPTV8pCAklbk2IDxPbh23P6ZvaIeurGt/tL0=
- github.com/ethereum-optimism/op-geth v1.101305.3-rc.5/go.mod h1:4i/arCdcrEzkmLO5XNMYo8s8eyhmKILxsLxz0PNwJwk=
+ github.com/ethereum-optimism/op-geth v1.101308.0-rc.1 h1:cRlgrl7EQ2eh5IyKXgb4QglTJC5iphi/JC9MuWQzNTo=
+ github.com/ethereum-optimism/op-geth v1.101308.0-rc.1/go.mod h1:ztegoX+28Fc+7JbR3AEukmpWYyg5psoxF3Ax+BTkYi0=
  github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240131175747-1300b1825140 h1:F2Q+Gj4+W67CKZpVR+MEDL+EiIpgbx6VtwtdsPR4mbQ=
  github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20240131175747-1300b1825140/go.mod h1:7xh2awFQqsiZxFrHKTgEd+InVfDRrkKVUIuK8SAFHp0=
  github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY=
@@ -194,8 +194,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
  github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
  github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays=
  github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
- github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b h1:vMT47RYsrftsHSTQhqXwC3BYflo38OLC3Y4LtXtLyU0=
- github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b/go.mod h1:CDncRYVRSDqwakm282WEkjfaAj1hxU/v5RXxk5nXOiI=
+ github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE=
+ github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc=
  github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
  github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
  github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
@@ -227,8 +227,6 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
  github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
  github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
  github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
- github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
- github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
  github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
  github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
  github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
@@ -340,8 +338,8 @@ github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZ
  github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
  github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
  github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
- github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o=
- github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
+ github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
+ github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
  github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
  github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
  github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
@@ -439,8 +437,6 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
  github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
  github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
  github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
- github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
- github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
  github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c h1:AqsttAyEyIEsNz5WLRwuRwjiT5CMDUfLk6cFJDVPebs=
  github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
  github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
@@ -903,7 +899,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
  golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
  golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
  golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
- golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
  golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
  golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
  golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -946,7 +941,6 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
  golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
  golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
  golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
- golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
  golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
  golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
  golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
...
@@ -104,7 +104,7 @@ func (mbv *MockBridgeTransfersView) L2BridgeWithdrawalSum(database.WithdrawFilte
  }
  func TestHealthz(t *testing.T) {
-     logger := testlog.Logger(t, log.LvlInfo)
+     logger := testlog.Logger(t, log.LevelInfo)
      cfg := &Config{
          DB: &TestDBConnector{BridgeTransfers: &MockBridgeTransfersView{}},
          HTTPServer: apiConfig,
@@ -122,7 +122,7 @@ func TestHealthz(t *testing.T) {
  }
  func TestL1BridgeDepositsHandler(t *testing.T) {
-     logger := testlog.Logger(t, log.LvlInfo)
+     logger := testlog.Logger(t, log.LevelInfo)
      cfg := &Config{
          DB: &TestDBConnector{BridgeTransfers: &MockBridgeTransfersView{}},
          HTTPServer: apiConfig,
@@ -151,7 +151,7 @@ func TestL1BridgeDepositsHandler(t *testing.T) {
  }
  func TestL2BridgeWithdrawalsByAddressHandler(t *testing.T) {
-     logger := testlog.Logger(t, log.LvlInfo)
+     logger := testlog.Logger(t, log.LevelInfo)
      cfg := &Config{
          DB: &TestDBConnector{BridgeTransfers: &MockBridgeTransfersView{}},
          HTTPServer: apiConfig,
...
@@ -34,7 +34,7 @@ var (
  func runIndexer(ctx *cli.Context, shutdown context.CancelCauseFunc) (cliapp.Lifecycle, error) {
      log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "indexer")
-     oplog.SetGlobalLogHandler(log.GetHandler())
+     oplog.SetGlobalLogHandler(log.Handler())
      log.Info("running indexer...")
      cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
@@ -48,7 +48,7 @@ func runIndexer(ctx *cli.Context, shutdown context.CancelCauseFunc) (cliapp.Life
  func runApi(ctx *cli.Context, _ context.CancelCauseFunc) (cliapp.Lifecycle, error) {
      log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "api")
-     oplog.SetGlobalLogHandler(log.GetHandler())
+     oplog.SetGlobalLogHandler(log.Handler())
      log.Info("running api...")
      cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
@@ -71,7 +71,7 @@ func runMigrations(ctx *cli.Context) error {
      ctx.Context = opio.CancelOnInterrupt(ctx.Context)
      log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "migrations")
-     oplog.SetGlobalLogHandler(log.GetHandler())
+     oplog.SetGlobalLogHandler(log.Handler())
      log.Info("running migrations...")
      cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
...
@@ -12,7 +12,7 @@ import (
  )
  func TestLoadConfig(t *testing.T) {
-     logger := testlog.Logger(t, log.LvlInfo)
+     logger := testlog.Logger(t, log.LevelInfo)
      tmpfile, err := os.CreateTemp("", "test.toml")
      require.NoError(t, err)
      defer os.Remove(tmpfile.Name())
@@ -112,7 +112,7 @@ func TestLoadConfigWithoutPreset(t *testing.T) {
      err = tmpfile.Close()
      require.NoError(t, err)
-     logger := testlog.Logger(t, log.LvlInfo)
+     logger := testlog.Logger(t, log.LevelInfo)
      conf, err := LoadConfig(logger, tmpfile.Name())
      require.NoError(t, err)
@@ -146,7 +146,7 @@ func TestLoadConfigWithUnknownPreset(t *testing.T) {
      err = tmpfile.Close()
      require.NoError(t, err)
-     logger := testlog.Logger(t, log.LvlInfo)
+     logger := testlog.Logger(t, log.LevelInfo)
      conf, err := LoadConfig(logger, tmpfile.Name())
      require.Error(t, err)
@@ -178,7 +178,7 @@ func TestLoadConfigPollingValues(t *testing.T) {
      err = tmpfile.Close()
      require.NoError(t, err)
-     logger := testlog.Logger(t, log.LvlInfo)
+     logger := testlog.Logger(t, log.LevelInfo)
      conf, err := LoadConfig(logger, tmpfile.Name())
      require.NoError(t, err)
@@ -224,7 +224,7 @@ func TestLoadedConfigPresetPrecendence(t *testing.T) {
      err = tmpfile.Close()
      require.NoError(t, err)
-     logger := testlog.Logger(t, log.LvlInfo)
+     logger := testlog.Logger(t, log.LevelInfo)
      conf, err := LoadConfig(logger, tmpfile.Name())
      require.NoError(t, err)
@@ -265,7 +265,7 @@ func TestLocalDevnet(t *testing.T) {
      err = tmpfile.Close()
      require.NoError(t, err)
-     logger := testlog.Logger(t, log.LvlInfo)
+     logger := testlog.Logger(t, log.LevelInfo)
      conf, err := LoadConfig(logger, tmpfile.Name())
      require.NoError(t, err)
@@ -276,7 +276,7 @@ func TestLocalDevnet(t *testing.T) {
  }
  func TestThrowsOnUnknownKeys(t *testing.T) {
-     logger := testlog.Logger(t, log.LvlInfo)
+     logger := testlog.Logger(t, log.LevelInfo)
      tmpfile, err := os.CreateTemp("", "test.toml")
      require.NoError(t, err)
      defer os.Remove(tmpfile.Name())
...
@@ -13,15 +13,17 @@ import (
      "github.com/ethereum-optimism/optimism/indexer/client"
      "github.com/ethereum-optimism/optimism/indexer/config"
      "github.com/ethereum-optimism/optimism/indexer/database"
-     "github.com/prometheus/client_golang/prometheus"
      op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
+     oplog "github.com/ethereum-optimism/optimism/op-service/log"
      "github.com/ethereum-optimism/optimism/op-service/metrics"
      "github.com/ethereum-optimism/optimism/op-service/testlog"
      "github.com/ethereum/go-ethereum/ethclient"
      "github.com/ethereum/go-ethereum/log"
      _ "github.com/jackc/pgx/v5/stdlib"
+     "github.com/prometheus/client_golang/prometheus"
      "github.com/stretchr/testify/require"
  )
@@ -56,7 +58,7 @@ func init() {
      // Disable the global logger. Ideally we'd like to dump geth
      // logs per-test but that's possible when running tests in
      // parallel as the root logger is shared.
-     log.Root().SetHandler(log.DiscardHandler())
+     oplog.SetGlobalLogHandler(log.DiscardHandler())
  }
  // createE2ETestSuite ... Create a new E2E test suite
@@ -78,8 +80,7 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
          t.Log("set env 'ENABLE_ROLLUP_LOGS' to show rollup logs")
          for name := range opCfg.Loggers {
              t.Logf("discarding logs for %s", name)
-             noopLog := log.New()
-             noopLog.SetHandler(log.DiscardHandler())
+             noopLog := log.NewLogger(log.DiscardHandler())
              opCfg.Loggers[name] = noopLog
          }
      }
@@ -114,7 +115,7 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
          MetricsServer: config.ServerConfig{Host: "127.0.0.1", Port: 0},
      }
-     indexerLog := testlog.Logger(t, log.LvlInfo).New("role", "indexer")
+     indexerLog := testlog.Logger(t, log.LevelInfo).New("role", "indexer")
      ix, err := indexer.NewIndexer(context.Background(), indexerLog, indexerCfg, func(cause error) {
          if cause != nil {
              t.Fatalf("indexer shut down with critical error: %v", cause)
@@ -127,7 +128,7 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
      })
      // API Configuration and Start
-     apiLog := testlog.Logger(t, log.LvlInfo).New("role", "indexer_api")
+     apiLog := testlog.Logger(t, log.LevelInfo).New("role", "indexer_api")
      apiCfg := &api.Config{
          DB: &api.TestDBConnector{BridgeTransfers: ix.DB.BridgeTransfers}, // reuse the same DB
          HTTPServer: config.ServerConfig{Host: "127.0.0.1", Port: 0},
@@ -186,8 +187,7 @@ func setupTestDatabase(t *testing.T) string {
          Password: "",
      }
-     noopLog := log.New()
-     noopLog.SetHandler(log.DiscardHandler())
+     noopLog := log.NewLogger(log.DiscardHandler())
      db, err := database.NewDB(context.Background(), noopLog, dbConfig)
      require.NoError(t, err)
      defer db.Close()
...
@@ -105,7 +105,7 @@ func TestL1ETLConstruction(t *testing.T) {
      t.Run(test.name, func(t *testing.T) {
          ts := test.construction()
-         logger := testlog.Logger(t, log.LvlInfo)
+         logger := testlog.Logger(t, log.LevelInfo)
          cfg := Config{StartHeight: ts.start}
          etl, err := NewL1ETL(cfg, logger, ts.db.DB, etlMetrics, ts.client, ts.contracts, func(cause error) {
...
@@ -25,7 +25,7 @@ func Main(version string) cliapp.LifecycleAction {
      }
      l := oplog.NewLogger(oplog.AppOut(cliCtx), cfg.LogConfig)
-     oplog.SetGlobalLogHandler(l.GetHandler())
+     oplog.SetGlobalLogHandler(l.Handler())
      opservice.ValidateEnvVars(flags.EnvVarPrefix, flags.Flags, l)
      l.Info("Initializing Batch Submitter")
...
@@ -52,7 +52,7 @@ func TestChannelManagerBatchType(t *testing.T) {
  // ChannelManagerReturnsErrReorg ensures that the channel manager
  // detects a reorg when it has cached L1 blocks.
  func ChannelManagerReturnsErrReorg(t *testing.T, batchType uint) {
-     log := testlog.Logger(t, log.LvlCrit)
+     log := testlog.Logger(t, log.LevelCrit)
      m := NewChannelManager(log, metrics.NoopMetrics, ChannelConfig{BatchType: batchType}, &rollup.Config{})
      m.Clear()
@@ -83,7 +83,7 @@ func ChannelManagerReturnsErrReorg(t *testing.T, batchType uint) {
  // ChannelManagerReturnsErrReorgWhenDrained ensures that the channel manager
  // detects a reorg even if it does not have any blocks inside it.
  func ChannelManagerReturnsErrReorgWhenDrained(t *testing.T, batchType uint) {
-     log := testlog.Logger(t, log.LvlCrit)
+     log := testlog.Logger(t, log.LevelCrit)
      m := NewChannelManager(log, metrics.NoopMetrics,
          ChannelConfig{
              MaxFrameSize: 120_000,
@@ -116,7 +116,7 @@ func ChannelManager_Clear(t *testing.T, batchType uint) {
      require := require.New(t)
      // Create a channel manager
-     log := testlog.Logger(t, log.LvlCrit)
+     log := testlog.Logger(t, log.LevelCrit)
      rng := rand.New(rand.NewSource(time.Now().UnixNano()))
      m := NewChannelManager(log, metrics.NoopMetrics, ChannelConfig{
          // Need to set the channel timeout here so we don't clear pending
@@ -196,7 +196,7 @@ func ChannelManager_Clear(t *testing.T, batchType uint) {
  func ChannelManager_TxResend(t *testing.T, batchType uint) {
      require := require.New(t)
      rng := rand.New(rand.NewSource(time.Now().UnixNano()))
-     log := testlog.Logger(t, log.LvlError)
+     log := testlog.Logger(t, log.LevelError)
      m := NewChannelManager(log, metrics.NoopMetrics,
          ChannelConfig{
              MaxFrameSize: 120_000,
@@ -244,7 +244,7 @@ func ChannelManager_TxResend(t *testing.T, batchType uint) {
  func ChannelManagerCloseBeforeFirstUse(t *testing.T, batchType uint) {
      require := require.New(t)
      rng := rand.New(rand.NewSource(time.Now().UnixNano()))
-     log := testlog.Logger(t, log.LvlCrit)
+     log := testlog.Logger(t, log.LevelCrit)
      m := NewChannelManager(log, metrics.NoopMetrics,
          ChannelConfig{
              MaxFrameSize: 100,
@@ -275,7 +275,7 @@ func ChannelManagerCloseBeforeFirstUse(t *testing.T, batchType uint) {
  // channel frames.
  func ChannelManagerCloseNoPendingChannel(t *testing.T, batchType uint) {
      require := require.New(t)
-     log := testlog.Logger(t, log.LvlCrit)
+     log := testlog.Logger(t, log.LevelCrit)
      m := NewChannelManager(log, metrics.NoopMetrics,
          ChannelConfig{
              MaxFrameSize: 100,
@@ -321,7 +321,7 @@ func ChannelManagerClosePendingChannel(t *testing.T, batchType uint) {
      // The number of batch txs depends on compression of the random data, hence the static test RNG seed.
      // Example of different RNG seed that creates less than 2 frames: 1698700588902821588
      rng := rand.New(rand.NewSource(123))
-     log := testlog.Logger(t, log.LvlError)
+     log := testlog.Logger(t, log.LevelError)
      m := NewChannelManager(log, metrics.NoopMetrics,
          ChannelConfig{
              MaxFrameSize: 10_000,
@@ -379,7 +379,7 @@ func TestChannelManager_Close_PartiallyPendingChannel(t *testing.T) {
      // The number of batch txs depends on compression of the random data, hence the static test RNG seed.
      // Example of different RNG seed that creates less than 2 frames: 1698700588902821588
      rng := rand.New(rand.NewSource(123))
-     log := testlog.Logger(t, log.LvlError)
+     log := testlog.Logger(t, log.LevelError)
      const framesize = 2200
      m := NewChannelManager(log, metrics.NoopMetrics,
          ChannelConfig{
@@ -441,7 +441,7 @@ func TestChannelManager_Close_PartiallyPendingChannel(t *testing.T) {
  func ChannelManagerCloseAllTxsFailed(t *testing.T, batchType uint) {
      require := require.New(t)
      rng := rand.New(rand.NewSource(time.Now().UnixNano()))
-     log := testlog.Logger(t, log.LvlCrit)
+     log := testlog.Logger(t, log.LevelCrit)
      m := NewChannelManager(log, metrics.NoopMetrics,
          ChannelConfig{
              MaxFrameSize: 1000,
...
@@ -18,7 +18,7 @@ import (
  // correctly identifies when a pending channel is timed out.
  func TestChannelTimeout(t *testing.T) {
      // Create a new channel manager with a ChannelTimeout
-     log := testlog.Logger(t, log.LvlCrit)
+     log := testlog.Logger(t, log.LevelCrit)
      m := NewChannelManager(log, metrics.NoopMetrics, ChannelConfig{
          ChannelTimeout: 100,
      }, &rollup.Config{})
@@ -64,7 +64,7 @@ func TestChannelTimeout(t *testing.T) {
  // TestChannelNextTxData checks the nextTxData function.
  func TestChannelNextTxData(t *testing.T) {
-     log := testlog.Logger(t, log.LvlCrit)
+     log := testlog.Logger(t, log.LevelCrit)
      m := NewChannelManager(log, metrics.NoopMetrics, ChannelConfig{}, &rollup.Config{})
      m.Clear()
@@ -108,7 +108,7 @@ func TestChannelNextTxData(t *testing.T) {
  // TestChannelTxConfirmed checks the [ChannelManager.TxConfirmed] function.
  func TestChannelTxConfirmed(t *testing.T) {
      // Create a channel manager
-     log := testlog.Logger(t, log.LvlCrit)
+     log := testlog.Logger(t, log.LevelCrit)
      m := NewChannelManager(log, metrics.NoopMetrics, ChannelConfig{
          // Need to set the channel timeout here so we don't clear pending
          // channels on confirmation. This would result in [TxConfirmed]
@@ -162,7 +162,7 @@ func TestChannelTxConfirmed(t *testing.T) {
  // TestChannelTxFailed checks the [ChannelManager.TxFailed] function.
  func TestChannelTxFailed(t *testing.T) {
      // Create a channel manager
-     log := testlog.Logger(t, log.LvlCrit)
+     log := testlog.Logger(t, log.LevelCrit)
      m := NewChannelManager(log, metrics.NoopMetrics, ChannelConfig{}, &rollup.Config{})
      m.Clear()
...
@@ -28,9 +28,7 @@ import (
      "github.com/ethereum-optimism/optimism/op-service/txmgr"
  )
- var (
-     ErrAlreadyStopped = errors.New("already stopped")
- )
+ var ErrAlreadyStopped = errors.New("already stopped")
  type BatcherConfig struct {
      NetworkTimeout time.Duration
...
@@ -72,7 +72,7 @@ func main() {
  func setupLogger(c *cli.Context) log.Logger {
      logger := oplog.NewLogger(oplog.AppOut(c), oplog.ReadCLIConfig(c))
-     oplog.SetGlobalLogHandler(logger.GetHandler())
+     oplog.SetGlobalLogHandler(logger.Handler())
      return logger
  }
...
@@ -44,7 +44,7 @@ func Main(cliCtx *cli.Context) error {
      log.Info("Initializing bootnode")
      logCfg := oplog.ReadCLIConfig(cliCtx)
      logger := oplog.NewLogger(oplog.AppOut(cliCtx), logCfg)
-     oplog.SetGlobalLogHandler(logger.GetHandler())
+     oplog.SetGlobalLogHandler(logger.Handler())
      m := metrics.NewMetrics("default")
      ctx := context.Background()
...
@@ -12,7 +12,9 @@ import (
      "github.com/ethereum-optimism/optimism/op-node/rollup"
      "github.com/ethereum-optimism/optimism/op-service/client"
      "github.com/ethereum-optimism/optimism/op-service/eth"
+     oplog "github.com/ethereum-optimism/optimism/op-service/log"
      "github.com/ethereum-optimism/optimism/op-service/sources"
+     "github.com/mattn/go-isatty"
      "github.com/ethereum/go-ethereum/common"
      "github.com/ethereum/go-ethereum/common/math"
@@ -206,7 +208,10 @@ func CheckInactivation(f func(Args, bool) error, ctx Args, forkActivated bool, v
  }
  func main() {
-     logger := log.New()
+     color := isatty.IsTerminal(os.Stderr.Fd())
+     handler := log.NewTerminalHandler(os.Stderr, color)
+     oplog.SetGlobalLogHandler(handler)
+     logger := log.NewLogger(handler)
      // Define the flag variables
      var (
...
@@ -9,11 +9,13 @@ import (
      "github.com/urfave/cli/v2"
      "github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
+     oplog "github.com/ethereum-optimism/optimism/op-service/log"
      "github.com/ethereum/go-ethereum/log"
  )
  func main() {
-     log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd()))))
+     color := isatty.IsTerminal(os.Stderr.Fd())
+     oplog.SetGlobalLogHandler(log.NewTerminalHandler(os.Stderr, color))
      app := &cli.App{
          Name: "check-deploy-config",
...
@@ -14,9 +14,11 @@ import (
      "github.com/ethereum-optimism/optimism/op-node/rollup"
      "github.com/ethereum-optimism/optimism/op-service/client"
      "github.com/ethereum-optimism/optimism/op-service/eth"
+     oplog "github.com/ethereum-optimism/optimism/op-service/log"
      "github.com/ethereum-optimism/optimism/op-service/retry"
      "github.com/ethereum-optimism/optimism/op-service/sources"
      "github.com/ethereum-optimism/optimism/op-service/testutils"
      "github.com/ethereum/go-ethereum/common"
      "github.com/ethereum/go-ethereum/core"
      "github.com/ethereum/go-ethereum/core/types"
@@ -28,7 +30,8 @@ import (
  )
  func main() {
-     log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd()))))
+     color := isatty.IsTerminal(os.Stderr.Fd())
+     oplog.SetGlobalLogHandler(log.NewTerminalHandler(os.Stderr, color))
      app := cli.NewApp()
      app.Name = "check-derivation"
@@ -157,7 +160,7 @@ func detectL2Reorg(cliCtx *cli.Context) error {
          return err
      }
-     var pollingInterval = cliCtx.Duration("polling-interval")
+     pollingInterval := cliCtx.Duration("polling-interval")
      // blockMap maps blockNumber to blockHash
      blockMap := make(map[uint64]common.Hash)
      var prevUnsafeHeadNum uint64
@@ -326,7 +329,7 @@ func checkConsolidation(cliCtx *cli.Context) error {
      if err != nil {
          return err
      }
-     var pollingInterval = cliCtx.Duration("polling-interval")
+     pollingInterval := cliCtx.Duration("polling-interval")
      privateKey, err := getPrivateKey(cliCtx)
      if err != nil {
          return err
...
@@ -335,8 +335,10 @@ func execTx(ctx context.Context, to *common.Address, data []byte, expectRevert b
      if err != nil {
          return fmt.Errorf("failed to get chainID: %w", err)
      }
-     tx := types.NewTx(&types.DynamicFeeTx{ChainID: chainID, Nonce: nonce,
-         GasTipCap: tip, GasFeeCap: maxFee, Gas: 500000, To: to, Data: data})
+     tx := types.NewTx(&types.DynamicFeeTx{
+         ChainID: chainID, Nonce: nonce,
+         GasTipCap: tip, GasFeeCap: maxFee, Gas: 500000, To: to, Data: data,
+     })
      signer := types.NewCancunSigner(chainID)
      signedTx, err := types.SignTx(tx, signer, env.key)
      if err != nil {
@@ -657,8 +659,9 @@ func checkL1Fees(ctx context.Context, env *actionEnv) error {
          return fmt.Errorf("failed to retrieve matching L1 block %s: %w", headRef, err)
      }
      gasTip := big.NewInt(2 * params.GWei)
+     baseFee := (*uint256.Int)(&payload.ExecutionPayload.BaseFeePerGas).ToBig()
      gasMaxFee := new(big.Int).Add(
-         new(big.Int).Mul(big.NewInt(2), payload.ExecutionPayload.BaseFeePerGas.ToBig()), gasTip)
+         new(big.Int).Mul(big.NewInt(2), baseFee), gasTip)
      to := common.Address{1, 2, 3, 5}
      txData := &types.DynamicFeeTx{
          ChainID: rollupCfg.L2ChainID,
...
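The checkL1Fees change follows from the ExecutionPayload field-type switch called out in the commit message: BaseFeePerGas is now a hexutil.U256 rather than a uint256.Int, so callers convert through *uint256.Int to reach a *big.Int. A small sketch of that conversion and of the hex-quantity JSON encoding the switch is meant to fix (assuming hexutil.U256 is, as in geth, a named uint256.Int with 0x-prefixed text marshaling):

    package main

    import (
        "encoding/json"
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/common/hexutil"
        "github.com/holiman/uint256"
    )

    func main() {
        baseFeePerGas := hexutil.U256(*uint256.NewInt(1000)) // stand-in for payload.ExecutionPayload.BaseFeePerGas
        // Same conversion as in checkL1Fees above: hexutil.U256 -> *uint256.Int -> *big.Int.
        baseFee := (*uint256.Int)(&baseFeePerGas).ToBig()
        fmt.Println(new(big.Int).Mul(big.NewInt(2), baseFee)) // 2000

        out, _ := json.Marshal(&baseFeePerGas)
        fmt.Println(string(out)) // "0x3e8" - encoded as a hex quantity string
    }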
@@ -14,6 +14,7 @@ import (
      "github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
      "github.com/ethereum-optimism/optimism/op-chain-ops/safe"
      "github.com/ethereum-optimism/optimism/op-chain-ops/upgrades"
+     oplog "github.com/ethereum-optimism/optimism/op-service/log"
      "github.com/ethereum-optimism/superchain-registry/superchain"
  )
@@ -56,7 +57,8 @@ var deployments = map[uint64]superchain.ImplementationList{
  }
  func main() {
-     log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd()))))
+     color := isatty.IsTerminal(os.Stderr.Fd())
+     oplog.SetGlobalLogHandler(log.NewTerminalHandler(os.Stderr, color))
      app := &cli.App{
          Name: "op-upgrade",
...
@@ -19,12 +19,14 @@ import (
      "github.com/ethereum-optimism/optimism/op-chain-ops/safe"
      "github.com/ethereum-optimism/optimism/op-chain-ops/upgrades"
      "github.com/ethereum-optimism/optimism/op-service/jsonutil"
+     oplog "github.com/ethereum-optimism/optimism/op-service/log"
      "github.com/ethereum-optimism/superchain-registry/superchain"
  )
  func main() {
-     log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd()))))
+     color := isatty.IsTerminal(os.Stderr.Fd())
+     oplog.SetGlobalLogHandler(log.NewTerminalHandler(os.Stderr, color))
      app := &cli.App{
          Name: "op-upgrade",
...
@@ -14,6 +14,8 @@ import (
      "github.com/ethereum-optimism/optimism/op-chain-ops/upgrades"
      "github.com/ethereum-optimism/optimism/op-service/jsonutil"
+     oplog "github.com/ethereum-optimism/optimism/op-service/log"
      "github.com/ethereum-optimism/superchain-registry/superchain"
  )
@@ -29,7 +31,8 @@ type ChainVersionCheck struct {
  }
  func main() {
-     log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd()))))
+     color := isatty.IsTerminal(os.Stderr.Fd())
+     oplog.SetGlobalLogHandler(log.NewTerminalHandler(os.Stderr, color))
      app := &cli.App{
          Name: "op-version-check",
...
@@ -7,12 +7,14 @@ import (
      "github.com/mattn/go-isatty"
      "github.com/urfave/cli/v2"
+     "golang.org/x/exp/slog"
      "github.com/ethereum/go-ethereum/common"
      "github.com/ethereum/go-ethereum/log"
      "github.com/ethereum/go-ethereum/params"
      opservice "github.com/ethereum-optimism/optimism/op-service"
+     oplog "github.com/ethereum-optimism/optimism/op-service/log"
  )
  const EnvPrefix = "OP_CHAIN_OPS_PROTOCOL_VERSION"
@@ -51,7 +53,8 @@ var (
  )
  func main() {
-     log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd()))))
+     color := isatty.IsTerminal(os.Stderr.Fd())
+     oplog.SetGlobalLogHandler(log.NewTerminalHandlerWithLevel(os.Stdout, slog.LevelDebug, color))
      app := &cli.App{
          Name: "protocol-version",
...
...@@ -6,11 +6,13 @@ import ( ...@@ -6,11 +6,13 @@ import (
"github.com/mattn/go-isatty" "github.com/mattn/go-isatty"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"golang.org/x/exp/slog"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
opservice "github.com/ethereum-optimism/optimism/op-service" opservice "github.com/ethereum-optimism/optimism/op-service"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
) )
const EnvPrefix = "OP_CHAIN_OPS_RECEIPT_REFERENCE_BUILDER" const EnvPrefix = "OP_CHAIN_OPS_RECEIPT_REFERENCE_BUILDER"
...@@ -75,7 +77,8 @@ var ( ...@@ -75,7 +77,8 @@ var (
) )
func main() { func main() {
log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd())))) color := isatty.IsTerminal(os.Stderr.Fd())
oplog.SetGlobalLogHandler(log.NewTerminalHandlerWithLevel(os.Stdout, slog.LevelDebug, color))
app := &cli.App{ app := &cli.App{
Name: "receipt-reference-builder", Name: "receipt-reference-builder",
......
...@@ -12,6 +12,7 @@ import ( ...@@ -12,6 +12,7 @@ import (
opservice "github.com/ethereum-optimism/optimism/op-service" opservice "github.com/ethereum-optimism/optimism/op-service"
"github.com/ethereum-optimism/optimism/op-service/jsonutil" "github.com/ethereum-optimism/optimism/op-service/jsonutil"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
...@@ -52,7 +53,8 @@ var ( ...@@ -52,7 +53,8 @@ var (
) )
func main() { func main() {
log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd())))) color := isatty.IsTerminal(os.Stderr.Fd())
oplog.SetGlobalLogHandler(log.NewTerminalHandler(os.Stderr, color))
app := &cli.App{ app := &cli.App{
Name: "registry-data", Name: "registry-data",
......
...@@ -765,14 +765,14 @@ type ForgeDump gstate.Dump ...@@ -765,14 +765,14 @@ type ForgeDump gstate.Dump
func (d *ForgeDump) UnmarshalJSON(b []byte) error { func (d *ForgeDump) UnmarshalJSON(b []byte) error {
type forgeDumpAccount struct { type forgeDumpAccount struct {
Balance string `json:"balance"` Balance string `json:"balance"`
Nonce hexutil.Uint64 `json:"nonce"` Nonce hexutil.Uint64 `json:"nonce"`
Root hexutil.Bytes `json:"root"` Root hexutil.Bytes `json:"root"`
CodeHash hexutil.Bytes `json:"codeHash"` CodeHash hexutil.Bytes `json:"codeHash"`
Code hexutil.Bytes `json:"code,omitempty"` Code hexutil.Bytes `json:"code,omitempty"`
Storage map[common.Hash]string `json:"storage,omitempty"` Storage map[common.Hash]string `json:"storage,omitempty"`
Address *common.Address `json:"address,omitempty"` Address *common.Address `json:"address,omitempty"`
SecureKey hexutil.Bytes `json:"key,omitempty"` AddressHash hexutil.Bytes `json:"key,omitempty"`
} }
type forgeDump struct { type forgeDump struct {
Root string `json:"root"` Root string `json:"root"`
...@@ -784,17 +784,17 @@ func (d *ForgeDump) UnmarshalJSON(b []byte) error { ...@@ -784,17 +784,17 @@ func (d *ForgeDump) UnmarshalJSON(b []byte) error {
} }
d.Root = dump.Root d.Root = dump.Root
d.Accounts = make(map[common.Address]gstate.DumpAccount) d.Accounts = make(map[string]gstate.DumpAccount)
for addr, acc := range dump.Accounts { for addr, acc := range dump.Accounts {
d.Accounts[addr] = gstate.DumpAccount{ d.Accounts[addr.String()] = gstate.DumpAccount{
Balance: acc.Balance, Balance: acc.Balance,
Nonce: (uint64)(acc.Nonce), Nonce: (uint64)(acc.Nonce),
Root: acc.Root, Root: acc.Root,
CodeHash: acc.CodeHash, CodeHash: acc.CodeHash,
Code: acc.Code, Code: acc.Code,
Storage: acc.Storage, Storage: acc.Storage,
Address: acc.Address, Address: acc.Address,
SecureKey: acc.SecureKey, AddressHash: acc.AddressHash,
} }
} }
return nil return nil
......
...@@ -56,7 +56,16 @@ func BuildL1DeveloperGenesis(config *DeployConfig, dump *gstate.Dump, l1Deployme ...@@ -56,7 +56,16 @@ func BuildL1DeveloperGenesis(config *DeployConfig, dump *gstate.Dump, l1Deployme
SetPrecompileBalances(memDB) SetPrecompileBalances(memDB)
if dump != nil { if dump != nil {
for address, account := range dump.Accounts { for addrstr, account := range dump.Accounts {
if !common.IsHexAddress(addrstr) {
// Changes in https://github.com/ethereum/go-ethereum/pull/28504
// add accounts to the Dump keyed by "pre(<AddressHash>)"
// when the address itself is nil.
// Depending on how `dump` was created, the key may therefore be
// such a pre-image key rather than an address, so we skip it.
continue
}
address := common.HexToAddress(addrstr)
name := "<unknown>" name := "<unknown>"
if l1Deployments != nil { if l1Deployments != nil {
if n := l1Deployments.GetName(address); n != "" { if n := l1Deployments.GetName(address); n != "" {
......
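As a hedged illustration of the key shapes the comment above refers to: after the upstream state-dump change, dump.Accounts is keyed by strings that are either hex addresses or pre-image markers, so filtering might look like the sketch below. The sample keys are made up for illustration; only common.IsHexAddress and common.HexToAddress are taken from the diff.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical dump keys: one plain hex address, one pre-image style key.
	keys := []string{
		"0x4200000000000000000000000000000000000016",
		"pre(0x1a2b3c4d...)", // placeholder pre-image key, skipped below
	}
	for _, k := range keys {
		if !common.IsHexAddress(k) {
			fmt.Println("skipping non-address key:", k)
			continue
		}
		addr := common.HexToAddress(k)
		fmt.Println("processing account:", addr)
	}
}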
...@@ -16,7 +16,10 @@ import ( ...@@ -16,7 +16,10 @@ import (
var _ vm.StateDB = (*MemoryStateDB)(nil) var _ vm.StateDB = (*MemoryStateDB)(nil)
var emptyCodeHash = crypto.Keccak256(nil) var (
emptyCodeHash = crypto.Keccak256(nil)
zeroAddr = common.Address{}
)
// MemoryStateDB implements geth's StateDB interface // MemoryStateDB implements geth's StateDB interface
// but operates on a core.Genesis so that a genesis.json // but operates on a core.Genesis so that a genesis.json
...@@ -28,7 +31,7 @@ type MemoryStateDB struct { ...@@ -28,7 +31,7 @@ type MemoryStateDB struct {
func NewMemoryStateDB(genesis *core.Genesis) *MemoryStateDB { func NewMemoryStateDB(genesis *core.Genesis) *MemoryStateDB {
if genesis == nil { if genesis == nil {
genesis = core.DeveloperGenesisBlock(15_000_000, common.Address{}) genesis = core.DeveloperGenesisBlock(15_000_000, &zeroAddr)
} }
return &MemoryStateDB{ return &MemoryStateDB{
......
...@@ -12,7 +12,7 @@ import ( ...@@ -12,7 +12,7 @@ import (
func TestMainShouldReturnErrorWhenConfigInvalid(t *testing.T) { func TestMainShouldReturnErrorWhenConfigInvalid(t *testing.T) {
cfg := &config.Config{} cfg := &config.Config{}
app, err := Main(context.Background(), testlog.Logger(t, log.LvlInfo), cfg) app, err := Main(context.Background(), testlog.Logger(t, log.LevelInfo), cfg)
require.ErrorIs(t, err, cfg.Check()) require.ErrorIs(t, err, cfg.Check())
require.Nil(t, app) require.Nil(t, app)
} }
...@@ -68,6 +68,6 @@ func run(ctx context.Context, args []string, action ConfiguredLifecycle) error { ...@@ -68,6 +68,6 @@ func run(ctx context.Context, args []string, action ConfiguredLifecycle) error {
func setupLogging(ctx *cli.Context) (log.Logger, error) { func setupLogging(ctx *cli.Context) (log.Logger, error) {
logCfg := oplog.ReadCLIConfig(ctx) logCfg := oplog.ReadCLIConfig(ctx)
logger := oplog.NewLogger(oplog.AppOut(ctx), logCfg) logger := oplog.NewLogger(oplog.AppOut(ctx), logCfg)
oplog.SetGlobalLogHandler(logger.GetHandler()) oplog.SetGlobalLogHandler(logger.Handler())
return logger, nil return logger, nil
} }
...@@ -72,7 +72,7 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) { ...@@ -72,7 +72,7 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) {
} }
func setupTestAgent(t *testing.T) (*Agent, *stubClaimLoader, *stubResponder) { func setupTestAgent(t *testing.T) (*Agent, *stubClaimLoader, *stubResponder) {
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
claimLoader := &stubClaimLoader{} claimLoader := &stubClaimLoader{}
depth := types.Depth(4) depth := types.Depth(4)
provider := alphabet.NewTraceProvider(big.NewInt(0), depth) provider := alphabet.NewTraceProvider(big.NewInt(0), depth)
......
...@@ -13,9 +13,7 @@ import ( ...@@ -13,9 +13,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
var ( var mockValidatorError = fmt.Errorf("mock validator error")
mockValidatorError = fmt.Errorf("mock validator error")
)
func TestProgressGame_LogErrorFromAct(t *testing.T) { func TestProgressGame_LogErrorFromAct(t *testing.T) {
handler, game, actor := setupProgressGameTest(t) handler, game, actor := setupProgressGameTest(t)
...@@ -23,14 +21,14 @@ func TestProgressGame_LogErrorFromAct(t *testing.T) { ...@@ -23,14 +21,14 @@ func TestProgressGame_LogErrorFromAct(t *testing.T) {
status := game.ProgressGame(context.Background()) status := game.ProgressGame(context.Background())
require.Equal(t, types.GameStatusInProgress, status) require.Equal(t, types.GameStatusInProgress, status)
require.Equal(t, 1, actor.callCount, "should perform next actions") require.Equal(t, 1, actor.callCount, "should perform next actions")
errLog := handler.FindLog(log.LvlError, "Error when acting on game") errLog := handler.FindLog(log.LevelError, "Error when acting on game")
require.NotNil(t, errLog, "should log error") require.NotNil(t, errLog, "should log error")
require.Equal(t, actor.actErr, errLog.GetContextValue("err")) require.Equal(t, actor.actErr, errLog.AttrValue("err"))
// Should still log game status // Should still log game status
msg := handler.FindLog(log.LvlInfo, "Game info") msg := handler.FindLog(log.LevelInfo, "Game info")
require.NotNil(t, msg) require.NotNil(t, msg)
require.Equal(t, uint64(1), msg.GetContextValue("claims")) require.Equal(t, uint64(1), msg.AttrValue("claims"))
} }
func TestProgressGame_LogGameStatus(t *testing.T) { func TestProgressGame_LogGameStatus(t *testing.T) {
...@@ -64,9 +62,9 @@ func TestProgressGame_LogGameStatus(t *testing.T) { ...@@ -64,9 +62,9 @@ func TestProgressGame_LogGameStatus(t *testing.T) {
status := game.ProgressGame(context.Background()) status := game.ProgressGame(context.Background())
require.Equal(t, 1, gameState.callCount, "should perform next actions") require.Equal(t, 1, gameState.callCount, "should perform next actions")
require.Equal(t, test.status, status) require.Equal(t, test.status, status)
errLog := handler.FindLog(log.LvlInfo, test.logMsg) errLog := handler.FindLog(log.LevelInfo, test.logMsg)
require.NotNil(t, errLog, "should log game result") require.NotNil(t, errLog, "should log game result")
require.Equal(t, test.status, errLog.GetContextValue("status")) require.Equal(t, test.status, errLog.AttrValue("status"))
}) })
} }
} }
...@@ -146,18 +144,14 @@ func (m *mockValidator) Validate(ctx context.Context) error { ...@@ -146,18 +144,14 @@ func (m *mockValidator) Validate(ctx context.Context) error {
} }
func setupProgressGameTest(t *testing.T) (*testlog.CapturingHandler, *GamePlayer, *stubGameState) { func setupProgressGameTest(t *testing.T) (*testlog.CapturingHandler, *GamePlayer, *stubGameState) {
logger := testlog.Logger(t, log.LvlDebug) logger, logs := testlog.CaptureLogger(t, log.LevelDebug)
handler := &testlog.CapturingHandler{
Delegate: logger.GetHandler(),
}
logger.SetHandler(handler)
gameState := &stubGameState{claimCount: 1} gameState := &stubGameState{claimCount: 1}
game := &GamePlayer{ game := &GamePlayer{
act: gameState.Act, act: gameState.Act,
loader: gameState, loader: gameState,
logger: logger, logger: logger,
} }
return handler, game, gameState return logs, game, gameState
} }
type stubGameState struct { type stubGameState struct {
......
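A minimal sketch of the new log-capturing test pattern used throughout this diff, assuming the testlog helpers shown here (CaptureLogger, FindLog, AttrValue); the package name, test body, and log message are illustrative only.

package example

import (
	"testing"

	"github.com/ethereum/go-ethereum/log"
	"github.com/stretchr/testify/require"

	"github.com/ethereum-optimism/optimism/op-service/testlog"
)

func TestCapturesLogs(t *testing.T) {
	// CaptureLogger replaces the old Capture/CapturingHandler wiring:
	// it returns both the logger and a handle to the captured records.
	logger, logs := testlog.CaptureLogger(t, log.LevelInfo)

	logger.Error("Error when acting on game", "err", "boom")

	errLog := logs.FindLog(log.LevelError, "Error when acting on game")
	require.NotNil(t, errLog, "should capture the error log")
	require.Equal(t, "boom", errLog.AttrValue("err"))
}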
...@@ -54,7 +54,7 @@ func TestDirectPreimageUploader_UploadPreimage(t *testing.T) { ...@@ -54,7 +54,7 @@ func TestDirectPreimageUploader_UploadPreimage(t *testing.T) {
} }
func newTestDirectPreimageUploader(t *testing.T) (*DirectPreimageUploader, *mockTxSender, *mockPreimageGameContract) { func newTestDirectPreimageUploader(t *testing.T) (*DirectPreimageUploader, *mockTxSender, *mockPreimageGameContract) {
logger := testlog.Logger(t, log.LvlError) logger := testlog.Logger(t, log.LevelError)
txMgr := &mockTxSender{} txMgr := &mockTxSender{}
contract := &mockPreimageGameContract{} contract := &mockPreimageGameContract{}
return NewDirectPreimageUploader(logger, txMgr, contract), txMgr, contract return NewDirectPreimageUploader(logger, txMgr, contract), txMgr, contract
......
...@@ -251,7 +251,7 @@ func TestLargePreimageUploader_UploadPreimage_Succeeds(t *testing.T) { ...@@ -251,7 +251,7 @@ func TestLargePreimageUploader_UploadPreimage_Succeeds(t *testing.T) {
} }
func newTestLargePreimageUploader(t *testing.T) (*LargePreimageUploader, *clock.AdvancingClock, *mockTxSender, *mockPreimageOracleContract) { func newTestLargePreimageUploader(t *testing.T) (*LargePreimageUploader, *clock.AdvancingClock, *mockTxSender, *mockPreimageOracleContract) {
logger := testlog.Logger(t, log.LvlError) logger := testlog.Logger(t, log.LevelError)
cl := clock.NewAdvancingClock(time.Second) cl := clock.NewAdvancingClock(time.Second)
cl.Start() cl.Start()
txSender := &mockTxSender{} txSender := &mockTxSender{}
......
...@@ -281,7 +281,7 @@ func TestPerformAction(t *testing.T) { ...@@ -281,7 +281,7 @@ func TestPerformAction(t *testing.T) {
} }
func newTestFaultResponder(t *testing.T) (*FaultResponder, *mockTxManager, *mockContract, *mockPreimageUploader, *mockOracle) { func newTestFaultResponder(t *testing.T) (*FaultResponder, *mockTxManager, *mockContract, *mockPreimageUploader, *mockOracle) {
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
mockTxMgr := &mockTxManager{} mockTxMgr := &mockTxManager{}
contract := &mockContract{} contract := &mockContract{}
uploader := &mockPreimageUploader{} uploader := &mockPreimageUploader{}
......
...@@ -140,10 +140,10 @@ func (e *Executor) generateProof(ctx context.Context, dir string, begin uint64, ...@@ -140,10 +140,10 @@ func (e *Executor) generateProof(ctx context.Context, dir string, begin uint64,
func runCmd(ctx context.Context, l log.Logger, binary string, args ...string) error { func runCmd(ctx context.Context, l log.Logger, binary string, args ...string) error {
cmd := exec.CommandContext(ctx, binary, args...) cmd := exec.CommandContext(ctx, binary, args...)
stdOut := oplog.NewWriter(l, log.LvlInfo) stdOut := oplog.NewWriter(l, log.LevelInfo)
defer stdOut.Close() defer stdOut.Close()
// Keep stdErr at info level because cannon uses stderr for progress messages // Keep stdErr at info level because cannon uses stderr for progress messages
stdErr := oplog.NewWriter(l, log.LvlInfo) stdErr := oplog.NewWriter(l, log.LevelInfo)
defer stdErr.Close() defer stdErr.Close()
cmd.Stdout = stdOut cmd.Stdout = stdOut
cmd.Stderr = stdErr cmd.Stderr = stdErr
......
...@@ -41,7 +41,7 @@ func TestGenerateProof(t *testing.T) { ...@@ -41,7 +41,7 @@ func TestGenerateProof(t *testing.T) {
} }
captureExec := func(t *testing.T, cfg config.Config, proofAt uint64) (string, string, map[string]string) { captureExec := func(t *testing.T, cfg config.Config, proofAt uint64) (string, string, map[string]string) {
m := &cannonDurationMetrics{} m := &cannonDurationMetrics{}
executor := NewExecutor(testlog.Logger(t, log.LvlInfo), m, &cfg, inputs) executor := NewExecutor(testlog.Logger(t, log.LevelInfo), m, &cfg, inputs)
executor.selectSnapshot = func(logger log.Logger, dir string, absolutePreState string, i uint64) (string, error) { executor.selectSnapshot = func(logger log.Logger, dir string, absolutePreState string, i uint64) (string, error) {
return input, nil return input, nil
} }
...@@ -135,15 +135,14 @@ func TestRunCmdLogsOutput(t *testing.T) { ...@@ -135,15 +135,14 @@ func TestRunCmdLogsOutput(t *testing.T) {
} }
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel() defer cancel()
logger := testlog.Logger(t, log.LvlInfo) logger, logs := testlog.CaptureLogger(t, log.LevelInfo)
logs := testlog.Capture(logger)
err := runCmd(ctx, logger, bin, "Hello World") err := runCmd(ctx, logger, bin, "Hello World")
require.NoError(t, err) require.NoError(t, err)
require.NotNil(t, logs.FindLog(log.LvlInfo, "Hello World")) require.NotNil(t, logs.FindLog(log.LevelInfo, "Hello World"))
} }
func TestFindStartingSnapshot(t *testing.T) { func TestFindStartingSnapshot(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
withSnapshots := func(t *testing.T, files ...string) string { withSnapshots := func(t *testing.T, files ...string) string {
dir := t.TempDir() dir := t.TempDir()
......
...@@ -235,7 +235,7 @@ func setupTestData(t *testing.T) (string, string) { ...@@ -235,7 +235,7 @@ func setupTestData(t *testing.T) (string, string) {
func setupWithTestData(t *testing.T, dataDir string, prestate string) (*CannonTraceProvider, *stubGenerator) { func setupWithTestData(t *testing.T, dataDir string, prestate string) (*CannonTraceProvider, *stubGenerator) {
generator := &stubGenerator{} generator := &stubGenerator{}
return &CannonTraceProvider{ return &CannonTraceProvider{
logger: testlog.Logger(t, log.LvlInfo), logger: testlog.Logger(t, log.LevelInfo),
dir: dataDir, dir: dataDir,
generator: generator, generator: generator,
prestate: filepath.Join(dataDir, prestate), prestate: filepath.Join(dataDir, prestate),
......
...@@ -121,7 +121,7 @@ func setupWithTestData(t *testing.T, prestateBlock, poststateBlock uint64, custo ...@@ -121,7 +121,7 @@ func setupWithTestData(t *testing.T, prestateBlock, poststateBlock uint64, custo
inputGameDepth = customGameDepth[0] inputGameDepth = customGameDepth[0]
} }
return &OutputTraceProvider{ return &OutputTraceProvider{
logger: testlog.Logger(t, log.LvlInfo), logger: testlog.Logger(t, log.LevelInfo),
rollupClient: &rollupClient, rollupClient: &rollupClient,
prestateBlock: prestateBlock, prestateBlock: prestateBlock,
poststateBlock: poststateBlock, poststateBlock: poststateBlock,
......
...@@ -134,7 +134,7 @@ func setupAdapterTest(t *testing.T, topDepth types.Depth) (split.ProviderCreator ...@@ -134,7 +134,7 @@ func setupAdapterTest(t *testing.T, topDepth types.Depth) (split.ProviderCreator
prestateProvider := &stubPrestateProvider{ prestateProvider := &stubPrestateProvider{
absolutePrestate: prestateOutputRoot, absolutePrestate: prestateOutputRoot,
} }
topProvider := NewTraceProviderFromInputs(testlog.Logger(t, log.LvlInfo), prestateProvider, rollupClient, topDepth, prestateBlock, poststateBlock) topProvider := NewTraceProviderFromInputs(testlog.Logger(t, log.LevelInfo), prestateProvider, rollupClient, topDepth, prestateBlock, poststateBlock)
adapter := OutputRootSplitAdapter(topProvider, creator.Create) adapter := OutputRootSplitAdapter(topProvider, creator.Create)
return adapter, creator return adapter, creator
} }
......
...@@ -38,7 +38,7 @@ func TestChallenge(t *testing.T) { ...@@ -38,7 +38,7 @@ func TestChallenge(t *testing.T) {
}, },
} }
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
t.Run("SendChallenges", func(t *testing.T) { t.Run("SendChallenges", func(t *testing.T) {
verifier, sender, oracle, challenger := setupChallengerTest(logger) verifier, sender, oracle, challenger := setupChallengerTest(logger)
...@@ -65,7 +65,7 @@ func TestChallenge(t *testing.T) { ...@@ -65,7 +65,7 @@ func TestChallenge(t *testing.T) {
}) })
t.Run("LogErrorWhenCreateTxFails", func(t *testing.T) { t.Run("LogErrorWhenCreateTxFails", func(t *testing.T) {
logs := testlog.Capture(logger) logger, logs := testlog.CaptureLogger(t, log.LevelInfo)
verifier, _, oracle, challenger := setupChallengerTest(logger) verifier, _, oracle, challenger := setupChallengerTest(logger)
verifier.challenges[preimages[1].LargePreimageIdent] = keccakTypes.Challenge{StateMatrix: keccakTypes.StateSnapshot{0x01}} verifier.challenges[preimages[1].LargePreimageIdent] = keccakTypes.Challenge{StateMatrix: keccakTypes.StateSnapshot{0x01}}
...@@ -73,12 +73,12 @@ func TestChallenge(t *testing.T) { ...@@ -73,12 +73,12 @@ func TestChallenge(t *testing.T) {
err := challenger.Challenge(context.Background(), common.Hash{0xaa}, oracle, preimages) err := challenger.Challenge(context.Background(), common.Hash{0xaa}, oracle, preimages)
require.NoError(t, err) require.NoError(t, err)
errLog := logs.FindLog(log.LvlError, "Failed to create challenge transaction") errLog := logs.FindLog(log.LevelError, "Failed to create challenge transaction")
require.ErrorIs(t, errLog.GetContextValue("err").(error), oracle.err) require.ErrorIs(t, errLog.AttrValue("err").(error), oracle.err)
}) })
t.Run("LogErrorWhenVerifierFails", func(t *testing.T) { t.Run("LogErrorWhenVerifierFails", func(t *testing.T) {
logs := testlog.Capture(logger) logger, logs := testlog.CaptureLogger(t, log.LevelInfo)
verifier, _, oracle, challenger := setupChallengerTest(logger) verifier, _, oracle, challenger := setupChallengerTest(logger)
verifier.challenges[preimages[1].LargePreimageIdent] = keccakTypes.Challenge{StateMatrix: keccakTypes.StateSnapshot{0x01}} verifier.challenges[preimages[1].LargePreimageIdent] = keccakTypes.Challenge{StateMatrix: keccakTypes.StateSnapshot{0x01}}
...@@ -86,22 +86,22 @@ func TestChallenge(t *testing.T) { ...@@ -86,22 +86,22 @@ func TestChallenge(t *testing.T) {
err := challenger.Challenge(context.Background(), common.Hash{0xaa}, oracle, preimages) err := challenger.Challenge(context.Background(), common.Hash{0xaa}, oracle, preimages)
require.NoError(t, err) require.NoError(t, err)
errLog := logs.FindLog(log.LvlError, "Failed to verify large preimage") errLog := logs.FindLog(log.LevelError, "Failed to verify large preimage")
require.ErrorIs(t, errLog.GetContextValue("err").(error), verifier.err) require.ErrorIs(t, errLog.AttrValue("err").(error), verifier.err)
}) })
t.Run("DoNotLogErrValid", func(t *testing.T) { t.Run("DoNotLogErrValid", func(t *testing.T) {
logs := testlog.Capture(logger) logger, logs := testlog.CaptureLogger(t, log.LevelInfo)
_, _, oracle, challenger := setupChallengerTest(logger) _, _, oracle, challenger := setupChallengerTest(logger)
// All preimages are valid // All preimages are valid
err := challenger.Challenge(context.Background(), common.Hash{0xaa}, oracle, preimages) err := challenger.Challenge(context.Background(), common.Hash{0xaa}, oracle, preimages)
require.NoError(t, err) require.NoError(t, err)
errLog := logs.FindLog(log.LvlError, "Failed to verify large preimage") errLog := logs.FindLog(log.LevelError, "Failed to verify large preimage")
require.Nil(t, errLog) require.Nil(t, errLog)
dbgLog := logs.FindLog(log.LvlDebug, "Preimage is valid") dbgLog := logs.FindLog(log.LevelDebug, "Preimage is valid")
require.NotNil(t, dbgLog) require.NotNil(t, dbgLog)
}) })
} }
......
...@@ -174,7 +174,7 @@ func setupFetcherTest(t *testing.T) (*InputFetcher, *stubOracle, *stubL1Source) ...@@ -174,7 +174,7 @@ func setupFetcherTest(t *testing.T) (*InputFetcher, *stubOracle, *stubL1Source)
txs: make(map[uint64]types.Transactions), txs: make(map[uint64]types.Transactions),
rcptStatus: make(map[common.Hash]uint64), rcptStatus: make(map[common.Hash]uint64),
} }
fetcher := NewPreimageFetcher(testlog.Logger(t, log.LvlTrace), l1Source) fetcher := NewPreimageFetcher(testlog.Logger(t, log.LevelTrace), l1Source)
return fetcher, oracle, l1Source return fetcher, oracle, l1Source
} }
......
...@@ -23,7 +23,7 @@ var stubChallengePeriod = uint64(3600) ...@@ -23,7 +23,7 @@ var stubChallengePeriod = uint64(3600)
func TestScheduleNextCheck(t *testing.T) { func TestScheduleNextCheck(t *testing.T) {
ctx := context.Background() ctx := context.Background()
currentTimestamp := uint64(1240) currentTimestamp := uint64(1240)
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
preimage1 := keccakTypes.LargePreimageMetaData{ // Incomplete so won't be verified preimage1 := keccakTypes.LargePreimageMetaData{ // Incomplete so won't be verified
LargePreimageIdent: keccakTypes.LargePreimageIdent{ LargePreimageIdent: keccakTypes.LargePreimageIdent{
Claimant: common.Address{0xab}, Claimant: common.Address{0xab},
......
...@@ -21,7 +21,7 @@ import ( ...@@ -21,7 +21,7 @@ import (
) )
func TestVerify(t *testing.T) { func TestVerify(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
tests := []struct { tests := []struct {
name string name string
inputs func() []keccakTypes.InputData inputs func() []keccakTypes.InputData
......
...@@ -181,7 +181,7 @@ func setupMonitorTest( ...@@ -181,7 +181,7 @@ func setupMonitorTest(
t *testing.T, t *testing.T,
allowedGames []common.Address, allowedGames []common.Address,
) (*gameMonitor, *stubGameSource, *stubScheduler, *mockNewHeadSource, *stubPreimageScheduler) { ) (*gameMonitor, *stubGameSource, *stubScheduler, *mockNewHeadSource, *stubPreimageScheduler) {
logger := testlog.Logger(t, log.LvlDebug) logger := testlog.Logger(t, log.LevelDebug)
source := &stubGameSource{} source := &stubGameSource{}
i := uint64(1) i := uint64(1)
fetchBlockNum := func(ctx context.Context) (uint64, error) { fetchBlockNum := func(ctx context.Context) (uint64, error) {
......
...@@ -332,7 +332,7 @@ func TestDropOldGameStates(t *testing.T) { ...@@ -332,7 +332,7 @@ func TestDropOldGameStates(t *testing.T) {
} }
func setupCoordinatorTest(t *testing.T, bufferSize int) (*coordinator, <-chan job, chan job, *createdGames, *stubDiskManager) { func setupCoordinatorTest(t *testing.T, bufferSize int) (*coordinator, <-chan job, chan job, *createdGames, *stubDiskManager) {
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
workQueue := make(chan job, bufferSize) workQueue := make(chan job, bufferSize)
resultQueue := make(chan job, bufferSize) resultQueue := make(chan job, bufferSize)
games := &createdGames{ games := &createdGames{
......
...@@ -14,7 +14,7 @@ import ( ...@@ -14,7 +14,7 @@ import (
) )
func TestSchedulerProcessesGames(t *testing.T) { func TestSchedulerProcessesGames(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
ctx := context.Background() ctx := context.Background()
createPlayer := func(g types.GameMetadata, dir string) (GamePlayer, error) { createPlayer := func(g types.GameMetadata, dir string) (GamePlayer, error) {
return &test.StubGamePlayer{}, nil return &test.StubGamePlayer{}, nil
...@@ -43,7 +43,7 @@ func TestSchedulerProcessesGames(t *testing.T) { ...@@ -43,7 +43,7 @@ func TestSchedulerProcessesGames(t *testing.T) {
} }
func TestReturnBusyWhenScheduleQueueFull(t *testing.T) { func TestReturnBusyWhenScheduleQueueFull(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
createPlayer := func(game types.GameMetadata, dir string) (GamePlayer, error) { createPlayer := func(game types.GameMetadata, dir string) (GamePlayer, error) {
return &test.StubGamePlayer{}, nil return &test.StubGamePlayer{}, nil
} }
......
...@@ -20,7 +20,7 @@ func TestSendAndWait(t *testing.T) { ...@@ -20,7 +20,7 @@ func TestSendAndWait(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel() defer cancel()
txMgr := &stubTxMgr{sending: make(map[byte]chan *types.Receipt)} txMgr := &stubTxMgr{sending: make(map[byte]chan *types.Receipt)}
sender := NewTxSender(ctx, testlog.Logger(t, log.LvlInfo), txMgr, 5) sender := NewTxSender(ctx, testlog.Logger(t, log.LevelInfo), txMgr, 5)
tx := func(i byte) txmgr.TxCandidate { tx := func(i byte) txmgr.TxCandidate {
return txmgr.TxCandidate{TxData: []byte{i}} return txmgr.TxCandidate{TxData: []byte{i}}
......
...@@ -44,7 +44,7 @@ func main() { ...@@ -44,7 +44,7 @@ func main() {
func OpConductorMain(ctx *cli.Context, closeApp context.CancelCauseFunc) (cliapp.Lifecycle, error) { func OpConductorMain(ctx *cli.Context, closeApp context.CancelCauseFunc) (cliapp.Lifecycle, error) {
logCfg := oplog.ReadCLIConfig(ctx) logCfg := oplog.ReadCLIConfig(ctx)
log := oplog.NewLogger(oplog.AppOut(ctx), logCfg) log := oplog.NewLogger(oplog.AppOut(ctx), logCfg)
oplog.SetGlobalLogHandler(log.GetHandler()) oplog.SetGlobalLogHandler(log.Handler())
opservice.ValidateEnvVars(flags.EnvVarPrefix, flags.Flags, log) opservice.ValidateEnvVars(flags.EnvVarPrefix, flags.Flags, log)
cfg, err := conductor.NewConfig(ctx, log) cfg, err := conductor.NewConfig(ctx, log)
......
...@@ -100,7 +100,7 @@ type OpConductorTestSuite struct { ...@@ -100,7 +100,7 @@ type OpConductorTestSuite struct {
func (s *OpConductorTestSuite) SetupSuite() { func (s *OpConductorTestSuite) SetupSuite() {
s.ctx = context.Background() s.ctx = context.Background()
s.log = testlog.Logger(s.T(), log.LvlDebug) s.log = testlog.Logger(s.T(), log.LevelDebug)
s.cfg = mockConfig(s.T()) s.cfg = mockConfig(s.T())
s.version = "v0.0.1" s.version = "v0.0.1"
s.next = make(chan struct{}, 1) s.next = make(chan struct{}, 1)
......
...@@ -17,7 +17,7 @@ import ( ...@@ -17,7 +17,7 @@ import (
) )
func TestCommitAndRead(t *testing.T) { func TestCommitAndRead(t *testing.T) {
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
serverID := "SequencerA" serverID := "SequencerA"
serverAddr := "127.0.0.1:0" serverAddr := "127.0.0.1:0"
bootstrap := true bootstrap := true
......
...@@ -33,7 +33,7 @@ type HealthMonitorTestSuite struct { ...@@ -33,7 +33,7 @@ type HealthMonitorTestSuite struct {
} }
func (s *HealthMonitorTestSuite) SetupSuite() { func (s *HealthMonitorTestSuite) SetupSuite() {
s.log = testlog.Logger(s.T(), log.LvlDebug) s.log = testlog.Logger(s.T(), log.LevelDebug)
s.interval = 1 s.interval = 1
s.minPeerCount = minPeerCount s.minPeerCount = minPeerCount
s.rollupCfg = &rollup.Config{ s.rollupCfg = &rollup.Config{
......
...@@ -64,6 +64,6 @@ func run(ctx context.Context, args []string, action ConfiguredLifecycle) error { ...@@ -64,6 +64,6 @@ func run(ctx context.Context, args []string, action ConfiguredLifecycle) error {
func setupLogging(ctx *cli.Context) (log.Logger, error) { func setupLogging(ctx *cli.Context) (log.Logger, error) {
logCfg := oplog.ReadCLIConfig(ctx) logCfg := oplog.ReadCLIConfig(ctx)
logger := oplog.NewLogger(oplog.AppOut(ctx), logCfg) logger := oplog.NewLogger(oplog.AppOut(ctx), logCfg)
oplog.SetGlobalLogHandler(logger.GetHandler()) oplog.SetGlobalLogHandler(logger.Handler())
return logger, nil return logger, nil
} }
...@@ -51,7 +51,7 @@ func BatchInLastPossibleBlocks(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -51,7 +51,7 @@ func BatchInLastPossibleBlocks(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp.DeployConfig.L2BlockTime = 2 dp.DeployConfig.L2BlockTime = 2
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
sd, _, miner, sequencer, sequencerEngine, _, _, batcher := setupReorgTestActors(t, dp, sd, log) sd, _, miner, sequencer, sequencerEngine, _, _, batcher := setupReorgTestActors(t, dp, sd, log)
...@@ -163,7 +163,7 @@ func LargeL1Gaps(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -163,7 +163,7 @@ func LargeL1Gaps(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp.DeployConfig.MaxSequencerDrift = 32 dp.DeployConfig.MaxSequencerDrift = 32
applyDeltaTimeOffset(dp, deltaTimeOffset) applyDeltaTimeOffset(dp, deltaTimeOffset)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
sd, _, miner, sequencer, sequencerEngine, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log) sd, _, miner, sequencer, sequencerEngine, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
......
...@@ -22,7 +22,7 @@ func TestDencunL1ForkAfterGenesis(gt *testing.T) { ...@@ -22,7 +22,7 @@ func TestDencunL1ForkAfterGenesis(gt *testing.T) {
offset := hexutil.Uint64(24) offset := hexutil.Uint64(24)
dp.DeployConfig.L1CancunTimeOffset = &offset dp.DeployConfig.L1CancunTimeOffset = &offset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log) _, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
l1Head := miner.l1Chain.CurrentBlock() l1Head := miner.l1Chain.CurrentBlock()
...@@ -65,7 +65,7 @@ func TestDencunL1ForkAtGenesis(gt *testing.T) { ...@@ -65,7 +65,7 @@ func TestDencunL1ForkAtGenesis(gt *testing.T) {
offset := hexutil.Uint64(0) offset := hexutil.Uint64(0)
dp.DeployConfig.L1CancunTimeOffset = &offset dp.DeployConfig.L1CancunTimeOffset = &offset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log) _, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
l1Head := miner.l1Chain.CurrentBlock() l1Head := miner.l1Chain.CurrentBlock()
...@@ -130,7 +130,7 @@ func TestDencunL2ForkAfterGenesis(gt *testing.T) { ...@@ -130,7 +130,7 @@ func TestDencunL2ForkAfterGenesis(gt *testing.T) {
dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
_, _, _, sequencer, engine, verifier, _, _ := setupReorgTestActors(t, dp, sd, log) _, _, _, sequencer, engine, verifier, _, _ := setupReorgTestActors(t, dp, sd, log)
// start op-nodes // start op-nodes
...@@ -167,7 +167,7 @@ func TestDencunL2ForkAtGenesis(gt *testing.T) { ...@@ -167,7 +167,7 @@ func TestDencunL2ForkAtGenesis(gt *testing.T) {
dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
_, _, _, sequencer, engine, verifier, _, _ := setupReorgTestActors(t, dp, sd, log) _, _, _, sequencer, engine, verifier, _, _ := setupReorgTestActors(t, dp, sd, log)
// start op-nodes // start op-nodes
...@@ -209,7 +209,7 @@ func TestDencunBlobTxRPC(gt *testing.T) { ...@@ -209,7 +209,7 @@ func TestDencunBlobTxRPC(gt *testing.T) {
dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
engine := newEngine(t, sd, log) engine := newEngine(t, sd, log)
cl := engine.EthClient() cl := engine.EthClient()
tx := aliceSimpleBlobTx(t, dp) tx := aliceSimpleBlobTx(t, dp)
...@@ -228,7 +228,7 @@ func TestDencunBlobTxInTxPool(gt *testing.T) { ...@@ -228,7 +228,7 @@ func TestDencunBlobTxInTxPool(gt *testing.T) {
dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
engine := newEngine(t, sd, log) engine := newEngine(t, sd, log)
tx := aliceSimpleBlobTx(t, dp) tx := aliceSimpleBlobTx(t, dp)
errs := engine.eth.TxPool().Add([]*types.Transaction{tx}, true, true) errs := engine.eth.TxPool().Add([]*types.Transaction{tx}, true, true)
...@@ -246,7 +246,7 @@ func TestDencunBlobTxInclusion(gt *testing.T) { ...@@ -246,7 +246,7 @@ func TestDencunBlobTxInclusion(gt *testing.T) {
dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
_, engine, sequencer := setupSequencerTest(t, sd, log) _, engine, sequencer := setupSequencerTest(t, sd, log)
sequencer.ActL2PipelineFull(t) sequencer.ActL2PipelineFull(t)
......
...@@ -55,7 +55,7 @@ func TestEcotoneNetworkUpgradeTransactions(gt *testing.T) { ...@@ -55,7 +55,7 @@ func TestEcotoneNetworkUpgradeTransactions(gt *testing.T) {
require.NoError(t, dp.DeployConfig.Check(), "must have valid config") require.NoError(t, dp.DeployConfig.Check(), "must have valid config")
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
_, _, miner, sequencer, engine, verifier, _, _ := setupReorgTestActors(t, dp, sd, log) _, _, miner, sequencer, engine, verifier, _, _ := setupReorgTestActors(t, dp, sd, log)
ethCl := engine.EthClient() ethCl := engine.EthClient()
...@@ -242,7 +242,7 @@ func TestEcotoneBeforeL1(gt *testing.T) { ...@@ -242,7 +242,7 @@ func TestEcotoneBeforeL1(gt *testing.T) {
dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
_, _, _, sequencer, engine, verifier, _, _ := setupReorgTestActors(t, dp, sd, log) _, _, _, sequencer, engine, verifier, _, _ := setupReorgTestActors(t, dp, sd, log)
// start op-nodes // start op-nodes
......
...@@ -45,7 +45,7 @@ func setupBatcher(t Testing, log log.Logger, sd *e2eutils.SetupData, dp *e2eutil ...@@ -45,7 +45,7 @@ func setupBatcher(t Testing, log log.Logger, sd *e2eutils.SetupData, dp *e2eutil
func TestEIP4844DataAvailability(gt *testing.T) { func TestEIP4844DataAvailability(gt *testing.T) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
sd, dp, miner, sequencer, seqEngine, verifier, _ := setupEIP4844Test(t, log) sd, dp, miner, sequencer, seqEngine, verifier, _ := setupEIP4844Test(t, log)
batcher := setupBatcher(t, log, sd, dp, miner, sequencer, seqEngine, batcherFlags.BlobsType) batcher := setupBatcher(t, log, sd, dp, miner, sequencer, seqEngine, batcherFlags.BlobsType)
...@@ -84,7 +84,7 @@ func TestEIP4844DataAvailability(gt *testing.T) { ...@@ -84,7 +84,7 @@ func TestEIP4844DataAvailability(gt *testing.T) {
func TestEIP4844DataAvailabilitySwitch(gt *testing.T) { func TestEIP4844DataAvailabilitySwitch(gt *testing.T) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
sd, dp, miner, sequencer, seqEngine, verifier, _ := setupEIP4844Test(t, log) sd, dp, miner, sequencer, seqEngine, verifier, _ := setupEIP4844Test(t, log)
oldBatcher := setupBatcher(t, log, sd, dp, miner, sequencer, seqEngine, batcherFlags.CalldataType) oldBatcher := setupBatcher(t, log, sd, dp, miner, sequencer, seqEngine, batcherFlags.CalldataType)
......
...@@ -17,7 +17,7 @@ func TestL1Miner_BuildBlock(gt *testing.T) { ...@@ -17,7 +17,7 @@ func TestL1Miner_BuildBlock(gt *testing.T) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
miner := NewL1Miner(t, log, sd.L1Cfg) miner := NewL1Miner(t, log, sd.L1Cfg)
t.Cleanup(func() { t.Cleanup(func() {
_ = miner.Close() _ = miner.Close()
......
...@@ -34,7 +34,7 @@ func TestL1Replica_ActL1RPCFail(gt *testing.T) { ...@@ -34,7 +34,7 @@ func TestL1Replica_ActL1RPCFail(gt *testing.T) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
replica := NewL1Replica(t, log, sd.L1Cfg) replica := NewL1Replica(t, log, sd.L1Cfg)
t.Cleanup(func() { t.Cleanup(func() {
_ = replica.Close() _ = replica.Close()
...@@ -56,7 +56,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) { ...@@ -56,7 +56,7 @@ func TestL1Replica_ActL1Sync(gt *testing.T) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
genesisBlock := sd.L1Cfg.ToBlock() genesisBlock := sd.L1Cfg.ToBlock()
consensus := beacon.New(ethash.NewFaker()) consensus := beacon.New(ethash.NewFaker())
db := rawdb.NewMemoryDatabase() db := rawdb.NewMemoryDatabase()
......
...@@ -83,7 +83,7 @@ func NormalBatcher(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -83,7 +83,7 @@ func NormalBatcher(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp := e2eutils.MakeDeployParams(t, p) dp := e2eutils.MakeDeployParams(t, p)
applyDeltaTimeOffset(dp, deltaTimeOffset) applyDeltaTimeOffset(dp, deltaTimeOffset)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{}) verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{})
...@@ -152,7 +152,7 @@ func L2Finalization(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -152,7 +152,7 @@ func L2Finalization(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
applyDeltaTimeOffset(dp, deltaTimeOffset) applyDeltaTimeOffset(dp, deltaTimeOffset)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
miner, engine, sequencer := setupSequencerTest(t, sd, log) miner, engine, sequencer := setupSequencerTest(t, sd, log)
sequencer.ActL2PipelineFull(t) sequencer.ActL2PipelineFull(t)
...@@ -256,7 +256,7 @@ func L2FinalizationWithSparseL1(gt *testing.T, deltaTimeOffset *hexutil.Uint64) ...@@ -256,7 +256,7 @@ func L2FinalizationWithSparseL1(gt *testing.T, deltaTimeOffset *hexutil.Uint64)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
applyDeltaTimeOffset(dp, deltaTimeOffset) applyDeltaTimeOffset(dp, deltaTimeOffset)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
miner, engine, sequencer := setupSequencerTest(t, sd, log) miner, engine, sequencer := setupSequencerTest(t, sd, log)
sequencer.ActL2PipelineFull(t) sequencer.ActL2PipelineFull(t)
...@@ -314,7 +314,7 @@ func GarbageBatch(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -314,7 +314,7 @@ func GarbageBatch(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
applyDeltaTimeOffset(dp, deltaTimeOffset) applyDeltaTimeOffset(dp, deltaTimeOffset)
for _, garbageKind := range GarbageKinds { for _, garbageKind := range GarbageKinds {
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
miner, engine, sequencer := setupSequencerTest(t, sd, log) miner, engine, sequencer := setupSequencerTest(t, sd, log)
_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{}) _, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{})
...@@ -394,7 +394,7 @@ func ExtendedTimeWithoutL1Batches(gt *testing.T, deltaTimeOffset *hexutil.Uint64 ...@@ -394,7 +394,7 @@ func ExtendedTimeWithoutL1Batches(gt *testing.T, deltaTimeOffset *hexutil.Uint64
dp := e2eutils.MakeDeployParams(t, p) dp := e2eutils.MakeDeployParams(t, p)
applyDeltaTimeOffset(dp, deltaTimeOffset) applyDeltaTimeOffset(dp, deltaTimeOffset)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
miner, engine, sequencer := setupSequencerTest(t, sd, log) miner, engine, sequencer := setupSequencerTest(t, sd, log)
_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{}) _, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{})
...@@ -450,7 +450,7 @@ func BigL2Txs(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -450,7 +450,7 @@ func BigL2Txs(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp := e2eutils.MakeDeployParams(t, p) dp := e2eutils.MakeDeployParams(t, p)
applyDeltaTimeOffset(dp, deltaTimeOffset) applyDeltaTimeOffset(dp, deltaTimeOffset)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
miner, engine, sequencer := setupSequencerTest(t, sd, log) miner, engine, sequencer := setupSequencerTest(t, sd, log)
_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{}) _, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{})
......
...@@ -30,7 +30,7 @@ func TestL2EngineAPI(gt *testing.T) { ...@@ -30,7 +30,7 @@ func TestL2EngineAPI(gt *testing.T) {
jwtPath := e2eutils.WriteDefaultJWT(t) jwtPath := e2eutils.WriteDefaultJWT(t)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
genesisBlock := sd.L2Cfg.ToBlock() genesisBlock := sd.L2Cfg.ToBlock()
consensus := beacon.New(ethash.NewFaker()) consensus := beacon.New(ethash.NewFaker())
db := rawdb.NewMemoryDatabase() db := rawdb.NewMemoryDatabase()
...@@ -94,7 +94,7 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) { ...@@ -94,7 +94,7 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
jwtPath := e2eutils.WriteDefaultJWT(t) jwtPath := e2eutils.WriteDefaultJWT(t)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
genesisBlock := sd.L2Cfg.ToBlock() genesisBlock := sd.L2Cfg.ToBlock()
db := rawdb.NewMemoryDatabase() db := rawdb.NewMemoryDatabase()
tdb := trie.NewDatabase(db, &trie.Config{HashDB: hashdb.Defaults}) tdb := trie.NewDatabase(db, &trie.Config{HashDB: hashdb.Defaults})
...@@ -189,7 +189,7 @@ func TestL2EngineAPIFail(gt *testing.T) { ...@@ -189,7 +189,7 @@ func TestL2EngineAPIFail(gt *testing.T) {
jwtPath := e2eutils.WriteDefaultJWT(t) jwtPath := e2eutils.WriteDefaultJWT(t)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
engine := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath) engine := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath)
// mock an RPC failure // mock an RPC failure
engine.ActL2RPCFail(t) engine.ActL2RPCFail(t)
......
...@@ -44,7 +44,7 @@ func RunProposerTest(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -44,7 +44,7 @@ func RunProposerTest(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
rollupSeqCl := sequencer.RollupClient() rollupSeqCl := sequencer.RollupClient()
......
...@@ -61,7 +61,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) { ...@@ -61,7 +61,7 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
} }
dp := e2eutils.MakeDeployParams(t, p) dp := e2eutils.MakeDeployParams(t, p)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
miner, engine, sequencer := setupSequencerTest(t, sd, log) miner, engine, sequencer := setupSequencerTest(t, sd, log)
miner.ActL1SetFeeRecipient(common.Address{'A'}) miner.ActL1SetFeeRecipient(common.Address{'A'})
...@@ -129,7 +129,7 @@ func TestL2Sequencer_SequencerOnlyReorg(gt *testing.T) { ...@@ -129,7 +129,7 @@ func TestL2Sequencer_SequencerOnlyReorg(gt *testing.T) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
miner, _, sequencer := setupSequencerTest(t, sd, log) miner, _, sequencer := setupSequencerTest(t, sd, log)
// Sequencer at first only recognizes the genesis as safe. // Sequencer at first only recognizes the genesis as safe.
......
...@@ -38,7 +38,7 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) { ...@@ -38,7 +38,7 @@ func TestL2Verifier_SequenceWindow(gt *testing.T) {
} }
dp := e2eutils.MakeDeployParams(t, p) dp := e2eutils.MakeDeployParams(t, p)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
miner, engine, verifier := setupVerifierOnlyTest(t, sd, log) miner, engine, verifier := setupVerifierOnlyTest(t, sd, log)
miner.ActL1SetFeeRecipient(common.Address{'A'}) miner.ActL1SetFeeRecipient(common.Address{'A'})
......
...@@ -27,7 +27,7 @@ func setupReorgTest(t Testing, config *e2eutils.TestParams, deltaTimeOffset *hex ...@@ -27,7 +27,7 @@ func setupReorgTest(t Testing, config *e2eutils.TestParams, deltaTimeOffset *hex
dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
return setupReorgTestActors(t, dp, sd, log) return setupReorgTestActors(t, dp, sd, log)
} }
...@@ -381,7 +381,7 @@ func DeepReorg(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -381,7 +381,7 @@ func DeepReorg(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
} }
// Set up alice // Set up alice
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
addresses := e2eutils.CollectAddresses(sd, dp) addresses := e2eutils.CollectAddresses(sd, dp)
l2UserEnv := &BasicUserEnv[*L2Bindings]{ l2UserEnv := &BasicUserEnv[*L2Bindings]{
EthCl: l2Client, EthCl: l2Client,
...@@ -606,7 +606,7 @@ func RestartOpGeth(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -606,7 +606,7 @@ func RestartOpGeth(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
jwtPath := e2eutils.WriteDefaultJWT(t) jwtPath := e2eutils.WriteDefaultJWT(t)
// L1 // L1
miner := NewL1Miner(t, log, sd.L1Cfg) miner := NewL1Miner(t, log, sd.L1Cfg)
...@@ -694,7 +694,7 @@ func ConflictingL2Blocks(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -694,7 +694,7 @@ func ConflictingL2Blocks(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
sd, _, miner, sequencer, seqEng, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log) sd, _, miner, sequencer, seqEng, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
...@@ -806,7 +806,7 @@ func SyncAfterReorg(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -806,7 +806,7 @@ func SyncAfterReorg(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
} }
sd, dp, miner, sequencer, seqEngine, verifier, _, batcher := setupReorgTest(t, &testingParams, deltaTimeOffset) sd, dp, miner, sequencer, seqEngine, verifier, _, batcher := setupReorgTest(t, &testingParams, deltaTimeOffset)
l2Client := seqEngine.EthClient() l2Client := seqEngine.EthClient()
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
addresses := e2eutils.CollectAddresses(sd, dp) addresses := e2eutils.CollectAddresses(sd, dp)
l2UserEnv := &BasicUserEnv[*L2Bindings]{ l2UserEnv := &BasicUserEnv[*L2Bindings]{
EthCl: l2Client, EthCl: l2Client,
......
...@@ -40,7 +40,7 @@ func TestDropSpanBatchBeforeHardfork(gt *testing.T) { ...@@ -40,7 +40,7 @@ func TestDropSpanBatchBeforeHardfork(gt *testing.T) {
// do not activate Delta hardfork for verifier // do not activate Delta hardfork for verifier
dp.DeployConfig.L2GenesisDeltaTimeOffset = nil dp.DeployConfig.L2GenesisDeltaTimeOffset = nil
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{}) verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{})
rollupSeqCl := sequencer.RollupClient() rollupSeqCl := sequencer.RollupClient()
...@@ -131,7 +131,7 @@ func TestHardforkMiddleOfSpanBatch(gt *testing.T) { ...@@ -131,7 +131,7 @@ func TestHardforkMiddleOfSpanBatch(gt *testing.T) {
deltaOffset := hexutil.Uint64(6) deltaOffset := hexutil.Uint64(6)
dp.DeployConfig.L2GenesisDeltaTimeOffset = &deltaOffset dp.DeployConfig.L2GenesisDeltaTimeOffset = &deltaOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{}) verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{})
minerCl := miner.EthClient() minerCl := miner.EthClient()
...@@ -239,7 +239,7 @@ func TestAcceptSingularBatchAfterHardfork(gt *testing.T) { ...@@ -239,7 +239,7 @@ func TestAcceptSingularBatchAfterHardfork(gt *testing.T) {
// activate Delta hardfork for verifier. // activate Delta hardfork for verifier.
dp.DeployConfig.L2GenesisDeltaTimeOffset = &minTs dp.DeployConfig.L2GenesisDeltaTimeOffset = &minTs
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{}) verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{})
rollupSeqCl := sequencer.RollupClient() rollupSeqCl := sequencer.RollupClient()
...@@ -325,7 +325,7 @@ func TestMixOfBatchesAfterHardfork(gt *testing.T) { ...@@ -325,7 +325,7 @@ func TestMixOfBatchesAfterHardfork(gt *testing.T) {
// Activate Delta hardfork for verifier. // Activate Delta hardfork for verifier.
dp.DeployConfig.L2GenesisDeltaTimeOffset = &minTs dp.DeployConfig.L2GenesisDeltaTimeOffset = &minTs
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{}) verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{})
rollupSeqCl := sequencer.RollupClient() rollupSeqCl := sequencer.RollupClient()
...@@ -415,7 +415,7 @@ func TestSpanBatchEmptyChain(gt *testing.T) { ...@@ -415,7 +415,7 @@ func TestSpanBatchEmptyChain(gt *testing.T) {
// Activate Delta hardfork // Activate Delta hardfork
dp.DeployConfig.L2GenesisDeltaTimeOffset = &minTs dp.DeployConfig.L2GenesisDeltaTimeOffset = &minTs
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{}) _, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{})
...@@ -478,7 +478,7 @@ func TestSpanBatchLowThroughputChain(gt *testing.T) { ...@@ -478,7 +478,7 @@ func TestSpanBatchLowThroughputChain(gt *testing.T) {
// Activate Delta hardfork // Activate Delta hardfork
dp.DeployConfig.L2GenesisDeltaTimeOffset = &minTs dp.DeployConfig.L2GenesisDeltaTimeOffset = &minTs
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{}) _, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), &sync.Config{})
...@@ -575,7 +575,7 @@ func TestSpanBatchLowThroughputChain(gt *testing.T) { ...@@ -575,7 +575,7 @@ func TestSpanBatchLowThroughputChain(gt *testing.T) {
func TestBatchEquivalence(gt *testing.T) { func TestBatchEquivalence(gt *testing.T) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
p := &e2eutils.TestParams{ p := &e2eutils.TestParams{
MaxSequencerDrift: 20, // larger than L1 block time we simulate in this test (12) MaxSequencerDrift: 20, // larger than L1 block time we simulate in this test (12)
......
...@@ -54,7 +54,7 @@ func DerivationWithFlakyL1RPC(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -54,7 +54,7 @@ func DerivationWithFlakyL1RPC(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) // mute all the temporary derivation errors that we forcefully create log := testlog.Logger(t, log.LevelError) // mute all the temporary derivation errors that we forcefully create
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log) _, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
rng := rand.New(rand.NewSource(1234)) rng := rand.New(rand.NewSource(1234))
...@@ -94,7 +94,7 @@ func FinalizeWhileSyncing(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -94,7 +94,7 @@ func FinalizeWhileSyncing(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) // mute all the temporary derivation errors that we forcefully create log := testlog.Logger(t, log.LevelError) // mute all the temporary derivation errors that we forcefully create
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log) _, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
sequencer.ActL2PipelineFull(t) sequencer.ActL2PipelineFull(t)
...@@ -138,7 +138,7 @@ func TestUnsafeSync(gt *testing.T) { ...@@ -138,7 +138,7 @@ func TestUnsafeSync(gt *testing.T) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
sd, _, _, sequencer, seqEng, verifier, _, _ := setupReorgTestActors(t, dp, sd, log) sd, _, _, sequencer, seqEng, verifier, _, _ := setupReorgTestActors(t, dp, sd, log)
seqEngCl, err := sources.NewEngineClient(seqEng.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg)) seqEngCl, err := sources.NewEngineClient(seqEng.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
...@@ -168,7 +168,7 @@ func TestELSync(gt *testing.T) { ...@@ -168,7 +168,7 @@ func TestELSync(gt *testing.T) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
miner, seqEng, sequencer := setupSequencerTest(t, sd, log) miner, seqEng, sequencer := setupSequencerTest(t, sd, log)
// Enable engine P2P sync // Enable engine P2P sync
...@@ -228,7 +228,7 @@ func TestInvalidPayloadInSpanBatch(gt *testing.T) { ...@@ -228,7 +228,7 @@ func TestInvalidPayloadInSpanBatch(gt *testing.T) {
dp.DeployConfig.L2GenesisDeltaTimeOffset = &minTs dp.DeployConfig.L2GenesisDeltaTimeOffset = &minTs
dp.DeployConfig.L2BlockTime = 2 dp.DeployConfig.L2BlockTime = 2
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
_, _, miner, sequencer, seqEng, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log) _, _, miner, sequencer, seqEng, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
l2Cl := seqEng.EthClient() l2Cl := seqEng.EthClient()
rng := rand.New(rand.NewSource(1234)) rng := rand.New(rand.NewSource(1234))
...@@ -351,7 +351,7 @@ func TestSpanBatchAtomicity_Consolidation(gt *testing.T) { ...@@ -351,7 +351,7 @@ func TestSpanBatchAtomicity_Consolidation(gt *testing.T) {
dp.DeployConfig.L2GenesisDeltaTimeOffset = &minTs dp.DeployConfig.L2GenesisDeltaTimeOffset = &minTs
dp.DeployConfig.L2BlockTime = 2 dp.DeployConfig.L2BlockTime = 2
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
_, _, miner, sequencer, seqEng, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log) _, _, miner, sequencer, seqEng, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
seqEngCl, err := sources.NewEngineClient(seqEng.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg)) seqEngCl, err := sources.NewEngineClient(seqEng.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
require.NoError(t, err) require.NoError(t, err)
...@@ -410,7 +410,7 @@ func TestSpanBatchAtomicity_ForceAdvance(gt *testing.T) { ...@@ -410,7 +410,7 @@ func TestSpanBatchAtomicity_ForceAdvance(gt *testing.T) {
dp.DeployConfig.L2GenesisDeltaTimeOffset = &minTs dp.DeployConfig.L2GenesisDeltaTimeOffset = &minTs
dp.DeployConfig.L2BlockTime = 2 dp.DeployConfig.L2BlockTime = 2
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log) _, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
targetHeadNumber := uint64(6) // L1 block time / L2 block time targetHeadNumber := uint64(6) // L1 block time / L2 block time
......
...@@ -55,7 +55,7 @@ func BatcherKeyRotation(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -55,7 +55,7 @@ func BatcherKeyRotation(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp.DeployConfig.L2BlockTime = 2 dp.DeployConfig.L2BlockTime = 2
dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
miner.ActL1SetFeeRecipient(common.Address{'A'}) miner.ActL1SetFeeRecipient(common.Address{'A'})
sequencer.ActL2PipelineFull(t) sequencer.ActL2PipelineFull(t)
...@@ -229,7 +229,7 @@ func GPOParamsChange(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -229,7 +229,7 @@ func GPOParamsChange(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
batcher := NewL2Batcher(log, sd.RollupCfg, DefaultBatcherCfg(dp), batcher := NewL2Batcher(log, sd.RollupCfg, DefaultBatcherCfg(dp),
sequencer.RollupClient(), miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg)) sequencer.RollupClient(), miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
...@@ -356,7 +356,7 @@ func GasLimitChange(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -356,7 +356,7 @@ func GasLimitChange(gt *testing.T, deltaTimeOffset *hexutil.Uint64) {
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset dp.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
batcher := NewL2Batcher(log, sd.RollupCfg, DefaultBatcherCfg(dp), batcher := NewL2Batcher(log, sd.RollupCfg, DefaultBatcherCfg(dp),
sequencer.RollupClient(), miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg)) sequencer.RollupClient(), miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg))
......
...@@ -123,7 +123,7 @@ func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) { ...@@ -123,7 +123,7 @@ func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) {
} }
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
require.Equal(t, dp.Secrets.Addresses().Batcher, dp.DeployConfig.BatchSenderAddress) require.Equal(t, dp.Secrets.Addresses().Batcher, dp.DeployConfig.BatchSenderAddress)
require.Equal(t, dp.Secrets.Addresses().Proposer, dp.DeployConfig.L2OutputOracleProposer) require.Equal(t, dp.Secrets.Addresses().Proposer, dp.DeployConfig.L2OutputOracleProposer)
......
...@@ -31,7 +31,7 @@ func TestERC20BridgeDeposits(t *testing.T) { ...@@ -31,7 +31,7 @@ func TestERC20BridgeDeposits(t *testing.T) {
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time) log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time)
l1Client := sys.Clients["l1"] l1Client := sys.Clients["l1"]
......
...@@ -10,6 +10,8 @@ import ( ...@@ -10,6 +10,8 @@ import (
"testing" "testing"
"time" "time"
"golang.org/x/exp/slog"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
...@@ -20,6 +22,17 @@ import ( ...@@ -20,6 +22,17 @@ import (
oplog "github.com/ethereum-optimism/optimism/op-service/log" oplog "github.com/ethereum-optimism/optimism/op-service/log"
) )
// legacy geth log levels - the geth command line --verbosity flag wasn't
// migrated to use slog's numerical levels.
const (
LegacyLevelCrit = iota
LegacyLevelError
LegacyLevelWarn
LegacyLevelInfo
LegacyLevelDebug
LegacyLevelTrace
)
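The constants above reproduce geth's pre-slog verbosity numbering (0 = crit through 5 = trace). A hedged sketch of converting such a value, using the log.FromLegacyLevel helper that the init() change further below relies on:

package example

import (
	"golang.org/x/exp/slog"

	"github.com/ethereum/go-ethereum/log"
)

// levelFromVerbosity maps a legacy --verbosity integer to an slog level,
// e.g. LegacyLevelInfo (3) becomes log.LevelInfo.
func levelFromVerbosity(verbosity int) slog.Level {
	return log.FromLegacyLevel(verbosity)
}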
var ( var (
// All of the following variables are set in the init function // All of the following variables are set in the init function
// and read from JSON files on disk that are generated by the // and read from JSON files on disk that are generated by the
...@@ -39,7 +52,7 @@ var ( ...@@ -39,7 +52,7 @@ var (
// ExternalL2TestParms is additional metadata for executing external L2 // ExternalL2TestParms is additional metadata for executing external L2
// tests. // tests.
ExternalL2TestParms external.TestParms ExternalL2TestParms external.TestParms
// EthNodeVerbosity is the level of verbosity to output // EthNodeVerbosity is the (legacy geth) level of verbosity to output
EthNodeVerbosity int EthNodeVerbosity int
) )
...@@ -63,26 +76,29 @@ func init() { ...@@ -63,26 +76,29 @@ func init() {
flag.StringVar(&l1DeploymentsPath, "l1-deployments", defaultL1DeploymentsPath, "") flag.StringVar(&l1DeploymentsPath, "l1-deployments", defaultL1DeploymentsPath, "")
flag.StringVar(&deployConfigPath, "deploy-config", defaultDeployConfigPath, "") flag.StringVar(&deployConfigPath, "deploy-config", defaultDeployConfigPath, "")
flag.StringVar(&externalL2, "externalL2", "", "Enable tests with external L2") flag.StringVar(&externalL2, "externalL2", "", "Enable tests with external L2")
flag.IntVar(&EthNodeVerbosity, "ethLogVerbosity", int(log.LvlInfo), "The level of verbosity to use for the eth node logs") flag.IntVar(&EthNodeVerbosity, "ethLogVerbosity", LegacyLevelInfo, "The (legacy geth) level of verbosity to use for the eth node logs")
testing.Init() // Register test flags before parsing testing.Init() // Register test flags before parsing
flag.Parse() flag.Parse()
// Setup global logger // Setup global logger
lvl := log.Lvl(EthNodeVerbosity) lvl := log.FromLegacyLevel(EthNodeVerbosity)
if lvl < log.LvlCrit { var handler slog.Handler
log.Root().SetHandler(log.DiscardHandler()) if lvl > log.LevelCrit {
} else if lvl > log.LvlTrace { // clip to trace level handler = log.DiscardHandler()
lvl = log.LvlTrace } else {
if lvl < log.LevelTrace { // clip to trace level
lvl = log.LevelTrace
}
// We cannot attach a testlog logger,
// because the global logger is shared between different independent parallel tests.
// Tests that write to a testlogger of another finished test fail.
handler = oplog.NewLogHandler(os.Stdout, oplog.CLIConfig{
Level: lvl,
Color: false, // some CI logs do not handle colors well
Format: oplog.FormatTerminal,
})
} }
// We cannot attach a testlog logger, oplog.SetGlobalLogHandler(handler)
// because the global logger is shared between different independent parallel tests.
// Tests that write to a testlogger of another finished test fail.
h := oplog.NewLogHandler(os.Stdout, oplog.CLIConfig{
Level: lvl,
Color: false, // some CI logs do not handle colors well
Format: oplog.FormatTerminal,
})
oplog.SetGlobalLogHandler(h)
if err := allExist(l1AllocsPath, l1DeploymentsPath, deployConfigPath); err != nil { if err := allExist(l1AllocsPath, l1DeploymentsPath, deployConfigPath); err != nil {
return return
......
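For readability, here is the new (right-hand) side of the init() hunk above restated as one block. This is a sketch only, with the same semantics as the diff: pick a level from the legacy verbosity, silence output entirely or clip to trace, and install the resulting handler globally.

package example

import (
	"os"

	"golang.org/x/exp/slog"

	"github.com/ethereum/go-ethereum/log"

	oplog "github.com/ethereum-optimism/optimism/op-service/log"
)

func setupGlobalTestLogHandler(ethNodeVerbosity int) {
	lvl := log.FromLegacyLevel(ethNodeVerbosity)
	var handler slog.Handler
	if lvl > log.LevelCrit {
		// a level above crit effectively silences all output
		handler = log.DiscardHandler()
	} else {
		if lvl < log.LevelTrace { // clip to trace level
			lvl = log.LevelTrace
		}
		handler = oplog.NewLogHandler(os.Stdout, oplog.CLIConfig{
			Level:  lvl,
			Color:  false, // some CI logs do not handle colors well
			Format: oplog.FormatTerminal,
		})
	}
	oplog.SetGlobalLogHandler(handler)
}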
...@@ -110,7 +110,7 @@ func WithAlphabet(rollupEndpoint string) Option { ...@@ -110,7 +110,7 @@ func WithAlphabet(rollupEndpoint string) Option {
} }
func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name string, options ...Option) *Helper { func NewChallenger(t *testing.T, ctx context.Context, l1Endpoint string, name string, options ...Option) *Helper {
log := testlog.Logger(t, log.LvlDebug).New("role", name) log := testlog.Logger(t, log.LevelDebug).New("role", name)
log.Info("Creating challenger", "l1", l1Endpoint) log.Info("Creating challenger", "l1", l1Endpoint)
cfg := NewChallengerConfig(t, l1Endpoint, options...) cfg := NewChallengerConfig(t, l1Endpoint, options...)
chl, err := challenger.Main(ctx, log, cfg) chl, err := challenger.Main(ctx, log, cfg)
......
...@@ -127,7 +127,7 @@ func (h *FactoryHelper) StartOutputCannonGameWithCorrectRoot(ctx context.Context ...@@ -127,7 +127,7 @@ func (h *FactoryHelper) StartOutputCannonGameWithCorrectRoot(ctx context.Context
} }
func (h *FactoryHelper) StartOutputCannonGame(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash) *OutputCannonGameHelper { func (h *FactoryHelper) StartOutputCannonGame(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash) *OutputCannonGameHelper {
logger := testlog.Logger(h.t, log.LvlInfo).New("role", "OutputCannonGameHelper") logger := testlog.Logger(h.t, log.LevelInfo).New("role", "OutputCannonGameHelper")
rollupClient := h.system.RollupClient(l2Node) rollupClient := h.system.RollupClient(l2Node)
extraData := h.createBisectionGameExtraData(l2Node, l2BlockNumber) extraData := h.createBisectionGameExtraData(l2Node, l2BlockNumber)
...@@ -179,7 +179,7 @@ func (h *FactoryHelper) StartOutputAlphabetGameWithCorrectRoot(ctx context.Conte ...@@ -179,7 +179,7 @@ func (h *FactoryHelper) StartOutputAlphabetGameWithCorrectRoot(ctx context.Conte
} }
func (h *FactoryHelper) StartOutputAlphabetGame(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash) *OutputAlphabetGameHelper { func (h *FactoryHelper) StartOutputAlphabetGame(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash) *OutputAlphabetGameHelper {
logger := testlog.Logger(h.t, log.LvlInfo).New("role", "OutputAlphabetGameHelper") logger := testlog.Logger(h.t, log.LevelInfo).New("role", "OutputAlphabetGameHelper")
rollupClient := h.system.RollupClient(l2Node) rollupClient := h.system.RollupClient(l2Node)
extraData := h.createBisectionGameExtraData(l2Node, l2BlockNumber) extraData := h.createBisectionGameExtraData(l2Node, l2BlockNumber)
......
...@@ -36,7 +36,7 @@ func (g *OutputAlphabetGameHelper) StartChallenger( ...@@ -36,7 +36,7 @@ func (g *OutputAlphabetGameHelper) StartChallenger(
} }
func (g *OutputAlphabetGameHelper) CreateHonestActor(ctx context.Context, l2Node string) *OutputHonestHelper { func (g *OutputAlphabetGameHelper) CreateHonestActor(ctx context.Context, l2Node string) *OutputHonestHelper {
logger := testlog.Logger(g.t, log.LvlInfo).New("role", "HonestHelper", "game", g.addr) logger := testlog.Logger(g.t, log.LevelInfo).New("role", "HonestHelper", "game", g.addr)
caller := batching.NewMultiCaller(g.system.NodeClient("l1").Client(), batching.DefaultBatchSize) caller := batching.NewMultiCaller(g.system.NodeClient("l1").Client(), batching.DefaultBatchSize)
contract, err := contracts.NewFaultDisputeGameContract(g.addr, caller) contract, err := contracts.NewFaultDisputeGameContract(g.addr, caller)
g.require.NoError(err, "Failed to create game contact") g.require.NoError(err, "Failed to create game contact")
......
...@@ -52,7 +52,7 @@ func (g *OutputCannonGameHelper) CreateHonestActor(ctx context.Context, l2Node s ...@@ -52,7 +52,7 @@ func (g *OutputCannonGameHelper) CreateHonestActor(ctx context.Context, l2Node s
opts = append(opts, options...) opts = append(opts, options...)
cfg := challenger.NewChallengerConfig(g.t, g.system.NodeEndpoint("l1"), opts...) cfg := challenger.NewChallengerConfig(g.t, g.system.NodeEndpoint("l1"), opts...)
logger := testlog.Logger(g.t, log.LvlInfo).New("role", "HonestHelper", "game", g.addr) logger := testlog.Logger(g.t, log.LevelInfo).New("role", "HonestHelper", "game", g.addr)
l2Client := g.system.NodeClient(l2Node) l2Client := g.system.NodeClient(l2Node)
caller := batching.NewMultiCaller(g.system.NodeClient("l1").Client(), batching.DefaultBatchSize) caller := batching.NewMultiCaller(g.system.NodeClient("l1").Client(), batching.DefaultBatchSize)
contract, err := contracts.NewFaultDisputeGameContract(g.addr, caller) contract, err := contracts.NewFaultDisputeGameContract(g.addr, caller)
...@@ -215,7 +215,7 @@ func (g *OutputCannonGameHelper) createCannonTraceProvider(ctx context.Context, ...@@ -215,7 +215,7 @@ func (g *OutputCannonGameHelper) createCannonTraceProvider(ctx context.Context,
splitDepth := g.SplitDepth(ctx) splitDepth := g.SplitDepth(ctx)
g.require.EqualValues(outputRootClaim.Depth(), splitDepth+1, "outputRootClaim must be the root of an execution game") g.require.EqualValues(outputRootClaim.Depth(), splitDepth+1, "outputRootClaim must be the root of an execution game")
logger := testlog.Logger(g.t, log.LvlInfo).New("role", "CannonTraceProvider", "game", g.addr) logger := testlog.Logger(g.t, log.LevelInfo).New("role", "CannonTraceProvider", "game", g.addr)
opt := g.defaultChallengerOptions(l2Node) opt := g.defaultChallengerOptions(l2Node)
opt = append(opt, options...) opt = append(opt, options...)
cfg := challenger.NewChallengerConfig(g.t, g.system.NodeEndpoint("l1"), opt...) cfg := challenger.NewChallengerConfig(g.t, g.system.NodeEndpoint("l1"), opt...)
......
...@@ -37,7 +37,7 @@ func TestSystem4844E2E(t *testing.T) { ...@@ -37,7 +37,7 @@ func TestSystem4844E2E(t *testing.T) {
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time) log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time)
l1Client := sys.Clients["l1"] l1Client := sys.Clients["l1"]
......
...@@ -33,7 +33,7 @@ func TestShim(t *testing.T) { ...@@ -33,7 +33,7 @@ func TestShim(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.FileExists(t, "op-geth") require.FileExists(t, "op-geth")
config.EthNodeVerbosity = 4 config.EthNodeVerbosity = config.LegacyLevelDebug
ec := (&e2e.ExternalRunner{ ec := (&e2e.ExternalRunner{
Name: "TestShim", Name: "TestShim",
......
...@@ -15,7 +15,7 @@ import ( ...@@ -15,7 +15,7 @@ import (
func TestGetVersion(t *testing.T) { func TestGetVersion(t *testing.T) {
InitParallel(t) InitParallel(t)
l := testlog.Logger(t, log.LvlInfo) l := testlog.Logger(t, log.LevelInfo)
beaconApi := fakebeacon.NewBeacon(l, t.TempDir(), uint64(0), uint64(0)) beaconApi := fakebeacon.NewBeacon(l, t.TempDir(), uint64(0), uint64(0))
t.Cleanup(func() { t.Cleanup(func() {
......
...@@ -52,7 +52,7 @@ type OpGeth struct { ...@@ -52,7 +52,7 @@ type OpGeth struct {
} }
func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, error) { func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, error) {
logger := testlog.Logger(t, log.LvlCrit) logger := testlog.Logger(t, log.LevelCrit)
l1Genesis, err := genesis.BuildL1DeveloperGenesis(cfg.DeployConfig, config.L1Allocs, config.L1Deployments) l1Genesis, err := genesis.BuildL1DeveloperGenesis(cfg.DeployConfig, config.L1Allocs, config.L1Deployments)
require.Nil(t, err) require.Nil(t, err)
......
...@@ -179,7 +179,7 @@ func setupConductor( ...@@ -179,7 +179,7 @@ func setupConductor(
RollupCfg: rollupCfg, RollupCfg: rollupCfg,
RPCEnableProxy: true, RPCEnableProxy: true,
LogConfig: oplog.CLIConfig{ LogConfig: oplog.CLIConfig{
Level: log.LvlInfo, Level: log.LevelInfo,
Color: false, Color: false,
}, },
RPC: oprpc.CLIConfig{ RPC: oprpc.CLIConfig{
...@@ -189,7 +189,7 @@ func setupConductor( ...@@ -189,7 +189,7 @@ func setupConductor(
} }
ctx := context.Background() ctx := context.Background()
service, err := con.New(ctx, &cfg, testlog.Logger(t, log.LvlInfo), "0.0.1") service, err := con.New(ctx, &cfg, testlog.Logger(t, log.LevelInfo), "0.0.1")
require.NoError(t, err) require.NoError(t, err)
err = service.Start(ctx) err = service.Start(ctx)
require.NoError(t, err) require.NoError(t, err)
...@@ -236,7 +236,7 @@ func setupBatcher(t *testing.T, sys *System, conductors map[string]*conductor) { ...@@ -236,7 +236,7 @@ func setupBatcher(t *testing.T, sys *System, conductors map[string]*conductor) {
PollInterval: 1 * time.Second, PollInterval: 1 * time.Second,
TxMgrConfig: newTxMgrConfig(sys.EthInstances["l1"].WSEndpoint(), sys.Cfg.Secrets.Batcher), TxMgrConfig: newTxMgrConfig(sys.EthInstances["l1"].WSEndpoint(), sys.Cfg.Secrets.Batcher),
LogConfig: oplog.CLIConfig{ LogConfig: oplog.CLIConfig{
Level: log.LvlDebug, Level: log.LevelDebug,
Format: oplog.FormatText, Format: oplog.FormatText,
}, },
Stopped: false, Stopped: false,
...@@ -260,9 +260,9 @@ func sequencerFailoverSystemConfig(t *testing.T, ports map[string]int) SystemCon ...@@ -260,9 +260,9 @@ func sequencerFailoverSystemConfig(t *testing.T, ports map[string]int) SystemCon
cfg.Nodes[Sequencer3Name] = sequencerCfg(ports[Sequencer3Name]) cfg.Nodes[Sequencer3Name] = sequencerCfg(ports[Sequencer3Name])
delete(cfg.Loggers, "sequencer") delete(cfg.Loggers, "sequencer")
cfg.Loggers[Sequencer1Name] = testlog.Logger(t, log.LvlInfo).New("role", Sequencer1Name) cfg.Loggers[Sequencer1Name] = testlog.Logger(t, log.LevelInfo).New("role", Sequencer1Name)
cfg.Loggers[Sequencer2Name] = testlog.Logger(t, log.LvlInfo).New("role", Sequencer2Name) cfg.Loggers[Sequencer2Name] = testlog.Logger(t, log.LevelInfo).New("role", Sequencer2Name)
cfg.Loggers[Sequencer3Name] = testlog.Logger(t, log.LvlInfo).New("role", Sequencer3Name) cfg.Loggers[Sequencer3Name] = testlog.Logger(t, log.LevelInfo).New("role", Sequencer3Name)
cfg.P2PTopology = map[string][]string{ cfg.P2PTopology = map[string][]string{
Sequencer1Name: {Sequencer2Name, Sequencer3Name}, Sequencer1Name: {Sequencer2Name, Sequencer3Name},
......
...@@ -147,10 +147,10 @@ func DefaultSystemConfig(t *testing.T) SystemConfig { ...@@ -147,10 +147,10 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
}, },
}, },
Loggers: map[string]log.Logger{ Loggers: map[string]log.Logger{
"verifier": testlog.Logger(t, log.LvlInfo).New("role", "verifier"), "verifier": testlog.Logger(t, log.LevelInfo).New("role", "verifier"),
"sequencer": testlog.Logger(t, log.LvlInfo).New("role", "sequencer"), "sequencer": testlog.Logger(t, log.LevelInfo).New("role", "sequencer"),
"batcher": testlog.Logger(t, log.LvlInfo).New("role", "batcher"), "batcher": testlog.Logger(t, log.LevelInfo).New("role", "batcher"),
"proposer": testlog.Logger(t, log.LvlCrit).New("role", "proposer"), "proposer": testlog.Logger(t, log.LevelCrit).New("role", "proposer"),
}, },
GethOptions: map[string][]geth.GethOption{}, GethOptions: map[string][]geth.GethOption{},
P2PTopology: nil, // no P2P connectivity by default P2PTopology: nil, // no P2P connectivity by default
...@@ -315,7 +315,7 @@ func (sys *System) RollupClient(name string) *sources.RollupClient { ...@@ -315,7 +315,7 @@ func (sys *System) RollupClient(name string) *sources.RollupClient {
if ok { if ok {
return client return client
} }
logger := testlog.Logger(sys.t, log.LvlInfo).New("rollupClient", name) logger := testlog.Logger(sys.t, log.LevelInfo).New("rollupClient", name)
endpoint := sys.RollupEndpoint(name) endpoint := sys.RollupEndpoint(name)
client, err := dial.DialRollupClientWithTimeout(context.Background(), 30*time.Second, logger, endpoint) client, err := dial.DialRollupClientWithTimeout(context.Background(), 30*time.Second, logger, endpoint)
require.NoErrorf(sys.t, err, "Failed to dial rollup client %v", name) require.NoErrorf(sys.t, err, "Failed to dial rollup client %v", name)
...@@ -517,7 +517,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste ...@@ -517,7 +517,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
sys.RollupConfig = &defaultConfig sys.RollupConfig = &defaultConfig
// Create a fake Beacon node to hold on to blobs created by the L1 miner, and to serve them to L2 // Create a fake Beacon node to hold on to blobs created by the L1 miner, and to serve them to L2
bcn := fakebeacon.NewBeacon(testlog.Logger(t, log.LvlInfo).New("role", "l1_cl"), bcn := fakebeacon.NewBeacon(testlog.Logger(t, log.LevelInfo).New("role", "l1_cl"),
path.Join(cfg.BlobsPath, "l1_cl"), l1Genesis.Timestamp, cfg.DeployConfig.L1BlockTime) path.Join(cfg.BlobsPath, "l1_cl"), l1Genesis.Timestamp, cfg.DeployConfig.L1BlockTime)
t.Cleanup(func() { t.Cleanup(func() {
_ = bcn.Close() _ = bcn.Close()
...@@ -658,8 +658,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste ...@@ -658,8 +658,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
} }
// Don't log state snapshots in test output // Don't log state snapshots in test output
snapLog := log.New() snapLog := log.NewLogger(log.DiscardHandler())
snapLog.SetHandler(log.DiscardHandler())
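The snapshot-logger change above follows the new handler-first construction: instead of creating a logger and then swapping its handler, the discard handler is passed to the constructor. A minimal sketch:

package example

import "github.com/ethereum/go-ethereum/log"

// newSnapshotLogger returns a logger whose records are all dropped,
// replacing the old log.New() + SetHandler(log.DiscardHandler()) pair.
func newSnapshotLogger() log.Logger {
	return log.NewLogger(log.DiscardHandler())
}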
// Rollup nodes // Rollup nodes
...@@ -752,7 +751,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste ...@@ -752,7 +751,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
TxMgrConfig: newTxMgrConfig(sys.EthInstances["l1"].WSEndpoint(), cfg.Secrets.Proposer), TxMgrConfig: newTxMgrConfig(sys.EthInstances["l1"].WSEndpoint(), cfg.Secrets.Proposer),
AllowNonFinalized: cfg.NonFinalizedProposals, AllowNonFinalized: cfg.NonFinalizedProposals,
LogConfig: oplog.CLIConfig{ LogConfig: oplog.CLIConfig{
Level: log.LvlInfo, Level: log.LevelInfo,
Format: oplog.FormatText, Format: oplog.FormatText,
}, },
} }
...@@ -789,7 +788,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste ...@@ -789,7 +788,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
PollInterval: 50 * time.Millisecond, PollInterval: 50 * time.Millisecond,
TxMgrConfig: newTxMgrConfig(sys.EthInstances["l1"].WSEndpoint(), cfg.Secrets.Batcher), TxMgrConfig: newTxMgrConfig(sys.EthInstances["l1"].WSEndpoint(), cfg.Secrets.Batcher),
LogConfig: oplog.CLIConfig{ LogConfig: oplog.CLIConfig{
Level: log.LvlInfo, Level: log.LevelInfo,
Format: oplog.FormatText, Format: oplog.FormatText,
}, },
Stopped: sys.Cfg.DisableBatcher, // Batch submitter may be enabled later Stopped: sys.Cfg.DisableBatcher, // Batch submitter may be enabled later
......
...@@ -100,7 +100,7 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool, spanBatchActi ...@@ -100,7 +100,7 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool, spanBatchActi
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time) log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time)
l1Client := sys.Clients["l1"] l1Client := sys.Clients["l1"]
...@@ -199,7 +199,7 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool, spanBatchActivated bool ...@@ -199,7 +199,7 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool, spanBatchActivated bool
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time) log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time)
l1Client := sys.Clients["l1"] l1Client := sys.Clients["l1"]
...@@ -292,7 +292,7 @@ func testFaultProofProgramScenario(t *testing.T, ctx context.Context, sys *Syste ...@@ -292,7 +292,7 @@ func testFaultProofProgramScenario(t *testing.T, ctx context.Context, sys *Syste
// Check the FPP confirms the expected output // Check the FPP confirms the expected output
t.Log("Running fault proof in fetching mode") t.Log("Running fault proof in fetching mode")
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
err := opp.FaultProofProgram(ctx, log, fppConfig) err := opp.FaultProofProgram(ctx, log, fppConfig)
require.NoError(t, err) require.NoError(t, err)
......
...@@ -224,7 +224,7 @@ func TestSystemE2E(t *testing.T) { ...@@ -224,7 +224,7 @@ func TestSystemE2E(t *testing.T) {
} }
func runE2ESystemTest(t *testing.T, sys *System) { func runE2ESystemTest(t *testing.T, sys *System) {
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time) log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time)
l1Client := sys.Clients["l1"] l1Client := sys.Clients["l1"]
...@@ -307,7 +307,7 @@ func TestConfirmationDepth(t *testing.T) { ...@@ -307,7 +307,7 @@ func TestConfirmationDepth(t *testing.T) {
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time) log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time)
l1Client := sys.Clients["l1"] l1Client := sys.Clients["l1"]
...@@ -365,7 +365,7 @@ func TestPendingGasLimit(t *testing.T) { ...@@ -365,7 +365,7 @@ func TestPendingGasLimit(t *testing.T) {
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LevelInfo)
log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time) log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time)
l2Verif := sys.Clients["verifier"] l2Verif := sys.Clients["verifier"]
...@@ -473,7 +473,7 @@ func TestMissingBatchE2E(t *testing.T) { ...@@ -473,7 +473,7 @@ func TestMissingBatchE2E(t *testing.T) {
func L1InfoFromState(ctx context.Context, contract *bindings.L1Block, l2Number *big.Int, ecotone bool) (*derive.L1BlockInfo, error) { func L1InfoFromState(ctx context.Context, contract *bindings.L1Block, l2Number *big.Int, ecotone bool) (*derive.L1BlockInfo, error) {
var err error var err error
var out = &derive.L1BlockInfo{} out := &derive.L1BlockInfo{}
opts := bind.CallOpts{ opts := bind.CallOpts{
BlockNumber: l2Number, BlockNumber: l2Number,
Context: ctx, Context: ctx,
...@@ -655,8 +655,8 @@ func TestSystemP2PAltSync(t *testing.T) { ...@@ -655,8 +655,8 @@ func TestSystemP2PAltSync(t *testing.T) {
}, },
L1EpochPollInterval: time.Second * 4, L1EpochPollInterval: time.Second * 4,
} }
cfg.Loggers["alice"] = testlog.Logger(t, log.LvlInfo).New("role", "alice") cfg.Loggers["alice"] = testlog.Logger(t, log.LevelInfo).New("role", "alice")
cfg.Loggers["bob"] = testlog.Logger(t, log.LvlInfo).New("role", "bob") cfg.Loggers["bob"] = testlog.Logger(t, log.LevelInfo).New("role", "bob")
// connect the nodes // connect the nodes
cfg.P2PTopology = map[string][]string{ cfg.P2PTopology = map[string][]string{
...@@ -699,9 +699,8 @@ func TestSystemP2PAltSync(t *testing.T) { ...@@ -699,9 +699,8 @@ func TestSystemP2PAltSync(t *testing.T) {
time.Sleep(time.Second * 10) time.Sleep(time.Second * 10)
// set up our syncer node, connect it to alice/bob // set up our syncer node, connect it to alice/bob
cfg.Loggers["syncer"] = testlog.Logger(t, log.LvlInfo).New("role", "syncer") cfg.Loggers["syncer"] = testlog.Logger(t, log.LevelInfo).New("role", "syncer")
snapLog := log.New() snapLog := log.NewLogger(log.DiscardHandler())
snapLog.SetHandler(log.DiscardHandler())
// Create a peer, and hook up alice and bob // Create a peer, and hook up alice and bob
h, err := sys.newMockNetPeer() h, err := sys.newMockNetPeer()
...@@ -798,8 +797,8 @@ func TestSystemDenseTopology(t *testing.T) { ...@@ -798,8 +797,8 @@ func TestSystemDenseTopology(t *testing.T) {
}, },
L1EpochPollInterval: time.Second * 4, L1EpochPollInterval: time.Second * 4,
} }
cfg.Loggers["verifier2"] = testlog.Logger(t, log.LvlInfo).New("role", "verifier") cfg.Loggers["verifier2"] = testlog.Logger(t, log.LevelInfo).New("role", "verifier")
cfg.Loggers["verifier3"] = testlog.Logger(t, log.LvlInfo).New("role", "verifier") cfg.Loggers["verifier3"] = testlog.Logger(t, log.LevelInfo).New("role", "verifier")
// connect the nodes // connect the nodes
cfg.P2PTopology = map[string][]string{ cfg.P2PTopology = map[string][]string{
...@@ -976,7 +975,6 @@ func TestL1InfoContract(t *testing.T) { ...@@ -976,7 +975,6 @@ func TestL1InfoContract(t *testing.T) {
checkInfoList("On sequencer with state", l1InfosFromSequencerState) checkInfoList("On sequencer with state", l1InfosFromSequencerState)
checkInfoList("On verifier with tx", l1InfosFromVerifierTransactions) checkInfoList("On verifier with tx", l1InfosFromVerifierTransactions)
checkInfoList("On verifier with state", l1InfosFromVerifierState) checkInfoList("On verifier with state", l1InfosFromVerifierState)
} }
// calcGasFees determines the actual cost of the transaction given a specific base fee // calcGasFees determines the actual cost of the transaction given a specific base fee
...@@ -1318,11 +1316,11 @@ func StopStartBatcher(t *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -1318,11 +1316,11 @@ func StopStartBatcher(t *testing.T, deltaTimeOffset *hexutil.Uint64) {
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset cfg.DeployConfig.L2GenesisDeltaTimeOffset = deltaTimeOffset
sys, err := cfg.Start(t) sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system") require.NoError(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
rollupRPCClient, err := rpc.DialContext(context.Background(), sys.RollupNodes["verifier"].HTTPEndpoint()) rollupRPCClient, err := rpc.DialContext(context.Background(), sys.RollupNodes["verifier"].HTTPEndpoint())
require.Nil(t, err) require.NoError(t, err)
rollupClient := sources.NewRollupClient(client.NewBaseRPCClient(rollupRPCClient)) rollupClient := sources.NewRollupClient(client.NewBaseRPCClient(rollupRPCClient))
l2Seq := sys.Clients["sequencer"] l2Seq := sys.Clients["sequencer"]
...@@ -1330,7 +1328,7 @@ func StopStartBatcher(t *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -1330,7 +1328,7 @@ func StopStartBatcher(t *testing.T, deltaTimeOffset *hexutil.Uint64) {
// retrieve the initial sync status // retrieve the initial sync status
seqStatus, err := rollupClient.SyncStatus(context.Background()) seqStatus, err := rollupClient.SyncStatus(context.Background())
require.Nil(t, err) require.NoError(t, err)
nonce := uint64(0) nonce := uint64(0)
sendTx := func() *types.Receipt { sendTx := func() *types.Receipt {
...@@ -1349,24 +1347,24 @@ func StopStartBatcher(t *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -1349,24 +1347,24 @@ func StopStartBatcher(t *testing.T, deltaTimeOffset *hexutil.Uint64) {
// wait until the block the tx was first included in shows up in the safe chain on the verifier // wait until the block the tx was first included in shows up in the safe chain on the verifier
safeBlockInclusionDuration := time.Duration(6*cfg.DeployConfig.L1BlockTime) * time.Second safeBlockInclusionDuration := time.Duration(6*cfg.DeployConfig.L1BlockTime) * time.Second
_, err = geth.WaitForBlock(receipt.BlockNumber, l2Verif, safeBlockInclusionDuration) _, err = geth.WaitForBlock(receipt.BlockNumber, l2Verif, safeBlockInclusionDuration)
require.Nil(t, err, "Waiting for block on verifier") require.NoError(t, err, "Waiting for block on verifier")
require.NoError(t, wait.ForProcessingFullBatch(context.Background(), rollupClient)) require.NoError(t, wait.ForProcessingFullBatch(context.Background(), rollupClient))
// ensure the safe chain advances // ensure the safe chain advances
newSeqStatus, err := rollupClient.SyncStatus(context.Background()) newSeqStatus, err := rollupClient.SyncStatus(context.Background())
require.Nil(t, err) require.NoError(t, err)
require.Greater(t, newSeqStatus.SafeL2.Number, seqStatus.SafeL2.Number, "Safe chain did not advance") require.Greater(t, newSeqStatus.SafeL2.Number, seqStatus.SafeL2.Number, "Safe chain did not advance")
// stop the batch submission // stop the batch submission
err = sys.BatchSubmitter.Driver().StopBatchSubmitting(context.Background()) err = sys.BatchSubmitter.Driver().StopBatchSubmitting(context.Background())
require.Nil(t, err) require.NoError(t, err)
// wait for any old safe blocks being submitted / derived // wait for any old safe blocks being submitted / derived
time.Sleep(safeBlockInclusionDuration) time.Sleep(safeBlockInclusionDuration)
// get the initial sync status // get the initial sync status
seqStatus, err = rollupClient.SyncStatus(context.Background()) seqStatus, err = rollupClient.SyncStatus(context.Background())
require.Nil(t, err) require.NoError(t, err)
// send another tx // send another tx
sendTx() sendTx()
...@@ -1374,12 +1372,12 @@ func StopStartBatcher(t *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -1374,12 +1372,12 @@ func StopStartBatcher(t *testing.T, deltaTimeOffset *hexutil.Uint64) {
// ensure that the safe chain does not advance while the batcher is stopped // ensure that the safe chain does not advance while the batcher is stopped
newSeqStatus, err = rollupClient.SyncStatus(context.Background()) newSeqStatus, err = rollupClient.SyncStatus(context.Background())
require.Nil(t, err) require.NoError(t, err)
require.Equal(t, newSeqStatus.SafeL2.Number, seqStatus.SafeL2.Number, "Safe chain advanced while batcher was stopped") require.Equal(t, newSeqStatus.SafeL2.Number, seqStatus.SafeL2.Number, "Safe chain advanced while batcher was stopped")
// start the batch submission // start the batch submission
err = sys.BatchSubmitter.Driver().StartBatchSubmitting() err = sys.BatchSubmitter.Driver().StartBatchSubmitting()
require.Nil(t, err) require.NoError(t, err)
time.Sleep(safeBlockInclusionDuration) time.Sleep(safeBlockInclusionDuration)
// send a third tx // send a third tx
...@@ -1387,12 +1385,12 @@ func StopStartBatcher(t *testing.T, deltaTimeOffset *hexutil.Uint64) { ...@@ -1387,12 +1385,12 @@ func StopStartBatcher(t *testing.T, deltaTimeOffset *hexutil.Uint64) {
// wait until the block the tx was first included in shows up in the safe chain on the verifier // wait until the block the tx was first included in shows up in the safe chain on the verifier
_, err = geth.WaitForBlock(receipt.BlockNumber, l2Verif, safeBlockInclusionDuration) _, err = geth.WaitForBlock(receipt.BlockNumber, l2Verif, safeBlockInclusionDuration)
require.Nil(t, err, "Waiting for block on verifier") require.NoError(t, err, "Waiting for block on verifier")
require.NoError(t, wait.ForProcessingFullBatch(context.Background(), rollupClient)) require.NoError(t, wait.ForProcessingFullBatch(context.Background(), rollupClient))
// ensure that the safe chain advances after restarting the batcher // ensure that the safe chain advances after restarting the batcher
newSeqStatus, err = rollupClient.SyncStatus(context.Background()) newSeqStatus, err = rollupClient.SyncStatus(context.Background())
require.Nil(t, err) require.NoError(t, err)
require.Greater(t, newSeqStatus.SafeL2.Number, seqStatus.SafeL2.Number, "Safe chain did not advance after batcher was restarted") require.Greater(t, newSeqStatus.SafeL2.Number, seqStatus.SafeL2.Number, "Safe chain did not advance after batcher was restarted")
} }
......
...@@ -39,7 +39,7 @@ func Main(version string) func(ctx *cli.Context) error { ...@@ -39,7 +39,7 @@ func Main(version string) func(ctx *cli.Context) error {
} }
l := oplog.NewLogger(oplog.AppOut(cliCtx), cfg.Log) l := oplog.NewLogger(oplog.AppOut(cliCtx), cfg.Log)
oplog.SetGlobalLogHandler(l.GetHandler()) oplog.SetGlobalLogHandler(l.Handler())
l.Info("starting heartbeat monitor", "version", version) l.Info("starting heartbeat monitor", "version", version)
srv, err := Start(cliCtx.Context, l, cfg, version) srv, err := Start(cliCtx.Context, l, cfg, version)
......
...@@ -74,7 +74,7 @@ func main() { ...@@ -74,7 +74,7 @@ func main() {
func RollupNodeMain(ctx *cli.Context, closeApp context.CancelCauseFunc) (cliapp.Lifecycle, error) { func RollupNodeMain(ctx *cli.Context, closeApp context.CancelCauseFunc) (cliapp.Lifecycle, error) {
logCfg := oplog.ReadCLIConfig(ctx) logCfg := oplog.ReadCLIConfig(ctx)
log := oplog.NewLogger(oplog.AppOut(ctx), logCfg) log := oplog.NewLogger(oplog.AppOut(ctx), logCfg)
oplog.SetGlobalLogHandler(log.GetHandler()) oplog.SetGlobalLogHandler(log.Handler())
opservice.ValidateEnvVars(flags.EnvVarPrefix, flags.Flags, log) opservice.ValidateEnvVars(flags.EnvVarPrefix, flags.Flags, log)
opservice.WarnOnDeprecatedFlags(ctx, flags.DeprecatedFlags, log) opservice.WarnOnDeprecatedFlags(ctx, flags.DeprecatedFlags, log)
m := metrics.NewMetrics("default") m := metrics.NewMetrics("default")
......
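The GetHandler() to Handler() renames above all follow the same pattern: the service builds its root logger from the CLI config, then reuses that logger's slog handler as the process-wide default. A sketch of the shared shape (the urfave/cli/v2 import and the cliCtx parameter are assumptions based on the *cli.Context usage in the hunks above):

package example

import (
	"github.com/urfave/cli/v2"

	oplog "github.com/ethereum-optimism/optimism/op-service/log"
)

func setupServiceLogging(cliCtx *cli.Context) {
	logCfg := oplog.ReadCLIConfig(cliCtx)
	l := oplog.NewLogger(oplog.AppOut(cliCtx), logCfg)
	// Handler() replaces the removed GetHandler(); the logger's slog handler
	// becomes the global default for the whole process.
	oplog.SetGlobalLogHandler(l.Handler())
}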
...@@ -23,7 +23,7 @@ import ( ...@@ -23,7 +23,7 @@ import (
) )
func TestOutputAtBlock(t *testing.T) { func TestOutputAtBlock(t *testing.T) {
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
// Test data for Merkle Patricia Trie: proof the eth2 deposit contract account contents (mainnet). // Test data for Merkle Patricia Trie: proof the eth2 deposit contract account contents (mainnet).
headerTestData := ` headerTestData := `
...@@ -124,7 +124,7 @@ func TestOutputAtBlock(t *testing.T) { ...@@ -124,7 +124,7 @@ func TestOutputAtBlock(t *testing.T) {
} }
func TestVersion(t *testing.T) { func TestVersion(t *testing.T) {
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
l2Client := &testutils.MockL2Client{} l2Client := &testutils.MockL2Client{}
drClient := &mockDriverClient{} drClient := &mockDriverClient{}
rpcCfg := &RPCConfig{ rpcCfg := &RPCConfig{
...@@ -165,7 +165,7 @@ func randomSyncStatus(rng *rand.Rand) *eth.SyncStatus { ...@@ -165,7 +165,7 @@ func randomSyncStatus(rng *rand.Rand) *eth.SyncStatus {
} }
func TestSyncStatus(t *testing.T) { func TestSyncStatus(t *testing.T) {
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
l2Client := &testutils.MockL2Client{} l2Client := &testutils.MockL2Client{}
drClient := &mockDriverClient{} drClient := &mockDriverClient{}
rng := rand.New(rand.NewSource(1234)) rng := rand.New(rand.NewSource(1234))
......
...@@ -63,7 +63,7 @@ func (a *appScoreTestData) WaitForNextScoreBookUpdate(t *testing.T) stubScoreBoo ...@@ -63,7 +63,7 @@ func (a *appScoreTestData) WaitForNextScoreBookUpdate(t *testing.T) stubScoreBoo
func setupPeerApplicationScorerTest(t *testing.T, params *ApplicationScoreParams) (*appScoreTestData, *peerApplicationScorer) { func setupPeerApplicationScorerTest(t *testing.T, params *ApplicationScoreParams) (*appScoreTestData, *peerApplicationScorer) {
data := &appScoreTestData{ data := &appScoreTestData{
ctx: context.Background(), ctx: context.Background(),
logger: testlog.Logger(t, log.LvlInfo), logger: testlog.Logger(t, log.LevelInfo),
clock: clock.NewDeterministicClock(time.UnixMilli(1000)), clock: clock.NewDeterministicClock(time.UnixMilli(1000)),
peers: []peer.ID{}, peers: []peer.ID{},
scorebook: &stubScoreBook{ scorebook: &stubScoreBook{
......
...@@ -20,7 +20,7 @@ import ( ...@@ -20,7 +20,7 @@ import (
func expiryTestSetup(t *testing.T) (*clock.DeterministicClock, *mocks.ExpiryStore, *mocks.BlockingConnectionGater, *ExpiryConnectionGater) { func expiryTestSetup(t *testing.T) (*clock.DeterministicClock, *mocks.ExpiryStore, *mocks.BlockingConnectionGater, *ExpiryConnectionGater) {
mockGater := mocks.NewBlockingConnectionGater(t) mockGater := mocks.NewBlockingConnectionGater(t)
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
cl := clock.NewDeterministicClock(time.Now()) cl := clock.NewDeterministicClock(time.Now())
mockExpiryStore := mocks.NewExpiryStore(t) mockExpiryStore := mocks.NewExpiryStore(t)
gater := AddBanExpiry(mockGater, mockExpiryStore, log, cl, metrics.NoopMetrics) gater := AddBanExpiry(mockGater, mockExpiryStore, log, cl, metrics.NoopMetrics)
......
...@@ -31,7 +31,7 @@ import ( ...@@ -31,7 +31,7 @@ import (
) )
func TestGuardGossipValidator(t *testing.T) { func TestGuardGossipValidator(t *testing.T) {
logger := testlog.Logger(t, log.LvlCrit) logger := testlog.Logger(t, log.LevelCrit)
val := guardGossipValidator(logger, func(ctx context.Context, id peer.ID, message *pubsub.Message) pubsub.ValidationResult { val := guardGossipValidator(logger, func(ctx context.Context, id peer.ID, message *pubsub.Message) pubsub.ValidationResult {
if id == "mallory" { if id == "mallory" {
panic("mallory was here") panic("mallory was here")
...@@ -57,7 +57,7 @@ func TestCombinePeers(t *testing.T) { ...@@ -57,7 +57,7 @@ func TestCombinePeers(t *testing.T) {
} }
func TestVerifyBlockSignature(t *testing.T) { func TestVerifyBlockSignature(t *testing.T) {
logger := testlog.Logger(t, log.LvlCrit) logger := testlog.Logger(t, log.LevelCrit)
cfg := &rollup.Config{ cfg := &rollup.Config{
L2ChainID: big.NewInt(100), L2ChainID: big.NewInt(100),
} }
...@@ -153,8 +153,8 @@ func TestBlockValidator(t *testing.T) { ...@@ -153,8 +153,8 @@ func TestBlockValidator(t *testing.T) {
// Params Set 2: Call the validation function // Params Set 2: Call the validation function
peerID := peer.ID("foo") peerID := peer.ID("foo")
v2Validator := BuildBlocksValidator(testlog.Logger(t, log.LvlCrit), cfg, runCfg, eth.BlockV2) v2Validator := BuildBlocksValidator(testlog.Logger(t, log.LevelCrit), cfg, runCfg, eth.BlockV2)
v3Validator := BuildBlocksValidator(testlog.Logger(t, log.LvlCrit), cfg, runCfg, eth.BlockV3) v3Validator := BuildBlocksValidator(testlog.Logger(t, log.LevelCrit), cfg, runCfg, eth.BlockV3)
zero, one := uint64(0), uint64(1) zero, one := uint64(0), uint64(1)
beaconHash := common.HexToHash("0x1234") beaconHash := common.HexToHash("0x1234")
......
...@@ -61,10 +61,10 @@ func TestingConfig(t *testing.T) *Config { ...@@ -61,10 +61,10 @@ func TestingConfig(t *testing.T) *Config {
func TestP2PSimple(t *testing.T) { func TestP2PSimple(t *testing.T) {
confA := TestingConfig(t) confA := TestingConfig(t)
confB := TestingConfig(t) confB := TestingConfig(t)
hostA, err := confA.Host(testlog.Logger(t, log.LvlError).New("host", "A"), nil, metrics.NoopMetrics) hostA, err := confA.Host(testlog.Logger(t, log.LevelError).New("host", "A"), nil, metrics.NoopMetrics)
require.NoError(t, err, "failed to launch host A") require.NoError(t, err, "failed to launch host A")
defer hostA.Close() defer hostA.Close()
hostB, err := confB.Host(testlog.Logger(t, log.LvlError).New("host", "B"), nil, metrics.NoopMetrics) hostB, err := confB.Host(testlog.Logger(t, log.LevelError).New("host", "B"), nil, metrics.NoopMetrics)
require.NoError(t, err, "failed to launch host B") require.NoError(t, err, "failed to launch host B")
defer hostB.Close() defer hostB.Close()
err = hostA.Connect(context.Background(), peer.AddrInfo{ID: hostB.ID(), Addrs: hostB.Addrs()}) err = hostA.Connect(context.Background(), peer.AddrInfo{ID: hostB.ID(), Addrs: hostB.Addrs()})
...@@ -119,7 +119,7 @@ func TestP2PFull(t *testing.T) { ...@@ -119,7 +119,7 @@ func TestP2PFull(t *testing.T) {
runCfgA := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}} runCfgA := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}}
runCfgB := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}} runCfgB := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}}
logA := testlog.Logger(t, log.LvlError).New("host", "A") logA := testlog.Logger(t, log.LevelError).New("host", "A")
nodeA, err := NewNodeP2P(context.Background(), &rollup.Config{}, logA, &confA, &mockGossipIn{}, nil, runCfgA, metrics.NoopMetrics, false) nodeA, err := NewNodeP2P(context.Background(), &rollup.Config{}, logA, &confA, &mockGossipIn{}, nil, runCfgA, metrics.NoopMetrics, false)
require.NoError(t, err) require.NoError(t, err)
defer nodeA.Close() defer nodeA.Close()
...@@ -148,7 +148,7 @@ func TestP2PFull(t *testing.T) { ...@@ -148,7 +148,7 @@ func TestP2PFull(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
confB.StaticPeers = append(confB.StaticPeers, altAddrB) confB.StaticPeers = append(confB.StaticPeers, altAddrB)
logB := testlog.Logger(t, log.LvlError).New("host", "B") logB := testlog.Logger(t, log.LevelError).New("host", "B")
nodeB, err := NewNodeP2P(context.Background(), &rollup.Config{}, logB, &confB, &mockGossipIn{}, nil, runCfgB, metrics.NoopMetrics, false) nodeB, err := NewNodeP2P(context.Background(), &rollup.Config{}, logB, &confB, &mockGossipIn{}, nil, runCfgB, metrics.NoopMetrics, false)
require.NoError(t, err) require.NoError(t, err)
...@@ -231,9 +231,9 @@ func TestDiscovery(t *testing.T) { ...@@ -231,9 +231,9 @@ func TestDiscovery(t *testing.T) {
pC, _, err := crypto.GenerateSecp256k1Key(rand.Reader) pC, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
require.NoError(t, err, "failed to generate new p2p priv key") require.NoError(t, err, "failed to generate new p2p priv key")
logA := testlog.Logger(t, log.LvlError).New("host", "A") logA := testlog.Logger(t, log.LevelError).New("host", "A")
logB := testlog.Logger(t, log.LvlError).New("host", "B") logB := testlog.Logger(t, log.LevelError).New("host", "B")
logC := testlog.Logger(t, log.LvlError).New("host", "C") logC := testlog.Logger(t, log.LevelError).New("host", "C")
discDBA, err := enode.OpenDB("") // "" = memory db discDBA, err := enode.OpenDB("") // "" = memory db
require.NoError(t, err) require.NoError(t, err)
......
...@@ -18,7 +18,7 @@ import ( ...@@ -18,7 +18,7 @@ import (
const testBanDuration = 2 * time.Hour const testBanDuration = 2 * time.Hour
func peerMonitorSetup(t *testing.T) (*PeerMonitor, *clock2.DeterministicClock, *mocks.PeerManager) { func peerMonitorSetup(t *testing.T) (*PeerMonitor, *clock2.DeterministicClock, *mocks.PeerManager) {
l := testlog.Logger(t, log.LvlInfo) l := testlog.Logger(t, log.LevelInfo)
clock := clock2.NewDeterministicClock(time.UnixMilli(10000)) clock := clock2.NewDeterministicClock(time.UnixMilli(10000))
manager := mocks.NewPeerManager(t) manager := mocks.NewPeerManager(t)
monitor := NewPeerMonitor(context.Background(), l, clock, manager, -100, testBanDuration) monitor := NewPeerMonitor(context.Background(), l, clock, manager, -100, testBanDuration)
......
...@@ -30,7 +30,7 @@ type PeerScorerTestSuite struct { ...@@ -30,7 +30,7 @@ type PeerScorerTestSuite struct {
func (testSuite *PeerScorerTestSuite) SetupTest() { func (testSuite *PeerScorerTestSuite) SetupTest() {
testSuite.mockStore = &p2pMocks.Peerstore{} testSuite.mockStore = &p2pMocks.Peerstore{}
testSuite.mockMetricer = &p2pMocks.ScoreMetrics{} testSuite.mockMetricer = &p2pMocks.ScoreMetrics{}
testSuite.logger = testlog.Logger(testSuite.T(), log.LvlError) testSuite.logger = testlog.Logger(testSuite.T(), log.LevelError)
} }
// TestPeerScorer runs the PeerScorerTestSuite. // TestPeerScorer runs the PeerScorerTestSuite.
......
...@@ -46,7 +46,7 @@ type PeerScoresTestSuite struct { ...@@ -46,7 +46,7 @@ type PeerScoresTestSuite struct {
func (testSuite *PeerScoresTestSuite) SetupTest() { func (testSuite *PeerScoresTestSuite) SetupTest() {
testSuite.mockStore = &p2pMocks.Peerstore{} testSuite.mockStore = &p2pMocks.Peerstore{}
testSuite.mockMetricer = &p2pMocks.ScoreMetrics{} testSuite.mockMetricer = &p2pMocks.ScoreMetrics{}
testSuite.logger = testlog.Logger(testSuite.T(), log.LvlError) testSuite.logger = testlog.Logger(testSuite.T(), log.LevelError)
} }
// TestPeerScores runs the PeerScoresTestSuite. // TestPeerScores runs the PeerScoresTestSuite.
...@@ -71,7 +71,7 @@ func (c *customPeerstoreNetwork) Close() error { ...@@ -71,7 +71,7 @@ func (c *customPeerstoreNetwork) Close() error {
// getNetHosts generates a slice of hosts using the [libp2p/go-libp2p] library. // getNetHosts generates a slice of hosts using the [libp2p/go-libp2p] library.
func getNetHosts(testSuite *PeerScoresTestSuite, ctx context.Context, n int) []host.Host { func getNetHosts(testSuite *PeerScoresTestSuite, ctx context.Context, n int) []host.Host {
var out []host.Host var out []host.Host
log := testlog.Logger(testSuite.T(), log.LvlError) log := testlog.Logger(testSuite.T(), log.LevelError)
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
swarm := tswarm.GenSwarm(testSuite.T()) swarm := tswarm.GenSwarm(testSuite.T())
eps, err := store.NewExtendedPeerstore(ctx, log, clock.SystemClock, swarm.Peerstore(), sync.MutexWrap(ds.NewMapDatastore()), 1*time.Hour) eps, err := store.NewExtendedPeerstore(ctx, log, clock.SystemClock, swarm.Peerstore(), sync.MutexWrap(ds.NewMapDatastore()), 1*time.Hour)
...@@ -99,7 +99,7 @@ func (d *discriminatingAppScorer) ApplicationScore(id peer.ID) float64 { ...@@ -99,7 +99,7 @@ func (d *discriminatingAppScorer) ApplicationScore(id peer.ID) float64 {
func newGossipSubs(testSuite *PeerScoresTestSuite, ctx context.Context, hosts []host.Host) []*pubsub.PubSub { func newGossipSubs(testSuite *PeerScoresTestSuite, ctx context.Context, hosts []host.Host) []*pubsub.PubSub {
var psubs []*pubsub.PubSub var psubs []*pubsub.PubSub
logger := testlog.Logger(testSuite.T(), log.LvlCrit) logger := testlog.Logger(testSuite.T(), log.LevelCrit)
// For each host, create a default gossipsub router. // For each host, create a default gossipsub router.
for _, h := range hosts { for _, h := range hosts {
......
...@@ -20,7 +20,7 @@ func TestScheduleGcPeriodically(t *testing.T) { ...@@ -20,7 +20,7 @@ func TestScheduleGcPeriodically(t *testing.T) {
// Wait for the gc background process to complete after cancelling the context // Wait for the gc background process to complete after cancelling the context
bgTasks.Wait() bgTasks.Wait()
}() }()
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
clock := clock.NewDeterministicClock(time.UnixMilli(5000)) clock := clock.NewDeterministicClock(time.UnixMilli(5000))
called := make(chan struct{}, 10) called := make(chan struct{}, 10)
......
...@@ -35,7 +35,7 @@ func TestRoundTripIPBan(t *testing.T) { ...@@ -35,7 +35,7 @@ func TestRoundTripIPBan(t *testing.T) {
func createMemoryIPBanBook(t *testing.T) *ipBanBook { func createMemoryIPBanBook(t *testing.T) *ipBanBook {
store := sync.MutexWrap(ds.NewMapDatastore()) store := sync.MutexWrap(ds.NewMapDatastore())
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
c := clock.NewDeterministicClock(time.UnixMilli(100)) c := clock.NewDeterministicClock(time.UnixMilli(100))
book, err := newIPBanBook(context.Background(), logger, c, store) book, err := newIPBanBook(context.Background(), logger, c, store)
require.NoError(t, err) require.NoError(t, err)
......
...@@ -33,7 +33,7 @@ func TestRoundTripPeerBan(t *testing.T) { ...@@ -33,7 +33,7 @@ func TestRoundTripPeerBan(t *testing.T) {
func createMemoryPeerBanBook(t *testing.T) *peerBanBook { func createMemoryPeerBanBook(t *testing.T) *peerBanBook {
store := sync.MutexWrap(ds.NewMapDatastore()) store := sync.MutexWrap(ds.NewMapDatastore())
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
c := clock.NewDeterministicClock(time.UnixMilli(100)) c := clock.NewDeterministicClock(time.UnixMilli(100))
book, err := newPeerBanBook(context.Background(), logger, c, store) book, err := newPeerBanBook(context.Background(), logger, c, store)
require.NoError(t, err) require.NoError(t, err)
......
...@@ -165,7 +165,7 @@ func TestCloseCompletes(t *testing.T) { ...@@ -165,7 +165,7 @@ func TestCloseCompletes(t *testing.T) {
func TestPrune(t *testing.T) { func TestPrune(t *testing.T) {
ctx, cancelFunc := context.WithCancel(context.Background()) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc() defer cancelFunc()
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
store := sync.MutexWrap(ds.NewMapDatastore()) store := sync.MutexWrap(ds.NewMapDatastore())
clock := clock.NewDeterministicClock(time.UnixMilli(1000)) clock := clock.NewDeterministicClock(time.UnixMilli(1000))
book, err := newScoreBook(ctx, logger, clock, store, 24*time.Hour) book, err := newScoreBook(ctx, logger, clock, store, 24*time.Hour)
...@@ -220,7 +220,7 @@ func TestPrune(t *testing.T) { ...@@ -220,7 +220,7 @@ func TestPrune(t *testing.T) {
func TestPruneMultipleBatches(t *testing.T) { func TestPruneMultipleBatches(t *testing.T) {
ctx, cancelFunc := context.WithCancel(context.Background()) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc() defer cancelFunc()
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
clock := clock.NewDeterministicClock(time.UnixMilli(1000)) clock := clock.NewDeterministicClock(time.UnixMilli(1000))
book, err := newScoreBook(ctx, logger, clock, sync.MutexWrap(ds.NewMapDatastore()), 24*time.Hour) book, err := newScoreBook(ctx, logger, clock, sync.MutexWrap(ds.NewMapDatastore()), 24*time.Hour)
require.NoError(t, err) require.NoError(t, err)
...@@ -250,7 +250,7 @@ func TestPruneMultipleBatches(t *testing.T) { ...@@ -250,7 +250,7 @@ func TestPruneMultipleBatches(t *testing.T) {
func TestIgnoreOutdatedScores(t *testing.T) { func TestIgnoreOutdatedScores(t *testing.T) {
ctx, cancelFunc := context.WithCancel(context.Background()) ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc() defer cancelFunc()
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
clock := clock.NewDeterministicClock(time.UnixMilli(1000)) clock := clock.NewDeterministicClock(time.UnixMilli(1000))
retentionPeriod := 24 * time.Hour retentionPeriod := 24 * time.Hour
book, err := newScoreBook(ctx, logger, clock, sync.MutexWrap(ds.NewMapDatastore()), retentionPeriod) book, err := newScoreBook(ctx, logger, clock, sync.MutexWrap(ds.NewMapDatastore()), retentionPeriod)
...@@ -289,7 +289,7 @@ func createMemoryStore(t *testing.T) ExtendedPeerstore { ...@@ -289,7 +289,7 @@ func createMemoryStore(t *testing.T) ExtendedPeerstore {
func createPeerstoreWithBacking(t *testing.T, store *sync.MutexDatastore) ExtendedPeerstore { func createPeerstoreWithBacking(t *testing.T, store *sync.MutexDatastore) ExtendedPeerstore {
ps, err := pstoreds.NewPeerstore(context.Background(), store, pstoreds.DefaultOpts()) ps, err := pstoreds.NewPeerstore(context.Background(), store, pstoreds.DefaultOpts())
require.NoError(t, err, "Failed to create peerstore") require.NoError(t, err, "Failed to create peerstore")
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LevelInfo)
c := clock.NewDeterministicClock(time.UnixMilli(100)) c := clock.NewDeterministicClock(time.UnixMilli(100))
eps, err := NewExtendedPeerstore(context.Background(), logger, c, ps, store, 24*time.Hour) eps, err := NewExtendedPeerstore(context.Background(), logger, c, ps, store, 24*time.Hour)
require.NoError(t, err) require.NoError(t, err)
......
...@@ -124,7 +124,7 @@ func setupSyncTestData(length uint64) (*rollup.Config, *syncTestData) { ...@@ -124,7 +124,7 @@ func setupSyncTestData(length uint64) (*rollup.Config, *syncTestData) {
func TestSinglePeerSync(t *testing.T) { func TestSinglePeerSync(t *testing.T) {
t.Parallel() // Takes a while, but can run in parallel t.Parallel() // Takes a while, but can run in parallel
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LevelError)
cfg, payloads := setupSyncTestData(25) cfg, payloads := setupSyncTestData(25)
...@@ -191,7 +191,7 @@ func TestSinglePeerSync(t *testing.T) { ...@@ -191,7 +191,7 @@ func TestSinglePeerSync(t *testing.T) {
func TestMultiPeerSync(t *testing.T) { func TestMultiPeerSync(t *testing.T) {
t.Parallel() // Takes a while, but can run in parallel t.Parallel() // Takes a while, but can run in parallel
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
cfg, payloads := setupSyncTestData(100) cfg, payloads := setupSyncTestData(100)
...@@ -329,7 +329,7 @@ func TestMultiPeerSync(t *testing.T) { ...@@ -329,7 +329,7 @@ func TestMultiPeerSync(t *testing.T) {
func TestNetworkNotifyAddPeerAndRemovePeer(t *testing.T) { func TestNetworkNotifyAddPeerAndRemovePeer(t *testing.T) {
t.Parallel() t.Parallel()
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LevelDebug)
cfg, _ := setupSyncTestData(25) cfg, _ := setupSyncTestData(25)
......
...@@ -79,7 +79,7 @@ func TestAttributesQueue(t *testing.T) { ...@@ -79,7 +79,7 @@ func TestAttributesQueue(t *testing.T) {
} }
attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, l2Fetcher) attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, l2Fetcher)
aq := NewAttributesQueue(testlog.Logger(t, log.LvlError), cfg, attrBuilder, nil) aq := NewAttributesQueue(testlog.Logger(t, log.LevelError), cfg, attrBuilder, nil)
actual, err := aq.createNextAttributes(context.Background(), &batch, safeHead) actual, err := aq.createNextAttributes(context.Background(), &batch, safeHead)
......
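The visible hunks above all apply the same mechanical rename that the rest of the commit performs: the legacy geth log.Lvl* constants used by the p2p, peerstore, sync, and derivation tests become the slog-based log.Level* constants that ship with geth v1.13.8. For reference, a minimal sketch of the post-migration test-logger pattern (not part of this commit; the test name and messages are illustrative, and it assumes the op-service/testlog package path):

package example_test

import (
	"testing"

	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-service/testlog"
)

// TestSlogLevelLogger illustrates the pattern the hunks above converge on.
func TestSlogLevelLogger(t *testing.T) {
	// Before geth v1.13.8 this read testlog.Logger(t, log.LvlError).
	logger := testlog.Logger(t, log.LevelError).New("host", "A")

	logger.Debug("filtered out at LevelError") // below the configured level, not recorded
	logger.Error("emitted", "peer", "A")       // captured by the test log output
}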
This diff is collapsed.