Commit 29c6f318 authored by protolambda

Merge branch 'develop' into cleanup-logging-service-utils

parents c2b73c58 ef469a9a
@@ -53,7 +53,7 @@ jobs:
 - name: Checkout
   uses: actions/checkout@v4
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v1
+  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
   uses: docker/login-action@v3
@@ -80,7 +80,7 @@ jobs:
 - name: Checkout
   uses: actions/checkout@v4
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v1
+  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
   uses: docker/login-action@v3
@@ -107,7 +107,7 @@ jobs:
 - name: Checkout
   uses: actions/checkout@v4
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v1
+  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
   uses: docker/login-action@v3
@@ -134,7 +134,7 @@ jobs:
 - name: Checkout
   uses: actions/checkout@v4
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v1
+  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
   uses: docker/login-action@v3
@@ -161,7 +161,7 @@ jobs:
 - name: Checkout
   uses: actions/checkout@v4
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v1
+  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
   uses: docker/login-action@v3
@@ -188,7 +188,7 @@ jobs:
 - name: Checkout
   uses: actions/checkout@v4
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v1
+  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
   uses: docker/login-action@v3
@@ -226,7 +226,7 @@ jobs:
   uses: actions/checkout@v4
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v1
+  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
   uses: docker/login-action@v3
......
@@ -78,7 +78,7 @@ jobs:
 - name: Checkout
   uses: actions/checkout@v4
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v1
+  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
   uses: docker/login-action@v3
@@ -115,7 +115,7 @@ jobs:
 - name: Checkout
   uses: actions/checkout@v4
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v1
+  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
   uses: docker/login-action@v3
@@ -142,7 +142,7 @@ jobs:
 - name: Checkout
   uses: actions/checkout@v4
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v1
+  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
   uses: docker/login-action@v3
@@ -169,7 +169,7 @@ jobs:
 - name: Checkout
   uses: actions/checkout@v4
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v1
+  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
   uses: docker/login-action@v3
@@ -196,7 +196,7 @@ jobs:
 - name: Checkout
   uses: actions/checkout@v4
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v1
+  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
   uses: docker/login-action@v3
@@ -223,7 +223,7 @@ jobs:
 - name: Checkout
   uses: actions/checkout@v4
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v1
+  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
   uses: docker/login-action@v3
@@ -250,7 +250,7 @@ jobs:
 - name: Checkout
   uses: actions/checkout@v4
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v1
+  uses: docker/setup-buildx-action@v3
 - name: Login to Docker Hub
   uses: docker/login-action@v3
......
@@ -35,7 +35,7 @@ func runIndexer(ctx *cli.Context) error {
 		return err
 	}
-	db, err := database.NewDB(cfg.DB)
+	db, err := database.NewDB(log, cfg.DB)
 	if err != nil {
 		log.Error("failed to connect to database", "err", err)
 		return err
@@ -59,7 +59,7 @@ func runApi(ctx *cli.Context) error {
 		return err
 	}
-	db, err := database.NewDB(cfg.DB)
+	db, err := database.NewDB(log, cfg.DB)
 	if err != nil {
 		log.Error("failed to connect to database", "err", err)
 		return err
@@ -79,7 +79,7 @@ func runMigrations(ctx *cli.Context) error {
 		return err
 	}
-	db, err := database.NewDB(cfg.DB)
+	db, err := database.NewDB(log, cfg.DB)
 	if err != nil {
 		log.Error("failed to connect to database", "err", err)
 		return err
......
@@ -12,9 +12,10 @@ import (
 	"github.com/ethereum-optimism/optimism/op-service/retry"
 	"github.com/pkg/errors"
+	"github.com/ethereum/go-ethereum/log"
 	"gorm.io/driver/postgres"
 	"gorm.io/gorm"
-	"gorm.io/gorm/logger"
 )
 var (
@@ -35,7 +36,7 @@ type DB struct {
 	BridgeTransactions BridgeTransactionsDB
 }
-func NewDB(dbConfig config.DBConfig) (*DB, error) {
+func NewDB(log log.Logger, dbConfig config.DBConfig) (*DB, error) {
 	retryStrategy := &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250}
 	dsn := fmt.Sprintf("host=%s dbname=%s sslmode=disable", dbConfig.Host, dbConfig.Name)
@@ -52,21 +53,22 @@ func NewDB(dbConfig config.DBConfig) (*DB, error) {
 	gormConfig := gorm.Config{
 		// The indexer will explicitly manage the transactions
 		SkipDefaultTransaction: true,
-		Logger: logger.Default.LogMode(logger.Silent),
+		Logger: newLogger(log),
 	}
 	gorm, err := retry.Do[*gorm.DB](context.Background(), 10, retryStrategy, func() (*gorm.DB, error) {
 		gorm, err := gorm.Open(postgres.Open(dsn), &gormConfig)
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to connect to database")
+			return nil, fmt.Errorf("failed to connect to database: %w", err)
 		}
 		return gorm, nil
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to connect to database after multiple retries")
+		return nil, fmt.Errorf("failed to connect to database after multiple retries: %w", err)
 	}
 	db := &DB{
 		gorm: gorm,
 		Blocks: newBlocksDB(gorm),
@@ -75,6 +77,7 @@ func NewDB(dbConfig config.DBConfig) (*DB, error) {
 		BridgeMessages: newBridgeMessagesDB(gorm),
 		BridgeTransactions: newBridgeTransactionsDB(gorm),
 	}
 	return db, nil
 }
......
package database

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/log"

	"gorm.io/gorm/logger"
)

var (
	_ logger.Interface = Logger{}

	SlowThresholdMilliseconds = 200
)

type Logger struct {
	log log.Logger
}

func newLogger(log log.Logger) Logger {
	return Logger{log.New("module", "db")}
}

func (l Logger) LogMode(lvl logger.LogLevel) logger.Interface {
	return l
}

func (l Logger) Info(ctx context.Context, msg string, data ...interface{}) {
	l.log.Info(fmt.Sprintf(msg, data...))
}

func (l Logger) Warn(ctx context.Context, msg string, data ...interface{}) {
	l.log.Warn(fmt.Sprintf(msg, data...))
}

func (l Logger) Error(ctx context.Context, msg string, data ...interface{}) {
	l.log.Error(fmt.Sprintf(msg, data...))
}

func (l Logger) Trace(ctx context.Context, begin time.Time, fc func() (sql string, rowsAffected int64), err error) {
	elapsedMs := time.Since(begin).Milliseconds()

	// omit any values for batch inserts as they can be very long
	sql, rows := fc()
	if i := strings.Index(strings.ToLower(sql), "values"); i > 0 {
		sql = fmt.Sprintf("%sVALUES (...)", sql[:i])
	}

	// operations at or above the slow threshold are logged at Warn instead of Debug
	if elapsedMs < int64(SlowThresholdMilliseconds) {
		l.log.Debug("database operation", "duration_ms", elapsedMs, "rows_affected", rows, "sql", sql)
	} else {
		l.log.Warn("database operation", "duration_ms", elapsedMs, "rows_affected", rows, "sql", sql)
	}
}
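The Trace hook above truncates batch-insert SQL and escalates slow queries from Debug to Warn. A minimal sketch of how it behaves, assuming it sits in the same `database` package; the table name, SQL, and timing below are made up for illustration:

```go
package database

import (
	"context"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/log"
)

// Illustrative only: exercises the Trace hook with a fabricated batch insert.
func TestLoggerTruncatesBatchInserts(t *testing.T) {
	l := newLogger(log.New())

	// Pretend the statement took ~50ms, below the slow threshold, so it logs at Debug.
	begin := time.Now().Add(-50 * time.Millisecond)
	l.Trace(context.Background(), begin, func() (sql string, rowsAffected int64) {
		return "INSERT INTO l1_block_headers (hash, number) VALUES ($1, $2), ($3, $4)", 2
	}, nil)
	// The logged SQL is truncated to "INSERT INTO l1_block_headers (hash, number) VALUES (...)",
	// with duration_ms and rows_affected attached as structured log fields.
}
```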
@@ -86,7 +86,8 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
 		MetricsServer: config.ServerConfig{Host: "127.0.0.1", Port: 0},
 	}
-	db, err := database.NewDB(indexerCfg.DB)
+	// Emit debug log levels
+	db, err := database.NewDB(testlog.Logger(t, log.LvlDebug).New("role", "db"), indexerCfg.DB)
 	require.NoError(t, err)
 	t.Cleanup(func() { db.Close() })
@@ -138,9 +139,13 @@ func setupTestDatabase(t *testing.T) string {
 		Password: "",
 	}
 	// NewDB will create the database schema
-	db, err := database.NewDB(dbConfig)
+	silentLog := log.New()
+	silentLog.SetHandler(log.DiscardHandler())
+	db, err := database.NewDB(silentLog, dbConfig)
 	require.NoError(t, err)
 	defer db.Close()
 	err = db.ExecuteSQLMigration("../migrations")
 	require.NoError(t, err)
......
 # Chain configures l1 chain addresses
-# Can configure them manually or use a preset l2 ChainId for known chains including OP Mainnet, OP Goerli, Base, Base Goerli, Zora, and Zora goerli
+# Can configure them manually or use a preset L2 ChainId for known chains
+# - i.e OP Mainnet, OP Goerli, Base, Base Goerli, Zora, Zora Goerli, etc
 [chain]
 preset = $INDEXER_CHAIN_PRESET
 # L1 Config
@@ -15,17 +15,12 @@ l2-polling-interval = 0
 l2-header-buffer-size = 0
 l2-confirmation-depth = 0
 [rpcs]
 l1-rpc = "${INDEXER_RPC_URL_L1}"
 l2-rpc = "${INDEXER_RPC_URL_L2}"
 [db]
 host = "$INDEXER_DB_HOST"
-# this port may be problematic once we depoly
-# the DATABASE_URL looks like this for previous services and didn't include a port
-# DATABASE_URL="postgresql://${INDEXER_DB_USER}:${INDEXER_DB_PASS}@localhost/${INDEXER_DB_NAME}?host=${INDEXER_DB_HOST}"
-# If not problematic delete these comments
 port = $INDEXER_DB_PORT
 user = "$INDEXER_DB_USER"
 password = "$INDEXER_DB_PASS"
......
@@ -109,6 +109,9 @@ type DeployConfig struct {
 	// L2GenesisRegolithTimeOffset is the number of seconds after genesis block that Regolith hard fork activates.
 	// Set it to 0 to activate at genesis. Nil to disable regolith.
 	L2GenesisRegolithTimeOffset *hexutil.Uint64 `json:"l2GenesisRegolithTimeOffset,omitempty"`
+	// L2GenesisSpanBatchTimeOffset is the number of seconds after genesis block that Span Batch hard fork activates.
+	// Set it to 0 to activate at genesis. Nil to disable SpanBatch.
+	L2GenesisSpanBatchTimeOffset *hexutil.Uint64 `json:"l2GenesisSpanBatchTimeOffset,omitempty"`
 	// L2GenesisBlockExtraData is configurable extradata. Will default to []byte("BEDROCK") if left unspecified.
 	L2GenesisBlockExtraData []byte `json:"l2GenesisBlockExtraData"`
 	// ProxyAdminOwner represents the owner of the ProxyAdmin predeploy on L2.
@@ -441,6 +444,17 @@ func (d *DeployConfig) RegolithTime(genesisTime uint64) *uint64 {
 	return &v
 }
+func (d *DeployConfig) SpanBatchTime(genesisTime uint64) *uint64 {
+	if d.L2GenesisSpanBatchTimeOffset == nil {
+		return nil
+	}
+	v := uint64(0)
+	if offset := *d.L2GenesisSpanBatchTimeOffset; offset > 0 {
+		v = genesisTime + uint64(offset)
+	}
+	return &v
+}
 // RollupConfig converts a DeployConfig to a rollup.Config
 func (d *DeployConfig) RollupConfig(l1StartBlock *types.Block, l2GenesisBlockHash common.Hash, l2GenesisBlockNumber uint64) (*rollup.Config, error) {
 	if d.OptimismPortalProxy == (common.Address{}) {
@@ -478,6 +492,7 @@ func (d *DeployConfig) RollupConfig(l1StartBlock *types.Block, l2GenesisBlockHas
 		DepositContractAddress: d.OptimismPortalProxy,
 		L1SystemConfigAddress: d.SystemConfigProxy,
 		RegolithTime: d.RegolithTime(l1StartBlock.Time()),
+		SpanBatchTime: d.SpanBatchTime(l1StartBlock.Time()),
 	}, nil
 }
......
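To illustrate the offset semantics documented on `L2GenesisSpanBatchTimeOffset` above (0 activates at genesis, nil disables the fork), here is a minimal sketch of how the new `SpanBatchTime` helper maps the offset to an activation timestamp; the genesis time is a made-up value:

```go
package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	genesisTime := uint64(1_700_000_000) // made-up L1 start timestamp

	offset := hexutil.Uint64(3600)
	d := &genesis.DeployConfig{L2GenesisSpanBatchTimeOffset: &offset}
	fmt.Println(*d.SpanBatchTime(genesisTime)) // 1700003600: activates one hour after genesis

	zero := hexutil.Uint64(0)
	d.L2GenesisSpanBatchTimeOffset = &zero
	fmt.Println(*d.SpanBatchTime(genesisTime)) // 0: activates at genesis

	d.L2GenesisSpanBatchTimeOffset = nil
	fmt.Println(d.SpanBatchTime(genesisTime)) // <nil>: span batches disabled
}
```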
@@ -30,7 +30,6 @@ type ClaimLoader interface {
 type Agent struct {
 	metrics metrics.Metricer
-	fdgAddr common.Address
 	solver *solver.GameSolver
 	loader ClaimLoader
 	responder Responder
@@ -40,10 +39,9 @@ type Agent struct {
 	log log.Logger
 }
-func NewAgent(m metrics.Metricer, addr common.Address, loader ClaimLoader, maxDepth int, trace types.TraceProvider, responder Responder, updater types.OracleUpdater, agreeWithProposedOutput bool, log log.Logger) *Agent {
+func NewAgent(m metrics.Metricer, loader ClaimLoader, maxDepth int, trace types.TraceProvider, responder Responder, updater types.OracleUpdater, agreeWithProposedOutput bool, log log.Logger) *Agent {
 	return &Agent{
 		metrics: m,
-		fdgAddr: addr,
 		solver: solver.NewGameSolver(maxDepth, trace),
 		loader: loader,
 		responder: responder,
@@ -198,7 +196,6 @@ func (a *Agent) newGameFromContracts(ctx context.Context) (types.Game, error) {
 	if len(claims) == 0 {
 		return nil, errors.New("no claims")
 	}
-	a.metrics.RecordGameClaimCount(a.fdgAddr.String(), len(claims))
 	game := types.NewGameState(a.agreeWithProposedOutput, claims[0], uint64(a.maxDepth))
 	if err := game.PutAll(claims[1:]); err != nil {
 		return nil, fmt.Errorf("failed to load claims into the local state: %w", err)
......
@@ -5,16 +5,15 @@ import (
 	"errors"
 	"testing"
+	"github.com/stretchr/testify/require"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum-optimism/optimism/op-challenger/game/fault/test"
 	"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/alphabet"
 	"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
 	gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types"
 	"github.com/ethereum-optimism/optimism/op-challenger/metrics"
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/log"
-	"github.com/stretchr/testify/require"
 	"github.com/ethereum-optimism/optimism/op-service/testlog"
 )
@@ -112,12 +111,11 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) {
 func setupTestAgent(t *testing.T, agreeWithProposedOutput bool) (*Agent, *stubClaimLoader, *stubResponder) {
 	logger := testlog.Logger(t, log.LvlInfo)
 	claimLoader := &stubClaimLoader{}
-	addr := common.HexToAddress("0x1234")
 	depth := 4
 	trace := alphabet.NewTraceProvider("abcd", uint64(depth))
 	responder := &stubResponder{}
 	updater := &stubUpdater{}
-	agent := NewAgent(metrics.NoopMetrics, addr, claimLoader, depth, trace, responder, updater, agreeWithProposedOutput, logger)
+	agent := NewAgent(metrics.NoopMetrics, claimLoader, depth, trace, responder, updater, agreeWithProposedOutput, logger)
 	return agent, claimLoader, responder
 }
......
@@ -106,7 +106,7 @@ func NewGamePlayer(
 	}
 	return &GamePlayer{
-		act: NewAgent(m, addr, loader, int(gameDepth), provider, responder, updater, cfg.AgreeWithProposedOutput, logger).Act,
+		act: NewAgent(m, loader, int(gameDepth), provider, responder, updater, cfg.AgreeWithProposedOutput, logger).Act,
 		agreeWithProposedOutput: cfg.AgreeWithProposedOutput,
 		loader: loader,
 		logger: logger,
......
@@ -25,8 +25,6 @@ type Metricer interface {
 	RecordGameMove()
 	RecordCannonExecutionTime(t float64)
-	RecordGameClaimCount(addr string, count int)
 	RecordGamesStatus(inProgress, defenderWon, challengerWon int)
 	RecordGameUpdateScheduled()
@@ -55,8 +53,6 @@ type Metrics struct {
 	cannonExecutionTime prometheus.Histogram
-	gameClaimCount prometheus.GaugeVec
 	trackedGames prometheus.GaugeVec
 	inflightGames prometheus.Gauge
 }
@@ -111,13 +107,6 @@ func NewMetrics() *Metrics {
 			[]float64{1.0, 10.0},
 			prometheus.ExponentialBuckets(30.0, 2.0, 14)...),
 		}),
-		gameClaimCount: *factory.NewGaugeVec(prometheus.GaugeOpts{
-			Namespace: Namespace,
-			Name: "game_claim_count",
-			Help: "Number of claims in the game",
-		}, []string{
-			"game_address",
-		}),
 		trackedGames: *factory.NewGaugeVec(prometheus.GaugeOpts{
 			Namespace: Namespace,
 			Name: "tracked_games",
@@ -190,10 +179,6 @@ func (m *Metrics) DecIdleExecutors() {
 	m.executors.WithLabelValues("idle").Dec()
 }
-func (m *Metrics) RecordGameClaimCount(addr string, count int) {
-	m.gameClaimCount.With(prometheus.Labels{"game_address": addr}).Set(float64(count))
-}
 func (m *Metrics) RecordGamesStatus(inProgress, defenderWon, challengerWon int) {
 	m.trackedGames.WithLabelValues("in_progress").Set(float64(inProgress))
 	m.trackedGames.WithLabelValues("defender_won").Set(float64(defenderWon))
......
@@ -27,5 +27,3 @@ func (*NoopMetricsImpl) IncActiveExecutors() {}
 func (*NoopMetricsImpl) DecActiveExecutors() {}
 func (*NoopMetricsImpl) IncIdleExecutors() {}
 func (*NoopMetricsImpl) DecIdleExecutors() {}
-func (*NoopMetricsImpl) RecordGameClaimCount(addr string, count int) {}
@@ -58,6 +58,7 @@ func MakeDeployParams(t require.TestingT, tp *TestParams) *DeployParams {
 	deployConfig.ChannelTimeout = tp.ChannelTimeout
 	deployConfig.L1BlockTime = tp.L1BlockTime
 	deployConfig.L2GenesisRegolithTimeOffset = nil
+	deployConfig.L2GenesisSpanBatchTimeOffset = SpanBatchTimeOffset()
 	require.NoError(t, deployConfig.Check())
 	require.Equal(t, addresses.Batcher, deployConfig.BatchSenderAddress)
@@ -156,6 +157,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
 		DepositContractAddress: deployConf.OptimismPortalProxy,
 		L1SystemConfigAddress: deployConf.SystemConfigProxy,
 		RegolithTime: deployConf.RegolithTime(uint64(deployConf.L1GenesisBlockTimestamp)),
+		SpanBatchTime: deployConf.SpanBatchTime(uint64(deployConf.L1GenesisBlockTimestamp)),
 	}
 	require.NoError(t, rollupCfg.Check())
@@ -181,3 +183,11 @@ func SystemConfigFromDeployConfig(deployConfig *genesis.DeployConfig) eth.System
 		GasLimit: uint64(deployConfig.L2GenesisBlockGasLimit),
 	}
 }
+func SpanBatchTimeOffset() *hexutil.Uint64 {
+	if os.Getenv("OP_E2E_USE_SPAN_BATCH") == "true" {
+		offset := hexutil.Uint64(0)
+		return &offset
+	}
+	return nil
+}
@@ -85,6 +85,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
 	require.NoError(t, err)
 	deployConfig := config.DeployConfig.Copy()
 	deployConfig.L1GenesisBlockTimestamp = hexutil.Uint64(time.Now().Unix())
+	deployConfig.L2GenesisSpanBatchTimeOffset = e2eutils.SpanBatchTimeOffset()
 	require.NoError(t, deployConfig.Check(), "Deploy config is invalid, do you need to run make devnet-allocs?")
 	l1Deployments := config.L1Deployments.Copy()
 	require.NoError(t, l1Deployments.Check())
@@ -418,6 +419,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
 		DepositContractAddress: cfg.DeployConfig.OptimismPortalProxy,
 		L1SystemConfigAddress: cfg.DeployConfig.SystemConfigProxy,
 		RegolithTime: cfg.DeployConfig.RegolithTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
+		SpanBatchTime: cfg.DeployConfig.SpanBatchTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
 		ProtocolVersionsAddress: cfg.L1Deployments.ProtocolVersionsProxy,
 	}
 }
......
@@ -79,6 +79,8 @@ type Config struct {
 	// Active if CanyonTime != nil && L2 block timestamp >= *CanyonTime, inactive otherwise.
 	CanyonTime *uint64 `json:"canyon_time,omitempty"`
+	SpanBatchTime *uint64 `json:"span_batch_time,omitempty"`
 	// Note: below addresses are part of the block-derivation process,
 	// and required to be the same network-wide to stay in consensus.
@@ -268,6 +270,10 @@ func (c *Config) IsCanyon(timestamp uint64) bool {
 	return c.CanyonTime != nil && timestamp >= *c.CanyonTime
 }
+func (c *Config) IsSpanBatch(timestamp uint64) bool {
+	return c.SpanBatchTime != nil && timestamp >= *c.SpanBatchTime
+}
 // Description outputs a banner describing the important parts of rollup configuration in a human-readable form.
 // Optionally provide a mapping of L2 chain IDs to network names to label the L2 chain with if not unknown.
 // The config should be config.Check()-ed before creating a description.
@@ -296,6 +302,7 @@ func (c *Config) Description(l2Chains map[string]string) string {
 	banner += "Post-Bedrock Network Upgrades (timestamp based):\n"
 	banner += fmt.Sprintf(" - Regolith: %s\n", fmtForkTimeOrUnset(c.RegolithTime))
 	banner += fmt.Sprintf(" - Canyon: %s\n", fmtForkTimeOrUnset(c.CanyonTime))
+	banner += fmt.Sprintf(" - SpanBatch: %s\n", fmtForkTimeOrUnset(c.SpanBatchTime))
 	// Report the protocol version
 	banner += fmt.Sprintf("Node supports up to OP-Stack Protocol Version: %s\n", OPStackSupport)
 	return banner
@@ -321,7 +328,9 @@ func (c *Config) LogDescription(log log.Logger, l2Chains map[string]string) {
 		"l1_network", networkL1, "l2_start_time", c.Genesis.L2Time, "l2_block_hash", c.Genesis.L2.Hash.String(),
 		"l2_block_number", c.Genesis.L2.Number, "l1_block_hash", c.Genesis.L1.Hash.String(),
 		"l1_block_number", c.Genesis.L1.Number, "regolith_time", fmtForkTimeOrUnset(c.RegolithTime),
-		"canyon_time", fmtForkTimeOrUnset(c.CanyonTime))
+		"canyon_time", fmtForkTimeOrUnset(c.CanyonTime),
+		"span_batch_time", fmtForkTimeOrUnset(c.SpanBatchTime),
+	)
 }
 func fmtForkTimeOrUnset(v *uint64) string {
......
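The new `SpanBatchTime` field follows the same timestamp-based activation pattern as `CanyonTime` noted in the struct comment above. A small sketch of the `IsSpanBatch` check (the activation timestamp is a made-up value):

```go
package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
)

func main() {
	spanBatchTime := uint64(1_700_000_000) // made-up activation timestamp
	cfg := &rollup.Config{SpanBatchTime: &spanBatchTime}

	fmt.Println(cfg.IsSpanBatch(spanBatchTime - 1)) // false: block timestamp before activation
	fmt.Println(cfg.IsSpanBatch(spanBatchTime))     // true: active at and after the fork time

	cfg.SpanBatchTime = nil
	fmt.Println(cfg.IsSpanBatch(spanBatchTime)) // false: a nil fork time means the fork is disabled
}
```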
@@ -43,6 +43,7 @@
   "eip1559Elasticity": 6,
   "l1GenesisBlockTimestamp": "0x64c811bf",
   "l2GenesisRegolithTimeOffset": "0x0",
+  "l2GenesisSpanBatchTimeOffset": "0x0",
   "faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98",
   "faultGameMaxDepth": 30,
   "faultGameMaxDuration": 1200,
......
@@ -254,8 +254,15 @@ the potential opponent's chess clock has run out. Because each claim is the root
 truth percolates upwards towards the root claim by resolving each individual sub-game bottom-up.
 In a game like the one below, we can resolve up from the deepest subgames. Here, we'd resolve `b0`
-to uncountered and `a0` to countered by walking up from their deepest children, and now that all children of the
-root game are resolved, we can resolve the root to countered due to `b0` remaining uncountered.
+to uncountered and `a0` to countered by walking up from their deepest children, and once all children of the
+root game are recursively resolved, we can resolve the root to countered due to `b0` remaining uncountered.
+<!-- https://gist.github.com/clabby/e98bdd80ef3c038424f3372b70e34e08 -->
+<!-- markdownlint-disable no-inline-html -->
+<https://github.com/ethereum-optimism/optimism/assets/8406232/d2b708a0-539e-439d-96bd-c2f66f3a45f8>
+Another example is this game, which has a slightly different structure. Here, the root claim will also
+be countered due to `b0` remaining uncountered.
 <!--
 digraph G {
@@ -275,7 +282,6 @@ digraph G {
   key:i1:e -> key2:i1:w [color=green]
   key:i2:e -> key2:i2:w [color=coral1, style=dotted]
 }
 subgraph cluster_0 {
   color=cornflowerblue;
   node [style=filled];
@@ -284,37 +290,28 @@
 subgraph cluster_0_0 {
   label = "subgame #5";
   color=purple;
   a1 -> a2 [color=green];
   a2 -> a1 [color=coral1, style=dotted];
   subgraph cluster_0_1 {
     label = "subgame #6";
     color=magenta;
     a2 -> a3 [color=green];
     a3 -> a2 [color=coral1, style=dotted];
     a2 -> a4 [color=green];
     a4 -> a2 [color=coral1, style=dotted];
     subgraph cluster_0_2 {
       label = "subgame #7";
       color=lightpink;
       a3
     }
     subgraph cluster_0_3 {
       label = "subgame #8";
       color=lightpink;
       a4 -> a5 [color=green];
       a5 -> a4 [color=coral1, style=dotted];
       subgraph cluster_0_4 {
         label = "subgame #9";
         color=palegreen;
         a5
       }
     }
@@ -322,40 +319,33 @@
   }
   label = "subgame #4";
 }
 subgraph cluster_1 {
   node [style=filled];
   label = "subgame #1";
   color=cornflowerblue
   b0 -> b1 [color=green];
   b1 -> b0 [color=coral1, style=dotted];
   subgraph cluster_1_0 {
     label = "subgame #2";
     color=purple;
     b1 -> b2 [color=green];
     b2 -> b1 [color=coral1, style=dotted];
     subgraph cluster_1_1 {
       label = "subgame #3";
       edge [style=invis]
       color=magenta;
       b2
     }
   }
 }
 Root -> a0 [color=green];
 Root -> b0 [color=green];
 a0 -> Root [color=coral1, style=dotted];
 b0 -> Root [color=coral1, style=dotted];
 Root [shape=Mdiamond];
 }
 -->
 <!-- markdownlint-disable no-inline-html -->
 <p align="center">
 <img src="https://github.com/ethereum-optimism/optimism/assets/8406232/9b20ba8d-0b64-47b3-9962-5533f7eb4ef7" width=60%>
......
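The resolution prose in the spec hunk above boils down to one rule: a claim resolves to countered exactly when at least one of its children resolves to uncountered, evaluated from the deepest sub-games upward. A simplified sketch of that rule follows; it is illustrative only, ignores chess clocks, and is not the on-chain FaultDisputeGame logic:

```go
package main

import "fmt"

// Claim is a node in a simplified claim tree; Children are the claims that attack it.
type Claim struct {
	Name     string
	Children []*Claim
}

// resolve reports whether a claim ends up uncountered. Sub-games are resolved
// bottom-up: a claim is countered as soon as any of its children resolves to uncountered.
func resolve(c *Claim) bool {
	for _, child := range c.Children {
		if resolve(child) {
			return false // an uncountered child counters this claim
		}
	}
	return true
}

func main() {
	// Loosely mirrors the example above: a0 is countered by an uncountered child of its own,
	// b0 has no counter, so the root resolves to countered.
	a0 := &Claim{Name: "a0", Children: []*Claim{{Name: "a1"}}}
	b0 := &Claim{Name: "b0"}
	root := &Claim{Name: "Root", Children: []*Claim{a0, b0}}

	fmt.Println("root uncountered?", resolve(root)) // false: b0 remains uncountered
}
```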