Commit 6ebb3aeb authored by mergify[bot], committed by GitHub

Merge branch 'develop' into aj/gossip-float

parents 23e2219c f77a83b8
# indexer/cmd/indexer-refresh
Entrypoint for the new WIP indexer. After the project is deployed and stable, we will delete the [indexer/main.go](../indexer/main.go) file and move [indexer-refresh/main.go](./main.go) into its place.
package main
import (
"fmt"
"os"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/indexer"
"github.com/ethereum-optimism/optimism/indexer/flags"
)
var (
GitVersion = ""
GitCommit = ""
GitDate = ""
)
func main() {
// Set up logger with a default INFO level in case we fail to parse flags.
// Otherwise the final critical log won't show what the parsing error was.
log.Root().SetHandler(
log.LvlFilterHandler(
log.LvlInfo,
log.StreamHandler(os.Stdout, log.TerminalFormat(true)),
),
)
app := cli.NewApp()
app.Flags = flags.Flags
app.Version = fmt.Sprintf("%s-%s", GitVersion, params.VersionWithCommit(GitCommit, GitDate))
app.Name = "indexer"
app.Usage = "Indexer Service"
app.Description = "Service for indexing deposits and withdrawals " +
"by account on L1 and L2"
app.Action = indexer.Main(GitVersion)
err := app.Run(os.Args)
if err != nil {
log.Crit("Application failed", "message", err)
}
}
@@ -8,8 +8,8 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/urfave/cli"
-	"github.com/ethereum-optimism/optimism/indexer"
 	"github.com/ethereum-optimism/optimism/indexer/flags"
+	"github.com/ethereum-optimism/optimism/indexer/legacy"
 )
 var (
@@ -36,7 +36,7 @@ func main() {
 	app.Description = "Service for indexing deposits and withdrawals " +
 		"by account on L1 and L2"
-	app.Action = indexer.Main(GitVersion)
+	app.Action = legacy.Main(GitVersion)
 	err := app.Run(os.Args)
 	if err != nil {
 		log.Crit("Application failed", "message", err)
...
@@ -38,13 +38,27 @@ services:
       - INDEXER_REST_HOSTNAME=0.0.0.0
       - INDEXER_REST_PORT=8080
       - INDEXER_BEDROCK_L1_STANDARD_BRIDGE=0
-      - INDEXER_BEDROCK_L1_STANDARD_BRIDGE=0x636Af16bf2f682dD3109e60102b8E1A089FedAa8
-      - INDEXER_BEDROCK_OPTIMISM_PORTAL=0xB7040fd32359688346A3D1395a42114cf8E3b9b2
+      - INDEXER_BEDROCK_L1_STANDARD_BRIDGE=${INDEXER_BEDROCK_L1_STANDARD_BRIDGE:-0x636Af16bf2f682dD3109e60102b8E1A089FedAa8}
+      - INDEXER_BEDROCK_OPTIMISM_PORTAL=${INDEXER_BEDROCK_OPTIMISM_PORTAL:-0xB7040fd32359688346A3D1395a42114cf8E3b9b2}
+      - INDEXER_L1_ADDRESS_MANAGER_ADDRESS=${INDEXER_L1_ADDRESS_MANAGER_ADDRESS:-0xdE1FCfB0851916CA5101820A69b13a4E276bd81F}
     ports:
       - 8080:8080
     depends_on:
       postgres:
         condition: service_healthy
+  ui:
+    build:
+      context: ..
+      dockerfile: indexer/ui/Dockerfile
+    environment:
+      - DATABASE_URL=${DATABASE_URL:-postgresql://db_username:db_password@postgres:5432/db_name}
+    ports:
+      - 5555:5555
+    healthcheck:
+      test: wget localhost:5555 -q -O - > /dev/null 2>&1
+    depends_on:
+      postgres:
+        condition: service_healthy
 volumes:
   postgres_data:
@@ -2,3 +2,5 @@
 INDEXER_L1_ETH_RPC=FILL_ME_IN
 INDEXER_L2_ETH_RPC=FILL_ME_IN
+# Fill in to use prisma studio ui with a db other than the default
+# DATABASE_URL=FILL_ME_IN
@@ -2,28 +2,12 @@ package indexer
 import (
     "context"
-    "fmt"
-    "math/big"
-    "net"
-    "net/http"
     "os"
-    "strconv"
     "time"
-    "github.com/ethereum-optimism/optimism/indexer/services"
-    "github.com/ethereum/go-ethereum/common"
-    "github.com/ethereum-optimism/optimism/indexer/metrics"
-    "github.com/ethereum-optimism/optimism/indexer/server"
-    "github.com/rs/cors"
-    database "github.com/ethereum-optimism/optimism/indexer/db"
-    "github.com/ethereum-optimism/optimism/indexer/services/l1"
-    "github.com/ethereum-optimism/optimism/indexer/services/l2"
     "github.com/ethereum/go-ethereum/ethclient"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/rpc"
-    "github.com/gorilla/mux"
     "github.com/urfave/cli"
 )
@@ -39,30 +23,7 @@ const (
 // e.g. GitVersion, to be captured and used once the function is executed.
 func Main(gitVersion string) func(ctx *cli.Context) error {
     return func(ctx *cli.Context) error {
-        cfg, err := NewConfig(ctx)
-        if err != nil {
-            return err
-        }
         log.Info("Initializing indexer")
-        indexer, err := NewIndexer(cfg)
-        if err != nil {
-            log.Error("Unable to create indexer", "error", err)
-            return err
-        }
-        log.Info("Starting indexer")
-        if err := indexer.Start(); err != nil {
-            return err
-        }
-        defer indexer.Stop()
-        log.Info("Indexer started")
-        <-(chan struct{})(nil)
         return nil
     }
 }
@@ -70,35 +31,23 @@ func Main(gitVersion string) func(ctx *cli.Context) error {
 // Indexer is a service that configures the necessary resources for
 // running the Sync and BlockHandler sub-services.
 type Indexer struct {
-    ctx context.Context
-    cfg Config
     l1Client *ethclient.Client
     l2Client *ethclient.Client
-    l1IndexingService *l1.Service
-    l2IndexingService *l2.Service
-    airdropService *services.Airdrop
-    router *mux.Router
-    metrics *metrics.Metrics
-    db *database.Database
-    server *http.Server
 }
 // NewIndexer initializes the Indexer, gathering any resources
 // that will be needed by the TxIndexer and StateIndexer
 // sub-services.
-func NewIndexer(cfg Config) (*Indexer, error) {
+func NewIndexer() (*Indexer, error) {
     ctx := context.Background()
-    var logHandler log.Handler
-    if cfg.LogTerminal {
-        logHandler = log.StreamHandler(os.Stdout, log.TerminalFormat(true))
-    } else {
-        logHandler = log.StreamHandler(os.Stdout, log.JSONFormat())
-    }
-    logLevel, err := log.LvlFromString(cfg.LogLevel)
+    var logHandler log.Handler = log.StreamHandler(os.Stdout, log.TerminalFormat(true))
+    // TODO https://linear.app/optimism/issue/DX-55/api-implement-rest-api-with-mocked-data
+    // do json format too
+    // TODO https://linear.app/optimism/issue/DX-55/api-implement-rest-api-with-mocked-data
+    // pass in loglevel from config
+    // logHandler = log.StreamHandler(os.Stdout, log.JSONFormat())
+    logLevel, err := log.LvlFromString("info")
     if err != nil {
         return nil, err
     }
@@ -107,182 +56,43 @@ func NewIndexer(cfg Config) (*Indexer, error) {
     // Connect to L1 and L2 providers. Perform these last since they are the
     // most expensive.
-    l1Client, rawl1Client, err := dialEthClientWithTimeout(ctx, cfg.L1EthRpc)
-    if err != nil {
-        return nil, err
-    }
-    l2Client, l2RPC, err := dialEthClientWithTimeout(ctx, cfg.L2EthRpc)
-    if err != nil {
-        return nil, err
-    }
-    m := metrics.NewMetrics(nil)
-    if cfg.MetricsServerEnable {
-        go func() {
-            _, err := m.Serve(cfg.MetricsHostname, cfg.MetricsPort)
-            if err != nil {
-                log.Error("metrics server failed to start", "err", err)
-            }
-        }()
-        log.Info("metrics server enabled", "host", cfg.MetricsHostname, "port", cfg.MetricsPort)
-    }
-    dsn := fmt.Sprintf("host=%s port=%d dbname=%s sslmode=disable",
-        cfg.DBHost, cfg.DBPort, cfg.DBName)
-    if cfg.DBUser != "" {
-        dsn += fmt.Sprintf(" user=%s", cfg.DBUser)
-    }
-    if cfg.DBPassword != "" {
-        dsn += fmt.Sprintf(" password=%s", cfg.DBPassword)
-    }
-    db, err := database.NewDatabase(dsn)
-    if err != nil {
-        return nil, err
-    }
-    var addrManager services.AddressManager
-    if cfg.Bedrock {
-        addrManager, err = services.NewBedrockAddresses(
-            l1Client,
-            cfg.BedrockL1StandardBridgeAddress,
-            cfg.BedrockOptimismPortalAddress,
-        )
-    } else {
-        addrManager, err = services.NewLegacyAddresses(l1Client, common.HexToAddress(cfg.L1AddressManagerAddress))
-    }
+    // TODO https://linear.app/optimism/issue/DX-55/api-implement-rest-api-with-mocked-data
+    // pass in rpc url from config
+    l1Client, _, err := dialEthClientWithTimeout(ctx, "http://localhost:8545")
     if err != nil {
         return nil, err
     }
-    l1IndexingService, err := l1.NewService(l1.ServiceConfig{
-        Context: ctx,
-        Metrics: m,
-        L1Client: l1Client,
-        RawL1Client: rawl1Client,
-        ChainID: new(big.Int).SetUint64(cfg.ChainID),
-        AddressManager: addrManager,
-        DB: db,
-        ConfDepth: cfg.L1ConfDepth,
-        MaxHeaderBatchSize: cfg.MaxHeaderBatchSize,
-        StartBlockNumber: cfg.L1StartBlockNumber,
-        Bedrock: cfg.Bedrock,
-    })
+    // TODO https://linear.app/optimism/issue/DX-55/api-implement-rest-api-with-mocked-data
+    // pass in rpc url from config
+    l2Client, _, err := dialEthClientWithTimeout(ctx, "http://localhost:9545")
     if err != nil {
         return nil, err
     }
-    l2IndexingService, err := l2.NewService(l2.ServiceConfig{
-        Context: ctx,
-        Metrics: m,
-        L2RPC: l2RPC,
-        L2Client: l2Client,
-        DB: db,
-        ConfDepth: cfg.L2ConfDepth,
-        MaxHeaderBatchSize: cfg.MaxHeaderBatchSize,
-        StartBlockNumber: uint64(0),
-        Bedrock: cfg.Bedrock,
-    })
     if err != nil {
         return nil, err
     }
     return &Indexer{
-        ctx: ctx,
-        cfg: cfg,
-        l1Client: l1Client,
-        l2Client: l2Client,
-        l1IndexingService: l1IndexingService,
-        l2IndexingService: l2IndexingService,
-        airdropService: services.NewAirdrop(db, m),
-        router: mux.NewRouter(),
-        metrics: m,
-        db: db,
+        l1Client: l1Client,
+        l2Client: l2Client,
     }, nil
 }
 // Serve spins up a REST API server at the given hostname and port.
 func (b *Indexer) Serve() error {
-    c := cors.New(cors.Options{
-        AllowedOrigins: []string{"*"},
-    })
-    b.router.HandleFunc("/v1/l1/status", b.l1IndexingService.GetIndexerStatus).Methods("GET")
-    b.router.HandleFunc("/v1/l2/status", b.l2IndexingService.GetIndexerStatus).Methods("GET")
-    b.router.HandleFunc("/v1/deposits/0x{address:[a-fA-F0-9]{40}}", b.l1IndexingService.GetDeposits).Methods("GET")
-    b.router.HandleFunc("/v1/withdrawal/0x{hash:[a-fA-F0-9]{64}}", b.l2IndexingService.GetWithdrawalBatch).Methods("GET")
-    b.router.HandleFunc("/v1/withdrawals/0x{address:[a-fA-F0-9]{40}}", b.l2IndexingService.GetWithdrawals).Methods("GET")
-    b.router.HandleFunc("/v1/airdrops/0x{address:[a-fA-F0-9]{40}}", b.airdropService.GetAirdrop)
-    b.router.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
-        w.WriteHeader(200)
-        _, err := w.Write([]byte("OK"))
-        if err != nil {
-            log.Error("Error handling /healthz", "error", err)
-        }
-    })
-    middleware := server.LoggingMiddleware(b.metrics, log.New("service", "server"))
-    port := strconv.FormatUint(b.cfg.RESTPort, 10)
-    addr := net.JoinHostPort(b.cfg.RESTHostname, port)
-    b.server = &http.Server{
-        Addr: addr,
-        Handler: middleware(c.Handler(b.router)),
-    }
-    errCh := make(chan error, 1)
-    go func() {
-        errCh <- b.server.ListenAndServe()
-    }()
-    // Capture server startup errors
-    <-time.After(10 * time.Millisecond)
-    select {
-    case err := <-errCh:
-        return err
-    default:
-        log.Info("indexer REST server listening on", "addr", addr)
-        return nil
-    }
+    return nil
 }
 // Start starts the starts the indexing service on L1 and L2 chains and also
 // starts the REST server.
 func (b *Indexer) Start() error {
-    if b.cfg.DisableIndexer {
-        log.Info("indexer disabled, only serving data")
-    } else {
-        err := b.l1IndexingService.Start()
-        if err != nil {
-            return err
-        }
-        err = b.l2IndexingService.Start()
-        if err != nil {
-            return err
-        }
-    }
-    return b.Serve()
+    return nil
 }
 // Stop stops the indexing service on L1 and L2 chains.
 func (b *Indexer) Stop() {
-    b.db.Close()
-    if b.server != nil {
-        // background context here so it waits for
-        // conns to close
-        _ = b.server.Shutdown(context.Background())
-    }
-    if !b.cfg.DisableIndexer {
-        b.l1IndexingService.Stop()
-        b.l2IndexingService.Stop()
-    }
 }
 // dialL1EthClientWithTimeout attempts to dial the L1 provider using the
...
@@ -20,8 +20,8 @@ import (
 	"github.com/ethereum/go-ethereum/rpc"
 	"github.com/stretchr/testify/require"
-	"github.com/ethereum-optimism/optimism/indexer"
 	"github.com/ethereum-optimism/optimism/indexer/db"
+	"github.com/ethereum-optimism/optimism/indexer/legacy"
 	"github.com/ethereum-optimism/optimism/indexer/services/l1"
 	"github.com/ethereum-optimism/optimism/op-bindings/bindings"
 	"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
@@ -60,7 +60,7 @@ func TestBedrockIndexer(t *testing.T) {
 	l2Opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.Alice, cfg.L2ChainIDBig())
 	require.NoError(t, err)
-	idxrCfg := indexer.Config{
+	idxrCfg := legacy.Config{
 		ChainID: cfg.DeployConfig.L1ChainID,
 		L1EthRpc: sys.Nodes["l1"].HTTPEndpoint(),
 		L2EthRpc: sys.Nodes["sequencer"].HTTPEndpoint(),
@@ -83,7 +83,7 @@ func TestBedrockIndexer(t *testing.T) {
 		BedrockL1StandardBridgeAddress: cfg.DeployConfig.L1StandardBridgeProxy,
 		BedrockOptimismPortalAddress: cfg.DeployConfig.OptimismPortalProxy,
 	}
-	idxr, err := indexer.NewIndexer(idxrCfg)
+	idxr, err := legacy.NewIndexer(idxrCfg)
 	require.NoError(t, err)
 	errCh := make(chan error, 1)
...
-package indexer
+package legacy
 import (
 	"errors"
...
-package indexer_test
+package legacy_test
 import (
 	"fmt"
 	"testing"
-	indexer "github.com/ethereum-optimism/optimism/indexer"
+	legacy "github.com/ethereum-optimism/optimism/indexer/legacy"
 	"github.com/stretchr/testify/require"
 )
 var validateConfigTests = []struct {
 	name string
-	cfg indexer.Config
+	cfg legacy.Config
 	expErr error
 }{
 	{
 		name: "bad log level",
-		cfg: indexer.Config{
+		cfg: legacy.Config{
 			LogLevel: "unknown",
 		},
 		expErr: fmt.Errorf("unknown level: unknown"),
@@ -27,7 +27,7 @@ var validateConfigTests = []struct {
 func TestValidateConfig(t *testing.T) {
 	for _, test := range validateConfigTests {
 		t.Run(test.name, func(t *testing.T) {
-			err := indexer.ValidateConfig(&test.cfg)
+			err := legacy.ValidateConfig(&test.cfg)
 			require.Equal(t, err, test.expErr)
 		})
 	}
...
package legacy
import (
"context"
"fmt"
"math/big"
"net"
"net/http"
"os"
"strconv"
"time"
"github.com/ethereum-optimism/optimism/indexer/services"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/indexer/metrics"
"github.com/ethereum-optimism/optimism/indexer/server"
"github.com/rs/cors"
database "github.com/ethereum-optimism/optimism/indexer/db"
"github.com/ethereum-optimism/optimism/indexer/services/l1"
"github.com/ethereum-optimism/optimism/indexer/services/l2"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/gorilla/mux"
"github.com/urfave/cli"
)
const (
// defaultDialTimeout is the default duration the service will wait on
// startup to make a connection to either the L1 or L2 backends.
defaultDialTimeout = 5 * time.Second
)
// Main is the entrypoint into the indexer service. This method returns
// a closure that executes the service and blocks until the service exits. The
// use of a closure allows the parameters bound to the top-level main package,
// e.g. GitVersion, to be captured and used once the function is executed.
func Main(gitVersion string) func(ctx *cli.Context) error {
return func(ctx *cli.Context) error {
cfg, err := NewConfig(ctx)
if err != nil {
return err
}
log.Info("Initializing indexer")
indexer, err := NewIndexer(cfg)
if err != nil {
log.Error("Unable to create indexer", "error", err)
return err
}
log.Info("Starting indexer")
if err := indexer.Start(); err != nil {
return err
}
defer indexer.Stop()
log.Info("Indexer started")
<-(chan struct{})(nil)
return nil
}
}
// Indexer is a service that configures the necessary resources for
// running the Sync and BlockHandler sub-services.
type Indexer struct {
ctx context.Context
cfg Config
l1Client *ethclient.Client
l2Client *ethclient.Client
l1IndexingService *l1.Service
l2IndexingService *l2.Service
airdropService *services.Airdrop
router *mux.Router
metrics *metrics.Metrics
db *database.Database
server *http.Server
}
// NewIndexer initializes the Indexer, gathering any resources
// that will be needed by the TxIndexer and StateIndexer
// sub-services.
func NewIndexer(cfg Config) (*Indexer, error) {
ctx := context.Background()
var logHandler log.Handler
if cfg.LogTerminal {
logHandler = log.StreamHandler(os.Stdout, log.TerminalFormat(true))
} else {
logHandler = log.StreamHandler(os.Stdout, log.JSONFormat())
}
logLevel, err := log.LvlFromString(cfg.LogLevel)
if err != nil {
return nil, err
}
log.Root().SetHandler(log.LvlFilterHandler(logLevel, logHandler))
// Connect to L1 and L2 providers. Perform these last since they are the
// most expensive.
l1Client, rawl1Client, err := dialEthClientWithTimeout(ctx, cfg.L1EthRpc)
if err != nil {
return nil, err
}
l2Client, l2RPC, err := dialEthClientWithTimeout(ctx, cfg.L2EthRpc)
if err != nil {
return nil, err
}
m := metrics.NewMetrics(nil)
if cfg.MetricsServerEnable {
go func() {
_, err := m.Serve(cfg.MetricsHostname, cfg.MetricsPort)
if err != nil {
log.Error("metrics server failed to start", "err", err)
}
}()
log.Info("metrics server enabled", "host", cfg.MetricsHostname, "port", cfg.MetricsPort)
}
dsn := fmt.Sprintf("host=%s port=%d dbname=%s sslmode=disable",
cfg.DBHost, cfg.DBPort, cfg.DBName)
if cfg.DBUser != "" {
dsn += fmt.Sprintf(" user=%s", cfg.DBUser)
}
if cfg.DBPassword != "" {
dsn += fmt.Sprintf(" password=%s", cfg.DBPassword)
}
db, err := database.NewDatabase(dsn)
if err != nil {
return nil, err
}
var addrManager services.AddressManager
if cfg.Bedrock {
addrManager, err = services.NewBedrockAddresses(
l1Client,
cfg.BedrockL1StandardBridgeAddress,
cfg.BedrockOptimismPortalAddress,
)
} else {
addrManager, err = services.NewLegacyAddresses(l1Client, common.HexToAddress(cfg.L1AddressManagerAddress))
}
if err != nil {
return nil, err
}
l1IndexingService, err := l1.NewService(l1.ServiceConfig{
Context: ctx,
Metrics: m,
L1Client: l1Client,
RawL1Client: rawl1Client,
ChainID: new(big.Int).SetUint64(cfg.ChainID),
AddressManager: addrManager,
DB: db,
ConfDepth: cfg.L1ConfDepth,
MaxHeaderBatchSize: cfg.MaxHeaderBatchSize,
StartBlockNumber: cfg.L1StartBlockNumber,
Bedrock: cfg.Bedrock,
})
if err != nil {
return nil, err
}
l2IndexingService, err := l2.NewService(l2.ServiceConfig{
Context: ctx,
Metrics: m,
L2RPC: l2RPC,
L2Client: l2Client,
DB: db,
ConfDepth: cfg.L2ConfDepth,
MaxHeaderBatchSize: cfg.MaxHeaderBatchSize,
StartBlockNumber: uint64(0),
Bedrock: cfg.Bedrock,
})
if err != nil {
return nil, err
}
return &Indexer{
ctx: ctx,
cfg: cfg,
l1Client: l1Client,
l2Client: l2Client,
l1IndexingService: l1IndexingService,
l2IndexingService: l2IndexingService,
airdropService: services.NewAirdrop(db, m),
router: mux.NewRouter(),
metrics: m,
db: db,
}, nil
}
// Serve spins up a REST API server at the given hostname and port.
func (b *Indexer) Serve() error {
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
})
b.router.HandleFunc("/v1/l1/status", b.l1IndexingService.GetIndexerStatus).Methods("GET")
b.router.HandleFunc("/v1/l2/status", b.l2IndexingService.GetIndexerStatus).Methods("GET")
b.router.HandleFunc("/v1/deposits/0x{address:[a-fA-F0-9]{40}}", b.l1IndexingService.GetDeposits).Methods("GET")
b.router.HandleFunc("/v1/withdrawal/0x{hash:[a-fA-F0-9]{64}}", b.l2IndexingService.GetWithdrawalBatch).Methods("GET")
b.router.HandleFunc("/v1/withdrawals/0x{address:[a-fA-F0-9]{40}}", b.l2IndexingService.GetWithdrawals).Methods("GET")
b.router.HandleFunc("/v1/airdrops/0x{address:[a-fA-F0-9]{40}}", b.airdropService.GetAirdrop)
b.router.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
_, err := w.Write([]byte("OK"))
if err != nil {
log.Error("Error handling /healthz", "error", err)
}
})
middleware := server.LoggingMiddleware(b.metrics, log.New("service", "server"))
port := strconv.FormatUint(b.cfg.RESTPort, 10)
addr := net.JoinHostPort(b.cfg.RESTHostname, port)
b.server = &http.Server{
Addr: addr,
Handler: middleware(c.Handler(b.router)),
}
errCh := make(chan error, 1)
go func() {
errCh <- b.server.ListenAndServe()
}()
// Capture server startup errors
<-time.After(10 * time.Millisecond)
select {
case err := <-errCh:
return err
default:
log.Info("indexer REST server listening on", "addr", addr)
return nil
}
}
// Start starts the indexing service on L1 and L2 chains and also
// starts the REST server.
func (b *Indexer) Start() error {
if b.cfg.DisableIndexer {
log.Info("indexer disabled, only serving data")
} else {
err := b.l1IndexingService.Start()
if err != nil {
return err
}
err = b.l2IndexingService.Start()
if err != nil {
return err
}
}
return b.Serve()
}
// Stop stops the indexing service on L1 and L2 chains.
func (b *Indexer) Stop() {
b.db.Close()
if b.server != nil {
// background context here so it waits for
// conns to close
_ = b.server.Shutdown(context.Background())
}
if !b.cfg.DisableIndexer {
b.l1IndexingService.Stop()
b.l2IndexingService.Stop()
}
}
// dialEthClientWithTimeout attempts to dial the provider using the
// provided URL. If the dial doesn't complete within defaultDialTimeout,
// this method will return an error.
func dialEthClientWithTimeout(ctx context.Context, url string) (
*ethclient.Client, *rpc.Client, error) {
ctxt, cancel := context.WithTimeout(ctx, defaultDialTimeout)
defer cancel()
c, err := rpc.DialContext(ctxt, url)
if err != nil {
return nil, nil, err
}
return ethclient.NewClient(c), c, nil
}
FROM node:18.16.0-bullseye-slim
WORKDIR /app
RUN echo {} > package.json && \
npm install prisma
COPY indexer/ui/schema.prisma schema.prisma
RUN npx prisma generate --schema schema.prisma
CMD ["npx", "prisma", "studio", "--port", "5555", "--hostname", "0.0.0.0", "--schema", "schema.prisma" ]
## @eth-optimism/indexer-ui
A simple UI for exploring the indexer DB using [Prisma Studio](https://www.prisma.io).
## Usage
The UI is included in the docker-compose file as the `ui` service:
```bash
docker-compose up
```
Prisma Studio can then be viewed at [localhost:5555](http://localhost:5555).
## Update the schema
The [Prisma schema](https://www.prisma.io/docs/reference/api-reference/prisma-schema-reference) is what drives Prisma. It is generated automatically from the database schema.
To update it to match the latest database schema, pass the database URL to [prisma db pull](https://www.prisma.io/docs/reference/api-reference/command-reference#db-pull). `prisma db pull` introspects the database and regenerates the Prisma schema:
```bash
DATABASE_URL=postgresql://db_username:db_password@postgres:5432/db_name npx prisma db pull
```
## Other functionality
We mostly use Prisma as a UI, but Prisma provides other functionality that can be useful, including:
- The ability to change the [db schema](https://www.prisma.io/docs/reference/api-reference/command-reference#db-push) directly by modifying [schema.prisma](./schema.prisma) in place. This can be a fast way to [start prototyping](https://www.prisma.io/docs/guides/migrate/prototyping-schema-db-push).
- The ability to [seed the database](https://www.prisma.io/docs/guides/migrate/seed-database).
- The ability to write quick scripts with the [Prisma client](https://www.prisma.io/docs/reference/api-reference/prisma-client-reference).
## Running prisma studio outside of docker
Prisma Studio can also be run directly with [npx](https://docs.npmjs.com/cli/v8/commands/npx):
```bash
npx prisma studio --schema indexer/ui/schema.prisma
```
generator client {
provider = "prisma-client-js"
}
datasource db {
provider = "postgresql"
url = env("DATABASE_URL")
}
model airdrops {
address String @id @db.VarChar(42)
voter_amount String @default("0") @db.VarChar
multisig_signer_amount String @default("0") @db.VarChar
gitcoin_amount String @default("0") @db.VarChar
active_bridged_amount String @default("0") @db.VarChar
op_user_amount String @default("0") @db.VarChar
op_repeat_user_amount String @default("0") @db.VarChar
op_og_amount String @default("0") @db.VarChar
bonus_amount String @default("0") @db.VarChar
total_amount String @db.VarChar
}
model deposits {
guid String @id @db.VarChar
from_address String @db.VarChar
to_address String @db.VarChar
l1_token String @db.VarChar
l2_token String @db.VarChar
amount String @db.VarChar
data Bytes
log_index Int
block_hash String @db.VarChar
tx_hash String @db.VarChar
l1_blocks l1_blocks @relation(fields: [block_hash], references: [hash], onDelete: NoAction, onUpdate: NoAction)
l1_tokens l1_tokens @relation(fields: [l1_token], references: [address], onDelete: NoAction, onUpdate: NoAction)
}
model l1_blocks {
hash String @id @db.VarChar
parent_hash String @db.VarChar
number Int @unique(map: "l1_blocks_number")
timestamp Int
deposits deposits[]
state_batches state_batches[]
}
model l1_tokens {
address String @id @db.VarChar
name String @db.VarChar
symbol String @db.VarChar
decimals Int
deposits deposits[]
}
model l2_blocks {
hash String @id @db.VarChar
parent_hash String @db.VarChar
number Int @unique(map: "l2_blocks_number")
timestamp Int
withdrawals withdrawals[]
}
model l2_tokens {
address String @id
name String
symbol String
decimals Int
withdrawals withdrawals[]
}
model state_batches {
index Int @id
root String @db.VarChar
size Int
prev_total Int
extra_data Bytes
block_hash String @db.VarChar
l1_blocks l1_blocks @relation(fields: [block_hash], references: [hash], onDelete: NoAction, onUpdate: NoAction)
withdrawals withdrawals[]
@@index([block_hash], map: "state_batches_block_hash")
@@index([prev_total], map: "state_batches_prev_total")
@@index([size], map: "state_batches_size")
}
model withdrawals {
guid String @id @db.VarChar
from_address String @db.VarChar
to_address String @db.VarChar
l1_token String @db.VarChar
l2_token String @db.VarChar
amount String @db.VarChar
data Bytes
log_index Int
block_hash String @db.VarChar
tx_hash String @db.VarChar
state_batch Int?
br_withdrawal_hash String? @db.VarChar
br_withdrawal_proven_tx_hash String? @db.VarChar
br_withdrawal_proven_log_index Int?
br_withdrawal_finalized_tx_hash String? @db.VarChar
br_withdrawal_finalized_log_index Int?
br_withdrawal_finalized_success Boolean?
l2_blocks l2_blocks @relation(map: "l2", fields: [block_hash], references: [hash], onDelete: NoAction, onUpdate: NoAction)
l2_tokens l2_tokens @relation(fields: [l2_token], references: [address], onDelete: NoAction, onUpdate: NoAction)
state_batches state_batches? @relation(fields: [state_batch], references: [index], onDelete: NoAction, onUpdate: NoAction)
@@index([br_withdrawal_hash], map: "withdrawals_br_withdrawal_hash")
}
@@ -4,9 +4,6 @@ import (
 	"context"
 	"fmt"
 	_ "net/http/pprof"
-	"os"
-	"os/signal"
-	"syscall"
 	gethrpc "github.com/ethereum/go-ethereum/rpc"
 	"github.com/urfave/cli"
@@ -16,6 +13,7 @@ import (
 	"github.com/ethereum-optimism/optimism/op-batcher/rpc"
 	opservice "github.com/ethereum-optimism/optimism/op-service"
 	oplog "github.com/ethereum-optimism/optimism/op-service/log"
+	"github.com/ethereum-optimism/optimism/op-service/opio"
 	oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
 	oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
 )
@@ -98,14 +96,7 @@ func Main(version string, cliCtx *cli.Context) error {
 	m.RecordInfo(version)
 	m.RecordUp()
-	interruptChannel := make(chan os.Signal, 1)
-	signal.Notify(interruptChannel, []os.Signal{
-		os.Interrupt,
-		os.Kill,
-		syscall.SIGTERM,
-		syscall.SIGQUIT,
-	}...)
-	<-interruptChannel
+	opio.BlockOnInterrupts()
 	if err := server.Stop(); err != nil {
 		l.Error("Error shutting down http server: %w", err)
 	}
...
@@ -30,7 +30,8 @@ bindings: l1block-bindings \
 	optimism-mintable-erc721-factory-bindings \
 	l1-fee-vault-bindings \
 	basefee-vault-bindings \
-	legacy-erc20-eth-bindings
+	legacy-erc20-eth-bindings \
+	dispute-game-factory-bindings
 version:
 	forge --version
@@ -122,6 +123,9 @@ deployer-whitelist-bindings: compile
 l1-blocknumber-bindings: compile
 	./gen_bindings.sh contracts/legacy/L1BlockNumber.sol:L1BlockNumber $(pkg)
+dispute-game-factory-bindings: compile
+	./gen_bindings.sh contracts/dispute/DisputeGameFactory.sol:DisputeGameFactory $(pkg)
 more:
 	go run ./gen/main.go \
 		-artifacts ../packages/contracts-bedrock/artifacts \
...
@@ -43,9 +43,9 @@ type Challenger struct {
 	l2ooABI *abi.ABI
 	// dispute game factory contract
-	// dgfContract *bindings.DisputeGameFactoryCaller
+	dgfContract *bindings.DisputeGameFactoryCaller
 	dgfContractAddr common.Address
-	// dgfABI *abi.ABI
+	dgfABI *abi.ABI
 	networkTimeout time.Duration
 }
@@ -79,6 +79,12 @@ func NewChallenger(cfg config.Config, l log.Logger, m metrics.Metricer) (*Challe
 		return nil, err
 	}
+	dgfContract, err := bindings.NewDisputeGameFactoryCaller(cfg.DGFAddress, l1Client)
+	if err != nil {
+		cancel()
+		return nil, err
+	}
 	cCtx, cCancel := context.WithTimeout(ctx, cfg.NetworkTimeout)
 	defer cCancel()
 	version, err := l2ooContract.Version(&bind.CallOpts{Context: cCtx})
@@ -88,7 +94,13 @@ func NewChallenger(cfg config.Config, l log.Logger, m metrics.Metricer) (*Challe
 	}
 	l.Info("Connected to L2OutputOracle", "address", cfg.L2OOAddress, "version", version)
-	parsed, err := bindings.L2OutputOracleMetaData.GetAbi()
+	parsedL2oo, err := bindings.L2OutputOracleMetaData.GetAbi()
+	if err != nil {
+		cancel()
+		return nil, err
+	}
+	parsedDgf, err := bindings.DisputeGameFactoryMetaData.GetAbi()
 	if err != nil {
 		cancel()
 		return nil, err
@@ -110,9 +122,11 @@ func NewChallenger(cfg config.Config, l log.Logger, m metrics.Metricer) (*Challe
 		l2ooContract: l2ooContract,
 		l2ooContractAddr: cfg.L2OOAddress,
-		l2ooABI: parsed,
+		l2ooABI: parsedL2oo,
+		dgfContract: dgfContract,
 		dgfContractAddr: cfg.DGFAddress,
+		dgfABI: parsedDgf,
 		networkTimeout: cfg.NetworkTimeout,
 	}, nil
...
@@ -4,14 +4,12 @@ import (
 	"context"
 	"fmt"
 	_ "net/http/pprof"
-	"os"
-	"os/signal"
-	"syscall"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum-optimism/optimism/op-challenger/config"
 	"github.com/ethereum-optimism/optimism/op-challenger/metrics"
+	"github.com/ethereum-optimism/optimism/op-service/opio"
 	oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
 	oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
 )
@@ -73,14 +71,7 @@ func Main(logger log.Logger, version string, cfg *config.Config) error {
 	m.RecordInfo(version)
 	m.RecordUp()
-	interruptChannel := make(chan os.Signal, 1)
-	signal.Notify(interruptChannel, []os.Signal{
-		os.Interrupt,
-		os.Kill,
-		syscall.SIGTERM,
-		syscall.SIGQUIT,
-	}...)
-	<-interruptChannel
+	opio.BlockOnInterrupts()
 	cancel()
 	return nil
...
package challenger
import (
"context"
"sync"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/backoff"
)
// logStore manages log subscriptions.
type logStore struct {
// The log filter query
query ethereum.FilterQuery
// core sync mutex for log store
// this locks the entire log store
mu sync.Mutex
logList []types.Log
logMap map[common.Hash][]types.Log
// Log subscription
subscription *Subscription
// Client to query for logs
client ethereum.LogFilterer
// Logger
log log.Logger
}
// NewLogStore creates a new log store.
func NewLogStore(query ethereum.FilterQuery, client ethereum.LogFilterer, log log.Logger) *logStore {
return &logStore{
query: query,
mu: sync.Mutex{},
logList: make([]types.Log, 0),
logMap: make(map[common.Hash][]types.Log),
subscription: NewSubscription(query, client, log),
client: client,
log: log,
}
}
// Subscribed returns true if the subscription has started.
func (l *logStore) Subscribed() bool {
return l.subscription.Started()
}
// Query returns the log filter query.
func (l *logStore) Query() ethereum.FilterQuery {
return l.query
}
// Client returns the log filter client.
func (l *logStore) Client() ethereum.LogFilterer {
return l.client
}
// GetLogs returns all logs in the log store.
func (l *logStore) GetLogs() []types.Log {
l.mu.Lock()
defer l.mu.Unlock()
logs := make([]types.Log, len(l.logList))
copy(logs, l.logList)
return logs
}
// GetLogByBlockHash returns all logs in the log store for a given block hash.
func (l *logStore) GetLogByBlockHash(blockHash common.Hash) []types.Log {
l.mu.Lock()
defer l.mu.Unlock()
logs := make([]types.Log, len(l.logMap[blockHash]))
copy(logs, l.logMap[blockHash])
return logs
}
// Subscribe starts the subscription.
// This function spawns a new goroutine.
func (l *logStore) Subscribe(ctx context.Context) error {
err := l.subscription.Subscribe()
if err != nil {
l.log.Error("failed to subscribe", "err", err)
return err
}
go l.dispatchLogs(ctx)
return nil
}
// Quit stops all log store asynchronous tasks.
func (l *logStore) Quit() {
l.subscription.Quit()
}
// buildBackoffStrategy builds a [backoff.Strategy].
func (l *logStore) buildBackoffStrategy() backoff.Strategy {
return &backoff.ExponentialStrategy{
Min: 1000,
Max: 20_000,
MaxJitter: 250,
}
}
// resubscribe attempts to re-establish the log store internal
// subscription with a backoff strategy.
func (l *logStore) resubscribe(ctx context.Context) error {
l.log.Info("log store resubscribing with backoff")
backoffStrategy := l.buildBackoffStrategy()
return backoff.DoCtx(ctx, 10, backoffStrategy, func() error {
if l.subscription == nil {
l.log.Error("subscription zeroed out")
return nil
}
err := l.subscription.Subscribe()
if err == nil {
l.log.Info("subscription reconnected", "id", l.subscription.ID())
}
return err
})
}
// insertLog inserts a log into the log store.
func (l *logStore) insertLog(log types.Log) {
l.mu.Lock()
l.logList = append(l.logList, log)
l.logMap[log.BlockHash] = append(l.logMap[log.BlockHash], log)
l.mu.Unlock()
}
// dispatchLogs dispatches logs to the log store.
// This function is intended to be run as a goroutine.
func (l *logStore) dispatchLogs(ctx context.Context) {
for {
select {
case err := <-l.subscription.sub.Err():
l.log.Error("log subscription error", "err", err)
for {
err = l.resubscribe(ctx)
if err == nil {
break
}
}
case log := <-l.subscription.logs:
l.insertLog(log)
case <-l.subscription.quit:
l.log.Info("received quit signal from subscription", "id", l.subscription.ID())
return
}
}
}
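For orientation, here is a minimal sketch of how a caller might drive this store. It is not part of this change: the RPC endpoint, watched address, and the `op-challenger/challenger` import path are illustrative assumptions. The key point it demonstrates is that an `*ethclient.Client` already satisfies `ethereum.LogFilterer`, so it can back the store directly.

```go
package main

import (
	"context"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-challenger/challenger"
)

func main() {
	logger := log.New()

	// Assumed websocket endpoint; log subscriptions need a subscription-capable transport.
	client, err := ethclient.Dial("ws://localhost:8546")
	if err != nil {
		logger.Crit("failed to dial L1", "err", err)
	}

	// Watch an illustrative contract address; the challenger would take this from its config.
	query := ethereum.FilterQuery{
		Addresses: []common.Address{common.HexToAddress("0x0000000000000000000000000000000000000000")},
	}

	// *ethclient.Client implements ethereum.LogFilterer, so it can back the store directly.
	store := challenger.NewLogStore(query, client, logger)
	if err := store.Subscribe(context.Background()); err != nil {
		logger.Crit("failed to subscribe", "err", err)
	}
	defer store.Quit()

	// Logs received on the subscription can now be read back
	// via GetLogs / GetLogByBlockHash.
	_ = store.GetLogs()
}
```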
package challenger
import (
"context"
"errors"
"testing"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/stretchr/testify/require"
)
type mockLogStoreClient struct {
sub mockSubscription
logs chan<- types.Log
subcount int
}
func newMockLogStoreClient() *mockLogStoreClient {
return &mockLogStoreClient{
sub: mockSubscription{
errorChan: make(chan error),
},
}
}
func (m *mockLogStoreClient) FilterLogs(context.Context, ethereum.FilterQuery) ([]types.Log, error) {
panic("this should not be called by the Subscription.Subscribe method")
}
func (m *mockLogStoreClient) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, logs chan<- types.Log) (ethereum.Subscription, error) {
m.subcount = m.subcount + 1
m.logs = logs
return m.sub, nil
}
var (
ErrTestError = errors.New("test error")
)
// errLogStoreClient implements the [ethereum.LogFilterer] interface for testing.
type errLogStoreClient struct{}
func (m errLogStoreClient) FilterLogs(context.Context, ethereum.FilterQuery) ([]types.Log, error) {
panic("this should not be called by the Subscription.Subscribe method")
}
func (m errLogStoreClient) SubscribeFilterLogs(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error) {
return nil, ErrTestError
}
type mockSubscription struct {
errorChan chan error
}
func (m mockSubscription) Err() <-chan error {
return m.errorChan
}
func (m mockSubscription) Unsubscribe() {}
func newLogStore(t *testing.T) (*logStore, *mockLogStoreClient) {
query := ethereum.FilterQuery{}
client := newMockLogStoreClient()
log := testlog.Logger(t, log.LvlError)
return NewLogStore(query, client, log), client
}
func newErrorLogStore(t *testing.T, client *errLogStoreClient) (*logStore, *errLogStoreClient) {
query := ethereum.FilterQuery{}
log := testlog.Logger(t, log.LvlError)
return NewLogStore(query, client, log), client
}
func TestLogStore_NewLogStore_NotSubscribed(t *testing.T) {
logStore, _ := newLogStore(t)
require.False(t, logStore.Subscribed())
}
func TestLogStore_NewLogStore_EmptyLogs(t *testing.T) {
logStore, _ := newLogStore(t)
require.Empty(t, logStore.GetLogs())
require.Empty(t, logStore.GetLogByBlockHash(common.Hash{}))
}
func TestLogStore_Subscribe_EstablishesSubscription(t *testing.T) {
logStore, client := newLogStore(t)
defer logStore.Quit()
require.Equal(t, 0, client.subcount)
require.False(t, logStore.Subscribed())
require.NoError(t, logStore.Subscribe(context.Background()))
require.True(t, logStore.Subscribed())
require.Equal(t, 1, client.subcount)
}
func TestLogStore_Subscribe_ReceivesLogs(t *testing.T) {
logStore, client := newLogStore(t)
defer logStore.Quit()
require.NoError(t, logStore.Subscribe(context.Background()))
mockLog := types.Log{
BlockHash: common.HexToHash("0x1"),
}
client.logs <- mockLog
timeout, tCancel := context.WithTimeout(context.Background(), 30*time.Second)
defer tCancel()
err := e2eutils.WaitFor(timeout, 500*time.Millisecond, func() (bool, error) {
result := logStore.GetLogByBlockHash(mockLog.BlockHash)
return result[0].BlockHash == mockLog.BlockHash, nil
})
require.NoError(t, err)
}
func TestLogStore_Subscribe_SubscriptionErrors(t *testing.T) {
logStore, client := newLogStore(t)
defer logStore.Quit()
require.NoError(t, logStore.Subscribe(context.Background()))
client.sub.errorChan <- ErrTestError
timeout, tCancel := context.WithTimeout(context.Background(), 30*time.Second)
defer tCancel()
err := e2eutils.WaitFor(timeout, 500*time.Millisecond, func() (bool, error) {
subcount := client.subcount == 2
started := logStore.subscription.Started()
return subcount && started, nil
})
require.NoError(t, err)
}
func TestLogStore_Subscribe_NoClient_Panics(t *testing.T) {
require.Panics(t, func() {
logStore, _ := newErrorLogStore(t, nil)
_ = logStore.Subscribe(context.Background())
})
}
func TestLogStore_Subscribe_ErrorSubscribing(t *testing.T) {
logStore, _ := newErrorLogStore(t, &errLogStoreClient{})
require.False(t, logStore.Subscribed())
require.EqualError(t, logStore.Subscribe(context.Background()), ErrTestError.Error())
}
func TestLogStore_Quit_ResetsSubscription(t *testing.T) {
logStore, _ := newLogStore(t)
require.False(t, logStore.Subscribed())
require.NoError(t, logStore.Subscribe(context.Background()))
require.True(t, logStore.Subscribed())
logStore.Quit()
require.False(t, logStore.Subscribed())
}
func TestLogStore_Quit_NoSubscription_Panics(t *testing.T) {
require.Panics(t, func() {
logStore, _ := newErrorLogStore(t, nil)
logStore.Quit()
})
}
package challenger
import (
"context"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// SubscriptionId is a unique subscription ID.
type SubscriptionId uint64
// Increment returns the next subscription ID.
func (s *SubscriptionId) Increment() SubscriptionId {
*s++
return *s
}
// Subscription wraps an [ethereum.Subscription] to provide a restart.
type Subscription struct {
// The subscription ID
id SubscriptionId
// The current subscription
sub ethereum.Subscription
// If the subscription is started
started bool
// The query used to create the subscription
query ethereum.FilterQuery
// The log channel
logs chan types.Log
// The quit channel
quit chan struct{}
// Filter client used to open the log subscription
client ethereum.LogFilterer
// Logger
log log.Logger
}
// NewSubscription creates a new subscription.
func NewSubscription(query ethereum.FilterQuery, client ethereum.LogFilterer, log log.Logger) *Subscription {
return &Subscription{
id: SubscriptionId(0),
sub: nil,
started: false,
query: query,
logs: make(chan types.Log),
quit: make(chan struct{}),
client: client,
log: log,
}
}
// ID returns the subscription ID.
func (s *Subscription) ID() SubscriptionId {
return s.id
}
// Started returns true if the subscription has started.
func (s *Subscription) Started() bool {
return s.started
}
// Subscribe constructs the subscription.
func (s *Subscription) Subscribe() error {
s.log.Info("Subscribing to", "query", s.query.Topics, "id", s.id)
sub, err := s.client.SubscribeFilterLogs(context.Background(), s.query, s.logs)
if err != nil {
s.log.Error("failed to subscribe to logs", "err", err)
return err
}
s.sub = sub
s.started = true
return nil
}
// Quit closes the subscription.
func (s *Subscription) Quit() {
s.log.Info("Quitting subscription", "id", s.id)
s.sub.Unsubscribe()
s.quit <- struct{}{}
s.started = false
s.log.Info("Quit subscription", "id", s.id)
}
package challenger
import (
"context"
"errors"
"math"
"testing"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/stretchr/testify/require"
)
type mockLogFilterClient struct{}
func (m mockLogFilterClient) FilterLogs(context.Context, ethereum.FilterQuery) ([]types.Log, error) {
panic("this should not be called by the Subscription.Subscribe method")
}
func (m mockLogFilterClient) SubscribeFilterLogs(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error) {
return nil, nil
}
func newSubscription(t *testing.T, client *mockLogFilterClient) (*Subscription, *mockLogFilterClient) {
query := ethereum.FilterQuery{}
log := testlog.Logger(t, log.LvlError)
return NewSubscription(query, client, log), client
}
func FuzzSubscriptionId_Increment(f *testing.F) {
maxUint64 := uint64(math.MaxUint64)
f.Fuzz(func(t *testing.T, id uint64) {
if id >= maxUint64 {
t.Skip("skipping due to overflow")
} else {
subId := SubscriptionId(id)
require.Equal(t, subId.Increment(), SubscriptionId(id+1))
}
})
}
func TestSubscription_Subscribe_NilClient_Panics(t *testing.T) {
defer func() {
if recover() == nil {
t.Error("expected nil client to panic")
}
}()
subscription, _ := newSubscription(t, nil)
require.NoError(t, subscription.Subscribe())
}
func TestSubscription_Subscribe(t *testing.T) {
subscription, _ := newSubscription(t, &mockLogFilterClient{})
require.NoError(t, subscription.Subscribe())
require.True(t, subscription.Started())
}
var ErrSubscriptionFailed = errors.New("failed to subscribe to logs")
type errLogFilterClient struct{}
func (m errLogFilterClient) FilterLogs(context.Context, ethereum.FilterQuery) ([]types.Log, error) {
panic("this should not be called by the Subscription.Subscribe method")
}
func (m errLogFilterClient) SubscribeFilterLogs(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error) {
return nil, ErrSubscriptionFailed
}
func TestSubscription_Subscribe_SubscriptionErrors(t *testing.T) {
query := ethereum.FilterQuery{}
log := testlog.Logger(t, log.LvlError)
subscription := Subscription{
query: query,
client: errLogFilterClient{},
log: log,
}
require.EqualError(t, subscription.Subscribe(), ErrSubscriptionFailed.Error())
}
@@ -27,24 +27,22 @@ var Mainnet = rollup.Config{
 		// moose: Update this during migration
 		L2Time: 0,
 		SystemConfig: eth.SystemConfig{
-			BatcherAddr: common.HexToAddress("0x70997970C51812dc3A010C7d01b50e0d17dc79C8"),
-			Overhead: eth.Bytes32(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000834")),
-			Scalar: eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000f4240")),
-			GasLimit: 25_000_000,
+			BatcherAddr: common.HexToAddress("0x6887246668a3b87f54deb3b94ba47a6f63f32985"),
+			Overhead: eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000000bc")),
+			Scalar: eth.Bytes32(common.HexToHash("0x00000000000000000000000000000000000000000000000000000000000a6fe0")),
+			GasLimit: 30_000_000,
 		},
 	},
 	BlockTime: 2,
 	MaxSequencerDrift: 600,
 	SeqWindowSize: 3600,
 	ChannelTimeout: 300,
 	L1ChainID: big.NewInt(1),
 	L2ChainID: big.NewInt(10),
 	BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000000000010"),
-	// moose: Update this during migration
-	DepositContractAddress: common.HexToAddress("0x"),
-	// moose: Update this during migration
-	L1SystemConfigAddress: common.HexToAddress("0x"),
-	RegolithTime: u64Ptr(0),
+	DepositContractAddress: common.HexToAddress("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"),
+	L1SystemConfigAddress: common.HexToAddress("0x229047fed2591dbec1eF1118d64F7aF3dB9EB290"),
+	RegolithTime: u64Ptr(0),
 }
 var Goerli = rollup.Config{
...
@@ -4,9 +4,7 @@ import (
 	"context"
 	"net"
 	"os"
-	"os/signal"
 	"strconv"
-	"syscall"
 	"github.com/ethereum-optimism/optimism/op-node/chaincfg"
 	"github.com/ethereum-optimism/optimism/op-node/cmd/doc"
@@ -25,6 +23,7 @@ import (
 	"github.com/ethereum-optimism/optimism/op-node/version"
 	opservice "github.com/ethereum-optimism/optimism/op-service"
 	oplog "github.com/ethereum-optimism/optimism/op-service/log"
+	"github.com/ethereum-optimism/optimism/op-service/opio"
 	oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
 )
@@ -162,14 +161,7 @@ func RollupNodeMain(ctx *cli.Context) error {
 		defer pprofCancel()
 	}
-	interruptChannel := make(chan os.Signal, 1)
-	signal.Notify(interruptChannel, []os.Signal{
-		os.Interrupt,
-		os.Kill,
-		syscall.SIGTERM,
-		syscall.SIGQUIT,
-	}...)
-	<-interruptChannel
+	opio.BlockOnInterrupts()
 	return nil
...
@@ -40,7 +40,7 @@ var (
 			"Should be provided in following format: <threshold>:<label>;<threshold>:<label>;..." +
 			"For example: -40:graylist;-20:restricted;0:nopx;20:friend;",
 		Required: false,
-		Value: "-40:graylist;-20:restricted;0:nopx;20:friend;",
+		Value: "-40:<=-40;-10:<=-10;-5:<=-05;-0.05:<=-00.05;0:<=0;0.05:<=00.05;5:<=05;10:<=10;20:<=20;100:>20;",
 		EnvVar: p2pEnv("SCORE_BANDS"),
 	}
...
@@ -6,10 +6,7 @@ import (
 	"fmt"
 	"math/big"
 	_ "net/http/pprof"
-	"os"
-	"os/signal"
 	"sync"
-	"syscall"
 	"time"
 	"github.com/ethereum/go-ethereum/accounts/abi"
@@ -27,6 +24,7 @@ import (
 	opservice "github.com/ethereum-optimism/optimism/op-service"
 	opclient "github.com/ethereum-optimism/optimism/op-service/client"
 	oplog "github.com/ethereum-optimism/optimism/op-service/log"
+	"github.com/ethereum-optimism/optimism/op-service/opio"
 	oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
 	oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
 	"github.com/ethereum-optimism/optimism/op-service/txmgr"
@@ -103,14 +101,7 @@ func Main(version string, cliCtx *cli.Context) error {
 	m.RecordInfo(version)
 	m.RecordUp()
-	interruptChannel := make(chan os.Signal, 1)
-	signal.Notify(interruptChannel, []os.Signal{
-		os.Interrupt,
-		os.Kill,
-		syscall.SIGTERM,
-		syscall.SIGQUIT,
-	}...)
-	<-interruptChannel
+	opio.BlockOnInterrupts()
 	cancel()
 	return nil
...
package opio
import (
"os"
"os/signal"
"syscall"
)
// DefaultInterruptSignals is a set of default interrupt signals.
var DefaultInterruptSignals = []os.Signal{
os.Interrupt,
os.Kill,
syscall.SIGTERM,
syscall.SIGQUIT,
}
// BlockOnInterrupts blocks until one of the configured interrupt signals is received.
// Passing in signals will override the default signals.
func BlockOnInterrupts(signals ...os.Signal) {
if len(signals) == 0 {
signals = DefaultInterruptSignals
}
interruptChannel := make(chan os.Signal, 1)
signal.Notify(interruptChannel, signals...)
<-interruptChannel
}
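As used in the op-batcher, op-challenger, op-node, and op-proposer changes above, a service's `Main` can replace its hand-rolled signal plumbing with a single call. A minimal sketch follows; the surrounding setup and teardown are illustrative placeholders, not code from this change:

```go
package main

import (
	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-service/opio"
)

func main() {
	// ... start RPC server, metrics, pprof, etc. (omitted)

	// Block here until SIGINT, SIGTERM, or SIGQUIT arrives,
	// then fall through to the shutdown path.
	opio.BlockOnInterrupts()

	// ... stop servers and cancel contexts, mirroring the callers above.
	log.Info("shutting down")
}
```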
...@@ -2,6 +2,7 @@ package engine ...@@ -2,6 +2,7 @@ package engine
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"math/big" "math/big"
"time" "time"
...@@ -18,6 +19,28 @@ import ( ...@@ -18,6 +19,28 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
) )
type PayloadAttributesV2 struct {
Timestamp uint64 `json:"timestamp"`
Random common.Hash `json:"prevRandao"`
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
}
func (p PayloadAttributesV2) MarshalJSON() ([]byte, error) {
type PayloadAttributes struct {
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
}
var enc PayloadAttributes
enc.Timestamp = hexutil.Uint64(p.Timestamp)
enc.Random = p.Random
enc.SuggestedFeeRecipient = p.SuggestedFeeRecipient
enc.Withdrawals = make([]*types.Withdrawal, 0)
return json.Marshal(&enc)
}
func DialClient(ctx context.Context, endpoint string, jwtSecret [32]byte) (client.RPC, error) { func DialClient(ctx context.Context, endpoint string, jwtSecret [32]byte) (client.RPC, error) {
auth := node.NewJWTAuth(jwtSecret) auth := node.NewJWTAuth(jwtSecret)
...@@ -69,7 +92,7 @@ func headSafeFinalized(ctx context.Context, client client.RPC) (head *types.Bloc ...@@ -69,7 +92,7 @@ func headSafeFinalized(ctx context.Context, client client.RPC) (head *types.Bloc
func insertBlock(ctx context.Context, client client.RPC, payload *engine.ExecutableData) error { func insertBlock(ctx context.Context, client client.RPC, payload *engine.ExecutableData) error {
var payloadResult *engine.PayloadStatusV1 var payloadResult *engine.PayloadStatusV1
if err := client.CallContext(ctx, &payloadResult, "engine_newPayloadV1", payload); err != nil { if err := client.CallContext(ctx, &payloadResult, "engine_newPayloadV2", payload); err != nil {
return fmt.Errorf("failed to insert block %d: %w", payload.Number, err) return fmt.Errorf("failed to insert block %d: %w", payload.Number, err)
} }
if payloadResult.Status != string(eth.ExecutionValid) { if payloadResult.Status != string(eth.ExecutionValid) {
...@@ -80,7 +103,7 @@ func insertBlock(ctx context.Context, client client.RPC, payload *engine.Executa ...@@ -80,7 +103,7 @@ func insertBlock(ctx context.Context, client client.RPC, payload *engine.Executa
func updateForkchoice(ctx context.Context, client client.RPC, head, safe, finalized common.Hash) error { func updateForkchoice(ctx context.Context, client client.RPC, head, safe, finalized common.Hash) error {
var post engine.ForkChoiceResponse var post engine.ForkChoiceResponse
if err := client.CallContext(ctx, &post, "engine_forkchoiceUpdatedV1", if err := client.CallContext(ctx, &post, "engine_forkchoiceUpdatedV2",
engine.ForkchoiceStateV1{ engine.ForkchoiceStateV1{
HeadBlockHash: head, HeadBlockHash: head,
SafeBlockHash: safe, SafeBlockHash: safe,
...@@ -112,21 +135,17 @@ func BuildBlock(ctx context.Context, client client.RPC, status *StatusData, sett ...@@ -112,21 +135,17 @@ func BuildBlock(ctx context.Context, client client.RPC, status *StatusData, sett
} }
} }
var pre engine.ForkChoiceResponse var pre engine.ForkChoiceResponse
if err := client.CallContext(ctx, &pre, "engine_forkchoiceUpdatedV1", if err := client.CallContext(ctx, &pre, "engine_forkchoiceUpdatedV2",
engine.ForkchoiceStateV1{ engine.ForkchoiceStateV1{
HeadBlockHash: status.Head.Hash, HeadBlockHash: status.Head.Hash,
SafeBlockHash: status.Safe.Hash, SafeBlockHash: status.Safe.Hash,
FinalizedBlockHash: status.Finalized.Hash, FinalizedBlockHash: status.Finalized.Hash,
}, engine.PayloadAttributes{ }, PayloadAttributesV2{
Timestamp: timestamp, Timestamp: timestamp,
Random: settings.Random, Random: settings.Random,
SuggestedFeeRecipient: settings.FeeRecipient, SuggestedFeeRecipient: settings.FeeRecipient,
// TODO: maybe use the L2 fields to hack in tx embedding CLI option?
//Transactions: nil,
//NoTxPool: false,
//GasLimit: nil,
}); err != nil { }); err != nil {
return nil, fmt.Errorf("failed to set forkchoice with new block: %w", err) return nil, fmt.Errorf("failed to set forkchoice when building new block: %w", err)
} }
if pre.PayloadStatus.Status != string(eth.ExecutionValid) { if pre.PayloadStatus.Status != string(eth.ExecutionValid) {
return nil, fmt.Errorf("pre-block forkchoice update was not valid: %v", pre.PayloadStatus.ValidationError) return nil, fmt.Errorf("pre-block forkchoice update was not valid: %v", pre.PayloadStatus.ValidationError)
...@@ -139,19 +158,19 @@ func BuildBlock(ctx context.Context, client client.RPC, status *StatusData, sett ...@@ -139,19 +158,19 @@ func BuildBlock(ctx context.Context, client client.RPC, status *StatusData, sett
case <-time.After(settings.BuildTime): case <-time.After(settings.BuildTime):
} }
var payload *engine.ExecutableData var payload *engine.ExecutionPayloadEnvelope
if err := client.CallContext(ctx, &payload, "engine_getPayloadV1", pre.PayloadID); err != nil { if err := client.CallContext(ctx, &payload, "engine_getPayloadV2", pre.PayloadID); err != nil {
return nil, fmt.Errorf("failed to get payload %v, %d time after instructing engine to build it: %w", pre.PayloadID, settings.BuildTime, err) return nil, fmt.Errorf("failed to get payload %v, %d time after instructing engine to build it: %w", pre.PayloadID, settings.BuildTime, err)
} }
if err := insertBlock(ctx, client, payload); err != nil { if err := insertBlock(ctx, client, payload.ExecutionPayload); err != nil {
return nil, err return nil, err
} }
if err := updateForkchoice(ctx, client, payload.BlockHash, status.Safe.Hash, status.Finalized.Hash); err != nil { if err := updateForkchoice(ctx, client, payload.ExecutionPayload.BlockHash, status.Safe.Hash, status.Finalized.Hash); err != nil {
return nil, err return nil, err
} }
return payload, nil return payload.ExecutionPayload, nil
} }
func Auto(ctx context.Context, metrics Metricer, client client.RPC, log log.Logger, shutdown <-chan struct{}, settings *BlockBuildingSettings) error { func Auto(ctx context.Context, metrics Metricer, client client.RPC, log log.Logger, shutdown <-chan struct{}, settings *BlockBuildingSettings) error {
......
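To make the custom marshaling above concrete, here is a hedged sketch of the wire shape `PayloadAttributesV2` produces, written as a Go example function that could sit in a `_test.go` file in the same package. The field values are illustrative, and the hex strings in the trailing comment are abbreviated.

```go
package engine

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// ExamplePayloadAttributesV2_MarshalJSON is illustrative only (not part of the
// original change): it shows that the timestamp is hex-encoded and that the
// withdrawals field is always emitted as an empty list.
func ExamplePayloadAttributesV2_MarshalJSON() {
	attrs := PayloadAttributesV2{
		Timestamp:             1_700_000_000,
		Random:                common.HexToHash("0x01"),
		SuggestedFeeRecipient: common.HexToAddress("0x02"),
	}
	out, err := json.Marshal(attrs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// Roughly: {"timestamp":"0x6553f100","prevRandao":"0x00…01","suggestedFeeRecipient":"0x00…02","withdrawals":[]}
}
```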
pragma solidity 0.8.15;
import { Test } from "forge-std/Test.sol";
import { StdInvariant } from "forge-std/StdInvariant.sol";
import { AddressAliasHelper } from "../../vendor/AddressAliasHelper.sol";
contract AddressAliasHelper_Converter {
bool public failedRoundtrip;
/**
* @dev Allows the actor to convert L1 to L2 addresses and vice versa.
*/
function convertRoundTrip(address addr) external {
// Alias our address
address aliasedAddr = AddressAliasHelper.applyL1ToL2Alias(addr);
// Unalias our address
address undoneAliasAddr = AddressAliasHelper.undoL1ToL2Alias(aliasedAddr);
// If our round trip aliasing did not return the original result, set our state.
if (addr != undoneAliasAddr) {
failedRoundtrip = true;
}
}
}
contract AddressAliasHelper_AddressAliasing_Invariant is StdInvariant, Test {
AddressAliasHelper_Converter internal actor;
function setUp() public {
// Create a converter actor.
actor = new AddressAliasHelper_Converter();
targetContract(address(actor));
bytes4[] memory selectors = new bytes4[](1);
selectors[0] = actor.convertRoundTrip.selector;
FuzzSelector memory selector = FuzzSelector({ addr: address(actor), selectors: selectors });
targetSelector(selector);
}
/**
* @custom:invariant Address aliases are always able to be undone.
*
* Asserts that an address that has been aliased with `applyL1ToL2Alias` can always
* be unaliased with `undoL1ToL2Alias`.
*/
function invariant_round_trip_aliasing() external {
// ASSERTION: The round trip aliasing done in convertRoundTrip(...) should never fail.
assertEq(actor.failedRoundtrip(), false);
}
}
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
import { StdUtils } from "forge-std/StdUtils.sol";
import { Test } from "forge-std/Test.sol";
import { Vm } from "forge-std/Vm.sol";
import { StdInvariant } from "forge-std/StdInvariant.sol";
import { Burn } from "../../libraries/Burn.sol";
contract Burn_EthBurner is StdUtils {
Vm internal vm;
bool public failedEthBurn;
constructor(Vm _vm) {
vm = _vm;
}
/**
* @notice Takes an integer amount of ETH to burn through the Burn library and
* updates the contract state if an incorrect amount of ETH was moved from the contract.
*/
function burnEth(uint256 _value) external {
uint256 preBurnvalue = bound(_value, 0, type(uint128).max);
// Give the burner some ether for gas being used
vm.deal(address(this), preBurnvalue);
// cache the contract's eth balance
uint256 preBurnBalance = address(this).balance;
uint256 value = bound(preBurnvalue, 0, preBurnBalance);
// execute a burn of _value eth
Burn.eth(value);
// check that exactly value eth was transferred from the contract
unchecked {
if (address(this).balance != preBurnBalance - value) {
failedEthBurn = true;
}
}
}
}
contract Burn_BurnEth_Invariant is StdInvariant, Test {
Burn_EthBurner internal actor;
function setUp() public {
// Create an ETH burner actor.
actor = new Burn_EthBurner(vm);
targetContract(address(actor));
bytes4[] memory selectors = new bytes4[](1);
selectors[0] = actor.burnEth.selector;
FuzzSelector memory selector = FuzzSelector({ addr: address(actor), selectors: selectors });
targetSelector(selector);
}
/**
* @custom:invariant `eth(uint256)` always burns the exact amount of eth passed.
*
* Asserts that when `Burn.eth(uint256)` is called, it always burns the exact amount
* of ETH passed to the function.
*/
function invariant_burn_eth() external {
// ASSERTION: The amount burned should always match the amount passed exactly
assertEq(actor.failedEthBurn(), false);
}
}
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
import { StdUtils } from "forge-std/StdUtils.sol";
import { Test } from "forge-std/Test.sol";
import { Vm } from "forge-std/Vm.sol";
import { StdInvariant } from "forge-std/StdInvariant.sol";
import { Burn } from "../../libraries/Burn.sol";
contract Burn_GasBurner is StdUtils {
Vm internal vm;
bool public failedGasBurn;
constructor(Vm _vm) {
vm = _vm;
}
/**
* @notice Takes an integer amount of gas to burn through the Burn library and
* updates the contract state if the library fails to burn at least that
* amount of gas.
*/
function burnGas(uint256 _value) external {
// cap the value to the max resource limit
uint256 MAX_RESOURCE_LIMIT = 8_000_000;
uint256 value = bound(_value, 0, MAX_RESOURCE_LIMIT);
// cache the contract's current remaining gas
uint256 preBurnGas = gasleft();
// execute the gas burn
Burn.gas(value);
// cache the remaining gas post burn
uint256 postBurnGas = gasleft();
// check that at least value gas was burnt (and that there was no underflow)
unchecked {
if (postBurnGas - preBurnGas <= value && preBurnGas - value > preBurnGas) {
failedGasBurn = true;
}
}
}
}
contract Burn_BurnGas_Invariant is StdInvariant, Test {
Burn_GasBurner internal actor;
function setUp() public {
// Create a gas burner actor.
actor = new Burn_GasBurner(vm);
targetContract(address(actor));
bytes4[] memory selectors = new bytes4[](1);
selectors[0] = actor.burnGas.selector;
FuzzSelector memory selector = FuzzSelector({ addr: address(actor), selectors: selectors });
targetSelector(selector);
}
/**
* @custom:invariant `gas(uint256)` always burns at least the amount of gas passed.
*
* Asserts that when `Burn.gas(uint256)` is called, it always burns at least the amount
* of gas passed to the function.
*/
function invariant_burn_gas() external {
// ASSERTION: The amount of gas burned should always be at least the amount passed
assertEq(actor.failedGasBurn(), false);
}
}
# `AddressAliasHelper` Invariants
## Address aliases are always able to be undone.
**Test:** [`AddressAliasHelper.t.sol#L48`](../contracts/test/invariants/AddressAliasHelper.t.sol#L48)
Asserts that an address that has been aliased with `applyL1ToL2Alias` can always be unaliased with `undoL1ToL2Alias`.
# `Burn.Eth` Invariants
## `eth(uint256)` always burns the exact amount of eth passed.
**Test:** [`Burn.Eth.t.sol#L68`](../contracts/test/invariants/Burn.Eth.t.sol#L68)
Asserts that when `Burn.eth(uint256)` is called, it always burns the exact amount of ETH passed to the function.
# `Burn.Gas` Invariants
## `gas(uint256)` always burns at least the amount of gas passed.
**Test:** [`Burn.Gas.t.sol#L68`](../contracts/test/invariants/Burn.Gas.t.sol#L68)
Asserts that when `Burn.gas(uint256)` is called, it always burns at least the amount of gas passed to the function.
...@@ -6,7 +6,10 @@ This directory contains documentation for all defined invariant tests within `co ...@@ -6,7 +6,10 @@ This directory contains documentation for all defined invariant tests within `co
<!-- START autoTOC --> <!-- START autoTOC -->
## Table of Contents ## Table of Contents
- [AddressAliasHelper](./AddressAliasHelper.md)
- [AddressAliasing](./AddressAliasing.md) - [AddressAliasing](./AddressAliasing.md)
- [Burn.Eth](./Burn.Eth.md)
- [Burn.Gas](./Burn.Gas.md)
- [Burn](./Burn.md) - [Burn](./Burn.md)
- [CrossDomainMessenger](./CrossDomainMessenger.md) - [CrossDomainMessenger](./CrossDomainMessenger.md)
- [Encoding](./Encoding.md) - [Encoding](./Encoding.md)
......
...@@ -15,6 +15,12 @@ const config: HardhatUserConfig = { ...@@ -15,6 +15,12 @@ const config: HardhatUserConfig = {
sources: './test/contracts', sources: './test/contracts',
}, },
networks: { networks: {
mainnet: {
url: process.env.L1_RPC || 'https://mainnet-l1-rehearsal.optimism.io',
accounts: [
'ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80',
],
},
devnetL1: { devnetL1: {
url: 'http://localhost:8545', url: 'http://localhost:8545',
accounts: [ accounts: [
...@@ -43,6 +49,10 @@ const config: HardhatUserConfig = { ...@@ -43,6 +49,10 @@ const config: HardhatUserConfig = {
}, },
], ],
deployments: { deployments: {
mainnet: [
'../contracts/deployments/mainnet',
'../contracts-bedrock/deployments/mainnet',
],
hivenet: ['../contracts-bedrock/deployments/hivenet'], hivenet: ['../contracts-bedrock/deployments/hivenet'],
devnetL1: ['../contracts-bedrock/deployments/devnetL1'], devnetL1: ['../contracts-bedrock/deployments/devnetL1'],
goerli: [ goerli: [
......
...@@ -130,8 +130,8 @@ task('deposit-eth', 'Deposits ether to L2.') ...@@ -130,8 +130,8 @@ task('deposit-eth', 'Deposits ether to L2.')
contractAddrs = { contractAddrs = {
l1: { l1: {
AddressManager: Deployment__AddressManager.address, AddressManager: Deployment__AddressManager.address,
L1CrossDomainMessenger: Deployment__L1CrossDomainMessenger, L1CrossDomainMessenger: Deployment__L1CrossDomainMessenger.address,
L1StandardBridge: Deployment__L1StandardBridge, L1StandardBridge: Deployment__L1StandardBridge.address,
StateCommitmentChain: ethers.constants.AddressZero, StateCommitmentChain: ethers.constants.AddressZero,
CanonicalTransactionChain: ethers.constants.AddressZero, CanonicalTransactionChain: ethers.constants.AddressZero,
BondManager: ethers.constants.AddressZero, BondManager: ethers.constants.AddressZero,
......
...@@ -5,7 +5,12 @@ This tool implements `proxyd`, an RPC request router and proxy. It does the foll ...@@ -5,7 +5,12 @@ This tool implements `proxyd`, an RPC request router and proxy. It does the foll
1. Whitelists RPC methods. 1. Whitelists RPC methods.
2. Routes RPC methods to groups of backend services. 2. Routes RPC methods to groups of backend services.
3. Automatically retries failed backend requests. 3. Automatically retries failed backend requests.
4. Provides metrics the measure request latency, error rates, and the like. 4. Tracks backend consensus (`latest`, `safe`, `finalized` blocks), peer count, and sync state.
5. Rewrites requests and responses to enforce consensus.
6. Load-balances requests across backend services.
7. Caches immutable responses from backends.
8. Provides metrics that measure request latency, error rates, and the like.
## Usage ## Usage
...@@ -15,12 +20,80 @@ To configure `proxyd` for use, you'll need to create a configuration file to def ...@@ -15,12 +20,80 @@ To configure `proxyd` for use, you'll need to create a configuration file to def
Once you have a config file, start the daemon via `proxyd <path-to-config>.toml`. Once you have a config file, start the daemon via `proxyd <path-to-config>.toml`.
## Consensus awareness
Starting with v4.0.0, `proxyd` is aware of the consensus state of its backends. This helps minimize chain reorgs experienced by clients.
To enable this behavior, set the `consensus_aware` value to `true` in the backend group.
When consensus awareness is enabled, `proxyd` will poll the backends for their states and resolve a consensus group based on:
* the common ancestor `latest` block, i.e. if a backend is experiencing a fork, the fork won't be visible to the clients
* the lowest `safe` block
* the lowest `finalized` block
* peer count
* sync state
The backend group then acts as a round-robin load balancer distributing traffic equally across healthy backends in the consensus group, increasing the availability of the proxy.
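To make the resolution rules above concrete, here is a minimal, hypothetical Go sketch (not proxyd's actual code) that reduces per-backend views to a single consensus view. It simplifies the `latest` rule to the lowest observed block, whereas the real logic walks back to a common ancestor, and it leaves peer count and sync state to the health checks described next.

```go
package main

import "fmt"

// BackendView is a hypothetical snapshot of one backend's chain state.
type BackendView struct {
	Name      string
	Latest    uint64
	Safe      uint64
	Finalized uint64
}

// ConsensusView holds the values resolved for the whole backend group.
type ConsensusView struct {
	Latest, Safe, Finalized uint64
}

// resolve picks conservative (lowest) values across all views. The real
// implementation additionally requires `latest` to be a common ancestor, so a
// forked backend cannot expose its fork to clients.
func resolve(views []BackendView) ConsensusView {
	c := ConsensusView{Latest: ^uint64(0), Safe: ^uint64(0), Finalized: ^uint64(0)}
	for _, v := range views {
		if v.Latest < c.Latest {
			c.Latest = v.Latest
		}
		if v.Safe < c.Safe {
			c.Safe = v.Safe
		}
		if v.Finalized < c.Finalized {
			c.Finalized = v.Finalized
		}
	}
	return c
}

func main() {
	views := []BackendView{
		{Name: "backend-a", Latest: 1000, Safe: 970, Finalized: 940},
		{Name: "backend-b", Latest: 1002, Safe: 968, Finalized: 940},
	}
	fmt.Printf("%+v\n", resolve(views)) // {Latest:1000 Safe:968 Finalized:940}
}
```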
A backend is considered healthy if it meets the following criteria:
* not banned
* avg 1-min moving window error rate ≤ configurable threshold
* avg 1-min moving window latency ≤ configurable threshold
* peer count ≥ configurable threshold
* `latest` block lag ≤ configurable threshold
* last state update ≤ configurable threshold
* not currently syncing
When a backend is experiencing inconsistent consensus, high error rates or high latency,
the backend will be banned for a configurable amount of time (default 5 minutes)
and won't receive any traffic during this period.
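A hedged sketch of how the health criteria listed above might combine into a single predicate. The field names and threshold values below are placeholders for illustration, not proxyd configuration keys or defaults.

```go
package main

import (
	"fmt"
	"time"
)

// backendHealth is a hypothetical snapshot of the signals listed above.
type backendHealth struct {
	Banned          bool
	ErrorRate       float64       // avg over a 1-minute moving window
	AvgLatency      time.Duration // avg over a 1-minute moving window
	PeerCount       uint64
	LatestBlockLag  uint64
	SinceLastUpdate time.Duration
	Syncing         bool
}

// healthy mirrors the checklist above: every criterion must pass.
func healthy(h backendHealth) bool {
	return !h.Banned &&
		h.ErrorRate <= 0.05 &&
		h.AvgLatency <= 500*time.Millisecond &&
		h.PeerCount >= 30 &&
		h.LatestBlockLag <= 8 &&
		h.SinceLastUpdate <= 30*time.Second &&
		!h.Syncing
}

func main() {
	fmt.Println(healthy(backendHealth{PeerCount: 50})) // true
}
```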
## Tag rewrite
When consensus awareness is enabled, `proxyd` will enforce the consensus state transparently for all the clients.
For example, if a client requests the `eth_getBlockByNumber` method with the `latest` tag,
`proxyd` will rewrite the request to use the resolved latest block from the consensus group
and forward it to the backend.
The following request methods are rewritten:
* `eth_getLogs`
* `eth_newFilter`
* `eth_getBalance`
* `eth_getCode`
* `eth_getTransactionCount`
* `eth_call`
* `eth_getStorageAt`
* `eth_getBlockTransactionCountByNumber`
* `eth_getUncleCountByBlockNumber`
* `eth_getBlockByNumber`
* `eth_getTransactionByBlockNumberAndIndex`
* `eth_getUncleByBlockNumberAndIndex`
In addition, the `eth_blockNumber` response is overridden with the `latest` block resolved by the consensus group.
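To make the rewrite concrete, here is a small hypothetical sketch (not proxyd's actual implementation) of rewriting the `latest` tag in an `eth_getBlockByNumber` call to the block number resolved by the consensus group; the `rewriteLatest` helper and its signature are assumptions for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// rewriteLatest replaces a "latest" block tag in eth_getBlockByNumber params
// with the block number resolved by the consensus group. Purely illustrative.
func rewriteLatest(rawParams json.RawMessage, consensusLatest uint64) (json.RawMessage, error) {
	var params []interface{}
	if err := json.Unmarshal(rawParams, &params); err != nil {
		return nil, err
	}
	if len(params) > 0 && params[0] == "latest" {
		params[0] = hexutil.EncodeUint64(consensusLatest)
	}
	return json.Marshal(params)
}

func main() {
	rewritten, err := rewriteLatest(json.RawMessage(`["latest", false]`), 17_000_000)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(rewritten)) // ["0x1036640",false]
}
```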
## Cacheable methods
The cache uses Redis and can be enabled for the following immutable methods; a brief sketch of the idea follows the list:
* `eth_chainId`
* `net_version`
* `eth_getBlockTransactionCountByHash`
* `eth_getUncleCountByBlockHash`
* `eth_getBlockByHash`
* `eth_getTransactionByBlockHashAndIndex`
* `eth_getUncleByBlockHashAndIndex`
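A minimal sketch of the caching idea under stated assumptions: responses are keyed by method plus serialized params, and only the immutable methods listed above are eligible. A plain in-memory map stands in for Redis here, and none of the names below are proxyd's own.

```go
package main

import "fmt"

// cacheableMethods mirrors the list above: responses to these calls never change.
var cacheableMethods = map[string]bool{
	"eth_chainId":                           true,
	"net_version":                           true,
	"eth_getBlockTransactionCountByHash":    true,
	"eth_getUncleCountByBlockHash":          true,
	"eth_getBlockByHash":                    true,
	"eth_getTransactionByBlockHashAndIndex": true,
	"eth_getUncleByBlockHashAndIndex":       true,
}

// rpcCache is an in-memory stand-in for the Redis cache.
type rpcCache struct{ entries map[string]string }

func newRPCCache() *rpcCache { return &rpcCache{entries: map[string]string{}} }

// get returns a cached response, keyed by method and raw params, if the method
// is immutable and a response was stored earlier.
func (c *rpcCache) get(method, params string) (string, bool) {
	if !cacheableMethods[method] {
		return "", false
	}
	resp, ok := c.entries[method+":"+params]
	return resp, ok
}

// put stores a response only for immutable methods.
func (c *rpcCache) put(method, params, resp string) {
	if cacheableMethods[method] {
		c.entries[method+":"+params] = resp
	}
}

func main() {
	c := newRPCCache()
	c.put("eth_chainId", "[]", `"0xa"`)
	resp, hit := c.get("eth_chainId", "[]")
	fmt.Println(hit, resp) // true "0xa"
}
```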
## Metrics ## Metrics
See `metrics.go` for a list of all available metrics. See `metrics.go` for a list of all available metrics.
The metrics port is configurable via the `metrics.port` and `metrics.host` keys in the config. The metrics port is configurable via the `metrics.port` and `metrics.host` keys in the config.
## Adding Backend SSL Certificates in Docker ## Adding Backend SSL Certificates in Docker
The Docker image runs on Alpine Linux. If you get SSL errors when connecting to a backend within Docker, you may need to add additional certificates to Alpine's certificate store. To do this, bind mount the certificate bundle into a file in `/usr/local/share/ca-certificates`. The `entrypoint.sh` script will then update the store with whatever is in the `ca-certificates` directory prior to starting `proxyd`. The Docker image runs on Alpine Linux. If you get SSL errors when connecting to a backend within Docker, you may need to add additional certificates to Alpine's certificate store. To do this, bind mount the certificate bundle into a file in `/usr/local/share/ca-certificates`. The `entrypoint.sh` script will then update the store with whatever is in the `ca-certificates` directory prior to starting `proxyd`.
\ No newline at end of file