Commit bd204cdc authored by Adrian Sutton

Merge branch 'develop' into aj/geth-1.13.4

parents 2a93a094 38125b92
---
'@eth-optimism/contracts-ts': minor
---
Removed unused hooks and actions
3b1129b5bc43ba22a9bcf4e4323c5a9df0023140
ee5d02c3ef5f55a06b069e4a70a820661a9130c8
v1.13.4
......@@ -16,7 +16,7 @@
/op-exporter @ethereum-optimism/go-reviewers
/op-heartbeat @ethereum-optimism/go-reviewers
/op-node @ethereum-optimism/go-reviewers
/op-node/rollup @protolambda @trianglesphere
/op-node/rollup @protolambda @trianglesphere @ajsutton
/op-preimage @ethereum-optimism/go-reviewers
/op-program @ethereum-optimism/go-reviewers
/op-proposer @ethereum-optimism/go-reviewers
......
COMPOSEFLAGS=-d
ITESTS_L2_HOST=http://localhost:9545
BEDROCK_TAGS_REMOTE?=origin
OP_STACK_GO_BUILDER?=us-docker.pkg.dev/oplabs-tools-artifacts/images/op_stack_go:latest
build: build-go build-ts
.PHONY: build
......@@ -8,6 +9,10 @@ build: build-go build-ts
build-go: submodules op-node op-proposer op-batcher
.PHONY: build-go
lint-go:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
.PHONY: lint-go
build-ts: submodules
if [ -n "$$NVM_DIR" ]; then \
. $$NVM_DIR/nvm.sh && nvm use; \
......@@ -19,6 +24,13 @@ build-ts: submodules
ci-builder:
docker build -t ci-builder -f ops/docker/ci-builder/Dockerfile .
golang-docker:
DOCKER_BUILDKIT=1 docker build -t op-stack-go \
--build-arg GIT_COMMIT=$$(git rev-parse HEAD) \
--build-arg GIT_DATE=$$(git show -s --format='%ct') \
-f ops/docker/op-stack-go/Dockerfile .
.PHONY: golang-docker
submodules:
# CI will checkout submodules on its own (and fails on these commands)
if [ -z "$$GITHUB_ENV" ]; then \
......@@ -163,4 +175,10 @@ bedrock-markdown-links:
--exclude-mail /input/README.md "/input/specs/**/*.md"
install-geth:
go install github.com/ethereum/go-ethereum/cmd/geth@v1.12.0
./ops/scripts/geth-version-checker.sh && \
(echo "Geth versions match, not installing geth..."; true) || \
(echo "Versions do not match, installing geth!"; \
go install -v github.com/ethereum/go-ethereum/cmd/geth@$(shell cat .gethrc); \
echo "Installed geth!"; true)
.PHONY: install-geth
......@@ -96,7 +96,9 @@ def main():
log.info('Building docker images')
run_command(['docker', 'compose', 'build', '--progress', 'plain'], cwd=paths.ops_bedrock_dir, env={
'PWD': paths.ops_bedrock_dir
'PWD': paths.ops_bedrock_dir,
'DOCKER_BUILDKIT': '1', # (should be available by default in later versions, but explicitly enable it anyway)
'COMPOSE_DOCKER_CLI_BUILD': '1' # use the docker cache
})
log.info('Devnet starting')
......
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
GITCOMMIT ?= $(shell git rev-parse HEAD)
GITDATE ?= $(shell git show -s --format='%ct')
VERSION := v0.0.0
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
......@@ -20,9 +20,6 @@ elf:
test: elf
go test -v ./...
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is"
fuzz:
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallBrk ./mipsevm
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallClone ./mipsevm
......
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
GITCOMMIT ?= $(shell git rev-parse HEAD)
GITDATE ?= $(shell git show -s --format='%ct')
VERSION := v0.0.0
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
......
......@@ -8,7 +8,7 @@ require (
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231001123245-7b48d3818686
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231018202221-fdba3d104171
github.com/ethereum/go-ethereum v1.13.4
github.com/fsnotify/fsnotify v1.6.0
github.com/go-chi/chi/v5 v5.0.10
......
......@@ -178,8 +178,8 @@ github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
github.com/ethereum-optimism/op-geth v1.101301.2-0.20231018010910-b142ad43dda3 h1:WdAevEuDn3I1YPChnIlgqCXvDFx2/oRvcvDG7aU86sk=
github.com/ethereum-optimism/op-geth v1.101301.2-0.20231018010910-b142ad43dda3/go.mod h1:V2K+IIUITMRKVGLiPCr29RvbDaA9P7YE5JL7UrFsbD8=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231001123245-7b48d3818686 h1:f57hd8G96c8ORWd4ameFpveSnHcb0hA2D1VatviwoDc=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231001123245-7b48d3818686/go.mod h1:q0u2UbyOr1q/y94AgMOj/V8b1KO05ZwILTR/qKt7Auo=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231018202221-fdba3d104171 h1:MjCUj16JSLZRDnQQ6OOUy6Chfb4dKo7ahFceNi0RKZ8=
github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20231018202221-fdba3d104171/go.mod h1:/70H/KqrtKcvWvNGVj6S3rAcLC+kUPr3t2aDmYIS+Xk=
github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg=
github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
......
......@@ -25,3 +25,9 @@ func Clamp(start, end *big.Int, size uint64) *big.Int {
func Matcher(num int64) func(*big.Int) bool {
return func(bi *big.Int) bool { return bi.Int64() == num }
}
func WeiToETH(wei *big.Int) *big.Float {
f := new(big.Float)
f.SetString(wei.String())
return f.Quo(f, big.NewFloat(1e18))
}
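A quick usage sketch of the new WeiToETH helper (illustrative only; the amounts are made up), showing the lossy float64 conversion that the bridge processors further down rely on when feeding Prometheus counters:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum-optimism/optimism/indexer/bigint"
)

func main() {
	// 1,500,000 gwei = 1.5e15 wei = 0.0015 ETH
	wei := new(big.Int).Mul(big.NewInt(1_500_000), big.NewInt(1_000_000_000))
	eth, _ := bigint.WeiToETH(wei).Float64()
	fmt.Println(eth) // 0.0015
}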
......@@ -30,6 +30,8 @@ var (
func runIndexer(ctx *cli.Context) error {
log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "indexer")
oplog.SetGlobalLogHandler(log.GetHandler())
log.Info("running indexer...")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil {
log.Error("failed to load config", "err", err)
......@@ -49,13 +51,14 @@ func runIndexer(ctx *cli.Context) error {
return err
}
log.Info("running indexer...")
return indexer.Run(ctx.Context)
}
func runApi(ctx *cli.Context) error {
log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "api")
oplog.SetGlobalLogHandler(log.GetHandler())
log.Info("running api...")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
if err != nil {
log.Error("failed to load config", "err", err)
......@@ -69,7 +72,6 @@ func runApi(ctx *cli.Context) error {
}
defer db.Close()
log.Info("running api...")
api := api.NewApi(log, db.BridgeTransfers, cfg.HTTPServer, cfg.MetricsServer)
return api.Run(ctx.Context)
}
......@@ -77,8 +79,9 @@ func runApi(ctx *cli.Context) error {
func runMigrations(ctx *cli.Context) error {
log := oplog.NewLogger(oplog.AppOut(ctx), oplog.ReadCLIConfig(ctx)).New("role", "migrations")
oplog.SetGlobalLogHandler(log.GetHandler())
log.Info("running migrations...")
cfg, err := config.LoadConfig(log, ctx.String(ConfigFlag.Name))
migrationsDir := ctx.String(MigrationsFlag.Name)
if err != nil {
log.Error("failed to load config", "err", err)
return err
......@@ -91,7 +94,7 @@ func runMigrations(ctx *cli.Context) error {
}
defer db.Close()
log.Info("running migrations...")
migrationsDir := ctx.String(MigrationsFlag.Name)
return db.ExecuteSQLMigration(migrationsDir)
}
......
......@@ -27,5 +27,6 @@ func main() {
app := newCli(GitCommit, GitDate)
if err := app.RunContext(ctx, os.Args); err != nil {
log.Error("application failed", "err", err)
os.Exit(1)
}
}
......@@ -7,8 +7,10 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
/**
......@@ -67,17 +69,23 @@ type BlocksDB interface {
*/
type blocksDB struct {
log log.Logger
gorm *gorm.DB
}
func newBlocksDB(db *gorm.DB) BlocksDB {
return &blocksDB{gorm: db}
func newBlocksDB(log log.Logger, db *gorm.DB) BlocksDB {
return &blocksDB{log: log.New("table", "blocks"), gorm: db}
}
// L1
func (db *blocksDB) StoreL1BlockHeaders(headers []L1BlockHeader) error {
result := db.gorm.CreateInBatches(&headers, batchInsertSize)
deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "hash"}}, DoNothing: true})
result := deduped.Create(&headers)
if result.Error == nil && int(result.RowsAffected) < len(headers) {
db.log.Warn("ignored L1 block duplicates", "duplicates", len(headers)-int(result.RowsAffected))
}
return result.Error
}
......@@ -115,7 +123,12 @@ func (db *blocksDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
// L2
func (db *blocksDB) StoreL2BlockHeaders(headers []L2BlockHeader) error {
result := db.gorm.CreateInBatches(&headers, batchInsertSize)
deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "hash"}}, DoNothing: true})
result := deduped.Create(&headers)
if result.Error == nil && int(result.RowsAffected) < len(headers) {
db.log.Warn("ignored L2 block duplicates", "duplicates", len(headers)-int(result.RowsAffected))
}
return result.Error
}
......
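The same ON CONFLICT / DO NOTHING dedupe pattern recurs in every store method below (bridge messages, transactions, transfers, contract events). A standalone sketch of the GORM idiom, using a hypothetical Thing model and omitting driver setup:

package sketch

import (
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)

// Thing is a hypothetical model with a unique "hash" column, standing in for
// the block, event and bridge rows in the diffs above and below.
type Thing struct {
	Hash  string `gorm:"primaryKey"`
	Value string
}

// storeThings mirrors the StoreL1BlockHeaders change: ON CONFLICT (hash) DO NOTHING,
// with duplicates surfacing only as a reduced RowsAffected count that gets logged.
func storeThings(db *gorm.DB, things []Thing) (duplicates int, err error) {
	deduped := db.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "hash"}}, DoNothing: true})
	result := deduped.Create(&things)
	if result.Error == nil && int(result.RowsAffected) < len(things) {
		duplicates = len(things) - int(result.RowsAffected)
	}
	return duplicates, result.Error
}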
......@@ -6,8 +6,10 @@ import (
"math/big"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/google/uuid"
)
......@@ -60,11 +62,12 @@ type BridgeMessagesDB interface {
*/
type bridgeMessagesDB struct {
log log.Logger
gorm *gorm.DB
}
func newBridgeMessagesDB(db *gorm.DB) BridgeMessagesDB {
return &bridgeMessagesDB{gorm: db}
func newBridgeMessagesDB(log log.Logger, db *gorm.DB) BridgeMessagesDB {
return &bridgeMessagesDB{log: log.New("table", "bridge_messages"), gorm: db}
}
/**
......@@ -72,7 +75,12 @@ func newBridgeMessagesDB(db *gorm.DB) BridgeMessagesDB {
*/
func (db bridgeMessagesDB) StoreL1BridgeMessages(messages []L1BridgeMessage) error {
result := db.gorm.CreateInBatches(&messages, batchInsertSize)
deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "message_hash"}}, DoNothing: true})
result := deduped.Create(&messages)
if result.Error == nil && int(result.RowsAffected) < len(messages) {
db.log.Warn("ignored L1 bridge message duplicates", "duplicates", len(messages)-int(result.RowsAffected))
}
return result.Error
}
......@@ -98,7 +106,13 @@ func (db bridgeMessagesDB) MarkRelayedL1BridgeMessage(messageHash common.Hash, r
if err != nil {
return err
} else if message == nil {
return fmt.Errorf("L1BridgeMessage with message hash %s not found", messageHash)
return fmt.Errorf("L1BridgeMessage %s not found", messageHash)
}
if message.RelayedMessageEventGUID != nil && message.RelayedMessageEventGUID.ID() == relayEvent.ID() {
return nil
} else if message.RelayedMessageEventGUID != nil {
return fmt.Errorf("relayed message %s re-relayed with a different event %d", messageHash, relayEvent)
}
message.RelayedMessageEventGUID = &relayEvent
......@@ -111,7 +125,12 @@ func (db bridgeMessagesDB) MarkRelayedL1BridgeMessage(messageHash common.Hash, r
*/
func (db bridgeMessagesDB) StoreL2BridgeMessages(messages []L2BridgeMessage) error {
result := db.gorm.CreateInBatches(&messages, batchInsertSize)
deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "message_hash"}}, DoNothing: true})
result := deduped.Create(&messages)
if result.Error == nil && int(result.RowsAffected) < len(messages) {
db.log.Warn("ignored L2 bridge message duplicates", "duplicates", len(messages)-int(result.RowsAffected))
}
return result.Error
}
......@@ -137,7 +156,13 @@ func (db bridgeMessagesDB) MarkRelayedL2BridgeMessage(messageHash common.Hash, r
if err != nil {
return err
} else if message == nil {
return fmt.Errorf("L2BridgeMessage with message hash %s not found", messageHash)
return fmt.Errorf("L2BridgeMessage %s not found", messageHash)
}
if message.RelayedMessageEventGUID != nil && message.RelayedMessageEventGUID.ID() == relayEvent.ID() {
return nil
} else if message.RelayedMessageEventGUID != nil {
return fmt.Errorf("relayed message %s re-relayed with a different event %s", messageHash, relayEvent)
}
message.RelayedMessageEventGUID = &relayEvent
......
......@@ -7,8 +7,10 @@ import (
"github.com/google/uuid"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
/**
......@@ -68,11 +70,12 @@ type BridgeTransactionsDB interface {
*/
type bridgeTransactionsDB struct {
log log.Logger
gorm *gorm.DB
}
func newBridgeTransactionsDB(db *gorm.DB) BridgeTransactionsDB {
return &bridgeTransactionsDB{gorm: db}
func newBridgeTransactionsDB(log log.Logger, db *gorm.DB) BridgeTransactionsDB {
return &bridgeTransactionsDB{log: log.New("table", "bridge_transactions"), gorm: db}
}
/**
......@@ -80,7 +83,12 @@ func newBridgeTransactionsDB(db *gorm.DB) BridgeTransactionsDB {
*/
func (db *bridgeTransactionsDB) StoreL1TransactionDeposits(deposits []L1TransactionDeposit) error {
result := db.gorm.CreateInBatches(&deposits, batchInsertSize)
deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "source_hash"}}, DoNothing: true})
result := deduped.Create(&deposits)
if result.Error == nil && int(result.RowsAffected) < len(deposits) {
db.log.Warn("ignored L1 tx deposit duplicates", "duplicates", len(deposits)-int(result.RowsAffected))
}
return result.Error
}
......@@ -133,7 +141,12 @@ func (db *bridgeTransactionsDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
*/
func (db *bridgeTransactionsDB) StoreL2TransactionWithdrawals(withdrawals []L2TransactionWithdrawal) error {
result := db.gorm.CreateInBatches(&withdrawals, batchInsertSize)
deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "withdrawal_hash"}}, DoNothing: true})
result := deduped.Create(&withdrawals)
if result.Error == nil && int(result.RowsAffected) < len(withdrawals) {
db.log.Warn("ignored L2 tx withdrawal duplicates", "duplicates", len(withdrawals)-int(result.RowsAffected))
}
return result.Error
}
......@@ -155,11 +168,16 @@ func (db *bridgeTransactionsDB) MarkL2TransactionWithdrawalProvenEvent(withdrawa
withdrawal, err := db.L2TransactionWithdrawal(withdrawalHash)
if err != nil {
return err
}
if withdrawal == nil {
} else if withdrawal == nil {
return fmt.Errorf("transaction withdrawal hash %s not found", withdrawalHash)
}
if withdrawal.ProvenL1EventGUID != nil && withdrawal.ProvenL1EventGUID.ID() == provenL1EventGuid.ID() {
return nil
} else if withdrawal.ProvenL1EventGUID != nil {
return fmt.Errorf("proven withdrawal %s re-proven with a different event %s", withdrawalHash, provenL1EventGuid)
}
withdrawal.ProvenL1EventGUID = &provenL1EventGuid
result := db.gorm.Save(&withdrawal)
return result.Error
......@@ -170,14 +188,18 @@ func (db *bridgeTransactionsDB) MarkL2TransactionWithdrawalFinalizedEvent(withdr
withdrawal, err := db.L2TransactionWithdrawal(withdrawalHash)
if err != nil {
return err
}
if withdrawal == nil {
} else if withdrawal == nil {
return fmt.Errorf("transaction withdrawal hash %s not found", withdrawalHash)
}
if withdrawal.ProvenL1EventGUID == nil {
} else if withdrawal.ProvenL1EventGUID == nil {
return fmt.Errorf("cannot mark unproven withdrawal hash %s as finalized", withdrawal.WithdrawalHash)
}
if withdrawal.FinalizedL1EventGUID != nil && withdrawal.FinalizedL1EventGUID.ID() == finalizedL1EventGuid.ID() {
return nil
} else if withdrawal.FinalizedL1EventGUID != nil {
return fmt.Errorf("finalized withdrawal %s re-finalized with a different event %s", withdrawalHash, finalizedL1EventGuid)
}
withdrawal.FinalizedL1EventGUID = &finalizedL1EventGuid
withdrawal.Succeeded = &succeeded
result := db.gorm.Save(&withdrawal)
......
......@@ -5,9 +5,11 @@ import (
"fmt"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
var (
......@@ -79,11 +81,12 @@ type BridgeTransfersDB interface {
*/
type bridgeTransfersDB struct {
log log.Logger
gorm *gorm.DB
}
func newBridgeTransfersDB(db *gorm.DB) BridgeTransfersDB {
return &bridgeTransfersDB{gorm: db}
func newBridgeTransfersDB(log log.Logger, db *gorm.DB) BridgeTransfersDB {
return &bridgeTransfersDB{log: log.New("table", "bridge_transfers"), gorm: db}
}
/**
......@@ -91,7 +94,12 @@ func newBridgeTransfersDB(db *gorm.DB) BridgeTransfersDB {
*/
func (db *bridgeTransfersDB) StoreL1BridgeDeposits(deposits []L1BridgeDeposit) error {
result := db.gorm.CreateInBatches(&deposits, batchInsertSize)
deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "transaction_source_hash"}}, DoNothing: true})
result := deduped.Create(&deposits)
if result.Error == nil && int(result.RowsAffected) < len(deposits) {
db.log.Warn("ignored L1 bridge transfer duplicates", "duplicates", len(deposits)-int(result.RowsAffected))
}
return result.Error
}
......@@ -204,7 +212,12 @@ l1_bridge_deposits.timestamp, cross_domain_message_hash, local_token_address, re
*/
func (db *bridgeTransfersDB) StoreL2BridgeWithdrawals(withdrawals []L2BridgeWithdrawal) error {
result := db.gorm.CreateInBatches(&withdrawals, batchInsertSize)
deduped := db.gorm.Clauses(clause.OnConflict{Columns: []clause.Column{{Name: "transaction_withdrawal_hash"}}, DoNothing: true})
result := deduped.Create(&withdrawals)
if result.Error == nil && int(result.RowsAffected) < len(withdrawals) {
db.log.Warn("ignored L2 bridge transfer duplicates", "duplicates", len(withdrawals)-int(result.RowsAffected))
}
return result.Error
}
......
......@@ -6,9 +6,11 @@ import (
"math/big"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/google/uuid"
)
......@@ -99,17 +101,25 @@ type ContractEventsDB interface {
*/
type contractEventsDB struct {
log log.Logger
gorm *gorm.DB
}
func newContractEventsDB(db *gorm.DB) ContractEventsDB {
return &contractEventsDB{gorm: db}
func newContractEventsDB(log log.Logger, db *gorm.DB) ContractEventsDB {
return &contractEventsDB{log: log.New("table", "events"), gorm: db}
}
// L1
func (db *contractEventsDB) StoreL1ContractEvents(events []L1ContractEvent) error {
result := db.gorm.CreateInBatches(&events, batchInsertSize)
// Since the block hash refers back to L1, we don't necessarily have to check
// that the RLP bytes match when doing conflict resolution.
deduped := db.gorm.Clauses(clause.OnConflict{OnConstraint: "l1_contract_events_block_hash_log_index_key", DoNothing: true})
result := deduped.Create(&events)
if result.Error == nil && int(result.RowsAffected) < len(events) {
db.log.Warn("ignored L1 contract event duplicates", "duplicates", len(events)-int(result.RowsAffected))
}
return result.Error
}
......@@ -144,7 +154,7 @@ func (db *contractEventsDB) L1ContractEventsWithFilter(filter ContractEvent, fro
query := db.gorm.Table("l1_contract_events").Where(&filter)
query = query.Joins("INNER JOIN l1_block_headers ON l1_contract_events.block_hash = l1_block_headers.hash")
query = query.Where("l1_block_headers.number >= ? AND l1_block_headers.number <= ?", fromHeight, toHeight)
query = query.Order("l1_block_headers.number ASC").Select("l1_contract_events.*")
query = query.Order("l1_block_headers.number ASC, l1_contract_events.log_index ASC").Select("l1_contract_events.*")
// NOTE: We use `Find` here instead of `Scan` since `Scan` doesn't support
// model hooks like `ContractEvent#AfterFind`. Functionally they are the same
......@@ -176,7 +186,14 @@ func (db *contractEventsDB) L1LatestContractEventWithFilter(filter ContractEvent
// L2
func (db *contractEventsDB) StoreL2ContractEvents(events []L2ContractEvent) error {
result := db.gorm.CreateInBatches(&events, batchInsertSize)
// Since the block hash refers back to L2, we don't necessarily have to check
// that the RLP bytes match when doing conflict resolution.
deduped := db.gorm.Clauses(clause.OnConflict{OnConstraint: "l2_contract_events_block_hash_log_index_key", DoNothing: true})
result := deduped.Create(&events)
if result.Error == nil && int(result.RowsAffected) < len(events) {
db.log.Warn("ignored L2 contract event duplicates", "duplicates", len(events)-int(result.RowsAffected))
}
return result.Error
}
......@@ -211,7 +228,7 @@ func (db *contractEventsDB) L2ContractEventsWithFilter(filter ContractEvent, fro
query := db.gorm.Table("l2_contract_events").Where(&filter)
query = query.Joins("INNER JOIN l2_block_headers ON l2_contract_events.block_hash = l2_block_headers.hash")
query = query.Where("l2_block_headers.number >= ? AND l2_block_headers.number <= ?", fromHeight, toHeight)
query = query.Order("l2_block_headers.number ASC").Select("l2_contract_events.*")
query = query.Order("l2_block_headers.number ASC, l2_contract_events.log_index ASC").Select("l2_contract_events.*")
// NOTE: We use `Find` here instead of `Scan` since `Scan` doesn't support
// model hooks like `ContractEvent#AfterFind`. Functionally they are the same
......
......@@ -10,6 +10,7 @@ import (
"github.com/ethereum-optimism/optimism/indexer/config"
_ "github.com/ethereum-optimism/optimism/indexer/database/serializers"
"github.com/ethereum-optimism/optimism/op-service/retry"
"github.com/pkg/errors"
"github.com/ethereum/go-ethereum/log"
......@@ -18,16 +19,9 @@ import (
"gorm.io/gorm"
)
var (
// The postgres parameter counter for a given query is stored via a uint16,
// resulting in a parameter limit of 65535. In order to avoid reaching this limit
// we'll utilize a batch size of 3k for inserts, well below as long as the the number
// of columns < 20.
batchInsertSize int = 3_000
)
type DB struct {
gorm *gorm.DB
log log.Logger
Blocks BlocksDB
ContractEvents ContractEventsDB
......@@ -37,7 +31,7 @@ type DB struct {
}
func NewDB(log log.Logger, dbConfig config.DBConfig) (*DB, error) {
retryStrategy := &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250}
log = log.New("module", "db")
dsn := fmt.Sprintf("host=%s dbname=%s sslmode=disable", dbConfig.Host, dbConfig.Name)
if dbConfig.Port != 0 {
......@@ -51,11 +45,19 @@ func NewDB(log log.Logger, dbConfig config.DBConfig) (*DB, error) {
}
gormConfig := gorm.Config{
Logger: newLogger(log),
// The indexer will explicitly manage the transactions
SkipDefaultTransaction: true,
Logger: newLogger(log),
// The postgres parameter counter for a given query is represented with uint16,
// resulting in a parameter limit of 65535. In order to avoid reaching this limit
// we'll utilize a batch size of 3k for inserts, well below the limit as long as
// the number of columns < 20.
CreateBatchSize: 3_000,
}
retryStrategy := &retry.ExponentialStrategy{Min: 1000, Max: 20_000, MaxJitter: 250}
gorm, err := retry.Do[*gorm.DB](context.Background(), 10, retryStrategy, func() (*gorm.DB, error) {
gorm, err := gorm.Open(postgres.Open(dsn), &gormConfig)
if err != nil {
......@@ -66,16 +68,17 @@ func NewDB(log log.Logger, dbConfig config.DBConfig) (*DB, error) {
})
if err != nil {
return nil, fmt.Errorf("failed to connect to database after multiple retries: %w", err)
return nil, err
}
db := &DB{
gorm: gorm,
Blocks: newBlocksDB(gorm),
ContractEvents: newContractEventsDB(gorm),
BridgeTransfers: newBridgeTransfersDB(gorm),
BridgeMessages: newBridgeMessagesDB(gorm),
BridgeTransactions: newBridgeTransactionsDB(gorm),
log: log,
Blocks: newBlocksDB(log, gorm),
ContractEvents: newContractEventsDB(log, gorm),
BridgeTransfers: newBridgeTransfersDB(log, gorm),
BridgeMessages: newBridgeMessagesDB(log, gorm),
BridgeTransactions: newBridgeTransactionsDB(log, gorm),
}
return db, nil
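For completeness, the headroom behind the CreateBatchSize comment above: a batched INSERT of 3,000 rows with fewer than 20 bound columns each uses at most 3,000 × 20 = 60,000 parameters, which stays under the 65,535 (uint16) parameter ceiling Postgres imposes per statement.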
......@@ -85,11 +88,21 @@ func NewDB(log log.Logger, dbConfig config.DBConfig) (*DB, error) {
// transaction. If the supplied function errors, the transaction is rolled back.
func (db *DB) Transaction(fn func(db *DB) error) error {
return db.gorm.Transaction(func(tx *gorm.DB) error {
return fn(dbFromGormTx(tx))
txDB := &DB{
gorm: tx,
Blocks: newBlocksDB(db.log, tx),
ContractEvents: newContractEventsDB(db.log, tx),
BridgeTransfers: newBridgeTransfersDB(db.log, tx),
BridgeMessages: newBridgeMessagesDB(db.log, tx),
BridgeTransactions: newBridgeTransactionsDB(db.log, tx),
}
return fn(txDB)
})
}
func (db *DB) Close() error {
db.log.Info("closing database")
sql, err := db.gorm.DB()
if err != nil {
return err
......@@ -98,17 +111,6 @@ func (db *DB) Close() error {
return sql.Close()
}
func dbFromGormTx(tx *gorm.DB) *DB {
return &DB{
gorm: tx,
Blocks: newBlocksDB(tx),
ContractEvents: newContractEventsDB(tx),
BridgeTransfers: newBridgeTransfersDB(tx),
BridgeMessages: newBridgeMessagesDB(tx),
BridgeTransactions: newBridgeTransactionsDB(tx),
}
}
func (db *DB) ExecuteSQLMigration(migrationsFolder string) error {
err := filepath.Walk(migrationsFolder, func(path string, info os.FileInfo, err error) error {
// Check for any walking error
......@@ -122,12 +124,14 @@ func (db *DB) ExecuteSQLMigration(migrationsFolder string) error {
}
// Read the migration file content
db.log.Info("reading sql file", "path", path)
fileContent, readErr := os.ReadFile(path)
if readErr != nil {
return errors.Wrap(readErr, fmt.Sprintf("Error reading SQL file: %s", path))
}
// Execute the migration
db.log.Info("executing sql file", "path", path)
execErr := db.gorm.Exec(string(fileContent)).Error
if execErr != nil {
return errors.Wrap(execErr, fmt.Sprintf("Error executing SQL script: %s", path))
......@@ -136,5 +140,6 @@ func (db *DB) ExecuteSQLMigration(migrationsFolder string) error {
return nil
})
db.log.Info("finished migrations")
return err
}
......@@ -14,7 +14,7 @@ import (
var (
_ logger.Interface = Logger{}
SlowThresholdMilliseconds = 200
SlowThresholdMilliseconds int64 = 500
)
type Logger struct {
......@@ -22,7 +22,7 @@ type Logger struct {
}
func newLogger(log log.Logger) Logger {
return Logger{log.New("module", "db")}
return Logger{log}
}
func (l Logger) LogMode(lvl logger.LogLevel) logger.Interface {
......@@ -50,7 +50,7 @@ func (l Logger) Trace(ctx context.Context, begin time.Time, fc func() (sql strin
sql = fmt.Sprintf("%sVALUES (...)", sql[:i])
}
if elapsedMs < 200 {
if elapsedMs < SlowThresholdMilliseconds {
l.log.Debug("database operation", "duration_ms", elapsedMs, "rows_affected", rows, "sql", sql)
} else {
l.log.Warn("database operation", "duration_ms", elapsedMs, "rows_affected", rows, "sql", sql)
......
......@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"reflect"
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"gorm.io/gorm/schema"
......@@ -70,5 +71,5 @@ func (BytesSerializer) Value(ctx context.Context, field *schema.Field, dst refle
}
hexStr := hexutil.Encode(fieldBytes.Bytes())
return hexStr, nil
return strings.ToLower(hexStr), nil
}
......@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"reflect"
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/rlp"
......@@ -52,5 +53,5 @@ func (RLPSerializer) Value(ctx context.Context, field *schema.Field, dst reflect
}
hexStr := hexutil.Encode(rlpBytes)
return hexStr, nil
return strings.ToLower(hexStr), nil
}
......@@ -63,6 +63,7 @@ CREATE INDEX IF NOT EXISTS l1_contract_events_timestamp ON l1_contract_events(ti
CREATE INDEX IF NOT EXISTS l1_contract_events_block_hash ON l1_contract_events(block_hash);
CREATE INDEX IF NOT EXISTS l1_contract_events_event_signature ON l1_contract_events(event_signature);
CREATE INDEX IF NOT EXISTS l1_contract_events_contract_address ON l1_contract_events(contract_address);
ALTER TABLE l1_contract_events ADD UNIQUE (block_hash, log_index);
CREATE TABLE IF NOT EXISTS l2_contract_events (
-- Searchable fields
......@@ -81,6 +82,7 @@ CREATE INDEX IF NOT EXISTS l2_contract_events_timestamp ON l2_contract_events(ti
CREATE INDEX IF NOT EXISTS l2_contract_events_block_hash ON l2_contract_events(block_hash);
CREATE INDEX IF NOT EXISTS l2_contract_events_event_signature ON l2_contract_events(event_signature);
CREATE INDEX IF NOT EXISTS l2_contract_events_contract_address ON l2_contract_events(contract_address);
ALTER TABLE l2_contract_events ADD UNIQUE (block_hash, log_index);
/**
* BRIDGING DATA
......
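A note tying this migration to the Go changes above: for an inline UNIQUE constraint, Postgres generates the name `<table>_<columns>_key`, so these two ALTER statements produce `l1_contract_events_block_hash_log_index_key` and `l2_contract_events_block_hash_log_index_key`, which are exactly the constraint names the contract-events store passes to `clause.OnConflict{OnConstraint: ...}` as its conflict target.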
......@@ -4,6 +4,7 @@ import (
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/indexer/bigint"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/processors/contracts"
......@@ -28,11 +29,14 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L1M
log.Info("detected transaction deposits", "size", len(optimismPortalTxDeposits))
}
var mintedGWEI = bigint.Zero
portalDeposits := make(map[logKey]*contracts.OptimismPortalTransactionDepositEvent, len(optimismPortalTxDeposits))
transactionDeposits := make([]database.L1TransactionDeposit, len(optimismPortalTxDeposits))
for i := range optimismPortalTxDeposits {
depositTx := optimismPortalTxDeposits[i]
portalDeposits[logKey{depositTx.Event.BlockHash, depositTx.Event.LogIndex}] = &depositTx
mintedGWEI = new(big.Int).Add(mintedGWEI, depositTx.Tx.Amount)
transactionDeposits[i] = database.L1TransactionDeposit{
SourceHash: depositTx.DepositTx.SourceHash,
L2TransactionHash: types.NewTx(depositTx.DepositTx).Hash(),
......@@ -40,12 +44,17 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L1M
GasLimit: depositTx.GasLimit,
Tx: depositTx.Tx,
}
}
if len(transactionDeposits) > 0 {
if err := db.BridgeTransactions.StoreL1TransactionDeposits(transactionDeposits); err != nil {
return err
}
metrics.RecordL1TransactionDeposits(len(transactionDeposits))
// Convert from wei to eth
mintedETH, _ := bigint.WeiToETH(mintedGWEI).Float64()
metrics.RecordL1TransactionDeposits(len(transactionDeposits), mintedETH)
}
// (2) L1CrossDomainMessenger
......@@ -68,6 +77,9 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L1M
if !ok {
log.Error("expected TransactionDeposit preceding SentMessage event", "tx_hash", sentMessage.Event.TransactionHash.String())
return fmt.Errorf("expected TransactionDeposit preceding SentMessage event. tx_hash = %s", sentMessage.Event.TransactionHash.String())
} else if portalDeposit.Event.TransactionHash != sentMessage.Event.TransactionHash {
log.Error("correlated events tx hash mismatch", "deposit_tx_hash", portalDeposit.Event.TransactionHash.String(), "message_tx_hash", sentMessage.Event.TransactionHash.String())
return fmt.Errorf("correlated events tx hash mismatch")
}
bridgeMessages[i] = database.L1BridgeMessage{TransactionSourceHash: portalDeposit.DepositTx.SourceHash, BridgeMessage: sentMessage.BridgeMessage}
......@@ -98,15 +110,22 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L1M
if !ok {
log.Error("expected TransactionDeposit following BridgeInitiated event", "tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("expected TransactionDeposit following BridgeInitiated event. tx_hash = %s", initiatedBridge.Event.TransactionHash.String())
} else if portalDeposit.Event.TransactionHash != initiatedBridge.Event.TransactionHash {
log.Error("correlated events tx hash mismatch", "deposit_tx_hash", portalDeposit.Event.TransactionHash.String(), "bridge_tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("correlated events tx hash mismatch")
}
sentMessage, ok := sentMessages[logKey{initiatedBridge.Event.BlockHash, initiatedBridge.Event.LogIndex + 2}]
if !ok {
log.Error("expected SentMessage following TransactionDeposit event", "tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("expected SentMessage following TransactionDeposit event. tx_hash = %s", initiatedBridge.Event.TransactionHash.String())
log.Error("expected SentMessage following BridgeInitiated event", "tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("expected SentMessage following BridgeInitiated event. tx_hash = %s", initiatedBridge.Event.TransactionHash.String())
} else if sentMessage.Event.TransactionHash != initiatedBridge.Event.TransactionHash {
log.Error("correlated events tx hash mismatch", "message_tx_hash", sentMessage.Event.TransactionHash.String(), "bridge_tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("correlated events tx hash mismatch")
}
initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash
bridgedTokens[initiatedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++
initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash
bridgeDeposits[i] = database.L1BridgeDeposit{
TransactionSourceHash: portalDeposit.DepositTx.SourceHash,
BridgeTransfer: initiatedBridge.BridgeTransfer,
......@@ -168,17 +187,17 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metrics L1M
}
for i := range finalizedWithdrawals {
finalized := finalizedWithdrawals[i]
withdrawal, err := db.BridgeTransactions.L2TransactionWithdrawal(finalized.WithdrawalHash)
finalizedWithdrawal := finalizedWithdrawals[i]
withdrawal, err := db.BridgeTransactions.L2TransactionWithdrawal(finalizedWithdrawal.WithdrawalHash)
if err != nil {
return err
} else if withdrawal == nil {
log.Error("missing indexed withdrawal on finalization event!", "tx_hash", finalized.Event.TransactionHash.String())
return fmt.Errorf("missing indexed withdrawal on finalization! tx_hash: %s", finalized.Event.TransactionHash.String())
log.Error("missing indexed withdrawal on finalization event!", "tx_hash", finalizedWithdrawal.Event.TransactionHash.String())
return fmt.Errorf("missing indexed withdrawal on finalization! tx_hash: %s", finalizedWithdrawal.Event.TransactionHash.String())
}
if err = db.BridgeTransactions.MarkL2TransactionWithdrawalFinalizedEvent(finalized.WithdrawalHash, finalized.Event.GUID, finalized.Success); err != nil {
log.Error("failed to mark withdrawal as finalized", "err", err, "tx_hash", finalized.Event.TransactionHash.String())
if err = db.BridgeTransactions.MarkL2TransactionWithdrawalFinalizedEvent(finalizedWithdrawal.WithdrawalHash, finalizedWithdrawal.Event.GUID, finalizedWithdrawal.Success); err != nil {
log.Error("failed to mark withdrawal as finalized", "err", err, "tx_hash", finalizedWithdrawal.Event.TransactionHash.String())
return err
}
}
......@@ -234,6 +253,9 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metrics L1M
if !ok {
log.Error("expected RelayedMessage following BridgeFinalized event", "tx_hash", finalizedBridge.Event.TransactionHash.String())
return fmt.Errorf("expected RelayedMessage following BridgeFinalized event. tx_hash = %s", finalizedBridge.Event.TransactionHash.String())
} else if relayedMessage.Event.TransactionHash != finalizedBridge.Event.TransactionHash {
log.Error("correlated events tx hash mismatch", "message_tx_hash", relayedMessage.Event.TransactionHash.String(), "bridge_tx_hash", finalizedBridge.Event.TransactionHash.String())
return fmt.Errorf("correlated events tx hash mismatch")
}
// Since the message hash is computed from the relayed message, this ensures the deposit fields must match
......
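The correlation checks added throughout these processors follow one pattern: look up the companion event at a fixed log-index offset within the same block, then require that it came from the same transaction. A compact sketch of that pattern (the event and logKey types here are trimmed stand-ins, not the indexer's real structs):

package sketch

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// event is a trimmed stand-in for the indexer's contract event rows.
type event struct {
	BlockHash       common.Hash
	LogIndex        uint64
	TransactionHash common.Hash
}

// logKey mirrors the (block hash, log index) key used by the processors above.
type logKey struct {
	blockHash common.Hash
	logIndex  uint64
}

// correlate looks up the event expected at a fixed log-index offset from base
// and applies the new safety check: the correlated event must come from the
// same transaction, otherwise the adjacency was coincidental.
func correlate(index map[logKey]*event, base *event, offset uint64) (*event, error) {
	next, ok := index[logKey{base.BlockHash, base.LogIndex + offset}]
	if !ok {
		return nil, fmt.Errorf("expected correlated event at log index %d", base.LogIndex+offset)
	} else if next.TransactionHash != base.TransactionHash {
		return nil, fmt.Errorf("correlated events tx hash mismatch")
	}
	return next, nil
}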
......@@ -5,6 +5,7 @@ import (
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/indexer/bigint"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/processors/contracts"
......@@ -28,11 +29,14 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L2M
log.Info("detected transaction withdrawals", "size", len(l2ToL1MPMessagesPassed))
}
var withdrawnWEI = bigint.Zero
messagesPassed := make(map[logKey]*contracts.L2ToL1MessagePasserMessagePassed, len(l2ToL1MPMessagesPassed))
transactionWithdrawals := make([]database.L2TransactionWithdrawal, len(l2ToL1MPMessagesPassed))
for i := range l2ToL1MPMessagesPassed {
messagePassed := l2ToL1MPMessagesPassed[i]
messagesPassed[logKey{messagePassed.Event.BlockHash, messagePassed.Event.LogIndex}] = &messagePassed
withdrawnWEI = new(big.Int).Add(withdrawnWEI, messagePassed.Tx.Amount)
transactionWithdrawals[i] = database.L2TransactionWithdrawal{
WithdrawalHash: messagePassed.WithdrawalHash,
InitiatedL2EventGUID: messagePassed.Event.GUID,
......@@ -45,7 +49,10 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L2M
if err := db.BridgeTransactions.StoreL2TransactionWithdrawals(transactionWithdrawals); err != nil {
return err
}
metrics.RecordL2TransactionWithdrawals(len(transactionWithdrawals))
// Convert the withdrawn WEI to ETH
withdrawnETH, _ := bigint.WeiToETH(withdrawnWEI).Float64()
metrics.RecordL2TransactionWithdrawals(len(transactionWithdrawals), withdrawnETH)
}
// (2) L2CrossDomainMessenger
......@@ -68,6 +75,9 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L2M
if !ok {
log.Error("expected MessagePassedEvent preceding SentMessage", "tx_hash", sentMessage.Event.TransactionHash.String())
return fmt.Errorf("expected MessagePassedEvent preceding SentMessage. tx_hash = %s", sentMessage.Event.TransactionHash.String())
} else if messagePassed.Event.TransactionHash != sentMessage.Event.TransactionHash {
log.Error("correlated events tx hash mismatch", "withdraw_tx_hash", messagePassed.Event.TransactionHash.String(), "message_tx_hash", sentMessage.Event.TransactionHash.String())
return fmt.Errorf("correlated events tx hash mismatch")
}
bridgeMessages[i] = database.L2BridgeMessage{TransactionWithdrawalHash: messagePassed.WithdrawalHash, BridgeMessage: sentMessage.BridgeMessage}
......@@ -93,16 +103,23 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metrics L2M
for i := range initiatedBridges {
initiatedBridge := initiatedBridges[i]
// extract the cross domain message hash & deposit source hash from the following events
// extract the cross domain message hash & withdraw hash from the following events
messagePassed, ok := messagesPassed[logKey{initiatedBridge.Event.BlockHash, initiatedBridge.Event.LogIndex + 1}]
if !ok {
log.Error("expected MessagePassed following BridgeInitiated event", "tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("expected MessagePassed following BridgeInitiated event. tx_hash = %s", initiatedBridge.Event.TransactionHash.String())
} else if messagePassed.Event.TransactionHash != initiatedBridge.Event.TransactionHash {
log.Error("correlated events tx hash mismatch", "withdraw_tx_hash", messagePassed.Event.TransactionHash.String(), "bridge_tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("correlated events tx hash mismatch")
}
sentMessage, ok := sentMessages[logKey{initiatedBridge.Event.BlockHash, initiatedBridge.Event.LogIndex + 2}]
if !ok {
log.Error("expected SentMessage following MessagePassed event", "tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("expected SentMessage following MessagePassed event. tx_hash = %s", initiatedBridge.Event.TransactionHash.String())
log.Error("expected SentMessage following BridgeInitiated event", "tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("expected SentMessage following BridgeInitiated event. tx_hash = %s", initiatedBridge.Event.TransactionHash.String())
} else if sentMessage.Event.TransactionHash != initiatedBridge.Event.TransactionHash {
log.Error("correlated events tx hash mismatch", "message_tx_hash", sentMessage.Event.TransactionHash.String(), "bridge_tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("correlated events tx hash mismatch")
}
initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash
......@@ -180,6 +197,9 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, metrics L2M
if !ok {
log.Error("expected RelayedMessage following BridgeFinalized event", "tx_hash", finalizedBridge.Event.TransactionHash.String())
return fmt.Errorf("expected RelayedMessage following BridgeFinalized event. tx_hash = %s", finalizedBridge.Event.TransactionHash.String())
} else if relayedMessage.Event.TransactionHash != finalizedBridge.Event.TransactionHash {
log.Error("correlated events tx hash mismatch", "message_tx_hash", relayedMessage.Event.TransactionHash.String(), "bridge_tx_hash", finalizedBridge.Event.TransactionHash.String())
return fmt.Errorf("correlated events tx hash mismatch")
}
// Since the message hash is computed from the relayed message, this ensures the withdrawal fields must match
......
......@@ -8,6 +8,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/indexer/bigint"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/node"
......@@ -31,17 +32,18 @@ func LegacyL1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri
log.Info("detected legacy transaction deposits", "size", len(ctcTxDepositEvents))
}
mintedWEI := bigint.Zero
ctcTxDeposits := make(map[logKey]*contracts.LegacyCTCDepositEvent, len(ctcTxDepositEvents))
transactionDeposits := make([]database.L1TransactionDeposit, len(ctcTxDepositEvents))
for i := range ctcTxDepositEvents {
deposit := ctcTxDepositEvents[i]
ctcTxDeposits[logKey{deposit.Event.BlockHash, deposit.Event.LogIndex}] = &deposit
transactionDeposits[i] = database.L1TransactionDeposit{
// We re-use the L2 Transaction hash as the source hash
// to remain consistent in the schema.
SourceHash: deposit.TxHash,
L2TransactionHash: deposit.TxHash,
mintedWEI = new(big.Int).Add(mintedWEI, deposit.Tx.Amount)
transactionDeposits[i] = database.L1TransactionDeposit{
// We re-use the L2 Transaction hash as the source hash to remain consistent in the schema.
SourceHash: deposit.TxHash,
L2TransactionHash: deposit.TxHash,
InitiatedL1EventGUID: deposit.Event.GUID,
GasLimit: deposit.GasLimit,
Tx: deposit.Tx,
......@@ -51,7 +53,9 @@ func LegacyL1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri
if err := db.BridgeTransactions.StoreL1TransactionDeposits(transactionDeposits); err != nil {
return err
}
metrics.RecordL1TransactionDeposits(len(transactionDeposits))
mintedETH, _ := bigint.WeiToETH(mintedWEI).Float64()
metrics.RecordL1TransactionDeposits(len(transactionDeposits), mintedETH)
}
// (2) L1CrossDomainMessenger
......@@ -72,8 +76,11 @@ func LegacyL1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri
// extract the deposit hash from the previous TransactionDepositedEvent
ctcTxDeposit, ok := ctcTxDeposits[logKey{sentMessage.Event.BlockHash, sentMessage.Event.LogIndex - 1}]
if !ok {
log.Error("missing transaction deposit for cross domain message", "tx_hash", sentMessage.Event.TransactionHash.String())
return fmt.Errorf("missing preceding TransactionEnqueued for SentMessage event. tx_hash = %s", sentMessage.Event.TransactionHash.String())
log.Error("expected TransactionEnqueued preceding SentMessage event", "tx_hash", sentMessage.Event.TransactionHash.String())
return fmt.Errorf("expected TransactionEnqueued preceding SentMessage event. tx_hash = %s", sentMessage.Event.TransactionHash.String())
} else if ctcTxDeposit.Event.TransactionHash != sentMessage.Event.TransactionHash {
log.Error("correlated events tx hash mismatch", "deposit_tx_hash", ctcTxDeposit.Event.TransactionHash.String(), "message_tx_hash", sentMessage.Event.TransactionHash.String())
return fmt.Errorf("correlated events tx hash mismatch")
}
bridgeMessages[i] = database.L1BridgeMessage{TransactionSourceHash: ctcTxDeposit.TxHash, BridgeMessage: sentMessage.BridgeMessage}
......@@ -104,13 +111,20 @@ func LegacyL1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri
// - Event Flow: TransactionEnqueued -> SentMessage -> DepositInitiated
sentMessage, ok := sentMessages[logKey{initiatedBridge.Event.BlockHash, initiatedBridge.Event.LogIndex - 1}]
if !ok {
log.Error("missing cross domain message for bridge transfer", "tx_hash", initiatedBridge.Event.TransactionHash.String())
log.Error("expected SentMessage preceding BridgeInitiated event", "tx_hash", sentMessage.Event.TransactionHash.String())
return fmt.Errorf("expected SentMessage preceding DepositInitiated event. tx_hash = %s", initiatedBridge.Event.TransactionHash.String())
} else if sentMessage.Event.TransactionHash != initiatedBridge.Event.TransactionHash {
log.Error("correlated events tx hash mismatch", "message_tx_hash", sentMessage.Event.TransactionHash.String(), "bridge_tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("correlated events tx hash mismatch")
}
ctcTxDeposit, ok := ctcTxDeposits[logKey{initiatedBridge.Event.BlockHash, initiatedBridge.Event.LogIndex - 2}]
if !ok {
log.Error("missing transaction deposit for bridge transfer", "tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("expected TransactionEnqueued preceding DepostInitiated event. tx_hash = %s", initiatedBridge.Event.TransactionHash.String())
log.Error("expected TransactionEnqueued preceding BridgeInitiated event", "tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("expected TransactionEnqueued preceding BridgeInitiated event. tx_hash = %s", initiatedBridge.Event.TransactionHash.String())
} else if ctcTxDeposit.Event.TransactionHash != initiatedBridge.Event.TransactionHash {
log.Error("correlated events tx hash mismatch", "deposit_tx_hash", ctcTxDeposit.Event.TransactionHash.String(), "bridge_tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("correlated events tx hash mismatch")
}
initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash
......@@ -148,12 +162,14 @@ func LegacyL2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri
log.Info("detected legacy transaction withdrawals (via L2CrossDomainMessenger)", "size", len(crossDomainSentMessages))
}
withdrawnWEI := bigint.Zero
sentMessages := make(map[logKey]*contracts.CrossDomainMessengerSentMessageEvent, len(crossDomainSentMessages))
bridgeMessages := make([]database.L2BridgeMessage, len(crossDomainSentMessages))
transactionWithdrawals := make([]database.L2TransactionWithdrawal, len(crossDomainSentMessages))
for i := range crossDomainSentMessages {
sentMessage := crossDomainSentMessages[i]
sentMessages[logKey{sentMessage.Event.BlockHash, sentMessage.Event.LogIndex}] = &sentMessage
withdrawnWEI = new(big.Int).Add(withdrawnWEI, sentMessage.BridgeMessage.Tx.Amount)
// To ensure consistency in the schema, we duplicate this as the "root" transaction withdrawal. The storage key in the message
// passer contract is sha3(calldata + sender). The sender always being the L2CrossDomainMessenger pre-bedrock.
......@@ -184,7 +200,9 @@ func LegacyL2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri
if err := db.BridgeMessages.StoreL2BridgeMessages(bridgeMessages); err != nil {
return err
}
metrics.RecordL2TransactionWithdrawals(len(transactionWithdrawals))
withdrawnETH, _ := bigint.WeiToETH(withdrawnWEI).Float64()
metrics.RecordL2TransactionWithdrawals(len(transactionWithdrawals), withdrawnETH)
metrics.RecordL2CrossDomainSentMessages(len(bridgeMessages))
}
......@@ -207,12 +225,15 @@ func LegacyL2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, metri
// - Event Flow: TransactionEnqueued -> SentMessage -> DepositInitiated
sentMessage, ok := sentMessages[logKey{initiatedBridge.Event.BlockHash, initiatedBridge.Event.LogIndex - 1}]
if !ok {
log.Error("expected SentMessage preceding DepositInitiated event", "tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("expected SentMessage preceding DepositInitiated event. tx_hash = %s", initiatedBridge.Event.TransactionHash)
log.Error("expected SentMessage preceding BridgeInitiated event", "tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("expected SentMessage preceding BridgeInitiated event. tx_hash = %s", initiatedBridge.Event.TransactionHash)
} else if sentMessage.Event.TransactionHash != initiatedBridge.Event.TransactionHash {
log.Error("correlated events tx hash mismatch", "message_tx_hash", sentMessage.Event.TransactionHash.String(), "bridge_tx_hash", initiatedBridge.Event.TransactionHash.String())
return fmt.Errorf("correlated events tx hash mismatch")
}
initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash
bridgedTokens[initiatedBridge.BridgeTransfer.TokenPair.LocalTokenAddress]++
initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash
l2BridgeWithdrawals[i] = database.L2BridgeWithdrawal{
TransactionWithdrawalHash: sentMessage.BridgeMessage.MessageHash,
BridgeTransfer: initiatedBridge.BridgeTransfer,
......
......@@ -16,7 +16,7 @@ var (
type L1Metricer interface {
RecordLatestIndexedL1Height(height *big.Int)
RecordL1TransactionDeposits(size int)
RecordL1TransactionDeposits(size int, mintedETH float64)
RecordL1ProvenWithdrawals(size int)
RecordL1FinalizedWithdrawals(size int)
......@@ -30,7 +30,7 @@ type L1Metricer interface {
type L2Metricer interface {
RecordLatestIndexedL2Height(height *big.Int)
RecordL2TransactionWithdrawals(size int)
RecordL2TransactionWithdrawals(size int, withdrawnETH float64)
RecordL2CrossDomainSentMessages(size int)
RecordL2CrossDomainRelayedMessages(size int)
......@@ -55,7 +55,9 @@ type bridgeMetrics struct {
latestL2Height prometheus.Gauge
txDeposits prometheus.Counter
txMintedETH prometheus.Counter
txWithdrawals prometheus.Counter
txWithdrawnETH prometheus.Counter
provenWithdrawals prometheus.Counter
finalizedWithdrawals prometheus.Counter
......@@ -101,11 +103,21 @@ func NewMetrics(registry *prometheus.Registry) Metricer {
Name: "tx_deposits",
Help: "number of processed transactions deposited from l1",
}),
txMintedETH: factory.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "tx_minted_eth",
Help: "amount of eth bridged from l1",
}),
txWithdrawals: factory.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "tx_withdrawals",
Help: "number of processed transactions withdrawn from l2",
}),
txWithdrawnETH: factory.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "tx_withdrawn_eth",
Help: "amount of eth withdrawn from l2",
}),
provenWithdrawals: factory.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Name: "proven_withdrawals",
......@@ -166,8 +178,9 @@ func (m *bridgeMetrics) RecordLatestIndexedL1Height(height *big.Int) {
m.latestL1Height.Set(float64(height.Uint64()))
}
func (m *bridgeMetrics) RecordL1TransactionDeposits(size int) {
func (m *bridgeMetrics) RecordL1TransactionDeposits(size int, mintedETH float64) {
m.txDeposits.Add(float64(size))
m.txMintedETH.Add(mintedETH)
}
func (m *bridgeMetrics) RecordL1ProvenWithdrawals(size int) {
......@@ -200,8 +213,9 @@ func (m *bridgeMetrics) RecordLatestIndexedL2Height(height *big.Int) {
m.latestL2Height.Set(float64(height.Uint64()))
}
func (m *bridgeMetrics) RecordL2TransactionWithdrawals(size int) {
func (m *bridgeMetrics) RecordL2TransactionWithdrawals(size int, withdrawnETH float64) {
m.txWithdrawals.Add(float64(size))
m.txWithdrawnETH.Add(withdrawnETH)
}
func (m *bridgeMetrics) RecordL2CrossDomainSentMessages(size int) {
......
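For readers unfamiliar with the counter plumbing, a minimal client_golang sketch of one of the two new ETH-volume counters (plain prometheus API here; the indexer itself goes through its metrics factory, and the namespace value is assumed):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	registry := prometheus.NewRegistry()

	// Counter mirroring tx_minted_eth: total ETH bridged in via deposits.
	txMintedETH := prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "op_indexer", // assumed namespace, stands in for MetricsNamespace
		Name:      "tx_minted_eth",
		Help:      "amount of eth bridged from l1",
	})
	registry.MustRegister(txMintedETH)

	// A deposit batch is recorded as a float64 ETH amount, as in
	// RecordL1TransactionDeposits(len(deposits), mintedETH).
	txMintedETH.Add(0.0015)

	mfs, _ := registry.Gather()
	fmt.Println(mfs[0].GetName(), mfs[0].GetMetric()[0].GetCounter().GetValue())
}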
FROM --platform=$BUILDPLATFORM golang:1.21.1-alpine3.18 as builder
ARG VERSION=v0.0.0
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
COPY ./go.mod /app/go.mod
COPY ./go.sum /app/go.sum
WORKDIR /app
RUN go mod download
# build op-batcher with the shared go.mod & go.sum files
COPY ./op-batcher /app/op-batcher
COPY ./op-bindings /app/op-bindings
COPY ./op-node /app/op-node
COPY ./op-service /app/op-service
COPY ./.git /app/.git
WORKDIR /app/op-batcher
ARG TARGETOS TARGETARCH
RUN make op-batcher VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op_stack_go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /app/op-batcher/bin/op-batcher /usr/local/bin
COPY --from=builder /usr/local/bin/op-batcher /usr/local/bin/op-batcher
ENTRYPOINT ["op-batcher"]
CMD ["op-batcher"]
# ignore everything but the dockerfile, the op-stack-go base image performs the build
*
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
GITCOMMIT ?= $(shell git rev-parse HEAD)
GITDATE ?= $(shell git show -s --format='%ct')
VERSION := v0.0.0
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
......@@ -16,9 +16,6 @@ clean:
test:
go test -v ./...
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
fuzz:
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelConfig_CheckTimeout ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationZero ./batcher
......@@ -33,5 +30,4 @@ fuzz:
op-batcher \
clean \
test \
lint \
fuzz
......@@ -26,9 +26,6 @@ bindings-build:
-package $(pkg) \
-monorepo-base $(monorepo-base)
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
mkdir:
mkdir -p $(pkg)
......
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
GITCOMMIT ?= $(shell git rev-parse HEAD)
GITDATE ?= $(shell git show -s --format='%ct')
VERSION := v0.0.0
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
......@@ -16,11 +16,7 @@ clean:
test:
go test -v ./...
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
.PHONY: \
op-bootnode \
clean \
test \
lint
test
......@@ -6,9 +6,6 @@ check-l2:
test:
go test ./...
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
fuzz:
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzEncodeDecodeWithdrawal ./crossdomain
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzEncodeDecodeLegacyWithdrawal ./crossdomain
......
......@@ -26,6 +26,11 @@ import (
"github.com/ethereum-optimism/optimism/op-service/eth"
)
// initializedValue represents the `Initializable` contract value. It should be kept in
// sync with the constant in `Constants.sol`.
// https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/src/libraries/Constants.sol
const initializedValue = 3
var (
ErrInvalidDeployConfig = errors.New("invalid deploy config")
ErrInvalidImmutablesConfig = errors.New("invalid immutables config")
......@@ -107,8 +112,11 @@ type DeployConfig struct {
L2GenesisBlockBaseFeePerGas *hexutil.Big `json:"l2GenesisBlockBaseFeePerGas"`
// L2GenesisRegolithTimeOffset is the number of seconds after genesis block that Regolith hard fork activates.
// Set it to 0 to activate at genesis. Nil to disable regolith.
// Set it to 0 to activate at genesis. Nil to disable Regolith.
L2GenesisRegolithTimeOffset *hexutil.Uint64 `json:"l2GenesisRegolithTimeOffset,omitempty"`
// L2GenesisCanyonTimeOffset is the number of seconds after genesis block that Canyon hard fork activates.
// Set it to 0 to activate at genesis. Nil to disable Canyon.
L2GenesisCanyonTimeOffset *hexutil.Uint64 `json:"l2GenesisCanyonTimeOffset,omitempty"`
// L2GenesisSpanBatchTimeOffset is the number of seconds after genesis block that Span Batch hard fork activates.
// Set it to 0 to activate at genesis. Nil to disable SpanBatch.
L2GenesisSpanBatchTimeOffset *hexutil.Uint64 `json:"l2GenesisSpanBatchTimeOffset,omitempty"`
......@@ -444,6 +452,17 @@ func (d *DeployConfig) RegolithTime(genesisTime uint64) *uint64 {
return &v
}
func (d *DeployConfig) CanyonTime(genesisTime uint64) *uint64 {
if d.L2GenesisCanyonTimeOffset == nil {
return nil
}
v := uint64(0)
if offset := *d.L2GenesisCanyonTimeOffset; offset > 0 {
v = genesisTime + uint64(offset)
}
return &v
}
func (d *DeployConfig) SpanBatchTime(genesisTime uint64) *uint64 {
if d.L2GenesisSpanBatchTimeOffset == nil {
return nil
......@@ -492,6 +511,7 @@ func (d *DeployConfig) RollupConfig(l1StartBlock *types.Block, l2GenesisBlockHas
DepositContractAddress: d.OptimismPortalProxy,
L1SystemConfigAddress: d.SystemConfigProxy,
RegolithTime: d.RegolithTime(l1StartBlock.Time()),
CanyonTime: d.CanyonTime(l1StartBlock.Time()),
SpanBatchTime: d.SpanBatchTime(l1StartBlock.Time()),
}, nil
}
......@@ -708,13 +728,13 @@ func NewL2StorageConfig(config *DeployConfig, block *types.Block) (state.Storage
"msgNonce": 0,
}
storage["L2CrossDomainMessenger"] = state.StorageValues{
"_initialized": 1,
"_initialized": initializedValue,
"_initializing": false,
"xDomainMsgSender": "0x000000000000000000000000000000000000dEaD",
"msgNonce": 0,
}
storage["L2StandardBridge"] = state.StorageValues{
"_initialized": 2,
"_initialized": initializedValue,
"_initializing": false,
"messenger": predeploys.L2CrossDomainMessengerAddr,
}
......@@ -749,12 +769,12 @@ func NewL2StorageConfig(config *DeployConfig, block *types.Block) (state.Storage
}
storage["L2ERC721Bridge"] = state.StorageValues{
"messenger": predeploys.L2CrossDomainMessengerAddr,
"_initialized": 2,
"_initialized": initializedValue,
"_initializing": false,
}
storage["OptimismMintableERC20Factory"] = state.StorageValues{
"bridge": predeploys.L2StandardBridgeAddr,
"_initialized": 2,
"_initialized": initializedValue,
"_initializing": false,
}
return storage, nil
......
......@@ -49,6 +49,18 @@ func TestRegolithTimeAsOffset(t *testing.T) {
require.Equal(t, uint64(1500+5000), *config.RegolithTime(5000))
}
func TestCanyonTimeZero(t *testing.T) {
canyonOffset := hexutil.Uint64(0)
config := &DeployConfig{L2GenesisCanyonTimeOffset: &canyonOffset}
require.Equal(t, uint64(0), *config.CanyonTime(1234))
}
func TestCanyonTimeOffset(t *testing.T) {
canyonOffset := hexutil.Uint64(1500)
config := &DeployConfig{L2GenesisCanyonTimeOffset: &canyonOffset}
require.Equal(t, uint64(1234+1500), *config.CanyonTime(1234))
}
// TestCopy will copy a DeployConfig and ensure that the copy is equal to the original.
func TestCopy(t *testing.T) {
b, err := os.ReadFile("testdata/test-deploy-config-full.json")
......
......@@ -58,6 +58,8 @@ func NewL2Genesis(config *DeployConfig, block *types.Block) (*core.Genesis, erro
TerminalTotalDifficultyPassed: true,
BedrockBlock: new(big.Int).SetUint64(uint64(config.L2GenesisBlockNumber)),
RegolithTime: config.RegolithTime(block.Time()),
CanyonTime: config.CanyonTime(block.Time()),
ShanghaiTime: config.CanyonTime(block.Time()),
Optimism: &params.OptimismConfig{
EIP1559Denominator: eip1559Denom,
EIP1559Elasticity: eip1559Elasticity,
......
FROM --platform=$BUILDPLATFORM golang:1.21.1-alpine3.18 as builder
ARG VERSION=v0.0.0
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
COPY ./go.mod /app/go.mod
COPY ./go.sum /app/go.sum
WORKDIR /app
RUN go mod download
# build op-challenger with the shared go.mod & go.sum files
COPY ./op-challenger /app/op-challenger
COPY ./op-program /app/op-program
COPY ./op-preimage /app/op-preimage
COPY ./op-bindings /app/op-bindings
COPY ./op-node /app/op-node
COPY ./op-service /app/op-service
COPY ./.git /app/.git
# Copy cannon and its dependencies
COPY ./cannon /app/cannon
COPY ./op-preimage /app/op-preimage
COPY ./op-chain-ops /app/op-chain-ops
WORKDIR /app/op-program
ARG TARGETOS TARGETARCH
RUN make op-program-host VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH
WORKDIR /app/cannon
ARG TARGETOS TARGETARCH
RUN make cannon VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH
WORKDIR /app/op-challenger
RUN make op-challenger VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op_stack_go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
# Make the bundled op-program the default cannon server
COPY --from=builder /app/op-program/bin/op-program /usr/local/bin
COPY --from=builder /usr/local/bin/op-program /usr/local/bin/op-program
ENV OP_CHALLENGER_CANNON_SERVER /usr/local/bin/op-program
# Make the bundled cannon the default cannon executable
COPY --from=builder /app/cannon/bin/cannon /usr/local/bin
COPY --from=builder /usr/local/bin/cannon /usr/local/bin/cannon
ENV OP_CHALLENGER_CANNON_BIN /usr/local/bin/cannon
COPY --from=builder /app/op-challenger/bin/op-challenger /usr/local/bin
COPY --from=builder /usr/local/bin/op-challenger /usr/local/bin/op-challenger
CMD ["op-challenger"]
# ignore everything but the dockerfile; the op-stack-go base image performs the build
*
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
GITCOMMIT ?= $(shell git rev-parse HEAD)
GITDATE ?= $(shell git show -s --format='%ct')
VERSION := v0.0.0
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
......@@ -16,9 +16,6 @@ clean:
test:
go test -v ./...
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
visualize:
./scripts/visualize.sh
......
......@@ -51,7 +51,3 @@ clean:
rm -r ../.devnet
rm -r ../op-program/bin
.PHONY: clean
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
.PHONY: lint
......@@ -180,7 +180,7 @@ func (s *L1Replica) RPCClient() client.RPC {
}
func (s *L1Replica) L1Client(t Testing, cfg *rollup.Config) *sources.L1Client {
l1F, err := sources.NewL1Client(s.RPCClient(), s.log, nil, sources.L1ClientDefaultConfig(cfg, false, sources.RPCKindBasic))
l1F, err := sources.NewL1Client(s.RPCClient(), s.log, nil, sources.L1ClientDefaultConfig(cfg, false, sources.RPCKindStandard))
require.NoError(t, err)
return l1F
}
......
......@@ -42,7 +42,7 @@ func TestL1Replica_ActL1RPCFail(gt *testing.T) {
// mock an RPC failure
replica.ActL1RPCFail(t)
// check RPC failure
l1Cl, err := sources.NewL1Client(replica.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindBasic))
l1Cl, err := sources.NewL1Client(replica.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindStandard))
require.NoError(t, err)
_, err = l1Cl.InfoByLabel(t.Ctx(), eth.Unsafe)
require.ErrorContains(t, err, "mock")
......
......@@ -46,7 +46,7 @@ func TestL2EngineAPI(gt *testing.T) {
chainA, _ := core.GenerateChain(sd.L2Cfg.Config, genesisBlock, consensus, db, 1, func(i int, gen *core.BlockGen) {
gen.SetCoinbase(common.Address{'A'})
})
payloadA, err := eth.BlockAsPayload(chainA[0])
payloadA, err := eth.BlockAsPayload(chainA[0], sd.RollupCfg.CanyonTime)
require.NoError(t, err)
// apply the payload
......@@ -69,7 +69,7 @@ func TestL2EngineAPI(gt *testing.T) {
chainB, _ := core.GenerateChain(sd.L2Cfg.Config, genesisBlock, consensus, db, 1, func(i int, gen *core.BlockGen) {
gen.SetCoinbase(common.Address{'B'})
})
payloadB, err := eth.BlockAsPayload(chainB[0])
payloadB, err := eth.BlockAsPayload(chainB[0], sd.RollupCfg.CanyonTime)
require.NoError(t, err)
// apply the payload
......@@ -125,18 +125,26 @@ func TestL2EngineAPIBlockBuilding(gt *testing.T) {
l2Cl, err := sources.NewEngineClient(engine.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
require.NoError(t, err)
nextBlockTime := eth.Uint64Quantity(parent.Time) + 2
var w *eth.Withdrawals
if sd.RollupCfg.IsCanyon(uint64(nextBlockTime)) {
w = &eth.Withdrawals{}
}
// Now let's ask the engine to build a block
fcRes, err := l2Cl.ForkchoiceUpdate(t.Ctx(), &eth.ForkchoiceState{
HeadBlockHash: parent.Hash(),
SafeBlockHash: genesisBlock.Hash(),
FinalizedBlockHash: genesisBlock.Hash(),
}, &eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(parent.Time) + 2,
Timestamp: nextBlockTime,
PrevRandao: eth.Bytes32{},
SuggestedFeeRecipient: common.Address{'C'},
Transactions: nil,
NoTxPool: false,
GasLimit: (*eth.Uint64Quantity)(&sd.RollupCfg.Genesis.SystemConfig.GasLimit),
Withdrawals: w,
})
require.NoError(t, err)
require.Equal(t, fcRes.PayloadStatus.Status, eth.ExecutionValid)
......
......@@ -21,7 +21,7 @@ func setupSequencerTest(t Testing, sd *e2eutils.SetupData, log log.Logger) (*L1M
miner := NewL1Miner(t, log, sd.L1Cfg)
l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindBasic))
l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindStandard))
require.NoError(t, err)
engine := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath)
l2Cl, err := sources.NewEngineClient(engine.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
......
......@@ -572,7 +572,7 @@ func TestRestartOpGeth(gt *testing.T) {
jwtPath := e2eutils.WriteDefaultJWT(t)
// L1
miner := NewL1Miner(t, log, sd.L1Cfg)
l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindBasic))
l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindStandard))
require.NoError(t, err)
// Sequencer
seqEng := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath, dbOption)
......@@ -667,7 +667,7 @@ func TestConflictingL2Blocks(gt *testing.T) {
altSeqEng := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath)
altSeqEngCl, err := sources.NewEngineClient(altSeqEng.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
require.NoError(t, err)
l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindBasic))
l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindStandard))
require.NoError(t, err)
altSequencer := NewL2Sequencer(t, log, l1F, altSeqEngCl, sd.RollupCfg, 0)
altBatcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
......
......@@ -58,6 +58,7 @@ func MakeDeployParams(t require.TestingT, tp *TestParams) *DeployParams {
deployConfig.ChannelTimeout = tp.ChannelTimeout
deployConfig.L1BlockTime = tp.L1BlockTime
deployConfig.L2GenesisRegolithTimeOffset = nil
deployConfig.L2GenesisCanyonTimeOffset = CanyonTimeOffset()
deployConfig.L2GenesisSpanBatchTimeOffset = SpanBatchTimeOffset()
require.NoError(t, deployConfig.Check())
......@@ -157,6 +158,7 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) *
DepositContractAddress: deployConf.OptimismPortalProxy,
L1SystemConfigAddress: deployConf.SystemConfigProxy,
RegolithTime: deployConf.RegolithTime(uint64(deployConf.L1GenesisBlockTimestamp)),
CanyonTime: deployConf.CanyonTime(uint64(deployConf.L1GenesisBlockTimestamp)),
SpanBatchTime: deployConf.SpanBatchTime(uint64(deployConf.L1GenesisBlockTimestamp)),
}
......@@ -191,3 +193,11 @@ func SpanBatchTimeOffset() *hexutil.Uint64 {
}
return nil
}
func CanyonTimeOffset() *hexutil.Uint64 {
if os.Getenv("OP_E2E_USE_CANYON") == "true" {
offset := hexutil.Uint64(0)
return &offset
}
return nil
}
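// For example, running the e2e suite with OP_E2E_USE_CANYON=true yields an offset of 0,
// i.e. Canyon is scheduled at the L2 genesis block in the generated deploy config.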
......@@ -105,7 +105,7 @@ func NewOpGeth(t *testing.T, ctx context.Context, cfg *SystemConfig) (*OpGeth, e
l2Client, err := ethclient.Dial(node.HTTPEndpoint())
require.Nil(t, err)
genesisPayload, err := eth.BlockAsPayload(l2GenesisBlock)
genesisPayload, err := eth.BlockAsPayload(l2GenesisBlock, cfg.DeployConfig.CanyonTime(l2GenesisBlock.Time()))
require.Nil(t, err)
return &OpGeth{
......@@ -209,11 +209,18 @@ func (d *OpGeth) CreatePayloadAttributes(txs ...*types.Transaction) (*eth.Payloa
}
txBytes = append(txBytes, bin)
}
var withdrawals *eth.Withdrawals
if d.L2ChainConfig.IsCanyon(uint64(timestamp)) {
withdrawals = &eth.Withdrawals{}
}
attrs := eth.PayloadAttributes{
Timestamp: timestamp,
Transactions: txBytes,
NoTxPool: true,
GasLimit: (*eth.Uint64Quantity)(&d.SystemConfig.GasLimit),
Withdrawals: withdrawals,
}
return &attrs, nil
}
......@@ -2,12 +2,13 @@ package op_e2e
import (
"context"
"fmt"
"math/big"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
......@@ -16,9 +17,8 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestMissingGasLimit tests that op-geth cannot build a block without gas limit while optimism is active in the chain config.
......@@ -719,3 +719,145 @@ func TestRegolith(t *testing.T) {
})
}
}
func TestPreCanyon(t *testing.T) {
InitParallel(t)
futureTimestamp := hexutil.Uint64(4)
tests := []struct {
name string
canyonTime *hexutil.Uint64
}{
{name: "CanyonNotScheduled"},
{name: "CanyonNotYetActive", canyonTime: &futureTimestamp},
}
for _, test := range tests {
test := test
t.Run(fmt.Sprintf("ReturnsNilWithdrawals_%s", test.name), func(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L2GenesisCanyonTimeOffset = test.canyonTime
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
opGeth, err := NewOpGeth(t, ctx, &cfg)
require.NoError(t, err)
defer opGeth.Close()
b, err := opGeth.AddL2Block(ctx)
require.NoError(t, err)
assert.Nil(t, b.Withdrawals, "should not have withdrawals")
l2Block, err := opGeth.L2Client.BlockByNumber(ctx, nil)
require.Nil(t, err)
assert.Equal(t, types.Withdrawals(nil), l2Block.Withdrawals())
})
t.Run(fmt.Sprintf("RejectPushZeroTx_%s", test.name), func(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L2GenesisCanyonTimeOffset = test.canyonTime
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
opGeth, err := NewOpGeth(t, ctx, &cfg)
require.NoError(t, err)
defer opGeth.Close()
pushZeroContractCreateTxn := types.NewTx(&types.DepositTx{
From: cfg.Secrets.Addresses().Alice,
Value: big.NewInt(params.Ether),
Gas: 1000001,
Data: []byte{
byte(vm.PUSH0),
},
IsSystemTransaction: false,
})
_, err = opGeth.AddL2Block(ctx, pushZeroContractCreateTxn)
require.NoError(t, err)
receipt, err := opGeth.L2Client.TransactionReceipt(ctx, pushZeroContractCreateTxn.Hash())
require.NoError(t, err)
assert.Equal(t, types.ReceiptStatusFailed, receipt.Status)
})
}
}
func TestCanyon(t *testing.T) {
InitParallel(t)
tests := []struct {
name string
canyonTime hexutil.Uint64
activeCanyon func(ctx context.Context, opGeth *OpGeth)
}{
{name: "ActivateAtGenesis", canyonTime: 0, activeCanyon: func(ctx context.Context, opGeth *OpGeth) {}},
{name: "ActivateAfterGenesis", canyonTime: 2, activeCanyon: func(ctx context.Context, opGeth *OpGeth) {
// Adding this block advances us to the fork time.
_, err := opGeth.AddL2Block(ctx)
require.NoError(t, err)
}},
}
for _, test := range tests {
test := test
t.Run(fmt.Sprintf("ReturnsEmptyWithdrawals_%s", test.name), func(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
s := hexutil.Uint64(0)
cfg.DeployConfig.L2GenesisRegolithTimeOffset = &s
cfg.DeployConfig.L2GenesisCanyonTimeOffset = &test.canyonTime
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
opGeth, err := NewOpGeth(t, ctx, &cfg)
require.NoError(t, err)
defer opGeth.Close()
test.activeCanyon(ctx, opGeth)
b, err := opGeth.AddL2Block(ctx)
require.NoError(t, err)
assert.Equal(t, eth.Withdrawals{}, *b.Withdrawals)
l2Block, err := opGeth.L2Client.BlockByNumber(ctx, nil)
require.Nil(t, err)
assert.Equal(t, types.Withdrawals{}, l2Block.Withdrawals())
})
t.Run(fmt.Sprintf("AcceptsPushZeroTxn_%s", test.name), func(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L2GenesisCanyonTimeOffset = &test.canyonTime
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
opGeth, err := NewOpGeth(t, ctx, &cfg)
require.NoError(t, err)
defer opGeth.Close()
pushZeroContractCreateTxn := types.NewTx(&types.DepositTx{
From: cfg.Secrets.Addresses().Alice,
Value: big.NewInt(params.Ether),
Gas: 1000001,
Data: []byte{
byte(vm.PUSH0),
},
IsSystemTransaction: false,
})
_, err = opGeth.AddL2Block(ctx, pushZeroContractCreateTxn)
require.NoError(t, err)
receipt, err := opGeth.L2Client.TransactionReceipt(ctx, pushZeroContractCreateTxn.Hash())
require.NoError(t, err)
assert.Equal(t, types.ReceiptStatusSuccessful, receipt.Status)
})
}
}
......@@ -86,6 +86,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
require.NoError(t, err)
deployConfig := config.DeployConfig.Copy()
deployConfig.L1GenesisBlockTimestamp = hexutil.Uint64(time.Now().Unix())
deployConfig.L2GenesisCanyonTimeOffset = e2eutils.CanyonTimeOffset()
deployConfig.L2GenesisSpanBatchTimeOffset = e2eutils.SpanBatchTimeOffset()
require.NoError(t, deployConfig.Check(), "Deploy config is invalid, do you need to run make devnet-allocs?")
l1Deployments := config.L1Deployments.Copy()
......@@ -425,6 +426,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
DepositContractAddress: cfg.DeployConfig.OptimismPortalProxy,
L1SystemConfigAddress: cfg.DeployConfig.SystemConfigProxy,
RegolithTime: cfg.DeployConfig.RegolithTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
CanyonTime: cfg.DeployConfig.CanyonTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
SpanBatchTime: cfg.DeployConfig.SpanBatchTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)),
ProtocolVersionsAddress: cfg.L1Deployments.ProtocolVersionsProxy,
}
......@@ -773,7 +775,7 @@ func configureL1(rollupNodeCfg *rollupNode.Config, l1Node EthInstance) {
rollupNodeCfg.L1 = &rollupNode.L1EndpointConfig{
L1NodeAddr: l1EndpointConfig,
L1TrustRPC: false,
L1RPCKind: sources.RPCKindBasic,
L1RPCKind: sources.RPCKindStandard,
RateLimit: 0,
BatchSize: 20,
HttpPollInterval: time.Millisecond * 100,
......
......@@ -491,7 +491,7 @@ func TestSystemMockP2P(t *testing.T) {
verifierPeerID := sys.RollupNodes["verifier"].P2P().Host().ID()
check := func() bool {
sequencerBlocksTopicPeers := sys.RollupNodes["sequencer"].P2P().GossipOut().BlocksTopicPeers()
sequencerBlocksTopicPeers := sys.RollupNodes["sequencer"].P2P().GossipOut().AllBlockTopicsPeers()
return slices.Contains[[]peer.ID](sequencerBlocksTopicPeers, verifierPeerID)
}
......@@ -592,6 +592,10 @@ func TestSystemRPCAltSync(t *testing.T) {
opts.VerifyOnClients(l2Verif)
})
// Sometimes we get duplicate blocks on the sequencer which makes this test flaky
published = slices.Compact(published)
received = slices.Compact(received)
// Verify that the tx was received via RPC sync (P2P is disabled)
require.Contains(t, received, eth.BlockID{Hash: receiptSeq.BlockHash, Number: receiptSeq.BlockNumber.Uint64()}.String())
......
FROM golang:1.21.1-alpine3.18 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
# build op-heartbeat with local monorepo go modules
COPY ./op-heartbeat /app/op-heartbeat
COPY ./op-node /app/op-node
COPY ./op-service /app/op-service
COPY ./go.mod /app/go.mod
COPY ./go.sum /app/go.sum
COPY ./.git /app/.git
WORKDIR /app/op-heartbeat
RUN make op-heartbeat
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op_stack_go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /app/op-heartbeat/bin/op-heartbeat /usr/local/bin
COPY --from=builder /usr/local/bin/op-heartbeat /usr/local/bin/op-heartbeat
CMD ["op-heartbeat"]
# ignore everything but the dockerfile; the op-stack-go base image performs the build
*
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
GITCOMMIT ?= $(shell git rev-parse HEAD)
GITDATE ?= $(shell git show -s --format='%ct')
VERSION := v0.0.0
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
......@@ -16,9 +16,6 @@ clean:
test:
go test -v ./...
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
.PHONY: \
clean \
op-heartbeat \
......
FROM --platform=$BUILDPLATFORM golang:1.21.1-alpine3.18 as builder
ARG VERSION=v0.0.0
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
COPY ./go.mod /app/go.mod
COPY ./go.sum /app/go.sum
WORKDIR /app
RUN go mod download
# build op-node with the shared go.mod & go.sum files
COPY ./op-node /app/op-node
COPY ./op-chain-ops /app/op-chain-ops
COPY ./op-service /app/op-service
COPY ./op-bindings /app/op-bindings
COPY ./.git /app/.git
WORKDIR /app/op-node
ARG TARGETOS TARGETARCH
RUN make op-node VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op_stack_go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /app/op-node/bin/op-node /usr/local/bin
COPY --from=builder /usr/local/bin/op-node /usr/local/bin/op-node
CMD ["op-node"]
# ignore everything but the dockerfile; the op-stack-go base image performs the build
*
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
GITCOMMIT ?= $(shell git rev-parse HEAD)
GITDATE ?= $(shell git show -s --format='%ct')
VERSION := v0.0.0
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
......@@ -17,9 +17,6 @@ clean:
test:
go test -v ./...
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
fuzz:
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzL1InfoRoundTrip ./rollup/derive
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzL1InfoAgainstContract ./rollup/derive
......
......@@ -32,7 +32,7 @@ func TestGetRollupConfig(t *testing.T) {
gotCfg, err := GetRollupConfig(name)
require.NoError(t, err)
require.Equal(t, expectedCfg, *gotCfg, "rollup-configs from superchain-registry must match")
require.Equalf(t, expectedCfg, *gotCfg, "rollup-configs from superchain-registry must match for %v", name)
}
}
......@@ -54,16 +54,17 @@ var mainnetCfg = rollup.Config{
GasLimit: 30_000_000,
},
},
BlockTime: 2,
MaxSequencerDrift: 600,
SeqWindowSize: 3600,
ChannelTimeout: 300,
L1ChainID: big.NewInt(1),
L2ChainID: big.NewInt(10),
BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000000000010"),
DepositContractAddress: common.HexToAddress("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"),
L1SystemConfigAddress: common.HexToAddress("0x229047fed2591dbec1eF1118d64F7aF3dB9EB290"),
RegolithTime: u64Ptr(0),
BlockTime: 2,
MaxSequencerDrift: 600,
SeqWindowSize: 3600,
ChannelTimeout: 300,
L1ChainID: big.NewInt(1),
L2ChainID: big.NewInt(10),
BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000000000010"),
DepositContractAddress: common.HexToAddress("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"),
L1SystemConfigAddress: common.HexToAddress("0x229047fed2591dbec1eF1118d64F7aF3dB9EB290"),
RegolithTime: u64Ptr(0),
ProtocolVersionsAddress: common.HexToAddress("0x8062AbC286f5e7D9428a0Ccb9AbD71e50d93b935"),
}
var goerliCfg = rollup.Config{
......@@ -115,16 +116,17 @@ var sepoliaCfg = rollup.Config{
GasLimit: 30000000,
},
},
BlockTime: 2,
MaxSequencerDrift: 600,
SeqWindowSize: 3600,
ChannelTimeout: 300,
L1ChainID: big.NewInt(11155111),
L2ChainID: big.NewInt(11155420),
BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000011155420"),
DepositContractAddress: common.HexToAddress("0x16fc5058f25648194471939df75cf27a2fdc48bc"),
L1SystemConfigAddress: common.HexToAddress("0x034edd2a225f7f429a63e0f1d2084b9e0a93b538"),
RegolithTime: u64Ptr(0),
BlockTime: 2,
MaxSequencerDrift: 600,
SeqWindowSize: 3600,
ChannelTimeout: 300,
L1ChainID: big.NewInt(11155111),
L2ChainID: big.NewInt(11155420),
BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000011155420"),
DepositContractAddress: common.HexToAddress("0x16fc5058f25648194471939df75cf27a2fdc48bc"),
L1SystemConfigAddress: common.HexToAddress("0x034edd2a225f7f429a63e0f1d2084b9e0a93b538"),
RegolithTime: u64Ptr(0),
ProtocolVersionsAddress: common.HexToAddress("0x79ADD5713B383DAa0a138d3C4780C7A1804a8090"),
}
func u64Ptr(v uint64) *uint64 {
......
......@@ -78,7 +78,7 @@ var (
openum.EnumString(sources.RPCProviderKinds),
EnvVars: prefixEnvVars("L1_RPC_KIND"),
Value: func() *sources.RPCProviderKind {
out := sources.RPCKindBasic
out := sources.RPCKindStandard
return &out
}(),
}
......
......@@ -510,6 +510,7 @@ func (n *OpNode) OnUnsafeL2Payload(ctx context.Context, from peer.ID, payload *e
// Pass on the event to the L2 Engine
ctx, cancel := context.WithTimeout(ctx, time.Second*30)
defer cancel()
if err := n.l2Driver.OnUnsafeL2Payload(ctx, payload); err != nil {
n.log.Warn("failed to notify engine driver of new L2 payload", "err", err, "id", payload.ID())
}
......
......@@ -70,10 +70,14 @@ func blocksTopicV1(cfg *rollup.Config) string {
return fmt.Sprintf("/optimism/%s/0/blocks", cfg.L2ChainID.String())
}
func blocksTopicV2(cfg *rollup.Config) string {
return fmt.Sprintf("/optimism/%s/1/blocks", cfg.L2ChainID.String())
}
// BuildSubscriptionFilter builds a simple subscription filter,
// to help protect against peers spamming useless subscriptions.
func BuildSubscriptionFilter(cfg *rollup.Config) pubsub.SubscriptionFilter {
return pubsub.NewAllowlistSubscriptionFilter(blocksTopicV1(cfg)) // add more topics here in the future, if any.
return pubsub.NewAllowlistSubscriptionFilter(blocksTopicV1(cfg), blocksTopicV2(cfg)) // add more topics here in the future, if any.
}
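// For example, for a rollup with L2ChainID 10 (OP Mainnet) the allowlist resolves to
// "/optimism/10/0/blocks" (v1) and "/optimism/10/1/blocks" (v2, used for Canyon blocks that carry withdrawals).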
var msgBufPool = sync.Pool{New: func() any {
......@@ -239,7 +243,7 @@ func (sb *seenBlocks) markSeen(h common.Hash) {
sb.blockHashes = append(sb.blockHashes, h)
}
func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRuntimeConfig) pubsub.ValidatorEx {
func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRuntimeConfig, blockVersion eth.BlockVersion) pubsub.ValidatorEx {
// Seen block hashes per block height
// uint64 -> *seenBlocks
......@@ -284,7 +288,7 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti
// [REJECT] if the block encoding is not valid
var payload eth.ExecutionPayload
if err := payload.UnmarshalSSZ(uint32(len(payloadBytes)), bytes.NewReader(payloadBytes)); err != nil {
if err := payload.UnmarshalSSZ(blockVersion, uint32(len(payloadBytes)), bytes.NewReader(payloadBytes)); err != nil {
log.Warn("invalid payload", "err", err, "peer", id)
return pubsub.ValidationReject
}
......@@ -310,6 +314,18 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti
return pubsub.ValidationReject
}
// [REJECT] if a V1 Block has withdrawals
if blockVersion == eth.BlockV1 && payload.Withdrawals != nil {
log.Warn("payload is on v1 topic, but has withdrawals", "bad_hash", payload.BlockHash.String())
return pubsub.ValidationReject
}
// [REJECT] if a V2 Block does not have withdrawals
if blockVersion == eth.BlockV2 && payload.Withdrawals == nil {
log.Warn("payload is on v2 topic, but does not have withdrawals", "bad_hash", payload.BlockHash.String())
return pubsub.ValidationReject
}
seen, ok := blockHeightLRU.Get(uint64(payload.BlockNumber))
if !ok {
seen = new(seenBlocks)
......@@ -370,7 +386,9 @@ type GossipIn interface {
}
type GossipTopicInfo interface {
BlocksTopicPeers() []peer.ID
AllBlockTopicsPeers() []peer.ID
BlocksTopicV1Peers() []peer.ID
BlocksTopicV2Peers() []peer.ID
}
type GossipOut interface {
......@@ -379,6 +397,21 @@ type GossipOut interface {
Close() error
}
type blockTopic struct {
// blocks topic, main handle on block gossip
topic *pubsub.Topic
// block events handler, to be cancelled before closing the blocks topic.
events *pubsub.TopicEventHandler
// block subscriptions, to be cancelled before closing blocks topic.
sub *pubsub.Subscription
}
func (bt *blockTopic) Close() error {
bt.events.Cancel()
bt.sub.Cancel()
return bt.topic.Close()
}
type publisher struct {
log log.Logger
cfg *rollup.Config
......@@ -388,20 +421,39 @@ type publisher struct {
// thus we have to stop it ourselves this way.
p2pCancel context.CancelFunc
// blocks topic, main handle on block gossip
blocksTopic *pubsub.Topic
// block events handler, to be cancelled before closing the blocks topic.
blocksEvents *pubsub.TopicEventHandler
// block subscriptions, to be cancelled before closing blocks topic.
blocksSub *pubsub.Subscription
blocksV1 *blockTopic
blocksV2 *blockTopic
runCfg GossipRuntimeConfig
}
var _ GossipOut = (*publisher)(nil)
func (p *publisher) BlocksTopicPeers() []peer.ID {
return p.blocksTopic.ListPeers()
func combinePeers(allPeers ...[]peer.ID) []peer.ID {
var seen = make(map[peer.ID]bool)
var res []peer.ID
for _, peers := range allPeers {
for _, p := range peers {
if _, ok := seen[p]; ok {
continue
}
res = append(res, p)
seen[p] = true
}
}
return res
}
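// e.g. combinePeers([]peer.ID{"foo", "bar"}, []peer.ID{"bar", "baz"}) returns
// []peer.ID{"foo", "bar", "baz"}, de-duplicating while preserving first-seen order (see TestCombinePeers).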
func (p *publisher) AllBlockTopicsPeers() []peer.ID {
return combinePeers(p.BlocksTopicV1Peers(), p.BlocksTopicV2Peers())
}
func (p *publisher) BlocksTopicV1Peers() []peer.ID {
return p.blocksV1.topic.ListPeers()
}
func (p *publisher) BlocksTopicV2Peers() []peer.ID {
return p.blocksV2.topic.ListPeers()
}
func (p *publisher) PublishL2Payload(ctx context.Context, payload *eth.ExecutionPayload, signer Signer) error {
......@@ -428,55 +480,84 @@ func (p *publisher) PublishL2Payload(ctx context.Context, payload *eth.Execution
// This also copies the data, freeing up the original buffer to go back into the pool
out := snappy.Encode(nil, data)
return p.blocksTopic.Publish(ctx, out)
if p.cfg.IsCanyon(uint64(payload.Timestamp)) {
return p.blocksV2.topic.Publish(ctx, out)
} else {
return p.blocksV1.topic.Publish(ctx, out)
}
}
func (p *publisher) Close() error {
p.p2pCancel()
p.blocksEvents.Cancel()
p.blocksSub.Cancel()
return p.blocksTopic.Close()
e1 := p.blocksV1.Close()
e2 := p.blocksV2.Close()
return errors.Join(e1, e2)
}
func JoinGossip(self peer.ID, ps *pubsub.PubSub, log log.Logger, cfg *rollup.Config, runCfg GossipRuntimeConfig, gossipIn GossipIn) (GossipOut, error) {
val := guardGossipValidator(log, logValidationResult(self, "validated block", log, BuildBlocksValidator(log, cfg, runCfg)))
blocksTopicName := blocksTopicV1(cfg)
err := ps.RegisterTopicValidator(blocksTopicName,
val,
p2pCtx, p2pCancel := context.WithCancel(context.Background())
v1Logger := log.New("topic", "blocksV1")
blocksV1Validator := guardGossipValidator(log, logValidationResult(self, "validated blockv1", v1Logger, BuildBlocksValidator(v1Logger, cfg, runCfg, eth.BlockV1)))
blocksV1, err := newBlockTopic(p2pCtx, blocksTopicV1(cfg), ps, v1Logger, gossipIn, blocksV1Validator)
if err != nil {
p2pCancel()
return nil, fmt.Errorf("failed to setup blocks v1 p2p: %w", err)
}
v2Logger := log.New("topic", "blocksV2")
blocksV2Validator := guardGossipValidator(log, logValidationResult(self, "validated blockv2", v2Logger, BuildBlocksValidator(v2Logger, cfg, runCfg, eth.BlockV2)))
blocksV2, err := newBlockTopic(p2pCtx, blocksTopicV2(cfg), ps, v2Logger, gossipIn, blocksV2Validator)
if err != nil {
p2pCancel()
return nil, fmt.Errorf("failed to setup blocks v2 p2p: %w", err)
}
return &publisher{
log: log,
cfg: cfg,
p2pCancel: p2pCancel,
blocksV1: blocksV1,
blocksV2: blocksV2,
runCfg: runCfg,
}, nil
}
func newBlockTopic(ctx context.Context, topicId string, ps *pubsub.PubSub, log log.Logger, gossipIn GossipIn, validator pubsub.ValidatorEx) (*blockTopic, error) {
err := ps.RegisterTopicValidator(topicId,
validator,
pubsub.WithValidatorTimeout(3*time.Second),
pubsub.WithValidatorConcurrency(4))
if err != nil {
return nil, fmt.Errorf("failed to register blocks gossip topic: %w", err)
return nil, fmt.Errorf("failed to register gossip topic: %w", err)
}
blocksTopic, err := ps.Join(blocksTopicName)
blocksTopic, err := ps.Join(topicId)
if err != nil {
return nil, fmt.Errorf("failed to join blocks gossip topic: %w", err)
return nil, fmt.Errorf("failed to join gossip topic: %w", err)
}
blocksTopicEvents, err := blocksTopic.EventHandler()
if err != nil {
return nil, fmt.Errorf("failed to create blocks gossip topic handler: %w", err)
}
p2pCtx, p2pCancel := context.WithCancel(context.Background())
go LogTopicEvents(p2pCtx, log.New("topic", "blocks"), blocksTopicEvents)
go LogTopicEvents(ctx, log, blocksTopicEvents)
subscription, err := blocksTopic.Subscribe()
if err != nil {
p2pCancel()
err = errors.Join(err, blocksTopic.Close())
return nil, fmt.Errorf("failed to subscribe to blocks gossip topic: %w", err)
}
subscriber := MakeSubscriber(log, BlocksHandler(gossipIn.OnUnsafeL2Payload))
go subscriber(p2pCtx, subscription)
go subscriber(ctx, subscription)
return &publisher{
log: log,
cfg: cfg,
blocksTopic: blocksTopic,
blocksEvents: blocksTopicEvents,
blocksSub: subscription,
p2pCancel: p2pCancel,
runCfg: runCfg,
return &blockTopic{
topic: blocksTopic,
events: blocksTopicEvents,
sub: subscription,
}, nil
}
......
......@@ -40,6 +40,11 @@ func TestGuardGossipValidator(t *testing.T) {
require.Equal(t, pubsub.ValidationIgnore, val(context.Background(), "bob", nil))
}
func TestCombinePeers(t *testing.T) {
res := combinePeers([]peer.ID{"foo", "bar"}, []peer.ID{"bar", "baz"})
require.Equal(t, []peer.ID{"foo", "bar", "baz"}, res)
}
func TestVerifyBlockSignature(t *testing.T) {
logger := testlog.Logger(t, log.LvlCrit)
cfg := &rollup.Config{
......
......@@ -194,7 +194,7 @@ func (s *APIBackend) Peers(ctx context.Context, connected bool) (*PeerDump, erro
dump.TotalConnected += 1
}
}
for _, id := range s.node.GossipOut().BlocksTopicPeers() {
for _, id := range s.node.GossipOut().AllBlockTopicsPeers() {
if p, ok := dump.Peers[id.String()]; ok {
p.GossipBlocks = true
}
......@@ -208,11 +208,12 @@ func (s *APIBackend) Peers(ctx context.Context, connected bool) (*PeerDump, erro
}
type PeerStats struct {
Connected uint `json:"connected"`
Table uint `json:"table"`
BlocksTopic uint `json:"blocksTopic"`
Banned uint `json:"banned"`
Known uint `json:"known"`
Connected uint `json:"connected"`
Table uint `json:"table"`
BlocksTopic uint `json:"blocksTopic"`
BlocksTopicV2 uint `json:"blocksTopicV2"`
Banned uint `json:"banned"`
Known uint `json:"known"`
}
func (s *APIBackend) PeerStats(_ context.Context) (*PeerStats, error) {
......@@ -223,11 +224,12 @@ func (s *APIBackend) PeerStats(_ context.Context) (*PeerStats, error) {
pstore := h.Peerstore()
stats := &PeerStats{
Connected: uint(len(nw.Peers())),
Table: 0,
BlocksTopic: uint(len(s.node.GossipOut().BlocksTopicPeers())),
Banned: 0,
Known: uint(len(pstore.Peers())),
Connected: uint(len(nw.Peers())),
Table: 0,
BlocksTopic: uint(len(s.node.GossipOut().BlocksTopicV1Peers())),
BlocksTopicV2: uint(len(s.node.GossipOut().BlocksTopicV2Peers())),
Banned: 0,
Known: uint(len(pstore.Peers())),
}
if gater := s.node.ConnectionGater(); gater != nil {
stats.Banned = uint(len(gater.ListBlockedPeers()))
......
......@@ -571,7 +571,7 @@ func (r requestResultErr) ResultCode() byte {
return byte(r)
}
func (s *SyncClient) doRequest(ctx context.Context, id peer.ID, n uint64) error {
func (s *SyncClient) doRequest(ctx context.Context, id peer.ID, expectedBlockNum uint64) error {
// open stream to peer
reqCtx, reqCancel := context.WithTimeout(ctx, streamTimeout)
str, err := s.newStreamFn(reqCtx, id, s.payloadByNumber)
......@@ -582,8 +582,8 @@ func (s *SyncClient) doRequest(ctx context.Context, id peer.ID, n uint64) error
defer str.Close()
// set write timeout (if available)
_ = str.SetWriteDeadline(time.Now().Add(clientWriteRequestTimeout))
if err := binary.Write(str, binary.LittleEndian, n); err != nil {
return fmt.Errorf("failed to write request (%d): %w", n, err)
if err := binary.Write(str, binary.LittleEndian, expectedBlockNum); err != nil {
return fmt.Errorf("failed to write request (%d): %w", expectedBlockNum, err)
}
if err := str.CloseWrite(); err != nil {
return fmt.Errorf("failed to close writer side while making request: %w", err)
......@@ -620,14 +620,22 @@ func (s *SyncClient) doRequest(ctx context.Context, id peer.ID, n uint64) error
if err != nil {
return fmt.Errorf("failed to read response: %w", err)
}
expectedBlockTime := s.cfg.TimestampForBlock(expectedBlockNum)
blockVersion := eth.BlockV1
if s.cfg.IsCanyon(expectedBlockTime) {
blockVersion = eth.BlockV2
}
var res eth.ExecutionPayload
if err := res.UnmarshalSSZ(uint32(len(data)), bytes.NewReader(data)); err != nil {
if err := res.UnmarshalSSZ(blockVersion, uint32(len(data)), bytes.NewReader(data)); err != nil {
return fmt.Errorf("failed to decode response: %w", err)
}
if err := str.CloseRead(); err != nil {
return fmt.Errorf("failed to close reading side")
}
if err := verifyBlock(&res, n); err != nil {
if err := verifyBlock(&res, expectedBlockNum); err != nil {
return fmt.Errorf("received execution payload is invalid: %w", err)
}
select {
......
......@@ -109,6 +109,11 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex
txs = append(txs, l1InfoTx)
txs = append(txs, depositTxs...)
var withdrawals *eth.Withdrawals
if ba.cfg.IsCanyon(nextL2Time) {
withdrawals = &eth.Withdrawals{}
}
return &eth.PayloadAttributes{
Timestamp: hexutil.Uint64(nextL2Time),
PrevRandao: eth.Bytes32(l1Info.MixDigest()),
......@@ -116,5 +121,6 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex
Transactions: txs,
NoTxPool: true,
GasLimit: (*eth.Uint64Quantity)(&sysConfig.GasLimit),
Withdrawals: withdrawals,
}, nil
}
......@@ -41,6 +41,37 @@ func AttributesMatchBlock(attrs *eth.PayloadAttributes, parentHash common.Hash,
if *attrs.GasLimit != block.GasLimit {
return fmt.Errorf("gas limit does not match. expected %d. got: %d", *attrs.GasLimit, block.GasLimit)
}
if withdrawalErr := checkWithdrawalsMatch(attrs.Withdrawals, block.Withdrawals); withdrawalErr != nil {
return withdrawalErr
}
return nil
}
func checkWithdrawalsMatch(attrWithdrawals *eth.Withdrawals, blockWithdrawals *eth.Withdrawals) error {
if attrWithdrawals == nil && blockWithdrawals == nil {
return nil
}
if attrWithdrawals == nil && blockWithdrawals != nil {
return fmt.Errorf("expected withdrawals in block to be nil, actual %v", *blockWithdrawals)
}
if attrWithdrawals != nil && blockWithdrawals == nil {
return fmt.Errorf("expected withdrawals in block to be non-nil %v, actual nil", *attrWithdrawals)
}
if len(*attrWithdrawals) != len(*blockWithdrawals) {
return fmt.Errorf("expected withdrawals in block to be %d, actual %d", len(*attrWithdrawals), len(*blockWithdrawals))
}
for idx, expected := range *attrWithdrawals {
actual := (*blockWithdrawals)[idx]
if expected != actual {
return fmt.Errorf("expected withdrawal %d to be %v, actual %v", idx, expected, actual)
}
}
return nil
}
......
package derive
import (
"testing"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/stretchr/testify/require"
)
func TestWithdrawalsMatch(t *testing.T) {
tests := []struct {
attrs *eth.Withdrawals
block *eth.Withdrawals
shouldMatch bool
}{
{
attrs: nil,
block: nil,
shouldMatch: true,
},
{
attrs: &eth.Withdrawals{},
block: nil,
shouldMatch: false,
},
{
attrs: nil,
block: &eth.Withdrawals{},
shouldMatch: false,
},
{
attrs: &eth.Withdrawals{},
block: &eth.Withdrawals{},
shouldMatch: true,
},
{
attrs: &eth.Withdrawals{
{
Index: 1,
},
},
block: &eth.Withdrawals{},
shouldMatch: false,
},
{
attrs: &eth.Withdrawals{
{
Index: 1,
},
},
block: &eth.Withdrawals{
{
Index: 2,
},
},
shouldMatch: false,
},
}
for _, test := range tests {
err := checkWithdrawalsMatch(test.attrs, test.block)
if test.shouldMatch {
require.NoError(t, err)
} else {
require.Error(t, err)
}
}
}
......@@ -182,6 +182,7 @@ func (eq *EngineQueue) AddUnsafePayload(payload *eth.ExecutionPayload) {
eq.log.Warn("cannot add nil unsafe payload")
return
}
if err := eq.unsafePayloads.Push(payload); err != nil {
eq.log.Warn("Could not add unsafe payload", "id", payload.ID(), "timestamp", uint64(payload.Timestamp), "err", err)
return
......
......@@ -73,7 +73,7 @@ func (l1t *L1Traversal) AdvanceL1Block(ctx context.Context) error {
// Parse L1 receipts of the given block and update the L1 system configuration
_, receipts, err := l1t.l1Blocks.FetchReceipts(ctx, nextL1Origin.Hash)
if err != nil {
return NewTemporaryError(fmt.Errorf("failed to fetch receipts of L1 block %s for L1 sysCfg update: %w", origin, err))
return NewTemporaryError(fmt.Errorf("failed to fetch receipts of L1 block %s (parent: %s) for L1 sysCfg update: %w", nextL1Origin, origin, err))
}
if err := UpdateSystemConfigWithL1Receipts(&l1t.sysCfg, receipts, l1t.cfg); err != nil {
// the sysCfg changes should always be formatted correctly.
......
......@@ -125,6 +125,10 @@ func (cfg *Config) ValidateL2Config(ctx context.Context, client L2Client) error
return nil
}
func (cfg *Config) TimestampForBlock(blockNumber uint64) uint64 {
return cfg.Genesis.L2Time + ((blockNumber - cfg.Genesis.L2.Number) * cfg.BlockTime)
}
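// For example (values mirror TestTimestampForBlock): with Genesis.L2Time = 100,
// Genesis.L2.Number = 0 and a 2-second BlockTime, block 25 has timestamp 100 + 25*2 = 150.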
func (cfg *Config) TargetBlockNumber(timestamp uint64) (num uint64, err error) {
// subtract genesis time from timestamp to get the time elapsed since genesis, and then divide that
// difference by the block time to get the expected L2 block number at the current time. If the
......
......@@ -396,3 +396,54 @@ func TestConfig_Check(t *testing.T) {
})
}
}
func TestTimestampForBlock(t *testing.T) {
config := randConfig()
tests := []struct {
name string
genesisTime uint64
genesisBlock uint64
blockTime uint64
blockNum uint64
expectedBlockTime uint64
}{
{
name: "FirstBlock",
genesisTime: 100,
genesisBlock: 0,
blockTime: 2,
blockNum: 0,
expectedBlockTime: 100,
},
{
name: "SecondBlock",
genesisTime: 100,
genesisBlock: 0,
blockTime: 2,
blockNum: 1,
expectedBlockTime: 102,
},
{
name: "NBlock",
genesisTime: 100,
genesisBlock: 0,
blockTime: 2,
blockNum: 25,
expectedBlockTime: 150,
},
}
for _, test := range tests {
test := test
t.Run(fmt.Sprintf("TestTimestampForBlock_%s", test.name), func(t *testing.T) {
config.Genesis.L2Time = test.genesisTime
config.Genesis.L2.Number = test.genesisBlock
config.BlockTime = test.blockTime
timestamp := config.TimestampForBlock(test.blockNum)
assert.Equal(t, test.expectedBlockTime, timestamp)
})
}
}
......@@ -15,9 +15,9 @@ func TestFilePoller_Read(t *testing.T) {
chanAPoller := NewFilePoller(ctx, chanA, time.Millisecond*100)
go func() {
chanB.Write([]byte("hello"))
_, _ = chanB.Write([]byte("hello"))
time.Sleep(time.Second * 1)
chanB.Write([]byte("world"))
_, _ = chanB.Write([]byte("world"))
}()
var buf [10]byte
n, err := chanAPoller.Read(buf[:])
......@@ -34,9 +34,9 @@ func TestFilePoller_Write(t *testing.T) {
bufch := make(chan []byte, 1)
go func() {
var buf [10]byte
chanB.Read(buf[:5])
_, _ = chanB.Read(buf[:5])
time.Sleep(time.Second * 1)
chanB.Read(buf[5:])
_, _ = chanB.Read(buf[5:])
bufch <- buf[:]
close(bufch)
}()
......@@ -59,7 +59,7 @@ func TestFilePoller_ReadCancel(t *testing.T) {
chanAPoller := NewFilePoller(ctx, chanA, time.Millisecond*100)
go func() {
chanB.Write([]byte("hello"))
_, _ = chanB.Write([]byte("hello"))
cancel()
}()
var buf [10]byte
......@@ -76,7 +76,7 @@ func TestFilePoller_WriteCancel(t *testing.T) {
go func() {
var buf [5]byte
chanB.Read(buf[:])
_, _ = chanB.Read(buf[:])
cancel()
}()
// use a large buffer to overflow the kernel buffer provided to pipe(2) so the write actually blocks
......
FROM --platform=$BUILDPLATFORM golang:1.21.1-alpine3.18 as builder
ARG VERSION=v0.0.0
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
COPY ./go.mod /app/go.mod
COPY ./go.sum /app/go.sum
WORKDIR /app
RUN go mod download
# build op-program with the shared go.mod & go.sum files
COPY ./op-program /app/op-program
COPY ./op-preimage /app/op-preimage
COPY ./op-node /app/op-node
COPY ./op-chain-ops /app/op-chain-ops
COPY ./op-service /app/op-service
COPY ./op-bindings /app/op-bindings
COPY ./.git /app/.git
WORKDIR /app/op-program
ARG TARGETOS TARGETARCH
RUN make op-program VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op_stack_go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /app/op-program/bin/op-program /usr/local/bin
COPY --from=builder /usr/local/bin/op-program /usr/local/bin/op-program
CMD ["op-program"]
# ignore everything but the dockerfile; the op-stack-go base image performs the build
*
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
GITCOMMIT ?= $(shell git rev-parse HEAD)
GITDATE ?= $(shell git show -s --format='%ct')
VERSION := v0.0.0
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
......@@ -30,9 +30,6 @@ clean:
test:
go test -v ./...
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
verify-goerli: op-program-host op-program-client
env GO111MODULE=on go run ./verify/cmd/goerli.go $$L1URL $$L2URL
......
......@@ -48,15 +48,19 @@ func (o *OracleEngine) L2OutputRoot() (eth.Bytes32, error) {
}
func (o *OracleEngine) GetPayload(ctx context.Context, payloadId eth.PayloadID) (*eth.ExecutionPayload, error) {
return o.api.GetPayloadV1(ctx, payloadId)
res, err := o.api.GetPayloadV2(ctx, payloadId)
if err != nil {
return nil, err
}
return res.ExecutionPayload, nil
}
func (o *OracleEngine) ForkchoiceUpdate(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) {
return o.api.ForkchoiceUpdatedV1(ctx, state, attr)
return o.api.ForkchoiceUpdatedV2(ctx, state, attr)
}
func (o *OracleEngine) NewPayload(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) {
return o.api.NewPayloadV1(ctx, payload)
return o.api.NewPayloadV2(ctx, payload)
}
func (o *OracleEngine) PayloadByHash(ctx context.Context, hash common.Hash) (*eth.ExecutionPayload, error) {
......@@ -64,7 +68,7 @@ func (o *OracleEngine) PayloadByHash(ctx context.Context, hash common.Hash) (*et
if block == nil {
return nil, ErrNotFound
}
return eth.BlockAsPayload(block)
return eth.BlockAsPayload(block, o.rollupCfg.CanyonTime)
}
func (o *OracleEngine) PayloadByNumber(ctx context.Context, n uint64) (*eth.ExecutionPayload, error) {
......
......@@ -29,7 +29,7 @@ func TestPayloadByHash(t *testing.T) {
block := stub.head
payload, err := engine.PayloadByHash(ctx, block.Hash())
require.NoError(t, err)
expected, err := eth.BlockAsPayload(block)
expected, err := eth.BlockAsPayload(block, engine.rollupCfg.CanyonTime)
require.NoError(t, err)
require.Equal(t, expected, payload)
})
......@@ -51,7 +51,7 @@ func TestPayloadByNumber(t *testing.T) {
block := stub.head
payload, err := engine.PayloadByNumber(ctx, block.NumberU64())
require.NoError(t, err)
expected, err := eth.BlockAsPayload(block)
expected, err := eth.BlockAsPayload(block, engine.rollupCfg.CanyonTime)
require.NoError(t, err)
require.Equal(t, expected, payload)
})
......@@ -124,7 +124,7 @@ func TestSystemConfigByL2Hash(t *testing.T) {
engine, stub := createOracleEngine(t)
t.Run("KnownBlock", func(t *testing.T) {
payload, err := eth.BlockAsPayload(stub.safe)
payload, err := eth.BlockAsPayload(stub.safe, engine.rollupCfg.CanyonTime)
require.NoError(t, err)
expected, err := derive.PayloadToSystemConfig(payload, engine.rollupCfg)
require.NoError(t, err)
......
......@@ -264,7 +264,7 @@ func (ea *L2EngineAPI) getPayload(ctx context.Context, payloadId eth.PayloadID)
ea.log.Error("failed to finish block building", "err", err)
return nil, engine.UnknownPayload
}
return eth.BlockAsPayload(bl)
return eth.BlockAsPayload(bl, ea.config().CanyonTime)
}
func (ea *L2EngineAPI) forkchoiceUpdated(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) {
......@@ -350,6 +350,25 @@ func (ea *L2EngineAPI) forkchoiceUpdated(ctx context.Context, state *eth.Forkcho
return valid(nil), nil
}
func toGethWithdrawals(payload *eth.ExecutionPayload) []*types.Withdrawal {
if payload.Withdrawals == nil {
return nil
}
result := make([]*types.Withdrawal, 0, len(*payload.Withdrawals))
for _, w := range *payload.Withdrawals {
result = append(result, &types.Withdrawal{
Index: w.Index,
Validator: w.Validator,
Address: w.Address,
Amount: w.Amount,
})
}
return result
}
func (ea *L2EngineAPI) newPayload(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) {
ea.log.Trace("L2Engine API request received", "method", "ExecutePayload", "number", payload.BlockNumber, "hash", payload.BlockHash)
txs := make([][]byte, len(payload.Transactions))
......@@ -371,6 +390,7 @@ func (ea *L2EngineAPI) newPayload(ctx context.Context, payload *eth.ExecutionPay
BaseFeePerGas: payload.BaseFeePerGas.ToBig(),
BlockHash: payload.BlockHash,
Transactions: txs,
Withdrawals: toGethWithdrawals(payload),
}, nil, nil)
if err != nil {
log.Debug("Invalid NewPayload params", "params", payload, "error", err)
......
......@@ -55,17 +55,25 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.
txRlp, err := tx.MarshalBinary()
api.assert.NoError(err)
result, err := api.engine.ForkchoiceUpdatedV1(api.ctx, &eth.ForkchoiceState{
nextBlockTime := eth.Uint64Quantity(genesis.Time + 1)
var w *eth.Withdrawals
if api.backend.Config().IsCanyon(uint64(nextBlockTime)) {
w = &eth.Withdrawals{}
}
result, err := api.engine.ForkchoiceUpdatedV2(api.ctx, &eth.ForkchoiceState{
HeadBlockHash: genesis.Hash(),
SafeBlockHash: genesis.Hash(),
FinalizedBlockHash: genesis.Hash(),
}, &eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(genesis.Time + 1),
Timestamp: nextBlockTime,
PrevRandao: eth.Bytes32(genesis.MixDigest),
SuggestedFeeRecipient: feeRecipient,
Transactions: []eth.Data{txRlp},
NoTxPool: true,
GasLimit: &gasLimit,
Withdrawals: w,
})
api.assert.Error(err)
api.assert.Equal(eth.ExecutionInvalid, result.PayloadStatus.Status)
......@@ -103,9 +111,16 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.
t.Run("RejectInvalidBlockHash", func(t *testing.T) {
api := newTestHelper(t, createBackend)
var w *eth.Withdrawals
if api.backend.Config().IsCanyon(uint64(0)) {
w = &eth.Withdrawals{}
}
// Invalid because BlockHash won't be correct (among many other reasons)
block := &eth.ExecutionPayload{}
r, err := api.engine.NewPayloadV1(api.ctx, block)
block := &eth.ExecutionPayload{
Withdrawals: w,
}
r, err := api.engine.NewPayloadV2(api.ctx, block)
api.assert.NoError(err)
api.assert.Equal(eth.ExecutionInvalidBlockHash, r.Status)
})
......@@ -122,7 +137,7 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.
newBlock.StateRoot = eth.Bytes32(genesis.TxHash)
updateBlockHash(newBlock)
r, err := api.engine.NewPayloadV1(api.ctx, newBlock)
r, err := api.engine.NewPayloadV2(api.ctx, newBlock)
api.assert.NoError(err)
api.assert.Equal(eth.ExecutionInvalid, r.Status)
})
......@@ -139,7 +154,7 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.
newBlock.Timestamp = eth.Uint64Quantity(genesis.Time)
updateBlockHash(newBlock)
r, err := api.engine.NewPayloadV1(api.ctx, newBlock)
r, err := api.engine.NewPayloadV2(api.ctx, newBlock)
api.assert.NoError(err)
api.assert.Equal(eth.ExecutionInvalid, r.Status)
})
......@@ -156,7 +171,7 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.
newBlock.Timestamp = eth.Uint64Quantity(genesis.Time - 1)
updateBlockHash(newBlock)
r, err := api.engine.NewPayloadV1(api.ctx, newBlock)
r, err := api.engine.NewPayloadV2(api.ctx, newBlock)
api.assert.NoError(err)
api.assert.Equal(eth.ExecutionInvalid, r.Status)
})
......@@ -165,7 +180,7 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentHeader()
result, err := api.engine.ForkchoiceUpdatedV1(api.ctx, &eth.ForkchoiceState{
result, err := api.engine.ForkchoiceUpdatedV2(api.ctx, &eth.ForkchoiceState{
HeadBlockHash: genesis.Hash(),
SafeBlockHash: genesis.Hash(),
FinalizedBlockHash: genesis.Hash(),
......@@ -185,7 +200,7 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentHeader()
result, err := api.engine.ForkchoiceUpdatedV1(api.ctx, &eth.ForkchoiceState{
result, err := api.engine.ForkchoiceUpdatedV2(api.ctx, &eth.ForkchoiceState{
HeadBlockHash: genesis.Hash(),
SafeBlockHash: genesis.Hash(),
FinalizedBlockHash: genesis.Hash(),
......@@ -207,7 +222,7 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.
gasLimit := eth.Uint64Quantity(params.MaxGasLimit + 1)
result, err := api.engine.ForkchoiceUpdatedV1(api.ctx, &eth.ForkchoiceState{
result, err := api.engine.ForkchoiceUpdatedV2(api.ctx, &eth.ForkchoiceState{
HeadBlockHash: genesis.Hash(),
SafeBlockHash: genesis.Hash(),
FinalizedBlockHash: genesis.Hash(),
......@@ -246,7 +261,7 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.
chainB1 := api.addBlockWithParent(genesis, eth.Uint64Quantity(genesis.Time+3))
result, err := api.engine.ForkchoiceUpdatedV1(api.ctx, &eth.ForkchoiceState{
result, err := api.engine.ForkchoiceUpdatedV2(api.ctx, &eth.ForkchoiceState{
HeadBlockHash: chainA3.BlockHash,
SafeBlockHash: chainB1.BlockHash,
FinalizedBlockHash: chainA2.BlockHash,
......@@ -266,7 +281,7 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi.
chainB1 := api.addBlockWithParent(genesis, eth.Uint64Quantity(genesis.Time+3))
result, err := api.engine.ForkchoiceUpdatedV1(api.ctx, &eth.ForkchoiceState{
result, err := api.engine.ForkchoiceUpdatedV2(api.ctx, &eth.ForkchoiceState{
HeadBlockHash: chainA3.BlockHash,
SafeBlockHash: chainA2.BlockHash,
FinalizedBlockHash: chainB1.BlockHash,
......@@ -349,7 +364,7 @@ func (h *testHelper) addBlockWithParent(head *types.Header, timestamp eth.Uint64
func (h *testHelper) forkChoiceUpdated(head common.Hash, safe common.Hash, finalized common.Hash) {
h.Log("forkChoiceUpdated", "head", head, "safe", safe, "finalized", finalized)
result, err := h.engine.ForkchoiceUpdatedV1(h.ctx, &eth.ForkchoiceState{
result, err := h.engine.ForkchoiceUpdatedV2(h.ctx, &eth.ForkchoiceState{
HeadBlockHash: head,
SafeBlockHash: safe,
FinalizedBlockHash: finalized,
......@@ -368,7 +383,14 @@ func (h *testHelper) startBlockBuilding(head *types.Header, newBlockTimestamp et
h.assert.NoError(err, "Failed to marshall tx %v", tx)
txData = append(txData, rlp)
}
result, err := h.engine.ForkchoiceUpdatedV1(h.ctx, &eth.ForkchoiceState{
canyonTime := h.backend.Config().CanyonTime
var w *eth.Withdrawals
if canyonTime != nil && *canyonTime <= uint64(newBlockTimestamp) {
w = &eth.Withdrawals{}
}
result, err := h.engine.ForkchoiceUpdatedV2(h.ctx, &eth.ForkchoiceState{
HeadBlockHash: head.Hash(),
SafeBlockHash: head.Hash(),
FinalizedBlockHash: head.Hash(),
......@@ -379,6 +401,7 @@ func (h *testHelper) startBlockBuilding(head *types.Header, newBlockTimestamp et
Transactions: txData,
NoTxPool: true,
GasLimit: &gasLimit,
Withdrawals: w,
})
h.assert.NoError(err)
h.assert.Equal(eth.ExecutionValid, result.PayloadStatus.Status)
......@@ -389,15 +412,16 @@ func (h *testHelper) startBlockBuilding(head *types.Header, newBlockTimestamp et
func (h *testHelper) getPayload(id *eth.PayloadID) *eth.ExecutionPayload {
h.Log("getPayload", "id", id)
block, err := h.engine.GetPayloadV1(h.ctx, *id)
envelope, err := h.engine.GetPayloadV2(h.ctx, *id)
h.assert.NoError(err)
h.assert.NotNil(block)
return block
h.assert.NotNil(envelope)
h.assert.NotNil(envelope.ExecutionPayload)
return envelope.ExecutionPayload
}
func (h *testHelper) newPayload(block *eth.ExecutionPayload) {
h.Log("newPayload", "hash", block.BlockHash)
r, err := h.engine.NewPayloadV1(h.ctx, block)
r, err := h.engine.NewPayloadV2(h.ctx, block)
h.assert.NoError(err)
h.assert.Equal(eth.ExecutionValid, r.Status)
h.assert.Nil(r.ValidationError)
......
......@@ -210,7 +210,7 @@ func TestL1TrustRPC(t *testing.T) {
func TestL1RPCKind(t *testing.T) {
t.Run("DefaultBasic", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs())
require.Equal(t, sources.RPCKindBasic, cfg.L1RPCKind)
require.Equal(t, sources.RPCKindStandard, cfg.L1RPCKind)
})
for _, kind := range sources.RPCProviderKinds {
t.Run(kind.String(), func(t *testing.T) {
......
......@@ -130,7 +130,7 @@ func NewConfig(
L2OutputRoot: l2OutputRoot,
L2Claim: l2Claim,
L2ClaimBlockNumber: l2ClaimBlockNum,
L1RPCKind: sources.RPCKindBasic,
L1RPCKind: sources.RPCKindStandard,
IsCustomChainConfig: isCustomConfig,
}
}
......
......@@ -86,7 +86,7 @@ var (
openum.EnumString(sources.RPCProviderKinds),
EnvVars: prefixEnvVars("L1_RPC_KIND"),
Value: func() *sources.RPCProviderKind {
out := sources.RPCKindBasic
out := sources.RPCKindStandard
return &out
}(),
}
......
FROM --platform=$BUILDPLATFORM golang:1.21.1-alpine3.18 as builder
ARG VERSION=v0.0.0
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
COPY ./go.mod /app/go.mod
COPY ./go.sum /app/go.sum
WORKDIR /app
RUN go mod download
# build op-proposer with the shared go.mod & go.sum files
COPY ./op-proposer /app/op-proposer
COPY ./op-bindings /app/op-bindings
COPY ./op-node /app/op-node
COPY ./op-service /app/op-service
COPY ./.git /app/.git
WORKDIR /app/op-proposer
ARG TARGETOS TARGETARCH
RUN make op-proposer VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH
ARG OP_STACK_GO_BUILDER=us-docker.pkg.dev/oplabs-tools-artifacts/images/op_stack_go:latest
FROM $OP_STACK_GO_BUILDER as builder
# See "make golang-docker" and /ops/docker/op-stack-go
FROM alpine:3.18
COPY --from=builder /app/op-proposer/bin/op-proposer /usr/local/bin
COPY --from=builder /usr/local/bin/op-proposer /usr/local/bin/op-proposer
CMD ["op-proposer"]
# ignore everything but the dockerfile; the op-stack-go base image performs the build
*
GITCOMMIT := $(shell git rev-parse HEAD)
GITDATE := $(shell git show -s --format='%ct')
GITCOMMIT ?= $(shell git rev-parse HEAD)
GITDATE ?= $(shell git show -s --format='%ct')
VERSION := v0.0.0
LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT)
......@@ -16,9 +16,6 @@ clean:
test:
go test -v ./...
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
.PHONY: \
clean \
op-proposer \
......
test:
go test -v ./...
lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./...
generate-mocks:
go generate ./...
......@@ -15,5 +12,6 @@ generate-mocks:
fuzz:
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzExecutionPayloadUnmarshal ./eth
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzExecutionPayloadMarshalUnmarshal ./eth
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzExecutionPayloadMarshalUnmarshalV1 ./eth
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzExecutionPayloadMarshalUnmarshalV2 ./eth
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzOBP01 ./eth
......@@ -9,16 +9,33 @@ import (
"sync"
)
type BlockVersion int
const ( // iota is reset to 0
BlockV1 BlockVersion = iota
BlockV2 = iota
)
// ExecutionPayload is the only SSZ type we have to marshal/unmarshal,
// so instead of importing a SSZ lib we implement the bare minimum.
// This is more efficient than RLP, and matches the L1 consensus-layer encoding of ExecutionPayload.
// All fields (4s are offsets to dynamic data)
const executionPayloadFixedPart = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4
const blockV1FixedPart = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4
// V1 + Withdrawals offset
const blockV2FixedPart = blockV1FixedPart + 4
const withdrawalSize = 8 + 8 + 20 + 8
// MAX_TRANSACTIONS_PER_PAYLOAD in consensus spec
// https://github.com/ethereum/consensus-specs/blob/ef434e87165e9a4c82a99f54ffd4974ae113f732/specs/bellatrix/beacon-chain.md#execution
const maxTransactionsPerPayload = 1 << 20
// MAX_WITHDRAWALS_PER_PAYLOAD in consensus spec
// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#execution
const maxWithdrawalsPerPayload = 1 << 4
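For reference, these constants resolve to concrete sizes: blockV1FixedPart is 32+20+32+32+256+32+8+8+8+8+4+32+32+4 = 508 bytes, blockV2FixedPart adds the 4-byte withdrawals offset for 512 bytes, and withdrawalSize is 8+8+20+8 = 44 bytes. The transactions offset therefore always occupies bytes 504-508 of a V1 payload, which is the range FuzzOBP01 further down patches.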
// ErrExtraDataTooLarge occurs when the ExecutionPayload's ExtraData field
// is too large to be properly represented in SSZ.
var ErrExtraDataTooLarge = errors.New("extra data too large")
......@@ -31,16 +48,44 @@ var payloadBufPool = sync.Pool{New: func() any {
}}
var ErrBadTransactionOffset = errors.New("transactions offset is smaller than extra data offset, aborting")
var ErrBadWithdrawalsOffset = errors.New("withdrawals offset is smaller than transaction offset, aborting")
func executionPayloadFixedPart(version BlockVersion) uint32 {
if version == BlockV2 {
return blockV2FixedPart
} else {
return blockV1FixedPart
}
}
func (payload *ExecutionPayload) inferVersion() BlockVersion {
if payload.Withdrawals != nil {
return BlockV2
} else {
return BlockV1
}
}
func (payload *ExecutionPayload) SizeSSZ() (full uint32) {
full = executionPayloadFixedPart + uint32(len(payload.ExtraData))
return executionPayloadFixedPart(payload.inferVersion()) + uint32(len(payload.ExtraData)) + payload.transactionSize() + payload.withdrawalSize()
}
func (payload *ExecutionPayload) withdrawalSize() uint32 {
if payload.Withdrawals == nil {
return 0
}
return uint32(len(*payload.Withdrawals) * withdrawalSize)
}
func (payload *ExecutionPayload) transactionSize() uint32 {
// One offset to each transaction
full += uint32(len(payload.Transactions)) * 4
result := uint32(len(payload.Transactions)) * 4
// Each transaction
for _, tx := range payload.Transactions {
full += uint32(len(tx))
result += uint32(len(tx))
}
return full
return result
}
// marshalBytes32LE returns the value of z as a 32-byte little-endian array.
......@@ -62,9 +107,13 @@ func unmarshalBytes32LE(in []byte, z *Uint256Quantity) {
// MarshalSSZ encodes the ExecutionPayload as SSZ type
func (payload *ExecutionPayload) MarshalSSZ(w io.Writer) (n int, err error) {
fixedSize := executionPayloadFixedPart(payload.inferVersion())
transactionSize := payload.transactionSize()
// Cast to uint32 to enable 32-bit MIPS support where math.MaxUint32-executionPayloadFixedPart is too big for int
// In that case, len(payload.ExtraData) can't be longer than an int so this is always false anyway.
if uint32(len(payload.ExtraData)) > math.MaxUint32-uint32(executionPayloadFixedPart) {
extraDataSize := uint32(len(payload.ExtraData))
if extraDataSize > math.MaxUint32-fixedSize {
return 0, ErrExtraDataTooLarge
}
......@@ -100,26 +149,58 @@ func (payload *ExecutionPayload) MarshalSSZ(w io.Writer) (n int, err error) {
binary.LittleEndian.PutUint64(buf[offset:offset+8], uint64(payload.Timestamp))
offset += 8
// offset to ExtraData
binary.LittleEndian.PutUint32(buf[offset:offset+4], executionPayloadFixedPart)
binary.LittleEndian.PutUint32(buf[offset:offset+4], fixedSize)
offset += 4
marshalBytes32LE(buf[offset:offset+32], &payload.BaseFeePerGas)
offset += 32
copy(buf[offset:offset+32], payload.BlockHash[:])
offset += 32
// offset to Transactions
binary.LittleEndian.PutUint32(buf[offset:offset+4], executionPayloadFixedPart+uint32(len(payload.ExtraData)))
binary.LittleEndian.PutUint32(buf[offset:offset+4], fixedSize+extraDataSize)
offset += 4
if offset != executionPayloadFixedPart {
panic("fixed part size is inconsistent")
if payload.Withdrawals == nil && offset != fixedSize {
panic("transactions - fixed part size is inconsistent")
}
if payload.Withdrawals != nil {
binary.LittleEndian.PutUint32(buf[offset:offset+4], fixedSize+extraDataSize+transactionSize)
offset += 4
if offset != fixedSize {
panic("withdrawals - fixed part size is inconsistent")
}
}
// dynamic value 1: ExtraData
copy(buf[offset:offset+uint32(len(payload.ExtraData))], payload.ExtraData[:])
offset += uint32(len(payload.ExtraData))
copy(buf[offset:offset+extraDataSize], payload.ExtraData[:])
offset += extraDataSize
// dynamic value 2: Transactions
marshalTransactions(buf[offset:], payload.Transactions)
marshalTransactions(buf[offset:offset+transactionSize], payload.Transactions)
offset += transactionSize
// dynamic value 3: Withdrawals
if payload.Withdrawals != nil {
marshalWithdrawals(buf[offset:], payload.Withdrawals)
}
return w.Write(buf)
}
func marshalWithdrawals(out []byte, withdrawals *Withdrawals) {
offset := uint32(0)
for _, withdrawal := range *withdrawals {
binary.LittleEndian.PutUint64(out[offset:offset+8], withdrawal.Index)
offset += 8
binary.LittleEndian.PutUint64(out[offset:offset+8], withdrawal.Validator)
offset += 8
copy(out[offset:offset+20], withdrawal.Address[:])
offset += 20
binary.LittleEndian.PutUint64(out[offset:offset+8], withdrawal.Amount)
offset += 8
}
}
func marshalTransactions(out []byte, txs []Data) {
offset := uint32(0)
txOffset := uint32(len(txs)) * 4
......@@ -133,8 +214,10 @@ func marshalTransactions(out []byte, txs []Data) {
}
// UnmarshalSSZ decodes the ExecutionPayload as SSZ type
func (payload *ExecutionPayload) UnmarshalSSZ(scope uint32, r io.Reader) error {
if scope < executionPayloadFixedPart {
func (payload *ExecutionPayload) UnmarshalSSZ(version BlockVersion, scope uint32, r io.Reader) error {
fixedSize := executionPayloadFixedPart(version)
if scope < fixedSize {
return fmt.Errorf("scope too small to decode execution payload: %d", scope)
}
......@@ -171,36 +254,99 @@ func (payload *ExecutionPayload) UnmarshalSSZ(scope uint32, r io.Reader) error {
payload.Timestamp = Uint64Quantity(binary.LittleEndian.Uint64(buf[offset : offset+8]))
offset += 8
extraDataOffset := binary.LittleEndian.Uint32(buf[offset : offset+4])
if extraDataOffset != executionPayloadFixedPart {
return fmt.Errorf("unexpected extra data offset: %d <> %d", extraDataOffset, executionPayloadFixedPart)
if extraDataOffset != fixedSize {
return fmt.Errorf("unexpected extra data offset: %d <> %d", extraDataOffset, fixedSize)
}
offset += 4
unmarshalBytes32LE(buf[offset:offset+32], &payload.BaseFeePerGas)
offset += 32
copy(payload.BlockHash[:], buf[offset:offset+32])
offset += 32
transactionsOffset := binary.LittleEndian.Uint32(buf[offset : offset+4])
if transactionsOffset < extraDataOffset {
return ErrBadTransactionOffset
}
offset += 4
if offset != executionPayloadFixedPart {
if version == BlockV1 && offset != fixedSize {
panic("fixed part size is inconsistent")
}
withdrawalsOffset := scope
if version == BlockV2 {
withdrawalsOffset = binary.LittleEndian.Uint32(buf[offset : offset+4])
// No offset increment, due to this being the last field
if withdrawalsOffset < transactionsOffset {
return ErrBadWithdrawalsOffset
}
}
if transactionsOffset > extraDataOffset+32 || transactionsOffset > scope {
return fmt.Errorf("extra-data is too large: %d", transactionsOffset-extraDataOffset)
}
extraDataSize := transactionsOffset - extraDataOffset
payload.ExtraData = make(BytesMax32, extraDataSize)
copy(payload.ExtraData, buf[extraDataOffset:transactionsOffset])
txs, err := unmarshalTransactions(buf[transactionsOffset:])
txs, err := unmarshalTransactions(buf[transactionsOffset:withdrawalsOffset])
if err != nil {
return fmt.Errorf("failed to unmarshal transactions list: %w", err)
}
payload.Transactions = txs
if version == BlockV2 {
if withdrawalsOffset > scope {
return fmt.Errorf("withdrawals offset is too large: %d", withdrawalsOffset)
}
withdrawals, err := unmarshalWithdrawals(buf[withdrawalsOffset:])
if err != nil {
return fmt.Errorf("failed to unmarshal withdrawals list: %w", err)
}
payload.Withdrawals = withdrawals
}
return nil
}
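A minimal caller-side sketch of the versioned round trip (the helper name and error wrapping are illustrative, not part of this change; it assumes the payload carries a non-nil Withdrawals field so it encodes as a V2 block):

func roundTripV2(payload *ExecutionPayload) error {
	var buf bytes.Buffer
	// Encoding infers V2 from the non-nil Withdrawals field.
	if _, err := payload.MarshalSSZ(&buf); err != nil {
		return fmt.Errorf("encode: %w", err)
	}
	data := buf.Bytes()
	// Decoding is explicit about the version, matching the fuzz tests below.
	var decoded ExecutionPayload
	if err := decoded.UnmarshalSSZ(BlockV2, uint32(len(data)), bytes.NewReader(data)); err != nil {
		return fmt.Errorf("decode: %w", err)
	}
	return nil
}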
func unmarshalWithdrawals(in []byte) (*Withdrawals, error) {
result := &Withdrawals{}
if len(in)%withdrawalSize != 0 {
return nil, errors.New("invalid withdrawals data")
}
withdrawalCount := len(in) / withdrawalSize
if withdrawalCount > maxWithdrawalsPerPayload {
return nil, fmt.Errorf("too many withdrawals: %d > %d", withdrawalCount, maxWithdrawalsPerPayload)
}
offset := 0
for i := 0; i < withdrawalCount; i++ {
withdrawal := Withdrawal{}
withdrawal.Index = binary.LittleEndian.Uint64(in[offset : offset+8])
offset += 8
withdrawal.Validator = binary.LittleEndian.Uint64(in[offset : offset+8])
offset += 8
copy(withdrawal.Address[:], in[offset:offset+20])
offset += 20
withdrawal.Amount = binary.LittleEndian.Uint64(in[offset : offset+8])
offset += 8
*result = append(*result, withdrawal)
}
return result, nil
}
func unmarshalTransactions(in []byte) (txs []Data, err error) {
scope := uint32(len(in))
if scope == 0 { // empty txs list
......
......@@ -3,29 +3,41 @@ package eth
import (
"bytes"
"encoding/binary"
"fmt"
"math"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/google/go-cmp/cmp"
"github.com/holiman/uint256"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
)
// FuzzExecutionPayloadUnmarshal checks that our SSZ decoding never panics
func FuzzExecutionPayloadUnmarshal(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
var payload ExecutionPayload
err := payload.UnmarshalSSZ(uint32(len(data)), bytes.NewReader(data))
if err != nil {
// not every input is a valid ExecutionPayload, that's ok. Should just not panic.
return
{
var payload ExecutionPayload
err := payload.UnmarshalSSZ(BlockV1, uint32(len(data)), bytes.NewReader(data))
if err != nil {
// not every input is a valid ExecutionPayload, that's ok. Should just not panic.
return
}
}
{
var payload ExecutionPayload
err := payload.UnmarshalSSZ(BlockV2, uint32(len(data)), bytes.NewReader(data))
if err != nil {
// not every input is a valid ExecutionPayload, that's ok. Should just not panic.
return
}
}
})
}
// FuzzExecutionPayloadMarshalUnmarshalV1 checks that our SSZ encoding/decoding round trips properly
func FuzzExecutionPayloadMarshalUnmarshal(f *testing.F) {
func FuzzExecutionPayloadMarshalUnmarshalV1(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte, a, b, c, d uint64, extraData []byte, txs uint16, txsData []byte) {
if len(data) < 32+20+32+32+256+32+32+32 {
return
......@@ -72,7 +84,77 @@ func FuzzExecutionPayloadMarshalUnmarshal(f *testing.F) {
t.Fatalf("failed to marshal ExecutionPayload: %v", err)
}
var roundTripped ExecutionPayload
err := roundTripped.UnmarshalSSZ(uint32(len(buf.Bytes())), bytes.NewReader(buf.Bytes()))
err := roundTripped.UnmarshalSSZ(BlockV1, uint32(len(buf.Bytes())), bytes.NewReader(buf.Bytes()))
if err != nil {
t.Fatalf("failed to decode previously marshalled payload: %v", err)
}
if diff := cmp.Diff(payload, roundTripped); diff != "" {
t.Fatalf("The data did not round trip correctly:\n%s", diff)
}
})
}
func FuzzExecutionPayloadMarshalUnmarshalV2(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte, a, b, c, d uint64, extraData []byte, txs uint16, txsData []byte, wCount uint16) {
if len(data) < 32+20+32+32+256+32+32+32 {
return
}
var payload ExecutionPayload
payload.ParentHash = *(*common.Hash)(data[:32])
data = data[32:]
payload.FeeRecipient = *(*common.Address)(data[:20])
data = data[20:]
payload.StateRoot = *(*Bytes32)(data[:32])
data = data[32:]
payload.ReceiptsRoot = *(*Bytes32)(data[:32])
data = data[32:]
payload.LogsBloom = *(*Bytes256)(data[:256])
data = data[256:]
payload.PrevRandao = *(*Bytes32)(data[:32])
data = data[32:]
payload.BlockNumber = Uint64Quantity(a)
payload.GasLimit = Uint64Quantity(a)
payload.GasUsed = Uint64Quantity(a)
payload.Timestamp = Uint64Quantity(a)
if len(extraData) > 32 {
extraData = extraData[:32]
}
payload.ExtraData = extraData
payload.BaseFeePerGas.SetBytes(data[:32])
payload.BlockHash = *(*common.Hash)(data[:32])
payload.Transactions = make([]Data, txs)
for i := 0; i < int(txs); i++ {
if len(txsData) < 2 {
payload.Transactions[i] = make(Data, 0)
continue
}
txSize := binary.LittleEndian.Uint16(txsData[:2])
txsData = txsData[2:]
if int(txSize) > len(txsData) {
txSize = uint16(len(txsData))
}
payload.Transactions[i] = txsData[:txSize]
txsData = txsData[txSize:]
}
wCount = wCount % maxWithdrawalsPerPayload
withdrawals := make(Withdrawals, wCount)
for i := 0; i < int(wCount); i++ {
withdrawals[i] = Withdrawal{
Index: a,
Validator: b,
Address: common.BytesToAddress(data[:20]),
Amount: c,
}
}
payload.Withdrawals = &withdrawals
var buf bytes.Buffer
if _, err := payload.MarshalSSZ(&buf); err != nil {
t.Fatalf("failed to marshal ExecutionPayload: %v", err)
}
var roundTripped ExecutionPayload
err := roundTripped.UnmarshalSSZ(BlockV2, uint32(len(buf.Bytes())), bytes.NewReader(buf.Bytes()))
if err != nil {
t.Fatalf("failed to decode previously marshalled payload: %v", err)
}
......@@ -99,7 +181,7 @@ func FuzzOBP01(f *testing.F) {
binary.LittleEndian.PutUint32(clone[504:508], txOffset)
var unmarshalled ExecutionPayload
err = unmarshalled.UnmarshalSSZ(uint32(len(clone)), bytes.NewReader(clone))
err = unmarshalled.UnmarshalSSZ(BlockV1, uint32(len(clone)), bytes.NewReader(clone))
if err == nil {
t.Fatalf("expected a failure, but didn't get one")
}
......@@ -122,7 +204,7 @@ func TestOPB01(t *testing.T) {
copy(data[504:508], make([]byte, 4))
var unmarshalled ExecutionPayload
err = unmarshalled.UnmarshalSSZ(uint32(len(data)), bytes.NewReader(data))
err = unmarshalled.UnmarshalSSZ(BlockV1, uint32(len(data)), bytes.NewReader(data))
require.Equal(t, ErrBadTransactionOffset, err)
}
......@@ -130,20 +212,126 @@ func TestOPB01(t *testing.T) {
// properly returns an error when the ExtraData field
// cannot be represented in the outputted SSZ.
func TestOPB04(t *testing.T) {
data := make([]byte, math.MaxUint32)
var buf bytes.Buffer
// First, test the maximum len - which in this case is the max uint32
// minus the execution payload fixed part.
payload := &ExecutionPayload{
ExtraData: make([]byte, math.MaxUint32-executionPayloadFixedPart),
ExtraData: data[:math.MaxUint32-executionPayloadFixedPart(BlockV1)],
Withdrawals: nil,
}
var buf bytes.Buffer
_, err := payload.MarshalSSZ(&buf)
require.NoError(t, err)
buf.Reset()
payload = &ExecutionPayload{
ExtraData: make([]byte, math.MaxUint32-executionPayloadFixedPart+1),
tests := []struct {
version BlockVersion
withdrawals *Withdrawals
}{
{BlockV1, nil},
{BlockV2, &Withdrawals{}},
}
for _, test := range tests {
payload := &ExecutionPayload{
ExtraData: data[:math.MaxUint32-executionPayloadFixedPart(test.version)+1],
Withdrawals: test.withdrawals,
}
_, err := payload.MarshalSSZ(&buf)
require.Error(t, err)
require.Equal(t, ErrExtraDataTooLarge, err)
}
}
func createPayloadWithWithdrawals(w *Withdrawals) *ExecutionPayload {
return &ExecutionPayload{
ParentHash: common.HexToHash("0x123"),
FeeRecipient: common.HexToAddress("0x456"),
StateRoot: Bytes32(common.HexToHash("0x789")),
ReceiptsRoot: Bytes32(common.HexToHash("0xabc")),
LogsBloom: Bytes256{byte(13), byte(14), byte(15)},
PrevRandao: Bytes32(common.HexToHash("0x111")),
BlockNumber: Uint64Quantity(222),
GasLimit: Uint64Quantity(333),
GasUsed: Uint64Quantity(444),
Timestamp: Uint64Quantity(555),
ExtraData: common.Hex2Bytes("0x666"),
BaseFeePerGas: *uint256.NewInt(777),
BlockHash: common.HexToHash("0x888"),
Withdrawals: w,
Transactions: []Data{common.Hex2Bytes("0x999")},
}
}
func TestMarshalUnmarshalWithdrawals(t *testing.T) {
emptyWithdrawal := &Withdrawals{}
withdrawals := &Withdrawals{
{
Index: 987,
Validator: 654,
Address: common.HexToAddress("0x898"),
Amount: 321,
},
}
maxWithdrawals := make(Withdrawals, maxWithdrawalsPerPayload)
for i := 0; i < maxWithdrawalsPerPayload; i++ {
maxWithdrawals[i] = Withdrawal{
Index: 987,
Validator: 654,
Address: common.HexToAddress("0x898"),
Amount: 321,
}
}
tooManyWithdrawals := make(Withdrawals, maxWithdrawalsPerPayload+1)
for i := 0; i < maxWithdrawalsPerPayload+1; i++ {
tooManyWithdrawals[i] = Withdrawal{
Index: 987,
Validator: 654,
Address: common.HexToAddress("0x898"),
Amount: 321,
}
}
tests := []struct {
name string
version BlockVersion
hasError bool
withdrawals *Withdrawals
}{
{"ZeroWithdrawalsSucceeds", BlockV2, false, emptyWithdrawal},
{"ZeroWithdrawalsFailsToDeserialize", BlockV1, true, emptyWithdrawal},
{"WithdrawalsSucceeds", BlockV2, false, withdrawals},
{"WithdrawalsFailsToDeserialize", BlockV1, true, withdrawals},
{"MaxWithdrawalsSucceeds", BlockV2, false, &maxWithdrawals},
{"TooManyWithdrawalsErrors", BlockV2, true, &tooManyWithdrawals},
}
for _, test := range tests {
test := test
t.Run(fmt.Sprintf("TestWithdrawalUnmarshalMarshal_%s", test.name), func(t *testing.T) {
input := createPayloadWithWithdrawals(test.withdrawals)
var buf bytes.Buffer
_, err := input.MarshalSSZ(&buf)
require.NoError(t, err)
data := buf.Bytes()
output := &ExecutionPayload{}
err = output.UnmarshalSSZ(test.version, uint32(len(data)), bytes.NewReader(data))
if test.hasError {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Equal(t, input, output)
if test.withdrawals != nil {
require.Equal(t, len(*test.withdrawals), len(*output.Withdrawals))
}
}
})
}
_, err = payload.MarshalSSZ(&buf)
require.Error(t, err)
require.Equal(t, ErrExtraDataTooLarge, err)
}
......@@ -6,13 +6,13 @@ import (
"math/big"
"reflect"
"github.com/holiman/uint256"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/holiman/uint256"
)
type ErrorCode int
......@@ -143,7 +143,7 @@ type ExecutionPayload struct {
ExtraData BytesMax32 `json:"extraData"`
BaseFeePerGas Uint256Quantity `json:"baseFeePerGas"`
BlockHash common.Hash `json:"blockHash"`
Withdrawals *[]Withdrawal `json:"withdrawals,omitempty"`
Withdrawals *Withdrawals `json:"withdrawals,omitempty"`
// Array of transaction objects, each object is a byte list (DATA) representing
// TransactionType || TransactionPayload or LegacyTransaction as defined in EIP-2718
Transactions []Data `json:"transactions"`
......@@ -168,6 +168,10 @@ func (s rawTransactions) EncodeIndex(i int, w *bytes.Buffer) {
w.Write(s[i])
}
func (payload *ExecutionPayload) CanyonBlock() bool {
return payload.Withdrawals != nil
}
// CheckBlockHash recomputes the block hash and returns if the embedded block hash matches.
func (payload *ExecutionPayload) CheckBlockHash() (actual common.Hash, ok bool) {
hasher := trie.NewStackTrie(nil)
......@@ -191,11 +195,17 @@ func (payload *ExecutionPayload) CheckBlockHash() (actual common.Hash, ok bool)
Nonce: types.BlockNonce{}, // zeroed, proof-of-work legacy
BaseFee: payload.BaseFeePerGas.ToBig(),
}
if payload.CanyonBlock() {
withdrawalHash := types.DeriveSha(*payload.Withdrawals, hasher)
header.WithdrawalsHash = &withdrawalHash
}
blockHash := header.Hash()
return blockHash, blockHash == payload.BlockHash
}
func BlockAsPayload(bl *types.Block) (*ExecutionPayload, error) {
func BlockAsPayload(bl *types.Block, canyonForkTime *uint64) (*ExecutionPayload, error) {
baseFee, overflow := uint256.FromBig(bl.BaseFee())
if overflow {
return nil, fmt.Errorf("invalid base fee in block: %s", bl.BaseFee())
......@@ -208,7 +218,8 @@ func BlockAsPayload(bl *types.Block) (*ExecutionPayload, error) {
}
opaqueTxs[i] = otx
}
return &ExecutionPayload{
payload := &ExecutionPayload{
ParentHash: bl.ParentHash(),
FeeRecipient: bl.Coinbase(),
StateRoot: Bytes32(bl.Root()),
......@@ -220,10 +231,16 @@ func BlockAsPayload(bl *types.Block) (*ExecutionPayload, error) {
GasUsed: Uint64Quantity(bl.GasUsed()),
Timestamp: Uint64Quantity(bl.Time()),
ExtraData: bl.Extra(),
BaseFeePerGas: Uint256Quantity(*baseFee),
BaseFeePerGas: *baseFee,
BlockHash: bl.Hash(),
Transactions: opaqueTxs,
}, nil
}
if canyonForkTime != nil && uint64(payload.Timestamp) >= *canyonForkTime {
payload.Withdrawals = &Withdrawals{}
}
return payload, nil
}
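An illustrative call site (block and canyonForkTime are placeholder variables, not taken from this diff):

// canyonForkTime is a *uint64; nil means Canyon is not scheduled.
payload, err := BlockAsPayload(block, canyonForkTime)
if err != nil {
	return fmt.Errorf("failed to convert block %s: %w", block.Hash(), err)
}
// CanyonBlock reports true exactly when Withdrawals was populated above.
_ = payload.CanyonBlock()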
type PayloadAttributes struct {
......@@ -234,7 +251,7 @@ type PayloadAttributes struct {
// suggested value for the coinbase field of the new payload
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient"`
// Withdrawals to include into the block -- should be nil or empty depending on Shanghai enablement
Withdrawals *[]Withdrawal `json:"withdrawals,omitempty"`
Withdrawals *Withdrawals `json:"withdrawals,omitempty"`
// Transactions to force into the block (always at the start of the transactions list).
Transactions []Data `json:"transactions,omitempty"`
// NoTxPool to disable adding any transactions from the transaction-pool.
......@@ -302,9 +319,23 @@ type SystemConfig struct {
}
// Withdrawal represents a validator withdrawal from the consensus layer.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#withdrawal
type Withdrawal struct {
Index uint64 `json:"index"` // monotonically increasing identifier issued by consensus layer
Validator uint64 `json:"validatorIndex"` // index of validator associated with withdrawal
Address common.Address `json:"address"` // target address for withdrawn ether
Amount uint64 `json:"amount"` // value of withdrawal in Gwei
}
// Withdrawals implements DerivableList for withdrawals.
type Withdrawals []Withdrawal
// Len returns the length of s.
func (s Withdrawals) Len() int { return len(s) }
// EncodeIndex encodes the i'th withdrawal to w. Note that this does not check for errors
// because we assume that *Withdrawal will only ever contain valid withdrawals that were either
// constructed by decoding or via public API in this package.
func (s Withdrawals) EncodeIndex(i int, w *bytes.Buffer) {
_ = rlp.Encode(w, s[i])
}
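Since Withdrawals satisfies DerivableList, a withdrawals root can be derived exactly as CheckBlockHash does above; a small illustrative sketch:

withdrawals := Withdrawals{{Index: 1, Validator: 2, Address: common.Address{0x01}, Amount: 3}}
// The resulting root is what a Canyon header carries in WithdrawalsHash.
root := types.DeriveSha(withdrawals, trie.NewStackTrie(nil))
_ = root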
......@@ -52,7 +52,7 @@ var testEthClientConfig = &EthClientConfig{
MaxConcurrentRequests: 10,
TrustRPC: false,
MustBePostMerge: false,
RPCProviderKind: RPCKindBasic,
RPCProviderKind: RPCKindStandard,
}
func randHash() (out common.Hash) {
......@@ -133,7 +133,7 @@ func TestEthClient_InfoByNumber(t *testing.T) {
"eth_getBlockByNumber", []any{n.String(), false}).Run(func(args mock.Arguments) {
*args[1].(**rpcHeader) = rhdr
}).Return([]error{nil})
s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{SeqWindowSize: 10}, true, RPCKindBasic))
s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{SeqWindowSize: 10}, true, RPCKindStandard))
require.NoError(t, err)
info, err := s.InfoByNumber(ctx, uint64(n))
require.NoError(t, err)
......@@ -152,7 +152,7 @@ func TestEthClient_WrongInfoByNumber(t *testing.T) {
"eth_getBlockByNumber", []any{n.String(), false}).Run(func(args mock.Arguments) {
*args[1].(**rpcHeader) = &rhdr2
}).Return([]error{nil})
s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{SeqWindowSize: 10}, true, RPCKindBasic))
s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{SeqWindowSize: 10}, true, RPCKindStandard))
require.NoError(t, err)
_, err = s.InfoByNumber(ctx, uint64(n))
require.Error(t, err, "cannot accept the wrong block")
......@@ -171,7 +171,7 @@ func TestEthClient_WrongInfoByHash(t *testing.T) {
"eth_getBlockByHash", []any{k, false}).Run(func(args mock.Arguments) {
*args[1].(**rpcHeader) = &rhdr2
}).Return([]error{nil})
s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{SeqWindowSize: 10}, true, RPCKindBasic))
s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{SeqWindowSize: 10}, true, RPCKindStandard))
require.NoError(t, err)
_, err = s.InfoByHash(ctx, k)
require.Error(t, err, "cannot accept the wrong block")
......
......@@ -51,7 +51,7 @@ func L2ClientDefaultConfig(config *rollup.Config, trustRPC bool) *L2ClientConfig
MaxConcurrentRequests: 10,
TrustRPC: trustRPC,
MustBePostMerge: true,
RPCProviderKind: RPCKindBasic,
RPCProviderKind: RPCKindStandard,
MethodResetDuration: time.Minute,
},
// Not bounded by span, to cover find-sync-start range fully for speedy recovery after errors.
......
......@@ -121,8 +121,9 @@ const (
RPCKindNethermind RPCProviderKind = "nethermind"
RPCKindDebugGeth RPCProviderKind = "debug_geth"
RPCKindErigon RPCProviderKind = "erigon"
RPCKindBasic RPCProviderKind = "basic" // try only the standard most basic receipt fetching
RPCKindAny RPCProviderKind = "any" // try any method available
RPCKindBasic RPCProviderKind = "basic" // try only the standard most basic receipt fetching
RPCKindAny RPCProviderKind = "any" // try any method available
RPCKindStandard RPCProviderKind = "standard" // try standard methods, including newer optimized standard RPC methods
)
var RPCProviderKinds = []RPCProviderKind{
......@@ -135,6 +136,7 @@ var RPCProviderKinds = []RPCProviderKind{
RPCKindErigon,
RPCKindBasic,
RPCKindAny,
RPCKindStandard,
}
func (kind RPCProviderKind) String() string {
......@@ -235,11 +237,14 @@ const (
// - Alchemy: https://docs.alchemy.com/reference/eth-getblockreceipts
// - Nethermind: https://docs.nethermind.io/nethermind/ethereum-client/json-rpc/parity#parity_getblockreceipts
ParityGetBlockReceipts
// EthGetBlockReceipts is a non-standard receipt fetching method in the eth namespace,
// EthGetBlockReceipts is a previously non-standard receipt fetching method in the eth namespace,
// supported by some RPC platforms.
// This has since been standardized in https://github.com/ethereum/execution-apis/pull/438 and adopted in Geth:
// https://github.com/ethereum/go-ethereum/pull/27702
// Available in:
// - Alchemy: 500 CU total (and deprecated)
// - QuickNode: 59 credits total (does not seem to work with block hash arg, inaccurate docs)
// - Standard, incl. Geth, Besu and Reth, and Nethermind has a PR in review.
// Method: eth_getBlockReceipts
// Params:
// - QuickNode: string, "quantity or tag", docs say incl. block hash, but API does not actually accept it.
......@@ -296,6 +301,8 @@ func AvailableReceiptsFetchingMethods(kind RPCProviderKind) ReceiptsFetchingMeth
return AlchemyGetTransactionReceipts | EthGetBlockReceipts |
DebugGetRawReceipts | ErigonGetBlockReceiptsByBlockHash |
ParityGetBlockReceipts | EthGetTransactionReceiptBatch
case RPCKindStandard:
return EthGetBlockReceipts | EthGetTransactionReceiptBatch
default:
return EthGetTransactionReceiptBatch
}
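For context, the method RPCKindStandard now prefers can be issued directly with go-ethereum's rpc client; an illustrative sketch (client, ctx and blockHash are placeholders, and a block hash argument only works with providers that accept it, as the notes above point out for QuickNode):

var receipts []*types.Receipt
if err := client.CallContext(ctx, &receipts, "eth_getBlockReceipts", blockHash); err != nil {
	return nil, fmt.Errorf("eth_getBlockReceipts failed: %w", err)
}
return receipts, nil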
......
......@@ -305,6 +305,16 @@ func TestEthClient_FetchReceipts(t *testing.T) {
providerKind: RPCKindBasic,
setup: fallbackCase(4, EthGetTransactionReceiptBatch),
},
{
name: "standard",
providerKind: RPCKindStandard,
setup: fallbackCase(4, EthGetBlockReceipts),
},
{
name: "standard fallback",
providerKind: RPCKindStandard,
setup: fallbackCase(4, EthGetBlockReceipts, EthGetTransactionReceiptBatch),
},
{
name: "any discovers alchemy",
providerKind: RPCKindAny,
......
......@@ -181,6 +181,7 @@ func (hdr *rpcHeader) Info(trustCache bool, mustBePostMerge bool) (eth.BlockInfo
type rpcBlock struct {
rpcHeader
Transactions []*types.Transaction `json:"transactions"`
Withdrawals *eth.Withdrawals `json:"withdrawals,omitempty"`
}
func (block *rpcBlock) verify() error {
......@@ -252,6 +253,7 @@ func (block *rpcBlock) ExecutionPayload(trustCache bool) (*eth.ExecutionPayload,
BaseFeePerGas: baseFee,
BlockHash: block.Hash,
Transactions: opaqueTxs,
Withdrawals: block.Withdrawals,
}, nil
}
......
This diff is collapsed.