Commit afe534c5 authored by Adrian Sutton

Merge branch 'develop' into aj/verify-more-than-block-hash

parents abba09a4 6c4a9ac4
...@@ -1420,13 +1420,6 @@ workflows: ...@@ -1420,13 +1420,6 @@ workflows:
- op-stack-go-lint - op-stack-go-lint
- devnet-allocs - devnet-allocs
- l1-geth-version-check - l1-geth-version-check
- go-e2e-test:
name: op-e2e-span-batch-tests
module: op-e2e
target: test-span-batch
requires:
- op-stack-go-lint
- devnet-allocs
- op-program-compat: - op-program-compat:
requires: requires:
- op-program-tests - op-program-tests
......
##################################################
# Getting Started #
##################################################
# Admin account
export GS_ADMIN_ADDRESS=
export GS_ADMIN_PRIVATE_KEY=
# Batcher account
export GS_BATCHER_ADDRESS=
export GS_BATCHER_PRIVATE_KEY=
# Proposer account
export GS_PROPOSER_ADDRESS=
export GS_PROPOSER_PRIVATE_KEY=
# Sequencer account
export GS_SEQUENCER_ADDRESS=
export GS_SEQUENCER_PRIVATE_KEY=
##################################################
# op-node Configuration #
##################################################
# The kind of RPC provider, used to inform optimal transaction receipt
# fetching. Valid options: alchemy, quicknode, infura, parity, nethermind,
# debug_geth, erigon, basic, any.
export L1_RPC_KIND=
##################################################
# Contract Deployment #
##################################################
# RPC URL for the L1 network to interact with
export L1_RPC_URL=
# Salt used via CREATE2 to determine implementation addresses
# NOTE: If you want to deploy contracts from scratch you MUST reload this
# variable to ensure the salt is regenerated and the contracts are
# deployed to new addresses (otherwise deployment will fail). A CREATE2
# sketch follows this file.
export IMPL_SALT=$(openssl rand -hex 32)
# Name for the deployed network
export DEPLOYMENT_CONTEXT=getting-started
# Optional Tenderly details for simulation link during deployment
export TENDERLY_PROJECT=
export TENDERLY_USERNAME=
# Optional Etherscan API key for contract verification
export ETHERSCAN_API_KEY=
# Private key to use for contract deployments; you don't need to worry about
# this for the Getting Started guide.
export PRIVATE_KEY=
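
Why reloading IMPL_SALT changes where the implementations land: CREATE2 derives a contract address from the deployer, the salt, and the init code, so a fresh salt yields fresh addresses for the same bytecode. A minimal, self-contained Go sketch of that derivation (illustrative values only, not code from this repo):

package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/sha3"
)

func keccak256(data ...[]byte) []byte {
	h := sha3.NewLegacyKeccak256()
	for _, d := range data {
		h.Write(d)
	}
	return h.Sum(nil)
}

// create2Address computes keccak256(0xff ++ deployer ++ salt ++ keccak256(initCode))[12:].
func create2Address(deployer [20]byte, salt [32]byte, initCode []byte) [20]byte {
	digest := keccak256([]byte{0xff}, deployer[:], salt[:], keccak256(initCode))
	var addr [20]byte
	copy(addr[:], digest[12:])
	return addr
}

func main() {
	var deployer [20]byte          // hypothetical deployer address
	initCode := []byte{0x60, 0x00} // hypothetical init code
	a := create2Address(deployer, [32]byte{0x01}, initCode)
	b := create2Address(deployer, [32]byte{0x02}, initCode)
	fmt.Println(hex.EncodeToString(a[:]))
	fmt.Println(hex.EncodeToString(b[:])) // different salt => different address
}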
...@@ -30,6 +30,7 @@ packages/contracts-bedrock/deployments/anvil ...@@ -30,6 +30,7 @@ packages/contracts-bedrock/deployments/anvil
.env .env
.env* .env*
!.env.example !.env.example
!.envrc.example
*.log *.log
.devnet .devnet
......
...@@ -8,7 +8,7 @@ require github.com/ethereum-optimism/optimism v0.0.0 ...@@ -8,7 +8,7 @@ require github.com/ethereum-optimism/optimism v0.0.0
require ( require (
golang.org/x/crypto v0.14.0 // indirect golang.org/x/crypto v0.14.0 // indirect
golang.org/x/sys v0.13.0 // indirect golang.org/x/sys v0.14.0 // indirect
) )
replace github.com/ethereum-optimism/optimism v0.0.0 => ../../.. replace github.com/ethereum-optimism/optimism v0.0.0 => ../../..
...@@ -6,7 +6,7 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU ...@@ -6,7 +6,7 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
...@@ -42,7 +42,7 @@ require ( ...@@ -42,7 +42,7 @@ require (
golang.org/x/crypto v0.14.0 golang.org/x/crypto v0.14.0
golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/exp v0.0.0-20231006140011-7918f672742d
golang.org/x/sync v0.5.0 golang.org/x/sync v0.5.0
golang.org/x/term v0.13.0 golang.org/x/term v0.14.0
golang.org/x/time v0.4.0 golang.org/x/time v0.4.0
gorm.io/driver/postgres v1.5.4 gorm.io/driver/postgres v1.5.4
gorm.io/gorm v1.25.5 gorm.io/gorm v1.25.5
...@@ -199,7 +199,7 @@ require ( ...@@ -199,7 +199,7 @@ require (
go.uber.org/zap v1.26.0 // indirect go.uber.org/zap v1.26.0 // indirect
golang.org/x/mod v0.13.0 // indirect golang.org/x/mod v0.13.0 // indirect
golang.org/x/net v0.17.0 // indirect golang.org/x/net v0.17.0 // indirect
golang.org/x/sys v0.13.0 // indirect golang.org/x/sys v0.14.0 // indirect
golang.org/x/text v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect
golang.org/x/tools v0.14.0 // indirect golang.org/x/tools v0.14.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect google.golang.org/protobuf v1.31.0 // indirect
......
...@@ -879,13 +879,13 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= ...@@ -879,13 +879,13 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
......
...@@ -51,3 +51,7 @@ export interface WithdrawalResponse { ...@@ -51,3 +51,7 @@ export interface WithdrawalResponse {
hasNextPage: boolean; hasNextPage: boolean;
items: WithdrawalItem[]; items: WithdrawalItem[];
} }
export interface BridgeSupplyView {
l1DepositSum: number /* float64 */;
l2WithdrawalSum: number /* float64 */;
}
...@@ -35,6 +35,8 @@ const ( ...@@ -35,6 +35,8 @@ const (
HealthPath = "/healthz" HealthPath = "/healthz"
DepositsPath = "/api/v0/deposits/" DepositsPath = "/api/v0/deposits/"
WithdrawalsPath = "/api/v0/withdrawals/" WithdrawalsPath = "/api/v0/withdrawals/"
SupplyPath = "/api/v0/supply"
) )
// Api ... Indexer API struct // Api ... Indexer API struct
...@@ -140,15 +142,14 @@ func (a *APIService) initRouter(apiConfig config.ServerConfig) { ...@@ -140,15 +142,14 @@ func (a *APIService) initRouter(apiConfig config.ServerConfig) {
promRecorder := metrics.NewPromHTTPRecorder(a.metricsRegistry, MetricsNamespace) promRecorder := metrics.NewPromHTTPRecorder(a.metricsRegistry, MetricsNamespace)
// (2) Inject routing middleware
apiRouter.Use(chiMetricsMiddleware(promRecorder)) apiRouter.Use(chiMetricsMiddleware(promRecorder))
apiRouter.Use(middleware.Timeout(time.Duration(apiConfig.WriteTimeout) * time.Second)) apiRouter.Use(middleware.Timeout(time.Duration(apiConfig.WriteTimeout) * time.Second))
apiRouter.Use(middleware.Recoverer) apiRouter.Use(middleware.Recoverer)
apiRouter.Use(middleware.Heartbeat(HealthPath)) apiRouter.Use(middleware.Heartbeat(HealthPath))
// (3) Set GET routes
apiRouter.Get(fmt.Sprintf(DepositsPath+addressParam, ethereumAddressRegex), h.L1DepositsHandler) apiRouter.Get(fmt.Sprintf(DepositsPath+addressParam, ethereumAddressRegex), h.L1DepositsHandler)
apiRouter.Get(fmt.Sprintf(WithdrawalsPath+addressParam, ethereumAddressRegex), h.L2WithdrawalsHandler) apiRouter.Get(fmt.Sprintf(WithdrawalsPath+addressParam, ethereumAddressRegex), h.L2WithdrawalsHandler)
apiRouter.Get(SupplyPath, h.SupplyView)
a.router = apiRouter a.router = apiRouter
} }
......
...@@ -95,6 +95,14 @@ func (mbv *MockBridgeTransfersView) L2BridgeWithdrawalsByAddress(address common. ...@@ -95,6 +95,14 @@ func (mbv *MockBridgeTransfersView) L2BridgeWithdrawalsByAddress(address common.
}, },
}, nil }, nil
} }
func (mbv *MockBridgeTransfersView) L1BridgeDepositSum() (float64, error) {
return 69, nil
}
func (mbv *MockBridgeTransfersView) L2BridgeWithdrawalSum() (float64, error) {
return 420, nil
}
func TestHealthz(t *testing.T) { func TestHealthz(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LvlInfo)
cfg := &Config{ cfg := &Config{
......
...@@ -49,6 +49,11 @@ type WithdrawalResponse struct { ...@@ -49,6 +49,11 @@ type WithdrawalResponse struct {
Items []WithdrawalItem `json:"items"` Items []WithdrawalItem `json:"items"`
} }
type BridgeSupplyView struct {
L1DepositSum float64 `json:"l1DepositSum"`
L2WithdrawalSum float64 `json:"l2WithdrawalSum"`
}
// FIXME make a pure function that returns a struct instead of newWithdrawalResponse // FIXME make a pure function that returns a struct instead of newWithdrawalResponse
// newWithdrawalResponse ... Converts a database.L2BridgeWithdrawalsResponse to an api.WithdrawalResponse // newWithdrawalResponse ... Converts a database.L2BridgeWithdrawalsResponse to an api.WithdrawalResponse
func CreateWithdrawalResponse(withdrawals *database.L2BridgeWithdrawalsResponse) WithdrawalResponse { func CreateWithdrawalResponse(withdrawals *database.L2BridgeWithdrawalsResponse) WithdrawalResponse {
......
package routes
import (
"net/http"
"github.com/ethereum-optimism/optimism/indexer/api/models"
)
// SupplyView ... Handles /api/v0/supply GET requests
func (h Routes) SupplyView(w http.ResponseWriter, r *http.Request) {
depositSum, err := h.view.L1BridgeDepositSum()
if err != nil {
http.Error(w, "internal server error reading deposits", http.StatusInternalServerError)
h.logger.Error("unable to read deposits from DB", "err", err.Error())
return
}
withdrawalSum, err := h.view.L2BridgeWithdrawalSum()
if err != nil {
http.Error(w, "internal server error reading withdrawals", http.StatusInternalServerError)
h.logger.Error("unable to read withdrawals from DB", "err", err.Error())
return
}
view := models.BridgeSupplyView{
L1DepositSum: depositSum,
L2WithdrawalSum: withdrawalSum,
}
err = jsonResponse(w, view, http.StatusOK)
if err != nil {
h.logger.Error("error writing response", "err", err)
}
}
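
For reference, the payload the new /api/v0/supply route writes is just the two float fields of models.BridgeSupplyView. A standalone sketch that mirrors the struct locally and prints the expected JSON shape (values are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of models.BridgeSupplyView, for illustration only.
type bridgeSupplyView struct {
	L1DepositSum    float64 `json:"l1DepositSum"`
	L2WithdrawalSum float64 `json:"l2WithdrawalSum"`
}

func main() {
	view := bridgeSupplyView{L1DepositSum: 1234.5, L2WithdrawalSum: 987}
	out, _ := json.MarshalIndent(view, "", "  ")
	fmt.Println(string(out))
	// {
	//   "l1DepositSum": 1234.5,
	//   "l2WithdrawalSum": 987
	// }
}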
...@@ -23,6 +23,7 @@ const ( ...@@ -23,6 +23,7 @@ const (
healthz = "get_health" healthz = "get_health"
deposits = "get_deposits" deposits = "get_deposits"
withdrawals = "get_withdrawals" withdrawals = "get_withdrawals"
sum = "get_sum"
) )
// Option ... Provides configuration through callback injection // Option ... Provides configuration through callback injection
...@@ -164,6 +165,25 @@ func (c *Client) GetAllDepositsByAddress(l1Address common.Address) ([]models.Dep ...@@ -164,6 +165,25 @@ func (c *Client) GetAllDepositsByAddress(l1Address common.Address) ([]models.Dep
} }
// GetSupplyAssessment ... Returns an assessment of the current supply
// on both L1 and L2. This includes the individual sums of
// (L1/L2) deposits and withdrawals
func (c *Client) GetSupplyAssessment() (*models.BridgeSupplyView, error) {
url := c.cfg.BaseURL + api.SupplyPath
resp, err := c.doRecordRequest(sum, url)
if err != nil {
return nil, err
}
var bsv *models.BridgeSupplyView
if err := json.Unmarshal(resp, &bsv); err != nil {
return nil, err
}
return bsv, nil
}
// GetAllWithdrawalsByAddress ... Gets all withdrawals provided a L2 address // GetAllWithdrawalsByAddress ... Gets all withdrawals provided a L2 address
func (c *Client) GetAllWithdrawalsByAddress(l2Address common.Address) ([]models.WithdrawalItem, error) { func (c *Client) GetAllWithdrawalsByAddress(l2Address common.Address) ([]models.WithdrawalItem, error) {
var withdrawals []models.WithdrawalItem var withdrawals []models.WithdrawalItem
......
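
The new GetSupplyAssessment client method above can be consumed without knowing the transport details. A hedged sketch of a caller, with the client hidden behind a small local interface and a stub standing in for a configured *Client (the constructor is not shown in this diff):

package main

import (
	"fmt"
	"log"
)

// Local mirror of models.BridgeSupplyView, for illustration only.
type bridgeSupplyView struct {
	L1DepositSum    float64
	L2WithdrawalSum float64
}

// supplyClient captures the one method this sketch needs from the indexer client.
type supplyClient interface {
	GetSupplyAssessment() (*bridgeSupplyView, error)
}

// stubClient stands in for a configured client; a real caller would use the
// indexer api client instead.
type stubClient struct{}

func (stubClient) GetSupplyAssessment() (*bridgeSupplyView, error) {
	return &bridgeSupplyView{L1DepositSum: 100, L2WithdrawalSum: 40}, nil
}

func reportSupply(c supplyClient) error {
	assessment, err := c.GetSupplyAssessment()
	if err != nil {
		return fmt.Errorf("fetch supply assessment: %w", err)
	}
	log.Printf("l1 deposits=%.0f l2 withdrawals=%.0f net=%.0f",
		assessment.L1DepositSum, assessment.L2WithdrawalSum,
		assessment.L1DepositSum-assessment.L2WithdrawalSum)
	return nil
}

func main() {
	if err := reportSupply(stubClient{}); err != nil {
		log.Fatal(err)
	}
}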
...@@ -61,10 +61,12 @@ type L2BridgeWithdrawalWithTransactionHashes struct { ...@@ -61,10 +61,12 @@ type L2BridgeWithdrawalWithTransactionHashes struct {
type BridgeTransfersView interface { type BridgeTransfersView interface {
L1BridgeDeposit(common.Hash) (*L1BridgeDeposit, error) L1BridgeDeposit(common.Hash) (*L1BridgeDeposit, error)
L1BridgeDepositSum() (float64, error)
L1BridgeDepositWithFilter(BridgeTransfer) (*L1BridgeDeposit, error) L1BridgeDepositWithFilter(BridgeTransfer) (*L1BridgeDeposit, error)
L1BridgeDepositsByAddress(common.Address, string, int) (*L1BridgeDepositsResponse, error) L1BridgeDepositsByAddress(common.Address, string, int) (*L1BridgeDepositsResponse, error)
L2BridgeWithdrawal(common.Hash) (*L2BridgeWithdrawal, error) L2BridgeWithdrawal(common.Hash) (*L2BridgeWithdrawal, error)
L2BridgeWithdrawalSum() (float64, error)
L2BridgeWithdrawalWithFilter(BridgeTransfer) (*L2BridgeWithdrawal, error) L2BridgeWithdrawalWithFilter(BridgeTransfer) (*L2BridgeWithdrawal, error)
L2BridgeWithdrawalsByAddress(common.Address, string, int) (*L2BridgeWithdrawalsResponse, error) L2BridgeWithdrawalsByAddress(common.Address, string, int) (*L2BridgeWithdrawalsResponse, error)
} }
...@@ -136,6 +138,17 @@ type L1BridgeDepositsResponse struct { ...@@ -136,6 +138,17 @@ type L1BridgeDepositsResponse struct {
HasNextPage bool HasNextPage bool
} }
// L1BridgeDepositSum ... returns the sum of all l1 bridge deposit mints in gwei
func (db *bridgeTransfersDB) L1BridgeDepositSum() (float64, error) {
var sum float64
result := db.gorm.Model(&L1TransactionDeposit{}).Select("sum(amount)").Scan(&sum)
if result.Error != nil {
return 0, result.Error
}
return sum, nil
}
// L1BridgeDepositsByAddress retrieves a list of deposits initiated by the specified address, // L1BridgeDepositsByAddress retrieves a list of deposits initiated by the specified address,
// coupled with the L1/L2 transaction hashes that complete the bridge transaction. // coupled with the L1/L2 transaction hashes that complete the bridge transaction.
func (db *bridgeTransfersDB) L1BridgeDepositsByAddress(address common.Address, cursor string, limit int) (*L1BridgeDepositsResponse, error) { func (db *bridgeTransfersDB) L1BridgeDepositsByAddress(address common.Address, cursor string, limit int) (*L1BridgeDepositsResponse, error) {
...@@ -233,6 +246,16 @@ func (db *bridgeTransfersDB) L2BridgeWithdrawal(txWithdrawalHash common.Hash) (* ...@@ -233,6 +246,16 @@ func (db *bridgeTransfersDB) L2BridgeWithdrawal(txWithdrawalHash common.Hash) (*
return &withdrawal, nil return &withdrawal, nil
} }
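// L2BridgeWithdrawalSum ... returns the sum of all L2 bridge withdrawal amounts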
func (db *bridgeTransfersDB) L2BridgeWithdrawalSum() (float64, error) {
var sum float64
result := db.gorm.Model(&L2TransactionWithdrawal{}).Select("sum(amount)").Scan(&sum)
if result.Error != nil {
return 0, result.Error
}
return sum, nil
}
// L2BridgeWithdrawalWithFilter queries for a bridge withdrawal with set fields in the `BridgeTransfer` filter // L2BridgeWithdrawalWithFilter queries for a bridge withdrawal with set fields in the `BridgeTransfer` filter
func (db *bridgeTransfersDB) L2BridgeWithdrawalWithFilter(filter BridgeTransfer) (*L2BridgeWithdrawal, error) { func (db *bridgeTransfersDB) L2BridgeWithdrawalWithFilter(filter BridgeTransfer) (*L2BridgeWithdrawal, error) {
var withdrawal L2BridgeWithdrawal var withdrawal L2BridgeWithdrawal
......
...@@ -8,6 +8,7 @@ import ( ...@@ -8,6 +8,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/ethereum-optimism/optimism/indexer/bigint"
e2etest_utils "github.com/ethereum-optimism/optimism/indexer/e2e_tests/utils" e2etest_utils "github.com/ethereum-optimism/optimism/indexer/e2e_tests/utils"
op_e2e "github.com/ethereum-optimism/optimism/op-e2e" op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
...@@ -447,7 +448,7 @@ func TestE2EBridgeTransfersCursoredWithdrawals(t *testing.T) { ...@@ -447,7 +448,7 @@ func TestE2EBridgeTransfersCursoredWithdrawals(t *testing.T) {
} }
} }
func TestClientGetWithdrawals(t *testing.T) { func TestClientBridgeFunctions(t *testing.T) {
testSuite := createE2ETestSuite(t) testSuite := createE2ETestSuite(t)
// (1) Generate contract bindings for the L1 and L2 standard bridges // (1) Generate contract bindings for the L1 and L2 standard bridges
...@@ -459,12 +460,16 @@ func TestClientGetWithdrawals(t *testing.T) { ...@@ -459,12 +460,16 @@ func TestClientGetWithdrawals(t *testing.T) {
// (2) Create test actors that will deposit and withdraw using the standard bridge // (2) Create test actors that will deposit and withdraw using the standard bridge
aliceAddr := testSuite.OpCfg.Secrets.Addresses().Alice aliceAddr := testSuite.OpCfg.Secrets.Addresses().Alice
bobAddr := testSuite.OpCfg.Secrets.Addresses().Bob bobAddr := testSuite.OpCfg.Secrets.Addresses().Bob
malAddr := testSuite.OpCfg.Secrets.Addresses().Mallory
type actor struct { type actor struct {
addr common.Address addr common.Address
priv *ecdsa.PrivateKey priv *ecdsa.PrivateKey
} }
mintSum := bigint.Zero
withdrawSum := bigint.Zero
actors := []actor{ actors := []actor{
{ {
addr: aliceAddr, addr: aliceAddr,
...@@ -474,6 +479,10 @@ func TestClientGetWithdrawals(t *testing.T) { ...@@ -474,6 +479,10 @@ func TestClientGetWithdrawals(t *testing.T) {
addr: bobAddr, addr: bobAddr,
priv: testSuite.OpCfg.Secrets.Bob, priv: testSuite.OpCfg.Secrets.Bob,
}, },
{
addr: malAddr,
priv: testSuite.OpCfg.Secrets.Mallory,
},
} }
// (3) Iterate over each actor and deposit / withdraw // (3) Iterate over each actor and deposit / withdraw
...@@ -491,6 +500,8 @@ func TestClientGetWithdrawals(t *testing.T) { ...@@ -491,6 +500,8 @@ func TestClientGetWithdrawals(t *testing.T) {
_, err = wait.ForReceiptOK(context.Background(), testSuite.L1Client, depositTx.Hash()) _, err = wait.ForReceiptOK(context.Background(), testSuite.L1Client, depositTx.Hash())
require.NoError(t, err) require.NoError(t, err)
mintSum = new(big.Int).Add(mintSum, depositTx.Value())
// (3.b) Initiate withdrawal transaction via L2ToL1MessagePasser contract // (3.b) Initiate withdrawal transaction via L2ToL1MessagePasser contract
l2ToL1MessagePasserWithdrawTx, err := l2ToL1MessagePasser.Receive(l2Opts) l2ToL1MessagePasserWithdrawTx, err := l2ToL1MessagePasser.Receive(l2Opts)
require.NoError(t, err) require.NoError(t, err)
...@@ -503,6 +514,8 @@ func TestClientGetWithdrawals(t *testing.T) { ...@@ -503,6 +514,8 @@ func TestClientGetWithdrawals(t *testing.T) {
return l2Header != nil && l2Header.Number.Uint64() >= l2ToL1WithdrawReceipt.BlockNumber.Uint64(), nil return l2Header != nil && l2Header.Number.Uint64() >= l2ToL1WithdrawReceipt.BlockNumber.Uint64(), nil
})) }))
withdrawSum = new(big.Int).Add(withdrawSum, l2ToL1MessagePasserWithdrawTx.Value())
// (3.d) Ensure that withdrawal and deposit txs are retrievable via API // (3.d) Ensure that withdrawal and deposit txs are retrievable via API
deposits, err := testSuite.Client.GetAllDepositsByAddress(actor.addr) deposits, err := testSuite.Client.GetAllDepositsByAddress(actor.addr)
require.NoError(t, err) require.NoError(t, err)
...@@ -513,6 +526,17 @@ func TestClientGetWithdrawals(t *testing.T) { ...@@ -513,6 +526,17 @@ func TestClientGetWithdrawals(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Len(t, withdrawals, 1) require.Len(t, withdrawals, 1)
require.Equal(t, l2ToL1MessagePasserWithdrawTx.Hash().String(), withdrawals[0].TransactionHash) require.Equal(t, l2ToL1MessagePasserWithdrawTx.Hash().String(), withdrawals[0].TransactionHash)
} }
// (4) Ensure that supply assessment is correct
assessment, err := testSuite.Client.GetSupplyAssessment()
require.NoError(t, err)
mintFloat, _ := mintSum.Float64()
require.Equal(t, mintFloat, assessment.L1DepositSum)
withdrawFloat, _ := withdrawSum.Float64()
require.Equal(t, withdrawFloat, assessment.L2WithdrawalSum)
} }
...@@ -96,8 +96,8 @@ CREATE TABLE IF NOT EXISTS l1_transaction_deposits ( ...@@ -96,8 +96,8 @@ CREATE TABLE IF NOT EXISTS l1_transaction_deposits (
-- transaction data. NOTE: `to_address` is the recipient of funds transferred in value field of the -- transaction data. NOTE: `to_address` is the recipient of funds transferred in value field of the
-- L2 deposit transaction and not the amount minted on L1 from the source address. Hence the `amount` -- L2 deposit transaction and not the amount minted on L1 from the source address. Hence the `amount`
-- column in this table does NOT indiciate the amount transferred to the recipient but instead funds -- column in this table does NOT indicate the amount transferred to the recipient but instead funds
-- bridged from L1 into `from_address`. -- bridged from L1 by the `from_address`.
from_address VARCHAR NOT NULL, from_address VARCHAR NOT NULL,
to_address VARCHAR NOT NULL, to_address VARCHAR NOT NULL,
......
(Two file diffs are collapsed and not shown.)
package main
import (
"bytes"
"context"
"errors"
"flag"
"fmt"
"math/big"
"os"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/client"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
)
func CalcBaseFee(parent eth.BlockInfo, elasticity uint64, canyonActive bool) *big.Int {
denomUint := uint64(50)
if canyonActive {
denomUint = uint64(250)
}
parentGasTarget := parent.GasLimit() / elasticity
// If the parent gasUsed is the same as the target, the baseFee remains unchanged.
if parent.GasUsed() == parentGasTarget {
return new(big.Int).Set(parent.BaseFee())
}
var (
num = new(big.Int)
denom = new(big.Int)
)
if parent.GasUsed() > parentGasTarget {
// If the parent block used more gas than its target, the baseFee should increase.
// max(1, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator)
num.SetUint64(parent.GasUsed() - parentGasTarget)
num.Mul(num, parent.BaseFee())
num.Div(num, denom.SetUint64(parentGasTarget))
num.Div(num, denom.SetUint64(denomUint))
baseFeeDelta := math.BigMax(num, common.Big1)
return num.Add(parent.BaseFee(), baseFeeDelta)
} else {
// Otherwise if the parent block used less gas than its target, the baseFee should decrease.
// max(0, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator)
num.SetUint64(parentGasTarget - parent.GasUsed())
num.Mul(num, parent.BaseFee())
num.Div(num, denom.SetUint64(parentGasTarget))
num.Div(num, denom.SetUint64(denomUint))
baseFee := num.Sub(parent.BaseFee(), num)
return math.BigMax(baseFee, common.Big0)
}
}
func ManuallyEncodeReceipts(receipts types.Receipts, canyonActive bool) [][]byte {
v := uint64(1)
for _, receipt := range receipts {
if receipt.Type == types.DepositTxType {
if canyonActive {
receipt.DepositReceiptVersion = &v
} else {
receipt.DepositReceiptVersion = nil
}
}
}
var out [][]byte
for i := range receipts {
var buf bytes.Buffer
receipts.EncodeIndex(i, &buf)
out = append(out, buf.Bytes())
}
return out
}
type rawReceipts [][]byte
func (s rawReceipts) Len() int { return len(s) }
func (s rawReceipts) EncodeIndex(i int, w *bytes.Buffer) {
w.Write(s[i])
}
func HashList(list [][]byte) common.Hash {
hasher := trie.NewStackTrie(nil)
return types.DeriveSha(rawReceipts(list), hasher)
}
type L2Client interface {
BlockByNumber(context.Context, *big.Int) (*types.Block, error)
CodeAt(context.Context, common.Address, *big.Int) ([]byte, error)
InfoByNumber(context.Context, uint64) (eth.BlockInfo, error)
FetchReceipts(context.Context, common.Hash) (eth.BlockInfo, types.Receipts, error)
}
type Client struct {
*ethclient.Client
*sources.L1Client
}
type Args struct {
Number uint64
Elasticity uint64
Client L2Client
}
func ValidateReceipts(ctx Args, canyonActive bool) error {
block, err := ctx.Client.InfoByNumber(context.Background(), ctx.Number)
if err != nil {
return err
}
_, receipts, err := ctx.Client.FetchReceipts(context.Background(), block.Hash())
if err != nil {
return err
}
have := block.ReceiptHash()
want := HashList(ManuallyEncodeReceipts(receipts, canyonActive))
if have != want {
return fmt.Errorf("Receipts do not look correct. canyonActive: %v. have: %v, want: %v", canyonActive, have, want)
}
return nil
}
func Validate1559Params(ctx Args, canyonActive bool) error {
block, err := ctx.Client.InfoByNumber(context.Background(), ctx.Number)
if err != nil {
return err
}
parent, err := ctx.Client.InfoByNumber(context.Background(), ctx.Number-1)
if err != nil {
return err
}
want := CalcBaseFee(parent, ctx.Elasticity, canyonActive)
have := block.BaseFee()
if have.Cmp(want) != 0 {
return fmt.Errorf("BaseFee does not match. canyonActive: %v. have: %v, want: %v", canyonActive, have, want)
}
return nil
}
func ValidateWithdrawals(ctx Args, canyonActive bool) error {
block, err := ctx.Client.BlockByNumber(context.Background(), new(big.Int).SetUint64(ctx.Number))
if err != nil {
return err
}
if canyonActive && block.Withdrawals() == nil {
return errors.New("No nonwithdrawals in a canyon block")
} else if canyonActive && len(block.Withdrawals()) > 0 {
return errors.New("Withdrawals length is not zero in a canyon block")
} else if !canyonActive && block.Withdrawals() != nil {
return errors.New("Withdrawals in a pre-canyon block")
}
return nil
}
func ValidateCreate2Deployer(ctx Args, canyonActive bool) error {
addr := common.HexToAddress("0x13b0D85CcB8bf860b6b79AF3029fCA081AE9beF2")
code, err := ctx.Client.CodeAt(context.Background(), addr, new(big.Int).SetUint64(ctx.Number))
if err != nil {
return err
}
codeHash := crypto.Keccak256Hash(code)
expectedCodeHash := common.HexToHash("0xb0550b5b431e30d38000efb7107aaa0ade03d48a7198a140edda9d27134468b2")
if canyonActive && codeHash != expectedCodeHash {
return fmt.Errorf("Canyon active but code hash does not match. have: %v, want: %v", codeHash, expectedCodeHash)
} else if !canyonActive && codeHash == expectedCodeHash {
return fmt.Errorf("Canyon not active but code hashes do match. codeHash: %v", codeHash)
}
return nil
}
// CheckActivation takes a function f which determines if a specific block follows the rules of a fork.
func CheckActivation(f func(Args, bool) error, ctx Args, forkActivated bool, valid *bool) {
if err := f(ctx, forkActivated); err != nil {
log.Error("Block did not follow fork rules", "err", err)
*valid = false
}
}
// CheckInactivation takes a function f which determines if a specific block follows the rules of a fork.
// It passes the opposite value of forkActivated and asserts that an error is returned.
func CheckInactivation(f func(Args, bool) error, ctx Args, forkActivated bool, valid *bool) {
if err := f(ctx, !forkActivated); err == nil {
log.Error("Block followed the wrong side of the fork rules")
*valid = false
}
}
func main() {
logger := log.New()
// Define the flag variables
var (
canyonActive bool
number uint64
elasticity uint64
rpcURL string
)
valid := true
// Define and parse the command-line flags
flag.BoolVar(&canyonActive, "canyon", false, "Set this flag to assert canyon behavior")
flag.Uint64Var(&number, "number", 31, "Block number to check")
flag.Uint64Var(&elasticity, "elasticity", 6, "Specify the EIP-1559 elasticity. 6 on mainnet/sepolia. 10 on goerli")
flag.StringVar(&rpcURL, "rpc-url", "http://localhost:8545", "Specify the L2 ETH RPC URL")
// Parse the command-line arguments
flag.Parse()
l2RPC, err := client.NewRPC(context.Background(), logger, rpcURL, client.WithDialBackoff(10))
if err != nil {
log.Crit("Error creating RPC", "err", err)
}
c := &rollup.Config{SeqWindowSize: 10}
l2Cfg := sources.L1ClientDefaultConfig(c, true, sources.RPCKindBasic)
sourceClient, err := sources.NewL1Client(l2RPC, logger, nil, l2Cfg)
if err != nil {
log.Crit("Error creating RPC", "err", err)
}
ethClient, err := ethclient.Dial(rpcURL)
if err != nil {
log.Crit("Error creating RPC", "err", err)
}
client := Client{ethClient, sourceClient}
ctx := Args{
Number: number,
Elasticity: elasticity,
Client: client,
}
CheckActivation(ValidateReceipts, ctx, canyonActive, &valid)
CheckInactivation(ValidateReceipts, ctx, canyonActive, &valid)
CheckActivation(Validate1559Params, ctx, canyonActive, &valid)
// Don't check inactivation for 1559 because at low base fees the two cannot be differentiated
CheckActivation(ValidateWithdrawals, ctx, canyonActive, &valid)
CheckInactivation(ValidateWithdrawals, ctx, canyonActive, &valid)
CheckActivation(ValidateCreate2Deployer, ctx, canyonActive, &valid)
CheckInactivation(ValidateCreate2Deployer, ctx, canyonActive, &valid)
if !valid {
os.Exit(1)
} else if canyonActive {
log.Info(fmt.Sprintf("Successfully validated block %v as a Canyon block", number))
} else {
log.Info(fmt.Sprintf("Successfully validated block %v as a Pre-Canyon block", number))
}
}
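
To make the Canyon change in CalcBaseFee concrete: the base fee change denominator moves from 50 to 250, so a full block moves the base fee one fifth as much per block. A standalone re-derivation of the increase branch with illustrative numbers (a sketch, not the helper above):

package main

import (
	"fmt"
	"math/big"
)

// nextBaseFeeIncrease mirrors the gasUsed > target branch of CalcBaseFee:
// delta = max(1, parentBaseFee * (gasUsed-target) / target / denominator).
func nextBaseFeeIncrease(parentBaseFee *big.Int, gasUsed, gasLimit, elasticity, denominator uint64) *big.Int {
	target := gasLimit / elasticity
	num := new(big.Int).SetUint64(gasUsed - target)
	num.Mul(num, parentBaseFee)
	num.Div(num, new(big.Int).SetUint64(target))
	num.Div(num, new(big.Int).SetUint64(denominator))
	if num.Sign() == 0 {
		num.SetInt64(1)
	}
	return num.Add(num, parentBaseFee)
}

func main() {
	parent := big.NewInt(1_000_000_000) // 1 gwei parent base fee, illustrative
	// Full 30M gas block with elasticity 6 => target is 5M gas.
	fmt.Println("pre-Canyon (denom 50):  ", nextBaseFeeIncrease(parent, 30_000_000, 30_000_000, 6, 50))  // 1100000000
	fmt.Println("post-Canyon (denom 250):", nextBaseFeeIncrease(parent, 30_000_000, 30_000_000, 6, 250)) // 1020000000
}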
...@@ -39,7 +39,7 @@ type Agent struct { ...@@ -39,7 +39,7 @@ type Agent struct {
log log.Logger log log.Logger
} }
func NewAgent(m metrics.Metricer, loader ClaimLoader, maxDepth int, trace types.TraceProvider, responder Responder, updater types.OracleUpdater, agreeWithProposedOutput bool, log log.Logger) *Agent { func NewAgent(m metrics.Metricer, loader ClaimLoader, maxDepth int, trace types.TraceAccessor, responder Responder, updater types.OracleUpdater, agreeWithProposedOutput bool, log log.Logger) *Agent {
return &Agent{ return &Agent{
metrics: m, metrics: m,
solver: solver.NewGameSolver(maxDepth, trace), solver: solver.NewGameSolver(maxDepth, trace),
......
...@@ -5,6 +5,7 @@ import ( ...@@ -5,6 +5,7 @@ import (
"errors" "errors"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
...@@ -112,10 +113,10 @@ func setupTestAgent(t *testing.T, agreeWithProposedOutput bool) (*Agent, *stubCl ...@@ -112,10 +113,10 @@ func setupTestAgent(t *testing.T, agreeWithProposedOutput bool) (*Agent, *stubCl
logger := testlog.Logger(t, log.LvlInfo) logger := testlog.Logger(t, log.LvlInfo)
claimLoader := &stubClaimLoader{} claimLoader := &stubClaimLoader{}
depth := 4 depth := 4
trace := alphabet.NewTraceProvider("abcd", uint64(depth)) provider := alphabet.NewTraceProvider("abcd", uint64(depth))
responder := &stubResponder{} responder := &stubResponder{}
updater := &stubUpdater{} updater := &stubUpdater{}
agent := NewAgent(metrics.NoopMetrics, claimLoader, depth, trace, responder, updater, agreeWithProposedOutput, logger) agent := NewAgent(metrics.NoopMetrics, claimLoader, depth, trace.NewSimpleTraceAccessor(provider), responder, updater, agreeWithProposedOutput, logger)
return agent, claimLoader, responder return agent, claimLoader, responder
} }
......
...@@ -19,6 +19,8 @@ const ( ...@@ -19,6 +19,8 @@ const (
methodStatus = "status" methodStatus = "status"
methodClaimCount = "claimDataLen" methodClaimCount = "claimDataLen"
methodClaim = "claimData" methodClaim = "claimData"
methodL1Head = "l1Head"
methodProposals = "proposals"
) )
type FaultDisputeGameContract struct { type FaultDisputeGameContract struct {
...@@ -26,6 +28,12 @@ type FaultDisputeGameContract struct { ...@@ -26,6 +28,12 @@ type FaultDisputeGameContract struct {
contract *batching.BoundContract contract *batching.BoundContract
} }
type Proposal struct {
Index *big.Int
L2BlockNumber *big.Int
OutputRoot common.Hash
}
func NewFaultDisputeGameContract(addr common.Address, caller *batching.MultiCaller) (*FaultDisputeGameContract, error) { func NewFaultDisputeGameContract(addr common.Address, caller *batching.MultiCaller) (*FaultDisputeGameContract, error) {
fdgAbi, err := bindings.FaultDisputeGameMetaData.GetAbi() fdgAbi, err := bindings.FaultDisputeGameMetaData.GetAbi()
if err != nil { if err != nil {
...@@ -62,6 +70,27 @@ func (f *FaultDisputeGameContract) GetAbsolutePrestateHash(ctx context.Context) ...@@ -62,6 +70,27 @@ func (f *FaultDisputeGameContract) GetAbsolutePrestateHash(ctx context.Context)
return result.GetHash(0), nil return result.GetHash(0), nil
} }
func (f *FaultDisputeGameContract) GetL1Head(ctx context.Context) (common.Hash, error) {
result, err := f.multiCaller.SingleCall(ctx, batching.BlockLatest, f.contract.Call(methodL1Head))
if err != nil {
return common.Hash{}, fmt.Errorf("failed to fetch L1 head: %w", err)
}
return result.GetHash(0), nil
}
// GetProposals returns the agreed and disputed proposals
func (f *FaultDisputeGameContract) GetProposals(ctx context.Context) (Proposal, Proposal, error) {
result, err := f.multiCaller.SingleCall(ctx, batching.BlockLatest, f.contract.Call(methodProposals))
if err != nil {
return Proposal{}, Proposal{}, fmt.Errorf("failed to fetch proposals: %w", err)
}
var agreed, disputed Proposal
result.GetStruct(0, &agreed)
result.GetStruct(1, &disputed)
return agreed, disputed, nil
}
func (f *FaultDisputeGameContract) GetStatus(ctx context.Context) (gameTypes.GameStatus, error) { func (f *FaultDisputeGameContract) GetStatus(ctx context.Context) (gameTypes.GameStatus, error) {
result, err := f.multiCaller.SingleCall(ctx, batching.BlockLatest, f.contract.Call(methodStatus)) result, err := f.multiCaller.SingleCall(ctx, batching.BlockLatest, f.contract.Call(methodStatus))
if err != nil { if err != nil {
......
...@@ -60,6 +60,13 @@ func TestSimpleGetters(t *testing.T) { ...@@ -60,6 +60,13 @@ func TestSimpleGetters(t *testing.T) {
return game.GetClaimCount(context.Background()) return game.GetClaimCount(context.Background())
}, },
}, },
{
method: methodL1Head,
result: common.Hash{0xdd, 0xbb},
call: func(game *FaultDisputeGameContract) (any, error) {
return game.GetL1Head(context.Background())
},
},
} }
for _, test := range tests { for _, test := range tests {
test := test test := test
...@@ -77,6 +84,33 @@ func TestSimpleGetters(t *testing.T) { ...@@ -77,6 +84,33 @@ func TestSimpleGetters(t *testing.T) {
} }
} }
func TestGetProposals(t *testing.T) {
stubRpc, game := setup(t)
agreedIndex := big.NewInt(5)
agreedBlockNum := big.NewInt(6)
agreedRoot := common.Hash{0xaa}
disputedIndex := big.NewInt(7)
disputedBlockNum := big.NewInt(8)
disputedRoot := common.Hash{0xdd}
agreed := Proposal{
Index: agreedIndex,
L2BlockNumber: agreedBlockNum,
OutputRoot: agreedRoot,
}
disputed := Proposal{
Index: disputedIndex,
L2BlockNumber: disputedBlockNum,
OutputRoot: disputedRoot,
}
stubRpc.SetResponse(methodProposals, batching.BlockLatest, []interface{}{}, []interface{}{
agreed, disputed,
})
actualAgreed, actualDisputed, err := game.GetProposals(context.Background())
require.NoError(t, err)
require.Equal(t, agreed, actualAgreed)
require.Equal(t, disputed, actualDisputed)
}
func TestGetClaim(t *testing.T) { func TestGetClaim(t *testing.T) {
stubRpc, game := setup(t) stubRpc, game := setup(t)
idx := big.NewInt(2) idx := big.NewInt(2)
......
...@@ -8,6 +8,7 @@ import ( ...@@ -8,6 +8,7 @@ import (
"github.com/ethereum-optimism/optimism/op-challenger/config" "github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/responder" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/responder"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types"
"github.com/ethereum-optimism/optimism/op-challenger/metrics" "github.com/ethereum-optimism/optimism/op-challenger/metrics"
...@@ -33,7 +34,7 @@ type GamePlayer struct { ...@@ -33,7 +34,7 @@ type GamePlayer struct {
status gameTypes.GameStatus status gameTypes.GameStatus
} }
type resourceCreator func(addr common.Address, gameDepth uint64, dir string) (types.TraceProvider, types.OracleUpdater, error) type resourceCreator func(addr common.Address, contract *contracts.FaultDisputeGameContract, gameDepth uint64, dir string) (types.TraceProvider, types.OracleUpdater, error)
func NewGamePlayer( func NewGamePlayer(
ctx context.Context, ctx context.Context,
...@@ -76,7 +77,7 @@ func NewGamePlayer( ...@@ -76,7 +77,7 @@ func NewGamePlayer(
return nil, fmt.Errorf("failed to fetch the game depth: %w", err) return nil, fmt.Errorf("failed to fetch the game depth: %w", err)
} }
provider, updater, err := creator(addr, gameDepth, dir) provider, updater, err := creator(addr, loader, gameDepth, dir)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create trace provider: %w", err) return nil, fmt.Errorf("failed to create trace provider: %w", err)
} }
...@@ -90,8 +91,10 @@ func NewGamePlayer( ...@@ -90,8 +91,10 @@ func NewGamePlayer(
return nil, fmt.Errorf("failed to create the responder: %w", err) return nil, fmt.Errorf("failed to create the responder: %w", err)
} }
accessor := trace.NewSimpleTraceAccessor(provider)
agent := NewAgent(m, loader, int(gameDepth), accessor, responder, updater, cfg.AgreeWithProposedOutput, logger)
return &GamePlayer{ return &GamePlayer{
act: NewAgent(m, loader, int(gameDepth), provider, responder, updater, cfg.AgreeWithProposedOutput, logger).Act, act: agent.Act,
agreeWithProposedOutput: cfg.AgreeWithProposedOutput, agreeWithProposedOutput: cfg.AgreeWithProposedOutput,
loader: loader, loader: loader,
logger: logger, logger: logger,
......
...@@ -5,6 +5,7 @@ import ( ...@@ -5,6 +5,7 @@ import (
"fmt" "fmt"
"github.com/ethereum-optimism/optimism/op-challenger/config" "github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/alphabet" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/alphabet"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/cannon" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/cannon"
faultTypes "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" faultTypes "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
...@@ -36,8 +37,8 @@ func RegisterGameTypes( ...@@ -36,8 +37,8 @@ func RegisterGameTypes(
client *ethclient.Client, client *ethclient.Client,
) { ) {
if cfg.TraceTypeEnabled(config.TraceTypeCannon) { if cfg.TraceTypeEnabled(config.TraceTypeCannon) {
resourceCreator := func(addr common.Address, gameDepth uint64, dir string) (faultTypes.TraceProvider, faultTypes.OracleUpdater, error) { resourceCreator := func(addr common.Address, contract *contracts.FaultDisputeGameContract, gameDepth uint64, dir string) (faultTypes.TraceProvider, faultTypes.OracleUpdater, error) {
provider, err := cannon.NewTraceProvider(ctx, logger, m, cfg, client, dir, addr, gameDepth) provider, err := cannon.NewTraceProvider(ctx, logger, m, cfg, contract, dir, gameDepth)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("create cannon trace provider: %w", err) return nil, nil, fmt.Errorf("create cannon trace provider: %w", err)
} }
...@@ -53,7 +54,7 @@ func RegisterGameTypes( ...@@ -53,7 +54,7 @@ func RegisterGameTypes(
registry.RegisterGameType(cannonGameType, playerCreator) registry.RegisterGameType(cannonGameType, playerCreator)
} }
if cfg.TraceTypeEnabled(config.TraceTypeAlphabet) { if cfg.TraceTypeEnabled(config.TraceTypeAlphabet) {
resourceCreator := func(addr common.Address, gameDepth uint64, dir string) (faultTypes.TraceProvider, faultTypes.OracleUpdater, error) { resourceCreator := func(addr common.Address, contract *contracts.FaultDisputeGameContract, gameDepth uint64, dir string) (faultTypes.TraceProvider, faultTypes.OracleUpdater, error) {
provider := alphabet.NewTraceProvider(cfg.AlphabetTrace, gameDepth) provider := alphabet.NewTraceProvider(cfg.AlphabetTrace, gameDepth)
updater := alphabet.NewOracleUpdater(logger) updater := alphabet.NewOracleUpdater(logger)
return provider, updater, nil return provider, updater, nil
......
...@@ -12,7 +12,7 @@ type GameSolver struct { ...@@ -12,7 +12,7 @@ type GameSolver struct {
claimSolver *claimSolver claimSolver *claimSolver
} }
func NewGameSolver(gameDepth int, trace types.TraceProvider) *GameSolver { func NewGameSolver(gameDepth int, trace types.TraceAccessor) *GameSolver {
return &GameSolver{ return &GameSolver{
claimSolver: newClaimSolver(gameDepth, trace), claimSolver: newClaimSolver(gameDepth, trace),
} }
......
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
"testing" "testing"
faulttest "github.com/ethereum-optimism/optimism/op-challenger/game/fault/test" faulttest "github.com/ethereum-optimism/optimism/op-challenger/game/fault/test"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
...@@ -113,7 +114,7 @@ func TestCalculateNextActions(t *testing.T) { ...@@ -113,7 +114,7 @@ func TestCalculateNextActions(t *testing.T) {
i, claim.Position.ToGIndex(), claim.Position.TraceIndex(maxDepth), claim.ParentContractIndex, claim.Countered, claim.Value) i, claim.Position.ToGIndex(), claim.Position.TraceIndex(maxDepth), claim.ParentContractIndex, claim.Countered, claim.Value)
} }
solver := NewGameSolver(maxDepth, claimBuilder.CorrectTraceProvider()) solver := NewGameSolver(maxDepth, trace.NewSimpleTraceAccessor(claimBuilder.CorrectTraceProvider()))
actions, err := solver.CalculateNextActions(context.Background(), game) actions, err := solver.CalculateNextActions(context.Background(), game)
require.NoError(t, err) require.NoError(t, err)
for i, action := range actions { for i, action := range actions {
......
...@@ -7,7 +7,6 @@ import ( ...@@ -7,7 +7,6 @@ import (
"fmt" "fmt"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum/go-ethereum/common"
) )
var ( var (
...@@ -18,14 +17,14 @@ var ( ...@@ -18,14 +17,14 @@ var (
// claimSolver uses a [TraceProvider] to determine the moves to make in a dispute game. // claimSolver uses a [TraceProvider] to determine the moves to make in a dispute game.
type claimSolver struct { type claimSolver struct {
trace types.TraceProvider trace types.TraceAccessor
gameDepth int gameDepth int
} }
// newClaimSolver creates a new [claimSolver] using the provided [TraceProvider]. // newClaimSolver creates a new [claimSolver] using the provided [TraceProvider].
func newClaimSolver(gameDepth int, traceProvider types.TraceProvider) *claimSolver { func newClaimSolver(gameDepth int, trace types.TraceAccessor) *claimSolver {
return &claimSolver{ return &claimSolver{
traceProvider, trace,
gameDepth, gameDepth,
} }
} }
...@@ -53,14 +52,14 @@ func (s *claimSolver) NextMove(ctx context.Context, claim types.Claim, game type ...@@ -53,14 +52,14 @@ func (s *claimSolver) NextMove(ctx context.Context, claim types.Claim, game type
} }
} }
agree, err := s.agreeWithClaim(ctx, claim.ClaimData) agree, err := s.agreeWithClaim(ctx, game, claim)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if agree { if agree {
return s.defend(ctx, claim) return s.defend(ctx, game, claim)
} else { } else {
return s.attack(ctx, claim) return s.attack(ctx, game, claim)
} }
} }
...@@ -93,28 +92,25 @@ func (s *claimSolver) AttemptStep(ctx context.Context, game types.Game, claim ty ...@@ -93,28 +92,25 @@ func (s *claimSolver) AttemptStep(ctx context.Context, game types.Game, claim ty
return StepData{}, ErrStepIgnoreInvalidPath return StepData{}, ErrStepIgnoreInvalidPath
} }
claimCorrect, err := s.agreeWithClaim(ctx, claim.ClaimData) claimCorrect, err := s.agreeWithClaim(ctx, game, claim)
if err != nil { if err != nil {
return StepData{}, err return StepData{}, err
} }
var preState []byte
var proofData []byte
var oracleData *types.PreimageOracleData
var position types.Position
if !claimCorrect { if !claimCorrect {
// Attack the claim by executing step index, so we need to get the pre-state of that index // Attack the claim by executing step index, so we need to get the pre-state of that index
preState, proofData, oracleData, err = s.trace.GetStepData(ctx, claim.Position) position = claim.Position
if err != nil {
return StepData{}, err
}
} else { } else {
// We agree with the claim so Defend and use this claim as the starting point to // Defend and use this claim as the starting point to execute the step after.
// execute the step after. Thus we need the pre-state of the next step. // Thus, we need the pre-state of the next step.
preState, proofData, oracleData, err = s.trace.GetStepData(ctx, claim.MoveRight()) position = claim.Position.MoveRight()
}
preState, proofData, oracleData, err := s.trace.GetStepData(ctx, game, claim, position)
if err != nil { if err != nil {
return StepData{}, err return StepData{}, err
} }
}
return StepData{ return StepData{
LeafClaim: claim, LeafClaim: claim,
...@@ -126,9 +122,9 @@ func (s *claimSolver) AttemptStep(ctx context.Context, game types.Game, claim ty ...@@ -126,9 +122,9 @@ func (s *claimSolver) AttemptStep(ctx context.Context, game types.Game, claim ty
} }
// attack returns a response that attacks the claim. // attack returns a response that attacks the claim.
func (s *claimSolver) attack(ctx context.Context, claim types.Claim) (*types.Claim, error) { func (s *claimSolver) attack(ctx context.Context, game types.Game, claim types.Claim) (*types.Claim, error) {
position := claim.Attack() position := claim.Attack()
value, err := s.traceAtPosition(ctx, position) value, err := s.trace.Get(ctx, game, claim, position)
if err != nil { if err != nil {
return nil, fmt.Errorf("attack claim: %w", err) return nil, fmt.Errorf("attack claim: %w", err)
} }
...@@ -139,12 +135,12 @@ func (s *claimSolver) attack(ctx context.Context, claim types.Claim) (*types.Cla ...@@ -139,12 +135,12 @@ func (s *claimSolver) attack(ctx context.Context, claim types.Claim) (*types.Cla
} }
// defend returns a response that defends the claim. // defend returns a response that defends the claim.
func (s *claimSolver) defend(ctx context.Context, claim types.Claim) (*types.Claim, error) { func (s *claimSolver) defend(ctx context.Context, game types.Game, claim types.Claim) (*types.Claim, error) {
if claim.IsRoot() { if claim.IsRoot() {
return nil, nil return nil, nil
} }
position := claim.Defend() position := claim.Defend()
value, err := s.traceAtPosition(ctx, position) value, err := s.trace.Get(ctx, game, claim, position)
if err != nil { if err != nil {
return nil, fmt.Errorf("defend claim: %w", err) return nil, fmt.Errorf("defend claim: %w", err)
} }
...@@ -155,19 +151,14 @@ func (s *claimSolver) defend(ctx context.Context, claim types.Claim) (*types.Cla ...@@ -155,19 +151,14 @@ func (s *claimSolver) defend(ctx context.Context, claim types.Claim) (*types.Cla
} }
// agreeWithClaim returns true if the claim is correct according to the internal [TraceProvider]. // agreeWithClaim returns true if the claim is correct according to the internal [TraceProvider].
func (s *claimSolver) agreeWithClaim(ctx context.Context, claim types.ClaimData) (bool, error) { func (s *claimSolver) agreeWithClaim(ctx context.Context, game types.Game, claim types.Claim) (bool, error) {
ourValue, err := s.traceAtPosition(ctx, claim.Position) ourValue, err := s.trace.Get(ctx, game, claim, claim.Position)
return bytes.Equal(ourValue[:], claim.Value[:]), err return bytes.Equal(ourValue[:], claim.Value[:]), err
} }
// traceAtPosition returns the [common.Hash] from internal [TraceProvider] at the given [Position].
func (s *claimSolver) traceAtPosition(ctx context.Context, p types.Position) (common.Hash, error) {
return s.trace.Get(ctx, p)
}
// agreeWithClaimPath returns true if the every other claim in the path to root is correct according to the internal [TraceProvider]. // agreeWithClaimPath returns true if the every other claim in the path to root is correct according to the internal [TraceProvider].
func (s *claimSolver) agreeWithClaimPath(ctx context.Context, game types.Game, claim types.Claim) (bool, error) { func (s *claimSolver) agreeWithClaimPath(ctx context.Context, game types.Game, claim types.Claim) (bool, error) {
agree, err := s.agreeWithClaim(ctx, claim.ClaimData) agree, err := s.agreeWithClaim(ctx, game, claim)
if err != nil { if err != nil {
return false, err return false, err
} }
......
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
"testing" "testing"
faulttest "github.com/ethereum-optimism/optimism/op-challenger/game/fault/test" faulttest "github.com/ethereum-optimism/optimism/op-challenger/game/fault/test"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -161,7 +162,7 @@ func TestAttemptStep(t *testing.T) { ...@@ -161,7 +162,7 @@ func TestAttemptStep(t *testing.T) {
t.Run(tableTest.name, func(t *testing.T) { t.Run(tableTest.name, func(t *testing.T) {
builder := claimBuilder.GameBuilder(tableTest.agreeWithOutputRoot, !tableTest.agreeWithOutputRoot) builder := claimBuilder.GameBuilder(tableTest.agreeWithOutputRoot, !tableTest.agreeWithOutputRoot)
tableTest.setupGame(builder) tableTest.setupGame(builder)
alphabetSolver := newClaimSolver(maxDepth, claimBuilder.CorrectTraceProvider()) alphabetSolver := newClaimSolver(maxDepth, trace.NewSimpleTraceAccessor(claimBuilder.CorrectTraceProvider()))
game := builder.Game game := builder.Game
claims := game.Claims() claims := game.Claims()
lastClaim := claims[len(claims)-1] lastClaim := claims[len(claims)-1]
......
package trace
import (
"context"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum/go-ethereum/common"
)
type ProviderCreator func(ctx context.Context, pre types.Claim, post types.Claim) (types.TraceProvider, error)
func NewSimpleTraceAccessor(trace types.TraceProvider) *Accessor {
selector := func(_ context.Context, _ types.Game, _ types.Claim, _ types.Position) (types.TraceProvider, error) {
return trace, nil
}
return &Accessor{selector}
}
type Accessor struct {
selector func(ctx context.Context, game types.Game, ref types.Claim, pos types.Position) (types.TraceProvider, error)
}
func (t *Accessor) Get(ctx context.Context, game types.Game, ref types.Claim, pos types.Position) (common.Hash, error) {
provider, err := t.selector(ctx, game, ref, pos)
if err != nil {
return common.Hash{}, err
}
return provider.Get(ctx, pos)
}
func (t *Accessor) GetStepData(ctx context.Context, game types.Game, ref types.Claim, pos types.Position) (prestate []byte, proofData []byte, preimageData *types.PreimageOracleData, err error) {
provider, err := t.selector(ctx, game, ref, pos)
if err != nil {
return nil, nil, nil, err
}
return provider.GetStepData(ctx, pos)
}
var _ types.TraceAccessor = (*Accessor)(nil)
package trace
import (
"context"
"fmt"
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/test"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/alphabet"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/stretchr/testify/require"
)
func TestAccessor_UsesSelector(t *testing.T) {
ctx := context.Background()
depth := uint64(4)
provider1 := test.NewAlphabetWithProofProvider(t, int(depth), nil)
provider2 := alphabet.NewTraceProvider("qrstuv", depth)
claim := types.Claim{}
game := types.NewGameState(true, []types.Claim{claim}, depth)
pos1 := types.NewPositionFromGIndex(big.NewInt(4))
pos2 := types.NewPositionFromGIndex(big.NewInt(6))
accessor := &Accessor{
selector: func(ctx context.Context, actualGame types.Game, ref types.Claim, pos types.Position) (types.TraceProvider, error) {
require.Equal(t, game, actualGame)
require.Equal(t, claim, ref)
if pos == pos1 {
return provider1, nil
} else if pos == pos2 {
return provider2, nil
}
return nil, fmt.Errorf("incorrect position requested: %v", pos)
},
}
t.Run("Get", func(t *testing.T) {
actual, err := accessor.Get(ctx, game, claim, pos1)
require.NoError(t, err)
expected, err := provider1.Get(ctx, pos1)
require.NoError(t, err)
require.Equal(t, expected, actual)
actual, err = accessor.Get(ctx, game, claim, pos2)
require.NoError(t, err)
expected, err = provider2.Get(ctx, pos2)
require.NoError(t, err)
require.Equal(t, expected, actual)
})
t.Run("GetStepData", func(t *testing.T) {
actualPrestate, actualProofData, actualPreimageData, err := accessor.GetStepData(ctx, game, claim, pos1)
require.NoError(t, err)
expectedPrestate, expectedProofData, expectedPreimageData, err := provider1.GetStepData(ctx, pos1)
require.NoError(t, err)
require.Equal(t, expectedPrestate, actualPrestate)
require.Equal(t, expectedProofData, actualProofData)
require.Equal(t, expectedPreimageData, actualPreimageData)
actualPrestate, actualProofData, actualPreimageData, err = accessor.GetStepData(ctx, game, claim, pos2)
require.NoError(t, err)
expectedPrestate, expectedProofData, expectedPreimageData, err = provider2.GetStepData(ctx, pos2)
require.NoError(t, err)
require.Equal(t, expectedPrestate, actualPrestate)
require.Equal(t, expectedProofData, actualProofData)
require.Equal(t, expectedPreimageData, actualPreimageData)
})
}
...@@ -5,8 +5,7 @@ import ( ...@@ -5,8 +5,7 @@ import (
"fmt" "fmt"
"math/big" "math/big"
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
ethtypes "github.com/ethereum/go-ethereum/core/types" ethtypes "github.com/ethereum/go-ethereum/core/types"
) )
...@@ -25,26 +24,20 @@ type L2DataSource interface { ...@@ -25,26 +24,20 @@ type L2DataSource interface {
} }
type GameInputsSource interface { type GameInputsSource interface {
L1Head(opts *bind.CallOpts) ([32]byte, error) GetL1Head(ctx context.Context) (common.Hash, error)
Proposals(opts *bind.CallOpts) (struct { GetProposals(ctx context.Context) (agreed contracts.Proposal, disputed contracts.Proposal, err error)
Starting bindings.IFaultDisputeGameOutputProposal
Disputed bindings.IFaultDisputeGameOutputProposal
}, error)
} }
func fetchLocalInputs(ctx context.Context, gameAddr common.Address, caller GameInputsSource, l2Client L2DataSource) (LocalGameInputs, error) { func fetchLocalInputs(ctx context.Context, caller GameInputsSource, l2Client L2DataSource) (LocalGameInputs, error) {
opts := &bind.CallOpts{Context: ctx} l1Head, err := caller.GetL1Head(ctx)
l1Head, err := caller.L1Head(opts)
if err != nil { if err != nil {
return LocalGameInputs{}, fmt.Errorf("fetch L1 head for game %v: %w", gameAddr, err) return LocalGameInputs{}, fmt.Errorf("fetch L1 head: %w", err)
} }
proposals, err := caller.Proposals(opts) agreedOutput, claimedOutput, err := caller.GetProposals(ctx)
if err != nil { if err != nil {
return LocalGameInputs{}, fmt.Errorf("fetch proposals: %w", err) return LocalGameInputs{}, fmt.Errorf("fetch proposals: %w", err)
} }
claimedOutput := proposals.Disputed
agreedOutput := proposals.Starting
agreedHeader, err := l2Client.HeaderByNumber(ctx, agreedOutput.L2BlockNumber) agreedHeader, err := l2Client.HeaderByNumber(ctx, agreedOutput.L2BlockNumber)
if err != nil { if err != nil {
return LocalGameInputs{}, fmt.Errorf("fetch L2 block header %v: %w", agreedOutput.L2BlockNumber, err) return LocalGameInputs{}, fmt.Errorf("fetch L2 block header %v: %w", agreedOutput.L2BlockNumber, err)
......
...@@ -5,9 +5,8 @@ import ( ...@@ -5,9 +5,8 @@ import (
"math/big" "math/big"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts"
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
ethtypes "github.com/ethereum/go-ethereum/core/types" ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -15,15 +14,14 @@ import ( ...@@ -15,15 +14,14 @@ import (
func TestFetchLocalInputs(t *testing.T) { func TestFetchLocalInputs(t *testing.T) {
ctx := context.Background() ctx := context.Background()
gameAddr := common.Address{0xab} contract := &mockGameInputsSource{
l1Client := &mockGameInputsSource{
l1Head: common.Hash{0xcc}, l1Head: common.Hash{0xcc},
starting: bindings.IFaultDisputeGameOutputProposal{ starting: contracts.Proposal{
Index: big.NewInt(6), Index: big.NewInt(6),
L2BlockNumber: big.NewInt(2222), L2BlockNumber: big.NewInt(2222),
OutputRoot: common.Hash{0xdd}, OutputRoot: common.Hash{0xdd},
}, },
disputed: bindings.IFaultDisputeGameOutputProposal{ disputed: contracts.Proposal{
Index: big.NewInt(7), Index: big.NewInt(7),
L2BlockNumber: big.NewInt(3333), L2BlockNumber: big.NewInt(3333),
OutputRoot: common.Hash{0xee}, OutputRoot: common.Hash{0xee},
...@@ -32,41 +30,32 @@ func TestFetchLocalInputs(t *testing.T) { ...@@ -32,41 +30,32 @@ func TestFetchLocalInputs(t *testing.T) {
l2Client := &mockL2DataSource{ l2Client := &mockL2DataSource{
chainID: big.NewInt(88422), chainID: big.NewInt(88422),
header: ethtypes.Header{ header: ethtypes.Header{
Number: l1Client.starting.L2BlockNumber, Number: contract.starting.L2BlockNumber,
}, },
} }
inputs, err := fetchLocalInputs(ctx, gameAddr, l1Client, l2Client) inputs, err := fetchLocalInputs(ctx, contract, l2Client)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, l1Client.l1Head, inputs.L1Head) require.Equal(t, contract.l1Head, inputs.L1Head)
require.Equal(t, l2Client.header.Hash(), inputs.L2Head) require.Equal(t, l2Client.header.Hash(), inputs.L2Head)
require.EqualValues(t, l1Client.starting.OutputRoot, inputs.L2OutputRoot) require.EqualValues(t, contract.starting.OutputRoot, inputs.L2OutputRoot)
require.EqualValues(t, l1Client.disputed.OutputRoot, inputs.L2Claim) require.EqualValues(t, contract.disputed.OutputRoot, inputs.L2Claim)
require.Equal(t, l1Client.disputed.L2BlockNumber, inputs.L2BlockNumber) require.Equal(t, contract.disputed.L2BlockNumber, inputs.L2BlockNumber)
} }
type mockGameInputsSource struct { type mockGameInputsSource struct {
l1Head common.Hash l1Head common.Hash
starting bindings.IFaultDisputeGameOutputProposal starting contracts.Proposal
disputed bindings.IFaultDisputeGameOutputProposal disputed contracts.Proposal
} }
func (s *mockGameInputsSource) L1Head(opts *bind.CallOpts) ([32]byte, error) { func (s *mockGameInputsSource) GetL1Head(_ context.Context) (common.Hash, error) {
return s.l1Head, nil return s.l1Head, nil
} }
func (s *mockGameInputsSource) Proposals(opts *bind.CallOpts) (struct { func (s *mockGameInputsSource) GetProposals(_ context.Context) (contracts.Proposal, contracts.Proposal, error) {
Starting bindings.IFaultDisputeGameOutputProposal return s.starting, s.disputed, nil
Disputed bindings.IFaultDisputeGameOutputProposal
}, error) {
return struct {
Starting bindings.IFaultDisputeGameOutputProposal
Disputed bindings.IFaultDisputeGameOutputProposal
}{
Starting: s.starting,
Disputed: s.disputed,
}, nil
} }
type mockL2DataSource struct { type mockL2DataSource struct {
......
...@@ -8,11 +8,10 @@ import ( ...@@ -8,11 +8,10 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-challenger/config" "github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum-optimism/optimism/op-service/ioutil" "github.com/ethereum-optimism/optimism/op-service/ioutil"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
...@@ -56,17 +55,13 @@ type CannonTraceProvider struct { ...@@ -56,17 +55,13 @@ type CannonTraceProvider struct {
lastStep uint64 lastStep uint64
} }
func NewTraceProvider(ctx context.Context, logger log.Logger, m CannonMetricer, cfg *config.Config, l1Client bind.ContractCaller, dir string, gameAddr common.Address, gameDepth uint64) (*CannonTraceProvider, error) { func NewTraceProvider(ctx context.Context, logger log.Logger, m CannonMetricer, cfg *config.Config, gameContract *contracts.FaultDisputeGameContract, dir string, gameDepth uint64) (*CannonTraceProvider, error) {
l2Client, err := ethclient.DialContext(ctx, cfg.CannonL2) l2Client, err := ethclient.DialContext(ctx, cfg.CannonL2)
if err != nil { if err != nil {
return nil, fmt.Errorf("dial l2 client %v: %w", cfg.CannonL2, err) return nil, fmt.Errorf("dial l2 client %v: %w", cfg.CannonL2, err)
} }
defer l2Client.Close() // Not needed after fetching the inputs defer l2Client.Close() // Not needed after fetching the inputs
gameCaller, err := bindings.NewFaultDisputeGameCaller(gameAddr, l1Client) localInputs, err := fetchLocalInputs(ctx, gameContract, l2Client)
if err != nil {
return nil, fmt.Errorf("create caller for game %v: %w", gameAddr, err)
}
localInputs, err := fetchLocalInputs(ctx, gameAddr, gameCaller, l2Client)
if err != nil { if err != nil {
return nil, fmt.Errorf("fetch local game inputs: %w", err) return nil, fmt.Errorf("fetch local game inputs: %w", err)
} }
......
...@@ -57,6 +57,18 @@ type OracleUpdater interface { ...@@ -57,6 +57,18 @@ type OracleUpdater interface {
UpdateOracle(ctx context.Context, data *PreimageOracleData) error UpdateOracle(ctx context.Context, data *PreimageOracleData) error
} }
// TraceAccessor defines an interface to request data from a TraceProvider with additional context for the game position.
// This can be used to implement split games where lower layers of the game may have different values depending on claims
// at higher levels in the game.
type TraceAccessor interface {
// Get returns the claim value at the requested position, evaluated in the context of the specified claim (ref).
Get(ctx context.Context, game Game, ref Claim, pos Position) (common.Hash, error)
// GetStepData returns the data required to execute the step at the specified position,
// evaluated in the context of the specified claim (ref).
GetStepData(ctx context.Context, game Game, ref Claim, pos Position) (prestate []byte, proofData []byte, preimageData *PreimageOracleData, err error)
}
// TraceProvider is a generic way to get a claim value at a specific step in the trace. // TraceProvider is a generic way to get a claim value at a specific step in the trace.
type TraceProvider interface { type TraceProvider interface {
// Get returns the claim value at the requested index. // Get returns the claim value at the requested index.
......
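The selector-driven Accessor exercised in TestAccessor_UsesSelector above is one way to satisfy this new TraceAccessor interface. Below is a minimal illustrative sketch (not part of this change; the selectorAccessor type, its field names and the package placement are assumptions) of an accessor that picks a TraceProvider per claim/position and delegates to it:

package trace // illustrative placement only, not part of the commit

import (
	"context"

	"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
	"github.com/ethereum/go-ethereum/common"
)

// selectorAccessor resolves a TraceProvider per (claim, position) and forwards calls to it.
type selectorAccessor struct {
	selector func(ctx context.Context, game types.Game, ref types.Claim, pos types.Position) (types.TraceProvider, error)
}

// Get returns the claim value at pos, using the provider chosen for the reference claim.
func (a *selectorAccessor) Get(ctx context.Context, game types.Game, ref types.Claim, pos types.Position) (common.Hash, error) {
	provider, err := a.selector(ctx, game, ref, pos)
	if err != nil {
		return common.Hash{}, err
	}
	return provider.Get(ctx, pos)
}

// GetStepData returns the prestate, proof and preimage data needed to execute the step at pos.
func (a *selectorAccessor) GetStepData(ctx context.Context, game types.Game, ref types.Claim, pos types.Position) ([]byte, []byte, *types.PreimageOracleData, error) {
	provider, err := a.selector(ctx, game, ref, pos)
	if err != nil {
		return nil, nil, nil, err
	}
	return provider.GetStepData(ctx, pos)
}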
...@@ -23,10 +23,6 @@ test-http: pre-test ...@@ -23,10 +23,6 @@ test-http: pre-test
OP_E2E_USE_HTTP=true $(go_test) $(go_test_flags) ./... OP_E2E_USE_HTTP=true $(go_test) $(go_test_flags) ./...
.PHONY: test-http .PHONY: test-http
test-span-batch: pre-test
OP_E2E_USE_SPAN_BATCH=true $(go_test) $(go_test_flags) ./...
.PHONY: test-span-batch
cannon-prestate: cannon-prestate:
make -C .. cannon-prestate make -C .. cannon-prestate
.PHONY: cannon-prestate .PHONY: cannon-prestate
......
...@@ -6,20 +6,47 @@ import ( ...@@ -6,20 +6,47 @@ import (
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
// TestBatchInLastPossibleBlocks tests that the derivation pipeline // TestBlockTimeBatchType runs each blocktime-related test case in singular batch mode and span batch mode.
func TestBlockTimeBatchType(t *testing.T) {
tests := []struct {
name string
f func(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64)
}{
{"BatchInLastPossibleBlocks", BatchInLastPossibleBlocks},
{"LargeL1Gaps", LargeL1Gaps},
}
for _, test := range tests {
test := test
t.Run(test.name+"_SingularBatch", func(t *testing.T) {
test.f(t, nil)
})
}
spanBatchTimeOffset := hexutil.Uint64(0)
for _, test := range tests {
test := test
t.Run(test.name+"_SpanBatch", func(t *testing.T) {
test.f(t, &spanBatchTimeOffset)
})
}
}
// BatchInLastPossibleBlocks tests that the derivation pipeline
// accepts a batch that is included in the last possible L1 block // accepts a batch that is included in the last possible L1 block
// where there are also no other batches included in the sequence // where there are also no other batches included in the sequence
// window. // window.
// This is a regression test against the bug fixed in PR #4566 // This is a regression test against the bug fixed in PR #4566
func TestBatchInLastPossibleBlocks(gt *testing.T) { func BatchInLastPossibleBlocks(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
dp.DeployConfig.SequencerWindowSize = 4 dp.DeployConfig.SequencerWindowSize = 4
dp.DeployConfig.L2BlockTime = 2 dp.DeployConfig.L2BlockTime = 2
...@@ -116,7 +143,7 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) { ...@@ -116,7 +143,7 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
verifyChainStateOnSequencer(12, 23, 11, 17, 8) verifyChainStateOnSequencer(12, 23, 11, 17, 8)
} }
// TestLargeL1Gaps tests the case where there is a gap between two L1 blocks which // LargeL1Gaps tests the case where there is a gap between two L1 blocks which
// is larger than the sequencer drift. // is larger than the sequencer drift.
// This test has the following parameters: // This test has the following parameters:
// L1 Block time: 4s. L2 Block time: 2s. Sequencer Drift: 32s // L1 Block time: 4s. L2 Block time: 2s. Sequencer Drift: 32s
...@@ -127,13 +154,14 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) { ...@@ -127,13 +154,14 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
// Then it generates 3 more L1 blocks. // Then it generates 3 more L1 blocks.
// At this point it can verify that the batches were properly generated. // At this point it can verify that the batches were properly generated.
// Note: It batch-submits when possible. // Note: It batch-submits when possible.
func TestLargeL1Gaps(gt *testing.T) { func LargeL1Gaps(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L1BlockTime = 4 dp.DeployConfig.L1BlockTime = 4
dp.DeployConfig.L2BlockTime = 2 dp.DeployConfig.L2BlockTime = 2
dp.DeployConfig.SequencerWindowSize = 4 dp.DeployConfig.SequencerWindowSize = 4
dp.DeployConfig.MaxSequencerDrift = 32 dp.DeployConfig.MaxSequencerDrift = 32
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LvlDebug)
......
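The TestBlockTimeBatchType wrapper above repeats a pattern that also appears in TestL2BatcherBatchType, TestProposerBatchType, TestReorgBatchType, TestSyncBatchType and TestSystemConfigBatchType further down in this diff. As a sketch only (the helper name is hypothetical and not part of this commit), the shared wiring could be factored into a small helper in the actions package:

package actions

import (
	"testing"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// runWithBothBatchTypes runs f once with span batches disabled (nil offset) and once
// with the span batch hard fork active from genesis (offset 0).
func runWithBothBatchTypes(t *testing.T, name string, f func(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64)) {
	t.Run(name+"_SingularBatch", func(t *testing.T) {
		f(t, nil)
	})
	spanBatchTimeOffset := hexutil.Uint64(0)
	t.Run(name+"_SpanBatch", func(t *testing.T) {
		f(t, &spanBatchTimeOffset)
	})
}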
package actions
import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-service/testlog"
)
func TestDencunL1Fork(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc)
activation := sd.L1Cfg.Timestamp + 24
sd.L1Cfg.Config.CancunTime = &activation
log := testlog.Logger(t, log.LvlDebug)
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
l1Head := miner.l1Chain.CurrentBlock()
require.False(t, sd.L1Cfg.Config.IsCancun(l1Head.Number, l1Head.Time), "Cancun not active yet")
// start op-nodes
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
// build empty L1 blocks, crossing the fork boundary
miner.ActL1SetFeeRecipient(common.Address{'A', 0})
miner.ActEmptyBlock(t)
miner.ActEmptyBlock(t) // Cancun activates here
miner.ActEmptyBlock(t)
// verify Cancun is active
l1Head = miner.l1Chain.CurrentBlock()
require.True(t, sd.L1Cfg.Config.IsCancun(l1Head.Number, l1Head.Time), "Cancun active")
// build L2 chain up to and including L2 blocks referencing Cancun L1 blocks
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
miner.ActL1StartBlock(12)(t)
batcher.ActSubmitAll(t)
miner.ActL1IncludeTx(batcher.batcherAddr)(t)
miner.ActL1EndBlock(t)
// sync verifier
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
// verify verifier accepted Cancun L1 inputs
require.Equal(t, l1Head.Hash(), verifier.SyncStatus().SafeL2.L1Origin.Hash, "verifier synced L1 chain that includes Cancun headers")
require.Equal(t, sequencer.SyncStatus().UnsafeL2, verifier.SyncStatus().UnsafeL2, "verifier and sequencer agree")
}
...@@ -76,6 +76,13 @@ func (s *L1Miner) ActL1StartBlock(timeDelta uint64) Action { ...@@ -76,6 +76,13 @@ func (s *L1Miner) ActL1StartBlock(timeDelta uint64) Action {
if s.l1Cfg.Config.IsShanghai(header.Number, header.Time) { if s.l1Cfg.Config.IsShanghai(header.Number, header.Time) {
header.WithdrawalsHash = &types.EmptyWithdrawalsHash header.WithdrawalsHash = &types.EmptyWithdrawalsHash
} }
if s.l1Cfg.Config.IsCancun(header.Number, header.Time) {
var root common.Hash
var zero uint64
header.BlobGasUsed = &zero
header.ExcessBlobGas = &zero
header.ParentBeaconRoot = &root
}
s.l1Building = true s.l1Building = true
s.l1BuildingHeader = header s.l1BuildingHeader = header
......
...@@ -7,6 +7,7 @@ import ( ...@@ -7,6 +7,7 @@ import (
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
...@@ -20,7 +21,36 @@ import ( ...@@ -20,7 +21,36 @@ import (
"github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testlog"
) )
func TestBatcher(gt *testing.T) { // TestL2BatcherBatchType runs each batcher-related test case in singular batch mode and span batch mode.
func TestL2BatcherBatchType(t *testing.T) {
tests := []struct {
name string
f func(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64)
}{
{"NormalBatcher", NormalBatcher},
{"L2Finalization", L2Finalization},
{"L2FinalizationWithSparseL1", L2FinalizationWithSparseL1},
{"GarbageBatch", GarbageBatch},
{"ExtendedTimeWithoutL1Batches", ExtendedTimeWithoutL1Batches},
{"BigL2Txs", BigL2Txs},
}
for _, test := range tests {
test := test
t.Run(test.name+"_SingularBatch", func(t *testing.T) {
test.f(t, nil)
})
}
spanBatchTimeOffset := hexutil.Uint64(0)
for _, test := range tests {
test := test
t.Run(test.name+"_SpanBatch", func(t *testing.T) {
test.f(t, &spanBatchTimeOffset)
})
}
}
func NormalBatcher(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
p := &e2eutils.TestParams{ p := &e2eutils.TestParams{
MaxSequencerDrift: 20, // larger than L1 block time we simulate in this test (12) MaxSequencerDrift: 20, // larger than L1 block time we simulate in this test (12)
...@@ -29,6 +59,7 @@ func TestBatcher(gt *testing.T) { ...@@ -29,6 +59,7 @@ func TestBatcher(gt *testing.T) {
L1BlockTime: 12, L1BlockTime: 12,
} }
dp := e2eutils.MakeDeployParams(t, p) dp := e2eutils.MakeDeployParams(t, p)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LvlDebug)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
...@@ -55,7 +86,7 @@ func TestBatcher(gt *testing.T) { ...@@ -55,7 +86,7 @@ func TestBatcher(gt *testing.T) {
To: &dp.Addresses.Bob, To: &dp.Addresses.Bob,
Value: e2eutils.Ether(2), Value: e2eutils.Ether(2),
}) })
require.NoError(gt, cl.SendTransaction(t.Ctx(), tx)) require.NoError(t, cl.SendTransaction(t.Ctx(), tx))
sequencer.ActL2PipelineFull(t) sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t) verifier.ActL2PipelineFull(t)
...@@ -97,9 +128,10 @@ func TestBatcher(gt *testing.T) { ...@@ -97,9 +128,10 @@ func TestBatcher(gt *testing.T) {
require.NotNil(t, vTx) require.NotNil(t, vTx)
} }
func TestL2Finalization(gt *testing.T) { func L2Finalization(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LvlDebug)
miner, engine, sequencer := setupSequencerTest(t, sd, log) miner, engine, sequencer := setupSequencerTest(t, sd, log)
...@@ -202,10 +234,11 @@ func TestL2Finalization(gt *testing.T) { ...@@ -202,10 +234,11 @@ func TestL2Finalization(gt *testing.T) {
require.Equal(t, heightToSubmit, sequencer.SyncStatus().FinalizedL2.Number, "unknown/bad finalized L1 blocks are ignored") require.Equal(t, heightToSubmit, sequencer.SyncStatus().FinalizedL2.Number, "unknown/bad finalized L1 blocks are ignored")
} }
// TestL2FinalizationWithSparseL1 tests that safe L2 blocks can be finalized even if we do not regularly get an L1 finalization signal // L2FinalizationWithSparseL1 tests that safe L2 blocks can be finalized even if we do not regularly get an L1 finalization signal
func TestL2FinalizationWithSparseL1(gt *testing.T) { func L2FinalizationWithSparseL1(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LvlDebug)
miner, engine, sequencer := setupSequencerTest(t, sd, log) miner, engine, sequencer := setupSequencerTest(t, sd, log)
...@@ -258,13 +291,14 @@ func TestL2FinalizationWithSparseL1(gt *testing.T) { ...@@ -258,13 +291,14 @@ func TestL2FinalizationWithSparseL1(gt *testing.T) {
require.Equal(t, finalStatus.FinalizedL2.Number, finalStatus.UnsafeL2.Number, "sequencer submitted its L2 block and it finalized") require.Equal(t, finalStatus.FinalizedL2.Number, finalStatus.UnsafeL2.Number, "sequencer submitted its L2 block and it finalized")
} }
// TestGarbageBatch tests the behavior of an invalid/malformed output channel frame containing // GarbageBatch tests the behavior of an invalid/malformed output channel frame containing
// valid batches being submitted to the batch inbox. These batches should always be rejected // valid batches being submitted to the batch inbox. These batches should always be rejected
// and the safe L2 head should remain unaltered. // and the safe L2 head should remain unaltered.
func TestGarbageBatch(gt *testing.T) { func GarbageBatch(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
p := defaultRollupTestParams p := defaultRollupTestParams
dp := e2eutils.MakeDeployParams(t, p) dp := e2eutils.MakeDeployParams(t, p)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
for _, garbageKind := range GarbageKinds { for _, garbageKind := range GarbageKinds {
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LvlError)
...@@ -340,7 +374,7 @@ func TestGarbageBatch(gt *testing.T) { ...@@ -340,7 +374,7 @@ func TestGarbageBatch(gt *testing.T) {
} }
} }
func TestExtendedTimeWithoutL1Batches(gt *testing.T) { func ExtendedTimeWithoutL1Batches(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
p := &e2eutils.TestParams{ p := &e2eutils.TestParams{
MaxSequencerDrift: 20, // larger than L1 block time we simulate in this test (12) MaxSequencerDrift: 20, // larger than L1 block time we simulate in this test (12)
...@@ -349,6 +383,7 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) { ...@@ -349,6 +383,7 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) {
L1BlockTime: 12, L1BlockTime: 12,
} }
dp := e2eutils.MakeDeployParams(t, p) dp := e2eutils.MakeDeployParams(t, p)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LvlError)
miner, engine, sequencer := setupSequencerTest(t, sd, log) miner, engine, sequencer := setupSequencerTest(t, sd, log)
...@@ -386,7 +421,7 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) { ...@@ -386,7 +421,7 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) {
require.Equal(t, sequencer.L2Unsafe(), sequencer.L2Safe(), "same for sequencer") require.Equal(t, sequencer.L2Unsafe(), sequencer.L2Safe(), "same for sequencer")
} }
// TestBigL2Txs tests a high-throughput case with constrained batcher: // BigL2Txs tests a high-throughput case with constrained batcher:
// - Fill 40 L2 blocks to near max-capacity, with txs of 120 KB each // - Fill 40 L2 blocks to near max-capacity, with txs of 120 KB each
// - Buffer the L2 blocks into channels together as much as possible, submit data-txs only when necessary // - Buffer the L2 blocks into channels together as much as possible, submit data-txs only when necessary
// (just before crossing the max RLP channel size) // (just before crossing the max RLP channel size)
...@@ -398,7 +433,7 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) { ...@@ -398,7 +433,7 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) {
// The goal of this test is to quickly run through an otherwise very slow process of submitting and including lots of data. // The goal of this test is to quickly run through an otherwise very slow process of submitting and including lots of data.
// This does not test the batcher code, but is really focused on testing the batcher utils // This does not test the batcher code, but is really focused on testing the batcher utils
// and channel-decoding verifier code in the derive package. // and channel-decoding verifier code in the derive package.
func TestBigL2Txs(gt *testing.T) { func BigL2Txs(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
p := &e2eutils.TestParams{ p := &e2eutils.TestParams{
MaxSequencerDrift: 100, MaxSequencerDrift: 100,
...@@ -407,6 +442,7 @@ func TestBigL2Txs(gt *testing.T) { ...@@ -407,6 +442,7 @@ func TestBigL2Txs(gt *testing.T) {
L1BlockTime: 12, L1BlockTime: 12,
} }
dp := e2eutils.MakeDeployParams(t, p) dp := e2eutils.MakeDeployParams(t, p)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LvlInfo)
miner, engine, sequencer := setupSequencerTest(t, sd, log) miner, engine, sequencer := setupSequencerTest(t, sd, log)
...@@ -464,7 +500,7 @@ func TestBigL2Txs(gt *testing.T) { ...@@ -464,7 +500,7 @@ func TestBigL2Txs(gt *testing.T) {
Value: big.NewInt(0), Value: big.NewInt(0),
Data: data, Data: data,
}) })
require.NoError(gt, cl.SendTransaction(t.Ctx(), tx)) require.NoError(t, cl.SendTransaction(t.Ctx(), tx))
engine.ActL2IncludeTx(dp.Addresses.Alice)(t) engine.ActL2IncludeTx(dp.Addresses.Alice)(t)
} }
sequencer.ActL2EndBlock(t) sequencer.ActL2EndBlock(t)
......
...@@ -4,6 +4,7 @@ import ( ...@@ -4,6 +4,7 @@ import (
"testing" "testing"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -14,9 +15,34 @@ import ( ...@@ -14,9 +15,34 @@ import (
"github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testlog"
) )
func TestProposer(gt *testing.T) { // TestProposerBatchType runs each proposer-related test case in singular batch mode and span batch mode.
func TestProposerBatchType(t *testing.T) {
tests := []struct {
name string
f func(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64)
}{
{"RunProposerTest", RunProposerTest},
}
for _, test := range tests {
test := test
t.Run(test.name+"_SingularBatch", func(t *testing.T) {
test.f(t, nil)
})
}
spanBatchTimeOffset := hexutil.Uint64(0)
for _, test := range tests {
test := test
t.Run(test.name+"_SpanBatch", func(t *testing.T) {
test.f(t, &spanBatchTimeOffset)
})
}
}
func RunProposerTest(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LvlDebug)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
......
...@@ -7,6 +7,7 @@ import ( ...@@ -7,6 +7,7 @@ import (
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
...@@ -21,8 +22,9 @@ import ( ...@@ -21,8 +22,9 @@ import (
"github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testlog"
) )
func setupReorgTest(t Testing, config *e2eutils.TestParams) (*e2eutils.SetupData, *e2eutils.DeployParams, *L1Miner, *L2Sequencer, *L2Engine, *L2Verifier, *L2Engine, *L2Batcher) { func setupReorgTest(t Testing, config *e2eutils.TestParams, spanBatchTimeOffset *hexutil.Uint64) (*e2eutils.SetupData, *e2eutils.DeployParams, *L1Miner, *L2Sequencer, *L2Engine, *L2Verifier, *L2Engine, *L2Batcher) {
dp := e2eutils.MakeDeployParams(t, config) dp := e2eutils.MakeDeployParams(t, config)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LvlDebug)
...@@ -44,9 +46,38 @@ func setupReorgTestActors(t Testing, dp *e2eutils.DeployParams, sd *e2eutils.Set ...@@ -44,9 +46,38 @@ func setupReorgTestActors(t Testing, dp *e2eutils.DeployParams, sd *e2eutils.Set
return sd, dp, miner, sequencer, seqEngine, verifier, verifEngine, batcher return sd, dp, miner, sequencer, seqEngine, verifier, verifEngine, batcher
} }
func TestReorgOrphanBlock(gt *testing.T) { // TestReorgBatchType runs each reorg-related test case in singular batch mode and span batch mode.
func TestReorgBatchType(t *testing.T) {
tests := []struct {
name string
f func(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64)
}{
{"ReorgOrphanBlock", ReorgOrphanBlock},
{"ReorgFlipFlop", ReorgFlipFlop},
{"DeepReorg", DeepReorg},
{"RestartOpGeth", RestartOpGeth},
{"ConflictingL2Blocks", ConflictingL2Blocks},
{"SyncAfterReorg", SyncAfterReorg},
}
for _, test := range tests {
test := test
t.Run(test.name+"_SingularBatch", func(t *testing.T) {
test.f(t, nil)
})
}
spanBatchTimeOffset := hexutil.Uint64(0)
for _, test := range tests {
test := test
t.Run(test.name+"_SpanBatch", func(t *testing.T) {
test.f(t, &spanBatchTimeOffset)
})
}
}
func ReorgOrphanBlock(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
sd, _, miner, sequencer, _, verifier, verifierEng, batcher := setupReorgTest(t, defaultRollupTestParams) sd, _, miner, sequencer, _, verifier, verifierEng, batcher := setupReorgTest(t, defaultRollupTestParams, spanBatchTimeOffset)
verifEngClient := verifierEng.EngineClient(t, sd.RollupCfg) verifEngClient := verifierEng.EngineClient(t, sd.RollupCfg)
sequencer.ActL2PipelineFull(t) sequencer.ActL2PipelineFull(t)
...@@ -112,9 +143,9 @@ func TestReorgOrphanBlock(gt *testing.T) { ...@@ -112,9 +143,9 @@ func TestReorgOrphanBlock(gt *testing.T) {
require.Equal(t, verifier.L2Safe(), sequencer.L2Safe(), "verifier and sequencer see same safe L2 block, while only verifier dealt with the orphan and replay") require.Equal(t, verifier.L2Safe(), sequencer.L2Safe(), "verifier and sequencer see same safe L2 block, while only verifier dealt with the orphan and replay")
} }
func TestReorgFlipFlop(gt *testing.T) { func ReorgFlipFlop(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
sd, _, miner, sequencer, _, verifier, verifierEng, batcher := setupReorgTest(t, defaultRollupTestParams) sd, _, miner, sequencer, _, verifier, verifierEng, batcher := setupReorgTest(t, defaultRollupTestParams, spanBatchTimeOffset)
minerCl := miner.L1Client(t, sd.RollupCfg) minerCl := miner.L1Client(t, sd.RollupCfg)
verifEngClient := verifierEng.EngineClient(t, sd.RollupCfg) verifEngClient := verifierEng.EngineClient(t, sd.RollupCfg)
checkVerifEngine := func() { checkVerifEngine := func() {
...@@ -333,7 +364,7 @@ func TestReorgFlipFlop(gt *testing.T) { ...@@ -333,7 +364,7 @@ func TestReorgFlipFlop(gt *testing.T) {
// Verifier // Verifier
// - Unsafe head is 62 // - Unsafe head is 62
// - Safe head is 42 // - Safe head is 42
func TestDeepReorg(gt *testing.T) { func DeepReorg(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
// Create actor and verification engine client // Create actor and verification engine client
...@@ -342,7 +373,7 @@ func TestDeepReorg(gt *testing.T) { ...@@ -342,7 +373,7 @@ func TestDeepReorg(gt *testing.T) {
SequencerWindowSize: 20, SequencerWindowSize: 20,
ChannelTimeout: 120, ChannelTimeout: 120,
L1BlockTime: 4, L1BlockTime: 4,
}) }, spanBatchTimeOffset)
minerCl := miner.L1Client(t, sd.RollupCfg) minerCl := miner.L1Client(t, sd.RollupCfg)
l2Client := seqEngine.EthClient() l2Client := seqEngine.EthClient()
verifEngClient := verifierEng.EngineClient(t, sd.RollupCfg) verifEngClient := verifierEng.EngineClient(t, sd.RollupCfg)
...@@ -566,9 +597,9 @@ type rpcWrapper struct { ...@@ -566,9 +597,9 @@ type rpcWrapper struct {
client.RPC client.RPC
} }
// TestRestartOpGeth tests that the sequencer can restart its execution engine without rollup-node restart, // RestartOpGeth tests that the sequencer can restart its execution engine without rollup-node restart,
// including recovering the finalized/safe state of L2 chain without reorging. // including recovering the finalized/safe state of L2 chain without reorging.
func TestRestartOpGeth(gt *testing.T) { func RestartOpGeth(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dbPath := path.Join(t.TempDir(), "testdb") dbPath := path.Join(t.TempDir(), "testdb")
dbOption := func(_ *ethconfig.Config, nodeCfg *node.Config) error { dbOption := func(_ *ethconfig.Config, nodeCfg *node.Config) error {
...@@ -576,6 +607,7 @@ func TestRestartOpGeth(gt *testing.T) { ...@@ -576,6 +607,7 @@ func TestRestartOpGeth(gt *testing.T) {
return nil return nil
} }
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LvlDebug)
jwtPath := e2eutils.WriteDefaultJWT(t) jwtPath := e2eutils.WriteDefaultJWT(t)
...@@ -660,12 +692,13 @@ func TestRestartOpGeth(gt *testing.T) { ...@@ -660,12 +692,13 @@ func TestRestartOpGeth(gt *testing.T) {
require.Equal(t, statusBeforeRestart.SafeL2, sequencer.L2Safe(), "expecting the safe block to catch up to what it was before shutdown after syncing from L1, and not be stuck at the finalized block") require.Equal(t, statusBeforeRestart.SafeL2, sequencer.L2Safe(), "expecting the safe block to catch up to what it was before shutdown after syncing from L1, and not be stuck at the finalized block")
} }
// TestConflictingL2Blocks tests that a second copy of the sequencer stack cannot introduce an alternative // ConflictingL2Blocks tests that a second copy of the sequencer stack cannot introduce an alternative
// L2 block (compared to something already secured by the first sequencer): // L2 block (compared to something already secured by the first sequencer):
// the alt block is not synced by the verifier, in unsafe and safe sync modes. // the alt block is not synced by the verifier, in unsafe and safe sync modes.
func TestConflictingL2Blocks(gt *testing.T) { func ConflictingL2Blocks(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LvlDebug)
...@@ -772,7 +805,7 @@ func TestConflictingL2Blocks(gt *testing.T) { ...@@ -772,7 +805,7 @@ func TestConflictingL2Blocks(gt *testing.T) {
require.Equal(t, sequencer.L2Unsafe(), altSequencer.L2Unsafe(), "and gets back in harmony with original sequencer") require.Equal(t, sequencer.L2Unsafe(), altSequencer.L2Unsafe(), "and gets back in harmony with original sequencer")
} }
func TestSyncAfterReorg(gt *testing.T) { func SyncAfterReorg(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
testingParams := e2eutils.TestParams{ testingParams := e2eutils.TestParams{
MaxSequencerDrift: 60, MaxSequencerDrift: 60,
...@@ -780,7 +813,7 @@ func TestSyncAfterReorg(gt *testing.T) { ...@@ -780,7 +813,7 @@ func TestSyncAfterReorg(gt *testing.T) {
ChannelTimeout: 2, ChannelTimeout: 2,
L1BlockTime: 12, L1BlockTime: 12,
} }
sd, dp, miner, sequencer, seqEngine, verifier, _, batcher := setupReorgTest(t, &testingParams) sd, dp, miner, sequencer, seqEngine, verifier, _, batcher := setupReorgTest(t, &testingParams, spanBatchTimeOffset)
l2Client := seqEngine.EthClient() l2Client := seqEngine.EthClient()
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LvlDebug)
addresses := e2eutils.CollectAddresses(sd, dp) addresses := e2eutils.CollectAddresses(sd, dp)
......
...@@ -23,9 +23,35 @@ import ( ...@@ -23,9 +23,35 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestDerivationWithFlakyL1RPC(gt *testing.T) { // TestSyncBatchType runs each sync test case in singular batch mode and span batch mode.
func TestSyncBatchType(t *testing.T) {
tests := []struct {
name string
f func(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64)
}{
{"DerivationWithFlakyL1RPC", DerivationWithFlakyL1RPC},
{"FinalizeWhileSyncing", FinalizeWhileSyncing},
}
for _, test := range tests {
test := test
t.Run(test.name+"_SingularBatch", func(t *testing.T) {
test.f(t, nil)
})
}
spanBatchTimeOffset := hexutil.Uint64(0)
for _, test := range tests {
test := test
t.Run(test.name+"_SpanBatch", func(t *testing.T) {
test.f(t, &spanBatchTimeOffset)
})
}
}
func DerivationWithFlakyL1RPC(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) // mute all the temporary derivation errors that we forcefully create log := testlog.Logger(t, log.LvlError) // mute all the temporary derivation errors that we forcefully create
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log) _, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
...@@ -62,9 +88,10 @@ func TestDerivationWithFlakyL1RPC(gt *testing.T) { ...@@ -62,9 +88,10 @@ func TestDerivationWithFlakyL1RPC(gt *testing.T) {
require.Equal(t, sequencer.L2Unsafe(), verifier.L2Safe(), "verifier is synced") require.Equal(t, sequencer.L2Unsafe(), verifier.L2Safe(), "verifier is synced")
} }
func TestFinalizeWhileSyncing(gt *testing.T) { func FinalizeWhileSyncing(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlError) // mute all the temporary derivation errors that we forcefully create log := testlog.Logger(t, log.LvlError) // mute all the temporary derivation errors that we forcefully create
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log) _, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
......
...@@ -7,6 +7,7 @@ import ( ...@@ -7,6 +7,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -19,13 +20,40 @@ import ( ...@@ -19,13 +20,40 @@ import (
"github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testlog"
) )
// TestBatcherKeyRotation tests that batcher A can operate, then be replaced with batcher B, then ignore old batcher A, // TestSystemConfigBatchType runs each system config-related test case in singular batch mode and span batch mode.
func TestSystemConfigBatchType(t *testing.T) {
tests := []struct {
name string
f func(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64)
}{
{"BatcherKeyRotation", BatcherKeyRotation},
{"GPOParamsChange", GPOParamsChange},
{"GasLimitChange", GasLimitChange},
}
for _, test := range tests {
test := test
t.Run(test.name+"_SingularBatch", func(t *testing.T) {
test.f(t, nil)
})
}
spanBatchTimeOffset := hexutil.Uint64(0)
for _, test := range tests {
test := test
t.Run(test.name+"_SpanBatch", func(t *testing.T) {
test.f(t, &spanBatchTimeOffset)
})
}
}
// BatcherKeyRotation tests that batcher A can operate, then be replaced with batcher B, then ignore old batcher A,
// and that the change to batcher B is reverted properly upon reorg of L1. // and that the change to batcher B is reverted properly upon reorg of L1.
func TestBatcherKeyRotation(gt *testing.T) { func BatcherKeyRotation(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2BlockTime = 2 dp.DeployConfig.L2BlockTime = 2
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LvlDebug)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
...@@ -198,11 +226,12 @@ func TestBatcherKeyRotation(gt *testing.T) { ...@@ -198,11 +226,12 @@ func TestBatcherKeyRotation(gt *testing.T) {
require.Equal(t, sequencer.L2Unsafe(), verifier.L2Unsafe(), "verifier synced") require.Equal(t, sequencer.L2Unsafe(), verifier.L2Unsafe(), "verifier synced")
} }
// TestGPOParamsChange tests that the GPO params can be updated to adjust fees of L2 transactions, // GPOParamsChange tests that the GPO params can be updated to adjust fees of L2 transactions,
// and that the L1 data fees to the L2 transaction are applied correctly before, during and after the GPO update in L2. // and that the L1 data fees to the L2 transaction are applied correctly before, during and after the GPO update in L2.
func TestGPOParamsChange(gt *testing.T) { func GPOParamsChange(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LvlDebug)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
...@@ -326,12 +355,13 @@ func TestGPOParamsChange(gt *testing.T) { ...@@ -326,12 +355,13 @@ func TestGPOParamsChange(gt *testing.T) {
require.Equal(t, "2.3", receipt.FeeScalar.String(), "2_300_000 divided by 6 decimals = float(2.3)") require.Equal(t, "2.3", receipt.FeeScalar.String(), "2_300_000 divided by 6 decimals = float(2.3)")
} }
// TestGasLimitChange tests that the gas limit can be configured to L1, // GasLimitChange tests that the gas limit can be configured to L1,
// and that the L2 changes the gas limit instantly at the exact block that adopts the L1 origin with // and that the L2 changes the gas limit instantly at the exact block that adopts the L1 origin with
// the gas limit change event. And checks if a verifier node can reproduce the same gas limit change. // the gas limit change event. And checks if a verifier node can reproduce the same gas limit change.
func TestGasLimitChange(gt *testing.T) { func GasLimitChange(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LvlDebug)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
......
...@@ -13,10 +13,12 @@ import ( ...@@ -13,10 +13,12 @@ import (
"github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testlog"
) )
type regolithScheduledTest struct { type hardforkScheduledTest struct {
name string name string
regolithTime *hexutil.Uint64 regolithTime *hexutil.Uint64
spanBatchTime *hexutil.Uint64
activateRegolith bool activateRegolith bool
activateSpanBatch bool
} }
// TestCrossLayerUser tests that common actions of the CrossLayerUser actor work in various regolith configurations: // TestCrossLayerUser tests that common actions of the CrossLayerUser actor work in various regolith configurations:
...@@ -31,11 +33,18 @@ func TestCrossLayerUser(t *testing.T) { ...@@ -31,11 +33,18 @@ func TestCrossLayerUser(t *testing.T) {
zeroTime := hexutil.Uint64(0) zeroTime := hexutil.Uint64(0)
futureTime := hexutil.Uint64(20) futureTime := hexutil.Uint64(20)
farFutureTime := hexutil.Uint64(2000) farFutureTime := hexutil.Uint64(2000)
tests := []regolithScheduledTest{ tests := []hardforkScheduledTest{
{name: "NoRegolith", regolithTime: nil, activateRegolith: false}, {name: "NoRegolith", regolithTime: nil, activateRegolith: false, spanBatchTime: nil, activateSpanBatch: false},
{name: "NotYetRegolith", regolithTime: &farFutureTime, activateRegolith: false}, {name: "NotYetRegolith", regolithTime: &farFutureTime, activateRegolith: false, spanBatchTime: nil, activateSpanBatch: false},
{name: "RegolithAtGenesis", regolithTime: &zeroTime, activateRegolith: true}, {name: "RegolithAtGenesis", regolithTime: &zeroTime, activateRegolith: true, spanBatchTime: nil, activateSpanBatch: false},
{name: "RegolithAfterGenesis", regolithTime: &futureTime, activateRegolith: true}, {name: "RegolithAfterGenesis", regolithTime: &futureTime, activateRegolith: true, spanBatchTime: nil, activateSpanBatch: false},
{name: "NoSpanBatch", regolithTime: &zeroTime, activateRegolith: true, spanBatchTime: nil, activateSpanBatch: false},
{name: "NotYetSpanBatch", regolithTime: &zeroTime, activateRegolith: true,
spanBatchTime: &farFutureTime, activateSpanBatch: false},
{name: "SpanBatchAtGenesis", regolithTime: &zeroTime, activateRegolith: true,
spanBatchTime: &zeroTime, activateSpanBatch: true},
{name: "SpanBatchAfterGenesis", regolithTime: &zeroTime, activateRegolith: true,
spanBatchTime: &futureTime, activateSpanBatch: true},
} }
for _, test := range tests { for _, test := range tests {
test := test // Use a fixed reference as the tests run in parallel test := test // Use a fixed reference as the tests run in parallel
...@@ -45,10 +54,11 @@ func TestCrossLayerUser(t *testing.T) { ...@@ -45,10 +54,11 @@ func TestCrossLayerUser(t *testing.T) {
} }
} }
func runCrossLayerUserTest(gt *testing.T, test regolithScheduledTest) { func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) {
t := NewDefaultTesting(gt) t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
dp.DeployConfig.L2GenesisRegolithTimeOffset = test.regolithTime dp.DeployConfig.L2GenesisRegolithTimeOffset = test.regolithTime
dp.DeployConfig.L2GenesisSpanBatchTimeOffset = test.spanBatchTime
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LvlDebug)
......
...@@ -4,13 +4,15 @@ import ( ...@@ -4,13 +4,15 @@ import (
"context" "context"
"path/filepath" "path/filepath"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/cannon" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/cannon"
"github.com/ethereum-optimism/optimism/op-challenger/metrics" "github.com/ethereum-optimism/optimism/op-challenger/metrics"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/sources/batching"
"github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
...@@ -32,7 +34,7 @@ func (g *CannonGameHelper) StartChallenger(ctx context.Context, rollupCfg *rollu ...@@ -32,7 +34,7 @@ func (g *CannonGameHelper) StartChallenger(ctx context.Context, rollupCfg *rollu
return c return c
} }
func (g *CannonGameHelper) CreateHonestActor(ctx context.Context, rollupCfg *rollup.Config, l2Genesis *core.Genesis, l1Client bind.ContractCaller, l1Endpoint string, l2Endpoint string, options ...challenger.Option) *HonestHelper { func (g *CannonGameHelper) CreateHonestActor(ctx context.Context, rollupCfg *rollup.Config, l2Genesis *core.Genesis, l1Client *ethclient.Client, l1Endpoint string, l2Endpoint string, options ...challenger.Option) *HonestHelper {
opts := []challenger.Option{ opts := []challenger.Option{
challenger.WithCannon(g.t, rollupCfg, l2Genesis, l2Endpoint), challenger.WithCannon(g.t, rollupCfg, l2Genesis, l2Endpoint),
challenger.WithFactoryAddress(g.factoryAddr), challenger.WithFactoryAddress(g.factoryAddr),
...@@ -42,7 +44,9 @@ func (g *CannonGameHelper) CreateHonestActor(ctx context.Context, rollupCfg *rol ...@@ -42,7 +44,9 @@ func (g *CannonGameHelper) CreateHonestActor(ctx context.Context, rollupCfg *rol
cfg := challenger.NewChallengerConfig(g.t, l1Endpoint, opts...) cfg := challenger.NewChallengerConfig(g.t, l1Endpoint, opts...)
logger := testlog.Logger(g.t, log.LvlInfo).New("role", "CorrectTrace") logger := testlog.Logger(g.t, log.LvlInfo).New("role", "CorrectTrace")
maxDepth := g.MaxDepth(ctx) maxDepth := g.MaxDepth(ctx)
provider, err := cannon.NewTraceProvider(ctx, logger, metrics.NoopMetrics, cfg, l1Client, filepath.Join(cfg.Datadir, "honest"), g.addr, uint64(maxDepth)) gameContract, err := contracts.NewFaultDisputeGameContract(g.addr, batching.NewMultiCaller(l1Client.Client(), batching.DefaultBatchSize))
g.require.NoError(err, "Create game contract bindings")
provider, err := cannon.NewTraceProvider(ctx, logger, metrics.NoopMetrics, cfg, gameContract, filepath.Join(cfg.Datadir, "honest"), uint64(maxDepth))
g.require.NoError(err, "create cannon trace provider") g.require.NoError(err, "create cannon trace provider")
return &HonestHelper{ return &HonestHelper{
......
...@@ -59,7 +59,6 @@ func MakeDeployParams(t require.TestingT, tp *TestParams) *DeployParams { ...@@ -59,7 +59,6 @@ func MakeDeployParams(t require.TestingT, tp *TestParams) *DeployParams {
deployConfig.L1BlockTime = tp.L1BlockTime deployConfig.L1BlockTime = tp.L1BlockTime
deployConfig.L2GenesisRegolithTimeOffset = nil deployConfig.L2GenesisRegolithTimeOffset = nil
deployConfig.L2GenesisCanyonTimeOffset = CanyonTimeOffset() deployConfig.L2GenesisCanyonTimeOffset = CanyonTimeOffset()
deployConfig.L2GenesisSpanBatchTimeOffset = SpanBatchTimeOffset()
require.NoError(t, deployConfig.Check()) require.NoError(t, deployConfig.Check())
require.Equal(t, addresses.Batcher, deployConfig.BatchSenderAddress) require.Equal(t, addresses.Batcher, deployConfig.BatchSenderAddress)
...@@ -186,14 +185,6 @@ func SystemConfigFromDeployConfig(deployConfig *genesis.DeployConfig) eth.System ...@@ -186,14 +185,6 @@ func SystemConfigFromDeployConfig(deployConfig *genesis.DeployConfig) eth.System
} }
} }
func SpanBatchTimeOffset() *hexutil.Uint64 {
if os.Getenv("OP_E2E_USE_SPAN_BATCH") == "true" {
offset := hexutil.Uint64(0)
return &offset
}
return nil
}
func CanyonTimeOffset() *hexutil.Uint64 { func CanyonTimeOffset() *hexutil.Uint64 {
if os.Getenv("OP_E2E_USE_CANYON") == "true" { if os.Getenv("OP_E2E_USE_CANYON") == "true" {
offset := hexutil.Uint64(0) offset := hexutil.Uint64(0)
......
...@@ -88,7 +88,6 @@ func DefaultSystemConfig(t *testing.T) SystemConfig { ...@@ -88,7 +88,6 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
deployConfig := config.DeployConfig.Copy() deployConfig := config.DeployConfig.Copy()
deployConfig.L1GenesisBlockTimestamp = hexutil.Uint64(time.Now().Unix()) deployConfig.L1GenesisBlockTimestamp = hexutil.Uint64(time.Now().Unix())
deployConfig.L2GenesisCanyonTimeOffset = e2eutils.CanyonTimeOffset() deployConfig.L2GenesisCanyonTimeOffset = e2eutils.CanyonTimeOffset()
deployConfig.L2GenesisSpanBatchTimeOffset = e2eutils.SpanBatchTimeOffset()
require.NoError(t, deployConfig.Check(), "Deploy config is invalid, do you need to run make devnet-allocs?") require.NoError(t, deployConfig.Check(), "Deploy config is invalid, do you need to run make devnet-allocs?")
l1Deployments := config.L1Deployments.Copy() l1Deployments := config.L1Deployments.Copy()
require.NoError(t, l1Deployments.Check()) require.NoError(t, l1Deployments.Check())
...@@ -684,7 +683,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste ...@@ -684,7 +683,7 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
} }
var batchType uint = derive.SingularBatchType var batchType uint = derive.SingularBatchType
if os.Getenv("OP_E2E_USE_SPAN_BATCH") == "true" { if cfg.DeployConfig.L2GenesisSpanBatchTimeOffset != nil && *cfg.DeployConfig.L2GenesisSpanBatchTimeOffset == hexutil.Uint64(0) {
batchType = derive.SpanBatchType batchType = derive.SpanBatchType
} }
batcherMaxL1TxSizeBytes := cfg.BatcherMaxL1TxSizeBytes batcherMaxL1TxSizeBytes := cfg.BatcherMaxL1TxSizeBytes
......
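The OP_E2E_USE_SPAN_BATCH environment variable switch is replaced here by inspecting the deploy config directly. A small sketch of that selection logic as a standalone helper (the helper name, package name and the derive import path are assumptions; only the condition itself comes from the diff):

package op_e2e

import (
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

// batchTypeFromOffset mirrors the check in cfg.Start above: the batcher submits span
// batches only when the span batch fork is active from genesis (offset set and zero).
func batchTypeFromOffset(offset *hexutil.Uint64) uint {
	if offset != nil && *offset == hexutil.Uint64(0) {
		return derive.SpanBatchType
	}
	return derive.SingularBatchType
}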
...@@ -15,25 +15,42 @@ import ( ...@@ -15,25 +15,42 @@ import (
"github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestVerifyL2OutputRoot(t *testing.T) { func TestVerifyL2OutputRoot(t *testing.T) {
testVerifyL2OutputRoot(t, false) testVerifyL2OutputRoot(t, false, false)
}
func TestVerifyL2OutputRootSpanBatch(t *testing.T) {
testVerifyL2OutputRoot(t, false, true)
} }
func TestVerifyL2OutputRootDetached(t *testing.T) { func TestVerifyL2OutputRootDetached(t *testing.T) {
testVerifyL2OutputRoot(t, true) testVerifyL2OutputRoot(t, true, false)
}
func TestVerifyL2OutputRootDetachedSpanBatch(t *testing.T) {
testVerifyL2OutputRoot(t, true, true)
} }
func TestVerifyL2OutputRootEmptyBlock(t *testing.T) { func TestVerifyL2OutputRootEmptyBlock(t *testing.T) {
testVerifyL2OutputRootEmptyBlock(t, false) testVerifyL2OutputRootEmptyBlock(t, false, false)
}
func TestVerifyL2OutputRootEmptyBlockSpanBatch(t *testing.T) {
testVerifyL2OutputRootEmptyBlock(t, false, true)
} }
func TestVerifyL2OutputRootEmptyBlockDetached(t *testing.T) { func TestVerifyL2OutputRootEmptyBlockDetached(t *testing.T) {
testVerifyL2OutputRootEmptyBlock(t, true) testVerifyL2OutputRootEmptyBlock(t, true, false)
}
func TestVerifyL2OutputRootEmptyBlockDetachedSpanBatch(t *testing.T) {
testVerifyL2OutputRootEmptyBlock(t, true, true)
} }
// TestVerifyL2OutputRootEmptyBlock asserts that the program can verify the output root of an empty block // TestVerifyL2OutputRootEmptyBlock asserts that the program can verify the output root of an empty block
...@@ -46,7 +63,7 @@ func TestVerifyL2OutputRootEmptyBlockDetached(t *testing.T) { ...@@ -46,7 +63,7 @@ func TestVerifyL2OutputRootEmptyBlockDetached(t *testing.T) {
// - reboot the batch submitter // - reboot the batch submitter
// - update the state root via a tx // - update the state root via a tx
// - run program // - run program
func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) { func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool, spanBatchActivated bool) {
InitParallel(t) InitParallel(t)
ctx := context.Background() ctx := context.Background()
...@@ -56,6 +73,13 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) { ...@@ -56,6 +73,13 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) {
// Use a small sequencer window size to avoid test timeout while waiting for empty blocks // Use a small sequencer window size to avoid test timeout while waiting for empty blocks
// But not too small to ensure that our claim and subsequent state change is published // But not too small to ensure that our claim and subsequent state change is published
cfg.DeployConfig.SequencerWindowSize = 16 cfg.DeployConfig.SequencerWindowSize = 16
if spanBatchActivated {
// Activate span batch hard fork
minTs := hexutil.Uint64(0)
cfg.DeployConfig.L2GenesisSpanBatchTimeOffset = &minTs
} else {
cfg.DeployConfig.L2GenesisSpanBatchTimeOffset = nil
}
sys, err := cfg.Start(t) sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
...@@ -147,13 +171,20 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) { ...@@ -147,13 +171,20 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) {
}) })
} }
func testVerifyL2OutputRoot(t *testing.T, detached bool) { func testVerifyL2OutputRoot(t *testing.T, detached bool, spanBatchActivated bool) {
InitParallel(t) InitParallel(t)
ctx := context.Background() ctx := context.Background()
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
// We don't need a verifier - just the sequencer is enough // We don't need a verifier - just the sequencer is enough
delete(cfg.Nodes, "verifier") delete(cfg.Nodes, "verifier")
if spanBatchActivated {
// Activate span batch hard fork
minTs := hexutil.Uint64(0)
cfg.DeployConfig.L2GenesisSpanBatchTimeOffset = &minTs
} else {
cfg.DeployConfig.L2GenesisSpanBatchTimeOffset = nil
}
sys, err := cfg.Start(t) sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
......
...@@ -45,6 +45,31 @@ import ( ...@@ -45,6 +45,31 @@ import (
"github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testlog"
) )
// TestSystemBatchType runs each system e2e test case in singular batch mode and span batch mode.
// If the test case tests batch submission and advancing safe head, it should be tested in both singular and span batch mode.
func TestSystemBatchType(t *testing.T) {
tests := []struct {
name string
f func(gt *testing.T, spanBatchTimeOffset *hexutil.Uint64)
}{
{"StopStartBatcher", StopStartBatcher},
}
for _, test := range tests {
test := test
t.Run(test.name+"_SingularBatch", func(t *testing.T) {
test.f(t, nil)
})
}
spanBatchTimeOffset := hexutil.Uint64(0)
for _, test := range tests {
test := test
t.Run(test.name+"_SpanBatch", func(t *testing.T) {
test.f(t, &spanBatchTimeOffset)
})
}
}
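Any new system test that exercises batch submission can reuse the table above: give the test the (t, spanBatchTimeOffset) signature and register it once, and it runs under both batch types. A minimal sketch, assuming a hypothetical BatcherReorg test that is not part of this change:

func BatcherReorg(t *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
	InitParallel(t)
	cfg := DefaultSystemConfig(t)
	// nil keeps singular batches; a zero offset activates span batches at genesis.
	cfg.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
	sys, err := cfg.Start(t)
	require.Nil(t, err, "Error starting up system")
	defer sys.Close()
	// ... drive the batcher and assert that the safe head advances ...
}

// registered in the tests table of TestSystemBatchType as:
//   {"BatcherReorg", BatcherReorg},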
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
if config.ExternalL2Shim != "" { if config.ExternalL2Shim != "" {
fmt.Println("Running tests with external L2 process adapter at ", config.ExternalL2Shim) fmt.Println("Running tests with external L2 process adapter at ", config.ExternalL2Shim)
...@@ -1222,10 +1247,11 @@ func TestFees(t *testing.T) { ...@@ -1222,10 +1247,11 @@ func TestFees(t *testing.T) {
require.Equal(t, balanceDiff, totalFee, "balances should add up") require.Equal(t, balanceDiff, totalFee, "balances should add up")
} }
func TestStopStartBatcher(t *testing.T) { func StopStartBatcher(t *testing.T, spanBatchTimeOffset *hexutil.Uint64) {
InitParallel(t) InitParallel(t)
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
cfg.DeployConfig.L2GenesisSpanBatchTimeOffset = spanBatchTimeOffset
sys, err := cfg.Start(t) sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system") require.Nil(t, err, "Error starting up system")
defer sys.Close() defer sys.Close()
......
bin bin
# config files
genesis.json
jwt.txt
rollup.json
...@@ -2,6 +2,7 @@ package cli ...@@ -2,6 +2,7 @@ package cli
import ( import (
"fmt" "fmt"
"strings"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
...@@ -19,7 +20,7 @@ func LoadSignerSetup(ctx *cli.Context) (p2p.SignerSetup, error) { ...@@ -19,7 +20,7 @@ func LoadSignerSetup(ctx *cli.Context) (p2p.SignerSetup, error) {
if key != "" { if key != "" {
// Mnemonics are bad because they leak *all* keys when they leak. // Mnemonics are bad because they leak *all* keys when they leak.
// Unencrypted keys from file are bad because they are easy to leak (and we are not checking file permissions). // Unencrypted keys from file are bad because they are easy to leak (and we are not checking file permissions).
priv, err := crypto.HexToECDSA(key) priv, err := crypto.HexToECDSA(strings.TrimPrefix(key, "0x"))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read batch submitter key: %w", err) return nil, fmt.Errorf("failed to read batch submitter key: %w", err)
} }
......
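With the strings.TrimPrefix change above, the hex-encoded signer key is accepted with or without a 0x prefix. A small standalone sketch of the behavior (the key below is a hypothetical example value, not one used anywhere in this change):

package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// The same hypothetical key, spelled with and without the 0x prefix.
	keys := []string{
		"4c0883a69102937d6231471b5dbb6204fe512961708279f2e3e8a5d4b8e3e3e8",
		"0x4c0883a69102937d6231471b5dbb6204fe512961708279f2e3e8a5d4b8e3e3e8",
	}
	for _, key := range keys {
		// crypto.HexToECDSA alone rejects the 0x-prefixed form; TrimPrefix makes both work.
		priv, err := crypto.HexToECDSA(strings.TrimPrefix(key, "0x"))
		if err != nil {
			fmt.Println("parse failed:", err)
			continue
		}
		fmt.Println("parsed address:", crypto.PubkeyToAddress(priv.PublicKey))
	}
}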
...@@ -326,6 +326,12 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti ...@@ -326,6 +326,12 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti
return pubsub.ValidationReject return pubsub.ValidationReject
} }
// [REJECT] if a V2 Block has non-empty withdrawals
if blockVersion == eth.BlockV2 && len(*payload.Withdrawals) != 0 {
log.Warn("payload is on v2 topic, but has non-empty withdrawals", "bad_hash", payload.BlockHash.String(), "withdrawal_count", len(*payload.Withdrawals))
return pubsub.ValidationReject
}
seen, ok := blockHeightLRU.Get(uint64(payload.BlockNumber)) seen, ok := blockHeightLRU.Get(uint64(payload.BlockNumber))
if !ok { if !ok {
seen = new(seenBlocks) seen = new(seenBlocks)
......
package p2p package p2p
import ( import (
"bytes"
"context" "context"
"fmt"
"math/big" "math/big"
"testing" "testing"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/golang/snappy"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
pubsub "github.com/libp2p/go-libp2p-pubsub" pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peer"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -89,3 +99,65 @@ func TestVerifyBlockSignature(t *testing.T) { ...@@ -89,3 +99,65 @@ func TestVerifyBlockSignature(t *testing.T) {
require.Equal(t, pubsub.ValidationIgnore, result) require.Equal(t, pubsub.ValidationIgnore, result)
}) })
} }
func createSignedP2Payload(payload *eth.ExecutionPayload, signer Signer, l2ChainID *big.Int) ([]byte, error) {
var buf bytes.Buffer
buf.Write(make([]byte, 65))
if _, err := payload.MarshalSSZ(&buf); err != nil {
return nil, fmt.Errorf("failed to encode execution payload to publish: %w", err)
}
data := buf.Bytes()
payloadData := data[65:]
sig, err := signer.Sign(context.TODO(), SigningDomainBlocksV1, l2ChainID, payloadData)
if err != nil {
return nil, fmt.Errorf("failed to sign execution payload with signer: %w", err)
}
copy(data[:65], sig[:])
// compress the full message
// This also copies the data, freeing up the original buffer to go back into the pool
return snappy.Encode(nil, data), nil
}
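The helper above mirrors the wire format the gossip validator expects: a 65-byte signature, then the SSZ-encoded execution payload, with the whole message snappy-compressed. Decoding is the inverse; a sketch under those same assumptions (this helper does not exist in the diff):

// splitSignedP2Payload undoes createSignedP2Payload: decompress, then split
// the 65-byte signature prefix from the SSZ payload bytes.
func splitSignedP2Payload(compressed []byte) (sig []byte, payloadSSZ []byte, err error) {
	data, err := snappy.Decode(nil, compressed)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to decompress message: %w", err)
	}
	if len(data) < 65 {
		return nil, nil, fmt.Errorf("message too short: %d bytes", len(data))
	}
	return data[:65], data[65:], nil
}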
// TestBlockValidator does some very basic tests of the p2p block validation logic
func TestBlockValidator(t *testing.T) {
// Step 1: Create the validation function
cfg := &rollup.Config{
L2ChainID: big.NewInt(100),
}
secrets, err := e2eutils.DefaultMnemonicConfig.Secrets()
require.NoError(t, err)
runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)}
signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)}
// valFnV1 := BuildBlocksValidator(testlog.Logger(t, log.LvlCrit), rollupCfg, runCfg, eth.BlockV1)
valFnV2 := BuildBlocksValidator(testlog.Logger(t, log.LvlCrit), cfg, runCfg, eth.BlockV2)
// Step 2: Call the validation function
peerID := peer.ID("foo")
// Valid Case
payload := eth.ExecutionPayload{
Timestamp: hexutil.Uint64(time.Now().Unix()),
Withdrawals: &types.Withdrawals{},
}
payload.BlockHash, _ = payload.CheckBlockHash() // hack to generate the block hash easily.
data, err := createSignedP2Payload(&payload, signer, cfg.L2ChainID)
require.NoError(t, err)
message := &pubsub.Message{Message: &pubsub_pb.Message{Data: data}}
res := valFnV2(context.TODO(), peerID, message)
require.Equal(t, res, pubsub.ValidationAccept)
// Invalid because non-empty withdrawals when Canyon is active
payload = eth.ExecutionPayload{
Timestamp: hexutil.Uint64(time.Now().Unix()),
Withdrawals: &types.Withdrawals{&types.Withdrawal{Index: 1, Validator: 1}},
}
payload.BlockHash, _ = payload.CheckBlockHash()
data, err = createSignedP2Payload(&payload, signer, cfg.L2ChainID)
require.NoError(t, err)
message = &pubsub.Message{Message: &pubsub_pb.Message{Data: data}}
res = valFnV2(context.TODO(), peerID, message)
require.Equal(t, res, pubsub.ValidationReject)
}
...@@ -110,5 +110,9 @@ func LoadOPStackRollupConfig(chainID uint64) (*Config, error) { ...@@ -110,5 +110,9 @@ func LoadOPStackRollupConfig(chainID uint64) (*Config, error) {
cfg.ChannelTimeout = 120 cfg.ChannelTimeout = 120
cfg.MaxSequencerDrift = 1200 cfg.MaxSequencerDrift = 1200
} }
if chainID == pgnSepolia {
cfg.MaxSequencerDrift = 1000
cfg.SeqWindowSize = 7200
}
return cfg, nil return cfg, nil
} }
...@@ -20,6 +20,7 @@ type BlockInfo interface { ...@@ -20,6 +20,7 @@ type BlockInfo interface {
BaseFee() *big.Int BaseFee() *big.Int
ReceiptHash() common.Hash ReceiptHash() common.Hash
GasUsed() uint64 GasUsed() uint64
GasLimit() uint64
// HeaderRLP returns the RLP of the block header as per consensus rules // HeaderRLP returns the RLP of the block header as per consensus rules
// Returns an error if the header RLP could not be written // Returns an error if the header RLP could not be written
...@@ -100,6 +101,10 @@ func (h headerBlockInfo) GasUsed() uint64 { ...@@ -100,6 +101,10 @@ func (h headerBlockInfo) GasUsed() uint64 {
return h.Header.GasUsed return h.Header.GasUsed
} }
func (h headerBlockInfo) GasLimit() uint64 {
return h.Header.GasLimit
}
func (h headerBlockInfo) HeaderRLP() ([]byte, error) { func (h headerBlockInfo) HeaderRLP() ([]byte, error) {
return rlp.EncodeToBytes(h.Header) return rlp.EncodeToBytes(h.Header)
} }
......
...@@ -117,3 +117,7 @@ func (c *CallResult) GetAddress(i int) common.Address { ...@@ -117,3 +117,7 @@ func (c *CallResult) GetAddress(i int) common.Address {
func (c *CallResult) GetBigInt(i int) *big.Int { func (c *CallResult) GetBigInt(i int) *big.Int {
return *abi.ConvertType(c.out[i], new(*big.Int)).(**big.Int) return *abi.ConvertType(c.out[i], new(*big.Int)).(**big.Int)
} }
func (c *CallResult) GetStruct(i int, target interface{}) {
abi.ConvertType(c.out[i], target)
}
...@@ -139,6 +139,24 @@ func TestCallResult_GetValues(t *testing.T) { ...@@ -139,6 +139,24 @@ func TestCallResult_GetValues(t *testing.T) {
}, },
expected: big.NewInt(2398423), expected: big.NewInt(2398423),
}, },
{
name: "GetStruct",
getter: func(result *CallResult, i int) interface{} {
out := struct {
a *big.Int
b common.Hash
}{}
result.GetStruct(i, &out)
return out
},
expected: struct {
a *big.Int
b common.Hash
}{
a: big.NewInt(6),
b: common.Hash{0xee},
},
},
} }
for _, test := range tests { for _, test := range tests {
......
...@@ -78,6 +78,10 @@ func (h headerInfo) GasUsed() uint64 { ...@@ -78,6 +78,10 @@ func (h headerInfo) GasUsed() uint64 {
return h.Header.GasUsed return h.Header.GasUsed
} }
func (h headerInfo) GasLimit() uint64 {
return h.Header.GasLimit
}
func (h headerInfo) HeaderRLP() ([]byte, error) { func (h headerInfo) HeaderRLP() ([]byte, error) {
return rlp.EncodeToBytes(h.Header) return rlp.EncodeToBytes(h.Header)
} }
...@@ -103,7 +107,16 @@ type rpcHeader struct { ...@@ -103,7 +107,16 @@ type rpcHeader struct {
BaseFee *hexutil.Big `json:"baseFeePerGas"` BaseFee *hexutil.Big `json:"baseFeePerGas"`
// WithdrawalsRoot was added by EIP-4895 and is ignored in legacy headers. // WithdrawalsRoot was added by EIP-4895 and is ignored in legacy headers.
WithdrawalsRoot *common.Hash `json:"withdrawalsRoot"` WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
// BlobGasUsed was added by EIP-4844 and is ignored in legacy headers.
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed,omitempty"`
// ExcessBlobGas was added by EIP-4844 and is ignored in legacy headers.
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas,omitempty"`
// ParentBeaconRoot was added by EIP-4788 and is ignored in legacy headers.
ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot,omitempty"`
// untrusted info included by RPC, may have to be checked // untrusted info included by RPC, may have to be checked
Hash common.Hash `json:"hash"` Hash common.Hash `json:"hash"`
...@@ -156,6 +169,10 @@ func (hdr *rpcHeader) createGethHeader() *types.Header { ...@@ -156,6 +169,10 @@ func (hdr *rpcHeader) createGethHeader() *types.Header {
Nonce: hdr.Nonce, Nonce: hdr.Nonce,
BaseFee: (*big.Int)(hdr.BaseFee), BaseFee: (*big.Int)(hdr.BaseFee),
WithdrawalsHash: hdr.WithdrawalsRoot, WithdrawalsHash: hdr.WithdrawalsRoot,
// Cancun
BlobGasUsed: (*uint64)(hdr.BlobGasUsed),
ExcessBlobGas: (*uint64)(hdr.ExcessBlobGas),
ParentBeaconRoot: hdr.ParentBeaconRoot,
} }
} }
...@@ -185,7 +202,7 @@ func (block *rpcBlock) verify() error { ...@@ -185,7 +202,7 @@ func (block *rpcBlock) verify() error {
} }
for i, tx := range block.Transactions { for i, tx := range block.Transactions {
if tx == nil { if tx == nil {
return fmt.Errorf("block tx %d is null", i) return fmt.Errorf("block tx %d is nil", i)
} }
} }
if computed := types.DeriveSha(types.Transactions(block.Transactions), trie.NewStackTrie(nil)); block.TxHash != computed { if computed := types.DeriveSha(types.Transactions(block.Transactions), trie.NewStackTrie(nil)); block.TxHash != computed {
......
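The three new header fields are pointers tagged omitempty, so pre-Cancun headers round-trip with no extra JSON keys while Cancun headers carry them. A pared-down sketch of that behavior (rpcHeader itself is unexported; the struct below only mirrors the added fields):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

// cancunHeaderFields is a stand-in for the Cancun additions to rpcHeader.
type cancunHeaderFields struct {
	BlobGasUsed      *hexutil.Uint64 `json:"blobGasUsed,omitempty"`
	ExcessBlobGas    *hexutil.Uint64 `json:"excessBlobGas,omitempty"`
	ParentBeaconRoot *common.Hash    `json:"parentBeaconBlockRoot,omitempty"`
}

func main() {
	legacy, _ := json.Marshal(cancunHeaderFields{}) // prints {} for a legacy header
	used := hexutil.Uint64(0)
	root := common.Hash{}
	cancun, _ := json.Marshal(cancunHeaderFields{BlobGasUsed: &used, ExcessBlobGas: &used, ParentBeaconRoot: &root})
	fmt.Println(string(legacy))
	fmt.Println(string(cancun))
}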
...@@ -23,6 +23,7 @@ type MockBlockInfo struct { ...@@ -23,6 +23,7 @@ type MockBlockInfo struct {
InfoBaseFee *big.Int InfoBaseFee *big.Int
InfoReceiptRoot common.Hash InfoReceiptRoot common.Hash
InfoGasUsed uint64 InfoGasUsed uint64
InfoGasLimit uint64
InfoHeaderRLP []byte InfoHeaderRLP []byte
} }
...@@ -66,6 +67,10 @@ func (l *MockBlockInfo) GasUsed() uint64 { ...@@ -66,6 +67,10 @@ func (l *MockBlockInfo) GasUsed() uint64 {
return l.InfoGasUsed return l.InfoGasUsed
} }
func (l *MockBlockInfo) GasLimit() uint64 {
return l.InfoGasLimit
}
func (l *MockBlockInfo) ID() eth.BlockID { func (l *MockBlockInfo) ID() eth.BlockID {
return eth.BlockID{Hash: l.InfoHash, Number: l.InfoNum} return eth.BlockID{Hash: l.InfoHash, Number: l.InfoNum}
} }
......
# RPC for the network to deploy to
export ETH_RPC_URL=
# Sets the deployer's key to match the first default hardhat account
export PRIVATE_KEY=ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80
# Name of the deployed network
export DEPLOYMENT_CONTEXT=getting-started
# Optional Tenderly details for a simulation link during deployment
export TENDERLY_PROJECT=
export TENDERLY_USERNAME=
export ETHERSCAN_API_KEY=
# Optional create2 salt for deterministic deployment of
# contract implementations
export IMPL_SALT=$(openssl rand -hex 32)
...@@ -26,3 +26,6 @@ deployments/1337 ...@@ -26,3 +26,6 @@ deployments/1337
# Devnet config which changes with each 'make devnet-up' # Devnet config which changes with each 'make devnet-up'
deploy-config/devnetL1.json deploy-config/devnetL1.json
# Getting Started guide deploy config
deploy-config/getting-started.json
# `SystemConfig` Invariants # `SystemConfig` Invariants
## The gas limit of the `SystemConfig` contract can never be lower than the hard-coded lower bound. ## The gas limit of the `SystemConfig` contract can never be lower than the hard-coded lower bound.
**Test:** [`SystemConfig.t.sol#L80`](../test/invariants/SystemConfig.t.sol#L80) **Test:** [`SystemConfig.t.sol#L70`](../test/invariants/SystemConfig.t.sol#L70)
Subproject commit e8a047e3f40f13fa37af6fe14e6e06283d9a060e Subproject commit 37a37ab73364d6644bfe11edf88a07880f99bd56
...@@ -20,5 +20,22 @@ ...@@ -20,5 +20,22 @@
"faucetOnchainAuthModuleAmount": 1000000000000000000, "faucetOnchainAuthModuleAmount": 1000000000000000000,
"faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC", "faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC",
"faucetOffchainAuthModuleTtl": 86400, "faucetOffchainAuthModuleTtl": 86400,
"faucetOffchainAuthModuleAmount": 50000000000000000 "faucetOffchainAuthModuleAmount": 50000000000000000,
"opL1BridgeAddress": "0xFBb0621E0B23b5478B630BD55a5f21f67730B0F1",
"baseL1BridgeAddress": "0xfd0Bf71F60660E2f608ed56e1659C450eB113120",
"pgnL1BridgeAddress": "0xFaE6abCAF30D23e233AC7faF747F2fC3a5a6Bfa3",
"zoraL1BridgeAddress": "0x5376f1D543dcbB5BD416c56C189e4cB7399fCcCB",
"orderlyL1BridgeAddress": "0x1Af0494040d6904A9F3EE21921de4b359C736333",
"modeL1BridgeAddress": "0xbC5C679879B2965296756CD959C3C739769995E2",
"lyraL1BridgeAddress": "0x915f179A77FB2e1AeA8b56Ebc0D75A7e1A8a7A17",
"installOpChainFaucetsDrips": false,
"archivePreviousOpChainFaucetsDrips": false,
"dripVersion": 1,
"previousDripVersion": 0,
"smallOpChainFaucetDripValue": 2000000000000000000,
"smallOpChainFaucetDripInterval": 86400,
"largeOpChainFaucetDripValue": 34000000000000000000,
"largeOpChainFaucetDripInterval": 86400,
"opChainAdminWalletDripValue": 1000000000000000000,
"opChainAdminWalletDripInterval": 2592000
} }
...@@ -20,5 +20,22 @@ ...@@ -20,5 +20,22 @@
"faucetOnchainAuthModuleAmount": 1000000000000000000, "faucetOnchainAuthModuleAmount": 1000000000000000000,
"faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC", "faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC",
"faucetOffchainAuthModuleTtl": 86400, "faucetOffchainAuthModuleTtl": 86400,
"faucetOffchainAuthModuleAmount": 50000000000000000 "faucetOffchainAuthModuleAmount": 50000000000000000,
"opL1BridgeAddress": "0xFBb0621E0B23b5478B630BD55a5f21f67730B0F1",
"baseL1BridgeAddress": "0xfd0Bf71F60660E2f608ed56e1659C450eB113120",
"pgnL1BridgeAddress": "0xFaE6abCAF30D23e233AC7faF747F2fC3a5a6Bfa3",
"zoraL1BridgeAddress": "0x5376f1D543dcbB5BD416c56C189e4cB7399fCcCB",
"orderlyL1BridgeAddress": "0x1Af0494040d6904A9F3EE21921de4b359C736333",
"modeL1BridgeAddress": "0xbC5C679879B2965296756CD959C3C739769995E2",
"lyraL1BridgeAddress": "0x915f179A77FB2e1AeA8b56Ebc0D75A7e1A8a7A17",
"installOpChainFaucetsDrips": false,
"archivePreviousOpChainFaucetsDrips": false,
"dripVersion": 1,
"previousDripVersion": 0,
"smallOpChainFaucetDripValue": 2000000000000000000,
"smallOpChainFaucetDripInterval": 86400,
"largeOpChainFaucetDripValue": 34000000000000000000,
"largeOpChainFaucetDripInterval": 86400,
"opChainAdminWalletDripValue": 1000000000000000000,
"opChainAdminWalletDripInterval": 2592000
} }
...@@ -20,5 +20,22 @@ ...@@ -20,5 +20,22 @@
"faucetOnchainAuthModuleAmount": 1000000000000000000, "faucetOnchainAuthModuleAmount": 1000000000000000000,
"faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC", "faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC",
"faucetOffchainAuthModuleTtl": 86400, "faucetOffchainAuthModuleTtl": 86400,
"faucetOffchainAuthModuleAmount": 50000000000000000 "faucetOffchainAuthModuleAmount": 50000000000000000,
"opL1BridgeAddress": "0xFBb0621E0B23b5478B630BD55a5f21f67730B0F1",
"baseL1BridgeAddress": "0xfd0Bf71F60660E2f608ed56e1659C450eB113120",
"pgnL1BridgeAddress": "0xFaE6abCAF30D23e233AC7faF747F2fC3a5a6Bfa3",
"zoraL1BridgeAddress": "0x5376f1D543dcbB5BD416c56C189e4cB7399fCcCB",
"orderlyL1BridgeAddress": "0x1Af0494040d6904A9F3EE21921de4b359C736333",
"modeL1BridgeAddress": "0xbC5C679879B2965296756CD959C3C739769995E2",
"lyraL1BridgeAddress": "0x915f179A77FB2e1AeA8b56Ebc0D75A7e1A8a7A17",
"installOpChainFaucetsDrips": false,
"archivePreviousOpChainFaucetsDrips": false,
"dripVersion": 1,
"previousDripVersion": 0,
"smallOpChainFaucetDripValue": 2000000000000000000,
"smallOpChainFaucetDripInterval": 86400,
"largeOpChainFaucetDripValue": 34000000000000000000,
"largeOpChainFaucetDripInterval": 86400,
"opChainAdminWalletDripValue": 1000000000000000000,
"opChainAdminWalletDripInterval": 2592000
} }
...@@ -20,5 +20,22 @@ ...@@ -20,5 +20,22 @@
"faucetOnchainAuthModuleAmount": 1000000000000000000, "faucetOnchainAuthModuleAmount": 1000000000000000000,
"faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC", "faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC",
"faucetOffchainAuthModuleTtl": 86400, "faucetOffchainAuthModuleTtl": 86400,
"faucetOffchainAuthModuleAmount": 50000000000000000 "faucetOffchainAuthModuleAmount": 50000000000000000,
"opL1BridgeAddress": "0xFBb0621E0B23b5478B630BD55a5f21f67730B0F1",
"baseL1BridgeAddress": "0xfd0Bf71F60660E2f608ed56e1659C450eB113120",
"pgnL1BridgeAddress": "0xFaE6abCAF30D23e233AC7faF747F2fC3a5a6Bfa3",
"zoraL1BridgeAddress": "0x5376f1D543dcbB5BD416c56C189e4cB7399fCcCB",
"orderlyL1BridgeAddress": "0x1Af0494040d6904A9F3EE21921de4b359C736333",
"modeL1BridgeAddress": "0xbC5C679879B2965296756CD959C3C739769995E2",
"lyraL1BridgeAddress": "0x915f179A77FB2e1AeA8b56Ebc0D75A7e1A8a7A17",
"installOpChainFaucetsDrips": false,
"archivePreviousOpChainFaucetsDrips": false,
"dripVersion": 1,
"previousDripVersion": 0,
"smallOpChainFaucetDripValue": 2000000000000000000,
"smallOpChainFaucetDripInterval": 86400,
"largeOpChainFaucetDripValue": 34000000000000000000,
"largeOpChainFaucetDripInterval": 86400,
"opChainAdminWalletDripValue": 1000000000000000000,
"opChainAdminWalletDripInterval": 2592000
} }
...@@ -20,5 +20,22 @@ ...@@ -20,5 +20,22 @@
"faucetOnchainAuthModuleAmount": 1000000000000000000, "faucetOnchainAuthModuleAmount": 1000000000000000000,
"faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC", "faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC",
"faucetOffchainAuthModuleTtl": 86400, "faucetOffchainAuthModuleTtl": 86400,
"faucetOffchainAuthModuleAmount": 50000000000000000 "faucetOffchainAuthModuleAmount": 50000000000000000,
"opL1BridgeAddress": "0xFBb0621E0B23b5478B630BD55a5f21f67730B0F1",
"baseL1BridgeAddress": "0xfd0Bf71F60660E2f608ed56e1659C450eB113120",
"pgnL1BridgeAddress": "0xFaE6abCAF30D23e233AC7faF747F2fC3a5a6Bfa3",
"zoraL1BridgeAddress": "0x5376f1D543dcbB5BD416c56C189e4cB7399fCcCB",
"orderlyL1BridgeAddress": "0x1Af0494040d6904A9F3EE21921de4b359C736333",
"modeL1BridgeAddress": "0xbC5C679879B2965296756CD959C3C739769995E2",
"lyraL1BridgeAddress": "0x915f179A77FB2e1AeA8b56Ebc0D75A7e1A8a7A17",
"installOpChainFaucetsDrips": false,
"archivePreviousOpChainFaucetsDrips": false,
"dripVersion": 1,
"previousDripVersion": 0,
"smallOpChainFaucetDripValue": 2000000000000000000,
"smallOpChainFaucetDripInterval": 86400,
"largeOpChainFaucetDripValue": 34000000000000000000,
"largeOpChainFaucetDripInterval": 86400,
"opChainAdminWalletDripValue": 1000000000000000000,
"opChainAdminWalletDripInterval": 2592000
} }
...@@ -20,5 +20,22 @@ ...@@ -20,5 +20,22 @@
"faucetOnchainAuthModuleAmount": 1000000000000000000, "faucetOnchainAuthModuleAmount": 1000000000000000000,
"faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC", "faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC",
"faucetOffchainAuthModuleTtl": 86400, "faucetOffchainAuthModuleTtl": 86400,
"faucetOffchainAuthModuleAmount": 50000000000000000 "faucetOffchainAuthModuleAmount": 50000000000000000,
"opL1BridgeAddress": "0xFBb0621E0B23b5478B630BD55a5f21f67730B0F1",
"baseL1BridgeAddress": "0xfd0Bf71F60660E2f608ed56e1659C450eB113120",
"pgnL1BridgeAddress": "0xFaE6abCAF30D23e233AC7faF747F2fC3a5a6Bfa3",
"zoraL1BridgeAddress": "0x5376f1D543dcbB5BD416c56C189e4cB7399fCcCB",
"orderlyL1BridgeAddress": "0x1Af0494040d6904A9F3EE21921de4b359C736333",
"modeL1BridgeAddress": "0xbC5C679879B2965296756CD959C3C739769995E2",
"lyraL1BridgeAddress": "0x915f179A77FB2e1AeA8b56Ebc0D75A7e1A8a7A17",
"installOpChainFaucetsDrips": false,
"archivePreviousOpChainFaucetsDrips": false,
"dripVersion": 1,
"previousDripVersion": 0,
"smallOpChainFaucetDripValue": 2000000000000000000,
"smallOpChainFaucetDripInterval": 86400,
"largeOpChainFaucetDripValue": 34000000000000000000,
"largeOpChainFaucetDripInterval": 86400,
"opChainAdminWalletDripValue": 1000000000000000000,
"opChainAdminWalletDripInterval": 2592000
} }
...@@ -20,5 +20,22 @@ ...@@ -20,5 +20,22 @@
"faucetOnchainAuthModuleAmount": 1000000000000000000, "faucetOnchainAuthModuleAmount": 1000000000000000000,
"faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC", "faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC",
"faucetOffchainAuthModuleTtl": 86400, "faucetOffchainAuthModuleTtl": 86400,
"faucetOffchainAuthModuleAmount": 50000000000000000 "faucetOffchainAuthModuleAmount": 50000000000000000,
"opL1BridgeAddress": "0x636Af16bf2f682dD3109e60102b8E1A089FedAa8",
"baseL1BridgeAddress": "0xfd0Bf71F60660E2f608ed56e1659C450eB113120",
"pgnL1BridgeAddress": "0xFaE6abCAF30D23e233AC7faF747F2fC3a5a6Bfa3",
"zoraL1BridgeAddress": "0x5376f1D543dcbB5BD416c56C189e4cB7399fCcCB",
"orderlyL1BridgeAddress": "0x1Af0494040d6904A9F3EE21921de4b359C736333",
"modeL1BridgeAddress": "0xbC5C679879B2965296756CD959C3C739769995E2",
"lyraL1BridgeAddress": "0x915f179A77FB2e1AeA8b56Ebc0D75A7e1A8a7A17",
"installOpChainFaucetsDrips": false,
"archivePreviousOpChainFaucetsDrips": false,
"dripVersion": 1,
"previousDripVersion": 0,
"smallOpChainFaucetDripValue": 2000000000000000000,
"smallOpChainFaucetDripInterval": 86400,
"largeOpChainFaucetDripValue": 34000000000000000000,
"largeOpChainFaucetDripInterval": 86400,
"opChainAdminWalletDripValue": 1000000000000000000,
"opChainAdminWalletDripInterval": 2592000
} }
...@@ -20,5 +20,22 @@ ...@@ -20,5 +20,22 @@
"faucetOnchainAuthModuleAmount": 1000000000000000000, "faucetOnchainAuthModuleAmount": 1000000000000000000,
"faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC", "faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC",
"faucetOffchainAuthModuleTtl": 86400, "faucetOffchainAuthModuleTtl": 86400,
"faucetOffchainAuthModuleAmount": 50000000000000000 "faucetOffchainAuthModuleAmount": 50000000000000000,
"opL1BridgeAddress": "0xFBb0621E0B23b5478B630BD55a5f21f67730B0F1",
"baseL1BridgeAddress": "0xfd0Bf71F60660E2f608ed56e1659C450eB113120",
"pgnL1BridgeAddress": "0xFaE6abCAF30D23e233AC7faF747F2fC3a5a6Bfa3",
"zoraL1BridgeAddress": "0x5376f1D543dcbB5BD416c56C189e4cB7399fCcCB",
"orderlyL1BridgeAddress": "0x1Af0494040d6904A9F3EE21921de4b359C736333",
"modeL1BridgeAddress": "0xbC5C679879B2965296756CD959C3C739769995E2",
"lyraL1BridgeAddress": "0x915f179A77FB2e1AeA8b56Ebc0D75A7e1A8a7A17",
"installOpChainFaucetsDrips": false,
"archivePreviousOpChainFaucetsDrips": false,
"dripVersion": 1,
"previousDripVersion": 0,
"smallOpChainFaucetDripValue": 2000000000000000000,
"smallOpChainFaucetDripInterval": 86400,
"largeOpChainFaucetDripValue": 34000000000000000000,
"largeOpChainFaucetDripInterval": 86400,
"opChainAdminWalletDripValue": 1000000000000000000,
"opChainAdminWalletDripInterval": 2592000
} }
{
"faucetAdmin": "0x212E789D4523D4BAF464f8Fb2A9B9dff2B36e5A6",
"faucetDrippieOwner": "0x10ab157483dd308f8B38aCF2ad823dfD255F56b5",
"faucetDripV1Value": 20000000000000000000,
"faucetDripV1Interval": 3600,
"faucetDripV1Threshold": 100000000000000000000,
"faucetDripV2Interval": 604800,
"faucetDripV2Threshold": 20000000000000000000,
"faucetDripV2Value": 500000000000000000000,
"faucetAdminDripV1Interval": 86400,
"faucetAdminDripV1Threshold": 100000000000000000,
"faucetAdminDripV1Value": 1000000000000000000,
"faucetGelatoTreasury": "0x644CB00854EDC55FE8CCC9c1967BABb22F08Ad2f",
"faucetGelatoRecipient": "0x0E9b4649eB0760A4F01646636E032D68cFDe58FF",
"faucetGelatoBalanceV1DripInterval": 86400,
"faucetGelatoBalanceV1Value": 1000000000000000000,
"faucetGelatoThreshold": 100000000000000000,
"faucetOnchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC",
"faucetOnchainAuthModuleTtl": 86400,
"faucetOnchainAuthModuleAmount": 1000000000000000000,
"faucetOffchainAuthModuleAdmin": "0xFe44Ae787A632c45ACea658492dDBebE39f002aC",
"faucetOffchainAuthModuleTtl": 86400,
"faucetOffchainAuthModuleAmount": 50000000000000000,
"opL1BridgeAddress": "0xFBb0621E0B23b5478B630BD55a5f21f67730B0F1",
"baseL1BridgeAddress": "0xfd0Bf71F60660E2f608ed56e1659C450eB113120",
"pgnL1BridgeAddress": "0xFaE6abCAF30D23e233AC7faF747F2fC3a5a6Bfa3",
"zoraL1BridgeAddress": "0x5376f1D543dcbB5BD416c56C189e4cB7399fCcCB",
"orderlyL1BridgeAddress": "0x1Af0494040d6904A9F3EE21921de4b359C736333",
"modeL1BridgeAddress": "0xbC5C679879B2965296756CD959C3C739769995E2",
"lyraL1BridgeAddress": "0x915f179A77FB2e1AeA8b56Ebc0D75A7e1A8a7A17",
"installOpChainFaucetsDrips": true,
"archivePreviousOpChainFaucetsDrips": false,
"dripVersion": 2,
"previousDripVersion": 1,
"smallOpChainFaucetDripValue": 2000000000000000000,
"smallOpChainFaucetDripInterval": 86400,
"largeOpChainFaucetDripValue": 34000000000000000000,
"largeOpChainFaucetDripInterval": 86400,
"opChainAdminWalletDripValue": 1000000000000000000,
"opChainAdminWalletDripInterval": 2592000
}
...@@ -88,7 +88,10 @@ abstract contract Deployer is Script { ...@@ -88,7 +88,10 @@ abstract contract Deployer is Script {
string memory chainIdPath = string.concat(deploymentsDir, "/.chainId"); string memory chainIdPath = string.concat(deploymentsDir, "/.chainId");
try vm.readFile(chainIdPath) returns (string memory localChainId) { try vm.readFile(chainIdPath) returns (string memory localChainId) {
if (vm.envOr("STRICT_DEPLOYMENT", true)) { if (vm.envOr("STRICT_DEPLOYMENT", true)) {
require(vm.parseUint(localChainId) == chainId, "Misconfigured networks"); require(
vm.parseUint(localChainId) == chainId,
string.concat("Misconfigured networks: ", localChainId, " != ", vm.toString(chainId))
);
} }
} catch { } catch {
vm.writeFile(chainIdPath, vm.toString(chainId)); vm.writeFile(chainIdPath, vm.toString(chainId));
...@@ -281,7 +284,7 @@ abstract contract Deployer is Script { ...@@ -281,7 +284,7 @@ abstract contract Deployer is Script {
} }
/// @notice Returns the contract name from a deploy transaction. /// @notice Returns the contract name from a deploy transaction.
function _getContractNameFromDeployTransaction(string memory _deployTx) internal returns (string memory) { function _getContractNameFromDeployTransaction(string memory _deployTx) internal pure returns (string memory) {
return stdJson.readString(_deployTx, ".contractName"); return stdJson.readString(_deployTx, ".contractName");
} }
......
...@@ -34,6 +34,25 @@ contract PeripheryDeployConfig is Script { ...@@ -34,6 +34,25 @@ contract PeripheryDeployConfig is Script {
address public faucetOffchainAuthModuleAdmin; address public faucetOffchainAuthModuleAdmin;
uint256 public faucetOffchainAuthModuleTtl; uint256 public faucetOffchainAuthModuleTtl;
uint256 public faucetOffchainAuthModuleAmount; uint256 public faucetOffchainAuthModuleAmount;
bool public installOpChainFaucetsDrips;
bool public archivePreviousOpChainFaucetsDrips;
uint256 public smallOpChainFaucetDripValue;
uint256 public smallOpChainFaucetDripInterval;
uint256 public largeOpChainFaucetDripValue;
uint256 public largeOpChainFaucetDripInterval;
uint256 public opChainAdminWalletDripValue;
uint256 public opChainAdminWalletDripInterval;
address public opL1BridgeAddress;
address public baseL1BridgeAddress;
address public zoraL1BridgeAddress;
address public pgnL1BridgeAddress;
address public orderlyL1BridgeAddress;
address public modeL1BridgeAddress;
address public lyraL1BridgeAddress;
address[5] public smallFaucetsL1BridgeAddresses;
address[2] public largeFaucetsL1BridgeAddresses;
uint256 public dripVersion;
uint256 public previousDripVersion;
constructor(string memory _path) { constructor(string memory _path) {
console.log("PeripheryDeployConfig: reading file %s", _path); console.log("PeripheryDeployConfig: reading file %s", _path);
...@@ -66,5 +85,37 @@ contract PeripheryDeployConfig is Script { ...@@ -66,5 +85,37 @@ contract PeripheryDeployConfig is Script {
faucetOffchainAuthModuleAdmin = stdJson.readAddress(_json, "$.faucetOffchainAuthModuleAdmin"); faucetOffchainAuthModuleAdmin = stdJson.readAddress(_json, "$.faucetOffchainAuthModuleAdmin");
faucetOffchainAuthModuleTtl = stdJson.readUint(_json, "$.faucetOffchainAuthModuleTtl"); faucetOffchainAuthModuleTtl = stdJson.readUint(_json, "$.faucetOffchainAuthModuleTtl");
faucetOffchainAuthModuleAmount = stdJson.readUint(_json, "$.faucetOffchainAuthModuleAmount"); faucetOffchainAuthModuleAmount = stdJson.readUint(_json, "$.faucetOffchainAuthModuleAmount");
installOpChainFaucetsDrips = stdJson.readBool(_json, "$.installOpChainFaucetsDrips");
archivePreviousOpChainFaucetsDrips = stdJson.readBool(_json, "$.archivePreviousOpChainFaucetsDrips");
opL1BridgeAddress = stdJson.readAddress(_json, "$.opL1BridgeAddress");
baseL1BridgeAddress = stdJson.readAddress(_json, "$.baseL1BridgeAddress");
zoraL1BridgeAddress = stdJson.readAddress(_json, "$.zoraL1BridgeAddress");
pgnL1BridgeAddress = stdJson.readAddress(_json, "$.pgnL1BridgeAddress");
orderlyL1BridgeAddress = stdJson.readAddress(_json, "$.orderlyL1BridgeAddress");
modeL1BridgeAddress = stdJson.readAddress(_json, "$.modeL1BridgeAddress");
lyraL1BridgeAddress = stdJson.readAddress(_json, "$.lyraL1BridgeAddress");
dripVersion = stdJson.readUint(_json, "$.dripVersion");
previousDripVersion = stdJson.readUint(_json, "$.previousDripVersion");
smallOpChainFaucetDripValue = stdJson.readUint(_json, "$.smallOpChainFaucetDripValue");
smallOpChainFaucetDripInterval = stdJson.readUint(_json, "$.smallOpChainFaucetDripInterval");
largeOpChainFaucetDripValue = stdJson.readUint(_json, "$.largeOpChainFaucetDripValue");
largeOpChainFaucetDripInterval = stdJson.readUint(_json, "$.largeOpChainFaucetDripInterval");
opChainAdminWalletDripValue = stdJson.readUint(_json, "$.opChainAdminWalletDripValue");
opChainAdminWalletDripInterval = stdJson.readUint(_json, "$.opChainAdminWalletDripInterval");
largeFaucetsL1BridgeAddresses[0] = opL1BridgeAddress;
largeFaucetsL1BridgeAddresses[1] = baseL1BridgeAddress;
smallFaucetsL1BridgeAddresses[0] = zoraL1BridgeAddress;
smallFaucetsL1BridgeAddresses[1] = pgnL1BridgeAddress;
smallFaucetsL1BridgeAddresses[2] = orderlyL1BridgeAddress;
smallFaucetsL1BridgeAddresses[3] = modeL1BridgeAddress;
smallFaucetsL1BridgeAddresses[4] = lyraL1BridgeAddress;
}
function getSmallFaucetsL1BridgeAddressesCount() public view returns (uint256 count) {
return smallFaucetsL1BridgeAddresses.length;
}
function getLargeFaucetsL1BridgeAddressesCount() public view returns (uint256 count) {
return largeFaucetsL1BridgeAddresses.length;
} }
} }
#!/usr/bin/env bash
# This script is used to generate the getting-started.json configuration file
# used in the Getting Started quickstart guide on the docs site. Avoids the
# need to have the getting-started.json committed to the repo since it's an
# invalid JSON file when not filled in, which is annoying.
reqenv() {
if [ -z "${!1}" ]; then
echo "Error: environment variable '$1' is undefined"
exit 1
fi
}
# Check required environment variables
reqenv "GS_ADMIN_ADDRESS"
reqenv "GS_BATCHER_ADDRESS"
reqenv "GS_PROPOSER_ADDRESS"
reqenv "GS_SEQUENCER_ADDRESS"
reqenv "L1_RPC_URL"
# Get the finalized block timestamp and hash
block=$(cast block finalized --rpc-url $L1_RPC_URL)
timestamp=$(echo "$block" | awk '/timestamp/ { print $2 }')
blockhash=$(echo "$block" | awk '/hash/ { print $2 }')
# Generate the config file
config=$(cat << EOL
{ {
"finalSystemOwner": "ADMIN", "finalSystemOwner": "$GS_ADMIN_ADDRESS",
"portalGuardian": "ADMIN", "portalGuardian": "$GS_ADMIN_ADDRESS",
"l1StartingBlockTag": "BLOCKHASH", "l1StartingBlockTag": "$blockhash",
"l1ChainID": 5, "l1ChainID": 11155111,
"l2ChainID": 42069, "l2ChainID": 42069,
"l2BlockTime": 2, "l2BlockTime": 2,
"l1BlockTime": 12, "l1BlockTime": 12,
...@@ -13,23 +41,23 @@ ...@@ -13,23 +41,23 @@
"sequencerWindowSize": 3600, "sequencerWindowSize": 3600,
"channelTimeout": 300, "channelTimeout": 300,
"p2pSequencerAddress": "SEQUENCER", "p2pSequencerAddress": "$GS_SEQUENCER_ADDRESS",
"batchInboxAddress": "0xff00000000000000000000000000000000042069", "batchInboxAddress": "0xff00000000000000000000000000000000042069",
"batchSenderAddress": "BATCHER", "batchSenderAddress": "$GS_BATCHER_ADDRESS",
"l2OutputOracleSubmissionInterval": 120, "l2OutputOracleSubmissionInterval": 120,
"l2OutputOracleStartingBlockNumber": 0, "l2OutputOracleStartingBlockNumber": 0,
"l2OutputOracleStartingTimestamp": TIMESTAMP, "l2OutputOracleStartingTimestamp": $timestamp,
"l2OutputOracleProposer": "PROPOSER", "l2OutputOracleProposer": "$GS_PROPOSER_ADDRESS",
"l2OutputOracleChallenger": "ADMIN", "l2OutputOracleChallenger": "$GS_ADMIN_ADDRESS",
"finalizationPeriodSeconds": 12, "finalizationPeriodSeconds": 12,
"proxyAdminOwner": "ADMIN", "proxyAdminOwner": "$GS_ADMIN_ADDRESS",
"baseFeeVaultRecipient": "ADMIN", "baseFeeVaultRecipient": "$GS_ADMIN_ADDRESS",
"l1FeeVaultRecipient": "ADMIN", "l1FeeVaultRecipient": "$GS_ADMIN_ADDRESS",
"sequencerFeeVaultRecipient": "ADMIN", "sequencerFeeVaultRecipient": "$GS_ADMIN_ADDRESS",
"baseFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", "baseFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000",
"l1FeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", "l1FeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000",
...@@ -44,7 +72,7 @@ ...@@ -44,7 +72,7 @@
"enableGovernance": true, "enableGovernance": true,
"governanceTokenSymbol": "OP", "governanceTokenSymbol": "OP",
"governanceTokenName": "Optimism", "governanceTokenName": "Optimism",
"governanceTokenOwner": "ADMIN", "governanceTokenOwner": "$GS_ADMIN_ADDRESS",
"l2GenesisBlockGasLimit": "0x1c9c380", "l2GenesisBlockGasLimit": "0x1c9c380",
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00", "l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
...@@ -59,3 +87,8 @@ ...@@ -59,3 +87,8 @@
"requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
"recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000" "recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000"
} }
EOL
)
# Write the config file
echo "$config" > deploy-config/getting-started.json
#!/usr/bin/env bash
# This script prints out the versions of the various tools used in the Getting
# Started quickstart guide on the docs site. Simplifies things for users so
# they can easily see if they're using the right versions of everything.
version() {
local string=$1
local version_regex='([0-9]+(\.[0-9]+)+)'
if [[ $string =~ $version_regex ]]; then
echo "${BASH_REMATCH[1]}"
else
echo "No version found."
fi
}
# Grab versions
ver_git=$(version "$(git --version)")
ver_go=$(version "$(go version)")
ver_node=$(version "$(node --version)")
ver_pnpm=$(version "$(pnpm --version)")
ver_foundry=$(version "$(forge --version)")
ver_make=$(version "$(make --version)")
ver_jq=$(version "$(jq --version)")
ver_direnv=$(version "$(direnv --version)")
# Print versions
echo "Dependency | Minimum | Actual"
echo "git 2 $ver_git"
echo "go 1.21 $ver_go"
echo "node 20 $ver_node"
echo "pnpm 8 $ver_pnpm"
echo "foundry 0.2.0 $ver_foundry"
echo "make 3 $ver_make"
echo "jq 1.6 $ver_jq"
echo "direnv 2 $ver_direnv"
#!/usr/bin/env bash
# This script is used to generate the four wallets that are used in the Getting
# Started quickstart guide on the docs site. Simplifies things for users
# slightly while also avoiding the need for users to manually copy/paste a
# bunch of stuff over to the environment file.
# Generate wallets
wallet1=$(cast wallet new)
wallet2=$(cast wallet new)
wallet3=$(cast wallet new)
wallet4=$(cast wallet new)
# Grab wallet addresses
address1=$(echo "$wallet1" | awk '/Address/ { print $2 }')
address2=$(echo "$wallet2" | awk '/Address/ { print $2 }')
address3=$(echo "$wallet3" | awk '/Address/ { print $2 }')
address4=$(echo "$wallet4" | awk '/Address/ { print $2 }')
# Grab wallet private keys
key1=$(echo "$wallet1" | awk '/Private key/ { print $3 }')
key2=$(echo "$wallet2" | awk '/Private key/ { print $3 }')
key3=$(echo "$wallet3" | awk '/Private key/ { print $3 }')
key4=$(echo "$wallet4" | awk '/Private key/ { print $3 }')
# Print out the environment variables to copy
echo "Copy the following into your .envrc file:"
echo
echo "# Admin account"
echo "export GS_ADMIN_ADDRESS=$address1"
echo "export GS_ADMIN_PRIVATE_KEY=$key1"
echo
echo "# Batcher account"
echo "export GS_BATCHER_ADDRESS=$address2"
echo "export GS_BATCHER_PRIVATE_KEY=$key2"
echo
echo "# Proposer account"
echo "export GS_PROPOSER_ADDRESS=$address3"
echo "export GS_PROPOSER_PRIVATE_KEY=$key3"
echo
echo "# Sequencer account"
echo "export GS_SEQUENCER_ADDRESS=$address4"
echo "export GS_SEQUENCER_PRIVATE_KEY=$key4"
{ {
"src/EAS/EAS.sol": "0xe5e9700b94a88a2e1baabe4bfa66d5c1c94e811bcc5d2c64526afc649df59118", "src/EAS/EAS.sol": "0x850a0eb089d5a01f489c7239f5b9a1b09120afb1bc80239268215c2dfe1de26c",
"src/EAS/SchemaRegistry.sol": "0x5ee1a0c3b2bf1eb5edb53fb0967cf13856be546f0f16fe7acdc3e4f286db6831", "src/EAS/SchemaRegistry.sol": "0x5ee1a0c3b2bf1eb5edb53fb0967cf13856be546f0f16fe7acdc3e4f286db6831",
"src/L1/DelayedVetoable.sol": "0x276c6276292095e6aa37a70008cf4e0d1cbcc020dbc9107459bbc72ab5ed744f", "src/L1/DelayedVetoable.sol": "0x276c6276292095e6aa37a70008cf4e0d1cbcc020dbc9107459bbc72ab5ed744f",
"src/L1/L1CrossDomainMessenger.sol": "0x2aa4e06827bc48484212eb2bdc30fd604ffd23b37e401b78ef428c12fa9b8385", "src/L1/L1CrossDomainMessenger.sol": "0x2aa4e06827bc48484212eb2bdc30fd604ffd23b37e401b78ef428c12fa9b8385",
......
...@@ -80,11 +80,11 @@ contract EAS is IEAS, ISemver, EIP1271Verifier { ...@@ -80,11 +80,11 @@ contract EAS is IEAS, ISemver, EIP1271Verifier {
uint256[MAX_GAP - 3] private __gap; uint256[MAX_GAP - 3] private __gap;
/// @notice Semantic version. /// @notice Semantic version.
/// @custom:semver 1.3.0 /// @custom:semver 1.4.0
string public constant version = "1.3.0"; string public constant version = "1.4.0";
/// @dev Creates a new EAS instance. /// @dev Creates a new EAS instance.
constructor() EIP1271Verifier("EAS", "1.2.0") { } constructor() EIP1271Verifier("EAS", "1.3.0") { }
/// @inheritdoc IEAS /// @inheritdoc IEAS
function getSchemaRegistry() external pure returns (ISchemaRegistry) { function getSchemaRegistry() external pure returns (ISchemaRegistry) {
......
...@@ -30,13 +30,13 @@ abstract contract EIP1271Verifier is EIP712 { ...@@ -30,13 +30,13 @@ abstract contract EIP1271Verifier is EIP712 {
error InvalidNonce(); error InvalidNonce();
// The hash of the data type used to relay calls to the attest function. It's the value of // The hash of the data type used to relay calls to the attest function. It's the value of
// keccak256("Attest(bytes32 schema,address recipient,uint64 expirationTime,bool revocable,bytes32 refUID,bytes // keccak256("Attest(address attester,bytes32 schema,address recipient,uint64 expirationTime,bool revocable,bytes32
// data,uint256 value,uint256 nonce,uint64 deadline)"). // refUID,bytes data,uint256 value,uint256 nonce,uint64 deadline)").
bytes32 private constant ATTEST_TYPEHASH = 0xf83bb2b0ede93a840239f7e701a54d9bc35f03701f51ae153d601c6947ff3d3f; bytes32 private constant ATTEST_TYPEHASH = 0xfeb2925a02bae3dae48d424a0437a2b6ac939aa9230ddc55a1a76f065d988076;
// The hash of the data type used to relay calls to the revoke function. It's the value of // The hash of the data type used to relay calls to the revoke function. It's the value of
// keccak256("Revoke(bytes32 schema,bytes32 uid,uint256 value,uint256 nonce,uint64 deadline)"). // keccak256("Revoke(address revoker,bytes32 schema,bytes32 uid,uint256 value,uint256 nonce,uint64 deadline)").
bytes32 private constant REVOKE_TYPEHASH = 0x2d4116d8c9824e4c316453e5c2843a1885580374159ce8768603c49085ef424c; bytes32 private constant REVOKE_TYPEHASH = 0xb5d556f07587ec0f08cf386545cc4362c702a001650c2058002615ee5c9d1e75;
// The user readable name of the signing domain. // The user readable name of the signing domain.
bytes32 private immutable _name; bytes32 private immutable _name;
...@@ -116,6 +116,7 @@ abstract contract EIP1271Verifier is EIP712 { ...@@ -116,6 +116,7 @@ abstract contract EIP1271Verifier is EIP712 {
keccak256( keccak256(
abi.encode( abi.encode(
ATTEST_TYPEHASH, ATTEST_TYPEHASH,
request.attester,
request.schema, request.schema,
data.recipient, data.recipient,
data.expirationTime, data.expirationTime,
...@@ -150,7 +151,13 @@ abstract contract EIP1271Verifier is EIP712 { ...@@ -150,7 +151,13 @@ abstract contract EIP1271Verifier is EIP712 {
bytes32 hash = _hashTypedDataV4( bytes32 hash = _hashTypedDataV4(
keccak256( keccak256(
abi.encode( abi.encode(
REVOKE_TYPEHASH, request.schema, data.uid, data.value, _nonces[request.revoker]++, request.deadline REVOKE_TYPEHASH,
request.revoker,
request.schema,
data.uid,
data.value,
_nonces[request.revoker]++,
request.deadline
) )
) )
); );
......
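The updated typehash constants are, per the comments above, the keccak256 of the new type strings that now include the attester and revoker addresses. A quick sanity-check sketch under that assumption, using go-ethereum's crypto package (the printed hashes should match ATTEST_TYPEHASH and REVOKE_TYPEHASH in the diff):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Type strings copied from the comments in EIP1271Verifier.sol.
	attestType := "Attest(address attester,bytes32 schema,address recipient,uint64 expirationTime,bool revocable,bytes32 refUID,bytes data,uint256 value,uint256 nonce,uint64 deadline)"
	revokeType := "Revoke(address revoker,bytes32 schema,bytes32 uid,uint256 value,uint256 nonce,uint64 deadline)"
	fmt.Println("ATTEST_TYPEHASH:", crypto.Keccak256Hash([]byte(attestType)))
	fmt.Println("REVOKE_TYPEHASH:", crypto.Keccak256Hash([]byte(revokeType)))
}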
...@@ -13,7 +13,7 @@ contract SafeCall_Test is Test { ...@@ -13,7 +13,7 @@ contract SafeCall_Test is Test {
vm.assume(from.balance == 0); vm.assume(from.balance == 0);
vm.assume(to.balance == 0); vm.assume(to.balance == 0);
// no precompiles (mainnet) // no precompiles (mainnet)
assumeNoPrecompiles(to, 1); assumeNotPrecompile(to);
// don't call the vm // don't call the vm
vm.assume(to != address(vm)); vm.assume(to != address(vm));
vm.assume(from != address(vm)); vm.assume(from != address(vm));
...@@ -47,7 +47,7 @@ contract SafeCall_Test is Test { ...@@ -47,7 +47,7 @@ contract SafeCall_Test is Test {
vm.assume(from.balance == 0); vm.assume(from.balance == 0);
vm.assume(to.balance == 0); vm.assume(to.balance == 0);
// no precompiles (mainnet) // no precompiles (mainnet)
assumeNoPrecompiles(to, 1); assumeNotPrecompile(to);
// don't call the vm // don't call the vm
vm.assume(to != address(vm)); vm.assume(to != address(vm));
vm.assume(from != address(vm)); vm.assume(from != address(vm));
...@@ -89,7 +89,7 @@ contract SafeCall_Test is Test { ...@@ -89,7 +89,7 @@ contract SafeCall_Test is Test {
vm.assume(from.balance == 0); vm.assume(from.balance == 0);
vm.assume(to.balance == 0); vm.assume(to.balance == 0);
// no precompiles (mainnet) // no precompiles (mainnet)
assumeNoPrecompiles(to, 1); assumeNotPrecompile(to);
// don't call the vm // don't call the vm
vm.assume(to != address(vm)); vm.assume(to != address(vm));
vm.assume(from != address(vm)); vm.assume(from != address(vm));
......
...@@ -8,11 +8,6 @@ import { ResourceMetering } from "src/L1/ResourceMetering.sol"; ...@@ -8,11 +8,6 @@ import { ResourceMetering } from "src/L1/ResourceMetering.sol";
import { Constants } from "src/libraries/Constants.sol"; import { Constants } from "src/libraries/Constants.sol";
contract SystemConfig_GasLimitLowerBound_Invariant is Test { contract SystemConfig_GasLimitLowerBound_Invariant is Test {
struct FuzzInterface {
address target;
string[] artifacts;
}
SystemConfig public config; SystemConfig public config;
function setUp() external { function setUp() external {
...@@ -59,19 +54,14 @@ contract SystemConfig_GasLimitLowerBound_Invariant is Test { ...@@ -59,19 +54,14 @@ contract SystemConfig_GasLimitLowerBound_Invariant is Test {
selectors[0] = config.setGasLimit.selector; selectors[0] = config.setGasLimit.selector;
FuzzSelector memory selector = FuzzSelector({ addr: address(config), selectors: selectors }); FuzzSelector memory selector = FuzzSelector({ addr: address(config), selectors: selectors });
targetSelector(selector); targetSelector(selector);
}
/// @dev Allows the SystemConfig contract to be the target of the invariant test /// Allows the SystemConfig contract to be the target of the invariant test
/// when it is behind a proxy. Foundry calls this function under the hood to /// when it is behind a proxy. Foundry calls this function under the hood to
/// know the ABI to use when calling the target contract. /// know the ABI to use when calling the target contract.
function targetInterfaces() public view returns (FuzzInterface[] memory) {
require(address(config) != address(0), "SystemConfig not initialized");
FuzzInterface[] memory targets = new FuzzInterface[](1);
string[] memory artifacts = new string[](1); string[] memory artifacts = new string[](1);
artifacts[0] = "SystemConfig"; artifacts[0] = "SystemConfig";
targets[0] = FuzzInterface(address(config), artifacts); FuzzInterface memory target = FuzzInterface(address(config), artifacts);
return targets; targetInterface(target);
} }
/// @custom:invariant The gas limit of the `SystemConfig` contract can never be lower /// @custom:invariant The gas limit of the `SystemConfig` contract can never be lower
......
...@@ -82,6 +82,6 @@ ...@@ -82,6 +82,6 @@
"change-case": "4.1.2", "change-case": "4.1.2",
"react": "^18.2.0", "react": "^18.2.0",
"react-dom": "^18.2.0", "react-dom": "^18.2.0",
"viem": "^1.18.4" "viem": "^1.18.6"
} }
} }
...@@ -44,7 +44,7 @@ ...@@ -44,7 +44,7 @@
"jsdom": "^22.1.0", "jsdom": "^22.1.0",
"tsup": "^7.2.0", "tsup": "^7.2.0",
"typescript": "^5.2.2", "typescript": "^5.2.2",
"viem": "^1.18.4", "viem": "^1.18.6",
"vite": "^4.5.0", "vite": "^4.5.0",
"vitest": "^0.34.2" "vitest": "^0.34.2"
}, },
......