Commit 580d6bff authored by mergify[bot]'s avatar mergify[bot] Committed by GitHub

Merge branch 'develop' into op-node-rpc-checks-errs

parents e9303108 b4243cc6
...@@ -880,7 +880,7 @@ jobs: ...@@ -880,7 +880,7 @@ jobs:
patterns: indexer patterns: indexer
- run: - run:
name: Lint name: Lint
command: golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 2m -e "errors.As" -e "errors.Is" ./... command: golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 4m -e "errors.As" -e "errors.Is" ./...
working_directory: indexer working_directory: indexer
- run: - run:
name: install geth name: install geth
...@@ -1053,59 +1053,6 @@ jobs: ...@@ -1053,59 +1053,6 @@ jobs:
name: "Go mod tidy" name: "Go mod tidy"
command: make mod-tidy && git diff --exit-code command: make mod-tidy && git diff --exit-code
hive-test:
parameters:
version:
type: string
default: develop
sim:
type: string
machine:
image: ubuntu-2204:2022.10.2
docker_layer_caching: true
resource_class: large
steps:
- attach_workspace:
at: /tmp/docker_images
- run:
name: Docker Load
command: |
docker load -i /tmp/docker_images/op-batcher_<<parameters.version>>.tar
docker load -i /tmp/docker_images/op-proposer_<<parameters.version>>.tar
docker load -i /tmp/docker_images/op-node_<<parameters.version>>.tar
- run:
command: git clone https://github.com/ethereum-optimism/hive.git .
- go/load-cache
- go/mod-download
- go/save-cache
- run: { command: "go build ." }
- run: { command: "go build junit/junitformatter.go" }
- run:
command: |
./hive \
-sim=<<parameters.sim>> \
-sim.loglevel=5 \
-client=go-ethereum_v1.11.6,op-geth_optimism,op-proposer_<<parameters.version>>,op-batcher_<<parameters.version>>,op-node_<<parameters.version>> |& tee /tmp/hive.log
- run:
command: |
tar -cvf /tmp/workspace.tgz -C /home/circleci/project /home/circleci/project/workspace
name: "Archive workspace"
when: always
- run:
command: |
./junitformatter /home/circleci/project/workspace/logs/*.json > /home/circleci/project/workspace/logs/junit.xml
when: always
- store_artifacts:
path: /tmp/workspace.tgz
destination: hive-workspace.tgz
when: always
- store_test_results:
path: /home/circleci/project/workspace/logs/junit.xml
when: always
- store_artifacts:
path: /home/circleci/project/workspace/logs/junit.xml
when: always
bedrock-go-tests: bedrock-go-tests:
docker: docker:
- image: cimg/go:1.20 - image: cimg/go:1.20
...@@ -1423,33 +1370,6 @@ workflows: ...@@ -1423,33 +1370,6 @@ workflows:
docker_target: wd-mon docker_target: wd-mon
context: context:
- oplabs-gcr - oplabs-gcr
- hive-test:
name: hive-test-rpc
version: <<pipeline.git.revision>>
sim: optimism/rpc
requires:
- op-node-docker-build
- op-batcher-docker-build
- op-proposer-docker-build
- op-challenger-docker-build
- hive-test:
name: hive-test-p2p
version: <<pipeline.git.revision>>
sim: optimism/p2p
requires:
- op-node-docker-build
- op-batcher-docker-build
- op-proposer-docker-build
- op-challenger-docker-build
- hive-test:
name: hive-test-l1ops
version: <<pipeline.git.revision>>
sim: optimism/l1ops
requires:
- op-node-docker-build
- op-batcher-docker-build
- op-proposer-docker-build
- op-challenger-docker-build
- check-generated-mocks-op-node - check-generated-mocks-op-node
- check-generated-mocks-op-service - check-generated-mocks-op-service
- cannon-go-lint-and-test - cannon-go-lint-and-test
......
...@@ -16,7 +16,7 @@ require ( ...@@ -16,7 +16,7 @@ require (
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/go-cmp v0.5.9 github.com/google/go-cmp v0.5.9
github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8 github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8
github.com/google/uuid v1.3.0 github.com/google/uuid v1.3.1
github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru/v2 v2.0.2 github.com/hashicorp/golang-lru/v2 v2.0.2
github.com/holiman/uint256 v1.2.3 github.com/holiman/uint256 v1.2.3
......
...@@ -318,8 +318,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 ...@@ -318,8 +318,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
### Setup env ### Setup env
The `indexer.toml` stores a set of preset environmental variables that can be used to run the indexer with the exception of the network specific `l1-rpc` and `l2-rpc` variables. The `indexer.toml` file can be ran as a default config, otherwise a custom `.toml` config can provided via the `--config` flag when running the application. Additionally, L1 system contract addresses must provided for the specific OP Stack network actively being indexed. Currently the indexer has no way to infer L1 system config addresses provided a L2 chain ID or network enum. The `indexer.toml` stores a set of preset environmental variables that can be used to run the indexer with the exception of the network specific `l1-rpc` and `l2-rpc` variables. The `indexer.toml` file can be ran as a default config, otherwise a custom `.toml` config can provided via the `--config` flag when running the application. An optional `l1-starting-height` value can be provided to the indexer to specify the L1 starting block height to begin indexing from. This should be ideally be an L1 block that holds a correlated L2 genesis commitment. Furthermore, this value must be less than the current L1 block height to pass validation. If no starting height value is provided and the database is empty, the indexer will begin sequentially processing from L1 genesis.
### Testing ### Testing
All tests can be ran by running `make test` from the `/indexer` directory. This will run all unit and e2e tests. All tests can be ran by running `make test` from the `/indexer` directory. This will run all unit and e2e tests.
......
...@@ -2,7 +2,9 @@ package config ...@@ -2,7 +2,9 @@ package config
import ( import (
"fmt" "fmt"
"math/big"
"os" "os"
"reflect"
"github.com/BurntSushi/toml" "github.com/BurntSushi/toml"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -13,11 +15,11 @@ import ( ...@@ -13,11 +15,11 @@ import (
// Config represents the `indexer.toml` file used to configure the indexer // Config represents the `indexer.toml` file used to configure the indexer
type Config struct { type Config struct {
Chain ChainConfig Chain ChainConfig `toml:"chain"`
RPCs RPCsConfig `toml:"rpcs"` RPCs RPCsConfig `toml:"rpcs"`
DB DBConfig DB DBConfig `toml:"db"`
API APIConfig API APIConfig `toml:"api"`
Metrics MetricsConfig Metrics MetricsConfig `toml:"metrics"`
} }
// fetch this via onchain config from RPCsConfig and remove from config in future // fetch this via onchain config from RPCsConfig and remove from config in future
...@@ -34,6 +36,26 @@ type L1Contracts struct { ...@@ -34,6 +36,26 @@ type L1Contracts struct {
// Remove afterwards? // Remove afterwards?
} }
// converts struct of to a slice of addresses for easy iteration
// also validates that all fields are addresses
func (c *L1Contracts) AsSlice() ([]common.Address, error) {
clone := *c
contractValue := reflect.ValueOf(clone)
fields := reflect.VisibleFields(reflect.TypeOf(clone))
l1Contracts := make([]common.Address, len(fields))
for i, field := range fields {
// ruleid: unsafe-reflect-by-name
addr, ok := (contractValue.FieldByName(field.Name).Interface()).(common.Address)
if !ok {
return nil, fmt.Errorf("non-address found in L1Contracts: %s", field.Name)
}
l1Contracts[i] = addr
}
return l1Contracts, nil
}
// ChainConfig configures of the chain being indexed // ChainConfig configures of the chain being indexed
type ChainConfig struct { type ChainConfig struct {
// Configure known chains with the l2 chain id // Configure known chains with the l2 chain id
...@@ -41,8 +63,11 @@ type ChainConfig struct { ...@@ -41,8 +63,11 @@ type ChainConfig struct {
Preset int Preset int
L1Contracts L1Contracts `toml:"l1-contracts"` L1Contracts L1Contracts `toml:"l1-contracts"`
// L1StartingHeight is the block height to start indexing from // L1StartingHeight is the block height to start indexing from
// NOTE - This is currently unimplemented L1StartingHeight uint `toml:"l1-starting-height"`
L1StartingHeight int }
func (cc *ChainConfig) L1StartHeight() *big.Int {
return big.NewInt(int64(cc.L1StartingHeight))
} }
// RPCsConfig configures the RPC urls // RPCsConfig configures the RPC urls
...@@ -53,23 +78,23 @@ type RPCsConfig struct { ...@@ -53,23 +78,23 @@ type RPCsConfig struct {
// DBConfig configures the postgres database // DBConfig configures the postgres database
type DBConfig struct { type DBConfig struct {
Host string Host string `toml:"host"`
Port int Port int `toml:"port"`
Name string Name string `toml:"name"`
User string User string `toml:"user"`
Password string Password string `toml:"password"`
} }
// APIConfig configures the API server // APIConfig configures the API server
type APIConfig struct { type APIConfig struct {
Host string Host string `toml:"host"`
Port int Port int `toml:"port"`
} }
// MetricsConfig configures the metrics server // MetricsConfig configures the metrics server
type MetricsConfig struct { type MetricsConfig struct {
Host string Host string `toml:"host"`
Port int Port int `toml:"port"`
} }
// LoadConfig loads the `indexer.toml` config file from a given path // LoadConfig loads the `indexer.toml` config file from a given path
......
...@@ -139,3 +139,23 @@ func TestLoadConfig_WithUnknownPreset(t *testing.T) { ...@@ -139,3 +139,23 @@ func TestLoadConfig_WithUnknownPreset(t *testing.T) {
require.Error(t, err) require.Error(t, err)
require.Equal(t, fmt.Sprintf("unknown preset: %d", faultyPreset), err.Error()) require.Equal(t, fmt.Sprintf("unknown preset: %d", faultyPreset), err.Error())
} }
func Test_AsSliceSuccess(t *testing.T) {
// error cases are intentionally ignored for testing since they can only be
// generated when the L1Contracts struct is developer modified to hold a non-address var field
testCfg := &L1Contracts{
OptimismPortalProxy: common.HexToAddress("0x4205Fc579115071764c7423A4f12eDde41f106Ed"),
L2OutputOracleProxy: common.HexToAddress("0x42097868233d1aa22e815a266982f2cf17685a27"),
L1CrossDomainMessengerProxy: common.HexToAddress("0x420ce71c97B33Cc4729CF772ae268934F7ab5fA1"),
L1StandardBridgeProxy: common.HexToAddress("0x4209fc46f92E8a1c0deC1b1747d010903E884bE1"),
}
slice, err := testCfg.AsSlice()
require.NoError(t, err)
require.Equal(t, len(slice), 4)
require.Equal(t, slice[0].String(), testCfg.OptimismPortalProxy.String())
require.Equal(t, slice[1].String(), testCfg.L2OutputOracleProxy.String())
require.Equal(t, slice[2].String(), testCfg.L1CrossDomainMessengerProxy.String())
require.Equal(t, slice[3].String(), testCfg.L1StandardBridgeProxy.String())
}
...@@ -18,8 +18,8 @@ import ( ...@@ -18,8 +18,8 @@ import (
*/ */
type BlockHeader struct { type BlockHeader struct {
Hash common.Hash `gorm:"primaryKey;serializer:json"` Hash common.Hash `gorm:"primaryKey;serializer:bytes"`
ParentHash common.Hash `gorm:"serializer:json"` ParentHash common.Hash `gorm:"serializer:bytes"`
Number U256 Number U256
Timestamp uint64 Timestamp uint64
...@@ -50,14 +50,14 @@ type LegacyStateBatch struct { ...@@ -50,14 +50,14 @@ type LegacyStateBatch struct {
// violating the primary key constraint. // violating the primary key constraint.
Index uint64 `gorm:"primaryKey;default:0"` Index uint64 `gorm:"primaryKey;default:0"`
Root common.Hash `gorm:"serializer:json"` Root common.Hash `gorm:"serializer:bytes"`
Size uint64 Size uint64
PrevTotal uint64 PrevTotal uint64
L1ContractEventGUID uuid.UUID L1ContractEventGUID uuid.UUID
} }
type OutputProposal struct { type OutputProposal struct {
OutputRoot common.Hash `gorm:"primaryKey;serializer:json"` OutputRoot common.Hash `gorm:"primaryKey;serializer:bytes"`
L2OutputIndex U256 L2OutputIndex U256
L2BlockNumber U256 L2BlockNumber U256
......
...@@ -16,7 +16,7 @@ import ( ...@@ -16,7 +16,7 @@ import (
*/ */
type BridgeMessage struct { type BridgeMessage struct {
MessageHash common.Hash `gorm:"primaryKey;serializer:json"` MessageHash common.Hash `gorm:"primaryKey;serializer:bytes"`
Nonce U256 Nonce U256
SentMessageEventGUID uuid.UUID SentMessageEventGUID uuid.UUID
...@@ -28,12 +28,12 @@ type BridgeMessage struct { ...@@ -28,12 +28,12 @@ type BridgeMessage struct {
type L1BridgeMessage struct { type L1BridgeMessage struct {
BridgeMessage `gorm:"embedded"` BridgeMessage `gorm:"embedded"`
TransactionSourceHash common.Hash `gorm:"serializer:json"` TransactionSourceHash common.Hash `gorm:"serializer:bytes"`
} }
type L2BridgeMessage struct { type L2BridgeMessage struct {
BridgeMessage `gorm:"embedded"` BridgeMessage `gorm:"embedded"`
TransactionWithdrawalHash common.Hash `gorm:"serializer:json"` TransactionWithdrawalHash common.Hash `gorm:"serializer:bytes"`
} }
type BridgeMessagesView interface { type BridgeMessagesView interface {
......
...@@ -8,7 +8,6 @@ import ( ...@@ -8,7 +8,6 @@ import (
"gorm.io/gorm" "gorm.io/gorm"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
) )
/** /**
...@@ -16,16 +15,16 @@ import ( ...@@ -16,16 +15,16 @@ import (
*/ */
type Transaction struct { type Transaction struct {
FromAddress common.Address `gorm:"serializer:json"` FromAddress common.Address `gorm:"serializer:bytes"`
ToAddress common.Address `gorm:"serializer:json"` ToAddress common.Address `gorm:"serializer:bytes"`
Amount U256 Amount U256
Data hexutil.Bytes `gorm:"serializer:json"` Data Bytes `gorm:"serializer:bytes"`
Timestamp uint64 Timestamp uint64
} }
type L1TransactionDeposit struct { type L1TransactionDeposit struct {
SourceHash common.Hash `gorm:"serializer:json;primaryKey"` SourceHash common.Hash `gorm:"serializer:bytes;primaryKey"`
L2TransactionHash common.Hash `gorm:"serializer:json"` L2TransactionHash common.Hash `gorm:"serializer:bytes"`
InitiatedL1EventGUID uuid.UUID InitiatedL1EventGUID uuid.UUID
Tx Transaction `gorm:"embedded"` Tx Transaction `gorm:"embedded"`
...@@ -33,7 +32,7 @@ type L1TransactionDeposit struct { ...@@ -33,7 +32,7 @@ type L1TransactionDeposit struct {
} }
type L2TransactionWithdrawal struct { type L2TransactionWithdrawal struct {
WithdrawalHash common.Hash `gorm:"serializer:json;primaryKey"` WithdrawalHash common.Hash `gorm:"serializer:bytes;primaryKey"`
Nonce U256 Nonce U256
InitiatedL2EventGUID uuid.UUID InitiatedL2EventGUID uuid.UUID
......
...@@ -19,12 +19,12 @@ var ( ...@@ -19,12 +19,12 @@ var (
*/ */
type TokenPair struct { type TokenPair struct {
LocalTokenAddress common.Address `gorm:"serializer:json"` LocalTokenAddress common.Address `gorm:"serializer:bytes"`
RemoteTokenAddress common.Address `gorm:"serializer:json"` RemoteTokenAddress common.Address `gorm:"serializer:bytes"`
} }
type BridgeTransfer struct { type BridgeTransfer struct {
CrossDomainMessageHash *common.Hash `gorm:"serializer:json"` CrossDomainMessageHash *common.Hash `gorm:"serializer:bytes"`
Tx Transaction `gorm:"embedded"` Tx Transaction `gorm:"embedded"`
TokenPair TokenPair `gorm:"embedded"` TokenPair TokenPair `gorm:"embedded"`
...@@ -32,27 +32,27 @@ type BridgeTransfer struct { ...@@ -32,27 +32,27 @@ type BridgeTransfer struct {
type L1BridgeDeposit struct { type L1BridgeDeposit struct {
BridgeTransfer `gorm:"embedded"` BridgeTransfer `gorm:"embedded"`
TransactionSourceHash common.Hash `gorm:"primaryKey;serializer:json"` TransactionSourceHash common.Hash `gorm:"primaryKey;serializer:bytes"`
} }
type L1BridgeDepositWithTransactionHashes struct { type L1BridgeDepositWithTransactionHashes struct {
L1BridgeDeposit L1BridgeDeposit `gorm:"embedded"` L1BridgeDeposit L1BridgeDeposit `gorm:"embedded"`
L1TransactionHash common.Hash `gorm:"serializer:json"` L1TransactionHash common.Hash `gorm:"serializer:bytes"`
L2TransactionHash common.Hash `gorm:"serializer:json"` L2TransactionHash common.Hash `gorm:"serializer:bytes"`
} }
type L2BridgeWithdrawal struct { type L2BridgeWithdrawal struct {
BridgeTransfer `gorm:"embedded"` BridgeTransfer `gorm:"embedded"`
TransactionWithdrawalHash common.Hash `gorm:"primaryKey;serializer:json"` TransactionWithdrawalHash common.Hash `gorm:"primaryKey;serializer:bytes"`
} }
type L2BridgeWithdrawalWithTransactionHashes struct { type L2BridgeWithdrawalWithTransactionHashes struct {
L2BridgeWithdrawal L2BridgeWithdrawal `gorm:"embedded"` L2BridgeWithdrawal L2BridgeWithdrawal `gorm:"embedded"`
L2TransactionHash common.Hash `gorm:"serializer:json"` L2TransactionHash common.Hash `gorm:"serializer:bytes"`
ProvenL1TransactionHash common.Hash `gorm:"serializer:json"` ProvenL1TransactionHash common.Hash `gorm:"serializer:bytes"`
FinalizedL1TransactionHash common.Hash `gorm:"serializer:json"` FinalizedL1TransactionHash common.Hash `gorm:"serializer:bytes"`
} }
type BridgeTransfersView interface { type BridgeTransfersView interface {
......
...@@ -21,12 +21,12 @@ type ContractEvent struct { ...@@ -21,12 +21,12 @@ type ContractEvent struct {
GUID uuid.UUID `gorm:"primaryKey"` GUID uuid.UUID `gorm:"primaryKey"`
// Some useful derived fields // Some useful derived fields
BlockHash common.Hash `gorm:"serializer:json"` BlockHash common.Hash `gorm:"serializer:bytes"`
ContractAddress common.Address `gorm:"serializer:json"` ContractAddress common.Address `gorm:"serializer:bytes"`
TransactionHash common.Hash `gorm:"serializer:json"` TransactionHash common.Hash `gorm:"serializer:bytes"`
LogIndex uint64 LogIndex uint64
EventSignature common.Hash `gorm:"serializer:json"` EventSignature common.Hash `gorm:"serializer:bytes"`
Timestamp uint64 Timestamp uint64
// NOTE: NOT ALL THE DERIVED FIELDS ON `types.Log` ARE // NOTE: NOT ALL THE DERIVED FIELDS ON `types.Log` ARE
......
...@@ -5,6 +5,8 @@ import ( ...@@ -5,6 +5,8 @@ import (
"fmt" "fmt"
"github.com/ethereum-optimism/optimism/indexer/config" "github.com/ethereum-optimism/optimism/indexer/config"
_ "github.com/ethereum-optimism/optimism/indexer/database/serializers"
"gorm.io/driver/postgres" "gorm.io/driver/postgres"
"gorm.io/gorm" "gorm.io/gorm"
"gorm.io/gorm/logger" "gorm.io/gorm/logger"
......
package database
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/mock"
)
// MockBlocksView is a testify-based mock implementation of the
// read-only BlocksView interface, for use in unit tests.
type MockBlocksView struct {
	mock.Mock
}
// L1BlockHeader returns the header configured on the mock. A guarded
// type assertion lets expectations primed with `Return(nil, err)` yield
// a nil pointer rather than panicking.
func (m *MockBlocksView) L1BlockHeader(common.Hash) (*L1BlockHeader, error) {
	args := m.Called()
	if header, ok := args.Get(0).(*L1BlockHeader); ok {
		return header, args.Error(1)
	}
	return nil, args.Error(1)
}
// L1BlockHeaderWithFilter returns the header configured on the mock.
// The type assertion is guarded (matching L1BlockHeader above) so that
// an expectation primed with `Return(nil, err)` returns a nil pointer
// instead of panicking on the interface conversion.
func (m *MockBlocksView) L1BlockHeaderWithFilter(BlockHeader) (*L1BlockHeader, error) {
	args := m.Called()
	header, ok := args.Get(0).(*L1BlockHeader)
	if !ok {
		header = nil
	}
	return header, args.Error(1)
}
// L1LatestBlockHeader returns the latest header configured on the mock,
// tolerating expectations primed with a nil first return value.
func (m *MockBlocksView) L1LatestBlockHeader() (*L1BlockHeader, error) {
	args := m.Called()
	if header, ok := args.Get(0).(*L1BlockHeader); ok {
		return header, args.Error(1)
	}
	return nil, args.Error(1)
}
// L2BlockHeader returns the header configured on the mock. The guarded
// assertion keeps the mock from panicking when primed with `Return(nil, err)`,
// consistent with the L1 counterpart.
func (m *MockBlocksView) L2BlockHeader(common.Hash) (*L2BlockHeader, error) {
	args := m.Called()
	header, ok := args.Get(0).(*L2BlockHeader)
	if !ok {
		header = nil
	}
	return header, args.Error(1)
}
// L2BlockHeaderWithFilter returns the header configured on the mock,
// nil-safe when the expectation supplies a nil first return value.
func (m *MockBlocksView) L2BlockHeaderWithFilter(BlockHeader) (*L2BlockHeader, error) {
	args := m.Called()
	header, ok := args.Get(0).(*L2BlockHeader)
	if !ok {
		header = nil
	}
	return header, args.Error(1)
}
// L2LatestBlockHeader returns the latest header configured on the mock,
// nil-safe when the expectation supplies a nil first return value
// (matching L1LatestBlockHeader, which already guards this case).
func (m *MockBlocksView) L2LatestBlockHeader() (*L2BlockHeader, error) {
	args := m.Called()
	header, ok := args.Get(0).(*L2BlockHeader)
	if !ok {
		header = nil
	}
	return header, args.Error(1)
}
// LatestCheckpointedOutput returns the output proposal configured on the
// mock, nil-safe when the expectation supplies a nil first return value.
func (m *MockBlocksView) LatestCheckpointedOutput() (*OutputProposal, error) {
	args := m.Called()
	output, ok := args.Get(0).(*OutputProposal)
	if !ok {
		output = nil
	}
	return output, args.Error(1)
}
// OutputProposal returns the proposal configured on the mock, nil-safe
// when the expectation supplies a nil first return value. The index
// argument is intentionally ignored; match on it via mock matchers if needed.
func (m *MockBlocksView) OutputProposal(index *big.Int) (*OutputProposal, error) {
	args := m.Called()
	output, ok := args.Get(0).(*OutputProposal)
	if !ok {
		output = nil
	}
	return output, args.Error(1)
}
// LatestEpoch returns the epoch configured on the mock, nil-safe when
// the expectation supplies a nil first return value.
func (m *MockBlocksView) LatestEpoch() (*Epoch, error) {
	args := m.Called()
	epoch, ok := args.Get(0).(*Epoch)
	if !ok {
		epoch = nil
	}
	return epoch, args.Error(1)
}
// MockBlocksDB is a testify-based mock of the BlocksDB interface.
// It embeds MockBlocksView to inherit the read-only method mocks.
type MockBlocksDB struct {
	MockBlocksView
}
// StoreL1BlockHeaders records the call and returns the configured error.
func (m *MockBlocksDB) StoreL1BlockHeaders(headers []L1BlockHeader) error {
	args := m.Called(headers)
	// The method returns a single value, so the error is at index 0.
	// The original read index 1, which panics when an expectation is
	// configured with the conventional single-value `Return(err)`.
	return args.Error(0)
}
// StoreL2BlockHeaders records the call and returns the configured error.
func (m *MockBlocksDB) StoreL2BlockHeaders(headers []L2BlockHeader) error {
	args := m.Called(headers)
	// Single return value: the error is at index 0 (index 1 panics with
	// a conventional single-value `Return(err)` expectation).
	return args.Error(0)
}
// StoreLegacyStateBatches records the call and returns the configured error.
func (m *MockBlocksDB) StoreLegacyStateBatches(headers []LegacyStateBatch) error {
	args := m.Called(headers)
	// Single return value: the error is at index 0 (index 1 panics with
	// a conventional single-value `Return(err)` expectation).
	return args.Error(0)
}
// StoreOutputProposals records the call and returns the configured error.
func (m *MockBlocksDB) StoreOutputProposals(headers []OutputProposal) error {
	args := m.Called(headers)
	// Single return value: the error is at index 0 (index 1 panics with
	// a conventional single-value `Return(err)` expectation).
	return args.Error(0)
}
// MockDB is a mock database that can be used for testing
type MockDB struct {
	// MockBlocks is the underlying mock, exposed so tests can set
	// expectations on it directly.
	MockBlocks *MockBlocksDB
	// DB is a real DB value whose Blocks field is wired to MockBlocks,
	// suitable for passing to code under test.
	DB *DB
}
// NewMockDB constructs a MockDB with its Blocks interface backed by a
// fresh MockBlocksDB.
func NewMockDB() *MockDB {
	// Only the BlocksDB interface is mocked for now; other inner DB
	// interfaces can be wired in here as they become needed.
	blocks := new(MockBlocksDB)
	return &MockDB{
		MockBlocks: blocks,
		DB:         &DB{Blocks: blocks},
	}
}
package serializers
import (
"context"
"fmt"
"reflect"
"github.com/ethereum/go-ethereum/common/hexutil"
"gorm.io/gorm/schema"
)
// BytesSerializer is a gorm field serializer, registered under the name
// "bytes", that persists byte-backed types as hex strings.
type BytesSerializer struct{}

// BytesInterface is satisfied by types that expose their raw bytes
// (e.g. common.Hash, common.Address).
type BytesInterface interface{ Bytes() []byte }

// SetBytesInterface is satisfied by types whose value can be populated
// from raw bytes.
type SetBytesInterface interface{ SetBytes([]byte) }

func init() {
	schema.RegisterSerializer("bytes", BytesSerializer{})
}
// Scan implements the gorm serializer deserialization path: it decodes a
// hex string read from the database and populates the destination field,
// whose type (or pointee type) must implement SetBytes([]byte).
func (BytesSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
	// NULL columns leave the field at its zero value.
	if dbValue == nil {
		return nil
	}
	hexStr, ok := dbValue.(string)
	if !ok {
		return fmt.Errorf("expected hex string as the database value: %T", dbValue)
	}

	b, err := hexutil.Decode(hexStr)
	if err != nil {
		return fmt.Errorf("failed to decode database value: %w", err)
	}

	fieldValue := reflect.New(field.FieldType)
	fieldInterface := fieldValue.Interface()

	// Detect if we're deserializing into a pointer. If so, we'll need to
	// also allocate memory to where the allocated pointer should point to
	if field.FieldType.Kind() == reflect.Pointer {
		// Inspect the declared pointee type for the double-pointer guard.
		// The original checked `nestedField.Elem().Kind()` on a nil
		// pointer value, which is always the invalid Value, so the guard
		// could never fire and **T fields fell through to a misleading
		// "does not satisfy SetBytes" error instead.
		if field.FieldType.Elem().Kind() == reflect.Pointer {
			return fmt.Errorf("double pointers are the max depth supported: %T", fieldValue)
		}

		// We'll want to call `SetBytes` on the pointer to
		// the allocated memory and not the double pointer
		nestedField := fieldValue.Elem()
		nestedField.Set(reflect.New(field.FieldType.Elem()))
		fieldInterface = nestedField.Interface()
	}

	fieldSetBytes, ok := fieldInterface.(SetBytesInterface)
	if !ok {
		return fmt.Errorf("field does not satisfy the `SetBytes([]byte)` interface: %T", fieldInterface)
	}

	fieldSetBytes.SetBytes(b)
	field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
	return nil
}
// Value implements the gorm serializer serialization path: it encodes the
// field value (which must implement Bytes() []byte) into a hex string for
// storage. Nil values — including typed-nil pointers — map to SQL NULL.
func (BytesSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
	if fieldValue == nil {
		return nil, nil
	}
	if field.FieldType.Kind() == reflect.Pointer && reflect.ValueOf(fieldValue).IsNil() {
		return nil, nil
	}

	byteSource, ok := fieldValue.(BytesInterface)
	if !ok {
		return nil, fmt.Errorf("field does not satisfy the `Bytes() []byte` interface")
	}

	return hexutil.Encode(byteSource.Bytes()), nil
}
package database package serializers
import ( import (
"context" "context"
...@@ -13,38 +13,28 @@ import ( ...@@ -13,38 +13,28 @@ import (
type RLPSerializer struct{} type RLPSerializer struct{}
type RLPInterface interface {
rlp.Encoder
rlp.Decoder
}
func init() { func init() {
schema.RegisterSerializer("rlp", RLPSerializer{}) schema.RegisterSerializer("rlp", RLPSerializer{})
} }
func (RLPSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error { func (RLPSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
fieldValue := reflect.New(field.FieldType) if dbValue == nil {
if dbValue != nil { return nil
var bytes []byte }
switch v := dbValue.(type) {
case []byte: hexStr, ok := dbValue.(string)
bytes = v if !ok {
case string: return fmt.Errorf("expected hex string as the database value: %T", dbValue)
b, err := hexutil.Decode(v) }
if err != nil {
return err
}
bytes = b
default:
return fmt.Errorf("unrecognized RLP bytes: %#v", dbValue)
}
if len(bytes) > 0 { b, err := hexutil.Decode(hexStr)
err := rlp.DecodeBytes(bytes, fieldValue.Interface()) if err != nil {
if err != nil { return fmt.Errorf("failed to decode database value: %w", err)
return err }
}
} fieldValue := reflect.New(field.FieldType)
if err := rlp.DecodeBytes(b, fieldValue.Interface()); err != nil {
return fmt.Errorf("failed to decode rlp bytes: %w", err)
} }
field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem()) field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
...@@ -52,18 +42,15 @@ func (RLPSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect. ...@@ -52,18 +42,15 @@ func (RLPSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.
} }
func (RLPSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) { func (RLPSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
// Even though rlp.Encode takes an interface and will error out if the passed interface does not if fieldValue == nil || (field.FieldType.Kind() == reflect.Pointer && reflect.ValueOf(fieldValue).IsNil()) {
// satisfy the interface, we check here since we also want to make sure this type satisfies the return nil, nil
// rlp.Decoder interface as well
i := reflect.TypeOf(new(RLPInterface)).Elem()
if !reflect.TypeOf(fieldValue).Implements(i) {
return nil, fmt.Errorf("%T does not satisfy RLP Encoder & Decoder interface", fieldValue)
} }
rlpBytes, err := rlp.EncodeToBytes(fieldValue) rlpBytes, err := rlp.EncodeToBytes(fieldValue)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("failed to encode rlp bytes: %w", err)
} }
return hexutil.Bytes(rlpBytes).MarshalText() hexStr := hexutil.Encode(rlpBytes)
return hexStr, nil
} }
...@@ -93,3 +93,12 @@ func (h *RLPHeader) Header() *types.Header { ...@@ -93,3 +93,12 @@ func (h *RLPHeader) Header() *types.Header {
func (h *RLPHeader) Hash() common.Hash { func (h *RLPHeader) Hash() common.Hash {
return h.Header().Hash() return h.Header().Hash()
} }
// Bytes is a thin []byte wrapper that satisfies the Bytes()/SetBytes
// pair required by the "bytes" field serializer.
type Bytes []byte

// Bytes returns the underlying byte slice.
func (b Bytes) Bytes() []byte {
	return b
}

// SetBytes replaces the receiver's contents with the supplied slice.
func (b *Bytes) SetBytes(bytes []byte) {
	*b = bytes
}
...@@ -14,6 +14,9 @@ import ( ...@@ -14,6 +14,9 @@ import (
) )
const ( const (
// NOTE - These values can be made configurable to allow for more fine grained control
// Additionally a default interval of 5 seconds may be too slow for reading L2 blocks provided
// the current rate of L2 block production on OP Stack chains (2 seconds per block)
defaultLoopInterval = 5 * time.Second defaultLoopInterval = 5 * time.Second
defaultHeaderBufferSize = 500 defaultHeaderBufferSize = 500
) )
...@@ -90,7 +93,7 @@ func (etl *ETL) Start(ctx context.Context) error { ...@@ -90,7 +93,7 @@ func (etl *ETL) Start(ctx context.Context) error {
for i := range logs { for i := range logs {
if _, ok := headerMap[logs[i].BlockHash]; !ok { if _, ok := headerMap[logs[i].BlockHash]; !ok {
// NOTE. Definitely an error state if the none of the headers were re-orged out in between // NOTE. Definitely an error state if the none of the headers were re-orged out in between
// the blocks and logs retreival operations. However, we need to gracefully handle reorgs // the blocks and logs retrieval operations. However, we need to gracefully handle reorgs
batchLog.Error("log found with block hash not in the batch", "block_hash", logs[i].BlockHash, "log_index", logs[i].Index) batchLog.Error("log found with block hash not in the batch", "block_hash", logs[i].BlockHash, "log_index", logs[i].Index)
return errors.New("parsed log with a block hash not in the fetched batch") return errors.New("parsed log with a block hash not in the fetched batch")
} }
......
...@@ -2,14 +2,13 @@ package etl ...@@ -2,14 +2,13 @@ package etl
import ( import (
"context" "context"
"errors" "fmt"
"reflect" "math/big"
"github.com/ethereum-optimism/optimism/indexer/config" "github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database" "github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/node" "github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum-optimism/optimism/op-service/retry" "github.com/ethereum-optimism/optimism/op-service/retry"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
...@@ -20,43 +19,49 @@ type L1ETL struct { ...@@ -20,43 +19,49 @@ type L1ETL struct {
db *database.DB db *database.DB
} }
func NewL1ETL(log log.Logger, db *database.DB, client node.EthClient, contracts config.L1Contracts) (*L1ETL, error) { // NewL1ETL creates a new L1ETL instance that will start indexing from different starting points
// depending on the state of the database and the supplied start height.
func NewL1ETL(log log.Logger, db *database.DB, client node.EthClient, startHeight *big.Int,
contracts config.L1Contracts) (*L1ETL, error) {
log = log.New("etl", "l1") log = log.New("etl", "l1")
contractValue := reflect.ValueOf(contracts) latestHeader, err := db.Blocks.L1LatestBlockHeader()
fields := reflect.VisibleFields(reflect.TypeOf(contracts)) if err != nil {
l1Contracts := make([]common.Address, len(fields)) return nil, err
for i, field := range fields {
// ruleid: unsafe-reflect-by-name
addr, ok := (contractValue.FieldByName(field.Name).Interface()).(common.Address)
if !ok {
log.Error("non-address found in L1Contracts", "name", field.Name)
return nil, errors.New("non-address found in L1Contracts")
}
log.Info("configured contract", "name", field.Name, "addr", addr)
l1Contracts[i] = addr
} }
latestHeader, err := db.Blocks.L1LatestBlockHeader() cSlice, err := contracts.AsSlice()
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Determine the starting height for traversal
var fromHeader *types.Header var fromHeader *types.Header
if latestHeader != nil { if latestHeader != nil {
log.Info("detected last indexed block", "number", latestHeader.Number.Int, "hash", latestHeader.Hash) log.Info("detected last indexed block", "number", latestHeader.Number.Int, "hash", latestHeader.Hash)
fromHeader = latestHeader.RLPHeader.Header() fromHeader = latestHeader.RLPHeader.Header()
} else if startHeight.BitLen() > 0 {
log.Info("no indexed state in storage, starting from supplied L1 height", "height", startHeight.String())
header, err := client.BlockHeaderByNumber(startHeight)
if err != nil {
return nil, fmt.Errorf("could not fetch starting block header: %w", err)
}
fromHeader = header
} else { } else {
log.Info("no indexed state, starting from genesis") log.Info("no indexed state in storage, starting from L1 genesis")
} }
// NOTE - The use of un-buffered channel here assumes that downstream consumers
// will be able to keep up with the rate of incoming batches
etlBatches := make(chan ETLBatch) etlBatches := make(chan ETLBatch)
etl := ETL{ etl := ETL{
log: log, log: log,
headerTraversal: node.NewHeaderTraversal(client, fromHeader), headerTraversal: node.NewHeaderTraversal(client, fromHeader),
ethClient: client.GethEthClient(), ethClient: client.GethEthClient(),
contracts: l1Contracts, contracts: cSlice,
etlBatches: etlBatches, etlBatches: etlBatches,
} }
......
package etl
import (
"math/big"
"github.com/ethereum-optimism/optimism/op-service/log"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/node"
"testing"
)
// Test_L1ETL_Construction exercises the two starting-point strategies of
// NewL1ETL: starting from the supplied L1 height when the database has no
// indexed state, and resuming from the latest indexed header when one exists.
func Test_L1ETL_Construction(t *testing.T) {
	// testSuite bundles the mocked dependencies handed to NewL1ETL.
	type testSuite struct {
		db        *database.MockDB
		client    *node.MockEthClient
		start     *big.Int
		contracts config.L1Contracts
	}

	var tests = []struct {
		name         string
		construction func() *testSuite
		assertion    func(*L1ETL, error)
	}{
		{
			name: "Start from L1 config height",
			construction: func() *testSuite {
				client := new(node.MockEthClient)
				db := database.NewMockDB()
				testStart := big.NewInt(100)
				// No indexed state in the DB (nil header), so the ETL should
				// fetch the header at the supplied start height (100).
				db.MockBlocks.On("L1LatestBlockHeader").Return(nil, nil)
				client.On("BlockHeaderByNumber", mock.MatchedBy(
					node.BigIntMatcher(100))).Return(
					&types.Header{
						ParentHash: common.HexToHash("0x69"),
					}, nil)
				client.On("GethEthClient").Return(nil)
				return &testSuite{
					db:        db,
					client:    client,
					start:     testStart,
					contracts: config.L1Contracts{},
				}
			},
			assertion: func(etl *L1ETL, err error) {
				require.NoError(t, err)
				// The traversal must be seeded with the header fetched at the
				// configured start height.
				require.Equal(t, etl.headerTraversal.LastHeader().ParentHash, common.HexToHash("0x69"))
			},
		},
		{
			name: "Start from recent height stored in DB",
			construction: func() *testSuite {
				client := new(node.MockEthClient)
				db := database.NewMockDB()
				testStart := big.NewInt(100)
				// An indexed header exists at block 69; the ETL should resume
				// from it and ignore the supplied start height.
				db.MockBlocks.On("L1LatestBlockHeader").Return(
					&database.L1BlockHeader{
						BlockHeader: database.BlockHeader{
							RLPHeader: &database.RLPHeader{
								Number: big.NewInt(69),
							},
						}}, nil)
				client.On("GethEthClient").Return(nil)
				return &testSuite{
					db:        db,
					client:    client,
					start:     testStart,
					contracts: config.L1Contracts{},
				}
			},
			assertion: func(etl *L1ETL, err error) {
				require.NoError(t, err)
				header := etl.headerTraversal.LastHeader()
				// Resumed from the stored header at height 69.
				require.True(t, header.Number.Cmp(big.NewInt(69)) == 0)
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			ts := test.construction()
			logger := log.NewLogger(log.DefaultCLIConfig())
			etl, err := NewL1ETL(logger, ts.db.DB, ts.client, ts.start, ts.contracts)
			test.assertion(etl, err)
		})
	}
}
...@@ -34,7 +34,7 @@ func NewIndexer(logger log.Logger, chainConfig config.ChainConfig, rpcsConfig co ...@@ -34,7 +34,7 @@ func NewIndexer(logger log.Logger, chainConfig config.ChainConfig, rpcsConfig co
return nil, err return nil, err
} }
l1Etl, err := etl.NewL1ETL(logger, db, l1EthClient, chainConfig.L1Contracts) l1Etl, err := etl.NewL1ETL(logger, db, l1EthClient, chainConfig.L1StartHeight(), chainConfig.L1Contracts)
if err != nil { if err != nil {
return nil, err return nil, err
} }
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
[chain] [chain]
# OP Goerli # OP Goerli
preset = 420 preset = 420
l1-starting-height = 0
[rpcs] [rpcs]
l1-rpc = "${INDEXER_RPC_URL_L1}" l1-rpc = "${INDEXER_RPC_URL_L1}"
......
...@@ -19,3 +19,8 @@ func clampBigInt(start, end *big.Int, size uint64) *big.Int { ...@@ -19,3 +19,8 @@ func clampBigInt(start, end *big.Int, size uint64) *big.Int {
temp.Add(start, big.NewInt(int64(size-1))) temp.Add(start, big.NewInt(int64(size-1)))
return temp return temp
} }
// returns an inner comparison function result for a big.Int
func BigIntMatcher(num int64) func(*big.Int) bool {
return func(bi *big.Int) bool { return bi.Int64() == num }
}
...@@ -7,17 +7,13 @@ import ( ...@@ -7,17 +7,13 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func bigIntMatcher(num int64) func(*big.Int) bool {
return func(bi *big.Int) bool { return bi.Int64() == num }
}
func TestClampBigInt(t *testing.T) { func TestClampBigInt(t *testing.T) {
assert.True(t, true) assert.True(t, true)
start := big.NewInt(1) start := big.NewInt(1)
end := big.NewInt(10) end := big.NewInt(10)
// When the (start, end) boudnds are within range // When the (start, end) bounds are within range
// the same end pointer should be returned // the same end pointer should be returned
// larger range // larger range
......
...@@ -27,8 +27,9 @@ const ( ...@@ -27,8 +27,9 @@ const (
type EthClient interface { type EthClient interface {
FinalizedBlockHeight() (*big.Int, error) FinalizedBlockHeight() (*big.Int, error)
BlockHeadersByRange(*big.Int, *big.Int) ([]types.Header, error) BlockHeaderByNumber(*big.Int) (*types.Header, error)
BlockHeaderByHash(common.Hash) (*types.Header, error) BlockHeaderByHash(common.Hash) (*types.Header, error)
BlockHeadersByRange(*big.Int, *big.Int) ([]types.Header, error)
StorageHash(common.Address, *big.Int) (common.Hash, error) StorageHash(common.Address, *big.Int) (common.Hash, error)
...@@ -99,7 +100,20 @@ func (c *client) BlockHeaderByHash(hash common.Hash) (*types.Header, error) { ...@@ -99,7 +100,20 @@ func (c *client) BlockHeaderByHash(hash common.Hash) (*types.Header, error) {
return header, nil return header, nil
} }
// BlockHeadersByRange will retrieve block headers within the specified range -- includsive. No restrictions // BlockHeaderByNumber retrieves the block header attributed to the supplied height
// BlockHeaderByNumber retrieves the block header attributed to the supplied
// height, using a bounded-timeout context for the underlying RPC call.
func (c *client) BlockHeaderByNumber(number *big.Int) (*types.Header, error) {
	ctxwt, cancel := context.WithTimeout(context.Background(), defaultRequestTimeout)
	defer cancel()

	// HeaderByNumber already returns the (header, error) pair we need,
	// so the result can be forwarded directly.
	return ethclient.NewClient(c.rpcClient).HeaderByNumber(ctxwt, number)
}
// BlockHeadersByRange will retrieve block headers within the specified range -- inclusive. No restrictions
// are placed on the range such as blocks in the "latest", "safe" or "finalized" states. If the specified // are placed on the range such as blocks in the "latest", "safe" or "finalized" states. If the specified
// range is too large, `endHeight > latest`, the resulting list is truncated to the available headers // range is too large, `endHeight > latest`, the resulting list is truncated to the available headers
func (c *client) BlockHeadersByRange(startHeight, endHeight *big.Int) ([]types.Header, error) { func (c *client) BlockHeadersByRange(startHeight, endHeight *big.Int) ([]types.Header, error) {
......
...@@ -24,6 +24,12 @@ func NewHeaderTraversal(ethClient EthClient, fromHeader *types.Header) *HeaderTr ...@@ -24,6 +24,12 @@ func NewHeaderTraversal(ethClient EthClient, fromHeader *types.Header) *HeaderTr
return &HeaderTraversal{ethClient: ethClient, lastHeader: fromHeader} return &HeaderTraversal{ethClient: ethClient, lastHeader: fromHeader}
} }
// LastHeader returns the last header that was fetched by the HeaderTraversal,
// i.e. the current cursor position of the traversal. It may be nil when the
// traversal was constructed without a starting header and nothing has been
// fetched yet. This is primarily useful for inspecting traversal state in tests.
func (f *HeaderTraversal) LastHeader() *types.Header {
	return f.lastHeader
}
// NextFinalizedHeaders retrives the next set of headers that have been // NextFinalizedHeaders retrives the next set of headers that have been
// marked as finalized by the connected client, bounded by the supplied size // marked as finalized by the connected client, bounded by the supplied size
func (f *HeaderTraversal) NextFinalizedHeaders(maxSize uint64) ([]types.Header, error) { func (f *HeaderTraversal) NextFinalizedHeaders(maxSize uint64) ([]types.Header, error) {
......
...@@ -55,7 +55,7 @@ func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) { ...@@ -55,7 +55,7 @@ func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
// blocks [0..4] // blocks [0..4]
headers := makeHeaders(5, nil) headers := makeHeaders(5, nil)
client.On("FinalizedBlockHeight").Return(big.NewInt(4), nil).Times(1) // Times so that we can override next client.On("FinalizedBlockHeight").Return(big.NewInt(4), nil).Times(1) // Times so that we can override next
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil) client.On("BlockHeadersByRange", mock.MatchedBy(BigIntMatcher(0)), mock.MatchedBy(BigIntMatcher(4))).Return(headers, nil)
headers, err := headerTraversal.NextFinalizedHeaders(5) headers, err := headerTraversal.NextFinalizedHeaders(5)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, headers, 5) require.Len(t, headers, 5)
...@@ -63,7 +63,7 @@ func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) { ...@@ -63,7 +63,7 @@ func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
// blocks [5..9] // blocks [5..9]
headers = makeHeaders(5, &headers[len(headers)-1]) headers = makeHeaders(5, &headers[len(headers)-1])
client.On("FinalizedBlockHeight").Return(big.NewInt(9), nil) client.On("FinalizedBlockHeight").Return(big.NewInt(9), nil)
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(9))).Return(headers, nil) client.On("BlockHeadersByRange", mock.MatchedBy(BigIntMatcher(5)), mock.MatchedBy(BigIntMatcher(9))).Return(headers, nil)
headers, err = headerTraversal.NextFinalizedHeaders(5) headers, err = headerTraversal.NextFinalizedHeaders(5)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, headers, 5) require.Len(t, headers, 5)
...@@ -80,14 +80,14 @@ func TestHeaderTraversalNextFinalizedHeadersMaxSize(t *testing.T) { ...@@ -80,14 +80,14 @@ func TestHeaderTraversalNextFinalizedHeadersMaxSize(t *testing.T) {
// clamped by the supplied size // clamped by the supplied size
headers := makeHeaders(5, nil) headers := makeHeaders(5, nil)
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil) client.On("BlockHeadersByRange", mock.MatchedBy(BigIntMatcher(0)), mock.MatchedBy(BigIntMatcher(4))).Return(headers, nil)
headers, err := headerTraversal.NextFinalizedHeaders(5) headers, err := headerTraversal.NextFinalizedHeaders(5)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, headers, 5) require.Len(t, headers, 5)
// clamped by the supplied size. FinalizedHeight == 100 // clamped by the supplied size. FinalizedHeight == 100
headers = makeHeaders(10, &headers[len(headers)-1]) headers = makeHeaders(10, &headers[len(headers)-1])
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(14))).Return(headers, nil) client.On("BlockHeadersByRange", mock.MatchedBy(BigIntMatcher(5)), mock.MatchedBy(BigIntMatcher(14))).Return(headers, nil)
headers, err = headerTraversal.NextFinalizedHeaders(10) headers, err = headerTraversal.NextFinalizedHeaders(10)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, headers, 10) require.Len(t, headers, 10)
...@@ -102,7 +102,7 @@ func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) { ...@@ -102,7 +102,7 @@ func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
// blocks [0..4] // blocks [0..4]
headers := makeHeaders(5, nil) headers := makeHeaders(5, nil)
client.On("FinalizedBlockHeight").Return(big.NewInt(4), nil).Times(1) // Times so that we can override next client.On("FinalizedBlockHeight").Return(big.NewInt(4), nil).Times(1) // Times so that we can override next
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(0)), mock.MatchedBy(bigIntMatcher(4))).Return(headers, nil) client.On("BlockHeadersByRange", mock.MatchedBy(BigIntMatcher(0)), mock.MatchedBy(BigIntMatcher(4))).Return(headers, nil)
headers, err := headerTraversal.NextFinalizedHeaders(5) headers, err := headerTraversal.NextFinalizedHeaders(5)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, headers, 5) require.Len(t, headers, 5)
...@@ -110,7 +110,7 @@ func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) { ...@@ -110,7 +110,7 @@ func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
// blocks [5..9]. Next batch is not chained correctly (starts again from genesis) // blocks [5..9]. Next batch is not chained correctly (starts again from genesis)
headers = makeHeaders(5, nil) headers = makeHeaders(5, nil)
client.On("FinalizedBlockHeight").Return(big.NewInt(9), nil) client.On("FinalizedBlockHeight").Return(big.NewInt(9), nil)
client.On("BlockHeadersByRange", mock.MatchedBy(bigIntMatcher(5)), mock.MatchedBy(bigIntMatcher(9))).Return(headers, nil) client.On("BlockHeadersByRange", mock.MatchedBy(BigIntMatcher(5)), mock.MatchedBy(BigIntMatcher(9))).Return(headers, nil)
headers, err = headerTraversal.NextFinalizedHeaders(5) headers, err = headerTraversal.NextFinalizedHeaders(5)
require.Nil(t, headers) require.Nil(t, headers)
require.Equal(t, ErrHeaderTraversalAndProviderMismatchedState, err) require.Equal(t, ErrHeaderTraversalAndProviderMismatchedState, err)
......
...@@ -3,20 +3,22 @@ package node ...@@ -3,20 +3,22 @@ package node
import ( import (
"math/big" "math/big"
"github.com/stretchr/testify/mock"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/stretchr/testify/mock"
) )
// Compile-time check that MockEthClient satisfies the EthClient interface.
var _ EthClient = &MockEthClient{}
type MockEthClient struct { type MockEthClient struct {
mock.Mock mock.Mock
} }
// BlockHeaderByNumber mocks EthClient.BlockHeaderByNumber, returning whatever
// header/error pair the test configured via On(...).Return(...).
// The type assertion is guarded so a test may configure a nil header
// (e.g. alongside a non-nil error) without triggering a panic, mirroring the
// nil-safe handling already used by GethEthClient in this mock.
func (m *MockEthClient) BlockHeaderByNumber(number *big.Int) (*types.Header, error) {
	args := m.Called(number)
	header, ok := args.Get(0).(*types.Header)
	if !ok {
		return nil, args.Error(1)
	}
	return header, args.Error(1)
}
func (m *MockEthClient) FinalizedBlockHeight() (*big.Int, error) { func (m *MockEthClient) FinalizedBlockHeight() (*big.Int, error) {
args := m.Called() args := m.Called()
return args.Get(0).(*big.Int), args.Error(1) return args.Get(0).(*big.Int), args.Error(1)
...@@ -44,5 +46,10 @@ func (m *MockEthClient) GethRpcClient() *rpc.Client { ...@@ -44,5 +46,10 @@ func (m *MockEthClient) GethRpcClient() *rpc.Client {
func (m *MockEthClient) GethEthClient() *ethclient.Client { func (m *MockEthClient) GethEthClient() *ethclient.Client {
args := m.Called() args := m.Called()
return args.Get(0).(*ethclient.Client)
client, ok := args.Get(0).(*ethclient.Client)
if !ok {
return nil
}
return client
} }
package main
import (
"bytes"
"compress/flate"
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
opservice "github.com/ethereum-optimism/optimism/op-service"
"github.com/ethereum-optimism/optimism/op-service/jsonutil"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
)
// EnvPrefix is the prefix for all environment variables read by this tool.
const EnvPrefix = "OP_CHAIN_OPS_REGISTRY_DATA"

var (
	// L2GenesisFlag points at a full genesis dump (go-ethereum format),
	// including the allocation data.
	L2GenesisFlag = &cli.PathFlag{
		Name:    "l2-genesis",
		Value:   "genesis.json",
		Usage:   "Path to genesis json (go-ethereum format)",
		EnvVars: opservice.PrefixEnvVar(EnvPrefix, "L2_GENESIS"),
	}
	// L2GenesisHeaderFlag is the fallback when the full genesis state is
	// omitted: only the genesis block header is provided.
	L2GenesisHeaderFlag = &cli.PathFlag{
		Name:    "l2-genesis-header",
		Value:   "genesis-header.json",
		Usage:   "Alternative to l2-genesis flag, if genesis-state is omitted. Path to block header at genesis",
		EnvVars: opservice.PrefixEnvVar(EnvPrefix, "L2_GENESIS_HEADER"),
	}
	// BytecodesDirFlag is the directory where contract bytecodes are written,
	// one gzipped file per unique code hash.
	BytecodesDirFlag = &cli.PathFlag{
		Name:    "bytecodes-dir",
		Value:   "superchain-registry/superchain/bytecodes",
		Usage:   "Collection of gzipped L2 bytecodes",
		EnvVars: opservice.PrefixEnvVar(EnvPrefix, "BYTECODES_DIR"),
	}
	// OutputFlag is the destination path of the gzipped genesis JSON output.
	OutputFlag = &cli.PathFlag{
		Name:    "output",
		Value:   "out.json.gz",
		Usage:   "output gzipped JSON file path to write to",
		EnvVars: opservice.PrefixEnvVar(EnvPrefix, "OUTPUT"),
	}
)
// main wires up the registry-genesis CLI and runs it against os.Args,
// exiting fatally on error.
func main() {
	// Use a terminal-aware log format when stderr is a TTY.
	handler := log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd())))
	log.Root().SetHandler(handler)

	app := &cli.App{
		Name:   "registry-genesis",
		Usage:  "Prepare superchain-registry genesis data files based on full genesis dump",
		Action: entrypoint,
		Flags: []cli.Flag{
			L2GenesisFlag,
			L2GenesisHeaderFlag,
			BytecodesDirFlag,
			OutputFlag,
		},
	}

	if err := app.Run(os.Args); err != nil {
		log.Crit("error while generating registry data", "err", err)
	}
}
// GenesisAccount is a trimmed-down genesis allocation entry: contract code is
// referenced by hash only (the bytecode itself lives in the bytecodes
// collection), and zero-value fields are omitted from the JSON encoding.
type GenesisAccount struct {
	// CodeHash is the keccak256 hash of the account's bytecode, or the zero
	// hash for accounts without code.
	CodeHash common.Hash `json:"codeHash,omitempty"`
	// Storage holds the account's storage slots, encoded in sorted key order.
	Storage jsonutil.LazySortedJsonMap[common.Hash, common.Hash] `json:"storage,omitempty"`
	// Balance is the account balance; omitted when nil/zero.
	Balance *hexutil.Big `json:"balance,omitempty"`
	// Nonce is the account nonce; omitted when zero.
	Nonce uint64 `json:"nonce,omitempty"`
}
// Genesis mirrors the header fields of a genesis definition, with the
// allocation data held in the trimmed GenesisAccount form. For chains where
// the full genesis state is unavailable, Alloc is left empty and StateHash
// references the state root instead.
type Genesis struct {
	Nonce      uint64         `json:"nonce"`
	Timestamp  uint64         `json:"timestamp"`
	ExtraData  []byte         `json:"extraData"`
	GasLimit   uint64         `json:"gasLimit"`
	Difficulty *hexutil.Big   `json:"difficulty"`
	Mixhash    common.Hash    `json:"mixHash"`
	Coinbase   common.Address `json:"coinbase"`
	Number     uint64         `json:"number"`
	GasUsed    uint64         `json:"gasUsed"`
	ParentHash common.Hash    `json:"parentHash"`
	BaseFee    *hexutil.Big   `json:"baseFeePerGas"`
	// Alloc maps each account address to its trimmed allocation entry,
	// encoded in sorted address order.
	Alloc jsonutil.LazySortedJsonMap[common.Address, GenesisAccount] `json:"alloc"`
	// For genesis definitions without full state (OP-Mainnet, OP-Goerli)
	StateHash *common.Hash `json:"stateHash,omitempty"`
}
func loadJSON[X any](inputPath string) (*X, error) {
if inputPath == "" {
return nil, errors.New("no path specified")
}
f, err := os.OpenFile(inputPath, os.O_RDONLY, 0)
if err != nil {
return nil, fmt.Errorf("failed to open file %q: %w", inputPath, err)
}
defer f.Close()
var obj X
if err := json.NewDecoder(f).Decode(&obj); err != nil {
return nil, fmt.Errorf("failed to decode file %q: %w", inputPath, err)
}
return &obj, nil
}
func writeGzipJSON(outputPath string, value any) error {
f, err := os.OpenFile(outputPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
if err != nil {
return fmt.Errorf("failed to open output file: %w", err)
}
defer f.Close()
w, err := gzip.NewWriterLevel(f, flate.BestCompression)
if err != nil {
return fmt.Errorf("failed to create gzip writer: %w", err)
}
defer w.Close()
enc := json.NewEncoder(w)
if err := enc.Encode(value); err != nil {
return fmt.Errorf("failed to encode to JSON: %w", err)
}
return nil
}
// entrypoint produces the superchain-registry genesis data file.
// Two modes are supported, selected by whether --l2-genesis is set:
//   - full mode: load a complete genesis dump, export every contract bytecode
//     to the bytecodes collection, and write an allocation file that
//     references code by hash only;
//   - header-only mode: load just the genesis block header and write a
//     state-hash-based genesis (used when the full state is too large or
//     deliberately unavailable).
func entrypoint(ctx *cli.Context) error {
	genesisPath := ctx.Path(L2GenesisFlag.Name)
	if genesisPath == "" {
		// When the genesis-state is too large, or not meant to be available, only the header data is made available.
		// This allows the user to verify the header-chain starting from genesis, and state-sync the latest state,
		// skipping the historical state.
		// Archive nodes that depend on this historical state should instantiate the chain from a full genesis dump
		// with allocation data, or datadir.
		return writeHeaderOnlyGenesis(ctx)
	}

	genesis, err := loadJSON[core.Genesis](genesisPath)
	if err != nil {
		return fmt.Errorf("failed to load L2 genesis: %w", err)
	}

	// export all contract bytecodes, write them to bytecodes collection
	if err := exportBytecodes(ctx.Path(BytecodesDirFlag.Name), genesis); err != nil {
		return err
	}

	// convert into allocation data
	out := buildRegistryGenesis(genesis)

	// write genesis alloc
	if err := writeGzipJSON(ctx.Path(OutputFlag.Name), out); err != nil {
		return fmt.Errorf("failed to write output: %w", err)
	}
	return nil
}

// writeHeaderOnlyGenesis validates that the supplied genesis header commits to
// an empty block body (no txs, receipts, uncles, or withdrawals), then writes
// a Genesis that references the state by its root hash only.
func writeHeaderOnlyGenesis(ctx *cli.Context) error {
	genesisHeaderPath := ctx.Path(L2GenesisHeaderFlag.Name)
	genesisHeader, err := loadJSON[types.Header](genesisHeaderPath)
	if err != nil {
		return fmt.Errorf("genesis-header %q failed to load: %w", genesisHeaderPath, err)
	}
	if genesisHeader.TxHash != types.EmptyTxsHash {
		return errors.New("genesis-header based genesis must have no transactions")
	}
	if genesisHeader.ReceiptHash != types.EmptyReceiptsHash {
		return errors.New("genesis-header based genesis must have no receipts")
	}
	if genesisHeader.UncleHash != types.EmptyUncleHash {
		return errors.New("genesis-header based genesis must have no uncle hashes")
	}
	if genesisHeader.WithdrawalsHash != nil && *genesisHeader.WithdrawalsHash != types.EmptyWithdrawalsHash {
		return errors.New("genesis-header based genesis must have no withdrawals")
	}
	out := Genesis{
		Nonce:      genesisHeader.Nonce.Uint64(),
		Timestamp:  genesisHeader.Time,
		ExtraData:  genesisHeader.Extra,
		GasLimit:   genesisHeader.GasLimit,
		Difficulty: (*hexutil.Big)(genesisHeader.Difficulty),
		Mixhash:    genesisHeader.MixDigest,
		Coinbase:   genesisHeader.Coinbase,
		Number:     genesisHeader.Number.Uint64(),
		GasUsed:    genesisHeader.GasUsed,
		ParentHash: genesisHeader.ParentHash,
		BaseFee:    (*hexutil.Big)(genesisHeader.BaseFee),
		Alloc:      make(jsonutil.LazySortedJsonMap[common.Address, GenesisAccount]),
		StateHash:  &genesisHeader.Root,
	}
	if err := writeGzipJSON(ctx.Path(OutputFlag.Name), out); err != nil {
		return fmt.Errorf("failed to write output: %w", err)
	}
	return nil
}

// exportBytecodes writes each contract bytecode in the genesis alloc to
// bytecodesDir as a gzipped file named after its keccak256 code hash.
// Bytecodes already present on disk are left untouched.
func exportBytecodes(bytecodesDir string, genesis *core.Genesis) error {
	if err := os.MkdirAll(bytecodesDir, 0755); err != nil {
		return fmt.Errorf("failed to make bytecodes dir: %w", err)
	}
	for addr, account := range genesis.Alloc {
		if len(account.Code) == 0 {
			continue
		}
		codeHash := crypto.Keccak256Hash(account.Code)
		name := filepath.Join(bytecodesDir, fmt.Sprintf("%s.bin.gz", codeHash))
		_, err := os.Stat(name)
		if err == nil {
			continue // already exists
		}
		if !os.IsNotExist(err) {
			return fmt.Errorf("failed to check for pre-existing bytecode %s for address %s: %w", codeHash, addr, err)
		}
		// new bytecode: gzip-compress in memory, then write out atomically as one file
		var buf bytes.Buffer
		w, err := gzip.NewWriterLevel(&buf, 9)
		if err != nil {
			return fmt.Errorf("failed to construct gzip writer for bytecode %s: %w", codeHash, err)
		}
		if _, err := w.Write(account.Code); err != nil {
			return fmt.Errorf("failed to write bytecode %s to gzip writer: %w", codeHash, err)
		}
		if err := w.Close(); err != nil {
			return fmt.Errorf("failed to close gzip writer: %w", err)
		}
		if err := os.WriteFile(name, buf.Bytes(), 0755); err != nil {
			return fmt.Errorf("failed to write bytecode %s of account %s: %w", codeHash, addr, err)
		}
	}
	return nil
}

// buildRegistryGenesis converts a full genesis dump into the registry form:
// header fields are copied, while each account references its code by
// code-hash only, and zero balances / empty storage are omitted.
func buildRegistryGenesis(genesis *core.Genesis) Genesis {
	out := Genesis{
		Nonce:      genesis.Nonce,
		Timestamp:  genesis.Timestamp,
		ExtraData:  genesis.ExtraData,
		GasLimit:   genesis.GasLimit,
		Difficulty: (*hexutil.Big)(genesis.Difficulty),
		Mixhash:    genesis.Mixhash,
		Coinbase:   genesis.Coinbase,
		Number:     genesis.Number,
		GasUsed:    genesis.GasUsed,
		ParentHash: genesis.ParentHash,
		BaseFee:    (*hexutil.Big)(genesis.BaseFee),
		Alloc:      make(jsonutil.LazySortedJsonMap[common.Address, GenesisAccount]),
	}
	// write genesis, but only reference code by code-hash, and don't encode the L2 predeploys to save space.
	for addr, account := range genesis.Alloc {
		var codeHash common.Hash
		if len(account.Code) > 0 {
			codeHash = crypto.Keccak256Hash(account.Code)
		}
		outAcc := GenesisAccount{
			CodeHash: codeHash,
			Nonce:    account.Nonce,
		}
		if account.Balance != nil && account.Balance.Cmp(common.Big0) != 0 {
			outAcc.Balance = (*hexutil.Big)(account.Balance)
		}
		if len(account.Storage) > 0 {
			outAcc.Storage = make(jsonutil.LazySortedJsonMap[common.Hash, common.Hash])
			for k, v := range account.Storage {
				outAcc.Storage[k] = v
			}
		}
		out.Alloc[addr] = outAcc
	}
	return out
}
...@@ -15,12 +15,14 @@ import ( ...@@ -15,12 +15,14 @@ import (
"github.com/ethereum-optimism/optimism/op-challenger/fault/alphabet" "github.com/ethereum-optimism/optimism/op-challenger/fault/alphabet"
"github.com/ethereum-optimism/optimism/op-challenger/fault/cannon" "github.com/ethereum-optimism/optimism/op-challenger/fault/cannon"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -106,8 +108,11 @@ func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet s ...@@ -106,8 +108,11 @@ func (h *FactoryHelper) StartAlphabetGame(ctx context.Context, claimedAlphabet s
extraData := make([]byte, 64) extraData := make([]byte, 64)
binary.BigEndian.PutUint64(extraData[24:], l2BlockNumber) binary.BigEndian.PutUint64(extraData[24:], l2BlockNumber)
binary.BigEndian.PutUint64(extraData[56:], l1Head.Uint64()) binary.BigEndian.PutUint64(extraData[56:], l1Head.Uint64())
tx, err := h.factory.Create(h.opts, alphabetGameType, rootClaim, extraData) tx, err := transactions.PadGasEstimate(h.opts, 2, func(opts *bind.TransactOpts) (*types.Transaction, error) {
return h.factory.Create(opts, alphabetGameType, rootClaim, extraData)
})
h.require.NoError(err, "create fault dispute game") h.require.NoError(err, "create fault dispute game")
h.opts.GasLimit = 0
rcpt, err := wait.ForReceiptOK(ctx, h.client, tx.Hash()) rcpt, err := wait.ForReceiptOK(ctx, h.client, tx.Hash())
h.require.NoError(err, "wait for create fault dispute game receipt to be OK") h.require.NoError(err, "wait for create fault dispute game receipt to be OK")
h.require.Len(rcpt.Logs, 1, "should have emitted a single DisputeGameCreated event") h.require.Len(rcpt.Logs, 1, "should have emitted a single DisputeGameCreated event")
...@@ -191,7 +196,9 @@ func (h *FactoryHelper) createCannonGame(ctx context.Context, l2BlockNumber uint ...@@ -191,7 +196,9 @@ func (h *FactoryHelper) createCannonGame(ctx context.Context, l2BlockNumber uint
extraData := make([]byte, 64) extraData := make([]byte, 64)
binary.BigEndian.PutUint64(extraData[24:], l2BlockNumber) binary.BigEndian.PutUint64(extraData[24:], l2BlockNumber)
binary.BigEndian.PutUint64(extraData[56:], l1Head.Uint64()) binary.BigEndian.PutUint64(extraData[56:], l1Head.Uint64())
tx, err := h.factory.Create(h.opts, cannonGameType, rootClaim, extraData) tx, err := transactions.PadGasEstimate(h.opts, 2, func(opts *bind.TransactOpts) (*types.Transaction, error) {
return h.factory.Create(opts, cannonGameType, rootClaim, extraData)
})
h.require.NoError(err, "create fault dispute game") h.require.NoError(err, "create fault dispute game")
rcpt, err := wait.ForReceiptOK(ctx, h.client, tx.Hash()) rcpt, err := wait.ForReceiptOK(ctx, h.client, tx.Hash())
h.require.NoError(err, "wait for create fault dispute game receipt to be OK") h.require.NoError(err, "wait for create fault dispute game receipt to be OK")
......
package transactions
import (
"fmt"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core/types"
)
// TxBuilder creates and sends a transaction using the supplied bind.TransactOpts.
// Returns the created transaction and any error reported.
// Implementations are typically thin closures over a contract binding method,
// so the same call can be replayed with modified opts (see PadGasEstimate).
type TxBuilder func(opts *bind.TransactOpts) (*types.Transaction, error)
// PadGasEstimate multiplies the gas estimate for a transaction by the specified paddingFactor before sending the
// actual transaction. Useful for cases where the gas required is variable.
// The builder will be invoked twice, first with NoSend=true to estimate the gas and the second time with
// NoSend=false and GasLimit including the requested padding.
func PadGasEstimate(opts *bind.TransactOpts, paddingFactor float64, builder TxBuilder) (*types.Transaction, error) {
	// Work on copies so the caller's opts are never mutated.
	estimateOpts := *opts
	estimateOpts.NoSend = true

	// First pass: build without sending, purely to obtain the gas estimate.
	estimated, err := builder(&estimateOpts)
	if err != nil {
		return nil, fmt.Errorf("failed to estimate gas: %w", err)
	}

	// Second pass: send for real with the padded gas limit.
	sendOpts := estimateOpts
	sendOpts.NoSend = false
	sendOpts.GasLimit = uint64(float64(estimated.Gas()) * paddingFactor)
	return builder(&sendOpts)
}
...@@ -8,6 +8,7 @@ import ( ...@@ -8,6 +8,7 @@ import (
"time" "time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -29,19 +30,12 @@ func SendDepositTx(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l ...@@ -29,19 +30,12 @@ func SendDepositTx(t *testing.T, cfg SystemConfig, l1Client *ethclient.Client, l
require.Nil(t, err) require.Nil(t, err)
// Finally send TX // Finally send TX
l1Opts.NoSend = true
tx, err := depositContract.DepositTransaction(l1Opts, l2Opts.ToAddr, l2Opts.Value, l2Opts.GasLimit, l2Opts.IsCreation, l2Opts.Data)
require.Nil(t, err, "with deposit tx")
l1Opts.NoSend = false
// Add 10% padding for the L1 gas limit because the estimation process can be affected by the 1559 style cost scale // Add 10% padding for the L1 gas limit because the estimation process can be affected by the 1559 style cost scale
// for buying L2 gas in the portal contracts. // for buying L2 gas in the portal contracts.
l1Opts.GasLimit = tx.Gas() + (tx.Gas() / 10) tx, err := transactions.PadGasEstimate(l1Opts, 1.1, func(opts *bind.TransactOpts) (*types.Transaction, error) {
return depositContract.DepositTransaction(opts, l2Opts.ToAddr, l2Opts.Value, l2Opts.GasLimit, l2Opts.IsCreation, l2Opts.Data)
// Now resend with gas specified })
tx, err = depositContract.DepositTransaction(l1Opts, l2Opts.ToAddr, l2Opts.Value, l2Opts.GasLimit, l2Opts.IsCreation, l2Opts.Data)
require.Nil(t, err, "with deposit tx") require.Nil(t, err, "with deposit tx")
l1Opts.GasLimit = 0
// Wait for transaction on L1 // Wait for transaction on L1
receipt, err := waitForTransaction(tx.Hash(), l1Client, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) receipt, err := waitForTransaction(tx.Hash(), l1Client, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
......
...@@ -50,7 +50,8 @@ var ( ...@@ -50,7 +50,8 @@ var (
// Banning Flag - whether or not we want to act on the scoring // Banning Flag - whether or not we want to act on the scoring
Banning = &cli.BoolFlag{ Banning = &cli.BoolFlag{
Name: "p2p.ban.peers", Name: "p2p.ban.peers",
Usage: "Enables peer banning. This should ONLY be enabled once certain peer scoring is working correctly.", Usage: "Enables peer banning.",
Value: true,
Required: false, Required: false,
EnvVars: p2pEnv("PEER_BANNING"), EnvVars: p2pEnv("PEER_BANNING"),
} }
......
package jsonutil
import (
"encoding/json"
"fmt"
"sort"
)
// LazySortedJsonMap provides sorted encoding order for JSON maps.
// The sorting is lazy: in-memory it's just a map, until it sorts just-in-time when the map is encoded to JSON.
// Warning: the just-in-time sorting requires a full allocation of the map structure and keys slice during encoding.
// Sorting order is not enforced when decoding from JSON.
type LazySortedJsonMap[K comparable, V any] map[K]V
func (m LazySortedJsonMap[K, V]) MarshalJSON() ([]byte, error) {
keys := make([]string, 0, len(m))
values := make(map[string]V)
for k, v := range m {
s := fmt.Sprintf("%q", any(k)) // format as quoted string
keys = append(keys, s)
values[s] = v
}
sort.Strings(keys)
var out []byte
out = append(out, '{')
for i, k := range keys {
out = append(out, k...) // quotes are already included
out = append(out, ':')
v, err := json.Marshal(values[k])
if err != nil {
return nil, fmt.Errorf("failed to encode value of %s: %w", k, err)
}
out = append(out, v...)
if i != len(keys)-1 {
out = append(out, ',')
}
}
out = append(out, '}')
return out, nil
}
func (m *LazySortedJsonMap[K, V]) UnmarshalJSON(data []byte) error {
return json.Unmarshal(data, (*map[K]V)(m))
}
package jsonutil
import (
"encoding/json"
"fmt"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
// LazySortedJsonMapTestCase pairs a map under test with the exact JSON
// encoding it is expected to produce.
type LazySortedJsonMapTestCase[K comparable, V any] struct {
	Object LazySortedJsonMap[K, V] // map under test
	Json   string                  // canonical JSON expected from marshalling Object
}

// Run verifies both directions: Object marshals to exactly Json, and Json
// unmarshals back into a map equivalent to Object.
func (tc *LazySortedJsonMapTestCase[K, V]) Run(t *testing.T) {
	t.Run("Marshal", func(t *testing.T) {
		encoded, err := json.Marshal(tc.Object)
		require.NoError(t, err)
		require.Equal(t, tc.Json, string(encoded), "json output must match")
	})
	t.Run("Unmarshal", func(t *testing.T) {
		var decoded LazySortedJsonMap[K, V]
		require.NoError(t, json.Unmarshal([]byte(tc.Json), &decoded))
		require.Equal(t, len(tc.Object), len(decoded), "lengths match")
		for key, want := range tc.Object {
			require.Equal(t, want, decoded[key], "values of %q match", key)
		}
	})
}
// TestLazySortedJsonMap exercises marshalling and unmarshalling across
// several key types: an empty map, plain string keys, and non-string keys
// (common.Address, encoded via its Stringer form).
func TestLazySortedJsonMap(t *testing.T) {
	cases := []interface{ Run(t *testing.T) }{
		&LazySortedJsonMapTestCase[string, int]{
			Object: LazySortedJsonMap[string, int]{},
			Json:   `{}`,
		},
		&LazySortedJsonMapTestCase[string, int]{
			Object: LazySortedJsonMap[string, int]{"a": 1, "c": 2, "b": 3},
			Json:   `{"a":1,"b":3,"c":2}`,
		},
		&LazySortedJsonMapTestCase[common.Address, int]{
			Object: LazySortedJsonMap[common.Address, int]{
				common.HexToAddress("0x4100000000000000000000000000000000000000"): 123,
				common.HexToAddress("0x4200000000000000000000000000000000000000"): 100,
				common.HexToAddress("0x4200000000000000000000000000000000000001"): 100,
			},
			Json: `{"0x4100000000000000000000000000000000000000":123,` +
				`"0x4200000000000000000000000000000000000000":100,` +
				`"0x4200000000000000000000000000000000000001":100}`,
		},
	}
	for i, c := range cases {
		t.Run(fmt.Sprintf("case %d", i), c.Run)
	}
}
...@@ -62,7 +62,7 @@ ...@@ -62,7 +62,7 @@
"tsup": "^7.1.0", "tsup": "^7.1.0",
"typescript": "^5.1.6", "typescript": "^5.1.6",
"vite": "^4.4.6", "vite": "^4.4.6",
"vitest": "^0.33.0" "vitest": "^0.34.2"
}, },
"peerDependencies": { "peerDependencies": {
"@wagmi/core": ">1.0.0", "@wagmi/core": ">1.0.0",
......
...@@ -46,7 +46,7 @@ ...@@ -46,7 +46,7 @@
"typescript": "^5.1.6", "typescript": "^5.1.6",
"viem": "^1.3.1", "viem": "^1.3.1",
"vite": "^4.4.6", "vite": "^4.4.6",
"vitest": "^0.33.0" "vitest": "^0.34.2"
}, },
"peerDependencies": { "peerDependencies": {
"viem": "^0.3.30" "viem": "^0.3.30"
......
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment