Commit bf8b130c authored by mergify[bot], committed by GitHub

Merge branch 'develop' into fix/comment-l2oo

parents 4d24aaa8 17e4ffe2
---
'@eth-optimism/contracts-bedrock': patch
---
Reduce the time that the system dictator deploy scripts wait before checking the chain state.
---
'@eth-optimism/sdk': patch
---
Have the SDK automatically create Standard and ETH bridges when L1StandardBridge is provided.
---
'@eth-optimism/batch-submitter-service': patch
---
Allow deposit-only batches
---
'@eth-optimism/data-transport-layer': patch
---
Add better logging to DTL about shutoff block
---
'@eth-optimism/chain-mon': minor
---
Introduces the balance-mon service to chain-mon.
---
'@eth-optimism/contracts-bedrock': patch
---
Makes the Proxy contract inheritable by making its functions public virtual.
---
'@eth-optimism/contracts-bedrock': patch
---
Added a constructor to the System Dictator
---
'@eth-optimism/hardhat-deploy-config': patch
---
Add getter for other network's deploy config
---
'@eth-optimism/batch-submitter-service': patch
---
fix flag name for MaxStateRootElements in batch-submitter
fix log package for proposer
---
'@eth-optimism/chain-mon': patch
'@eth-optimism/data-transport-layer': patch
'@eth-optimism/fault-detector': patch
'@eth-optimism/message-relayer': patch
'@eth-optimism/replica-healthcheck': patch
---
Empty patch release to re-release packages that failed to be released due to a bug in the release process.
......@@ -620,7 +620,7 @@ jobs:
name: run tests
command: |
gotestsum --format=standard-verbose --junitfile=/tmp/test-results/<<parameters.module>>.xml \
-- -coverpkg=github.com/ethereum-optimism/optimism/... -coverprofile=coverage.out ./...
-- -parallel=8 -coverpkg=github.com/ethereum-optimism/optimism/... -coverprofile=coverage.out ./...
working_directory: <<parameters.module>>
- run:
name: upload coverage
......@@ -651,9 +651,10 @@ jobs:
command: |
# Note: We don't use CircleCI test splits because we need to split by test name, not by package. There is an additional
# constraint that gotestsum does not currently (nor likely will) accept files from different packages when building.
OP_TESTLOG_DISABLE_COLOR=true OP_E2E_DISABLE_PARALLEL=true OP_E2E_USE_HTTP=<<parameters.use_http>> gotestsum \
# Note: -parallel must be set to match the number of cores in the resource class
OP_TESTLOG_DISABLE_COLOR=true OP_E2E_DISABLE_PARALLEL=false OP_E2E_USE_HTTP=<<parameters.use_http>> gotestsum \
--format=standard-verbose --junitfile=/tmp/test-results/<<parameters.module>>_http_<<parameters.use_http>>.xml \
-- -timeout=20m ./...
-- -timeout=20m -parallel=8 ./...
working_directory: <<parameters.module>>
- store_test_results:
path: /tmp/test-results
......
# @eth-optimism/batch-submitter-service
## 0.1.16
### Patch Changes
- 32bd79ec9: Allow deposit-only batches
- da79ef441: fix flag name for MaxStateRootElements in batch-submitter
fix log package for proposer
## 0.1.15
### Patch Changes
......
{
"name": "@eth-optimism/batch-submitter-service",
"version": "0.1.15",
"version": "0.1.16",
"private": true,
"devDependencies": {}
}
......@@ -30,10 +30,10 @@
"devDependencies": {
"@babel/eslint-parser": "^7.5.4",
"@eth-optimism/contracts": "^0.5.40",
"@eth-optimism/contracts-bedrock": "0.13.1",
"@eth-optimism/contracts-bedrock": "0.13.2",
"@eth-optimism/contracts-periphery": "^1.0.7",
"@eth-optimism/core-utils": "0.12.0",
"@eth-optimism/sdk": "2.0.1",
"@eth-optimism/sdk": "2.0.2",
"@ethersproject/abstract-provider": "^5.7.0",
"@ethersproject/providers": "^5.7.0",
"@ethersproject/transactions": "^5.7.0",
......
......@@ -3,6 +3,7 @@ package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"math/big"
"os"
......@@ -168,10 +169,14 @@ func main() {
var block *types.Block
tag := config.L1StartingBlockTag
if tag.BlockNumber != nil {
block, err = l1Client.BlockByNumber(context.Background(), big.NewInt(tag.BlockNumber.Int64()))
} else if tag.BlockHash != nil {
block, err = l1Client.BlockByHash(context.Background(), *tag.BlockHash)
if tag == nil {
return errors.New("l1StartingBlockTag cannot be nil")
}
log.Info("Using L1 Starting Block Tag", "tag", tag.String())
if number, isNumber := tag.Number(); isNumber {
block, err = l1Client.BlockByNumber(context.Background(), big.NewInt(number.Int64()))
} else if hash, isHash := tag.Hash(); isHash {
block, err = l1Client.BlockByHash(context.Background(), hash)
} else {
return fmt.Errorf("invalid l1StartingBlockTag in deploy config: %v", tag)
}
......
......@@ -208,6 +208,11 @@ func (d *DeployConfig) Check() error {
if d.L2GenesisBlockGasLimit == 0 {
return fmt.Errorf("%w: L2 genesis block gas limit cannot be 0", ErrInvalidDeployConfig)
}
// When the initial resource config is made to be configurable by the DeployConfig, ensure
// that this check is updated to use the values from the DeployConfig instead of the defaults.
if uint64(d.L2GenesisBlockGasLimit) < uint64(defaultResourceConfig.MaxResourceLimit+defaultResourceConfig.SystemTxMaxGas) {
return fmt.Errorf("%w: L2 genesis block gas limit is too small", ErrInvalidDeployConfig)
}
if d.L2GenesisBlockBaseFeePerGas == nil {
return fmt.Errorf("%w: L2 genesis block base fee per gas cannot be nil", ErrInvalidDeployConfig)
}
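For context, a minimal sketch of the lower bound this new check enforces, assuming the default resource config uses a MaxResourceLimit of 20_000_000 and a SystemTxMaxGas of 1_000_000 (those constants are not shown in this diff, so the exact values are an assumption):

package main

import "fmt"

// Assumed defaults mirroring defaultResourceConfig; not part of this diff.
const (
	assumedMaxResourceLimit uint64 = 20_000_000
	assumedSystemTxMaxGas   uint64 = 1_000_000
)

// gasLimitTooSmall mirrors the check added to DeployConfig.Check above: the L2 genesis
// block gas limit must cover the max resource limit plus the system transaction gas.
func gasLimitTooSmall(l2GenesisBlockGasLimit uint64) bool {
	return l2GenesisBlockGasLimit < assumedMaxResourceLimit+assumedSystemTxMaxGas
}

func main() {
	fmt.Println(gasLimitTooSmall(15_000_000)) // true: the previous 15M default would now be rejected
	fmt.Println(gasLimitTooSmall(30_000_000)) // false: the new 30M default passes
}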
......@@ -493,3 +498,18 @@ func (m *MarshalableRPCBlockNumberOrHash) UnmarshalJSON(b []byte) error {
*m = asMarshalable
return nil
}
// Number wraps the rpc.BlockNumberOrHash Number method.
func (m *MarshalableRPCBlockNumberOrHash) Number() (rpc.BlockNumber, bool) {
return (*rpc.BlockNumberOrHash)(m).Number()
}
// Hash wraps the rpc.BlockNumberOrHash Hash method.
func (m *MarshalableRPCBlockNumberOrHash) Hash() (common.Hash, bool) {
return (*rpc.BlockNumberOrHash)(m).Hash()
}
// String wraps the rpc.BlockNumberOrHash String method.
func (m *MarshalableRPCBlockNumberOrHash) String() string {
return (*rpc.BlockNumberOrHash)(m).String()
}
......@@ -15,7 +15,8 @@ import (
"github.com/ethereum/go-ethereum/params"
)
const defaultL2GasLimit = 15_000_000
// defaultL2GasLimit represents the default gas limit for an L2 block.
const defaultL2GasLimit = 30_000_000
// NewL2Genesis will create a new L2 genesis
func NewL2Genesis(config *DeployConfig, block *types.Block) (*core.Genesis, error) {
......
......@@ -384,7 +384,7 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) {
}
// TestBigL2Txs tests a high-throughput case with constrained batcher:
// - Fill 100 L2 blocks to near max-capacity, with txs of 120 KB each
// - Fill 40 L2 blocks to near max-capacity, with txs of 120 KB each
// - Buffer the L2 blocks into channels together as much as possible, submit data-txs only when necessary
// (just before crossing the max RLP channel size)
// - Limit the data-tx size to 40 KB, to force data to be split across multiple data-txs
......@@ -428,7 +428,7 @@ func TestBigL2Txs(gt *testing.T) {
}
// build many L2 blocks filled to the brim with large txs of random data
for i := 0; i < 100; i++ {
for i := 0; i < 40; i++ {
aliceNonce, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
status := sequencer.SyncStatus()
// build empty L1 blocks as necessary, so the L2 sequencer can continue to include txs while not drifting too far out
......
......@@ -121,6 +121,7 @@ var hardcodedSlots = []storageSlot{
}
func TestMigration(t *testing.T) {
parallel(t)
if !config.enabled {
t.Skipf("skipping migration tests")
return
......
......@@ -20,6 +20,7 @@ import (
// TestMissingGasLimit tests that op-geth cannot build a block without gas limit while optimism is active in the chain config.
func TestMissingGasLimit(t *testing.T) {
parallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.FundDevAccounts = false
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
......@@ -42,6 +43,7 @@ func TestMissingGasLimit(t *testing.T) {
// TestInvalidDepositInFCU runs an invalid deposit through a FCU/GetPayload/NewPayload/FCU set of calls.
// This tests that deposits must always allow the block to be built even if they are invalid.
func TestInvalidDepositInFCU(t *testing.T) {
parallel(t)
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.FundDevAccounts = false
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
......@@ -76,6 +78,7 @@ func TestInvalidDepositInFCU(t *testing.T) {
}
func TestPreregolith(t *testing.T) {
parallel(t)
futureTimestamp := hexutil.Uint64(4)
tests := []struct {
name string
......@@ -255,6 +258,7 @@ func TestPreregolith(t *testing.T) {
}
func TestRegolith(t *testing.T) {
parallel(t)
tests := []struct {
name string
regolithTime hexutil.Uint64
......
......@@ -68,7 +68,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
deployConfig := &genesis.DeployConfig{
L1ChainID: 900,
L2ChainID: 901,
L2BlockTime: 2,
L2BlockTime: 1,
FinalizationPeriodSeconds: 60 * 60 * 24,
MaxSequencerDrift: 10,
......
......@@ -589,9 +589,10 @@ func TestSystemMockP2P(t *testing.T) {
}
cfg := DefaultSystemConfig(t)
// slow down L1 blocks so we can see the L2 blocks arrive well before the L1 blocks do.
// Keep the seq window small so the L2 chain is started quickly
cfg.DeployConfig.L1BlockTime = 10
// Disable batcher, so we don't sync from L1
cfg.DisableBatcher = true
// disable at the start, so we don't miss any gossiped blocks.
cfg.Nodes["sequencer"].Driver.SequencerStopped = true
// connect the nodes
cfg.P2PTopology = map[string][]string{
......@@ -613,6 +614,11 @@ func TestSystemMockP2P(t *testing.T) {
require.Nil(t, err, "Error starting up system")
defer sys.Close()
// Enable the sequencer now that everyone is ready to receive payloads.
rollupRPCClient, err := rpc.DialContext(context.Background(), sys.RollupNodes["sequencer"].HTTPEndpoint())
require.Nil(t, err)
require.NoError(t, rollupRPCClient.Call(nil, "admin_startSequencer", sys.L2GenesisCfg.ToBlock().Hash()))
l2Seq := sys.Clients["sequencer"]
l2Verif := sys.Clients["verifier"]
......@@ -634,11 +640,11 @@ func TestSystemMockP2P(t *testing.T) {
require.Nil(t, err, "Sending L2 tx to sequencer")
// Wait for tx to be mined on the L2 sequencer chain
receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 6*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on sequencer")
// Wait until the block it was first included in shows up in the safe chain on the verifier
receiptVerif, err := waitForTransaction(tx.Hash(), l2Verif, 6*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
receiptVerif, err := waitForTransaction(tx.Hash(), l2Verif, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second)
require.Nil(t, err, "Waiting for L2 tx on verifier")
require.Equal(t, receiptSeq, receiptVerif)
......
......@@ -425,6 +425,8 @@ func TestMixedWithdrawalValidity(t *testing.T) {
for i := 0; i <= 8; i++ {
i := i // avoid loop var capture
t.Run(fmt.Sprintf("withdrawal test#%d", i+1), func(t *testing.T) {
parallel(t)
// Create our system configuration, funding all accounts we created for L1/L2, and start it
cfg := DefaultSystemConfig(t)
cfg.DeployConfig.FinalizationPeriodSeconds = 6
......
......@@ -124,6 +124,7 @@ func (cb *ChannelBank) Read() (data []byte, err error) {
if !ch.IsReady() {
return nil, io.EOF
}
cb.log.Info("Reading channel", "channel", first, "frames", len(ch.inputs))
delete(cb.channels, first)
cb.channelQueue = cb.channelQueue[1:]
......
......@@ -206,6 +206,9 @@ func BlockToBatch(block *types.Block) (*BatchData, L1BlockInfo, error) {
}
opaqueTxs = append(opaqueTxs, otx)
}
if len(block.Transactions()) == 0 {
return nil, L1BlockInfo{}, fmt.Errorf("block %v has no transactions", block.Hash())
}
l1InfoTx := block.Transactions()[0]
if l1InfoTx.Type() != types.DepositTxType {
return nil, L1BlockInfo{}, ErrNotDepositTx
......
......@@ -132,3 +132,9 @@ func TestForceCloseTxData(t *testing.T) {
}
}
}
func TestBlockToBatchValidity(t *testing.T) {
block := new(types.Block)
_, _, err := BlockToBatch(block)
require.ErrorContains(t, err, "has no transactions")
}
package main
import (
"context"
"errors"
"fmt"
"os"
......@@ -8,6 +9,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-program/config"
"github.com/ethereum-optimism/optimism/op-program/flags"
"github.com/ethereum-optimism/optimism/op-program/l1"
"github.com/ethereum-optimism/optimism/op-program/l2"
"github.com/ethereum-optimism/optimism/op-program/version"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
......@@ -92,8 +94,15 @@ func FaultProofProgram(logger log.Logger, cfg *config.Config) error {
return errors.New("offline mode not supported")
}
ctx := context.Background()
logger.Info("Connecting to L1 node", "l1", cfg.L1URL)
_, err := l1.NewFetchingL1(ctx, logger, cfg)
if err != nil {
return fmt.Errorf("connect l1 oracle: %w", err)
}
logger.Info("Connecting to L2 node", "l2", cfg.L2URL)
_, err := l2.NewFetchingL2Oracle(logger, cfg.L2URL)
_, err = l2.NewFetchingEngine(ctx, logger, cfg)
if err != nil {
return fmt.Errorf("connect l2 oracle: %w", err)
}
......
......@@ -6,11 +6,15 @@ import (
"testing"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-program/config"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
var l2HeadValue = "0x6303578b1fa9480389c51bbcef6fe045bb877da39740819e9eb5f36f94949bd0"
func TestLogLevel(t *testing.T) {
t.Run("RejectInvalid", func(t *testing.T) {
verifyArgsInvalid(t, "unknown level: foo", addRequiredArgs("--log.level=foo"))
......@@ -28,7 +32,7 @@ func TestLogLevel(t *testing.T) {
func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs())
require.Equal(t, config.NewConfig(&chaincfg.Goerli), cfg)
require.Equal(t, config.NewConfig(&chaincfg.Goerli, "genesis.json", common.HexToHash(l2HeadValue)), cfg)
}
func TestNetwork(t *testing.T) {
......@@ -72,10 +76,80 @@ func TestL2(t *testing.T) {
require.Equal(t, expected, cfg.L2URL)
}
func TestL2Genesis(t *testing.T) {
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag l2.genesis is required", addRequiredArgsExcept("--l2.genesis"))
})
t.Run("Valid", func(t *testing.T) {
cfg := configForArgs(t, replaceRequiredArg("--l2.genesis", "/tmp/genesis.json"))
require.Equal(t, "/tmp/genesis.json", cfg.L2GenesisPath)
})
}
func TestL2Head(t *testing.T) {
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag l2.head is required", addRequiredArgsExcept("--l2.head"))
})
t.Run("Valid", func(t *testing.T) {
cfg := configForArgs(t, replaceRequiredArg("--l2.head", l2HeadValue))
require.Equal(t, common.HexToHash(l2HeadValue), cfg.L2Head)
})
t.Run("Invalid", func(t *testing.T) {
verifyArgsInvalid(t, config.ErrInvalidL2Head.Error(), replaceRequiredArg("--l2.head", "something"))
})
}
func TestL1(t *testing.T) {
expected := "https://example.com:8545"
cfg := configForArgs(t, addRequiredArgs("--l1", expected))
require.Equal(t, expected, cfg.L1URL)
}
func TestL1TrustRPC(t *testing.T) {
t.Run("DefaultFalse", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs())
require.False(t, cfg.L1TrustRPC)
})
t.Run("Enabled", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs("--l1.trustrpc"))
require.True(t, cfg.L1TrustRPC)
})
t.Run("EnabledWithArg", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs("--l1.trustrpc=true"))
require.True(t, cfg.L1TrustRPC)
})
t.Run("Disabled", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs("--l1.trustrpc=false"))
require.False(t, cfg.L1TrustRPC)
})
}
func TestL1RPCKind(t *testing.T) {
t.Run("DefaultBasic", func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs())
require.Equal(t, sources.RPCKindBasic, cfg.L1RPCKind)
})
for _, kind := range sources.RPCProviderKinds {
t.Run(kind.String(), func(t *testing.T) {
cfg := configForArgs(t, addRequiredArgs("--l1.rpckind", kind.String()))
require.Equal(t, kind, cfg.L1RPCKind)
})
}
t.Run("RequireLowercase", func(t *testing.T) {
verifyArgsInvalid(t, "rpc kind", addRequiredArgs("--l1.rpckind", "AlChemY"))
})
t.Run("UnknownKind", func(t *testing.T) {
verifyArgsInvalid(t, "\"foo\"", addRequiredArgs("--l1.rpckind", "foo"))
})
}
// Offline support will be added later, but for now it just bails out with an error
func TestOfflineModeNotSupported(t *testing.T) {
logger := log.New()
err := FaultProofProgram(logger, config.NewConfig(&chaincfg.Goerli))
err := FaultProofProgram(logger, config.NewConfig(&chaincfg.Goerli, "genesis.json", common.HexToHash(l2HeadValue)))
require.ErrorContains(t, err, "offline mode not supported")
}
......@@ -124,7 +198,9 @@ func replaceRequiredArg(name string, value string) []string {
// to create a valid Config
func requiredArgs() map[string]string {
return map[string]string{
"--network": "goerli",
"--network": "goerli",
"--l2.genesis": "genesis.json",
"--l2.head": l2HeadValue,
}
}
......
......@@ -5,17 +5,27 @@ import (
opnode "github.com/ethereum-optimism/optimism/op-node"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-program/flags"
"github.com/ethereum/go-ethereum/common"
"github.com/urfave/cli"
)
var (
ErrMissingRollupConfig = errors.New("missing rollup config")
ErrMissingL2Genesis = errors.New("missing l2 genesis")
ErrInvalidL2Head = errors.New("invalid l2 head")
ErrL1AndL2Inconsistent = errors.New("l1 and l2 options must be specified together or both omitted")
)
type Config struct {
Rollup *rollup.Config
L2URL string
Rollup *rollup.Config
L2URL string
L2GenesisPath string
L2Head common.Hash
L1URL string
L1TrustRPC bool
L1RPCKind sources.RPCProviderKind
}
func (c *Config) Check() error {
......@@ -25,17 +35,29 @@ func (c *Config) Check() error {
if err := c.Rollup.Check(); err != nil {
return err
}
if c.L2GenesisPath == "" {
return ErrMissingL2Genesis
}
if c.L2Head == (common.Hash{}) {
return ErrInvalidL2Head
}
if (c.L1URL != "") != (c.L2URL != "") {
return ErrL1AndL2Inconsistent
}
return nil
}
func (c *Config) FetchingEnabled() bool {
return c.L2URL != ""
return c.L1URL != "" && c.L2URL != ""
}
// NewConfig creates a Config with all optional values set to the CLI default value
func NewConfig(rollupCfg *rollup.Config) *Config {
func NewConfig(rollupCfg *rollup.Config, l2GenesisPath string, l2Head common.Hash) *Config {
return &Config{
Rollup: rollupCfg,
Rollup: rollupCfg,
L2GenesisPath: l2GenesisPath,
L2Head: l2Head,
L1RPCKind: sources.RPCKindBasic,
}
}
......@@ -47,8 +69,17 @@ func NewConfigFromCLI(ctx *cli.Context) (*Config, error) {
if err != nil {
return nil, err
}
l2Head := common.HexToHash(ctx.GlobalString(flags.L2Head.Name))
if l2Head == (common.Hash{}) {
return nil, ErrInvalidL2Head
}
return &Config{
Rollup: rollupCfg,
L2URL: ctx.GlobalString(flags.L2NodeAddr.Name),
Rollup: rollupCfg,
L2URL: ctx.GlobalString(flags.L2NodeAddr.Name),
L2GenesisPath: ctx.GlobalString(flags.L2GenesisPath.Name),
L2Head: l2Head,
L1URL: ctx.GlobalString(flags.L1NodeAddr.Name),
L1TrustRPC: ctx.GlobalBool(flags.L1TrustRPC.Name),
L1RPCKind: sources.RPCProviderKind(ctx.GlobalString(flags.L1RPCProviderKind.Name)),
}, nil
}
......@@ -5,35 +5,106 @@ import (
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
var validRollupConfig = &chaincfg.Goerli
var validL2GenesisPath = "genesis.json"
var validL2Head = common.HexToHash("0x6303578b1fa9480389c51bbcef6fe045bb877da39740819e9eb5f36f94949bd0")
func TestDefaultConfigIsValid(t *testing.T) {
err := NewConfig(&chaincfg.Goerli).Check()
err := NewConfig(validRollupConfig, validL2GenesisPath, validL2Head).Check()
require.NoError(t, err)
}
func TestRollupConfig(t *testing.T) {
t.Run("Required", func(t *testing.T) {
err := NewConfig(nil).Check()
err := NewConfig(nil, validL2GenesisPath, validL2Head).Check()
require.ErrorIs(t, err, ErrMissingRollupConfig)
})
t.Run("Valid", func(t *testing.T) {
err := NewConfig(&rollup.Config{}).Check()
t.Run("Invalid", func(t *testing.T) {
err := NewConfig(&rollup.Config{}, validL2GenesisPath, validL2Head).Check()
require.ErrorIs(t, err, rollup.ErrBlockTimeZero)
})
}
func TestL2Genesis(t *testing.T) {
t.Run("Required", func(t *testing.T) {
err := NewConfig(validRollupConfig, "", validL2Head).Check()
require.ErrorIs(t, err, ErrMissingL2Genesis)
})
t.Run("Valid", func(t *testing.T) {
err := NewConfig(validRollupConfig, validL2GenesisPath, validL2Head).Check()
require.NoError(t, err)
})
}
func TestL2Head(t *testing.T) {
t.Run("Required", func(t *testing.T) {
err := NewConfig(validRollupConfig, validL2GenesisPath, common.Hash{}).Check()
require.ErrorIs(t, err, ErrInvalidL2Head)
})
t.Run("Valid", func(t *testing.T) {
err := NewConfig(validRollupConfig, validL2GenesisPath, validL2Head).Check()
require.NoError(t, err)
})
}
func TestFetchingArgConsistency(t *testing.T) {
t.Run("RequireL2WhenL1Set", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head)
cfg.L1URL = "https://example.com:1234"
require.ErrorIs(t, cfg.Check(), ErrL1AndL2Inconsistent)
})
t.Run("RequireL1WhenL2Set", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head)
cfg.L2URL = "https://example.com:1234"
require.ErrorIs(t, cfg.Check(), ErrL1AndL2Inconsistent)
})
t.Run("AllowNeitherSet", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head)
require.NoError(t, cfg.Check())
})
t.Run("AllowBothSet", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head)
cfg.L1URL = "https://example.com:1234"
cfg.L2URL = "https://example.com:4678"
require.NoError(t, cfg.Check())
})
}
func TestFetchingEnabled(t *testing.T) {
t.Run("FetchingNotEnabledWhenNoFetcherUrlsSpecified", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1)
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head)
require.False(t, cfg.FetchingEnabled(), "Should not enable fetching when node URL not supplied")
})
t.Run("FetchingEnabledWhenFetcherUrlsSpecified", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1)
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head)
cfg.L2URL = "https://example.com:1234"
require.False(t, cfg.FetchingEnabled(), "Should not enable fetching when node URL not supplied")
})
t.Run("FetchingNotEnabledWhenNoL1UrlSpecified", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head)
cfg.L2URL = "https://example.com:1234"
require.False(t, cfg.FetchingEnabled(), "Should not enable L1 fetching when L1 node URL not supplied")
})
t.Run("FetchingNotEnabledWhenNoL2UrlSpecified", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head)
cfg.L1URL = "https://example.com:1234"
require.False(t, cfg.FetchingEnabled(), "Should not enable L2 fetching when L2 node URL not supplied")
})
t.Run("FetchingEnabledWhenBothFetcherUrlsSpecified", func(t *testing.T) {
cfg := NewConfig(&chaincfg.Beta1, validL2GenesisPath, validL2Head)
cfg.L1URL = "https://example.com:1234"
cfg.L2URL = "https://example.com:5678"
require.True(t, cfg.FetchingEnabled(), "Should enable fetching when node URL supplied")
})
}
......@@ -4,10 +4,13 @@ import (
"fmt"
"strings"
"github.com/urfave/cli"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
nodeflags "github.com/ethereum-optimism/optimism/op-node/flags"
"github.com/ethereum-optimism/optimism/op-node/sources"
service "github.com/ethereum-optimism/optimism/op-service"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
"github.com/urfave/cli"
)
const envVarPrefix = "OP_PROGRAM"
......@@ -28,6 +31,36 @@ var (
Usage: "Address of L2 JSON-RPC endpoint to use (eth and debug namespace required)",
EnvVar: service.PrefixEnvVar(envVarPrefix, "L2_RPC"),
}
L2GenesisPath = cli.StringFlag{
Name: "l2.genesis",
Usage: "Path to the op-geth genesis file",
EnvVar: service.PrefixEnvVar(envVarPrefix, "L2_GENESIS"),
}
L2Head = cli.StringFlag{
Name: "l2.head",
Usage: "Hash of the agreed L2 block to start derivation from",
EnvVar: service.PrefixEnvVar(envVarPrefix, "L2_HEAD"),
}
L1NodeAddr = cli.StringFlag{
Name: "l1",
Usage: "Address of L1 JSON-RPC endpoint to use (eth namespace required)",
EnvVar: service.PrefixEnvVar(envVarPrefix, "L1_RPC"),
}
L1TrustRPC = cli.BoolFlag{
Name: "l1.trustrpc",
Usage: "Trust the L1 RPC, sync faster at risk of malicious/buggy RPC providing bad or inconsistent L1 data",
EnvVar: service.PrefixEnvVar(envVarPrefix, "L1_TRUST_RPC"),
}
L1RPCProviderKind = cli.GenericFlag{
Name: "l1.rpckind",
Usage: "The kind of RPC provider, used to inform optimal transactions receipts fetching, and thus reduce costs. Valid options: " +
nodeflags.EnumString[sources.RPCProviderKind](sources.RPCProviderKinds),
EnvVar: service.PrefixEnvVar(envVarPrefix, "L1_RPC_KIND"),
Value: func() *sources.RPCProviderKind {
out := sources.RPCKindBasic
return &out
}(),
}
)
// Flags contains the list of configuration options available to the binary.
......@@ -37,6 +70,11 @@ var programFlags = []cli.Flag{
RollupConfig,
Network,
L2NodeAddr,
L2GenesisPath,
L2Head,
L1NodeAddr,
L1TrustRPC,
L1RPCProviderKind,
}
func init() {
......@@ -53,5 +91,11 @@ func CheckRequired(ctx *cli.Context) error {
if rollupConfig != "" && network != "" {
return fmt.Errorf("cannot specify both %s and %s", RollupConfig.Name, Network.Name)
}
if ctx.GlobalString(L2GenesisPath.Name) == "" {
return fmt.Errorf("flag %s is required", L2GenesisPath.Name)
}
if ctx.GlobalString(L2Head.Name) == "" {
return fmt.Errorf("flag %s is required", L2Head.Name)
}
return nil
}
......@@ -4,6 +4,8 @@ import (
"reflect"
"strings"
"testing"
"github.com/urfave/cli"
)
// TestUniqueFlags asserts that all flag names are unique, to avoid accidental conflicts between the many flags.
......@@ -19,15 +21,25 @@ func TestUniqueFlags(t *testing.T) {
}
}
// TestUniqueEnvVars asserts that all flag env vars are unique, to avoid accidental conflicts between the many flags.
func TestUniqueEnvVars(t *testing.T) {
seenCLI := make(map[string]struct{})
for _, flag := range Flags {
envVar := envVarForFlag(flag)
if _, ok := seenCLI[envVar]; envVar != "" && ok {
t.Errorf("duplicate flag env var %s", envVar)
continue
}
seenCLI[envVar] = struct{}{}
}
}
func TestCorrectEnvVarPrefix(t *testing.T) {
for _, flag := range Flags {
values := reflect.ValueOf(flag)
envVarValue := values.FieldByName("EnvVar")
if envVarValue == (reflect.Value{}) {
envVar := envVarForFlag(flag)
if envVar == "" {
t.Errorf("Failed to find EnvVar for flag %v", flag.GetName())
continue
}
envVar := envVarValue.String()
if envVar[:len("OP_PROGRAM_")] != "OP_PROGRAM_" {
t.Errorf("Flag %v env var (%v) does not start with OP_PROGRAM_", flag.GetName(), envVar)
}
......@@ -36,3 +48,12 @@ func TestCorrectEnvVarPrefix(t *testing.T) {
}
}
}
func envVarForFlag(flag cli.Flag) string {
values := reflect.ValueOf(flag)
envVarValue := values.FieldByName("EnvVar")
if envVarValue == (reflect.Value{}) {
return ""
}
return envVarValue.String()
}
package l1
import (
"context"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-program/config"
"github.com/ethereum/go-ethereum/log"
)
func NewFetchingL1(ctx context.Context, logger log.Logger, cfg *config.Config) (derive.L1Fetcher, error) {
rpc, err := client.NewRPC(ctx, logger, cfg.L1URL)
if err != nil {
return nil, err
}
return sources.NewL1Client(rpc, logger, nil, sources.L1ClientDefaultConfig(cfg.Rollup, cfg.L1TrustRPC, cfg.L1RPCKind))
}
package l2
import (
"bytes"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
)
var codePrefixedKeyLength = common.HashLength + len(rawdb.CodePrefix)
var (
ErrInvalidKeyLength = errors.New("pre-images must be identified by 32-byte hash keys")
)
type OracleKeyValueStore struct {
db ethdb.KeyValueStore
oracle StateOracle
}
func NewOracleBackedDB(oracle StateOracle) *OracleKeyValueStore {
return &OracleKeyValueStore{
db: memorydb.New(),
oracle: oracle,
}
}
func (o *OracleKeyValueStore) Get(key []byte) ([]byte, error) {
has, err := o.db.Has(key)
if err != nil {
return nil, fmt.Errorf("checking in-memory db: %w", err)
}
if has {
return o.db.Get(key)
}
if len(key) == codePrefixedKeyLength && bytes.HasPrefix(key, rawdb.CodePrefix) {
key = key[len(rawdb.CodePrefix):]
return o.oracle.CodeByHash(*(*[common.HashLength]byte)(key))
}
if len(key) != common.HashLength {
return nil, ErrInvalidKeyLength
}
return o.oracle.NodeByHash(*(*[common.HashLength]byte)(key))
}
func (o *OracleKeyValueStore) NewBatch() ethdb.Batch {
return o.db.NewBatch()
}
func (o *OracleKeyValueStore) NewBatchWithSize(size int) ethdb.Batch {
return o.db.NewBatchWithSize(size)
}
func (o *OracleKeyValueStore) Put(key []byte, value []byte) error {
return o.db.Put(key, value)
}
func (o *OracleKeyValueStore) Close() error {
return nil
}
// Remaining methods are unused when accessing the state for block processing, so they are left unimplemented.
func (o *OracleKeyValueStore) Has(key []byte) (bool, error) {
panic("not supported")
}
func (o *OracleKeyValueStore) Delete(key []byte) error {
panic("not supported")
}
func (o *OracleKeyValueStore) Stat(property string) (string, error) {
panic("not supported")
}
func (o *OracleKeyValueStore) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
panic("not supported")
}
func (o *OracleKeyValueStore) Compact(start []byte, limit []byte) error {
panic("not supported")
}
func (o *OracleKeyValueStore) NewSnapshot() (ethdb.Snapshot, error) {
panic("not supported")
}
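A minimal usage sketch of how this oracle-backed store is meant to be wired into geth's state access, mirroring the StateAt call on the oracle-backed chain later in this change; the helper name is hypothetical and not part of this diff:

package l2

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
)

// stateAtViaOracle is a hypothetical helper showing the intended wiring: geth's state
// package reads trie nodes and contract code through the oracle-backed key-value store.
func stateAtViaOracle(oracle StateOracle, root common.Hash) (*state.StateDB, error) {
	db := rawdb.NewDatabase(NewOracleBackedDB(oracle))
	return state.New(root, state.NewDatabase(db), nil)
}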
package l2
import (
"fmt"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
)
var (
userAccount = common.HexToAddress("0x1111")
codeAccount = common.HexToAddress("0x2222")
unknownAccount = common.HexToAddress("0x3333")
)
// Should implement the KeyValueStore API
var _ ethdb.KeyValueStore = (*OracleKeyValueStore)(nil)
func TestGet(t *testing.T) {
t.Run("UnknownKey", func(t *testing.T) {
oracle := newStubStateOracle()
db := NewOracleBackedDB(oracle)
val, err := db.Get(common.Hash{}.Bytes())
require.Error(t, err)
require.Nil(t, val)
})
t.Run("IncorrectLengthKey", func(t *testing.T) {
oracle := newStubStateOracle()
db := NewOracleBackedDB(oracle)
val, err := db.Get([]byte{1, 2, 3})
require.ErrorIs(t, err, ErrInvalidKeyLength)
require.Nil(t, val)
})
t.Run("KeyWithCodePrefix", func(t *testing.T) {
oracle := newStubStateOracle()
db := NewOracleBackedDB(oracle)
key := common.HexToHash("0x12345678")
prefixedKey := append(rawdb.CodePrefix, key.Bytes()...)
expected := []byte{1, 2, 3}
oracle.code[key] = expected
val, err := db.Get(prefixedKey)
require.NoError(t, err)
require.Equal(t, expected, val)
})
t.Run("NormalKeyThatHappensToStartWithCodePrefix", func(t *testing.T) {
oracle := newStubStateOracle()
db := NewOracleBackedDB(oracle)
key := make([]byte, common.HashLength)
copy(key, rawdb.CodePrefix)
expected := []byte{1, 2, 3}
oracle.data[common.BytesToHash(key)] = expected
val, err := db.Get(key)
require.NoError(t, err)
require.Equal(t, expected, val)
})
t.Run("KnownKey", func(t *testing.T) {
key := common.HexToHash("0xAA4488")
expected := []byte{2, 6, 3, 8}
oracle := newStubStateOracle()
oracle.data[key] = expected
db := NewOracleBackedDB(oracle)
val, err := db.Get(key.Bytes())
require.NoError(t, err)
require.Equal(t, expected, val)
})
}
func TestPut(t *testing.T) {
t.Run("NewKey", func(t *testing.T) {
oracle := newStubStateOracle()
db := NewOracleBackedDB(oracle)
key := common.HexToHash("0xAA4488")
value := []byte{2, 6, 3, 8}
err := db.Put(key.Bytes(), value)
require.NoError(t, err)
actual, err := db.Get(key.Bytes())
require.NoError(t, err)
require.Equal(t, value, actual)
})
t.Run("ReplaceKey", func(t *testing.T) {
oracle := newStubStateOracle()
db := NewOracleBackedDB(oracle)
key := common.HexToHash("0xAA4488")
value1 := []byte{2, 6, 3, 8}
value2 := []byte{1, 2, 3}
err := db.Put(key.Bytes(), value1)
require.NoError(t, err)
err = db.Put(key.Bytes(), value2)
require.NoError(t, err)
actual, err := db.Get(key.Bytes())
require.NoError(t, err)
require.Equal(t, value2, actual)
})
}
func TestSupportsStateDBOperations(t *testing.T) {
l2Genesis := createGenesis()
realDb := rawdb.NewDatabase(memorydb.New())
genesisBlock := l2Genesis.MustCommit(realDb)
loader := &kvStateOracle{
source: realDb,
}
assertStateDataAvailable(t, NewOracleBackedDB(loader), l2Genesis, genesisBlock)
}
func TestUpdateState(t *testing.T) {
l2Genesis := createGenesis()
oracle := newStubStateOracle()
db := rawdb.NewDatabase(NewOracleBackedDB(oracle))
genesisBlock := l2Genesis.MustCommit(db)
assertStateDataAvailable(t, db, l2Genesis, genesisBlock)
statedb, err := state.New(genesisBlock.Root(), state.NewDatabase(rawdb.NewDatabase(db)), nil)
require.NoError(t, err)
statedb.SetBalance(userAccount, big.NewInt(50))
require.Equal(t, big.NewInt(50), statedb.GetBalance(userAccount))
statedb.SetNonce(userAccount, uint64(5))
require.Equal(t, uint64(5), statedb.GetNonce(userAccount))
statedb.SetBalance(unknownAccount, big.NewInt(60))
require.Equal(t, big.NewInt(60), statedb.GetBalance(unknownAccount))
statedb.SetCode(codeAccount, []byte{1})
require.Equal(t, []byte{1}, statedb.GetCode(codeAccount))
// Changes should be available under the new state root after committing
newRoot, err := statedb.Commit(false)
require.NoError(t, err)
err = statedb.Database().TrieDB().Commit(newRoot, true)
require.NoError(t, err)
statedb, err = state.New(newRoot, state.NewDatabase(rawdb.NewDatabase(db)), nil)
require.NoError(t, err)
require.Equal(t, big.NewInt(50), statedb.GetBalance(userAccount))
require.Equal(t, uint64(5), statedb.GetNonce(userAccount))
require.Equal(t, big.NewInt(60), statedb.GetBalance(unknownAccount))
require.Equal(t, []byte{1}, statedb.GetCode(codeAccount))
}
func createGenesis() *core.Genesis {
l2Genesis := &core.Genesis{
Config: &params.ChainConfig{},
Difficulty: common.Big0,
ParentHash: common.Hash{},
BaseFee: big.NewInt(7),
Alloc: map[common.Address]core.GenesisAccount{
userAccount: {
Balance: big.NewInt(1_000_000_000_000_000_000),
Nonce: 10,
},
codeAccount: {
Balance: big.NewInt(100),
Code: []byte{5, 7, 8, 3, 4},
Storage: map[common.Hash]common.Hash{
common.HexToHash("0x01"): common.HexToHash("0x11"),
common.HexToHash("0x02"): common.HexToHash("0x12"),
common.HexToHash("0x03"): common.HexToHash("0x13"),
},
},
},
}
return l2Genesis
}
func assertStateDataAvailable(t *testing.T, db ethdb.KeyValueStore, l2Genesis *core.Genesis, genesisBlock *types.Block) {
statedb, err := state.New(genesisBlock.Root(), state.NewDatabase(rawdb.NewDatabase(db)), nil)
require.NoError(t, err)
for address, account := range l2Genesis.Alloc {
require.Equal(t, account.Balance, statedb.GetBalance(address))
require.Equal(t, account.Nonce, statedb.GetNonce(address))
require.Equal(t, common.BytesToHash(crypto.Keccak256(account.Code)), statedb.GetCodeHash(address))
require.Equal(t, account.Code, statedb.GetCode(address))
for key, value := range account.Storage {
require.Equal(t, value, statedb.GetState(address, key))
}
}
require.Equal(t, common.Hash{}, statedb.GetState(codeAccount, common.HexToHash("0x99")), "retrieve unset storage key")
require.Equal(t, common.Big0, statedb.GetBalance(unknownAccount), "unset account balance")
require.Equal(t, uint64(0), statedb.GetNonce(unknownAccount), "unset account nonce")
require.Nil(t, statedb.GetCode(unknownAccount), "unset account code")
require.Equal(t, common.Hash{}, statedb.GetCodeHash(unknownAccount), "unset account code hash")
}
func newStubStateOracle() *stubStateOracle {
return &stubStateOracle{
data: make(map[common.Hash][]byte),
code: make(map[common.Hash][]byte),
}
}
type stubStateOracle struct {
data map[common.Hash][]byte
code map[common.Hash][]byte
}
func (o *stubStateOracle) NodeByHash(nodeHash common.Hash) ([]byte, error) {
data, ok := o.data[nodeHash]
if !ok {
return nil, fmt.Errorf("no value for node %v", nodeHash)
}
return data, nil
}
func (o *stubStateOracle) CodeByHash(hash common.Hash) ([]byte, error) {
data, ok := o.code[hash]
if !ok {
return nil, fmt.Errorf("no value for code %v", hash)
}
return data, nil
}
// kvStateOracle loads data from a source ethdb.KeyValueStore
type kvStateOracle struct {
source ethdb.KeyValueStore
}
func (o *kvStateOracle) NodeByHash(nodeHash common.Hash) ([]byte, error) {
return o.source.Get(nodeHash.Bytes())
}
func (o *kvStateOracle) CodeByHash(hash common.Hash) ([]byte, error) {
return rawdb.ReadCode(o.source, hash), nil
}
package l2
import (
"context"
"errors"
"fmt"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-program/l2/engineapi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
var (
ErrNotFound = errors.New("not found")
)
type OracleEngine struct {
api *engineapi.L2EngineAPI
backend engineapi.EngineBackend
rollupCfg *rollup.Config
}
func NewOracleEngine(rollupCfg *rollup.Config, logger log.Logger, backend engineapi.EngineBackend) *OracleEngine {
engineAPI := engineapi.NewL2EngineAPI(logger, backend)
return &OracleEngine{
api: engineAPI,
backend: backend,
rollupCfg: rollupCfg,
}
}
func (o OracleEngine) GetPayload(ctx context.Context, payloadId eth.PayloadID) (*eth.ExecutionPayload, error) {
return o.api.GetPayloadV1(ctx, payloadId)
}
func (o OracleEngine) ForkchoiceUpdate(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) {
return o.api.ForkchoiceUpdatedV1(ctx, state, attr)
}
func (o OracleEngine) NewPayload(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) {
return o.api.NewPayloadV1(ctx, payload)
}
func (o OracleEngine) PayloadByHash(ctx context.Context, hash common.Hash) (*eth.ExecutionPayload, error) {
block := o.backend.GetBlockByHash(hash)
if block == nil {
return nil, ErrNotFound
}
return eth.BlockAsPayload(block)
}
func (o OracleEngine) PayloadByNumber(ctx context.Context, n uint64) (*eth.ExecutionPayload, error) {
hash := o.backend.GetCanonicalHash(n)
if hash == (common.Hash{}) {
return nil, ErrNotFound
}
return o.PayloadByHash(ctx, hash)
}
func (o OracleEngine) L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error) {
var header *types.Header
switch label {
case eth.Unsafe:
header = o.backend.CurrentHeader()
case eth.Safe:
header = o.backend.CurrentSafeBlock()
case eth.Finalized:
header = o.backend.CurrentFinalBlock()
default:
return eth.L2BlockRef{}, fmt.Errorf("unknown label: %v", label)
}
if header == nil {
return eth.L2BlockRef{}, ErrNotFound
}
block := o.backend.GetBlockByHash(header.Hash())
if block == nil {
return eth.L2BlockRef{}, ErrNotFound
}
return derive.L2BlockToBlockRef(block, &o.rollupCfg.Genesis)
}
func (o OracleEngine) L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) (eth.L2BlockRef, error) {
block := o.backend.GetBlockByHash(l2Hash)
if block == nil {
return eth.L2BlockRef{}, ErrNotFound
}
return derive.L2BlockToBlockRef(block, &o.rollupCfg.Genesis)
}
func (o OracleEngine) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (eth.SystemConfig, error) {
payload, err := o.PayloadByHash(ctx, hash)
if err != nil {
return eth.SystemConfig{}, err
}
return derive.PayloadToSystemConfig(payload, o.rollupCfg)
}
package l2
import (
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-program/l2/engineapi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
type OracleBackedL2Chain struct {
log log.Logger
oracle Oracle
chainCfg *params.ChainConfig
engine consensus.Engine
head *types.Header
safe *types.Header
finalized *types.Header
vmCfg vm.Config
// Inserted blocks
blocks map[common.Hash]*types.Block
db ethdb.KeyValueStore
}
var _ engineapi.EngineBackend = (*OracleBackedL2Chain)(nil)
func NewOracleBackedL2Chain(logger log.Logger, oracle Oracle, chainCfg *params.ChainConfig, l2Head common.Hash) (*OracleBackedL2Chain, error) {
head, err := oracle.BlockByHash(l2Head)
if err != nil {
return nil, fmt.Errorf("loading l2 head: %w", err)
}
return &OracleBackedL2Chain{
log: logger,
oracle: oracle,
chainCfg: chainCfg,
engine: beacon.New(nil),
// Treat the agreed starting head as finalized - nothing before it can be disputed
head: head.Header(),
safe: head.Header(),
finalized: head.Header(),
blocks: make(map[common.Hash]*types.Block),
db: NewOracleBackedDB(oracle),
}, nil
}
func (o *OracleBackedL2Chain) CurrentHeader() *types.Header {
return o.head
}
func (o *OracleBackedL2Chain) GetHeaderByNumber(n uint64) *types.Header {
// Walk back from current head to the requested block number
h := o.head
if h.Number.Uint64() < n {
return nil
}
for h.Number.Uint64() > n {
h = o.GetHeaderByHash(h.ParentHash)
}
return h
}
func (o *OracleBackedL2Chain) GetTd(hash common.Hash, number uint64) *big.Int {
// Difficulty is always 0 post-merge, and Bedrock starts post-merge, so total difficulty is also always 0
return common.Big0
}
func (o *OracleBackedL2Chain) CurrentSafeBlock() *types.Header {
return o.safe
}
func (o *OracleBackedL2Chain) CurrentFinalBlock() *types.Header {
return o.finalized
}
func (o *OracleBackedL2Chain) GetHeaderByHash(hash common.Hash) *types.Header {
block := o.GetBlockByHash(hash)
if block == nil {
return nil
}
return block.Header()
}
func (o *OracleBackedL2Chain) GetBlockByHash(hash common.Hash) *types.Block {
// Check inserted blocks
block, ok := o.blocks[hash]
if ok {
return block
}
// Retrieve from the oracle
block, err := o.oracle.BlockByHash(hash)
if err != nil {
handleError(err)
}
if block == nil {
return nil
}
return block
}
func (o *OracleBackedL2Chain) GetBlock(hash common.Hash, number uint64) *types.Block {
block := o.GetBlockByHash(hash)
if block == nil {
return nil
}
if block.NumberU64() != number {
return nil
}
return block
}
func (o *OracleBackedL2Chain) GetHeader(hash common.Hash, u uint64) *types.Header {
block := o.GetBlock(hash, u)
if block == nil {
return nil
}
return block.Header()
}
func (o *OracleBackedL2Chain) HasBlockAndState(hash common.Hash, number uint64) bool {
block := o.GetBlock(hash, number)
return block != nil
}
func (o *OracleBackedL2Chain) GetCanonicalHash(n uint64) common.Hash {
header := o.GetHeaderByNumber(n)
if header == nil {
return common.Hash{}
}
return header.Hash()
}
func (o *OracleBackedL2Chain) GetVMConfig() *vm.Config {
return &o.vmCfg
}
func (o *OracleBackedL2Chain) Config() *params.ChainConfig {
return o.chainCfg
}
func (o *OracleBackedL2Chain) Engine() consensus.Engine {
return o.engine
}
func (o *OracleBackedL2Chain) StateAt(root common.Hash) (*state.StateDB, error) {
return state.New(root, state.NewDatabase(rawdb.NewDatabase(o.db)), nil)
}
func (o *OracleBackedL2Chain) InsertBlockWithoutSetHead(block *types.Block) error {
processor, err := engineapi.NewBlockProcessorFromHeader(o, block.Header())
if err != nil {
return err
}
for i, tx := range block.Transactions() {
err = processor.AddTx(tx)
if err != nil {
return fmt.Errorf("invalid transaction (%d): %w", i, err)
}
}
expected, err := processor.Assemble()
if err != nil {
return fmt.Errorf("invalid block: %w", err)
}
if expected.Hash() != block.Hash() {
return fmt.Errorf("block root mismatch, expected: %v, actual: %v", expected.Hash(), block.Hash())
}
err = processor.Commit()
if err != nil {
return fmt.Errorf("commit block: %w", err)
}
o.blocks[block.Hash()] = block
return nil
}
func (o *OracleBackedL2Chain) SetCanonical(head *types.Block) (common.Hash, error) {
o.head = head.Header()
return head.Hash(), nil
}
func (o *OracleBackedL2Chain) SetFinalized(header *types.Header) {
o.finalized = header
}
func (o *OracleBackedL2Chain) SetSafe(header *types.Header) {
o.safe = header
}
func handleError(err error) {
panic(err)
}
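A brief sketch of how these pieces presumably compose: the oracle-backed chain satisfies engineapi.EngineBackend, and the OracleEngine defined earlier wraps it behind the derivation engine interface. The helper name below is hypothetical and not part of this change; the constructor signatures are taken from the code above:

package l2

import (
	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
)

// newOracleEngineFromHead is a hypothetical composition helper: build the oracle-backed
// chain from the agreed L2 head, then expose it to derivation via the OracleEngine.
func newOracleEngineFromHead(logger log.Logger, rollupCfg *rollup.Config, chainCfg *params.ChainConfig, oracle Oracle, l2Head common.Hash) (*OracleEngine, error) {
	chain, err := NewOracleBackedL2Chain(logger, oracle, chainCfg, l2Head)
	if err != nil {
		return nil, err
	}
	return NewOracleEngine(rollupCfg, logger, chain), nil
}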
package l2
import (
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-program/l2/engineapi"
"github.com/ethereum-optimism/optimism/op-program/l2/engineapi/test"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
)
var fundedKey, _ = crypto.GenerateKey()
var fundedAddress = crypto.PubkeyToAddress(fundedKey.PublicKey)
var targetAddress = common.HexToAddress("0x001122334455")
func TestInitialState(t *testing.T) {
blocks, chain := setupOracleBackedChain(t, 5)
head := blocks[5]
require.Equal(t, head.Header(), chain.CurrentHeader())
require.Equal(t, head.Header(), chain.CurrentSafeBlock())
require.Equal(t, head.Header(), chain.CurrentFinalBlock())
}
func TestGetBlocks(t *testing.T) {
blocks, chain := setupOracleBackedChain(t, 5)
for i, block := range blocks {
blockNumber := uint64(i)
assertBlockDataAvailable(t, chain, block, blockNumber)
require.Equal(t, block.Hash(), chain.GetCanonicalHash(blockNumber), "get canonical hash for block %v", blockNumber)
}
}
func TestUnknownBlock(t *testing.T) {
_, chain := setupOracleBackedChain(t, 1)
hash := common.HexToHash("0x556677881122")
blockNumber := uint64(1)
require.Nil(t, chain.GetBlockByHash(hash))
require.Nil(t, chain.GetHeaderByHash(hash))
require.Nil(t, chain.GetBlock(hash, blockNumber))
require.Nil(t, chain.GetHeader(hash, blockNumber))
require.False(t, chain.HasBlockAndState(hash, blockNumber))
}
func TestCanonicalHashNotFoundPastChainHead(t *testing.T) {
blocks, chain := setupOracleBackedChainWithLowerHead(t, 5, 3)
for i := 0; i <= 3; i++ {
require.Equal(t, blocks[i].Hash(), chain.GetCanonicalHash(uint64(i)))
require.Equal(t, blocks[i].Header(), chain.GetHeaderByNumber(uint64(i)))
}
for i := 4; i <= 5; i++ {
require.Equal(t, common.Hash{}, chain.GetCanonicalHash(uint64(i)))
require.Nil(t, chain.GetHeaderByNumber(uint64(i)))
}
}
func TestAppendToChain(t *testing.T) {
blocks, chain := setupOracleBackedChainWithLowerHead(t, 4, 3)
newBlock := blocks[4]
require.Nil(t, chain.GetBlockByHash(newBlock.Hash()), "block unknown before being added")
require.NoError(t, chain.InsertBlockWithoutSetHead(newBlock))
require.Equal(t, blocks[3].Header(), chain.CurrentHeader(), "should not update chain head yet")
require.Equal(t, common.Hash{}, chain.GetCanonicalHash(uint64(4)), "not yet a canonical hash")
require.Nil(t, chain.GetHeaderByNumber(uint64(4)), "not yet a canonical header")
assertBlockDataAvailable(t, chain, newBlock, 4)
canonical, err := chain.SetCanonical(newBlock)
require.NoError(t, err)
require.Equal(t, newBlock.Hash(), canonical)
require.Equal(t, newBlock.Hash(), chain.GetCanonicalHash(uint64(4)), "get canonical hash for new head")
require.Equal(t, newBlock.Header(), chain.GetHeaderByNumber(uint64(4)), "get canonical header for new head")
}
func TestSetFinalized(t *testing.T) {
blocks, chain := setupOracleBackedChainWithLowerHead(t, 5, 0)
for _, block := range blocks[1:] {
require.NoError(t, chain.InsertBlockWithoutSetHead(block))
}
chain.SetFinalized(blocks[2].Header())
require.Equal(t, blocks[2].Header(), chain.CurrentFinalBlock())
}
func TestSetSafe(t *testing.T) {
blocks, chain := setupOracleBackedChainWithLowerHead(t, 5, 0)
for _, block := range blocks[1:] {
require.NoError(t, chain.InsertBlockWithoutSetHead(block))
}
chain.SetSafe(blocks[2].Header())
require.Equal(t, blocks[2].Header(), chain.CurrentSafeBlock())
}
func TestUpdateStateDatabaseWhenImportingBlock(t *testing.T) {
blocks, chain := setupOracleBackedChain(t, 3)
newBlock := createBlock(t, chain)
db, err := chain.StateAt(blocks[1].Root())
require.NoError(t, err)
balance := db.GetBalance(fundedAddress)
require.NotEqual(t, big.NewInt(0), balance, "should have balance at imported block")
require.NotEqual(t, blocks[1].Root(), newBlock.Root(), "block should have modified world state")
_, err = chain.StateAt(newBlock.Root())
require.Error(t, err, "state from non-imported block should not be available")
err = chain.InsertBlockWithoutSetHead(newBlock)
require.NoError(t, err)
db, err = chain.StateAt(newBlock.Root())
require.NoError(t, err, "state should be available after importing")
balance = db.GetBalance(fundedAddress)
require.NotEqual(t, big.NewInt(0), balance, "should have balance from imported block")
}
func TestRejectBlockWithStateRootMismatch(t *testing.T) {
_, chain := setupOracleBackedChain(t, 1)
newBlock := createBlock(t, chain)
// Create invalid block by keeping the modified state root but exclude the transaction
invalidBlock := types.NewBlockWithHeader(newBlock.Header())
err := chain.InsertBlockWithoutSetHead(invalidBlock)
require.ErrorContains(t, err, "block root mismatch")
}
func assertBlockDataAvailable(t *testing.T, chain *OracleBackedL2Chain, block *types.Block, blockNumber uint64) {
require.Equal(t, block, chain.GetBlockByHash(block.Hash()), "get block %v by hash", blockNumber)
require.Equal(t, block.Header(), chain.GetHeaderByHash(block.Hash()), "get header %v by hash", blockNumber)
require.Equal(t, block, chain.GetBlock(block.Hash(), blockNumber), "get block %v by hash and number", blockNumber)
require.Equal(t, block.Header(), chain.GetHeader(block.Hash(), blockNumber), "get header %v by hash and number", blockNumber)
require.True(t, chain.HasBlockAndState(block.Hash(), blockNumber), "has block and state for block %v", blockNumber)
}
func setupOracleBackedChain(t *testing.T, blockCount int) ([]*types.Block, *OracleBackedL2Chain) {
return setupOracleBackedChainWithLowerHead(t, blockCount, blockCount)
}
func setupOracleBackedChainWithLowerHead(t *testing.T, blockCount int, headBlockNumber int) ([]*types.Block, *OracleBackedL2Chain) {
logger := testlog.Logger(t, log.LvlDebug)
chainCfg, blocks, oracle := setupOracle(t, blockCount, headBlockNumber)
head := blocks[headBlockNumber].Hash()
chain, err := NewOracleBackedL2Chain(logger, oracle, chainCfg, head)
require.NoError(t, err)
return blocks, chain
}
func setupOracle(t *testing.T, blockCount int, headBlockNumber int) (*params.ChainConfig, []*types.Block, *stubBlockOracle) {
deployConfig := &genesis.DeployConfig{
L1ChainID: 900,
L2ChainID: 901,
L2BlockTime: 2,
FundDevAccounts: true,
L2GenesisBlockGasLimit: 30_000_000,
}
l1Genesis, err := genesis.NewL1Genesis(deployConfig)
require.NoError(t, err)
l2Genesis, err := genesis.NewL2Genesis(deployConfig, l1Genesis.ToBlock())
require.NoError(t, err)
l2Genesis.Alloc[fundedAddress] = core.GenesisAccount{
Balance: big.NewInt(1_000_000_000_000_000_000),
Nonce: 0,
}
chainCfg := l2Genesis.Config
consensus := beacon.New(nil)
db := rawdb.NewMemoryDatabase()
// Set minimal amount of stuff to avoid nil references later
genesisBlock := l2Genesis.MustCommit(db)
blocks, _ := core.GenerateChain(chainCfg, genesisBlock, consensus, db, blockCount, func(i int, gen *core.BlockGen) {})
blocks = append([]*types.Block{genesisBlock}, blocks...)
oracle := newStubBlockOracle(blocks[:headBlockNumber+1], db)
return chainCfg, blocks, oracle
}
func createBlock(t *testing.T, chain *OracleBackedL2Chain) *types.Block {
parent := chain.GetBlockByHash(chain.CurrentHeader().Hash())
parentDB, err := chain.StateAt(parent.Root())
require.NoError(t, err)
nonce := parentDB.GetNonce(fundedAddress)
config := chain.Config()
db := rawdb.NewDatabase(NewOracleBackedDB(chain.oracle))
blocks, _ := core.GenerateChain(config, parent, chain.Engine(), db, 1, func(i int, gen *core.BlockGen) {
rawTx := &types.DynamicFeeTx{
ChainID: config.ChainID,
Nonce: nonce,
To: &targetAddress,
GasTipCap: big.NewInt(0),
GasFeeCap: parent.BaseFee(),
Gas: 21_000,
Value: big.NewInt(1),
}
tx := types.MustSignNewTx(fundedKey, types.NewLondonSigner(config.ChainID), rawTx)
gen.AddTx(tx)
})
return blocks[0]
}
type stubBlockOracle struct {
blocks map[common.Hash]*types.Block
kvStateOracle
}
func newStubBlockOracle(chain []*types.Block, db ethdb.Database) *stubBlockOracle {
blocks := make(map[common.Hash]*types.Block, len(chain))
for _, block := range chain {
blocks[block.Hash()] = block
}
return &stubBlockOracle{
blocks: blocks,
kvStateOracle: kvStateOracle{source: db},
}
}
func (o stubBlockOracle) BlockByHash(blockHash common.Hash) (*types.Block, error) {
return o.blocks[blockHash], nil
}
func TestEngineAPITests(t *testing.T) {
test.RunEngineAPITests(t, func() engineapi.EngineBackend {
_, chain := setupOracleBackedChain(t, 0)
return chain
})
}
package l2
import (
"context"
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/stretchr/testify/require"
)
// Should implement derive.Engine
var _ derive.Engine = (*OracleEngine)(nil)
func TestPayloadByHash(t *testing.T) {
ctx := context.Background()
t.Run("KnownBlock", func(t *testing.T) {
engine, stub := createOracleEngine(t)
block := stub.head
payload, err := engine.PayloadByHash(ctx, block.Hash())
require.NoError(t, err)
expected, err := eth.BlockAsPayload(block)
require.NoError(t, err)
require.Equal(t, expected, payload)
})
t.Run("UnknownBlock", func(t *testing.T) {
engine, _ := createOracleEngine(t)
hash := common.HexToHash("0x878899")
payload, err := engine.PayloadByHash(ctx, hash)
require.ErrorIs(t, err, ErrNotFound)
require.Nil(t, payload)
})
}
func TestPayloadByNumber(t *testing.T) {
ctx := context.Background()
t.Run("KnownBlock", func(t *testing.T) {
engine, stub := createOracleEngine(t)
block := stub.head
payload, err := engine.PayloadByNumber(ctx, block.NumberU64())
require.NoError(t, err)
expected, err := eth.BlockAsPayload(block)
require.NoError(t, err)
require.Equal(t, expected, payload)
})
t.Run("NoCanonicalHash", func(t *testing.T) {
engine, _ := createOracleEngine(t)
payload, err := engine.PayloadByNumber(ctx, uint64(700))
require.ErrorIs(t, err, ErrNotFound)
require.Nil(t, payload)
})
t.Run("UnknownBlock", func(t *testing.T) {
engine, stub := createOracleEngine(t)
hash := common.HexToHash("0x878899")
number := uint64(700)
stub.canonical[number] = hash
payload, err := engine.PayloadByNumber(ctx, number)
require.ErrorIs(t, err, ErrNotFound)
require.Nil(t, payload)
})
}
func TestL2BlockRefByLabel(t *testing.T) {
ctx := context.Background()
engine, stub := createOracleEngine(t)
tests := []struct {
name eth.BlockLabel
block *types.Block
}{
{eth.Unsafe, stub.head},
{eth.Safe, stub.safe},
{eth.Finalized, stub.finalized},
}
for _, test := range tests {
t.Run(string(test.name), func(t *testing.T) {
expected, err := derive.L2BlockToBlockRef(test.block, &engine.rollupCfg.Genesis)
require.NoError(t, err)
blockRef, err := engine.L2BlockRefByLabel(ctx, test.name)
require.NoError(t, err)
require.Equal(t, expected, blockRef)
})
}
t.Run("UnknownLabel", func(t *testing.T) {
_, err := engine.L2BlockRefByLabel(ctx, "nope")
require.ErrorContains(t, err, "unknown label")
})
}
func TestL2BlockRefByHash(t *testing.T) {
ctx := context.Background()
engine, stub := createOracleEngine(t)
t.Run("KnownBlock", func(t *testing.T) {
expected, err := derive.L2BlockToBlockRef(stub.safe, &engine.rollupCfg.Genesis)
require.NoError(t, err)
ref, err := engine.L2BlockRefByHash(ctx, stub.safe.Hash())
require.NoError(t, err)
require.Equal(t, expected, ref)
})
t.Run("UnknownBlock", func(t *testing.T) {
ref, err := engine.L2BlockRefByHash(ctx, common.HexToHash("0x878899"))
require.ErrorIs(t, err, ErrNotFound)
require.Equal(t, eth.L2BlockRef{}, ref)
})
}
func TestSystemConfigByL2Hash(t *testing.T) {
ctx := context.Background()
engine, stub := createOracleEngine(t)
t.Run("KnownBlock", func(t *testing.T) {
payload, err := eth.BlockAsPayload(stub.safe)
require.NoError(t, err)
expected, err := derive.PayloadToSystemConfig(payload, engine.rollupCfg)
require.NoError(t, err)
cfg, err := engine.SystemConfigByL2Hash(ctx, stub.safe.Hash())
require.NoError(t, err)
require.Equal(t, expected, cfg)
})
t.Run("UnknownBlock", func(t *testing.T) {
ref, err := engine.SystemConfigByL2Hash(ctx, common.HexToHash("0x878899"))
require.ErrorIs(t, err, ErrNotFound)
require.Equal(t, eth.SystemConfig{}, ref)
})
}
func createOracleEngine(t *testing.T) (*OracleEngine, *stubEngineBackend) {
head := createL2Block(t, 4)
safe := createL2Block(t, 3)
finalized := createL2Block(t, 2)
backend := &stubEngineBackend{
head: head,
safe: safe,
finalized: finalized,
blocks: map[common.Hash]*types.Block{
head.Hash(): head,
safe.Hash(): safe,
finalized.Hash(): finalized,
},
canonical: map[uint64]common.Hash{
head.NumberU64(): head.Hash(),
safe.NumberU64(): safe.Hash(),
finalized.NumberU64(): finalized.Hash(),
},
}
engine := OracleEngine{
backend: backend,
rollupCfg: &chaincfg.Goerli,
}
return &engine, backend
}
func createL2Block(t *testing.T, number int) *types.Block {
tx, err := derive.L1InfoDeposit(uint64(1), eth.HeaderBlockInfo(&types.Header{
Number: big.NewInt(32),
BaseFee: big.NewInt(7),
}), eth.SystemConfig{}, true)
require.NoError(t, err)
header := &types.Header{
Number: big.NewInt(int64(number)),
BaseFee: big.NewInt(7),
}
return types.NewBlock(header, []*types.Transaction{types.NewTx(tx)}, nil, nil, trie.NewStackTrie(nil))
}
type stubEngineBackend struct {
head *types.Block
safe *types.Block
finalized *types.Block
blocks map[common.Hash]*types.Block
canonical map[uint64]common.Hash
}
func (s stubEngineBackend) CurrentHeader() *types.Header {
return s.head.Header()
}
func (s stubEngineBackend) CurrentSafeBlock() *types.Header {
return s.safe.Header()
}
func (s stubEngineBackend) CurrentFinalBlock() *types.Header {
return s.finalized.Header()
}
func (s stubEngineBackend) GetBlockByHash(hash common.Hash) *types.Block {
return s.blocks[hash]
}
func (s stubEngineBackend) GetCanonicalHash(n uint64) common.Hash {
return s.canonical[n]
}
func (s stubEngineBackend) GetBlock(hash common.Hash, number uint64) *types.Block {
panic("unsupported")
}
func (s stubEngineBackend) HasBlockAndState(hash common.Hash, number uint64) bool {
panic("unsupported")
}
func (s stubEngineBackend) GetVMConfig() *vm.Config {
panic("unsupported")
}
func (s stubEngineBackend) Config() *params.ChainConfig {
panic("unsupported")
}
func (s stubEngineBackend) Engine() consensus.Engine {
panic("unsupported")
}
func (s stubEngineBackend) StateAt(root common.Hash) (*state.StateDB, error) {
panic("unsupported")
}
func (s stubEngineBackend) InsertBlockWithoutSetHead(block *types.Block) error {
panic("unsupported")
}
func (s stubEngineBackend) SetCanonical(head *types.Block) (common.Hash, error) {
panic("unsupported")
}
func (s stubEngineBackend) SetFinalized(header *types.Header) {
panic("unsupported")
}
func (s stubEngineBackend) SetSafe(header *types.Header) {
panic("unsupported")
}
func (s stubEngineBackend) GetHeader(hash common.Hash, number uint64) *types.Header {
panic("unsupported")
}
func (s stubEngineBackend) GetHeaderByNumber(number uint64) *types.Header {
panic("unsupported")
}
func (s stubEngineBackend) GetHeaderByHash(hash common.Hash) *types.Header {
panic("unsupported")
}
func (s stubEngineBackend) GetTd(hash common.Hash, number uint64) *big.Int {
panic("unsupported")
}
package engineapi
import (
"errors"
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
)
var (
ErrExceedsGasLimit = errors.New("tx gas exceeds block gas limit")
ErrUsesTooMuchGas = errors.New("action takes too much gas")
)
type BlockDataProvider interface {
StateAt(root common.Hash) (*state.StateDB, error)
GetHeader(common.Hash, uint64) *types.Header
Engine() consensus.Engine
GetVMConfig() *vm.Config
Config() *params.ChainConfig
consensus.ChainHeaderReader
}
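// BlockProcessor incrementally builds an L2 block: create it from payload
// attributes (or an existing header), add transactions with AddTx, then
// Assemble the block and, if needed, Commit the resulting state.
// Minimal usage sketch (illustrative only; placeholder names, error handling elided):
//
//	bp, err := NewBlockProcessorFromPayloadAttributes(provider, parentHash, attrs)
//	for _, tx := range txs {
//		err = bp.AddTx(tx)
//	}
//	block, err := bp.Assemble()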
type BlockProcessor struct {
header *types.Header
state *state.StateDB
receipts types.Receipts
transactions types.Transactions
gasPool *core.GasPool
dataProvider BlockDataProvider
}
func NewBlockProcessorFromPayloadAttributes(provider BlockDataProvider, parent common.Hash, params *eth.PayloadAttributes) (*BlockProcessor, error) {
header := &types.Header{
ParentHash: parent,
Coinbase: params.SuggestedFeeRecipient,
Difficulty: common.Big0,
GasLimit: uint64(*params.GasLimit),
Time: uint64(params.Timestamp),
Extra: nil,
MixDigest: common.Hash(params.PrevRandao),
Nonce: types.EncodeNonce(0),
}
return NewBlockProcessorFromHeader(provider, header)
}
func NewBlockProcessorFromHeader(provider BlockDataProvider, h *types.Header) (*BlockProcessor, error) {
header := types.CopyHeader(h) // Copy to avoid mutating the original header
if header.GasLimit > params.MaxGasLimit {
return nil, fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit)
}
parentHeader := provider.GetHeaderByHash(header.ParentHash)
if header.Time <= parentHeader.Time {
return nil, errors.New("invalid timestamp")
}
statedb, err := provider.StateAt(parentHeader.Root)
if err != nil {
return nil, fmt.Errorf("get parent state: %w", err)
}
header.Number = new(big.Int).Add(parentHeader.Number, common.Big1)
header.BaseFee = misc.CalcBaseFee(provider.Config(), parentHeader)
header.GasUsed = 0
gasPool := new(core.GasPool).AddGas(header.GasLimit)
return &BlockProcessor{
header: header,
state: statedb,
gasPool: gasPool,
dataProvider: provider,
}, nil
}
func (b *BlockProcessor) CheckTxWithinGasLimit(tx *types.Transaction) error {
if tx.Gas() > b.header.GasLimit {
return fmt.Errorf("%w tx gas: %d, block gas limit: %d", ErrExceedsGasLimit, tx.Gas(), b.header.GasLimit)
}
if tx.Gas() > b.gasPool.Gas() {
return fmt.Errorf("%w: %d, only have %d", ErrUsesTooMuchGas, tx.Gas(), b.gasPool.Gas())
}
return nil
}
func (b *BlockProcessor) AddTx(tx *types.Transaction) error {
txIndex := len(b.transactions)
b.state.SetTxContext(tx.Hash(), txIndex)
receipt, err := core.ApplyTransaction(b.dataProvider.Config(), b.dataProvider, &b.header.Coinbase,
b.gasPool, b.state, b.header, tx, &b.header.GasUsed, *b.dataProvider.GetVMConfig())
if err != nil {
return fmt.Errorf("failed to apply deposit transaction to L2 block (tx %d): %w", txIndex, err)
}
b.receipts = append(b.receipts, receipt)
b.transactions = append(b.transactions, tx)
return nil
}
func (b *BlockProcessor) Assemble() (*types.Block, error) {
return b.dataProvider.Engine().FinalizeAndAssemble(b.dataProvider, b.header, b.state, b.transactions, nil, b.receipts, nil)
}
func (b *BlockProcessor) Commit() error {
root, err := b.state.Commit(b.dataProvider.Config().IsEIP158(b.header.Number))
if err != nil {
return fmt.Errorf("state write error: %w", err)
}
if err := b.state.Database().TrieDB().Commit(root, false); err != nil {
return fmt.Errorf("trie write error: %w", err)
}
return nil
}
......@@ -6,33 +6,24 @@ import (
"encoding/binary"
"errors"
"fmt"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum-optimism/optimism/op-node/eth"
)
type EngineBackend interface {
CurrentBlock() *types.Header
CurrentSafeBlock() *types.Header
CurrentFinalBlock() *types.Header
GetHeaderByHash(hash common.Hash) *types.Header
GetBlockByHash(hash common.Hash) *types.Block
GetBlock(hash common.Hash, number uint64) *types.Block
// GetHeader returns the header corresponding to the hash/number argument pair.
GetHeader(common.Hash, uint64) *types.Header
HasBlockAndState(hash common.Hash, number uint64) bool
GetCanonicalHash(n uint64) common.Hash
......@@ -47,6 +38,8 @@ type EngineBackend interface {
SetCanonical(head *types.Block) (common.Hash, error)
SetFinalized(header *types.Header)
SetSafe(header *types.Header)
consensus.ChainHeaderReader
}
// L2EngineAPI wraps an engine actor, and implements the RPC backend required to serve the engine API.
......@@ -57,14 +50,10 @@ type L2EngineAPI struct {
backend EngineBackend
// L2 block building data
l2BuildingHeader *types.Header // block header that we add txs to for block building
l2BuildingState *state.StateDB // state used for block building
l2GasPool *core.GasPool // track gas used of ongoing building
pendingIndices map[common.Address]uint64 // per account, how many txs from the pool were already included in the block, since the pool is lagging behind block mining.
l2Transactions []*types.Transaction // collects txs that were successfully included into current block build
l2Receipts []*types.Receipt // collect receipts of ongoing building
l2ForceEmpty bool // when no additional txs may be processed (i.e. when sequencer drift runs out)
l2TxFailed []*types.Transaction // log of failed transactions which could not be included
blockProcessor *BlockProcessor
pendingIndices map[common.Address]uint64 // per account, how many txs from the pool were already included in the block, since the pool is lagging behind block mining.
l2ForceEmpty bool // when no additional txs may be processed (i.e. when sequencer drift runs out)
l2TxFailed []*types.Transaction // log of failed transactions which could not be included
payloadID engine.PayloadID // ID of payload that is currently being built
}
......@@ -102,7 +91,10 @@ func computePayloadId(headBlockHash common.Hash, params *eth.PayloadAttributes)
}
func (ea *L2EngineAPI) RemainingBlockGas() uint64 {
return ea.l2GasPool.Gas()
if ea.blockProcessor == nil {
return 0
}
return ea.blockProcessor.gasPool.Gas()
}
func (ea *L2EngineAPI) ForcedEmpty() bool {
......@@ -115,12 +107,10 @@ func (ea *L2EngineAPI) PendingIndices(from common.Address) uint64 {
var (
ErrNotBuildingBlock = errors.New("not currently building a block, cannot include tx from queue")
ErrExceedsGasLimit = errors.New("tx gas exceeds block gas limit")
ErrUsesTooMuchGas = errors.New("action takes too much gas")
)
func (ea *L2EngineAPI) IncludeTx(tx *types.Transaction, from common.Address) error {
if ea.l2BuildingHeader == nil {
if ea.blockProcessor == nil {
return ErrNotBuildingBlock
}
if ea.l2ForceEmpty {
......@@ -129,60 +119,32 @@ func (ea *L2EngineAPI) IncludeTx(tx *types.Transaction, from common.Address) err
return nil
}
if tx.Gas() > ea.l2BuildingHeader.GasLimit {
return fmt.Errorf("%w tx gas: %d, block gas limit: %d", ErrExceedsGasLimit, tx.Gas(), ea.l2BuildingHeader.GasLimit)
}
if tx.Gas() > uint64(*ea.l2GasPool) {
return fmt.Errorf("%w: %d, only have %d", ErrUsesTooMuchGas, tx.Gas(), uint64(*ea.l2GasPool))
err := ea.blockProcessor.CheckTxWithinGasLimit(tx)
if err != nil {
return err
}
ea.pendingIndices[from] = ea.pendingIndices[from] + 1 // won't retry the tx
ea.l2BuildingState.SetTxContext(tx.Hash(), len(ea.l2Transactions))
receipt, err := core.ApplyTransaction(ea.backend.Config(), ea.backend, &ea.l2BuildingHeader.Coinbase,
ea.l2GasPool, ea.l2BuildingState, ea.l2BuildingHeader, tx, &ea.l2BuildingHeader.GasUsed, *ea.backend.GetVMConfig())
err = ea.blockProcessor.AddTx(tx)
if err != nil {
ea.l2TxFailed = append(ea.l2TxFailed, tx)
return fmt.Errorf("invalid L2 block (tx %d): %w", len(ea.l2Transactions), err)
return fmt.Errorf("invalid L2 block (tx %d): %w", len(ea.blockProcessor.transactions), err)
}
ea.l2Receipts = append(ea.l2Receipts, receipt)
ea.l2Transactions = append(ea.l2Transactions, tx)
return nil
}
func (ea *L2EngineAPI) startBlock(parent common.Hash, params *eth.PayloadAttributes) error {
if ea.l2BuildingHeader != nil {
ea.log.Warn("started building new block without ending previous block", "previous", ea.l2BuildingHeader, "prev_payload_id", ea.payloadID)
if ea.blockProcessor != nil {
ea.log.Warn("started building new block without ending previous block", "previous", ea.blockProcessor.header, "prev_payload_id", ea.payloadID)
}
parentHeader := ea.backend.GetHeaderByHash(parent)
if parentHeader == nil {
return fmt.Errorf("uknown parent block: %s", parent)
}
statedb, err := ea.backend.StateAt(parentHeader.Root)
processor, err := NewBlockProcessorFromPayloadAttributes(ea.backend, parent, params)
if err != nil {
return fmt.Errorf("failed to init state db around block %s (state %s): %w", parent, parentHeader.Root, err)
}
header := &types.Header{
ParentHash: parent,
Coinbase: params.SuggestedFeeRecipient,
Difficulty: common.Big0,
Number: new(big.Int).Add(parentHeader.Number, common.Big1),
GasLimit: uint64(*params.GasLimit),
Time: uint64(params.Timestamp),
Extra: nil,
MixDigest: common.Hash(params.PrevRandao),
return err
}
header.BaseFee = misc.CalcBaseFee(ea.backend.Config(), parentHeader)
ea.l2BuildingHeader = header
ea.l2BuildingState = statedb
ea.l2Receipts = make([]*types.Receipt, 0)
ea.l2Transactions = make([]*types.Transaction, 0)
ea.blockProcessor = processor
ea.pendingIndices = make(map[common.Address]uint64)
ea.l2ForceEmpty = params.NoTxPool
ea.l2GasPool = new(core.GasPool).AddGas(header.GasLimit)
ea.payloadID = computePayloadId(parent, params)
// pre-process the deposits
......@@ -191,29 +153,26 @@ func (ea *L2EngineAPI) startBlock(parent common.Hash, params *eth.PayloadAttribu
if err := tx.UnmarshalBinary(otx); err != nil {
return fmt.Errorf("transaction %d is not valid: %w", i, err)
}
ea.l2BuildingState.SetTxContext(tx.Hash(), i)
receipt, err := core.ApplyTransaction(ea.backend.Config(), ea.backend, &ea.l2BuildingHeader.Coinbase,
ea.l2GasPool, ea.l2BuildingState, ea.l2BuildingHeader, &tx, &ea.l2BuildingHeader.GasUsed, *ea.backend.GetVMConfig())
err := ea.blockProcessor.AddTx(&tx)
if err != nil {
ea.l2TxFailed = append(ea.l2TxFailed, &tx)
return fmt.Errorf("failed to apply deposit transaction to L2 block (tx %d): %w", i, err)
}
ea.l2Receipts = append(ea.l2Receipts, receipt)
ea.l2Transactions = append(ea.l2Transactions, &tx)
}
return nil
}
func (ea *L2EngineAPI) endBlock() (*types.Block, error) {
if ea.l2BuildingHeader == nil {
if ea.blockProcessor == nil {
return nil, fmt.Errorf("no block is being built currently (id %s)", ea.payloadID)
}
header := ea.l2BuildingHeader
ea.l2BuildingHeader = nil
processor := ea.blockProcessor
ea.blockProcessor = nil
header.GasUsed = header.GasLimit - uint64(*ea.l2GasPool)
header.Root = ea.l2BuildingState.IntermediateRoot(ea.backend.Config().IsEIP158(header.Number))
block := types.NewBlock(header, ea.l2Transactions, nil, ea.l2Receipts, trie.NewStackTrie(nil))
block, err := processor.Assemble()
if err != nil {
return nil, fmt.Errorf("assemble block: %w", err)
}
return block, nil
}
......@@ -262,7 +221,7 @@ func (ea *L2EngineAPI) ForkchoiceUpdatedV1(ctx context.Context, state *eth.Forkc
if latestValid, err := ea.backend.SetCanonical(block); err != nil {
return &eth.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionInvalid, LatestValidHash: &latestValid}}, err
}
} else if ea.backend.CurrentBlock().Hash() == state.HeadBlockHash {
} else if ea.backend.CurrentHeader().Hash() == state.HeadBlockHash {
// If the specified head matches with our local head, do nothing and keep
// generating the payload. It's a special corner case that a few slots are
// missing and we are requested to generate the payload in slot.
......@@ -376,7 +335,7 @@ func (ea *L2EngineAPI) NewPayloadV1(ctx context.Context, payload *eth.ExecutionP
}
func (ea *L2EngineAPI) invalid(err error, latestValid *types.Header) *eth.PayloadStatusV1 {
currentHash := ea.backend.CurrentBlock().Hash()
currentHash := ea.backend.CurrentHeader().Hash()
if latestValid != nil {
// Set latest valid hash to 0x0 if parent is PoW block
currentHash = common.Hash{}
......
......@@ -11,6 +11,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/stretchr/testify/require"
)
......@@ -27,7 +28,7 @@ func RunEngineAPITests(t *testing.T, createBackend func() engineapi.EngineBacken
t.Run("IncludeRequiredTransactions", func(t *testing.T) {
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentBlock()
genesis := api.backend.CurrentHeader()
txData, err := derive.L1InfoDeposit(1, eth.HeaderBlockInfo(genesis), eth.SystemConfig{}, true)
api.assert.NoError(err)
......@@ -36,6 +37,38 @@ func RunEngineAPITests(t *testing.T, createBackend func() engineapi.EngineBacken
api.assert.Equal(block.BlockHash, api.headHash(), "should create and import new block")
imported := api.backend.GetBlockByHash(block.BlockHash)
api.assert.Len(imported.Transactions(), 1, "should include transaction")
api.assert.NotEqual(genesis.Root, block.StateRoot)
newState, err := api.backend.StateAt(common.Hash(block.StateRoot))
require.NoError(t, err, "imported block state should be available")
require.NotNil(t, newState)
})
t.Run("RejectCreatingBlockWithInvalidRequiredTransaction", func(t *testing.T) {
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentHeader()
txData, err := derive.L1InfoDeposit(1, eth.HeaderBlockInfo(genesis), eth.SystemConfig{}, true)
api.assert.NoError(err)
txData.Gas = uint64(gasLimit + 1)
tx := types.NewTx(txData)
txRlp, err := tx.MarshalBinary()
api.assert.NoError(err)
result, err := api.engine.ForkchoiceUpdatedV1(api.ctx, &eth.ForkchoiceState{
HeadBlockHash: genesis.Hash(),
SafeBlockHash: genesis.Hash(),
FinalizedBlockHash: genesis.Hash(),
}, &eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(genesis.Time + 1),
PrevRandao: eth.Bytes32(genesis.MixDigest),
SuggestedFeeRecipient: feeRecipient,
Transactions: []eth.Data{txRlp},
NoTxPool: true,
GasLimit: &gasLimit,
})
api.assert.Error(err)
api.assert.Equal(eth.ExecutionInvalid, result.PayloadStatus.Status)
})
t.Run("IgnoreUpdateHeadToOlderBlock", func(t *testing.T) {
......@@ -51,7 +84,7 @@ func RunEngineAPITests(t *testing.T, createBackend func() engineapi.EngineBacken
t.Run("AllowBuildingOnOlderBlock", func(t *testing.T) {
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentBlock()
genesis := api.backend.CurrentHeader()
api.addBlock()
block := api.addBlock()
api.assert.Equal(block.BlockHash, api.headHash(), "should have extended chain")
......@@ -79,7 +112,7 @@ func RunEngineAPITests(t *testing.T, createBackend func() engineapi.EngineBacken
t.Run("RejectBlockWithInvalidStateTransition", func(t *testing.T) {
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentBlock()
genesis := api.backend.CurrentHeader()
// Build a valid block
payloadID := api.startBlockBuilding(genesis, eth.Uint64Quantity(genesis.Time+2))
......@@ -96,11 +129,16 @@ func RunEngineAPITests(t *testing.T, createBackend func() engineapi.EngineBacken
t.Run("RejectBlockWithSameTimeAsParent", func(t *testing.T) {
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentBlock()
genesis := api.backend.CurrentHeader()
payloadID := api.startBlockBuilding(genesis, eth.Uint64Quantity(genesis.Time))
// Start with a valid time
payloadID := api.startBlockBuilding(genesis, eth.Uint64Quantity(genesis.Time+1))
newBlock := api.getPayload(payloadID)
// Then make it invalid to check NewPayload rejects it
newBlock.Timestamp = eth.Uint64Quantity(genesis.Time)
updateBlockHash(newBlock)
r, err := api.engine.NewPayloadV1(api.ctx, newBlock)
api.assert.NoError(err)
api.assert.Equal(eth.ExecutionInvalid, r.Status)
......@@ -108,16 +146,83 @@ func RunEngineAPITests(t *testing.T, createBackend func() engineapi.EngineBacken
t.Run("RejectBlockWithTimeBeforeParent", func(t *testing.T) {
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentBlock()
genesis := api.backend.CurrentHeader()
payloadID := api.startBlockBuilding(genesis, eth.Uint64Quantity(genesis.Time-1))
// Start with a valid time
payloadID := api.startBlockBuilding(genesis, eth.Uint64Quantity(genesis.Time+1))
newBlock := api.getPayload(payloadID)
// Then make it invalid to check NewPayload rejects it
newBlock.Timestamp = eth.Uint64Quantity(genesis.Time - 1)
updateBlockHash(newBlock)
r, err := api.engine.NewPayloadV1(api.ctx, newBlock)
api.assert.NoError(err)
api.assert.Equal(eth.ExecutionInvalid, r.Status)
})
t.Run("RejectCreateBlockWithSameTimeAsParent", func(t *testing.T) {
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentHeader()
result, err := api.engine.ForkchoiceUpdatedV1(api.ctx, &eth.ForkchoiceState{
HeadBlockHash: genesis.Hash(),
SafeBlockHash: genesis.Hash(),
FinalizedBlockHash: genesis.Hash(),
}, &eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(genesis.Time),
PrevRandao: eth.Bytes32(genesis.MixDigest),
SuggestedFeeRecipient: feeRecipient,
Transactions: nil,
NoTxPool: true,
GasLimit: &gasLimit,
})
api.assert.Error(err)
api.assert.Equal(eth.ExecutionInvalid, result.PayloadStatus.Status)
})
t.Run("RejectCreateBlockWithTimeBeforeParent", func(t *testing.T) {
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentHeader()
result, err := api.engine.ForkchoiceUpdatedV1(api.ctx, &eth.ForkchoiceState{
HeadBlockHash: genesis.Hash(),
SafeBlockHash: genesis.Hash(),
FinalizedBlockHash: genesis.Hash(),
}, &eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(genesis.Time - 1),
PrevRandao: eth.Bytes32(genesis.MixDigest),
SuggestedFeeRecipient: feeRecipient,
Transactions: nil,
NoTxPool: true,
GasLimit: &gasLimit,
})
api.assert.Error(err)
api.assert.Equal(eth.ExecutionInvalid, result.PayloadStatus.Status)
})
t.Run("RejectCreateBlockWithGasLimitAboveMax", func(t *testing.T) {
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentHeader()
gasLimit := eth.Uint64Quantity(params.MaxGasLimit + 1)
result, err := api.engine.ForkchoiceUpdatedV1(api.ctx, &eth.ForkchoiceState{
HeadBlockHash: genesis.Hash(),
SafeBlockHash: genesis.Hash(),
FinalizedBlockHash: genesis.Hash(),
}, &eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(genesis.Time + 1),
PrevRandao: eth.Bytes32(genesis.MixDigest),
SuggestedFeeRecipient: feeRecipient,
Transactions: nil,
NoTxPool: true,
GasLimit: &gasLimit,
})
api.assert.Error(err)
api.assert.Equal(eth.ExecutionInvalid, result.PayloadStatus.Status)
})
t.Run("UpdateSafeAndFinalizedHead", func(t *testing.T) {
api := newTestHelper(t, createBackend)
......@@ -133,7 +238,7 @@ func RunEngineAPITests(t *testing.T, createBackend func() engineapi.EngineBacken
t.Run("RejectSafeHeadWhenNotAncestor", func(t *testing.T) {
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentBlock()
genesis := api.backend.CurrentHeader()
api.addBlock()
chainA2 := api.addBlock()
......@@ -153,7 +258,7 @@ func RunEngineAPITests(t *testing.T, createBackend func() engineapi.EngineBacken
t.Run("RejectFinalizedHeadWhenNotAncestor", func(t *testing.T) {
api := newTestHelper(t, createBackend)
genesis := api.backend.CurrentBlock()
genesis := api.backend.CurrentHeader()
api.addBlock()
chainA2 := api.addBlock()
......@@ -203,7 +308,7 @@ func newTestHelper(t *testing.T, createBackend func() engineapi.EngineBackend) *
}
func (h *testHelper) headHash() common.Hash {
return h.backend.CurrentBlock().Hash()
return h.backend.CurrentHeader().Hash()
}
func (h *testHelper) safeHash() common.Hash {
......@@ -219,12 +324,12 @@ func (h *testHelper) Log(args ...any) {
}
func (h *testHelper) addBlock(txs ...*types.Transaction) *eth.ExecutionPayload {
head := h.backend.CurrentBlock()
head := h.backend.CurrentHeader()
return h.addBlockWithParent(head, eth.Uint64Quantity(head.Time+2), txs...)
}
func (h *testHelper) addBlockWithParent(head *types.Header, timestamp eth.Uint64Quantity, txs ...*types.Transaction) *eth.ExecutionPayload {
prevHead := h.backend.CurrentBlock()
prevHead := h.backend.CurrentHeader()
id := h.startBlockBuilding(head, timestamp, txs...)
block := h.getPayload(id)
......@@ -235,10 +340,10 @@ func (h *testHelper) addBlockWithParent(head *types.Header, timestamp eth.Uint64
h.newPayload(block)
// Should not have changed the chain head yet
h.assert.Equal(prevHead, h.backend.CurrentBlock())
h.assert.Equal(prevHead, h.backend.CurrentHeader())
h.forkChoiceUpdated(block.BlockHash, head.Hash(), head.Hash())
h.assert.Equal(block.BlockHash, h.backend.CurrentBlock().Hash())
h.assert.Equal(block.BlockHash, h.backend.CurrentHeader().Hash())
return block
}
......
......@@ -6,6 +6,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
......@@ -21,35 +22,52 @@ type CallContext interface {
}
type FetchingL2Oracle struct {
ctx context.Context
logger log.Logger
blockSource BlockSource
callContext CallContext
}
func NewFetchingL2Oracle(logger log.Logger, l2Url string) (*FetchingL2Oracle, error) {
func NewFetchingL2Oracle(ctx context.Context, logger log.Logger, l2Url string) (*FetchingL2Oracle, error) {
rpcClient, err := rpc.Dial(l2Url)
if err != nil {
return nil, err
}
ethClient := ethclient.NewClient(rpcClient)
return &FetchingL2Oracle{
ctx: ctx,
logger: logger,
blockSource: ethClient,
callContext: rpcClient,
}, nil
}
func (s FetchingL2Oracle) NodeByHash(ctx context.Context, nodeHash common.Hash) ([]byte, error) {
func (o *FetchingL2Oracle) NodeByHash(hash common.Hash) ([]byte, error) {
// MPT nodes are stored as the hash of the node (with no prefix)
return o.dbGet(hash.Bytes())
}
func (o *FetchingL2Oracle) CodeByHash(hash common.Hash) ([]byte, error) {
// First try retrieving with the new code prefix
code, err := o.dbGet(append(rawdb.CodePrefix, hash.Bytes()...))
if err != nil {
// Fallback to the legacy un-prefixed version
return o.dbGet(hash.Bytes())
}
return code, nil
}
func (o *FetchingL2Oracle) dbGet(key []byte) ([]byte, error) {
var node hexutil.Bytes
err := s.callContext.CallContext(ctx, &node, "debug_dbGet", nodeHash.Hex())
err := o.callContext.CallContext(o.ctx, &node, "debug_dbGet", hexutil.Encode(key))
if err != nil {
return nil, fmt.Errorf("fetch node %s: %w", nodeHash.Hex(), err)
return nil, fmt.Errorf("fetch node %s: %w", hexutil.Encode(key), err)
}
return node, nil
}
func (s FetchingL2Oracle) BlockByHash(ctx context.Context, blockHash common.Hash) (*types.Block, error) {
block, err := s.blockSource.BlockByHash(ctx, blockHash)
func (o *FetchingL2Oracle) BlockByHash(blockHash common.Hash) (*types.Block, error) {
block, err := o.blockSource.BlockByHash(o.ctx, blockHash)
if err != nil {
return nil, fmt.Errorf("fetch block %s: %w", blockHash.Hex(), err)
}
......
......@@ -12,11 +12,15 @@ import (
"github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
// Require the fetching oracle to implement StateOracle
var _ StateOracle = (*FetchingL2Oracle)(nil)
type callContextRequest struct {
ctx context.Context
method string
......@@ -50,7 +54,6 @@ func (c *stubCallContext) CallContext(ctx context.Context, result any, method st
func TestNodeByHash(t *testing.T) {
rng := rand.New(rand.NewSource(1234))
hash := testutils.RandomHash(rng)
ctx := context.Background()
t.Run("Error", func(t *testing.T) {
stub := &stubCallContext{
......@@ -58,7 +61,7 @@ func TestNodeByHash(t *testing.T) {
}
fetcher := newFetcher(nil, stub)
node, err := fetcher.NodeByHash(ctx, hash)
node, err := fetcher.NodeByHash(hash)
require.ErrorIs(t, err, stub.nextErr)
require.Nil(t, node)
})
......@@ -70,7 +73,7 @@ func TestNodeByHash(t *testing.T) {
}
fetcher := newFetcher(nil, stub)
node, err := fetcher.NodeByHash(ctx, hash)
node, err := fetcher.NodeByHash(hash)
require.NoError(t, err)
require.EqualValues(t, expected, node)
})
......@@ -81,7 +84,7 @@ func TestNodeByHash(t *testing.T) {
}
fetcher := newFetcher(nil, stub)
_, _ = fetcher.NodeByHash(ctx, hash)
_, _ = fetcher.NodeByHash(hash)
require.Len(t, stub.requests, 1, "should make single request")
req := stub.requests[0]
require.Equal(t, "debug_dbGet", req.method)
......@@ -89,6 +92,67 @@ func TestNodeByHash(t *testing.T) {
})
}
func TestCodeByHash(t *testing.T) {
rng := rand.New(rand.NewSource(1234))
hash := testutils.RandomHash(rng)
t.Run("Error", func(t *testing.T) {
stub := &stubCallContext{
nextErr: errors.New("oops"),
}
fetcher := newFetcher(nil, stub)
node, err := fetcher.CodeByHash(hash)
require.ErrorIs(t, err, stub.nextErr)
require.Nil(t, node)
})
t.Run("Success", func(t *testing.T) {
expected := (hexutil.Bytes)([]byte{12, 34})
stub := &stubCallContext{
nextResult: expected,
}
fetcher := newFetcher(nil, stub)
node, err := fetcher.CodeByHash(hash)
require.NoError(t, err)
require.EqualValues(t, expected, node)
})
t.Run("RequestArgs", func(t *testing.T) {
stub := &stubCallContext{
nextResult: (hexutil.Bytes)([]byte{12, 34}),
}
fetcher := newFetcher(nil, stub)
_, _ = fetcher.CodeByHash(hash)
require.Len(t, stub.requests, 1, "should make single request")
req := stub.requests[0]
require.Equal(t, "debug_dbGet", req.method)
codeDbKey := append(rawdb.CodePrefix, hash.Bytes()...)
require.Equal(t, []interface{}{hexutil.Encode(codeDbKey)}, req.args)
})
t.Run("FallbackToUnprefixed", func(t *testing.T) {
stub := &stubCallContext{
nextErr: errors.New("not found"),
}
fetcher := newFetcher(nil, stub)
_, _ = fetcher.CodeByHash(hash)
require.Len(t, stub.requests, 2, "should request with and without prefix")
req := stub.requests[0]
require.Equal(t, "debug_dbGet", req.method)
codeDbKey := append(rawdb.CodePrefix, hash.Bytes()...)
require.Equal(t, []interface{}{hexutil.Encode(codeDbKey)}, req.args)
req = stub.requests[1]
require.Equal(t, "debug_dbGet", req.method)
codeDbKey = hash.Bytes()
require.Equal(t, []interface{}{hexutil.Encode(codeDbKey)}, req.args)
})
}
type blockRequest struct {
ctx context.Context
blockHash common.Hash
......@@ -111,14 +175,13 @@ func (s *stubBlockSource) BlockByHash(ctx context.Context, blockHash common.Hash
func TestBlockByHash(t *testing.T) {
rng := rand.New(rand.NewSource(1234))
hash := testutils.RandomHash(rng)
ctx := context.Background()
t.Run("Success", func(t *testing.T) {
block, _ := testutils.RandomBlock(rng, 1)
stub := &stubBlockSource{nextResult: block}
fetcher := newFetcher(stub, nil)
res, err := fetcher.BlockByHash(ctx, hash)
res, err := fetcher.BlockByHash(hash)
require.NoError(t, err)
require.Same(t, block, res)
})
......@@ -127,7 +190,7 @@ func TestBlockByHash(t *testing.T) {
stub := &stubBlockSource{nextErr: errors.New("boom")}
fetcher := newFetcher(stub, nil)
res, err := fetcher.BlockByHash(ctx, hash)
res, err := fetcher.BlockByHash(hash)
require.ErrorIs(t, err, stub.nextErr)
require.Nil(t, res)
})
......@@ -136,7 +199,7 @@ func TestBlockByHash(t *testing.T) {
stub := &stubBlockSource{}
fetcher := newFetcher(stub, nil)
_, _ = fetcher.BlockByHash(ctx, hash)
_, _ = fetcher.BlockByHash(hash)
require.Len(t, stub.requests, 1, "should make single request")
req := stub.requests[0]
......
package l2
import (
"context"
"encoding/json"
"fmt"
"os"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-program/config"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
func NewFetchingEngine(ctx context.Context, logger log.Logger, cfg *config.Config) (derive.Engine, error) {
genesis, err := loadL2Genesis(cfg)
if err != nil {
return nil, err
}
oracle, err := NewFetchingL2Oracle(ctx, logger, cfg.L2URL)
if err != nil {
return nil, fmt.Errorf("connect l2 oracle: %w", err)
}
engineBackend, err := NewOracleBackedL2Chain(logger, oracle, genesis, cfg.L2Head)
if err != nil {
return nil, fmt.Errorf("create l2 chain: %w", err)
}
return NewOracleEngine(cfg.Rollup, logger, engineBackend), nil
}
func loadL2Genesis(cfg *config.Config) (*params.ChainConfig, error) {
data, err := os.ReadFile(cfg.L2GenesisPath)
if err != nil {
return nil, fmt.Errorf("read l2 genesis file: %w", err)
}
var genesis core.Genesis
err = json.Unmarshal(data, &genesis)
if err != nil {
return nil, fmt.Errorf("parse l2 genesis file: %w", err)
}
return genesis.Config, nil
}
package l2
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
// StateOracle defines the high-level API used to retrieve L2 state data pre-images
// The returned data is always the preimage of the requested hash.
type StateOracle interface {
// NodeByHash retrieves the merkle-patricia trie node pre-image for a given hash.
// Trie nodes may be from the world state trie or any account storage trie.
// Contract code is not stored as part of the trie and must be retrieved via CodeByHash
// Returns an error if the pre-image is unavailable.
NodeByHash(nodeHash common.Hash) ([]byte, error)
// CodeByHash retrieves the contract code pre-image for a given hash.
// codeHash should be retrieved from the world state account for a contract.
// Returns an error if the pre-image is unavailable.
CodeByHash(codeHash common.Hash) ([]byte, error)
}
// Oracle defines the high-level API used to retrieve L2 data.
// The returned data is always the preimage of the requested hash.
type Oracle interface {
StateOracle
// BlockByHash retrieves the block with the given hash.
// Returns an error if the block is not available.
BlockByHash(blockHash common.Hash) (*types.Block, error)
}
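// Example usage (illustrative sketch, not from the original source; error handling elided):
//
//	var oracle Oracle // e.g. a *FetchingL2Oracle
//	block, _ := oracle.BlockByHash(blockHash)
//	node, _ := oracle.NodeByHash(block.Root())
//	code, _ := oracle.CodeByHash(codeHash)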
......@@ -52,7 +52,6 @@ func NewEventVec(factory Factory, ns string, name string, displayName string, la
Namespace: ns,
Name: fmt.Sprintf("last_%s_unix", name),
Help: fmt.Sprintf("Timestamp of last %s event", displayName),
},
labelNames),
}, labelNames),
}
}
......@@ -4,6 +4,9 @@ import "github.com/ethereum/go-ethereum/core/types"
type NoopTxMetrics struct{}
func (*NoopTxMetrics) RecordL1GasFee(*types.Receipt) {}
func (*NoopTxMetrics) RecordNonce(uint64) {}
func (*NoopTxMetrics) RecordGasBumpCount(int) {}
func (*NoopTxMetrics) RecordTxConfirmationLatency(int64) {}
func (*NoopTxMetrics) TxConfirmed(*types.Receipt) {}
func (*NoopTxMetrics) TxPublished(string) {}
func (*NoopTxMetrics) RPCError() {}
......@@ -9,15 +9,34 @@ import (
)
type TxMetricer interface {
RecordL1GasFee(receipt *types.Receipt)
RecordGasBumpCount(times int)
RecordTxConfirmationLatency(latency int64)
RecordGasBumpCount(int)
RecordTxConfirmationLatency(int64)
RecordNonce(uint64)
TxConfirmed(*types.Receipt)
TxPublished(string)
RPCError()
}
type TxMetrics struct {
TxL1GasFee prometheus.Gauge
TxGasBump prometheus.Gauge
LatencyConfirmedTx prometheus.Gauge
currentNonce prometheus.Gauge
txPublishError *prometheus.CounterVec
publishEvent metrics.Event
confirmEvent metrics.EventVec
rpcError prometheus.Counter
}
func receiptStatusString(receipt *types.Receipt) string {
switch receipt.Status {
case types.ReceiptStatusSuccessful:
return "success"
case types.ReceiptStatusFailed:
return "failed"
default:
return "unknown_status"
}
}
var _ TxMetricer = (*TxMetrics)(nil)
......@@ -42,10 +61,36 @@ func MakeTxMetrics(ns string, factory metrics.Factory) TxMetrics {
Help: "Latency of a confirmed transaction in milliseconds",
Subsystem: "txmgr",
}),
currentNonce: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "current_nonce",
Help: "Current nonce of the from address",
Subsystem: "txmgr",
}),
txPublishError: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Name: "tx_publish_error_count",
Help: "Count of publish errors. Labells are sanitized error strings",
Subsystem: "txmgr",
}, []string{"error"}),
confirmEvent: metrics.NewEventVec(factory, ns, "confirm", "tx confirm", []string{"status"}),
publishEvent: metrics.NewEvent(factory, ns, "publish", "tx publish"),
rpcError: factory.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "rpc_error_count",
Help: "Temporrary: Count of RPC errors (like timeouts) that have occurrred",
Subsystem: "txmgr",
}),
}
}
func (t *TxMetrics) RecordL1GasFee(receipt *types.Receipt) {
func (t *TxMetrics) RecordNonce(nonce uint64) {
t.currentNonce.Set(float64(nonce))
}
// TxConfirmed records the confirmation event (by receipt status) and the L1 gas fee of the confirmed transaction
func (t *TxMetrics) TxConfirmed(receipt *types.Receipt) {
t.confirmEvent.Record(receiptStatusString(receipt))
t.TxL1GasFee.Set(float64(receipt.EffectiveGasPrice.Uint64() * receipt.GasUsed / params.GWei))
}
......@@ -56,3 +101,15 @@ func (t *TxMetrics) RecordGasBumpCount(times int) {
func (t *TxMetrics) RecordTxConfirmationLatency(latency int64) {
t.LatencyConfirmedTx.Set(float64(latency))
}
func (t *TxMetrics) TxPublished(errString string) {
if errString != "" {
t.txPublishError.WithLabelValues(errString).Inc()
} else {
t.publishEvent.Record()
}
}
func (t *TxMetrics) RPCError() {
t.rpcError.Inc()
}
......@@ -148,6 +148,7 @@ func (m *SimpleTxManager) Send(ctx context.Context, candidate TxCandidate) (*typ
func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (*types.Transaction, error) {
gasTipCap, basefee, err := m.suggestGasPriceCaps(ctx)
if err != nil {
m.metr.RPCError()
return nil, fmt.Errorf("failed to get gas price info: %w", err)
}
gasFeeCap := calcGasFeeCap(basefee, gasTipCap)
......@@ -157,8 +158,10 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (*
defer cancel()
nonce, err := m.backend.NonceAt(childCtx, m.cfg.From, nil)
if err != nil {
m.metr.RPCError()
return nil, fmt.Errorf("failed to get nonce: %w", err)
}
m.metr.RecordNonce(nonce)
rawTx := &types.DynamicFeeTx{
ChainID: m.chainID,
......@@ -240,6 +243,7 @@ func (m *SimpleTxManager) send(ctx context.Context, tx *types.Transaction) (*typ
case receipt := <-receiptChan:
m.metr.RecordGasBumpCount(bumpCounter)
m.metr.TxConfirmed(receipt)
return receipt, nil
}
}
......@@ -263,19 +267,28 @@ func (m *SimpleTxManager) publishAndWaitForTx(ctx context.Context, tx *types.Tra
switch {
case errStringMatch(err, core.ErrNonceTooLow):
log.Warn("nonce too low", "err", err)
m.metr.TxPublished("nonce_to_low")
case errStringMatch(err, context.Canceled):
m.metr.RPCError()
log.Warn("transaction send cancelled", "err", err)
m.metr.TxPublished("context_cancelled")
case errStringMatch(err, txpool.ErrAlreadyKnown):
log.Warn("resubmitted already known transaction", "err", err)
m.metr.TxPublished("tx_already_known")
case errStringMatch(err, txpool.ErrReplaceUnderpriced):
log.Warn("transaction replacement is underpriced", "err", err)
m.metr.TxPublished("tx_replacement_underpriced")
case errStringMatch(err, txpool.ErrUnderpriced):
log.Warn("transaction is underpriced", "err", err)
m.metr.TxPublished("tx_underpriced")
default:
m.metr.RPCError()
log.Error("unable to publish transaction", "err", err)
m.metr.TxPublished("unknown_error")
}
return
}
m.metr.TxPublished("")
log.Info("Transaction successfully published")
// Poll for the transaction to be ready & then send the result to receiptChan
......@@ -287,7 +300,6 @@ func (m *SimpleTxManager) publishAndWaitForTx(ctx context.Context, tx *types.Tra
select {
case receiptChan <- receipt:
m.metr.RecordTxConfirmationLatency(time.Since(t).Milliseconds())
m.metr.RecordL1GasFee(receipt)
default:
}
}
......@@ -319,9 +331,11 @@ func (m *SimpleTxManager) queryReceipt(ctx context.Context, txHash common.Hash,
m.l.Trace("Transaction not yet mined", "hash", txHash)
return nil
} else if err != nil {
m.metr.RPCError()
m.l.Info("Receipt retrieval failed", "hash", txHash, "err", err)
return nil
} else if receipt == nil {
m.metr.RPCError()
m.l.Warn("Receipt and error are both nil", "hash", txHash)
return nil
}
......@@ -405,6 +419,7 @@ func (m *SimpleTxManager) suggestGasPriceCaps(ctx context.Context) (*big.Int, *b
defer cancel()
tip, err := m.backend.SuggestGasTipCap(cCtx)
if err != nil {
m.metr.RPCError()
return nil, nil, fmt.Errorf("failed to fetch the suggested gas tip cap: %w", err)
} else if tip == nil {
return nil, nil, errors.New("the suggested tip was nil")
......@@ -413,6 +428,7 @@ func (m *SimpleTxManager) suggestGasPriceCaps(ctx context.Context) (*big.Int, *b
defer cancel()
head, err := m.backend.HeaderByNumber(cCtx, nil)
if err != nil {
m.metr.RPCError()
return nil, nil, fmt.Errorf("failed to fetch the suggested basefee: %w", err)
} else if head.BaseFee == nil {
return nil, nil, errors.New("txmgr does not support pre-london blocks that do not have a basefee")
......
......@@ -691,6 +691,7 @@ func TestWaitMinedReturnsReceiptAfterFailure(t *testing.T) {
name: "TEST",
backend: &borkedBackend,
l: testlog.Logger(t, log.LvlCrit),
metr: &metrics.NoopTxMetrics{},
}
// Don't mine the tx with the default backend. The failingBackend will
......@@ -726,6 +727,7 @@ func doGasPriceIncrease(t *testing.T, txTipCap, txFeeCap, newTip, newBaseFee int
name: "TEST",
backend: &borkedBackend,
l: testlog.Logger(t, log.LvlCrit),
metr: &metrics.NoopTxMetrics{},
}
tx := types.NewTx(&types.DynamicFeeTx{
......@@ -829,6 +831,7 @@ func TestIncreaseGasPriceNotExponential(t *testing.T) {
name: "TEST",
backend: &borkedBackend,
l: testlog.Logger(t, log.LvlCrit),
metr: &metrics.NoopTxMetrics{},
}
tx := types.NewTx(&types.DynamicFeeTx{
GasTipCap: big.NewInt(10),
......
......@@ -78,11 +78,17 @@ var (
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "BUILDING_TIME"),
Value: time.Second * 6,
}
AllowGaps = cli.BoolFlag{
Name: "allow-gaps",
Usage: "allow gaps in block building, like missed slots on the beacon chain.",
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "ALLOW_GAPS"),
}
)
func ParseBuildingArgs(ctx *cli.Context) *engine.BlockBuildingSettings {
return &engine.BlockBuildingSettings{
BlockTime: ctx.Uint64(BlockTimeFlag.Name),
AllowGaps: ctx.Bool(AllowGaps.Name),
Random: hashFlagValue(RandaoFlag.Name, ctx),
FeeRecipient: addrFlagValue(FeeRecipientFlag.Name, ctx),
BuildTime: ctx.Duration(BuildingTime.Name),
......@@ -336,7 +342,7 @@ var (
Usage: "build the next block using the Engine API",
Flags: []cli.Flag{
EngineEndpoint, EngineJWTPath,
FeeRecipientFlag, RandaoFlag, BlockTimeFlag, BuildingTime,
FeeRecipientFlag, RandaoFlag, BlockTimeFlag, BuildingTime, AllowGaps,
},
// TODO: maybe support transaction and tx pool engine flags, since we use op-geth?
// TODO: reorg flag
......@@ -362,7 +368,7 @@ var (
Description: "The block time can be changed. The execution engine must be synced to a post-Merge state first.",
Flags: append(append([]cli.Flag{
EngineEndpoint, EngineJWTPath,
FeeRecipientFlag, RandaoFlag, BlockTimeFlag, BuildingTime,
FeeRecipientFlag, RandaoFlag, BlockTimeFlag, BuildingTime, AllowGaps,
}, oplog.CLIFlags(envVarPrefix)...), opmetrics.CLIFlags(envVarPrefix)...),
Action: EngineAction(func(ctx *cli.Context, client client.RPC) error {
logCfg := oplog.ReadLocalCLIConfig(ctx)
......
......@@ -95,13 +95,22 @@ func updateForkchoice(ctx context.Context, client client.RPC, head, safe, finali
}
type BlockBuildingSettings struct {
BlockTime uint64
BlockTime uint64
// AllowGaps allows block slots to be skipped; timestamps still increase in multiples of BlockTime, like L1, but there may be gaps.
AllowGaps bool
Random common.Hash
FeeRecipient common.Address
BuildTime time.Duration
}
func BuildBlock(ctx context.Context, client client.RPC, status *StatusData, settings *BlockBuildingSettings) (*engine.ExecutableData, error) {
timestamp := status.Head.Time + settings.BlockTime
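// When AllowGaps is set, snap the timestamp forward onto the head's BlockTime grid
// so skipped slots leave gaps instead of drifting the schedule.
// Illustrative numbers (assumed, not from the source): head.Time=100, BlockTime=12,
// now=150 -> 150-((150-100)%12)=148, i.e. head.Time plus a whole number of BlockTimes.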
if settings.AllowGaps {
now := uint64(time.Now().Unix())
if now > timestamp {
timestamp = now - ((now - timestamp) % settings.BlockTime)
}
}
var pre engine.ForkChoiceResponse
if err := client.CallContext(ctx, &pre, "engine_forkchoiceUpdatedV1",
engine.ForkchoiceStateV1{
......@@ -109,7 +118,7 @@ func BuildBlock(ctx context.Context, client client.RPC, status *StatusData, sett
SafeBlockHash: status.Safe.Hash,
FinalizedBlockHash: status.Finalized.Hash,
}, engine.PayloadAttributes{
Timestamp: status.Head.Time + settings.BlockTime,
Timestamp: timestamp,
Random: settings.Random,
SuggestedFeeRecipient: settings.FeeRecipient,
// TODO: maybe use the L2 fields to hack in tx embedding CLI option?
......@@ -210,6 +219,7 @@ func Auto(ctx context.Context, metrics Metricer, client client.RPC, log log.Logg
payload, err := BuildBlock(ctx, client, status, &BlockBuildingSettings{
BlockTime: settings.BlockTime,
AllowGaps: settings.AllowGaps,
Random: settings.Random,
FeeRecipient: settings.FeeRecipient,
BuildTime: buildTime,
......
# @eth-optimism/actor-tests
## 0.0.24
### Patch Changes
- Updated dependencies [b16067a9f]
- Updated dependencies [be3315689]
- Updated dependencies [9a02079eb]
- Updated dependencies [98fbe9d22]
- @eth-optimism/contracts-bedrock@0.13.2
- @eth-optimism/sdk@2.0.2
## 0.0.23
### Patch Changes
......
{
"name": "@eth-optimism/actor-tests",
"version": "0.0.23",
"version": "0.0.24",
"description": "A library and suite of tests to stress test Optimism Bedrock.",
"license": "MIT",
"author": "",
......@@ -18,9 +18,9 @@
"test:coverage": "yarn test"
},
"dependencies": {
"@eth-optimism/contracts-bedrock": "0.13.1",
"@eth-optimism/contracts-bedrock": "0.13.2",
"@eth-optimism/core-utils": "^0.12.0",
"@eth-optimism/sdk": "^2.0.1",
"@eth-optimism/sdk": "^2.0.2",
"@types/chai": "^4.2.18",
"@types/chai-as-promised": "^7.1.4",
"async-mutex": "^0.3.2",
......
# @eth-optimism/drippie-mon
## 0.3.0
### Minor Changes
- 1e7897c81: Introduces the balance-mon service to chain-mon.
### Patch Changes
- dbe5eb308: Empty patch release to re-release packages that failed to be released by a bug in the release process.
- Updated dependencies [be3315689]
- @eth-optimism/sdk@2.0.2
## 0.2.1
### Patch Changes
......
{
"private": true,
"name": "@eth-optimism/chain-mon",
"version": "0.2.1",
"version": "0.3.0",
"description": "[Optimism] Chain monitoring services",
"main": "dist/index",
"types": "dist/index",
......@@ -36,7 +36,7 @@
"@eth-optimism/common-ts": "0.8.1",
"@eth-optimism/contracts-periphery": "1.0.7",
"@eth-optimism/core-utils": "0.12.0",
"@eth-optimism/sdk": "2.0.1",
"@eth-optimism/sdk": "2.0.2",
"ethers": "^5.7.0",
"@types/dateformat": "^5.0.0",
"chai-as-promised": "^7.1.1",
......
# @eth-optimism/contracts-bedrock
## 0.13.2
### Patch Changes
- b16067a9f: Reduce the time that the system dictator deploy scripts wait before checking the chain state.
- 9a02079eb: Makes the Proxy contract inheritable by making functions (public virtual).
- 98fbe9d22: Added a constructor to the System Dictator
## 0.13.1
### Patch Changes
......
......@@ -215,7 +215,7 @@ contract SystemDictator is OwnableUpgradeable {
/**
* @notice Configures the ProxyAdmin contract.
*/
function step1() external onlyOwner step(1) {
function step1() public onlyOwner step(1) {
// Set the AddressManager in the ProxyAdmin.
config.globalConfig.proxyAdmin.setAddressManager(config.globalConfig.addressManager);
......@@ -260,7 +260,7 @@ contract SystemDictator is OwnableUpgradeable {
* @notice Pauses the system by shutting down the L1CrossDomainMessenger and setting the
* deposit halt flag to tell the Sequencer's DTL to stop accepting deposits.
*/
function step2() external onlyOwner step(2) {
function step2() public onlyOwner step(2) {
// Store the address of the old L1CrossDomainMessenger implementation. We will need this
// address in the case that we have to exit early.
oldL1CrossDomainMessenger = config.globalConfig.addressManager.getAddress(
......@@ -410,6 +410,14 @@ contract SystemDictator is OwnableUpgradeable {
);
}
/**
* @notice Calls the first 2 steps of the migration process.
*/
function phase1() external onlyOwner {
step1();
step2();
}
/**
* @notice Transfers admin ownership to the final owner.
*/
......
......@@ -2,6 +2,7 @@
"finalSystemOwner": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
"portalGuardian": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
"controller": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266",
"proxyAdminOwner": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
"l1StartingBlockTag": "earliest",
"l1ChainID": 900,
......@@ -16,12 +17,22 @@
"batchSenderAddress": "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC",
"l2OutputOracleSubmissionInterval": 6,
"l2OutputOracleStartingTimestamp": -1,
"l2OutputOracleStartingTimestamp": 0,
"l2OutputOracleStartingBlockNumber": 0,
"l2OutputOracleProposer": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8",
"l2OutputOracleChallenger": "0x6925B8704Ff96DEe942623d6FB5e946EF5884b63",
"l2GenesisBlockBaseFeePerGas": "0x3B9ACA00",
"baseFeeVaultRecipient": "0xBcd4042DE499D14e55001CcbB24a551F3b954096",
"l2GenesisBlockGasLimit": "0x17D7840",
"baseFeeVaultRecipient": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
"l1FeeVaultRecipient": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
"sequencerFeeVaultRecipient": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
"governanceTokenName": "Optimism",
"governanceTokenSymbol": "OP",
"governanceTokenOwner": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
"l1FeeVaultRecipient": "0x71bE63f3384f5fb98995898A86B02Fb2426c5788",
"sequencerFeeVaultRecipient": "0xfabb0ac9d68b0b445fb7357272ff202c5651694a",
......
......@@ -25,7 +25,7 @@
"governanceTokenName": "Optimism",
"governanceTokenSymbol": "OP",
"governanceTokenOwner": "0x90F79bf6EB2c4f870365E785982E1f101E93b906",
"l2GenesisBlockGasLimit": "0x17D7840",
"l2GenesisBlockGasLimit": "0x1c9c380",
"l2GenesisBlockCoinbase": "0x4200000000000000000000000000000000000011",
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
"gasPriceOracleOverhead": 2100,
......@@ -33,4 +33,4 @@
"eip1559Denominator": 50,
"eip1559Elasticity": 10,
"l2GenesisRegolithTimeOffset": "0x0"
}
\ No newline at end of file
}
......@@ -37,7 +37,7 @@ const config: DeployConfig = {
governanceTokenSymbol: 'OP',
governanceTokenOwner: '0x90F79bf6EB2c4f870365E785982E1f101E93b906',
l2GenesisBlockGasLimit: '0x17D7840',
l2GenesisBlockGasLimit: '0x1c9c380',
l2GenesisBlockCoinbase: '0x4200000000000000000000000000000000000011',
l2GenesisBlockBaseFeePerGas: '0x3b9aca00',
......
......@@ -11,10 +11,8 @@ import {
assertContractVariable,
getContractsFromArtifacts,
getDeploymentAddress,
doStep,
jsonifyTransaction,
getTenderlySimulationLink,
getCastCommand,
doOwnershipTransfer,
doPhase,
} from '../src/deploy-utils'
const uint128Max = ethers.BigNumber.from('0xffffffffffffffffffffffffffffffff')
......@@ -73,10 +71,13 @@ const deployFn: DeployFunction = async (hre) => {
// Transfer ownership of the ProxyAdmin to the SystemDictator.
if ((await ProxyAdmin.owner()) !== SystemDictator.address) {
console.log(`Setting ProxyAdmin owner to MSD`)
await ProxyAdmin.transferOwnership(SystemDictator.address)
} else {
console.log(`Proxy admin already owned by MSD`)
await doOwnershipTransfer({
isLiveDeployer,
proxy: ProxyAdmin,
name: 'ProxyAdmin',
transferFunc: 'transferOwnership',
dictator: SystemDictator,
})
}
// We don't need to transfer proxy addresses if we're already beyond the proxy transfer step.
......@@ -89,31 +90,13 @@ const deployFn: DeployFunction = async (hre) => {
needsProxyTransfer &&
(await AddressManager.owner()) !== SystemDictator.address
) {
if (isLiveDeployer) {
console.log(`Setting AddressManager owner to MSD`)
await AddressManager.transferOwnership(SystemDictator.address)
} else {
const tx = await AddressManager.populateTransaction.transferOwnership(
SystemDictator.address
)
console.log(`Please transfer AddressManager owner to MSD`)
console.log(`AddressManager address: ${AddressManager.address}`)
console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
console.log(getCastCommand(tx))
console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
}
// Wait for the ownership transfer to complete.
await awaitCondition(
async () => {
const owner = await AddressManager.owner()
return owner === SystemDictator.address
},
5000,
1000
)
await doOwnershipTransfer({
isLiveDeployer,
proxy: AddressManager,
name: 'AddressManager',
transferFunc: 'transferOwnership',
dictator: SystemDictator,
})
} else {
console.log(`AddressManager already owned by the SystemDictator`)
}
......@@ -125,35 +108,13 @@ const deployFn: DeployFunction = async (hre) => {
from: ethers.constants.AddressZero,
})) !== SystemDictator.address
) {
if (isLiveDeployer) {
console.log(`Setting L1StandardBridge owner to MSD`)
await L1StandardBridgeProxyWithSigner.setOwner(SystemDictator.address)
} else {
const tx = await L1StandardBridgeProxy.populateTransaction.setOwner(
SystemDictator.address
)
console.log(`Please transfer L1StandardBridge (proxy) owner to MSD`)
console.log(
`L1StandardBridgeProxy address: ${L1StandardBridgeProxy.address}`
)
console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
console.log(getCastCommand(tx))
console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
}
// Wait for the ownership transfer to complete.
await awaitCondition(
async () => {
const owner = await L1StandardBridgeProxy.callStatic.getOwner({
from: ethers.constants.AddressZero,
})
return owner === SystemDictator.address
},
5000,
1000
)
await doOwnershipTransfer({
isLiveDeployer,
proxy: L1StandardBridgeProxyWithSigner,
name: 'L1StandardBridgeProxy',
transferFunc: 'setOwner',
dictator: SystemDictator,
})
} else {
console.log(`L1StandardBridge already owned by MSD`)
}
......@@ -165,47 +126,58 @@ const deployFn: DeployFunction = async (hre) => {
from: ethers.constants.AddressZero,
})) !== SystemDictator.address
) {
if (isLiveDeployer) {
console.log(`Setting L1ERC721Bridge owner to MSD`)
await L1ERC721BridgeProxyWithSigner.changeAdmin(SystemDictator.address)
} else {
const tx = await L1ERC721BridgeProxy.populateTransaction.changeAdmin(
SystemDictator.address
)
console.log(`Please transfer L1ERC721Bridge (proxy) owner to MSD`)
console.log(`L1ERC721BridgeProxy address: ${L1ERC721BridgeProxy.address}`)
console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
console.log(getCastCommand(tx))
console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
}
// Wait for the ownership transfer to complete.
await awaitCondition(
async () => {
const owner = await L1ERC721BridgeProxy.callStatic.admin({
from: ethers.constants.AddressZero,
})
return owner === SystemDictator.address
},
5000,
1000
)
await doOwnershipTransfer({
isLiveDeployer,
proxy: L1ERC721BridgeProxyWithSigner,
name: 'L1ERC721BridgeProxy',
transferFunc: 'changeAdmin',
dictator: SystemDictator,
})
} else {
console.log(`L1ERC721Bridge already owned by MSD`)
}
// Step 1 is a freebie, it doesn't impact the system.
await doStep({
// Wait for the ownership transfers to complete before continuing.
await awaitCondition(
async (): Promise<boolean> => {
const proxyAdminOwner = await ProxyAdmin.owner()
const addressManagerOwner = await AddressManager.owner()
const l1StandardBridgeOwner =
await L1StandardBridgeProxy.callStatic.getOwner({
from: ethers.constants.AddressZero,
})
const l1Erc721BridgeOwner = await L1ERC721BridgeProxy.callStatic.admin({
from: ethers.constants.AddressZero,
})
return (
proxyAdminOwner === SystemDictator.address &&
addressManagerOwner === SystemDictator.address &&
l1StandardBridgeOwner === SystemDictator.address &&
l1Erc721BridgeOwner === SystemDictator.address
)
},
5000,
1000
)
await doPhase({
isLiveDeployer,
SystemDictator,
step: 1,
phase: 1,
message: `
Phase 1 includes the following steps:
Step 1 will configure the ProxyAdmin contract; you can safely execute this step at any time
without impacting the functionality of the rest of the system.
Step 2 will stop deposits and withdrawals via the L1CrossDomainMessenger and will stop the
DTL from syncing new deposits via the CTC, effectively shutting down the legacy system. Once
this step has been executed, you should immediately begin the L2 migration process. If you
need to restart the system, run exit1() followed by finalize().
`,
checks: async () => {
// Step 1 checks
await assertContractVariable(
ProxyAdmin,
'addressManager',
......@@ -264,21 +236,8 @@ const deployFn: DeployFunction = async (hre) => {
assert(config.systemTxMaxGas === 1_000_000)
assert(ethers.utils.parseUnits('1', 'gwei').eq(config.minimumBaseFee))
assert(config.maximumBaseFee.eq(uint128Max))
},
})
// Step 2 shuts down the system.
await doStep({
isLiveDeployer,
SystemDictator,
step: 2,
message: `
Step 2 will stop deposits and withdrawals via the L1CrossDomainMessenger and will stop the
DTL from syncing new deposits via the CTC, effectively shutting down the legacy system. Once
this step has been executed, you should immediately begin the L2 migration process. If you
need to restart the system, run exit1() followed by finalize().
`,
checks: async () => {
// Step 2 checks
const messenger = await AddressManager.getAddress(
'OVM_L1CrossDomainMessenger'
)
......
......@@ -10,11 +10,11 @@ import '@nomiclabs/hardhat-ethers'
import {
assertContractVariable,
getContractsFromArtifacts,
jsonifyTransaction,
printJsonTransaction,
isStep,
doStep,
getTenderlySimulationLink,
getCastCommand,
printTenderlySimulationLink,
printCastCommand,
} from '../src/deploy-utils'
const deployFn: DeployFunction = async (hre) => {
......@@ -206,10 +206,9 @@ const deployFn: DeployFunction = async (hre) => {
)
)
console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
console.log(getCastCommand(tx))
console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
printJsonTransaction(tx)
printCastCommand(tx)
await printTenderlySimulationLink(SystemDictator.provider, tx)
}
await awaitCondition(
......@@ -318,10 +317,9 @@ const deployFn: DeployFunction = async (hre) => {
const tx = await OptimismPortal.populateTransaction.unpause()
console.log(`Please unpause the OptimismPortal...`)
console.log(`OptimismPortal address: ${OptimismPortal.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
console.log(getCastCommand(tx))
console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
printJsonTransaction(tx)
printCastCommand(tx)
await printTenderlySimulationLink(SystemDictator.provider, tx)
}
await awaitCondition(
......@@ -348,10 +346,9 @@ const deployFn: DeployFunction = async (hre) => {
const tx = await SystemDictator.populateTransaction.finalize()
console.log(`Please finalize deployment...`)
console.log(`MSD address: ${SystemDictator.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
console.log(getCastCommand(tx))
console.log(await getTenderlySimulationLink(SystemDictator.provider, tx))
printJsonTransaction(tx)
printCastCommand(tx)
await printTenderlySimulationLink(SystemDictator.provider, tx)
}
await awaitCondition(
......
{
"name": "@eth-optimism/contracts-bedrock",
"version": "0.13.1",
"version": "0.13.2",
"description": "Contracts for Optimism Specs",
"main": "dist/index",
"types": "dist/index",
......@@ -59,7 +59,7 @@
"hardhat": "^2.9.6"
},
"devDependencies": {
"@eth-optimism/hardhat-deploy-config": "^0.2.5",
"@eth-optimism/hardhat-deploy-config": "^0.2.6",
"@ethersproject/abstract-provider": "^5.7.0",
"@ethersproject/abstract-signer": "^5.7.0",
"ethereumjs-wallet": "^1.0.2",
......
......@@ -305,20 +305,56 @@ export const getDeploymentAddress = async (
* @param tx Ethers transaction object.
* @returns JSON-ified transaction object.
*/
export const jsonifyTransaction = (tx: ethers.PopulatedTransaction): string => {
return JSON.stringify(
{
from: tx.from,
to: tx.to,
data: tx.data,
value: tx.value,
chainId: tx.chainId,
},
null,
2
export const printJsonTransaction = (tx: ethers.PopulatedTransaction): void => {
console.log(
'JSON transaction parameters:\n' +
JSON.stringify(
{
from: tx.from,
to: tx.to,
data: tx.data,
value: tx.value,
chainId: tx.chainId,
},
null,
2
)
)
}
/**
 * Mini helper for transferring ownership of a proxy to the MSD.
 *
 * @param opts Options for executing the ownership transfer.
 * @param opts.isLiveDeployer True if the deployer is live.
 * @param opts.proxy Proxy contract whose ownership is being transferred.
 * @param opts.name Name of the proxy, used for logging.
 * @param opts.transferFunc Name of the proxy function that transfers ownership (e.g. setOwner or changeAdmin).
 * @param opts.dictator SystemDictator contract that should receive ownership.
*/
export const doOwnershipTransfer = async (opts: {
isLiveDeployer?: boolean
proxy: ethers.Contract
name: string
transferFunc: string
dictator: ethers.Contract
}): Promise<void> => {
if (opts.isLiveDeployer) {
console.log(`Setting ${opts.name} owner to MSD`)
await opts.proxy[opts.transferFunc](opts.dictator.address)
} else {
const tx = await opts.proxy.populateTransaction[opts.transferFunc](
opts.dictator.address
)
console.log(`
Please transfer ${opts.name} (proxy) owner to MSD
- ${opts.name} address: ${opts.proxy.address}
- MSD address: ${opts.dictator.address}
`)
printJsonTransaction(tx)
printCastCommand(tx)
await printTenderlySimulationLink(opts.dictator.provider, tx)
}
}
/**
* Mini helper for checking if the current step is a target step.
*
......@@ -333,6 +369,25 @@ export const isStep = async (
return (await dictator.currentStep()) === step
}
/**
* Mini helper for checking if the current step is the first step in target phase.
*
* @param dictator SystemDictator contract.
* @param phase Target phase.
* @returns True if the current step is the first step in target phase.
*/
export const isStartOfPhase = async (
dictator: ethers.Contract,
phase: number
): Promise<boolean> => {
const phaseToStep = {
1: 1,
2: 3,
3: 6,
}
return (await dictator.currentStep()) === phaseToStep[phase]
}
/**
* Mini helper for executing a given step.
*
......@@ -350,7 +405,8 @@ export const doStep = async (opts: {
message: string
checks: () => Promise<void>
}): Promise<void> => {
if (!(await isStep(opts.SystemDictator, opts.step))) {
const isStepVal = await isStep(opts.SystemDictator, opts.step)
if (!isStepVal) {
console.log(`Step already completed: ${opts.step}`)
return
}
......@@ -368,11 +424,8 @@ export const doStep = async (opts: {
]()
console.log(`Please execute step ${opts.step}...`)
console.log(`MSD address: ${opts.SystemDictator.address}`)
console.log(`JSON:`)
console.log(jsonifyTransaction(tx))
console.log(
await getTenderlySimulationLink(opts.SystemDictator.provider, tx)
)
printJsonTransaction(tx)
await printTenderlySimulationLink(opts.SystemDictator.provider, tx)
}
// Wait for the step to complete.
......@@ -389,36 +442,91 @@ export const doStep = async (opts: {
}
/**
* Returns a direct link to a Tenderly simulation.
* Mini helper for executing a given phase.
*
 * @param opts Options for executing the phase.
 * @param opts.isLiveDeployer True if the deployer is live.
 * @param opts.SystemDictator SystemDictator contract.
 * @param opts.phase Phase to execute.
 * @param opts.message Message to print before executing the phase.
 * @param opts.checks Checks to perform after executing the phase.
*/
export const doPhase = async (opts: {
isLiveDeployer?: boolean
SystemDictator: ethers.Contract
phase: number
message: string
checks: () => Promise<void>
}): Promise<void> => {
const isStart = await isStartOfPhase(opts.SystemDictator, opts.phase)
if (!isStart) {
console.log(`Start of phase ${opts.phase} already completed`)
return
}
// Extra message to help the user understand what's going on.
console.log(opts.message)
  // Either automatically or manually execute the phase.
if (opts.isLiveDeployer) {
console.log(`Executing phase ${opts.phase}...`)
await opts.SystemDictator[`phase${opts.phase}`]()
} else {
const tx = await opts.SystemDictator.populateTransaction[
`phase${opts.phase}`
]()
console.log(`Please execute phase ${opts.phase}...`)
console.log(`MSD address: ${opts.SystemDictator.address}`)
printJsonTransaction(tx)
await printTenderlySimulationLink(opts.SystemDictator.provider, tx)
}
  // Wait for the phase to complete.
await awaitCondition(
async () => {
return isStartOfPhase(opts.SystemDictator, opts.phase + 1)
},
30000,
1000
)
  // Perform post-phase checks.
await opts.checks()
}
/**
* Prints a direct link to a Tenderly simulation.
*
* @param provider Ethers Provider.
* @param tx Ethers transaction object.
*/
export const getTenderlySimulationLink = async (
export const printTenderlySimulationLink = async (
provider: ethers.providers.Provider,
tx: ethers.PopulatedTransaction
): Promise<string> => {
): Promise<void> => {
if (process.env.TENDERLY_PROJECT && process.env.TENDERLY_USERNAME) {
return `https://dashboard.tenderly.co/${process.env.TENDERLY_PROJECT}/${
process.env.TENDERLY_USERNAME
}/simulator/new?${new URLSearchParams({
network: (await provider.getNetwork()).chainId.toString(),
contractAddress: tx.to,
rawFunctionInput: tx.data,
from: tx.from,
}).toString()}`
console.log(
`https://dashboard.tenderly.co/${process.env.TENDERLY_PROJECT}/${
process.env.TENDERLY_USERNAME
}/simulator/new?${new URLSearchParams({
network: (await provider.getNetwork()).chainId.toString(),
contractAddress: tx.to,
rawFunctionInput: tx.data,
from: tx.from,
}).toString()}`
)
}
}
/**
* Returns a cast commmand for submitting a given transaction.
 * Prints a cast command for submitting a given transaction.
*
* @param tx Ethers transaction object.
*/
export const getCastCommand = (tx: ethers.PopulatedTransaction): string => {
export const printCastCommand = (tx: ethers.PopulatedTransaction): void => {
if (process.env.CAST_COMMANDS) {
return `cast send ${tx.to} ${tx.data} --from ${tx.from} --value ${tx.value}`
console.log(
`cast send ${tx.to} ${tx.data} --from ${tx.from} --value ${tx.value}`
)
}
}
......@@ -5,9 +5,11 @@ const config: DeployConfig = {
l2ProxyOwnerAddress: '',
optimistName: '',
optimistSymbol: '',
attestorAddress: '',
optimistInviterName: '',
optimistBaseUriAttestorAddress: '',
optimistInviterInviteGranter: '',
optimistInviterName: '',
optimistAllowlistAllowlistAttestor: '',
optimistAllowlistCoinbaseQuestAttestor: '',
}
export default config
......@@ -5,9 +5,13 @@ const config: DeployConfig = {
l2ProxyOwnerAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistName: 'Optimist',
optimistSymbol: 'OPTIMIST',
attestorAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistBaseUriAttestorAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistInviterInviteGranter: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistInviterName: 'OptimistInviter',
optimistAllowlistAllowlistAttestor:
'0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistAllowlistCoinbaseQuestAttestor:
'0x661B7Acca8ebd93AFd349a088e9a9A00053DB1BF',
}
export default config
......@@ -5,9 +5,13 @@ const config: DeployConfig = {
l2ProxyOwnerAddress: '0x70997970c51812dc3a010c7d01b50e0d17dc79c8',
optimistName: 'OP Citizenship',
optimistSymbol: 'OPNFT',
attestorAddress: '0x70997970c51812dc3a010c7d01b50e0d17dc79c8',
optimistBaseUriAttestorAddress: '0x70997970c51812dc3a010c7d01b50e0d17dc79c8',
optimistInviterInviteGranter: '0x70997970c51812dc3a010c7d01b50e0d17dc79c8',
optimistInviterName: 'OptimistInviter',
optimistAllowlistAllowlistAttestor:
'0x70997970c51812dc3a010c7d01b50e0d17dc79c8',
optimistAllowlistCoinbaseQuestAttestor:
'0x70997970c51812dc3a010c7d01b50e0d17dc79c8',
}
export default config
import config from './hardhat'
// uses the same config as hardhat.ts
export default config
......@@ -5,9 +5,13 @@ const config: DeployConfig = {
l2ProxyOwnerAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistName: 'Optimist',
optimistSymbol: 'OPTIMIST',
attestorAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistBaseUriAttestorAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistInviterInviteGranter: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistInviterName: 'OptimistInviter',
optimistAllowlistAllowlistAttestor:
'0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistAllowlistCoinbaseQuestAttestor:
'0x661B7Acca8ebd93AFd349a088e9a9A00053DB1BF',
}
export default config
......@@ -2,12 +2,17 @@ import { DeployConfig } from '../../src'
const config: DeployConfig = {
ddd: '0x9C6373dE60c2D3297b18A8f964618ac46E011B58',
// EcoPod test account
l2ProxyOwnerAddress: '0x8F0EBDaA1cF7106bE861753B0f9F5c0250fE0819',
optimistName: 'Optimist',
optimistSymbol: 'OPTIMIST',
attestorAddress: '0x8F0EBDaA1cF7106bE861753B0f9F5c0250fE0819',
optimistBaseUriAttestorAddress: '0x8F0EBDaA1cF7106bE861753B0f9F5c0250fE0819',
optimistInviterInviteGranter: '0x8F0EBDaA1cF7106bE861753B0f9F5c0250fE0819',
optimistInviterName: 'OptimistInviter',
optimistAllowlistAllowlistAttestor:
'0x8F0EBDaA1cF7106bE861753B0f9F5c0250fE0819',
optimistAllowlistCoinbaseQuestAttestor:
'0x8F0EBDaA1cF7106bE861753B0f9F5c0250fE0819',
}
export default config
......@@ -5,9 +5,13 @@ const config: DeployConfig = {
l2ProxyOwnerAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistName: 'Optimist',
optimistSymbol: 'OPTIMIST',
attestorAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistBaseUriAttestorAddress: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistInviterInviteGranter: '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistInviterName: 'OptimistInviter',
optimistAllowlistAllowlistAttestor:
'0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3',
optimistAllowlistCoinbaseQuestAttestor:
'0x661B7Acca8ebd93AFd349a088e9a9A00053DB1BF',
}
export default config
/* Imports: External */
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { HardhatRuntimeEnvironment } from 'hardhat/types'
import '@nomiclabs/hardhat-ethers'
import '@eth-optimism/hardhat-deploy-config'
import 'hardhat-deploy'
import type { DeployConfig } from '../../src'
const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
const deployConfig = hre.deployConfig as DeployConfig
const { deployer } = await hre.getNamedAccounts()
console.log(`Deploying OptimistAllowlist implementation with ${deployer}`)
const Deployment__AttestationStation = await hre.deployments.get(
'AttestationStationProxy'
)
const Deployment__OptimistInviter = await hre.deployments.get(
'OptimistInviterProxy'
)
const attestationStationAddress = Deployment__AttestationStation.address
const optimistInviterAddress = Deployment__OptimistInviter.address
console.log(`Using ${attestationStationAddress} as the ATTESTATION_STATION`)
console.log(
`Using ${deployConfig.optimistAllowlistAllowlistAttestor} as ALLOWLIST_ATTESTOR`
)
console.log(
`Using ${deployConfig.optimistAllowlistCoinbaseQuestAttestor} as COINBASE_QUEST_ATTESTOR`
)
console.log(`Using ${optimistInviterAddress} as OPTIMIST_INVITER`)
const { deploy } = await hre.deployments.deterministic('OptimistAllowlist', {
salt: hre.ethers.utils.solidityKeccak256(['string'], ['OptimistAllowlist']),
from: deployer,
args: [
attestationStationAddress,
deployConfig.optimistAllowlistAllowlistAttestor,
deployConfig.optimistAllowlistCoinbaseQuestAttestor,
optimistInviterAddress,
],
log: true,
})
await deploy()
}
deployFn.tags = ['OptimistAllowlistImpl', 'OptimistEnvironment']
deployFn.dependencies = ['AttestationStationProxy', 'OptimistInviterProxy']
export default deployFn
/* Imports: External */
import assert from 'assert'
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { HardhatRuntimeEnvironment } from 'hardhat/types'
import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import 'hardhat-deploy'
import { assertContractVariable } from '@eth-optimism/contracts-bedrock/src/deploy-utils'
import { ethers, utils } from 'ethers'
import type { DeployConfig } from '../../src'
import { setupProxyContract } from '../../src/helpers/setupProxyContract'
const { getAddress } = utils
// Required conditions before deploying - Specified in `deployFn.dependencies`
// - AttestationStationProxy is deployed and points to the correct implementation
// - OptimistInviterProxy is deployed and points to the correct implementation
// - OptimistAllowlistImpl is deployed
//
// Steps
// 1. Deploy OptimistAllowlistProxy
// 2. Point the newly deployed proxy to the implementation, if it hasn't been done already
// 3. Update the admin of the proxy to the l2ProxyOwnerAddress, if it hasn't been done already
// 4. Basic sanity checks for contract variables
const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
const deployConfig = hre.deployConfig as DeployConfig
// Deployer should be set in hardhat.config.ts
const { deployer } = await hre.getNamedAccounts()
// We want the ability to deploy to a deterministic address, so we need the init bytecode to be
// consistent across deployments. The ddd will quickly transfer the ownership of the Proxy to a
// multisig after deployment.
//
// We need a consistent ddd, since the Proxy takes a `_admin` constructor argument, which
// affects the init bytecode and hence deployed address.
const ddd = deployConfig.ddd
if (getAddress(deployer) !== getAddress(ddd)) {
// Not a hard requirement. We can deploy with any account and just set the `_admin` to the
// ddd, but requiring that the deployer is the same as the ddd minimizes number of hot wallets
// we need to keep track of during deployment.
throw new Error('Must deploy with the ddd')
}
// Get the up to date deployment of the OptimistAllowlist contract
const Deployment__OptimistAllowlistImpl = await hre.deployments.get(
'OptimistAllowlist'
)
console.log(`Deploying OptimistAllowlistProxy with ${deployer}`)
// Deploys the Proxy.sol contract with the `_admin` constructor param set to the ddd (=== deployer).
const { deploy } = await hre.deployments.deterministic(
'OptimistAllowlistProxy',
{
salt: hre.ethers.utils.solidityKeccak256(
['string'],
['OptimistAllowlistProxy']
),
contract: 'Proxy',
from: deployer,
args: [deployer],
log: true,
}
)
// Deploy the Proxy contract
await deploy()
const Deployment__OptimistAllowlistProxy = await hre.deployments.get(
'OptimistAllowlistProxy'
)
console.log(
`OptimistAllowlistProxy deployed to ${Deployment__OptimistAllowlistProxy.address}`
)
// Deployed Proxy.sol contract
const Proxy = await hre.ethers.getContractAt(
'Proxy',
Deployment__OptimistAllowlistProxy.address
)
// Deployed Proxy.sol contract with the OptimistAllowlist interface
const OptimistAllowlist = await hre.ethers.getContractAt(
'OptimistAllowlist',
Deployment__OptimistAllowlistProxy.address
)
// ethers.Signer for the ddd. Should be the current owner of the Proxy.
const dddSigner = await hre.ethers.provider.getSigner(deployer)
// intended admin of the Proxy
const l2ProxyOwnerAddress = deployConfig.l2ProxyOwnerAddress
// setup the Proxy contract with correct implementation and admin
await setupProxyContract(Proxy, dddSigner, {
targetImplAddress: Deployment__OptimistAllowlistImpl.address,
targetProxyOwnerAddress: l2ProxyOwnerAddress,
})
const Deployment__AttestationStationProxy = await hre.deployments.get(
'AttestationStationProxy'
)
const Deployment__OptimistInviter = await hre.deployments.get(
'OptimistInviterProxy'
)
await assert(
getAddress(
await Proxy.connect(ethers.constants.AddressZero).callStatic.admin()
) === getAddress(l2ProxyOwnerAddress)
)
await assertContractVariable(OptimistAllowlist, 'version', '1.0.0')
await assertContractVariable(
OptimistAllowlist,
'ATTESTATION_STATION',
Deployment__AttestationStationProxy.address
)
await assertContractVariable(
OptimistAllowlist,
'ALLOWLIST_ATTESTOR',
deployConfig.optimistAllowlistAllowlistAttestor
)
await assertContractVariable(
OptimistAllowlist,
'COINBASE_QUEST_ATTESTOR',
deployConfig.optimistAllowlistCoinbaseQuestAttestor
)
await assertContractVariable(
OptimistAllowlist,
'OPTIMIST_INVITER',
Deployment__OptimistInviter.address
)
}
deployFn.tags = ['OptimistAllowlistProxy', 'OptimistEnvironment']
deployFn.dependencies = [
'AttestationStationProxy',
'OptimistInviterProxy',
'OptimistAllowlistImpl',
]
export default deployFn
......@@ -14,13 +14,19 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
console.log(`Deploying Optimist implementation with ${deployer}`)
const Deployment__AttestationStation = await hre.deployments.get(
const Deployment__AttestationStationProxy = await hre.deployments.get(
'AttestationStationProxy'
)
const attestationStationAddress = Deployment__AttestationStation.address
const attestationStationAddress = Deployment__AttestationStationProxy.address
console.log(`Using ${attestationStationAddress} as the ATTESTATION_STATION`)
console.log(
`Using ${deployConfig.optimistBaseUriAttestorAddress} as BASE_URI_ATTESTOR`
)
console.log(`Using ${attestationStationAddress} as the AttestationStation`)
console.log(`Using ${deployConfig.attestorAddress} as ATTESTOR`)
const Deployment__OptimistAllowlistProxy = await hre.deployments.get(
'OptimistAllowlistProxy'
)
const optimistAllowlistAddress = Deployment__OptimistAllowlistProxy.address
const { deploy } = await hre.deployments.deterministic('Optimist', {
salt: hre.ethers.utils.solidityKeccak256(['string'], ['Optimist']),
......@@ -28,8 +34,9 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
args: [
deployConfig.optimistName,
deployConfig.optimistSymbol,
deployConfig.attestorAddress,
deployConfig.optimistBaseUriAttestorAddress,
attestationStationAddress,
optimistAllowlistAddress,
],
log: true,
})
......@@ -37,6 +44,7 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
await deploy()
}
deployFn.tags = ['Optimist', 'OptimistEnvironment']
deployFn.tags = ['OptimistImpl', 'OptimistEnvironment']
deployFn.dependencies = ['AttestationStationProxy', 'OptimistAllowlistProxy']
export default deployFn
......@@ -37,7 +37,7 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
await deploy()
}
deployFn.tags = ['OptimistInviter', 'OptimistEnvironment']
deployFn.tags = ['OptimistInviterImpl', 'OptimistEnvironment']
deployFn.dependencies = ['AttestationStationProxy']
export default deployFn
......@@ -10,6 +10,7 @@ import { assertContractVariable } from '@eth-optimism/contracts-bedrock/src/depl
import { ethers, utils } from 'ethers'
import type { DeployConfig } from '../../src'
import { setupProxyContract } from '../../src/helpers/setupProxyContract'
const { getAddress } = utils
......@@ -73,7 +74,7 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
'OptimistInviterProxy'
)
console.log(
`OptimistProxy deployed to ${Deployment__OptimistInviterProxy.address}`
`OptimistInviterProxy deployed to ${Deployment__OptimistInviterProxy.address}`
)
// Deployed Proxy.sol contract
......@@ -88,79 +89,25 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
Deployment__OptimistInviterProxy.address
)
// Gets the current implementation address the proxy is pointing to.
// callStatic is used since the `Proxy.implementation()` is not a view function and ethers will
// try to make a transaction if we don't use callStatic. Using the zero address as `from` lets us
// call functions on the proxy and not trigger the delegatecall. See Proxy.sol proxyCallIfNotAdmin
// modifier for more details.
const implementation = await Proxy.connect(
ethers.constants.AddressZero
).callStatic.implementation()
console.log(`implementation set to ${implementation}`)
if (
getAddress(implementation) !==
getAddress(Deployment__OptimistInviterImpl.address)
) {
// If the proxy isn't pointing to the correct implementation, we need to set it to the correct
// one, then call initialize() in the proxy's context.
console.log(
'implementation not set to OptimistInviter implementation contract'
)
console.log(
`Setting implementation to ${Deployment__OptimistInviterImpl.address}`
)
const name = deployConfig.optimistInviterName
// Create the calldata for the call to `initialize()`
const calldata = OptimistInviter.interface.encodeFunctionData(
'initialize',
[name]
)
// ethers.Signer for the ddd
const dddSigner = await hre.ethers.provider.getSigner(deployer)
// Point the proxy to the deployed OptimistInviter implementation contract,
// and call `initialize()` in the proxy's context
const tx = await Proxy.connect(dddSigner).upgradeToAndCall(
Deployment__OptimistInviterImpl.address,
calldata
)
const receipt = await tx.wait()
console.log(`implementation set in ${receipt.transactionHash}`)
} else {
console.log(
'implementation already set to OptimistInviter implementation contract'
)
}
const l2ProxyOwnerAddress = deployConfig.l2ProxyOwnerAddress
// Get the current proxy admin address
const admin = await Proxy.connect(
ethers.constants.AddressZero
).callStatic.admin()
console.log(`admin currently set to ${admin}`)
const name = deployConfig.optimistInviterName
// Create the calldata for the call to `initialize()`
const initializeCalldata = OptimistInviter.interface.encodeFunctionData(
'initialize',
[name]
)
if (getAddress(admin) !== getAddress(l2ProxyOwnerAddress)) {
// If the proxy admin isn't the l2ProxyOwnerAddress, we need to update it
// We're assuming that the proxy admin is the ddd right now.
console.log('admin is not set to the l2ProxyOwnerAddress')
console.log(`Setting admin to ${l2ProxyOwnerAddress}`)
// ethers.Signer for the ddd. Should be the current owner of the Proxy.
const dddSigner = await hre.ethers.provider.getSigner(deployer)
// ethers.Signer for the ddd
const dddSigner = await hre.ethers.provider.getSigner(deployer)
// intended admin of the Proxy
const l2ProxyOwnerAddress = deployConfig.l2ProxyOwnerAddress
// change admin to the l2ProxyOwnerAddress
const tx = await Proxy.connect(dddSigner).changeAdmin(l2ProxyOwnerAddress)
const receipt = await tx.wait()
console.log(`admin set in ${receipt.transactionHash}`)
} else {
console.log('admin already set to proxy owner address')
}
// setup the Proxy contract with correct implementation and admin
await setupProxyContract(Proxy, dddSigner, {
targetImplAddress: Deployment__OptimistInviterImpl.address,
targetProxyOwnerAddress: l2ProxyOwnerAddress,
postUpgradeCallCalldata: initializeCalldata,
})
const Deployment__AttestationStation = await hre.deployments.get(
'AttestationStationProxy'
......@@ -190,6 +137,6 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
}
deployFn.tags = ['OptimistInviterProxy', 'OptimistEnvironment']
deployFn.dependencies = ['AttestationStationProxy', 'OptimistInviter']
deployFn.dependencies = ['AttestationStationProxy', 'OptimistInviterImpl']
export default deployFn
......@@ -5,8 +5,9 @@ import '@eth-optimism/hardhat-deploy-config'
import '@nomiclabs/hardhat-ethers'
import 'hardhat-deploy'
import { assertContractVariable } from '@eth-optimism/contracts-bedrock/src/deploy-utils'
import { ethers, utils } from 'ethers'
import { utils } from 'ethers'
import { setupProxyContract } from '../../src/helpers/setupProxyContract'
import type { DeployConfig } from '../../src'
const { getAddress } = utils
......@@ -21,7 +22,7 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
throw new Error('Must deploy with the ddd')
}
const Deployment__Optimist = await hre.deployments.get('Optimist')
const Deployment__OptimistImpl = await hre.deployments.get('Optimist')
console.log(`Deploying OptimistProxy with ${deployer}`)
......@@ -48,69 +49,58 @@ const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
Deployment__OptimistProxy.address
)
const implementation = await Proxy.connect(
ethers.constants.AddressZero
).callStatic.implementation()
console.log(`implementation set to ${implementation}`)
if (getAddress(implementation) !== getAddress(Deployment__Optimist.address)) {
console.log('implementation not set to Optimist contract')
console.log(`Setting implementation to ${Deployment__Optimist.address}`)
// Create the calldata for the call to `initialize()`
const name = deployConfig.optimistName
const symbol = deployConfig.optimistSymbol
const calldata = Optimist.interface.encodeFunctionData('initialize', [
name,
symbol,
])
const tx = await Proxy.upgradeToAndCall(
Deployment__Optimist.address,
calldata
)
const receipt = await tx.wait()
console.log(`implementation set in ${receipt.transactionHash}`)
} else {
console.log('implementation already set to Optimist contract')
}
// ethers.Signer for the ddd. Should be the current owner of the Proxy.
const dddSigner = await hre.ethers.provider.getSigner(deployer)
// intended admin of the Proxy
const l2ProxyOwnerAddress = deployConfig.l2ProxyOwnerAddress
const admin = await Proxy.connect(
ethers.constants.AddressZero
).callStatic.admin()
console.log(`admin set to ${admin}`)
if (getAddress(admin) !== getAddress(l2ProxyOwnerAddress)) {
console.log('detected admin is not set')
console.log(`Setting admin to ${l2ProxyOwnerAddress}`)
const tx = await Proxy.changeAdmin(l2ProxyOwnerAddress)
const receipt = await tx.wait()
console.log(`admin set in ${receipt.transactionHash}`)
} else {
console.log('admin already set to proxy owner address')
}
const Deployment__AttestationStation = await hre.deployments.get(
// Create the calldata for the call to `initialize()`
const name = deployConfig.optimistName
const symbol = deployConfig.optimistSymbol
const initializeCalldata = Optimist.interface.encodeFunctionData(
'initialize',
[name, symbol]
)
// setup the Proxy contract with correct implementation and admin, and initialize atomically
await setupProxyContract(Proxy, dddSigner, {
targetImplAddress: Deployment__OptimistImpl.address,
targetProxyOwnerAddress: l2ProxyOwnerAddress,
postUpgradeCallCalldata: initializeCalldata,
})
const Deployment__AttestationStationProxy = await hre.deployments.get(
'AttestationStationProxy'
)
const Deployment__OptimistAllowlistProxy = await hre.deployments.get(
'OptimistAllowlistProxy'
)
await assertContractVariable(Proxy, 'admin', l2ProxyOwnerAddress)
await assertContractVariable(Optimist, 'name', deployConfig.optimistName)
await assertContractVariable(Optimist, 'version', '1.0.0')
await assertContractVariable(Optimist, 'version', '2.0.0')
await assertContractVariable(Optimist, 'symbol', deployConfig.optimistSymbol)
await assertContractVariable(
Optimist,
'ATTESTOR',
deployConfig.attestorAddress
'BASE_URI_ATTESTOR',
deployConfig.optimistBaseUriAttestorAddress
)
await assertContractVariable(
Optimist,
'OPTIMIST_ALLOWLIST',
Deployment__OptimistAllowlistProxy.address
)
await assertContractVariable(
Optimist,
'ATTESTATION_STATION',
Deployment__AttestationStation.address
Deployment__AttestationStationProxy.address
)
}
deployFn.tags = ['OptimistProxy', 'OptimistEnvironment']
deployFn.dependencies = ['AttestationStationProxy', 'Optimist']
deployFn.dependencies = ['AttestationStationProxy', 'OptimistImpl']
export default deployFn
......@@ -53,9 +53,9 @@
"url": "https://github.com/ethereum-optimism/optimism.git"
},
"devDependencies": {
"@eth-optimism/contracts-bedrock": "0.13.1",
"@eth-optimism/contracts-bedrock": "0.13.2",
"@eth-optimism/core-utils": "^0.12.0",
"@eth-optimism/hardhat-deploy-config": "^0.2.5",
"@eth-optimism/hardhat-deploy-config": "^0.2.6",
"@ethersproject/hardware-wallets": "^5.7.0",
"@nomiclabs/hardhat-ethers": "^2.0.2",
"@nomiclabs/hardhat-etherscan": "^3.0.3",
......
......@@ -31,7 +31,7 @@ export interface DeployConfig {
/**
 * Address of the privileged attestor that can attest to the base URI for the Optimist contract.
*/
attestorAddress: string
optimistBaseUriAttestorAddress: string
/**
* Address of the privileged account for the OptimistInviter contract that can grant invites.
......@@ -43,6 +43,17 @@ export interface DeployConfig {
*/
optimistInviterName: string
/**
 * Address of the privileged account for the OptimistAllowlist contract that can add/remove people
 * from the allowlist.
*/
optimistAllowlistAllowlistAttestor: string
/**
 * Address of the Coinbase attestor that attests to whether someone has completed a Coinbase Quest.
*/
optimistAllowlistCoinbaseQuestAttestor: string
/**
* Address of the owner of the proxies on L2. There will be a ProxyAdmin deployed as a predeploy
* after bedrock, so the owner of proxies should be updated to that after the upgrade.
......@@ -70,7 +81,7 @@ export const configSpec: DeployConfigSpec<DeployConfig> = {
type: 'string',
default: 'OPTIMIST',
},
attestorAddress: {
optimistBaseUriAttestorAddress: {
type: 'address',
},
optimistInviterInviteGranter: {
......@@ -79,6 +90,15 @@ export const configSpec: DeployConfigSpec<DeployConfig> = {
optimistInviterName: {
type: 'string',
},
optimistAllowlistAllowlistAttestor: {
type: 'address',
},
optimistAllowlistCoinbaseQuestAttestor: {
type: 'address',
},
l2ProxyOwnerAddress: {
type: 'address',
},
......
import assert from 'assert'
import { ethers, utils } from 'ethers'
const { getAddress } = utils
type ProxyConfig = {
targetImplAddress: string
targetProxyOwnerAddress: string
postUpgradeCallCalldata?: string
}
// Sets up the newly deployed proxy contract such that:
// 1. The proxy's implementation is set to the target implementation
// 2. The proxy's admin is set to the target proxy owner
//
// If the values are set correctly already, it makes no transactions.
const setupProxyContract = async (
proxyContract: ethers.Contract,
signer: ethers.Signer,
{
targetImplAddress,
targetProxyOwnerAddress,
postUpgradeCallCalldata,
}: ProxyConfig
) => {
const currentAdmin = await proxyContract
.connect(ethers.constants.AddressZero)
.callStatic.admin()
const signerAddress = await signer.getAddress()
// Gets the current implementation address the proxy is pointing to.
// callStatic is used since the `Proxy.implementation()` is not a view function and ethers will
// try to make a transaction if we don't use callStatic. Using the zero address as `from` lets us
// call functions on the proxy and not trigger the delegatecall. See Proxy.sol proxyCallIfNotAdmin
// modifier for more details.
const currentImplementation = await proxyContract
.connect(ethers.constants.AddressZero)
.callStatic.implementation()
console.log(`implementation currently set to ${currentImplementation}`)
if (getAddress(currentImplementation) !== getAddress(targetImplAddress)) {
// If the proxy isn't pointing to the correct implementation, we need to set it to the correct
// one, then call initialize() in the proxy's context.
console.log('implementation not set to correct contract')
console.log(`Setting implementation to ${targetImplAddress}`)
// The signer needs to be the current admin, otherwise we don't have permission
    // to update the implementation or admin
assert(
signerAddress === currentAdmin,
'the passed signer is not the admin, cannot update implementation'
)
let tx: ethers.providers.TransactionResponse
if (!postUpgradeCallCalldata) {
console.log(
        'postUpgradeCallCalldata is not provided. Using Proxy.upgradeTo()'
)
// Point the proxy to the target implementation
tx = await proxyContract.connect(signer).upgradeTo(targetImplAddress)
} else {
console.log(
        'postUpgradeCallCalldata is provided. Using Proxy.upgradeToAndCall()'
)
// Point the proxy to the target implementation,
// and call function in the proxy's context
tx = await proxyContract
.connect(signer)
.upgradeToAndCall(targetImplAddress, postUpgradeCallCalldata)
}
const receipt = await tx.wait()
console.log(`implementation set in ${receipt.transactionHash}`)
} else {
console.log(`implementation already set correctly to ${targetImplAddress}`)
}
console.log(`admin set to ${currentAdmin}`)
if (getAddress(currentAdmin) !== getAddress(targetProxyOwnerAddress)) {
// If the proxy admin isn't the l2ProxyOwnerAddress, we need to update it
// We're assuming that the proxy admin is the ddd right now.
console.log('detected admin is not set correctly')
console.log(`Setting admin to ${targetProxyOwnerAddress}`)
// The signer needs to be the current admin, otherwise we don't have permission
    // to update the implementation or admin
assert(
signerAddress === currentAdmin,
'proxyOwnerSigner is not the admin, cannot update admin'
)
// change admin to the l2ProxyOwnerAddress
const tx = await proxyContract
.connect(signer)
.changeAdmin(targetProxyOwnerAddress)
const receipt = await tx.wait()
console.log(`admin set in ${receipt.transactionHash}`)
} else {
console.log(`admin already set correctly to ${targetProxyOwnerAddress}`)
}
const updatedImplementation = await proxyContract
.connect(ethers.constants.AddressZero)
.callStatic.implementation()
const updatedAdmin = await proxyContract
.connect(ethers.constants.AddressZero)
.callStatic.admin()
assert(
getAddress(updatedAdmin) === getAddress(targetProxyOwnerAddress),
'Something went wrong - admin not set correctly after transaction'
)
assert(
getAddress(updatedImplementation) === getAddress(targetImplAddress),
'Something went wrong - implementation not set correctly after transaction'
)
console.log(
`Proxy at ${proxyContract.address} is set up with implementation: ${updatedImplementation} and admin: ${updatedAdmin}`
)
}
export { setupProxyContract }
# data transport layer
## 0.5.55
### Patch Changes
- b33208a8f: Add better logging to DTL about shutoff block
- dbe5eb308: Empty patch release to re-release packages that failed to be released by a bug in the release process.
## 0.5.54
### Patch Changes
......
{
"private": true,
"name": "@eth-optimism/data-transport-layer",
"version": "0.5.54",
"version": "0.5.55",
"description": "[Optimism] Service for shuttling data from L1 into L2",
"main": "dist/index",
"types": "dist/index",
......
# @eth-optimism/fault-detector
## 0.6.3
### Patch Changes
- dbe5eb308: Empty patch release to re-release packages that failed to be released by a bug in the release process.
- Updated dependencies [be3315689]
- @eth-optimism/sdk@2.0.2
## 0.6.2
### Patch Changes
......
{
"private": true,
"name": "@eth-optimism/fault-detector",
"version": "0.6.2",
"version": "0.6.3",
"description": "[Optimism] Service for detecting faulty L2 output proposals",
"main": "dist/index",
"types": "dist/index",
......@@ -50,7 +50,7 @@
"@eth-optimism/common-ts": "^0.8.1",
"@eth-optimism/contracts": "^0.5.40",
"@eth-optimism/core-utils": "^0.12.0",
"@eth-optimism/sdk": "^2.0.1",
"@eth-optimism/sdk": "^2.0.2",
"@ethersproject/abstract-provider": "^5.7.0"
}
}
# @eth-optimism/hardhat-deploy-config
## 0.2.6
### Patch Changes
- 5cf646bab: Add getter for other network's deploy config
## 0.2.5
### Patch Changes
......
{
"name": "@eth-optimism/hardhat-deploy-config",
"version": "0.2.5",
"version": "0.2.6",
"description": "[Optimism] Hardhat deploy configuration plugin",
"main": "dist/src/index.js",
"types": "dist/src/index.d.ts",
......
# @eth-optimism/message-relayer
## 0.5.33
### Patch Changes
- dbe5eb308: Empty patch release to re-release packages that failed to be released by a bug in the release process.
- Updated dependencies [be3315689]
- @eth-optimism/sdk@2.0.2
## 0.5.32
### Patch Changes
......
{
"private": true,
"name": "@eth-optimism/message-relayer",
"version": "0.5.32",
"version": "0.5.33",
"description": "[Optimism] Service for automatically relaying L2 to L1 transactions",
"main": "dist/index",
"types": "dist/index",
......@@ -33,7 +33,7 @@
"dependencies": {
"@eth-optimism/common-ts": "0.8.1",
"@eth-optimism/core-utils": "0.12.0",
"@eth-optimism/sdk": "2.0.1",
"@eth-optimism/sdk": "2.0.2",
"ethers": "^5.7.0"
},
"devDependencies": {
......
# @eth-optimism/replica-healthcheck
## 1.2.4
### Patch Changes
- dbe5eb308: Empty patch release to re-release packages that failed to be released by a bug in the release process.
## 1.2.3
### Patch Changes
......
{
"private": true,
"name": "@eth-optimism/replica-healthcheck",
"version": "1.2.3",
"version": "1.2.4",
"description": "[Optimism] Service for monitoring the health of replica nodes",
"main": "dist/index",
"types": "dist/index",
......
# @eth-optimism/sdk
## 2.0.2
### Patch Changes
- be3315689: Have SDK automatically create Standard and ETH bridges when L1StandardBridge is provided.
- Updated dependencies [b16067a9f]
- Updated dependencies [9a02079eb]
- Updated dependencies [98fbe9d22]
- @eth-optimism/contracts-bedrock@0.13.2
## 2.0.1
### Patch Changes
......
{
"name": "@eth-optimism/sdk",
"version": "2.0.1",
"version": "2.0.2",
"description": "[Optimism] Tools for working with Optimism",
"main": "dist/index",
"types": "dist/index",
......@@ -50,7 +50,7 @@
"dependencies": {
"@eth-optimism/contracts": "0.5.40",
"@eth-optimism/core-utils": "0.12.0",
"@eth-optimism/contracts-bedrock": "0.13.1",
"@eth-optimism/contracts-bedrock": "0.13.2",
"lodash": "^4.17.21",
"merkletreejs": "^0.2.27",
"rlp": "^2.2.7"
......
......@@ -35,7 +35,7 @@ interface StandardBridge {
function finalizeBridgeERC20(address _localToken, address _remoteToken, address _from, address _to, uint256 _amount, bytes memory _extraData) external;
function finalizeBridgeETH(address _from, address _to, uint256 _amount, bytes memory _extraData) payable external;
function messenger() view external returns (address);
function otherBridge() view external returns (address);
function OTHER_BRIDGE() view external returns (address);
}
```
......
# Challenger Specification
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**
- [Description](#description)
- [Terminology](#terminology)
- [Event and Response Lifecycle](#event-and-response-lifecycle)
- [`GameType.FAULT`](#gametypefault)
- [`GameType.ATTESTATION`](#gametypeattestation)
- [`GameType.VALIDITY`](#gametypevalidity)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Description
The Challenger is an off-chain agent that listens for faulty claims made about the state of
the L2 on the data availability layer. It is responsible for challenging these incorrect claims
and ensuring the correctness of all finalized claims on the settlement layer.
The Challenger agent is intended to be run as a permissionless service by participants of the network
alongside a [rollup-node](./rollup-node.md). Challenger agents will be rewarded in the form of the
bond attached to the claims they disprove.
## Terminology
- **data availability layer** - In the context of this document, the data availability layer is the
generic term for the location where claims about the state of the layer two are made. In the context
of Optimism, this is Ethereum Mainnet.
- **settlement layer** - In the context of this document, the settlement layer is the location of the
bridge as well as where funds deposited to the rollup reside. In the context of Optimism, this is
Ethereum Mainnet.
- **L2** - In the context of this document, the layer two of the Optimistic Rollup. In the context
of Optimism, this is the Optimism Mainnet.
- **rollup-node** - In the context of this document, the rollup node describes the
[rollup-node specification](./rollup-node.md). In the context of Optimism, this is the implementation
of the [rollup-node specification](./rollup-node.md), the `op-node`.
## Event and Response Lifecycle
The Challenger agent is expected to be able to listen for and respond to several different events
on the data availability layer. These events and responses are parameterized depending on the type
of dispute game being played, and the Challenger listens to different events and responds uniquely
to each of the different game types. For specification of dispute game types, see the
[Dispute Game Interfaces specification](./dispute-game-interface.md) and
[Dispute Game specification](./dispute-game.md).
### `GameType.FAULT`
> **Warning**
> The `FAULT` game type is not yet implemented. In the first iteration of Optimism's decentralization effort,
> challengers will respond to `ATTESTATION` games only.
**Events and Responses**
*TODO*
### `GameType.ATTESTATION`
**Events and Responses**
- [`L2OutputOracle.OutputProposed`](../packages/contracts-bedrock/contracts/L1/L2OutputOracle.sol#L57-70)
The `L2OutputOracle` contract emits this event when a new output is proposed on the data availability
  layer. Each time an output is proposed, the Challenger should check to see if the output is equal to
  the output given by the `optimism_outputAtBlock` endpoint of their `rollup-node` (a minimal sketch of
  this check follows the diagrams below).
- If it is, the Challenger should do nothing to challenge this output proposal.
- If it is not, the Challenger should respond by creating a new `DisputeGame` with the
`DisputeGameType.ATTESTATION` `gameType`, the correct output root as the `rootClaim`, and the abi-encoded
`l2BlockNumber` of the correct output root as the `extraData`.
![Attestation `OutputProposed` Diagram](./assets/challenger_attestation_output_proposed.png)
- `DisputeGameFactory.DisputeGameCreated` A new dispute game has been created and is ready to be reviewed. The
Challenger agent should listen for this event and check if the `rootClaim` of the `AttestationDisputeGame`
created by the `DisputeGameFactory` is equal to the output root of their `rollup-node` at the game's `l2BlockNumber`.
- If it is, the Challenger should sign the [EIP-712 typeHash](./dispute-game.md) of the struct containing the
`AttestationDisputeGame`'s `rootClaim` and `l2BlockNumber`. The Challenger should then submit the abi-encoded
    signature to the `AttestationDisputeGame`'s `challenge` function.
- If it is not, the Challenger should do nothing in support of this dispute game.
![Attestation `DisputeGameCreated` Diagram](./assets/challenger_attestation_dispute_game_created.png)
A full diagram and lifecycle of the Challenger's role in the `ATTESTATION` game type can be found below:
![Attestation Diagram](./assets/challenger_attestation.png)
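The following is a minimal, non-normative TypeScript sketch of the `OutputProposed` check described above.
The `L2OutputOracle` address, the event ABI fragment, the environment variable names, and the shape of the
`optimism_outputAtBlock` response are assumptions made for illustration only; a production challenger may be
structured very differently.

```typescript
import { ethers } from 'ethers'

// Assumed values for illustration: replace with the real deployment.
const L2_OUTPUT_ORACLE = '0x0000000000000000000000000000000000000000'
const outputOracleAbi = [
  // Assumed event shape based on the spec's OutputProposed(bytes32,uint256,uint256,uint256).
  'event OutputProposed(bytes32 indexed outputRoot, uint256 indexed l2OutputIndex, uint256 indexed l2BlockNumber, uint256 l1Timestamp)',
]

const main = async () => {
  const l1 = new ethers.providers.JsonRpcProvider(process.env.L1_RPC_URL)
  const rollupNode = new ethers.providers.JsonRpcProvider(process.env.ROLLUP_RPC_URL)
  const oracle = new ethers.Contract(L2_OUTPUT_ORACLE, outputOracleAbi, l1)

  oracle.on('OutputProposed', async (outputRoot, l2OutputIndex, l2BlockNumber) => {
    // Ask the local rollup-node for the canonical output at the proposed block.
    // The exact response shape is an assumption here.
    const canonical = await rollupNode.send('optimism_outputAtBlock', [
      ethers.utils.hexValue(l2BlockNumber),
    ])
    if (canonical.outputRoot === outputRoot) {
      return // Proposal matches the canonical output, nothing to do.
    }
    // Mismatch: the challenger would now create an ATTESTATION dispute game with the
    // correct output root as the rootClaim and the abi-encoded l2BlockNumber as extraData.
    console.log(`Output mismatch at L2 block ${l2BlockNumber.toString()}`)
  })
}

main().catch(console.error)
```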
### `GameType.VALIDITY`
**TODO**
> **Warning**
> The `VALIDITY` game type is not yet implemented. In the first iteration of Optimism's decentralization effort,
> challengers will respond to `ATTESTATION` games only. A validity proof based dispute game is a possibility,
> but fault proof based dispute games will be the primary focus of the team in the near future.
**Events and Responses**
*TODO*
......@@ -119,7 +119,7 @@ transactions, based on their positioning in the L2 block:
1. The first transaction MUST be a [L1 attributes deposited transaction][l1-attr-deposit], followed by
2. an array of zero-or-more [user-deposited transactions][user-deposited] submitted to the deposit
feed contract on L1. User-deposited transactions are only present in the first block of a L2 epoch.
feed contract on L1 (called `OptimismPortal`). User-deposited transactions are only present in the first block of a L2 epoch.
We only define a single new transaction type in order to minimize modifications to L1 client
software, and complexity in general.
......@@ -320,8 +320,7 @@ generated by the [L2 Chain Derivation][g-derivation] process. The content of eac
transaction are determined by the corresponding `TransactionDeposited` event emitted by the
[deposit contract][deposit-contract] on L1.
1. `from` is unchanged from the emitted value (though it may have been transformed to an alias in
the deposit feed contract).
1. `from` is unchanged from the emitted value (though it may have been transformed to an alias in `OptimismPortal`, the deposit feed contract).
2. `to` is any 20-byte address (including the zero address)
- In case of a contract creation (cf. `isCreation`), this address is set to `null`.
3. `mint` is set to the emitted value.
......
# Dispute Game Interface
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**
- [Overview](#overview)
- [Types](#types)
- [`DisputeGameFactory` Interface](#disputegamefactory-interface)
- [`DisputeGame` Interface](#disputegame-interface)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Overview
A dispute game is played between multiple parties when contesting the truthiness
of a claim. In the context of an optimistic rollup, claims are made about the
state of the layer two network to enable withdrawals to the layer one. A proposer
makes a claim about the layer two state such that they can withdraw and a
challenger can dispute the validity of the claim. The security of the layer two
comes from the ability to dispute fraudulent withdrawals.
A dispute game interface is defined to allow for multiple implementations of
dispute games to exist. If multiple dispute games run in production, the result is
a security model similar to having multiple protocol clients: a bug in a
single dispute game will not result in the bug becoming consensus.
## Types
For added context, we define a few types that are used in the following snippets.
```solidity
/// @notice The type of proof system being used.
enum GameType {
/// @dev The game will use a `IDisputeGame` implementation that utilizes fault proofs.
FAULT,
/// @dev The game will use a `IDisputeGame` implementation that utilizes validity proofs.
VALIDITY,
/// @dev The game will use a `IDisputeGame` implementation that utilizes attestation proofs.
ATTESTATION
}
/// @notice The current status of the dispute game.
enum GameStatus {
/// @dev The game is currently in progress, and has not been resolved.
IN_PROGRESS,
/// @dev The game has concluded, and the `rootClaim` was challenged successfully.
CHALLENGER_WINS,
/// @dev The game has concluded, and the `rootClaim` could not be contested.
DEFENDER_WINS
}
/// @notice A dedicated timestamp type.
type Timestamp is uint64;
/// @notice A `Claim` type represents a 32 byte hash or other unique identifier for a claim about
/// a certain piece of information.
/// @dev For the `FAULT` `GameType`, this will be a root of the merklized state of the fault proof
/// program at the end of the state transition.
/// For the `ATTESTATION` `GameType`, this will be an output root.
type Claim is bytes32;
```
## `DisputeGameFactory` Interface
The dispute game factory is responsible for creating new `DisputeGame` contracts
given a `GameType` and a root `Claim`. Challenger agents will listen to the
`DisputeGameCreated` events that are emitted by the factory as well as other events
that pertain to detecting fault (i.e. `OutputProposed(bytes32,uint256,uint256,uint256)`) in order to keep up
with on-going disputes in the protocol.
A [`clones-with-immutable-args`](https://github.com/Saw-mon-and-Natalie/clones-with-immutable-args) factory
(originally by @wighawag, but forked by @Saw-mon-and-Natalie) is used to create Clones. Each `GameType` has
a corresponding implementation within the factory, and when a new game is created, the factory creates a
clone of the `GameType`'s pre-deployed implementation contract.
The `rootClaim` of created dispute games can either be a claim that the creator agrees or disagrees with.
This is an implementation detail that is left up to the `IDisputeGame` to handle within its `resolve` function.
When the `DisputeGameFactory` creates a new `DisputeGame` contract, it calls `initialize()` on the clone to
set up the game.
```solidity
/// @title IDisputeGameFactory
/// @notice The interface for a DisputeGameFactory contract.
interface IDisputeGameFactory {
/// @notice Emitted when a new dispute game is created
/// @param disputeProxy The address of the dispute game proxy
/// @param gameType The type of the dispute game proxy's implementation
/// @param rootClaim The root claim of the dispute game
event DisputeGameCreated(address indexed disputeProxy, GameType indexed gameType, Claim indexed rootClaim);
    /// @notice `games` queries an internal mapping that maps the hash of `gameType ++ rootClaim ++ extraData`
/// to the deployed `DisputeGame` clone.
/// @dev `++` equates to concatenation.
/// @param gameType The type of the DisputeGame - used to decide the proxy implementation
/// @param rootClaim The root claim of the DisputeGame.
/// @param extraData Any extra data that should be provided to the created dispute game.
/// @return _proxy The clone of the `DisputeGame` created with the given parameters. Returns `address(0)` if nonexistent.
function games(GameType gameType, Claim rootClaim, bytes calldata extraData) external view returns (IDisputeGame _proxy);
/// @notice `gameImpls` is a mapping that maps `GameType`s to their respective `IDisputeGame` implementations.
/// @param gameType The type of the dispute game.
/// @return _impl The address of the implementation of the game type. Will be cloned on creation of a new dispute game
/// with the given `gameType`.
    function gameImpls(GameType gameType) external view returns (IDisputeGame _impl);
/// @notice The owner of the contract.
/// @dev Owner Permissions:
/// - Update the implementation contracts for a given game type.
/// @return _owner The owner of the contract.
    function owner() external view returns (address _owner);
/// @notice Creates a new DisputeGame proxy contract.
/// @param gameType The type of the DisputeGame - used to decide the proxy implementation
/// @param rootClaim The root claim of the DisputeGame.
/// @param extraData Any extra data that should be provided to the created dispute game.
function create(GameType gameType, Claim rootClaim, bytes calldata extraData) external returns (IDisputeGame proxy);
/// @notice Sets the implementation contract for a specific `GameType`
/// @dev May only be called by the `owner`.
/// @param gameType The type of the DisputeGame
/// @param impl The implementation contract for the given `GameType`
function setImplementation(GameType gameType, IDisputeGame impl) external;
}
```
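As a non-normative illustration of how an agent might interact with the factory described above, here is a
short TypeScript (ethers v5) sketch. The factory address, the human-readable ABI fragments (with `GameType`
encoded as `uint8` and `Claim` as `bytes32`), and the dispute parameters are placeholders rather than
canonical values.

```typescript
import { ethers } from 'ethers'

// Placeholder address; a real deployment address would be used here.
const FACTORY_ADDRESS = '0x0000000000000000000000000000000000000000'
const factoryAbi = [
  'event DisputeGameCreated(address indexed disputeProxy, uint8 indexed gameType, bytes32 indexed rootClaim)',
  'function create(uint8 gameType, bytes32 rootClaim, bytes extraData) returns (address)',
]

// GameType ordering from the Types section above: FAULT = 0, VALIDITY = 1, ATTESTATION = 2.
const ATTESTATION = 2

const main = async () => {
  const provider = new ethers.providers.JsonRpcProvider(process.env.L1_RPC_URL)
  const signer = new ethers.Wallet(process.env.PRIVATE_KEY!, provider)
  const factory = new ethers.Contract(FACTORY_ADDRESS, factoryAbi, signer)

  // The correct output root and the L2 block number it commits to (placeholders).
  const correctOutputRoot = '0x' + '11'.repeat(32)
  const l2BlockNumber = 4_000_000
  const extraData = ethers.utils.defaultAbiCoder.encode(['uint256'], [l2BlockNumber])

  // Create an attestation-based dispute game against a bad output proposal.
  const tx = await factory.create(ATTESTATION, correctOutputRoot, extraData)
  await tx.wait()

  // Challenger agents can also watch for games created by others.
  factory.on('DisputeGameCreated', (proxy, gameType, rootClaim) => {
    console.log(`game ${proxy} of type ${gameType} created with root claim ${rootClaim}`)
  })
}

main().catch(console.error)
```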
## `DisputeGame` Interface
The dispute game interface should be generic enough to allow it to work with any
proof system. This means that it should work with fault proofs, validity proofs,
an attestation based proof system, or any other source of truth that adheres to
the interface.
The `initialize` function of each `IDisputeGame` clone will be called by the `DisputeGameFactory` upon creation.
```solidity
////////////////////////////////////////////////////////////////
// GENERIC DISPUTE GAME //
////////////////////////////////////////////////////////////////
/// @title IDisputeGame
/// @notice The generic interface for a DisputeGame contract.
interface IDisputeGame {
/// @notice Initializes the DisputeGame contract.
/// @custom:invariant The `initialize` function may only be called once.
function initialize() external;
/// @notice Returns the semantic version of the DisputeGame contract
function version() external pure returns (string memory _version);
/// @notice Returns the timestamp that the DisputeGame contract was created at.
function createdAt() external pure returns (Timestamp _createdAt);
/// @notice Returns the current status of the game.
function status() external view returns (GameStatus _status);
/// @notice Getter for the game type.
/// @dev `clones-with-immutable-args` argument #1
/// @dev The reference impl should be entirely different depending on the type (fault, validity)
/// i.e. The game type should indicate the security model.
/// @return _gameType The type of proof system being used.
function gameType() external view returns (GameType _gameType);
/// @notice Getter for the root claim.
/// @return _rootClaim The root claim of the DisputeGame.
/// @dev `clones-with-immutable-args` argument #2
function rootClaim() external view returns (Claim _rootClaim);
/// @notice Getter for the extra data.
/// @dev `clones-with-immutable-args` argument #3
/// @return _extraData Any extra data supplied to the dispute game contract by the creator.
function extraData() external view returns (bytes memory _extraData);
/// @notice Returns the address of the `BondManager` used
    function bondManager() external view returns (IBondManager _bondManager);
/// @notice If all necessary information has been gathered, this function should mark the game
/// status as either `CHALLENGER_WINS` or `DEFENDER_WINS` and return the status of
/// the resolved game. It is at this stage that the bonds should be awarded to the
/// necessary parties.
/// @dev May only be called if the `status` is `IN_PROGRESS`.
    function resolve() external returns (GameStatus _status);
}
////////////////////////////////////////////////////////////////
// OUTPUT ATTESTATION DISPUTE GAME //
////////////////////////////////////////////////////////////////
/// @title IDisputeGame_OutputAttestation
/// @notice The interface for an attestation-based DisputeGame meant to contest output
/// proposals in Optimism's `L2OutputOracle` contract.
interface IDisputeGame_OutputAttestation is IDisputeGame {
/// @notice A mapping of addresses from the `signerSet` to booleans signifying whether
/// or not they have authorized the `rootClaim` to be invalidated.
function challenges(address challenger) external view returns (bool _challenged);
/// @notice The signer set consists of authorized public keys that may challenge the `rootClaim`.
/// @return An array of authorized signers.
function signerSet() external view returns (address[] memory _signers);
/// @notice The amount of signatures required to successfully challenge the `rootClaim`
/// output proposal. Once this threshold is met by members of the `signerSet`
/// calling `challenge`, the game will be resolved to `CHALLENGER_WINS`.
/// @custom:invariant The `signatureThreshold` may never be greater than the length of the `signerSet`.
    function signatureThreshold() external view returns (uint16 _signatureThreshold);
/// @notice Returns the L2 Block Number that the `rootClaim` commits to. Exists within the `extraData`.
    function l2BlockNumber() external view returns (uint256 _l2BlockNumber);
/// @notice Challenge the `rootClaim`.
/// @dev - If the `ecrecover`ed address that created the signature is not a part of the
/// signer set returned by `signerSet`, this function should revert.
/// - If the `ecrecover`ed address that created the signature is not the msg.sender,
/// this function should revert.
/// - If the signature provided is the signature that breaches the signature threshold,
/// the function should call the `resolve` function to resolve the game as `CHALLENGER_WINS`.
/// - When the game resolves, the bond attached to the root claim should be distributed among
/// the signers who participated in challenging the invalid claim.
/// @param signature An EIP-712 signature committing to the `rootClaim` and `l2BlockNumber` (within the `extraData`)
/// from a key that exists within the `signerSet`.
function challenge(bytes calldata signature) external;
}
```
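As a rough illustration of how these getters compose, the sketch below (which is not part of the
specification) reads an `IDisputeGame_OutputAttestation` instance and reports challenge progress. The
`AttestationGameViewer` contract and its function names are assumptions made for illustration; it
relies only on the functions declared in the interfaces above.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

// Assumes `IDisputeGame_OutputAttestation` (defined above) is imported.

/// @notice Illustrative read-only helper; not part of the specification.
contract AttestationGameViewer {
    /// @notice Counts how many members of the signer set have challenged the root claim.
    function challengeCount(IDisputeGame_OutputAttestation _game) public view returns (uint256 count_) {
        address[] memory signers = _game.signerSet();
        for (uint256 i = 0; i < signers.length; i++) {
            if (_game.challenges(signers[i])) {
                count_++;
            }
        }
    }

    /// @notice True once enough signatures have been collected for the game to resolve
    ///         to `CHALLENGER_WINS`.
    function quorumReached(IDisputeGame_OutputAttestation _game) external view returns (bool) {
        return challengeCount(_game) >= _game.signatureThreshold();
    }
}
```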
# Dispute Game
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**
- [Attestation Dispute Game](#attestation-dispute-game)
- [Smart Contract Implementation](#smart-contract-implementation)
- [Attestation Structure](#attestation-structure)
- [Why EIP-712](#why-eip-712)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Attestation Dispute Game
The output attestation based dispute game shifts the current permissioned output proposal process
to a permissionless, social-consensus based architecture that can progressively decentralize over
time by increasing the size of the signer set. In this "game," output proposals can be submitted
permissionlessly. To prevent "invalid output proposals," a social quorum can revert an output proposal
when an invalid one is discovered. The set of signers is maintained in the `SystemConfig` contract,
and these signers will issue [EIP-712](https://eips.ethereum.org/EIPS/eip-712) signatures
over canonical output roots and the `l2BlockNumber`s they commit to as attestations. To learn more,
see the [DisputeGame Interface Spec](./dispute-game-interface.md).
In the above language, an "invalid output proposal" is defined as an output proposal that represents
a non-canonical state of the L2 chain.
### Smart Contract Implementation
The `AttestationDisputeGame` should implement the `IDisputeGame` interface and also be able to call
out to the `L2OutputOracle`. It is expected that the `L2OutputOracle` will grant permissions to
`AttestationDisputeGame` contracts to call its `deleteL2Outputs` function at the *specific* `l2BlockNumber`
that is embedded in the `AttestationDisputeGame`'s `extraData`.
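For concreteness, a minimal sketch of that interaction is shown below. The `deleteL2Outputs(uint256)`
signature and the idea of deriving an output index from the game's `extraData` are assumptions made for
illustration; the actual `L2OutputOracle` ABI may differ.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

/// @notice Assumed shape of the oracle entry point; the real signature may differ.
interface IL2OutputOracleLike {
    function deleteL2Outputs(uint256 _l2OutputIndex) external;
}

/// @notice Illustrative fragment of an `AttestationDisputeGame` implementation.
contract AttestationResolutionSketch {
    IL2OutputOracleLike public immutable ORACLE;
    /// @notice Index of the disputed output, assumed to be derived from the game's `extraData`.
    uint256 public immutable DISPUTED_OUTPUT_INDEX;

    constructor(IL2OutputOracleLike _oracle, uint256 _disputedOutputIndex) {
        ORACLE = _oracle;
        DISPUTED_OUTPUT_INDEX = _disputedOutputIndex;
    }

    /// @notice Would be called once quorum is reached; the oracle must have granted this
    ///         game permission to delete the disputed output.
    function _deleteDisputedOutput() internal {
        ORACLE.deleteL2Outputs(DISPUTED_OUTPUT_INDEX);
    }
}
```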
The `AttestationDisputeGame` should be configured with a quorum ratio at deploy time. It should also
maintain a set of attestor accounts, which is fetched from the `SystemConfig` contract and snapshotted
at deploy time. This snapshot is necessary to place a fixed upper bound on the resolution cost, which in
turn gives a fixed cost for the bond that must be attached to output proposals.
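A minimal sketch of this deploy-time snapshot is shown below, assuming a hypothetical `attestorSet()`
getter on the `SystemConfig` contract and a basis-points representation for the quorum ratio; neither
detail is specified by this document.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

/// @notice Hypothetical getter; the real `SystemConfig` interface may differ.
interface ISystemConfigLike {
    function attestorSet() external view returns (address[] memory);
}

/// @notice Illustrative fragment showing the signer-set snapshot taken at deploy time.
contract AttestationSnapshotSketch {
    /// @notice Signer set frozen at deploy time so the resolution cost has a fixed upper bound.
    address[] public frozenSignerSet;
    /// @notice Quorum ratio in basis points (assumed representation).
    uint256 public immutable QUORUM_RATIO_BPS;

    constructor(ISystemConfigLike _config, uint256 _quorumRatioBps) {
        // Later changes to the attestor set in `SystemConfig` do not affect this game.
        frozenSignerSet = _config.attestorSet();
        QUORUM_RATIO_BPS = _quorumRatioBps;
    }
}
```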
The ability to add and remove attestor accounts should be restricted to a single immutable
account that controls the `SystemConfig`. It should be impossible to remove accounts if doing so would
make quorum unreachable. Accounts may be added or removed while a challenge is open, since such
changes do not affect the `signerSet` snapshot held by open challenges.
A challenge is created when an alternative output root for a given `l2BlockNumber` is presented to the
`DisputeGameFactory` contract. Multiple challenges should be able to run in parallel.
For simplicity, the `AttestationDisputeGame` does not need to track which output proposals are
committed to as part of the attestations. It only needs to check that the attested output root
is different from the proposed output root. If this check were omitted, it would be possible
to remove output proposals that agree with the attestations, creating a griefing vector.
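A sketch of that guard, assuming both roots are available as `bytes32` values inside `challenge`
(the contract, function, and variable names are illustrative):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

abstract contract RootDisagreementCheck {
    /// @notice Illustrative guard, not the specified implementation: a challenge is only
    ///         meaningful when the attested (correct) root disagrees with the proposed one.
    function _checkRootsDiffer(bytes32 _attestedOutputRoot, bytes32 _proposedOutputRoot) internal pure {
        require(
            _attestedOutputRoot != _proposedOutputRoot,
            "AttestationDisputeGame: attested root must differ from the proposed output root"
        );
    }
}
```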
#### Attestation Structure
The EIP-712 [typeHash](https://eips.ethereum.org/EIPS/eip-712#rationale-for-typehash) should be
defined as the following:
```solidity
TYPE_HASH = keccak256("Dispute(bytes32 outputRoot,uint256 l2BlockNumber)");
```
The components for the `typeHash` are as follows:
- `outputRoot` - The **correct** output root that commits to the given `l2BlockNumber`. This should be a
positive attestation where the `rootClaim` of the `AttestationDisputeGame` is the **correct** output root
for the given `l2BlockNumber`.
- `l2BlockNumber` - The L2 block number that the `outputRoot` commits to. The `outputRoot` should commit
to the entirety of the L2 state from genesis up to and including this `l2BlockNumber`.
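To make the hashing concrete, the sketch below computes the EIP-712 digest and recovers the attestor.
Only the `typeHash` is taken from this document; the domain name, version string, and the split
(`v`, `r`, `s`) signature encoding are assumptions made for illustration.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

contract AttestationDigestSketch {
    /// @notice The `typeHash` defined above.
    bytes32 internal constant DISPUTE_TYPE_HASH =
        keccak256("Dispute(bytes32 outputRoot,uint256 l2BlockNumber)");

    /// @notice Recovers the signer of an attestation over (`_outputRoot`, `_l2BlockNumber`).
    function recoverAttestor(
        bytes32 _outputRoot,
        uint256 _l2BlockNumber,
        uint8 _v,
        bytes32 _r,
        bytes32 _s
    ) public view returns (address attestor_) {
        // Binding the domain separator to the chain ID and to this contract's address is
        // what gives attestations their replay protection (see "Why EIP-712" below).
        bytes32 domainSeparator = keccak256(
            abi.encode(
                keccak256(
                    "EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)"
                ),
                keccak256(bytes("AttestationDisputeGame")),
                keccak256(bytes("1")),
                block.chainid,
                address(this)
            )
        );
        bytes32 structHash = keccak256(abi.encode(DISPUTE_TYPE_HASH, _outputRoot, _l2BlockNumber));
        bytes32 digest = keccak256(abi.encodePacked("\x19\x01", domainSeparator, structHash));
        attestor_ = ecrecover(digest, _v, _r, _s);
    }
}
```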
### Why EIP-712
It is important to use EIP-712 to decouple the originator of the transaction from the attestor. This
allows a decentralized network of attestors to serve attestations to bots that are responsible for
ensuring that no output proposal submitted to the network can enable malicious withdrawals from the
bridge.
It is equally important to have replay protection so that attestations cannot be used more than once.
The EIP-712 domain separator can provide this by binding each signature to a chain ID and a verifying
contract, so an attestation produced for one dispute cannot be reused elsewhere.
......@@ -67,7 +67,7 @@ censorship resistance.
- All information required to derive the chain is embedded into layer 1 blocks. That way as long as the layer 1
chain is available, so is the rollup.
1. **Validity** - All transactions must be correctly executed and all withdrawals correctly processed.
- The rollup state and withdrawals are managed on an L1 contract called the `L2 State Oracle`. This oracle is
- The rollup state and withdrawals are managed on an L1 contract called the `L2OutputOracle`. This oracle is
guaranteed to _only_ finalize correct (ie. valid) rollup block hashes given a **single honest verifier** assumption. If
there is ever an invalid block hash asserted on layer 1, an honest verifier will prove it is invalid and win a bond.
......