Commit 73d5902c authored by mergify[bot], committed by GitHub

Merge branch 'develop' into dependabot/go_modules/golang.org/x/term-0.12.0

parents ec3c1772 e984fe56
@@ -56,8 +56,9 @@ def main():
     deployment_dir = pjoin(contracts_bedrock_dir, 'deployments', 'devnetL1')
     op_node_dir = pjoin(args.monorepo_dir, 'op-node')
     ops_bedrock_dir = pjoin(monorepo_dir, 'ops-bedrock')
-    deploy_config_dir = pjoin(contracts_bedrock_dir, 'deploy-config'),
+    deploy_config_dir = pjoin(contracts_bedrock_dir, 'deploy-config')
-    devnet_config_path = pjoin(contracts_bedrock_dir, 'deploy-config', 'devnetL1.json')
+    devnet_config_path = pjoin(deploy_config_dir, 'devnetL1.json')
+    devnet_config_template_path = pjoin(deploy_config_dir, 'devnetL1-template.json')
     ops_chain_ops = pjoin(monorepo_dir, 'op-chain-ops')
     sdk_dir = pjoin(monorepo_dir, 'packages', 'sdk')
@@ -69,6 +70,7 @@ def main():
         l1_deployments_path=pjoin(deployment_dir, '.deploy'),
         deploy_config_dir=deploy_config_dir,
         devnet_config_path=devnet_config_path,
+        devnet_config_template_path=devnet_config_template_path,
         op_node_dir=op_node_dir,
         ops_bedrock_dir=ops_bedrock_dir,
         ops_chain_ops=ops_chain_ops,
@@ -124,10 +126,16 @@ def deploy_contracts(paths):
         '--rpc-url', 'http://127.0.0.1:8545'
     ], env={}, cwd=paths.contracts_bedrock_dir)
 
+def init_devnet_l1_deploy_config(paths, update_timestamp=False):
+    deploy_config = read_json(paths.devnet_config_template_path)
+    if update_timestamp:
+        deploy_config['l1GenesisBlockTimestamp'] = '{:#x}'.format(int(time.time()))
+    write_json(paths.devnet_config_path, deploy_config)
+
 def devnet_l1_genesis(paths):
     log.info('Generating L1 genesis state')
+    init_devnet_l1_deploy_config(paths)
     geth = subprocess.Popen([
         'geth', '--dev', '--http', '--http.api', 'eth,debug',
         '--verbosity', '4', '--gcmode', 'archive', '--dev.gaslimit', '30000000'
@@ -157,13 +165,13 @@ def devnet_deploy(paths):
     if os.path.exists(paths.allocs_path) == False:
         devnet_l1_genesis(paths)
 
-    devnet_config_backup = pjoin(paths.devnet_dir, 'devnetL1.json.bak')
-    shutil.copy(paths.devnet_config_path, devnet_config_backup)
-    deploy_config = read_json(paths.devnet_config_path)
-    deploy_config['l1GenesisBlockTimestamp'] = '{:#x}'.format(int(time.time()))
-    write_json(paths.devnet_config_path, deploy_config)
+    # It's odd that we want to regenerate the devnetL1.json file with
+    # an updated timestamp different than the one used in the devnet_l1_genesis
+    # function. But, without it, CI flakes on this test rather consistently.
+    # If someone reads this comment and understands why this is being done, please
+    # update this comment to explain.
+    init_devnet_l1_deploy_config(paths, update_timestamp=True)
 
     outfile_l1 = pjoin(paths.devnet_dir, 'genesis-l1.json')
     run_command([
         'go', 'run', 'cmd/main.go', 'genesis', 'l1',
         '--deploy-config', paths.devnet_config_path,
...
@@ -9,7 +9,6 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
-	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/urfave/cli/v2"
@@ -331,12 +330,18 @@ func Run(ctx *cli.Context) error {
 		}
 		if proofAt(state) {
-			preStateHash := crypto.Keccak256Hash(state.EncodeWitness())
+			preStateHash, err := state.EncodeWitness().StateHash()
+			if err != nil {
+				return fmt.Errorf("failed to hash prestate witness: %w", err)
+			}
 			witness, err := stepFn(true)
 			if err != nil {
 				return fmt.Errorf("failed at proof-gen step %d (PC: %08x): %w", step, state.PC, err)
 			}
-			postStateHash := crypto.Keccak256Hash(state.EncodeWitness())
+			postStateHash, err := state.EncodeWitness().StateHash()
+			if err != nil {
+				return fmt.Errorf("failed to hash poststate witness: %w", err)
+			}
 			proof := &Proof{
 				Step: step,
 				Pre: preStateHash,
...
@@ -5,7 +5,6 @@ import (
 	"os"
 
 	"github.com/ethereum-optimism/optimism/cannon/mipsevm"
-	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/urfave/cli/v2"
 )
@@ -31,7 +30,10 @@ func Witness(ctx *cli.Context) error {
 		return fmt.Errorf("invalid input state (%v): %w", input, err)
 	}
 	witness := state.EncodeWitness()
-	h := crypto.Keccak256Hash(witness)
+	h, err := witness.StateHash()
+	if err != nil {
+		return fmt.Errorf("failed to compute witness hash: %w", err)
+	}
 	if output != "" {
 		if err := os.WriteFile(output, witness, 0755); err != nil {
 			return fmt.Errorf("writing output to %v: %w", output, err)
...
@@ -15,7 +15,6 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/vm"
-	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/eth/tracers/logger"
 	"github.com/stretchr/testify/require"
@@ -92,7 +91,10 @@ func (m *MIPSEVM) Step(t *testing.T, stepWitness *StepWitness) []byte {
 	logs := m.evmState.Logs()
 	require.Equal(t, 1, len(logs), "expecting a log with post-state")
 	evmPost := logs[0].Data
-	require.Equal(t, crypto.Keccak256Hash(evmPost), postHash, "logged state must be accurate")
+
+	stateHash, err := StateWitness(evmPost).StateHash()
+	require.NoError(t, err, "state hash could not be computed")
+	require.Equal(t, stateHash, postHash, "logged state must be accurate")
 	m.env.StateDB.RevertToSnapshot(snap)
 	t.Logf("EVM step took %d gas, and returned stateHash %s", startingGas-leftOverGas, postHash)
...
@@ -2,11 +2,16 @@ package mipsevm
 import (
 	"encoding/binary"
+	"fmt"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/crypto"
 )
 
+// StateWitnessSize is the size of the state witness encoding in bytes.
+var StateWitnessSize = 226
+
 type State struct {
 	Memory *Memory `json:"memory"`
@@ -37,7 +42,11 @@ type State struct {
 	LastHint hexutil.Bytes `json:"lastHint,omitempty"`
 }
 
-func (s *State) EncodeWitness() []byte {
+func (s *State) VMStatus() uint8 {
+	return vmStatus(s.Exited, s.ExitCode)
+}
+
+func (s *State) EncodeWitness() StateWitness {
 	out := make([]byte, 0)
 	memRoot := s.Memory.MerkleRoot()
 	out = append(out, memRoot[:]...)
@@ -60,3 +69,41 @@ func (s *State) EncodeWitness() []byte {
 	}
 	return out
 }
+
+type StateWitness []byte
+
+const (
+	VMStatusValid = 0
+	VMStatusInvalid = 1
+	VMStatusPanic = 2
+	VMStatusUnfinished = 3
+)
+
+func (sw StateWitness) StateHash() (common.Hash, error) {
+	if len(sw) != 226 {
+		return common.Hash{}, fmt.Errorf("Invalid witness length. Got %d, expected at least 88", len(sw))
+	}
+	hash := crypto.Keccak256Hash(sw)
+	offset := 32*2 + 4*6
+	exitCode := sw[offset]
+	exited := sw[offset+1]
+	status := vmStatus(exited == 1, exitCode)
+	hash[0] = status
+	return hash, nil
+}
+
+func vmStatus(exited bool, exitCode uint8) uint8 {
+	if !exited {
+		return VMStatusUnfinished
+	}
+	switch exitCode {
+	case 0:
+		return VMStatusValid
+	case 1:
+		return VMStatusInvalid
+	default:
+		return VMStatusPanic
+	}
+}
@@ -82,6 +82,53 @@ func TestState(t *testing.T) {
 	}
 }
 
+// Run through all permutations of `exited` / `exitCode` and ensure that the
+// correct witness, state hash, and VM Status is produced.
+func TestStateHash(t *testing.T) {
+	cases := []struct {
+		exited bool
+		exitCode uint8
+	}{
+		{exited: false, exitCode: 0},
+		{exited: false, exitCode: 1},
+		{exited: false, exitCode: 2},
+		{exited: false, exitCode: 3},
+		{exited: true, exitCode: 0},
+		{exited: true, exitCode: 1},
+		{exited: true, exitCode: 2},
+		{exited: true, exitCode: 3},
+	}
+
+	exitedOffset := 32*2 + 4*6
+	for _, c := range cases {
+		state := &State{
+			Memory: NewMemory(),
+			Exited: c.exited,
+			ExitCode: c.exitCode,
+		}
+
+		actualWitness := state.EncodeWitness()
+		actualStateHash, err := StateWitness(actualWitness).StateHash()
+		require.NoError(t, err, "Error hashing witness")
+		require.Equal(t, len(actualWitness), StateWitnessSize, "Incorrect witness size")
+
+		expectedWitness := make(StateWitness, 226)
+		memRoot := state.Memory.MerkleRoot()
+		copy(expectedWitness[:32], memRoot[:])
+		expectedWitness[exitedOffset] = c.exitCode
+		var exited uint8
+		if c.exited {
+			exited = 1
+		}
+		expectedWitness[exitedOffset+1] = uint8(exited)
+		require.Equal(t, expectedWitness[:], actualWitness[:], "Incorrect witness")
+
+		expectedStateHash := crypto.Keccak256Hash(actualWitness)
+		expectedStateHash[0] = vmStatus(c.exited, c.exitCode)
+		require.Equal(t, expectedStateHash, actualStateHash, "Incorrect state hash")
+	}
+}
+
 func TestHello(t *testing.T) {
 	elfProgram, err := elf.Open("../example/bin/hello.elf")
 	require.NoError(t, err, "open ELF file")
...
@@ -14,3 +14,4 @@ finalized and may change without notice.
 * [Manual Usage](./manual.md)
 * [Creating Traces with Cannon](./cannon.md)
 * [Automation with `op-challenger`](./run-challenger.md)
+* [Challenging Invalid Output Proposals](./invalid-proposals.md)
## Challenging Invalid Output Proposals
The dispute game factory deployed to Goerli reads from the permissioned L2 Output Oracle contract. This restricts games
to challenging valid output proposals and an honest challenger should win every game. To test creating games that
challenge an invalid output proposal, a custom chain is required. The simplest way to do this is using the end-to-end
test utilities in [`op-e2e`](https://github.com/ethereum-optimism/optimism/tree/develop/op-e2e).
A simple starting point has been provided in the `TestCannonProposedOutputRootInvalid` test case
in [`faultproof_test.go`](https://github.com/ethereum-optimism/optimism/blob/6e174ae2b2587d9ac5e2930d7574f85d254ca8b4/op-e2e/faultproof_test.go#L334).
This is a table test that takes the output root to propose, plus functions for move and step to counter the honest
claims. The test asserts that the defender always wins and thus the output root is found to be invalid.
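To make the shape of that table test concrete, here is a minimal sketch built on the `Mover`, `Stepper`, and `FaultGameHelper.ChallengeRootClaim` helpers added elsewhere in this commit. The package name, the test-case fields, and the `newInvalidOutputGame` setup helper are illustrative assumptions for this sketch, not code from the repository.

```go
package disputegame

import (
	"context"
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

// Sketch of a table test for challenging an invalid output proposal.
func TestChallengeInvalidOutputRoot_Sketch(t *testing.T) {
	tests := []struct {
		name        string
		outputRoot  common.Hash // invalid output root to propose when creating the game
		performMove Mover       // counters the honest challenger's claims
		attemptStep Stepper     // final step attempt; expected to revert with ValidStep()
	}{
		// Populate with scenarios: different invalid roots and counter-strategies.
	}
	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			// Hypothetical setup: start a devnet, run an honest op-challenger,
			// and create a game that proposes test.outputRoot.
			game := newInvalidOutputGame(t, test.outputRoot)

			// ChallengeRootClaim (added in this commit) alternates the test's moves
			// with the honest challenger's responses until max depth, then calls the
			// Stepper; because the proposed root is invalid, the step cannot succeed.
			game.ChallengeRootClaim(context.Background(), test.performMove, test.attemptStep)
		})
	}
}

// newInvalidOutputGame is a placeholder for the real game setup.
func newInvalidOutputGame(t *testing.T, outputRoot common.Hash) *FaultGameHelper {
	t.Helper()
	t.Skip("sketch only: devnet and game setup omitted")
	return nil
}
```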
@@ -120,12 +120,12 @@ func LoadConfig(logger geth_log.Logger, path string) (Config, error) {
 	}
 
 	if conf.Chain.Preset != 0 {
-		knownContracts, ok := presetL1Contracts[conf.Chain.Preset]
-		if ok {
-			conf.Chain.L1Contracts = knownContracts
-		} else {
+		knownPreset, ok := presetConfigs[conf.Chain.Preset]
+		if !ok {
 			return conf, fmt.Errorf("unknown preset: %d", conf.Chain.Preset)
 		}
+		conf.Chain.L1Contracts = knownPreset.L1Contracts
+		conf.Chain.L1StartingHeight = knownPreset.L1StartingHeight
 	}
 
 	// Set polling defaults if not set
...
@@ -54,10 +54,10 @@ func TestLoadConfig(t *testing.T) {
 	require.NoError(t, err)
 
 	require.Equal(t, conf.Chain.Preset, 420)
-	require.Equal(t, conf.Chain.L1Contracts.OptimismPortalProxy.String(), presetL1Contracts[420].OptimismPortalProxy.String())
-	require.Equal(t, conf.Chain.L1Contracts.L1CrossDomainMessengerProxy.String(), presetL1Contracts[420].L1CrossDomainMessengerProxy.String())
-	require.Equal(t, conf.Chain.L1Contracts.L1StandardBridgeProxy.String(), presetL1Contracts[420].L1StandardBridgeProxy.String())
-	require.Equal(t, conf.Chain.L1Contracts.L2OutputOracleProxy.String(), presetL1Contracts[420].L2OutputOracleProxy.String())
+	require.Equal(t, conf.Chain.L1Contracts.OptimismPortalProxy.String(), presetConfigs[420].L1Contracts.OptimismPortalProxy.String())
+	require.Equal(t, conf.Chain.L1Contracts.L1CrossDomainMessengerProxy.String(), presetConfigs[420].L1Contracts.L1CrossDomainMessengerProxy.String())
+	require.Equal(t, conf.Chain.L1Contracts.L1StandardBridgeProxy.String(), presetConfigs[420].L1Contracts.L1StandardBridgeProxy.String())
+	require.Equal(t, conf.Chain.L1Contracts.L2OutputOracleProxy.String(), presetConfigs[420].L1Contracts.L2OutputOracleProxy.String())
 	require.Equal(t, conf.RPCs.L1RPC, "https://l1.example.com")
 	require.Equal(t, conf.RPCs.L2RPC, "https://l2.example.com")
 	require.Equal(t, conf.DB.Host, "127.0.0.1")
...
@@ -5,49 +5,67 @@ import (
 )
 
 // in future presets can just be onchain config and fetched on initialization
 // Mapping of l2 chain ids to their preset chain configurations
-var presetL1Contracts = map[int]L1Contracts{
+var presetConfigs = map[int]ChainConfig{
 	// OP Mainnet
 	10: {
-		OptimismPortalProxy: common.HexToAddress("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"),
-		L2OutputOracleProxy: common.HexToAddress("0xdfe97868233d1aa22e815a266982f2cf17685a27"),
-		L1CrossDomainMessengerProxy: common.HexToAddress("0x25ace71c97B33Cc4729CF772ae268934F7ab5fA1"),
-		L1StandardBridgeProxy: common.HexToAddress("0x99C9fc46f92E8a1c0deC1b1747d010903E884bE1"),
+		L1Contracts: L1Contracts{
+			OptimismPortalProxy: common.HexToAddress("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"),
+			L2OutputOracleProxy: common.HexToAddress("0xdfe97868233d1aa22e815a266982f2cf17685a27"),
+			L1CrossDomainMessengerProxy: common.HexToAddress("0x25ace71c97B33Cc4729CF772ae268934F7ab5fA1"),
+			L1StandardBridgeProxy: common.HexToAddress("0x99C9fc46f92E8a1c0deC1b1747d010903E884bE1"),
+			// LegacyCanonicalTransactionChain: common.HexToAddress("0x5e4e65926ba27467555eb562121fac00d24e9dd2"),
+		},
+		L1StartingHeight: 13596466,
 	},
 	// OP Goerli
 	420: {
-		OptimismPortalProxy: common.HexToAddress("0x5b47E1A08Ea6d985D6649300584e6722Ec4B1383"),
-		L2OutputOracleProxy: common.HexToAddress("0xE6Dfba0953616Bacab0c9A8ecb3a9BBa77FC15c0"),
-		L1CrossDomainMessengerProxy: common.HexToAddress("0x5086d1eEF304eb5284A0f6720f79403b4e9bE294"),
-		L1StandardBridgeProxy: common.HexToAddress("0x636Af16bf2f682dD3109e60102b8E1A089FedAa8"),
+		L1Contracts: L1Contracts{
+			OptimismPortalProxy: common.HexToAddress("0x5b47E1A08Ea6d985D6649300584e6722Ec4B1383"),
+			L2OutputOracleProxy: common.HexToAddress("0xE6Dfba0953616Bacab0c9A8ecb3a9BBa77FC15c0"),
+			L1CrossDomainMessengerProxy: common.HexToAddress("0x5086d1eEF304eb5284A0f6720f79403b4e9bE294"),
+			L1StandardBridgeProxy: common.HexToAddress("0x636Af16bf2f682dD3109e60102b8E1A089FedAa8"),
+		},
+		L1StartingHeight: 7017096,
 	},
 	// Base Mainnet
 	8453: {
-		OptimismPortalProxy: common.HexToAddress("0x49048044D57e1C92A77f79988d21Fa8fAF74E97e"),
-		L2OutputOracleProxy: common.HexToAddress("0x56315b90c40730925ec5485cf004d835058518A0"),
-		L1CrossDomainMessengerProxy: common.HexToAddress("0x866E82a600A1414e583f7F13623F1aC5d58b0Afa"),
-		L1StandardBridgeProxy: common.HexToAddress("0x3154Cf16ccdb4C6d922629664174b904d80F2C35"),
+		L1Contracts: L1Contracts{
+			OptimismPortalProxy: common.HexToAddress("0x49048044D57e1C92A77f79988d21Fa8fAF74E97e"),
+			L2OutputOracleProxy: common.HexToAddress("0x56315b90c40730925ec5485cf004d835058518A0"),
+			L1CrossDomainMessengerProxy: common.HexToAddress("0x866E82a600A1414e583f7F13623F1aC5d58b0Afa"),
+			L1StandardBridgeProxy: common.HexToAddress("0x3154Cf16ccdb4C6d922629664174b904d80F2C35"),
+		},
+		L1StartingHeight: 17481768,
 	},
 	// Base Goerli
 	84531: {
-		OptimismPortalProxy: common.HexToAddress("0xe93c8cD0D409341205A592f8c4Ac1A5fe5585cfA"),
-		L2OutputOracleProxy: common.HexToAddress("0x2A35891ff30313CcFa6CE88dcf3858bb075A2298"),
-		L1CrossDomainMessengerProxy: common.HexToAddress("0x8e5693140eA606bcEB98761d9beB1BC87383706D"),
-		L1StandardBridgeProxy: common.HexToAddress("0xfA6D8Ee5BE770F84FC001D098C4bD604Fe01284a"),
+		L1Contracts: L1Contracts{
+			OptimismPortalProxy: common.HexToAddress("0xe93c8cD0D409341205A592f8c4Ac1A5fe5585cfA"),
+			L2OutputOracleProxy: common.HexToAddress("0x2A35891ff30313CcFa6CE88dcf3858bb075A2298"),
+			L1CrossDomainMessengerProxy: common.HexToAddress("0x8e5693140eA606bcEB98761d9beB1BC87383706D"),
+			L1StandardBridgeProxy: common.HexToAddress("0xfA6D8Ee5BE770F84FC001D098C4bD604Fe01284a"),
+		},
+		L1StartingHeight: 8410981,
 	},
 	// Zora mainnet
 	7777777: {
-		OptimismPortalProxy: common.HexToAddress("0x1a0ad011913A150f69f6A19DF447A0CfD9551054"),
-		L2OutputOracleProxy: common.HexToAddress("0x9E6204F750cD866b299594e2aC9eA824E2e5f95c"),
-		L1CrossDomainMessengerProxy: common.HexToAddress("0xdC40a14d9abd6F410226f1E6de71aE03441ca506"),
-		L1StandardBridgeProxy: common.HexToAddress("0x3e2Ea9B92B7E48A52296fD261dc26fd995284631"),
+		L1Contracts: L1Contracts{
+			OptimismPortalProxy: common.HexToAddress("0x1a0ad011913A150f69f6A19DF447A0CfD9551054"),
+			L2OutputOracleProxy: common.HexToAddress("0x9E6204F750cD866b299594e2aC9eA824E2e5f95c"),
+			L1CrossDomainMessengerProxy: common.HexToAddress("0xdC40a14d9abd6F410226f1E6de71aE03441ca506"),
+			L1StandardBridgeProxy: common.HexToAddress("0x3e2Ea9B92B7E48A52296fD261dc26fd995284631"),
+		},
+		L1StartingHeight: 17473923,
 	},
 	// Zora goerli
 	999: {
-		OptimismPortalProxy: common.HexToAddress("0xDb9F51790365e7dc196e7D072728df39Be958ACe"),
-		L2OutputOracleProxy: common.HexToAddress("0xdD292C9eEd00f6A32Ff5245d0BCd7f2a15f24e00"),
-		L1CrossDomainMessengerProxy: common.HexToAddress("0xD87342e16352D33170557A7dA1e5fB966a60FafC"),
-		L1StandardBridgeProxy: common.HexToAddress("0x7CC09AC2452D6555d5e0C213Ab9E2d44eFbFc956"),
+		L1Contracts: L1Contracts{
+			OptimismPortalProxy: common.HexToAddress("0xDb9F51790365e7dc196e7D072728df39Be958ACe"),
+			L2OutputOracleProxy: common.HexToAddress("0xdD292C9eEd00f6A32Ff5245d0BCd7f2a15f24e00"),
+			L1CrossDomainMessengerProxy: common.HexToAddress("0xD87342e16352D33170557A7dA1e5fB966a60FafC"),
+			L1StandardBridgeProxy: common.HexToAddress("0x7CC09AC2452D6555d5e0C213Ab9E2d44eFbFc956"),
+		},
+		L1StartingHeight: 8942381,
 	},
 }
@@ -3,7 +3,7 @@
 [chain]
 # OP Goerli
-preset = 420
+preset = $INDEXER_CHAIN_PRESET
 
 # L1 Config
 l1-polling-interval = 0
...
@@ -4,7 +4,7 @@ generator client {
 datasource db {
   provider = "postgresql"
-  url      = "postgresql://db_username:db_password@localhost:5434/db_name"
+  url      = env("DATABASE_URL")
 }
 
 model l1_bridged_tokens {
@@ -111,7 +111,7 @@ model l2_block_headers {
   hash        String @id @db.VarChar
   parent_hash String @unique @db.VarChar
   number      Decimal @unique @db.Decimal
-  timestamp   Int @unique
+  timestamp   Int
   rlp_bytes   String @db.VarChar
 
   l2_contract_events l2_contract_events[]
 }
...
@@ -12,6 +12,7 @@ version:
 compile:
 	cd $(contracts-dir) && \
+		forge clean && \
 		pnpm build
 
 bindings: compile bindings-build
...
@@ -13,7 +13,7 @@ const AlphabetVMStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":
 var AlphabetVMStorageLayout = new(solc.StorageLayout)
var AlphabetVMDeployedBin = "0x608060405234801561001057600080fd5b50600436106100365760003560e01c80637dc0d1d01461003b578063f8e0cb9614610085575b600080fd5b60005461005b9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6100986100933660046101a8565b6100a6565b60405190815260200161007c565b60008060007f000000000000000000000000000000000000000000000000000000000000000087876040516100dc929190610214565b60405180910390200361010057600091506100f986880188610224565b905061011f565b61010c8688018861023d565b90925090508161011b8161028e565b9250505b8161012b8260016102c6565b6040805160208101939093528201526060016040516020818303038152906040528051906020012092505050949350505050565b60008083601f84011261017157600080fd5b50813567ffffffffffffffff81111561018957600080fd5b6020830191508360208285010111156101a157600080fd5b9250929050565b600080600080604085870312156101be57600080fd5b843567ffffffffffffffff808211156101d657600080fd5b6101e28883890161015f565b909650945060208701359150808211156101fb57600080fd5b506102088782880161015f565b95989497509550505050565b8183823760009101908152919050565b60006020828403121561023657600080fd5b5035919050565b6000806040838503121561025057600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036102bf576102bf61025f565b5060010190565b600082198211156102d9576102d961025f565b50019056fea164736f6c634300080f000a" var AlphabetVMDeployedBin = "0x608060405234801561001057600080fd5b50600436106100365760003560e01c80637dc0d1d01461003b578063f8e0cb9614610085575b600080fd5b60005461005b9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b610098610093366004610212565b6100a6565b60405190815260200161007c565b600080600060087f0000000000000000000000000000000000000000000000000000000000000000901b600888886040516100e292919061027e565b6040518091039020901b0361010857600091506101018688018861028e565b9050610127565b610114868801886102a7565b909250905081610123816102f8565b9250505b81610133826001610330565b604080516020810193909352820152606001604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f010000000000000000000000000000000000000000000000000000000000000017979650505050505050565b60008083601f8401126101db57600080fd5b50813567ffffffffffffffff8111156101f357600080fd5b60208301915083602082850101111561020b57600080fd5b9250929050565b6000806000806040858703121561022857600080fd5b843567ffffffffffffffff8082111561024057600080fd5b61024c888389016101c9565b9096509450602087013591508082111561026557600080fd5b50610272878288016101c9565b95989497509550505050565b8183823760009101908152919050565b6000602082840312156102a057600080fd5b5035919050565b600080604083850312156102ba57600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610329576103296102c9565b5060010190565b60008219821115610343576103436102c9565b50019056fea164736f6c634300080f000a"
 func init() {
 	if err := json.Unmarshal([]byte(AlphabetVMStorageLayoutJSON), AlphabetVMStorageLayout); err != nil {
...
@@ -139,16 +139,15 @@ func (l *loader) FetchClaims(ctx context.Context) ([]types.Claim, error) {
 }
 
 // FetchAbsolutePrestateHash fetches the hashed absolute prestate from the fault dispute game.
-func (l *loader) FetchAbsolutePrestateHash(ctx context.Context) ([]byte, error) {
+func (l *loader) FetchAbsolutePrestateHash(ctx context.Context) (common.Hash, error) {
 	callOpts := bind.CallOpts{
 		Context: ctx,
 	}
 
 	absolutePrestate, err := l.caller.ABSOLUTEPRESTATE(&callOpts)
 	if err != nil {
-		return nil, err
+		return common.Hash{}, err
 	}
 
-	returnValue := absolutePrestate[:]
-	return returnValue, nil
+	return absolutePrestate, nil
 }
@@ -16,7 +16,6 @@ import (
 	"github.com/ethereum-optimism/optimism/op-service/txmgr"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
 )
@@ -159,22 +158,21 @@ func (g *GamePlayer) logGameStatus(ctx context.Context, status gameTypes.GameSta
 }
 
 type PrestateLoader interface {
-	FetchAbsolutePrestateHash(ctx context.Context) ([]byte, error)
+	FetchAbsolutePrestateHash(ctx context.Context) (common.Hash, error)
 }
 
 // ValidateAbsolutePrestate validates the absolute prestate of the fault game.
 func ValidateAbsolutePrestate(ctx context.Context, trace types.TraceProvider, loader PrestateLoader) error {
-	providerPrestate, err := trace.AbsolutePreState(ctx)
+	providerPrestateHash, err := trace.AbsolutePreStateCommitment(ctx)
 	if err != nil {
 		return fmt.Errorf("failed to get the trace provider's absolute prestate: %w", err)
 	}
-	providerPrestateHash := crypto.Keccak256(providerPrestate)
 	onchainPrestate, err := loader.FetchAbsolutePrestateHash(ctx)
 	if err != nil {
 		return fmt.Errorf("failed to get the onchain absolute prestate: %w", err)
 	}
-	if !bytes.Equal(providerPrestateHash, onchainPrestate) {
-		return fmt.Errorf("trace provider's absolute prestate does not match onchain absolute prestate")
+	if !bytes.Equal(providerPrestateHash[:], onchainPrestate[:]) {
+		return fmt.Errorf("trace provider's absolute prestate does not match onchain absolute prestate: Provider: %s | Chain %s", providerPrestateHash.Hex(), onchainPrestate.Hex())
 	}
 	return nil
 }
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"testing"
 
+	"github.com/ethereum-optimism/optimism/cannon/mipsevm"
 	"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
 	gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types"
 	"github.com/ethereum-optimism/optimism/op-node/testlog"
@@ -120,8 +121,9 @@ func TestValidateAbsolutePrestate(t *testing.T) {
 	t.Run("ValidPrestates", func(t *testing.T) {
 		prestate := []byte{0x00, 0x01, 0x02, 0x03}
 		prestateHash := crypto.Keccak256(prestate)
+		prestateHash[0] = mipsevm.VMStatusUnfinished
 		mockTraceProvider := newMockTraceProvider(false, prestate)
-		mockLoader := newMockPrestateLoader(false, prestateHash)
+		mockLoader := newMockPrestateLoader(false, common.BytesToHash(prestateHash))
 		err := ValidateAbsolutePrestate(context.Background(), mockTraceProvider, mockLoader)
 		require.NoError(t, err)
 	})
@@ -129,7 +131,7 @@ func TestValidateAbsolutePrestate(t *testing.T) {
 	t.Run("TraceProviderErrors", func(t *testing.T) {
 		prestate := []byte{0x00, 0x01, 0x02, 0x03}
 		mockTraceProvider := newMockTraceProvider(true, prestate)
-		mockLoader := newMockPrestateLoader(false, prestate)
+		mockLoader := newMockPrestateLoader(false, common.BytesToHash(prestate))
 		err := ValidateAbsolutePrestate(context.Background(), mockTraceProvider, mockLoader)
 		require.ErrorIs(t, err, mockTraceProviderError)
 	})
@@ -137,14 +139,14 @@ func TestValidateAbsolutePrestate(t *testing.T) {
 	t.Run("LoaderErrors", func(t *testing.T) {
 		prestate := []byte{0x00, 0x01, 0x02, 0x03}
 		mockTraceProvider := newMockTraceProvider(false, prestate)
-		mockLoader := newMockPrestateLoader(true, prestate)
+		mockLoader := newMockPrestateLoader(true, common.BytesToHash(prestate))
 		err := ValidateAbsolutePrestate(context.Background(), mockTraceProvider, mockLoader)
 		require.ErrorIs(t, err, mockLoaderError)
 	})
 
 	t.Run("PrestateMismatch", func(t *testing.T) {
 		mockTraceProvider := newMockTraceProvider(false, []byte{0x00, 0x01, 0x02, 0x03})
-		mockLoader := newMockPrestateLoader(false, []byte{0x00})
+		mockLoader := newMockPrestateLoader(false, common.BytesToHash([]byte{0x00}))
 		err := ValidateAbsolutePrestate(context.Background(), mockTraceProvider, mockLoader)
 		require.Error(t, err)
 	})
@@ -210,21 +212,31 @@ func (m *mockTraceProvider) AbsolutePreState(ctx context.Context) ([]byte, error
 	}
 	return m.prestate, nil
 }
 
+func (m *mockTraceProvider) AbsolutePreStateCommitment(ctx context.Context) (common.Hash, error) {
+	prestate, err := m.AbsolutePreState(ctx)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	hash := common.BytesToHash(crypto.Keccak256(prestate))
+	hash[0] = mipsevm.VMStatusUnfinished
+	return hash, nil
+}
+
 type mockLoader struct {
 	prestateError bool
-	prestate []byte
+	prestate common.Hash
 }
 
-func newMockPrestateLoader(prestateError bool, prestate []byte) *mockLoader {
+func newMockPrestateLoader(prestateError bool, prestate common.Hash) *mockLoader {
 	return &mockLoader{
 		prestateError: prestateError,
 		prestate: prestate,
 	}
 }
 
-func (m *mockLoader) FetchAbsolutePrestateHash(ctx context.Context) ([]byte, error) {
+func (m *mockLoader) FetchAbsolutePrestateHash(ctx context.Context) (common.Hash, error) {
 	if m.prestateError {
-		return nil, mockLoaderError
+		return common.Hash{}, mockLoaderError
 	}
 	return m.prestate, nil
 }
 package solver
 
 import (
+	"bytes"
 	"context"
 	"errors"
 	"fmt"
@@ -132,7 +133,7 @@ func (s *Solver) defend(ctx context.Context, claim types.Claim) (*types.Claim, e
 // agreeWithClaim returns true if the claim is correct according to the internal [TraceProvider].
 func (s *Solver) agreeWithClaim(ctx context.Context, claim types.ClaimData) (bool, error) {
 	ourValue, err := s.traceAtPosition(ctx, claim.Position)
-	return ourValue == claim.Value, err
+	return bytes.Equal(ourValue[:], claim.Value[:]), err
 }
 
 // traceAtPosition returns the [common.Hash] from internal [TraceProvider] at the given [Position].
...
@@ -6,6 +6,7 @@ import (
 	"math/big"
 	"strings"
 
+	"github.com/ethereum-optimism/optimism/cannon/mipsevm"
 	"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
@@ -58,7 +59,7 @@ func (ap *AlphabetTraceProvider) Get(ctx context.Context, i uint64) (common.Hash
 	if err != nil {
 		return common.Hash{}, err
 	}
-	return crypto.Keccak256Hash(claimBytes), nil
+	return alphabetStateHash(claimBytes), nil
 }
 
 // AbsolutePreState returns the absolute pre-state for the alphabet trace.
@@ -66,11 +67,27 @@ func (ap *AlphabetTraceProvider) AbsolutePreState(ctx context.Context) ([]byte,
 	return common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000060"), nil
 }
 
+func (ap *AlphabetTraceProvider) AbsolutePreStateCommitment(ctx context.Context) (common.Hash, error) {
+	prestate, err := ap.AbsolutePreState(ctx)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	hash := common.BytesToHash(crypto.Keccak256(prestate))
+	hash[0] = mipsevm.VMStatusUnfinished
+	return hash, nil
+}
+
 // BuildAlphabetPreimage constructs the claim bytes for the index and state item.
 func BuildAlphabetPreimage(i uint64, letter string) []byte {
 	return append(IndexToBytes(i), LetterToBytes(letter)...)
 }
 
+func alphabetStateHash(state []byte) common.Hash {
+	h := crypto.Keccak256Hash(state)
+	h[0] = mipsevm.VMStatusInvalid
+	return h
+}
+
 // IndexToBytes converts an index to a byte slice big endian
 func IndexToBytes(i uint64) []byte {
 	big := new(big.Int)
...
@@ -6,12 +6,11 @@ import (
 	"testing"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/stretchr/testify/require"
 )
 
 func alphabetClaim(index uint64, letter string) common.Hash {
-	return crypto.Keccak256Hash(BuildAlphabetPreimage(index, letter))
+	return alphabetStateHash(BuildAlphabetPreimage(index, letter))
 }
 
 // TestAlphabetProvider_Get_ClaimsByTraceIndex tests the [fault.AlphabetProvider] Get function.
@@ -60,7 +59,7 @@ func FuzzIndexToBytes(f *testing.F) {
 // returns the correct pre-image for a index.
 func TestGetStepData_Succeeds(t *testing.T) {
 	ap := NewTraceProvider("abc", 2)
-	expected := BuildAlphabetPreimage(0, "a'")
+	expected := BuildAlphabetPreimage(0, "a")
 	retrieved, proof, data, err := ap.GetStepData(context.Background(), uint64(1))
 	require.NoError(t, err)
 	require.Equal(t, expected, retrieved)
...
@@ -15,9 +15,10 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
-	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/ethereum/go-ethereum/log"
+
+	"github.com/ethereum-optimism/optimism/cannon/mipsevm"
 )
 
 const (
@@ -25,7 +26,7 @@ const (
 )
 
 type proofData struct {
-	ClaimValue hexutil.Bytes `json:"post"`
+	ClaimValue common.Hash `json:"post"`
 	StateData hexutil.Bytes `json:"state-data"`
 	ProofData hexutil.Bytes `json:"proof-data"`
 	OracleKey hexutil.Bytes `json:"oracle-key,omitempty"`
@@ -86,7 +87,7 @@ func (p *CannonTraceProvider) Get(ctx context.Context, i uint64) (common.Hash, e
 	if err != nil {
 		return common.Hash{}, err
 	}
-	value := common.BytesToHash(proof.ClaimValue)
+	value := proof.ClaimValue
 
 	if value == (common.Hash{}) {
 		return common.Hash{}, errors.New("proof missing post hash")
@@ -122,6 +123,18 @@ func (p *CannonTraceProvider) AbsolutePreState(ctx context.Context) ([]byte, err
 	return state.EncodeWitness(), nil
 }
 
+func (p *CannonTraceProvider) AbsolutePreStateCommitment(ctx context.Context) (common.Hash, error) {
+	state, err := p.AbsolutePreState(ctx)
+	if err != nil {
+		return common.Hash{}, fmt.Errorf("cannot load absolute pre-state: %w", err)
+	}
+	hash, err := mipsevm.StateWitness(state).StateHash()
+	if err != nil {
+		return common.Hash{}, fmt.Errorf("cannot hash absolute pre-state: %w", err)
+	}
+	return hash, nil
+}
+
 // loadProof will attempt to load or generate the proof data at the specified index
 // If the requested index is beyond the end of the actual trace it is extended with no-op instructions.
 func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofData, error) {
@@ -151,9 +164,13 @@ func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofDa
 		// Extend the trace out to the full length using a no-op instruction that doesn't change any state
 		// No execution is done, so no proof-data or oracle values are required.
 		witness := state.EncodeWitness()
+		witnessHash, err := mipsevm.StateWitness(witness).StateHash()
+		if err != nil {
+			return nil, fmt.Errorf("cannot hash witness: %w", err)
+		}
 		proof := &proofData{
-			ClaimValue: crypto.Keccak256(witness),
+			ClaimValue: witnessHash,
-			StateData: witness,
+			StateData: hexutil.Bytes(witness),
 			ProofData: []byte{},
 			OracleKey: nil,
 			OracleValue: nil,
...
@@ -15,7 +15,6 @@ import (
 	"github.com/ethereum-optimism/optimism/op-node/testlog"
 	"github.com/ethereum-optimism/optimism/op-service/ioutil"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/stretchr/testify/require"
 )
@@ -43,7 +42,9 @@ func TestGet(t *testing.T) {
 		value, err := provider.Get(context.Background(), 7000)
 		require.NoError(t, err)
 		require.Contains(t, generator.generated, 7000, "should have tried to generate the proof")
-		require.Equal(t, crypto.Keccak256Hash(generator.finalState.EncodeWitness()), value)
+		stateHash, err := generator.finalState.EncodeWitness().StateHash()
+		require.NoError(t, err)
+		require.Equal(t, stateHash, value)
 	})
 
 	t.Run("MissingPostHash", func(t *testing.T) {
@@ -86,7 +87,7 @@ func TestGetStepData(t *testing.T) {
 			Exited: true,
 		}
 		generator.proof = &proofData{
-			ClaimValue: common.Hash{0xaa}.Bytes(),
+			ClaimValue: common.Hash{0xaa},
 			StateData: []byte{0xbb},
 			ProofData: []byte{0xcc},
 			OracleKey: common.Hash{0xdd}.Bytes(),
@@ -111,7 +112,7 @@ func TestGetStepData(t *testing.T) {
 			Exited: true,
 		}
 		generator.proof = &proofData{
-			ClaimValue: common.Hash{0xaa}.Bytes(),
+			ClaimValue: common.Hash{0xaa},
 			StateData: []byte{0xbb},
 			ProofData: []byte{0xcc},
 			OracleKey: common.Hash{0xdd}.Bytes(),
@@ -185,7 +186,7 @@ func TestAbsolutePreState(t *testing.T) {
 			Step: 0,
 			Registers: [32]uint32{},
 		}
-		require.Equal(t, state.EncodeWitness(), preState)
+		require.Equal(t, []byte(state.EncodeWitness()), preState)
 	})
 }
...
@@ -74,6 +74,9 @@ type TraceProvider interface {
 	// AbsolutePreState is the pre-image value of the trace that transitions to the trace value at index 0
 	AbsolutePreState(ctx context.Context) (preimage []byte, err error)
+
+	// AbsolutePreStateCommitment is the commitment of the pre-image value of the trace that transitions to the trace value at index 0
+	AbsolutePreStateCommitment(ctx context.Context) (hash common.Hash, err error)
 }
 
 // ClaimData is the core of a claim. It must be unique inside a specific game.
...
@@ -65,16 +65,16 @@ func (g *FaultGameHelper) MaxDepth(ctx context.Context) int64 {
 }
 
 func (g *FaultGameHelper) waitForClaim(ctx context.Context, errorMsg string, predicate func(claim ContractClaim) bool) {
-	ctx, cancel := context.WithTimeout(ctx, time.Minute)
+	timedCtx, cancel := context.WithTimeout(ctx, time.Minute)
 	defer cancel()
-	err := wait.For(ctx, time.Second, func() (bool, error) {
-		count, err := g.game.ClaimDataLen(&bind.CallOpts{Context: ctx})
+	err := wait.For(timedCtx, time.Second, func() (bool, error) {
+		count, err := g.game.ClaimDataLen(&bind.CallOpts{Context: timedCtx})
 		if err != nil {
 			return false, fmt.Errorf("retrieve number of claims: %w", err)
 		}
 		// Search backwards because the new claims are at the end and more likely the ones we want.
 		for i := count.Int64() - 1; i >= 0; i-- {
-			claimData, err := g.game.ClaimData(&bind.CallOpts{Context: ctx}, big.NewInt(i))
+			claimData, err := g.game.ClaimData(&bind.CallOpts{Context: timedCtx}, big.NewInt(i))
 			if err != nil {
 				return false, fmt.Errorf("retrieve claim %v: %w", i, err)
 			}
@@ -127,10 +127,10 @@ func (g *FaultGameHelper) Resolve(ctx context.Context) {
 func (g *FaultGameHelper) WaitForGameStatus(ctx context.Context, expected Status) {
 	g.t.Logf("Waiting for game %v to have status %v", g.addr, expected)
-	ctx, cancel := context.WithTimeout(ctx, time.Minute)
+	timedCtx, cancel := context.WithTimeout(ctx, time.Minute)
 	defer cancel()
-	err := wait.For(ctx, time.Second, func() (bool, error) {
-		ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
+	err := wait.For(timedCtx, time.Second, func() (bool, error) {
+		ctx, cancel := context.WithTimeout(timedCtx, 30*time.Second)
 		defer cancel()
 		status, err := g.game.Status(&bind.CallOpts{Context: ctx})
 		if err != nil {
@@ -139,7 +139,60 @@ func (g *FaultGameHelper) WaitForGameStatus(ctx context.Context, expected Status
 		g.t.Logf("Game %v has state %v, waiting for state %v", g.addr, Status(status), expected)
 		return expected == Status(status), nil
 	})
-	g.require.NoError(err, "wait for game status")
+	g.require.NoErrorf(err, "wait for game status. Game state: \n%v", g.gameData(ctx))
+}
+
+// Mover is a function that either attacks or defends the claim at parentClaimIdx
+type Mover func(parentClaimIdx int64)
+
+// Stepper is a function that attempts to perform a step against the claim at parentClaimIdx
+type Stepper func(parentClaimIdx int64)
+
+// DefendRootClaim uses the supplied Mover to perform moves in an attempt to defend the root claim.
+// It is assumed that the output root being disputed is valid and that an honest op-challenger is already running.
+// When the game has reached the maximum depth it waits for the honest challenger to counter the leaf claim with step.
+func (g *FaultGameHelper) DefendRootClaim(ctx context.Context, performMove Mover) {
+	maxDepth := g.MaxDepth(ctx)
+	for claimCount := int64(1); claimCount < maxDepth; {
+		g.LogGameData(ctx)
+		claimCount++
+		// Wait for the challenger to counter
+		g.WaitForClaimCount(ctx, claimCount)
+
+		// Respond with our own move
+		performMove(claimCount - 1)
+		claimCount++
+		g.WaitForClaimCount(ctx, claimCount)
+	}
+
+	// Wait for the challenger to call step and counter our invalid claim
+	g.WaitForClaimAtMaxDepth(ctx, true)
+}
+
+// ChallengeRootClaim uses the supplied Mover and Stepper to perform moves and steps in an attempt to challenge the root claim.
+// It is assumed that the output root being disputed is invalid and that an honest op-challenger is already running.
+// When the game has reached the maximum depth it calls the Stepper to attempt to counter the leaf claim.
+// Since the output root is invalid, it should not be possible for the Stepper to call step successfully.
+func (g *FaultGameHelper) ChallengeRootClaim(ctx context.Context, performMove Mover, attemptStep Stepper) {
+	maxDepth := g.MaxDepth(ctx)
+	for claimCount := int64(1); claimCount < maxDepth; {
+		g.LogGameData(ctx)
+		// Perform our move
+		performMove(claimCount - 1)
+		claimCount++
+		g.WaitForClaimCount(ctx, claimCount)

+		// Wait for the challenger to counter
+		claimCount++
+		g.WaitForClaimCount(ctx, claimCount)
+	}
+
+	// Confirm the game has reached max depth and the last claim hasn't been countered
+	g.WaitForClaimAtMaxDepth(ctx, false)
+	g.LogGameData(ctx)
+
+	// It's on us to call step if we want to win but shouldn't be possible
+	attemptStep(maxDepth)
 }
 
 func (g *FaultGameHelper) Attack(ctx context.Context, claimIdx int64, claim common.Hash) {
@@ -156,6 +209,19 @@ func (g *FaultGameHelper) Defend(ctx context.Context, claimIdx int64, claim comm
 	g.require.NoError(err, "Defend transaction was not OK")
 }
 
+type ErrWithData interface {
+	ErrorData() interface{}
+}
+
+// StepFails attempts to call step and verifies that it fails with ValidStep()
+func (g *FaultGameHelper) StepFails(claimIdx int64, isAttack bool, stateData []byte, proof []byte) {
+	g.t.Logf("Attempting step against claim %v isAttack: %v", claimIdx, isAttack)
+	_, err := g.game.Step(g.opts, big.NewInt(claimIdx), isAttack, stateData, proof)
+	errData, ok := err.(ErrWithData)
+	g.require.Truef(ok, "Error should provide ErrorData method: %v", err)
+	g.require.Equal("0xfb4e40dd", errData.ErrorData(), "Revert reason should be abi encoded ValidStep()")
+}
+
 func (g *FaultGameHelper) gameData(ctx context.Context) string {
 	opts := &bind.CallOpts{Context: ctx}
 	maxDepth := int(g.MaxDepth(ctx))
...
@@ -9,6 +9,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/ethereum-optimism/optimism/cannon/mipsevm"
 	"github.com/ethereum-optimism/optimism/op-bindings/bindings"
 	"github.com/ethereum-optimism/optimism/op-chain-ops/deployer"
 	"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
@@ -174,6 +175,9 @@ func (h *FactoryHelper) StartCannonGameWithCorrectRoot(ctx context.Context, roll
 	provider := cannon.NewTraceProviderFromInputs(testlog.Logger(h.t, log.LvlInfo).New("role", "CorrectTrace"), metrics.NoopMetrics, cfg, inputs, cfg.Datadir)
 	rootClaim, err := provider.Get(ctx, math.MaxUint64)
 	h.require.NoError(err, "Compute correct root hash")
+	// Override the VM status to claim the root is invalid
+	// Otherwise creating the game will fail
+	rootClaim[0] = mipsevm.VMStatusInvalid
 	game := h.createCannonGame(ctx, l2BlockNumber, l1Head, rootClaim)
 	honestHelper := &HonestHelper{
...
@@ -42,3 +42,18 @@ func (h *HonestHelper) Defend(ctx context.Context, claimIdx int64) {
 	h.game.require.NoErrorf(err, "Get correct claim at trace index %v", traceIdx)
 	h.game.Defend(ctx, claimIdx, value)
 }
+
+func (h *HonestHelper) StepFails(ctx context.Context, claimIdx int64, isAttack bool) {
+	ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
+	defer cancel()
+	claim := h.game.getClaim(ctx, claimIdx)
+	pos := types.NewPositionFromGIndex(claim.Position.Uint64())
+	traceIdx := pos.TraceIndex(int(h.game.MaxDepth(ctx)))
+	if !isAttack {
+		// If we're defending, then the step will be from the trace to the next one
+		traceIdx += 1
+	}
+	prestate, proofData, _, err := h.correctTrace.GetStepData(ctx, traceIdx)
+	h.require.NoError(err, "Get step data")
+	h.game.StepFails(claimIdx, isAttack, prestate, proofData)
+}
@@ -120,8 +120,9 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
 				ListenPort: 0,
 				EnableAdmin: true,
 			},
 			L1EpochPollInterval: time.Second * 2,
+			RuntimeConfigReloadInterval: time.Minute * 10,
 			ConfigPersistence: &rollupNode.DisabledConfigPersistence{},
 		},
 		"verifier": {
 			Driver: driver.Config{
@@ -129,8 +130,9 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
 				SequencerConfDepth: 0,
 				SequencerEnabled: false,
 			},
 			L1EpochPollInterval: time.Second * 4,
+			RuntimeConfigReloadInterval: time.Minute * 10,
 			ConfigPersistence: &rollupNode.DisabledConfigPersistence{},
 		},
 	},
 	Loggers: map[string]log.Logger{
...
...@@ -30,6 +30,7 @@ import ( ...@@ -30,6 +30,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys" "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-e2e/config" "github.com/ethereum-optimism/optimism/op-e2e/config"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/client" "github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/metrics" "github.com/ethereum-optimism/optimism/op-node/metrics"
rollupNode "github.com/ethereum-optimism/optimism/op-node/node" rollupNode "github.com/ethereum-optimism/optimism/op-node/node"
...@@ -1389,3 +1390,47 @@ func TestPendingBlockIsLatest(t *testing.T) { ...@@ -1389,3 +1390,47 @@ func TestPendingBlockIsLatest(t *testing.T) {
t.Fatal("failed to get pending header with same number as latest header") t.Fatal("failed to get pending header with same number as latest header")
}) })
} }
func TestRuntimeConfigReload(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
// to speed up the test, make it reload the config more often, and do not impose a long conf depth
cfg.Nodes["verifier"].RuntimeConfigReloadInterval = time.Second * 5
cfg.Nodes["verifier"].Driver.VerifierConfDepth = 1
sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system")
defer sys.Close()
initialRuntimeConfig := sys.RollupNodes["verifier"].RuntimeConfig()
	// close the EL node, since we want to block derivation and rely solely on the reloading mechanism for updates.
sys.EthInstances["verifier"].Close()
l1 := sys.Clients["l1"]
// Change the system-config via L1
sysCfgContract, err := bindings.NewSystemConfig(cfg.L1Deployments.SystemConfigProxy, l1)
require.NoError(t, err)
newUnsafeBlocksSigner := common.Address{0x12, 0x23, 0x45}
require.NotEqual(t, initialRuntimeConfig.P2PSequencerAddress(), newUnsafeBlocksSigner, "changing to a different address")
opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.SysCfgOwner, cfg.L1ChainIDBig())
require.Nil(t, err)
// the unsafe signer address is part of the runtime config
tx, err := sysCfgContract.SetUnsafeBlockSigner(opts, newUnsafeBlocksSigner)
require.NoError(t, err)
// wait for the change to confirm
_, err = wait.ForReceiptOK(context.Background(), l1, tx.Hash())
require.NoError(t, err)
// wait for the address to change
_, err = retry.Do(context.Background(), 10, retry.Fixed(time.Second*10), func() (struct{}, error) {
v := sys.RollupNodes["verifier"].RuntimeConfig().P2PSequencerAddress()
if v == newUnsafeBlocksSigner {
return struct{}{}, nil
}
return struct{}{}, fmt.Errorf("no change yet, seeing %s but looking for %s", v, newUnsafeBlocksSigner)
})
require.NoError(t, err)
}
...@@ -146,6 +146,13 @@ var ( ...@@ -146,6 +146,13 @@ var (
Required: false, Required: false,
Value: time.Second * 12 * 32, Value: time.Second * 12 * 32,
} }
RuntimeConfigReloadIntervalFlag = &cli.DurationFlag{
Name: "l1.runtime-config-reload-interval",
Usage: "Poll interval for reloading the runtime config, useful when config events are not being picked up. Disabled if 0 or negative.",
EnvVars: prefixEnvVars("L1_RUNTIME_CONFIG_RELOAD_INTERVAL"),
Required: false,
Value: time.Minute * 10,
}
MetricsEnabledFlag = &cli.BoolFlag{ MetricsEnabledFlag = &cli.BoolFlag{
Name: "metrics.enabled", Name: "metrics.enabled",
Usage: "Enable the metrics server", Usage: "Enable the metrics server",
...@@ -261,6 +268,7 @@ var optionalFlags = []cli.Flag{ ...@@ -261,6 +268,7 @@ var optionalFlags = []cli.Flag{
SequencerMaxSafeLagFlag, SequencerMaxSafeLagFlag,
SequencerL1Confs, SequencerL1Confs,
L1EpochPollIntervalFlag, L1EpochPollIntervalFlag,
RuntimeConfigReloadIntervalFlag,
RPCEnableAdmin, RPCEnableAdmin,
RPCAdminPersistence, RPCAdminPersistence,
MetricsEnabledFlag, MetricsEnabledFlag,
......
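The new l1.runtime-config-reload-interval flag above is a urfave/cli-style duration flag whose value is read into the node config further down via ctx.Duration(...). As a minimal, self-contained sketch of that mechanism (the flag shape mirrors the definition above, but the app and env-var names here are only illustrative, not the actual op-node wiring):

package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "flag-demo",
		Flags: []cli.Flag{
			&cli.DurationFlag{
				Name:    "l1.runtime-config-reload-interval", // same shape as the flag defined above
				Usage:   "Poll interval for reloading the runtime config. Disabled if 0 or negative.",
				EnvVars: []string{"DEMO_L1_RUNTIME_CONFIG_RELOAD_INTERVAL"}, // hypothetical env var name
				Value:   10 * time.Minute,
			},
		},
		Action: func(ctx *cli.Context) error {
			// This mirrors how NewConfig reads the flag value into the node config below.
			reload := ctx.Duration("l1.runtime-config-reload-interval")
			fmt.Println("runtime config reload interval:", reload)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}

Running it as, say, go run . --l1.runtime-config-reload-interval=30s (or with the env var set) prints the parsed duration, which is the same path the real flag takes into node.Config.RuntimeConfigReloadInterval.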
...@@ -41,6 +41,12 @@ type Config struct { ...@@ -41,6 +41,12 @@ type Config struct {
ConfigPersistence ConfigPersistence ConfigPersistence ConfigPersistence
// RuntimeConfigReloadInterval defines the interval between runtime config reloads.
// Disabled if 0.
// Runtime config changes should be picked up from log-events,
// but if log-events are not coming in (e.g. not syncing blocks) then the reload ensures the config stays accurate.
RuntimeConfigReloadInterval time.Duration
// Optional // Optional
Tracer Tracer Tracer Tracer
Heartbeat HeartbeatConfig Heartbeat HeartbeatConfig
......
...@@ -2,7 +2,6 @@ package node ...@@ -2,7 +2,6 @@ package node
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"time" "time"
...@@ -19,6 +18,7 @@ import ( ...@@ -19,6 +18,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/driver" "github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/sources" "github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/retry"
) )
type OpNode struct { type OpNode struct {
...@@ -159,27 +159,70 @@ func (n *OpNode) initRuntimeConfig(ctx context.Context, cfg *Config) error { ...@@ -159,27 +159,70 @@ func (n *OpNode) initRuntimeConfig(ctx context.Context, cfg *Config) error {
// attempt to load runtime config, repeat N times // attempt to load runtime config, repeat N times
n.runCfg = NewRuntimeConfig(n.log, n.l1Source, &cfg.Rollup) n.runCfg = NewRuntimeConfig(n.log, n.l1Source, &cfg.Rollup)
for i := 0; i < 5; i++ { confDepth := cfg.Driver.VerifierConfDepth
reload := func(ctx context.Context) (eth.L1BlockRef, error) {
fetchCtx, fetchCancel := context.WithTimeout(ctx, time.Second*10) fetchCtx, fetchCancel := context.WithTimeout(ctx, time.Second*10)
l1Head, err := n.l1Source.L1BlockRefByLabel(fetchCtx, eth.Unsafe) l1Head, err := n.l1Source.L1BlockRefByLabel(fetchCtx, eth.Unsafe)
fetchCancel() fetchCancel()
if err != nil { if err != nil {
n.log.Error("failed to fetch L1 head for runtime config initialization", "err", err) n.log.Error("failed to fetch L1 head for runtime config initialization", "err", err)
continue return eth.L1BlockRef{}, err
}
// Apply confirmation-distance
blNum := l1Head.Number
if blNum >= confDepth {
blNum -= confDepth
}
fetchCtx, fetchCancel = context.WithTimeout(ctx, time.Second*10)
confirmed, err := n.l1Source.L1BlockRefByNumber(fetchCtx, blNum)
fetchCancel()
if err != nil {
n.log.Error("failed to fetch confirmed L1 block for runtime config loading", "err", err, "number", blNum)
return eth.L1BlockRef{}, err
} }
fetchCtx, fetchCancel = context.WithTimeout(ctx, time.Second*10) fetchCtx, fetchCancel = context.WithTimeout(ctx, time.Second*10)
err = n.runCfg.Load(fetchCtx, l1Head) err = n.runCfg.Load(fetchCtx, confirmed)
fetchCancel() fetchCancel()
if err != nil { if err != nil {
n.log.Error("failed to fetch runtime config data", "err", err) n.log.Error("failed to fetch runtime config data", "err", err)
continue return l1Head, err
} }
return l1Head, nil
}
return nil // initialize the runtime config before unblocking
if _, err := retry.Do(ctx, 5, retry.Fixed(time.Second*10), func() (eth.L1BlockRef, error) {
return reload(ctx)
}); err != nil {
return fmt.Errorf("failed to load runtime configuration repeatedly, last error: %w", err)
} }
return errors.New("failed to load runtime configuration repeatedly") // start a background loop, to keep reloading it at the configured reload interval
go func(ctx context.Context, reloadInterval time.Duration) {
if reloadInterval <= 0 {
n.log.Debug("not running runtime-config reloading background loop")
return
}
ticker := time.NewTicker(reloadInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
				// If the reload fails, we will try again at the next interval.
// Missing a runtime-config update is not critical, and we do not want to overwhelm the L1 RPC.
if l1Head, err := reload(ctx); err != nil {
n.log.Warn("failed to reload runtime config", "err", err)
} else {
n.log.Debug("reloaded runtime config", "l1_head", l1Head)
}
case <-ctx.Done():
return
}
}
}(n.resourcesCtx, cfg.RuntimeConfigReloadInterval) // this keeps running after initialization
return nil
} }
func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger) error { func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger) error {
...@@ -397,6 +440,10 @@ func (n *OpNode) P2P() p2p.Node { ...@@ -397,6 +440,10 @@ func (n *OpNode) P2P() p2p.Node {
return n.p2pNode return n.p2pNode
} }
func (n *OpNode) RuntimeConfig() ReadonlyRuntimeConfig {
return n.runCfg
}
// Close closes all resources. // Close closes all resources.
func (n *OpNode) Close() error { func (n *OpNode) Close() error {
var result *multierror.Error var result *multierror.Error
......
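The initRuntimeConfig changes above replace the fixed five-attempt loop with retry.Do for the initial load and add a ticker-driven background reload that is skipped when the interval is zero or negative. A stripped-down sketch of that shape using only the standard library (loadOnce and the fixed retry policy stand in for the real reload closure and op-service/retry; the names are illustrative):

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"time"
)

var attempts int

// loadOnce stands in for the real reload closure: fetch an L1 head at a
// confirmation depth and load the runtime config at that block.
// It fails the first two calls to show the retry behaviour.
func loadOnce(ctx context.Context) error {
	attempts++
	if attempts < 3 {
		return errors.New("transient RPC error")
	}
	return nil
}

// initRuntimeConfig mirrors the structure above: retry the initial load a
// fixed number of times, then keep reloading on a ticker until ctx is done.
func initRuntimeConfig(ctx context.Context, reloadInterval time.Duration) error {
	var lastErr error
	for i := 0; i < 5; i++ { // retry.Do(ctx, 5, retry.Fixed(...), reload) in the real code
		if lastErr = loadOnce(ctx); lastErr == nil {
			break
		}
		time.Sleep(time.Second)
	}
	if lastErr != nil {
		return fmt.Errorf("failed to load runtime configuration repeatedly, last error: %w", lastErr)
	}

	go func() {
		if reloadInterval <= 0 {
			return // reloading disabled, rely on event-driven updates only
		}
		ticker := time.NewTicker(reloadInterval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// A failed reload is not critical; just wait for the next tick.
				if err := loadOnce(ctx); err != nil {
					log.Println("reload failed, will retry next tick:", err)
				}
			case <-ctx.Done():
				return
			}
		}
	}()
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if err := initRuntimeConfig(ctx, 500*time.Millisecond); err != nil {
		log.Fatal(err)
	}
	<-ctx.Done()
}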
...@@ -23,6 +23,10 @@ type RuntimeCfgL1Source interface { ...@@ -23,6 +23,10 @@ type RuntimeCfgL1Source interface {
ReadStorageAt(ctx context.Context, address common.Address, storageSlot common.Hash, blockHash common.Hash) (common.Hash, error) ReadStorageAt(ctx context.Context, address common.Address, storageSlot common.Hash, blockHash common.Hash) (common.Hash, error)
} }
type ReadonlyRuntimeConfig interface {
P2PSequencerAddress() common.Address
}
// RuntimeConfig maintains runtime-configurable options. // RuntimeConfig maintains runtime-configurable options.
// These options are loaded based on initial loading + updates for every subsequent L1 block. // These options are loaded based on initial loading + updates for every subsequent L1 block.
// Only the *latest* values are maintained however, the runtime config has no concept of chain history, // Only the *latest* values are maintained however, the runtime config has no concept of chain history,
......
...@@ -82,9 +82,10 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { ...@@ -82,9 +82,10 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
ListenAddr: ctx.String(flags.PprofAddrFlag.Name), ListenAddr: ctx.String(flags.PprofAddrFlag.Name),
ListenPort: ctx.Int(flags.PprofPortFlag.Name), ListenPort: ctx.Int(flags.PprofPortFlag.Name),
}, },
P2P: p2pConfig, P2P: p2pConfig,
P2PSigner: p2pSignerSetup, P2PSigner: p2pSignerSetup,
L1EpochPollInterval: ctx.Duration(flags.L1EpochPollIntervalFlag.Name), L1EpochPollInterval: ctx.Duration(flags.L1EpochPollIntervalFlag.Name),
RuntimeConfigReloadInterval: ctx.Duration(flags.RuntimeConfigReloadIntervalFlag.Name),
Heartbeat: node.HeartbeatConfig{ Heartbeat: node.HeartbeatConfig{
Enabled: ctx.Bool(flags.HeartbeatEnabledFlag.Name), Enabled: ctx.Bool(flags.HeartbeatEnabledFlag.Name),
Moniker: ctx.String(flags.HeartbeatMonikerFlag.Name), Moniker: ctx.String(flags.HeartbeatMonikerFlag.Name),
......
...@@ -18,9 +18,10 @@ COPY --from=builder /app/entrypoint.sh /bin/entrypoint.sh ...@@ -18,9 +18,10 @@ COPY --from=builder /app/entrypoint.sh /bin/entrypoint.sh
COPY --from=builder /app/bin/ufm /bin/ufm COPY --from=builder /app/bin/ufm /bin/ufm
RUN apk update && \ RUN apk update && \
apk add ca-certificates && \
chmod +x /bin/entrypoint.sh chmod +x /bin/entrypoint.sh
RUN apk add ca-certificates jq curl bind-tools
VOLUME /etc/ufm VOLUME /etc/ufm
EXPOSE 8080 EXPOSE 8080
......
...@@ -39,12 +39,6 @@ address = "0x0000000000000000000000000000000000000000" ...@@ -39,12 +39,6 @@ address = "0x0000000000000000000000000000000000000000"
private_key = "0000000000000000000000000000000000000000000000000000000000000000" private_key = "0000000000000000000000000000000000000000000000000000000000000000"
# Transaction value in wei # Transaction value in wei
tx_value = 100000000000000 tx_value = 100000000000000
# Gas limit
gas_limit = 21000
# Gas tip cap
gas_tip_cap = 2000000000
# Fee cap
gas_fee_cap = 20000000000
[providers.p1] [providers.p1]
# URL to the RPC provider # URL to the RPC provider
...@@ -52,13 +46,15 @@ url = "http://localhost:8551" ...@@ -52,13 +46,15 @@ url = "http://localhost:8551"
# Read only providers are only used to check for transactions # Read only providers are only used to check for transactions
read_only = true read_only = true
# Interval to poll the provider for expected transactions # Interval to poll the provider for expected transactions
read_interval = "1s" read_interval = "10s"
# Interval to submit new transactions to the provider # Interval to submit new transactions to the provider
send_interval = "5s" send_interval = "30s"
# Wallet to be used for sending transactions # Retry interval when a send gets an "already known" txpool error
wallet = "default" send_transaction_retry_interval = "100ms"
# Network to pool transactions, i.e. providers in the same network will check transactions from each other # Max time to retry
network = "op-goerli" send_transaction_retry_timeout = "5s"
# Cool-down interval between transactions sent to the same network
send_transaction_cool_down = "30s"
# Interval between receipt retrieval # Interval between receipt retrieval
receipt_retrieval_interval = "500ms" receipt_retrieval_interval = "500ms"
# Max time to check for receipt # Max time to check for receipt
...@@ -72,13 +68,15 @@ url = "http://localhost:8552" ...@@ -72,13 +68,15 @@ url = "http://localhost:8552"
# Read only providers are only used to check for transactions # Read only providers are only used to check for transactions
read_only = false read_only = false
# Interval to poll the provider for expected transactions # Interval to poll the provider for expected transactions
read_interval = "2s" read_interval = "10s"
# Interval to submit new transactions to the provider # Interval to submit new transactions to the provider
send_interval = "3s" send_interval = "30s"
# Wallet to be used for sending transactions # Retry interval when a send gets an "already known" txpool error
wallet = "default" send_transaction_retry_interval = "100ms"
# Network to pool transactions, i.e. providers in the same network will check transactions from each other # Max time to retry
network = "op-goerli" send_transaction_retry_timeout = "5s"
# Cool-down interval between transactions sent to the same network
send_transaction_cool_down = "30s"
# Interval between receipt retrieval # Interval between receipt retrieval
receipt_retrieval_interval = "500ms" receipt_retrieval_interval = "500ms"
# Max time to check for receipt # Max time to check for receipt
......
...@@ -48,10 +48,7 @@ type WalletConfig struct { ...@@ -48,10 +48,7 @@ type WalletConfig struct {
PrivateKey string `toml:"private_key"` PrivateKey string `toml:"private_key"`
// transaction parameters // transaction parameters
TxValue big.Int `toml:"tx_value"` TxValue big.Int `toml:"tx_value"`
GasLimit uint64 `toml:"gas_limit"`
GasTipCap big.Int `toml:"gas_tip_cap"`
GasFeeCap big.Int `toml:"gas_fee_cap"`
} }
type ProviderConfig struct { type ProviderConfig struct {
...@@ -64,6 +61,7 @@ type ProviderConfig struct { ...@@ -64,6 +61,7 @@ type ProviderConfig struct {
SendInterval TOMLDuration `toml:"send_interval"` SendInterval TOMLDuration `toml:"send_interval"`
SendTransactionRetryInterval TOMLDuration `toml:"send_transaction_retry_interval"` SendTransactionRetryInterval TOMLDuration `toml:"send_transaction_retry_interval"`
SendTransactionRetryTimeout TOMLDuration `toml:"send_transaction_retry_timeout"` SendTransactionRetryTimeout TOMLDuration `toml:"send_transaction_retry_timeout"`
SendTransactionCoolDown TOMLDuration `toml:"send_transaction_cool_down"`
ReceiptRetrievalInterval TOMLDuration `toml:"receipt_retrieval_interval"` ReceiptRetrievalInterval TOMLDuration `toml:"receipt_retrieval_interval"`
ReceiptRetrievalTimeout TOMLDuration `toml:"receipt_retrieval_timeout"` ReceiptRetrievalTimeout TOMLDuration `toml:"receipt_retrieval_timeout"`
...@@ -130,12 +128,6 @@ func (c *Config) Validate() error { ...@@ -130,12 +128,6 @@ func (c *Config) Validate() error {
if wallet.TxValue.BitLen() == 0 { if wallet.TxValue.BitLen() == 0 {
return errors.Errorf("wallet [%s] tx_value is missing", name) return errors.Errorf("wallet [%s] tx_value is missing", name)
} }
if wallet.GasLimit == 0 {
return errors.Errorf("wallet [%s] gas_limit is missing", name)
}
if wallet.GasFeeCap.BitLen() == 0 {
return errors.Errorf("wallet [%s] gas_fee_cap is missing", name)
}
} }
for name, provider := range c.Providers { for name, provider := range c.Providers {
...@@ -154,6 +146,9 @@ func (c *Config) Validate() error { ...@@ -154,6 +146,9 @@ func (c *Config) Validate() error {
if provider.SendTransactionRetryTimeout == 0 { if provider.SendTransactionRetryTimeout == 0 {
return errors.Errorf("provider [%s] send_transaction_retry_timeout is missing", name) return errors.Errorf("provider [%s] send_transaction_retry_timeout is missing", name)
} }
if provider.SendTransactionCoolDown == 0 {
return errors.Errorf("provider [%s] send_transaction_cool_down is missing", name)
}
if provider.ReceiptRetrievalInterval == 0 { if provider.ReceiptRetrievalInterval == 0 {
return errors.Errorf("provider [%s] receipt_retrieval_interval is missing", name) return errors.Errorf("provider [%s] receipt_retrieval_interval is missing", name)
} }
......
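The new send_transaction_cool_down field follows the same TOMLDuration pattern as the other intervals in ProviderConfig, and Validate() treats a zero value as "missing". As a rough sketch of how duration strings like "30s" typically end up in such a struct (the field names are copied from the hunk above, but the TOMLDuration implementation and the use of BurntSushi/toml here are assumptions, not necessarily how op-ufm does it):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

// TOMLDuration lets `send_interval = "30s"` style values decode into a time.Duration.
type TOMLDuration time.Duration

func (d *TOMLDuration) UnmarshalText(text []byte) error {
	v, err := time.ParseDuration(string(text))
	if err != nil {
		return err
	}
	*d = TOMLDuration(v)
	return nil
}

type ProviderConfig struct {
	SendInterval            TOMLDuration `toml:"send_interval"`
	SendTransactionCoolDown TOMLDuration `toml:"send_transaction_cool_down"`
}

func main() {
	const doc = `
send_interval = "30s"
send_transaction_cool_down = "30s"
`
	var cfg ProviderConfig
	if _, err := toml.Decode(doc, &cfg); err != nil {
		log.Fatal(err)
	}
	// Mirrors the Validate() check above: a zero value means the field is missing.
	if cfg.SendTransactionCoolDown == 0 {
		log.Fatal("send_transaction_cool_down is missing")
	}
	fmt.Println("cool down:", time.Duration(cfg.SendTransactionCoolDown))
}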
...@@ -2,6 +2,7 @@ package clients ...@@ -2,6 +2,7 @@ package clients
import ( import (
"context" "context"
"math/big"
"time" "time"
"github.com/ethereum-optimism/optimism/op-ufm/pkg/metrics" "github.com/ethereum-optimism/optimism/op-ufm/pkg/metrics"
...@@ -22,7 +23,7 @@ func Dial(providerName string, url string) (*InstrumentedEthClient, error) { ...@@ -22,7 +23,7 @@ func Dial(providerName string, url string) (*InstrumentedEthClient, error) {
start := time.Now() start := time.Now()
c, err := ethclient.Dial(url) c, err := ethclient.Dial(url)
if err != nil { if err != nil {
metrics.RecordError(providerName, "ethclient.Dial") metrics.RecordErrorDetails(providerName, "ethclient.Dial", err)
return nil, err return nil, err
} }
metrics.RecordRPCLatency(providerName, "ethclient", "Dial", time.Since(start)) metrics.RecordRPCLatency(providerName, "ethclient", "Dial", time.Since(start))
...@@ -34,7 +35,7 @@ func (i *InstrumentedEthClient) TransactionByHash(ctx context.Context, hash comm ...@@ -34,7 +35,7 @@ func (i *InstrumentedEthClient) TransactionByHash(ctx context.Context, hash comm
tx, isPending, err := i.c.TransactionByHash(ctx, hash) tx, isPending, err := i.c.TransactionByHash(ctx, hash)
if err != nil { if err != nil {
if !i.ignorableErrors(err) { if !i.ignorableErrors(err) {
metrics.RecordError(i.providerName, "ethclient.TransactionByHash") metrics.RecordErrorDetails(i.providerName, "ethclient.TransactionByHash", err)
} }
return nil, false, err return nil, false, err
} }
...@@ -46,7 +47,7 @@ func (i *InstrumentedEthClient) PendingNonceAt(ctx context.Context, address stri ...@@ -46,7 +47,7 @@ func (i *InstrumentedEthClient) PendingNonceAt(ctx context.Context, address stri
start := time.Now() start := time.Now()
nonce, err := i.c.PendingNonceAt(ctx, common.HexToAddress(address)) nonce, err := i.c.PendingNonceAt(ctx, common.HexToAddress(address))
if err != nil { if err != nil {
metrics.RecordError(i.providerName, "ethclient.PendingNonceAt") metrics.RecordErrorDetails(i.providerName, "ethclient.PendingNonceAt", err)
return 0, err return 0, err
} }
metrics.RecordRPCLatency(i.providerName, "ethclient", "PendingNonceAt", time.Since(start)) metrics.RecordRPCLatency(i.providerName, "ethclient", "PendingNonceAt", time.Since(start))
...@@ -58,7 +59,7 @@ func (i *InstrumentedEthClient) TransactionReceipt(ctx context.Context, txHash c ...@@ -58,7 +59,7 @@ func (i *InstrumentedEthClient) TransactionReceipt(ctx context.Context, txHash c
receipt, err := i.c.TransactionReceipt(ctx, txHash) receipt, err := i.c.TransactionReceipt(ctx, txHash)
if err != nil { if err != nil {
if !i.ignorableErrors(err) { if !i.ignorableErrors(err) {
metrics.RecordError(i.providerName, "ethclient.TransactionReceipt") metrics.RecordErrorDetails(i.providerName, "ethclient.TransactionReceipt", err)
} }
return nil, err return nil, err
} }
...@@ -71,7 +72,7 @@ func (i *InstrumentedEthClient) SendTransaction(ctx context.Context, tx *types.T ...@@ -71,7 +72,7 @@ func (i *InstrumentedEthClient) SendTransaction(ctx context.Context, tx *types.T
err := i.c.SendTransaction(ctx, tx) err := i.c.SendTransaction(ctx, tx)
if err != nil { if err != nil {
if !i.ignorableErrors(err) { if !i.ignorableErrors(err) {
metrics.RecordError(i.providerName, "ethclient.SendTransaction") metrics.RecordErrorDetails(i.providerName, "ethclient.SendTransaction", err)
} }
return err return err
} }
...@@ -79,6 +80,39 @@ func (i *InstrumentedEthClient) SendTransaction(ctx context.Context, tx *types.T ...@@ -79,6 +80,39 @@ func (i *InstrumentedEthClient) SendTransaction(ctx context.Context, tx *types.T
return err return err
} }
func (i *InstrumentedEthClient) EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64, error) {
start := time.Now()
gas, err := i.c.EstimateGas(ctx, msg)
if err != nil {
metrics.RecordErrorDetails(i.providerName, "ethclient.EstimateGas", err)
return 0, err
}
metrics.RecordRPCLatency(i.providerName, "ethclient", "EstimateGas", time.Since(start))
return gas, err
}
func (i *InstrumentedEthClient) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
start := time.Now()
gasTipCap, err := i.c.SuggestGasTipCap(ctx)
if err != nil {
metrics.RecordErrorDetails(i.providerName, "ethclient.SuggestGasTipCap", err)
return nil, err
}
metrics.RecordRPCLatency(i.providerName, "ethclient", "SuggestGasTipCap", time.Since(start))
return gasTipCap, err
}
func (i *InstrumentedEthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
start := time.Now()
header, err := i.c.HeaderByNumber(ctx, number)
if err != nil {
metrics.RecordErrorDetails(i.providerName, "ethclient.HeaderByNumber", err)
return nil, err
}
metrics.RecordRPCLatency(i.providerName, "ethclient", "HeaderByNumber", time.Since(start))
return header, err
}
func (i *InstrumentedEthClient) ignorableErrors(err error) bool { func (i *InstrumentedEthClient) ignorableErrors(err error) bool {
msg := err.Error() msg := err.Error()
	// we don't use errors.Is because eth client actually uses errors.New,	// we don't use errors.Is because eth client actually uses errors.New,
......
...@@ -22,7 +22,7 @@ func NewSignerClient(providerName string, logger log.Logger, endpoint string, tl ...@@ -22,7 +22,7 @@ func NewSignerClient(providerName string, logger log.Logger, endpoint string, tl
start := time.Now() start := time.Now()
c, err := signer.NewSignerClient(logger, endpoint, tlsConfig) c, err := signer.NewSignerClient(logger, endpoint, tlsConfig)
if err != nil { if err != nil {
metrics.RecordError(providerName, "signer.NewSignerClient") metrics.RecordErrorDetails(providerName, "signer.NewSignerClient", err)
return nil, err return nil, err
} }
metrics.RecordRPCLatency(providerName, "signer", "NewSignerClient", time.Since(start)) metrics.RecordRPCLatency(providerName, "signer", "NewSignerClient", time.Since(start))
...@@ -33,7 +33,7 @@ func (i *InstrumentedSignerClient) SignTransaction(ctx context.Context, chainId ...@@ -33,7 +33,7 @@ func (i *InstrumentedSignerClient) SignTransaction(ctx context.Context, chainId
start := time.Now() start := time.Now()
tx, err := i.c.SignTransaction(ctx, chainId, tx) tx, err := i.c.SignTransaction(ctx, chainId, tx)
if err != nil { if err != nil {
metrics.RecordError(i.providerName, "signer.SignTransaction") metrics.RecordErrorDetails(i.providerName, "signer.SignTransaction", err)
return nil, err return nil, err
} }
metrics.RecordRPCLatency(i.providerName, "signer", "SignTransaction", time.Since(start)) metrics.RecordRPCLatency(i.providerName, "signer", "SignTransaction", time.Since(start))
......
...@@ -2,6 +2,7 @@ package provider ...@@ -2,6 +2,7 @@ package provider
import ( import (
"context" "context"
"math/big"
"time" "time"
"github.com/ethereum-optimism/optimism/op-ufm/pkg/metrics" "github.com/ethereum-optimism/optimism/op-ufm/pkg/metrics"
...@@ -21,7 +22,7 @@ import ( ...@@ -21,7 +22,7 @@ import (
// RoundTrip send a new transaction to measure round trip latency // RoundTrip send a new transaction to measure round trip latency
func (p *Provider) RoundTrip(ctx context.Context) { func (p *Provider) RoundTrip(ctx context.Context) {
log.Debug("roundTripLatency", log.Debug("RoundTrip",
"provider", p.name) "provider", p.name)
client, err := iclients.Dial(p.name, p.config.URL) client, err := iclients.Dial(p.name, p.config.URL)
...@@ -33,33 +34,38 @@ func (p *Provider) RoundTrip(ctx context.Context) { ...@@ -33,33 +34,38 @@ func (p *Provider) RoundTrip(ctx context.Context) {
return return
} }
var nonce uint64 p.txPool.ExclusiveSend.Lock()
p.txPool.M.Lock() defer p.txPool.ExclusiveSend.Unlock()
if p.txPool.Nonce == uint64(0) {
nonce, err = client.PendingNonceAt(ctx, p.walletConfig.Address)
if err != nil {
log.Error("cant get nounce",
"provider", p.name,
"err", err)
p.txPool.M.Unlock()
return
}
p.txPool.Nonce = nonce
} else {
p.txPool.Nonce++
nonce = p.txPool.Nonce
}
p.txPool.M.Unlock()
txHash := common.Hash{} txHash := common.Hash{}
attempt := 0 attempt := 0
nonce := uint64(0)
// used for timeout // used for timeout
firstAttemptAt := time.Now() firstAttemptAt := time.Now()
// used for actual round trip time (disregard retry time) // used for actual round trip time (disregard retry time)
roundTripStartedAt := time.Now() var roundTripStartedAt time.Time
for { for {
tx := p.createTx(nonce)
txHash = tx.Hash() // sleep until we get a clear to send
for {
coolDown := time.Duration(p.config.SendTransactionCoolDown) - time.Since(p.txPool.LastSend)
if coolDown > 0 {
time.Sleep(coolDown)
} else {
break
}
}
tx, err := p.createTx(ctx, client, nonce)
nonce = tx.Nonce()
if err != nil {
log.Error("cant create tx",
"provider", p.name,
"nonce", nonce,
"err", err)
return
}
signedTx, err := p.sign(ctx, tx) signedTx, err := p.sign(ctx, tx)
if err != nil { if err != nil {
...@@ -69,7 +75,6 @@ func (p *Provider) RoundTrip(ctx context.Context) { ...@@ -69,7 +75,6 @@ func (p *Provider) RoundTrip(ctx context.Context) {
"err", err) "err", err)
return return
} }
txHash = signedTx.Hash() txHash = signedTx.Hash()
roundTripStartedAt = time.Now() roundTripStartedAt = time.Now()
...@@ -78,25 +83,29 @@ func (p *Provider) RoundTrip(ctx context.Context) { ...@@ -78,25 +83,29 @@ func (p *Provider) RoundTrip(ctx context.Context) {
if err.Error() == txpool.ErrAlreadyKnown.Error() || if err.Error() == txpool.ErrAlreadyKnown.Error() ||
err.Error() == txpool.ErrReplaceUnderpriced.Error() || err.Error() == txpool.ErrReplaceUnderpriced.Error() ||
err.Error() == core.ErrNonceTooLow.Error() { err.Error() == core.ErrNonceTooLow.Error() {
log.Warn("cant send transaction (retryable)",
"provider", p.name,
"err", err,
"nonce", nonce)
if time.Since(firstAttemptAt) >= time.Duration(p.config.SendTransactionRetryTimeout) { if time.Since(firstAttemptAt) >= time.Duration(p.config.SendTransactionRetryTimeout) {
log.Error("send transaction timed out (known already)", log.Error("send transaction timed out (known already)",
"provider", p.name, "provider", p.name,
"hash", txHash.Hex(), "hash", txHash.Hex(),
"nonce", nonce,
"elapsed", time.Since(firstAttemptAt), "elapsed", time.Since(firstAttemptAt),
"attempt", attempt, "attempt", attempt)
"nonce", nonce) metrics.RecordErrorDetails(p.name, "send.timeout", err)
metrics.RecordError(p.name, "ethclient.SendTransaction.nonce")
return return
} }
log.Warn("tx already known, incrementing nonce and trying again", log.Warn("tx already known, incrementing nonce and trying again",
"provider", p.name, "provider", p.name,
"nonce", nonce) "nonce", nonce)
time.Sleep(time.Duration(p.config.SendTransactionRetryInterval)) time.Sleep(time.Duration(p.config.SendTransactionRetryInterval))
p.txPool.M.Lock() nonce++
p.txPool.Nonce++
nonce = p.txPool.Nonce
p.txPool.M.Unlock()
attempt++ attempt++
if attempt%10 == 0 { if attempt%10 == 0 {
log.Debug("retrying send transaction...", log.Debug("retrying send transaction...",
...@@ -108,6 +117,7 @@ func (p *Provider) RoundTrip(ctx context.Context) { ...@@ -108,6 +117,7 @@ func (p *Provider) RoundTrip(ctx context.Context) {
} else { } else {
log.Error("cant send transaction", log.Error("cant send transaction",
"provider", p.name, "provider", p.name,
"nonce", nonce,
"err", err) "err", err)
metrics.RecordErrorDetails(p.name, "ethclient.SendTransaction", err) metrics.RecordErrorDetails(p.name, "ethclient.SendTransaction", err)
return return
...@@ -131,6 +141,7 @@ func (p *Provider) RoundTrip(ctx context.Context) { ...@@ -131,6 +141,7 @@ func (p *Provider) RoundTrip(ctx context.Context) {
SentAt: sentAt, SentAt: sentAt,
SeenBy: make(map[string]time.Time), SeenBy: make(map[string]time.Time),
} }
p.txPool.LastSend = sentAt
p.txPool.M.Unlock() p.txPool.M.Unlock()
var receipt *types.Receipt var receipt *types.Receipt
...@@ -140,13 +151,17 @@ func (p *Provider) RoundTrip(ctx context.Context) { ...@@ -140,13 +151,17 @@ func (p *Provider) RoundTrip(ctx context.Context) {
log.Error("receipt retrieval timed out", log.Error("receipt retrieval timed out",
"provider", p.name, "provider", p.name,
"hash", txHash, "hash", txHash,
"nonce", nonce,
"elapsed", time.Since(sentAt)) "elapsed", time.Since(sentAt))
metrics.RecordErrorDetails(p.name, "receipt.timeout", err)
return return
} }
time.Sleep(time.Duration(p.config.ReceiptRetrievalInterval)) time.Sleep(time.Duration(p.config.ReceiptRetrievalInterval))
if attempt%10 == 0 { if attempt%10 == 0 {
log.Debug("checking for receipt...", log.Debug("checking for receipt...",
"provider", p.name, "provider", p.name,
"hash", txHash,
"nonce", nonce,
"attempt", attempt, "attempt", attempt,
"elapsed", time.Since(sentAt)) "elapsed", time.Since(sentAt))
} }
...@@ -155,6 +170,7 @@ func (p *Provider) RoundTrip(ctx context.Context) { ...@@ -155,6 +170,7 @@ func (p *Provider) RoundTrip(ctx context.Context) {
log.Error("cant get receipt for transaction", log.Error("cant get receipt for transaction",
"provider", p.name, "provider", p.name,
"hash", txHash.Hex(), "hash", txHash.Hex(),
"nonce", nonce,
"err", err) "err", err)
return return
} }
...@@ -168,6 +184,7 @@ func (p *Provider) RoundTrip(ctx context.Context) { ...@@ -168,6 +184,7 @@ func (p *Provider) RoundTrip(ctx context.Context) {
log.Info("got transaction receipt", log.Info("got transaction receipt",
"hash", txHash.Hex(), "hash", txHash.Hex(),
"nonce", nonce,
"roundTripLatency", roundTripLatency, "roundTripLatency", roundTripLatency,
"provider", p.name, "provider", p.name,
"blockNumber", receipt.BlockNumber, "blockNumber", receipt.BlockNumber,
...@@ -175,20 +192,83 @@ func (p *Provider) RoundTrip(ctx context.Context) { ...@@ -175,20 +192,83 @@ func (p *Provider) RoundTrip(ctx context.Context) {
"gasUsed", receipt.GasUsed) "gasUsed", receipt.GasUsed)
} }
func (p *Provider) createTx(nonce uint64) *types.Transaction { func (p *Provider) createTx(ctx context.Context, client *iclients.InstrumentedEthClient, nonce uint64) (*types.Transaction, error) {
toAddress := common.HexToAddress(p.walletConfig.Address) var err error
if nonce == 0 {
nonce, err = client.PendingNonceAt(ctx, p.walletConfig.Address)
if err != nil {
log.Error("cant get nounce",
"provider", p.name,
"nonce", nonce,
"err", err)
return nil, err
}
}
gasTipCap, err := client.SuggestGasTipCap(ctx)
if err != nil {
log.Error("cant get gas tip cap",
"provider", p.name,
"err", err)
return nil, err
}
gasTipCap = new(big.Int).Mul(gasTipCap, big.NewInt(110))
gasTipCap = new(big.Int).Div(gasTipCap, big.NewInt(100))
head, err := client.HeaderByNumber(ctx, nil)
if err != nil {
log.Error("cant get base fee from head",
"provider", p.name,
"err", err)
return nil, err
}
baseFee := head.BaseFee
gasFeeCap := new(big.Int).Add(
gasTipCap,
new(big.Int).Mul(baseFee, big.NewInt(2)))
addr := common.HexToAddress(p.walletConfig.Address)
var data []byte var data []byte
tx := types.NewTx(&types.DynamicFeeTx{ dynamicTx := &types.DynamicFeeTx{
ChainID: &p.walletConfig.ChainID, ChainID: &p.walletConfig.ChainID,
Nonce: nonce, Nonce: nonce,
GasFeeCap: &p.walletConfig.GasFeeCap, GasFeeCap: gasFeeCap,
GasTipCap: &p.walletConfig.GasTipCap, GasTipCap: gasTipCap,
Gas: p.walletConfig.GasLimit, To: &addr,
To: &toAddress,
Value: &p.walletConfig.TxValue, Value: &p.walletConfig.TxValue,
Data: data, Data: data,
}
gas, err := client.EstimateGas(ctx, ethereum.CallMsg{
From: addr,
To: &addr,
GasFeeCap: gasFeeCap,
GasTipCap: gasTipCap,
Data: dynamicTx.Data,
Value: dynamicTx.Value,
}) })
return tx if err != nil {
log.Error("cant estimate gas",
"provider", p.name,
"err", err)
return nil, err
}
dynamicTx.Gas = gas
tx := types.NewTx(dynamicTx)
log.Info("tx created",
"provider", p.name,
"from", addr,
"to", dynamicTx.To,
"nonce", dynamicTx.Nonce,
"value", dynamicTx.Value,
"gas", dynamicTx.Gas,
"gasTipCap", dynamicTx.GasTipCap,
"gasFeeCap", dynamicTx.GasFeeCap,
)
return tx, nil
} }
func (p *Provider) sign(ctx context.Context, tx *types.Transaction) (*types.Transaction, error) { func (p *Provider) sign(ctx context.Context, tx *types.Transaction) (*types.Transaction, error) {
......
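createTx above replaces the static gas_limit/gas_tip_cap/gas_fee_cap wallet settings with dynamic values: the suggested tip is bumped by 10%, and the fee cap is set to tip + 2 * baseFee, with the gas limit coming from EstimateGas. A small worked example of that fee arithmetic (the formula is taken from the code above; the input numbers are made up):

package main

import (
	"fmt"
	"math/big"
)

// feeCaps applies the same arithmetic as createTx above: the tip is increased
// by 10%, and the fee cap leaves room for the base fee to double.
func feeCaps(suggestedTip, baseFee *big.Int) (gasTipCap, gasFeeCap *big.Int) {
	gasTipCap = new(big.Int).Mul(suggestedTip, big.NewInt(110))
	gasTipCap = new(big.Int).Div(gasTipCap, big.NewInt(100))
	gasFeeCap = new(big.Int).Add(gasTipCap, new(big.Int).Mul(baseFee, big.NewInt(2)))
	return gasTipCap, gasFeeCap
}

func main() {
	gwei := big.NewInt(1_000_000_000)
	suggestedTip := new(big.Int).Mul(big.NewInt(2), gwei) // 2 gwei suggested tip
	baseFee := new(big.Int).Mul(big.NewInt(100), gwei)    // 100 gwei base fee

	tip, feeCap := feeCaps(suggestedTip, baseFee)
	// 2 gwei * 1.10 = 2.2 gwei tip; 2.2 + 2*100 = 202.2 gwei fee cap
	fmt.Println("gasTipCap (wei):", tip)    // 2200000000
	fmt.Println("gasFeeCap (wei):", feeCap) // 202200000000
}

The 2x base-fee headroom means the transaction stays valid even if the base fee doubles between estimation and inclusion, at the cost of a higher upfront balance requirement.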
...@@ -15,7 +15,11 @@ type NetworkTransactionPool struct { ...@@ -15,7 +15,11 @@ type NetworkTransactionPool struct {
M sync.Mutex M sync.Mutex
Transactions map[string]*TransactionState Transactions map[string]*TransactionState
Expected int Expected int
Nonce uint64
// Last time a transaction was sent
LastSend time.Time
	// Prevents concurrent transaction sends
ExclusiveSend sync.Mutex
} }
type TransactionState struct { type TransactionState struct {
......
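LastSend and ExclusiveSend back the new cool-down logic in RoundTrip above: the sender holds ExclusiveSend and waits until send_transaction_cool_down has elapsed since the previous send. Because LastSend cannot move while the lock is held, the polling loop above is effectively a single sleep; a minimal sketch of that equivalent gate (names are illustrative, not the op-ufm API):

package main

import (
	"fmt"
	"time"
)

// waitForCoolDown sleeps until at least coolDown has passed since lastSend.
// With ExclusiveSend held, lastSend cannot be updated concurrently, so one
// sleep is enough.
func waitForCoolDown(lastSend time.Time, coolDown time.Duration) {
	if remaining := coolDown - time.Since(lastSend); remaining > 0 {
		time.Sleep(remaining)
	}
}

func main() {
	lastSend := time.Now().Add(-20 * time.Second) // pretend the last tx went out 20s ago
	start := time.Now()
	waitForCoolDown(lastSend, 30*time.Second) // send_transaction_cool_down = "30s"
	fmt.Printf("waited %s before sending the next transaction\n", time.Since(start).Round(time.Second))
}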
...@@ -26,3 +26,6 @@ deployments/hardhat ...@@ -26,3 +26,6 @@ deployments/hardhat
deployments/getting-started deployments/getting-started
deployments/*/.deploy deployments/*/.deploy
deployments/1337 deployments/1337
# Devnet config which changes with each 'make devnet-up'
deploy-config/devnetL1.json
...@@ -138,6 +138,9 @@ Test contracts should be named one of the following according to their use: ...@@ -138,6 +138,9 @@ Test contracts should be named one of the following according to their use:
- `TargetContract_Function_Test` for contracts containing happy path tests for a given function. - `TargetContract_Function_Test` for contracts containing happy path tests for a given function.
- `TargetContract_Function_TestFail` for contracts containing sad path tests for a given function. - `TargetContract_Function_TestFail` for contracts containing sad path tests for a given function.
To minimize clutter, getter functions can be grouped together into a single test contract,
e.g. `TargetContract_Getters_Test`.
## Withdrawing From Fee Vaults ## Withdrawing From Fee Vaults
See the file `scripts/FeeVaultWithdrawal.s.sol` to withdraw from the L2 fee vaults. It includes See the file `scripts/FeeVaultWithdrawal.s.sol` to withdraw from the L2 fee vaults. It includes
......
...@@ -43,7 +43,7 @@ ...@@ -43,7 +43,7 @@
"eip1559Elasticity": 6, "eip1559Elasticity": 6,
"l1GenesisBlockTimestamp": "0x64c811bf", "l1GenesisBlockTimestamp": "0x64c811bf",
"l2GenesisRegolithTimeOffset": "0x0", "l2GenesisRegolithTimeOffset": "0x0",
"faultGameAbsolutePrestate": "0x41c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98", "faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98",
"faultGameMaxDepth": 30, "faultGameMaxDepth": 30,
"faultGameMaxDuration": 1200, "faultGameMaxDuration": 1200,
"systemConfigStartBlock": 0 "systemConfigStartBlock": 0
......
################################################################
# PROFILE: DEFAULT (Local) #
################################################################
[profile.default] [profile.default]
# Compilation settings
src = 'src' src = 'src'
out = 'forge-artifacts' out = 'forge-artifacts'
script = 'scripts' script = 'scripts'
optimizer = true optimizer = true
optimizer_runs = 999999 optimizer_runs = 999999
remappings = [ remappings = [
'@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts', '@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts',
'@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts', '@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts',
'@rari-capital/solmate/=lib/solmate', '@rari-capital/solmate/=lib/solmate',
"@cwia/=lib/clones-with-immutable-args/src", '@cwia/=lib/clones-with-immutable-args/src',
'forge-std/=lib/forge-std/src', 'forge-std/=lib/forge-std/src',
'ds-test/=lib/forge-std/lib/ds-test/src' 'ds-test/=lib/forge-std/lib/ds-test/src'
] ]
...@@ -17,9 +22,9 @@ extra_output = ['devdoc', 'userdoc', 'metadata', 'storageLayout'] ...@@ -17,9 +22,9 @@ extra_output = ['devdoc', 'userdoc', 'metadata', 'storageLayout']
bytecode_hash = 'none' bytecode_hash = 'none'
build_info = true build_info = true
build_info_path = 'artifacts/build-info' build_info_path = 'artifacts/build-info'
ffi = true
fuzz_runs = 16
# Test / Script Runner Settings
ffi = true
fs_permissions = [ fs_permissions = [
{ access='read-write', path='./.resource-metering.csv' }, { access='read-write', path='./.resource-metering.csv' },
{ access='read-write', path='./deployments/' }, { access='read-write', path='./deployments/' },
...@@ -29,14 +34,19 @@ fs_permissions = [ ...@@ -29,14 +34,19 @@ fs_permissions = [
{ access='write', path='./semver-lock.json' }, { access='write', path='./semver-lock.json' },
] ]
[fuzz]
runs = 64
[fmt] [fmt]
line_length=120 line_length=120
multiline_func_header="all" multiline_func_header='all'
bracket_spacing=true bracket_spacing=true
wrap_comments=true wrap_comments=true
ignore = [ ignore = ['src/vendor/WETH9.sol']
'src/vendor/WETH9.sol'
] ################################################################
# PROFILE: CI #
################################################################
[profile.ci] [profile.ci.fuzz]
fuzz_runs = 512 runs = 512
{ {
"src/EAS/EAS.sol": "0x00862a9f0088230acc1f5c5d0e4041bcc28cb3b3675d0eb7e1cceee7cf9502f8", "src/EAS/EAS.sol": "0x1acb25751a1206eb859cc5fcf934da2f84cfb907b8e8951d86fc4e43c53a7303",
"src/EAS/SchemaRegistry.sol": "0xf1cd4415f85775124c226e1a356d8b9b5126b9e9bdbe5aebb3876d46f8e1217a", "src/EAS/SchemaRegistry.sol": "0x305f3afed2e337cd70aac70fc202e6503b947b0a31e0d4e18c49486eeb635bb5",
"src/L1/L1CrossDomainMessenger.sol": "0x0e663b5d608b07cf278b94b1eeb3202abc01bea6b5905a3869010353df33ad1a", "src/L1/L1CrossDomainMessenger.sol": "0x0e663b5d608b07cf278b94b1eeb3202abc01bea6b5905a3869010353df33ad1a",
"src/L1/L1ERC721Bridge.sol": "0xbb10b777d1cd36ef98b53df6675f37a20b14a9a82b174f0d8f8872eedca65f17", "src/L1/L1ERC721Bridge.sol": "0xbb10b777d1cd36ef98b53df6675f37a20b14a9a82b174f0d8f8872eedca65f17",
"src/L1/L1StandardBridge.sol": "0xbd7b303cefe46bc14bf1a2b81e5702ff45ce9c5257524e59778e11c75f7f5bdc", "src/L1/L1StandardBridge.sol": "0xbd7b303cefe46bc14bf1a2b81e5702ff45ce9c5257524e59778e11c75f7f5bdc",
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
"src/L2/L2StandardBridge.sol": "0xe025dcccbf21d48828ecf588941c9ba04c91b87bdd177a653d3f1b265b0b02a8", "src/L2/L2StandardBridge.sol": "0xe025dcccbf21d48828ecf588941c9ba04c91b87bdd177a653d3f1b265b0b02a8",
"src/L2/L2ToL1MessagePasser.sol": "0xda56ba2e5b2c28fa8ca2df24077d49e96155a00ecc99cd0778d681be6ed166fe", "src/L2/L2ToL1MessagePasser.sol": "0xda56ba2e5b2c28fa8ca2df24077d49e96155a00ecc99cd0778d681be6ed166fe",
"src/L2/SequencerFeeVault.sol": "0x37816035c992d38cf7e3d5a1846b02d017dd7bdca46abe6e5c5171b9ee6225ab", "src/L2/SequencerFeeVault.sol": "0x37816035c992d38cf7e3d5a1846b02d017dd7bdca46abe6e5c5171b9ee6225ab",
"src/dispute/FaultDisputeGame.sol": "0x72c917e8513d17f274753a391bdbddc1f4daeca1a392f79492df29a1107c3525", "src/dispute/FaultDisputeGame.sol": "0x7b8462c29d003e96a73491c644001e1a9034bcc45c5be2a7bac3caf80d521635",
"src/legacy/DeployerWhitelist.sol": "0xf2129ec3da75307ba8e21bc943c332bb04704642e6e263149b5c8ee92dbcb7a8", "src/legacy/DeployerWhitelist.sol": "0xf2129ec3da75307ba8e21bc943c332bb04704642e6e263149b5c8ee92dbcb7a8",
"src/legacy/L1BlockNumber.sol": "0x30aae1fc85103476af0226b6e98c71c01feebbdc35d93401390b1ad438a37be6", "src/legacy/L1BlockNumber.sol": "0x30aae1fc85103476af0226b6e98c71c01feebbdc35d93401390b1ad438a37be6",
"src/legacy/LegacyMessagePasser.sol": "0x5c08b0a663cc49d30e4e38540f6aefab19ef287c3ecd31c8d8c3decd5f5bd497", "src/legacy/LegacyMessagePasser.sol": "0x5c08b0a663cc49d30e4e38540f6aefab19ef287c3ecd31c8d8c3decd5f5bd497",
......
...@@ -8,6 +8,7 @@ bytes32 constant EMPTY_UID = 0; ...@@ -8,6 +8,7 @@ bytes32 constant EMPTY_UID = 0;
uint64 constant NO_EXPIRATION_TIME = 0; uint64 constant NO_EXPIRATION_TIME = 0;
error AccessDenied(); error AccessDenied();
error DeadlineExpired();
error InvalidEAS(); error InvalidEAS();
error InvalidLength(); error InvalidLength();
error InvalidSignature(); error InvalidSignature();
......
...@@ -27,6 +27,7 @@ struct DelegatedAttestationRequest { ...@@ -27,6 +27,7 @@ struct DelegatedAttestationRequest {
AttestationRequestData data; // The arguments of the attestation request. AttestationRequestData data; // The arguments of the attestation request.
Signature signature; // The ECDSA signature data. Signature signature; // The ECDSA signature data.
address attester; // The attesting account. address attester; // The attesting account.
uint64 deadline; // The deadline of the signature/request.
} }
/// @dev A struct representing the full arguments of the multi attestation request. /// @dev A struct representing the full arguments of the multi attestation request.
...@@ -42,6 +43,7 @@ struct MultiDelegatedAttestationRequest { ...@@ -42,6 +43,7 @@ struct MultiDelegatedAttestationRequest {
Signature[] signatures; // The ECDSA signatures data. Please note that the signatures are assumed to be signed with Signature[] signatures; // The ECDSA signatures data. Please note that the signatures are assumed to be signed with
// increasing nonces. // increasing nonces.
address attester; // The attesting account. address attester; // The attesting account.
uint64 deadline; // The deadline of the signature/request.
} }
/// @dev A struct representing the arguments of the revocation request. /// @dev A struct representing the arguments of the revocation request.
...@@ -63,6 +65,7 @@ struct DelegatedRevocationRequest { ...@@ -63,6 +65,7 @@ struct DelegatedRevocationRequest {
RevocationRequestData data; // The arguments of the revocation request. RevocationRequestData data; // The arguments of the revocation request.
Signature signature; // The ECDSA signature data. Signature signature; // The ECDSA signature data.
address revoker; // The revoking account. address revoker; // The revoking account.
uint64 deadline; // The deadline of the signature/request.
} }
/// @dev A struct representing the full arguments of the multi revocation request. /// @dev A struct representing the full arguments of the multi revocation request.
...@@ -78,6 +81,7 @@ struct MultiDelegatedRevocationRequest { ...@@ -78,6 +81,7 @@ struct MultiDelegatedRevocationRequest {
Signature[] signatures; // The ECDSA signatures data. Please note that the signatures are assumed to be signed with Signature[] signatures; // The ECDSA signatures data. Please note that the signatures are assumed to be signed with
// increasing nonces. // increasing nonces.
address revoker; // The revoking account. address revoker; // The revoking account.
uint64 deadline; // The deadline of the signature/request.
} }
/// @title IEAS /// @title IEAS
...@@ -87,15 +91,15 @@ interface IEAS { ...@@ -87,15 +91,15 @@ interface IEAS {
/// @param recipient The recipient of the attestation. /// @param recipient The recipient of the attestation.
/// @param attester The attesting account. /// @param attester The attesting account.
    /// @param uid The UID of the new attestation.     /// @param uid The UID of the new attestation.
/// @param schema The UID of the schema. /// @param schemaUID The UID of the schema.
event Attested(address indexed recipient, address indexed attester, bytes32 uid, bytes32 indexed schema); event Attested(address indexed recipient, address indexed attester, bytes32 uid, bytes32 indexed schemaUID);
/// @dev Emitted when an attestation has been revoked. /// @dev Emitted when an attestation has been revoked.
/// @param recipient The recipient of the attestation. /// @param recipient The recipient of the attestation.
/// @param attester The attesting account. /// @param attester The attesting account.
/// @param schema The UID of the schema. /// @param schemaUID The UID of the schema.
    /// @param uid The UID of the revoked attestation.     /// @param uid The UID of the revoked attestation.
event Revoked(address indexed recipient, address indexed attester, bytes32 uid, bytes32 indexed schema); event Revoked(address indexed recipient, address indexed attester, bytes32 uid, bytes32 indexed schemaUID);
    /// @dev Emitted when data has been timestamped.     /// @dev Emitted when data has been timestamped.
/// @param data The data. /// @param data The data.
...@@ -151,7 +155,8 @@ interface IEAS { ...@@ -151,7 +155,8 @@ interface IEAS {
/// r: '0x148c...b25b', /// r: '0x148c...b25b',
/// s: '0x5a72...be22' /// s: '0x5a72...be22'
/// }, /// },
/// attester: '0xc5E8740aD971409492b1A63Db8d83025e0Fc427e' /// attester: '0xc5E8740aD971409492b1A63Db8d83025e0Fc427e',
/// deadline: 1673891048
/// }) /// })
/// ///
/// @param delegatedRequest The arguments of the delegated attestation request. /// @param delegatedRequest The arguments of the delegated attestation request.
...@@ -236,7 +241,8 @@ interface IEAS { ...@@ -236,7 +241,8 @@ interface IEAS {
/// r: '0x487s...67bb', /// r: '0x487s...67bb',
/// s: '0x12ad...2366' /// s: '0x12ad...2366'
/// }], /// }],
/// attester: '0x1D86495b2A7B524D747d2839b3C645Bed32e8CF4' /// attester: '0x1D86495b2A7B524D747d2839b3C645Bed32e8CF4',
/// deadline: 1673891048
/// }]) /// }])
/// ///
/// @param multiDelegatedRequests The arguments of the delegated multi attestation requests. The requests should be /// @param multiDelegatedRequests The arguments of the delegated multi attestation requests. The requests should be
...@@ -277,7 +283,8 @@ interface IEAS { ...@@ -277,7 +283,8 @@ interface IEAS {
/// r: '0xb593...7142', /// r: '0xb593...7142',
/// s: '0x0f5b...2cce' /// s: '0x0f5b...2cce'
/// }, /// },
/// revoker: '0x244934dd3e31bE2c81f84ECf0b3E6329F5381992' /// revoker: '0x244934dd3e31bE2c81f84ECf0b3E6329F5381992',
/// deadline: 1673891048
/// }) /// })
/// ///
/// @param delegatedRequest The arguments of the delegated revocation request. /// @param delegatedRequest The arguments of the delegated revocation request.
...@@ -334,7 +341,8 @@ interface IEAS { ...@@ -334,7 +341,8 @@ interface IEAS {
/// r: '0x487s...67bb', /// r: '0x487s...67bb',
/// s: '0x12ad...2366' /// s: '0x12ad...2366'
/// }], /// }],
/// revoker: '0x244934dd3e31bE2c81f84ECf0b3E6329F5381992' /// revoker: '0x244934dd3e31bE2c81f84ECf0b3E6329F5381992',
/// deadline: 1673891048
/// }]) /// }])
/// ///
/// @param multiDelegatedRequests The arguments of the delegated multi revocation attestation requests. The requests /// @param multiDelegatedRequests The arguments of the delegated multi revocation attestation requests. The requests
......
...@@ -17,7 +17,8 @@ interface ISchemaRegistry { ...@@ -17,7 +17,8 @@ interface ISchemaRegistry {
/// @dev Emitted when a new schema has been registered /// @dev Emitted when a new schema has been registered
/// @param uid The schema UID. /// @param uid The schema UID.
/// @param registerer The address of the account used to register the schema. /// @param registerer The address of the account used to register the schema.
event Registered(bytes32 indexed uid, address registerer); /// @param schema The schema data.
event Registered(bytes32 indexed uid, address indexed registerer, SchemaRecord schema);
/// @dev Submits and reserves a new schema /// @dev Submits and reserves a new schema
    /// @param schema The schema data.     /// @param schema The schema data.
......
...@@ -20,8 +20,8 @@ contract SchemaRegistry is ISchemaRegistry, Semver { ...@@ -20,8 +20,8 @@ contract SchemaRegistry is ISchemaRegistry, Semver {
uint256[MAX_GAP - 1] private __gap; uint256[MAX_GAP - 1] private __gap;
/// @dev Creates a new SchemaRegistry instance. /// @dev Creates a new SchemaRegistry instance.
/// @custom:semver 1.0.3 /// @custom:semver 1.2.0
constructor() Semver(1, 0, 3) { } constructor() Semver(1, 2, 0) { }
/// @inheritdoc ISchemaRegistry /// @inheritdoc ISchemaRegistry
function register(string calldata schema, ISchemaResolver resolver, bool revocable) external returns (bytes32) { function register(string calldata schema, ISchemaResolver resolver, bool revocable) external returns (bytes32) {
...@@ -36,7 +36,7 @@ contract SchemaRegistry is ISchemaRegistry, Semver { ...@@ -36,7 +36,7 @@ contract SchemaRegistry is ISchemaRegistry, Semver {
schemaRecord.uid = uid; schemaRecord.uid = uid;
_registry[uid] = schemaRecord; _registry[uid] = schemaRecord;
emit Registered(uid, msg.sender); emit Registered(uid, msg.sender, schemaRecord);
return uid; return uid;
} }
......
...@@ -12,21 +12,31 @@ import { ...@@ -12,21 +12,31 @@ import {
RevocationRequestData RevocationRequestData
} from "../IEAS.sol"; } from "../IEAS.sol";
import { Signature, InvalidSignature, MAX_GAP, stringToBytes32, bytes32ToString } from "../Common.sol"; import {
DeadlineExpired,
NO_EXPIRATION_TIME,
Signature,
InvalidSignature,
MAX_GAP,
stringToBytes32,
bytes32ToString
} from "../Common.sol";
/// @title EIP1271Verifier /// @title EIP1271Verifier
/// @notice EIP1271Verifier typed signatures verifier for EAS delegated attestations. /// @notice EIP1271Verifier typed signatures verifier for EAS delegated attestations.
abstract contract EIP1271Verifier is EIP712 { abstract contract EIP1271Verifier is EIP712 {
using Address for address; using Address for address;
error InvalidNonce();
// The hash of the data type used to relay calls to the attest function. It's the value of // The hash of the data type used to relay calls to the attest function. It's the value of
// keccak256("Attest(bytes32 schema,address recipient,uint64 expirationTime,bool revocable,bytes32 refUID,bytes // keccak256("Attest(bytes32 schema,address recipient,uint64 expirationTime,bool revocable,bytes32 refUID,bytes
// data,uint256 nonce)"). // data,uint256 value,uint256 nonce,uint64 deadline)").
bytes32 private constant ATTEST_TYPEHASH = 0xdbfdf8dc2b135c26253e00d5b6cbe6f20457e003fd526d97cea183883570de61; bytes32 private constant ATTEST_TYPEHASH = 0xf83bb2b0ede93a840239f7e701a54d9bc35f03701f51ae153d601c6947ff3d3f;
// The hash of the data type used to relay calls to the revoke function. It's the value of // The hash of the data type used to relay calls to the revoke function. It's the value of
// keccak256("Revoke(bytes32 schema,bytes32 uid,uint256 nonce)"). // keccak256("Revoke(bytes32 schema,bytes32 uid,uint256 value,uint256 nonce,uint64 deadline)").
bytes32 private constant REVOKE_TYPEHASH = 0xa98d02348410c9c76735e0d0bb1396f4015ac2bb9615f9c2611d19d7a8a99650; bytes32 private constant REVOKE_TYPEHASH = 0x2d4116d8c9824e4c316453e5c2843a1885580374159ce8768603c49085ef424c;
// The user readable name of the signing domain. // The user readable name of the signing domain.
bytes32 private immutable _name; bytes32 private immutable _name;
...@@ -37,6 +47,11 @@ abstract contract EIP1271Verifier is EIP712 { ...@@ -37,6 +47,11 @@ abstract contract EIP1271Verifier is EIP712 {
// Upgrade forward-compatibility storage gap // Upgrade forward-compatibility storage gap
uint256[MAX_GAP - 1] private __gap; uint256[MAX_GAP - 1] private __gap;
/// @dev Emitted when users invalidate nonces by increasing their nonces to (higher) new values.
/// @param oldNonce The previous nonce.
/// @param newNonce The new value.
event NonceIncreased(uint256 oldNonce, uint256 newNonce);
/// @dev Creates a new EIP1271Verifier instance. /// @dev Creates a new EIP1271Verifier instance.
/// @param version The current major version of the signing domain /// @param version The current major version of the signing domain
constructor(string memory name, string memory version) EIP712(name, version) { constructor(string memory name, string memory version) EIP712(name, version) {
...@@ -74,17 +89,29 @@ abstract contract EIP1271Verifier is EIP712 { ...@@ -74,17 +89,29 @@ abstract contract EIP1271Verifier is EIP712 {
return bytes32ToString(_name); return bytes32ToString(_name);
} }
/// @notice Provides users an option to invalidate nonces by increasing their nonces to (higher) new values.
/// @param newNonce The (higher) new value.
function increaseNonce(uint256 newNonce) external {
uint256 oldNonce = _nonces[msg.sender];
if (newNonce <= oldNonce) {
revert InvalidNonce();
}
_nonces[msg.sender] = newNonce;
emit NonceIncreased({ oldNonce: oldNonce, newNonce: newNonce });
}
/// @notice Verifies delegated attestation request. /// @notice Verifies delegated attestation request.
/// @param request The arguments of the delegated attestation request. /// @param request The arguments of the delegated attestation request.
function _verifyAttest(DelegatedAttestationRequest memory request) internal { function _verifyAttest(DelegatedAttestationRequest memory request) internal {
if (request.deadline != NO_EXPIRATION_TIME && request.deadline < _time()) {
revert DeadlineExpired();
}
AttestationRequestData memory data = request.data; AttestationRequestData memory data = request.data;
Signature memory signature = request.signature; Signature memory signature = request.signature;
uint256 nonce;
unchecked {
nonce = _nonces[request.attester]++;
}
bytes32 hash = _hashTypedDataV4( bytes32 hash = _hashTypedDataV4(
keccak256( keccak256(
abi.encode( abi.encode(
...@@ -95,7 +122,9 @@ abstract contract EIP1271Verifier is EIP712 { ...@@ -95,7 +122,9 @@ abstract contract EIP1271Verifier is EIP712 {
data.revocable, data.revocable,
data.refUID, data.refUID,
keccak256(data.data), keccak256(data.data),
nonce data.value,
_nonces[request.attester]++,
request.deadline
) )
) )
); );
...@@ -111,15 +140,20 @@ abstract contract EIP1271Verifier is EIP712 { ...@@ -111,15 +140,20 @@ abstract contract EIP1271Verifier is EIP712 {
/// @notice Verifies delegated revocation request. /// @notice Verifies delegated revocation request.
/// @param request The arguments of the delegated revocation request. /// @param request The arguments of the delegated revocation request.
function _verifyRevoke(DelegatedRevocationRequest memory request) internal { function _verifyRevoke(DelegatedRevocationRequest memory request) internal {
if (request.deadline != NO_EXPIRATION_TIME && request.deadline < _time()) {
revert DeadlineExpired();
}
RevocationRequestData memory data = request.data; RevocationRequestData memory data = request.data;
Signature memory signature = request.signature; Signature memory signature = request.signature;
uint256 nonce; bytes32 hash = _hashTypedDataV4(
unchecked { keccak256(
nonce = _nonces[request.revoker]++; abi.encode(
} REVOKE_TYPEHASH, request.schema, data.uid, data.value, _nonces[request.revoker]++, request.deadline
)
bytes32 hash = _hashTypedDataV4(keccak256(abi.encode(REVOKE_TYPEHASH, request.schema, data.uid, nonce))); )
);
if ( if (
!SignatureChecker.isValidSignatureNow( !SignatureChecker.isValidSignatureNow(
request.revoker, hash, abi.encodePacked(signature.r, signature.s, signature.v) request.revoker, hash, abi.encodePacked(signature.r, signature.s, signature.v)
...@@ -128,4 +162,10 @@ abstract contract EIP1271Verifier is EIP712 { ...@@ -128,4 +162,10 @@ abstract contract EIP1271Verifier is EIP712 {
revert InvalidSignature(); revert InvalidSignature();
} }
} }
    /// @dev Returns the current block's timestamp. This method is overridden during tests and used to simulate the
/// current block time.
function _time() internal view virtual returns (uint64) {
return uint64(block.timestamp);
}
} }
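The updated ATTEST_TYPEHASH and REVOKE_TYPEHASH constants above are the keccak256 hashes of the EIP-712 type strings quoted in the comments, which now include value, nonce and deadline. A quick way to cross-check such constants from Go (the type strings below are transcribed from the comments above; if transcribed correctly, the printed hashes should match the constants):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// EIP-712 type strings as quoted in the EIP1271Verifier comments above.
	attestType := "Attest(bytes32 schema,address recipient,uint64 expirationTime,bool revocable,bytes32 refUID,bytes data,uint256 value,uint256 nonce,uint64 deadline)"
	revokeType := "Revoke(bytes32 schema,bytes32 uid,uint256 value,uint256 nonce,uint64 deadline)"

	// keccak256 of the type string is the EIP-712 typehash.
	fmt.Println("ATTEST_TYPEHASH:", crypto.Keccak256Hash([]byte(attestType)).Hex())
	fmt.Println("REVOKE_TYPEHASH:", crypto.Keccak256Hash([]byte(revokeType)).Hex())
}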
...@@ -6,7 +6,7 @@ import { Attestation } from "../Common.sol"; ...@@ -6,7 +6,7 @@ import { Attestation } from "../Common.sol";
/// @title ISchemaResolver /// @title ISchemaResolver
/// @notice The interface of an optional schema resolver. /// @notice The interface of an optional schema resolver.
interface ISchemaResolver { interface ISchemaResolver {
/// @notice Checks if the resolve can be sent ETH. /// @notice Checks if the resolver can be sent ETH.
/// @return Whether the resolver supports ETH transfers. /// @return Whether the resolver supports ETH transfers.
function isPayable() external pure returns (bool); function isPayable() external pure returns (bool);
......
...@@ -4,14 +4,13 @@ pragma solidity 0.8.19; ...@@ -4,14 +4,13 @@ pragma solidity 0.8.19;
import { Semver } from "../../universal/Semver.sol"; import { Semver } from "../../universal/Semver.sol";
import { IEAS, Attestation } from "../IEAS.sol"; import { IEAS, Attestation } from "../IEAS.sol";
import { InvalidEAS, uncheckedInc } from "../Common.sol"; import { AccessDenied, InvalidEAS, InvalidLength, uncheckedInc } from "../Common.sol";
import { ISchemaResolver } from "./ISchemaResolver.sol"; import { ISchemaResolver } from "./ISchemaResolver.sol";
/// @title SchemaResolver /// @title SchemaResolver
/// @notice The base schema resolver contract. /// @notice The base schema resolver contract.
abstract contract SchemaResolver is ISchemaResolver, Semver { abstract contract SchemaResolver is ISchemaResolver, Semver {
error AccessDenied();
error InsufficientValue(); error InsufficientValue();
error NotPayable(); error NotPayable();
...@@ -20,7 +19,7 @@ abstract contract SchemaResolver is ISchemaResolver, Semver { ...@@ -20,7 +19,7 @@ abstract contract SchemaResolver is ISchemaResolver, Semver {
/// @dev Creates a new resolver. /// @dev Creates a new resolver.
/// @param eas The address of the global EAS contract. /// @param eas The address of the global EAS contract.
constructor(IEAS eas) Semver(1, 0, 1) { constructor(IEAS eas) Semver(1, 2, 0) {
if (address(eas) == address(0)) { if (address(eas) == address(0)) {
revert InvalidEAS(); revert InvalidEAS();
} }
...@@ -63,6 +62,9 @@ abstract contract SchemaResolver is ISchemaResolver, Semver { ...@@ -63,6 +62,9 @@ abstract contract SchemaResolver is ISchemaResolver, Semver {
returns (bool) returns (bool)
{ {
uint256 length = attestations.length; uint256 length = attestations.length;
if (length != values.length) {
revert InvalidLength();
}
// We are keeping track of the remaining ETH amount that can be sent to resolvers and will keep deducting // We are keeping track of the remaining ETH amount that can be sent to resolvers and will keep deducting
// from it to verify that there isn't any attempt to send too much ETH to resolvers. Please note that unless // from it to verify that there isn't any attempt to send too much ETH to resolvers. Please note that unless
...@@ -77,7 +79,7 @@ abstract contract SchemaResolver is ISchemaResolver, Semver { ...@@ -77,7 +79,7 @@ abstract contract SchemaResolver is ISchemaResolver, Semver {
revert InsufficientValue(); revert InsufficientValue();
} }
// Forward the attestation to the underlying resolver and revert in case it isn't approved. // Forward the attestation to the underlying resolver and return false in case it isn't approved.
if (!onAttest(attestations[i], value)) { if (!onAttest(attestations[i], value)) {
return false; return false;
} }
...@@ -107,6 +109,9 @@ abstract contract SchemaResolver is ISchemaResolver, Semver { ...@@ -107,6 +109,9 @@ abstract contract SchemaResolver is ISchemaResolver, Semver {
returns (bool) returns (bool)
{ {
uint256 length = attestations.length; uint256 length = attestations.length;
if (length != values.length) {
revert InvalidLength();
}
// We are keeping track of the remaining ETH amount that can be sent to resolvers and will keep deducting // We are keeping track of the remaining ETH amount that can be sent to resolvers and will keep deducting
// from it to verify that there isn't any attempt to send too much ETH to resolvers. Please note that unless // from it to verify that there isn't any attempt to send too much ETH to resolvers. Please note that unless
...@@ -121,7 +126,7 @@ abstract contract SchemaResolver is ISchemaResolver, Semver { ...@@ -121,7 +126,7 @@ abstract contract SchemaResolver is ISchemaResolver, Semver {
revert InsufficientValue(); revert InsufficientValue();
} }
// Forward the revocation to the underlying resolver and revert in case it isn't approved. // Forward the revocation to the underlying resolver and return false in case it isn't approved.
if (!onRevoke(attestations[i], value)) { if (!onRevoke(attestations[i], value)) {
return false; return false;
} }
......
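Both resolver loops now require the per-attestation `values` array to match the attestation count before any ETH is forwarded, and keep deducting from the available value so no more ETH can be spent than was supplied. An illustrative, standalone version of that accounting follows (an assumed helper, not part of the EAS contracts):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.19;

error InvalidLength();
error InsufficientValue();

/// Sketch only: the per-item `values` array must line up one-to-one with the
/// attestations, and the sum of requested values can never exceed the ETH
/// supplied with the call.
library ValueAccounting {
    function check(uint256 itemCount, uint256[] memory values, uint256 availableValue) internal pure {
        if (itemCount != values.length) {
            revert InvalidLength();
        }
        uint256 remaining = availableValue;
        for (uint256 i = 0; i < values.length; ++i) {
            if (values[i] > remaining) {
                revert InsufficientValue();
            }
            // Deduct so later items cannot reuse ETH already earmarked.
            remaining -= values[i];
        }
    }
}
```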
...@@ -103,7 +103,9 @@ contract MIPS { ...@@ -103,7 +103,9 @@ contract MIPS {
from, to := copyMem(from, to, 4) // lo from, to := copyMem(from, to, 4) // lo
from, to := copyMem(from, to, 4) // hi from, to := copyMem(from, to, 4) // hi
from, to := copyMem(from, to, 4) // heap from, to := copyMem(from, to, 4) // heap
let exitCode := mload(from)
from, to := copyMem(from, to, 1) // exitCode from, to := copyMem(from, to, 1) // exitCode
let exited := mload(from)
from, to := copyMem(from, to, 1) // exited from, to := copyMem(from, to, 1) // exited
from, to := copyMem(from, to, 8) // step from, to := copyMem(from, to, 8) // step
from := add(from, 32) // offset to registers from := add(from, 32) // offset to registers
...@@ -117,8 +119,24 @@ contract MIPS { ...@@ -117,8 +119,24 @@ contract MIPS {
// Log the resulting MIPS state, for debugging // Log the resulting MIPS state, for debugging
log0(start, sub(to, start)) log0(start, sub(to, start))
// Compute the hash of the resulting MIPS state // Determine the VM status
let status := 0
switch exited
case 1 {
switch exitCode
// VMStatusValid
case 0 { status := 0 }
// VMStatusInvalid
case 1 { status := 1 }
// VMStatusPanic
default { status := 2 }
}
// VMStatusUnfinished
default { status := 3 }
// Compute the hash of the resulting MIPS state and set the status byte
out_ := keccak256(start, sub(to, start)) out_ := keccak256(start, sub(to, start))
out_ := or(and(not(shl(248, 0xFF)), out_), shl(248, status))
} }
} }
......
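The new assembly derives a status from `exited`/`exitCode` and stamps it into the most significant byte of the keccak digest of the state. A non-assembly sketch of the same packing (an illustrative helper, not the contract's code):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;

/// Sketch only: clear the most significant byte of the state hash, then
/// write the VM status into it.
library StateHashPacking {
    function withStatus(bytes32 stateHash, uint8 status) internal pure returns (bytes32) {
        bytes32 cleared = stateHash & ~bytes32(uint256(0xFF) << 248);
        return cleared | bytes32(uint256(status) << 248);
    }
}
```

For example, with `status = 1` (Invalid) a digest starting with `0xab…` becomes `0x01…`, with the remaining 31 bytes unchanged.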
...@@ -85,7 +85,7 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver { ...@@ -85,7 +85,7 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver {
/// @param _blockOracle The block oracle, used for loading block hashes further back /// @param _blockOracle The block oracle, used for loading block hashes further back
/// than the `BLOCKHASH` opcode allows as well as their estimated /// than the `BLOCKHASH` opcode allows as well as their estimated
/// timestamps. /// timestamps.
/// @custom:semver 0.0.7 /// @custom:semver 0.0.9
constructor( constructor(
GameType _gameType, GameType _gameType,
Claim _absolutePrestate, Claim _absolutePrestate,
...@@ -95,7 +95,7 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver { ...@@ -95,7 +95,7 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver {
L2OutputOracle _l2oo, L2OutputOracle _l2oo,
BlockOracle _blockOracle BlockOracle _blockOracle
) )
Semver(0, 0, 8) Semver(0, 0, 9)
{ {
GAME_TYPE = _gameType; GAME_TYPE = _gameType;
ABSOLUTE_PRESTATE = _absolutePrestate; ABSOLUTE_PRESTATE = _absolutePrestate;
...@@ -149,7 +149,11 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver { ...@@ -149,7 +149,11 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver {
// INVARIANT: The prestate is always invalid if the passed `_stateData` is not the // INVARIANT: The prestate is always invalid if the passed `_stateData` is not the
// preimage of the prestate claim hash. // preimage of the prestate claim hash.
if (keccak256(_stateData) != Claim.unwrap(preStateClaim)) revert InvalidPrestate();
// We ignore the highest order byte of the digest because it is used to
// indicate the VM Status and is added after the digest is computed.
if (keccak256(_stateData) << 8 != Claim.unwrap(preStateClaim) << 8) {
revert InvalidPrestate();
}
// INVARIANT: If a step is an attack, the poststate is valid if the step produces // INVARIANT: If a step is an attack, the poststate is valid if the step produces
// the same poststate hash as the parent claim's value. // the same poststate hash as the parent claim's value.
...@@ -434,9 +438,18 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver { ...@@ -434,9 +438,18 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver {
function initialize() external { function initialize() external {
// SAFETY: Any revert in this function will bubble up to the DisputeGameFactory and // SAFETY: Any revert in this function will bubble up to the DisputeGameFactory and
// prevent the game from being created. // prevent the game from being created.
//
// Implicit assumptions: // Implicit assumptions:
// - The `gameStatus` state variable defaults to 0, which is `GameStatus.IN_PROGRESS` // - The `gameStatus` state variable defaults to 0, which is `GameStatus.IN_PROGRESS`
// The VMStatus must indicate that the disputed output is invalid ('invalid' (1) or 'panic' (2)).
// Games that agree with the existing outcome are not allowed.
// NOTE(clabby): This assumption will change in Alpha Chad.
uint8 vmStatus = uint8(Claim.unwrap(rootClaim())[0]);
if (!(vmStatus == VMStatus.unwrap(VMStatuses.INVALID) || vmStatus == VMStatus.unwrap(VMStatuses.PANIC))) {
revert UnexpectedRootClaim(rootClaim());
}
// Set the game's starting timestamp // Set the game's starting timestamp
createdAt = Timestamp.wrap(uint64(block.timestamp)); createdAt = Timestamp.wrap(uint64(block.timestamp));
......
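`initialize` reads the VM status straight out of the root claim's most significant byte and only accepts claims whose status is `INVALID` (1) or `PANIC` (2). A standalone sketch of that byte extraction (a hypothetical helper; the game performs the read inline):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;

/// `Claim` mirrors the user-defined value type in `DisputeTypes.sol`.
type Claim is bytes32;

/// @dev Sketch: the VM status is the claim's most significant byte,
///      i.e. equivalent to uint8(Claim.unwrap(_claim)[0]).
function vmStatusOf(Claim _claim) pure returns (uint8 status_) {
    status_ = uint8(uint256(Claim.unwrap(_claim)) >> 248);
}
```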
...@@ -15,6 +15,11 @@ error NoImplementation(GameType gameType); ...@@ -15,6 +15,11 @@ error NoImplementation(GameType gameType);
/// @param uuid The UUID of the dispute game that already exists. /// @param uuid The UUID of the dispute game that already exists.
error GameAlreadyExists(Hash uuid); error GameAlreadyExists(Hash uuid);
/// @notice Thrown when the root claim has an unexpected VM status.
/// Some games can only start with a root-claim with a specific status.
/// @param rootClaim is the claim that was unexpected.
error UnexpectedRootClaim(Claim rootClaim);
//////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////
// `FaultDisputeGame` Errors // // `FaultDisputeGame` Errors //
//////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////
......
...@@ -62,6 +62,9 @@ type Position is uint128; ...@@ -62,6 +62,9 @@ type Position is uint128;
/// @notice A `GameType` represents the type of game being played. /// @notice A `GameType` represents the type of game being played.
type GameType is uint8; type GameType is uint8;
/// @notice A `VMStatus` represents the status of a VM execution.
type VMStatus is uint8;
/// @notice The current status of the dispute game. /// @notice The current status of the dispute game.
enum GameStatus enum GameStatus
// The game is currently in progress, and has not been resolved. // The game is currently in progress, and has not been resolved.
...@@ -85,3 +88,18 @@ library GameTypes { ...@@ -85,3 +88,18 @@ library GameTypes {
/// @dev The game will use a `IDisputeGame` implementation that utilizes attestation proofs. /// @dev The game will use a `IDisputeGame` implementation that utilizes attestation proofs.
GameType internal constant ATTESTATION = GameType.wrap(2); GameType internal constant ATTESTATION = GameType.wrap(2);
} }
/// @title VMStatuses
library VMStatuses {
/// @dev The VM has executed successfully and the outcome is valid.
VMStatus internal constant VALID = VMStatus.wrap(0);
/// @dev The VM has executed successfully and the outcome is invalid.
VMStatus internal constant INVALID = VMStatus.wrap(1);
/// @dev The VM has panicked.
VMStatus internal constant PANIC = VMStatus.wrap(2);
/// @dev The VM execution is still in progress.
VMStatus internal constant UNFINISHED = VMStatus.wrap(3);
}
...@@ -41,6 +41,8 @@ contract DisputeGameFactory_Create_Test is DisputeGameFactory_Init { ...@@ -41,6 +41,8 @@ contract DisputeGameFactory_Create_Test is DisputeGameFactory_Init {
function testFuzz_create_succeeds(uint8 gameType, Claim rootClaim, bytes calldata extraData) public { function testFuzz_create_succeeds(uint8 gameType, Claim rootClaim, bytes calldata extraData) public {
// Ensure that the `gameType` is within the bounds of the `GameType` enum's possible values. // Ensure that the `gameType` is within the bounds of the `GameType` enum's possible values.
GameType gt = GameType.wrap(uint8(bound(gameType, 0, 2))); GameType gt = GameType.wrap(uint8(bound(gameType, 0, 2)));
// Ensure the rootClaim has a VMStatus that disagrees with the validity.
rootClaim = changeClaimStatus(rootClaim, VMStatuses.INVALID);
// Set all three implementations to the same `FakeClone` contract. // Set all three implementations to the same `FakeClone` contract.
for (uint8 i; i < 3; i++) { for (uint8 i; i < 3; i++) {
...@@ -68,6 +70,8 @@ contract DisputeGameFactory_Create_Test is DisputeGameFactory_Init { ...@@ -68,6 +70,8 @@ contract DisputeGameFactory_Create_Test is DisputeGameFactory_Init {
function testFuzz_create_noImpl_reverts(uint8 gameType, Claim rootClaim, bytes calldata extraData) public { function testFuzz_create_noImpl_reverts(uint8 gameType, Claim rootClaim, bytes calldata extraData) public {
// Ensure that the `gameType` is within the bounds of the `GameType` enum's possible values. // Ensure that the `gameType` is within the bounds of the `GameType` enum's possible values.
GameType gt = GameType.wrap(uint8(bound(gameType, 0, 2))); GameType gt = GameType.wrap(uint8(bound(gameType, 0, 2)));
// Ensure the rootClaim has a VMStatus that disagrees with the validity.
rootClaim = changeClaimStatus(rootClaim, VMStatuses.INVALID);
vm.expectRevert(abi.encodeWithSelector(NoImplementation.selector, gt)); vm.expectRevert(abi.encodeWithSelector(NoImplementation.selector, gt));
factory.create(gt, rootClaim, extraData); factory.create(gt, rootClaim, extraData);
...@@ -77,6 +81,8 @@ contract DisputeGameFactory_Create_Test is DisputeGameFactory_Init { ...@@ -77,6 +81,8 @@ contract DisputeGameFactory_Create_Test is DisputeGameFactory_Init {
function testFuzz_create_sameUUID_reverts(uint8 gameType, Claim rootClaim, bytes calldata extraData) public { function testFuzz_create_sameUUID_reverts(uint8 gameType, Claim rootClaim, bytes calldata extraData) public {
// Ensure that the `gameType` is within the bounds of the `GameType` enum's possible values. // Ensure that the `gameType` is within the bounds of the `GameType` enum's possible values.
GameType gt = GameType.wrap(uint8(bound(gameType, 0, 2))); GameType gt = GameType.wrap(uint8(bound(gameType, 0, 2)));
// Ensure the rootClaim has a VMStatus that disagrees with the validity.
rootClaim = changeClaimStatus(rootClaim, VMStatuses.INVALID);
// Set all three implementations to the same `FakeClone` contract. // Set all three implementations to the same `FakeClone` contract.
for (uint8 i; i < 3; i++) { for (uint8 i; i < 3; i++) {
...@@ -99,6 +105,12 @@ contract DisputeGameFactory_Create_Test is DisputeGameFactory_Init { ...@@ -99,6 +105,12 @@ contract DisputeGameFactory_Create_Test is DisputeGameFactory_Init {
); );
factory.create(gt, rootClaim, extraData); factory.create(gt, rootClaim, extraData);
} }
function changeClaimStatus(Claim _claim, VMStatus _status) public pure returns (Claim out_) {
assembly {
out_ := or(and(not(shl(248, 0xFF)), _claim), shl(248, _status))
}
}
} }
contract DisputeGameFactory_SetImplementation_Test is DisputeGameFactory_Init { contract DisputeGameFactory_SetImplementation_Test is DisputeGameFactory_Init {
......
...@@ -77,9 +77,9 @@ contract FaultDisputeGame_Init is DisputeGameFactory_Init { ...@@ -77,9 +77,9 @@ contract FaultDisputeGame_Init is DisputeGameFactory_Init {
contract FaultDisputeGame_Test is FaultDisputeGame_Init { contract FaultDisputeGame_Test is FaultDisputeGame_Init {
/// @dev The root claim of the game. /// @dev The root claim of the game.
Claim internal constant ROOT_CLAIM = Claim.wrap(bytes32(uint256(10))); Claim internal constant ROOT_CLAIM = Claim.wrap(bytes32((uint256(1) << 248) | uint256(10)));
/// @dev The absolute prestate of the trace. /// @dev The absolute prestate of the trace.
Claim internal constant ABSOLUTE_PRESTATE = Claim.wrap(bytes32(uint256(0))); Claim internal constant ABSOLUTE_PRESTATE = Claim.wrap(bytes32((uint256(3) << 248) | uint256(0)));
function setUp() public override { function setUp() public override {
super.init(ROOT_CLAIM, ABSOLUTE_PRESTATE); super.init(ROOT_CLAIM, ABSOLUTE_PRESTATE);
...@@ -143,6 +143,17 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { ...@@ -143,6 +143,17 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init {
factory.create(GAME_TYPE, ROOT_CLAIM, abi.encode(1800, block.number - 1)); factory.create(GAME_TYPE, ROOT_CLAIM, abi.encode(1800, block.number - 1));
} }
/// @dev Tests that the `create` function reverts when the rootClaim does not disagree with the outcome.
function testFuzz_initialize_badRootStatus_reverts(Claim rootClaim, bytes calldata extraData) public {
// Ensure the root claim does not have the correct VM status.
uint8 vmStatus = uint8(Claim.unwrap(rootClaim)[0]);
if (vmStatus == 1 || vmStatus == 2) rootClaim = changeClaimStatus(rootClaim, VMStatuses.VALID);
vm.expectRevert(abi.encodeWithSelector(UnexpectedRootClaim.selector, rootClaim));
factory.create(GameTypes.FAULT, rootClaim, extraData);
}
/// @dev Tests that the game is initialized with the correct data. /// @dev Tests that the game is initialized with the correct data.
function test_initialize_correctData_succeeds() public { function test_initialize_correctData_succeeds() public {
// Starting // Starting
...@@ -449,6 +460,12 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { ...@@ -449,6 +460,12 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init {
bytes32 h = keccak256(abi.encode(_ident | (1 << 248), address(gameProxy))); bytes32 h = keccak256(abi.encode(_ident | (1 << 248), address(gameProxy)));
return bytes32((uint256(h) & ~uint256(0xFF << 248)) | (1 << 248)); return bytes32((uint256(h) & ~uint256(0xFF << 248)) | (1 << 248));
} }
function changeClaimStatus(Claim _claim, VMStatus _status) public pure returns (Claim out_) {
assembly {
out_ := or(and(not(shl(248, 0xFF)), _claim), shl(248, _status))
}
}
} }
/// @notice A generic game player actor with a configurable trace. /// @notice A generic game player actor with a configurable trace.
...@@ -593,9 +610,11 @@ contract GamePlayer { ...@@ -593,9 +610,11 @@ contract GamePlayer {
/// @notice Returns the player's claim that commits to a given trace index. /// @notice Returns the player's claim that commits to a given trace index.
function claimAt(uint256 _traceIndex) public view returns (Claim claim_) { function claimAt(uint256 _traceIndex) public view returns (Claim claim_) {
return Claim.wrap(
keccak256(abi.encode(_traceIndex >= trace.length ? trace.length - 1 : _traceIndex, traceAt(_traceIndex)))
);
bytes32 hash =
keccak256(abi.encode(_traceIndex >= trace.length ? trace.length - 1 : _traceIndex, traceAt(_traceIndex)));
assembly {
claim_ := or(and(hash, not(shl(248, 0xFF))), shl(248, 1))
}
} }
/// @notice Returns the player's claim that commits to a given trace index. /// @notice Returns the player's claim that commits to a given trace index.
...@@ -608,14 +627,15 @@ contract OneVsOne_Arena is FaultDisputeGame_Init { ...@@ -608,14 +627,15 @@ contract OneVsOne_Arena is FaultDisputeGame_Init {
/// @dev The absolute prestate of the trace. /// @dev The absolute prestate of the trace.
bytes ABSOLUTE_PRESTATE = abi.encode(15); bytes ABSOLUTE_PRESTATE = abi.encode(15);
/// @dev The absolute prestate claim. /// @dev The absolute prestate claim.
Claim internal constant ABSOLUTE_PRESTATE_CLAIM = Claim.wrap(keccak256(abi.encode(15)));
Claim internal constant ABSOLUTE_PRESTATE_CLAIM =
Claim.wrap(bytes32((uint256(3) << 248) | (~uint256(0xFF << 248) & uint256(keccak256(abi.encode(15))))));
/// @dev The defender. /// @dev The defender.
GamePlayer internal defender; GamePlayer internal defender;
/// @dev The challenger. /// @dev The challenger.
GamePlayer internal challenger; GamePlayer internal challenger;
function init(GamePlayer _defender, GamePlayer _challenger, uint256 _finalTraceIndex) public { function init(GamePlayer _defender, GamePlayer _challenger, uint256 _finalTraceIndex) public {
Claim rootClaim = Claim.wrap(keccak256(abi.encode(_finalTraceIndex, _defender.traceAt(_finalTraceIndex)))); Claim rootClaim = _defender.claimAt(_finalTraceIndex);
super.init(rootClaim, ABSOLUTE_PRESTATE_CLAIM); super.init(rootClaim, ABSOLUTE_PRESTATE_CLAIM);
defender = _defender; defender = _defender;
challenger = _challenger; challenger = _challenger;
...@@ -874,7 +894,6 @@ contract FaultDisputeGame_ResolvesCorrectly_IncorrectRootFuzz is OneVsOne_Arena ...@@ -874,7 +894,6 @@ contract FaultDisputeGame_ResolvesCorrectly_IncorrectRootFuzz is OneVsOne_Arena
contract FaultDisputeGame_ResolvesCorrectly_CorrectRootFuzz is OneVsOne_Arena { contract FaultDisputeGame_ResolvesCorrectly_CorrectRootFuzz is OneVsOne_Arena {
function testFuzz_resolvesCorrectly_succeeds(uint256 _dishonestTraceLength) public { function testFuzz_resolvesCorrectly_succeeds(uint256 _dishonestTraceLength) public {
_dishonestTraceLength = bound(_dishonestTraceLength, 1, 16); _dishonestTraceLength = bound(_dishonestTraceLength, 1, 16);
for (uint256 i = 0; i < _dishonestTraceLength; i++) { for (uint256 i = 0; i < _dishonestTraceLength; i++) {
uint256 snapshot = vm.snapshot(); uint256 snapshot = vm.snapshot();
...@@ -968,7 +987,7 @@ contract AlphabetVM is IBigStepper { ...@@ -968,7 +987,7 @@ contract AlphabetVM is IBigStepper {
function step(bytes calldata _stateData, bytes calldata) external view returns (bytes32 postState_) { function step(bytes calldata _stateData, bytes calldata) external view returns (bytes32 postState_) {
uint256 traceIndex; uint256 traceIndex;
uint256 claim; uint256 claim;
if (keccak256(_stateData) == Claim.unwrap(ABSOLUTE_PRESTATE)) { if ((keccak256(_stateData) << 8) == (Claim.unwrap(ABSOLUTE_PRESTATE) << 8)) {
// If the state data is empty, then the absolute prestate is the claim. // If the state data is empty, then the absolute prestate is the claim.
traceIndex = 0; traceIndex = 0;
(claim) = abi.decode(_stateData, (uint256)); (claim) = abi.decode(_stateData, (uint256));
...@@ -979,5 +998,8 @@ contract AlphabetVM is IBigStepper { ...@@ -979,5 +998,8 @@ contract AlphabetVM is IBigStepper {
} }
// STF: n -> n + 1 // STF: n -> n + 1
postState_ = keccak256(abi.encode(traceIndex, claim + 1)); postState_ = keccak256(abi.encode(traceIndex, claim + 1));
assembly {
postState_ := or(and(postState_, not(shl(248, 0xFF))), shl(248, 1))
}
} }
} }
...@@ -4,6 +4,7 @@ pragma solidity 0.8.15; ...@@ -4,6 +4,7 @@ pragma solidity 0.8.15;
import { CommonTest } from "./CommonTest.t.sol"; import { CommonTest } from "./CommonTest.t.sol";
import { MIPS } from "src/cannon/MIPS.sol"; import { MIPS } from "src/cannon/MIPS.sol";
import { PreimageOracle } from "src/cannon/PreimageOracle.sol"; import { PreimageOracle } from "src/cannon/PreimageOracle.sol";
import "src/libraries/DisputeTypes.sol";
contract MIPS_Test is CommonTest { contract MIPS_Test is CommonTest {
MIPS internal mips; MIPS internal mips;
...@@ -1553,10 +1554,29 @@ contract MIPS_Test is CommonTest { ...@@ -1553,10 +1554,29 @@ contract MIPS_Test is CommonTest {
); );
} }
/// @dev MIPS VM status codes:
/// 0. Exited with success (Valid)
/// 1. Exited with success (Invalid)
/// 2. Exited with failure (Panic)
/// 3. Unfinished
function vmStatus(MIPS.State memory state) internal pure returns (VMStatus out_) {
if (!state.exited) {
return VMStatuses.UNFINISHED;
} else if (state.exitCode == 0) {
return VMStatuses.VALID;
} else if (state.exitCode == 1) {
return VMStatuses.INVALID;
} else {
return VMStatuses.PANIC;
}
}
function outputState(MIPS.State memory state) internal pure returns (bytes32 out_) { function outputState(MIPS.State memory state) internal pure returns (bytes32 out_) {
bytes memory enc = encodeState(state); bytes memory enc = encodeState(state);
VMStatus status = vmStatus(state);
assembly { assembly {
out_ := keccak256(add(enc, 0x20), 226) out_ := keccak256(add(enc, 0x20), 226)
out_ := or(and(not(shl(248, 0xFF)), out_), shl(248, status))
} }
} }
......
This diff is collapsed.
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
- [Overview](#overview) - [Overview](#overview)
- [State](#state) - [State](#state)
- [State Hash](#state-hash)
- [Memory](#memory) - [Memory](#memory)
- [Heap](#heap) - [Heap](#heap)
- [Delay Slots](#delay-slots) - [Delay Slots](#delay-slots)
...@@ -53,6 +54,34 @@ It consists of the following fields: ...@@ -53,6 +54,34 @@ It consists of the following fields:
The state is represented by packing the above fields, in order, into a 226-byte buffer. The state is represented by packing the above fields, in order, into a 226-byte buffer.
### State Hash
The state hash is computed by hashing the 226-byte state buffer with the Keccak256 hash function
and then setting the high-order byte to the respective VM status.
The VM status can be derived from the state's `exited` and `exitCode` fields.
```rs
enum VmStatus {
Valid = 0,
Invalid = 1,
Panic = 2,
Unfinished = 3,
}
fn vm_status(exit_code: u8, exited: bool) -> u8 {
if exited {
match exit_code {
0 => VmStatus::Valid,
1 => VmStatus::Invalid,
_ => VmStatus::Panic,
}
} else {
VmStatus::Unfinished
}
}
```
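For readers following the contracts, the same derivation in Solidity (a sketch; the production code implements this in inline assembly and test helpers):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

/// @dev Mirrors the `vm_status` pseudocode above.
function vmStatus(bool exited, uint8 exitCode) pure returns (uint8 status_) {
    if (!exited) return 3; // Unfinished
    if (exitCode == 0) return 0; // Valid
    if (exitCode == 1) return 1; // Invalid
    return 2; // Panic
}
```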
## Memory ## Memory
Memory is represented as a binary merkle tree. Memory is represented as a binary merkle tree.
......
This diff is collapsed.
This diff is collapsed.
CI=false
GRAFANA_ADMIN_PWD=
# Used to set the display used by playwright when running the Metamask test service.
# The following are to be used if running on MacOS
# MM_DISPLAY=host.docker.internal:0
# MM_DISPLAY_VOLUME=/tmp/.X11-unix:/tmp/.X11-unix
MM_DISPLAY=
MM_DISPLAY_VOLUME=
# Needs to point to the directory containing the docker binary, otherwise you'll get the error: exec: "docker": executable file not found in $PATH
PATH=/
# Runs every minute
* * * * * /usr/local/bin/docker-compose -f /path/to/docker-compose.yml --profile 1minute up -d
# Runs every 5 minutes
*/5 * * * * /usr/local/bin/docker-compose -f /path/to/docker-compose.yml --profile 5minutes up -d
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
node_modules/
/test-results/
/playwright-report/
/playwright/.cache/
.env
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
#!/bin/bash
npm test
\ No newline at end of file
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.