Commit 0c969256 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into inphi/dispute-specs

parents 01864c57 081ea016
......@@ -56,8 +56,9 @@ def main():
deployment_dir = pjoin(contracts_bedrock_dir, 'deployments', 'devnetL1')
op_node_dir = pjoin(args.monorepo_dir, 'op-node')
ops_bedrock_dir = pjoin(monorepo_dir, 'ops-bedrock')
deploy_config_dir = pjoin(contracts_bedrock_dir, 'deploy-config'),
devnet_config_path = pjoin(contracts_bedrock_dir, 'deploy-config', 'devnetL1.json')
deploy_config_dir = pjoin(contracts_bedrock_dir, 'deploy-config')
devnet_config_path = pjoin(deploy_config_dir, 'devnetL1.json')
devnet_config_template_path = pjoin(deploy_config_dir, 'devnetL1-template.json')
ops_chain_ops = pjoin(monorepo_dir, 'op-chain-ops')
sdk_dir = pjoin(monorepo_dir, 'packages', 'sdk')
......@@ -69,6 +70,7 @@ def main():
l1_deployments_path=pjoin(deployment_dir, '.deploy'),
deploy_config_dir=deploy_config_dir,
devnet_config_path=devnet_config_path,
devnet_config_template_path=devnet_config_template_path,
op_node_dir=op_node_dir,
ops_bedrock_dir=ops_bedrock_dir,
ops_chain_ops=ops_chain_ops,
......@@ -124,10 +126,16 @@ def deploy_contracts(paths):
'--rpc-url', 'http://127.0.0.1:8545'
], env={}, cwd=paths.contracts_bedrock_dir)
def init_devnet_l1_deploy_config(paths, update_timestamp=False):
deploy_config = read_json(paths.devnet_config_template_path)
if update_timestamp:
deploy_config['l1GenesisBlockTimestamp'] = '{:#x}'.format(int(time.time()))
write_json(paths.devnet_config_path, deploy_config)
def devnet_l1_genesis(paths):
log.info('Generating L1 genesis state')
init_devnet_l1_deploy_config(paths)
geth = subprocess.Popen([
'geth', '--dev', '--http', '--http.api', 'eth,debug',
'--verbosity', '4', '--gcmode', 'archive', '--dev.gaslimit', '30000000'
......@@ -157,13 +165,13 @@ def devnet_deploy(paths):
if os.path.exists(paths.allocs_path) == False:
devnet_l1_genesis(paths)
devnet_config_backup = pjoin(paths.devnet_dir, 'devnetL1.json.bak')
shutil.copy(paths.devnet_config_path, devnet_config_backup)
deploy_config = read_json(paths.devnet_config_path)
deploy_config['l1GenesisBlockTimestamp'] = '{:#x}'.format(int(time.time()))
write_json(paths.devnet_config_path, deploy_config)
# It's odd that we want to regenerate the devnetL1.json file with
# an updated timestamp different than the one used in the devnet_l1_genesis
# function. But, without it, CI flakes on this test rather consistently.
# If someone reads this comment and understands why this is being done, please
# update this comment to explain.
init_devnet_l1_deploy_config(paths, update_timestamp=True)
outfile_l1 = pjoin(paths.devnet_dir, 'genesis-l1.json')
run_command([
'go', 'run', 'cmd/main.go', 'genesis', 'l1',
'--deploy-config', paths.devnet_config_path,
......
......@@ -9,7 +9,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2"
......@@ -331,12 +330,18 @@ func Run(ctx *cli.Context) error {
}
if proofAt(state) {
preStateHash := crypto.Keccak256Hash(state.EncodeWitness())
preStateHash, err := state.EncodeWitness().StateHash()
if err != nil {
return fmt.Errorf("failed to hash prestate witness: %w", err)
}
witness, err := stepFn(true)
if err != nil {
return fmt.Errorf("failed at proof-gen step %d (PC: %08x): %w", step, state.PC, err)
}
postStateHash := crypto.Keccak256Hash(state.EncodeWitness())
postStateHash, err := state.EncodeWitness().StateHash()
if err != nil {
return fmt.Errorf("failed to hash poststate witness: %w", err)
}
proof := &Proof{
Step: step,
Pre: preStateHash,
......
......@@ -5,7 +5,6 @@ import (
"os"
"github.com/ethereum-optimism/optimism/cannon/mipsevm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/urfave/cli/v2"
)
......@@ -31,7 +30,10 @@ func Witness(ctx *cli.Context) error {
return fmt.Errorf("invalid input state (%v): %w", input, err)
}
witness := state.EncodeWitness()
h := crypto.Keccak256Hash(witness)
h, err := witness.StateHash()
if err != nil {
return fmt.Errorf("failed to compute witness hash: %w", err)
}
if output != "" {
if err := os.WriteFile(output, witness, 0755); err != nil {
return fmt.Errorf("writing output to %v: %w", output, err)
......
......@@ -15,7 +15,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/stretchr/testify/require"
......@@ -92,7 +91,10 @@ func (m *MIPSEVM) Step(t *testing.T, stepWitness *StepWitness) []byte {
logs := m.evmState.Logs()
require.Equal(t, 1, len(logs), "expecting a log with post-state")
evmPost := logs[0].Data
require.Equal(t, crypto.Keccak256Hash(evmPost), postHash, "logged state must be accurate")
stateHash, err := StateWitness(evmPost).StateHash()
require.NoError(t, err, "state hash could not be computed")
require.Equal(t, stateHash, postHash, "logged state must be accurate")
m.env.StateDB.RevertToSnapshot(snap)
t.Logf("EVM step took %d gas, and returned stateHash %s", startingGas-leftOverGas, postHash)
......
......@@ -2,11 +2,16 @@ package mipsevm
import (
"encoding/binary"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
)
// StateWitnessSize is the size of the state witness encoding in bytes.
var StateWitnessSize = 226
type State struct {
Memory *Memory `json:"memory"`
......@@ -37,7 +42,11 @@ type State struct {
LastHint hexutil.Bytes `json:"lastHint,omitempty"`
}
func (s *State) EncodeWitness() []byte {
func (s *State) VMStatus() uint8 {
return vmStatus(s.Exited, s.ExitCode)
}
func (s *State) EncodeWitness() StateWitness {
out := make([]byte, 0)
memRoot := s.Memory.MerkleRoot()
out = append(out, memRoot[:]...)
......@@ -60,3 +69,41 @@ func (s *State) EncodeWitness() []byte {
}
return out
}
type StateWitness []byte
const (
VMStatusValid = 0
VMStatusInvalid = 1
VMStatusPanic = 2
VMStatusUnfinished = 3
)
func (sw StateWitness) StateHash() (common.Hash, error) {
if len(sw) != StateWitnessSize {
return common.Hash{}, fmt.Errorf("invalid witness length: got %d, expected %d", len(sw), StateWitnessSize)
}
hash := crypto.Keccak256Hash(sw)
offset := 32*2 + 4*6
exitCode := sw[offset]
exited := sw[offset+1]
status := vmStatus(exited == 1, exitCode)
hash[0] = status
return hash, nil
}
func vmStatus(exited bool, exitCode uint8) uint8 {
if !exited {
return VMStatusUnfinished
}
switch exitCode {
case 0:
return VMStatusValid
case 1:
return VMStatusInvalid
default:
return VMStatusPanic
}
}
......@@ -82,6 +82,53 @@ func TestState(t *testing.T) {
}
}
// Run through all permutations of `exited` / `exitCode` and ensure that the
// correct witness, state hash, and VM status are produced.
func TestStateHash(t *testing.T) {
cases := []struct {
exited bool
exitCode uint8
}{
{exited: false, exitCode: 0},
{exited: false, exitCode: 1},
{exited: false, exitCode: 2},
{exited: false, exitCode: 3},
{exited: true, exitCode: 0},
{exited: true, exitCode: 1},
{exited: true, exitCode: 2},
{exited: true, exitCode: 3},
}
exitedOffset := 32*2 + 4*6
for _, c := range cases {
state := &State{
Memory: NewMemory(),
Exited: c.exited,
ExitCode: c.exitCode,
}
actualWitness := state.EncodeWitness()
actualStateHash, err := StateWitness(actualWitness).StateHash()
require.NoError(t, err, "Error hashing witness")
require.Equal(t, len(actualWitness), StateWitnessSize, "Incorrect witness size")
expectedWitness := make(StateWitness, 226)
memRoot := state.Memory.MerkleRoot()
copy(expectedWitness[:32], memRoot[:])
expectedWitness[exitedOffset] = c.exitCode
var exited uint8
if c.exited {
exited = 1
}
expectedWitness[exitedOffset+1] = uint8(exited)
require.Equal(t, expectedWitness[:], actualWitness[:], "Incorrect witness")
expectedStateHash := crypto.Keccak256Hash(actualWitness)
expectedStateHash[0] = vmStatus(c.exited, c.exitCode)
require.Equal(t, expectedStateHash, actualStateHash, "Incorrect state hash")
}
}
func TestHello(t *testing.T) {
elfProgram, err := elf.Open("../example/bin/hello.elf")
require.NoError(t, err, "open ELF file")
......
......@@ -6,3 +6,4 @@ The directory layout is divided into the following sub-directories.
- [`postmortems/`](./postmortems/): Timestamped post-mortem documents.
- [`security-reviews`](./security-reviews/): Audit summaries and other security review documents.
- [`fault-proof-alpha`](./fault-proof-alpha): Information on the alpha version of the fault proof system.
## Fault Proofs Alpha
The fault proof alpha is a pre-release version of the OP Stack fault proof system.
This documentation provides an overview of the system and instructions on how to help
test the fault proof system.
The overall design of this system along with the APIs and interfaces it exposes are not
finalized and may change without notice.
### Contents
* Overview
* [Deployment Details](./deployments.md)
* [Manual Usage](./manual.md)
* [Creating Traces with Cannon](./cannon.md)
* [Automation with `op-challenger`](./run-challenger.md)
* [Challenging Invalid Output Proposals](./invalid-proposals.md)
## Generate Traces with `cannon` and `op-program`
Normally, `op-challenger` creates the required traces as part of responding to games. For manual testing, however,
it can be useful to generate a trace yourself. This can be done by running `cannon` directly.
### Prerequisites
- The cannon pre-state downloaded from [Goerli deployment](./deployments.md#goerli).
- A Goerli L1 node.
- An archive node is not required.
- Public RPC providers can be used, however a significant number of requests will need to be made which may exceed
rate limits for free plans.
- An OP-Goerli L2 archive node with `debug` APIs enabled.
- An archive node is required to ensure world-state pre-images remain available.
- Public RPC providers are generally not usable as they don’t support the `debug_dbGet` RPC method.
### Compilation
To compile the required programs, in the top level of the monorepo run:
```bash
make cannon-prestate
```
This will compile the `cannon` executable to `cannon/bin/cannon` as well as the `op-program` executable used to fetch
pre-image data to `op-program/bin/op-program`.
### Run Cannon
To run cannon to generate a proof use:
```bash
mkdir -p temp/cannon/proofs temp/cannon/snapshots temp/cannon/preimages
./cannon/bin/cannon run \
--pprof.cpu \
--info-at '%10000000' \
--proof-at '=<TRACE_INDEX>' \
--stop-at '=<STOP_INDEX>' \
--proof-fmt 'temp/cannon/proofs/%d.json' \
--snapshot-at '%1000000000' \
--snapshot-fmt 'temp/cannon/snapshots/%d.json.gz' \
--input <PRESTATE> \
--output temp/cannon/stop-state.json \
-- \
./op-program/bin/op-program \
--network goerli \
--l1 <L1_URL> \
--l2 <L2_URL> \
--l1.head <L1_HEAD> \
--l2.claim <L2_CLAIM> \
--l2.head <L2_HEAD> \
--l2.blocknumber <L2_BLOCK_NUMBER> \
--datadir temp/cannon/preimages \
--log.format terminal \
--server
```
The placeholders are:
- `<TRACE_INDEX>` the index in the trace to generate a proof for
- `<STOP_INDEX>` the index to stop execution at. Typically this is one instruction after `<TRACE_INDEX>` to stop as soon
as the required proof has been generated.
- `<PRESTATE>` the prestate.json downloaded above. Note that this needs to precisely match the prestate used on-chain so
must be the downloaded version and not a version built locally.
- `<L1_URL>` the Goerli L1 JSON RPC endpoint
- `<L2_URL>` the OP-Goerli L2 archive node JSON RPC endpoint
- `<L1_HEAD>` the hash of the L1 head block used for the dispute game
- `<L2_CLAIM>` the output root immediately prior to the disputed root in the L2 output oracle
- `<L2_HEAD>` the hash of the L2 block that `<L2_CLAIM>` is from
- `<L2_BLOCK_NUMBER>` the block number that `<L2_CLAIM>` is from
The generated proof will be stored in the `temp/cannon/proofs/` directory. The hash to use as the claim value is
the `post` field of the generated proof which provides the hash of the cannon state witness after execution of the step.
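For example, the claim value for a given trace index can be read straight from the proof file with `jq` (the file
name follows the `--proof-fmt` pattern used above):
```bash
# Read the post-state hash (the claim value) from a generated proof file.
jq -r .post temp/cannon/proofs/<TRACE_INDEX>.json
```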
Since cannon can be very slow to execute, the above command uses the `--snapshot-at` option to generate a snapshot of
the cannon state every 1000000000 instructions. Once generated, these snapshots can be used as the `--input` to begin
execution at that step rather than from the very beginning. Generated snapshots are stored in
the `temp/cannon/snapshots` directory.
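As a sketch, resuming from a snapshot simply swaps the snapshot file in as the `--input` (file names follow the
`--snapshot-fmt` pattern above); the remaining flags and the `op-program` arguments after `--` stay the same:
```bash
# Same command as above, but starting from a snapshot instead of the prestate.
./cannon/bin/cannon run \
  --input temp/cannon/snapshots/<SNAPSHOT_STEP>.json.gz \
  --proof-at '=<TRACE_INDEX>' \
  --stop-at '=<STOP_INDEX>' \
  --proof-fmt 'temp/cannon/proofs/%d.json' \
  --output temp/cannon/stop-state.json \
  -- \
  ./op-program/bin/op-program \
  ... \
  --server
```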
See `./cannon/bin/cannon --help` for further information on the options available.
### Trace Extension
Fault dispute games always use a trace with a fixed length of `2 ^ MAX_GAME_DEPTH`. The trace generated by `cannon`
stops when the client program exits, so this trace must be extended by repeating the hash of the final state in the
actual trace for all remaining steps. Cannon does not perform this trace extension automatically.
If cannon stops execution before the trace index you requested a proof at, it simply will not generate a proof. When it
stops executing, it will write its final state to `temp/cannon/stop-state.json` (controlled by the `--output` option).
The `step` field of this state contains the last step cannon executed. Once the final step is known, rerun cannon to
generate the proof at that final step and use the `post` hash as the claim value for all later trace indices.
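A minimal sketch of finding that final step with `jq` and requesting a proof there (paths follow the command above):
```bash
# Read the last executed step from the stop state written by --output.
FINAL_STEP=$(jq -r .step temp/cannon/stop-state.json)

# Rerun cannon (remaining flags and op-program arguments as above) asking for a proof at exactly that step.
./cannon/bin/cannon run \
  --proof-at "=${FINAL_STEP}" \
  --stop-at "=$((FINAL_STEP + 1))" \
  ... \
  -- \
  ./op-program/bin/op-program ... --server
```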
## Fault Proof Alpha Deployment Information
### Goerli
Information on the fault proofs alpha deployment to Goerli is not yet available.
### Local Devnet
The local devnet includes a deployment of the fault proof alpha. To start the devnet, in the top level of this repo,
run:
```bash
make devnet-up
```
| Input | Value |
|----------------------|-------------------------------------------------------------|
| Dispute Game Factory | Run `jq -r .DisputeGameFactoryProxy .devnet/addresses.json` |
| Absolute Prestate | `op-program/bin/prestate.json` |
| Max Depth | 30 |
| Max Game Duration | 1200 (20 minutes) |
See the [op-challenger README](../../op-challenger#running-with-cannon-on-local-devnet) for information on
running `op-challenger` against the local devnet.
## Challenging Invalid Output Proposals
The dispute game factory deployed to Goerli reads from the permissioned L2 Output Oracle contract. This restricts games
to challenging valid output proposals and an honest challenger should win every game. To test creating games that
challenge an invalid output proposal, a custom chain is required. The simplest way to do this is using the end-to-end
test utilities in [`op-e2e`](https://github.com/ethereum-optimism/optimism/tree/develop/op-e2e).
A simple starting point has been provided in the `TestCannonProposedOutputRootInvalid` test case
in [`faultproof_test.go`](https://github.com/ethereum-optimism/optimism/blob/6e174ae2b2587d9ac5e2930d7574f85d254ca8b4/op-e2e/faultproof_test.go#L334).
This is a table test that takes the output root to propose, plus functions for move and step to counter the honest
claims. The test asserts that the defender always wins and thus the output root is found to be invalid.
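Assuming the binaries have been built as described elsewhere in these docs, one way to run that test from the top
level of the monorepo is sketched below (the flags and timeout are suggestions, not requirements):
```bash
# Build the components the cannon-based end-to-end tests depend on.
make cannon-prestate op-challenger

# Run the invalid-output-proposal test case.
go test ./op-e2e -run TestCannonProposedOutputRootInvalid -v -timeout 60m
```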
## Manual Fault Proof Interactions
### Creating a Game
The process of disputing an output root starts by creating a new dispute game. There are conceptually three key inputs
required for a dispute game:
- The output root being disputed
- The agreed output root the derivation process will start from
- The L1 head block that defines the canonical L1 chain containing all required batch data to perform the derivation
The creator of the game selects the output root to dispute. It is identified by its L2 block number which can be used to
look up the full details in the L2 output oracle.
The agreed output root is defined as the output root immediately prior to the disputed output root in the L2 output
oracle. Therefore, a dispute game should only be created for the first invalid output root. If it is successfully
disputed, all output roots after it are considered invalid by inference.
The L1 head block can be any L1 block where the disputed output root is present in the L2 output oracle. Proposers
should therefore ensure that all batch data has been submitted to L1 before submitting a proposal. The L1 head block is
recorded in the `BlockOracle` and then referenced by its block number.
Creating a game requires two separate transactions. First the L1 head block is recorded in the `BlockOracle` by calling
its `checkpoint` function. This records the parent of the block the transaction is included in. The `BlockOracle` emits
a log `Checkpoint(blockNumber, blockHash, childTimestamp)`.
Now, using the L1 head along with output root info available in the L2 output oracle, cannon can be executed to
determine the root claim to use when creating the game. In simple cases, where the claim is expected to be incorrect, an
arbitrary hash can be used for claim values. For more advanced cases [cannon can be used](./cannon.md) to generate a
trace, including the claim values to use at specific steps. Note that it is not valid to create a game that disputes an
output root, using the final hash from a trace that confirms the output root is valid. To dispute an output root
successfully, the trace must resolve that the disputed output root is invalid.
The game can then be created by calling the `create` method on the `DisputeGameFactory` contract. This requires three
parameters:
- `gameType` - a `uint8` representing the type of game to create. For fault dispute games using cannon and op-program
traces, the game type is 0.
- `rootClaim` - a `bytes32` hash of the final state from the trace.
- `extraData` - arbitrary bytes which are used as the initial inputs for the game. For fault dispute games using cannon
and op-program traces, this is the abi encoding of `(uint256(l2_block_number), uint256(l1_checkpoint))`.
- `l2_block_number` is the L2 block number from the output root being disputed
- `l1_checkpoint` is the L1 block number recorded by the `BlockOracle` checkpoint
This emits a log event `DisputeGameCreated(gameAddress, gameType, rootClaim)` where `gameAddress` is the address of the
newly created dispute game.
The helper script, [create_game.sh](../../op-challenger#create_gamesh) can be used to easily create a new dispute
game and also acts as an example of using `cast` to manually create a game.
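For reference, the two transactions might look roughly like this with raw `cast` commands (addresses, key handling,
and the exact `create` signature are assumptions; the helper script above is the authoritative example):
```bash
# 1. Record the L1 head: checkpoint() stores the parent of the block this tx lands in.
cast send <BLOCK_ORACLE_ADDRESS> "checkpoint()" \
  --rpc-url <L1_URL> --private-key <PRIVATE_KEY>

# 2. Create the game. gameType 0 = cannon/op-program fault dispute game.
#    extraData is the abi encoding of (l2_block_number, l1_checkpoint).
EXTRA_DATA=$(cast abi-encode "f(uint256,uint256)" <L2_BLOCK_NUMBER> <L1_CHECKPOINT>)
cast send <DISPUTE_GAME_FACTORY_ADDRESS> "create(uint8,bytes32,bytes)" 0 <ROOT_CLAIM> "$EXTRA_DATA" \
  --rpc-url <L1_URL> --private-key <PRIVATE_KEY>
```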
### Performing Moves
The dispute game progresses by actors countering existing claims via either the `attack` or `defend` methods in
the `FaultDisputeGame` contract. Note that only `attack` can be used to counter the root claim. In both cases, there are
two inputs required:
- `parentIndex` - the index in the claims array of the parent claim that is being countered.
- `claim` - a `bytes32` hash of the state at the trace index corresponding to the new claim’s position.
The helper script, [move.sh](../../op-challenger#movesh), can be used to easily perform moves and also
acts as an example of using `cast` to manually call `attack` and `defend`.
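A hedged `cast` sketch of the two moves (the `attack`/`defend` signatures are assumed from the inputs described above):
```bash
# Counter the claim at <PARENT_INDEX> with a new claim hash.
cast send <GAME_ADDRESS> "attack(uint256,bytes32)" <PARENT_INDEX> <CLAIM> \
  --rpc-url <L1_URL> --private-key <PRIVATE_KEY>

# defend takes the same arguments (remember it cannot be used against the root claim).
cast send <GAME_ADDRESS> "defend(uint256,bytes32)" <PARENT_INDEX> <CLAIM> \
  --rpc-url <L1_URL> --private-key <PRIVATE_KEY>
```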
### Performing Steps
Attacking and defending are the only available actions before the maximum depth of the game is reached. To counter claims
at the maximum depth, a step must be performed instead. Calling the `step` method in the `FaultDisputeGame` contract
counters a claim at the maximum depth by running a single step of the cannon VM on chain. The `step` method will revert
unless the cannon execution confirms the claim being countered is invalid. Note, if an actor's clock runs out at any
point, the game can be [resolved](#resolving-a-game).
The inputs for step are:
- `claimIndex` - the index in the claims array of the claim that is being countered
- `isAttack` - Similar to regular moves, steps can either be attacking or defending
- `stateData` - the full cannon state witness to use as the starting state for execution
- `proof` - the additional proof data for the state witness required by cannon to perform the step
When a step is attacking, the caller is asserting that the claim at `claimIndex` is incorrect, and the claim for
the previous trace index (made at a previous level in the game) was correct. The `stateData` must be the pre-image for
the agreed correct hash at the previous trace index. The call to `step` will revert if the post-state from cannon
matches the claim at `claimIndex` since the on-chain execution has proven the claim correct and it should not be
countered.
When a step is defending, the caller is asserting that the claim at `claimIndex` is correct, and the claim for
the next trace index (made at a previous level in the game) is incorrect. The `stateData` must be the pre-image for the
hash in the claim at `claimIndex`.
The `step` function will revert with `ValidStep()` if the cannon execution proves that the claim attempting to be
countered is correct. As a result, claims at the maximum game depth can only be countered by a valid execution of the
single instruction in cannon running on-chain.
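A hedged `cast` sketch of an attacking step (the `step` signature is assumed from the inputs listed above;
`<STATE_DATA>` and `<PROOF>` come from the cannon-generated proof file's `state-data` and `proof-data` fields):
```bash
# isAttack=true: assert the claim at <CLAIM_INDEX> is wrong and the previous trace index was right.
cast send <GAME_ADDRESS> "step(uint256,bool,bytes,bytes)" <CLAIM_INDEX> true <STATE_DATA> <PROOF> \
  --rpc-url <L1_URL> --private-key <PRIVATE_KEY>
```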
#### Populating the Pre-image Oracle
When the instruction to be executed as part of a `step` call reads from some pre-image data, that data must be loaded
into the pre-image oracle prior to calling `step`.
For [local pre-image keys](../../specs/fault-proof.md#type-1-local-key), the pre-image must be populated via
the `FaultDisputeGame` contract by calling the `addLocalData` function.
For [global keccak256 keys](../../specs/fault-proof.md#type-2-global-keccak256-key), the data should be added directly
to the pre-image oracle contract.
### Resolving a Game
The final action required for a game is to resolve it by calling the `resolve` method in the `FaultDisputeGame`
contract. This can only be done once the clock of the left-most uncontested claim’s parent has expired. A game can only
be resolved once.
There are no inputs required for the `resolve` method. When successful, a log event is emitted with the game’s final
status.
The helper script, [resolve.sh](../../op-challenger#resolvesh), can be used to easily resolve a game and also acts as an
example of using `cast` to manually call `resolve` and understand the result.
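A minimal `cast` sketch, first simulating the call to inspect the resulting status and then sending the transaction:
```bash
# Dry-run the resolution to see the resulting status, then resolve for real.
cast call <GAME_ADDRESS> "resolve()" --rpc-url <L1_URL>
cast send <GAME_ADDRESS> "resolve()" --rpc-url <L1_URL> --private-key <PRIVATE_KEY>
```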
## Running op-challenger
`op-challenger` is a program that implements the honest actor algorithm to automatically “play” the dispute games.
### Prerequisites
- The cannon pre-state downloaded from [Goerli deployment](./deployments.md#goerli).
- An account on the Goerli testnet with funds available. The amount of GöETH required depends on the number of claims
the challenger needs to post, but 0.01 ETH should be plenty to start.
- A Goerli L1 node.
- An archive node is not required.
- Public RPC providers can be used, however a significant number of requests will need to be made which may exceed
rate limits for free plans.
- An OP-Goerli L2 archive node with `debug` APIs enabled.
- An archive node is required to ensure world-state pre-images remain available.
- Public RPC providers are generally not usable as they don’t support the `debug_dbGet` RPC method.
- Approximately 3.5 GB of disk space for each game being played.
### Starting op-challenger
When executing `op-challenger`, there are a few placeholders that need to be set to concrete values:
- `<L1_URL>` the Goerli L1 JSON RPC endpoint
- `<DISPUTE_GAME_FACTORY_ADDRESS>` the address of the dispute game factory contract (see
the [Goerli deployment details](./deployments.md#goerli))
- `<PRESTATE>` the prestate.json downloaded above. Note that this needs to precisely match the prestate used on-chain so
must be the downloaded version and not a version built locally (see the [Goerli deployment details](./deployments.md#goerli))
- `<L2_URL>` the OP-Goerli L2 archive node JSON RPC endpoint
- `<PRIVATE_KEY>` the private key for a funded Goerli account. For other ways to specify the account to use
see `./op-challenger/bin/op-challenger --help`
From inside the monorepo directory, run the challenger with these placeholders set.
```bash
# Build the required components
make op-challenger op-program cannon
# Run op-challenger
./op-challenger/bin/op-challenger \
--trace-type cannon \
--l1-eth-rpc <L1_URL> \
--game-factory-address <DISPUTE_GAME_FACTORY_ADDRESS> \
--agree-with-proposed-output=true \
--datadir temp/challenger-goerli \
--cannon-network goerli \
--cannon-bin ./cannon/bin/cannon \
--cannon-server ./op-program/bin/op-program \
--cannon-prestate <PRESTATE> \
--cannon-l2 <L2_URL> \
--private-key <PRIVATE_KEY>
```
### Restricting Games to Play
By default `op-challenger` will generate traces and respond to any game created by the dispute game factory contract. On
a public testnet like Goerli, that could be a large number of games, requiring significant CPU and disk resources. To
avoid this, `op-challenger` supports specifying an allowlist of games for it to respond to with the `--game-allowlist`
option.
```bash
./op-challenger/bin/op-challenger \
... \
--game-allowlist <GAME_ADDR> <GAME_ADDR> <GAME_ADDR>...
```
......@@ -192,7 +192,7 @@ func (db *bridgeTransactionsDB) L2LatestBlockHeader() (*L2BlockHeader, error) {
l2Query := db.gorm.Table("(?) AS l1_deposit_events", l1DepositQuery)
l2Query = l2Query.Joins("INNER JOIN l2_block_headers ON l2_block_headers.timestamp = l1_deposit_events.timestamp")
l2Query = l2Query.Select("l2_block_headers.*")
l2Query = l2Query.Order("l2_block_headers.timestamp DESC").Select("l2_block_headers.*")
var l2Header L2BlockHeader
result := l2Query.Take(&l2Header)
......
......@@ -64,7 +64,7 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, chainConfig
// extract the deposit hash from the previous TransactionDepositedEvent
portalDeposit, ok := portalDeposits[logKey{sentMessage.Event.BlockHash, sentMessage.Event.LogIndex - 1}]
if !ok {
return fmt.Errorf("missing expected preceding TransactionDeposit for SentMessage. tx_hash = %s", sentMessage.Event.TransactionHash)
return fmt.Errorf("expected TransactionDeposit preceding SentMessage event. tx_hash = %s", sentMessage.Event.TransactionHash)
}
l1BridgeMessages[i] = database.L1BridgeMessage{TransactionSourceHash: portalDeposit.DepositTx.SourceHash, BridgeMessage: sentMessage.BridgeMessage}
......@@ -93,11 +93,11 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, chainConfig
// extract the cross domain message hash & deposit source hash from the following events
portalDeposit, ok := portalDeposits[logKey{initiatedBridge.Event.BlockHash, initiatedBridge.Event.LogIndex + 1}]
if !ok {
return fmt.Errorf("missing expected following TransactionDeposit for BridgeInitiated. tx_hash = %s", initiatedBridge.Event.TransactionHash)
return fmt.Errorf("expected TransactionDeposit following BridgeInitiated event. tx_hash = %s", initiatedBridge.Event.TransactionHash)
}
sentMessage, ok := sentMessages[logKey{initiatedBridge.Event.BlockHash, initiatedBridge.Event.LogIndex + 2}]
if !ok {
return fmt.Errorf("missing expected following SentMessage for BridgeInitiated. tx_hash = %s", initiatedBridge.Event.TransactionHash)
return fmt.Errorf("expected SentMessage following TransactionDeposit event. tx_hash = %s", initiatedBridge.Event.TransactionHash)
}
initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash
......@@ -215,7 +215,7 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, chainConfig
finalizedBridge := finalizedBridges[i]
relayedMessage, ok := relayedMessages[logKey{finalizedBridge.Event.BlockHash, finalizedBridge.Event.LogIndex + 1}]
if !ok {
return fmt.Errorf("missing following RelayedMessage for BridgeFinalized event. tx_hash = %s", finalizedBridge.Event.TransactionHash)
return fmt.Errorf("expected RelayedMessage following BridgeFinalized event. tx_hash = %s", finalizedBridge.Event.TransactionHash)
}
// Since the message hash is computed from the relayed message, this ensures the deposit fields must match. For good measure,
......
......@@ -63,7 +63,7 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
// extract the withdrawal hash from the previous MessagePassed event
messagePassed, ok := messagesPassed[logKey{sentMessage.Event.BlockHash, sentMessage.Event.LogIndex - 1}]
if !ok {
return fmt.Errorf("missing expected preceding MessagePassedEvent for SentMessage. tx_hash = %s", sentMessage.Event.TransactionHash)
return fmt.Errorf("expected MessagePassedEvent preceding SentMessage. tx_hash = %s", sentMessage.Event.TransactionHash)
}
l2BridgeMessages[i] = database.L2BridgeMessage{TransactionWithdrawalHash: messagePassed.WithdrawalHash, BridgeMessage: sentMessage.BridgeMessage}
......@@ -92,11 +92,11 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
// extract the cross domain message hash & deposit source hash from the following events
messagePassed, ok := messagesPassed[logKey{initiatedBridge.Event.BlockHash, initiatedBridge.Event.LogIndex + 1}]
if !ok {
return fmt.Errorf("missing expected following MessagePassed for BridgeInitiated. tx_hash = %s", initiatedBridge.Event.TransactionHash)
return fmt.Errorf("expected MessagePassed following BridgeInitiated event. tx_hash = %s", initiatedBridge.Event.TransactionHash)
}
sentMessage, ok := sentMessages[logKey{initiatedBridge.Event.BlockHash, initiatedBridge.Event.LogIndex + 2}]
if !ok {
return fmt.Errorf("missing expected following SentMessage for BridgeInitiated. tx_hash = %s", initiatedBridge.Event.TransactionHash)
return fmt.Errorf("expected SentMessage following MessagePassed event. tx_hash = %s", initiatedBridge.Event.TransactionHash)
}
initiatedBridge.BridgeTransfer.CrossDomainMessageHash = &sentMessage.BridgeMessage.MessageHash
......@@ -163,7 +163,7 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, fromHeight
finalizedBridge := finalizedBridges[i]
relayedMessage, ok := relayedMessages[logKey{finalizedBridge.Event.BlockHash, finalizedBridge.Event.LogIndex + 1}]
if !ok {
return fmt.Errorf("missing following RelayedMessage for BridgeFinalized event. tx_hash = %s", finalizedBridge.Event.TransactionHash)
return fmt.Errorf("expected RelayedMessage following BridgeFinalized event. tx_hash = %s", finalizedBridge.Event.TransactionHash)
}
// Since the message hash is computed from the relayed message, this ensures the withdrawal fields must match. For good measure,
......
......@@ -4,7 +4,7 @@ generator client {
datasource db {
provider = "postgresql"
url = "postgresql://db_username:db_password@localhost:5434/db_name"
url = env("DATABASE_URL")
}
model l1_bridged_tokens {
......@@ -111,7 +111,7 @@ model l2_block_headers {
hash String @id @db.VarChar
parent_hash String @unique @db.VarChar
number Decimal @unique @db.Decimal
timestamp Int @unique
timestamp Int
rlp_bytes String @db.VarChar
l2_contract_events l2_contract_events[]
}
......
......@@ -12,6 +12,7 @@ version:
compile:
cd $(contracts-dir) && \
forge clean && \
pnpm build
bindings: compile bindings-build
......
......@@ -13,7 +13,7 @@ const AlphabetVMStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":
var AlphabetVMStorageLayout = new(solc.StorageLayout)
var AlphabetVMDeployedBin = "0x608060405234801561001057600080fd5b50600436106100365760003560e01c80637dc0d1d01461003b578063f8e0cb9614610085575b600080fd5b60005461005b9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6100986100933660046101a8565b6100a6565b60405190815260200161007c565b60008060007f000000000000000000000000000000000000000000000000000000000000000087876040516100dc929190610214565b60405180910390200361010057600091506100f986880188610224565b905061011f565b61010c8688018861023d565b90925090508161011b8161028e565b9250505b8161012b8260016102c6565b6040805160208101939093528201526060016040516020818303038152906040528051906020012092505050949350505050565b60008083601f84011261017157600080fd5b50813567ffffffffffffffff81111561018957600080fd5b6020830191508360208285010111156101a157600080fd5b9250929050565b600080600080604085870312156101be57600080fd5b843567ffffffffffffffff808211156101d657600080fd5b6101e28883890161015f565b909650945060208701359150808211156101fb57600080fd5b506102088782880161015f565b95989497509550505050565b8183823760009101908152919050565b60006020828403121561023657600080fd5b5035919050565b6000806040838503121561025057600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036102bf576102bf61025f565b5060010190565b600082198211156102d9576102d961025f565b50019056fea164736f6c634300080f000a"
var AlphabetVMDeployedBin = "0x608060405234801561001057600080fd5b50600436106100365760003560e01c80637dc0d1d01461003b578063f8e0cb9614610085575b600080fd5b60005461005b9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b610098610093366004610212565b6100a6565b60405190815260200161007c565b600080600060087f0000000000000000000000000000000000000000000000000000000000000000901b600888886040516100e292919061027e565b6040518091039020901b0361010857600091506101018688018861028e565b9050610127565b610114868801886102a7565b909250905081610123816102f8565b9250505b81610133826001610330565b604080516020810193909352820152606001604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f010000000000000000000000000000000000000000000000000000000000000017979650505050505050565b60008083601f8401126101db57600080fd5b50813567ffffffffffffffff8111156101f357600080fd5b60208301915083602082850101111561020b57600080fd5b9250929050565b6000806000806040858703121561022857600080fd5b843567ffffffffffffffff8082111561024057600080fd5b61024c888389016101c9565b9096509450602087013591508082111561026557600080fd5b50610272878288016101c9565b95989497509550505050565b8183823760009101908152919050565b6000602082840312156102a057600080fd5b5035919050565b600080604083850312156102ba57600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610329576103296102c9565b5060010190565b60008219821115610343576103436102c9565b50019056fea164736f6c634300080f000a"
func init() {
if err := json.Unmarshal([]byte(AlphabetVMStorageLayoutJSON), AlphabetVMStorageLayout); err != nil {
......
......@@ -139,16 +139,15 @@ func (l *loader) FetchClaims(ctx context.Context) ([]types.Claim, error) {
}
// FetchAbsolutePrestateHash fetches the hashed absolute prestate from the fault dispute game.
func (l *loader) FetchAbsolutePrestateHash(ctx context.Context) ([]byte, error) {
func (l *loader) FetchAbsolutePrestateHash(ctx context.Context) (common.Hash, error) {
callOpts := bind.CallOpts{
Context: ctx,
}
absolutePrestate, err := l.caller.ABSOLUTEPRESTATE(&callOpts)
if err != nil {
return nil, err
return common.Hash{}, err
}
returnValue := absolutePrestate[:]
return returnValue, nil
return absolutePrestate, nil
}
......@@ -16,7 +16,6 @@ import (
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
)
......@@ -159,22 +158,21 @@ func (g *GamePlayer) logGameStatus(ctx context.Context, status gameTypes.GameSta
}
type PrestateLoader interface {
FetchAbsolutePrestateHash(ctx context.Context) ([]byte, error)
FetchAbsolutePrestateHash(ctx context.Context) (common.Hash, error)
}
// ValidateAbsolutePrestate validates the absolute prestate of the fault game.
func ValidateAbsolutePrestate(ctx context.Context, trace types.TraceProvider, loader PrestateLoader) error {
providerPrestate, err := trace.AbsolutePreState(ctx)
providerPrestateHash, err := trace.AbsolutePreStateCommitment(ctx)
if err != nil {
return fmt.Errorf("failed to get the trace provider's absolute prestate: %w", err)
}
providerPrestateHash := crypto.Keccak256(providerPrestate)
onchainPrestate, err := loader.FetchAbsolutePrestateHash(ctx)
if err != nil {
return fmt.Errorf("failed to get the onchain absolute prestate: %w", err)
}
if !bytes.Equal(providerPrestateHash, onchainPrestate) {
return fmt.Errorf("trace provider's absolute prestate does not match onchain absolute prestate")
if !bytes.Equal(providerPrestateHash[:], onchainPrestate[:]) {
return fmt.Errorf("trace provider's absolute prestate does not match onchain absolute prestate: Provider: %s | Chain %s", providerPrestateHash.Hex(), onchainPrestate.Hex())
}
return nil
}
......@@ -6,6 +6,7 @@ import (
"fmt"
"testing"
"github.com/ethereum-optimism/optimism/cannon/mipsevm"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types"
"github.com/ethereum-optimism/optimism/op-node/testlog"
......@@ -120,8 +121,9 @@ func TestValidateAbsolutePrestate(t *testing.T) {
t.Run("ValidPrestates", func(t *testing.T) {
prestate := []byte{0x00, 0x01, 0x02, 0x03}
prestateHash := crypto.Keccak256(prestate)
prestateHash[0] = mipsevm.VMStatusUnfinished
mockTraceProvider := newMockTraceProvider(false, prestate)
mockLoader := newMockPrestateLoader(false, prestateHash)
mockLoader := newMockPrestateLoader(false, common.BytesToHash(prestateHash))
err := ValidateAbsolutePrestate(context.Background(), mockTraceProvider, mockLoader)
require.NoError(t, err)
})
......@@ -129,7 +131,7 @@ func TestValidateAbsolutePrestate(t *testing.T) {
t.Run("TraceProviderErrors", func(t *testing.T) {
prestate := []byte{0x00, 0x01, 0x02, 0x03}
mockTraceProvider := newMockTraceProvider(true, prestate)
mockLoader := newMockPrestateLoader(false, prestate)
mockLoader := newMockPrestateLoader(false, common.BytesToHash(prestate))
err := ValidateAbsolutePrestate(context.Background(), mockTraceProvider, mockLoader)
require.ErrorIs(t, err, mockTraceProviderError)
})
......@@ -137,14 +139,14 @@ func TestValidateAbsolutePrestate(t *testing.T) {
t.Run("LoaderErrors", func(t *testing.T) {
prestate := []byte{0x00, 0x01, 0x02, 0x03}
mockTraceProvider := newMockTraceProvider(false, prestate)
mockLoader := newMockPrestateLoader(true, prestate)
mockLoader := newMockPrestateLoader(true, common.BytesToHash(prestate))
err := ValidateAbsolutePrestate(context.Background(), mockTraceProvider, mockLoader)
require.ErrorIs(t, err, mockLoaderError)
})
t.Run("PrestateMismatch", func(t *testing.T) {
mockTraceProvider := newMockTraceProvider(false, []byte{0x00, 0x01, 0x02, 0x03})
mockLoader := newMockPrestateLoader(false, []byte{0x00})
mockLoader := newMockPrestateLoader(false, common.BytesToHash([]byte{0x00}))
err := ValidateAbsolutePrestate(context.Background(), mockTraceProvider, mockLoader)
require.Error(t, err)
})
......@@ -210,21 +212,31 @@ func (m *mockTraceProvider) AbsolutePreState(ctx context.Context) ([]byte, error
}
return m.prestate, nil
}
func (m *mockTraceProvider) AbsolutePreStateCommitment(ctx context.Context) (common.Hash, error) {
prestate, err := m.AbsolutePreState(ctx)
if err != nil {
return common.Hash{}, err
}
hash := common.BytesToHash(crypto.Keccak256(prestate))
hash[0] = mipsevm.VMStatusUnfinished
return hash, nil
}
type mockLoader struct {
prestateError bool
prestate []byte
prestate common.Hash
}
func newMockPrestateLoader(prestateError bool, prestate []byte) *mockLoader {
func newMockPrestateLoader(prestateError bool, prestate common.Hash) *mockLoader {
return &mockLoader{
prestateError: prestateError,
prestate: prestate,
}
}
func (m *mockLoader) FetchAbsolutePrestateHash(ctx context.Context) ([]byte, error) {
func (m *mockLoader) FetchAbsolutePrestateHash(ctx context.Context) (common.Hash, error) {
if m.prestateError {
return nil, mockLoaderError
return common.Hash{}, mockLoaderError
}
return m.prestate, nil
}
package solver
import (
"bytes"
"context"
"errors"
"fmt"
......@@ -132,7 +133,7 @@ func (s *Solver) defend(ctx context.Context, claim types.Claim) (*types.Claim, e
// agreeWithClaim returns true if the claim is correct according to the internal [TraceProvider].
func (s *Solver) agreeWithClaim(ctx context.Context, claim types.ClaimData) (bool, error) {
ourValue, err := s.traceAtPosition(ctx, claim.Position)
return ourValue == claim.Value, err
return bytes.Equal(ourValue[:], claim.Value[:]), err
}
// traceAtPosition returns the [common.Hash] from internal [TraceProvider] at the given [Position].
......
......@@ -6,6 +6,7 @@ import (
"math/big"
"strings"
"github.com/ethereum-optimism/optimism/cannon/mipsevm"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
......@@ -58,7 +59,7 @@ func (ap *AlphabetTraceProvider) Get(ctx context.Context, i uint64) (common.Hash
if err != nil {
return common.Hash{}, err
}
return crypto.Keccak256Hash(claimBytes), nil
return alphabetStateHash(claimBytes), nil
}
// AbsolutePreState returns the absolute pre-state for the alphabet trace.
......@@ -66,11 +67,27 @@ func (ap *AlphabetTraceProvider) AbsolutePreState(ctx context.Context) ([]byte,
return common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000060"), nil
}
func (ap *AlphabetTraceProvider) AbsolutePreStateCommitment(ctx context.Context) (common.Hash, error) {
prestate, err := ap.AbsolutePreState(ctx)
if err != nil {
return common.Hash{}, err
}
hash := common.BytesToHash(crypto.Keccak256(prestate))
hash[0] = mipsevm.VMStatusUnfinished
return hash, nil
}
// BuildAlphabetPreimage constructs the claim bytes for the index and state item.
func BuildAlphabetPreimage(i uint64, letter string) []byte {
return append(IndexToBytes(i), LetterToBytes(letter)...)
}
func alphabetStateHash(state []byte) common.Hash {
h := crypto.Keccak256Hash(state)
h[0] = mipsevm.VMStatusInvalid
return h
}
// IndexToBytes converts an index to a byte slice big endian
func IndexToBytes(i uint64) []byte {
big := new(big.Int)
......
......@@ -6,12 +6,11 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/require"
)
func alphabetClaim(index uint64, letter string) common.Hash {
return crypto.Keccak256Hash(BuildAlphabetPreimage(index, letter))
return alphabetStateHash(BuildAlphabetPreimage(index, letter))
}
// TestAlphabetProvider_Get_ClaimsByTraceIndex tests the [fault.AlphabetProvider] Get function.
......@@ -60,7 +59,7 @@ func FuzzIndexToBytes(f *testing.F) {
// returns the correct pre-image for a index.
func TestGetStepData_Succeeds(t *testing.T) {
ap := NewTraceProvider("abc", 2)
expected := BuildAlphabetPreimage(0, "a'")
expected := BuildAlphabetPreimage(0, "a")
retrieved, proof, data, err := ap.GetStepData(context.Background(), uint64(1))
require.NoError(t, err)
require.Equal(t, expected, retrieved)
......
......@@ -15,9 +15,10 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/cannon/mipsevm"
)
const (
......@@ -25,7 +26,7 @@ const (
)
type proofData struct {
ClaimValue hexutil.Bytes `json:"post"`
ClaimValue common.Hash `json:"post"`
StateData hexutil.Bytes `json:"state-data"`
ProofData hexutil.Bytes `json:"proof-data"`
OracleKey hexutil.Bytes `json:"oracle-key,omitempty"`
......@@ -86,7 +87,7 @@ func (p *CannonTraceProvider) Get(ctx context.Context, i uint64) (common.Hash, e
if err != nil {
return common.Hash{}, err
}
value := common.BytesToHash(proof.ClaimValue)
value := proof.ClaimValue
if value == (common.Hash{}) {
return common.Hash{}, errors.New("proof missing post hash")
......@@ -122,6 +123,18 @@ func (p *CannonTraceProvider) AbsolutePreState(ctx context.Context) ([]byte, err
return state.EncodeWitness(), nil
}
func (p *CannonTraceProvider) AbsolutePreStateCommitment(ctx context.Context) (common.Hash, error) {
state, err := p.AbsolutePreState(ctx)
if err != nil {
return common.Hash{}, fmt.Errorf("cannot load absolute pre-state: %w", err)
}
hash, err := mipsevm.StateWitness(state).StateHash()
if err != nil {
return common.Hash{}, fmt.Errorf("cannot hash absolute pre-state: %w", err)
}
return hash, nil
}
// loadProof will attempt to load or generate the proof data at the specified index
// If the requested index is beyond the end of the actual trace it is extended with no-op instructions.
func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofData, error) {
......@@ -151,9 +164,13 @@ func (p *CannonTraceProvider) loadProof(ctx context.Context, i uint64) (*proofDa
// Extend the trace out to the full length using a no-op instruction that doesn't change any state
// No execution is done, so no proof-data or oracle values are required.
witness := state.EncodeWitness()
witnessHash, err := mipsevm.StateWitness(witness).StateHash()
if err != nil {
return nil, fmt.Errorf("cannot hash witness: %w", err)
}
proof := &proofData{
ClaimValue: crypto.Keccak256(witness),
StateData: witness,
ClaimValue: witnessHash,
StateData: hexutil.Bytes(witness),
ProofData: []byte{},
OracleKey: nil,
OracleValue: nil,
......
......@@ -15,7 +15,6 @@ import (
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-service/ioutil"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
......@@ -43,7 +42,9 @@ func TestGet(t *testing.T) {
value, err := provider.Get(context.Background(), 7000)
require.NoError(t, err)
require.Contains(t, generator.generated, 7000, "should have tried to generate the proof")
require.Equal(t, crypto.Keccak256Hash(generator.finalState.EncodeWitness()), value)
stateHash, err := generator.finalState.EncodeWitness().StateHash()
require.NoError(t, err)
require.Equal(t, stateHash, value)
})
t.Run("MissingPostHash", func(t *testing.T) {
......@@ -86,7 +87,7 @@ func TestGetStepData(t *testing.T) {
Exited: true,
}
generator.proof = &proofData{
ClaimValue: common.Hash{0xaa}.Bytes(),
ClaimValue: common.Hash{0xaa},
StateData: []byte{0xbb},
ProofData: []byte{0xcc},
OracleKey: common.Hash{0xdd}.Bytes(),
......@@ -111,7 +112,7 @@ func TestGetStepData(t *testing.T) {
Exited: true,
}
generator.proof = &proofData{
ClaimValue: common.Hash{0xaa}.Bytes(),
ClaimValue: common.Hash{0xaa},
StateData: []byte{0xbb},
ProofData: []byte{0xcc},
OracleKey: common.Hash{0xdd}.Bytes(),
......@@ -185,7 +186,7 @@ func TestAbsolutePreState(t *testing.T) {
Step: 0,
Registers: [32]uint32{},
}
require.Equal(t, state.EncodeWitness(), preState)
require.Equal(t, []byte(state.EncodeWitness()), preState)
})
}
......
......@@ -74,6 +74,9 @@ type TraceProvider interface {
// AbsolutePreState is the pre-image value of the trace that transitions to the trace value at index 0
AbsolutePreState(ctx context.Context) (preimage []byte, err error)
// AbsolutePreStateCommitment is the commitment of the pre-image value of the trace that transitions to the trace value at index 0
AbsolutePreStateCommitment(ctx context.Context) (hash common.Hash, err error)
}
// ClaimData is the core of a claim. It must be unique inside a specific game.
......
......@@ -65,16 +65,16 @@ func (g *FaultGameHelper) MaxDepth(ctx context.Context) int64 {
}
func (g *FaultGameHelper) waitForClaim(ctx context.Context, errorMsg string, predicate func(claim ContractClaim) bool) {
ctx, cancel := context.WithTimeout(ctx, time.Minute)
timedCtx, cancel := context.WithTimeout(ctx, time.Minute)
defer cancel()
err := wait.For(ctx, time.Second, func() (bool, error) {
count, err := g.game.ClaimDataLen(&bind.CallOpts{Context: ctx})
err := wait.For(timedCtx, time.Second, func() (bool, error) {
count, err := g.game.ClaimDataLen(&bind.CallOpts{Context: timedCtx})
if err != nil {
return false, fmt.Errorf("retrieve number of claims: %w", err)
}
// Search backwards because the new claims are at the end and more likely the ones we want.
for i := count.Int64() - 1; i >= 0; i-- {
claimData, err := g.game.ClaimData(&bind.CallOpts{Context: ctx}, big.NewInt(i))
claimData, err := g.game.ClaimData(&bind.CallOpts{Context: timedCtx}, big.NewInt(i))
if err != nil {
return false, fmt.Errorf("retrieve claim %v: %w", i, err)
}
......@@ -127,10 +127,10 @@ func (g *FaultGameHelper) Resolve(ctx context.Context) {
func (g *FaultGameHelper) WaitForGameStatus(ctx context.Context, expected Status) {
g.t.Logf("Waiting for game %v to have status %v", g.addr, expected)
ctx, cancel := context.WithTimeout(ctx, time.Minute)
timedCtx, cancel := context.WithTimeout(ctx, time.Minute)
defer cancel()
err := wait.For(ctx, time.Second, func() (bool, error) {
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
err := wait.For(timedCtx, time.Second, func() (bool, error) {
ctx, cancel := context.WithTimeout(timedCtx, 30*time.Second)
defer cancel()
status, err := g.game.Status(&bind.CallOpts{Context: ctx})
if err != nil {
......@@ -139,7 +139,60 @@ func (g *FaultGameHelper) WaitForGameStatus(ctx context.Context, expected Status
g.t.Logf("Game %v has state %v, waiting for state %v", g.addr, Status(status), expected)
return expected == Status(status), nil
})
g.require.NoError(err, "wait for game status")
g.require.NoErrorf(err, "wait for game status. Game state: \n%v", g.gameData(ctx))
}
// Mover is a function that either attacks or defends the claim at parentClaimIdx
type Mover func(parentClaimIdx int64)
// Stepper is a function that attempts to perform a step against the claim at parentClaimIdx
type Stepper func(parentClaimIdx int64)
// DefendRootClaim uses the supplied Mover to perform moves in an attempt to defend the root claim.
// It is assumed that the output root being disputed is valid and that an honest op-challenger is already running.
// When the game has reached the maximum depth it waits for the honest challenger to counter the leaf claim with step.
func (g *FaultGameHelper) DefendRootClaim(ctx context.Context, performMove Mover) {
maxDepth := g.MaxDepth(ctx)
for claimCount := int64(1); claimCount < maxDepth; {
g.LogGameData(ctx)
claimCount++
// Wait for the challenger to counter
g.WaitForClaimCount(ctx, claimCount)
// Respond with our own move
performMove(claimCount - 1)
claimCount++
g.WaitForClaimCount(ctx, claimCount)
}
// Wait for the challenger to call step and counter our invalid claim
g.WaitForClaimAtMaxDepth(ctx, true)
}
// ChallengeRootClaim uses the supplied Mover and Stepper to perform moves and steps in an attempt to challenge the root claim.
// It is assumed that the output root being disputed is invalid and that an honest op-challenger is already running.
// When the game has reached the maximum depth it calls the Stepper to attempt to counter the leaf claim.
// Since the output root is invalid, it should not be possible for the Stepper to call step successfully.
func (g *FaultGameHelper) ChallengeRootClaim(ctx context.Context, performMove Mover, attemptStep Stepper) {
maxDepth := g.MaxDepth(ctx)
for claimCount := int64(1); claimCount < maxDepth; {
g.LogGameData(ctx)
// Perform our move
performMove(claimCount - 1)
claimCount++
g.WaitForClaimCount(ctx, claimCount)
// Wait for the challenger to counter
claimCount++
g.WaitForClaimCount(ctx, claimCount)
}
// Confirm the game has reached max depth and the last claim hasn't been countered
g.WaitForClaimAtMaxDepth(ctx, false)
g.LogGameData(ctx)
// It's on us to call step if we want to win but shouldn't be possible
attemptStep(maxDepth)
}
func (g *FaultGameHelper) Attack(ctx context.Context, claimIdx int64, claim common.Hash) {
......@@ -156,6 +209,19 @@ func (g *FaultGameHelper) Defend(ctx context.Context, claimIdx int64, claim comm
g.require.NoError(err, "Defend transaction was not OK")
}
type ErrWithData interface {
ErrorData() interface{}
}
// StepFails attempts to call step and verifies that it fails with ValidStep()
func (g *FaultGameHelper) StepFails(claimIdx int64, isAttack bool, stateData []byte, proof []byte) {
g.t.Logf("Attempting step against claim %v isAttack: %v", claimIdx, isAttack)
_, err := g.game.Step(g.opts, big.NewInt(claimIdx), isAttack, stateData, proof)
errData, ok := err.(ErrWithData)
g.require.Truef(ok, "Error should provide ErrorData method: %v", err)
g.require.Equal("0xfb4e40dd", errData.ErrorData(), "Revert reason should be abi encoded ValidStep()")
}
func (g *FaultGameHelper) gameData(ctx context.Context) string {
opts := &bind.CallOpts{Context: ctx}
maxDepth := int(g.MaxDepth(ctx))
......
......@@ -9,6 +9,7 @@ import (
"testing"
"time"
"github.com/ethereum-optimism/optimism/cannon/mipsevm"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-chain-ops/deployer"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
......@@ -16,6 +17,7 @@ import (
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/cannon"
"github.com/ethereum-optimism/optimism/op-challenger/metrics"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/l2oo"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/rollup"
......@@ -65,7 +67,7 @@ type FactoryHelper struct {
factoryAddr common.Address
factory *bindings.DisputeGameFactory
blockOracle *bindings.BlockOracle
l2oo *bindings.L2OutputOracleCaller
l2ooHelper *l2oo.L2OOHelper
}
func NewFactoryHelper(t *testing.T, ctx context.Context, deployments *genesis.L1Deployments, client *ethclient.Client) *FactoryHelper {
......@@ -81,8 +83,6 @@ func NewFactoryHelper(t *testing.T, ctx context.Context, deployments *genesis.L1
require.NoError(err)
blockOracle, err := bindings.NewBlockOracle(deployments.BlockOracle, client)
require.NoError(err)
l2oo, err := bindings.NewL2OutputOracleCaller(deployments.L2OutputOracleProxy, client)
require.NoError(err, "Error creating l2oo caller")
return &FactoryHelper{
t: t,
......@@ -92,7 +92,7 @@ func NewFactoryHelper(t *testing.T, ctx context.Context, deployments *genesis.L1
factory: factory,
factoryAddr: factoryAddr,
blockOracle: blockOracle,
l2oo: l2oo,
l2ooHelper: l2oo.NewL2OOHelperReadOnly(t, deployments, client),
}
}
......@@ -150,12 +150,8 @@ func (h *FactoryHelper) StartCannonGameWithCorrectRoot(ctx context.Context, roll
challengerOpts = append(challengerOpts, options...)
cfg := challenger.NewChallengerConfig(h.t, l1Endpoint, challengerOpts...)
opts := &bind.CallOpts{Context: ctx}
outputIdx, err := h.l2oo.GetL2OutputIndexAfter(opts, new(big.Int).SetUint64(l2BlockNumber))
h.require.NoError(err, "Fetch challenged output index")
challengedOutput, err := h.l2oo.GetL2Output(opts, outputIdx)
h.require.NoError(err, "Fetch challenged output")
agreedOutput, err := h.l2oo.GetL2Output(opts, new(big.Int).Sub(outputIdx, common.Big1))
h.require.NoError(err, "Fetch agreed output")
challengedOutput := h.l2ooHelper.GetL2OutputAfter(ctx, l2BlockNumber)
agreedOutput := h.l2ooHelper.GetL2OutputBefore(ctx, l2BlockNumber)
l1BlockInfo, err := h.blockOracle.Load(opts, l1Head)
h.require.NoError(err, "Fetch L1 block info")
......@@ -179,6 +175,9 @@ func (h *FactoryHelper) StartCannonGameWithCorrectRoot(ctx context.Context, roll
provider := cannon.NewTraceProviderFromInputs(testlog.Logger(h.t, log.LvlInfo).New("role", "CorrectTrace"), metrics.NoopMetrics, cfg, inputs, cfg.Datadir)
rootClaim, err := provider.Get(ctx, math.MaxUint64)
h.require.NoError(err, "Compute correct root hash")
// Override the VM status to claim the root is invalid
// Otherwise creating the game will fail
rootClaim[0] = mipsevm.VMStatusInvalid
game := h.createCannonGame(ctx, l2BlockNumber, l1Head, rootClaim)
honestHelper := &HonestHelper{
......@@ -246,26 +245,8 @@ func (h *FactoryHelper) prepareCannonGame(ctx context.Context) (uint64, *big.Int
func (h *FactoryHelper) waitForProposals(ctx context.Context) uint64 {
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
opts := &bind.CallOpts{Context: ctx}
latestOutputIndex, err := wait.AndGet(
ctx,
time.Second,
func() (*big.Int, error) {
index, err := h.l2oo.LatestOutputIndex(opts)
if err != nil {
h.t.Logf("Could not get latest output index: %v", err.Error())
return nil, nil
}
h.t.Logf("Latest output index: %v", index)
return index, nil
},
func(index *big.Int) bool {
return index != nil && index.Cmp(big.NewInt(1)) >= 0
})
h.require.NoError(err, "Did not get two output roots")
output, err := h.l2oo.GetL2Output(opts, latestOutputIndex)
h.require.NoErrorf(err, "Could not get latst output root index: %v", latestOutputIndex)
return output.L2BlockNumber.Uint64()
latestOutputIdx := h.l2ooHelper.WaitForProposals(ctx, 2)
return h.l2ooHelper.GetL2Output(ctx, latestOutputIdx).L2BlockNumber.Uint64()
}
// checkpointL1Block stores the current L1 block in the oracle
......
......@@ -42,3 +42,18 @@ func (h *HonestHelper) Defend(ctx context.Context, claimIdx int64) {
h.game.require.NoErrorf(err, "Get correct claim at trace index %v", traceIdx)
h.game.Defend(ctx, claimIdx, value)
}
func (h *HonestHelper) StepFails(ctx context.Context, claimIdx int64, isAttack bool) {
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
claim := h.game.getClaim(ctx, claimIdx)
pos := types.NewPositionFromGIndex(claim.Position.Uint64())
traceIdx := pos.TraceIndex(int(h.game.MaxDepth(ctx)))
if !isAttack {
// If we're defending, then the step will be from the trace to the next one
traceIdx += 1
}
prestate, proofData, _, err := h.correctTrace.GetStepData(ctx, traceIdx)
h.require.NoError(err, "Get step data")
h.game.StepFails(claimIdx, isAttack, prestate, proofData)
}
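For context, a hedged usage sketch with hypothetical values: honest is an *HonestHelper built for the correct trace and 3 is an arbitrary claim index. The helper looks up the honest prestate and proof for that claim's trace index (shifted by one when defending) and asserts that the resulting on-chain step does not succeed:
honest.StepFails(ctx, 3, true)  // attack step using honest data; expected to fail
honest.StepFails(ctx, 3, false) // defend step, one trace index further along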
package l2oo
import (
"context"
"crypto/ecdsa"
"math/big"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/stretchr/testify/require"
)
type L2OOHelper struct {
t *testing.T
require *require.Assertions
client *ethclient.Client
l2oo *bindings.L2OutputOracle
// Nil when read-only
transactOpts *bind.TransactOpts
rollupCfg *rollup.Config
}
func NewL2OOHelperReadOnly(t *testing.T, deployments *genesis.L1Deployments, client *ethclient.Client) *L2OOHelper {
require := require.New(t)
l2oo, err := bindings.NewL2OutputOracle(deployments.L2OutputOracleProxy, client)
require.NoError(err, "Error creating l2oo bindings")
return &L2OOHelper{
t: t,
require: require,
client: client,
l2oo: l2oo,
}
}
func NewL2OOHelper(t *testing.T, deployments *genesis.L1Deployments, client *ethclient.Client, proposerKey *ecdsa.PrivateKey, rollupCfg *rollup.Config) *L2OOHelper {
h := NewL2OOHelperReadOnly(t, deployments, client)
chainID, err := client.ChainID(context.Background())
h.require.NoError(err, "Failed to get chain ID")
transactOpts, err := bind.NewKeyedTransactorWithChainID(proposerKey, chainID)
h.require.NoError(err)
h.transactOpts = transactOpts
h.rollupCfg = rollupCfg
return h
}
// WaitForProposals waits until there are at least the specified number of proposals in the output oracle
// Returns the index of the latest output proposal
func (h *L2OOHelper) WaitForProposals(ctx context.Context, req int64) uint64 {
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
opts := &bind.CallOpts{Context: ctx}
latestOutputIndex, err := wait.AndGet(
ctx,
time.Second,
func() (*big.Int, error) {
index, err := h.l2oo.LatestOutputIndex(opts)
if err != nil {
h.t.Logf("Could not get latest output index: %v", err.Error())
return nil, nil
}
h.t.Logf("Latest output index: %v", index)
return index, nil
},
func(index *big.Int) bool {
return index != nil && index.Cmp(big.NewInt(req-1)) >= 0
})
h.require.NoErrorf(err, "Did not get %v output roots", req)
return latestOutputIndex.Uint64()
}
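WaitForProposals is built on the generic poll-until-predicate helper used throughout these tests: a getter that swallows transient errors by returning a nil value, and a predicate that decides when to stop. A hedged sketch of the same pattern with hypothetical values (the wait.AndGet call shape mirrors the code above; counter and the threshold of 5 are illustrative):
latest, err := wait.AndGet(ctx, time.Second,
	func() (*big.Int, error) {
		n, err := counter() // hypothetical getter; return (nil, nil) on transient errors to keep polling
		if err != nil {
			return nil, nil
		}
		return n, nil
	},
	func(n *big.Int) bool { return n != nil && n.Cmp(big.NewInt(5)) >= 0 })
// latest holds the first value that satisfied the predicate; err is non-nil if the context expired first.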
func (h *L2OOHelper) GetL2Output(ctx context.Context, idx uint64) bindings.TypesOutputProposal {
output, err := h.l2oo.GetL2Output(&bind.CallOpts{Context: ctx}, new(big.Int).SetUint64(idx))
h.require.NoErrorf(err, "Failed to get output root at index: %v", idx)
return output
}
func (h *L2OOHelper) GetL2OutputAfter(ctx context.Context, l2BlockNum uint64) bindings.TypesOutputProposal {
opts := &bind.CallOpts{Context: ctx}
outputIdx, err := h.l2oo.GetL2OutputIndexAfter(opts, new(big.Int).SetUint64(l2BlockNum))
h.require.NoError(err, "Fetch challenged output index")
output, err := h.l2oo.GetL2Output(opts, outputIdx)
h.require.NoError(err, "Fetch challenged output")
return output
}
func (h *L2OOHelper) GetL2OutputBefore(ctx context.Context, l2BlockNum uint64) bindings.TypesOutputProposal {
opts := &bind.CallOpts{Context: ctx}
latestBlockNum, err := h.l2oo.LatestBlockNumber(opts)
h.require.NoError(err, "Failed to get latest output root block number")
var outputIdx *big.Int
if latestBlockNum.Uint64() < l2BlockNum {
outputIdx, err = h.l2oo.LatestOutputIndex(opts)
h.require.NoError(err, "Failed to get latest output index")
} else {
outputIdx, err = h.l2oo.GetL2OutputIndexAfter(opts, new(big.Int).SetUint64(l2BlockNum))
h.require.NoErrorf(err, "Failed to get output index after block %v", l2BlockNum)
h.require.NotZerof(outputIdx.Uint64(), "No l2 output before block %v", l2BlockNum)
outputIdx = new(big.Int).Sub(outputIdx, common.Big1)
}
return h.GetL2Output(ctx, outputIdx.Uint64())
}
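A worked example of the index arithmetic in GetL2OutputBefore, assuming the usual L2OutputOracle semantics where GetL2OutputIndexAfter returns the first proposal at or after the given L2 block. Hypothetical proposals exist for L2 blocks 10, 20 and 30 at output indices 0, 1 and 2:
// GetL2OutputBefore(ctx, 25): GetL2OutputIndexAfter(25) -> index 2 (block 30);
//                             minus one -> index 1, the proposal for block 20.
// GetL2OutputBefore(ctx, 20): GetL2OutputIndexAfter(20) -> index 1 (block 20);
//                             minus one -> index 0, the proposal for block 10.
// GetL2OutputBefore(ctx, 99): 99 is past the latest proposed block (30), so the
//                             latest output index (2, block 30) is used instead.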
func (h *L2OOHelper) PublishNextOutput(ctx context.Context, outputRoot common.Hash) {
h.require.NotNil(h.transactOpts, "Can't publish outputs from a read only L2OOHelper")
nextBlockNum, err := h.l2oo.NextBlockNumber(&bind.CallOpts{Context: ctx})
h.require.NoError(err, "Should get next block number")
genesis := h.rollupCfg.Genesis
targetTimestamp := genesis.L2Time + ((nextBlockNum.Uint64() - genesis.L2.Number) * h.rollupCfg.BlockTime)
timedCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
h.require.NoErrorf(
wait.ForBlockWithTimestamp(timedCtx, h.client, targetTimestamp),
"Wait for L1 block with timestamp >= %v", targetTimestamp)
tx, err := h.l2oo.ProposeL2Output(h.transactOpts, outputRoot, nextBlockNum, [32]byte{}, common.Big0)
h.require.NoErrorf(err, "Failed to propose output root for l2 block number %v", nextBlockNum)
_, err = wait.ForReceiptOK(ctx, h.client, tx.Hash())
h.require.NoErrorf(err, "Proposal for l2 block %v failed", nextBlockNum)
}
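The wait before proposing exists because the oracle only accepts an output once L1 time has caught up with the L2 block the output commits to. A small self-contained sketch of the same timestamp arithmetic, with illustrative numbers; the parameter names mirror the rollup config fields used above:
// proposalTargetTimestamp returns the earliest timestamp at which an output for
// nextBlockNum can be proposed, given the L2 genesis anchor and block time.
func proposalTargetTimestamp(l2GenesisTime, l2GenesisNumber, blockTime, nextBlockNum uint64) uint64 {
	return l2GenesisTime + (nextBlockNum-l2GenesisNumber)*blockTime
}
// With genesis time 1700000000, genesis block 0, 2s blocks and the next proposal at
// L2 block 120, this yields 1700000240 — the timestamp ForBlockWithTimestamp then
// waits for on L1 before the proposal transaction is sent.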
......@@ -85,6 +85,19 @@ func ForBlock(ctx context.Context, client *ethclient.Client, n uint64) error {
return nil
}
func ForBlockWithTimestamp(ctx context.Context, client *ethclient.Client, target uint64) error {
_, err := AndGet(ctx, time.Second, func() (uint64, error) {
head, err := client.BlockByNumber(ctx, nil)
if err != nil {
return 0, err
}
return head.Time(), nil
}, func(actual uint64) bool {
return actual >= target
})
return err
}
func ForNextBlock(ctx context.Context, client *ethclient.Client) error {
current, err := client.BlockNumber(ctx)
if err != nil {
......
......@@ -121,6 +121,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
EnableAdmin: true,
},
L1EpochPollInterval: time.Second * 2,
RuntimeConfigReloadInterval: time.Minute * 10,
ConfigPersistence: &rollupNode.DisabledConfigPersistence{},
},
"verifier": {
......@@ -130,6 +131,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
SequencerEnabled: false,
},
L1EpochPollInterval: time.Second * 4,
RuntimeConfigReloadInterval: time.Minute * 10,
ConfigPersistence: &rollupNode.DisabledConfigPersistence{},
},
},
......
......@@ -30,6 +30,7 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-e2e/config"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/metrics"
rollupNode "github.com/ethereum-optimism/optimism/op-node/node"
......@@ -1389,3 +1390,47 @@ func TestPendingBlockIsLatest(t *testing.T) {
t.Fatal("failed to get pending header with same number as latest header")
})
}
func TestRuntimeConfigReload(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
// to speed up the test, make it reload the config more often, and do not impose a long conf depth
cfg.Nodes["verifier"].RuntimeConfigReloadInterval = time.Second * 5
cfg.Nodes["verifier"].Driver.VerifierConfDepth = 1
sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system")
defer sys.Close()
initialRuntimeConfig := sys.RollupNodes["verifier"].RuntimeConfig()
// close the EL node, since we want to block derivation, to solely rely on the reloading mechanism for updates.
sys.EthInstances["verifier"].Close()
l1 := sys.Clients["l1"]
// Change the system-config via L1
sysCfgContract, err := bindings.NewSystemConfig(cfg.L1Deployments.SystemConfigProxy, l1)
require.NoError(t, err)
newUnsafeBlocksSigner := common.Address{0x12, 0x23, 0x45}
require.NotEqual(t, initialRuntimeConfig.P2PSequencerAddress(), newUnsafeBlocksSigner, "changing to a different address")
opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.SysCfgOwner, cfg.L1ChainIDBig())
require.Nil(t, err)
// the unsafe signer address is part of the runtime config
tx, err := sysCfgContract.SetUnsafeBlockSigner(opts, newUnsafeBlocksSigner)
require.NoError(t, err)
// wait for the change to confirm
_, err = wait.ForReceiptOK(context.Background(), l1, tx.Hash())
require.NoError(t, err)
// wait for the address to change
_, err = retry.Do(context.Background(), 10, retry.Fixed(time.Second*10), func() (struct{}, error) {
v := sys.RollupNodes["verifier"].RuntimeConfig().P2PSequencerAddress()
if v == newUnsafeBlocksSigner {
return struct{}{}, nil
}
return struct{}{}, fmt.Errorf("no change yet, seeing %s but looking for %s", v, newUnsafeBlocksSigner)
})
require.NoError(t, err)
}
......@@ -3,8 +3,8 @@ module github.com/ethereum-optimism/optimism/op-exporter
go 1.20
require (
github.com/ethereum/go-ethereum v1.10.17
github.com/prometheus/client_golang v1.11.1
github.com/ethereum/go-ethereum v1.12.1
github.com/prometheus/client_golang v1.14.0
github.com/sirupsen/logrus v1.7.0
github.com/ybbus/jsonrpc v2.1.2+incompatible
gopkg.in/alecthomas/kingpin.v2 v2.2.6
......@@ -16,30 +16,30 @@ require (
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-logr/logr v0.4.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa // indirect
github.com/googleapis/gnostic v0.4.1 // indirect
github.com/json-iterator/go v1.1.11 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/onsi/gomega v1.16.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.30.0 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
golang.org/x/net v0.7.0 // indirect
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
google.golang.org/appengine v1.6.6 // indirect
google.golang.org/protobuf v1.27.1 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/oauth2 v0.3.0 // indirect
golang.org/x/sys v0.9.0 // indirect
golang.org/x/term v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/api v0.21.2 // indirect
......
......@@ -146,6 +146,13 @@ var (
Required: false,
Value: time.Second * 12 * 32,
}
RuntimeConfigReloadIntervalFlag = &cli.DurationFlag{
Name: "l1.runtime-config-reload-interval",
Usage: "Poll interval for reloading the runtime config, useful when config events are not being picked up. Disabled if 0 or negative.",
EnvVars: prefixEnvVars("L1_RUNTIME_CONFIG_RELOAD_INTERVAL"),
Required: false,
Value: time.Minute * 10,
}
MetricsEnabledFlag = &cli.BoolFlag{
Name: "metrics.enabled",
Usage: "Enable the metrics server",
......@@ -261,6 +268,7 @@ var optionalFlags = []cli.Flag{
SequencerMaxSafeLagFlag,
SequencerL1Confs,
L1EpochPollIntervalFlag,
RuntimeConfigReloadIntervalFlag,
RPCEnableAdmin,
RPCAdminPersistence,
MetricsEnabledFlag,
......
......@@ -41,6 +41,12 @@ type Config struct {
ConfigPersistence ConfigPersistence
// RuntimeConfigReloadInterval defines the interval between runtime config reloads.
// Disabled if 0.
// Runtime config changes should be picked up from log-events,
// but if log-events are not coming in (e.g. not syncing blocks) then the reload ensures the config stays accurate.
RuntimeConfigReloadInterval time.Duration
// Optional
Tracer Tracer
Heartbeat HeartbeatConfig
......
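The new field is surfaced through the l1.runtime-config-reload-interval flag added earlier in this diff (default 10 minutes, disabled at 0). A minimal hedged wiring sketch; the remaining node.Config fields are assumed to be populated elsewhere:
cfg := &node.Config{
	// Refresh the runtime config every 5 minutes even if no L1 log events arrive;
	// 0 disables the background reload and relies on log events alone.
	RuntimeConfigReloadInterval: 5 * time.Minute,
	// ... other required fields elided ...
}
_ = cfg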
......@@ -2,7 +2,6 @@ package node
import (
"context"
"errors"
"fmt"
"time"
......@@ -19,6 +18,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/retry"
)
type OpNode struct {
......@@ -159,27 +159,70 @@ func (n *OpNode) initRuntimeConfig(ctx context.Context, cfg *Config) error {
// attempt to load runtime config, repeat N times
n.runCfg = NewRuntimeConfig(n.log, n.l1Source, &cfg.Rollup)
for i := 0; i < 5; i++ {
confDepth := cfg.Driver.VerifierConfDepth
reload := func(ctx context.Context) (eth.L1BlockRef, error) {
fetchCtx, fetchCancel := context.WithTimeout(ctx, time.Second*10)
l1Head, err := n.l1Source.L1BlockRefByLabel(fetchCtx, eth.Unsafe)
fetchCancel()
if err != nil {
n.log.Error("failed to fetch L1 head for runtime config initialization", "err", err)
continue
return eth.L1BlockRef{}, err
}
// Apply confirmation-distance
blNum := l1Head.Number
if blNum >= confDepth {
blNum -= confDepth
}
fetchCtx, fetchCancel = context.WithTimeout(ctx, time.Second*10)
err = n.runCfg.Load(fetchCtx, l1Head)
confirmed, err := n.l1Source.L1BlockRefByNumber(fetchCtx, blNum)
fetchCancel()
if err != nil {
n.log.Error("failed to fetch confirmed L1 block for runtime config loading", "err", err, "number", blNum)
return eth.L1BlockRef{}, err
}
fetchCtx, fetchCancel = context.WithTimeout(ctx, time.Second*10)
err = n.runCfg.Load(fetchCtx, confirmed)
fetchCancel()
if err != nil {
n.log.Error("failed to fetch runtime config data", "err", err)
continue
return l1Head, err
}
return l1Head, nil
}
return nil
// initialize the runtime config before unblocking
if _, err := retry.Do(ctx, 5, retry.Fixed(time.Second*10), func() (eth.L1BlockRef, error) {
return reload(ctx)
}); err != nil {
return fmt.Errorf("failed to load runtime configuration repeatedly, last error: %w", err)
}
return errors.New("failed to load runtime configuration repeatedly")
// start a background loop, to keep reloading it at the configured reload interval
go func(ctx context.Context, reloadInterval time.Duration) {
if reloadInterval <= 0 {
n.log.Debug("not running runtime-config reloading background loop")
return
}
ticker := time.NewTicker(reloadInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// If the reload fails, we will try again the next interval.
// Missing a runtime-config update is not critical, and we do not want to overwhelm the L1 RPC.
if l1Head, err := reload(ctx); err != nil {
n.log.Warn("failed to reload runtime config", "err", err)
} else {
n.log.Debug("reloaded runtime config", "l1_head", l1Head)
}
case <-ctx.Done():
return
}
}
}(n.resourcesCtx, cfg.RuntimeConfigReloadInterval) // this keeps running after initialization
return nil
}
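The restructuring above splits a reusable reload closure between two callers: a bounded-retry initial load that blocks startup, and a ticker-driven background refresh tied to the node's resources context. A stripped-down sketch of that lifecycle pattern using only the standard library (context, fmt and time imports assumed; the real code uses the op-service retry helper shown above):
// loadOnceThenRefresh runs load up to 5 times at startup, then keeps calling it
// on a fixed interval until ctx is cancelled. interval <= 0 disables refreshing.
func loadOnceThenRefresh(ctx context.Context, interval time.Duration, load func(context.Context) error) error {
	var err error
	for i := 0; i < 5; i++ {
		if err = load(ctx); err == nil {
			break
		}
		time.Sleep(10 * time.Second)
	}
	if err != nil {
		return fmt.Errorf("initial load failed repeatedly, last error: %w", err)
	}
	if interval <= 0 {
		return nil
	}
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// A failed refresh is not fatal; try again on the next tick.
				_ = load(ctx)
			case <-ctx.Done():
				return
			}
		}
	}()
	return nil
}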
func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger) error {
......@@ -397,6 +440,10 @@ func (n *OpNode) P2P() p2p.Node {
return n.p2pNode
}
func (n *OpNode) RuntimeConfig() ReadonlyRuntimeConfig {
return n.runCfg
}
// Close closes all resources.
func (n *OpNode) Close() error {
var result *multierror.Error
......
......@@ -23,6 +23,10 @@ type RuntimeCfgL1Source interface {
ReadStorageAt(ctx context.Context, address common.Address, storageSlot common.Hash, blockHash common.Hash) (common.Hash, error)
}
type ReadonlyRuntimeConfig interface {
P2PSequencerAddress() common.Address
}
// RuntimeConfig maintains runtime-configurable options.
// These options are loaded based on initial loading + updates for every subsequent L1 block.
// Only the *latest* values are maintained however, the runtime config has no concept of chain history,
......
......@@ -85,6 +85,7 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
P2P: p2pConfig,
P2PSigner: p2pSignerSetup,
L1EpochPollInterval: ctx.Duration(flags.L1EpochPollIntervalFlag.Name),
RuntimeConfigReloadInterval: ctx.Duration(flags.RuntimeConfigReloadIntervalFlag.Name),
Heartbeat: node.HeartbeatConfig{
Enabled: ctx.Bool(flags.HeartbeatEnabledFlag.Name),
Moniker: ctx.String(flags.HeartbeatMonikerFlag.Name),
......
......@@ -18,9 +18,10 @@ COPY --from=builder /app/entrypoint.sh /bin/entrypoint.sh
COPY --from=builder /app/bin/ufm /bin/ufm
RUN apk update && \
apk add ca-certificates && \
chmod +x /bin/entrypoint.sh
RUN apk add ca-certificates jq curl bind-tools
VOLUME /etc/ufm
EXPOSE 8080
......
......@@ -39,12 +39,6 @@ address = "0x0000000000000000000000000000000000000000"
private_key = "0000000000000000000000000000000000000000000000000000000000000000"
# Transaction value in wei
tx_value = 100000000000000
# Gas limit
gas_limit = 21000
# Gas tip cap
gas_tip_cap = 2000000000
# Fee cap
gas_fee_cap = 20000000000
[providers.p1]
# URL to the RPC provider
......@@ -52,13 +46,15 @@ url = "http://localhost:8551"
# Read only providers are only used to check for transactions
read_only = true
# Interval to poll the provider for expected transactions
read_interval = "1s"
read_interval = "10s"
# Interval to submit new transactions to the provider
send_interval = "5s"
# Wallet to be used for sending transactions
wallet = "default"
# Network to pool transactions, i.e. providers in the same network will check transactions from each other
network = "op-goerli"
send_interval = "30s"
# Interval between send retries when the txpool returns an "already known" error
send_transaction_retry_interval = "100ms"
# Max time to retry
send_transaction_retry_timeout = "5s"
# Interval between each send transaction to the same network
send_transaction_cool_down = "30s"
# Interval between receipt retrieval
receipt_retrieval_interval = "500ms"
# Max time to check for receipt
......@@ -72,13 +68,15 @@ url = "http://localhost:8552"
# Read only providers are only used to check for transactions
read_only = false
# Interval to poll the provider for expected transactions
read_interval = "2s"
read_interval = "10s"
# Interval to submit new transactions to the provider
send_interval = "3s"
# Wallet to be used for sending transactions
wallet = "default"
# Network to pool transactions, i.e. providers in the same network will check transactions from each other
network = "op-goerli"
send_interval = "30s"
# Interval between send retries when the txpool returns an "already known" error
send_transaction_retry_interval = "100ms"
# Max time to retry
send_transaction_retry_timeout = "5s"
# Interval between each send transaction to the same network
send_transaction_cool_down = "30s"
# Interval between receipt retrieval
receipt_retrieval_interval = "500ms"
# Max time to check for receipt
......
......@@ -49,9 +49,6 @@ type WalletConfig struct {
// transaction parameters
TxValue big.Int `toml:"tx_value"`
GasLimit uint64 `toml:"gas_limit"`
GasTipCap big.Int `toml:"gas_tip_cap"`
GasFeeCap big.Int `toml:"gas_fee_cap"`
}
type ProviderConfig struct {
......@@ -64,6 +61,7 @@ type ProviderConfig struct {
SendInterval TOMLDuration `toml:"send_interval"`
SendTransactionRetryInterval TOMLDuration `toml:"send_transaction_retry_interval"`
SendTransactionRetryTimeout TOMLDuration `toml:"send_transaction_retry_timeout"`
SendTransactionCoolDown TOMLDuration `toml:"send_transaction_cool_down"`
ReceiptRetrievalInterval TOMLDuration `toml:"receipt_retrieval_interval"`
ReceiptRetrievalTimeout TOMLDuration `toml:"receipt_retrieval_timeout"`
......@@ -130,12 +128,6 @@ func (c *Config) Validate() error {
if wallet.TxValue.BitLen() == 0 {
return errors.Errorf("wallet [%s] tx_value is missing", name)
}
if wallet.GasLimit == 0 {
return errors.Errorf("wallet [%s] gas_limit is missing", name)
}
if wallet.GasFeeCap.BitLen() == 0 {
return errors.Errorf("wallet [%s] gas_fee_cap is missing", name)
}
}
for name, provider := range c.Providers {
......@@ -154,6 +146,9 @@ func (c *Config) Validate() error {
if provider.SendTransactionRetryTimeout == 0 {
return errors.Errorf("provider [%s] send_transaction_retry_timeout is missing", name)
}
if provider.SendTransactionCoolDown == 0 {
return errors.Errorf("provider [%s] send_transaction_cool_down is missing", name)
}
if provider.ReceiptRetrievalInterval == 0 {
return errors.Errorf("provider [%s] receipt_retrieval_interval is missing", name)
}
......
......@@ -2,6 +2,7 @@ package clients
import (
"context"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-ufm/pkg/metrics"
......@@ -22,7 +23,7 @@ func Dial(providerName string, url string) (*InstrumentedEthClient, error) {
start := time.Now()
c, err := ethclient.Dial(url)
if err != nil {
metrics.RecordError(providerName, "ethclient.Dial")
metrics.RecordErrorDetails(providerName, "ethclient.Dial", err)
return nil, err
}
metrics.RecordRPCLatency(providerName, "ethclient", "Dial", time.Since(start))
......@@ -34,7 +35,7 @@ func (i *InstrumentedEthClient) TransactionByHash(ctx context.Context, hash comm
tx, isPending, err := i.c.TransactionByHash(ctx, hash)
if err != nil {
if !i.ignorableErrors(err) {
metrics.RecordError(i.providerName, "ethclient.TransactionByHash")
metrics.RecordErrorDetails(i.providerName, "ethclient.TransactionByHash", err)
}
return nil, false, err
}
......@@ -46,7 +47,7 @@ func (i *InstrumentedEthClient) PendingNonceAt(ctx context.Context, address stri
start := time.Now()
nonce, err := i.c.PendingNonceAt(ctx, common.HexToAddress(address))
if err != nil {
metrics.RecordError(i.providerName, "ethclient.PendingNonceAt")
metrics.RecordErrorDetails(i.providerName, "ethclient.PendingNonceAt", err)
return 0, err
}
metrics.RecordRPCLatency(i.providerName, "ethclient", "PendingNonceAt", time.Since(start))
......@@ -58,7 +59,7 @@ func (i *InstrumentedEthClient) TransactionReceipt(ctx context.Context, txHash c
receipt, err := i.c.TransactionReceipt(ctx, txHash)
if err != nil {
if !i.ignorableErrors(err) {
metrics.RecordError(i.providerName, "ethclient.TransactionReceipt")
metrics.RecordErrorDetails(i.providerName, "ethclient.TransactionReceipt", err)
}
return nil, err
}
......@@ -71,7 +72,7 @@ func (i *InstrumentedEthClient) SendTransaction(ctx context.Context, tx *types.T
err := i.c.SendTransaction(ctx, tx)
if err != nil {
if !i.ignorableErrors(err) {
metrics.RecordError(i.providerName, "ethclient.SendTransaction")
metrics.RecordErrorDetails(i.providerName, "ethclient.SendTransaction", err)
}
return err
}
......@@ -79,6 +80,39 @@ func (i *InstrumentedEthClient) SendTransaction(ctx context.Context, tx *types.T
return err
}
func (i *InstrumentedEthClient) EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64, error) {
start := time.Now()
gas, err := i.c.EstimateGas(ctx, msg)
if err != nil {
metrics.RecordErrorDetails(i.providerName, "ethclient.EstimateGas", err)
return 0, err
}
metrics.RecordRPCLatency(i.providerName, "ethclient", "EstimateGas", time.Since(start))
return gas, err
}
func (i *InstrumentedEthClient) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
start := time.Now()
gasTipCap, err := i.c.SuggestGasTipCap(ctx)
if err != nil {
metrics.RecordErrorDetails(i.providerName, "ethclient.SuggestGasTipCap", err)
return nil, err
}
metrics.RecordRPCLatency(i.providerName, "ethclient", "SuggestGasTipCap", time.Since(start))
return gasTipCap, err
}
func (i *InstrumentedEthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
start := time.Now()
header, err := i.c.HeaderByNumber(ctx, number)
if err != nil {
metrics.RecordErrorDetails(i.providerName, "ethclient.HeaderByNumber", err)
return nil, err
}
metrics.RecordRPCLatency(i.providerName, "ethclient", "HeaderByNumber", time.Since(start))
return header, err
}
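The three new wrappers above follow the same instrumentation shape as the existing ones: time the underlying call, record error details on failure, record RPC latency on success. A hedged generic sketch of that shape, assuming a Go toolchain with generics and the metrics functions shown in this diff (the ignorable-error filtering some wrappers apply is omitted):
// instrument wraps an RPC call with the latency and error-detail metrics used above.
func instrument[T any](providerName, client, method string, call func() (T, error)) (T, error) {
	start := time.Now()
	v, err := call()
	if err != nil {
		metrics.RecordErrorDetails(providerName, client+"."+method, err)
		return v, err
	}
	metrics.RecordRPCLatency(providerName, client, method, time.Since(start))
	return v, nil
}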
func (i *InstrumentedEthClient) ignorableErrors(err error) bool {
msg := err.Error()
// we dont use errors.Is because eth client actually uses errors.New,
......
......@@ -22,7 +22,7 @@ func NewSignerClient(providerName string, logger log.Logger, endpoint string, tl
start := time.Now()
c, err := signer.NewSignerClient(logger, endpoint, tlsConfig)
if err != nil {
metrics.RecordError(providerName, "signer.NewSignerClient")
metrics.RecordErrorDetails(providerName, "signer.NewSignerClient", err)
return nil, err
}
metrics.RecordRPCLatency(providerName, "signer", "NewSignerClient", time.Since(start))
......@@ -33,7 +33,7 @@ func (i *InstrumentedSignerClient) SignTransaction(ctx context.Context, chainId
start := time.Now()
tx, err := i.c.SignTransaction(ctx, chainId, tx)
if err != nil {
metrics.RecordError(i.providerName, "signer.SignTransaction")
metrics.RecordErrorDetails(i.providerName, "signer.SignTransaction", err)
return nil, err
}
metrics.RecordRPCLatency(i.providerName, "signer", "SignTransaction", time.Since(start))
......
......@@ -2,6 +2,7 @@ package provider
import (
"context"
"math/big"
"time"
"github.com/ethereum-optimism/optimism/op-ufm/pkg/metrics"
......@@ -21,7 +22,7 @@ import (
// RoundTrip send a new transaction to measure round trip latency
func (p *Provider) RoundTrip(ctx context.Context) {
log.Debug("roundTripLatency",
log.Debug("RoundTrip",
"provider", p.name)
client, err := iclients.Dial(p.name, p.config.URL)
......@@ -33,33 +34,38 @@ func (p *Provider) RoundTrip(ctx context.Context) {
return
}
var nonce uint64
p.txPool.M.Lock()
if p.txPool.Nonce == uint64(0) {
nonce, err = client.PendingNonceAt(ctx, p.walletConfig.Address)
if err != nil {
log.Error("cant get nounce",
"provider", p.name,
"err", err)
p.txPool.M.Unlock()
return
}
p.txPool.Nonce = nonce
} else {
p.txPool.Nonce++
nonce = p.txPool.Nonce
}
p.txPool.M.Unlock()
p.txPool.ExclusiveSend.Lock()
defer p.txPool.ExclusiveSend.Unlock()
txHash := common.Hash{}
attempt := 0
nonce := uint64(0)
// used for timeout
firstAttemptAt := time.Now()
// used for actual round trip time (disregard retry time)
roundTripStartedAt := time.Now()
var roundTripStartedAt time.Time
for {
// sleep until we get a clear to send
for {
tx := p.createTx(nonce)
txHash = tx.Hash()
coolDown := time.Duration(p.config.SendTransactionCoolDown) - time.Since(p.txPool.LastSend)
if coolDown > 0 {
time.Sleep(coolDown)
} else {
break
}
}
tx, err := p.createTx(ctx, client, nonce)
nonce = tx.Nonce()
if err != nil {
log.Error("cant create tx",
"provider", p.name,
"nonce", nonce,
"err", err)
return
}
signedTx, err := p.sign(ctx, tx)
if err != nil {
......@@ -69,7 +75,6 @@ func (p *Provider) RoundTrip(ctx context.Context) {
"err", err)
return
}
txHash = signedTx.Hash()
roundTripStartedAt = time.Now()
......@@ -78,25 +83,29 @@ func (p *Provider) RoundTrip(ctx context.Context) {
if err.Error() == txpool.ErrAlreadyKnown.Error() ||
err.Error() == txpool.ErrReplaceUnderpriced.Error() ||
err.Error() == core.ErrNonceTooLow.Error() {
log.Warn("cant send transaction (retryable)",
"provider", p.name,
"err", err,
"nonce", nonce)
if time.Since(firstAttemptAt) >= time.Duration(p.config.SendTransactionRetryTimeout) {
log.Error("send transaction timed out (known already)",
"provider", p.name,
"hash", txHash.Hex(),
"nonce", nonce,
"elapsed", time.Since(firstAttemptAt),
"attempt", attempt,
"nonce", nonce)
metrics.RecordError(p.name, "ethclient.SendTransaction.nonce")
"attempt", attempt)
metrics.RecordErrorDetails(p.name, "send.timeout", err)
return
}
log.Warn("tx already known, incrementing nonce and trying again",
"provider", p.name,
"nonce", nonce)
time.Sleep(time.Duration(p.config.SendTransactionRetryInterval))
p.txPool.M.Lock()
p.txPool.Nonce++
nonce = p.txPool.Nonce
p.txPool.M.Unlock()
nonce++
attempt++
if attempt%10 == 0 {
log.Debug("retrying send transaction...",
......@@ -108,6 +117,7 @@ func (p *Provider) RoundTrip(ctx context.Context) {
} else {
log.Error("cant send transaction",
"provider", p.name,
"nonce", nonce,
"err", err)
metrics.RecordErrorDetails(p.name, "ethclient.SendTransaction", err)
return
......@@ -131,6 +141,7 @@ func (p *Provider) RoundTrip(ctx context.Context) {
SentAt: sentAt,
SeenBy: make(map[string]time.Time),
}
p.txPool.LastSend = sentAt
p.txPool.M.Unlock()
var receipt *types.Receipt
......@@ -140,13 +151,17 @@ func (p *Provider) RoundTrip(ctx context.Context) {
log.Error("receipt retrieval timed out",
"provider", p.name,
"hash", txHash,
"nonce", nonce,
"elapsed", time.Since(sentAt))
metrics.RecordErrorDetails(p.name, "receipt.timeout", err)
return
}
time.Sleep(time.Duration(p.config.ReceiptRetrievalInterval))
if attempt%10 == 0 {
log.Debug("checking for receipt...",
"provider", p.name,
"hash", txHash,
"nonce", nonce,
"attempt", attempt,
"elapsed", time.Since(sentAt))
}
......@@ -155,6 +170,7 @@ func (p *Provider) RoundTrip(ctx context.Context) {
log.Error("cant get receipt for transaction",
"provider", p.name,
"hash", txHash.Hex(),
"nonce", nonce,
"err", err)
return
}
......@@ -168,6 +184,7 @@ func (p *Provider) RoundTrip(ctx context.Context) {
log.Info("got transaction receipt",
"hash", txHash.Hex(),
"nonce", nonce,
"roundTripLatency", roundTripLatency,
"provider", p.name,
"blockNumber", receipt.BlockNumber,
......@@ -175,20 +192,83 @@ func (p *Provider) RoundTrip(ctx context.Context) {
"gasUsed", receipt.GasUsed)
}
func (p *Provider) createTx(nonce uint64) *types.Transaction {
toAddress := common.HexToAddress(p.walletConfig.Address)
func (p *Provider) createTx(ctx context.Context, client *iclients.InstrumentedEthClient, nonce uint64) (*types.Transaction, error) {
var err error
if nonce == 0 {
nonce, err = client.PendingNonceAt(ctx, p.walletConfig.Address)
if err != nil {
log.Error("cant get nounce",
"provider", p.name,
"nonce", nonce,
"err", err)
return nil, err
}
}
gasTipCap, err := client.SuggestGasTipCap(ctx)
if err != nil {
log.Error("cant get gas tip cap",
"provider", p.name,
"err", err)
return nil, err
}
gasTipCap = new(big.Int).Mul(gasTipCap, big.NewInt(110))
gasTipCap = new(big.Int).Div(gasTipCap, big.NewInt(100))
head, err := client.HeaderByNumber(ctx, nil)
if err != nil {
log.Error("cant get base fee from head",
"provider", p.name,
"err", err)
return nil, err
}
baseFee := head.BaseFee
gasFeeCap := new(big.Int).Add(
gasTipCap,
new(big.Int).Mul(baseFee, big.NewInt(2)))
addr := common.HexToAddress(p.walletConfig.Address)
var data []byte
tx := types.NewTx(&types.DynamicFeeTx{
dynamicTx := &types.DynamicFeeTx{
ChainID: &p.walletConfig.ChainID,
Nonce: nonce,
GasFeeCap: &p.walletConfig.GasFeeCap,
GasTipCap: &p.walletConfig.GasTipCap,
Gas: p.walletConfig.GasLimit,
To: &toAddress,
GasFeeCap: gasFeeCap,
GasTipCap: gasTipCap,
To: &addr,
Value: &p.walletConfig.TxValue,
Data: data,
}
gas, err := client.EstimateGas(ctx, ethereum.CallMsg{
From: addr,
To: &addr,
GasFeeCap: gasFeeCap,
GasTipCap: gasTipCap,
Data: dynamicTx.Data,
Value: dynamicTx.Value,
})
return tx
if err != nil {
log.Error("cant estimate gas",
"provider", p.name,
"err", err)
return nil, err
}
dynamicTx.Gas = gas
tx := types.NewTx(dynamicTx)
log.Info("tx created",
"provider", p.name,
"from", addr,
"to", dynamicTx.To,
"nonce", dynamicTx.Nonce,
"value", dynamicTx.Value,
"gas", dynamicTx.Gas,
"gasTipCap", dynamicTx.GasTipCap,
"gasFeeCap", dynamicTx.GasFeeCap,
)
return tx, nil
}
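The fee fields above follow a common EIP-1559 heuristic: take the node's suggested tip, bump it by 10%, and set the fee cap to the bumped tip plus twice the current base fee so the transaction survives a doubling of the base fee. A worked example with illustrative numbers (2 gwei suggested tip, 10 gwei base fee):
suggestedTip := big.NewInt(2_000_000_000) // 2 gwei (illustrative)
baseFee := big.NewInt(10_000_000_000)     // 10 gwei (illustrative)

tip := new(big.Int).Div(new(big.Int).Mul(suggestedTip, big.NewInt(110)), big.NewInt(100))
feeCap := new(big.Int).Add(tip, new(big.Int).Mul(baseFee, big.NewInt(2)))
// tip    = 2.2 gwei
// feeCap = 22.2 gwei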
func (p *Provider) sign(ctx context.Context, tx *types.Transaction) (*types.Transaction, error) {
......
......@@ -15,7 +15,11 @@ type NetworkTransactionPool struct {
M sync.Mutex
Transactions map[string]*TransactionState
Expected int
Nonce uint64
// Last time a transaction was sent
LastSend time.Time
// Prevents concurrent transaction send
ExclusiveSend sync.Mutex
}
type TransactionState struct {
......
......@@ -26,3 +26,6 @@ deployments/hardhat
deployments/getting-started
deployments/*/.deploy
deployments/1337
# Devnet config which changes with each 'make devnet-up'
deploy-config/devnetL1.json
......@@ -43,7 +43,7 @@
"eip1559Elasticity": 6,
"l1GenesisBlockTimestamp": "0x64c811bf",
"l2GenesisRegolithTimeOffset": "0x0",
"faultGameAbsolutePrestate": "0x41c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98",
"faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98",
"faultGameMaxDepth": 30,
"faultGameMaxDuration": 1200,
"systemConfigStartBlock": 0
......
################################################################
# PROFILE: DEFAULT (Local) #
################################################################
[profile.default]
# Compilation settings
src = 'src'
out = 'forge-artifacts'
script = 'scripts'
optimizer = true
optimizer_runs = 999999
remappings = [
'@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts',
'@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts',
'@rari-capital/solmate/=lib/solmate',
"@cwia/=lib/clones-with-immutable-args/src",
'@cwia/=lib/clones-with-immutable-args/src',
'forge-std/=lib/forge-std/src',
'ds-test/=lib/forge-std/lib/ds-test/src'
]
......@@ -17,9 +22,9 @@ extra_output = ['devdoc', 'userdoc', 'metadata', 'storageLayout']
bytecode_hash = 'none'
build_info = true
build_info_path = 'artifacts/build-info'
ffi = true
fuzz_runs = 16
# Test / Script Runner Settings
ffi = true
fs_permissions = [
{ access='read-write', path='./.resource-metering.csv' },
{ access='read-write', path='./deployments/' },
......@@ -29,14 +34,19 @@ fs_permissions = [
{ access='write', path='./semver-lock.json' },
]
[fuzz]
runs = 64
[fmt]
line_length=120
multiline_func_header="all"
multiline_func_header='all'
bracket_spacing=true
wrap_comments=true
ignore = [
'src/vendor/WETH9.sol'
]
ignore = ['src/vendor/WETH9.sol']
################################################################
# PROFILE: CI #
################################################################
[profile.ci]
fuzz_runs = 512
[profile.ci.fuzz]
runs = 512
......@@ -8,6 +8,7 @@ bytes32 constant EMPTY_UID = 0;
uint64 constant NO_EXPIRATION_TIME = 0;
error AccessDenied();
error DeadlineExpired();
error InvalidEAS();
error InvalidLength();
error InvalidSignature();
......
......@@ -17,7 +17,8 @@ interface ISchemaRegistry {
/// @dev Emitted when a new schema has been registered
/// @param uid The schema UID.
/// @param registerer The address of the account used to register the schema.
event Registered(bytes32 indexed uid, address registerer);
/// @param schema The schema data.
event Registered(bytes32 indexed uid, address indexed registerer, SchemaRecord schema);
/// @dev Submits and reserves a new schema
/// @param schema The schema data schema.
......
......@@ -20,8 +20,8 @@ contract SchemaRegistry is ISchemaRegistry, Semver {
uint256[MAX_GAP - 1] private __gap;
/// @dev Creates a new SchemaRegistry instance.
/// @custom:semver 1.0.3
constructor() Semver(1, 0, 3) { }
/// @custom:semver 1.2.0
constructor() Semver(1, 2, 0) { }
/// @inheritdoc ISchemaRegistry
function register(string calldata schema, ISchemaResolver resolver, bool revocable) external returns (bytes32) {
......@@ -36,7 +36,7 @@ contract SchemaRegistry is ISchemaRegistry, Semver {
schemaRecord.uid = uid;
_registry[uid] = schemaRecord;
emit Registered(uid, msg.sender);
emit Registered(uid, msg.sender, schemaRecord);
return uid;
}
......
......@@ -6,7 +6,7 @@ import { Attestation } from "../Common.sol";
/// @title ISchemaResolver
/// @notice The interface of an optional schema resolver.
interface ISchemaResolver {
/// @notice Checks if the resolve can be sent ETH.
/// @notice Checks if the resolver can be sent ETH.
/// @return Whether the resolver supports ETH transfers.
function isPayable() external pure returns (bool);
......
......@@ -103,7 +103,9 @@ contract MIPS {
from, to := copyMem(from, to, 4) // lo
from, to := copyMem(from, to, 4) // hi
from, to := copyMem(from, to, 4) // heap
let exitCode := mload(from)
from, to := copyMem(from, to, 1) // exitCode
let exited := mload(from)
from, to := copyMem(from, to, 1) // exited
from, to := copyMem(from, to, 8) // step
from := add(from, 32) // offset to registers
......@@ -117,8 +119,24 @@ contract MIPS {
// Log the resulting MIPS state, for debugging
log0(start, sub(to, start))
// Compute the hash of the resulting MIPS state
// Determine the VM status
let status := 0
switch exited
case 1 {
switch exitCode
// VMStatusValid
case 0 { status := 0 }
// VMStatusInvalid
case 1 { status := 1 }
// VMStatusPanic
default { status := 2 }
}
// VMStatusUnfinished
default { status := 3 }
// Compute the hash of the resulting MIPS state and set the status byte
out_ := keccak256(start, sub(to, start))
out_ := or(and(not(shl(248, 0xFF)), out_), shl(248, status))
}
}
......
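A hedged Go sketch of the packing rule the assembly above implements: hash the encoded state with keccak, then overwrite the most significant byte with a status derived from the exited flag and exit code. It assumes the go-ethereum crypto package and is illustrative rather than the canonical cannon implementation; the status mapping matches the vmStatus helper in the test diff below:
// stateHash packs the VM status into the first byte of the keccak state commitment.
func stateHash(encodedState []byte, exited bool, exitCode uint8) [32]byte {
	h := crypto.Keccak256Hash(encodedState) // github.com/ethereum/go-ethereum/crypto
	status := uint8(3)                      // VMStatusUnfinished
	if exited {
		switch exitCode {
		case 0:
			status = 0 // VMStatusValid
		case 1:
			status = 1 // VMStatusInvalid
		default:
			status = 2 // VMStatusPanic
		}
	}
	h[0] = status
	return h
}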
......@@ -15,6 +15,11 @@ error NoImplementation(GameType gameType);
/// @param uuid The UUID of the dispute game that already exists.
error GameAlreadyExists(Hash uuid);
/// @notice Thrown when the root claim has an unexpected VM status.
/// Some games can only start with a root-claim with a specific status.
/// @param rootClaim is the claim that was unexpected.
error UnexpectedRootClaim(Claim rootClaim);
////////////////////////////////////////////////////////////////
// `FaultDisputeGame` Errors //
////////////////////////////////////////////////////////////////
......
......@@ -4,6 +4,7 @@ pragma solidity 0.8.15;
import { CommonTest } from "./CommonTest.t.sol";
import { MIPS } from "src/cannon/MIPS.sol";
import { PreimageOracle } from "src/cannon/PreimageOracle.sol";
import "src/libraries/DisputeTypes.sol";
contract MIPS_Test is CommonTest {
MIPS internal mips;
......@@ -1553,10 +1554,29 @@ contract MIPS_Test is CommonTest {
);
}
/// @dev MIPS VM status codes:
/// 0. Exited with success (Valid)
/// 1. Exited with success (Invalid)
/// 2. Exited with failure (Panic)
/// 3. Unfinished
function vmStatus(MIPS.State memory state) internal pure returns (VMStatus out_) {
if (!state.exited) {
return VMStatuses.UNFINISHED;
} else if (state.exitCode == 0) {
return VMStatuses.VALID;
} else if (state.exitCode == 1) {
return VMStatuses.INVALID;
} else {
return VMStatuses.PANIC;
}
}
function outputState(MIPS.State memory state) internal pure returns (bytes32 out_) {
bytes memory enc = encodeState(state);
VMStatus status = vmStatus(state);
assembly {
out_ := keccak256(add(enc, 0x20), 226)
out_ := or(and(not(shl(248, 0xFF)), out_), shl(248, status))
}
}
......