Commit 30db9dfc authored by mergify[bot], committed by GitHub

Merge branch 'develop' into aj/add-sepolia-op-program

parents 34d1dadb 47c96e19
...@@ -9,6 +9,7 @@ example/bin ...@@ -9,6 +9,7 @@ example/bin
contracts/out contracts/out
state.json state.json
*.json *.json
*.json.gz
*.pprof *.pprof
*.out *.out
bin bin
package cmd package cmd
import ( import (
"compress/gzip"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"os" "os"
"strings"
) )
func loadJSON[X any](inputPath string) (*X, error) { func loadJSON[X any](inputPath string) (*X, error) {
if inputPath == "" { if inputPath == "" {
return nil, errors.New("no path specified") return nil, errors.New("no path specified")
} }
var f io.ReadCloser
f, err := os.OpenFile(inputPath, os.O_RDONLY, 0) f, err := os.OpenFile(inputPath, os.O_RDONLY, 0)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open file %q: %w", inputPath, err) return nil, fmt.Errorf("failed to open file %q: %w", inputPath, err)
} }
defer f.Close() defer f.Close()
if isGzip(inputPath) {
f, err = gzip.NewReader(f)
if err != nil {
return nil, fmt.Errorf("create gzip reader: %w", err)
}
defer f.Close()
}
var state X var state X
if err := json.NewDecoder(f).Decode(&state); err != nil { if err := json.NewDecoder(f).Decode(&state); err != nil {
return nil, fmt.Errorf("failed to decode file %q: %w", inputPath, err) return nil, fmt.Errorf("failed to decode file %q: %w", inputPath, err)
...@@ -33,6 +43,11 @@ func writeJSON[X any](outputPath string, value X, outIfEmpty bool) error { ...@@ -33,6 +43,11 @@ func writeJSON[X any](outputPath string, value X, outIfEmpty bool) error {
} }
defer f.Close() defer f.Close()
out = f out = f
if isGzip(outputPath) {
g := gzip.NewWriter(f)
defer g.Close()
out = g
}
} else if outIfEmpty { } else if outIfEmpty {
out = os.Stdout out = os.Stdout
} else { } else {
...@@ -48,3 +63,7 @@ func writeJSON[X any](outputPath string, value X, outIfEmpty bool) error { ...@@ -48,3 +63,7 @@ func writeJSON[X any](outputPath string, value X, outIfEmpty bool) error {
} }
return nil return nil
} }
func isGzip(path string) bool {
return strings.HasSuffix(path, ".gz")
}
package cmd
import (
"encoding/json"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
)
func TestRoundTripJSON(t *testing.T) {
dir := t.TempDir()
file := filepath.Join(dir, "test.json")
data := &jsonTestData{A: "yay", B: 3}
err := writeJSON(file, data, false)
require.NoError(t, err)
// Confirm the file is uncompressed
fileContent, err := os.ReadFile(file)
require.NoError(t, err)
err = json.Unmarshal(fileContent, &jsonTestData{})
require.NoError(t, err)
var result *jsonTestData
result, err = loadJSON[jsonTestData](file)
require.NoError(t, err)
require.EqualValues(t, data, result)
}
func TestRoundTripJSONWithGzip(t *testing.T) {
dir := t.TempDir()
file := filepath.Join(dir, "test.json.gz")
data := &jsonTestData{A: "yay", B: 3}
err := writeJSON(file, data, false)
require.NoError(t, err)
// Confirm the file isn't raw JSON
fileContent, err := os.ReadFile(file)
require.NoError(t, err)
err = json.Unmarshal(fileContent, &jsonTestData{})
require.Error(t, err, "should not be able to decode without decompressing")
var result *jsonTestData
result, err = loadJSON[jsonTestData](file)
require.NoError(t, err)
require.EqualValues(t, data, result)
}
type jsonTestData struct {
A string `json:"a"`
B int `json:"b"`
}
...@@ -27,6 +27,7 @@ var ( ...@@ -27,6 +27,7 @@ var (
StepBytes4 = crypto.Keccak256([]byte("step(bytes,bytes)"))[:4] StepBytes4 = crypto.Keccak256([]byte("step(bytes,bytes)"))[:4]
CheatBytes4 = crypto.Keccak256([]byte("cheat(uint256,bytes32,bytes32,uint256)"))[:4] CheatBytes4 = crypto.Keccak256([]byte("cheat(uint256,bytes32,bytes32,uint256)"))[:4]
LoadKeccak256PreimagePartBytes4 = crypto.Keccak256([]byte("loadKeccak256PreimagePart(uint256,bytes)"))[:4] LoadKeccak256PreimagePartBytes4 = crypto.Keccak256([]byte("loadKeccak256PreimagePart(uint256,bytes)"))[:4]
LoadLocalDataBytes4 = crypto.Keccak256([]byte("loadLocalData(uint256,bytes32,uint256,uint256)"))[:4]
) )
// LoadContracts loads the Cannon contracts, from op-bindings package // LoadContracts loads the Cannon contracts, from op-bindings package
......
...@@ -37,9 +37,12 @@ func testContractsSetup(t require.TestingT) (*Contracts, *Addresses) {
}
func SourceMapTracer(t *testing.T, contracts *Contracts, addrs *Addresses) vm.EVMLogger {
-	mipsSrcMap, err := contracts.MIPS.SourceMap([]string{"../../packages/contracts-bedrock/contracts/cannon/MIPS.sol"})
+	t.Fatal("TODO(clabby): The source map tracer is disabled until source IDs have been added to foundry artifacts.")
+	contractsDir := "../../packages/contracts-bedrock"
+	mipsSrcMap, err := contracts.MIPS.SourceMap([]string{path.Join(contractsDir, "src/cannon/MIPS.sol")})
	require.NoError(t, err)
-	oracleSrcMap, err := contracts.Oracle.SourceMap([]string{"../../packages/contracts-bedrock/contracts/cannon/PreimageOracle.sol"})
+	oracleSrcMap, err := contracts.Oracle.SourceMap([]string{path.Join(contractsDir, "src/cannon/PreimageOracle.sol")})
	require.NoError(t, err)
	return srcmap.NewSourceMapTracer(map[common.Address]*srcmap.SourceMap{addrs.MIPS: mipsSrcMap, addrs.Oracle: oracleSrcMap}, os.Stdout)
...@@ -76,7 +79,7 @@ func (m *MIPSEVM) Step(t *testing.T, stepWitness *StepWitness) []byte {
	t.Logf("reading preimage key %x at offset %d", stepWitness.PreimageKey, stepWitness.PreimageOffset)
	poInput, err := stepWitness.EncodePreimageOracleInput()
	require.NoError(t, err, "encode preimage oracle input")
-	_, leftOverGas, err := m.env.Call(vm.AccountRef(m.addrs.Sender), m.addrs.Oracle, poInput, startingGas, big.NewInt(0))
+	_, leftOverGas, err := m.env.Call(vm.AccountRef(sender), m.addrs.Oracle, poInput, startingGas, big.NewInt(0))
	require.NoErrorf(t, err, "evm should not fail, took %d gas", startingGas-leftOverGas)
}
...@@ -171,30 +174,43 @@ func TestEVMFault(t *testing.T) {
	env, evmState := NewEVMEnv(contracts, addrs)
	env.Config.Tracer = tracer
-	programMem := []byte{0xff, 0xff, 0xff, 0xff}
-	state := &State{PC: 0, NextPC: 4, Memory: NewMemory()}
-	initialState := &State{PC: 0, NextPC: 4, Memory: state.Memory}
-	err := state.Memory.SetMemoryRange(0, bytes.NewReader(programMem))
-	require.NoError(t, err, "load program into state")
-	// set the return address ($ra) to jump into when test completes
-	state.Registers[31] = endAddr
-	goState := NewInstrumentedState(state, nil, os.Stdout, os.Stderr)
-	require.Panics(t, func() { _, _ = goState.Step(true) }, "must panic on illegal instruction")
-	insnProof := initialState.Memory.MerkleProof(0)
-	stepWitness := &StepWitness{
-		State:    initialState.EncodeWitness(),
-		MemProof: insnProof[:],
-	}
-	input := stepWitness.EncodeStepInput()
-	startingGas := uint64(30_000_000)
-	_, _, err = env.Call(vm.AccountRef(sender), addrs.MIPS, input, startingGas, big.NewInt(0))
+	type testInput struct {
+		name   string
+		nextPC uint32
+		insn   uint32
+	}
+	cases := []testInput{
+		{"illegal instruction", 0, 0xFF_FF_FF_FF},
+		{"branch in delay-slot", 8, 0x11_02_00_03},
+		{"jump in delay-slot", 8, 0x0c_00_00_0c},
+	}
+	for _, tt := range cases {
+		t.Run(tt.name, func(t *testing.T) {
+			state := &State{PC: 0, NextPC: tt.nextPC, Memory: NewMemory()}
+			initialState := &State{PC: 0, NextPC: tt.nextPC, Memory: state.Memory}
+			state.Memory.SetMemory(0, tt.insn)
+			// set the return address ($ra) to jump into when test completes
+			state.Registers[31] = endAddr
+			us := NewInstrumentedState(state, nil, os.Stdout, os.Stderr)
+			require.Panics(t, func() { _, _ = us.Step(true) })
+			insnProof := initialState.Memory.MerkleProof(0)
+			stepWitness := &StepWitness{
+				State:    initialState.EncodeWitness(),
+				MemProof: insnProof[:],
+			}
+			input := stepWitness.EncodeStepInput()
+			startingGas := uint64(30_000_000)
+			_, _, err := env.Call(vm.AccountRef(sender), addrs.MIPS, input, startingGas, big.NewInt(0))
	require.EqualValues(t, err, vm.ErrExecutionReverted)
	logs := evmState.Logs()
	require.Equal(t, 0, len(logs))
+		})
+	}
}
func TestHelloEVM(t *testing.T) { func TestHelloEVM(t *testing.T) {
......
...@@ -180,6 +180,10 @@ func (m *InstrumentedState) handleSyscall() error { ...@@ -180,6 +180,10 @@ func (m *InstrumentedState) handleSyscall() error {
} }
func (m *InstrumentedState) handleBranch(opcode uint32, insn uint32, rtReg uint32, rs uint32) error { func (m *InstrumentedState) handleBranch(opcode uint32, insn uint32, rtReg uint32, rs uint32) error {
if m.state.NextPC != m.state.PC+4 {
panic("branch in delay slot")
}
shouldBranch := false shouldBranch := false
if opcode == 4 || opcode == 5 { // beq/bne if opcode == 4 || opcode == 5 { // beq/bne
rt := m.state.Registers[rtReg] rt := m.state.Registers[rtReg]
...@@ -246,6 +250,9 @@ func (m *InstrumentedState) handleHiLo(fun uint32, rs uint32, rt uint32, storeRe ...@@ -246,6 +250,9 @@ func (m *InstrumentedState) handleHiLo(fun uint32, rs uint32, rt uint32, storeRe
} }
func (m *InstrumentedState) handleJump(linkReg uint32, dest uint32) error { func (m *InstrumentedState) handleJump(linkReg uint32, dest uint32) error {
if m.state.NextPC != m.state.PC+4 {
panic("jump in delay slot")
}
prevPC := m.state.PC prevPC := m.state.PC
m.state.PC = m.state.NextPC m.state.PC = m.state.NextPC
m.state.NextPC = dest m.state.NextPC = dest
......
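Both guards rest on the same MIPS invariant: straight-line execution always leaves NextPC at PC+4, and only a just-executed branch or jump points NextPC elsewhere, so observing NextPC != PC+4 inside handleBranch or handleJump means the current control-flow instruction sits in the delay slot of another one, which Cannon (and the matching EVM test cases above) treats as a fatal condition. An illustrative helper stating that check; the diff inlines it rather than defining a function like this:

// inDelaySlot reports whether the instruction about to execute sits in the delay
// slot of a previously executed branch/jump: anything other than the sequential
// NextPC == PC+4 means a control-flow change is already pending.
func inDelaySlot(s *State) bool {
	return s.NextPC != s.PC+4
}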
...@@ -49,18 +49,19 @@ func (wit *StepWitness) EncodePreimageOracleInput() ([]byte, error) {
	switch preimage.KeyType(wit.PreimageKey[0]) {
	case preimage.LocalKeyType:
-		// We have no on-chain form of preparing the bootstrap pre-images onchain yet.
-		// So instead we cheat them in.
-		// In production usage there should be an on-chain contract that exposes this,
-		// rather than going through the global keccak256 oracle.
+		if len(wit.PreimageValue) > 32+8 {
+			return nil, fmt.Errorf("local pre-image exceeds maximum size of 32 bytes with key 0x%x", wit.PreimageKey)
+		}
		var input []byte
-		input = append(input, CheatBytes4...)
-		input = append(input, uint32ToBytes32(wit.PreimageOffset)...)
+		input = append(input, LoadLocalDataBytes4...)
		input = append(input, wit.PreimageKey[:]...)
+		preimagePart := wit.PreimageValue[8:]
		var tmp [32]byte
-		copy(tmp[:], wit.PreimageValue[wit.PreimageOffset:])
+		copy(tmp[:], preimagePart)
		input = append(input, tmp[:]...)
-		input = append(input, uint32ToBytes32(uint32(len(wit.PreimageValue))-8)...)
+		input = append(input, uint32ToBytes32(uint32(len(wit.PreimageValue)-8))...)
+		input = append(input, uint32ToBytes32(wit.PreimageOffset)...)
		// Note: we can pad calldata to 32 byte multiple, but don't strictly have to
		return input, nil
	case preimage.Keccak256KeyType:
......
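With the cheat() backdoor gone, the local-key branch targets the real loadLocalData(uint256,bytes32,uint256,uint256) selector, so the calldata is the 4-byte selector followed by four 32-byte words. A small sketch that splits such an input back apart, mirroring the append order above (the helper and word names are illustrative, not the Solidity parameter names; it assumes a full 132-byte input):

// splitLoadLocalDataInput decomposes calldata produced by EncodePreimageOracleInput
// for a local key; the offsets follow directly from the appends above.
func splitLoadLocalDataInput(input []byte) (selector []byte, key, part, size, offset [32]byte) {
	selector = input[:4]            // LoadLocalDataBytes4
	copy(key[:], input[4:36])       // wit.PreimageKey
	copy(part[:], input[36:68])     // first 32 bytes of wit.PreimageValue[8:], zero-padded
	copy(size[:], input[68:100])    // uint32ToBytes32(len(wit.PreimageValue) - 8)
	copy(offset[:], input[100:132]) // uint32ToBytes32(wit.PreimageOffset)
	return
}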
...@@ -30,8 +30,8 @@ var ( ...@@ -30,8 +30,8 @@ var (
// AlphabetVMMetaData contains all meta data concerning the AlphabetVM contract. // AlphabetVMMetaData contains all meta data concerning the AlphabetVM contract.
var AlphabetVMMetaData = &bind.MetaData{ var AlphabetVMMetaData = &bind.MetaData{
ABI: "[{\"inputs\":[{\"internalType\":\"Claim\",\"name\":\"_absolutePrestate\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_stateData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"step\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"postState_\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", ABI: "[{\"inputs\":[{\"internalType\":\"Claim\",\"name\":\"_absolutePrestate\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"oracle\",\"outputs\":[{\"internalType\":\"contractIPreimageOracle\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_stateData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"step\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"postState_\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
Bin: "0x60a060405234801561001057600080fd5b5060405161030438038061030483398101604081905261002f91610037565b608052610050565b60006020828403121561004957600080fd5b5051919050565b60805161029a61006a6000396000605c015261029a6000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f8e0cb9614610030575b600080fd5b61004361003e366004610157565b610055565b60405190815260200160405180910390f35b60008060007f0000000000000000000000000000000000000000000000000000000000000000878760405161008b9291906101c3565b6040518091039020036100af57600091506100a8868801886101d3565b90506100ce565b6100bb868801886101ec565b9092509050816100ca8161023d565b9250505b816100da826001610275565b6040805160208101939093528201526060016040516020818303038152906040528051906020012092505050949350505050565b60008083601f84011261012057600080fd5b50813567ffffffffffffffff81111561013857600080fd5b60208301915083602082850101111561015057600080fd5b9250929050565b6000806000806040858703121561016d57600080fd5b843567ffffffffffffffff8082111561018557600080fd5b6101918883890161010e565b909650945060208701359150808211156101aa57600080fd5b506101b78782880161010e565b95989497509550505050565b8183823760009101908152919050565b6000602082840312156101e557600080fd5b5035919050565b600080604083850312156101ff57600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361026e5761026e61020e565b5060010190565b600082198211156102885761028861020e565b50019056fea164736f6c634300080f000a", Bin: "0x60a060405234801561001057600080fd5b5060405161039838038061039883398101604081905261002f9161007a565b608081905261003c610062565b600080546001600160a01b0319166001600160a01b039290921691909117905550610093565b6460016000f36000908152600580601b83f091505090565b60006020828403121561008c57600080fd5b5051919050565b6080516102eb6100ad600039600060ad01526102eb6000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c80637dc0d1d01461003b578063f8e0cb9614610085575b600080fd5b60005461005b9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6100986100933660046101a8565b6100a6565b60405190815260200161007c565b60008060007f000000000000000000000000000000000000000000000000000000000000000087876040516100dc929190610214565b60405180910390200361010057600091506100f986880188610224565b905061011f565b61010c8688018861023d565b90925090508161011b8161028e565b9250505b8161012b8260016102c6565b6040805160208101939093528201526060016040516020818303038152906040528051906020012092505050949350505050565b60008083601f84011261017157600080fd5b50813567ffffffffffffffff81111561018957600080fd5b6020830191508360208285010111156101a157600080fd5b9250929050565b600080600080604085870312156101be57600080fd5b843567ffffffffffffffff808211156101d657600080fd5b6101e28883890161015f565b909650945060208701359150808211156101fb57600080fd5b506102088782880161015f565b95989497509550505050565b8183823760009101908152919050565b60006020828403121561023657600080fd5b5035919050565b6000806040838503121561025057600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036102bf576102bf61025f565b5060010190565b600082198211156102d9576102d961025f565b50019056fea164736f6c634300080f000a",
} }
// AlphabetVMABI is the input ABI used to generate the binding from. // AlphabetVMABI is the input ABI used to generate the binding from.
...@@ -201,6 +201,37 @@ func (_AlphabetVM *AlphabetVMTransactorRaw) Transact(opts *bind.TransactOpts, me ...@@ -201,6 +201,37 @@ func (_AlphabetVM *AlphabetVMTransactorRaw) Transact(opts *bind.TransactOpts, me
return _AlphabetVM.Contract.contract.Transact(opts, method, params...) return _AlphabetVM.Contract.contract.Transact(opts, method, params...)
} }
// Oracle is a free data retrieval call binding the contract method 0x7dc0d1d0.
//
// Solidity: function oracle() view returns(address)
func (_AlphabetVM *AlphabetVMCaller) Oracle(opts *bind.CallOpts) (common.Address, error) {
var out []interface{}
err := _AlphabetVM.contract.Call(opts, &out, "oracle")
if err != nil {
return *new(common.Address), err
}
out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)
return out0, err
}
// Oracle is a free data retrieval call binding the contract method 0x7dc0d1d0.
//
// Solidity: function oracle() view returns(address)
func (_AlphabetVM *AlphabetVMSession) Oracle() (common.Address, error) {
return _AlphabetVM.Contract.Oracle(&_AlphabetVM.CallOpts)
}
// Oracle is a free data retrieval call binding the contract method 0x7dc0d1d0.
//
// Solidity: function oracle() view returns(address)
func (_AlphabetVM *AlphabetVMCallerSession) Oracle() (common.Address, error) {
return _AlphabetVM.Contract.Oracle(&_AlphabetVM.CallOpts)
}
// Step is a free data retrieval call binding the contract method 0xf8e0cb96. // Step is a free data retrieval call binding the contract method 0xf8e0cb96.
// //
// Solidity: function step(bytes _stateData, bytes ) view returns(bytes32 postState_) // Solidity: function step(bytes _stateData, bytes ) view returns(bytes32 postState_)
......
...@@ -9,11 +9,11 @@ import ( ...@@ -9,11 +9,11 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/solc" "github.com/ethereum-optimism/optimism/op-bindings/solc"
) )
const AlphabetVMStorageLayoutJSON = "{\"storage\":null,\"types\":{}}" const AlphabetVMStorageLayoutJSON = "{\"storage\":[{\"astId\":1000,\"contract\":\"test/FaultDisputeGame.t.sol:AlphabetVM\",\"label\":\"oracle\",\"offset\":0,\"slot\":\"0\",\"type\":\"t_contract(IPreimageOracle)1001\"}],\"types\":{\"t_contract(IPreimageOracle)1001\":{\"encoding\":\"inplace\",\"label\":\"contract IPreimageOracle\",\"numberOfBytes\":\"20\"}}}"
var AlphabetVMStorageLayout = new(solc.StorageLayout) var AlphabetVMStorageLayout = new(solc.StorageLayout)
var AlphabetVMDeployedBin = "0x608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f8e0cb9614610030575b600080fd5b61004361003e366004610157565b610055565b60405190815260200160405180910390f35b60008060007f0000000000000000000000000000000000000000000000000000000000000000878760405161008b9291906101c3565b6040518091039020036100af57600091506100a8868801886101d3565b90506100ce565b6100bb868801886101ec565b9092509050816100ca8161023d565b9250505b816100da826001610275565b6040805160208101939093528201526060016040516020818303038152906040528051906020012092505050949350505050565b60008083601f84011261012057600080fd5b50813567ffffffffffffffff81111561013857600080fd5b60208301915083602082850101111561015057600080fd5b9250929050565b6000806000806040858703121561016d57600080fd5b843567ffffffffffffffff8082111561018557600080fd5b6101918883890161010e565b909650945060208701359150808211156101aa57600080fd5b506101b78782880161010e565b95989497509550505050565b8183823760009101908152919050565b6000602082840312156101e557600080fd5b5035919050565b600080604083850312156101ff57600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361026e5761026e61020e565b5060010190565b600082198211156102885761028861020e565b50019056fea164736f6c634300080f000a" var AlphabetVMDeployedBin = "0x608060405234801561001057600080fd5b50600436106100365760003560e01c80637dc0d1d01461003b578063f8e0cb9614610085575b600080fd5b60005461005b9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6100986100933660046101a8565b6100a6565b60405190815260200161007c565b60008060007f000000000000000000000000000000000000000000000000000000000000000087876040516100dc929190610214565b60405180910390200361010057600091506100f986880188610224565b905061011f565b61010c8688018861023d565b90925090508161011b8161028e565b9250505b8161012b8260016102c6565b6040805160208101939093528201526060016040516020818303038152906040528051906020012092505050949350505050565b60008083601f84011261017157600080fd5b50813567ffffffffffffffff81111561018957600080fd5b6020830191508360208285010111156101a157600080fd5b9250929050565b600080600080604085870312156101be57600080fd5b843567ffffffffffffffff808211156101d657600080fd5b6101e28883890161015f565b909650945060208701359150808211156101fb57600080fd5b506102088782880161015f565b95989497509550505050565b8183823760009101908152919050565b60006020828403121561023657600080fd5b5035919050565b6000806040838503121561025057600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036102bf576102bf61025f565b5060010190565b600082198211156102d9576102d961025f565b50019056fea164736f6c634300080f000a"
func init() { func init() {
if err := json.Unmarshal([]byte(AlphabetVMStorageLayoutJSON), AlphabetVMStorageLayout); err != nil { if err := json.Unmarshal([]byte(AlphabetVMStorageLayoutJSON), AlphabetVMStorageLayout); err != nil {
......
package srcmap
import (
+	"path"
	"strings"
	"testing"
...@@ -11,17 +12,24 @@ import (
)
func TestSourcemap(t *testing.T) {
-	sourcePath := "../../packages/contracts-bedrock/src/cannon/MIPS.sol"
+	t.Skip("TODO(clabby): This test is disabled until source IDs have been added to foundry artifacts.")
+	contractsDir := "../../packages/contracts-bedrock"
+	sources := []string{path.Join(contractsDir, "src/cannon/MIPS.sol")}
+	for i, source := range sources {
+		sources[i] = path.Join(contractsDir, source)
+	}
	deployedByteCode := hexutil.MustDecode(bindings.MIPSDeployedBin)
	srcMap, err := ParseSourceMap(
-		[]string{sourcePath},
+		sources,
		deployedByteCode,
		bindings.MIPSDeployedSourceMap)
	require.NoError(t, err)
	for i := 0; i < len(deployedByteCode); i++ {
		info := srcMap.FormattedInfo(uint64(i))
-		if !strings.HasPrefix(info, "generated:") && !strings.HasPrefix(info, sourcePath) {
+		if strings.HasPrefix(info, "unknown") {
			t.Fatalf("unexpected info: %q", info)
		}
	}
......
...@@ -30,10 +30,11 @@ func setupFaultDisputeGame() (common.Address, *bind.TransactOpts, *backends.Simu
	_, _, contract, err := bindings.DeployFaultDisputeGame(
		opts,
		backend,
-		[32]byte{0x01},
-		big.NewInt(15),
+		[32]byte{0x01},  // Absolute Prestate Claim
+		big.NewInt(15),  // Max Game Depth
		uint64(604800), // 7 days
-		common.Address{0xdd},
+		common.Address{0xdd}, // VM
+		common.Address{0xee}, // L2OutputOracle (Not used in Alphabet Game)
	)
	if err != nil {
		return common.Address{}, nil, nil, nil, err
......
...@@ -16,6 +16,7 @@ import ( ...@@ -16,6 +16,7 @@ import (
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
) )
...@@ -30,7 +31,7 @@ func TestBatcher(gt *testing.T) { ...@@ -30,7 +31,7 @@ func TestBatcher(gt *testing.T) {
sd := e2eutils.Setup(t, dp, defaultAlloc) sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug) log := testlog.Logger(t, log.LvlDebug)
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
-	verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg))
+	verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), &sync.Config{})
rollupSeqCl := sequencer.RollupClient() rollupSeqCl := sequencer.RollupClient()
batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{ batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
...@@ -268,7 +269,7 @@ func TestGarbageBatch(gt *testing.T) { ...@@ -268,7 +269,7 @@ func TestGarbageBatch(gt *testing.T) {
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LvlError)
miner, engine, sequencer := setupSequencerTest(t, sd, log) miner, engine, sequencer := setupSequencerTest(t, sd, log)
-	_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg))
+	_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), &sync.Config{})
batcherCfg := &BatcherCfg{ batcherCfg := &BatcherCfg{
MinL1TxSize: 0, MinL1TxSize: 0,
...@@ -350,7 +351,7 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) { ...@@ -350,7 +351,7 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) {
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LvlError)
miner, engine, sequencer := setupSequencerTest(t, sd, log) miner, engine, sequencer := setupSequencerTest(t, sd, log)
-	_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg))
+	_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), &sync.Config{})
batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{ batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
MinL1TxSize: 0, MinL1TxSize: 0,
...@@ -407,7 +408,7 @@ func TestBigL2Txs(gt *testing.T) { ...@@ -407,7 +408,7 @@ func TestBigL2Txs(gt *testing.T) {
log := testlog.Logger(t, log.LvlInfo) log := testlog.Logger(t, log.LvlInfo)
miner, engine, sequencer := setupSequencerTest(t, sd, log) miner, engine, sequencer := setupSequencerTest(t, sd, log)
-	_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg))
+	_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), &sync.Config{})
batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{ batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
MinL1TxSize: 0, MinL1TxSize: 0,
......
...@@ -12,6 +12,7 @@ import ( ...@@ -12,6 +12,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/driver" "github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
) )
// MockL1OriginSelector is a shim to override the origin as sequencer, so we can force it to stay on an older origin. // MockL1OriginSelector is a shim to override the origin as sequencer, so we can force it to stay on an older origin.
...@@ -40,7 +41,7 @@ type L2Sequencer struct { ...@@ -40,7 +41,7 @@ type L2Sequencer struct {
} }
func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, eng L2API, cfg *rollup.Config, seqConfDepth uint64) *L2Sequencer { func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, eng L2API, cfg *rollup.Config, seqConfDepth uint64) *L2Sequencer {
-	ver := NewL2Verifier(t, log, l1, eng, cfg)
+	ver := NewL2Verifier(t, log, l1, eng, cfg, &sync.Config{})
attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, eng) attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, eng)
seqConfDepthL1 := driver.NewConfDepth(seqConfDepth, ver.l1State.L1Head, l1) seqConfDepthL1 := driver.NewConfDepth(seqConfDepth, ver.l1State.L1Head, l1)
l1OriginSelector := &MockL1OriginSelector{ l1OriginSelector := &MockL1OriginSelector{
......
...@@ -54,12 +54,11 @@ type L2API interface { ...@@ -54,12 +54,11 @@ type L2API interface {
InfoByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, error) InfoByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, error)
// GetProof returns a proof of the account, it may return a nil result without error if the address was not found. // GetProof returns a proof of the account, it may return a nil result without error if the address was not found.
GetProof(ctx context.Context, address common.Address, storage []common.Hash, blockTag string) (*eth.AccountResult, error) GetProof(ctx context.Context, address common.Address, storage []common.Hash, blockTag string) (*eth.AccountResult, error)
OutputV0AtBlock(ctx context.Context, blockHash common.Hash) (*eth.OutputV0, error)
} }
-func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, eng L2API, cfg *rollup.Config) *L2Verifier {
+func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, eng L2API, cfg *rollup.Config, syncCfg *sync.Config) *L2Verifier {
	metrics := &testutils.TestDerivationMetrics{}
-	pipeline := derive.NewDerivationPipeline(log, cfg, l1, eng, metrics)
+	pipeline := derive.NewDerivationPipeline(log, cfg, l1, eng, metrics, syncCfg)
pipeline.Reset() pipeline.Reset()
rollupNode := &L2Verifier{ rollupNode := &L2Verifier{
...@@ -139,6 +138,10 @@ func (s *L2Verifier) L2Unsafe() eth.L2BlockRef { ...@@ -139,6 +138,10 @@ func (s *L2Verifier) L2Unsafe() eth.L2BlockRef {
return s.derivation.UnsafeL2Head() return s.derivation.UnsafeL2Head()
} }
func (s *L2Verifier) EngineSyncTarget() eth.L2BlockRef {
return s.derivation.EngineSyncTarget()
}
func (s *L2Verifier) SyncStatus() *eth.SyncStatus { func (s *L2Verifier) SyncStatus() *eth.SyncStatus {
return &eth.SyncStatus{ return &eth.SyncStatus{
CurrentL1: s.derivation.Origin(), CurrentL1: s.derivation.Origin(),
...@@ -150,6 +153,7 @@ func (s *L2Verifier) SyncStatus() *eth.SyncStatus { ...@@ -150,6 +153,7 @@ func (s *L2Verifier) SyncStatus() *eth.SyncStatus {
SafeL2: s.L2Safe(), SafeL2: s.L2Safe(),
FinalizedL2: s.L2Finalized(), FinalizedL2: s.L2Finalized(),
UnsafeL2SyncTarget: s.derivation.UnsafeL2SyncTarget(), UnsafeL2SyncTarget: s.derivation.UnsafeL2SyncTarget(),
EngineSyncTarget: s.EngineSyncTarget(),
} }
} }
...@@ -206,7 +210,7 @@ func (s *L2Verifier) ActL2PipelineStep(t Testing) { ...@@ -206,7 +210,7 @@ func (s *L2Verifier) ActL2PipelineStep(t Testing) {
s.l2PipelineIdle = false s.l2PipelineIdle = false
err := s.derivation.Step(t.Ctx()) err := s.derivation.Step(t.Ctx())
-	if err == io.EOF {
+	if err == io.EOF || (err != nil && errors.Is(err, derive.EngineP2PSyncing)) {
s.l2PipelineIdle = true s.l2PipelineIdle = true
return return
} else if err != nil && errors.Is(err, derive.NotEnoughData) { } else if err != nil && errors.Is(err, derive.NotEnoughData) {
......
...@@ -9,21 +9,22 @@ import ( ...@@ -9,21 +9,22 @@ import (
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
) )
-func setupVerifier(t Testing, sd *e2eutils.SetupData, log log.Logger, l1F derive.L1Fetcher) (*L2Engine, *L2Verifier) {
+func setupVerifier(t Testing, sd *e2eutils.SetupData, log log.Logger, l1F derive.L1Fetcher, syncCfg *sync.Config) (*L2Engine, *L2Verifier) {
jwtPath := e2eutils.WriteDefaultJWT(t) jwtPath := e2eutils.WriteDefaultJWT(t)
engine := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath) engine := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath)
engCl := engine.EngineClient(t, sd.RollupCfg) engCl := engine.EngineClient(t, sd.RollupCfg)
-	verifier := NewL2Verifier(t, log, l1F, engCl, sd.RollupCfg)
+	verifier := NewL2Verifier(t, log, l1F, engCl, sd.RollupCfg, syncCfg)
return engine, verifier return engine, verifier
} }
func setupVerifierOnlyTest(t Testing, sd *e2eutils.SetupData, log log.Logger) (*L1Miner, *L2Engine, *L2Verifier) { func setupVerifierOnlyTest(t Testing, sd *e2eutils.SetupData, log log.Logger) (*L1Miner, *L2Engine, *L2Verifier) {
miner := NewL1Miner(t, log, sd.L1Cfg) miner := NewL1Miner(t, log, sd.L1Cfg)
l1Cl := miner.L1Client(t, sd.RollupCfg) l1Cl := miner.L1Client(t, sd.RollupCfg)
-	engine, verifier := setupVerifier(t, sd, log, l1Cl)
+	engine, verifier := setupVerifier(t, sd, log, l1Cl, &sync.Config{})
return miner, engine, verifier return miner, engine, verifier
} }
......
...@@ -16,6 +16,7 @@ import ( ...@@ -16,6 +16,7 @@ import (
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/client" "github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-node/sources" "github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
) )
...@@ -33,7 +34,7 @@ func setupReorgTestActors(t Testing, dp *e2eutils.DeployParams, sd *e2eutils.Set ...@@ -33,7 +34,7 @@ func setupReorgTestActors(t Testing, dp *e2eutils.DeployParams, sd *e2eutils.Set
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
miner.ActL1SetFeeRecipient(common.Address{'A'}) miner.ActL1SetFeeRecipient(common.Address{'A'})
sequencer.ActL2PipelineFull(t) sequencer.ActL2PipelineFull(t)
-	verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg))
+	verifEngine, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), &sync.Config{})
rollupSeqCl := sequencer.RollupClient() rollupSeqCl := sequencer.RollupClient()
batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{ batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
MinL1TxSize: 0, MinL1TxSize: 0,
......
...@@ -6,6 +6,9 @@ import ( ...@@ -6,6 +6,9 @@ import (
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -92,3 +95,74 @@ func TestFinalizeWhileSyncing(gt *testing.T) { ...@@ -92,3 +95,74 @@ func TestFinalizeWhileSyncing(gt *testing.T) {
// Verify the verifier finalized something new // Verify the verifier finalized something new
require.Less(t, verifierStartStatus.FinalizedL2.Number, verifier.SyncStatus().FinalizedL2.Number, "verifier finalized L2 blocks during sync") require.Less(t, verifierStartStatus.FinalizedL2.Number, verifier.SyncStatus().FinalizedL2.Number, "verifier finalized L2 blocks during sync")
} }
func TestUnsafeSync(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlInfo)
sd, _, _, sequencer, seqEng, verifier, _, _ := setupReorgTestActors(t, dp, sd, log)
seqEngCl, err := sources.NewEngineClient(seqEng.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
require.NoError(t, err)
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
for i := 0; i < 10; i++ {
// Build a L2 block
sequencer.ActL2StartBlock(t)
sequencer.ActL2EndBlock(t)
// Notify new L2 block to verifier by unsafe gossip
seqHead, err := seqEngCl.PayloadByLabel(t.Ctx(), eth.Unsafe)
require.NoError(t, err)
verifier.ActL2UnsafeGossipReceive(seqHead)(t)
// Handle unsafe payload
verifier.ActL2PipelineFull(t)
// Verifier must advance its unsafe head and engine sync target.
require.Equal(t, sequencer.L2Unsafe().Hash, verifier.L2Unsafe().Hash)
// Check engine sync target updated.
require.Equal(t, sequencer.L2Unsafe().Hash, sequencer.EngineSyncTarget().Hash)
require.Equal(t, verifier.L2Unsafe().Hash, verifier.EngineSyncTarget().Hash)
}
}
func TestEngineP2PSync(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlInfo)
miner, seqEng, sequencer := setupSequencerTest(t, sd, log)
// Enable engine P2P sync
_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), &sync.Config{EngineSync: true})
seqEngCl, err := sources.NewEngineClient(seqEng.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
require.NoError(t, err)
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
verifierUnsafeHead := verifier.L2Unsafe()
// Build a L2 block. This block will not be gossiped to verifier, so verifier can not advance chain by itself.
sequencer.ActL2StartBlock(t)
sequencer.ActL2EndBlock(t)
for i := 0; i < 10; i++ {
// Build a L2 block
sequencer.ActL2StartBlock(t)
sequencer.ActL2EndBlock(t)
// Notify new L2 block to verifier by unsafe gossip
seqHead, err := seqEngCl.PayloadByLabel(t.Ctx(), eth.Unsafe)
require.NoError(t, err)
verifier.ActL2UnsafeGossipReceive(seqHead)(t)
// Handle unsafe payload
verifier.ActL2PipelineFull(t)
// Verifier must advance only engine sync target.
require.NotEqual(t, sequencer.L2Unsafe().Hash, verifier.L2Unsafe().Hash)
require.NotEqual(t, verifier.L2Unsafe().Hash, verifier.EngineSyncTarget().Hash)
require.Equal(t, verifier.L2Unsafe().Hash, verifierUnsafeHead.Hash)
require.Equal(t, sequencer.L2Unsafe().Hash, verifier.EngineSyncTarget().Hash)
}
}
...@@ -15,6 +15,7 @@ import ( ...@@ -15,6 +15,7 @@ import (
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
) )
...@@ -29,7 +30,7 @@ func TestBatcherKeyRotation(gt *testing.T) { ...@@ -29,7 +30,7 @@ func TestBatcherKeyRotation(gt *testing.T) {
miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
miner.ActL1SetFeeRecipient(common.Address{'A'}) miner.ActL1SetFeeRecipient(common.Address{'A'})
sequencer.ActL2PipelineFull(t) sequencer.ActL2PipelineFull(t)
-	_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg))
+	_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), &sync.Config{})
rollupSeqCl := sequencer.RollupClient() rollupSeqCl := sequencer.RollupClient()
// the default batcher // the default batcher
...@@ -358,7 +359,7 @@ func TestGasLimitChange(gt *testing.T) { ...@@ -358,7 +359,7 @@ func TestGasLimitChange(gt *testing.T) {
miner.ActL1IncludeTx(dp.Addresses.Batcher)(t) miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
miner.ActL1EndBlock(t) miner.ActL1EndBlock(t)
-	_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg))
+	_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg), &sync.Config{})
verifier.ActL2PipelineFull(t) verifier.ActL2PipelineFull(t)
require.Equal(t, sequencer.L2Unsafe(), verifier.L2Safe(), "verifier stays in sync, even with gaslimit changes") require.Equal(t, sequencer.L2Unsafe(), verifier.L2Safe(), "verifier stays in sync, even with gaslimit changes")
......
...@@ -9,6 +9,7 @@ import ( ...@@ -9,6 +9,7 @@ import (
"github.com/ethereum-optimism/optimism/op-chain-ops/deployer" "github.com/ethereum-optimism/optimism/op-chain-ops/deployer"
"github.com/ethereum-optimism/optimism/op-service/client/utils" "github.com/ethereum-optimism/optimism/op-service/client/utils"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
...@@ -50,7 +51,7 @@ func deployDisputeGameContracts(require *require.Assertions, ctx context.Context ...@@ -50,7 +51,7 @@ func deployDisputeGameContracts(require *require.Assertions, ctx context.Context
require.NoError(err) require.NoError(err)
// Deploy the fault dispute game implementation // Deploy the fault dispute game implementation
-	_, tx, _, err = bindings.DeployFaultDisputeGame(opts, client, alphabetVMAbsolutePrestateClaim, big.NewInt(alphabetGameDepth), gameDuration, alphaVMAddr)
+	_, tx, _, err = bindings.DeployFaultDisputeGame(opts, client, alphabetVMAbsolutePrestateClaim, big.NewInt(alphabetGameDepth), gameDuration, alphaVMAddr, common.Address{0xBE, 0xEF})
require.NoError(err) require.NoError(err)
faultDisputeGameAddr, err := bind.WaitDeployed(ctx, client, tx) faultDisputeGameAddr, err := bind.WaitDeployed(ctx, client, tx)
require.NoError(err) require.NoError(err)
......
...@@ -86,10 +86,7 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) { ...@@ -86,10 +86,7 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) {
require.NoError(t, waitForSafeHead(ctx, receipt.BlockNumber.Uint64(), rollupClient)) require.NoError(t, waitForSafeHead(ctx, receipt.BlockNumber.Uint64(), rollupClient))
t.Logf("Capture current L2 head as agreed starting point. l2Head=%x l2BlockNumber=%v", receipt.BlockHash, receipt.BlockNumber) t.Logf("Capture current L2 head as agreed starting point. l2Head=%x l2BlockNumber=%v", receipt.BlockHash, receipt.BlockNumber)
-	agreedL2Output, err := rollupClient.OutputAtBlock(ctx, receipt.BlockNumber.Uint64())
-	require.NoError(t, err, "could not retrieve l2 agreed block")
-	l2Head := agreedL2Output.BlockRef.Hash
-	l2OutputRoot := agreedL2Output.OutputRoot
+	l2Head := receipt.BlockHash
t.Log("=====Stopping batch submitter=====") t.Log("=====Stopping batch submitter=====")
err = sys.BatchSubmitter.Stop(ctx) err = sys.BatchSubmitter.Stop(ctx)
...@@ -139,7 +136,6 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) { ...@@ -139,7 +136,6 @@ func testVerifyL2OutputRootEmptyBlock(t *testing.T, detached bool) {
testFaultProofProgramScenario(t, ctx, sys, &FaultProofProgramTestScenario{ testFaultProofProgramScenario(t, ctx, sys, &FaultProofProgramTestScenario{
L1Head: l1Head, L1Head: l1Head,
L2Head: l2Head, L2Head: l2Head,
L2OutputRoot: common.Hash(l2OutputRoot),
L2Claim: common.Hash(l2Claim), L2Claim: common.Hash(l2Claim),
L2ClaimBlockNumber: l2ClaimBlockNumber, L2ClaimBlockNumber: l2ClaimBlockNumber,
Detached: detached, Detached: detached,
...@@ -185,12 +181,9 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool) { ...@@ -185,12 +181,9 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool) {
}) })
t.Log("Capture current L2 head as agreed starting point") t.Log("Capture current L2 head as agreed starting point")
-	latestBlock, err := l2Seq.BlockByNumber(ctx, nil)
-	require.NoError(t, err)
-	agreedL2Output, err := rollupClient.OutputAtBlock(ctx, latestBlock.NumberU64())
+	l2AgreedBlock, err := l2Seq.BlockByNumber(ctx, nil)
	require.NoError(t, err, "could not retrieve l2 agreed block")
-	l2Head := agreedL2Output.BlockRef.Hash
-	l2OutputRoot := agreedL2Output.OutputRoot
+	l2Head := l2AgreedBlock.Hash()
t.Log("Sending transactions to modify existing state, within challenged period") t.Log("Sending transactions to modify existing state, within challenged period")
SendDepositTx(t, cfg, l1Client, l2Seq, opts, func(l2Opts *DepositTxOpts) { SendDepositTx(t, cfg, l1Client, l2Seq, opts, func(l2Opts *DepositTxOpts) {
...@@ -221,7 +214,6 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool) { ...@@ -221,7 +214,6 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool) {
testFaultProofProgramScenario(t, ctx, sys, &FaultProofProgramTestScenario{ testFaultProofProgramScenario(t, ctx, sys, &FaultProofProgramTestScenario{
L1Head: l1Head, L1Head: l1Head,
L2Head: l2Head, L2Head: l2Head,
L2OutputRoot: common.Hash(l2OutputRoot),
L2Claim: common.Hash(l2Claim), L2Claim: common.Hash(l2Claim),
L2ClaimBlockNumber: l2ClaimBlockNumber, L2ClaimBlockNumber: l2ClaimBlockNumber,
Detached: detached, Detached: detached,
...@@ -231,7 +223,6 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool) { ...@@ -231,7 +223,6 @@ func testVerifyL2OutputRoot(t *testing.T, detached bool) {
type FaultProofProgramTestScenario struct { type FaultProofProgramTestScenario struct {
L1Head common.Hash L1Head common.Hash
L2Head common.Hash L2Head common.Hash
L2OutputRoot common.Hash
L2Claim common.Hash L2Claim common.Hash
L2ClaimBlockNumber uint64 L2ClaimBlockNumber uint64
Detached bool Detached bool
...@@ -240,7 +231,7 @@ type FaultProofProgramTestScenario struct { ...@@ -240,7 +231,7 @@ type FaultProofProgramTestScenario struct {
// testFaultProofProgramScenario runs the fault proof program in several contexts, given a test scenario. // testFaultProofProgramScenario runs the fault proof program in several contexts, given a test scenario.
func testFaultProofProgramScenario(t *testing.T, ctx context.Context, sys *System, s *FaultProofProgramTestScenario) { func testFaultProofProgramScenario(t *testing.T, ctx context.Context, sys *System, s *FaultProofProgramTestScenario) {
preimageDir := t.TempDir() preimageDir := t.TempDir()
-	fppConfig := oppconf.NewConfig(sys.RollupConfig, sys.L2GenesisCfg.Config, s.L1Head, s.L2Head, s.L2OutputRoot, common.Hash(s.L2Claim), s.L2ClaimBlockNumber)
+	fppConfig := oppconf.NewConfig(sys.RollupConfig, sys.L2GenesisCfg.Config, s.L1Head, s.L2Head, common.Hash(s.L2Claim), s.L2ClaimBlockNumber)
fppConfig.L1URL = sys.NodeEndpoint("l1") fppConfig.L1URL = sys.NodeEndpoint("l1")
fppConfig.L2URL = sys.NodeEndpoint("sequencer") fppConfig.L2URL = sys.NodeEndpoint("sequencer")
fppConfig.DataDir = preimageDir fppConfig.DataDir = preimageDir
......
package eth package eth
import ( import (
"errors"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
) )
type OutputResponse struct { type OutputResponse struct {
...@@ -15,70 +12,3 @@ type OutputResponse struct { ...@@ -15,70 +12,3 @@ type OutputResponse struct {
StateRoot common.Hash `json:"stateRoot"` StateRoot common.Hash `json:"stateRoot"`
Status *SyncStatus `json:"syncStatus"` Status *SyncStatus `json:"syncStatus"`
} }
var (
ErrInvalidOutput = errors.New("invalid output")
ErrInvalidOutputVersion = errors.New("invalid output version")
OutputVersionV0 = Bytes32{}
)
type Output interface {
// Version returns the version of the L2 output
Version() Bytes32
// Marshal a L2 output into a byte slice for hashing
Marshal() []byte
}
type OutputV0 struct {
StateRoot Bytes32
MessagePasserStorageRoot Bytes32
BlockHash common.Hash
}
func (o *OutputV0) Version() Bytes32 {
return OutputVersionV0
}
func (o *OutputV0) Marshal() []byte {
var buf [128]byte
version := o.Version()
copy(buf[:32], version[:])
copy(buf[32:], o.StateRoot[:])
copy(buf[64:], o.MessagePasserStorageRoot[:])
copy(buf[96:], o.BlockHash[:])
return buf[:]
}
// OutputRoot returns the keccak256 hash of the marshaled L2 output
func OutputRoot(output Output) Bytes32 {
marshaled := output.Marshal()
return Bytes32(crypto.Keccak256Hash(marshaled))
}
func UnmarshalOutput(data []byte) (Output, error) {
if len(data) < 32 {
return nil, ErrInvalidOutput
}
var ver Bytes32
copy(ver[:], data[:32])
switch ver {
case OutputVersionV0:
return unmarshalOutputV0(data)
default:
return nil, ErrInvalidOutputVersion
}
}
func unmarshalOutputV0(data []byte) (*OutputV0, error) {
if len(data) != 128 {
return nil, ErrInvalidOutput
}
var output OutputV0
// data[:32] is the version
copy(output.StateRoot[:], data[32:64])
copy(output.MessagePasserStorageRoot[:], data[64:96])
copy(output.BlockHash[:], data[96:128])
return &output, nil
}
package eth
import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
)
func TestOutputV0Codec(t *testing.T) {
output := OutputV0{
StateRoot: Bytes32{1, 2, 3},
MessagePasserStorageRoot: Bytes32{4, 5, 6},
BlockHash: common.Hash{7, 8, 9},
}
marshaled := output.Marshal()
unmarshaled, err := UnmarshalOutput(marshaled)
require.NoError(t, err)
unmarshaledV0 := unmarshaled.(*OutputV0)
require.Equal(t, output, *unmarshaledV0)
_, err = UnmarshalOutput([]byte{0: 0xA, 32: 0xA})
require.ErrorIs(t, err, ErrInvalidOutputVersion)
_, err = UnmarshalOutput([]byte{64: 0xA})
require.ErrorIs(t, err, ErrInvalidOutput)
}
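Dropping OutputV0 and its codec test from op-node/eth does not change what an output root is: it remains the keccak256 hash of the 128-byte v0 preimage that the deleted Marshal method assembled. A self-contained sketch of that computation, kept here only as a reference for the layout (it assumes nothing beyond go-ethereum's common and crypto packages):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// outputRootV0 mirrors the removed OutputV0.Marshal + OutputRoot helpers:
// keccak256(version ++ stateRoot ++ messagePasserStorageRoot ++ blockHash),
// where the v0 version word is 32 zero bytes.
func outputRootV0(stateRoot, messagePasserStorageRoot, blockHash common.Hash) common.Hash {
	var buf [128]byte // buf[0:32] stays zero: the v0 version
	copy(buf[32:64], stateRoot[:])
	copy(buf[64:96], messagePasserStorageRoot[:])
	copy(buf[96:128], blockHash[:])
	return crypto.Keccak256Hash(buf[:])
}

func main() {
	fmt.Println(outputRootV0(common.Hash{1}, common.Hash{2}, common.Hash{3}))
}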
...@@ -35,4 +35,7 @@ type SyncStatus struct { ...@@ -35,4 +35,7 @@ type SyncStatus struct {
// UnsafeL2SyncTarget points to the first unprocessed unsafe L2 block. // UnsafeL2SyncTarget points to the first unprocessed unsafe L2 block.
// It may be zeroed if there is no targeted block. // It may be zeroed if there is no targeted block.
UnsafeL2SyncTarget L2BlockRef `json:"queued_unsafe_l2"` UnsafeL2SyncTarget L2BlockRef `json:"queued_unsafe_l2"`
// EngineSyncTarget points to the L2 block that the execution engine is syncing to.
// If it is ahead from UnsafeL2, the engine is in progress of P2P sync.
EngineSyncTarget L2BlockRef `json:"engine_sync_target"`
} }
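A minimal consumer-side sketch of how the new field is meant to be read (it assumes the pre-existing UnsafeL2 field of SyncStatus): while the execution engine is still downloading blocks over P2P, EngineSyncTarget runs ahead of the unsafe head the rollup node has fully processed, and the two converge once engine sync completes, as exercised by TestUnsafeSync and TestEngineP2PSync above.

// engineStillSyncing is an illustrative helper, not part of this change.
func engineStillSyncing(ss *SyncStatus) bool {
	return ss.EngineSyncTarget.Number > ss.UnsafeL2.Number
}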
...@@ -214,6 +214,21 @@ var ( ...@@ -214,6 +214,21 @@ var (
EnvVars: prefixEnvVars("L2_BACKUP_UNSAFE_SYNC_RPC_TRUST_RPC"), EnvVars: prefixEnvVars("L2_BACKUP_UNSAFE_SYNC_RPC_TRUST_RPC"),
Required: false, Required: false,
} }
L2EngineSyncEnabled = &cli.BoolFlag{
Name: "l2.engine-sync",
Usage: "Enables or disables execution engine P2P sync",
EnvVars: prefixEnvVars("L2_ENGINE_SYNC_ENABLED"),
Required: false,
Value: false,
}
SkipSyncStartCheck = &cli.BoolFlag{
Name: "l2.skip-sync-start-check",
Usage: "Skip sanity check of consistency of L1 origins of the unsafe L2 blocks when determining the sync-starting point. " +
"This defers the L1-origin verification, and is recommended to use in when utilizing l2.engine-sync",
EnvVars: prefixEnvVars("L2_SKIP_SYNC_START_CHECK"),
Required: false,
Value: false,
}
) )
var requiredFlags = []cli.Flag{ var requiredFlags = []cli.Flag{
...@@ -252,6 +267,8 @@ var optionalFlags = []cli.Flag{ ...@@ -252,6 +267,8 @@ var optionalFlags = []cli.Flag{
HeartbeatURLFlag, HeartbeatURLFlag,
BackupL2UnsafeSyncRPC, BackupL2UnsafeSyncRPC,
BackupL2UnsafeSyncRPCTrustRPC, BackupL2UnsafeSyncRPCTrustRPC,
L2EngineSyncEnabled,
SkipSyncStartCheck,
} }
// Flags contains the list of configuration options available to the binary. // Flags contains the list of configuration options available to the binary.
......
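These two flags feed the sync.Config that the driver and derivation pipeline now take (see the op-node wiring further below). A hedged sketch of that mapping, not the actual op-node config code: the EngineSync field name is taken from the tests above, while the SkipSyncStartCheck field name is an assumption.

// syncConfigFromFlags builds a sync.Config from the CLI flags defined above
// (illustrative only; the flag names are copied verbatim from the diff).
func syncConfigFromFlags(ctx *cli.Context) sync.Config {
	return sync.Config{
		EngineSync:         ctx.Bool("l2.engine-sync"),
		SkipSyncStartCheck: ctx.Bool("l2.skip-sync-start-check"),
	}
}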
...@@ -30,6 +30,7 @@ var ( ...@@ -30,6 +30,7 @@ var (
Name: "p2p.scoring", Name: "p2p.scoring",
Usage: "Sets the peer scoring strategy for the P2P stack. Can be one of: none or light.", Usage: "Sets the peer scoring strategy for the P2P stack. Can be one of: none or light.",
Required: false, Required: false,
Value: "light",
EnvVars: p2pEnv("PEER_SCORING"), EnvVars: p2pEnv("PEER_SCORING"),
} }
PeerScoring = &cli.StringFlag{ PeerScoring = &cli.StringFlag{
......
...@@ -4,10 +4,12 @@ import ( ...@@ -4,10 +4,12 @@ import (
"context" "context"
"fmt" "fmt"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/version" "github.com/ethereum-optimism/optimism/op-node/version"
...@@ -18,7 +20,6 @@ type l2EthClient interface { ...@@ -18,7 +20,6 @@ type l2EthClient interface {
// GetProof returns a proof of the account, it may return a nil result without error if the address was not found. // GetProof returns a proof of the account, it may return a nil result without error if the address was not found.
// Optionally keys of the account storage trie can be specified to include with corresponding values in the proof. // Optionally keys of the account storage trie can be specified to include with corresponding values in the proof.
GetProof(ctx context.Context, address common.Address, storage []common.Hash, blockTag string) (*eth.AccountResult, error) GetProof(ctx context.Context, address common.Address, storage []common.Hash, blockTag string) (*eth.AccountResult, error)
OutputV0AtBlock(ctx context.Context, blockHash common.Hash) (*eth.OutputV0, error)
} }
type driverClient interface { type driverClient interface {
...@@ -98,16 +99,40 @@ func (n *nodeAPI) OutputAtBlock(ctx context.Context, number hexutil.Uint64) (*et ...@@ -98,16 +99,40 @@ func (n *nodeAPI) OutputAtBlock(ctx context.Context, number hexutil.Uint64) (*et
return nil, fmt.Errorf("failed to get L2 block ref with sync status: %w", err) return nil, fmt.Errorf("failed to get L2 block ref with sync status: %w", err)
} }
-	output, err := n.client.OutputV0AtBlock(ctx, ref.Hash)
+	head, err := n.client.InfoByHash(ctx, ref.Hash)
	if err != nil {
-		return nil, fmt.Errorf("failed to get L2 output at block %s: %w", ref, err)
+		return nil, fmt.Errorf("failed to get L2 block by hash %s: %w", ref, err)
	}
+	if head == nil {
+		return nil, ethereum.NotFound
+	}
+	proof, err := n.client.GetProof(ctx, predeploys.L2ToL1MessagePasserAddr, []common.Hash{}, ref.Hash.String())
+	if err != nil {
+		return nil, fmt.Errorf("failed to get contract proof at block %s: %w", ref, err)
+	}
+	if proof == nil {
+		return nil, fmt.Errorf("proof %w", ethereum.NotFound)
+	}
+	// make sure that the proof (including storage hash) that we retrieved is correct by verifying it against the state-root
+	if err := proof.Verify(head.Root()); err != nil {
+		n.log.Error("invalid withdrawal root detected in block", "stateRoot", head.Root(), "blocknum", number, "msg", err)
+		return nil, fmt.Errorf("invalid withdrawal root hash, state root was %s: %w", head.Root(), err)
+	}
+	var l2OutputRootVersion eth.Bytes32 // it's zero for now
+	l2OutputRoot, err := rollup.ComputeL2OutputRootV0(head, proof.StorageHash)
+	if err != nil {
+		n.log.Error("Error computing L2 output root, nil ptr passed to hashing function")
+		return nil, err
+	}
	return &eth.OutputResponse{
-		Version:               output.Version(),
-		OutputRoot:            eth.OutputRoot(output),
+		Version:               l2OutputRootVersion,
+		OutputRoot:            l2OutputRoot,
		BlockRef:              ref,
-		WithdrawalStorageRoot: common.Hash(output.MessagePasserStorageRoot),
-		StateRoot:             common.Hash(output.StateRoot),
+		WithdrawalStorageRoot: proof.StorageHash,
+		StateRoot:             head.Root(),
		Status:                status,
	}, nil
} }
......
...@@ -10,6 +10,7 @@ import ( ...@@ -10,6 +10,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/p2p" "github.com/ethereum-optimism/optimism/op-node/p2p"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/driver" "github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof" oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
...@@ -43,6 +44,8 @@ type Config struct { ...@@ -43,6 +44,8 @@ type Config struct {
// Optional // Optional
Tracer Tracer Tracer Tracer
Heartbeat HeartbeatConfig Heartbeat HeartbeatConfig
Sync sync.Config
} }
type RPCConfig struct { type RPCConfig struct {
......
...@@ -199,7 +199,7 @@ func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger ...@@ -199,7 +199,7 @@ func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger
return err return err
} }
n.l2Driver = driver.NewDriver(&cfg.Driver, &cfg.Rollup, n.l2Source, n.l1Source, n, n, n.log, snapshotLog, n.metrics, cfg.ConfigPersistence) n.l2Driver = driver.NewDriver(&cfg.Driver, &cfg.Rollup, n.l2Source, n.l1Source, n, n, n.log, snapshotLog, n.metrics, cfg.ConfigPersistence, &cfg.Sync)
return nil return nil
} }
......
...@@ -16,6 +16,7 @@ import ( ...@@ -16,6 +16,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/metrics" "github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
...@@ -83,6 +84,17 @@ func TestOutputAtBlock(t *testing.T) { ...@@ -83,6 +84,17 @@ func TestOutputAtBlock(t *testing.T) {
} }
l2Client := &testutils.MockL2Client{} l2Client := &testutils.MockL2Client{}
info := &testutils.MockBlockInfo{
InfoHash: header.Hash(),
InfoParentHash: header.ParentHash,
InfoCoinbase: header.Coinbase,
InfoRoot: header.Root,
InfoNum: header.Number.Uint64(),
InfoTime: header.Time,
InfoMixDigest: header.MixDigest,
InfoBaseFee: header.BaseFee,
InfoReceiptRoot: header.ReceiptHash,
}
ref := eth.L2BlockRef{ ref := eth.L2BlockRef{
Hash: header.Hash(), Hash: header.Hash(),
Number: header.Number.Uint64(), Number: header.Number.Uint64(),
...@@ -91,12 +103,8 @@ func TestOutputAtBlock(t *testing.T) { ...@@ -91,12 +103,8 @@ func TestOutputAtBlock(t *testing.T) {
L1Origin: eth.BlockID{}, L1Origin: eth.BlockID{},
SequenceNumber: 0, SequenceNumber: 0,
} }
output := &eth.OutputV0{ l2Client.ExpectInfoByHash(common.HexToHash("0x8512bee03061475e4b069171f7b406097184f16b22c3f5c97c0abfc49591c524"), info, nil)
StateRoot: eth.Bytes32(header.Root), l2Client.ExpectGetProof(predeploys.L2ToL1MessagePasserAddr, []common.Hash{}, "0x8512bee03061475e4b069171f7b406097184f16b22c3f5c97c0abfc49591c524", &result, nil)
BlockHash: ref.Hash,
MessagePasserStorageRoot: eth.Bytes32(result.StorageHash),
}
l2Client.ExpectOutputV0AtBlock(common.HexToHash("0x8512bee03061475e4b069171f7b406097184f16b22c3f5c97c0abfc49591c524"), output, nil)
drClient := &mockDriverClient{} drClient := &mockDriverClient{}
status := randomSyncStatus(rand.New(rand.NewSource(123))) status := randomSyncStatus(rand.New(rand.NewSource(123)))
...@@ -159,6 +167,7 @@ func randomSyncStatus(rng *rand.Rand) *eth.SyncStatus { ...@@ -159,6 +167,7 @@ func randomSyncStatus(rng *rand.Rand) *eth.SyncStatus {
SafeL2: testutils.RandomL2BlockRef(rng), SafeL2: testutils.RandomL2BlockRef(rng),
FinalizedL2: testutils.RandomL2BlockRef(rng), FinalizedL2: testutils.RandomL2BlockRef(rng),
UnsafeL2SyncTarget: testutils.RandomL2BlockRef(rng), UnsafeL2SyncTarget: testutils.RandomL2BlockRef(rng),
EngineSyncTarget: testutils.RandomL2BlockRef(rng),
} }
} }
......

...@@ -17,6 +17,7 @@ import ( ...@@ -17,6 +17,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/metrics" "github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/testutils" "github.com/ethereum-optimism/optimism/op-node/testutils"
) )
...@@ -246,7 +247,7 @@ func TestEngineQueue_Finalize(t *testing.T) { ...@@ -246,7 +247,7 @@ func TestEngineQueue_Finalize(t *testing.T) {
prev := &fakeAttributesQueue{} prev := &fakeAttributesQueue{}
eq := NewEngineQueue(logger, cfg, eng, metrics, prev, l1F) eq := NewEngineQueue(logger, cfg, eng, metrics, prev, l1F, &sync.Config{})
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF) require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
require.Equal(t, refB1, eq.SafeL2Head(), "L2 reset should go back to sequence window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for") require.Equal(t, refB1, eq.SafeL2Head(), "L2 reset should go back to sequence window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for")
...@@ -480,7 +481,7 @@ func TestEngineQueue_ResetWhenUnsafeOriginNotCanonical(t *testing.T) { ...@@ -480,7 +481,7 @@ func TestEngineQueue_ResetWhenUnsafeOriginNotCanonical(t *testing.T) {
prev := &fakeAttributesQueue{origin: refE} prev := &fakeAttributesQueue{origin: refE}
eq := NewEngineQueue(logger, cfg, eng, metrics, prev, l1F) eq := NewEngineQueue(logger, cfg, eng, metrics, prev, l1F, &sync.Config{})
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF) require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
require.Equal(t, refB1, eq.SafeL2Head(), "L2 reset should go back to sequence window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for") require.Equal(t, refB1, eq.SafeL2Head(), "L2 reset should go back to sequence window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for")
...@@ -811,7 +812,7 @@ func TestVerifyNewL1Origin(t *testing.T) { ...@@ -811,7 +812,7 @@ func TestVerifyNewL1Origin(t *testing.T) {
}, nil) }, nil)
prev := &fakeAttributesQueue{origin: refE} prev := &fakeAttributesQueue{origin: refE}
eq := NewEngineQueue(logger, cfg, eng, metrics, prev, l1F) eq := NewEngineQueue(logger, cfg, eng, metrics, prev, l1F, &sync.Config{})
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF) require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
require.Equal(t, refB1, eq.SafeL2Head(), "L2 reset should go back to sequence window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for") require.Equal(t, refB1, eq.SafeL2Head(), "L2 reset should go back to sequence window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for")
...@@ -909,7 +910,7 @@ func TestBlockBuildingRace(t *testing.T) { ...@@ -909,7 +910,7 @@ func TestBlockBuildingRace(t *testing.T) {
} }
prev := &fakeAttributesQueue{origin: refA, attrs: attrs} prev := &fakeAttributesQueue{origin: refA, attrs: attrs}
eq := NewEngineQueue(logger, cfg, eng, metrics, prev, l1F) eq := NewEngineQueue(logger, cfg, eng, metrics, prev, l1F, &sync.Config{})
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF) require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
id := eth.PayloadID{0xff} id := eth.PayloadID{0xff}
...@@ -1079,8 +1080,9 @@ func TestResetLoop(t *testing.T) { ...@@ -1079,8 +1080,9 @@ func TestResetLoop(t *testing.T) {
prev := &fakeAttributesQueue{origin: refA, attrs: attrs} prev := &fakeAttributesQueue{origin: refA, attrs: attrs}
eq := NewEngineQueue(logger, cfg, eng, metrics.NoopMetrics, prev, l1F) eq := NewEngineQueue(logger, cfg, eng, metrics.NoopMetrics, prev, l1F, &sync.Config{})
eq.unsafeHead = refA2 eq.unsafeHead = refA2
eq.engineSyncTarget = refA2
eq.safeHead = refA1 eq.safeHead = refA1
eq.finalized = refA0 eq.finalized = refA0
...@@ -1176,7 +1178,7 @@ func TestEngineQueue_StepPopOlderUnsafe(t *testing.T) { ...@@ -1176,7 +1178,7 @@ func TestEngineQueue_StepPopOlderUnsafe(t *testing.T) {
prev := &fakeAttributesQueue{origin: refA} prev := &fakeAttributesQueue{origin: refA}
eq := NewEngineQueue(logger, cfg, eng, metrics.NoopMetrics, prev, l1F) eq := NewEngineQueue(logger, cfg, eng, metrics.NoopMetrics, prev, l1F, &sync.Config{})
eq.unsafeHead = refA2 eq.unsafeHead = refA2
eq.safeHead = refA0 eq.safeHead = refA0
eq.finalized = refA0 eq.finalized = refA0
......
...@@ -96,3 +96,6 @@ var ErrCritical = NewCriticalError(nil) ...@@ -96,3 +96,6 @@ var ErrCritical = NewCriticalError(nil)
// NotEnoughData implies that the function currently does not have enough data to progress // NotEnoughData implies that the function currently does not have enough data to progress
// but if it is retried enough times, it will eventually return a real value or io.EOF // but if it is retried enough times, it will eventually return a real value or io.EOF
var NotEnoughData = errors.New("not enough data") var NotEnoughData = errors.New("not enough data")
// EngineP2PSyncing implies that the execution engine is currently in the process of a P2P sync.
var EngineP2PSyncing = errors.New("engine is P2P syncing")
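A hedged sketch of how this sentinel is meant to be consumed, mirroring the pipeline and driver changes further down: callers match it with errors.Is and back off rather than treating the step as failed. Everything here except derive.EngineP2PSyncing and io.EOF is illustrative.

// Sketch only: back off while the execution engine finishes its P2P sync.
if err := pipeline.Step(ctx); errors.Is(err, derive.EngineP2PSyncing) {
	logger.Debug("engine is P2P syncing, retrying later", "err", err)
	// wait for the next tick instead of resetting the pipeline
} else if err != nil && err != io.EOF {
	return fmt.Errorf("derivation step failed: %w", err)
}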
...@@ -2,6 +2,7 @@ package derive ...@@ -2,6 +2,7 @@ package derive
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"io" "io"
...@@ -9,6 +10,7 @@ import ( ...@@ -9,6 +10,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
) )
type Metrics interface { type Metrics interface {
...@@ -46,6 +48,7 @@ type EngineQueueStage interface { ...@@ -46,6 +48,7 @@ type EngineQueueStage interface {
Finalized() eth.L2BlockRef Finalized() eth.L2BlockRef
UnsafeL2Head() eth.L2BlockRef UnsafeL2Head() eth.L2BlockRef
SafeL2Head() eth.L2BlockRef SafeL2Head() eth.L2BlockRef
EngineSyncTarget() eth.L2BlockRef
Origin() eth.L1BlockRef Origin() eth.L1BlockRef
SystemConfig() eth.SystemConfig SystemConfig() eth.SystemConfig
SetUnsafeHead(head eth.L2BlockRef) SetUnsafeHead(head eth.L2BlockRef)
...@@ -75,7 +78,7 @@ type DerivationPipeline struct { ...@@ -75,7 +78,7 @@ type DerivationPipeline struct {
} }
// NewDerivationPipeline creates a derivation pipeline, which should be reset before use. // NewDerivationPipeline creates a derivation pipeline, which should be reset before use.
func NewDerivationPipeline(log log.Logger, cfg *rollup.Config, l1Fetcher L1Fetcher, engine Engine, metrics Metrics) *DerivationPipeline { func NewDerivationPipeline(log log.Logger, cfg *rollup.Config, l1Fetcher L1Fetcher, engine Engine, metrics Metrics, syncCfg *sync.Config) *DerivationPipeline {
// Pull stages // Pull stages
l1Traversal := NewL1Traversal(log, cfg, l1Fetcher) l1Traversal := NewL1Traversal(log, cfg, l1Fetcher)
...@@ -89,7 +92,7 @@ func NewDerivationPipeline(log log.Logger, cfg *rollup.Config, l1Fetcher L1Fetch ...@@ -89,7 +92,7 @@ func NewDerivationPipeline(log log.Logger, cfg *rollup.Config, l1Fetcher L1Fetch
attributesQueue := NewAttributesQueue(log, cfg, attrBuilder, batchQueue) attributesQueue := NewAttributesQueue(log, cfg, attrBuilder, batchQueue)
// Step stages // Step stages
eng := NewEngineQueue(log, cfg, engine, metrics, attributesQueue, l1Fetcher) eng := NewEngineQueue(log, cfg, engine, metrics, attributesQueue, l1Fetcher, syncCfg)
// Reset from engine queue then up from L1 Traversal. The stages do not talk to each other during // Reset from engine queue then up from L1 Traversal. The stages do not talk to each other during
// the reset, but after the engine queue, this is the order in which the stages could talk to each other. // the reset, but after the engine queue, this is the order in which the stages could talk to each other.
...@@ -147,6 +150,10 @@ func (dp *DerivationPipeline) UnsafeL2Head() eth.L2BlockRef { ...@@ -147,6 +150,10 @@ func (dp *DerivationPipeline) UnsafeL2Head() eth.L2BlockRef {
return dp.eng.UnsafeL2Head() return dp.eng.UnsafeL2Head()
} }
func (dp *DerivationPipeline) EngineSyncTarget() eth.L2BlockRef {
return dp.eng.EngineSyncTarget()
}
func (dp *DerivationPipeline) StartPayload(ctx context.Context, parent eth.L2BlockRef, attrs *eth.PayloadAttributes, updateSafe bool) (errType BlockInsertionErrType, err error) { func (dp *DerivationPipeline) StartPayload(ctx context.Context, parent eth.L2BlockRef, attrs *eth.PayloadAttributes, updateSafe bool) (errType BlockInsertionErrType, err error) {
return dp.eng.StartPayload(ctx, parent, attrs, updateSafe) return dp.eng.StartPayload(ctx, parent, attrs, updateSafe)
} }
...@@ -199,6 +206,8 @@ func (dp *DerivationPipeline) Step(ctx context.Context) error { ...@@ -199,6 +206,8 @@ func (dp *DerivationPipeline) Step(ctx context.Context) error {
if err := dp.eng.Step(ctx); err == io.EOF { if err := dp.eng.Step(ctx); err == io.EOF {
// If every stage has returned io.EOF, try to advance the L1 Origin // If every stage has returned io.EOF, try to advance the L1 Origin
return dp.traversal.AdvanceL1Block(ctx) return dp.traversal.AdvanceL1Block(ctx)
} else if errors.Is(err, EngineP2PSyncing) {
return err
} else if err != nil { } else if err != nil {
return fmt.Errorf("engine stage failed: %w", err) return fmt.Errorf("engine stage failed: %w", err)
} else { } else {
......
...@@ -10,6 +10,7 @@ import ( ...@@ -10,6 +10,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
) )
type Metrics interface { type Metrics interface {
...@@ -58,6 +59,7 @@ type DerivationPipeline interface { ...@@ -58,6 +59,7 @@ type DerivationPipeline interface {
UnsafeL2Head() eth.L2BlockRef UnsafeL2Head() eth.L2BlockRef
Origin() eth.L1BlockRef Origin() eth.L1BlockRef
EngineReady() bool EngineReady() bool
EngineSyncTarget() eth.L2BlockRef
} }
type L1StateIface interface { type L1StateIface interface {
...@@ -108,13 +110,13 @@ type SequencerStateListener interface { ...@@ -108,13 +110,13 @@ type SequencerStateListener interface {
} }
// NewDriver composes an events handler that tracks L1 state, triggers L2 derivation, and optionally sequences new L2 blocks. // NewDriver composes an events handler that tracks L1 state, triggers L2 derivation, and optionally sequences new L2 blocks.
func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, altSync AltSync, network Network, log log.Logger, snapshotLog log.Logger, metrics Metrics, sequencerStateListener SequencerStateListener) *Driver { func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, altSync AltSync, network Network, log log.Logger, snapshotLog log.Logger, metrics Metrics, sequencerStateListener SequencerStateListener, syncCfg *sync.Config) *Driver {
l1 = NewMeteredL1Fetcher(l1, metrics) l1 = NewMeteredL1Fetcher(l1, metrics)
l1State := NewL1State(log, metrics) l1State := NewL1State(log, metrics)
sequencerConfDepth := NewConfDepth(driverCfg.SequencerConfDepth, l1State.L1Head, l1) sequencerConfDepth := NewConfDepth(driverCfg.SequencerConfDepth, l1State.L1Head, l1)
findL1Origin := NewL1OriginSelector(log, cfg, sequencerConfDepth) findL1Origin := NewL1OriginSelector(log, cfg, sequencerConfDepth)
verifConfDepth := NewConfDepth(driverCfg.VerifierConfDepth, l1State.L1Head, l1) verifConfDepth := NewConfDepth(driverCfg.VerifierConfDepth, l1State.L1Head, l1)
derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l2, metrics) derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l2, metrics, syncCfg)
attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, l2) attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, l2)
engine := derivationPipeline engine := derivationPipeline
meteredEngine := NewMeteredEngine(cfg, engine, metrics, log) meteredEngine := NewMeteredEngine(cfg, engine, metrics, log)
......
...@@ -314,7 +314,12 @@ func (s *Driver) eventLoop() { ...@@ -314,7 +314,12 @@ func (s *Driver) eventLoop() {
err := s.derivation.Step(context.Background()) err := s.derivation.Step(context.Background())
stepAttempts += 1 // count as attempt by default. We reset to 0 if we are making healthy progress. stepAttempts += 1 // count as attempt by default. We reset to 0 if we are making healthy progress.
if err == io.EOF { if err == io.EOF {
s.log.Debug("Derivation process went idle", "progress", s.derivation.Origin()) s.log.Debug("Derivation process went idle", "progress", s.derivation.Origin(), "err", err)
stepAttempts = 0
s.metrics.SetDerivationIdle(true)
continue
} else if err != nil && errors.Is(err, derive.EngineP2PSyncing) {
s.log.Debug("Derivation process went idle because the engine is syncing", "progress", s.derivation.Origin(), "sync_target", s.derivation.EngineSyncTarget(), "err", err)
stepAttempts = 0 stepAttempts = 0
s.metrics.SetDerivationIdle(true) s.metrics.SetDerivationIdle(true)
continue continue
...@@ -474,6 +479,7 @@ func (s *Driver) syncStatus() *eth.SyncStatus { ...@@ -474,6 +479,7 @@ func (s *Driver) syncStatus() *eth.SyncStatus {
SafeL2: s.derivation.SafeL2Head(), SafeL2: s.derivation.SafeL2Head(),
FinalizedL2: s.derivation.Finalized(), FinalizedL2: s.derivation.Finalized(),
UnsafeL2SyncTarget: s.derivation.UnsafeL2SyncTarget(), UnsafeL2SyncTarget: s.derivation.UnsafeL2SyncTarget(),
EngineSyncTarget: s.derivation.EngineSyncTarget(),
} }
} }
......
...@@ -5,33 +5,32 @@ import ( ...@@ -5,33 +5,32 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings" "github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/crypto"
) )
var ErrNilProof = errors.New("output root proof is nil") var NilProof = errors.New("Output root proof is nil")
// ComputeL2OutputRoot computes the L2 output root by hashing an output root proof. // ComputeL2OutputRoot computes the L2 output root by hashing an output root proof.
func ComputeL2OutputRoot(proofElements *bindings.TypesOutputRootProof) (eth.Bytes32, error) { func ComputeL2OutputRoot(proofElements *bindings.TypesOutputRootProof) (eth.Bytes32, error) {
if proofElements == nil { if proofElements == nil {
return eth.Bytes32{}, ErrNilProof return eth.Bytes32{}, NilProof
} }
if proofElements.Version != [32]byte{} { digest := crypto.Keccak256Hash(
return eth.Bytes32{}, errors.New("unsupported output root version") proofElements.Version[:],
} proofElements.StateRoot[:],
l2Output := eth.OutputV0{ proofElements.MessagePasserStorageRoot[:],
StateRoot: eth.Bytes32(proofElements.StateRoot), proofElements.LatestBlockhash[:],
MessagePasserStorageRoot: proofElements.MessagePasserStorageRoot, )
BlockHash: proofElements.LatestBlockhash, return eth.Bytes32(digest), nil
}
return eth.OutputRoot(&l2Output), nil
} }
func ComputeL2OutputRootV0(block eth.BlockInfo, storageRoot [32]byte) (eth.Bytes32, error) { func ComputeL2OutputRootV0(block eth.BlockInfo, storageRoot [32]byte) (eth.Bytes32, error) {
stateRoot := block.Root() var l2OutputRootVersion eth.Bytes32 // it's zero for now
l2Output := eth.OutputV0{ return ComputeL2OutputRoot(&bindings.TypesOutputRootProof{
StateRoot: eth.Bytes32(stateRoot), Version: l2OutputRootVersion,
StateRoot: block.Root(),
MessagePasserStorageRoot: storageRoot, MessagePasserStorageRoot: storageRoot,
BlockHash: block.Hash(), LatestBlockhash: block.Hash(),
} })
return eth.OutputRoot(&l2Output), nil
} }
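Both helpers above reduce to the same version-0 encoding: keccak256 over the four 32-byte fields in order. A minimal sketch of that formula; outputRootV0 is an illustrative name.

// Sketch only: v0 output root = keccak256(version ++ stateRoot ++ messagePasserStorageRoot ++ latestBlockhash).
func outputRootV0(stateRoot, storageRoot, blockHash common.Hash) eth.Bytes32 {
	var version [32]byte // zero for the v0 encoding
	return eth.Bytes32(crypto.Keccak256Hash(version[:], stateRoot[:], storageRoot[:], blockHash[:]))
}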
package sync
type Config struct {
// EngineSync is true when the EngineQueue can trigger execution engine P2P sync.
EngineSync bool `json:"engine_sync"`
// SkipSyncStartCheck skips the sanity check of the consistency of L1 origins of the unsafe L2 blocks when determining the sync-starting point. This defers the L1-origin verification and is recommended when utilizing l2.engine-sync.
SkipSyncStartCheck bool `json:"skip_sync_start_check"`
}
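For reference, a tiny self-contained sketch of the JSON wire form implied by the struct tags above; the field values are arbitrary.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
)

func main() {
	b, _ := json.Marshal(sync.Config{EngineSync: true, SkipSyncStartCheck: false})
	fmt.Println(string(b)) // {"engine_sync":true,"skip_sync_start_check":false}
}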
...@@ -102,7 +102,7 @@ func currentHeads(ctx context.Context, cfg *rollup.Config, l2 L2Chain) (*FindHea ...@@ -102,7 +102,7 @@ func currentHeads(ctx context.Context, cfg *rollup.Config, l2 L2Chain) (*FindHea
// Plausible: meaning that the blockhash of the L2 block's L1 origin // Plausible: meaning that the blockhash of the L2 block's L1 origin
// (as reported in the L1 Attributes deposit within the L2 block) is not canonical at another height in the L1 chain, // (as reported in the L1 Attributes deposit within the L2 block) is not canonical at another height in the L1 chain,
// and the same holds for all its ancestors. // and the same holds for all its ancestors.
func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain, lgr log.Logger) (result *FindHeadsResult, err error) { func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain, lgr log.Logger, syncCfg *Config) (result *FindHeadsResult, err error) {
// Fetch current L2 forkchoice state // Fetch current L2 forkchoice state
result, err = currentHeads(ctx, cfg, l2) result, err = currentHeads(ctx, cfg, l2)
if err != nil { if err != nil {
...@@ -170,18 +170,18 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain ...@@ -170,18 +170,18 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain
if (n.Number == result.Finalized.Number) && (n.Hash != result.Finalized.Hash) { if (n.Number == result.Finalized.Number) && (n.Hash != result.Finalized.Hash) {
return nil, fmt.Errorf("%w: finalized %s, got: %s", ReorgFinalizedErr, result.Finalized, n) return nil, fmt.Errorf("%w: finalized %s, got: %s", ReorgFinalizedErr, result.Finalized, n)
} }
// Check we are not reorging L2 incredibly deep
if n.L1Origin.Number+(MaxReorgSeqWindows*cfg.SeqWindowSize) < prevUnsafe.L1Origin.Number {
// If the reorg depth is too large, something is fishy.
// This can legitimately happen if L1 goes down for a while. But in that case,
// restarting the L2 node with a bigger configured MaxReorgDepth is an acceptable
// stopgap solution.
return nil, fmt.Errorf("%w: traversed back to L2 block %s, but too deep compared to previous unsafe block %s", TooDeepReorgErr, n, prevUnsafe)
}
// If we don't have a usable unsafe head, then set it // If we don't have a usable unsafe head, then set it
if result.Unsafe == (eth.L2BlockRef{}) { if result.Unsafe == (eth.L2BlockRef{}) {
result.Unsafe = n result.Unsafe = n
// Check we are not reorging L2 incredibly deep
if n.L1Origin.Number+(MaxReorgSeqWindows*cfg.SeqWindowSize) < prevUnsafe.L1Origin.Number {
// If the reorg depth is too large, something is fishy.
// This can legitimately happen if L1 goes down for a while. But in that case,
// restarting the L2 node with a bigger configured MaxReorgDepth is an acceptable
// stopgap solution.
return nil, fmt.Errorf("%w: traversed back to L2 block %s, but too deep compared to previous unsafe block %s", TooDeepReorgErr, n, prevUnsafe)
}
} }
if ahead { if ahead {
...@@ -212,6 +212,11 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain ...@@ -212,6 +212,11 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain
return result, nil return result, nil
} }
if syncCfg.SkipSyncStartCheck && highestL2WithCanonicalL1Origin.Hash == n.Hash {
lgr.Info("Found highest L2 block with canonical L1 origin. Skip further sanity check and jump to the safe head")
n = result.Safe
continue
}
// Pull L2 parent for next iteration // Pull L2 parent for next iteration
parent, err := l2.L2BlockRefByHash(ctx, n.ParentHash) parent, err := l2.L2BlockRefByHash(ctx, n.ParentHash)
if err != nil { if err != nil {
......
...@@ -76,7 +76,7 @@ func (c *syncStartTestCase) Run(t *testing.T) { ...@@ -76,7 +76,7 @@ func (c *syncStartTestCase) Run(t *testing.T) {
} }
lgr := log.New() lgr := log.New()
lgr.SetHandler(log.DiscardHandler()) lgr.SetHandler(log.DiscardHandler())
result, err := FindL2Heads(context.Background(), cfg, chain, chain, lgr) result, err := FindL2Heads(context.Background(), cfg, chain, chain, lgr, &Config{})
if c.ExpectedErr != nil { if c.ExpectedErr != nil {
require.ErrorIs(t, err, c.ExpectedErr, "expected error") require.ErrorIs(t, err, c.ExpectedErr, "expected error")
return return
...@@ -286,6 +286,37 @@ func TestFindSyncStart(t *testing.T) { ...@@ -286,6 +286,37 @@ func TestFindSyncStart(t *testing.T) {
SafeL2Head: 'D', SafeL2Head: 'D',
ExpectedErr: WrongChainErr, ExpectedErr: WrongChainErr,
}, },
{
// FindL2Heads() keeps walking back to safe head after finding canonical unsafe head
// TooDeepReorgErr must not be raised
Name: "long traverse to safe head",
GenesisL1Num: 0,
L1: "abcdefgh",
L2: "ABCDEFGH",
NewL1: "abcdefgx",
PreFinalizedL2: 'B',
PreSafeL2: 'B',
GenesisL1: 'a',
GenesisL2: 'A',
UnsafeL2Head: 'G',
SeqWindowSize: 1,
SafeL2Head: 'B',
ExpectedErr: nil,
},
{
// L2 reorg is too deep
Name: "reorg too deep",
GenesisL1Num: 0,
L1: "abcdefgh",
L2: "ABCDEFGH",
NewL1: "abijklmn",
PreFinalizedL2: 'B',
PreSafeL2: 'B',
GenesisL1: 'a',
GenesisL2: 'A',
SeqWindowSize: 1,
ExpectedErr: TooDeepReorgErr,
},
} }
for _, testCase := range testCases { for _, testCase := range testCases {
......
...@@ -22,6 +22,7 @@ import ( ...@@ -22,6 +22,7 @@ import (
p2pcli "github.com/ethereum-optimism/optimism/op-node/p2p/cli" p2pcli "github.com/ethereum-optimism/optimism/op-node/p2p/cli"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/driver" "github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
) )
// NewConfig creates a Config from the provided flags or environment variables. // NewConfig creates a Config from the provided flags or environment variables.
...@@ -58,6 +59,8 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { ...@@ -58,6 +59,8 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
l2SyncEndpoint := NewL2SyncEndpointConfig(ctx) l2SyncEndpoint := NewL2SyncEndpointConfig(ctx)
syncConfig := NewSyncConfig(ctx)
cfg := &node.Config{ cfg := &node.Config{
L1: l1Endpoint, L1: l1Endpoint,
L2: l2Endpoint, L2: l2Endpoint,
...@@ -88,6 +91,7 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) { ...@@ -88,6 +91,7 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*node.Config, error) {
URL: ctx.String(flags.HeartbeatURLFlag.Name), URL: ctx.String(flags.HeartbeatURLFlag.Name),
}, },
ConfigPersistence: configPersistence, ConfigPersistence: configPersistence,
Sync: *syncConfig,
} }
if err := cfg.LoadPersisted(log); err != nil { if err := cfg.LoadPersisted(log); err != nil {
...@@ -214,3 +218,10 @@ func NewSnapshotLogger(ctx *cli.Context) (log.Logger, error) { ...@@ -214,3 +218,10 @@ func NewSnapshotLogger(ctx *cli.Context) (log.Logger, error) {
logger.SetHandler(handler) logger.SetHandler(handler)
return logger, nil return logger, nil
} }
func NewSyncConfig(ctx *cli.Context) *sync.Config {
return &sync.Config{
EngineSync: ctx.Bool(flags.L2EngineSyncEnabled.Name),
SkipSyncStartCheck: ctx.Bool(flags.SkipSyncStartCheck.Name),
}
}
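The two flags read here live in the op-node flags package; a hedged sketch of what such definitions look like, following the cli flag pattern used elsewhere in this diff. The Name, Usage, and EnvVars strings are assumptions — only l2.engine-sync is mentioned in the sync.Config comment above.

// Sketch only: plausible boolean flag definitions backing NewSyncConfig.
L2EngineSyncEnabled = &cli.BoolFlag{
	Name:    "l2.engine-sync",
	Usage:   "Enables or disables execution engine P2P sync",
	EnvVars: prefixEnvVars("L2_ENGINE_SYNC_ENABLED"),
}
SkipSyncStartCheck = &cli.BoolFlag{
	Name:    "l2.skip-sync-start-check",
	Usage:   "Skip the sanity check of L1-origin consistency of unsafe L2 blocks when finding the sync start; intended for use with l2.engine-sync",
	EnvVars: prefixEnvVars("L2_SKIP_SYNC_START_CHECK"),
}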
...@@ -10,7 +10,6 @@ import ( ...@@ -10,7 +10,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-node/client" "github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
...@@ -166,31 +165,3 @@ func (s *L2Client) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) ( ...@@ -166,31 +165,3 @@ func (s *L2Client) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (
s.systemConfigsCache.Add(hash, cfg) s.systemConfigsCache.Add(hash, cfg)
return cfg, nil return cfg, nil
} }
func (s *L2Client) OutputV0AtBlock(ctx context.Context, blockHash common.Hash) (*eth.OutputV0, error) {
head, err := s.InfoByHash(ctx, blockHash)
if err != nil {
return nil, fmt.Errorf("failed to get L2 block by hash: %w", err)
}
if head == nil {
return nil, ethereum.NotFound
}
proof, err := s.GetProof(ctx, predeploys.L2ToL1MessagePasserAddr, []common.Hash{}, blockHash.String())
if err != nil {
return nil, fmt.Errorf("failed to get contract proof at block %s: %w", blockHash, err)
}
if proof == nil {
return nil, fmt.Errorf("proof %w", ethereum.NotFound)
}
// make sure that the proof (including storage hash) that we retrieved is correct by verifying it against the state-root
if err := proof.Verify(head.Root()); err != nil {
return nil, fmt.Errorf("invalid withdrawal root hash, state root was %s: %w", head.Root(), err)
}
stateRoot := head.Root()
return &eth.OutputV0{
StateRoot: eth.Bytes32(stateRoot),
MessagePasserStorageRoot: eth.Bytes32(proof.StorageHash),
BlockHash: blockHash,
}, nil
}
...@@ -43,11 +43,3 @@ func (m *MockL2Client) SystemConfigByL2Hash(ctx context.Context, hash common.Has ...@@ -43,11 +43,3 @@ func (m *MockL2Client) SystemConfigByL2Hash(ctx context.Context, hash common.Has
func (m *MockL2Client) ExpectSystemConfigByL2Hash(hash common.Hash, cfg eth.SystemConfig, err error) { func (m *MockL2Client) ExpectSystemConfigByL2Hash(hash common.Hash, cfg eth.SystemConfig, err error) {
m.Mock.On("SystemConfigByL2Hash", hash).Once().Return(cfg, &err) m.Mock.On("SystemConfigByL2Hash", hash).Once().Return(cfg, &err)
} }
func (m *MockL2Client) OutputV0AtBlock(ctx context.Context, blockHash common.Hash) (*eth.OutputV0, error) {
return m.Mock.MethodCalled("OutputV0AtBlock", blockHash).Get(0).(*eth.OutputV0), nil
}
func (m *MockL2Client) ExpectOutputV0AtBlock(blockHash common.Hash, output *eth.OutputV0, err error) {
m.Mock.On("OutputV0AtBlock", blockHash).Once().Return(output, &err)
}
...@@ -266,14 +266,7 @@ func RandomOutputResponse(rng *rand.Rand) *eth.OutputResponse { ...@@ -266,14 +266,7 @@ func RandomOutputResponse(rng *rand.Rand) *eth.OutputResponse {
UnsafeL2: RandomL2BlockRef(rng), UnsafeL2: RandomL2BlockRef(rng),
SafeL2: RandomL2BlockRef(rng), SafeL2: RandomL2BlockRef(rng),
FinalizedL2: RandomL2BlockRef(rng), FinalizedL2: RandomL2BlockRef(rng),
EngineSyncTarget: RandomL2BlockRef(rng),
}, },
} }
} }
func RandomOutputV0(rng *rand.Rand) *eth.OutputV0 {
return &eth.OutputV0{
StateRoot: eth.Bytes32(RandomHash(rng)),
MessagePasserStorageRoot: eth.Bytes32(RandomHash(rng)),
BlockHash: RandomHash(rng),
}
}
...@@ -12,7 +12,7 @@ import ( ...@@ -12,7 +12,7 @@ import (
const ( const (
L1HeadLocalIndex preimage.LocalIndexKey = iota + 1 L1HeadLocalIndex preimage.LocalIndexKey = iota + 1
L2OutputRootLocalIndex L2HeadLocalIndex
L2ClaimLocalIndex L2ClaimLocalIndex
L2ClaimBlockNumberLocalIndex L2ClaimBlockNumberLocalIndex
L2ChainConfigLocalIndex L2ChainConfigLocalIndex
...@@ -21,7 +21,7 @@ const ( ...@@ -21,7 +21,7 @@ const (
type BootInfo struct { type BootInfo struct {
L1Head common.Hash L1Head common.Hash
L2OutputRoot common.Hash L2Head common.Hash
L2Claim common.Hash L2Claim common.Hash
L2ClaimBlockNumber uint64 L2ClaimBlockNumber uint64
L2ChainConfig *params.ChainConfig L2ChainConfig *params.ChainConfig
...@@ -42,7 +42,7 @@ func NewBootstrapClient(r oracleClient) *BootstrapClient { ...@@ -42,7 +42,7 @@ func NewBootstrapClient(r oracleClient) *BootstrapClient {
func (br *BootstrapClient) BootInfo() *BootInfo { func (br *BootstrapClient) BootInfo() *BootInfo {
l1Head := common.BytesToHash(br.r.Get(L1HeadLocalIndex)) l1Head := common.BytesToHash(br.r.Get(L1HeadLocalIndex))
l2OutputRoot := common.BytesToHash(br.r.Get(L2OutputRootLocalIndex)) l2Head := common.BytesToHash(br.r.Get(L2HeadLocalIndex))
l2Claim := common.BytesToHash(br.r.Get(L2ClaimLocalIndex)) l2Claim := common.BytesToHash(br.r.Get(L2ClaimLocalIndex))
l2ClaimBlockNumber := binary.BigEndian.Uint64(br.r.Get(L2ClaimBlockNumberLocalIndex)) l2ClaimBlockNumber := binary.BigEndian.Uint64(br.r.Get(L2ClaimBlockNumberLocalIndex))
l2ChainConfig := new(params.ChainConfig) l2ChainConfig := new(params.ChainConfig)
...@@ -58,7 +58,7 @@ func (br *BootstrapClient) BootInfo() *BootInfo { ...@@ -58,7 +58,7 @@ func (br *BootstrapClient) BootInfo() *BootInfo {
return &BootInfo{ return &BootInfo{
L1Head: l1Head, L1Head: l1Head,
L2OutputRoot: l2OutputRoot, L2Head: l2Head,
L2Claim: l2Claim, L2Claim: l2Claim,
L2ClaimBlockNumber: l2ClaimBlockNumber, L2ClaimBlockNumber: l2ClaimBlockNumber,
L2ChainConfig: l2ChainConfig, L2ChainConfig: l2ChainConfig,
......
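One encoding detail worth noting from the hunk above: the claim block number preimage is an 8-byte big-endian integer, matching the binary.BigEndian.Uint64 read in BootstrapClient. A minimal round-trip sketch; encodeClaimBlockNumber is an illustrative name.

// Sketch only: round-trip of the 8-byte big-endian claim block number preimage.
func encodeClaimBlockNumber(n uint64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, n) // what the host writes for L2ClaimBlockNumberLocalIndex
	return buf
}
// binary.BigEndian.Uint64(encodeClaimBlockNumber(1203)) == 1203, as BootstrapClient reads it back.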
...@@ -15,7 +15,7 @@ import ( ...@@ -15,7 +15,7 @@ import (
func TestBootstrapClient(t *testing.T) { func TestBootstrapClient(t *testing.T) {
bootInfo := &BootInfo{ bootInfo := &BootInfo{
L1Head: common.HexToHash("0x1111"), L1Head: common.HexToHash("0x1111"),
L2OutputRoot: common.HexToHash("0x2222"), L2Head: common.HexToHash("0x2222"),
L2Claim: common.HexToHash("0x3333"), L2Claim: common.HexToHash("0x3333"),
L2ClaimBlockNumber: 1, L2ClaimBlockNumber: 1,
L2ChainConfig: params.GoerliChainConfig, L2ChainConfig: params.GoerliChainConfig,
...@@ -34,8 +34,8 @@ func (o *mockBoostrapOracle) Get(key preimage.Key) []byte { ...@@ -34,8 +34,8 @@ func (o *mockBoostrapOracle) Get(key preimage.Key) []byte {
switch key.PreimageKey() { switch key.PreimageKey() {
case L1HeadLocalIndex.PreimageKey(): case L1HeadLocalIndex.PreimageKey():
return o.b.L1Head[:] return o.b.L1Head[:]
case L2OutputRootLocalIndex.PreimageKey(): case L2HeadLocalIndex.PreimageKey():
return o.b.L2OutputRoot[:] return o.b.L2Head[:]
case L2ClaimLocalIndex.PreimageKey(): case L2ClaimLocalIndex.PreimageKey():
return o.b.L2Claim[:] return o.b.L2Claim[:]
case L2ClaimBlockNumberLocalIndex.PreimageKey(): case L2ClaimBlockNumberLocalIndex.PreimageKey():
......
...@@ -10,6 +10,7 @@ import ( ...@@ -10,6 +10,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/metrics" "github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/rollup/sync"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
...@@ -35,7 +36,7 @@ type Driver struct { ...@@ -35,7 +36,7 @@ type Driver struct {
} }
func NewDriver(logger log.Logger, cfg *rollup.Config, l1Source derive.L1Fetcher, l2Source L2Source, targetBlockNum uint64) *Driver { func NewDriver(logger log.Logger, cfg *rollup.Config, l1Source derive.L1Fetcher, l2Source L2Source, targetBlockNum uint64) *Driver {
pipeline := derive.NewDerivationPipeline(logger, cfg, l1Source, l2Source, metrics.NoopMetrics) pipeline := derive.NewDerivationPipeline(logger, cfg, l1Source, l2Source, metrics.NoopMetrics, &sync.Config{})
pipeline.Reset() pipeline.Reset()
return &Driver{ return &Driver{
logger: logger, logger: logger,
......
package l2 package l2
import ( import (
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/hashicorp/golang-lru/v2/simplelru" "github.com/hashicorp/golang-lru/v2/simplelru"
...@@ -14,24 +13,21 @@ const nodeCacheSize = 100_000 ...@@ -14,24 +13,21 @@ const nodeCacheSize = 100_000
const codeCacheSize = 10_000 const codeCacheSize = 10_000
type CachingOracle struct { type CachingOracle struct {
oracle Oracle oracle Oracle
blocks *simplelru.LRU[common.Hash, *types.Block] blocks *simplelru.LRU[common.Hash, *types.Block]
nodes *simplelru.LRU[common.Hash, []byte] nodes *simplelru.LRU[common.Hash, []byte]
codes *simplelru.LRU[common.Hash, []byte] codes *simplelru.LRU[common.Hash, []byte]
outputs *simplelru.LRU[common.Hash, eth.Output]
} }
func NewCachingOracle(oracle Oracle) *CachingOracle { func NewCachingOracle(oracle Oracle) *CachingOracle {
blockLRU, _ := simplelru.NewLRU[common.Hash, *types.Block](blockCacheSize, nil) blockLRU, _ := simplelru.NewLRU[common.Hash, *types.Block](blockCacheSize, nil)
nodeLRU, _ := simplelru.NewLRU[common.Hash, []byte](nodeCacheSize, nil) nodeLRU, _ := simplelru.NewLRU[common.Hash, []byte](nodeCacheSize, nil)
codeLRU, _ := simplelru.NewLRU[common.Hash, []byte](codeCacheSize, nil) codeLRU, _ := simplelru.NewLRU[common.Hash, []byte](codeCacheSize, nil)
outputLRU, _ := simplelru.NewLRU[common.Hash, eth.Output](codeCacheSize, nil)
return &CachingOracle{ return &CachingOracle{
oracle: oracle, oracle: oracle,
blocks: blockLRU, blocks: blockLRU,
nodes: nodeLRU, nodes: nodeLRU,
codes: codeLRU, codes: codeLRU,
outputs: outputLRU,
} }
} }
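Every lookup on this cache follows the same read-through pattern; a sketch using the code cache as an example. The real file already has such methods for codes and state nodes (only BlockByHash is shown below), so this is illustrative rather than an addition.

// Sketch only: read-through lookup against the LRU, falling back to the wrapped oracle.
func (o *CachingOracle) CodeByHash(codeHash common.Hash) []byte {
	if code, ok := o.codes.Get(codeHash); ok {
		return code
	}
	code := o.oracle.CodeByHash(codeHash)
	o.codes.Add(codeHash, code)
	return code
}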
...@@ -64,13 +60,3 @@ func (o *CachingOracle) BlockByHash(blockHash common.Hash) *types.Block { ...@@ -64,13 +60,3 @@ func (o *CachingOracle) BlockByHash(blockHash common.Hash) *types.Block {
o.blocks.Add(blockHash, block) o.blocks.Add(blockHash, block)
return block return block
} }
func (o *CachingOracle) OutputByRoot(root common.Hash) eth.Output {
output, ok := o.outputs.Get(root)
if ok {
return output
}
output = o.oracle.OutputByRoot(root)
o.outputs.Add(root, output)
return output
}
...@@ -4,7 +4,6 @@ import ( ...@@ -4,7 +4,6 @@ import (
"math/rand" "math/rand"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/testutils" "github.com/ethereum-optimism/optimism/op-node/testutils"
"github.com/ethereum-optimism/optimism/op-program/client/l2/test" "github.com/ethereum-optimism/optimism/op-program/client/l2/test"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -67,22 +66,3 @@ func TestCodeByHash(t *testing.T) { ...@@ -67,22 +66,3 @@ func TestCodeByHash(t *testing.T) {
actual = oracle.CodeByHash(hash) actual = oracle.CodeByHash(hash)
require.Equal(t, node, actual) require.Equal(t, node, actual)
} }
func TestOutputByRoot(t *testing.T) {
stub, _ := test.NewStubOracle(t)
oracle := NewCachingOracle(stub)
rng := rand.New(rand.NewSource(1))
output := testutils.RandomOutputV0(rng)
// Initial call retrieves from the stub
root := common.Hash(eth.OutputRoot(output))
stub.Outputs[root] = output
actual := oracle.OutputByRoot(root)
require.Equal(t, output, actual)
// Later calls should retrieve from cache
delete(stub.Outputs, root)
actual = oracle.OutputByRoot(root)
require.Equal(t, output, actual)
}
...@@ -4,7 +4,6 @@ import ( ...@@ -4,7 +4,6 @@ import (
"fmt" "fmt"
"math/big" "math/big"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi" "github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
...@@ -40,13 +39,8 @@ type OracleBackedL2Chain struct { ...@@ -40,13 +39,8 @@ type OracleBackedL2Chain struct {
var _ engineapi.EngineBackend = (*OracleBackedL2Chain)(nil) var _ engineapi.EngineBackend = (*OracleBackedL2Chain)(nil)
func NewOracleBackedL2Chain(logger log.Logger, oracle Oracle, chainCfg *params.ChainConfig, l2OutputRoot common.Hash) (*OracleBackedL2Chain, error) { func NewOracleBackedL2Chain(logger log.Logger, oracle Oracle, chainCfg *params.ChainConfig, l2Head common.Hash) (*OracleBackedL2Chain, error) {
output := oracle.OutputByRoot(l2OutputRoot) head := oracle.BlockByHash(l2Head)
outputV0, ok := output.(*eth.OutputV0)
if !ok {
return nil, fmt.Errorf("unsupported L2 output version: %d", output.Version())
}
head := oracle.BlockByHash(outputV0.BlockHash)
logger.Info("Loaded L2 head", "hash", head.Hash(), "number", head.Number()) logger.Info("Loaded L2 head", "hash", head.Hash(), "number", head.Number())
return &OracleBackedL2Chain{ return &OracleBackedL2Chain{
log: logger, log: logger,
......
...@@ -5,7 +5,6 @@ import ( ...@@ -5,7 +5,6 @@ import (
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi" "github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi"
"github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi/test" "github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi/test"
...@@ -200,8 +199,7 @@ func setupOracleBackedChainWithLowerHead(t *testing.T, blockCount int, headBlock ...@@ -200,8 +199,7 @@ func setupOracleBackedChainWithLowerHead(t *testing.T, blockCount int, headBlock
logger := testlog.Logger(t, log.LvlDebug) logger := testlog.Logger(t, log.LvlDebug)
chainCfg, blocks, oracle := setupOracle(t, blockCount, headBlockNumber) chainCfg, blocks, oracle := setupOracle(t, blockCount, headBlockNumber)
head := blocks[headBlockNumber].Hash() head := blocks[headBlockNumber].Hash()
stubOutput := eth.OutputV0{BlockHash: head} chain, err := NewOracleBackedL2Chain(logger, oracle, chainCfg, head)
chain, err := NewOracleBackedL2Chain(logger, oracle, chainCfg, common.Hash(eth.OutputRoot(&stubOutput)))
require.NoError(t, err) require.NoError(t, err)
return blocks, chain return blocks, chain
} }
...@@ -234,12 +232,7 @@ func setupOracle(t *testing.T, blockCount int, headBlockNumber int) (*params.Cha ...@@ -234,12 +232,7 @@ func setupOracle(t *testing.T, blockCount int, headBlockNumber int) (*params.Cha
genesisBlock := l2Genesis.MustCommit(db) genesisBlock := l2Genesis.MustCommit(db)
blocks, _ := core.GenerateChain(chainCfg, genesisBlock, consensus, db, blockCount, func(i int, gen *core.BlockGen) {}) blocks, _ := core.GenerateChain(chainCfg, genesisBlock, consensus, db, blockCount, func(i int, gen *core.BlockGen) {})
blocks = append([]*types.Block{genesisBlock}, blocks...) blocks = append([]*types.Block{genesisBlock}, blocks...)
oracle := l2test.NewStubOracleWithBlocks(t, blocks[:headBlockNumber+1], db)
var outputs []eth.Output
for _, block := range blocks {
outputs = append(outputs, &eth.OutputV0{BlockHash: block.Hash()})
}
oracle := l2test.NewStubOracleWithBlocks(t, blocks[:headBlockNumber+1], outputs, db)
return chainCfg, blocks, oracle return chainCfg, blocks, oracle
} }
......
...@@ -11,7 +11,6 @@ const ( ...@@ -11,7 +11,6 @@ const (
HintL2Transactions = "l2-transactions" HintL2Transactions = "l2-transactions"
HintL2Code = "l2-code" HintL2Code = "l2-code"
HintL2StateNode = "l2-state-node" HintL2StateNode = "l2-state-node"
HintL2Output = "l2-output"
) )
type BlockHeaderHint common.Hash type BlockHeaderHint common.Hash
...@@ -45,11 +44,3 @@ var _ preimage.Hint = StateNodeHint{} ...@@ -45,11 +44,3 @@ var _ preimage.Hint = StateNodeHint{}
func (l StateNodeHint) Hint() string { func (l StateNodeHint) Hint() string {
return HintL2StateNode + " " + (common.Hash)(l).String() return HintL2StateNode + " " + (common.Hash)(l).String()
} }
type L2OutputHint common.Hash
var _ preimage.Hint = L2OutputHint{}
func (l L2OutputHint) Hint() string {
return HintL2Output + " " + (common.Hash)(l).String()
}
...@@ -32,8 +32,6 @@ type Oracle interface { ...@@ -32,8 +32,6 @@ type Oracle interface {
// BlockByHash retrieves the block with the given hash. // BlockByHash retrieves the block with the given hash.
BlockByHash(blockHash common.Hash) *types.Block BlockByHash(blockHash common.Hash) *types.Block
OutputByRoot(root common.Hash) eth.Output
} }
// PreimageOracle implements Oracle using by interfacing with the pure preimage.Oracle // PreimageOracle implements Oracle using by interfacing with the pure preimage.Oracle
...@@ -87,13 +85,3 @@ func (p *PreimageOracle) CodeByHash(codeHash common.Hash) []byte { ...@@ -87,13 +85,3 @@ func (p *PreimageOracle) CodeByHash(codeHash common.Hash) []byte {
p.hint.Hint(CodeHint(codeHash)) p.hint.Hint(CodeHint(codeHash))
return p.oracle.Get(preimage.Keccak256Key(codeHash)) return p.oracle.Get(preimage.Keccak256Key(codeHash))
} }
func (p *PreimageOracle) OutputByRoot(l2OutputRoot common.Hash) eth.Output {
p.hint.Hint(L2OutputHint(l2OutputRoot))
data := p.oracle.Get(preimage.Keccak256Key(l2OutputRoot))
output, err := eth.UnmarshalOutput(data)
if err != nil {
panic(fmt.Errorf("invalid L2 output data for root %s: %w", l2OutputRoot, err))
}
return output
}
...@@ -122,21 +122,3 @@ func TestPreimageOracleCodeByHash(t *testing.T) { ...@@ -122,21 +122,3 @@ func TestPreimageOracleCodeByHash(t *testing.T) {
}) })
} }
} }
func TestPreimageOracleOutputByRoot(t *testing.T) {
rng := rand.New(rand.NewSource(123))
for i := 0; i < 10; i++ {
t.Run(fmt.Sprintf("output_%d", i), func(t *testing.T) {
po, hints, preimages := mockPreimageOracle(t)
output := testutils.RandomOutputV0(rng)
h := common.Hash(eth.OutputRoot(output))
preimages[preimage.Keccak256Key(h).PreimageKey()] = output.Marshal()
hints.On("hint", L2OutputHint(h).Hint()).Once().Return()
gotOutput := po.OutputByRoot(h)
hints.AssertExpectations(t)
require.Equal(t, hexutil.Bytes(output.Marshal()), hexutil.Bytes(gotOutput.Marshal()), "output matches")
})
}
}
...@@ -3,7 +3,6 @@ package test ...@@ -3,7 +3,6 @@ package test
import ( import (
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
...@@ -17,9 +16,8 @@ type stateOracle interface { ...@@ -17,9 +16,8 @@ type stateOracle interface {
} }
type StubBlockOracle struct { type StubBlockOracle struct {
t *testing.T t *testing.T
Blocks map[common.Hash]*types.Block Blocks map[common.Hash]*types.Block
Outputs map[common.Hash]eth.Output
stateOracle stateOracle
} }
...@@ -28,25 +26,18 @@ func NewStubOracle(t *testing.T) (*StubBlockOracle, *StubStateOracle) { ...@@ -28,25 +26,18 @@ func NewStubOracle(t *testing.T) (*StubBlockOracle, *StubStateOracle) {
blockOracle := StubBlockOracle{ blockOracle := StubBlockOracle{
t: t, t: t,
Blocks: make(map[common.Hash]*types.Block), Blocks: make(map[common.Hash]*types.Block),
Outputs: make(map[common.Hash]eth.Output),
stateOracle: stateOracle, stateOracle: stateOracle,
} }
return &blockOracle, stateOracle return &blockOracle, stateOracle
} }
func NewStubOracleWithBlocks(t *testing.T, chain []*types.Block, outputs []eth.Output, db ethdb.Database) *StubBlockOracle { func NewStubOracleWithBlocks(t *testing.T, chain []*types.Block, db ethdb.Database) *StubBlockOracle {
blocks := make(map[common.Hash]*types.Block, len(chain)) blocks := make(map[common.Hash]*types.Block, len(chain))
for _, block := range chain { for _, block := range chain {
blocks[block.Hash()] = block blocks[block.Hash()] = block
} }
o := make(map[common.Hash]eth.Output, len(outputs))
for _, output := range outputs {
o[common.Hash(eth.OutputRoot(output))] = output
}
return &StubBlockOracle{ return &StubBlockOracle{
t: t,
Blocks: blocks, Blocks: blocks,
Outputs: o,
stateOracle: &KvStateOracle{t: t, Source: db}, stateOracle: &KvStateOracle{t: t, Source: db},
} }
} }
...@@ -59,14 +50,6 @@ func (o StubBlockOracle) BlockByHash(blockHash common.Hash) *types.Block { ...@@ -59,14 +50,6 @@ func (o StubBlockOracle) BlockByHash(blockHash common.Hash) *types.Block {
return block return block
} }
func (o StubBlockOracle) OutputByRoot(root common.Hash) eth.Output {
output, ok := o.Outputs[root]
if !ok {
o.t.Fatalf("requested unknown output root %s", root)
}
return output
}
// KvStateOracle loads data from a source ethdb.KeyValueStore // KvStateOracle loads data from a source ethdb.KeyValueStore
type KvStateOracle struct { type KvStateOracle struct {
t *testing.T t *testing.T
......
...@@ -53,7 +53,7 @@ func RunProgram(logger log.Logger, preimageOracle io.ReadWriter, preimageHinter ...@@ -53,7 +53,7 @@ func RunProgram(logger log.Logger, preimageOracle io.ReadWriter, preimageHinter
bootInfo.RollupConfig, bootInfo.RollupConfig,
bootInfo.L2ChainConfig, bootInfo.L2ChainConfig,
bootInfo.L1Head, bootInfo.L1Head,
bootInfo.L2OutputRoot, bootInfo.L2Head,
bootInfo.L2Claim, bootInfo.L2Claim,
bootInfo.L2ClaimBlockNumber, bootInfo.L2ClaimBlockNumber,
l1PreimageOracle, l1PreimageOracle,
...@@ -62,9 +62,9 @@ func RunProgram(logger log.Logger, preimageOracle io.ReadWriter, preimageHinter ...@@ -62,9 +62,9 @@ func RunProgram(logger log.Logger, preimageOracle io.ReadWriter, preimageHinter
} }
// runDerivation executes the L2 state transition, given a minimal interface to retrieve data. // runDerivation executes the L2 state transition, given a minimal interface to retrieve data.
func runDerivation(logger log.Logger, cfg *rollup.Config, l2Cfg *params.ChainConfig, l1Head common.Hash, l2OutputRoot common.Hash, l2Claim common.Hash, l2ClaimBlockNum uint64, l1Oracle l1.Oracle, l2Oracle l2.Oracle) error { func runDerivation(logger log.Logger, cfg *rollup.Config, l2Cfg *params.ChainConfig, l1Head common.Hash, l2Head common.Hash, l2Claim common.Hash, l2ClaimBlockNum uint64, l1Oracle l1.Oracle, l2Oracle l2.Oracle) error {
l1Source := l1.NewOracleL1Client(logger, l1Oracle, l1Head) l1Source := l1.NewOracleL1Client(logger, l1Oracle, l1Head)
engineBackend, err := l2.NewOracleBackedL2Chain(logger, l2Oracle, l2Cfg, l2OutputRoot) engineBackend, err := l2.NewOracleBackedL2Chain(logger, l2Oracle, l2Cfg, l2Head)
if err != nil { if err != nil {
return fmt.Errorf("failed to create oracle-backed L2 chain: %w", err) return fmt.Errorf("failed to create oracle-backed L2 chain: %w", err)
} }
......
...@@ -20,7 +20,6 @@ var ( ...@@ -20,7 +20,6 @@ var (
l1HeadValue = common.HexToHash("0x111111").Hex() l1HeadValue = common.HexToHash("0x111111").Hex()
l2HeadValue = common.HexToHash("0x222222").Hex() l2HeadValue = common.HexToHash("0x222222").Hex()
l2ClaimValue = common.HexToHash("0x333333").Hex() l2ClaimValue = common.HexToHash("0x333333").Hex()
l2OutputRoot = common.HexToHash("0x444444").Hex()
l2ClaimBlockNumber = uint64(1203) l2ClaimBlockNumber = uint64(1203)
// Note: This is actually the L1 goerli genesis config. Just using it as an arbitrary, valid genesis config // Note: This is actually the L1 goerli genesis config. Just using it as an arbitrary, valid genesis config
l2Genesis = core.DefaultGoerliGenesisBlock() l2Genesis = core.DefaultGoerliGenesisBlock()
...@@ -49,7 +48,6 @@ func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) { ...@@ -49,7 +48,6 @@ func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) {
config.OPGoerliChainConfig, config.OPGoerliChainConfig,
common.HexToHash(l1HeadValue), common.HexToHash(l1HeadValue),
common.HexToHash(l2HeadValue), common.HexToHash(l2HeadValue),
common.HexToHash(l2OutputRoot),
common.HexToHash(l2ClaimValue), common.HexToHash(l2ClaimValue),
l2ClaimBlockNumber) l2ClaimBlockNumber)
require.Equal(t, defaultCfg, cfg) require.Equal(t, defaultCfg, cfg)
...@@ -133,21 +131,6 @@ func TestL2Head(t *testing.T) { ...@@ -133,21 +131,6 @@ func TestL2Head(t *testing.T) {
}) })
} }
func TestL2OutputRoot(t *testing.T) {
t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag l2.outputroot is required", addRequiredArgsExcept("--l2.outputroot"))
})
t.Run("Valid", func(t *testing.T) {
cfg := configForArgs(t, replaceRequiredArg("--l2.outputroot", l2OutputRoot))
require.Equal(t, common.HexToHash(l2OutputRoot), cfg.L2OutputRoot)
})
t.Run("Invalid", func(t *testing.T) {
verifyArgsInvalid(t, config.ErrInvalidL2OutputRoot.Error(), replaceRequiredArg("--l2.outputroot", "something"))
})
}
func TestL1Head(t *testing.T) { func TestL1Head(t *testing.T) {
t.Run("Required", func(t *testing.T) { t.Run("Required", func(t *testing.T) {
verifyArgsInvalid(t, "flag l1.head is required", addRequiredArgsExcept("--l1.head")) verifyArgsInvalid(t, "flag l1.head is required", addRequiredArgsExcept("--l1.head"))
...@@ -319,7 +302,6 @@ func requiredArgs() map[string]string { ...@@ -319,7 +302,6 @@ func requiredArgs() map[string]string {
"--network": "goerli", "--network": "goerli",
"--l1.head": l1HeadValue, "--l1.head": l1HeadValue,
"--l2.head": l2HeadValue, "--l2.head": l2HeadValue,
"--l2.outputroot": l2OutputRoot,
"--l2.claim": l2ClaimValue, "--l2.claim": l2ClaimValue,
"--l2.blocknumber": strconv.FormatUint(l2ClaimBlockNumber, 10), "--l2.blocknumber": strconv.FormatUint(l2ClaimBlockNumber, 10),
} }
......
...@@ -22,7 +22,6 @@ var ( ...@@ -22,7 +22,6 @@ var (
ErrMissingL2Genesis = errors.New("missing l2 genesis") ErrMissingL2Genesis = errors.New("missing l2 genesis")
ErrInvalidL1Head = errors.New("invalid l1 head") ErrInvalidL1Head = errors.New("invalid l1 head")
ErrInvalidL2Head = errors.New("invalid l2 head") ErrInvalidL2Head = errors.New("invalid l2 head")
ErrInvalidL2OutputRoot = errors.New("invalid l2 output root")
ErrL1AndL2Inconsistent = errors.New("l1 and l2 options must be specified together or both omitted") ErrL1AndL2Inconsistent = errors.New("l1 and l2 options must be specified together or both omitted")
ErrInvalidL2Claim = errors.New("invalid l2 claim") ErrInvalidL2Claim = errors.New("invalid l2 claim")
ErrInvalidL2ClaimBlock = errors.New("invalid l2 claim block number") ErrInvalidL2ClaimBlock = errors.New("invalid l2 claim block number")
...@@ -42,12 +41,9 @@ type Config struct { ...@@ -42,12 +41,9 @@ type Config struct {
L1TrustRPC bool L1TrustRPC bool
L1RPCKind sources.RPCProviderKind L1RPCKind sources.RPCProviderKind
// L2Head is the l2 block hash contained in the L2 Output referenced by the L2OutputRoot // L2Head is the agreed L2 block to start derivation from
// TODO(inphi): This can be made optional with hardcoded rollup configs and output oracle addresses by searching the oracle for the l2 output root
L2Head common.Hash L2Head common.Hash
// L2OutputRoot is the agreed L2 output root to start derivation from L2URL string
L2OutputRoot common.Hash
L2URL string
// L2Claim is the claimed L2 output root to verify // L2Claim is the claimed L2 output root to verify
L2Claim common.Hash L2Claim common.Hash
// L2ClaimBlockNumber is the block number the claimed L2 output root is from // L2ClaimBlockNumber is the block number the claimed L2 output root is from
...@@ -77,9 +73,6 @@ func (c *Config) Check() error { ...@@ -77,9 +73,6 @@ func (c *Config) Check() error {
if c.L2Head == (common.Hash{}) { if c.L2Head == (common.Hash{}) {
return ErrInvalidL2Head return ErrInvalidL2Head
} }
if c.L2OutputRoot == (common.Hash{}) {
return ErrInvalidL2OutputRoot
}
if c.L2Claim == (common.Hash{}) { if c.L2Claim == (common.Hash{}) {
return ErrInvalidL2Claim return ErrInvalidL2Claim
} }
...@@ -106,21 +99,12 @@ func (c *Config) FetchingEnabled() bool { ...@@ -106,21 +99,12 @@ func (c *Config) FetchingEnabled() bool {
} }
// NewConfig creates a Config with all optional values set to the CLI default value // NewConfig creates a Config with all optional values set to the CLI default value
func NewConfig( func NewConfig(rollupCfg *rollup.Config, l2Genesis *params.ChainConfig, l1Head common.Hash, l2Head common.Hash, l2Claim common.Hash, l2ClaimBlockNum uint64) *Config {
rollupCfg *rollup.Config,
l2Genesis *params.ChainConfig,
l1Head common.Hash,
l2Head common.Hash,
l2OutputRoot common.Hash,
l2Claim common.Hash,
l2ClaimBlockNum uint64,
) *Config {
return &Config{ return &Config{
Rollup: rollupCfg, Rollup: rollupCfg,
L2ChainConfig: l2Genesis, L2ChainConfig: l2Genesis,
L1Head: l1Head, L1Head: l1Head,
L2Head: l2Head, L2Head: l2Head,
L2OutputRoot: l2OutputRoot,
L2Claim: l2Claim, L2Claim: l2Claim,
L2ClaimBlockNumber: l2ClaimBlockNum, L2ClaimBlockNumber: l2ClaimBlockNum,
L1RPCKind: sources.RPCKindBasic, L1RPCKind: sources.RPCKindBasic,
...@@ -139,10 +123,6 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) { ...@@ -139,10 +123,6 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) {
if l2Head == (common.Hash{}) { if l2Head == (common.Hash{}) {
return nil, ErrInvalidL2Head return nil, ErrInvalidL2Head
} }
l2OutputRoot := common.HexToHash(ctx.String(flags.L2OutputRoot.Name))
if l2OutputRoot == (common.Hash{}) {
return nil, ErrInvalidL2OutputRoot
}
l2Claim := common.HexToHash(ctx.String(flags.L2Claim.Name)) l2Claim := common.HexToHash(ctx.String(flags.L2Claim.Name))
if l2Claim == (common.Hash{}) { if l2Claim == (common.Hash{}) {
return nil, ErrInvalidL2Claim return nil, ErrInvalidL2Claim
...@@ -172,7 +152,6 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) { ...@@ -172,7 +152,6 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) {
L2URL: ctx.String(flags.L2NodeAddr.Name), L2URL: ctx.String(flags.L2NodeAddr.Name),
L2ChainConfig: l2ChainConfig, L2ChainConfig: l2ChainConfig,
L2Head: l2Head, L2Head: l2Head,
L2OutputRoot: l2OutputRoot,
L2Claim: l2Claim, L2Claim: l2Claim,
L2ClaimBlockNumber: l2ClaimBlockNum, L2ClaimBlockNumber: l2ClaimBlockNum,
L1Head: l1Head, L1Head: l1Head,
......
...@@ -16,7 +16,6 @@ var ( ...@@ -16,7 +16,6 @@ var (
validL1Head = common.Hash{0xaa} validL1Head = common.Hash{0xaa}
validL2Head = common.Hash{0xbb} validL2Head = common.Hash{0xbb}
validL2Claim = common.Hash{0xcc} validL2Claim = common.Hash{0xcc}
validL2OutputRoot = common.Hash{0xdd}
validL2ClaimBlockNum = uint64(15) validL2ClaimBlockNum = uint64(15)
) )
...@@ -56,13 +55,6 @@ func TestL2HeadRequired(t *testing.T) { ...@@ -56,13 +55,6 @@ func TestL2HeadRequired(t *testing.T) {
require.ErrorIs(t, err, ErrInvalidL2Head) require.ErrorIs(t, err, ErrInvalidL2Head)
} }
func TestL2OutputRootRequired(t *testing.T) {
config := validConfig()
config.L2OutputRoot = common.Hash{}
err := config.Check()
require.ErrorIs(t, err, ErrInvalidL2OutputRoot)
}
func TestL2ClaimRequired(t *testing.T) { func TestL2ClaimRequired(t *testing.T) {
config := validConfig() config := validConfig()
config.L2Claim = common.Hash{} config.L2Claim = common.Hash{}
...@@ -159,7 +151,7 @@ func TestRejectExecAndServerMode(t *testing.T) { ...@@ -159,7 +151,7 @@ func TestRejectExecAndServerMode(t *testing.T) {
} }
func validConfig() *Config { func validConfig() *Config {
cfg := NewConfig(validRollupConfig, validL2Genesis, validL1Head, validL2Head, validL2OutputRoot, validL2Claim, validL2ClaimBlockNum) cfg := NewConfig(validRollupConfig, validL2Genesis, validL1Head, validL2Head, validL2Claim, validL2ClaimBlockNum)
cfg.DataDir = "/tmp/configTest" cfg.DataDir = "/tmp/configTest"
return cfg return cfg
} }
...@@ -47,14 +47,9 @@ var ( ...@@ -47,14 +47,9 @@ var (
} }
L2Head = &cli.StringFlag{ L2Head = &cli.StringFlag{
Name: "l2.head", Name: "l2.head",
Usage: "Hash of the L2 block at l2.outputroot", Usage: "Hash of the agreed L2 block to start derivation from",
EnvVars: prefixEnvVars("L2_HEAD"), EnvVars: prefixEnvVars("L2_HEAD"),
} }
L2OutputRoot = &cli.StringFlag{
Name: "l2.outputroot",
Usage: "Agreed L2 Output Root to start derivation from",
EnvVars: prefixEnvVars("L2_OUTPUT_ROOT"),
}
L2Claim = &cli.StringFlag{ L2Claim = &cli.StringFlag{
Name: "l2.claim", Name: "l2.claim",
Usage: "Claimed L2 output root to validate", Usage: "Claimed L2 output root to validate",
...@@ -108,7 +103,6 @@ var Flags []cli.Flag ...@@ -108,7 +103,6 @@ var Flags []cli.Flag
var requiredFlags = []cli.Flag{ var requiredFlags = []cli.Flag{
L1Head, L1Head,
L2Head, L2Head,
L2OutputRoot,
L2Claim, L2Claim,
L2BlockNumber, L2BlockNumber,
} }
......
...@@ -26,7 +26,7 @@ import ( ...@@ -26,7 +26,7 @@ import (
) )
type L2Source struct { type L2Source struct {
*L2Client *sources.L2Client
*sources.DebugClient *sources.DebugClient
} }
...@@ -205,7 +205,7 @@ func makePrefetcher(ctx context.Context, logger log.Logger, kv kvstore.KV, cfg * ...@@ -205,7 +205,7 @@ func makePrefetcher(ctx context.Context, logger log.Logger, kv kvstore.KV, cfg *
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create L1 client: %w", err) return nil, fmt.Errorf("failed to create L1 client: %w", err)
} }
l2Cl, err := NewL2Client(l2RPC, logger, nil, &L2ClientConfig{L2ClientConfig: l2ClCfg, L2Head: cfg.L2Head}) l2Cl, err := sources.NewL2Client(l2RPC, logger, nil, l2ClCfg)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create L2 client: %w", err) return nil, fmt.Errorf("failed to create L2 client: %w", err)
} }
......
...@@ -23,8 +23,7 @@ func TestServerMode(t *testing.T) { ...@@ -23,8 +23,7 @@ func TestServerMode(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
l1Head := common.Hash{0x11} l1Head := common.Hash{0x11}
l2OutputRoot := common.Hash{0x33} cfg := config.NewConfig(&chaincfg.Goerli, config.OPGoerliChainConfig, l1Head, common.Hash{0x22}, common.Hash{0x33}, 1000)
cfg := config.NewConfig(&chaincfg.Goerli, config.OPGoerliChainConfig, l1Head, common.Hash{0x22}, l2OutputRoot, common.Hash{0x44}, 1000)
cfg.DataDir = dir cfg.DataDir = dir
cfg.ServerMode = true cfg.ServerMode = true
...@@ -44,8 +43,7 @@ func TestServerMode(t *testing.T) { ...@@ -44,8 +43,7 @@ func TestServerMode(t *testing.T) {
hClient := preimage.NewHintWriter(hintClient) hClient := preimage.NewHintWriter(hintClient)
l1PreimageOracle := l1.NewPreimageOracle(pClient, hClient) l1PreimageOracle := l1.NewPreimageOracle(pClient, hClient)
require.Equal(t, l1Head.Bytes(), pClient.Get(client.L1HeadLocalIndex), "Should get l1 head preimages") require.Equal(t, l1Head.Bytes(), pClient.Get(client.L1HeadLocalIndex), "Should get preimages")
require.Equal(t, l2OutputRoot.Bytes(), pClient.Get(client.L2OutputRootLocalIndex), "Should get l2 output root preimages")
// Should exit when a preimage is unavailable // Should exit when a preimage is unavailable
require.Panics(t, func() { require.Panics(t, func() {
......
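For orientation, the test above reads each boot input back through pClient.Get(...) with its local index. Below is a minimal, hedged Go sketch of how a client-side boot routine could decode those four local preimages; the bootOracle interface and parameter names are assumptions for illustration, and only the big-endian uint64 encoding of the claim block number is taken from the surrounding diff.

package boot

import (
	"encoding/binary"

	"github.com/ethereum/go-ethereum/common"
)

// bootOracle is an assumed, minimal view of the preimage oracle client used in
// the test above (pClient.Get(...)); the real interface may differ.
type bootOracle interface {
	Get(localIndex uint64) []byte
}

// readBootInfo decodes the four boot inputs from their local preimage indices:
// 32-byte hashes for the L1 head, agreed L2 head and claimed output root, and a
// big-endian uint64 for the claim block number.
func readBootInfo(o bootOracle, l1HeadIdx, l2HeadIdx, l2ClaimIdx, l2ClaimBlockIdx uint64) (l1Head, l2Head, l2Claim common.Hash, claimBlock uint64) {
	l1Head = common.BytesToHash(o.Get(l1HeadIdx))
	l2Head = common.BytesToHash(o.Get(l2HeadIdx))
	l2Claim = common.BytesToHash(o.Get(l2ClaimIdx))
	claimBlock = binary.BigEndian.Uint64(o.Get(l2ClaimBlockIdx))
	return
}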
...@@ -19,7 +19,7 @@ func NewLocalPreimageSource(config *config.Config) *LocalPreimageSource { ...@@ -19,7 +19,7 @@ func NewLocalPreimageSource(config *config.Config) *LocalPreimageSource {
var ( var (
l1HeadKey = client.L1HeadLocalIndex.PreimageKey() l1HeadKey = client.L1HeadLocalIndex.PreimageKey()
l2OutputRootKey = client.L2OutputRootLocalIndex.PreimageKey() l2HeadKey = client.L2HeadLocalIndex.PreimageKey()
l2ClaimKey = client.L2ClaimLocalIndex.PreimageKey() l2ClaimKey = client.L2ClaimLocalIndex.PreimageKey()
l2ClaimBlockNumberKey = client.L2ClaimBlockNumberLocalIndex.PreimageKey() l2ClaimBlockNumberKey = client.L2ClaimBlockNumberLocalIndex.PreimageKey()
l2ChainConfigKey = client.L2ChainConfigLocalIndex.PreimageKey() l2ChainConfigKey = client.L2ChainConfigLocalIndex.PreimageKey()
...@@ -30,8 +30,8 @@ func (s *LocalPreimageSource) Get(key common.Hash) ([]byte, error) { ...@@ -30,8 +30,8 @@ func (s *LocalPreimageSource) Get(key common.Hash) ([]byte, error) {
switch [32]byte(key) { switch [32]byte(key) {
case l1HeadKey: case l1HeadKey:
return s.config.L1Head.Bytes(), nil return s.config.L1Head.Bytes(), nil
case l2OutputRootKey: case l2HeadKey:
return s.config.L2OutputRoot.Bytes(), nil return s.config.L2Head.Bytes(), nil
case l2ClaimKey: case l2ClaimKey:
return s.config.L2Claim.Bytes(), nil return s.config.L2Claim.Bytes(), nil
case l2ClaimBlockNumberKey: case l2ClaimBlockNumberKey:
......
...@@ -17,7 +17,7 @@ func TestLocalPreimageSource(t *testing.T) { ...@@ -17,7 +17,7 @@ func TestLocalPreimageSource(t *testing.T) {
cfg := &config.Config{ cfg := &config.Config{
Rollup: &chaincfg.Goerli, Rollup: &chaincfg.Goerli,
L1Head: common.HexToHash("0x1111"), L1Head: common.HexToHash("0x1111"),
L2OutputRoot: common.HexToHash("0x2222"), L2Head: common.HexToHash("0x2222"),
L2Claim: common.HexToHash("0x3333"), L2Claim: common.HexToHash("0x3333"),
L2ClaimBlockNumber: 1234, L2ClaimBlockNumber: 1234,
L2ChainConfig: params.GoerliChainConfig, L2ChainConfig: params.GoerliChainConfig,
...@@ -29,7 +29,7 @@ func TestLocalPreimageSource(t *testing.T) { ...@@ -29,7 +29,7 @@ func TestLocalPreimageSource(t *testing.T) {
expected []byte expected []byte
}{ }{
{"L1Head", l1HeadKey, cfg.L1Head.Bytes()}, {"L1Head", l1HeadKey, cfg.L1Head.Bytes()},
{"L2OutputRoot", l2OutputRootKey, cfg.L2OutputRoot.Bytes()}, {"L2Head", l2HeadKey, cfg.L2Head.Bytes()},
{"L2Claim", l2ClaimKey, cfg.L2Claim.Bytes()}, {"L2Claim", l2ClaimKey, cfg.L2Claim.Bytes()},
{"L2ClaimBlockNumber", l2ClaimBlockNumberKey, binary.BigEndian.AppendUint64(nil, cfg.L2ClaimBlockNumber)}, {"L2ClaimBlockNumber", l2ClaimBlockNumberKey, binary.BigEndian.AppendUint64(nil, cfg.L2ClaimBlockNumber)},
{"Rollup", rollupKey, asJson(t, cfg.Rollup)}, {"Rollup", rollupKey, asJson(t, cfg.Rollup)},
......
package host
import (
"context"
"fmt"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/sources/caching"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
type L2Client struct {
*sources.L2Client
// l2Head is the L2 block hash that we use to fetch L2 output
l2Head common.Hash
}
type L2ClientConfig struct {
*sources.L2ClientConfig
L2Head common.Hash
}
func NewL2Client(client client.RPC, log log.Logger, metrics caching.Metrics, config *L2ClientConfig) (*L2Client, error) {
l2Client, err := sources.NewL2Client(client, log, metrics, config.L2ClientConfig)
if err != nil {
return nil, err
}
return &L2Client{
L2Client: l2Client,
l2Head: config.L2Head,
}, nil
}
func (s *L2Client) OutputByRoot(ctx context.Context, l2OutputRoot common.Hash) (eth.Output, error) {
output, err := s.OutputV0AtBlock(ctx, s.l2Head)
if err != nil {
return nil, err
}
if eth.OutputRoot(output) != eth.Bytes32(l2OutputRoot) {
// For fault proofs, we only reference outputs at the l2 head at boot time
// The caller shouldn't be requesting outputs at any other block
return nil, fmt.Errorf("unknown output root")
}
return output, nil
}
...@@ -29,7 +29,6 @@ type L2Source interface { ...@@ -29,7 +29,6 @@ type L2Source interface {
InfoAndTxsByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Transactions, error) InfoAndTxsByHash(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Transactions, error)
NodeByHash(ctx context.Context, hash common.Hash) ([]byte, error) NodeByHash(ctx context.Context, hash common.Hash) ([]byte, error)
CodeByHash(ctx context.Context, hash common.Hash) ([]byte, error) CodeByHash(ctx context.Context, hash common.Hash) ([]byte, error)
OutputByRoot(ctx context.Context, root common.Hash) (eth.Output, error)
} }
type Prefetcher struct { type Prefetcher struct {
...@@ -125,12 +124,6 @@ func (p *Prefetcher) prefetch(ctx context.Context, hint string) error { ...@@ -125,12 +124,6 @@ func (p *Prefetcher) prefetch(ctx context.Context, hint string) error {
return fmt.Errorf("failed to fetch L2 contract code %s: %w", hash, err) return fmt.Errorf("failed to fetch L2 contract code %s: %w", hash, err)
} }
return p.kvStore.Put(preimage.Keccak256Key(hash).PreimageKey(), code) return p.kvStore.Put(preimage.Keccak256Key(hash).PreimageKey(), code)
case l2.HintL2Output:
output, err := p.l2Fetcher.OutputByRoot(ctx, hash)
if err != nil {
return fmt.Errorf("failed to fetch L2 output root %s: %w", hash, err)
}
return p.kvStore.Put(preimage.Keccak256Key(hash).PreimageKey(), output.Marshal())
} }
return fmt.Errorf("unknown hint type: %v", hintType) return fmt.Errorf("unknown hint type: %v", hintType)
} }
......
...@@ -286,15 +286,6 @@ type l2Client struct { ...@@ -286,15 +286,6 @@ type l2Client struct {
*testutils.MockDebugClient *testutils.MockDebugClient
} }
func (m *l2Client) OutputByRoot(ctx context.Context, root common.Hash) (eth.Output, error) {
out := m.Mock.MethodCalled("OutputByRoot", root)
return out[0].(eth.Output), *out[1].(*error)
}
func (m *l2Client) ExpectOutputByRoot(root common.Hash, output eth.Output, err error) {
m.Mock.On("OutputByRoot", root).Once().Return(output, &err)
}
func createPrefetcher(t *testing.T) (*Prefetcher, *testutils.MockL1Source, *l2Client, kvstore.KV) { func createPrefetcher(t *testing.T) (*Prefetcher, *testutils.MockL1Source, *l2Client, kvstore.KV) {
logger := testlog.Logger(t, log.LvlDebug) logger := testlog.Logger(t, log.LvlDebug)
kv := kvstore.NewMemKV() kv := kvstore.NewMemKV()
......
...@@ -125,20 +125,6 @@ func (s *RetryingL2Source) CodeByHash(ctx context.Context, hash common.Hash) ([] ...@@ -125,20 +125,6 @@ func (s *RetryingL2Source) CodeByHash(ctx context.Context, hash common.Hash) ([]
return code, err return code, err
} }
func (s *RetryingL2Source) OutputByRoot(ctx context.Context, root common.Hash) (eth.Output, error) {
var output eth.Output
err := backoff.DoCtx(ctx, maxAttempts, s.strategy, func() error {
o, err := s.source.OutputByRoot(ctx, root)
if err != nil {
s.logger.Warn("Failed to fetch l2 output", "root", root, "err", err)
return err
}
output = o
return nil
})
return output, err
}
func NewRetryingL2Source(logger log.Logger, source L2Source) *RetryingL2Source { func NewRetryingL2Source(logger log.Logger, source L2Source) *RetryingL2Source {
return &RetryingL2Source{ return &RetryingL2Source{
logger: logger, logger: logger,
......
...@@ -119,8 +119,6 @@ func TestRetryingL2Source(t *testing.T) { ...@@ -119,8 +119,6 @@ func TestRetryingL2Source(t *testing.T) {
&types.Transaction{}, &types.Transaction{},
} }
data := []byte{1, 2, 3, 4, 5} data := []byte{1, 2, 3, 4, 5}
output := &eth.OutputV0{}
wrongOutput := &eth.OutputV0{BlockHash: common.Hash{0x99}}
t.Run("InfoAndTxsByHash Success", func(t *testing.T) { t.Run("InfoAndTxsByHash Success", func(t *testing.T) {
source, mock := createL2Source(t) source, mock := createL2Source(t)
...@@ -189,28 +187,6 @@ func TestRetryingL2Source(t *testing.T) { ...@@ -189,28 +187,6 @@ func TestRetryingL2Source(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, data, actual) require.Equal(t, data, actual)
}) })
t.Run("OutputByRoot Success", func(t *testing.T) {
source, mock := createL2Source(t)
defer mock.AssertExpectations(t)
mock.ExpectOutputByRoot(hash, output, nil)
actualOutput, err := source.OutputByRoot(ctx, hash)
require.NoError(t, err)
require.Equal(t, output, actualOutput)
})
t.Run("OutputByRoot Error", func(t *testing.T) {
source, mock := createL2Source(t)
defer mock.AssertExpectations(t)
expectedErr := errors.New("boom")
mock.ExpectOutputByRoot(hash, wrongOutput, expectedErr)
mock.ExpectOutputByRoot(hash, output, nil)
actualOutput, err := source.OutputByRoot(ctx, hash)
require.NoError(t, err)
require.Equal(t, output, actualOutput)
})
} }
func createL2Source(t *testing.T) (*RetryingL2Source, *MockL2Source) { func createL2Source(t *testing.T) (*RetryingL2Source, *MockL2Source) {
...@@ -241,11 +217,6 @@ func (m *MockL2Source) CodeByHash(ctx context.Context, hash common.Hash) ([]byte ...@@ -241,11 +217,6 @@ func (m *MockL2Source) CodeByHash(ctx context.Context, hash common.Hash) ([]byte
return out[0].([]byte), *out[1].(*error) return out[0].([]byte), *out[1].(*error)
} }
func (m *MockL2Source) OutputByRoot(ctx context.Context, root common.Hash) (eth.Output, error) {
out := m.Mock.MethodCalled("OutputByRoot", root)
return out[0].(eth.Output), *out[1].(*error)
}
func (m *MockL2Source) ExpectInfoAndTxsByHash(blockHash common.Hash, info eth.BlockInfo, txs types.Transactions, err error) { func (m *MockL2Source) ExpectInfoAndTxsByHash(blockHash common.Hash, info eth.BlockInfo, txs types.Transactions, err error) {
m.Mock.On("InfoAndTxsByHash", blockHash).Once().Return(info, txs, &err) m.Mock.On("InfoAndTxsByHash", blockHash).Once().Return(info, txs, &err)
} }
...@@ -258,8 +229,4 @@ func (m *MockL2Source) ExpectCodeByHash(hash common.Hash, code []byte, err error ...@@ -258,8 +229,4 @@ func (m *MockL2Source) ExpectCodeByHash(hash common.Hash, code []byte, err error
m.Mock.On("CodeByHash", hash).Once().Return(code, &err) m.Mock.On("CodeByHash", hash).Once().Return(code, &err)
} }
func (m *MockL2Source) ExpectOutputByRoot(root common.Hash, output eth.Output, err error) {
m.Mock.On("OutputByRoot", root).Once().Return(output, &err)
}
var _ L2Source = (*MockL2Source)(nil) var _ L2Source = (*MockL2Source)(nil)
...@@ -100,31 +100,7 @@ func Run(l1RpcUrl string, l2RpcUrl string, l2OracleAddr common.Address) error { ...@@ -100,31 +100,7 @@ func Run(l1RpcUrl string, l2RpcUrl string, l2OracleAddr common.Address) error {
if err != nil { if err != nil {
return fmt.Errorf("retrieve agreed l2 block: %w", err) return fmt.Errorf("retrieve agreed l2 block: %w", err)
} }
agreedOutputIndex, err := outputOracle.GetL2OutputIndexAfter(callOpts, l2AgreedBlock.Number()) l2Head := l2AgreedBlock.Hash()
if err != nil {
return fmt.Errorf("failed to output index after agreed block")
}
// Find an output that differs from what is being claimed
var agreedOutput bindings.TypesOutputProposal
for {
agreedOutput, err = outputOracle.GetL2Output(callOpts, agreedOutputIndex)
if err != nil {
return fmt.Errorf("retrieve agreed output: %w", err)
}
if agreedOutput.OutputRoot != output.OutputRoot {
break
}
fmt.Printf("Output at %v equals output at finalized block. Continuing search...\n", agreedOutput.L2BlockNumber)
agreedOutputIndex.Sub(agreedOutputIndex, big.NewInt(1))
if agreedOutputIndex.Int64() < 0 {
return fmt.Errorf("failed to find an output different from finalized block output")
}
}
l2BlockAtOutput, err := l2Client.BlockByNumber(ctx, agreedOutput.L2BlockNumber)
if err != nil {
return fmt.Errorf("retrieve agreed block: %w", err)
}
l2Head := l2BlockAtOutput.Hash()
temp, err := os.MkdirTemp("", "oracledata") temp, err := os.MkdirTemp("", "oracledata")
if err != nil { if err != nil {
...@@ -144,7 +120,6 @@ func Run(l1RpcUrl string, l2RpcUrl string, l2OracleAddr common.Address) error { ...@@ -144,7 +120,6 @@ func Run(l1RpcUrl string, l2RpcUrl string, l2OracleAddr common.Address) error {
"--datadir", temp, "--datadir", temp,
"--l1.head", l1Head.Hex(), "--l1.head", l1Head.Hex(),
"--l2.head", l2Head.Hex(), "--l2.head", l2Head.Hex(),
"--l2.outputroot", common.Bytes2Hex(agreedOutput.OutputRoot[:]),
"--l2.claim", l2Claim.Hex(), "--l2.claim", l2Claim.Hex(),
"--l2.blocknumber", l2BlockNumber.String(), "--l2.blocknumber", l2BlockNumber.String(),
} }
......
...@@ -94,7 +94,8 @@ func RecordError(provider string, errorLabel string) { ...@@ -94,7 +94,8 @@ func RecordError(provider string, errorLabel string) {
func RecordErrorDetails(provider string, label string, err error) { func RecordErrorDetails(provider string, label string, err error) {
errClean := nonAlphanumericRegex.ReplaceAllString(err.Error(), "") errClean := nonAlphanumericRegex.ReplaceAllString(err.Error(), "")
errClean = strings.ReplaceAll(errClean, " ", "_") errClean = strings.ReplaceAll(errClean, " ", "_")
label = fmt.Sprintf("%s.%s", label) errClean = strings.ReplaceAll(errClean, "__", "_")
label = fmt.Sprintf("%s.%s", label, errClean)
RecordError(provider, label) RecordError(provider, label)
} }
......
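The hunk above fixes a missing Sprintf argument and collapses doubled underscores in the error label. A self-contained sketch of the same sanitization pattern follows; the regular expression here is an assumption, not the package's actual nonAlphanumericRegex definition.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Assumed stand-in for the package's nonAlphanumericRegex; the real pattern may differ.
var nonAlphanumeric = regexp.MustCompile(`[^a-zA-Z0-9 ]+`)

// sanitizeErrorLabel mirrors the fixed logic above: strip punctuation, turn
// spaces into underscores, collapse doubled underscores, and append to the label.
func sanitizeErrorLabel(label string, err error) string {
	clean := nonAlphanumeric.ReplaceAllString(err.Error(), "")
	clean = strings.ReplaceAll(clean, " ", "_")
	clean = strings.ReplaceAll(clean, "__", "_")
	return fmt.Sprintf("%s.%s", label, clean)
}

func main() {
	fmt.Println(sanitizeErrorLabel("send_tx", fmt.Errorf("dial tcp: connection refused!")))
	// With the assumed regex this prints: send_tx.dial_tcp_connection_refused
}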
...@@ -44,10 +44,11 @@ func resolveAddr(ctx context.Context, client *kms.KeyManagementClient, keyName s ...@@ -44,10 +44,11 @@ func resolveAddr(ctx context.Context, client *kms.KeyManagementClient, keyName s
if err != nil { if err != nil {
return common.Address{}, fmt.Errorf("google kms public key %q lookup: %w", keyName, err) return common.Address{}, fmt.Errorf("google kms public key %q lookup: %w", keyName, err)
} }
keyPem := resp.Pem
block, _ := pem.Decode([]byte(resp.Pem)) block, _ := pem.Decode([]byte(keyPem))
if block == nil { if block == nil {
return common.Address{}, fmt.Errorf("google kms public key %q pem empty: %.130q", keyName, resp.Pem) return common.Address{}, fmt.Errorf("google kms public key %q pem empty: %.130q", keyName, keyPem)
} }
var info struct { var info struct {
...@@ -59,11 +60,6 @@ func resolveAddr(ctx context.Context, client *kms.KeyManagementClient, keyName s ...@@ -59,11 +60,6 @@ func resolveAddr(ctx context.Context, client *kms.KeyManagementClient, keyName s
return common.Address{}, fmt.Errorf("google kms public key %q pem block %q: %v", keyName, block.Type, err) return common.Address{}, fmt.Errorf("google kms public key %q pem block %q: %v", keyName, block.Type, err)
} }
wantAlg := asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
if gotAlg := info.AlgID.Algorithm; !gotAlg.Equal(wantAlg) {
return common.Address{}, fmt.Errorf("google kms public key %q asn.1 algorithm %s intead of %s", keyName, gotAlg, wantAlg)
}
return pubKeyAddr(info.Key.Bytes), nil return pubKeyAddr(info.Key.Bytes), nil
} }
......
# @eth-optimism/drippie-mon # @eth-optimism/drippie-mon
## 0.4.2
### Patch Changes
- [#6469](https://github.com/ethereum-optimism/optimism/pull/6469) [`0c769680e`](https://github.com/ethereum-optimism/optimism/commit/0c769680e44208c086deef2f9c03c37da2b536fe) Thanks [@maurelian](https://github.com/maurelian)! - Update language in fault-mon from batches to outputs
## 0.4.1 ## 0.4.1
### Patch Changes ### Patch Changes
......
{ {
"private": true, "private": true,
"name": "@eth-optimism/chain-mon", "name": "@eth-optimism/chain-mon",
"version": "0.4.1", "version": "0.4.2",
"description": "[Optimism] Chain monitoring services", "description": "[Optimism] Chain monitoring services",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
......
...@@ -31,22 +31,22 @@ export const findOutputForIndex = async ( ...@@ -31,22 +31,22 @@ export const findOutputForIndex = async (
} }
/** /**
* Finds the first state batch index that has not yet passed the fault proof window. * Finds the first L2 output index that has not yet passed the fault proof window.
* *
* @param oracle Output oracle contract. * @param oracle Output oracle contract.
* @returns Starting state root batch index. * @returns Starting L2 output index.
*/ */
export const findFirstUnfinalizedStateBatchIndex = async ( export const findFirstUnfinalizedOutputIndex = async (
oracle: Contract, oracle: Contract,
fpw: number, fpw: number,
logger?: Logger logger?: Logger
): Promise<number> => { ): Promise<number> => {
const latestBlock = await oracle.provider.getBlock('latest') const latestBlock = await oracle.provider.getBlock('latest')
const totalBatches = (await oracle.nextOutputIndex()).toNumber() const totalOutputs = (await oracle.nextOutputIndex()).toNumber()
// Perform a binary search to find the next batch that will pass the challenge period. // Perform a binary search to find the next batch that will pass the challenge period.
let lo = 0 let lo = 0
let hi = totalBatches let hi = totalOutputs
while (lo !== hi) { while (lo !== hi) {
const mid = Math.floor((lo + hi) / 2) const mid = Math.floor((lo + hi) / 2)
const outputData = await findOutputForIndex(oracle, mid, logger) const outputData = await findOutputForIndex(oracle, mid, logger)
...@@ -60,7 +60,7 @@ export const findFirstUnfinalizedStateBatchIndex = async ( ...@@ -60,7 +60,7 @@ export const findFirstUnfinalizedStateBatchIndex = async (
// Result will be zero if the chain is less than FPW seconds old. Only returns undefined in the // Result will be zero if the chain is less than FPW seconds old. Only returns undefined in the
// case that no batches have been submitted for an entire challenge period. // case that no batches have been submitted for an entire challenge period.
if (lo === totalBatches) { if (lo === totalOutputs) {
return undefined return undefined
} else { } else {
return lo return lo
......
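The renamed helper above performs a binary search for the first output whose timestamp is still inside the fault proof window. A minimal Go sketch of the same search over an assumed slice of output timestamps, purely to illustrate the invariant (lo ends at the first unfinalized index, or at the total count if everything is already finalized):

package faultmon

// firstUnfinalizedIndex returns the index of the first output whose timestamp is
// still inside the fault proof window, or len(outputTimestamps) if every output
// is already finalized. This mirrors the binary search in the diff above; the
// slice-of-timestamps input is an assumption for illustration.
func firstUnfinalizedIndex(outputTimestamps []uint64, nowTimestamp, faultProofWindow uint64) int {
	lo, hi := 0, len(outputTimestamps)
	for lo != hi {
		mid := (lo + hi) / 2
		if outputTimestamps[mid]+faultProofWindow < nowTimestamp {
			lo = mid + 1 // output at mid has passed the window; search to the right
		} else {
			hi = mid // output at mid is still challengeable
		}
	}
	return lo
}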
...@@ -25,20 +25,17 @@ import { Contract, ethers } from 'ethers' ...@@ -25,20 +25,17 @@ import { Contract, ethers } from 'ethers'
import dateformat from 'dateformat' import dateformat from 'dateformat'
import { version } from '../../package.json' import { version } from '../../package.json'
import { import { findFirstUnfinalizedOutputIndex, findOutputForIndex } from './helpers'
findFirstUnfinalizedStateBatchIndex,
findOutputForIndex,
} from './helpers'
type Options = { type Options = {
l1RpcProvider: Provider l1RpcProvider: Provider
l2RpcProvider: Provider l2RpcProvider: Provider
startBatchIndex: number startOutputIndex: number
optimismPortalAddress?: string optimismPortalAddress?: string
} }
type Metrics = { type Metrics = {
highestBatchIndex: Gauge highestOutputIndex: Gauge
isCurrentlyMismatched: Gauge isCurrentlyMismatched: Gauge
nodeConnectionFailures: Gauge nodeConnectionFailures: Gauge
} }
...@@ -47,7 +44,7 @@ type State = { ...@@ -47,7 +44,7 @@ type State = {
faultProofWindow: number faultProofWindow: number
outputOracle: Contract outputOracle: Contract
messenger: CrossChainMessenger messenger: CrossChainMessenger
currentBatchIndex: number currentOutputIndex: number
diverged: boolean diverged: boolean
} }
...@@ -70,7 +67,7 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> { ...@@ -70,7 +67,7 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
validator: validators.provider, validator: validators.provider,
desc: 'Provider for interacting with L2', desc: 'Provider for interacting with L2',
}, },
startBatchIndex: { startOutputIndex: {
validator: validators.num, validator: validators.num,
default: -1, default: -1,
desc: 'The L2 height to start from', desc: 'The L2 height to start from',
...@@ -84,9 +81,9 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> { ...@@ -84,9 +81,9 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
}, },
}, },
metricsSpec: { metricsSpec: {
highestBatchIndex: { highestOutputIndex: {
type: Gauge, type: Gauge,
desc: 'Highest batch indices (checked and known)', desc: 'Highest output indices (checked and known)',
labels: ['type'], labels: ['type'],
}, },
isCurrentlyMismatched: { isCurrentlyMismatched: {
...@@ -200,30 +197,32 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> { ...@@ -200,30 +197,32 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
this.state.outputOracle = this.state.messenger.contracts.l1.L2OutputOracle this.state.outputOracle = this.state.messenger.contracts.l1.L2OutputOracle
// Figure out where to start syncing from. // Figure out where to start syncing from.
if (this.options.startBatchIndex === -1) { if (this.options.startOutputIndex === -1) {
this.logger.info('finding appropriate starting unfinalized batch') this.logger.info('finding appropriate starting unfinalized output')
const firstUnfinalized = await findFirstUnfinalizedStateBatchIndex( const firstUnfinalized = await findFirstUnfinalizedOutputIndex(
this.state.outputOracle, this.state.outputOracle,
this.state.faultProofWindow, this.state.faultProofWindow,
this.logger this.logger
) )
// We may not have an unfinalized batches in the case where no batches have been submitted // We may not have any unfinalized outputs in the case where no outputs have been submitted
// for the entire duration of the FAULTPROOFWINDOW. We generally do not expect this to happen on mainnet, // for the entire duration of the FAULTPROOFWINDOW. We generally do not expect this to happen on mainnet,
// but it happens often on testnets because the FAULTPROOFWINDOW is very short. // but it happens often on testnets because the FAULTPROOFWINDOW is very short.
if (firstUnfinalized === undefined) { if (firstUnfinalized === undefined) {
this.logger.info('no unfinalized batches found. skipping all batches.') this.logger.info(
const totalBatches = await this.state.outputOracle.nextOutputIndex() 'no unfinalized outputes found. skipping all outputes.'
this.state.currentBatchIndex = totalBatches.toNumber() - 1 )
const totalOutputes = await this.state.outputOracle.nextOutputIndex()
this.state.currentOutputIndex = totalOutputes.toNumber() - 1
} else { } else {
this.state.currentBatchIndex = firstUnfinalized this.state.currentOutputIndex = firstUnfinalized
} }
} else { } else {
this.state.currentBatchIndex = this.options.startBatchIndex this.state.currentOutputIndex = this.options.startOutputIndex
} }
this.logger.info('starting batch', { this.logger.info('starting output', {
batchIndex: this.state.currentBatchIndex, outputIndex: this.state.currentOutputIndex,
}) })
// Set the initial metrics. // Set the initial metrics.
...@@ -241,12 +240,12 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> { ...@@ -241,12 +240,12 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
async main(): Promise<void> { async main(): Promise<void> {
const startMs = Date.now() const startMs = Date.now()
let latestBatchIndex: number let latestOutputIndex: number
try { try {
const totalBatches = await this.state.outputOracle.nextOutputIndex() const totalOutputs = await this.state.outputOracle.nextOutputIndex()
latestBatchIndex = totalBatches.toNumber() - 1 latestOutputIndex = totalOutputs.toNumber() - 1
} catch (err) { } catch (err) {
this.logger.error('failed to query total # of batches', { this.logger.error('failed to query total # of outputs', {
error: err, error: err,
node: 'l1', node: 'l1',
section: 'nextOutputIndex', section: 'nextOutputIndex',
...@@ -259,34 +258,34 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> { ...@@ -259,34 +258,34 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
return return
} }
if (this.state.currentBatchIndex > latestBatchIndex) { if (this.state.currentOutputIndex > latestOutputIndex) {
this.logger.info('batch index is ahead of the oracle. waiting...', { this.logger.info('output index is ahead of the oracle. waiting...', {
batchIndex: this.state.currentBatchIndex, outputIndex: this.state.currentOutputIndex,
latestBatchIndex, latestOutputIndex,
}) })
await sleep(15000) await sleep(15000)
return return
} }
this.metrics.highestBatchIndex.set({ type: 'known' }, latestBatchIndex) this.metrics.highestOutputIndex.set({ type: 'known' }, latestOutputIndex)
this.logger.info('checking batch', { this.logger.info('checking output', {
batchIndex: this.state.currentBatchIndex, outputIndex: this.state.currentOutputIndex,
latestBatchIndex, latestOutputIndex,
}) })
let outputData: BedrockOutputData let outputData: BedrockOutputData
try { try {
outputData = await findOutputForIndex( outputData = await findOutputForIndex(
this.state.outputOracle, this.state.outputOracle,
this.state.currentBatchIndex, this.state.currentOutputIndex,
this.logger this.logger
) )
} catch (err) { } catch (err) {
this.logger.error('failed to fetch output associated with batch', { this.logger.error('failed to fetch output associated with index', {
error: err, error: err,
node: 'l1', node: 'l1',
section: 'findOutputForIndex', section: 'findOutputForIndex',
batchIndex: this.state.currentBatchIndex, outputIndex: this.state.currentOutputIndex,
}) })
this.metrics.nodeConnectionFailures.inc({ this.metrics.nodeConnectionFailures.inc({
layer: 'l1', layer: 'l1',
...@@ -397,20 +396,20 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> { ...@@ -397,20 +396,20 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
const elapsedMs = Date.now() - startMs const elapsedMs = Date.now() - startMs
// Mark the current batch index as checked // Mark the current output index as checked
this.logger.info('checked batch ok', { this.logger.info('checked output ok', {
batchIndex: this.state.currentBatchIndex, outputIndex: this.state.currentOutputIndex,
timeMs: elapsedMs, timeMs: elapsedMs,
}) })
this.metrics.highestBatchIndex.set( this.metrics.highestOutputIndex.set(
{ type: 'checked' }, { type: 'checked' },
this.state.currentBatchIndex this.state.currentOutputIndex
) )
// If we got through the above without throwing an error, we should be // If we got through the above without throwing an error, we should be
// fine to reset and move onto the next batch // fine to reset and move onto the next output
this.state.diverged = false this.state.diverged = false
this.state.currentBatchIndex++ this.state.currentOutputIndex++
this.metrics.isCurrentlyMismatched.set(0) this.metrics.isCurrentlyMismatched.set(0)
} }
} }
......
import { Provider } from '@ethersproject/abstract-provider'
import { Logger } from '@eth-optimism/common-ts'
/**
* Finds the L1 block number closest to (at or before) the start of the fault proof
* window, based on the timestamp of the current chain tip.
*
* @param l1RpcProvider L1 provider used to look up blocks.
* @param faultProofWindow Fault proof window in seconds.
* @param logger Logger used to report lookup failures.
* @returns The block number at the start of the fault proof window.
*/
export const getLastFinalizedBlock = async (
l1RpcProvider: Provider,
faultProofWindow: number,
logger: Logger
): Promise<number> => {
let guessWindowStartBlock
try {
const l1Block = await l1RpcProvider.getBlock('latest')
// The time corresponding to the start of the FPW, based on the current block.
const windowStartTime = l1Block.timestamp - faultProofWindow
// Use the FPW to find the block number that is the start of the FPW.
guessWindowStartBlock = l1Block.number - faultProofWindow / 12
let block = await l1RpcProvider.getBlock(guessWindowStartBlock)
while (block.timestamp > windowStartTime) {
guessWindowStartBlock--
block = await l1RpcProvider.getBlock(guessWindowStartBlock)
}
return block.number
} catch (err) {
logger.fatal('error when querying for block', {
errors: err,
})
throw new Error(
`unable to find block number ${guessWindowStartBlock || 'latest'}`
)
}
}
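The initial guess in the helper above assumes roughly 12-second L1 blocks: with a 7-day window (604,800 seconds) the search starts 50,400 blocks back, then walks backwards until the block timestamp is at or before the window start. A hedged Go equivalent, where getBlock is an assumed accessor returning (number, timestamp) and the chain is assumed to be older than the window:

package helpers

// lastFinalizedBlock mirrors the helper above: guess the block at the start of
// the fault proof window assuming 12-second L1 blocks, then step back until the
// block timestamp is no longer inside the window.
func lastFinalizedBlock(getBlock func(num uint64) (uint64, uint64), latestNum, latestTime, faultProofWindow uint64) uint64 {
	windowStartTime := latestTime - faultProofWindow
	guess := latestNum - faultProofWindow/12
	num, ts := getBlock(guess)
	for ts > windowStartTime {
		guess--
		num, ts = getBlock(guess)
	}
	return num
}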
...@@ -13,6 +13,7 @@ import { Event } from 'ethers' ...@@ -13,6 +13,7 @@ import { Event } from 'ethers'
import dateformat from 'dateformat' import dateformat from 'dateformat'
import { version } from '../../package.json' import { version } from '../../package.json'
import { getLastFinalizedBlock as getLastFinalizedBlock } from './helpers'
type Options = { type Options = {
l1RpcProvider: Provider l1RpcProvider: Provider
...@@ -30,7 +31,7 @@ type Metrics = { ...@@ -30,7 +31,7 @@ type Metrics = {
type State = { type State = {
messenger: CrossChainMessenger messenger: CrossChainMessenger
highestUncheckedBlockNumber: number highestUncheckedBlockNumber: number
finalizationWindow: number faultProofWindow: number
forgeryDetected: boolean forgeryDetected: boolean
} }
...@@ -109,10 +110,20 @@ export class WithdrawalMonitor extends BaseServiceV2<Options, Metrics, State> { ...@@ -109,10 +110,20 @@ export class WithdrawalMonitor extends BaseServiceV2<Options, Metrics, State> {
// Not detected by default. // Not detected by default.
this.state.forgeryDetected = false this.state.forgeryDetected = false
// For now we'll just start take it from the env or the tip of the chain this.state.faultProofWindow =
await this.state.messenger.getChallengePeriodSeconds()
this.logger.info(
`fault proof window is ${this.state.faultProofWindow} seconds`
)
// Set the start block number.
if (this.options.startBlockNumber === -1) { if (this.options.startBlockNumber === -1) {
this.state.highestUncheckedBlockNumber = // We default to starting from the last finalized block.
await this.options.l1RpcProvider.getBlockNumber() this.state.highestUncheckedBlockNumber = await getLastFinalizedBlock(
this.options.l1RpcProvider,
this.state.faultProofWindow,
this.logger
)
} else { } else {
this.state.highestUncheckedBlockNumber = this.options.startBlockNumber this.state.highestUncheckedBlockNumber = this.options.startBlockNumber
} }
......
...@@ -8,7 +8,7 @@ import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' ...@@ -8,7 +8,7 @@ import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers'
import { expect } from './setup' import { expect } from './setup'
import { import {
findOutputForIndex, findOutputForIndex,
findFirstUnfinalizedStateBatchIndex, findFirstUnfinalizedOutputIndex,
} from '../../src/fault-mon' } from '../../src/fault-mon'
describe('helpers', () => { describe('helpers', () => {
...@@ -122,7 +122,7 @@ describe('helpers', () => { ...@@ -122,7 +122,7 @@ describe('helpers', () => {
}) })
it('should find the first batch older than the FPW', async () => { it('should find the first batch older than the FPW', async () => {
const first = await findFirstUnfinalizedStateBatchIndex( const first = await findFirstUnfinalizedOutputIndex(
L2OutputOracle, L2OutputOracle,
deployConfig.finalizationPeriodSeconds deployConfig.finalizationPeriodSeconds
) )
...@@ -164,7 +164,7 @@ describe('helpers', () => { ...@@ -164,7 +164,7 @@ describe('helpers', () => {
}) })
it('should return zero', async () => { it('should return zero', async () => {
const first = await findFirstUnfinalizedStateBatchIndex( const first = await findFirstUnfinalizedOutputIndex(
L2OutputOracle, L2OutputOracle,
deployConfig.finalizationPeriodSeconds deployConfig.finalizationPeriodSeconds
) )
...@@ -214,7 +214,7 @@ describe('helpers', () => { ...@@ -214,7 +214,7 @@ describe('helpers', () => {
}) })
it('should return undefined', async () => { it('should return undefined', async () => {
const first = await findFirstUnfinalizedStateBatchIndex( const first = await findFirstUnfinalizedOutputIndex(
L2OutputOracle, L2OutputOracle,
deployConfig.finalizationPeriodSeconds deployConfig.finalizationPeriodSeconds
) )
......
...@@ -85,35 +85,35 @@ FaucetTest:test_nonAdmin_drip_fails() (gas: 262520) ...@@ -85,35 +85,35 @@ FaucetTest:test_nonAdmin_drip_fails() (gas: 262520)
FaucetTest:test_receive_succeeds() (gas: 17401) FaucetTest:test_receive_succeeds() (gas: 17401)
FaucetTest:test_withdraw_nonAdmin_reverts() (gas: 13145) FaucetTest:test_withdraw_nonAdmin_reverts() (gas: 13145)
FaucetTest:test_withdraw_succeeds() (gas: 78359) FaucetTest:test_withdraw_succeeds() (gas: 78359)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot1:test_resolvesCorrectly_succeeds() (gas: 498839) FaultDisputeGame_ResolvesCorrectly_CorrectRoot1:test_resolvesCorrectly_succeeds() (gas: 501957)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot2:test_resolvesCorrectly_succeeds() (gas: 505685) FaultDisputeGame_ResolvesCorrectly_CorrectRoot2:test_resolvesCorrectly_succeeds() (gas: 508759)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot3:test_resolvesCorrectly_succeeds() (gas: 502382) FaultDisputeGame_ResolvesCorrectly_CorrectRoot3:test_resolvesCorrectly_succeeds() (gas: 505478)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot4:test_resolvesCorrectly_succeeds() (gas: 505561) FaultDisputeGame_ResolvesCorrectly_CorrectRoot4:test_resolvesCorrectly_succeeds() (gas: 508657)
FaultDisputeGame_ResolvesCorrectly_CorrectRoot5:test_resolvesCorrectly_succeeds() (gas: 504878) FaultDisputeGame_ResolvesCorrectly_CorrectRoot5:test_resolvesCorrectly_succeeds() (gas: 507974)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot1:test_resolvesCorrectly_succeeds() (gas: 497604) FaultDisputeGame_ResolvesCorrectly_IncorrectRoot1:test_resolvesCorrectly_succeeds() (gas: 500722)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot2:test_resolvesCorrectly_succeeds() (gas: 504450) FaultDisputeGame_ResolvesCorrectly_IncorrectRoot2:test_resolvesCorrectly_succeeds() (gas: 507524)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot3:test_resolvesCorrectly_succeeds() (gas: 501147) FaultDisputeGame_ResolvesCorrectly_IncorrectRoot3:test_resolvesCorrectly_succeeds() (gas: 504243)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot4:test_resolvesCorrectly_succeeds() (gas: 502326) FaultDisputeGame_ResolvesCorrectly_IncorrectRoot4:test_resolvesCorrectly_succeeds() (gas: 505422)
FaultDisputeGame_ResolvesCorrectly_IncorrectRoot5:test_resolvesCorrectly_succeeds() (gas: 501643) FaultDisputeGame_ResolvesCorrectly_IncorrectRoot5:test_resolvesCorrectly_succeeds() (gas: 504739)
FaultDisputeGame_Test:test_extraData_succeeds() (gas: 17404) FaultDisputeGame_Test:test_extraData_succeeds() (gas: 17404)
FaultDisputeGame_Test:test_gameData_succeeds() (gas: 17917) FaultDisputeGame_Test:test_gameData_succeeds() (gas: 17939)
FaultDisputeGame_Test:test_gameStart_succeeds() (gas: 10315) FaultDisputeGame_Test:test_gameStart_succeeds() (gas: 10315)
FaultDisputeGame_Test:test_gameType_succeeds() (gas: 8260) FaultDisputeGame_Test:test_gameType_succeeds() (gas: 8260)
FaultDisputeGame_Test:test_initialRootClaimData_succeeds() (gas: 17669) FaultDisputeGame_Test:test_initialRootClaimData_succeeds() (gas: 17624)
FaultDisputeGame_Test:test_move_clockCorrectness_succeeds() (gas: 416029) FaultDisputeGame_Test:test_move_clockCorrectness_succeeds() (gas: 419180)
FaultDisputeGame_Test:test_move_clockTimeExceeded_reverts() (gas: 26399) FaultDisputeGame_Test:test_move_clockTimeExceeded_reverts() (gas: 26421)
FaultDisputeGame_Test:test_move_defendRoot_reverts() (gas: 13360) FaultDisputeGame_Test:test_move_defendRoot_reverts() (gas: 13360)
FaultDisputeGame_Test:test_move_duplicateClaim_reverts() (gas: 103254) FaultDisputeGame_Test:test_move_duplicateClaim_reverts() (gas: 104120)
FaultDisputeGame_Test:test_move_gameDepthExceeded_reverts() (gas: 408148) FaultDisputeGame_Test:test_move_gameDepthExceeded_reverts() (gas: 411546)
FaultDisputeGame_Test:test_move_gameNotInProgress_reverts() (gas: 10968) FaultDisputeGame_Test:test_move_gameNotInProgress_reverts() (gas: 11012)
FaultDisputeGame_Test:test_move_nonExistentParent_reverts() (gas: 24655) FaultDisputeGame_Test:test_move_nonExistentParent_reverts() (gas: 24677)
FaultDisputeGame_Test:test_move_simpleAttack_succeeds() (gas: 107356) FaultDisputeGame_Test:test_move_simpleAttack_succeeds() (gas: 108110)
FaultDisputeGame_Test:test_resolve_challengeContested_succeeds() (gas: 224820) FaultDisputeGame_Test:test_resolve_challengeContested_succeeds() (gas: 226511)
FaultDisputeGame_Test:test_resolve_notInProgress_reverts() (gas: 9657) FaultDisputeGame_Test:test_resolve_notInProgress_reverts() (gas: 9657)
FaultDisputeGame_Test:test_resolve_rootContested_succeeds() (gas: 109773) FaultDisputeGame_Test:test_resolve_rootContested_succeeds() (gas: 110642)
FaultDisputeGame_Test:test_resolve_rootUncontestedClockNotExpired_succeeds() (gas: 21434) FaultDisputeGame_Test:test_resolve_rootUncontestedClockNotExpired_succeeds() (gas: 21437)
FaultDisputeGame_Test:test_resolve_rootUncontested_succeeds() (gas: 27263) FaultDisputeGame_Test:test_resolve_rootUncontested_succeeds() (gas: 27288)
FaultDisputeGame_Test:test_resolve_teamDeathmatch_succeeds() (gas: 395502) FaultDisputeGame_Test:test_resolve_teamDeathmatch_succeeds() (gas: 398859)
FaultDisputeGame_Test:test_rootClaim_succeeds() (gas: 8225) FaultDisputeGame_Test:test_rootClaim_succeeds() (gas: 8225)
FeeVault_Test:test_constructor_succeeds() (gas: 18185) FeeVault_Test:test_constructor_succeeds() (gas: 18185)
GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 352113) GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 352113)
...@@ -437,9 +437,11 @@ OptimistTest:test_tokenURI_returnsCorrectTokenURI_succeeds() (gas: 195908) ...@@ -437,9 +437,11 @@ OptimistTest:test_tokenURI_returnsCorrectTokenURI_succeeds() (gas: 195908)
OptimistTest:test_transferFrom_soulbound_reverts() (gas: 75512) OptimistTest:test_transferFrom_soulbound_reverts() (gas: 75512)
PostSherlockL1:test_script_succeeds() (gas: 3078) PostSherlockL1:test_script_succeeds() (gas: 3078)
PostSherlockL2:test_script_succeeds() (gas: 3078) PostSherlockL2:test_script_succeeds() (gas: 3078)
PreimageOracle_Test:test_computePreimageKey_succeeds() (gas: 6242) PreimageOracle_Test:test_keccak256PreimageKey_succeeds() (gas: 319)
PreimageOracle_Test:test_loadKeccak256PreimagePart_outOfBoundsOffset_reverts() (gas: 9005) PreimageOracle_Test:test_loadKeccak256PreimagePart_outOfBoundsOffset_reverts() (gas: 8993)
PreimageOracle_Test:test_loadKeccak256PreimagePart_succeeds() (gas: 77502) PreimageOracle_Test:test_loadKeccak256PreimagePart_succeeds() (gas: 76098)
PreimageOracle_Test:test_loadLocalData_onePart_succeeds() (gas: 75840)
PreimageOracle_Test:test_loadLocalData_outOfBoundsOffset_reverts() (gas: 8803)
ProxyAdmin_Test:test_chugsplashChangeProxyAdmin_succeeds() (gas: 35586) ProxyAdmin_Test:test_chugsplashChangeProxyAdmin_succeeds() (gas: 35586)
ProxyAdmin_Test:test_chugsplashGetProxyAdmin_succeeds() (gas: 15675) ProxyAdmin_Test:test_chugsplashGetProxyAdmin_succeeds() (gas: 15675)
ProxyAdmin_Test:test_chugsplashGetProxyImplementation_succeeds() (gas: 51084) ProxyAdmin_Test:test_chugsplashGetProxyImplementation_succeeds() (gas: 51084)
......
...@@ -698,7 +698,8 @@ contract Deploy is Deployer { ...@@ -698,7 +698,8 @@ contract Deploy is Deployer {
_absolutePrestate: absolutePrestate, _absolutePrestate: absolutePrestate,
_maxGameDepth: cfg.faultGameMaxDepth(), _maxGameDepth: cfg.faultGameMaxDepth(),
_gameDuration: Duration.wrap(uint64(cfg.faultGameMaxDuration())), _gameDuration: Duration.wrap(uint64(cfg.faultGameMaxDuration())),
_vm: faultVm _vm: faultVm,
_l2oo: L2OutputOracle(mustGetAddress("L2OutputOracleProxy"))
})); }));
console.log("DisputeGameFactory: set `FaultDisputeGame` implementation"); console.log("DisputeGameFactory: set `FaultDisputeGame` implementation");
} }
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
"src/L2/L2StandardBridge.sol": "0x8ee5257e03ae4ba8555d9f7d13374c8a388315d62c16107bb4cadd450bfeb3d3", "src/L2/L2StandardBridge.sol": "0x8ee5257e03ae4ba8555d9f7d13374c8a388315d62c16107bb4cadd450bfeb3d3",
"src/L2/L2ToL1MessagePasser.sol": "0x7e35c3c4f1dd3d131dd71db07676301f7c477f02b6d6bf0ec468ecf2bed8325b", "src/L2/L2ToL1MessagePasser.sol": "0x7e35c3c4f1dd3d131dd71db07676301f7c477f02b6d6bf0ec468ecf2bed8325b",
"src/L2/SequencerFeeVault.sol": "0x17b30ccaed8b8dbe965c892cb8aae7f594fb4a87e0edd3ca6cd8f94559b86df9", "src/L2/SequencerFeeVault.sol": "0x17b30ccaed8b8dbe965c892cb8aae7f594fb4a87e0edd3ca6cd8f94559b86df9",
"src/dispute/FaultDisputeGame.sol": "0xb36f6456d74a9ee93df97bb6d5a554dcb531d730e6e2b10dd442897875f2ca81",
"src/legacy/DeployerWhitelist.sol": "0x47277d9c8409d517501d172db6697d55090d3d3a9e4bb2b1adea83471d793b6b", "src/legacy/DeployerWhitelist.sol": "0x47277d9c8409d517501d172db6697d55090d3d3a9e4bb2b1adea83471d793b6b",
"src/legacy/L1BlockNumber.sol": "0x1a1690b8b5ab53cf2b5c8e85fb86028b4078ae656286ae482cabe68374334f2a", "src/legacy/L1BlockNumber.sol": "0x1a1690b8b5ab53cf2b5c8e85fb86028b4078ae656286ae482cabe68374334f2a",
"src/legacy/LegacyMessagePasser.sol": "0xc7f42e6165507b4c50a5169a950f66602e6b4b8cff17f5d95994e121abb18390", "src/legacy/LegacyMessagePasser.sol": "0xc7f42e6165507b4c50a5169a950f66602e6b4b8cff17f5d95994e121abb18390",
......
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
/// @title PreimageKeyLib
/// @notice Shared utilities for localizing local keys in the preimage oracle.
library PreimageKeyLib {
/// @notice Generates a context-specific local key for the given local data identifier.
/// @dev See `localize` for a description of the localization operation.
/// @param _ident The identifier of the local data. [0, 32) bytes in size.
/// @return key_ The context-specific local key.
function localizeIdent(uint256 _ident) internal view returns (bytes32 key_) {
assembly {
// Set the type byte in the given identifier to `1` (Local). We only care about
// the [1, 32) bytes in this value.
key_ := or(shl(248, 1), and(_ident, not(shl(248, 0xFF))))
}
key_ = localize(key_);
}
/// @notice Localizes a given local data key for the caller's context.
/// @dev The localization operation is defined as:
/// localize(k) = H(k .. sender) & ~(0xFF << 248) | (0x01 << 248)
/// where H is the Keccak-256 hash function.
/// @param _key The local data key to localize.
/// @return localizedKey_ The localized local data key.
function localize(bytes32 _key) internal view returns (bytes32 localizedKey_) {
assembly {
// Store the local data key and caller next to each other in memory for hashing.
mstore(0, _key)
mstore(0x20, caller())
// Localize the key with the above `localize` operation.
localizedKey_ := or(and(keccak256(0, 0x40), not(shl(248, 0xFF))), shl(248, 1))
}
}
/// @notice Computes and returns the key for a global keccak pre-image.
/// @param _preimage The pre-image.
/// @return key_ The pre-image key.
function keccak256PreimageKey(bytes memory _preimage) internal pure returns (bytes32 key_) {
assembly {
// Grab the size of the `_preimage`
let size := mload(_preimage)
// Compute the pre-image keccak256 hash (aka the pre-image key)
let h := keccak256(add(_preimage, 0x20), size)
// Mask out prefix byte, replace with type 2 byte
key_ := or(and(h, not(shl(248, 0xFF))), shl(248, 2))
}
}
}
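The localization rule above amounts to: hash the key together with the caller, then force the top byte to the local key type (0x01). An off-chain sketch of the same computation in Go, using go-ethereum's Keccak-256 (purely illustrative; note that caller() in the EVM is the 20-byte address left-padded to a 32-byte word):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// localizeKey mirrors PreimageKeyLib.localize: H(key .. caller) with the top
// byte masked out and replaced by the local key type byte (0x01).
func localizeKey(key [32]byte, caller common.Address) [32]byte {
	var callerWord [32]byte
	copy(callerWord[12:], caller.Bytes()) // left-pad the address to 32 bytes

	var out [32]byte
	copy(out[:], crypto.Keccak256(key[:], callerWord[:]))
	out[0] = 0x01 // force the "local" key type
	return out
}

func main() {
	var key [32]byte
	key[31] = 1    // local identifier 1 (the L1 head hash)
	key[0] = 0x01  // type byte set by localizeIdent before localization
	fmt.Printf("%x\n", localizeKey(key, common.HexToAddress("0x1234")))
}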
// SPDX-License-Identifier: MIT // SPDX-License-Identifier: MIT
pragma solidity ^0.8.15; pragma solidity 0.8.15;
import { IPreimageOracle } from "./interfaces/IPreimageOracle.sol";
import { PreimageKeyLib } from "./PreimageKeyLib.sol";
import "./libraries/CannonErrors.sol";
/// @title PreimageOracle /// @title PreimageOracle
/// @notice A contract for storing permissioned pre-images. /// @notice A contract for storing permissioned pre-images.
contract PreimageOracle { contract PreimageOracle is IPreimageOracle {
/// @notice Mapping of pre-image keys to pre-image lengths. /// @notice Mapping of pre-image keys to pre-image lengths.
mapping(bytes32 => uint256) public preimageLengths; mapping(bytes32 => uint256) public preimageLengths;
/// @notice Mapping of pre-image keys to pre-image parts. /// @notice Mapping of pre-image keys to pre-image parts.
mapping(bytes32 => mapping(uint256 => bytes32)) public preimageParts; mapping(bytes32 => mapping(uint256 => bytes32)) public preimageParts;
/// @notice Mapping of pre-image keys to pre-image part offsets. /// @notice Mapping of pre-image keys to pre-image part offsets.
mapping(bytes32 => mapping(uint256 => bool)) public preimagePartOk; mapping(bytes32 => mapping(uint256 => bool)) public preimagePartOk;
/// @notice Reads a pre-image from the oracle. /// @inheritdoc IPreimageOracle
/// @param _key The key of the pre-image to read.
/// @param _offset The offset of the pre-image to read.
/// @return dat_ The pre-image data.
/// @return datLen_ The length of the pre-image data.
function readPreimage(bytes32 _key, uint256 _offset) function readPreimage(bytes32 _key, uint256 _offset)
external external
view view
...@@ -37,11 +35,11 @@ contract PreimageOracle { ...@@ -37,11 +35,11 @@ contract PreimageOracle {
dat_ = preimageParts[_key][_offset]; dat_ = preimageParts[_key][_offset];
} }
// TODO(CLI-4104): /// TODO(CLI-4104):
// we need to mix-in the ID of the dispute for local-type keys to avoid collisions, /// we need to mix-in the ID of the dispute for local-type keys to avoid collisions,
// and restrict local pre-image insertion to the dispute-managing contract. /// and restrict local pre-image insertion to the dispute-managing contract.
// For now we permit anyone to write any pre-image unchecked, to make testing easy. /// For now we permit anyone to write any pre-image unchecked, to make testing easy.
// This method is DANGEROUS. And NOT FOR PRODUCTION. /// This method is DANGEROUS. And NOT FOR PRODUCTION.
function cheat( function cheat(
uint256 partOffset, uint256 partOffset,
bytes32 key, bytes32 key,
...@@ -53,37 +51,43 @@ contract PreimageOracle { ...@@ -53,37 +51,43 @@ contract PreimageOracle {
preimageLengths[key] = size; preimageLengths[key] = size;
} }
/// @notice Computes and returns the key for a pre-image. /// @inheritdoc IPreimageOracle
/// @param _preimage The pre-image. function loadLocalData(
/// @return key_ The pre-image key. uint256 _ident,
function computePreimageKey(bytes calldata _preimage) external pure returns (bytes32 key_) { bytes32 _word,
uint256 size; uint256 _size,
assembly { uint256 _partOffset
size := calldataload(0x24) ) external returns (bytes32 key_) {
// Compute the localized key from the given local identifier.
// Leave slots 0x40 and 0x60 untouched, key_ = PreimageKeyLib.localizeIdent(_ident);
// and everything after as scratch-memory.
let ptr := 0x80
// Store size as a big-endian uint64 at the start of pre-image // Revert if the given part offset is not within bounds.
mstore(ptr, shl(192, size)) if (_partOffset > _size + 8 || _size > 32) {
ptr := add(ptr, 8) revert PartOffsetOOB();
}
// Copy preimage payload into memory so we can hash and read it. // Prepare the local data part at the given offset
calldatacopy(ptr, _preimage.offset, size) bytes32 part;
assembly {
// Clean the memory in [0x20, 0x40)
mstore(0x20, 0x00)
// Compute the pre-image keccak256 hash (aka the pre-image key) // Store the full local data in scratch space.
let h := keccak256(ptr, size) mstore(0x00, shl(192, _size))
mstore(0x08, _word)
// Mask out prefix byte, replace with type 2 byte // Prepare the local data part at the requested offset.
key_ := or(and(h, not(shl(248, 0xFF))), shl(248, 2)) part := mload(_partOffset)
} }
// Store the first part with `_partOffset`.
preimagePartOk[key_][_partOffset] = true;
preimageParts[key_][_partOffset] = part;
// Assign the length of the preimage at the localized key.
preimageLengths[key_] = _size;
} }
/// @notice Prepares a pre-image to be read by keccak256 key, starting at /// @inheritdoc IPreimageOracle
/// the given offset and up to 32 bytes (clipped at pre-image length, if out of data).
/// @param _partOffset The offset of the pre-image to read.
/// @param _preimage The preimage data.
function loadKeccak256PreimagePart(uint256 _partOffset, bytes calldata _preimage) external { function loadKeccak256PreimagePart(uint256 _partOffset, bytes calldata _preimage) external {
uint256 size; uint256 size;
bytes32 key; bytes32 key;
...@@ -94,7 +98,10 @@ contract PreimageOracle { ...@@ -94,7 +98,10 @@ contract PreimageOracle {
// revert if part offset > size+8 (i.e. parts must be within bounds) // revert if part offset > size+8 (i.e. parts must be within bounds)
if gt(_partOffset, add(size, 8)) { if gt(_partOffset, add(size, 8)) {
revert(0, 0) // Store "PartOffsetOOB()"
mstore(0, 0xfe254987)
// Revert with "PartOffsetOOB()"
revert(0x1c, 4)
} }
// we leave solidity slots 0x40 and 0x60 untouched, // we leave solidity slots 0x40 and 0x60 untouched,
// and everything after as scratch-memory. // and everything after as scratch-memory.
......
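The loadLocalData assembly above lays the preimage out as an 8-byte big-endian length prefix followed by the 32-byte data word, and the stored "part" is simply the 32 bytes starting at the requested offset of that buffer. A hedged Go sketch of that layout; padding beyond the preimage with zeroes is an assumption about how out-of-range bytes read back:

package oracle

import (
	"encoding/binary"
	"fmt"
)

// localDataPart reproduces the memory layout used by loadLocalData above:
// bytes [0,8) hold the size as a big-endian uint64, bytes [8,40) hold the data
// word, and the returned part is the 32 bytes starting at partOffset.
func localDataPart(word [32]byte, size uint64, partOffset uint64) ([32]byte, error) {
	var part [32]byte
	if partOffset > size+8 || size > 32 {
		return part, fmt.Errorf("PartOffsetOOB: offset %d, size %d", partOffset, size)
	}
	buf := make([]byte, 8+32+32) // prefix + word + slack so any in-bounds read fits
	binary.BigEndian.PutUint64(buf[:8], size)
	copy(buf[8:], word[:])
	copy(part[:], buf[partOffset:partOffset+32])
	return part, nil
}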
// SPDX-License-Identifier: MIT // SPDX-License-Identifier: MIT
pragma solidity 0.7.6; pragma solidity 0.8.15;
/// @title IPreimageOracle /// @title IPreimageOracle
/// @notice Interface for a preimage oracle. /// @notice Interface for a preimage oracle.
...@@ -14,10 +14,31 @@ interface IPreimageOracle { ...@@ -14,10 +14,31 @@ interface IPreimageOracle {
view view
returns (bytes32 dat_, uint256 datLen_); returns (bytes32 dat_, uint256 datLen_);
/// @notice Computes and returns the key for a pre-image. /// @notice Loads a local data part into the preimage oracle.
/// @param _preimage The pre-image. /// @param _ident The identifier of the local data.
/// @return key_ The pre-image key. /// @param _word The local data word.
function computePreimageKey(bytes calldata _preimage) external pure returns (bytes32 key_); /// @param _size The number of bytes in `_word` to load.
/// @param _partOffset The offset of the local data part to write to the oracle.
/// @dev The local data parts are loaded into the preimage oracle under the context
/// of the caller - no other account can write to the caller's context
/// specific data.
///
/// There are 5 local data identifiers:
/// ┌────────────┬────────────────────────┐
/// │ Identifier │ Data │
/// ├────────────┼────────────────────────┤
/// │ 1 │ L1 Head Hash (bytes32) │
/// │ 2 │ Output Root (bytes32) │
/// │ 3 │ Root Claim (bytes32) │
/// │ 4 │ L2 Block Number (u64) │
/// │ 5 │ Chain ID (u64) │
/// └────────────┴────────────────────────┘
function loadLocalData(
uint256 _ident,
bytes32 _word,
uint256 _size,
uint256 _partOffset
) external returns (bytes32 key_);
/// @notice Prepares a preimage to be read by keccak256 key, starting at /// @notice Prepares a preimage to be read by keccak256 key, starting at
/// the given offset and up to 32 bytes (clipped at preimage length, if out of data). /// the given offset and up to 32 bytes (clipped at preimage length, if out of data).
......
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;
/// @notice Thrown when a passed part offset is out of bounds.
error PartOffsetOOB();
...@@ -5,9 +5,11 @@ import { IDisputeGame } from "./interfaces/IDisputeGame.sol"; ...@@ -5,9 +5,11 @@ import { IDisputeGame } from "./interfaces/IDisputeGame.sol";
import { IFaultDisputeGame } from "./interfaces/IFaultDisputeGame.sol"; import { IFaultDisputeGame } from "./interfaces/IFaultDisputeGame.sol";
import { IInitializable } from "./interfaces/IInitializable.sol"; import { IInitializable } from "./interfaces/IInitializable.sol";
import { IBondManager } from "./interfaces/IBondManager.sol"; import { IBondManager } from "./interfaces/IBondManager.sol";
import { IBigStepper } from "./interfaces/IBigStepper.sol"; import { IBigStepper, IPreimageOracle } from "./interfaces/IBigStepper.sol";
import { L2OutputOracle } from "../L1/L2OutputOracle.sol";
import { Clone } from "../libraries/Clone.sol"; import { Clone } from "../libraries/Clone.sol";
import { Types } from "../libraries/Types.sol";
import { Semver } from "../universal/Semver.sol"; import { Semver } from "../universal/Semver.sol";
import { LibHashing } from "./lib/LibHashing.sol"; import { LibHashing } from "./lib/LibHashing.sol";
import { LibPosition } from "./lib/LibPosition.sol"; import { LibPosition } from "./lib/LibPosition.sol";
...@@ -33,9 +35,12 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver { ...@@ -33,9 +35,12 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver {
/// @notice The duration of the game. /// @notice The duration of the game.
Duration public immutable GAME_DURATION; Duration public immutable GAME_DURATION;
/// @notice A hypervisor that performs single instruction steps on a fault proof program trace. /// @notice An onchain VM that performs single instruction steps on a fault proof program trace.
IBigStepper public immutable VM; IBigStepper public immutable VM;
/// @notice The trusted L2OutputOracle contract.
L2OutputOracle public immutable L2_OUTPUT_ORACLE;
/// @notice The root claim's position is always at gindex 1. /// @notice The root claim's position is always at gindex 1.
Position internal constant ROOT_POSITION = Position.wrap(1); Position internal constant ROOT_POSITION = Position.wrap(1);
...@@ -48,6 +53,9 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver { ...@@ -48,6 +53,9 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver {
/// @inheritdoc IDisputeGame /// @inheritdoc IDisputeGame
IBondManager public bondManager; IBondManager public bondManager;
/// @inheritdoc IFaultDisputeGame
Hash public l1Head;
/// @notice An append-only array of all claims made during the dispute game. /// @notice An append-only array of all claims made during the dispute game.
ClaimData[] public claimData; ClaimData[] public claimData;
...@@ -55,16 +63,24 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver { ...@@ -55,16 +63,24 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver {
mapping(ClaimHash => bool) internal claims; mapping(ClaimHash => bool) internal claims;
/// @param _absolutePrestate The absolute prestate of the instruction trace. /// @param _absolutePrestate The absolute prestate of the instruction trace.
/// @param _maxGameDepth The maximum depth of bisection.
/// @param _gameDuration The duration of the game.
/// @param _vm An onchain VM that performs single instruction steps on a fault proof program
/// trace.
/// @param _l2oo The trusted L2OutputOracle contract.
/// @custom:semver 0.0.4
constructor( constructor(
Claim _absolutePrestate, Claim _absolutePrestate,
uint256 _maxGameDepth, uint256 _maxGameDepth,
Duration _gameDuration, Duration _gameDuration,
IBigStepper _vm IBigStepper _vm,
) Semver(0, 0, 3) { L2OutputOracle _l2oo
) Semver(0, 0, 4) {
ABSOLUTE_PRESTATE = _absolutePrestate; ABSOLUTE_PRESTATE = _absolutePrestate;
MAX_GAME_DEPTH = _maxGameDepth; MAX_GAME_DEPTH = _maxGameDepth;
GAME_DURATION = _gameDuration; GAME_DURATION = _gameDuration;
VM = _vm; VM = _vm;
L2_OUTPUT_ORACLE = _l2oo;
} }
//////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////
...@@ -241,6 +257,38 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver { ...@@ -241,6 +257,38 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver {
move(_parentIndex, _claim, false); move(_parentIndex, _claim, false);
} }
/// @inheritdoc IFaultDisputeGame
function addLocalData(uint256 _ident, uint256 _partOffset) external {
// INVARIANT: Local data can only be added if the game is currently in progress.
if (status != GameStatus.IN_PROGRESS) revert GameNotInProgress();
IPreimageOracle oracle = VM.oracle();
if (_ident == 1) {
// Load the L1 head hash into the game's local context in the preimage oracle.
oracle.loadLocalData(_ident, Hash.unwrap(l1Head), 32, _partOffset);
} else if (_ident == 2) {
// Load the earliest output root that commits to the passed L2 block number
// into the game's local context in the preimage oracle.
Types.OutputProposal memory proposal = L2_OUTPUT_ORACLE.getL2OutputAfter(
l2BlockNumber()
);
oracle.loadLocalData(_ident, proposal.outputRoot, 32, _partOffset);
} else if (_ident == 3) {
// Load the root claim into the game's local context in the preimage oracle.
oracle.loadLocalData(_ident, Claim.unwrap(rootClaim()), 32, _partOffset);
} else if (_ident == 4) {
// Load the L2 block number into the game's local context in the preimage oracle.
// The L2 block number is stored as a big-endian uint64 in the upper 8 bytes of the
// passed word.
oracle.loadLocalData(_ident, bytes32(l2BlockNumber() << 192), 8, _partOffset);
} else if (_ident == 5) {
// Load the chain ID into the game's local context in the preimage oracle.
// The chain ID is stored as a big-endian uint64 in the upper 8 bytes of the
// passed word.
oracle.loadLocalData(_ident, bytes32(block.chainid << 192), 8, _partOffset);
}
}
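As a hypothetical sketch of how this entry point would typically be exercised, a helper could seed all five identifiers before an on-chain `step` that needs them; the interface path, the helper contract, and the use of part offset 0 are assumptions rather than anything shown in this diff. Note that identifiers 4 and 5 carry uint64 values packed big-endian into the upper 8 bytes of the word (the 192-bit left shift above), which is why only 8 bytes are loaded for them.

// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;

import { IFaultDisputeGame } from "./interfaces/IFaultDisputeGame.sol";

/// @notice Hypothetical helper that seeds every local data identifier of a
///         game. Identifiers 1-5 correspond to the L1 head hash, output
///         root, root claim, L2 block number, and chain ID.
contract LocalDataSeeder {
    function seedAll(IFaultDisputeGame _game) external {
        for (uint256 ident = 1; ident <= 5; ident++) {
            // Part offset 0 is assumed to address the first part of each word.
            _game.addLocalData(ident, 0);
        }
    }
}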
/// @inheritdoc IFaultDisputeGame /// @inheritdoc IFaultDisputeGame
function l2BlockNumber() public pure returns (uint256 l2BlockNumber_) { function l2BlockNumber() public pure returns (uint256 l2BlockNumber_) {
l2BlockNumber_ = _getArgUint256(0x20); l2BlockNumber_ = _getArgUint256(0x20);
...@@ -337,8 +385,6 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver { ...@@ -337,8 +385,6 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver {
/// @inheritdoc IDisputeGame /// @inheritdoc IDisputeGame
function extraData() public pure returns (bytes memory extraData_) { function extraData() public pure returns (bytes memory extraData_) {
// The extra data starts at the second word within the cwia calldata. // The extra data starts at the second word within the cwia calldata.
// TODO: What data do we need to pass along to this contract from the factory?
// Block hash, preimage data, etc.?
extraData_ = _getArgDynBytes(0x20, 0x20); extraData_ = _getArgDynBytes(0x20, 0x20);
} }
...@@ -378,6 +424,9 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver { ...@@ -378,6 +424,9 @@ contract FaultDisputeGame is IFaultDisputeGame, Clone, Semver {
countered: false countered: false
}) })
); );
// Set the L1 head hash at the time of the game's creation.
l1Head = Hash.wrap(blockhash(block.number - 1));
} }
/// @notice Returns the length of the `claimData` array. /// @notice Returns the length of the `claimData` array.
......
// SPDX-License-Identifier: MIT // SPDX-License-Identifier: MIT
pragma solidity ^0.8.15; pragma solidity ^0.8.15;
import { IPreimageOracle } from "../../cannon/interfaces/IPreimageOracle.sol";
/// @title IBigStepper /// @title IBigStepper
/// @notice An interface for a contract with a state transition function that /// @notice An interface for a contract with a state transition function that
/// will accept a pre state and return a post state. /// will accept a pre state and return a post state.
...@@ -31,4 +33,7 @@ interface IBigStepper { ...@@ -31,4 +33,7 @@ interface IBigStepper {
function step(bytes calldata _stateData, bytes calldata _proof) function step(bytes calldata _stateData, bytes calldata _proof)
external external
returns (bytes32 postState_); returns (bytes32 postState_);
/// @notice Returns the preimage oracle used by the stepper.
function oracle() external view returns (IPreimageOracle oracle_);
} }
...@@ -22,19 +22,18 @@ interface IDisputeGame is IInitializable { ...@@ -22,19 +22,18 @@ interface IDisputeGame is IInitializable {
function status() external view returns (GameStatus status_); function status() external view returns (GameStatus status_);
/// @notice Getter for the game type. /// @notice Getter for the game type.
/// @dev `clones-with-immutable-args` argument #1
/// @dev The reference impl should be entirely different depending on the type (fault, validity) /// @dev The reference impl should be entirely different depending on the type (fault, validity)
/// i.e. The game type should indicate the security model. /// i.e. The game type should indicate the security model.
/// @return gameType_ The type of proof system being used. /// @return gameType_ The type of proof system being used.
function gameType() external pure returns (GameType gameType_); function gameType() external pure returns (GameType gameType_);
/// @notice Getter for the root claim. /// @notice Getter for the root claim.
/// @dev `clones-with-immutable-args` argument #2 /// @dev `clones-with-immutable-args` argument #1
/// @return rootClaim_ The root claim of the DisputeGame. /// @return rootClaim_ The root claim of the DisputeGame.
function rootClaim() external pure returns (Claim rootClaim_); function rootClaim() external pure returns (Claim rootClaim_);
/// @notice Getter for the extra data. /// @notice Getter for the extra data.
/// @dev `clones-with-immutable-args` argument #3 /// @dev `clones-with-immutable-args` argument #2
/// @return extraData_ Any extra data supplied to the dispute game contract by the creator. /// @return extraData_ Any extra data supplied to the dispute game contract by the creator.
function extraData() external pure returns (bytes memory extraData_); function extraData() external pure returns (bytes memory extraData_);
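With the game type dropped from the clone arguments, the immutable-args layout now starts with the root claim. A hypothetical sketch of how a creator would supply those arguments through a factory; the factory interface shape and the import path are assumed, not shown in this diff:

// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;

import { Claim, GameType } from "../libraries/DisputeTypes.sol";

/// @dev Assumed factory shape; the real factory is not part of this diff.
interface IDisputeGameFactoryLike {
    function create(
        GameType _gameType,
        Claim _rootClaim,
        bytes calldata _extraData
    ) external returns (address proxy_);
}

/// @notice Hypothetical creator showing the clone-arg layout the getters read back:
///         `rootClaim()` is argument #1 (offset 0x00) and `extraData()` is
///         argument #2 (offset 0x20), here the ABI-encoded L2 block number.
contract GameCreatorExample {
    function createGame(
        IDisputeGameFactoryLike _factory,
        GameType _gameType,
        Claim _rootClaim,
        uint256 _l2BlockNumber
    ) external returns (address proxy_) {
        proxy_ = _factory.create(_gameType, _rootClaim, abi.encode(_l2BlockNumber));
    }
}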
......
...@@ -53,6 +53,14 @@ interface IFaultDisputeGame is IDisputeGame { ...@@ -53,6 +53,14 @@ interface IFaultDisputeGame is IDisputeGame {
bytes calldata _proof bytes calldata _proof
) external; ) external;
/// @notice Posts the requested local data to the VM's `PreimageOracle`.
/// @param _ident The local identifier of the data to post.
/// @param _partOffset The offset of the data to post.
function addLocalData(uint256 _ident, uint256 _partOffset) external;
/// @notice Returns the L1 block hash at the time of the game's creation.
function l1Head() external view returns (Hash l1Head_);
/// @notice The l2BlockNumber that the `rootClaim` commits to. The trace being bisected within /// @notice The l2BlockNumber that the `rootClaim` commits to. The trace being bisected within
/// the game is from `l2BlockNumber - 1` -> `l2BlockNumber`. /// the game is from `l2BlockNumber - 1` -> `l2BlockNumber`.
/// @return l2BlockNumber_ The l2BlockNumber that the `rootClaim` commits to. /// @return l2BlockNumber_ The l2BlockNumber that the `rootClaim` commits to.
......
...@@ -64,6 +64,7 @@ contract AssetReceiver is Transactor { ...@@ -64,6 +64,7 @@ contract AssetReceiver is Transactor {
function withdrawETH(address payable _to, uint256 _amount) public onlyOwner { function withdrawETH(address payable _to, uint256 _amount) public onlyOwner {
// slither-disable-next-line reentrancy-unlimited-gas // slither-disable-next-line reentrancy-unlimited-gas
(bool success, ) = _to.call{ value: _amount }(""); (bool success, ) = _to.call{ value: _amount }("");
success; // Suppress warning; We ignore the low-level call result.
emit WithdrewETH(msg.sender, _to, _amount); emit WithdrewETH(msg.sender, _to, _amount);
} }
......
...@@ -242,7 +242,7 @@ as the engine implementation can sync state faster through methods like [snap-sy ...@@ -242,7 +242,7 @@ as the engine implementation can sync state faster through methods like [snap-sy
### Happy-path sync ### Happy-path sync
1. The rollup node informs the engine of the L2 chain head, unconditionally (part of regular node operation): 1. The rollup node informs the engine of the L2 chain head, unconditionally (part of regular node operation):
- [`engine_newPayloadV1`][engine_newPayloadV1] is called with latest L2 block derived from L1. - [`engine_newPayloadV1`][engine_newPayloadV1] is called with latest L2 block received from P2P.
- [`engine_forkchoiceUpdatedV1`][engine_forkchoiceUpdatedV1] is called with the current - [`engine_forkchoiceUpdatedV1`][engine_forkchoiceUpdatedV1] is called with the current
`unsafe`/`safe`/`finalized` L2 block hashes. `unsafe`/`safe`/`finalized` L2 block hashes.
2. The engine requests headers from peers, in reverse till the parent hash matches the local chain 2. The engine requests headers from peers, in reverse till the parent hash matches the local chain
......