Commit f6480169 authored by Adrian Sutton's avatar Adrian Sutton Committed by GitHub

op-challenger: Load proposed large preimages (#9069)

Introduces the infrastructure for monitoring large pre-image proposals, up to loading the list of in-progress games from the contract.
Actual content of the preimages is not yet loaded or checked.
parent d1d92907
......@@ -190,19 +190,11 @@ func (f *FaultDisputeGameContract) GetClaim(ctx context.Context, idx uint64) (ty
}
func (f *FaultDisputeGameContract) GetAllClaims(ctx context.Context) ([]types.Claim, error) {
count, err := f.GetClaimCount(ctx)
results, err := batching.ReadArray(ctx, f.multiCaller, batching.BlockLatest, f.contract.Call(methodClaimCount), func(i *big.Int) *batching.ContractCall {
return f.contract.Call(methodClaim, i)
})
if err != nil {
return nil, fmt.Errorf("failed to load claim count: %w", err)
}
calls := make([]*batching.ContractCall, count)
for i := uint64(0); i < count; i++ {
calls[i] = f.contract.Call(methodClaim, new(big.Int).SetUint64(i))
}
results, err := f.multiCaller.Call(ctx, batching.BlockLatest, calls...)
if err != nil {
return nil, fmt.Errorf("failed to fetch claim data: %w", err)
return nil, fmt.Errorf("failed to load claims: %w", err)
}
var claims []types.Claim
......
package contracts
import (
"context"
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum-optimism/optimism/op-challenger/game/keccak/matrix"
gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types"
"github.com/ethereum-optimism/optimism/op-service/sources/batching"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
"github.com/ethereum/go-ethereum/common"
......@@ -17,6 +19,8 @@ const (
methodAddLeavesLPP = "addLeavesLPP"
methodSqueezeLPP = "squeezeLPP"
methodLoadKeccak256PreimagePart = "loadKeccak256PreimagePart"
methodProposalCount = "proposalCount"
methodProposals = "proposals"
)
// PreimageOracleContract is a binding that works with contracts implementing the IPreimageOracle interface
......@@ -137,3 +141,25 @@ func abiEncodeStateMatrix(stateMatrix *matrix.StateMatrix) bindings.LibKeccakSta
}
return bindings.LibKeccakStateMatrix{State: *stateSlice}
}
// GetActivePreimages loads the metadata for all large preimage proposals
// currently tracked by the oracle contract, as of the specified block.
// It batch-reads the proposals array: one call for the count, then one call
// per entry, all resolved against the same block hash for consistency.
func (c *PreimageOracleContract) GetActivePreimages(ctx context.Context, blockHash common.Hash) ([]gameTypes.LargePreimageMetaData, error) {
	results, err := batching.ReadArray(ctx, c.multiCaller, batching.BlockByHash(blockHash), c.contract.Call(methodProposalCount), func(i *big.Int) *batching.ContractCall {
		return c.contract.Call(methodProposals, i)
	})
	if err != nil {
		// Fixed: previous message said "claims", copy-pasted from the
		// dispute game contract; this method loads preimage proposals.
		return nil, fmt.Errorf("failed to load proposals: %w", err)
	}
	// Pre-size to avoid repeated slice growth; one entry per call result.
	proposals := make([]gameTypes.LargePreimageMetaData, 0, len(results))
	for idx, result := range results {
		proposals = append(proposals, c.decodeProposal(result, idx))
	}
	return proposals, nil
}
// decodeProposal unpacks a single `proposals` call result into the metadata
// struct used by the challenger. The idx parameter is currently unused.
func (c *PreimageOracleContract) decodeProposal(result *batching.CallResult, idx int) gameTypes.LargePreimageMetaData {
	var metadata gameTypes.LargePreimageMetaData
	metadata.Claimant = result.GetAddress(0)
	metadata.UUID = result.GetBigInt(1)
	return metadata
}
package contracts
import (
"context"
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types"
"github.com/ethereum-optimism/optimism/op-challenger/game/keccak/matrix"
gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types"
"github.com/ethereum-optimism/optimism/op-service/sources/batching"
batchingTest "github.com/ethereum-optimism/optimism/op-service/sources/batching/test"
"github.com/ethereum/go-ethereum/common"
......@@ -104,6 +106,52 @@ func TestPreimageOracleContract_Squeeze(t *testing.T) {
stubRpc.VerifyTxCandidate(tx)
}
// TestGetActivePreimages verifies that the proposal count and each proposal
// entry are read from the contract and decoded into metadata in order.
func TestGetActivePreimages(t *testing.T) {
	stubRpc, oracle := setupPreimageOracleTest(t)
	blockHash := common.Hash{0xaa}
	block := batching.BlockByHash(blockHash)

	// The contract reports three in-progress proposals.
	stubRpc.SetResponse(
		oracleAddr,
		methodProposalCount,
		block,
		[]interface{}{},
		[]interface{}{big.NewInt(3)})

	expected := []gameTypes.LargePreimageMetaData{
		{Claimant: common.Address{0xaa}, UUID: big.NewInt(1111)},
		{Claimant: common.Address{0xbb}, UUID: big.NewInt(2222)},
		{Claimant: common.Address{0xcc}, UUID: big.NewInt(3333)},
	}
	expectGetProposals(stubRpc, block, expected...)

	actual, err := oracle.GetActivePreimages(context.Background(), blockHash)
	require.NoError(t, err)
	require.Equal(t, expected, actual)
}
// expectGetProposals registers a stubbed `proposals(i)` response for each
// supplied proposal, using its position in the argument list as the index.
func expectGetProposals(stubRpc *batchingTest.AbiBasedRpc, block batching.Block, proposals ...gameTypes.LargePreimageMetaData) {
	for idx := range proposals {
		expectGetProposal(stubRpc, block, int64(idx), proposals[idx])
	}
}
// expectGetProposal stubs a single `proposals(idx)` call to return the
// claimant and UUID of the given proposal.
func expectGetProposal(stubRpc *batchingTest.AbiBasedRpc, block batching.Block, idx int64, proposal gameTypes.LargePreimageMetaData) {
	args := []interface{}{big.NewInt(idx)}
	rets := []interface{}{proposal.Claimant, proposal.UUID}
	stubRpc.SetResponse(oracleAddr, methodProposals, block, args, rets)
}
func setupPreimageOracleTest(t *testing.T) (*batchingTest.AbiBasedRpc, *PreimageOracleContract) {
oracleAbi, err := bindings.PreimageOracleMetaData.GetAbi()
require.NoError(t, err)
......
package keccak
import (
"context"
"sync"
"github.com/ethereum-optimism/optimism/op-challenger/game/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
// LargePreimageScheduler coordinates checking of large preimage proposals
// across a set of oracle contracts. New work is submitted via Schedule and
// processed by a single background goroutine started by Start.
type LargePreimageScheduler struct {
	log log.Logger
	// ch carries the latest block hash to verify against; buffered with
	// capacity 1 so Schedule can drop updates while a check is in flight.
	ch      chan common.Hash
	oracles []types.LargePreimageOracle
	// cancel stops the run goroutine; set by Start, invoked by Close.
	cancel func()
	wg     sync.WaitGroup
}
// NewLargePreimageScheduler creates a scheduler that verifies large preimage
// proposals in the supplied oracles. Start must be called before scheduling.
func NewLargePreimageScheduler(logger log.Logger, oracles []types.LargePreimageOracle) *LargePreimageScheduler {
	scheduler := &LargePreimageScheduler{
		log:     logger,
		oracles: oracles,
		// Buffer of 1 lets Schedule hand off one pending block hash
		// without blocking the caller.
		ch: make(chan common.Hash, 1),
	}
	return scheduler
}
// Start launches the background worker goroutine. The goroutine runs until
// Close is called or the supplied context is cancelled.
func (s *LargePreimageScheduler) Start(ctx context.Context) {
	runCtx, cancel := context.WithCancel(ctx)
	s.cancel = cancel
	s.wg.Add(1)
	go s.run(runCtx)
}
// Close stops the background goroutine and blocks until it has exited.
// It always returns nil.
//
// Fixed: guard the cancel func so that calling Close on a scheduler that
// was never started does not panic with a nil function call.
func (s *LargePreimageScheduler) Close() error {
	if s.cancel != nil {
		s.cancel()
	}
	s.wg.Wait()
	return nil
}
// run is the scheduler's worker loop. It processes scheduled block hashes
// until the context is cancelled. Verification failures are logged rather
// than propagated so a single bad block does not stop the loop.
func (s *LargePreimageScheduler) run(ctx context.Context) {
	defer s.wg.Done()
	for {
		select {
		case <-ctx.Done():
			return
		case blockHash := <-s.ch:
			if err := s.verifyPreimages(ctx, blockHash); err != nil {
				s.log.Error("Failed to verify large preimages", "err", err)
			}
		}
	}
}
// Schedule requests verification of large preimage proposals as of the given
// block. The send is non-blocking: if a verification is already pending the
// request is dropped, since a later block will supersede it. The block number
// is currently unused. Always returns nil.
func (s *LargePreimageScheduler) Schedule(blockHash common.Hash, _ uint64) error {
	select {
	case s.ch <- blockHash:
	default:
		// Already busy processing, skip this update
	}
	return nil
}
// verifyPreimages checks the large preimage proposals in every registered
// oracle. Failures are logged per-oracle so one failing oracle does not
// prevent the others from being checked; the method itself always returns nil.
func (s *LargePreimageScheduler) verifyPreimages(ctx context.Context, blockHash common.Hash) error {
	for _, oracle := range s.oracles {
		if err := s.verifyOraclePreimages(ctx, oracle, blockHash); err != nil {
			// Fixed: log.Error takes a message plus key/value pairs, not a
			// printf-style format string — the original passed "%v: %w"
			// verbs and positional args, producing malformed log output.
			s.log.Error("Failed to verify preimages in oracle", "oracle", oracle.Addr(), "err", err)
		}
	}
	return nil
}
// verifyOraclePreimages loads the active proposals from a single oracle.
// The proposal contents are not yet inspected; only the load is checked.
func (s *LargePreimageScheduler) verifyOraclePreimages(ctx context.Context, oracle types.LargePreimageOracle, blockHash common.Hash) error {
	if _, err := oracle.GetActivePreimages(ctx, blockHash); err != nil {
		return err
	}
	return nil
}
package keccak
import (
"context"
"sync"
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-challenger/game/types"
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
// TestScheduleNextCheck verifies a scheduled block hash results in the
// oracle's active preimages being loaded by the background worker.
func TestScheduleNextCheck(t *testing.T) {
	ctx := context.Background()
	logger := testlog.Logger(t, log.LvlInfo)
	oracle := &stubOracle{}
	scheduler := NewLargePreimageScheduler(logger, []types.LargePreimageOracle{oracle})
	scheduler.Start(ctx)
	defer scheduler.Close()

	require.NoError(t, scheduler.Schedule(common.Hash{0xaa}, 3))

	// The check runs asynchronously, so poll until the stub records a call.
	require.Eventually(t, func() bool {
		return oracle.GetPreimagesCount() == 1
	}, 10*time.Second, 10*time.Millisecond)
}
// stubOracle is a test double for types.LargePreimageOracle that records
// how many times GetActivePreimages is called.
type stubOracle struct {
	// m guards getPreimagesCount, which is read from the test goroutine
	// while the scheduler's worker goroutine increments it.
	m                 sync.Mutex
	addr              common.Address
	getPreimagesCount int
}
// Addr returns the stub's configured contract address.
func (s *stubOracle) Addr() common.Address {
	return s.addr
}
// GetActivePreimages records the call and returns no proposals.
func (s *stubOracle) GetActivePreimages(_ context.Context, _ common.Hash) ([]types.LargePreimageMetaData, error) {
	s.m.Lock()
	defer s.m.Unlock()
	s.getPreimagesCount++
	return nil, nil
}
// GetPreimagesCount returns how many times GetActivePreimages has been
// called. Safe for concurrent use.
func (s *stubOracle) GetPreimagesCount() int {
	s.m.Lock()
	defer s.m.Unlock()
	return s.getPreimagesCount
}
......@@ -30,11 +30,16 @@ type gameScheduler interface {
Schedule([]types.GameMetadata, uint64) error
}
// preimageScheduler schedules verification of large preimage proposals as
// new L1 blocks arrive. The monitor calls Schedule on each new L1 head.
type preimageScheduler interface {
	Schedule(blockHash common.Hash, blockNumber uint64) error
}
type gameMonitor struct {
logger log.Logger
clock clock.Clock
source gameSource
scheduler gameScheduler
preimages preimageScheduler
gameWindow time.Duration
fetchBlockNumber blockNumberFetcher
allowedGames []common.Address
......@@ -60,6 +65,7 @@ func newGameMonitor(
cl clock.Clock,
source gameSource,
scheduler gameScheduler,
preimages preimageScheduler,
gameWindow time.Duration,
fetchBlockNumber blockNumberFetcher,
allowedGames []common.Address,
......@@ -69,6 +75,7 @@ func newGameMonitor(
logger: logger,
clock: cl,
scheduler: scheduler,
preimages: preimages,
source: source,
gameWindow: gameWindow,
fetchBlockNumber: fetchBlockNumber,
......@@ -126,6 +133,9 @@ func (m *gameMonitor) onNewL1Head(ctx context.Context, sig eth.L1BlockRef) {
if err := m.progressGames(ctx, sig.Hash, sig.Number); err != nil {
m.logger.Error("Failed to progress games", "err", err)
}
if err := m.preimages.Schedule(sig.Hash, sig.Number); err != nil {
m.logger.Error("Failed to validate large preimages", "err", err)
}
}
func (m *gameMonitor) resubscribeFunction() event.ResubscribeErrFunc {
......
......@@ -24,20 +24,20 @@ func TestMonitorMinGameTimestamp(t *testing.T) {
t.Parallel()
t.Run("zero game window returns zero", func(t *testing.T) {
monitor, _, _, _ := setupMonitorTest(t, []common.Address{})
monitor, _, _, _, _ := setupMonitorTest(t, []common.Address{})
monitor.gameWindow = time.Duration(0)
require.Equal(t, monitor.minGameTimestamp(), uint64(0))
})
t.Run("non-zero game window with zero clock", func(t *testing.T) {
monitor, _, _, _ := setupMonitorTest(t, []common.Address{})
monitor, _, _, _, _ := setupMonitorTest(t, []common.Address{})
monitor.gameWindow = time.Minute
monitor.clock = clock.NewDeterministicClock(time.Unix(0, 0))
require.Equal(t, monitor.minGameTimestamp(), uint64(0))
})
t.Run("minimum computed correctly", func(t *testing.T) {
monitor, _, _, _ := setupMonitorTest(t, []common.Address{})
monitor, _, _, _, _ := setupMonitorTest(t, []common.Address{})
monitor.gameWindow = time.Minute
frozen := time.Unix(int64(time.Hour.Seconds()), 0)
monitor.clock = clock.NewDeterministicClock(frozen)
......@@ -52,7 +52,7 @@ func TestMonitorGames(t *testing.T) {
t.Run("Schedules games", func(t *testing.T) {
addr1 := common.Address{0xaa}
addr2 := common.Address{0xbb}
monitor, source, sched, mockHeadSource := setupMonitorTest(t, []common.Address{})
monitor, source, sched, mockHeadSource, preimages := setupMonitorTest(t, []common.Address{})
source.games = []types.GameMetadata{newFDG(addr1, 9999), newFDG(addr2, 9999)}
ctx, cancel := context.WithCancel(context.Background())
......@@ -91,12 +91,13 @@ func TestMonitorGames(t *testing.T) {
monitor.StopMonitoring()
require.Len(t, sched.Scheduled(), 1)
require.Equal(t, []common.Address{addr1, addr2}, sched.Scheduled()[0])
require.GreaterOrEqual(t, preimages.ScheduleCount(), 1, "Should schedule preimage checks")
})
t.Run("Resubscribes on error", func(t *testing.T) {
addr1 := common.Address{0xaa}
addr2 := common.Address{0xbb}
monitor, source, sched, mockHeadSource := setupMonitorTest(t, []common.Address{})
monitor, source, sched, mockHeadSource, preimages := setupMonitorTest(t, []common.Address{})
source.games = []types.GameMetadata{newFDG(addr1, 9999), newFDG(addr2, 9999)}
ctx, cancel := context.WithCancel(context.Background())
......@@ -138,11 +139,12 @@ func TestMonitorGames(t *testing.T) {
monitor.StopMonitoring()
require.NotEmpty(t, sched.Scheduled()) // We might get more than one update scheduled.
require.Equal(t, []common.Address{addr1, addr2}, sched.Scheduled()[0])
require.GreaterOrEqual(t, preimages.ScheduleCount(), 1, "Should schedule preimage checks")
})
}
func TestMonitorCreateAndProgressGameAgents(t *testing.T) {
monitor, source, sched, _ := setupMonitorTest(t, []common.Address{})
monitor, source, sched, _, _ := setupMonitorTest(t, []common.Address{})
addr1 := common.Address{0xaa}
addr2 := common.Address{0xbb}
......@@ -157,7 +159,7 @@ func TestMonitorCreateAndProgressGameAgents(t *testing.T) {
func TestMonitorOnlyScheduleSpecifiedGame(t *testing.T) {
addr1 := common.Address{0xaa}
addr2 := common.Address{0xbb}
monitor, source, sched, _ := setupMonitorTest(t, []common.Address{addr2})
monitor, source, sched, _, _ := setupMonitorTest(t, []common.Address{addr2})
source.games = []types.GameMetadata{newFDG(addr1, 9999), newFDG(addr2, 9999)}
require.NoError(t, monitor.progressGames(context.Background(), common.Hash{0x01}, 0))
......@@ -176,7 +178,7 @@ func newFDG(proxy common.Address, timestamp uint64) types.GameMetadata {
func setupMonitorTest(
t *testing.T,
allowedGames []common.Address,
) (*gameMonitor, *stubGameSource, *stubScheduler, *mockNewHeadSource) {
) (*gameMonitor, *stubGameSource, *stubScheduler, *mockNewHeadSource, *stubPreimageScheduler) {
logger := testlog.Logger(t, log.LvlDebug)
source := &stubGameSource{}
i := uint64(1)
......@@ -185,18 +187,20 @@ func setupMonitorTest(
return i, nil
}
sched := &stubScheduler{}
preimages := &stubPreimageScheduler{}
mockHeadSource := &mockNewHeadSource{}
monitor := newGameMonitor(
logger,
clock.SystemClock,
source,
sched,
preimages,
time.Duration(0),
fetchBlockNum,
allowedGames,
mockHeadSource,
)
return monitor, source, sched, mockHeadSource
return monitor, source, sched, mockHeadSource, preimages
}
type mockNewHeadSource struct {
......@@ -271,6 +275,7 @@ func (s *stubScheduler) Scheduled() [][]common.Address {
defer s.Unlock()
return s.scheduled
}
func (s *stubScheduler) Schedule(games []types.GameMetadata, blockNumber uint64) error {
s.Lock()
defer s.Unlock()
......@@ -281,3 +286,21 @@ func (s *stubScheduler) Schedule(games []types.GameMetadata, blockNumber uint64)
s.scheduled = append(s.scheduled, addrs)
return nil
}
// stubPreimageScheduler is a test double for preimageScheduler that counts
// Schedule calls. The embedded mutex guards scheduleCount across goroutines.
type stubPreimageScheduler struct {
	sync.Mutex
	scheduleCount int
}
// Schedule records that a preimage check was requested; arguments are ignored.
func (s *stubPreimageScheduler) Schedule(_ common.Hash, _ uint64) error {
	s.Lock()
	defer s.Unlock()
	s.scheduleCount++
	return nil
}
// ScheduleCount returns how many times Schedule has been called.
// Safe for concurrent use.
func (s *stubPreimageScheduler) ScheduleCount() int {
	s.Lock()
	defer s.Unlock()
	return s.scheduleCount
}
package registry
import (
"context"
"testing"
"github.com/ethereum-optimism/optimism/op-challenger/game/scheduler"
......@@ -61,3 +62,7 @@ type stubPreimageOracle common.Address
// Addr returns the address the stub oracle was created from.
func (s stubPreimageOracle) Addr() common.Address {
	return common.Address(s)
}
// GetActivePreimages satisfies types.LargePreimageOracle with no proposals.
func (s stubPreimageOracle) GetActivePreimages(_ context.Context, _ common.Hash) ([]types.LargePreimageMetaData, error) {
	return nil, nil
}
......@@ -7,6 +7,7 @@ import (
"io"
"sync/atomic"
"github.com/ethereum-optimism/optimism/op-challenger/game/keccak"
"github.com/ethereum-optimism/optimism/op-service/sources"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
......@@ -37,6 +38,8 @@ type Service struct {
faultGamesCloser fault.CloseFunc
preimages *keccak.LargePreimageScheduler
txMgr *txmgr.SimpleTxManager
loader *loader.GameLoader
......@@ -102,6 +105,9 @@ func (s *Service) initFromConfig(ctx context.Context, cfg *config.Config) error
if err := s.initScheduler(cfg); err != nil {
return fmt.Errorf("failed to init scheduler: %w", err)
}
if err := s.initLargePreimages(); err != nil {
return fmt.Errorf("failed to init large preimage scheduler: %w", err)
}
s.initMonitor(cfg)
......@@ -218,9 +224,14 @@ func (s *Service) initScheduler(cfg *config.Config) error {
return nil
}
// initLargePreimages creates the scheduler that monitors large preimage
// proposals in the oracles collected by the game type registry.
// It currently cannot fail but returns error for consistency with the
// other init methods.
func (s *Service) initLargePreimages() error {
	s.preimages = keccak.NewLargePreimageScheduler(s.logger, s.registry.Oracles())
	return nil
}
func (s *Service) initMonitor(cfg *config.Config) {
cl := clock.SystemClock
s.monitor = newGameMonitor(s.logger, cl, s.loader, s.sched, cfg.GameWindow, s.l1Client.BlockNumber, cfg.GameAllowlist, s.pollClient)
s.monitor = newGameMonitor(s.logger, cl, s.loader, s.sched, s.preimages, cfg.GameWindow, s.l1Client.BlockNumber, cfg.GameAllowlist, s.pollClient)
}
func (s *Service) Start(ctx context.Context) error {
......
package types
import (
"context"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
)
......@@ -42,6 +44,12 @@ type GameMetadata struct {
Proxy common.Address
}
// LargePreimageMetaData identifies a large preimage proposal in the oracle.
// The preimage content itself is not included.
type LargePreimageMetaData struct {
	Claimant common.Address
	UUID     *big.Int
}
// LargePreimageOracle provides access to the large preimage proposals
// tracked by a preimage oracle contract.
type LargePreimageOracle interface {
	// Addr returns the oracle contract's address.
	Addr() common.Address
	// GetActivePreimages loads the metadata of in-progress proposals as of
	// the given block.
	GetActivePreimages(ctx context.Context, blockHash common.Hash) ([]LargePreimageMetaData, error)
}
package batching
import (
"context"
"fmt"
"math/big"
)
// ReadArray uses batch calls to load all entries from an array.
// countCall is used to retrieve the current array length, then getCall is used to create calls for each element
// which are sent in a batch call.
// The returned *CallResult slice, contains a result for each entry in the array, in the same order as in the contract.
// ReadArray uses batch calls to load all entries from an array.
// countCall retrieves the current array length, then getCall creates the
// call for each element; the element calls are sent as a single batch.
// The returned *CallResult slice contains one result per array entry, in
// the same order as in the contract.
func ReadArray(ctx context.Context, caller *MultiCaller, block Block, countCall *ContractCall, getCall func(i *big.Int) *ContractCall) ([]*CallResult, error) {
	countResult, err := caller.SingleCall(ctx, block, countCall)
	if err != nil {
		return nil, fmt.Errorf("failed to load array length: %w", err)
	}
	length := countResult.GetBigInt(0).Uint64()
	calls := make([]*ContractCall, 0, length)
	for idx := uint64(0); idx < length; idx++ {
		calls = append(calls, getCall(new(big.Int).SetUint64(idx)))
	}
	results, err := caller.Call(ctx, block, calls...)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch array data: %w", err)
	}
	return results, nil
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment