Commit c21083c9 authored by refcell.eth's avatar refcell.eth Committed by GitHub

feat(op-challenger): large preimage uploader squeeze (#9093)

parent b1cdaf73
...@@ -82,6 +82,24 @@ func (c *PreimageOracleContract) AddLeaves(uuid *big.Int, startingBlockIndex *bi ...@@ -82,6 +82,24 @@ func (c *PreimageOracleContract) AddLeaves(uuid *big.Int, startingBlockIndex *bi
return call.ToTxCandidate() return call.ToTxCandidate()
} }
// CallSqueeze performs a read-only call of squeezeLPP on the preimage oracle
// contract without sending a transaction. It returns a non-nil error if the
// call would revert, letting the caller check whether a squeeze is currently
// possible before submitting a real transaction.
func (c *PreimageOracleContract) CallSqueeze(
	ctx context.Context,
	claimant common.Address,
	uuid *big.Int,
	stateMatrix *matrix.StateMatrix,
	preState keccakTypes.Leaf,
	preStateProof merkle.Proof,
	postState keccakTypes.Leaf,
	postStateProof merkle.Proof,
) error {
	call := c.contract.Call(methodSqueezeLPP, claimant, uuid, abiEncodeStateMatrix(stateMatrix), toPreimageOracleLeaf(preState), preStateProof, toPreimageOracleLeaf(postState), postStateProof)
	_, err := c.multiCaller.SingleCall(ctx, batching.BlockLatest, call)
	if err != nil {
		// The original message said "failed to call resolve claim", which was
		// copy-pasted from an unrelated method; report the actual operation.
		return fmt.Errorf("failed to call squeeze: %w", err)
	}
	return nil
}
func (c *PreimageOracleContract) Squeeze( func (c *PreimageOracleContract) Squeeze(
claimant common.Address, claimant common.Address,
uuid *big.Int, uuid *big.Int,
......
...@@ -19,8 +19,6 @@ import ( ...@@ -19,8 +19,6 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
var errNotSupported = errors.New("not supported")
var _ PreimageUploader = (*LargePreimageUploader)(nil) var _ PreimageUploader = (*LargePreimageUploader)(nil)
// MaxBlocksPerChunk is the maximum number of keccak blocks per chunk. // MaxBlocksPerChunk is the maximum number of keccak blocks per chunk.
...@@ -46,7 +44,7 @@ func NewLargePreimageUploader(logger log.Logger, txMgr txmgr.TxManager, contract ...@@ -46,7 +44,7 @@ func NewLargePreimageUploader(logger log.Logger, txMgr txmgr.TxManager, contract
} }
func (p *LargePreimageUploader) UploadPreimage(ctx context.Context, parent uint64, data *types.PreimageOracleData) error { func (p *LargePreimageUploader) UploadPreimage(ctx context.Context, parent uint64, data *types.PreimageOracleData) error {
calls, err := p.splitCalls(data) stateMatrix, calls, err := p.splitCalls(data)
if err != nil { if err != nil {
return fmt.Errorf("failed to split preimage into chunks for data with oracle offset %d: %w", data.OracleOffset, err) return fmt.Errorf("failed to split preimage into chunks for data with oracle offset %d: %w", data.OracleOffset, err)
} }
...@@ -83,10 +81,7 @@ func (p *LargePreimageUploader) UploadPreimage(ctx context.Context, parent uint6 ...@@ -83,10 +81,7 @@ func (p *LargePreimageUploader) UploadPreimage(ctx context.Context, parent uint6
return fmt.Errorf("failed to add leaves to large preimage with uuid: %s: %w", uuid, err) return fmt.Errorf("failed to add leaves to large preimage with uuid: %s: %w", uuid, err)
} }
// todo(proofs#467): track the challenge period starting once the full preimage is posted. return p.Squeeze(ctx, uuid, stateMatrix)
// todo(proofs#467): once the challenge period is over, call `squeezeLPP` on the preimage oracle contract.
return errNotSupported
} }
// newUUID generates a new unique identifier for the preimage by hashing the // newUUID generates a new unique identifier for the preimage by hashing the
...@@ -102,7 +97,8 @@ func (p *LargePreimageUploader) newUUID(data *types.PreimageOracleData) *big.Int ...@@ -102,7 +97,8 @@ func (p *LargePreimageUploader) newUUID(data *types.PreimageOracleData) *big.Int
} }
// splitChunks splits the preimage data into chunks of size [MaxChunkSize] (except the last chunk). // splitChunks splits the preimage data into chunks of size [MaxChunkSize] (except the last chunk).
func (p *LargePreimageUploader) splitCalls(data *types.PreimageOracleData) ([]keccakTypes.InputData, error) { // It also returns the state matrix and the data for the squeeze call if possible.
func (p *LargePreimageUploader) splitCalls(data *types.PreimageOracleData) (*matrix.StateMatrix, []keccakTypes.InputData, error) {
// Split the preimage data into chunks of size [MaxChunkSize] (except the last chunk). // Split the preimage data into chunks of size [MaxChunkSize] (except the last chunk).
stateMatrix := matrix.NewStateMatrix() stateMatrix := matrix.NewStateMatrix()
var calls []keccakTypes.InputData var calls []keccakTypes.InputData
...@@ -113,11 +109,37 @@ func (p *LargePreimageUploader) splitCalls(data *types.PreimageOracleData) ([]ke ...@@ -113,11 +109,37 @@ func (p *LargePreimageUploader) splitCalls(data *types.PreimageOracleData) ([]ke
calls = append(calls, call) calls = append(calls, call)
break break
} else if err != nil { } else if err != nil {
return nil, fmt.Errorf("failed to absorb data: %w", err) return nil, nil, fmt.Errorf("failed to absorb data: %w", err)
} }
calls = append(calls, call) calls = append(calls, call)
} }
return calls, nil return stateMatrix, calls, nil
}
func (p *LargePreimageUploader) Squeeze(ctx context.Context, uuid *big.Int, stateMatrix *matrix.StateMatrix) error {
prestate, prestateProof, err := stateMatrix.PrestateWithProof()
if err != nil {
return fmt.Errorf("failed to generate prestate proof: %w", err)
}
poststate, poststateProof, err := stateMatrix.PoststateWithProof()
if err != nil {
return fmt.Errorf("failed to generate poststate proof: %w", err)
}
// TODO(client-pod#474): Return the ErrChallengePeriodNotOver error if the challenge period is not over.
// This allows the responder to retry the squeeze later.
// Other errors should force the responder to stop retrying.
// Nil errors should indicate the squeeze was successful.
if err := p.contract.CallSqueeze(ctx, p.txMgr.From(), uuid, stateMatrix, prestate, prestateProof, poststate, poststateProof); err != nil {
return fmt.Errorf("failed to call squeeze: %w", err)
}
tx, err := p.contract.Squeeze(p.txMgr.From(), uuid, stateMatrix, prestate, prestateProof, poststate, poststateProof)
if err != nil {
return fmt.Errorf("failed to create pre-image oracle tx: %w", err)
}
if err := p.sendTxAndWait(ctx, tx); err != nil {
return fmt.Errorf("failed to populate pre-image oracle: %w", err)
}
return nil
} }
// initLargePreimage initializes the large preimage proposal. // initLargePreimage initializes the large preimage proposal.
......
...@@ -18,7 +18,11 @@ import ( ...@@ -18,7 +18,11 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
var mockAddLeavesError = errors.New("mock add leaves error") var (
mockAddLeavesError = errors.New("mock add leaves error")
mockSqueezeError = errors.New("mock squeeze error")
mockSqueezeCallError = errors.New("mock squeeze call error")
)
func TestLargePreimageUploader_NewUUID(t *testing.T) { func TestLargePreimageUploader_NewUUID(t *testing.T) {
tests := []struct { tests := []struct {
...@@ -66,7 +70,7 @@ func TestLargePreimageUploader_NewUUID(t *testing.T) { ...@@ -66,7 +70,7 @@ func TestLargePreimageUploader_NewUUID(t *testing.T) {
} }
} }
func TestLargePreimageUploader_UploadPreimage(t *testing.T) { func TestLargePreimageUploader_UploadPreimage_EdgeCases(t *testing.T) {
t.Run("InitFails", func(t *testing.T) { t.Run("InitFails", func(t *testing.T) {
oracle, _, contract := newTestLargePreimageUploader(t) oracle, _, contract := newTestLargePreimageUploader(t)
contract.initFails = true contract.initFails = true
...@@ -85,53 +89,49 @@ func TestLargePreimageUploader_UploadPreimage(t *testing.T) { ...@@ -85,53 +89,49 @@ func TestLargePreimageUploader_UploadPreimage(t *testing.T) {
require.Equal(t, 1, contract.addCalls) require.Equal(t, 1, contract.addCalls)
}) })
t.Run("AlreadyInitialized", func(t *testing.T) { t.Run("NoBytesProcessed", func(t *testing.T) {
oracle, _, contract := newTestLargePreimageUploader(t) oracle, _, contract := newTestLargePreimageUploader(t)
data := mockPreimageOracleData() data := mockPreimageOracleData()
contract.initialized = true
contract.claimedSize = uint32(len(data.OracleData))
err := oracle.UploadPreimage(context.Background(), 0, &data) err := oracle.UploadPreimage(context.Background(), 0, &data)
require.Equal(t, 0, contract.initCalls) require.NoError(t, err)
require.Equal(t, 1, contract.initCalls)
require.Equal(t, 6, contract.addCalls) require.Equal(t, 6, contract.addCalls)
// TODO(client-pod#467): fix this to not error. See LargePreimageUploader.UploadPreimage. require.Equal(t, data.OracleData, contract.addData)
require.ErrorIs(t, err, errNotSupported)
}) })
t.Run("NoBytesProcessed", func(t *testing.T) { t.Run("AlreadyInitialized", func(t *testing.T) {
oracle, _, contract := newTestLargePreimageUploader(t) oracle, _, contract := newTestLargePreimageUploader(t)
data := mockPreimageOracleData() data := mockPreimageOracleData()
contract.initialized = true
contract.claimedSize = uint32(len(data.OracleData))
err := oracle.UploadPreimage(context.Background(), 0, &data) err := oracle.UploadPreimage(context.Background(), 0, &data)
require.Equal(t, 1, contract.initCalls) require.NoError(t, err)
require.Equal(t, 0, contract.initCalls)
require.Equal(t, 6, contract.addCalls) require.Equal(t, 6, contract.addCalls)
require.Equal(t, data.OracleData, contract.addData)
// TODO(client-pod#467): fix this to not error. See LargePreimageUploader.UploadPreimage.
require.ErrorIs(t, err, errNotSupported)
}) })
t.Run("PartialBytesProcessed", func(t *testing.T) { t.Run("SqueezeCallFails", func(t *testing.T) {
oracle, _, contract := newTestLargePreimageUploader(t) oracle, _, contract := newTestLargePreimageUploader(t)
data := mockPreimageOracleData() data := mockPreimageOracleData()
contract.bytesProcessed = 3 * MaxChunkSize contract.bytesProcessed = 5*MaxChunkSize + 1
contract.timestamp = 123
contract.claimedSize = uint32(len(data.OracleData)) contract.claimedSize = uint32(len(data.OracleData))
contract.squeezeCallFails = true
err := oracle.UploadPreimage(context.Background(), 0, &data) err := oracle.UploadPreimage(context.Background(), 0, &data)
require.Equal(t, 0, contract.initCalls) require.ErrorIs(t, err, mockSqueezeCallError)
require.Equal(t, 3, contract.addCalls) require.Equal(t, 0, contract.squeezeCalls)
require.Equal(t, data.OracleData[contract.bytesProcessed:], contract.addData)
// TODO(client-pod#467): fix this to not error. See LargePreimageUploader.UploadPreimage.
require.ErrorIs(t, err, errNotSupported)
}) })
t.Run("LastLeafNotProcessed", func(t *testing.T) { t.Run("SqueezeFails", func(t *testing.T) {
oracle, _, contract := newTestLargePreimageUploader(t) oracle, _, contract := newTestLargePreimageUploader(t)
data := mockPreimageOracleData() data := mockPreimageOracleData()
contract.bytesProcessed = 5 * MaxChunkSize contract.bytesProcessed = 5*MaxChunkSize + 1
contract.timestamp = 123
contract.claimedSize = uint32(len(data.OracleData)) contract.claimedSize = uint32(len(data.OracleData))
contract.squeezeFails = true
err := oracle.UploadPreimage(context.Background(), 0, &data) err := oracle.UploadPreimage(context.Background(), 0, &data)
require.Equal(t, 0, contract.initCalls) require.ErrorIs(t, err, mockSqueezeError)
require.Equal(t, 1, contract.addCalls) require.Equal(t, 1, contract.squeezeCalls)
require.Equal(t, data.OracleData[contract.bytesProcessed:], contract.addData)
// TODO(client-pod#467): fix this to not error. See LargePreimageUploader.UploadPreimage.
require.ErrorIs(t, err, errNotSupported)
}) })
t.Run("AllBytesProcessed", func(t *testing.T) { t.Run("AllBytesProcessed", func(t *testing.T) {
...@@ -141,11 +141,10 @@ func TestLargePreimageUploader_UploadPreimage(t *testing.T) { ...@@ -141,11 +141,10 @@ func TestLargePreimageUploader_UploadPreimage(t *testing.T) {
contract.timestamp = 123 contract.timestamp = 123
contract.claimedSize = uint32(len(data.OracleData)) contract.claimedSize = uint32(len(data.OracleData))
err := oracle.UploadPreimage(context.Background(), 0, &data) err := oracle.UploadPreimage(context.Background(), 0, &data)
require.NoError(t, err)
require.Equal(t, 0, contract.initCalls) require.Equal(t, 0, contract.initCalls)
require.Equal(t, 0, contract.addCalls) require.Equal(t, 0, contract.addCalls)
require.Empty(t, contract.addData) require.Empty(t, contract.addData)
// TODO(client-pod#467): fix this to not error. See LargePreimageUploader.UploadPreimage.
require.ErrorIs(t, err, errNotSupported)
}) })
} }
...@@ -165,6 +164,104 @@ func mockPreimageOracleData() types.PreimageOracleData { ...@@ -165,6 +164,104 @@ func mockPreimageOracleData() types.PreimageOracleData {
} }
} }
func TestLargePreimageUploader_UploadPreimage_Succeeds(t *testing.T) {
	// One fully-populated keccak block: bytes 0x00, 0x01, ... BlockSize-1.
	fullBlock := new([keccakTypes.BlockSize]byte)
	for i := range fullBlock {
		fullBlock[i] = byte(i)
	}
	// One maximum-size chunk built from MaxBlocksPerChunk copies of that block.
	maxChunk := make([]byte, 0, MaxChunkSize)
	for i := 0; i < MaxBlocksPerChunk; i++ {
		maxChunk = append(maxChunk, fullBlock[:]...)
	}
	testCases := []struct {
		name          string
		input         []byte
		addCalls      int
		prestateLeaf  keccakTypes.Leaf
		poststateLeaf keccakTypes.Leaf
	}{
		{
			name:     "FullLeaf",
			input:    fullBlock[:],
			addCalls: 1,
			prestateLeaf: keccakTypes.Leaf{
				Input:           *fullBlock,
				Index:           big.NewInt(0),
				StateCommitment: common.HexToHash("9788a3b3bc36c482525b5890767be37130c997917bceca6e91a6c93359a4d1c6"),
			},
			poststateLeaf: keccakTypes.Leaf{
				Input:           [keccakTypes.BlockSize]byte{},
				Index:           big.NewInt(1),
				StateCommitment: common.HexToHash("78358b902b7774b314bcffdf0948746f18d6044086e76e3924d585dca3486c7d"),
			},
		},
		{
			name:     "MultipleLeaves",
			input:    append(fullBlock[:], append(fullBlock[:], fullBlock[:]...)...),
			addCalls: 1,
			prestateLeaf: keccakTypes.Leaf{
				Input:           *fullBlock,
				Index:           big.NewInt(2),
				StateCommitment: common.HexToHash("e3deed8ab6f8bbcf3d4fe825d74f703b3f2fc2f5b0afaa2574926fcfd0d4c895"),
			},
			poststateLeaf: keccakTypes.Leaf{
				Input:           [keccakTypes.BlockSize]byte{},
				Index:           big.NewInt(3),
				StateCommitment: common.HexToHash("79115eeab1ff2eccf5baf3ea2dda13bc79c548ce906bdd16433a23089c679df2"),
			},
		},
		{
			name:     "MultipleLeavesUnaligned",
			input:    append(fullBlock[:], append(fullBlock[:], byte(9))...),
			addCalls: 1,
			prestateLeaf: keccakTypes.Leaf{
				Input:           *fullBlock,
				Index:           big.NewInt(1),
				StateCommitment: common.HexToHash("b5ea400e375b2c1ce348f3cc4ad5b6ad28e1b36759ddd2aba155f0b1d476b015"),
			},
			poststateLeaf: keccakTypes.Leaf{
				Input:           [keccakTypes.BlockSize]byte{byte(9)},
				Index:           big.NewInt(2),
				StateCommitment: common.HexToHash("fa87e115dc4786e699bf80cc75d13ac1e2db0708c1418fc8cbc9800d17b5811a"),
			},
		},
		{
			name:     "MultipleChunks",
			input:    append(maxChunk, append(fullBlock[:], fullBlock[:]...)...),
			addCalls: 2,
			prestateLeaf: keccakTypes.Leaf{
				Input:           *fullBlock,
				Index:           big.NewInt(301),
				StateCommitment: common.HexToHash("4e9c55542478939feca4ff55ee98fbc632bb65a784a55b94536644bc87298ca4"),
			},
			poststateLeaf: keccakTypes.Leaf{
				Input:           [keccakTypes.BlockSize]byte{},
				Index:           big.NewInt(302),
				StateCommitment: common.HexToHash("775020bfcaa93700263d040a4eeec3c8c3cf09e178457d04044594beaaf5e20b"),
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			oracle, _, contract := newTestLargePreimageUploader(t)
			data := types.PreimageOracleData{
				OracleData: tc.input,
			}
			err := oracle.UploadPreimage(context.Background(), 0, &data)
			require.NoError(t, err)
			require.Equal(t, tc.addCalls, contract.addCalls)
			// A successful large preimage upload always performs exactly one
			// init call and one squeeze call.
			require.Equal(t, 1, contract.initCalls)
			require.Equal(t, 1, contract.squeezeCalls)
			require.Equal(t, tc.prestateLeaf, contract.squeezePrestate)
			require.Equal(t, tc.poststateLeaf, contract.squeezePoststate)
		})
	}
}
func newTestLargePreimageUploader(t *testing.T) (*LargePreimageUploader, *mockTxMgr, *mockPreimageOracleContract) { func newTestLargePreimageUploader(t *testing.T) (*LargePreimageUploader, *mockTxMgr, *mockPreimageOracleContract) {
logger := testlog.Logger(t, log.LvlError) logger := testlog.Logger(t, log.LvlError)
txMgr := &mockTxMgr{} txMgr := &mockTxMgr{}
...@@ -184,6 +281,11 @@ type mockPreimageOracleContract struct { ...@@ -184,6 +281,11 @@ type mockPreimageOracleContract struct {
addCalls int addCalls int
addFails bool addFails bool
addData []byte addData []byte
squeezeCalls int
squeezeFails bool
squeezeCallFails bool
squeezePrestate keccakTypes.Leaf
squeezePoststate keccakTypes.Leaf
} }
func (s *mockPreimageOracleContract) InitLargePreimage(_ *big.Int, _ uint32, _ uint32) (txmgr.TxCandidate, error) { func (s *mockPreimageOracleContract) InitLargePreimage(_ *big.Int, _ uint32, _ uint32) (txmgr.TxCandidate, error) {
...@@ -203,7 +305,13 @@ func (s *mockPreimageOracleContract) AddLeaves(_ *big.Int, _ *big.Int, input []b ...@@ -203,7 +305,13 @@ func (s *mockPreimageOracleContract) AddLeaves(_ *big.Int, _ *big.Int, input []b
return txmgr.TxCandidate{}, nil return txmgr.TxCandidate{}, nil
} }
func (s *mockPreimageOracleContract) Squeeze(_ common.Address, _ *big.Int, _ *matrix.StateMatrix, _ keccakTypes.Leaf, _ merkle.Proof, _ keccakTypes.Leaf, _ merkle.Proof) (txmgr.TxCandidate, error) { func (s *mockPreimageOracleContract) Squeeze(_ common.Address, _ *big.Int, _ *matrix.StateMatrix, prestate keccakTypes.Leaf, _ merkle.Proof, poststate keccakTypes.Leaf, _ merkle.Proof) (txmgr.TxCandidate, error) {
s.squeezeCalls++
s.squeezePrestate = prestate
s.squeezePoststate = poststate
if s.squeezeFails {
return txmgr.TxCandidate{}, mockSqueezeError
}
return txmgr.TxCandidate{}, nil return txmgr.TxCandidate{}, nil
} }
...@@ -222,3 +330,9 @@ func (s *mockPreimageOracleContract) GetProposalMetadata(_ context.Context, _ ba ...@@ -222,3 +330,9 @@ func (s *mockPreimageOracleContract) GetProposalMetadata(_ context.Context, _ ba
} }
return []keccakTypes.LargePreimageMetaData{{LargePreimageIdent: idents[0]}}, nil return []keccakTypes.LargePreimageMetaData{{LargePreimageIdent: idents[0]}}, nil
} }
// CallSqueeze mimics the read-only squeeze check; it returns the canned
// error when the mock is configured to fail the call, and nil otherwise.
// Note: deliberately does not bump squeezeCalls — only Squeeze counts.
func (s *mockPreimageOracleContract) CallSqueeze(_ context.Context, _ common.Address, _ *big.Int, _ *matrix.StateMatrix, _ keccakTypes.Leaf, _ merkle.Proof, _ keccakTypes.Leaf, _ merkle.Proof) error {
	if !s.squeezeCallFails {
		return nil
	}
	return mockSqueezeCallError
}
...@@ -27,5 +27,6 @@ type PreimageOracleContract interface { ...@@ -27,5 +27,6 @@ type PreimageOracleContract interface {
InitLargePreimage(uuid *big.Int, partOffset uint32, claimedSize uint32) (txmgr.TxCandidate, error) InitLargePreimage(uuid *big.Int, partOffset uint32, claimedSize uint32) (txmgr.TxCandidate, error)
AddLeaves(uuid *big.Int, startingBlockIndex *big.Int, input []byte, commitments []common.Hash, finalize bool) (txmgr.TxCandidate, error) AddLeaves(uuid *big.Int, startingBlockIndex *big.Int, input []byte, commitments []common.Hash, finalize bool) (txmgr.TxCandidate, error)
Squeeze(claimant common.Address, uuid *big.Int, stateMatrix *matrix.StateMatrix, preState keccakTypes.Leaf, preStateProof merkle.Proof, postState keccakTypes.Leaf, postStateProof merkle.Proof) (txmgr.TxCandidate, error) Squeeze(claimant common.Address, uuid *big.Int, stateMatrix *matrix.StateMatrix, preState keccakTypes.Leaf, preStateProof merkle.Proof, postState keccakTypes.Leaf, postStateProof merkle.Proof) (txmgr.TxCandidate, error)
CallSqueeze(ctx context.Context, claimant common.Address, uuid *big.Int, stateMatrix *matrix.StateMatrix, preState keccakTypes.Leaf, preStateProof merkle.Proof, postState keccakTypes.Leaf, postStateProof merkle.Proof) error
GetProposalMetadata(ctx context.Context, block batching.Block, idents ...keccakTypes.LargePreimageIdent) ([]keccakTypes.LargePreimageMetaData, error) GetProposalMetadata(ctx context.Context, block batching.Block, idents ...keccakTypes.LargePreimageIdent) ([]keccakTypes.LargePreimageMetaData, error)
} }
...@@ -6,6 +6,7 @@ import ( ...@@ -6,6 +6,7 @@ import (
"io" "io"
"math/big" "math/big"
"github.com/ethereum-optimism/optimism/op-challenger/game/keccak/merkle"
"github.com/ethereum-optimism/optimism/op-challenger/game/keccak/types" "github.com/ethereum-optimism/optimism/op-challenger/game/keccak/types"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
...@@ -15,6 +16,14 @@ import ( ...@@ -15,6 +16,14 @@ import (
// StateMatrix implements a stateful keccak sponge with the ability to create state commitments after each permutation // StateMatrix implements a stateful keccak sponge with the ability to create state commitments after each permutation
type StateMatrix struct { type StateMatrix struct {
s *state s *state
// prestateLeaf is the last prestate leaf.
// Used to retrieve the prestate to squeeze.
prestateLeaf types.Leaf
// poststateLeaf is the last poststate leaf.
// Used to retrieve the poststate to squeeze.
poststateLeaf types.Leaf
// merkleTree is the internal [merkle.BinaryMerkleTree] used to generate proofs
merkleTree *merkle.BinaryMerkleTree
} }
var ( var (
...@@ -76,7 +85,16 @@ func Challenge(data io.Reader, commitments []common.Hash) (types.Challenge, erro ...@@ -76,7 +85,16 @@ func Challenge(data io.Reader, commitments []common.Hash) (types.Challenge, erro
// NewStateMatrix creates a new state matrix initialized with the initial, zero keccak block. // NewStateMatrix creates a new state matrix initialized with the initial, zero keccak block.
func NewStateMatrix() *StateMatrix { func NewStateMatrix() *StateMatrix {
return &StateMatrix{s: newLegacyKeccak256()} return &StateMatrix{
s: newLegacyKeccak256(),
prestateLeaf: types.Leaf{
Index: big.NewInt(0),
},
poststateLeaf: types.Leaf{
Index: big.NewInt(0),
},
merkleTree: merkle.NewBinaryMerkleTree(),
}
} }
// StateCommitment returns the state commitment for the current state matrix. // StateCommitment returns the state commitment for the current state matrix.
...@@ -95,6 +113,18 @@ func (d *StateMatrix) PackState() []byte { ...@@ -95,6 +113,18 @@ func (d *StateMatrix) PackState() []byte {
return buf return buf
} }
// newLeafWithPadding creates a new [Leaf] from inputs, zero-padding the
// input out to [BlockSize] bytes.
func newLeafWithPadding(input []byte, index *big.Int, commitment common.Hash) types.Leaf {
	// TODO(client-pod#480): Add actual keccak padding to ensure the merkle proofs are correct (for readData)
	var padded [types.BlockSize]byte
	copy(padded[:], input)
	return types.Leaf{
		Input:           padded,
		Index:           index,
		StateCommitment: commitment,
	}
}
func (d *StateMatrix) AbsorbUpTo(in io.Reader, maxLen int) (types.InputData, error) { func (d *StateMatrix) AbsorbUpTo(in io.Reader, maxLen int) (types.InputData, error) {
if maxLen < types.BlockSize || maxLen%types.BlockSize != 0 { if maxLen < types.BlockSize || maxLen%types.BlockSize != 0 {
return types.InputData{}, ErrInvalidMaxLen return types.InputData{}, ErrInvalidMaxLen
...@@ -125,6 +155,24 @@ func (d *StateMatrix) AbsorbUpTo(in io.Reader, maxLen int) (types.InputData, err ...@@ -125,6 +155,24 @@ func (d *StateMatrix) AbsorbUpTo(in io.Reader, maxLen int) (types.InputData, err
}, nil }, nil
} }
// PrestateWithProof returns the most recent prestate leaf together with its
// merkle inclusion proof from the internal tree.
func (d *StateMatrix) PrestateWithProof() (types.Leaf, merkle.Proof, error) {
	leaf := d.prestateLeaf
	proof, err := d.merkleTree.ProofAtIndex(leaf.Index.Uint64())
	if err != nil {
		return types.Leaf{}, merkle.Proof{}, err
	}
	return leaf, proof, nil
}
// PoststateWithProof returns the poststate leaf with its merkle proof.
func (d *StateMatrix) PoststateWithProof() (types.Leaf, merkle.Proof, error) {
proof, err := d.merkleTree.ProofAtIndex(d.poststateLeaf.Index.Uint64())
if err != nil {
return types.Leaf{}, merkle.Proof{}, err
}
return d.poststateLeaf, proof, nil
}
// absorbNextLeafInput reads up to [BlockSize] bytes from in and absorbs them into the state matrix. // absorbNextLeafInput reads up to [BlockSize] bytes from in and absorbs them into the state matrix.
// If EOF is reached while reading, the state matrix is finalized and [io.EOF] is returned. // If EOF is reached while reading, the state matrix is finalized and [io.EOF] is returned.
func (d *StateMatrix) absorbNextLeafInput(in io.Reader) ([]byte, error) { func (d *StateMatrix) absorbNextLeafInput(in io.Reader) ([]byte, error) {
...@@ -149,6 +197,14 @@ func (d *StateMatrix) absorbNextLeafInput(in io.Reader) ([]byte, error) { ...@@ -149,6 +197,14 @@ func (d *StateMatrix) absorbNextLeafInput(in io.Reader) ([]byte, error) {
// additional block. We can then return EOF to indicate there are no further blocks. // additional block. We can then return EOF to indicate there are no further blocks.
final = final && len(input) < types.BlockSize final = final && len(input) < types.BlockSize
d.absorbLeafInput(input, final) d.absorbLeafInput(input, final)
if d.prestateLeaf.StateCommitment == (common.Hash{}) {
d.prestateLeaf = newLeafWithPadding(input, d.prestateLeaf.Index, d.StateCommitment())
d.poststateLeaf = newLeafWithPadding(input, d.prestateLeaf.Index, d.StateCommitment())
} else {
d.prestateLeaf = d.poststateLeaf
d.poststateLeaf = newLeafWithPadding(input, new(big.Int).Add(d.prestateLeaf.Index, big.NewInt(1)), d.StateCommitment())
}
d.merkleTree.AddLeaf(d.poststateLeaf)
if final { if final {
return input, io.EOF return input, io.EOF
} }
......
...@@ -54,6 +54,8 @@ func TestStateCommitment(t *testing.T) { ...@@ -54,6 +54,8 @@ func TestStateCommitment(t *testing.T) {
type testData struct { type testData struct {
Input []byte `json:"input"` Input []byte `json:"input"`
Commitments []common.Hash `json:"commitments"` Commitments []common.Hash `json:"commitments"`
PrestateLeaf []byte `json:"prestateLeaf"`
PoststateLeaf []byte `json:"poststateLeaf"`
} }
func TestReferenceCommitmentsFromReader(t *testing.T) { func TestReferenceCommitmentsFromReader(t *testing.T) {
...@@ -66,20 +68,29 @@ func TestReferenceCommitmentsFromReader(t *testing.T) { ...@@ -66,20 +68,29 @@ func TestReferenceCommitmentsFromReader(t *testing.T) {
s := NewStateMatrix() s := NewStateMatrix()
commitments := []common.Hash{s.StateCommitment()} commitments := []common.Hash{s.StateCommitment()}
in := bytes.NewReader(test.Input) in := bytes.NewReader(test.Input)
var prestateLeaf []byte
var poststateLeaf []byte
for { for {
_, err := s.absorbNextLeafInput(in) readData, err := s.absorbNextLeafInput(in)
if errors.Is(err, io.EOF) { if errors.Is(err, io.EOF) {
if prestateLeaf == nil {
prestateLeaf = readData
}
poststateLeaf = readData
commitments = append(commitments, s.StateCommitment()) commitments = append(commitments, s.StateCommitment())
break break
} }
// Shouldn't get any error except EOF // Shouldn't get any error except EOF
require.NoError(t, err) require.NoError(t, err)
commitments = append(commitments, s.StateCommitment()) commitments = append(commitments, s.StateCommitment())
prestateLeaf = readData
} }
actual := s.Hash() actual := s.Hash()
expected := crypto.Keccak256Hash(test.Input) expected := crypto.Keccak256Hash(test.Input)
require.Equal(t, expected, actual) require.Equal(t, expected, actual)
require.Equal(t, test.Commitments, commitments) require.Equal(t, test.Commitments, commitments)
require.Equal(t, test.PrestateLeaf, prestateLeaf)
require.Equal(t, test.PoststateLeaf, poststateLeaf)
}) })
} }
} }
...@@ -270,15 +281,17 @@ func TestVerifyPreimage(t *testing.T) { ...@@ -270,15 +281,17 @@ func TestVerifyPreimage(t *testing.T) {
_, err := s.AbsorbUpTo(bytes.NewReader(preimage), invalidLeafStart) _, err := s.AbsorbUpTo(bytes.NewReader(preimage), invalidLeafStart)
require.NoError(t, err) require.NoError(t, err)
prestateLeaf := leafData(invalidIdx - 1)
poststateLeaf := leafData(invalidIdx)
return types.Challenge{ return types.Challenge{
StateMatrix: s.PackState(), StateMatrix: s.PackState(),
Prestate: types.Leaf{ Prestate: types.Leaf{
Input: leafData(invalidIdx - 1), Input: prestateLeaf,
Index: big.NewInt(int64(invalidIdx - 1)), Index: big.NewInt(int64(invalidIdx - 1)),
StateCommitment: commitments[invalidIdx-1], StateCommitment: commitments[invalidIdx-1],
}, },
Poststate: types.Leaf{ Poststate: types.Leaf{
Input: leafData(invalidIdx), Input: poststateLeaf,
Index: big.NewInt(int64(invalidIdx)), Index: big.NewInt(int64(invalidIdx)),
StateCommitment: commitments[invalidIdx], StateCommitment: commitments[invalidIdx],
}, },
...@@ -292,6 +305,7 @@ func TestVerifyPreimage(t *testing.T) { ...@@ -292,6 +305,7 @@ func TestVerifyPreimage(t *testing.T) {
expectedErr error expectedErr error
} }
poststateLeaf := leafData(0)
tests := []testInputs{ tests := []testInputs{
{ {
name: "Valid", name: "Valid",
...@@ -309,7 +323,7 @@ func TestVerifyPreimage(t *testing.T) { ...@@ -309,7 +323,7 @@ func TestVerifyPreimage(t *testing.T) {
StateMatrix: NewStateMatrix().PackState(), StateMatrix: NewStateMatrix().PackState(),
Prestate: types.Leaf{}, Prestate: types.Leaf{},
Poststate: types.Leaf{ Poststate: types.Leaf{
Input: leafData(0), Input: poststateLeaf,
Index: big.NewInt(int64(0)), Index: big.NewInt(int64(0)),
StateCommitment: common.Hash{0xaa}, StateCommitment: common.Hash{0xaa},
}, },
......
...@@ -11,6 +11,10 @@ import ( ...@@ -11,6 +11,10 @@ import (
// BinaryMerkleTreeDepth is the depth of the merkle tree. // BinaryMerkleTreeDepth is the depth of the merkle tree.
const BinaryMerkleTreeDepth = 16 const BinaryMerkleTreeDepth = 16
// Proof is a list of [common.Hash]s that prove the merkle inclusion of a leaf.
// These are the sibling hashes of the leaf's path from the root to the leaf.
type Proof [BinaryMerkleTreeDepth]common.Hash
var ( var (
// MaxLeafCount is the maximum number of leaves in the merkle tree. // MaxLeafCount is the maximum number of leaves in the merkle tree.
MaxLeafCount = 1<<BinaryMerkleTreeDepth - 1 // 2^16 - 1 MaxLeafCount = 1<<BinaryMerkleTreeDepth - 1 // 2^16 - 1
...@@ -32,10 +36,6 @@ func init() { ...@@ -32,10 +36,6 @@ func init() {
rootHash = crypto.Keccak256Hash(rootHash[:], zeroHashes[BinaryMerkleTreeDepth-1][:]) rootHash = crypto.Keccak256Hash(rootHash[:], zeroHashes[BinaryMerkleTreeDepth-1][:])
} }
// Proof is a list of [common.Hash]s that prove the merkle inclusion of a leaf.
// These are the sibling hashes of the leaf's path from the root to the leaf.
type Proof [BinaryMerkleTreeDepth]common.Hash
// merkleNode is a single node in the binary merkle tree. // merkleNode is a single node in the binary merkle tree.
type merkleNode struct { type merkleNode struct {
Label common.Hash Label common.Hash
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment