Commit 81c7aa03 authored by Joshua Gutow, committed by GitHub

Alt-DA: Refactor DAState and DAMgr to Separate Commitment and Challenge Tracking (#10618)

* plasma: Split commitments & challenges

This splits the current two queues into four: two for commitments and two
for challenges. Challenges and commitments are split because they are
different things. Each kind has two physical queues to distinguish items
that have not yet expired from items that have expired but are not yet
finalized.
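
For illustration, a minimal sketch of the four-queue layout described above; the type, field, and window names here (commitment, challenge, fourQueueState, expireAt) are hypothetical stand-ins, not the actual op-plasma types:

```go
package main

import "fmt"

// commitment and challenge are stand-ins for the real op-plasma types.
type commitment struct {
	data           []byte
	inclusionBlock uint64 // L1 block the commitment was included in
}

type challenge struct {
	comm           []byte
	challengeBlock uint64 // L1 block the challenge was issued in
}

// fourQueueState sketches the split: commitments and challenges each get an
// "active" queue and an "expired but not yet finalized" queue.
type fourQueueState struct {
	activeComms       []commitment
	expiredComms      []commitment
	activeChallenges  []challenge
	expiredChallenges []challenge
}

// expireAt moves items whose window has elapsed at the given L1 origin from
// the active queues to the expired queues; a later finality signal would then
// prune the expired queues.
func (s *fourQueueState) expireAt(origin, challengeWindow, resolveWindow uint64) {
	var keepC []commitment
	for _, c := range s.activeComms {
		if c.inclusionBlock+challengeWindow < origin {
			s.expiredComms = append(s.expiredComms, c)
		} else {
			keepC = append(keepC, c)
		}
	}
	s.activeComms = keepC

	var keepCh []challenge
	for _, ch := range s.activeChallenges {
		if ch.challengeBlock+resolveWindow < origin {
			s.expiredChallenges = append(s.expiredChallenges, ch)
		} else {
			keepCh = append(keepCh, ch)
		}
	}
	s.activeChallenges = keepCh
}

func main() {
	s := &fourQueueState{
		activeComms:      []commitment{{data: []byte{0x01}, inclusionBlock: 10}},
		activeChallenges: []challenge{{comm: []byte{0x01}, challengeBlock: 12}},
	}
	s.expireAt(100, 16, 16) // both windows have long since elapsed at origin 100
	fmt.Println(len(s.expiredComms), len(s.expiredChallenges)) // 1 1
}
```

This mirrors the description above: expiry is driven by the advancing L1 origin, while pruning of the expired queues appears to happen only on the finality signal (as exercised by da.Finalize in the test diff below).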

This also splits the commitment origin from the challenge origin because the
challenge origin can advance independently of the commitment origin.
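
A rough sketch of the independent origins; originTracker and its fields are hypothetical illustrations, not the real DAState API:

```go
package main

import "fmt"

// originTracker is a hypothetical illustration of tracking two origins.
type originTracker struct {
	commOrigin      uint64 // last L1 block whose commitments were consumed by derivation
	challengeOrigin uint64 // last L1 block whose challenge-contract events were synced
}

// advanceChallengeOrigin moves only the challenge origin; the commitment
// origin advances separately, when the data source actually reads that block.
func (t *originTracker) advanceChallengeOrigin(l1Block uint64) {
	if l1Block > t.challengeOrigin {
		t.challengeOrigin = l1Block
	}
}

func main() {
	t := &originTracker{commOrigin: 100, challengeOrigin: 100}
	// Challenge events for blocks 101-105 can be synced before the derivation
	// pipeline has consumed the commitments included in those blocks.
	for b := uint64(101); b <= 105; b++ {
		t.advanceChallengeOrigin(b)
	}
	fmt.Println(t.commOrigin, t.challengeOrigin) // 100 105
}
```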

* Cleanup Refactor; Fix Tests

While reading over the refactor to understand it for myself,
I made some organizational edits and fixed an issue in the E2E tests.

* remove commented assert

* Update op-plasma/damgr.go
Co-authored-by: Adrian Sutton <adrian@oplabs.co>

* add warn log for DA Server Not Found errors
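
A hedged sketch of the kind of warning meant here; the actual call site, logger, and message in the commit may differ, and errNotFound plus the standard-library logger stand in for the project's own sentinel error and logger:

```go
package main

import (
	"errors"
	"log/slog"
)

// errNotFound stands in for the DA client's not-found sentinel error.
var errNotFound = errors.New("not found")

// getPreimage pretends to ask the DA server for a commitment's input data.
func getPreimage(comm []byte) ([]byte, error) {
	return nil, errNotFound
}

func main() {
	comm := []byte{0x00, 0x01}
	if _, err := getPreimage(comm); errors.Is(err, errNotFound) {
		// Surfacing this as a warning helps operators notice missing inputs
		// before the challenge or resolve windows expire around them.
		slog.Warn("DA server does not have the requested input", "commitment", comm, "err", err)
	}
}
```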

---------
Co-authored-by: axelKingsley <axel.kingsley@gmail.com>
Co-authored-by: Adrian Sutton <adrian@oplabs.co>
parent 6e1dbea1
@@ -28,7 +28,7 @@ type L1BlobsFetcher interface {
type PlasmaInputFetcher interface {
// GetInput fetches the input for the given commitment at the given block number from the DA storage service.
GetInput(ctx context.Context, l1 plasma.L1Fetcher, c plasma.CommitmentData, blockId eth.BlockID) (eth.Data, error)
GetInput(ctx context.Context, l1 plasma.L1Fetcher, c plasma.CommitmentData, blockId eth.L1BlockRef) (eth.Data, error)
// AdvanceL1Origin advances the L1 origin to the given block number, syncing the DA challenge events.
AdvanceL1Origin(ctx context.Context, l1 plasma.L1Fetcher, blockId eth.BlockID) error
// Reset the challenge origin in case of L1 reorg
@@ -78,7 +78,7 @@ func (ds *DataSourceFactory) OpenData(ctx context.Context, ref eth.L1BlockRef, b
}
if ds.dsCfg.plasmaEnabled {
// plasma([calldata | blobdata](l1Ref)) -> data
return NewPlasmaDataSource(ds.log, src, ds.fetcher, ds.plasmaFetcher, ref.ID()), nil
return NewPlasmaDataSource(ds.log, src, ds.fetcher, ds.plasmaFetcher, ref), nil
}
return src, nil
}
@@ -17,12 +17,12 @@ type PlasmaDataSource struct {
src DataIter
fetcher PlasmaInputFetcher
l1 L1Fetcher
id eth.BlockID
id eth.L1BlockRef
// keep track of a pending commitment so we can keep trying to fetch the input.
comm plasma.CommitmentData
}
func NewPlasmaDataSource(log log.Logger, src DataIter, l1 L1Fetcher, fetcher PlasmaInputFetcher, id eth.BlockID) *PlasmaDataSource {
func NewPlasmaDataSource(log log.Logger, src DataIter, l1 L1Fetcher, fetcher PlasmaInputFetcher, id eth.L1BlockRef) *PlasmaDataSource {
return &PlasmaDataSource{
log: log,
src: src,
@@ -37,7 +37,7 @@ func (s *PlasmaDataSource) Next(ctx context.Context) (eth.Data, error) {
// before we can proceed to fetch the input data. This function can be called multiple times
// for the same origin and noop if the origin was already processed. It is also called if
there is no commitment in the current origin.
if err := s.fetcher.AdvanceL1Origin(ctx, s.l1, s.id); err != nil {
if err := s.fetcher.AdvanceL1Origin(ctx, s.l1, s.id.ID()); err != nil {
if errors.Is(err, plasma.ErrReorgRequired) {
return nil, NewResetError(fmt.Errorf("new expired challenge"))
}
@@ -56,7 +56,7 @@ func TestPlasmaDataSource(t *testing.T) {
}
metrics := &plasma.NoopMetrics{}
daState := plasma.NewState(logger, metrics)
daState := plasma.NewState(logger, metrics, pcfg)
da := plasma.NewPlasmaDAWithState(logger, pcfg, storage, metrics, daState)
@@ -97,6 +97,7 @@ func TestPlasmaDataSource(t *testing.T) {
// keep track of random input data to validate against
var inputs [][]byte
var comms []plasma.CommitmentData
var inclusionBlocks []eth.L1BlockRef
signer := cfg.L1Signer()
@@ -131,6 +132,7 @@ func TestPlasmaDataSource(t *testing.T) {
kComm := comm.(plasma.Keccak256Commitment)
inputs = append(inputs, input)
comms = append(comms, kComm)
inclusionBlocks = append(inclusionBlocks, ref)
tx, err := types.SignNewTx(batcherPriv, signer, &types.DynamicFeeTx{
ChainID: signer.ChainID(),
@@ -161,7 +163,7 @@ func TestPlasmaDataSource(t *testing.T) {
if len(comms) >= 4 && nc < 7 {
// skip a block between each challenge transaction
if nc%2 == 0 {
daState.SetActiveChallenge(comms[nc/2].Encode(), ref.Number, pcfg.ResolveWindow)
daState.CreateChallenge(comms[nc/2], ref.ID(), inclusionBlocks[nc/2].Number)
logger.Info("setting active challenge", "comm", comms[nc/2])
}
nc++
@@ -275,11 +277,9 @@ func TestPlasmaDataSource(t *testing.T) {
}
// trigger l1 finalization signal
da.Finalize(l1Refs[len(l1Refs)-32])
// finalize based on the second to last block, which will prune the commitment on block 2, and make it finalized
da.Finalize(l1Refs[len(l1Refs)-2])
finalitySignal.AssertExpectations(t)
l1F.AssertExpectations(t)
}
// This test makes sure the pipeline returns a temporary error if data is not found.
@@ -299,7 +299,7 @@ func TestPlasmaDataSourceStall(t *testing.T) {
metrics := &plasma.NoopMetrics{}
daState := plasma.NewState(logger, metrics)
daState := plasma.NewState(logger, metrics, pcfg)
da := plasma.NewPlasmaDAWithState(logger, pcfg, storage, metrics, daState)
@@ -396,8 +396,11 @@ func TestPlasmaDataSourceStall(t *testing.T) {
_, err = src.Next(ctx)
require.ErrorIs(t, err, NotEnoughData)
// create and resolve a challenge
daState.CreateChallenge(comm, ref.ID(), ref.Number)
// now challenge is resolved
daState.SetResolvedChallenge(comm.Encode(), input, ref.Number+2)
err = daState.ResolveChallenge(comm, eth.BlockID{Number: ref.Number + 2}, ref.Number, input)
require.NoError(t, err)
// derivation can resume
data, err := src.Next(ctx)
@@ -2,6 +2,7 @@ package plasma
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
@@ -29,7 +30,7 @@ func CommitmentTypeFromString(s string) (CommitmentType, error) {
}
// CommitmentType describes the binary format of the commitment.
// KeccakCommitmentStringType is the default commitment type for the centralized DA storage.
// KeccakCommitmentType is the default commitment type for the centralized DA storage.
// GenericCommitmentType indicates an opaque bytestring that the op-node never opens.
const (
Keccak256CommitmentType CommitmentType = 0
@@ -44,6 +45,7 @@ type CommitmentData interface {
Encode() []byte
TxData() []byte
Verify(input []byte) error
String() string
}
// Keccak256Commitment is an implementation of CommitmentData that uses Keccak256 as the commitment function.
@@ -124,6 +126,10 @@ func (c Keccak256Commitment) Verify(input []byte) error {
return nil
}
func (c Keccak256Commitment) String() string {
return hex.EncodeToString(c.Encode())
}
// NewGenericCommitment creates a new commitment from the given input.
func NewGenericCommitment(input []byte) GenericCommitment {
return GenericCommitment(input)
@@ -156,3 +162,7 @@ func (c GenericCommitment) TxData() []byte {
func (c GenericCommitment) Verify(input []byte) error {
return nil
}
func (c GenericCommitment) String() string {
return hex.EncodeToString(c.Encode())
}
@@ -9,7 +9,6 @@ import (
"github.com/ethereum-optimism/optimism/op-service/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/stretchr/testify/require"
)
@@ -116,7 +115,7 @@ func TestDAClientService(t *testing.T) {
Enabled: true,
DAServerURL: fmt.Sprintf("http://%s", server.Endpoint()),
VerifyOnRead: false,
GenericDA: true,
GenericDA: false,
}
require.NoError(t, cfg.Check())
@@ -129,7 +128,7 @@ func TestDAClientService(t *testing.T) {
comm, err := client.SetInput(ctx, input)
require.NoError(t, err)
require.Equal(t, comm, NewGenericCommitment(crypto.Keccak256(input)))
require.Equal(t, comm.String(), NewKeccak256Commitment(input).String())
stored, err := client.GetInput(ctx, comm)
require.NoError(t, err)
@@ -144,7 +143,7 @@ func TestDAClientService(t *testing.T) {
require.NoError(t, err)
// test not found error
comm = NewGenericCommitment(RandomData(rng, 32))
comm = NewKeccak256Commitment(RandomData(rng, 32))
_, err = client.GetInput(ctx, comm)
require.ErrorIs(t, err, ErrNotFound)
@@ -157,6 +156,6 @@ func TestDAClientService(t *testing.T) {
_, err = client.SetInput(ctx, input)
require.Error(t, err)
_, err = client.GetInput(ctx, NewGenericCommitment(input))
_, err = client.GetInput(ctx, NewKeccak256Commitment(input))
require.Error(t, err)
}
This diff is collapsed.
This diff is collapsed.
@@ -82,7 +82,7 @@ var ErrNotEnabled = errors.New("plasma not enabled")
// PlasmaDisabled is a noop plasma DA implementation for stubbing.
type PlasmaDisabled struct{}
func (d *PlasmaDisabled) GetInput(ctx context.Context, l1 L1Fetcher, commitment CommitmentData, blockId eth.BlockID) (eth.Data, error) {
func (d *PlasmaDisabled) GetInput(ctx context.Context, l1 L1Fetcher, commitment CommitmentData, blockId eth.L1BlockRef) (eth.Data, error) {
return nil, ErrNotEnabled
}
This diff is collapsed.