package derive

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
	plasma "github.com/ethereum-optimism/optimism/op-plasma"
	"github.com/ethereum-optimism/optimism/op-service/eth"
)

type DataIter interface {
	Next(ctx context.Context) (eth.Data, error)
}
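
// A minimal consumption sketch (illustrative, assuming Next follows the
// convention of the data sources in this package and returns io.EOF once all
// batcher data for the block has been read; process is a hypothetical consumer):
//
//	for {
//		data, err := iter.Next(ctx)
//		if err == io.EOF {
//			break // source exhausted for this block
//		} else if err != nil {
//			return err // temporary failure; the caller may retry
//		}
//		process(data)
//	}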

type L1TransactionFetcher interface {
	InfoAndTxsByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, types.Transactions, error)
}

type L1BlobsFetcher interface {
	// GetBlobs fetches blobs that were confirmed in the given L1 block with the given indexed hashes.
	GetBlobs(ctx context.Context, ref eth.L1BlockRef, hashes []eth.IndexedBlobHash) ([]*eth.Blob, error)
}

type PlasmaInputFetcher interface {
	// GetInput fetches the input for the given commitment at the given block number from the DA storage service.
	GetInput(ctx context.Context, l1 plasma.L1Fetcher, c plasma.Keccak256Commitment, blockId eth.BlockID) (eth.Data, error)
	// AdvanceL1Origin advances the L1 origin to the given block number, syncing the DA challenge events.
	AdvanceL1Origin(ctx context.Context, l1 plasma.L1Fetcher, blockId eth.BlockID) error
	// Reset resets the challenge origin in case of an L1 reorg.
	Reset(ctx context.Context, base eth.L1BlockRef, baseCfg eth.SystemConfig) error
	// Finalize notifies the DA layer of the L1 finalized head so plasma finality is always behind L1.
	Finalize(ref eth.L1BlockRef)
	// OnFinalizedHeadSignal sets the engine finalization signal callback.
	OnFinalizedHeadSignal(f plasma.HeadSignalFn)
}
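
// A lifecycle sketch (an assumption pieced together from the method comments
// above rather than a contract stated in this file): the derivation pipeline
// calls Reset after an L1 reorg, AdvanceL1Origin for each new L1 origin to
// sync DA challenge events, and Finalize as the L1 finalized head advances,
// so plasma finality always trails L1 finality.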

// DataSourceFactory reads raw transactions from a given block and then filters for
// batch submitter transactions.
// This is not a stage in the pipeline, but a wrapper for another stage in the pipeline.
type DataSourceFactory struct {
	log           log.Logger
	dsCfg         DataSourceConfig
	fetcher       L1Fetcher
	blobsFetcher  L1BlobsFetcher
	plasmaFetcher PlasmaInputFetcher
	ecotoneTime   *uint64
}

func NewDataSourceFactory(log log.Logger, cfg *rollup.Config, fetcher L1Fetcher, blobsFetcher L1BlobsFetcher, plasmaFetcher PlasmaInputFetcher) *DataSourceFactory {
	config := DataSourceConfig{
		l1Signer:          cfg.L1Signer(),
		batchInboxAddress: cfg.BatchInboxAddress,
		plasmaEnabled:     cfg.UsePlasma,
	}
	return &DataSourceFactory{
		log:           log,
		dsCfg:         config,
		fetcher:       fetcher,
		blobsFetcher:  blobsFetcher,
		plasmaFetcher: plasmaFetcher,
		ecotoneTime:   cfg.EcotoneTime,
	}
}

// OpenData returns the appropriate data source for the L1 block `ref`.
func (ds *DataSourceFactory) OpenData(ctx context.Context, ref eth.L1BlockRef, batcherAddr common.Address) (DataIter, error) {
	// Creates a data iterator from the blob or calldata source so we can forward it
	// to the plasma source, if enabled, as it still requires an L1 data source for
	// fetching input commitments.
	var src DataIter
	if ds.ecotoneTime != nil && ref.Time >= *ds.ecotoneTime {
		if ds.blobsFetcher == nil {
			return nil, fmt.Errorf("ecotone upgrade active but beacon endpoint not configured")
		}
		src = NewBlobDataSource(ctx, ds.log, ds.dsCfg, ds.fetcher, ds.blobsFetcher, ref, batcherAddr)
	} else {
		src = NewCalldataSource(ctx, ds.log, ds.dsCfg, ds.fetcher, ref, batcherAddr)
	}
	if ds.dsCfg.plasmaEnabled {
		// plasma([calldata | blobdata](l1Ref)) -> data
		return NewPlasmaDataSource(ds.log, src, ds.fetcher, ds.plasmaFetcher, ref.ID()), nil
	}
	return src, nil
}
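
// A wiring sketch (illustrative only; logger, cfg, l1Client, beaconClient and
// plasmaDA are hypothetical values supplied by node setup):
//
//	factory := NewDataSourceFactory(logger, cfg, l1Client, beaconClient, plasmaDA)
//	iter, err := factory.OpenData(ctx, l1Ref, batcherAddr)
//
// Pre-Ecotone blocks yield a calldata source, post-Ecotone blocks a blob
// source, and with plasma enabled either source is wrapped so commitments
// found on L1 are resolved against the DA storage service.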

// DataSourceConfig regroups the mandatory rollup.Config fields needed for DataFromEVMTransactions.
type DataSourceConfig struct {
	l1Signer          types.Signer
	batchInboxAddress common.Address
	plasmaEnabled     bool
}

// isValidBatchTx returns true if:
//  1. the transaction has a To() address that matches the batch inbox address, and
//  2. the transaction has a valid signature from the batcher address
func isValidBatchTx(tx *types.Transaction, l1Signer types.Signer, batchInboxAddr, batcherAddr common.Address) bool {
	to := tx.To()
	if to == nil || *to != batchInboxAddr {
		return false
	}
	seqDataSubmitter, err := l1Signer.Sender(tx) // optimization: only derive sender if To is correct
	if err != nil {
		log.Warn("tx in inbox with invalid signature", "hash", tx.Hash(), "err", err)
		return false
	}
	// some random L1 user might have sent a transaction to our batch inbox, ignore them
	if seqDataSubmitter != batcherAddr {
		log.Warn("tx in inbox with unauthorized submitter", "addr", seqDataSubmitter, "hash", tx.Hash(), "err", err)
		return false
	}
	return true
}
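
// A filtering sketch (hedged: txs, dsCfg, and batcherAddr are assumed inputs,
// and this loop only illustrates the shape of a filter such as the
// DataFromEVMTransactions mentioned in the DataSourceConfig comment above):
//
//	var out []eth.Data
//	for _, tx := range txs {
//		if isValidBatchTx(tx, dsCfg.l1Signer, dsCfg.batchInboxAddress, batcherAddr) {
//			out = append(out, tx.Data())
//		}
//	}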