Commit b12bf0ea authored by Adrian Sutton

Merge remote-tracking branch 'origin/develop' into aj/register-output-game

parents fb70f8c7 c7f6938e
......@@ -127,6 +127,9 @@ jobs:
resource_class: xlarge
steps:
- checkout
- run:
name: git submodules
command: make submodules
- check-changed:
patterns: op-chain-ops,packages/
- restore_cache:
......@@ -317,12 +320,16 @@ jobs:
resource_class: xlarge
steps:
- checkout
- run:
name: git submodules
command: make submodules
- check-changed:
patterns: contracts-bedrock,op-node
- run:
name: print forge version
command: forge --version
working_directory: packages/contracts-bedrock
# We do not use the pre-built contracts because forge coverage uses different optimizer settings
- run:
name: test and generate coverage
command: pnpm coverage:lcov
......@@ -339,11 +346,23 @@ jobs:
contracts-bedrock-tests:
docker:
- image: <<pipeline.parameters.ci_builder_image>>
resource_class: xlarge
resource_class: medium
steps:
- checkout
- run:
name: git submodules
command: make submodules
- restore_cache:
name: Restore PNPM Package Cache
keys:
- pnpm-packages-v2-{{ checksum "pnpm-lock.yaml" }}
- attach_workspace: { at: "." }
- check-changed:
patterns: contracts-bedrock,op-node
# populate node modules from the cache
- run:
name: Install dependencies
command: pnpm install --frozen-lockfile --prefer-offline
- run:
name: print forge version
command: forge --version
......@@ -359,22 +378,33 @@ jobs:
contracts-bedrock-checks:
docker:
- image: <<pipeline.parameters.ci_builder_image>>
resource_class: xlarge
resource_class: medium
steps:
- checkout
- run:
name: git submodules
command: make submodules
- restore_cache:
name: Restore PNPM Package Cache
keys:
- pnpm-packages-v2-{{ checksum "pnpm-lock.yaml" }}
- attach_workspace: { at: "." }
- check-changed:
patterns: contracts-bedrock,op-node
# populate node modules from the cache
- run:
name: Install dependencies
command: pnpm install --frozen-lockfile --prefer-offline
# Note: this step needs to come first because one of the later steps modifies the cache & forces a contracts rebuild
- run:
name: build contracts
command: pnpm build
name: semver lock
command: |
pnpm semver-lock
git diff --exit-code semver-lock.json || echo "export SEMVER_LOCK_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: check deploy configs
command: pnpm validate-deploy-configs || echo "export DEPLOY_CONFIGS_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: lint
......@@ -396,22 +426,12 @@ jobs:
pnpm storage-snapshot
git diff --exit-code .storage-layout || echo "export STORAGE_SNAPSHOT_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: semver lock
command: |
pnpm semver-lock
git diff --exit-code semver-lock.json || echo "export SEMVER_LOCK_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: invariant docs
command: |
pnpm autogen:invariant-docs
git diff --exit-code ./invariant-docs/*.md || echo "export INVARIANT_DOCS_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: check deploy configs
command: pnpm validate-deploy-configs || echo "export DEPLOY_CONFIGS_STATUS=1" >> "$BASH_ENV"
working_directory: packages/contracts-bedrock
- run:
name: check statuses
command: |
......@@ -446,7 +466,7 @@ jobs:
contracts-bedrock-slither:
docker:
- image: <<pipeline.parameters.ci_builder_image>>
resource_class: xlarge
resource_class: medium
steps:
- checkout
- check-changed:
......@@ -1293,7 +1313,9 @@ workflows:
package_name: core-utils
requires:
- pnpm-monorepo
- contracts-bedrock-tests
- contracts-bedrock-tests:
requires:
- pnpm-monorepo
- contracts-bedrock-coverage
- contracts-bedrock-checks:
requires:
......
......@@ -70,6 +70,8 @@ func NewBackendWithGenesisTimestamp(ts uint64, shanghai bool) *backends.Simulate
LondonBlock: big.NewInt(0),
ArrowGlacierBlock: big.NewInt(0),
GrayGlacierBlock: big.NewInt(0),
ShanghaiTime: nil,
CancunTime: nil,
// Activated proof of stake. We manually build/commit blocks in the simulator anyway,
// and the timestamp verification of PoS is not against the wallclock,
// preventing blocks from getting stuck temporarily in the future-blocks queue, decreasing setup time a lot.
......
......@@ -217,6 +217,9 @@ type DeployConfig struct {
// RequiredProtocolVersion indicates the protocol version that
// nodes are recommended to adopt, to stay in sync with the network.
RecommendedProtocolVersion params.ProtocolVersion `json:"recommendedProtocolVersion"`
// When Cancun activates. Relative to L1 genesis.
L1CancunTimeOffset *uint64 `json:"l1CancunTimeOffset,omitempty"`
}
// Copy will deeply copy the DeployConfig. This does a JSON roundtrip to copy
......
......@@ -64,6 +64,7 @@ func NewL2Genesis(config *DeployConfig, block *types.Block) (*core.Genesis, erro
RegolithTime: config.RegolithTime(block.Time()),
CanyonTime: config.CanyonTime(block.Time()),
ShanghaiTime: config.CanyonTime(block.Time()),
CancunTime: nil, // no Dencun on L2 yet.
Optimism: &params.OptimismConfig{
EIP1559Denominator: eip1559Denom,
EIP1559Elasticity: eip1559Elasticity,
......@@ -134,6 +135,8 @@ func NewL1Genesis(config *DeployConfig) (*core.Genesis, error) {
LondonBlock: big.NewInt(0),
ArrowGlacierBlock: big.NewInt(0),
GrayGlacierBlock: big.NewInt(0),
ShanghaiTime: nil,
CancunTime: nil,
}
extraData := make([]byte, 0)
......@@ -168,6 +171,10 @@ func NewL1Genesis(config *DeployConfig) (*core.Genesis, error) {
if timestamp == 0 {
timestamp = hexutil.Uint64(time.Now().Unix())
}
if !config.L1UseClique && config.L1CancunTimeOffset != nil {
cancunTime := uint64(timestamp) + *config.L1CancunTimeOffset
chainConfig.CancunTime = &cancunTime
}
return &core.Genesis{
Config: &chainConfig,
......
......@@ -91,7 +91,7 @@ func registerCannon(
client *ethclient.Client) {
resourceCreator := func(addr common.Address, contract *contracts.FaultDisputeGameContract, gameDepth uint64, dir string) (faultTypes.TraceAccessor, gameValidator, error) {
logger := logger.New("game", addr)
provider, err := cannon.NewTraceProvider(ctx, logger, m, cfg, contract, dir, gameDepth)
provider, err := cannon.NewTraceProvider(ctx, logger, m, cfg, contract, cannon.NoLocalContext, dir, gameDepth)
if err != nil {
return nil, nil, fmt.Errorf("create cannon trace provider: %w", err)
}
......
......@@ -23,6 +23,10 @@ import (
const (
proofsDir = "proofs"
diskStateCache = "state.json.gz"
// NoLocalContext is the LocalContext value used when the cannon trace provider is used alone instead of as part
// of a split game.
NoLocalContext = 0
)
type proofData struct {
......@@ -44,18 +48,19 @@ type ProofGenerator interface {
}
type CannonTraceProvider struct {
logger log.Logger
dir string
prestate string
generator ProofGenerator
gameDepth uint64
logger log.Logger
dir string
prestate string
generator ProofGenerator
gameDepth uint64
localContext uint64
// lastStep stores the last step in the actual trace if known. 0 indicates unknown.
// Cached as an optimisation to avoid repeatedly attempting to execute beyond the end of the trace.
lastStep uint64
}
func NewTraceProvider(ctx context.Context, logger log.Logger, m CannonMetricer, cfg *config.Config, gameContract *contracts.FaultDisputeGameContract, dir string, gameDepth uint64) (*CannonTraceProvider, error) {
func NewTraceProvider(ctx context.Context, logger log.Logger, m CannonMetricer, cfg *config.Config, gameContract *contracts.FaultDisputeGameContract, localContext uint64, dir string, gameDepth uint64) (*CannonTraceProvider, error) {
l2Client, err := ethclient.DialContext(ctx, cfg.CannonL2)
if err != nil {
return nil, fmt.Errorf("dial l2 client %v: %w", cfg.CannonL2, err)
......@@ -65,16 +70,17 @@ func NewTraceProvider(ctx context.Context, logger log.Logger, m CannonMetricer,
if err != nil {
return nil, fmt.Errorf("fetch local game inputs: %w", err)
}
return NewTraceProviderFromInputs(logger, m, cfg, localInputs, dir, gameDepth), nil
return NewTraceProviderFromInputs(logger, m, cfg, localContext, localInputs, dir, gameDepth), nil
}
func NewTraceProviderFromInputs(logger log.Logger, m CannonMetricer, cfg *config.Config, localInputs LocalGameInputs, dir string, gameDepth uint64) *CannonTraceProvider {
func NewTraceProviderFromInputs(logger log.Logger, m CannonMetricer, cfg *config.Config, localContext uint64, localInputs LocalGameInputs, dir string, gameDepth uint64) *CannonTraceProvider {
return &CannonTraceProvider{
logger: logger,
dir: dir,
prestate: cfg.CannonAbsolutePreState,
generator: NewExecutor(logger, m, cfg, localInputs),
gameDepth: gameDepth,
logger: logger,
dir: dir,
prestate: cfg.CannonAbsolutePreState,
generator: NewExecutor(logger, m, cfg, localInputs),
gameDepth: gameDepth,
localContext: localContext,
}
}
......@@ -118,8 +124,7 @@ func (p *CannonTraceProvider) GetStepData(ctx context.Context, pos types.Positio
}
var oracleData *types.PreimageOracleData
if len(proof.OracleKey) > 0 {
// TODO(client-pod#104): Replace the LocalContext `0` argument below with the correct local context.
oracleData = types.NewPreimageOracleData(0, proof.OracleKey, proof.OracleValue, proof.OracleOffset)
oracleData = types.NewPreimageOracleData(p.localContext, proof.OracleKey, proof.OracleValue, proof.OracleOffset)
}
return value, data, oracleData, nil
}
......
......@@ -14,14 +14,15 @@ import (
func TestDencunL1Fork(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
offset := uint64(24)
dp.DeployConfig.L1CancunTimeOffset = &offset
sd := e2eutils.Setup(t, dp, defaultAlloc)
activation := sd.L1Cfg.Timestamp + 24
sd.L1Cfg.Config.CancunTime = &activation
log := testlog.Logger(t, log.LvlDebug)
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
l1Head := miner.l1Chain.CurrentBlock()
require.False(t, sd.L1Cfg.Config.IsCancun(l1Head.Number, l1Head.Time), "Cancun not active yet")
require.Nil(t, l1Head.ExcessBlobGas, "Cancun blob gas not in header")
// start op-nodes
sequencer.ActL2PipelineFull(t)
......@@ -35,6 +36,7 @@ func TestDencunL1Fork(gt *testing.T) {
// verify Cancun is active
l1Head = miner.l1Chain.CurrentBlock()
require.True(t, sd.L1Cfg.Config.IsCancun(l1Head.Number, l1Head.Time), "Cancun active")
require.NotNil(t, l1Head.ExcessBlobGas, "Cancun blob gas in header")
// build L2 chain up to and including L2 blocks referencing Cancun L1 blocks
sequencer.ActL1HeadSignal(t)
......@@ -51,3 +53,47 @@ func TestDencunL1Fork(gt *testing.T) {
require.Equal(t, l1Head.Hash(), verifier.SyncStatus().SafeL2.L1Origin.Hash, "verifier synced L1 chain that includes Cancun headers")
require.Equal(t, sequencer.SyncStatus().UnsafeL2, verifier.SyncStatus().UnsafeL2, "verifier and sequencer agree")
}
func TestDencunL1ForkAtGenesis(gt *testing.T) {
t := NewDefaultTesting(gt)
dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
offset := uint64(0)
dp.DeployConfig.L1CancunTimeOffset = &offset
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlDebug)
_, _, miner, sequencer, _, verifier, _, batcher := setupReorgTestActors(t, dp, sd, log)
l1Head := miner.l1Chain.CurrentBlock()
require.True(t, sd.L1Cfg.Config.IsCancun(l1Head.Number, l1Head.Time), "Cancun active at genesis")
require.NotNil(t, l1Head.ExcessBlobGas, "Cancun blob gas in header")
// start op-nodes
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
// build empty L1 blocks
miner.ActL1SetFeeRecipient(common.Address{'A', 0})
miner.ActEmptyBlock(t)
miner.ActEmptyBlock(t)
// verify Cancun is still active
l1Head = miner.l1Chain.CurrentBlock()
require.True(t, sd.L1Cfg.Config.IsCancun(l1Head.Number, l1Head.Time), "Cancun active")
require.NotNil(t, l1Head.ExcessBlobGas, "Cancun blob gas in header")
// build L2 chain
sequencer.ActL1HeadSignal(t)
sequencer.ActBuildToL1Head(t)
miner.ActL1StartBlock(12)(t)
batcher.ActSubmitAll(t)
miner.ActL1IncludeTx(batcher.batcherAddr)(t)
miner.ActL1EndBlock(t)
// sync verifier
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
// verify verifier accepted Cancun L1 inputs
require.Equal(t, l1Head.Hash(), verifier.SyncStatus().SafeL2.L1Origin.Hash, "verifier synced L1 chain that includes Cancun headers")
require.Equal(t, sequencer.SyncStatus().UnsafeL2, verifier.SyncStatus().UnsafeL2, "verifier and sequencer agree")
}
......@@ -4,6 +4,7 @@ import (
"errors"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/txpool/blobpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/ethconfig"
......@@ -51,6 +52,11 @@ func NewL1Replica(t Testing, log log.Logger, genesis *core.Genesis) *L1Replica {
NetworkId: genesis.Config.ChainID.Uint64(),
Genesis: genesis,
RollupDisableTxPoolGossip: true,
BlobPool: blobpool.Config{
Datadir: t.TempDir(),
Datacap: blobpool.DefaultConfig.Datacap,
PriceBump: blobpool.DefaultConfig.PriceBump,
},
}
nodeCfg := &node.Config{
Name: "l1-geth",
......
......@@ -46,7 +46,7 @@ func (g *CannonGameHelper) CreateHonestActor(ctx context.Context, rollupCfg *rol
maxDepth := g.MaxDepth(ctx)
gameContract, err := contracts.NewFaultDisputeGameContract(g.addr, batching.NewMultiCaller(l1Client.Client(), batching.DefaultBatchSize))
g.require.NoError(err, "Create game contract bindings")
provider, err := cannon.NewTraceProvider(ctx, logger, metrics.NoopMetrics, cfg, gameContract, filepath.Join(cfg.Datadir, "honest"), uint64(maxDepth))
provider, err := cannon.NewTraceProvider(ctx, logger, metrics.NoopMetrics, cfg, gameContract, cannon.NoLocalContext, filepath.Join(cfg.Datadir, "honest"), uint64(maxDepth))
g.require.NoError(err, "create cannon trace provider")
return &HonestHelper{
......
......@@ -225,6 +225,7 @@ func (h *FactoryHelper) StartCannonGameWithCorrectRoot(ctx context.Context, roll
testlog.Logger(h.t, log.LvlInfo).New("role", "CorrectTrace"),
metrics.NoopMetrics,
cfg,
cannon.NoLocalContext,
inputs,
cfg.Datadir,
maxDepth.Uint64(),
......
package fakebeacon
import (
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io/fs"
"net"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/log"
)
// FakeBeacon stands in for a beacon node in testing, without driving any chain-building.
// It merely serves a fake beacon API and holds on to blobs,
// to complement the actual block-building that happens in testing (e.g. through the fake-consensus geth module).
type FakeBeacon struct {
log log.Logger
// directory to store blob contents in after the blobs are persisted in a block
blobsDir string
blobsLock sync.Mutex
beaconSrv *http.Server
beaconAPIListener net.Listener
genesisTime uint64
blockTime uint64
}
func NewBeacon(log log.Logger, blobsDir string, genesisTime uint64, blockTime uint64) *FakeBeacon {
return &FakeBeacon{
log: log,
blobsDir: blobsDir,
genesisTime: genesisTime,
blockTime: blockTime,
}
}
func (f *FakeBeacon) Start(addr string) error {
listener, err := net.Listen("tcp", addr)
if err != nil {
return fmt.Errorf("failed to open tcp listener for http beacon api server: %w", err)
}
f.beaconAPIListener = listener
mux := new(http.ServeMux)
mux.HandleFunc("/eth/v1/beacon/genesis", func(w http.ResponseWriter, r *http.Request) {
err := json.NewEncoder(w).Encode(&eth.APIGenesisResponse{Data: eth.ReducedGenesisData{GenesisTime: eth.Uint64String(f.genesisTime)}})
if err != nil {
f.log.Error("genesis handler err", "err", err)
}
})
mux.HandleFunc("/eth/v1/config/spec", func(w http.ResponseWriter, r *http.Request) {
err := json.NewEncoder(w).Encode(&eth.APIConfigResponse{Data: eth.ReducedConfigData{SecondsPerSlot: eth.Uint64String(f.blockTime)}})
if err != nil {
f.log.Error("config handler err", "err", err)
}
})
mux.HandleFunc("/eth/v1/beacon/blob_sidecars/", func(w http.ResponseWriter, r *http.Request) {
blockID := strings.TrimPrefix(r.URL.Path, "/eth/v1/beacon/blob_sidecars/")
slot, err := strconv.ParseUint(blockID, 10, 64)
if err != nil {
f.log.Error("could not parse block id from request", "url", r.URL.Path)
w.WriteHeader(http.StatusBadRequest)
return
}
bundle, err := f.LoadBlobsBundle(slot)
if err != nil {
f.log.Error("failed to load blobs bundle", "slot", slot)
w.WriteHeader(http.StatusInternalServerError)
return
}
query := r.URL.Query()
rawIndices := query["indices"]
indices := make([]int, 0, len(bundle.Blobs))
if len(rawIndices) == 0 {
// request is for all blobs
for i := range bundle.Blobs {
indices = append(indices, i)
}
} else {
for _, raw := range rawIndices {
ix, err := strconv.ParseUint(raw, 10, 64)
if err != nil {
f.log.Error("could not parse index from request", "url", r.URL)
w.WriteHeader(http.StatusBadRequest)
return
}
indices = append(indices, int(ix))
}
}
var mockBeaconBlockRoot [32]byte
mockBeaconBlockRoot[0] = 42
binary.LittleEndian.PutUint64(mockBeaconBlockRoot[32-8:], slot)
sidecars := make([]*eth.BlobSidecar, len(indices))
for i, ix := range indices {
if ix < 0 || ix >= len(bundle.Blobs) {
f.log.Error("blob index from request is out of range", "url", r.URL)
w.WriteHeader(http.StatusBadRequest)
return
}
sidecars[i] = &eth.BlobSidecar{
BlockRoot: mockBeaconBlockRoot,
Slot: eth.Uint64String(slot),
Index: eth.Uint64String(i),
KZGCommitment: eth.Bytes48(bundle.Commitments[ix]),
KZGProof: eth.Bytes48(bundle.Proofs[ix]),
}
copy(sidecars[i].Blob[:], bundle.Blobs[ix])
}
if err := json.NewEncoder(w).Encode(&eth.APIGetBlobSidecarsResponse{Data: sidecars}); err != nil {
f.log.Error("blobs handler err", "err", err)
}
})
f.beaconSrv = &http.Server{
Handler: mux,
ReadTimeout: time.Second * 20,
ReadHeaderTimeout: time.Second * 20,
WriteTimeout: time.Second * 20,
IdleTimeout: time.Second * 20,
}
go func() {
if err := f.beaconSrv.Serve(f.beaconAPIListener); err != nil && !errors.Is(err, http.ErrServerClosed) {
f.log.Error("failed to start fake-pos beacon server for blobs testing", "err", err)
}
}()
return nil
}
func (f *FakeBeacon) StoreBlobsBundle(slot uint64, bundle *engine.BlobsBundleV1) error {
data, err := json.Marshal(bundle)
if err != nil {
return fmt.Errorf("failed to encode blobs bundle of slot %d: %w", slot, err)
}
f.blobsLock.Lock()
defer f.blobsLock.Unlock()
bundlePath := fmt.Sprintf("blobs_bundle_%d.json", slot)
if err := os.MkdirAll(f.blobsDir, 0755); err != nil {
return fmt.Errorf("failed to create dir for blob storage: %w", err)
}
err = os.WriteFile(filepath.Join(f.blobsDir, bundlePath), data, 0755)
if err != nil {
return fmt.Errorf("failed to write blobs bundle of slot %d: %w", slot, err)
}
return nil
}
func (f *FakeBeacon) LoadBlobsBundle(slot uint64) (*engine.BlobsBundleV1, error) {
f.blobsLock.Lock()
defer f.blobsLock.Unlock()
bundlePath := fmt.Sprintf("blobs_bundle_%d.json", slot)
data, err := os.ReadFile(filepath.Join(f.blobsDir, bundlePath))
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return nil, fmt.Errorf("no blobs bundle found for slot %d (%q): %w", slot, bundlePath, ethereum.NotFound)
} else {
return nil, fmt.Errorf("failed to read blobs bundle of slot %d (%q): %w", slot, bundlePath, err)
}
}
var out engine.BlobsBundleV1
if err := json.Unmarshal(data, &out); err != nil {
return nil, fmt.Errorf("failed to decode blobs bundle of slot %d (%q): %w", slot, bundlePath, err)
}
return &out, nil
}
func (f *FakeBeacon) Close() error {
var out error
if f.beaconSrv != nil {
out = errors.Join(out, f.beaconSrv.Close())
}
if f.beaconAPIListener != nil {
out = errors.Join(out, f.beaconAPIListener.Close())
}
return out
}
func (f *FakeBeacon) BeaconAddr() string {
return "http://" + f.beaconAPIListener.Addr().String()
}
package geth
import (
"encoding/binary"
"math/big"
"math/rand"
"time"
......@@ -8,15 +10,21 @@ import (
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum-optimism/optimism/op-service/clock"
opeth "github.com/ethereum-optimism/optimism/op-service/eth"
"github.com/ethereum-optimism/optimism/op-service/testutils"
)
type Beacon interface {
StoreBlobsBundle(slot uint64, bundle *engine.BlobsBundleV1) error
}
// fakePoS is a testing-only utility to attach to Geth,
// to build a fake proof-of-stake L1 chain with fixed block time and basic lagging safe/finalized blocks.
type fakePoS struct {
......@@ -32,6 +40,14 @@ type fakePoS struct {
engineAPI *catalyst.ConsensusAPI
sub ethereum.Subscription
beacon Beacon
}
func (f *fakePoS) FakeBeaconBlockRoot(time uint64) common.Hash {
var dat [8]byte
binary.LittleEndian.PutUint64(dat[:], time)
return crypto.Keccak256Hash(dat[:])
}
func (f *fakePoS) Start() error {
......@@ -81,16 +97,29 @@ func (f *fakePoS) Start() error {
Amount: uint64(withdrawalsRNG.Intn(50_000_000_000) + 1),
}
}
res, err := f.engineAPI.ForkchoiceUpdatedV2(engine.ForkchoiceStateV1{
HeadBlockHash: head.Hash(),
SafeBlockHash: safe.Hash(),
FinalizedBlockHash: finalized.Hash(),
}, &engine.PayloadAttributes{
attrs := &engine.PayloadAttributes{
Timestamp: newBlockTime,
Random: common.Hash{},
SuggestedFeeRecipient: head.Coinbase,
Withdrawals: withdrawals,
})
}
parentBeaconBlockRoot := f.FakeBeaconBlockRoot(head.Time) // parent beacon block root
isCancun := f.eth.BlockChain().Config().IsCancun(new(big.Int).SetUint64(head.Number.Uint64()+1), newBlockTime)
if isCancun {
attrs.BeaconRoot = &parentBeaconBlockRoot
}
fcState := engine.ForkchoiceStateV1{
HeadBlockHash: head.Hash(),
SafeBlockHash: safe.Hash(),
FinalizedBlockHash: finalized.Hash(),
}
var err error
var res engine.ForkChoiceResponse
if isCancun {
res, err = f.engineAPI.ForkchoiceUpdatedV3(fcState, attrs)
} else {
res, err = f.engineAPI.ForkchoiceUpdatedV2(fcState, attrs)
}
if err != nil {
f.log.Error("failed to start building L1 block", "err", err)
continue
......@@ -114,10 +143,41 @@ func (f *fakePoS) Start() error {
f.log.Error("failed to finish building L1 block", "err", err)
continue
}
if _, err := f.engineAPI.NewPayloadV2(*envelope.ExecutionPayload); err != nil {
f.log.Error("failed to insert built L1 block", "err", err)
blobHashes := make([]common.Hash, 0) // must be non-nil even when empty, due to geth engine API checks
for _, commitment := range envelope.BlobsBundle.Commitments {
if len(commitment) != 48 {
f.log.Error("got malformed kzg commitment from engine", "commitment", commitment)
break
}
blobHashes = append(blobHashes, opeth.KZGToVersionedHash(*(*[48]byte)(commitment)))
}
if len(blobHashes) != len(envelope.BlobsBundle.Commitments) {
f.log.Error("invalid or incomplete blob data", "collected", len(blobHashes), "engine", len(envelope.BlobsBundle.Commitments))
continue
}
if isCancun {
if _, err := f.engineAPI.NewPayloadV3(*envelope.ExecutionPayload, blobHashes, &parentBeaconBlockRoot); err != nil {
f.log.Error("failed to insert built L1 block", "err", err)
continue
}
} else {
if _, err := f.engineAPI.NewPayloadV2(*envelope.ExecutionPayload); err != nil {
f.log.Error("failed to insert built L1 block", "err", err)
continue
}
}
if envelope.BlobsBundle != nil {
slot := (envelope.ExecutionPayload.Timestamp - f.eth.BlockChain().Genesis().Time()) / f.blockTime
if f.beacon == nil {
f.log.Error("no blobs storage available")
continue
}
if err := f.beacon.StoreBlobsBundle(slot, envelope.BlobsBundle); err != nil {
f.log.Error("failed to persist blobs-bundle of block, not making block canonical now", "err", err)
continue
}
}
if _, err := f.engineAPI.ForkchoiceUpdatedV2(engine.ForkchoiceStateV1{
HeadBlockHash: envelope.ExecutionPayload.BlockHash,
SafeBlockHash: safe.Hash(),
......
......@@ -8,6 +8,7 @@ import (
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/txpool/blobpool"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/ethconfig"
......@@ -21,10 +22,15 @@ import (
_ "github.com/ethereum/go-ethereum/eth/tracers/native"
)
func InitL1(chainID uint64, blockTime uint64, genesis *core.Genesis, c clock.Clock, opts ...GethOption) (*node.Node, *eth.Ethereum, error) {
func InitL1(chainID uint64, blockTime uint64, genesis *core.Genesis, c clock.Clock, blobPoolDir string, beaconSrv Beacon, opts ...GethOption) (*node.Node, *eth.Ethereum, error) {
ethConfig := &ethconfig.Config{
NetworkId: chainID,
Genesis: genesis,
BlobPool: blobpool.Config{
Datadir: blobPoolDir,
Datacap: blobpool.DefaultConfig.Datacap,
PriceBump: blobpool.DefaultConfig.PriceBump,
},
}
nodeConfig := &node.Config{
Name: "l1-geth",
......@@ -53,6 +59,7 @@ func InitL1(chainID uint64, blockTime uint64, genesis *core.Genesis, c clock.Clo
finalizedDistance: 8,
safeDistance: 4,
engineAPI: catalyst.NewConsensusAPI(l1Eth),
beacon: beaconSrv,
})
return l1Node, l1Eth, nil
......
......@@ -24,6 +24,8 @@ type ExternalRunner struct {
BinPath string
Genesis *core.Genesis
JWTPath string
// 4844: a datadir specifically for tx-pool blobs
BlobPoolPath string
}
type ExternalEthClient struct {
......
......@@ -42,6 +42,7 @@ import (
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis"
"github.com/ethereum-optimism/optimism/op-e2e/config"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/fakebeacon"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/metrics"
......@@ -110,6 +111,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig {
L1InfoPredeployAddress: predeploys.L1BlockAddr,
JWTFilePath: writeDefaultJWT(t),
JWTSecret: testingJWTSecret,
BlobsPath: t.TempDir(),
Nodes: map[string]*rollupNode.Config{
"sequencer": {
Driver: driver.Config{
......@@ -176,6 +178,8 @@ type SystemConfig struct {
JWTFilePath string
JWTSecret [32]byte
BlobsPath string
Premine map[common.Address]*big.Int
Nodes map[string]*rollupNode.Config // Per node config. Don't use to populate rollup.Config
Loggers map[string]log.Logger
......@@ -260,6 +264,8 @@ type System struct {
BatchSubmitter *bss.BatcherService
Mocknet mocknet.Mocknet
L1BeaconAPIAddr string
// TimeTravelClock is nil unless SystemConfig.SupportL1TimeTravel was set to true
// It provides access to the clock instance used by the L1 node. Calling TimeTravelClock.AdvanceBy
// allows tests to quickly time travel L1 into the future.
......@@ -438,8 +444,19 @@ func (cfg SystemConfig) Start(t *testing.T, _opts ...SystemConfigOption) (*Syste
}
sys.RollupConfig = &defaultConfig
// Create a fake Beacon node to hold on to blobs created by the L1 miner, and to serve them to L2
bcn := fakebeacon.NewBeacon(testlog.Logger(t, log.LvlInfo).New("role", "l1_cl"),
path.Join(cfg.BlobsPath, "l1_cl"), l1Genesis.Timestamp, cfg.DeployConfig.L1BlockTime)
t.Cleanup(func() {
_ = bcn.Close()
})
require.NoError(t, bcn.Start("127.0.0.1:0"))
beaconApiAddr := bcn.BeaconAddr()
require.NotEmpty(t, beaconApiAddr, "beacon API listener must be up")
// Initialize nodes
l1Node, l1Backend, err := geth.InitL1(cfg.DeployConfig.L1ChainID, cfg.DeployConfig.L1BlockTime, l1Genesis, c, cfg.GethOptions["l1"]...)
l1Node, l1Backend, err := geth.InitL1(cfg.DeployConfig.L1ChainID, cfg.DeployConfig.L1BlockTime, l1Genesis, c,
path.Join(cfg.BlobsPath, "l1_el"), bcn, cfg.GethOptions["l1"]...)
if err != nil {
return nil, err
}
......
......@@ -155,6 +155,22 @@ func TestL2OutputSubmitter(t *testing.T) {
}
}
func TestSystemE2EDencunAtGenesis(t *testing.T) {
InitParallel(t)
cfg := DefaultSystemConfig(t)
genesisActivation := uint64(0)
cfg.DeployConfig.L1CancunTimeOffset = &genesisActivation
sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system")
defer sys.Close()
runE2ESystemTest(t, sys)
head, err := sys.Clients["l1"].BlockByNumber(context.Background(), big.NewInt(0))
require.NoError(t, err)
require.NotNil(t, head.ExcessBlobGas(), "L1 is building dencun blocks since genesis")
}
// TestSystemE2E sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that L1 deposits are reflected on L2.
// All nodes are run in process (but are the full nodes, not mocked or stubbed).
func TestSystemE2E(t *testing.T) {
......@@ -165,7 +181,9 @@ func TestSystemE2E(t *testing.T) {
sys, err := cfg.Start(t)
require.Nil(t, err, "Error starting up system")
defer sys.Close()
}
func runE2ESystemTest(t *testing.T, sys *System) {
log := testlog.Logger(t, log.LvlInfo)
log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time)
......@@ -185,11 +203,11 @@ func TestSystemE2E(t *testing.T) {
require.Nil(t, err)
// Send deposit transaction
opts, err := bind.NewKeyedTransactorWithChainID(ethPrivKey, cfg.L1ChainIDBig())
opts, err := bind.NewKeyedTransactorWithChainID(ethPrivKey, sys.cfg.L1ChainIDBig())
require.Nil(t, err)
mintAmount := big.NewInt(1_000_000_000_000)
opts.Value = mintAmount
SendDepositTx(t, cfg, l1Client, l2Verif, opts, func(l2Opts *DepositTxOpts) {})
SendDepositTx(t, sys.cfg, l1Client, l2Verif, opts, func(l2Opts *DepositTxOpts) {})
// Confirm balance
ctx, cancel = context.WithTimeout(context.Background(), 15*time.Second)
......@@ -202,7 +220,7 @@ func TestSystemE2E(t *testing.T) {
require.Equal(t, mintAmount, diff, "Did not get expected balance change")
// Submit TX to L2 sequencer node
receipt := SendL2Tx(t, cfg, l2Seq, ethPrivKey, func(opts *TxOpts) {
receipt := SendL2Tx(t, sys.cfg, l2Seq, ethPrivKey, func(opts *TxOpts) {
opts.Value = big.NewInt(1_000_000_000)
opts.Nonce = 1 // Already have deposit
opts.ToAddr = &common.Address{0xff, 0xff}
......
package eth
import (
"crypto/sha256"
"fmt"
"reflect"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/params"
)
const (
BlobSize = 4096 * 32
MaxBlobDataSize = 4096*31 - 4
)
type Blob [BlobSize]byte
func (b *Blob) KZGBlob() *kzg4844.Blob {
return (*kzg4844.Blob)(b)
}
func (b *Blob) UnmarshalJSON(text []byte) error {
return hexutil.UnmarshalFixedJSON(reflect.TypeOf(b), text, b[:])
}
func (b *Blob) UnmarshalText(text []byte) error {
return hexutil.UnmarshalFixedText("Blob", text, b[:])
}
func (b *Blob) MarshalText() ([]byte, error) {
return hexutil.Bytes(b[:]).MarshalText()
}
func (b *Blob) String() string {
return hexutil.Encode(b[:])
}
// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging.
func (b *Blob) TerminalString() string {
return fmt.Sprintf("%x..%x", b[:3], b[BlobSize-3:])
}
func (b *Blob) ComputeKZGCommitment() (kzg4844.Commitment, error) {
return kzg4844.BlobToCommitment(*b.KZGBlob())
}
// KZGToVersionedHash computes the "blob hash" (a.k.a. versioned-hash) of a blob-commitment, as used in a blob-tx.
// We implement it here because it is unfortunately not (currently) exposed by geth.
func KZGToVersionedHash(commitment kzg4844.Commitment) (out common.Hash) {
// EIP-4844 spec:
// def kzg_to_versioned_hash(commitment: KZGCommitment) -> VersionedHash:
// return VERSIONED_HASH_VERSION_KZG + sha256(commitment)[1:]
h := sha256.New()
h.Write(commitment[:])
_ = h.Sum(out[:0])
out[0] = params.BlobTxHashVersion
return out
}
// VerifyBlobProof verifies that the given blob and proof corresponds to the given commitment,
// returning error if the verification fails.
func VerifyBlobProof(blob *Blob, commitment kzg4844.Commitment, proof kzg4844.Proof) error {
return kzg4844.VerifyBlobProof(*blob.KZGBlob(), commitment, proof)
}
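A hedged sketch of how the blob helpers above fit together: compute a blob's KZG commitment, derive the versioned hash used by blob transactions, and verify a blob proof. `kzg4844.ComputeBlobProof` is assumed to be available in the go-ethereum version in use; the zero blob is just example data:

```go
package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum/go-ethereum/crypto/kzg4844"
)

func main() {
	var blob eth.Blob // the zero blob is a valid, if boring, payload

	// Commitment to the blob contents.
	commitment, err := blob.ComputeKZGCommitment()
	if err != nil {
		panic(err)
	}

	// The versioned hash is what a blob transaction commits to on L1.
	fmt.Println("versioned hash:", eth.KZGToVersionedHash(commitment))

	// Prove and verify that the blob matches the commitment.
	proof, err := kzg4844.ComputeBlobProof(*blob.KZGBlob(), commitment) // assumed geth API
	if err != nil {
		panic(err)
	}
	if err := eth.VerifyBlobProof(&blob, commitment, proof); err != nil {
		panic(err)
	}
	fmt.Println("blob proof verified")
}
```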
package eth
type BlobSidecar struct {
BlockRoot Bytes32 `json:"block_root"`
Slot Uint64String `json:"slot"`
Blob Blob `json:"blob"`
Index Uint64String `json:"index"`
KZGCommitment Bytes48 `json:"kzg_commitment"`
KZGProof Bytes48 `json:"kzg_proof"`
}
type APIGetBlobSidecarsResponse struct {
Data []*BlobSidecar `json:"data"`
}
type ReducedGenesisData struct {
GenesisTime Uint64String `json:"genesis_time"`
}
type APIGenesisResponse struct {
Data ReducedGenesisData `json:"data"`
}
type ReducedConfigData struct {
SecondsPerSlot Uint64String `json:"SECONDS_PER_SLOT"`
}
type APIConfigResponse struct {
Data ReducedConfigData `json:"data"`
}
......@@ -94,3 +94,11 @@ func (id L2BlockRef) ParentID() BlockID {
Number: n,
}
}
// IndexedDataHash represents a data-hash that commits to a single blob confirmed in a block.
// The index helps us avoid unnecessary blob to data-hash conversions to find the right content in a sidecar.
type IndexedDataHash struct {
Index uint64 // absolute index in the block, a.k.a. position in sidecar blobs array
DataHash common.Hash // hash of the blob, used for consistency checks
// Might add tx index and/or tx hash here later, depending on blobs API design
}
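A minimal sketch of how IndexedDataHash could be used against the blob sidecars served by the fake beacon: pick the sidecar at the expected index and check that its commitment hashes to the expected versioned hash. The `findBlob` helper and its package are hypothetical, not part of this change:

```go
package blobs // hypothetical package for illustration

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-service/eth"
	"github.com/ethereum/go-ethereum/crypto/kzg4844"
)

// findBlob selects the sidecar matching the indexed data-hash and performs the
// consistency check the comment above alludes to: the sidecar's KZG commitment
// must hash to the expected versioned hash.
func findBlob(sidecars []*eth.BlobSidecar, want eth.IndexedDataHash) (*eth.Blob, error) {
	for _, sc := range sidecars {
		if uint64(sc.Index) != want.Index {
			continue
		}
		got := eth.KZGToVersionedHash(kzg4844.Commitment(sc.KZGCommitment))
		if got != want.DataHash {
			return nil, fmt.Errorf("sidecar %d has commitment hash %s, expected %s", want.Index, got, want.DataHash)
		}
		return &sc.Blob, nil
	}
	return nil, fmt.Errorf("no blob sidecar with index %d", want.Index)
}
```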
......@@ -5,6 +5,7 @@ import (
"fmt"
"math/big"
"reflect"
"strconv"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
......@@ -317,3 +318,44 @@ type SystemConfig struct {
GasLimit uint64 `json:"gasLimit"`
// More fields can be added for future SystemConfig versions.
}
type Bytes48 [48]byte
func (b *Bytes48) UnmarshalJSON(text []byte) error {
return hexutil.UnmarshalFixedJSON(reflect.TypeOf(b), text, b[:])
}
func (b *Bytes48) UnmarshalText(text []byte) error {
return hexutil.UnmarshalFixedText("Bytes48", text, b[:])
}
func (b Bytes48) MarshalText() ([]byte, error) {
return hexutil.Bytes(b[:]).MarshalText()
}
func (b Bytes48) String() string {
return hexutil.Encode(b[:])
}
// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging.
func (b Bytes48) TerminalString() string {
return fmt.Sprintf("%x..%x", b[:3], b[45:])
}
// Uint64String is a decimal string representation of a uint64, for use in the Beacon API JSON encoding
type Uint64String uint64
func (v Uint64String) MarshalText() (out []byte, err error) {
out = strconv.AppendUint(out, uint64(v), 10)
return
}
func (v *Uint64String) UnmarshalText(b []byte) error {
n, err := strconv.ParseUint(string(b), 0, 64)
if err != nil {
return err
}
*v = Uint64String(n)
return nil
}
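A small sketch of why Uint64String exists: beacon API JSON encodes numbers as decimal strings, so the marshal/unmarshal round trip looks like this (values are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum-optimism/optimism/op-service/eth"
)

func main() {
	// Marshals as a quoted decimal string, matching beacon API conventions.
	out, _ := json.Marshal(eth.ReducedGenesisData{GenesisTime: eth.Uint64String(1_700_000_000)})
	fmt.Println(string(out)) // {"genesis_time":"1700000000"}

	// And parses back from the string form.
	var parsed eth.ReducedGenesisData
	_ = json.Unmarshal([]byte(`{"genesis_time":"1700000123"}`), &parsed)
	fmt.Println(uint64(parsed.GenesisTime)) // 1700000123
}
```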
......@@ -46,7 +46,7 @@
"express-prom-bundle": "^6.6.0",
"lodash": "^4.17.21",
"morgan": "^1.10.0",
"pino": "^8.16.1",
"pino": "^8.16.2",
"pino-multi-stream": "^6.0.0",
"pino-sentry": "^0.14.0",
"prom-client": "^14.2.0"
......
......@@ -225,8 +225,8 @@ importers:
specifier: ^1.10.0
version: 1.10.0
pino:
specifier: ^8.16.1
version: 8.16.1
specifier: ^8.16.2
version: 8.16.2
pino-multi-stream:
specifier: ^6.0.0
version: 6.0.0
......@@ -4074,7 +4074,7 @@ packages:
resolution: {integrity: sha512-wKoab31pknvILkxAF8ss+v9iNyhw5Iu/0jLtRkUD74cNfOOLJNnqfFKAv0r7wVaTQxRZtWrMpGfShwwBjOcgcg==}
deprecated: This is a stub types definition. pino provides its own type definitions, so you do not need this installed.
dependencies:
pino: 8.16.1
pino: 8.16.2
dev: true
/@types/prettier@2.3.2:
......@@ -11944,8 +11944,8 @@ packages:
sonic-boom: 2.8.0
thread-stream: 0.15.2
/pino@8.16.1:
resolution: {integrity: sha512-3bKsVhBmgPjGV9pyn4fO/8RtoVDR8ssW1ev819FsRXlRNgW8gR/9Kx+gCK4UPWd4JjrRDLWpzd/pb1AyWm3MGA==}
/pino@8.16.2:
resolution: {integrity: sha512-2advCDGVEvkKu9TTVSa/kWW7Z3htI/sBKEZpqiHk6ive0i/7f5b1rsU8jn0aimxqfnSz5bj/nOYkwhBUn5xxvg==}
hasBin: true
dependencies:
atomic-sleep: 1.0.0
......