Commit 8f1c9303 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into jg/op_node_logging

parents d62fe5de 9a85422f
---
'@eth-optimism/contracts-bedrock': patch
---
Optionally print cast commands during migration
---
'@eth-optimism/atst': minor
---
Update readAttestations and prepareWriteAttestation to handle keys longer than 32 bytes
---
'@eth-optimism/atst': minor
---
Remove the broken allowFailures option
---
'@eth-optimism/common-ts': patch
---
Fix BaseServiceV2 configuration for camelCase options
---
'@eth-optimism/atst': patch
---
Update docs
---
'@eth-optimism/atst': minor
---
Move the React API to @eth-optimism/atst/react so React isn't required to run the core SDK
---
'@eth-optimism/sdk': patch
---
Update migrated withdrawal gaslimit calculation
---
'@eth-optimism/atst': minor
---
Fix main and module in atst package.json
---
'@eth-optimism/atst': patch
---
Fixed bug with atst not defaulting to currently connected chain
---
'@eth-optimism/atst': minor
---
Deprecate parseAttestationBytes and createRawKey in favor of createKey and createValue
---
'@eth-optimism/fault-detector': patch
---
Fixes a bug that would cause the fault detector to error out if no outputs had been proposed yet.
---
'@eth-optimism/contracts-bedrock': patch
---
Print tenderly simulation links during deployment
...@@ -19,13 +19,13 @@ aside.sidebar { ...@@ -19,13 +19,13 @@ aside.sidebar {
p.sidebar-heading { p.sidebar-heading {
color: #323A43 !important; color: #323A43 !important;
font-family: 'Open Sans', sans-serif; font-family: 'Open Sans', sans-serif;
font-weight: 600; font-weight: 600 !important;
font-size: 14px !important; font-size: 14px !important;
line-height: 24px !important; line-height: 24px !important;
min-height: 36px; min-height: 36px;
margin-left: 32px; margin-left: 20px;
padding: 8px 16px !important; padding: 8px 16px !important;
width: calc(100% - 64px) !important; width: calc(100% - 60px) !important;
border-radius: 8px; border-radius: 8px;
} }
...@@ -34,18 +34,20 @@ a.sidebar-link { ...@@ -34,18 +34,20 @@ a.sidebar-link {
font-size: 14px !important; font-size: 14px !important;
line-height: 24px !important; line-height: 24px !important;
min-height: 36px; min-height: 36px;
margin-left: 32px; margin-top: 3px;
margin-left: 20px;
padding: 8px 16px !important; padding: 8px 16px !important;
width: calc(100% - 64px) !important; width: calc(100% - 60px) !important;
border-radius: 8px; border-radius: 8px;
} }
section.sidebar-group a.sidebar-link { section.sidebar-group a.sidebar-link,
margin-left: 44px; section.sidebar-group p.sidebar-heading.clickable {
width: calc(100% - 64px) !important; margin-left: 32px;
width: calc(100% - 60px) !important;
} }
.sidebar-links:not(.sidebar-group-items) > li > a.sidebar-link { .sidebar-links:not(.sidebar-group-items) > li > a.sidebar-link {
font-weight: 600 !important; font-weight: 600 !important;
color: #323A43 !important; color: #323A43 !important;
} }
......
...@@ -30,10 +30,10 @@ ...@@ -30,10 +30,10 @@
"devDependencies": { "devDependencies": {
"@babel/eslint-parser": "^7.5.4", "@babel/eslint-parser": "^7.5.4",
"@eth-optimism/contracts": "^0.5.40", "@eth-optimism/contracts": "^0.5.40",
"@eth-optimism/contracts-bedrock": "0.13.0", "@eth-optimism/contracts-bedrock": "0.13.1",
"@eth-optimism/contracts-periphery": "^1.0.7", "@eth-optimism/contracts-periphery": "^1.0.7",
"@eth-optimism/core-utils": "0.12.0", "@eth-optimism/core-utils": "0.12.0",
"@eth-optimism/sdk": "2.0.0", "@eth-optimism/sdk": "2.0.1",
"@ethersproject/abstract-provider": "^5.7.0", "@ethersproject/abstract-provider": "^5.7.0",
"@ethersproject/providers": "^5.7.0", "@ethersproject/providers": "^5.7.0",
"@ethersproject/transactions": "^5.7.0", "@ethersproject/transactions": "^5.7.0",
......
...@@ -19,8 +19,18 @@ test: ...@@ -19,8 +19,18 @@ test:
lint: lint:
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint -e "errors.As" -e "errors.Is" golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint -e "errors.As" -e "errors.Is"
fuzz:
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationZero ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutMaxChannelDuration ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutZeroMaxChannelDuration ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelCloseTimeout ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelZeroCloseTimeout ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzSeqWindowClose ./batcher
go test -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzSeqWindowZeroTimeoutClose ./batcher
.PHONY: \ .PHONY: \
op-batcher \ op-batcher \
clean \ clean \
test \ test \
lint lint \
fuzz
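To give a single fuzzer more than the 10-second budget used above, the same invocation can be run directly with a larger `-fuzztime`, e.g. `go test -run NOTAREALTEST -fuzztime 60s -fuzz FuzzSeqWindowClose ./batcher`.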
...@@ -129,6 +129,8 @@ type channelBuilder struct { ...@@ -129,6 +129,8 @@ type channelBuilder struct {
frames []frameData frames []frameData
} }
// newChannelBuilder creates a new channel builder or returns an error if the
// channel out could not be created.
func newChannelBuilder(cfg ChannelConfig) (*channelBuilder, error) { func newChannelBuilder(cfg ChannelConfig) (*channelBuilder, error) {
co, err := derive.NewChannelOut() co, err := derive.NewChannelOut()
if err != nil { if err != nil {
...@@ -255,7 +257,7 @@ func (c *channelBuilder) updateTimeout(timeoutBlockNum uint64, reason error) { ...@@ -255,7 +257,7 @@ func (c *channelBuilder) updateTimeout(timeoutBlockNum uint64, reason error) {
} }
// checkTimeout checks if the channel is timed out at the given block number and // checkTimeout checks if the channel is timed out at the given block number and
// in this case marks the channel as full, if it wasn't full alredy. // in this case marks the channel as full, if it wasn't full already.
func (c *channelBuilder) checkTimeout(blockNum uint64) { func (c *channelBuilder) checkTimeout(blockNum uint64) {
if !c.IsFull() && c.TimedOut(blockNum) { if !c.IsFull() && c.TimedOut(blockNum) {
c.setFullErr(c.timeoutReason) c.setFullErr(c.timeoutReason)
......
package batcher package batcher
import ( import (
"bytes"
"crypto/rand"
"math"
"math/big"
"testing" "testing"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/trie"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
// defaultChannelConfig returns a valid, default [ChannelConfig] struct. var defaultTestChannelConfig = ChannelConfig{
func defaultChannelConfig() ChannelConfig { SeqWindowSize: 15,
return ChannelConfig{ ChannelTimeout: 40,
SeqWindowSize: 15, MaxChannelDuration: 1,
ChannelTimeout: 40, SubSafetyMargin: 4,
MaxChannelDuration: 1, MaxFrameSize: 120000,
SubSafetyMargin: 4, TargetFrameSize: 100000,
MaxFrameSize: 120000, TargetNumFrames: 1,
TargetFrameSize: 100000, ApproxComprRatio: 0.4,
TargetNumFrames: 1,
ApproxComprRatio: 0.4,
}
} }
// TestConfigValidation tests the validation of the [ChannelConfig] struct. // TestConfigValidation tests the validation of the [ChannelConfig] struct.
func TestConfigValidation(t *testing.T) { func TestConfigValidation(t *testing.T) {
// Construct a valid config. // Construct a valid config.
validChannelConfig := defaultChannelConfig() validChannelConfig := defaultTestChannelConfig
require.NoError(t, validChannelConfig.Check()) require.NoError(t, validChannelConfig.Check())
// Set the config to have a zero max frame size. // Set the config to have a zero max frame size.
...@@ -33,8 +42,579 @@ func TestConfigValidation(t *testing.T) { ...@@ -33,8 +42,579 @@ func TestConfigValidation(t *testing.T) {
// Reset the config and test the Timeout error. // Reset the config and test the Timeout error.
// NOTE: We should be fuzzing these values with the constraint that // NOTE: We should be fuzzing these values with the constraint that
// SubSafetyMargin > ChannelTimeout to ensure validation. // SubSafetyMargin > ChannelTimeout to ensure validation.
validChannelConfig = defaultChannelConfig() validChannelConfig = defaultTestChannelConfig
validChannelConfig.ChannelTimeout = 0 validChannelConfig.ChannelTimeout = 0
validChannelConfig.SubSafetyMargin = 1 validChannelConfig.SubSafetyMargin = 1
require.ErrorIs(t, validChannelConfig.Check(), ErrInvalidChannelTimeout) require.ErrorIs(t, validChannelConfig.Check(), ErrInvalidChannelTimeout)
} }
// addNonsenseBlock is a helper function that adds a nonsense block
// to the channel builder using the [channelBuilder.AddBlock] method.
func addNonsenseBlock(cb *channelBuilder) error {
lBlock := types.NewBlock(&types.Header{
BaseFee: big.NewInt(10),
Difficulty: common.Big0,
Number: big.NewInt(100),
}, nil, nil, nil, trie.NewStackTrie(nil))
l1InfoTx, err := derive.L1InfoDeposit(0, lBlock, eth.SystemConfig{}, false)
if err != nil {
return err
}
txs := []*types.Transaction{types.NewTx(l1InfoTx)}
a := types.NewBlock(&types.Header{
Number: big.NewInt(0),
}, txs, nil, nil, trie.NewStackTrie(nil))
err = cb.AddBlock(a)
return err
}
// buildTooLargeRlpEncodedBlockBatch is a helper function that builds a block whose
// RLP-encoded batch is too large to be added to a channel.
func buildTooLargeRlpEncodedBlockBatch(cb *channelBuilder) error {
// Construct a block with way too many txs
lBlock := types.NewBlock(&types.Header{
BaseFee: big.NewInt(10),
Difficulty: common.Big0,
Number: big.NewInt(100),
}, nil, nil, nil, trie.NewStackTrie(nil))
l1InfoTx, _ := derive.L1InfoDeposit(0, lBlock, eth.SystemConfig{}, false)
txs := []*types.Transaction{types.NewTx(l1InfoTx)}
for i := 0; i < 500_000; i++ {
txData := make([]byte, 32)
_, _ = rand.Read(txData)
tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 0, big.NewInt(0), txData)
txs = append(txs, tx)
}
block := types.NewBlock(&types.Header{
Number: big.NewInt(0),
}, txs, nil, nil, trie.NewStackTrie(nil))
// Try to add the block to the channel builder
// This should fail since the block is too large
// When a batch is constructed from the block and
// then rlp encoded in the channel out, the size
// will exceed [derive.MaxRLPBytesPerChannel]
err := cb.AddBlock(block)
return err
}
// FuzzDurationTimeoutZeroMaxChannelDuration ensures that whenever the MaxChannelDuration
// is set to 0, the channel builder cannot have a duration timeout.
func FuzzDurationTimeoutZeroMaxChannelDuration(f *testing.F) {
for i := range [10]int{} {
f.Add(uint64(i))
}
f.Fuzz(func(t *testing.T, l1BlockNum uint64) {
channelConfig := defaultTestChannelConfig
channelConfig.MaxChannelDuration = 0
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
cb.timeout = 0
cb.updateDurationTimeout(l1BlockNum)
require.False(t, cb.TimedOut(l1BlockNum))
})
}
// FuzzDurationZero ensures that whenever the MaxChannelDuration
// is not set to 0, the channel builder will always have a duration timeout
// as long as the channel builder's timeout is set to 0.
func FuzzDurationZero(f *testing.F) {
for i := range [10]int{} {
f.Add(uint64(i), uint64(i))
}
f.Fuzz(func(t *testing.T, l1BlockNum uint64, maxChannelDuration uint64) {
if maxChannelDuration == 0 {
t.Skip("Max channel duration cannot be 0")
}
// Create the channel builder
channelConfig := defaultTestChannelConfig
channelConfig.MaxChannelDuration = maxChannelDuration
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
// Whenever the timeout is set to 0, the channel builder should have a duration timeout
cb.timeout = 0
cb.updateDurationTimeout(l1BlockNum)
cb.checkTimeout(l1BlockNum + maxChannelDuration)
require.ErrorIs(t, cb.FullErr(), ErrMaxDurationReached)
})
}
// FuzzDurationTimeoutMaxChannelDuration ensures that whenever the MaxChannelDuration
// is not set to 0, the channel builder will always have a duration timeout
// as long as the channel builder's timeout is greater than the target block number.
func FuzzDurationTimeoutMaxChannelDuration(f *testing.F) {
// Set multiple seeds in case fuzzing isn't explicitly used
for i := range [10]int{} {
f.Add(uint64(i), uint64(i), uint64(i))
}
f.Fuzz(func(t *testing.T, l1BlockNum uint64, maxChannelDuration uint64, timeout uint64) {
if maxChannelDuration == 0 {
t.Skip("Max channel duration cannot be 0")
}
// Create the channel builder
channelConfig := defaultTestChannelConfig
channelConfig.MaxChannelDuration = maxChannelDuration
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
// Whenever the timeout is greater than the l1BlockNum,
// the channel builder should have a duration timeout
cb.timeout = timeout
cb.updateDurationTimeout(l1BlockNum)
if timeout > l1BlockNum+maxChannelDuration {
// Notice: we cannot call this outside of the if statement
// because it would put the channel builder in an invalid state.
// That is, where the channel builder has a value set for the timeout
// with no timeoutReason. This subsequently causes a panic when
// a nil timeoutReason is used as an error (eg when calling FullErr).
cb.checkTimeout(l1BlockNum + maxChannelDuration)
require.ErrorIs(t, cb.FullErr(), ErrMaxDurationReached)
} else {
require.NoError(t, cb.FullErr())
}
})
}
// FuzzChannelCloseTimeout ensures that the channel builder has an [ErrChannelTimeoutClose]
// as long as the timeout constraint is met and the builder's timeout is greater than
// the calculated timeout.
func FuzzChannelCloseTimeout(f *testing.F) {
// Set multiple seeds in case fuzzing isn't explicitly used
for i := range [10]int{} {
f.Add(uint64(i), uint64(i), uint64(i), uint64(i*5))
}
f.Fuzz(func(t *testing.T, l1BlockNum uint64, channelTimeout uint64, subSafetyMargin uint64, timeout uint64) {
// Create the channel builder
channelConfig := defaultTestChannelConfig
channelConfig.ChannelTimeout = channelTimeout
channelConfig.SubSafetyMargin = subSafetyMargin
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
// Check the timeout
cb.timeout = timeout
cb.FramePublished(l1BlockNum)
calculatedTimeout := l1BlockNum + channelTimeout - subSafetyMargin
if timeout > calculatedTimeout && calculatedTimeout != 0 {
cb.checkTimeout(calculatedTimeout)
require.ErrorIs(t, cb.FullErr(), ErrChannelTimeoutClose)
} else {
require.NoError(t, cb.FullErr())
}
})
}
// FuzzChannelZeroCloseTimeout ensures that the channel builder has an [ErrChannelTimeoutClose]
// as long as the timeout constraint is met and the builder's timeout is set to zero.
func FuzzChannelZeroCloseTimeout(f *testing.F) {
// Set multiple seeds in case fuzzing isn't explicitly used
for i := range [10]int{} {
f.Add(uint64(i), uint64(i), uint64(i))
}
f.Fuzz(func(t *testing.T, l1BlockNum uint64, channelTimeout uint64, subSafetyMargin uint64) {
// Create the channel builder
channelConfig := defaultTestChannelConfig
channelConfig.ChannelTimeout = channelTimeout
channelConfig.SubSafetyMargin = subSafetyMargin
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
// Check the timeout
cb.timeout = 0
cb.FramePublished(l1BlockNum)
calculatedTimeout := l1BlockNum + channelTimeout - subSafetyMargin
cb.checkTimeout(calculatedTimeout)
if cb.timeout != 0 {
require.ErrorIs(t, cb.FullErr(), ErrChannelTimeoutClose)
}
})
}
// FuzzSeqWindowClose ensures that the channel builder has an [ErrSeqWindowClose]
// as long as the timeout constraint is met and the builder's timeout is greater than
// the calculated timeout.
func FuzzSeqWindowClose(f *testing.F) {
// Set multiple seeds in case fuzzing isn't explicitly used
for i := range [10]int{} {
f.Add(uint64(i), uint64(i), uint64(i), uint64(i*5))
}
f.Fuzz(func(t *testing.T, epochNum uint64, seqWindowSize uint64, subSafetyMargin uint64, timeout uint64) {
// Create the channel builder
channelConfig := defaultTestChannelConfig
channelConfig.SeqWindowSize = seqWindowSize
channelConfig.SubSafetyMargin = subSafetyMargin
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
// Check the timeout
cb.timeout = timeout
cb.updateSwTimeout(&derive.BatchData{
BatchV1: derive.BatchV1{
EpochNum: rollup.Epoch(epochNum),
},
})
calculatedTimeout := epochNum + seqWindowSize - subSafetyMargin
if timeout > calculatedTimeout && calculatedTimeout != 0 {
cb.checkTimeout(calculatedTimeout)
require.ErrorIs(t, cb.FullErr(), ErrSeqWindowClose)
} else {
require.NoError(t, cb.FullErr())
}
})
}
// FuzzSeqWindowZeroTimeoutClose ensures that the channel builder has an [ErrSeqWindowClose]
// as long as the timeout constraint is met and the builder's timeout is set to zero.
func FuzzSeqWindowZeroTimeoutClose(f *testing.F) {
// Set multiple seeds in case fuzzing isn't explicitly used
for i := range [10]int{} {
f.Add(uint64(i), uint64(i), uint64(i))
}
f.Fuzz(func(t *testing.T, epochNum uint64, seqWindowSize uint64, subSafetyMargin uint64) {
// Create the channel builder
channelConfig := defaultTestChannelConfig
channelConfig.SeqWindowSize = seqWindowSize
channelConfig.SubSafetyMargin = subSafetyMargin
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
// Check the timeout
cb.timeout = 0
cb.updateSwTimeout(&derive.BatchData{
BatchV1: derive.BatchV1{
EpochNum: rollup.Epoch(epochNum),
},
})
calculatedTimeout := epochNum + seqWindowSize - subSafetyMargin
cb.checkTimeout(calculatedTimeout)
if cb.timeout != 0 {
require.ErrorIs(t, cb.FullErr(), ErrSeqWindowClose, "Sequence window close should be reached")
}
})
}
// TestBuilderNextFrame tests calling NextFrame on a ChannelBuilder with only one frame
func TestBuilderNextFrame(t *testing.T) {
channelConfig := defaultTestChannelConfig
// Create a new channel builder
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
// Mock the internals of `channelBuilder.outputFrame`
// to construct a single frame
co := cb.co
var buf bytes.Buffer
fn, err := co.OutputFrame(&buf, channelConfig.MaxFrameSize)
require.NoError(t, err)
// Push one frame into the channel builder
expectedTx := txID{chID: co.ID(), frameNumber: fn}
expectedBytes := buf.Bytes()
frameData := frameData{
id: frameID{
chID: co.ID(),
frameNumber: fn,
},
data: expectedBytes,
}
cb.PushFrame(frameData)
// There should only be 1 frame in the channel builder
require.Equal(t, 1, cb.NumFrames())
// We should be able to increment to the next frame
constructedFrame := cb.NextFrame()
require.Equal(t, expectedTx, constructedFrame.id)
require.Equal(t, expectedBytes, constructedFrame.data)
require.Equal(t, 0, cb.NumFrames())
// The next call should panic since the length of frames is 0
require.PanicsWithValue(t, "no next frame", func() { cb.NextFrame() })
}
// TestBuilderWrongFramePanic tests that a panic is thrown when a frame is pushed with an invalid frame id
func TestBuilderWrongFramePanic(t *testing.T) {
channelConfig := defaultTestChannelConfig
// Construct a channel builder
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
// Mock the internals of `channelBuilder.outputFrame`
// to construct a single frame
co, err := derive.NewChannelOut()
require.NoError(t, err)
var buf bytes.Buffer
fn, err := co.OutputFrame(&buf, channelConfig.MaxFrameSize)
require.NoError(t, err)
// The frame push should panic since we constructed a new channel out
// so the channel out id won't match
require.PanicsWithValue(t, "wrong channel", func() {
frame := frameData{
id: frameID{
chID: co.ID(),
frameNumber: fn,
},
data: buf.Bytes(),
}
cb.PushFrame(frame)
})
}
// TestOutputFrames tests the OutputFrames function
func TestOutputFrames(t *testing.T) {
channelConfig := defaultTestChannelConfig
channelConfig.MaxFrameSize = 2
// Construct the channel builder
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
require.False(t, cb.IsFull())
require.Equal(t, 0, cb.NumFrames())
// Calling OutputFrames without having called [AddBlock]
// should return no error
require.NoError(t, cb.OutputFrames())
// There should be no ready bytes yet
readyBytes := cb.co.ReadyBytes()
require.Equal(t, 0, readyBytes)
// Let's add a block
err = addNonsenseBlock(cb)
require.NoError(t, err)
// Check how many ready bytes
readyBytes = cb.co.ReadyBytes()
require.Equal(t, 2, readyBytes)
require.Equal(t, 0, cb.NumFrames())
// The channel should not be full
// but we want to output the frames for testing anyways
isFull := cb.IsFull()
require.False(t, isFull)
// Since we manually set the max frame size to 2,
// we should be able to compress the two ready bytes into a single frame now
err = cb.OutputFrames()
require.NoError(t, err)
// There should be one frame in the channel builder now
require.Equal(t, 1, cb.NumFrames())
// There should no longer be any ready bytes
readyBytes = cb.co.ReadyBytes()
require.Equal(t, 0, readyBytes)
}
// TestMaxRLPBytesPerChannel tests the [channelBuilder.OutputFrames]
// function errors when the max RLP bytes per channel is reached.
func TestMaxRLPBytesPerChannel(t *testing.T) {
channelConfig := defaultTestChannelConfig
channelConfig.MaxFrameSize = 2
// Construct the channel builder
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
require.False(t, cb.IsFull())
require.Equal(t, 0, cb.NumFrames())
// Add a block that overflows the [ChannelOut]
err = buildTooLargeRlpEncodedBlockBatch(cb)
require.ErrorIs(t, err, derive.ErrTooManyRLPBytes)
}
// TestOutputFramesMaxFrameIndex tests the [channelBuilder.OutputFrames]
// function errors when the max frame index is reached.
func TestOutputFramesMaxFrameIndex(t *testing.T) {
channelConfig := defaultTestChannelConfig
channelConfig.MaxFrameSize = 1
channelConfig.TargetNumFrames = math.MaxInt
channelConfig.TargetFrameSize = 1
channelConfig.ApproxComprRatio = 0
// Continuously add blocks until the max frame index is reached
// This should cause the [channelBuilder.OutputFrames] function
// to error
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
require.False(t, cb.IsFull())
require.Equal(t, 0, cb.NumFrames())
for {
lBlock := types.NewBlock(&types.Header{
BaseFee: common.Big0,
Difficulty: common.Big0,
Number: common.Big0,
}, nil, nil, nil, trie.NewStackTrie(nil))
l1InfoTx, _ := derive.L1InfoDeposit(0, lBlock, eth.SystemConfig{}, false)
txs := []*types.Transaction{types.NewTx(l1InfoTx)}
a := types.NewBlock(&types.Header{
Number: big.NewInt(0),
}, txs, nil, nil, trie.NewStackTrie(nil))
err = cb.AddBlock(a)
if cb.IsFull() {
fullErr := cb.FullErr()
require.ErrorIs(t, fullErr, ErrMaxFrameIndex)
break
}
require.NoError(t, err)
_ = cb.OutputFrames()
// Flushing so we can construct new frames
_ = cb.co.Flush()
}
}
// TestBuilderAddBlock tests the AddBlock function
func TestBuilderAddBlock(t *testing.T) {
channelConfig := defaultTestChannelConfig
// Lower the max frame size so that we can batch
channelConfig.MaxFrameSize = 2
// Configure the Input Threshold params so we observe a full channel
// In reality, we only need the input bytes (74) below to be greater than
// or equal to the input threshold (3 * 2) / 1 = 6
channelConfig.TargetFrameSize = 3
channelConfig.TargetNumFrames = 2
channelConfig.ApproxComprRatio = 1
// Construct the channel builder
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
// Add a nonsense block to the channel builder
err = addNonsenseBlock(cb)
require.NoError(t, err)
// Check the fields reset in the AddBlock function
require.Equal(t, 74, cb.co.InputBytes())
require.Equal(t, 1, len(cb.blocks))
require.Equal(t, 0, len(cb.frames))
require.True(t, cb.IsFull())
// Since the channel output is full, the next call to AddBlock
// should return the channel out full error
err = addNonsenseBlock(cb)
require.ErrorIs(t, err, ErrInputTargetReached)
}
// TestBuilderReset tests the Reset function
func TestBuilderReset(t *testing.T) {
channelConfig := defaultTestChannelConfig
// Lower the max frame size so that we can batch
channelConfig.MaxFrameSize = 2
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
// Add a nonsense block to the channel builder
err = addNonsenseBlock(cb)
require.NoError(t, err)
// Check the fields reset in the Reset function
require.Equal(t, 1, len(cb.blocks))
require.Equal(t, 0, len(cb.frames))
// Timeout should be updated in the AddBlock internal call to `updateSwTimeout`
timeout := uint64(100) + cb.cfg.SeqWindowSize - cb.cfg.SubSafetyMargin
require.Equal(t, timeout, cb.timeout)
require.NoError(t, cb.fullErr)
// Output frames so we can set the channel builder frames
err = cb.OutputFrames()
require.NoError(t, err)
// Add another block to increment the block count
err = addNonsenseBlock(cb)
require.NoError(t, err)
// Check the fields reset in the Reset function
require.Equal(t, 2, len(cb.blocks))
require.Equal(t, 1, len(cb.frames))
require.Equal(t, timeout, cb.timeout)
require.NoError(t, cb.fullErr)
// Reset the channel builder
err = cb.Reset()
require.NoError(t, err)
// Check the fields reset in the Reset function
require.Equal(t, 0, len(cb.blocks))
require.Equal(t, 0, len(cb.frames))
require.Equal(t, uint64(0), cb.timeout)
require.NoError(t, cb.fullErr)
require.Equal(t, 0, cb.co.InputBytes())
require.Equal(t, 0, cb.co.ReadyBytes())
}
// TestBuilderRegisterL1Block tests the RegisterL1Block function
func TestBuilderRegisterL1Block(t *testing.T) {
channelConfig := defaultTestChannelConfig
// Construct the channel builder
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
// Assert params modified in RegisterL1Block
require.Equal(t, uint64(1), channelConfig.MaxChannelDuration)
require.Equal(t, uint64(0), cb.timeout)
// Register a new L1 block
cb.RegisterL1Block(uint64(100))
// Assert params modified in RegisterL1Block
require.Equal(t, uint64(1), channelConfig.MaxChannelDuration)
require.Equal(t, uint64(101), cb.timeout)
}
// TestBuilderRegisterL1BlockZeroMaxChannelDuration tests the RegisterL1Block function
func TestBuilderRegisterL1BlockZeroMaxChannelDuration(t *testing.T) {
channelConfig := defaultTestChannelConfig
// Set the max channel duration to 0
channelConfig.MaxChannelDuration = 0
// Construct the channel builder
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
// Assert params modified in RegisterL1Block
require.Equal(t, uint64(0), channelConfig.MaxChannelDuration)
require.Equal(t, uint64(0), cb.timeout)
// Register a new L1 block
cb.RegisterL1Block(uint64(100))
// Since the max channel duration is set to 0,
// the L1 block register should not update the timeout
require.Equal(t, uint64(0), channelConfig.MaxChannelDuration)
require.Equal(t, uint64(0), cb.timeout)
}
// TestFramePublished tests the FramePublished function
func TestFramePublished(t *testing.T) {
channelConfig := defaultTestChannelConfig
// Construct the channel builder
cb, err := newChannelBuilder(channelConfig)
require.NoError(t, err)
// Let's say the block number is fed in as 100
// and the channel timeout is 1000
l1BlockNum := uint64(100)
cb.cfg.ChannelTimeout = uint64(1000)
cb.cfg.SubSafetyMargin = 100
// Then the frame published will update the timeout
cb.FramePublished(l1BlockNum)
// Now the timeout will be 1000
require.Equal(t, uint64(1000), cb.timeout)
}
package batcher_test
import (
"math"
"testing"
"github.com/ethereum-optimism/optimism/op-batcher/batcher"
"github.com/stretchr/testify/require"
)
// TestInputThreshold tests the [ChannelConfig.InputThreshold]
// function using a table-driven testing approach.
func TestInputThreshold(t *testing.T) {
type testInput struct {
TargetFrameSize uint64
TargetNumFrames int
ApproxComprRatio float64
}
type test struct {
input testInput
assertion func(uint64)
}
// Construct test cases that test the boundary conditions
tests := []test{
{
input: testInput{
TargetFrameSize: 1,
TargetNumFrames: 1,
ApproxComprRatio: 0.4,
},
assertion: func(output uint64) {
require.Equal(t, uint64(2), output)
},
},
{
input: testInput{
TargetFrameSize: 1,
TargetNumFrames: 100000,
ApproxComprRatio: 0.4,
},
assertion: func(output uint64) {
require.Equal(t, uint64(250_000), output)
},
},
{
input: testInput{
TargetFrameSize: 1,
TargetNumFrames: 1,
ApproxComprRatio: 1,
},
assertion: func(output uint64) {
require.Equal(t, uint64(1), output)
},
},
{
input: testInput{
TargetFrameSize: 1,
TargetNumFrames: 1,
ApproxComprRatio: 2,
},
assertion: func(output uint64) {
require.Equal(t, uint64(0), output)
},
},
{
input: testInput{
TargetFrameSize: 100000,
TargetNumFrames: 1,
ApproxComprRatio: 0.4,
},
assertion: func(output uint64) {
require.Equal(t, uint64(250_000), output)
},
},
{
input: testInput{
TargetFrameSize: 1,
TargetNumFrames: 100000,
ApproxComprRatio: 0.4,
},
assertion: func(output uint64) {
require.Equal(t, uint64(250_000), output)
},
},
{
input: testInput{
TargetFrameSize: 100000,
TargetNumFrames: 100000,
ApproxComprRatio: 0.4,
},
assertion: func(output uint64) {
require.Equal(t, uint64(25_000_000_000), output)
},
},
{
input: testInput{
TargetFrameSize: 1,
TargetNumFrames: 1,
ApproxComprRatio: 0.000001,
},
assertion: func(output uint64) {
require.Equal(t, uint64(1_000_000), output)
},
},
{
input: testInput{
TargetFrameSize: 0,
TargetNumFrames: 0,
ApproxComprRatio: 0,
},
assertion: func(output uint64) {
// Need to allow for NaN depending on the machine architecture
require.True(t, output == uint64(0) || output == uint64(math.NaN()))
},
},
}
// Validate each test case
for _, tt := range tests {
config := batcher.ChannelConfig{
TargetFrameSize: tt.input.TargetFrameSize,
TargetNumFrames: tt.input.TargetNumFrames,
ApproxComprRatio: tt.input.ApproxComprRatio,
}
got := config.InputThreshold()
tt.assertion(got)
}
}
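The expectations above are consistent with a simple "total target size divided by compression ratio" rule. A minimal sketch of that assumed relationship (not necessarily the actual [ChannelConfig.InputThreshold] implementation):

// inputThresholdSketch mirrors the relationship implied by the table-driven expectations:
// roughly TargetFrameSize * TargetNumFrames / ApproxComprRatio, truncated to a uint64.
func inputThresholdSketch(cfg batcher.ChannelConfig) uint64 {
	return uint64(float64(cfg.TargetFrameSize*uint64(cfg.TargetNumFrames)) / cfg.ApproxComprRatio)
}

For example, TargetFrameSize=1, TargetNumFrames=1, ApproxComprRatio=0.4 gives 1/0.4 = 2.5, which truncates to 2, matching the first case above.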
package batcher_test package batcher
import ( import (
"io" "io"
...@@ -7,7 +7,6 @@ import ( ...@@ -7,7 +7,6 @@ import (
"testing" "testing"
"time" "time"
"github.com/ethereum-optimism/optimism/op-batcher/batcher"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
derivetest "github.com/ethereum-optimism/optimism/op-node/rollup/derive/test" derivetest "github.com/ethereum-optimism/optimism/op-node/rollup/derive/test"
...@@ -19,11 +18,56 @@ import ( ...@@ -19,11 +18,56 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
// TestPendingChannelTimeout tests that the channel manager
// correctly identifies when a pending channel is timed out.
func TestPendingChannelTimeout(t *testing.T) {
// Create a new channel manager with a ChannelTimeout
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, ChannelConfig{
ChannelTimeout: 100,
})
// Pending channel is nil so it cannot be timed out
timeout := m.pendingChannelIsTimedOut()
require.False(t, timeout)
// Set the pending channel
err := m.ensurePendingChannel(eth.BlockID{})
require.NoError(t, err)
// There are no confirmed transactions so
// the pending channel cannot be timed out
timeout = m.pendingChannelIsTimedOut()
require.False(t, timeout)
// Manually set confirmed transactions
// to avoid other methods clearing state
m.confirmedTransactions[frameID{frameNumber: 0}] = eth.BlockID{Number: 0}
m.confirmedTransactions[frameID{frameNumber: 1}] = eth.BlockID{Number: 99}
// Since the ChannelTimeout is 100, the
// pending channel should not be timed out
timeout = m.pendingChannelIsTimedOut()
require.False(t, timeout)
// Add a confirmed transaction with a higher number
// than the ChannelTimeout
m.confirmedTransactions[frameID{
frameNumber: 2,
}] = eth.BlockID{
Number: 101,
}
// Now the pending channel should be timed out
timeout = m.pendingChannelIsTimedOut()
require.True(t, timeout)
}
// TestChannelManagerReturnsErrReorg ensures that the channel manager // TestChannelManagerReturnsErrReorg ensures that the channel manager
// detects a reorg when it has cached L1 blocks. // detects a reorg when it has cached L1 blocks.
func TestChannelManagerReturnsErrReorg(t *testing.T) { func TestChannelManagerReturnsErrReorg(t *testing.T) {
log := testlog.Logger(t, log.LvlCrit) log := testlog.Logger(t, log.LvlCrit)
m := batcher.NewChannelManager(log, batcher.ChannelConfig{}) m := NewChannelManager(log, ChannelConfig{})
a := types.NewBlock(&types.Header{ a := types.NewBlock(&types.Header{
Number: big.NewInt(0), Number: big.NewInt(0),
...@@ -48,14 +92,16 @@ func TestChannelManagerReturnsErrReorg(t *testing.T) { ...@@ -48,14 +92,16 @@ func TestChannelManagerReturnsErrReorg(t *testing.T) {
err = m.AddL2Block(c) err = m.AddL2Block(c)
require.NoError(t, err) require.NoError(t, err)
err = m.AddL2Block(x) err = m.AddL2Block(x)
require.ErrorIs(t, err, batcher.ErrReorg) require.ErrorIs(t, err, ErrReorg)
require.Equal(t, []*types.Block{a, b, c}, m.blocks)
} }
// TestChannelManagerReturnsErrReorgWhenDrained ensures that the channel manager // TestChannelManagerReturnsErrReorgWhenDrained ensures that the channel manager
// detects a reorg even if it does not have any blocks inside it. // detects a reorg even if it does not have any blocks inside it.
func TestChannelManagerReturnsErrReorgWhenDrained(t *testing.T) { func TestChannelManagerReturnsErrReorgWhenDrained(t *testing.T) {
log := testlog.Logger(t, log.LvlCrit) log := testlog.Logger(t, log.LvlCrit)
m := batcher.NewChannelManager(log, batcher.ChannelConfig{ m := NewChannelManager(log, ChannelConfig{
TargetFrameSize: 0, TargetFrameSize: 0,
MaxFrameSize: 120_000, MaxFrameSize: 120_000,
ApproxComprRatio: 1.0, ApproxComprRatio: 1.0,
...@@ -86,14 +132,226 @@ func TestChannelManagerReturnsErrReorgWhenDrained(t *testing.T) { ...@@ -86,14 +132,226 @@ func TestChannelManagerReturnsErrReorgWhenDrained(t *testing.T) {
require.ErrorIs(t, err, io.EOF) require.ErrorIs(t, err, io.EOF)
err = m.AddL2Block(x) err = m.AddL2Block(x)
require.ErrorIs(t, err, batcher.ErrReorg) require.ErrorIs(t, err, ErrReorg)
}
// TestChannelManagerNextTxData checks the nextTxData function.
func TestChannelManagerNextTxData(t *testing.T) {
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, ChannelConfig{})
// Nil pending channel should return EOF
returnedTxData, err := m.nextTxData()
require.ErrorIs(t, err, io.EOF)
require.Equal(t, txData{}, returnedTxData)
// Set the pending channel
// The nextTxData function should still return EOF
// since the pending channel has no frames
err = m.ensurePendingChannel(eth.BlockID{})
require.NoError(t, err)
returnedTxData, err = m.nextTxData()
require.ErrorIs(t, err, io.EOF)
require.Equal(t, txData{}, returnedTxData)
// Manually push a frame into the pending channel
channelID := m.pendingChannel.ID()
frame := frameData{
data: []byte{},
id: frameID{
chID: channelID,
frameNumber: uint16(0),
},
}
m.pendingChannel.PushFrame(frame)
require.Equal(t, 1, m.pendingChannel.NumFrames())
// Now the nextTxData function should return the frame
returnedTxData, err = m.nextTxData()
expectedTxData := txData{frame}
expectedChannelID := expectedTxData.ID()
require.NoError(t, err)
require.Equal(t, expectedTxData, returnedTxData)
require.Equal(t, 0, m.pendingChannel.NumFrames())
require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID])
}
// TestClearChannelManager tests clearing the channel manager.
func TestClearChannelManager(t *testing.T) {
// Create a channel manager
log := testlog.Logger(t, log.LvlCrit)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
m := NewChannelManager(log, ChannelConfig{
// Need to set the channel timeout here so we don't clear pending
// channels on confirmation. This would result in [TxConfirmed]
// clearing confirmed transactions and resetting the pendingChannels map
ChannelTimeout: 10,
// Have to set the max frame size here otherwise the channel builder would not
// be able to output any frames
MaxFrameSize: 1,
})
// Channel Manager state should be empty by default
require.Empty(t, m.blocks)
require.Equal(t, common.Hash{}, m.tip)
require.Nil(t, m.pendingChannel)
require.Empty(t, m.pendingTransactions)
require.Empty(t, m.confirmedTransactions)
// Add a block to the channel manager
a, _ := derivetest.RandomL2Block(rng, 4)
newL1Tip := a.Hash()
l1BlockID := eth.BlockID{
Hash: a.Hash(),
Number: a.NumberU64(),
}
err := m.AddL2Block(a)
require.NoError(t, err)
// Make sure there is a channel builder
err = m.ensurePendingChannel(l1BlockID)
require.NoError(t, err)
require.NotNil(t, m.pendingChannel)
require.Equal(t, 0, len(m.confirmedTransactions))
// Process the blocks
// We should have a pending channel with 1 frame
// and no more blocks since processBlocks consumes
// the list
err = m.processBlocks()
require.NoError(t, err)
err = m.pendingChannel.OutputFrames()
require.NoError(t, err)
_, err = m.nextTxData()
require.NoError(t, err)
require.Equal(t, 0, len(m.blocks))
require.Equal(t, newL1Tip, m.tip)
require.Equal(t, 1, len(m.pendingTransactions))
// Add a new block so we can test clearing
// the channel manager with a full state
b := types.NewBlock(&types.Header{
Number: big.NewInt(1),
ParentHash: a.Hash(),
}, nil, nil, nil, nil)
err = m.AddL2Block(b)
require.NoError(t, err)
require.Equal(t, 1, len(m.blocks))
require.Equal(t, b.Hash(), m.tip)
// Clear the channel manager
m.Clear()
// Check that the entire channel manager state cleared
require.Empty(t, m.blocks)
require.Equal(t, common.Hash{}, m.tip)
require.Nil(t, m.pendingChannel)
require.Empty(t, m.pendingTransactions)
require.Empty(t, m.confirmedTransactions)
}
// TestChannelManagerTxConfirmed checks the [ChannelManager.TxConfirmed] function.
func TestChannelManagerTxConfirmed(t *testing.T) {
// Create a channel manager
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, ChannelConfig{
// Need to set the channel timeout here so we don't clear pending
// channels on confirmation. This would result in [TxConfirmed]
// clearing confirmed transactions and resetting the pendingChannels map
ChannelTimeout: 10,
})
// Let's add a valid pending transaction to the channel manager
// so we can demonstrate TxConfirmed's correctness
err := m.ensurePendingChannel(eth.BlockID{})
require.NoError(t, err)
channelID := m.pendingChannel.ID()
frame := frameData{
data: []byte{},
id: frameID{
chID: channelID,
frameNumber: uint16(0),
},
}
m.pendingChannel.PushFrame(frame)
require.Equal(t, 1, m.pendingChannel.NumFrames())
returnedTxData, err := m.nextTxData()
expectedTxData := txData{frame}
expectedChannelID := expectedTxData.ID()
require.NoError(t, err)
require.Equal(t, expectedTxData, returnedTxData)
require.Equal(t, 0, m.pendingChannel.NumFrames())
require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID])
require.Equal(t, 1, len(m.pendingTransactions))
// An unknown pending transaction should not be marked as confirmed
// and should not be removed from the pending transactions map
actualChannelID := m.pendingChannel.ID()
unknownChannelID := derive.ChannelID([derive.ChannelIDLength]byte{0x69})
require.NotEqual(t, actualChannelID, unknownChannelID)
unknownTxID := frameID{chID: unknownChannelID, frameNumber: 0}
blockID := eth.BlockID{Number: 0, Hash: common.Hash{0x69}}
m.TxConfirmed(unknownTxID, blockID)
require.Empty(t, m.confirmedTransactions)
require.Equal(t, 1, len(m.pendingTransactions))
// Now let's mark the pending transaction as confirmed
// and check that it is removed from the pending transactions map
// and added to the confirmed transactions map
m.TxConfirmed(expectedChannelID, blockID)
require.Empty(t, m.pendingTransactions)
require.Equal(t, 1, len(m.confirmedTransactions))
require.Equal(t, blockID, m.confirmedTransactions[expectedChannelID])
}
// TestChannelManagerTxFailed checks the [ChannelManager.TxFailed] function.
func TestChannelManagerTxFailed(t *testing.T) {
// Create a channel manager
log := testlog.Logger(t, log.LvlCrit)
m := NewChannelManager(log, ChannelConfig{})
// Let's add a valid pending transaction to the channel
// manager so we can demonstrate correctness
err := m.ensurePendingChannel(eth.BlockID{})
require.NoError(t, err)
channelID := m.pendingChannel.ID()
frame := frameData{
data: []byte{},
id: frameID{
chID: channelID,
frameNumber: uint16(0),
},
}
m.pendingChannel.PushFrame(frame)
require.Equal(t, 1, m.pendingChannel.NumFrames())
returnedTxData, err := m.nextTxData()
expectedTxData := txData{frame}
expectedChannelID := expectedTxData.ID()
require.NoError(t, err)
require.Equal(t, expectedTxData, returnedTxData)
require.Equal(t, 0, m.pendingChannel.NumFrames())
require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID])
require.Equal(t, 1, len(m.pendingTransactions))
// Trying to mark an unknown pending transaction as failed
// shouldn't modify state
m.TxFailed(frameID{})
require.Equal(t, 0, m.pendingChannel.NumFrames())
require.Equal(t, expectedTxData, m.pendingTransactions[expectedChannelID])
// Now we still have a pending transaction
// Let's mark it as failed
m.TxFailed(expectedChannelID)
require.Empty(t, m.pendingTransactions)
// There should be a frame in the pending channel now
require.Equal(t, 1, m.pendingChannel.NumFrames())
} }
func TestChannelManager_TxResend(t *testing.T) { func TestChannelManager_TxResend(t *testing.T) {
require := require.New(t) require := require.New(t)
rng := rand.New(rand.NewSource(time.Now().UnixNano())) rng := rand.New(rand.NewSource(time.Now().UnixNano()))
log := testlog.Logger(t, log.LvlError) log := testlog.Logger(t, log.LvlError)
m := batcher.NewChannelManager(log, batcher.ChannelConfig{ m := NewChannelManager(log, ChannelConfig{
TargetFrameSize: 0, TargetFrameSize: 0,
MaxFrameSize: 120_000, MaxFrameSize: 120_000,
ApproxComprRatio: 1.0, ApproxComprRatio: 1.0,
......
...@@ -55,7 +55,7 @@ func (t *TransactionManager) SendTransaction(ctx context.Context, data []byte) ( ...@@ -55,7 +55,7 @@ func (t *TransactionManager) SendTransaction(ctx context.Context, data []byte) (
return nil, fmt.Errorf("failed to create tx: %w", err) return nil, fmt.Errorf("failed to create tx: %w", err)
} }
ctx, cancel := context.WithTimeout(ctx, 100*time.Second) // TODO: Select a timeout that makes sense here. ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) // TODO: Select a timeout that makes sense here.
defer cancel() defer cancel()
if receipt, err := t.txMgr.Send(ctx, tx); err != nil { if receipt, err := t.txMgr.Send(ctx, tx); err != nil {
t.log.Warn("unable to publish tx", "err", err, "data_size", len(data)) t.log.Warn("unable to publish tx", "err", err, "data_size", len(data))
......
...@@ -10,6 +10,7 @@ import ( ...@@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/stretchr/testify/require"
) )
// L1Miner wraps a L1Replica with instrumented block building ability. // L1Miner wraps a L1Replica with instrumented block building ability.
...@@ -100,26 +101,33 @@ func (s *L1Miner) ActL1IncludeTx(from common.Address) Action { ...@@ -100,26 +101,33 @@ func (s *L1Miner) ActL1IncludeTx(from common.Address) Action {
t.Fatalf("no pending txs from %s, and have %d unprocessable queued txs from this account", from, len(q)) t.Fatalf("no pending txs from %s, and have %d unprocessable queued txs from this account", from, len(q))
} }
tx := txs[i] tx := txs[i]
if tx.Gas() > s.l1BuildingHeader.GasLimit { s.IncludeTx(t, tx)
t.Fatalf("tx consumes %d gas, more than available in L1 block %d", tx.Gas(), s.l1BuildingHeader.GasLimit)
}
if tx.Gas() > uint64(*s.l1GasPool) {
t.InvalidAction("action takes too much gas: %d, only have %d", tx.Gas(), uint64(*s.l1GasPool))
return
}
s.pendingIndices[from] = i + 1 // won't retry the tx s.pendingIndices[from] = i + 1 // won't retry the tx
s.l1BuildingState.SetTxContext(tx.Hash(), len(s.l1Transactions))
receipt, err := core.ApplyTransaction(s.l1Cfg.Config, s.l1Chain, &s.l1BuildingHeader.Coinbase,
s.l1GasPool, s.l1BuildingState, s.l1BuildingHeader, tx, &s.l1BuildingHeader.GasUsed, *s.l1Chain.GetVMConfig())
if err != nil {
s.l1TxFailed = append(s.l1TxFailed, tx)
t.Fatalf("failed to apply transaction to L1 block (tx %d): %w", len(s.l1Transactions), err)
}
s.l1Receipts = append(s.l1Receipts, receipt)
s.l1Transactions = append(s.l1Transactions, tx)
} }
} }
func (s *L1Miner) IncludeTx(t Testing, tx *types.Transaction) {
from, err := s.l1Signer.Sender(tx)
require.NoError(t, err)
s.log.Info("including tx", "nonce", tx.Nonce(), "from", from)
if tx.Gas() > s.l1BuildingHeader.GasLimit {
t.Fatalf("tx consumes %d gas, more than available in L1 block %d", tx.Gas(), s.l1BuildingHeader.GasLimit)
}
if tx.Gas() > uint64(*s.l1GasPool) {
t.InvalidAction("action takes too much gas: %d, only have %d", tx.Gas(), uint64(*s.l1GasPool))
return
}
s.l1BuildingState.SetTxContext(tx.Hash(), len(s.l1Transactions))
receipt, err := core.ApplyTransaction(s.l1Cfg.Config, s.l1Chain, &s.l1BuildingHeader.Coinbase,
s.l1GasPool, s.l1BuildingState, s.l1BuildingHeader, tx, &s.l1BuildingHeader.GasUsed, *s.l1Chain.GetVMConfig())
if err != nil {
s.l1TxFailed = append(s.l1TxFailed, tx)
t.Fatalf("failed to apply transaction to L1 block (tx %d): %v", len(s.l1Transactions), err)
}
s.l1Receipts = append(s.l1Receipts, receipt)
s.l1Transactions = append(s.l1Transactions, tx)
}
func (s *L1Miner) ActL1SetFeeRecipient(coinbase common.Address) { func (s *L1Miner) ActL1SetFeeRecipient(coinbase common.Address) {
s.prefCoinbase = coinbase s.prefCoinbase = coinbase
if s.l1Building { if s.l1Building {
......
...@@ -91,6 +91,10 @@ func (s *L2Batcher) SubmittingData() bool { ...@@ -91,6 +91,10 @@ func (s *L2Batcher) SubmittingData() bool {
// ActL2BatchBuffer adds the next L2 block to the batch buffer. // ActL2BatchBuffer adds the next L2 block to the batch buffer.
// If the buffer is being submitted, the buffer is wiped. // If the buffer is being submitted, the buffer is wiped.
func (s *L2Batcher) ActL2BatchBuffer(t Testing) { func (s *L2Batcher) ActL2BatchBuffer(t Testing) {
require.NoError(t, s.Buffer(t), "failed to add block to channel")
}
func (s *L2Batcher) Buffer(t Testing) error {
if s.l2Submitting { // break ongoing submitting work if necessary if s.l2Submitting { // break ongoing submitting work if necessary
s.l2ChannelOut = nil s.l2ChannelOut = nil
s.l2Submitting = false s.l2Submitting = false
...@@ -120,7 +124,7 @@ func (s *L2Batcher) ActL2BatchBuffer(t Testing) { ...@@ -120,7 +124,7 @@ func (s *L2Batcher) ActL2BatchBuffer(t Testing) {
s.l2ChannelOut = nil s.l2ChannelOut = nil
} else { } else {
s.log.Info("nothing left to submit") s.log.Info("nothing left to submit")
return return nil
} }
} }
// Create channel if we don't have one yet // Create channel if we don't have one yet
...@@ -143,9 +147,10 @@ func (s *L2Batcher) ActL2BatchBuffer(t Testing) { ...@@ -143,9 +147,10 @@ func (s *L2Batcher) ActL2BatchBuffer(t Testing) {
s.l2ChannelOut = nil s.l2ChannelOut = nil
} }
if _, err := s.l2ChannelOut.AddBlock(block); err != nil { // should always succeed if _, err := s.l2ChannelOut.AddBlock(block); err != nil { // should always succeed
t.Fatalf("failed to add block to channel: %v", err) return err
} }
s.l2BufferedBlock = eth.ToBlockID(block) s.l2BufferedBlock = eth.ToBlockID(block)
return nil
} }
func (s *L2Batcher) ActL2ChannelClose(t Testing) { func (s *L2Batcher) ActL2ChannelClose(t Testing) {
...@@ -158,7 +163,7 @@ func (s *L2Batcher) ActL2ChannelClose(t Testing) { ...@@ -158,7 +163,7 @@ func (s *L2Batcher) ActL2ChannelClose(t Testing) {
} }
// ActL2BatchSubmit constructs a batch tx from previous buffered L2 blocks, and submits it to L1 // ActL2BatchSubmit constructs a batch tx from previous buffered L2 blocks, and submits it to L1
func (s *L2Batcher) ActL2BatchSubmit(t Testing) { func (s *L2Batcher) ActL2BatchSubmit(t Testing, txOpts ...func(tx *types.DynamicFeeTx)) {
// Don't run this action if there's no data to submit // Don't run this action if there's no data to submit
if s.l2ChannelOut == nil { if s.l2ChannelOut == nil {
t.InvalidAction("need to buffer data first, cannot batch submit with empty buffer") t.InvalidAction("need to buffer data first, cannot batch submit with empty buffer")
...@@ -192,6 +197,9 @@ func (s *L2Batcher) ActL2BatchSubmit(t Testing) { ...@@ -192,6 +197,9 @@ func (s *L2Batcher) ActL2BatchSubmit(t Testing) {
GasFeeCap: gasFeeCap, GasFeeCap: gasFeeCap,
Data: data.Bytes(), Data: data.Bytes(),
} }
for _, opt := range txOpts {
opt(rawTx)
}
gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true, false) gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true, false)
require.NoError(t, err, "need to compute intrinsic gas") require.NoError(t, err, "need to compute intrinsic gas")
rawTx.Gas = gas rawTx.Gas = gas
......
package actions package actions
import ( import (
"crypto/rand"
"errors"
"math/big" "math/big"
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
...@@ -12,6 +15,7 @@ import ( ...@@ -12,6 +15,7 @@ import (
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-node/eth" "github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/testlog" "github.com/ethereum-optimism/optimism/op-node/testlog"
) )
...@@ -378,3 +382,131 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) { ...@@ -378,3 +382,131 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) {
sequencer.ActL2PipelineFull(t) sequencer.ActL2PipelineFull(t)
require.Equal(t, sequencer.L2Unsafe(), sequencer.L2Safe(), "same for sequencer") require.Equal(t, sequencer.L2Unsafe(), sequencer.L2Safe(), "same for sequencer")
} }
// TestBigL2Txs tests a high-throughput case with a constrained batcher:
// - Fill 100 L2 blocks to near max-capacity, with txs of 120 KB each
// - Buffer the L2 blocks into channels together as much as possible, submit data-txs only when necessary
// (just before crossing the max RLP channel size)
// - Limit the data-tx size to 40 KB, to force data to be split across multiple data-txs
// - Defer all data-tx inclusion till the end
// - Fill L1 blocks with data-txs until we have processed them all
// - Run the verifier, and check if it derives the same L2 chain as was created by the sequencer.
//
// The goal of this test is to quickly run through an otherwise very slow process of submitting and including lots of data.
// This does not test the batcher code, but is really focused on testing the batcher utils
// and channel-decoding verifier code in the derive package.
func TestBigL2Txs(gt *testing.T) {
t := NewDefaultTesting(gt)
p := &e2eutils.TestParams{
MaxSequencerDrift: 100,
SequencerWindowSize: 1000,
ChannelTimeout: 200, // give enough space to buffer large amounts of data before submitting it
}
dp := e2eutils.MakeDeployParams(t, p)
sd := e2eutils.Setup(t, dp, defaultAlloc)
log := testlog.Logger(t, log.LvlInfo)
miner, engine, sequencer := setupSequencerTest(t, sd, log)
_, verifier := setupVerifier(t, sd, log, miner.L1Client(t, sd.RollupCfg))
batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
MinL1TxSize: 0,
MaxL1TxSize: 40_000, // try a small batch size, to force the data to be split between more frames
BatcherKey: dp.Secrets.Batcher,
}, sequencer.RollupClient(), miner.EthClient(), engine.EthClient())
sequencer.ActL2PipelineFull(t)
verifier.ActL2PipelineFull(t)
cl := engine.EthClient()
batcherNonce := uint64(0) // manually track batcher nonce. the "pending nonce" value in tx-pool is incorrect after we fill the pending-block gas limit and keep adding txs to the pool.
batcherTxOpts := func(tx *types.DynamicFeeTx) {
tx.Nonce = batcherNonce
batcherNonce++
tx.GasFeeCap = e2eutils.Ether(1) // be very generous with basefee, since we're spamming L1
}
// build many L2 blocks filled to the brim with large txs of random data
for i := 0; i < 100; i++ {
aliceNonce, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
status := sequencer.SyncStatus()
// build empty L1 blocks as necessary, so the L2 sequencer can continue to include txs while not drifting too far out
if status.UnsafeL2.Time >= status.HeadL1.Time+12 {
miner.ActEmptyBlock(t)
}
sequencer.ActL1HeadSignal(t)
sequencer.ActL2StartBlock(t)
baseFee := engine.l2Chain.CurrentBlock().BaseFee() // this will go quite high, since so many consecutive blocks are filled at capacity.
// fill the block with large L2 txs from alice
for n := aliceNonce; ; n++ {
require.NoError(t, err)
signer := types.LatestSigner(sd.L2Cfg.Config)
data := make([]byte, 120_000) // very large L2 txs, as large as the tx-pool will accept
_, err := rand.Read(data[:]) // fill with random bytes, to make compression ineffective
require.NoError(t, err)
gas, err := core.IntrinsicGas(data, nil, false, true, true, false)
require.NoError(t, err)
if gas > engine.l2GasPool.Gas() {
break
}
tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
ChainID: sd.L2Cfg.Config.ChainID,
Nonce: n,
GasTipCap: big.NewInt(2 * params.GWei),
GasFeeCap: new(big.Int).Add(new(big.Int).Mul(baseFee, big.NewInt(2)), big.NewInt(2*params.GWei)),
Gas: gas,
To: &dp.Addresses.Bob,
Value: big.NewInt(0),
Data: data,
})
require.NoError(gt, cl.SendTransaction(t.Ctx(), tx))
engine.ActL2IncludeTx(dp.Addresses.Alice)(t)
}
sequencer.ActL2EndBlock(t)
for batcher.l2BufferedBlock.Number < sequencer.SyncStatus().UnsafeL2.Number {
// if we run out of space, close the channel and submit all the txs
if err := batcher.Buffer(t); errors.Is(err, derive.ErrTooManyRLPBytes) {
log.Info("flushing filled channel to batch txs", "id", batcher.l2ChannelOut.ID())
batcher.ActL2ChannelClose(t)
for batcher.l2ChannelOut != nil {
batcher.ActL2BatchSubmit(t, batcherTxOpts)
}
}
}
}
// if anything is left in the channel, submit it
if batcher.l2ChannelOut != nil {
log.Info("flushing trailing channel to batch txs", "id", batcher.l2ChannelOut.ID())
batcher.ActL2ChannelClose(t)
for batcher.l2ChannelOut != nil {
batcher.ActL2BatchSubmit(t, batcherTxOpts)
}
}
// build L1 blocks until we're out of txs
txs, _ := miner.eth.TxPool().ContentFrom(dp.Addresses.Batcher)
for {
if len(txs) == 0 {
break
}
miner.ActL1StartBlock(12)(t)
for range txs {
if len(txs) == 0 {
break
}
tx := txs[0]
if miner.l1GasPool.Gas() < tx.Gas() { // fill the L1 block with batcher txs until we run out of gas
break
}
log.Info("including batcher tx", "nonce", tx)
miner.IncludeTx(t, tx)
txs = txs[1:]
}
miner.ActL1EndBlock(t)
}
verifier.ActL1HeadSignal(t)
verifier.ActL2PipelineFull(t)
require.Equal(t, sequencer.SyncStatus().UnsafeL2, verifier.SyncStatus().SafeL2, "verifier synced sequencer data even with huge txs in blocks")
}
...@@ -26,6 +26,16 @@ the transaction hash. ...@@ -26,6 +26,16 @@ the transaction hash.
into channels. It then stores the channels with metadata on disk where the file name is the Channel ID. into channels. It then stores the channels with metadata on disk where the file name is the Channel ID.
### Force Close
`batch_decoder force-close` will create the transaction data that can be sent from the batcher address to
the batch inbox address to force close the given channels. This allows future channels to be read without
waiting for the channel timeout. It uses the results from `batch_decoder fetch` to create the close
transaction, because the data it creates for a specific channel depends on whether the channel has already
been closed: if it has been closed but is missing specific frames, those frames need to be generated
differently than by simply closing the channel.
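For example, after populating the transaction cache with `batch_decoder fetch`, the close data for a channel can be printed with `batch_decoder force-close --id <channel-id> --inbox <batch-inbox-address> --in /tmp/batch_decoder/transactions_cache` (the `inbox` and `in` flags are optional and default to the values shown in the CLI definition).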
## JQ Cheat Sheet ## JQ Cheat Sheet
`jq` is a really useful utility for manipulating JSON files. `jq` is a really useful utility for manipulating JSON files.
...@@ -48,7 +58,6 @@ jq "select(.is_ready == false)|[.id, .frames[0].inclusion_block, .frames[0].tran ...@@ -48,7 +58,6 @@ jq "select(.is_ready == false)|[.id, .frames[0].inclusion_block, .frames[0].tran
## Roadmap ## Roadmap
- Parallel transaction fetching (CLI-3563) - Parallel transaction fetching (CLI-3563)
- Create force-close channel tx data from channel ID (CLI-3564)
- Pull the batches out of channels & store that information inside the ChannelWithMetadata (CLI-3565) - Pull the batches out of channels & store that information inside the ChannelWithMetadata (CLI-3565)
- Transaction Bytes used - Transaction Bytes used
- Total uncompressed (different from tx bytes) + compressed bytes - Total uncompressed (different from tx bytes) + compressed bytes
......
...@@ -9,6 +9,7 @@ import ( ...@@ -9,6 +9,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/fetch" "github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/fetch"
"github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/reassemble" "github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/reassemble"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/urfave/cli" "github.com/urfave/cli"
...@@ -113,6 +114,46 @@ func main() { ...@@ -113,6 +114,46 @@ func main() {
return nil return nil
}, },
}, },
{
Name: "force-close",
Usage: "Create the tx data which will force close a channel",
Flags: []cli.Flag{
cli.StringFlag{
Name: "id",
Required: true,
Usage: "ID of the channel to close",
},
cli.StringFlag{
Name: "inbox",
Value: "0x0000000000000000000000000000000000000000",
Usage: "(Optional) Batch Inbox Address",
},
cli.StringFlag{
Name: "in",
Value: "/tmp/batch_decoder/transactions_cache",
Usage: "Cache directory for the found transactions",
},
},
Action: func(cliCtx *cli.Context) error {
var id derive.ChannelID
if err := (&id).UnmarshalText([]byte(cliCtx.String("id"))); err != nil {
log.Fatal(err)
}
frames := reassemble.LoadFrames(cliCtx.String("in"), common.HexToAddress(cliCtx.String("inbox")))
var filteredFrames []derive.Frame
for _, frame := range frames {
if frame.Frame.ID == id {
filteredFrames = append(filteredFrames, frame.Frame)
}
}
data, err := derive.ForceCloseTxData(filteredFrames)
if err != nil {
log.Fatal(err)
}
fmt.Printf("%x\n", data)
return nil
},
},
} }
if err := app.Run(os.Args); err != nil { if err := app.Run(os.Args); err != nil {
......
...@@ -38,14 +38,8 @@ type Config struct { ...@@ -38,14 +38,8 @@ type Config struct {
OutDirectory string OutDirectory string
} }
// Channels loads all transactions from the given input directory that are submitted to the func LoadFrames(directory string, inbox common.Address) []FrameWithMetadata {
// specified batch inbox and then re-assembles all channels & writes the re-assembled channels txns := loadTransactions(directory, inbox)
// to the out directory.
func Channels(config Config) {
if err := os.MkdirAll(config.OutDirectory, 0750); err != nil {
log.Fatal(err)
}
txns := loadTransactions(config.InDirectory, config.BatchInbox)
// Sort first by block number then by transaction index inside the block number range. // Sort first by block number then by transaction index inside the block number range.
// This is to match the order they are processed in derivation. // This is to match the order they are processed in derivation.
sort.Slice(txns, func(i, j int) bool { sort.Slice(txns, func(i, j int) bool {
...@@ -56,7 +50,17 @@ func Channels(config Config) { ...@@ -56,7 +50,17 @@ func Channels(config Config) {
} }
}) })
frames := transactionsToFrames(txns) return transactionsToFrames(txns)
}
// Channels loads all transactions from the given input directory that are submitted to the
// specified batch inbox and then re-assembles all channels & writes the re-assembled channels
// to the out directory.
func Channels(config Config) {
if err := os.MkdirAll(config.OutDirectory, 0750); err != nil {
log.Fatal(err)
}
frames := LoadFrames(config.InDirectory, config.BatchInbox)
framesByChannel := make(map[derive.ChannelID][]FrameWithMetadata) framesByChannel := make(map[derive.ChannelID][]FrameWithMetadata)
for _, frame := range frames { for _, frame := range frames {
framesByChannel[frame.Frame.ID] = append(framesByChannel[frame.Frame.ID], frame) framesByChannel[frame.Frame.ID] = append(framesByChannel[frame.Frame.ID], frame)
...@@ -143,6 +147,7 @@ func transactionsToFrames(txns []fetch.TransactionWithMetadata) []FrameWithMetad ...@@ -143,6 +147,7 @@ func transactionsToFrames(txns []fetch.TransactionWithMetadata) []FrameWithMetad
return out return out
} }
// if inbox is the zero address, it will load all frames
func loadTransactions(dir string, inbox common.Address) []fetch.TransactionWithMetadata { func loadTransactions(dir string, inbox common.Address) []fetch.TransactionWithMetadata {
files, err := os.ReadDir(dir) files, err := os.ReadDir(dir)
if err != nil { if err != nil {
...@@ -152,7 +157,7 @@ func loadTransactions(dir string, inbox common.Address) []fetch.TransactionWithM ...@@ -152,7 +157,7 @@ func loadTransactions(dir string, inbox common.Address) []fetch.TransactionWithM
for _, file := range files { for _, file := range files {
f := path.Join(dir, file.Name()) f := path.Join(dir, file.Name())
txm := loadTransactionsFile(f) txm := loadTransactionsFile(f)
if txm.InboxAddr == inbox && txm.ValidSender { if (inbox == common.Address{} || txm.InboxAddr == inbox) && txm.ValidSender {
out = append(out, txm) out = append(out, txm)
} }
} }
......
...@@ -213,3 +213,58 @@ func BlockToBatch(block *types.Block) (*BatchData, error) { ...@@ -213,3 +213,58 @@ func BlockToBatch(block *types.Block) (*BatchData, error) {
}, },
}, nil }, nil
} }
// ForceCloseTxData generates the transaction data for a transaction which will force close
// a channel. It should be given every frame of that channel which has been submitted on
// chain. The frames should be given in the order that they appear on L1.
func ForceCloseTxData(frames []Frame) ([]byte, error) {
if len(frames) == 0 {
return nil, errors.New("must provide at least one frame")
}
frameNumbers := make(map[uint16]struct{})
id := frames[0].ID
closeNumber := uint16(0)
closed := false
for i, frame := range frames {
if !closed && frame.IsLast {
closeNumber = frame.FrameNumber
}
closed = closed || frame.IsLast
frameNumbers[frame.FrameNumber] = struct{}{}
if frame.ID != id {
return nil, fmt.Errorf("invalid ID in list: first ID: %v, %vth ID: %v", id, i, frame.ID)
}
}
var out bytes.Buffer
out.WriteByte(DerivationVersion0)
if !closed {
f := Frame{
ID: id,
FrameNumber: 0,
Data: nil,
IsLast: true,
}
if err := f.MarshalBinary(&out); err != nil {
return nil, err
}
} else {
for i := uint16(0); i <= closeNumber; i++ {
if _, ok := frameNumbers[i]; ok {
continue
}
f := Frame{
ID: id,
FrameNumber: i,
Data: nil,
IsLast: false,
}
if err := f.MarshalBinary(&out); err != nil {
return nil, err
}
}
}
return out.Bytes(), nil
}
...@@ -5,6 +5,7 @@ import ( ...@@ -5,6 +5,7 @@ import (
"math/big" "math/big"
"testing" "testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
...@@ -49,3 +50,69 @@ func TestRLPByteLimit(t *testing.T) { ...@@ -49,3 +50,69 @@ func TestRLPByteLimit(t *testing.T) {
require.Equal(t, err, rlp.ErrValueTooLarge) require.Equal(t, err, rlp.ErrValueTooLarge)
require.Equal(t, out2, "") require.Equal(t, out2, "")
} }
func TestForceCloseTxData(t *testing.T) {
id := [16]byte{0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef}
tests := []struct {
frames []Frame
errors bool
output string
}{
{
frames: []Frame{},
errors: true,
output: "",
},
{
frames: []Frame{Frame{FrameNumber: 0, IsLast: false}, Frame{ID: id, FrameNumber: 1, IsLast: true}},
errors: true,
output: "",
},
{
frames: []Frame{Frame{ID: id, FrameNumber: 0, IsLast: false}},
errors: false,
output: "00deadbeefdeadbeefdeadbeefdeadbeef00000000000001",
},
{
frames: []Frame{Frame{ID: id, FrameNumber: 0, IsLast: true}},
errors: false,
output: "00",
},
{
frames: []Frame{Frame{ID: id, FrameNumber: 1, IsLast: false}},
errors: false,
output: "00deadbeefdeadbeefdeadbeefdeadbeef00000000000001",
},
{
frames: []Frame{Frame{ID: id, FrameNumber: 1, IsLast: true}},
errors: false,
output: "00deadbeefdeadbeefdeadbeefdeadbeef00000000000000",
},
{
frames: []Frame{Frame{ID: id, FrameNumber: 2, IsLast: true}},
errors: false,
output: "00deadbeefdeadbeefdeadbeefdeadbeef00000000000000deadbeefdeadbeefdeadbeefdeadbeef00010000000000",
},
{
frames: []Frame{Frame{ID: id, FrameNumber: 1, IsLast: false}, Frame{ID: id, FrameNumber: 3, IsLast: true}},
errors: false,
output: "00deadbeefdeadbeefdeadbeefdeadbeef00000000000000deadbeefdeadbeefdeadbeefdeadbeef00020000000000",
},
{
frames: []Frame{Frame{ID: id, FrameNumber: 1, IsLast: false}, Frame{ID: id, FrameNumber: 3, IsLast: true}, Frame{ID: id, FrameNumber: 5, IsLast: true}},
errors: false,
output: "00deadbeefdeadbeefdeadbeefdeadbeef00000000000000deadbeefdeadbeefdeadbeefdeadbeef00020000000000",
},
}
for i, test := range tests {
out, err := ForceCloseTxData(test.frames)
if test.errors {
require.NotNil(t, err, "Should error on tc %v", i)
require.Nil(t, out, "Should return no value in tc %v", i)
} else {
require.NoError(t, err, "Should not error on tc %v", i)
require.Equal(t, common.FromHex(test.output), out, "Should match output tc %v", i)
}
}
}
...@@ -104,8 +104,10 @@ type EngineQueue struct { ...@@ -104,8 +104,10 @@ type EngineQueue struct {
finalizedL1 eth.L1BlockRef finalizedL1 eth.L1BlockRef
safeAttributes *eth.PayloadAttributes // The queued-up attributes
unsafePayloads PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps safeAttributesParent eth.L2BlockRef
safeAttributes *eth.PayloadAttributes
unsafePayloads PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps
// Tracks which L2 blocks were last derived from which L1 block. At most finalityLookback large. // Tracks which L2 blocks were last derived from which L1 block. At most finalityLookback large.
finalityData []FinalityData finalityData []FinalityData
...@@ -225,6 +227,7 @@ func (eq *EngineQueue) Step(ctx context.Context) error { ...@@ -225,6 +227,7 @@ func (eq *EngineQueue) Step(ctx context.Context) error {
return err return err
} else { } else {
eq.safeAttributes = next eq.safeAttributes = next
eq.safeAttributesParent = eq.safeHead
eq.log.Debug("Adding next safe attributes", "safe_head", eq.safeHead, "next", eq.safeAttributes) eq.log.Debug("Adding next safe attributes", "safe_head", eq.safeHead, "next", eq.safeAttributes)
return NotEnoughData return NotEnoughData
} }
...@@ -427,6 +430,20 @@ func (eq *EngineQueue) tryNextUnsafePayload(ctx context.Context) error { ...@@ -427,6 +430,20 @@ func (eq *EngineQueue) tryNextUnsafePayload(ctx context.Context) error {
} }
func (eq *EngineQueue) tryNextSafeAttributes(ctx context.Context) error { func (eq *EngineQueue) tryNextSafeAttributes(ctx context.Context) error {
if eq.safeAttributes == nil { // sanity check the attributes are there
return nil
}
// validate the safe attributes before processing them. The engine may have completed processing them through other means.
if eq.safeHead != eq.safeAttributesParent {
if eq.safeHead.ParentHash != eq.safeAttributesParent.Hash {
return NewResetError(fmt.Errorf("safe head changed to %s with parent %s, conflicting with queued safe attributes on top of %s",
eq.safeHead, eq.safeHead.ParentID(), eq.safeAttributesParent))
}
eq.log.Warn("queued safe attributes are stale, safe-head progressed",
"safe_head", eq.safeHead, "safe_head_parent", eq.safeHead.ParentID(), "attributes_parent", eq.safeAttributesParent)
eq.safeAttributes = nil
return nil
}
if eq.safeHead.Number < eq.unsafeHead.Number { if eq.safeHead.Number < eq.unsafeHead.Number {
return eq.consolidateNextSafeAttributes(ctx) return eq.consolidateNextSafeAttributes(ctx)
} else if eq.safeHead.Number == eq.unsafeHead.Number { } else if eq.safeHead.Number == eq.unsafeHead.Number {
...@@ -486,14 +503,15 @@ func (eq *EngineQueue) forceNextSafeAttributes(ctx context.Context) error { ...@@ -486,14 +503,15 @@ func (eq *EngineQueue) forceNextSafeAttributes(ctx context.Context) error {
_, errType, err = eq.ConfirmPayload(ctx) _, errType, err = eq.ConfirmPayload(ctx)
} }
if err != nil { if err != nil {
_ = eq.CancelPayload(ctx, true)
switch errType { switch errType {
case BlockInsertTemporaryErr: case BlockInsertTemporaryErr:
// RPC errors are recoverable, we can retry the buffered payload attributes later. // RPC errors are recoverable, we can retry the buffered payload attributes later.
return NewTemporaryError(fmt.Errorf("temporarily cannot insert new safe block: %w", err)) return NewTemporaryError(fmt.Errorf("temporarily cannot insert new safe block: %w", err))
case BlockInsertPrestateErr: case BlockInsertPrestateErr:
_ = eq.CancelPayload(ctx, true)
return NewResetError(fmt.Errorf("need reset to resolve pre-state problem: %w", err)) return NewResetError(fmt.Errorf("need reset to resolve pre-state problem: %w", err))
case BlockInsertPayloadErr: case BlockInsertPayloadErr:
_ = eq.CancelPayload(ctx, true)
eq.log.Warn("could not process payload derived from L1 data, dropping batch", "err", err) eq.log.Warn("could not process payload derived from L1 data, dropping batch", "err", err)
// Count the number of deposits to see if the tx list is deposit only. // Count the number of deposits to see if the tx list is deposit only.
depositCount := 0 depositCount := 0
......
...@@ -2,10 +2,13 @@ package derive ...@@ -2,10 +2,13 @@ package derive
import ( import (
"context" "context"
"fmt"
"io" "io"
"math/big"
"math/rand" "math/rand"
"testing" "testing"
"github.com/holiman/uint256"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
...@@ -19,6 +22,7 @@ import ( ...@@ -19,6 +22,7 @@ import (
type fakeAttributesQueue struct { type fakeAttributesQueue struct {
origin eth.L1BlockRef origin eth.L1BlockRef
attrs *eth.PayloadAttributes
} }
func (f *fakeAttributesQueue) Origin() eth.L1BlockRef { func (f *fakeAttributesQueue) Origin() eth.L1BlockRef {
...@@ -26,7 +30,10 @@ func (f *fakeAttributesQueue) Origin() eth.L1BlockRef { ...@@ -26,7 +30,10 @@ func (f *fakeAttributesQueue) Origin() eth.L1BlockRef {
} }
func (f *fakeAttributesQueue) NextAttributes(_ context.Context, _ eth.L2BlockRef) (*eth.PayloadAttributes, error) { func (f *fakeAttributesQueue) NextAttributes(_ context.Context, _ eth.L2BlockRef) (*eth.PayloadAttributes, error) {
return nil, io.EOF if f.attrs == nil {
return nil, io.EOF
}
return f.attrs, nil
} }
var _ NextAttributesProvider = (*fakeAttributesQueue)(nil) var _ NextAttributesProvider = (*fakeAttributesQueue)(nil)
...@@ -837,3 +844,166 @@ func TestVerifyNewL1Origin(t *testing.T) { ...@@ -837,3 +844,166 @@ func TestVerifyNewL1Origin(t *testing.T) {
}) })
} }
} }
func TestBlockBuildingRace(t *testing.T) {
logger := testlog.Logger(t, log.LvlInfo)
eng := &testutils.MockEngine{}
rng := rand.New(rand.NewSource(1234))
refA := testutils.RandomBlockRef(rng)
refA0 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: 0,
ParentHash: common.Hash{},
Time: refA.Time,
L1Origin: refA.ID(),
SequenceNumber: 0,
}
cfg := &rollup.Config{
Genesis: rollup.Genesis{
L1: refA.ID(),
L2: refA0.ID(),
L2Time: refA0.Time,
SystemConfig: eth.SystemConfig{
BatcherAddr: common.Address{42},
Overhead: [32]byte{123},
Scalar: [32]byte{42},
GasLimit: 20_000_000,
},
},
BlockTime: 1,
SeqWindowSize: 2,
}
refA1 := eth.L2BlockRef{
Hash: testutils.RandomHash(rng),
Number: refA0.Number + 1,
ParentHash: refA0.Hash,
Time: refA0.Time + cfg.BlockTime,
L1Origin: refA.ID(),
SequenceNumber: 1,
}
l1F := &testutils.MockL1Source{}
eng.ExpectL2BlockRefByLabel(eth.Finalized, refA0, nil)
eng.ExpectL2BlockRefByLabel(eth.Safe, refA0, nil)
eng.ExpectL2BlockRefByLabel(eth.Unsafe, refA0, nil)
l1F.ExpectL1BlockRefByNumber(refA.Number, refA, nil)
l1F.ExpectL1BlockRefByHash(refA.Hash, refA, nil)
l1F.ExpectL1BlockRefByHash(refA.Hash, refA, nil)
eng.ExpectSystemConfigByL2Hash(refA0.Hash, cfg.Genesis.SystemConfig, nil)
metrics := &testutils.TestDerivationMetrics{}
gasLimit := eth.Uint64Quantity(20_000_000)
attrs := &eth.PayloadAttributes{
Timestamp: eth.Uint64Quantity(refA1.Time),
PrevRandao: eth.Bytes32{},
SuggestedFeeRecipient: common.Address{},
Transactions: nil,
NoTxPool: false,
GasLimit: &gasLimit,
}
prev := &fakeAttributesQueue{origin: refA, attrs: attrs}
eq := NewEngineQueue(logger, cfg, eng, metrics, prev, l1F)
require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF)
id := eth.PayloadID{0xff}
preFc := &eth.ForkchoiceState{
HeadBlockHash: refA0.Hash,
SafeBlockHash: refA0.Hash,
FinalizedBlockHash: refA0.Hash,
}
preFcRes := &eth.ForkchoiceUpdatedResult{
PayloadStatus: eth.PayloadStatusV1{
Status: eth.ExecutionValid,
LatestValidHash: &refA0.Hash,
ValidationError: nil,
},
PayloadID: &id,
}
// Expect initial forkchoice update
eng.ExpectForkchoiceUpdate(preFc, nil, preFcRes, nil)
require.NoError(t, eq.Step(context.Background()), "clean forkchoice state after reset")
// Expect initial building update, to process the attributes we queued up
eng.ExpectForkchoiceUpdate(preFc, attrs, preFcRes, nil)
// Don't let the payload be confirmed straight away
mockErr := fmt.Errorf("mock error")
eng.ExpectGetPayload(id, nil, mockErr)
// The job will not be cancelled; the untyped error is a temporary error
require.ErrorIs(t, eq.Step(context.Background()), NotEnoughData, "queue up attributes")
require.ErrorIs(t, eq.Step(context.Background()), mockErr, "expecting to fail to process attributes")
require.NotNil(t, eq.safeAttributes, "still have attributes")
// Now allow the building to complete
a1InfoTx, err := L1InfoDepositBytes(refA1.SequenceNumber, &testutils.MockBlockInfo{
InfoHash: refA.Hash,
InfoParentHash: refA.ParentHash,
InfoCoinbase: common.Address{},
InfoRoot: common.Hash{},
InfoNum: refA.Number,
InfoTime: refA.Time,
InfoMixDigest: [32]byte{},
InfoBaseFee: big.NewInt(7),
InfoReceiptRoot: common.Hash{},
InfoGasUsed: 0,
}, cfg.Genesis.SystemConfig, false)
require.NoError(t, err)
payloadA1 := &eth.ExecutionPayload{
ParentHash: refA1.ParentHash,
FeeRecipient: attrs.SuggestedFeeRecipient,
StateRoot: eth.Bytes32{},
ReceiptsRoot: eth.Bytes32{},
LogsBloom: eth.Bytes256{},
PrevRandao: eth.Bytes32{},
BlockNumber: eth.Uint64Quantity(refA1.Number),
GasLimit: gasLimit,
GasUsed: 0,
Timestamp: eth.Uint64Quantity(refA1.Time),
ExtraData: nil,
BaseFeePerGas: *uint256.NewInt(7),
BlockHash: refA1.Hash,
Transactions: []eth.Data{
a1InfoTx,
},
}
eng.ExpectGetPayload(id, payloadA1, nil)
eng.ExpectNewPayload(payloadA1, &eth.PayloadStatusV1{
Status: eth.ExecutionValid,
LatestValidHash: &refA1.Hash,
ValidationError: nil,
}, nil)
postFc := &eth.ForkchoiceState{
HeadBlockHash: refA1.Hash,
SafeBlockHash: refA1.Hash,
FinalizedBlockHash: refA0.Hash,
}
postFcRes := &eth.ForkchoiceUpdatedResult{
PayloadStatus: eth.PayloadStatusV1{
Status: eth.ExecutionValid,
LatestValidHash: &refA1.Hash,
ValidationError: nil,
},
PayloadID: &id,
}
eng.ExpectForkchoiceUpdate(postFc, nil, postFcRes, nil)
// Now complete the job, as external user of the engine
_, _, err = eq.ConfirmPayload(context.Background())
require.NoError(t, err)
require.Equal(t, refA1, eq.SafeL2Head(), "safe head should have changed")
require.NoError(t, eq.Step(context.Background()))
require.Nil(t, eq.safeAttributes, "attributes should now be invalidated")
l1F.AssertExpectations(t)
eng.AssertExpectations(t)
}
...@@ -107,6 +107,12 @@ func NewDerivationPipeline(log log.Logger, cfg *rollup.Config, l1Fetcher L1Fetch ...@@ -107,6 +107,12 @@ func NewDerivationPipeline(log log.Logger, cfg *rollup.Config, l1Fetcher L1Fetch
} }
} }
// EngineReady returns true if the engine is ready to be used.
// When it's being reset, its state is inconsistent and should not be used externally.
func (dp *DerivationPipeline) EngineReady() bool {
return dp.resetting > 0
}
func (dp *DerivationPipeline) Reset() { func (dp *DerivationPipeline) Reset() {
dp.resetting = 0 dp.resetting = 0
} }
......
...@@ -56,6 +56,7 @@ type DerivationPipeline interface { ...@@ -56,6 +56,7 @@ type DerivationPipeline interface {
SafeL2Head() eth.L2BlockRef SafeL2Head() eth.L2BlockRef
UnsafeL2Head() eth.L2BlockRef UnsafeL2Head() eth.L2BlockRef
Origin() eth.L1BlockRef Origin() eth.L1BlockRef
EngineReady() bool
} }
type L1StateIface interface { type L1StateIface interface {
......
...@@ -123,6 +123,14 @@ func (d *Sequencer) CancelBuildingBlock(ctx context.Context) { ...@@ -123,6 +123,14 @@ func (d *Sequencer) CancelBuildingBlock(ctx context.Context) {
// PlanNextSequencerAction returns a desired delay till the RunNextSequencerAction call. // PlanNextSequencerAction returns a desired delay till the RunNextSequencerAction call.
func (d *Sequencer) PlanNextSequencerAction() time.Duration { func (d *Sequencer) PlanNextSequencerAction() time.Duration {
// If the engine is busy building safe blocks (and thus changing the head that we would sync on top of),
// then give it time to sync up.
if onto, _, safe := d.engine.BuildingPayload(); safe {
d.log.Warn("delaying sequencing to not interrupt safe-head changes", "onto", onto, "onto_time", onto.Time)
// approximates the worst-case time it takes to build a block, to reattempt sequencing after.
return time.Second * time.Duration(d.config.BlockTime)
}
head := d.engine.UnsafeL2Head() head := d.engine.UnsafeL2Head()
now := d.timeNow() now := d.timeNow()
...@@ -173,7 +181,7 @@ func (d *Sequencer) BuildingOnto() eth.L2BlockRef { ...@@ -173,7 +181,7 @@ func (d *Sequencer) BuildingOnto() eth.L2BlockRef {
// Only critical errors are bubbled up, other errors are handled internally. // Only critical errors are bubbled up, other errors are handled internally.
// Internally starting or sealing of a block may fail with a derivation-like error: // Internally starting or sealing of a block may fail with a derivation-like error:
// - If it is a critical error, the error is bubbled up to the caller. // - If it is a critical error, the error is bubbled up to the caller.
// - If it is a reset error, the ResettableEngineControl used to build blocks is requested to reset, and a backoff aplies. // - If it is a reset error, the ResettableEngineControl used to build blocks is requested to reset, and a backoff applies.
// No attempt is made at completing the block building. // No attempt is made at completing the block building.
// - If it is a temporary error, a backoff is applied to reattempt building later. // - If it is a temporary error, a backoff is applied to reattempt building later.
// - If it is any other error, a backoff is applied and building is cancelled. // - If it is any other error, a backoff is applied and building is cancelled.
...@@ -187,8 +195,15 @@ func (d *Sequencer) BuildingOnto() eth.L2BlockRef { ...@@ -187,8 +195,15 @@ func (d *Sequencer) BuildingOnto() eth.L2BlockRef {
// since it can consolidate previously sequenced blocks by comparing sequenced inputs with derived inputs. // since it can consolidate previously sequenced blocks by comparing sequenced inputs with derived inputs.
// If the derivation pipeline does force a conflicting block, then an ongoing sequencer task might still finish, // If the derivation pipeline does force a conflicting block, then an ongoing sequencer task might still finish,
// but the derivation can continue to reset until the chain is correct. // but the derivation can continue to reset until the chain is correct.
// If the engine is currently building safe blocks, then that building is not interrupted, and sequencing is delayed.
func (d *Sequencer) RunNextSequencerAction(ctx context.Context) (*eth.ExecutionPayload, error) { func (d *Sequencer) RunNextSequencerAction(ctx context.Context) (*eth.ExecutionPayload, error) {
if _, buildingID, _ := d.engine.BuildingPayload(); buildingID != (eth.PayloadID{}) { if onto, buildingID, safe := d.engine.BuildingPayload(); buildingID != (eth.PayloadID{}) {
if safe {
d.log.Warn("avoiding sequencing to not interrupt safe-head changes", "onto", onto, "onto_time", onto.Time)
// approximates the worst-case time it takes to build a block, to reattempt sequencing after.
d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.config.BlockTime))
return nil, nil
}
payload, err := d.CompleteBuildingBlock(ctx) payload, err := d.CompleteBuildingBlock(ctx)
if err != nil { if err != nil {
if errors.Is(err, derive.ErrCritical) { if errors.Is(err, derive.ErrCritical) {
......
...@@ -209,7 +209,9 @@ func (s *Driver) eventLoop() { ...@@ -209,7 +209,9 @@ func (s *Driver) eventLoop() {
for { for {
// If we are sequencing, and the L1 state is ready, update the trigger for the next sequencer action. // If we are sequencing, and the L1 state is ready, update the trigger for the next sequencer action.
// This may adjust at any time based on fork-choice changes or previous errors. // This may adjust at any time based on fork-choice changes or previous errors.
if s.driverConfig.SequencerEnabled && !s.driverConfig.SequencerStopped && s.l1State.L1Head() != (eth.L1BlockRef{}) { // And avoid sequencing if the derivation pipeline indicates the engine is not ready.
if s.driverConfig.SequencerEnabled && !s.driverConfig.SequencerStopped &&
s.l1State.L1Head() != (eth.L1BlockRef{}) && s.derivation.EngineReady() {
// update sequencer time if the head changed // update sequencer time if the head changed
if s.sequencer.BuildingOnto().ID() != s.derivation.UnsafeL2Head().ID() { if s.sequencer.BuildingOnto().ID() != s.derivation.UnsafeL2Head().ID() {
planSequencerAction() planSequencerAction()
......
...@@ -157,7 +157,7 @@ func (cfg *Config) CheckL2ChainID(ctx context.Context, client L2Client) error { ...@@ -157,7 +157,7 @@ func (cfg *Config) CheckL2ChainID(ctx context.Context, client L2Client) error {
return err return err
} }
if cfg.L2ChainID.Cmp(id) != 0 { if cfg.L2ChainID.Cmp(id) != 0 {
return fmt.Errorf("incorrect L2 RPC chain id %d, expected %d", cfg.L2ChainID, id) return fmt.Errorf("incorrect L2 RPC chain id, expected from config %d, obtained from client %d", cfg.L2ChainID, id)
} }
return nil return nil
} }
......
...@@ -384,30 +384,31 @@ func (l *L2OutputSubmitter) loop() { ...@@ -384,30 +384,31 @@ func (l *L2OutputSubmitter) loop() {
for { for {
select { select {
case <-ticker.C: case <-ticker.C:
cCtx, cancel := context.WithTimeout(ctx, 3*time.Minute) cCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
output, shouldPropose, err := l.FetchNextOutputInfo(cCtx) output, shouldPropose, err := l.FetchNextOutputInfo(cCtx)
cancel()
if err != nil { if err != nil {
l.log.Error("Failed to fetch next output", "err", err)
cancel()
break break
} }
if !shouldPropose { if !shouldPropose {
cancel()
break break
} }
cCtx, cancel = context.WithTimeout(ctx, 30*time.Second)
tx, err := l.CreateProposalTx(cCtx, output) tx, err := l.CreateProposalTx(cCtx, output)
cancel()
if err != nil { if err != nil {
l.log.Error("Failed to create proposal transaction", "err", err) l.log.Error("Failed to create proposal transaction", "err", err)
cancel()
break break
} }
cCtx, cancel = context.WithTimeout(ctx, 10*time.Minute)
if err := l.SendTransaction(cCtx, tx); err != nil { if err := l.SendTransaction(cCtx, tx); err != nil {
l.log.Error("Failed to send proposal transaction", "err", err) l.log.Error("Failed to send proposal transaction", "err", err)
cancel() cancel()
break break
} else {
cancel()
} }
cancel()
case <-l.done: case <-l.done:
return return
......
# @eth-optimism/actor-tests # @eth-optimism/actor-tests
## 0.0.23
### Patch Changes
- Updated dependencies [22c3885f5]
- Updated dependencies [66cafc00a]
- Updated dependencies [f52c07529]
- @eth-optimism/contracts-bedrock@0.13.1
- @eth-optimism/sdk@2.0.1
## 0.0.22 ## 0.0.22
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/actor-tests", "name": "@eth-optimism/actor-tests",
"version": "0.0.22", "version": "0.0.23",
"description": "A library and suite of tests to stress test Optimism Bedrock.", "description": "A library and suite of tests to stress test Optimism Bedrock.",
"license": "MIT", "license": "MIT",
"author": "", "author": "",
...@@ -18,9 +18,9 @@ ...@@ -18,9 +18,9 @@
"test:coverage": "yarn test" "test:coverage": "yarn test"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/contracts-bedrock": "0.13.0", "@eth-optimism/contracts-bedrock": "0.13.1",
"@eth-optimism/core-utils": "^0.12.0", "@eth-optimism/core-utils": "^0.12.0",
"@eth-optimism/sdk": "^2.0.0", "@eth-optimism/sdk": "^2.0.1",
"@types/chai": "^4.2.18", "@types/chai": "^4.2.18",
"@types/chai-as-promised": "^7.1.4", "@types/chai-as-promised": "^7.1.4",
"async-mutex": "^0.3.2", "async-mutex": "^0.3.2",
......
# @eth-optimism/atst # @eth-optimism/atst
## 0.2.0
### Minor Changes
- dcd13eec1: Update readAttestations and prepareWriteAttestation to handle keys longer than 32 bytes
- 9fd5be8e2: Remove broken allowFailures as option
- 3f4a43542: Move react api to @eth-optimism/atst/react so react isn't required to run the core sdk
- 71727eae9: Fix main and module in atst package.json
- 3d5f26c49: Deprecate parseAttestationBytes and createRawKey in favor for createKey, createValue
### Patch Changes
- 68bbe48b6: Update docs
- 6fea2f2db: Fixed bug with atst not defaulting to currently connected chain
## 0.1.0 ## 0.1.0
### Minor Changes ### Minor Changes
......
{ {
"name": "@eth-optimism/atst", "name": "@eth-optimism/atst",
"version": "0.1.0", "version": "0.2.0",
"type": "module", "type": "module",
"main": "dist/index.cjs", "main": "dist/index.cjs",
"types": "src/index.ts", "types": "src/index.ts",
......
# @eth-optimism/drippie-mon # @eth-optimism/drippie-mon
## 0.2.1
### Patch Changes
- Updated dependencies [fecd42d67]
- Updated dependencies [66cafc00a]
- @eth-optimism/common-ts@0.8.1
- @eth-optimism/sdk@2.0.1
## 0.2.0 ## 0.2.0
### Minor Changes ### Minor Changes
......
{ {
"private": true, "private": true,
"name": "@eth-optimism/chain-mon", "name": "@eth-optimism/chain-mon",
"version": "0.2.0", "version": "0.2.1",
"description": "[Optimism] Chain monitoring services", "description": "[Optimism] Chain monitoring services",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -32,10 +32,10 @@ ...@@ -32,10 +32,10 @@
"url": "https://github.com/ethereum-optimism/optimism.git" "url": "https://github.com/ethereum-optimism/optimism.git"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/common-ts": "0.8.0", "@eth-optimism/common-ts": "0.8.1",
"@eth-optimism/contracts-periphery": "1.0.7", "@eth-optimism/contracts-periphery": "1.0.7",
"@eth-optimism/core-utils": "0.12.0", "@eth-optimism/core-utils": "0.12.0",
"@eth-optimism/sdk": "2.0.0", "@eth-optimism/sdk": "2.0.1",
"ethers": "^5.7.0", "ethers": "^5.7.0",
"@types/dateformat": "^5.0.0", "@types/dateformat": "^5.0.0",
"chai-as-promised": "^7.1.1", "chai-as-promised": "^7.1.1",
......
# @eth-optimism/common-ts # @eth-optimism/common-ts
## 0.8.1
### Patch Changes
- fecd42d67: Fix BaseServiceV2 configuration for caseCase options
## 0.8.0 ## 0.8.0
### Minor Changes ### Minor Changes
......
{ {
"name": "@eth-optimism/common-ts", "name": "@eth-optimism/common-ts",
"version": "0.8.0", "version": "0.8.1",
"description": "[Optimism] Advanced typescript tooling used by various services", "description": "[Optimism] Advanced typescript tooling used by various services",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
......
# @eth-optimism/contracts-bedrock # @eth-optimism/contracts-bedrock
## 0.13.1
### Patch Changes
- 22c3885f5: Optionally print cast commands during migration
- f52c07529: Print tenderly simulation links during deployment
## 0.13.0 ## 0.13.0
### Minor Changes ### Minor Changes
......
...@@ -23,7 +23,7 @@ const config: HardhatUserConfig = { ...@@ -23,7 +23,7 @@ const config: HardhatUserConfig = {
live: false, live: false,
}, },
mainnet: { mainnet: {
url: process.env.RPC_URL || 'http://localhost:8545', url: process.env.L1_RPC || 'http://localhost:8545',
}, },
devnetL1: { devnetL1: {
live: false, live: false,
......
{ {
"name": "@eth-optimism/contracts-bedrock", "name": "@eth-optimism/contracts-bedrock",
"version": "0.13.0", "version": "0.13.1",
"description": "Contracts for Optimism Specs", "description": "Contracts for Optimism Specs",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
......
...@@ -27,7 +27,7 @@ func checkOk(ok bool) { ...@@ -27,7 +27,7 @@ func checkOk(ok bool) {
// Shorthand to ease go's god awful error handling // Shorthand to ease go's god awful error handling
func checkErr(err error, failReason string) { func checkErr(err error, failReason string) {
if err != nil { if err != nil {
panic(fmt.Errorf("%s: %s", failReason, err)) panic(fmt.Errorf("%s: %w", failReason, err))
} }
} }
......
...@@ -53,7 +53,7 @@ ...@@ -53,7 +53,7 @@
"url": "https://github.com/ethereum-optimism/optimism.git" "url": "https://github.com/ethereum-optimism/optimism.git"
}, },
"devDependencies": { "devDependencies": {
"@eth-optimism/contracts-bedrock": "0.13.0", "@eth-optimism/contracts-bedrock": "0.13.1",
"@eth-optimism/core-utils": "^0.12.0", "@eth-optimism/core-utils": "^0.12.0",
"@eth-optimism/hardhat-deploy-config": "^0.2.5", "@eth-optimism/hardhat-deploy-config": "^0.2.5",
"@ethersproject/hardware-wallets": "^5.7.0", "@ethersproject/hardware-wallets": "^5.7.0",
......
# data transport layer # data transport layer
## 0.5.54
### Patch Changes
- Updated dependencies [fecd42d67]
- @eth-optimism/common-ts@0.8.1
## 0.5.53 ## 0.5.53
### Patch Changes ### Patch Changes
......
{ {
"private": true, "private": true,
"name": "@eth-optimism/data-transport-layer", "name": "@eth-optimism/data-transport-layer",
"version": "0.5.53", "version": "0.5.54",
"description": "[Optimism] Service for shuttling data from L1 into L2", "description": "[Optimism] Service for shuttling data from L1 into L2",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -36,7 +36,7 @@ ...@@ -36,7 +36,7 @@
"url": "https://github.com/ethereum-optimism/optimism.git" "url": "https://github.com/ethereum-optimism/optimism.git"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/common-ts": "0.8.0", "@eth-optimism/common-ts": "0.8.1",
"@eth-optimism/contracts": "0.5.40", "@eth-optimism/contracts": "0.5.40",
"@eth-optimism/core-utils": "0.12.0", "@eth-optimism/core-utils": "0.12.0",
"@ethersproject/providers": "^5.7.0", "@ethersproject/providers": "^5.7.0",
......
# @eth-optimism/fault-detector # @eth-optimism/fault-detector
## 0.6.2
### Patch Changes
- f9b579d55: Fixes a bug that would cause the fault detector to error out if no outputs had been proposed yet.
- Updated dependencies [fecd42d67]
- Updated dependencies [66cafc00a]
- @eth-optimism/common-ts@0.8.1
- @eth-optimism/sdk@2.0.1
## 0.6.1 ## 0.6.1
### Patch Changes ### Patch Changes
......
{ {
"private": true, "private": true,
"name": "@eth-optimism/fault-detector", "name": "@eth-optimism/fault-detector",
"version": "0.6.1", "version": "0.6.2",
"description": "[Optimism] Service for detecting faulty L2 output proposals", "description": "[Optimism] Service for detecting faulty L2 output proposals",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -47,10 +47,10 @@ ...@@ -47,10 +47,10 @@
"ts-node": "^10.9.1" "ts-node": "^10.9.1"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/common-ts": "^0.8.0", "@eth-optimism/common-ts": "^0.8.1",
"@eth-optimism/contracts": "^0.5.40", "@eth-optimism/contracts": "^0.5.40",
"@eth-optimism/core-utils": "^0.12.0", "@eth-optimism/core-utils": "^0.12.0",
"@eth-optimism/sdk": "^2.0.0", "@eth-optimism/sdk": "^2.0.1",
"@ethersproject/abstract-provider": "^5.7.0" "@ethersproject/abstract-provider": "^5.7.0"
} }
} }
# @eth-optimism/message-relayer # @eth-optimism/message-relayer
## 0.5.32
### Patch Changes
- Updated dependencies [fecd42d67]
- Updated dependencies [66cafc00a]
- @eth-optimism/common-ts@0.8.1
- @eth-optimism/sdk@2.0.1
## 0.5.31 ## 0.5.31
### Patch Changes ### Patch Changes
......
{ {
"private": true, "private": true,
"name": "@eth-optimism/message-relayer", "name": "@eth-optimism/message-relayer",
"version": "0.5.31", "version": "0.5.32",
"description": "[Optimism] Service for automatically relaying L2 to L1 transactions", "description": "[Optimism] Service for automatically relaying L2 to L1 transactions",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -31,9 +31,9 @@ ...@@ -31,9 +31,9 @@
"url": "https://github.com/ethereum-optimism/optimism.git" "url": "https://github.com/ethereum-optimism/optimism.git"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/common-ts": "0.8.0", "@eth-optimism/common-ts": "0.8.1",
"@eth-optimism/core-utils": "0.12.0", "@eth-optimism/core-utils": "0.12.0",
"@eth-optimism/sdk": "2.0.0", "@eth-optimism/sdk": "2.0.1",
"ethers": "^5.7.0" "ethers": "^5.7.0"
}, },
"devDependencies": { "devDependencies": {
......
# @eth-optimism/replica-healthcheck # @eth-optimism/replica-healthcheck
## 1.2.3
### Patch Changes
- Updated dependencies [fecd42d67]
- @eth-optimism/common-ts@0.8.1
## 1.2.2 ## 1.2.2
### Patch Changes ### Patch Changes
......
{ {
"private": true, "private": true,
"name": "@eth-optimism/replica-healthcheck", "name": "@eth-optimism/replica-healthcheck",
"version": "1.2.2", "version": "1.2.3",
"description": "[Optimism] Service for monitoring the health of replica nodes", "description": "[Optimism] Service for monitoring the health of replica nodes",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
"url": "https://github.com/ethereum-optimism/optimism.git" "url": "https://github.com/ethereum-optimism/optimism.git"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/common-ts": "0.8.0", "@eth-optimism/common-ts": "0.8.1",
"@eth-optimism/core-utils": "0.12.0", "@eth-optimism/core-utils": "0.12.0",
"@ethersproject/abstract-provider": "^5.7.0" "@ethersproject/abstract-provider": "^5.7.0"
}, },
......
# @eth-optimism/sdk # @eth-optimism/sdk
## 2.0.1
### Patch Changes
- 66cafc00a: Update migrated withdrawal gaslimit calculation
- Updated dependencies [22c3885f5]
- Updated dependencies [f52c07529]
- @eth-optimism/contracts-bedrock@0.13.1
## 2.0.0 ## 2.0.0
### Major Changes ### Major Changes
......
{ {
"name": "@eth-optimism/sdk", "name": "@eth-optimism/sdk",
"version": "2.0.0", "version": "2.0.1",
"description": "[Optimism] Tools for working with Optimism", "description": "[Optimism] Tools for working with Optimism",
"main": "dist/index", "main": "dist/index",
"types": "dist/index", "types": "dist/index",
...@@ -50,7 +50,7 @@ ...@@ -50,7 +50,7 @@
"dependencies": { "dependencies": {
"@eth-optimism/contracts": "0.5.40", "@eth-optimism/contracts": "0.5.40",
"@eth-optimism/core-utils": "0.12.0", "@eth-optimism/core-utils": "0.12.0",
"@eth-optimism/contracts-bedrock": "0.13.0", "@eth-optimism/contracts-bedrock": "0.13.1",
"lodash": "^4.17.21", "lodash": "^4.17.21",
"merkletreejs": "^0.2.27", "merkletreejs": "^0.2.27",
"rlp": "^2.2.7" "rlp": "^2.2.7"
......