Commit f0618203 authored by Murphy Law, committed by GitHub

Merge branch 'develop' into feat/publish-tx

parents f8348862 4bfddfbb
---
'@eth-optimism/teleportr': patch
---
Add SuggestGasTipCap fallback
---
'@eth-optimism/batch-submitter-service': patch
'@eth-optimism/gas-oracle': patch
'@eth-optimism/integration-tests': patch
'@eth-optimism/l2geth': patch
'@eth-optimism/hardhat-node': patch
'@eth-optimism/contracts': patch
'@eth-optimism/data-transport-layer': patch
'@eth-optimism/message-relayer': patch
---
Refactored Dockerfiles
---
'@eth-optimism/indexer': patch
---
Indexer: initial release
---
'@eth-optimism/batch-submitter-service': patch
'@eth-optimism/teleportr': patch
---
Count reverted transactions in failed_submissions
---
'@eth-optimism/batch-submitter-service': patch
---
Enforce min/max tx size on plaintext batch encoding
name: indexer unit tests

on:
  push:
    paths:
      - 'go/indexer/**'
    branches:
      - 'master'
      - 'develop'
      - '*rc'
      - 'release/*'
  pull_request:
    branches:
      - '*'
  workflow_dispatch:

defaults:
  run:
    working-directory: './go/indexer'

jobs:
  tests:
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install
        run: make
      - name: Test
        run: make test
...@@ -74,7 +74,7 @@ Our update process takes the form of a PR merging the `develop` branch into the
### The `develop` branch
Our primary development branch is [`develop`](https://github.com/ethereum-optimism/optimism/tree/develop/).
-`develop` contains the most up-to-date software that remains backwards compatible with our latest experimental [network deployments](https://community.optimism.io/docs/developers/networks.html).
+`develop` contains the most up-to-date software that remains backwards compatible with our latest experimental [network deployments](https://community.optimism.io/docs/useful-tools/networks/).
If you're making a backwards compatible change, please direct your pull request towards `develop`.
**Changes to contracts within `packages/contracts/contracts` are usually NOT considered backwards compatible and SHOULD be made against a release candidate branch**.
...
...@@ -78,7 +78,6 @@ func GenSequencerBatchParams(
    shouldStartAtElement uint64,
    blockOffset uint64,
    batch []BatchElement,
-   batchType BatchType,
) (*AppendSequencerBatchParams, error) {
    var (
...@@ -189,6 +188,5 @@ func GenSequencerBatchParams(
        TotalElementsToAppend: uint64(len(batch)),
        Contexts:              contexts,
        Txs:                   txs,
-       Type:                  batchType,
    }, nil
}
...@@ -199,39 +199,57 @@ func (d *Driver) CraftBatchTx(
    var pruneCount int
    for {
        batchParams, err := GenSequencerBatchParams(
-           shouldStartAt, d.cfg.BlockOffset, batchElements, d.cfg.BatchType,
+           shouldStartAt, d.cfg.BlockOffset, batchElements,
        )
        if err != nil {
            return nil, err
        }
-       batchArguments, err := batchParams.Serialize()
+       // Use plaintext encoding to enforce size constraints.
+       plaintextBatchArguments, err := batchParams.Serialize(BatchTypeLegacy)
        if err != nil {
            return nil, err
        }
        appendSequencerBatchID := d.ctcABI.Methods[appendSequencerBatchMethodName].ID
-       batchCallData := append(appendSequencerBatchID, batchArguments...)
-       // Continue pruning until calldata size is less than configured max.
-       calldataSize := uint64(len(batchCallData))
-       if calldataSize > d.cfg.MaxTxSize {
+       plaintextCalldata := append(appendSequencerBatchID, plaintextBatchArguments...)
+       // Continue pruning until plaintext calldata size is less than
+       // configured max.
+       plaintextCalldataSize := uint64(len(plaintextCalldata))
+       if plaintextCalldataSize > d.cfg.MaxTxSize {
            oldLen := len(batchElements)
            newBatchElementsLen := (oldLen * 9) / 10
            batchElements = batchElements[:newBatchElementsLen]
-           log.Info(name+" pruned batch", "old_num_txs", oldLen, "new_num_txs", newBatchElementsLen)
+           log.Info(name+" pruned batch",
+               "plaintext_size", plaintextCalldataSize,
+               "max_tx_size", d.cfg.MaxTxSize,
+               "old_num_txs", oldLen,
+               "new_num_txs", newBatchElementsLen)
            pruneCount++
            continue
-       } else if calldataSize < d.cfg.MinTxSize {
+       } else if plaintextCalldataSize < d.cfg.MinTxSize {
            log.Info(name+" batch tx size below minimum",
-               "size", calldataSize, "min_tx_size", d.cfg.MinTxSize)
+               "plaintext_size", plaintextCalldataSize,
+               "min_tx_size", d.cfg.MinTxSize,
+               "num_txs", len(batchElements))
            return nil, nil
        }
        d.metrics.NumElementsPerBatch().Observe(float64(len(batchElements)))
        d.metrics.BatchPruneCount.Set(float64(pruneCount))
-       log.Info(name+" batch constructed", "num_txs", len(batchElements), "length", len(batchCallData))
+       // Finally, encode the batch using the configured batch type.
+       var calldata = plaintextCalldata
+       if d.cfg.BatchType != BatchTypeLegacy {
+           batchArguments, err := batchParams.Serialize(d.cfg.BatchType)
+           if err != nil {
+               return nil, err
+           }
+           calldata = append(appendSequencerBatchID, batchArguments...)
+       }
+       log.Info(name+" batch constructed", "num_txs", len(batchElements), "length", len(calldata))
        opts, err := bind.NewKeyedTransactorWithChainID(
            d.cfg.PrivKey, d.cfg.ChainID,
...@@ -243,7 +261,7 @@ func (d *Driver) CraftBatchTx(
        opts.Nonce = nonce
        opts.NoSend = true
-       tx, err := d.rawCtcContract.RawTransact(opts, batchCallData)
+       tx, err := d.rawCtcContract.RawTransact(opts, calldata)
        switch {
        case err == nil:
            return tx, nil
...@@ -258,7 +276,7 @@ func (d *Driver) CraftBatchTx(
            log.Warn(d.cfg.Name + " eth_maxPriorityFeePerGas is unsupported " +
                "by current backend, using fallback gasTipCap")
            opts.GasTipCap = drivers.FallbackGasTipCap
-           return d.rawCtcContract.RawTransact(opts, batchCallData)
+           return d.rawCtcContract.RawTransact(opts, calldata)
        default:
            return nil, err
...
...@@ -47,6 +47,25 @@ type BatchContext struct {
    BlockNumber uint64 `json:"block_number"`
}
+// IsMarkerContext returns true if the BatchContext is a marker context used to
+// specify the encoding format. This is only valid if called on the first
+// BatchContext in the calldata.
+func (c BatchContext) IsMarkerContext() bool {
+   return c.Timestamp == 0
+}
+// MarkerBatchType returns the BatchType specified by a marker BatchContext.
+// The return value is only valid if called on the first BatchContext in the
+// calldata and IsMarkerContext returns true.
+func (c BatchContext) MarkerBatchType() BatchType {
+   switch c.BlockNumber {
+   case 0:
+       return BatchTypeZlib
+   default:
+       return BatchTypeLegacy
+   }
+}
// Write encodes the BatchContext into a 16-byte stream using the following
// encoding:
// - num_sequenced_txs: 3 bytes
...@@ -83,13 +102,34 @@ func (c *BatchContext) Read(r io.Reader) error {
    return readUint64(r, &c.BlockNumber, 5)
}
-// BatchType represents the type of batch being
-// submitted. When the first context in the batch
-// has a timestamp of 0, the blocknumber is interpreted
-// as an enum that represets the type
+// BatchType represents the type of batch being submitted. When the first
+// context in the batch has a timestamp of 0, the blocknumber is interpreted as
+// an enum that represents the type.
type BatchType int8
-// Implements the Stringer interface for BatchType
+const (
+   // BatchTypeLegacy represents the legacy batch type.
+   BatchTypeLegacy BatchType = -1
+   // BatchTypeZlib represents a batch type where the transaction data is
+   // compressed using zlib.
+   BatchTypeZlib BatchType = 0
+)
+// BatchTypeFromString returns the BatchType enum based on a human readable
+// string.
+func BatchTypeFromString(s string) BatchType {
+   switch s {
+   case "zlib", "ZLIB":
+       return BatchTypeZlib
+   case "legacy", "LEGACY":
+       return BatchTypeLegacy
+   default:
+       return BatchTypeLegacy
+   }
+}
+// String implements the Stringer interface for BatchType.
func (b BatchType) String() string {
    switch b {
    case BatchTypeLegacy:
...@@ -101,27 +141,26 @@ func (b BatchType) String() string {
    }
}
-// BatchTypeFromString returns the BatchType
-// enum based on a human readable string
-func BatchTypeFromString(s string) BatchType {
-   switch s {
-   case "zlib", "ZLIB":
-       return BatchTypeZlib
-   case "legacy", "LEGACY":
-       return BatchTypeLegacy
+// MarkerContext returns the marker context, if any, for the given batch type.
+func (b BatchType) MarkerContext() *BatchContext {
+   switch b {
+   // No marker context for legacy encoding.
+   case BatchTypeLegacy:
+       return nil
+   // Zlib marker context sets block number equal to zero.
+   case BatchTypeZlib:
+       return &BatchContext{
+           Timestamp:   0,
+           BlockNumber: 0,
+       }
    default:
-       return BatchTypeLegacy
+       return nil
    }
}
-const (
-   // BatchTypeLegacy represets the legacy batch type
-   BatchTypeLegacy BatchType = -1
-   // BatchTypeZlib represents a batch type where the
-   // transaction data is compressed using zlib
-   BatchTypeZlib BatchType = 0
-)
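Not part of the diff — a minimal sketch of how the marker-context API above fits together. The import path is an assumption; the behavior shown follows directly from the functions added in this file.

```go
package example

import (
    "fmt"

    // Import path assumed for illustration.
    "github.com/ethereum-optimism/optimism/go/batch-submitter/drivers/sequencer"
)

func markerContextDemo() {
    // Resolve a configured string ("zlib" or "legacy") to a BatchType.
    batchType := sequencer.BatchTypeFromString("zlib")

    // Non-legacy encodings prepend a marker context: its zero timestamp flags
    // it as a marker, and its block number encodes the batch type (0 == zlib).
    if marker := batchType.MarkerContext(); marker != nil {
        fmt.Println(marker.IsMarkerContext())              // true
        fmt.Println(marker.MarkerBatchType() == batchType) // true
    }
}
```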
// AppendSequencerBatchParams holds the raw data required to submit a batch of
// L2 txs to L1 CTC contract. Rather than encoding the objects using the
// standard ABI encoding, a custom encoding is provided in the call data to
...@@ -146,9 +185,6 @@ type AppendSequencerBatchParams struct {
    // Txs contains all sequencer txs that will be recorded in the L1 CTC
    // contract.
    Txs []*CachedTx
-   // The type of the batch
-   Type BatchType
}
// Write encodes the AppendSequencerBatchParams using the following format:
...@@ -173,7 +209,11 @@ type AppendSequencerBatchParams struct {
//
// Note that writing to a bytes.Buffer cannot
// error, so errors are ignored here
-func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error {
+func (p *AppendSequencerBatchParams) Write(
+   w *bytes.Buffer,
+   batchType BatchType,
+) error {
    _ = writeUint64(w, p.ShouldStartAtElement, 5)
    _ = writeUint64(w, p.TotalElementsToAppend, 3)
...@@ -190,10 +230,10 @@ func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error {
    // copy the contexts so as to not mutate the struct
    // when it is a typed batch
    contexts := make([]BatchContext, 0, len(p.Contexts)+1)
-   if p.Type == BatchTypeZlib {
-       // All zero values for the single batch context
-       // is desired here as blocknumber 0 means it is a zlib batch
-       contexts = append(contexts, BatchContext{})
+   // Add the marker context, if any, for non-legacy encodings.
+   markerContext := batchType.MarkerContext()
+   if markerContext != nil {
+       contexts = append(contexts, *markerContext)
    }
    contexts = append(contexts, p.Contexts...)
...@@ -203,7 +243,7 @@ func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error {
        context.Write(w)
    }
-   switch p.Type {
+   switch batchType {
    case BatchTypeLegacy:
        // Write each length-prefixed tx.
        for _, tx := range p.Txs {
...@@ -225,7 +265,7 @@ func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error {
        }
    default:
-       return fmt.Errorf("Unknown batch type: %s", p.Type)
+       return fmt.Errorf("Unknown batch type: %s", batchType)
    }
    return nil
...@@ -233,9 +273,12 @@ func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error {
// Serialize performs the same encoding as Write, but returns the resulting
// bytes slice.
-func (p *AppendSequencerBatchParams) Serialize() ([]byte, error) {
+func (p *AppendSequencerBatchParams) Serialize(
+   batchType BatchType,
+) ([]byte, error) {
    var buf bytes.Buffer
-   if err := p.Write(&buf); err != nil {
+   if err := p.Write(&buf, batchType); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
...@@ -266,6 +309,9 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
        return err
    }
+   // Assume that it is a legacy batch at first; this will be overwritten if
+   // we detect a marker context.
+   var batchType = BatchTypeLegacy
    // Ensure that contexts is never nil
    p.Contexts = make([]BatchContext, 0)
    for i := uint64(0); i < numContexts; i++ {
...@@ -274,31 +320,34 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
            return err
        }
+       if i == 0 && batchContext.IsMarkerContext() {
+           batchType = batchContext.MarkerBatchType()
+           continue
+       }
        p.Contexts = append(p.Contexts, batchContext)
    }
-   // Assume that it is a legacy batch at first
-   p.Type = BatchTypeLegacy
-   // Handle backwards compatible batch types
-   if len(p.Contexts) > 0 && p.Contexts[0].Timestamp == 0 {
-       switch p.Contexts[0].BlockNumber {
-       case 0:
-           // zlib compressed transaction data
-           p.Type = BatchTypeZlib
-           // remove the first dummy context
-           p.Contexts = p.Contexts[1:]
-           numContexts--
+   // Define a closure to clean up the reader used by the specified encoding.
+   var closeReader func() error
+   switch batchType {
+   // The legacy serialization does not require closing, so we instantiate a
+   // dummy closure.
+   case BatchTypeLegacy:
+       closeReader = func() error { return nil }
+   // The zlib serialization requires decompression before reading the
+   // plaintext bytes, and also requires proper cleanup.
+   case BatchTypeZlib:
        zr, err := zlib.NewReader(r)
        if err != nil {
            return err
        }
-       defer zr.Close()
+       closeReader = zr.Close
        r = bufio.NewReader(zr)
    }
-   }
    // Deserialize any transactions. Since the number of txs is omitted
    // from the encoding, loop until the stream is consumed.
...@@ -315,7 +364,7 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
            if len(p.Txs) == 0 && len(p.Contexts) != 0 {
                return ErrMalformedBatch
            }
-           return nil
+           return closeReader()
        } else if err != nil {
            return err
        }
...@@ -327,7 +376,6 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
        p.Txs = append(p.Txs, NewCachedTx(tx))
    }
}
// writeUint64 writes the bottom `n` bytes of `val` to `w`.
...
...@@ -119,7 +119,6 @@ func testAppendSequencerBatchParamsEncodeDecode(
        TotalElementsToAppend: test.TotalElementsToAppend,
        Contexts:              test.Contexts,
        Txs:                   nil,
-       Type:                  sequencer.BatchTypeLegacy,
    }
    // Decode the batch from the test string.
...@@ -133,7 +132,6 @@ func testAppendSequencerBatchParamsEncodeDecode(
    } else {
        require.Nil(t, err)
    }
-   require.Equal(t, params.Type, sequencer.BatchTypeLegacy)
    // Assert that the decoded params match the expected params. The
    // transactions are compared separately (via hash), since the internal
...@@ -149,7 +147,7 @@ func testAppendSequencerBatchParamsEncodeDecode(
    // Finally, encode the decoded object and assert it matches the original
    // hex string.
-   paramsBytes, err := params.Serialize()
+   paramsBytes, err := params.Serialize(sequencer.BatchTypeLegacy)
    // Return early when testing error cases, no need to reserialize again
    if test.Error {
...@@ -161,17 +159,14 @@ func testAppendSequencerBatchParamsEncodeDecode(
    require.Equal(t, test.HexEncoding, hex.EncodeToString(paramsBytes))
    // Serialize the batches in compressed form
-   params.Type = sequencer.BatchTypeZlib
-   compressedParamsBytes, err := params.Serialize()
+   compressedParamsBytes, err := params.Serialize(sequencer.BatchTypeZlib)
    require.Nil(t, err)
    // Deserialize the compressed batch
    var paramsCompressed sequencer.AppendSequencerBatchParams
    err = paramsCompressed.Read(bytes.NewReader(compressedParamsBytes))
    require.Nil(t, err)
-   require.Equal(t, paramsCompressed.Type, sequencer.BatchTypeZlib)
-   expParams.Type = sequencer.BatchTypeZlib
    decompressedTxs := paramsCompressed.Txs
    paramsCompressed.Txs = nil
...@@ -189,3 +184,71 @@ func compareTxs(t *testing.T, a []*l2types.Transaction, b []*sequencer.CachedTx)
        require.Equal(t, txA.Hash(), b[i].Tx().Hash())
    }
}
// TestMarkerContext asserts that each batch type returns the correct marker
// context.
func TestMarkerContext(t *testing.T) {
    batchTypes := []sequencer.BatchType{
        sequencer.BatchTypeLegacy,
        sequencer.BatchTypeZlib,
    }
    for _, batchType := range batchTypes {
        t.Run(batchType.String(), func(t *testing.T) {
            markerContext := batchType.MarkerContext()
            if batchType == sequencer.BatchTypeLegacy {
                require.Nil(t, markerContext)
            } else {
                require.NotNil(t, markerContext)
                // All marker contexts MUST have a zero timestamp.
                require.Equal(t, uint64(0), markerContext.Timestamp)
                // Currently all other fields besides block number are defined
                // as zero.
                require.Equal(t, uint64(0), markerContext.NumSequencedTxs)
                require.Equal(t, uint64(0), markerContext.NumSubsequentQueueTxs)
                // Assert that the block number for each batch type is set to
                // the correct constant.
                switch batchType {
                case sequencer.BatchTypeZlib:
                    require.Equal(t, uint64(0), markerContext.BlockNumber)
                default:
                    t.Fatalf("unknown batch type")
                }
                // Ensure MarkerBatchType produces the expected BatchType.
                require.Equal(t, batchType, markerContext.MarkerBatchType())
            }
        })
    }
}
// TestIsMarkerContext asserts that IsMarkerContext returns true iff the
// timestamp is zero.
func TestIsMarkerContext(t *testing.T) {
    batchContext := sequencer.BatchContext{
        NumSequencedTxs:       1,
        NumSubsequentQueueTxs: 2,
        Timestamp:             3,
        BlockNumber:           4,
    }
    require.False(t, batchContext.IsMarkerContext())
    batchContext = sequencer.BatchContext{
        NumSequencedTxs:       0,
        NumSubsequentQueueTxs: 0,
        Timestamp:             3,
        BlockNumber:           0,
    }
    require.False(t, batchContext.IsMarkerContext())
    batchContext = sequencer.BatchContext{
        NumSequencedTxs:       1,
        NumSubsequentQueueTxs: 2,
        Timestamp:             0,
        BlockNumber:           4,
    }
    require.True(t, batchContext.IsMarkerContext())
}
...@@ -231,6 +231,7 @@ func TestClearPendingTxClearingTxConfirms(t *testing.T) {
            return &types.Receipt{
                TxHash:      txHash,
                BlockNumber: big.NewInt(int64(testBlockNumber)),
+               Status:      types.ReceiptStatusSuccessful,
            }, nil
        },
    })
...@@ -296,6 +297,7 @@ func TestClearPendingTxMultipleConfs(t *testing.T) {
            return &types.Receipt{
                TxHash:      txHash,
                BlockNumber: big.NewInt(int64(testBlockNumber)),
+               Status:      types.ReceiptStatusSuccessful,
            }, nil
        },
    }, numConfs)
...
...@@ -215,6 +215,18 @@ func (s *Service) eventLoop() {
            receipt, err := s.txMgr.Send(
                s.ctx, updateGasPrice, s.cfg.Driver.SendTransaction,
            )
+           // Record the confirmation time and gas used if we receive a
+           // receipt, as this indicates the transaction confirmed. We record
+           // these metrics here as the transaction may have reverted, and will
+           // abort below.
+           if receipt != nil {
+               batchConfirmationTime := time.Since(batchConfirmationStart) /
+                   time.Millisecond
+               s.metrics.BatchConfirmationTimeMs().Set(float64(batchConfirmationTime))
+               s.metrics.SubmissionGasUsedWei().Set(float64(receipt.GasUsed))
+           }
            if err != nil {
                log.Error(name+" unable to publish batch tx",
                    "err", err)
...@@ -225,11 +237,7 @@ func (s *Service) eventLoop() {
            // The transaction was successfully submitted.
            log.Info(name+" batch tx successfully published",
                "tx_hash", receipt.TxHash)
-           batchConfirmationTime := time.Since(batchConfirmationStart) /
-               time.Millisecond
-           s.metrics.BatchConfirmationTimeMs().Set(float64(batchConfirmationTime))
            s.metrics.BatchesSubmitted().Inc()
-           s.metrics.SubmissionGasUsedWei().Set(float64(receipt.GasUsed))
            s.metrics.SubmissionTimestamp().Set(float64(time.Now().UnixNano() / 1e6))
        case err := <-s.ctx.Done():
...
...@@ -2,6 +2,7 @@ package txmgr
import (
    "context"
+   "errors"
    "math/big"
    "strings"
    "sync"
...@@ -12,6 +13,9 @@ import (
    "github.com/ethereum/go-ethereum/log"
)
+// ErrReverted signals that a mined transaction reverted.
+var ErrReverted = errors.New("transaction reverted")
// UpdateGasPriceSendTxFunc defines a function signature for publishing a
// desired tx with a specific gas price. Implementations of this signature
// should also return promptly when the context is canceled.
...@@ -225,6 +229,9 @@ func (m *SimpleTxManager) Send(
        // The transaction has confirmed.
        case receipt := <-receiptChan:
+           if receipt.Status == types.ReceiptStatusFailed {
+               return receipt, ErrReverted
+           }
            return receipt, nil
        }
    }
...@@ -288,7 +295,10 @@ func waitMined(
        // tipHeight. The equation is rewritten in this form to avoid
        // underflows.
        if txHeight+numConfirmations <= tipHeight+1 {
-           log.Info("Transaction confirmed", "txHash", txHash)
+           reverted := receipt.Status == types.ReceiptStatusFailed
+           log.Info("Transaction confirmed",
+               "txHash", txHash,
+               "reverted", reverted)
            return receipt, nil
        }
...
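Not part of the diff — a rough sketch of how a caller of `Send` can react to the new `ErrReverted` sentinel. The import paths are assumptions, and the `onReverted` hook stands in for whatever bookkeeping the caller does (e.g. a failed-submissions counter); the real batch submitter records metrics as shown in the service loop above.

```go
package example

import (
    "context"
    "errors"

    // Import paths assumed for illustration.
    "github.com/ethereum-optimism/optimism/go/bss-core/txmgr"
    "github.com/ethereum/go-ethereum/core/types"
)

// submitOnce sends one transaction through the tx manager and distinguishes a
// mined-but-reverted result from other errors.
func submitOnce(
    ctx context.Context,
    mgr *txmgr.SimpleTxManager,
    updateGasPrice func(context.Context) (*types.Transaction, error),
    sendTx func(context.Context, *types.Transaction) error,
    onReverted func(),
) error {
    receipt, err := mgr.Send(ctx, updateGasPrice, sendTx)
    if receipt != nil {
        // A non-nil receipt means the tx confirmed, even if it reverted, so
        // confirmation-time and gas-used metrics can be recorded here.
        _ = receipt.GasUsed
    }
    if errors.Is(err, txmgr.ErrReverted) {
        // Mined but reverted on-chain: record the failure and abort this
        // batch rather than treating it as a successful submission.
        onReverted()
    }
    return err
}
```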
...@@ -98,6 +98,7 @@ func (g *gasPricer) sample() (*big.Int, *big.Int) {
type minedTxInfo struct {
    gasFeeCap   *big.Int
    blockNumber uint64
+   reverted    bool
}
// mockBackend implements txmgr.ReceiptSource that tracks mined transactions
...@@ -123,6 +124,20 @@ func newMockBackend() *mockBackend {
// TransactionReceipt with a matching txHash will result in a non-nil receipt.
// If a nil txHash is supplied this has the effect of mining an empty block.
func (b *mockBackend) mine(txHash *common.Hash, gasFeeCap *big.Int) {
+   b.mineWithStatus(txHash, gasFeeCap, false)
+}
+// mineWithStatus records a (txHash, gasFeeCap) pair as confirmed, but also
+// includes the option to specify whether or not the transaction reverted.
+// Subsequent calls to TransactionReceipt with a matching txHash will result in
+// a non-nil receipt. If a nil txHash is supplied this has the effect of mining
+// an empty block.
+func (b *mockBackend) mineWithStatus(
+   txHash *common.Hash,
+   gasFeeCap *big.Int,
+   revert bool,
+) {
    b.mu.Lock()
    defer b.mu.Unlock()
...@@ -131,6 +146,7 @@ func (b *mockBackend) mine(txHash *common.Hash, gasFeeCap *big.Int) {
        b.minedTxs[*txHash] = minedTxInfo{
            gasFeeCap:   gasFeeCap,
            blockNumber: b.blockHeight,
+           reverted:    revert,
        }
    }
}
...@@ -160,12 +176,18 @@ func (b *mockBackend) TransactionReceipt(
        return nil, nil
    }
+   var status = types.ReceiptStatusSuccessful
+   if txInfo.reverted {
+       status = types.ReceiptStatusFailed
+   }
    // Return the gas fee cap for the transaction in the GasUsed field so that
    // we can assert the proper tx confirmed in our tests.
    return &types.Receipt{
        TxHash:      txHash,
        GasUsed:     txInfo.gasFeeCap.Uint64(),
        BlockNumber: big.NewInt(int64(txInfo.blockNumber)),
+       Status:      status,
    }, nil
}
...@@ -201,6 +223,39 @@ func TestTxMgrConfirmAtMinGasPrice(t *testing.T) {
    require.Equal(t, gasPricer.expGasFeeCap().Uint64(), receipt.GasUsed)
}
// TestTxMgrFailsForRevertedTxn asserts that Send returns ErrReverted if the
// confirmed transaction reverts during execution, and returns the resulting
// receipt.
func TestTxMgrFailsForRevertedTxn(t *testing.T) {
    t.Parallel()
    h := newTestHarness()
    gasPricer := newGasPricer(1)
    updateGasPrice := func(ctx context.Context) (*types.Transaction, error) {
        gasTipCap, gasFeeCap := gasPricer.sample()
        return types.NewTx(&types.DynamicFeeTx{
            GasTipCap: gasTipCap,
            GasFeeCap: gasFeeCap,
        }), nil
    }
    sendTx := func(ctx context.Context, tx *types.Transaction) error {
        if gasPricer.shouldMine(tx.GasFeeCap()) {
            txHash := tx.Hash()
            h.backend.mineWithStatus(&txHash, tx.GasFeeCap(), true)
        }
        return nil
    }
    ctx := context.Background()
    receipt, err := h.mgr.Send(ctx, updateGasPrice, sendTx)
    require.Equal(t, txmgr.ErrReverted, err)
    require.NotNil(t, receipt)
    require.Equal(t, gasPricer.expGasFeeCap().Uint64(), receipt.GasUsed)
}
// TestTxMgrNeverConfirmCancel asserts that a Send can be canceled even if no
// transaction is mined. This is done to ensure the tx mgr can properly
// abort on shutdown, even if a txn is in the process of being published.
...@@ -519,6 +574,7 @@ func (b *failingBackend) TransactionReceipt(
    return &types.Receipt{
        TxHash:      txHash,
        BlockNumber: big.NewInt(1),
+       Status:      types.ReceiptStatusSuccessful,
    }, nil
}
...
...@@ -17,6 +17,7 @@ import (
    bsscore "github.com/ethereum-optimism/optimism/go/bss-core"
    "github.com/ethereum-optimism/optimism/go/bss-core/dial"
+   "github.com/ethereum-optimism/optimism/go/bss-core/drivers"
    "github.com/ethereum-optimism/optimism/go/bss-core/txmgr"
    "github.com/ethereum-optimism/optimism/go/teleportr/bindings/deposit"
    "github.com/ethereum-optimism/optimism/go/teleportr/db"
...@@ -346,8 +347,17 @@ func (s *Server) HandleEstimate(
    gasTipCap, err := s.l1Client.SuggestGasTipCap(ctx)
    if err != nil {
        rpcErrorsTotal.WithLabelValues("suggest_gas_tip_cap").Inc()
+       // If the request failed because the backend does not support
+       // eth_maxPriorityFeePerGas, fall back to using the default constant.
+       // Currently Alchemy is the only backend provider that exposes this
+       // method, so in the event their API is unreachable we can fall back
+       // to a degraded mode of operation. This also applies to our test
+       // environments, as hardhat doesn't support the query either.
+       if !drivers.IsMaxPriorityFeePerGasNotFoundError(err) {
            return err
        }
+       gasTipCap = drivers.FallbackGasTipCap
+   }
    header, err := s.l1Client.HeaderByNumber(ctx, nil)
    if err != nil {
...
...@@ -26,7 +26,7 @@ $ make geth
### Running a Sequencer
Running a sequencer that ingests L1 to L2 transactions requires running the
-[Data Transport Layer](https://github.com/ethereum-optimism/data-transport-layer).
+[Data Transport Layer](https://github.com/ethereum-optimism/optimism/tree/develop/packages/data-transport-layer).
The data transport layer is responsible for indexing transactions
from layer one Ethereum. It is possible to run a local development sequencer
without the data transport layer by turning off the sync service. To turn on
...
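The README is truncated at this point in the diff. As a rough sketch only — the environment variable name below is an assumption, not taken from this diff; check the repo's ops scripts and l2geth flags for the authoritative setting — toggling the sync service looks something like:

```sh
# Assumed variable name; verify against the ops env files before relying on it.
export ETH1_SYNC_SERVICE_ENABLE=false   # local dev sequencer, no data transport layer
# export ETH1_SYNC_SERVICE_ENABLE=true  # index L1 via a running data transport layer
make geth && ./build/bin/geth
```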
...@@ -15,5 +15,6 @@ FROM alpine:3.13
RUN apk add --no-cache ca-certificates jq curl
COPY --from=builder /go/batch-submitter/batch-submitter /usr/local/bin/
+WORKDIR /usr/local/bin
COPY ./ops/scripts/batch-submitter.sh .
ENTRYPOINT ["batch-submitter"]
...@@ -2,7 +2,7 @@ FROM golang:1.15-alpine3.13 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
-ADD ./go/gas-oracle /gas-oracle
+COPY ./go/gas-oracle /gas-oracle
RUN cd /gas-oracle && make gas-oracle
FROM alpine:3.13
...@@ -10,5 +10,6 @@ FROM alpine:3.13
RUN apk add --no-cache ca-certificates jq curl
COPY --from=builder /gas-oracle/gas-oracle /usr/local/bin/
+WORKDIR /usr/local/bin/
COPY ./ops/scripts/gas-oracle.sh .
ENTRYPOINT ["gas-oracle"]
...@@ -15,6 +15,7 @@ FROM alpine:3.13
RUN apk add --no-cache ca-certificates jq curl
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/
+WORKDIR /usr/local/bin/
EXPOSE 8545 8546 8547
COPY ./ops/scripts/geth.sh .
ENTRYPOINT ["geth"]
FROM golang:1.16 as builder
-ADD ./go/l2geth-exporter /app/
+COPY ./go/l2geth-exporter /app/
WORKDIR /app/
RUN make build
-FROM alpine:latest
+FROM alpine:3.13
RUN apk --no-cache add ca-certificates
WORKDIR /root/
COPY --from=builder /app/l2geth-exporter /usr/local/bin/
...
FROM golang:1.16 as builder
-ADD ./go/op-exporter /app/
+COPY ./go/op-exporter /app/
WORKDIR /app/
RUN make build
-FROM alpine:latest
+FROM alpine:3.13
RUN apk --no-cache add ca-certificates
WORKDIR /root/
COPY --from=builder /app/op-exporter /usr/local/bin/
...
...@@ -6,7 +6,12 @@
# when used with typescript/hardhat: https://github.com/nomiclabs/hardhat/issues/1219
FROM node:16.13-buster-slim as base
-RUN apt-get update -y && apt-get install -y git curl jq python3
+RUN apt-get update -y && apt-get install -y --no-install-recommends git \
+    curl \
+    jq \
+    python3 \
+    ca-certificates \
+    && rm -rf /var/lib/apt/lists/*
# copy over the needed configs to run the dep installation
# note: this approach can be a bit unhandy to maintain, but it allows
...@@ -22,7 +27,7 @@ COPY packages/message-relayer/package.json ./packages/message-relayer/package.js
COPY packages/replica-healthcheck/package.json ./packages/replica-healthcheck/package.json
COPY integration-tests/package.json ./integration-tests/package.json
-RUN yarn install --frozen-lockfile
+RUN yarn install --frozen-lockfile && yarn cache clean
COPY ./packages ./packages
COPY ./integration-tests ./integration-tests
...
...@@ -8,7 +8,7 @@ COPY [ \
# install deps
WORKDIR /hardhat
-RUN yarn install
+RUN yarn install && yarn cache clean
# bring in dockerenv so that hardhat launches with host = 0.0.0.0 instead of 127.0.0.1
# so that it's accessible from other boxes as well
...
...@@ -23,7 +23,7 @@ import { SomeContract } from "@eth-optimism/contracts/path/to/SomeContract.sol";
```
Note that the `/path/to/SomeContract.sol` is the path to the target contract within the [contracts folder](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts/contracts) inside of this package.
-For example, the [L1CrossDomainMessenger](/contracts/L1/messaging/L1CrossDomainMessenger.sol) contract is located at `/contracts/L1/messaging/L1CrossDomainMessenger.sol`, relative to this README.
+For example, the [L1CrossDomainMessenger](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts/contracts/L1/messaging/L1CrossDomainMessenger.sol) contract is located at `packages/contracts/contracts/L1/messaging/L1CrossDomainMessenger.sol`, relative to this README.
You would therefore import the contract as:
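The import statement itself is collapsed in this diff; following the convention described above, it would look like:

```solidity
import { L1CrossDomainMessenger } from "@eth-optimism/contracts/L1/messaging/L1CrossDomainMessenger.sol";
```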
...
import { ethers } from 'ethers'
import { task } from 'hardhat/config'
import * as types from 'hardhat/internal/core/params/argumentTypes'
-import { SequencerBatch } from '@eth-optimism/core-utils'
+import { BatchType, SequencerBatch } from '@eth-optimism/core-utils'
import { names } from '../src/address-names'
import { getContractFromArtifact } from '../src/deploy-utils'
...@@ -51,7 +51,31 @@ task('fetch-batches')
    for (const event of events) {
      const tx = await provider.getTransaction(event.transactionHash)
      const batch = (SequencerBatch as any).fromHex(tx.data)
-     batches.push(batch.toJSON())
+     // Add an extra field to the resulting json
+     // so that the serialization sizes can be observed
+     const json = batch.toJSON()
+     json.sizes = {
+       legacy: 0,
+       zlib: 0,
+     }
+     // Create a copy of the batch to serialize in
+     // the alternative format
+     const copy = (SequencerBatch as any).fromHex(tx.data)
+     if (batch.type === BatchType.ZLIB) {
+       copy.type = BatchType.LEGACY
+       json.sizes.legacy = copy.encode().length
+       json.sizes.zlib = batch.encode().length
+     } else {
+       copy.type = BatchType.ZLIB
+       json.sizes.zlib = copy.encode().length
+       json.sizes.legacy = batch.encode().length
+     }
+     json.compressionRatio = json.sizes.zlib / json.sizes.legacy
+     batches.push(json)
    }
  }
...
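For reference, a sketch of the extra fields this adds to each batch's JSON output (the numbers below are made up; all existing batch fields are unchanged):

```json
{
  "sizes": { "legacy": 120000, "zlib": 45000 },
  "compressionRatio": 0.375
}
```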
...@@ -19,7 +19,7 @@ We run two sub-services, the [`L1IngestionService`](./src/services/l1-ingestion/
See an example config at [.env.example](.env.example); copy into a `.env` file before running.
-`L1_TRANSPORT__L1_RPC_ENDPOINT` can be the JSON RPC endpoint of any L1 Ethereum node. `L1_TRANSPORT__ADDRESS_MANAGER` should be the contract address of the Address Manager on the corresponding network; find their values in the [Regenesis repo](https://github.com/ethereum-optimism/regenesis).
+`L1_TRANSPORT__L1_RPC_ENDPOINT` can be the JSON RPC endpoint of any L1 Ethereum node. `L1_TRANSPORT__ADDRESS_MANAGER` should be the contract address of the Address Manager on the corresponding network; find their values in the [contracts package](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts/deployments).
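As a quick illustration (not from the README; both values are placeholders), a minimal `.env` might contain:

```
L1_TRANSPORT__L1_RPC_ENDPOINT=https://<your-l1-node>:8545
L1_TRANSPORT__ADDRESS_MANAGER=0x<address-manager-address-for-your-network>
```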
### Building and usage
...@@ -47,7 +47,7 @@ Here's the list of environment variables you can change:
| Variable | Default | Description |
| -------- | ------- | ----------- |
| DATA_TRANSPORT_LAYER__DB_PATH | ./db | Path to the database for this service. |
-| DATA_TRANSPORT_LAYER__ADDRESS_MANAGER | - | Address of the AddressManager contract on L1. See [regenesis](https://github.com/ethereum-optimism/regenesis) repo to find this address for mainnet or kovan. |
+| DATA_TRANSPORT_LAYER__ADDRESS_MANAGER | - | Address of the AddressManager contract on L1. See [contracts](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts/deployments) package to find this address for mainnet or kovan. |
| DATA_TRANSPORT_LAYER__POLLING_INTERVAL | 5000 | Period of time between execution loops. |
| DATA_TRANSPORT_LAYER__DANGEROUSLY_CATCH_ALL_ERRORS | false | If true, will catch all errors without throwing. |
| DATA_TRANSPORT_LAYER__CONFIRMATIONS | 12 | Number of confirmations to wait before accepting transactions as "canonical". |
...
# @eth-optimism/smock
## NOTICE
Smock v1 is being deprecated.
Please migrate to [Smock v2](https://github.com/defi-wonderland/smock).
You can find an archive of the Smock v1 codebase at [optimism@d337713c91](https://github.com/ethereum-optimism/optimism/tree/d337713c91c6634f546b8d6572392c0784ab8217/packages/smock).