Commit f0618203 authored by Murphy Law, committed by GitHub

Merge branch 'develop' into feat/publish-tx

parents f8348862 4bfddfbb
---
'@eth-optimism/teleportr': patch
---
Add SuggestGasTipCap fallback
---
'@eth-optimism/batch-submitter-service': patch
'@eth-optimism/gas-oracle': patch
'@eth-optimism/integration-tests': patch
'@eth-optimism/l2geth': patch
'@eth-optimism/hardhat-node': patch
'@eth-optimism/contracts': patch
'@eth-optimism/data-transport-layer': patch
'@eth-optimism/message-relayer': patch
---
Refactored Dockerfiles
---
'@eth-optimism/indexer': patch
---
Indexer: initial release
---
'@eth-optimism/batch-submitter-service': patch
'@eth-optimism/teleportr': patch
---
Count reverted transactions in failed_submissions
---
'@eth-optimism/batch-submitter-service': patch
---
Enforce min/max tx size on plaintext batch encoding
name: indexer unit tests

on:
  push:
    paths:
      - 'go/indexer/**'
    branches:
      - 'master'
      - 'develop'
      - '*rc'
      - 'release/*'
  pull_request:
    branches:
      - '*'
  workflow_dispatch:

defaults:
  run:
    working-directory: './go/indexer'

jobs:
  tests:
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install
        run: make
      - name: Test
        run: make test
......@@ -74,7 +74,7 @@ Our update process takes the form of a PR merging the `develop` branch into the
### The `develop` branch
Our primary development branch is [`develop`](https://github.com/ethereum-optimism/optimism/tree/develop/).
`develop` contains the most up-to-date software that remains backwards compatible with our latest experimental [network deployments](https://community.optimism.io/docs/developers/networks.html).
`develop` contains the most up-to-date software that remains backwards compatible with our latest experimental [network deployments](https://community.optimism.io/docs/useful-tools/networks/).
If you're making a backwards compatible change, please direct your pull request towards `develop`.
**Changes to contracts within `packages/contracts/contracts` are usually NOT considered backwards compatible and SHOULD be made against a release candidate branch**.
......
......@@ -78,7 +78,6 @@ func GenSequencerBatchParams(
shouldStartAtElement uint64,
blockOffset uint64,
batch []BatchElement,
batchType BatchType,
) (*AppendSequencerBatchParams, error) {
var (
......@@ -189,6 +188,5 @@ func GenSequencerBatchParams(
TotalElementsToAppend: uint64(len(batch)),
Contexts: contexts,
Txs: txs,
Type: batchType,
}, nil
}
......@@ -199,39 +199,57 @@ func (d *Driver) CraftBatchTx(
var pruneCount int
for {
batchParams, err := GenSequencerBatchParams(
shouldStartAt, d.cfg.BlockOffset, batchElements, d.cfg.BatchType,
shouldStartAt, d.cfg.BlockOffset, batchElements,
)
if err != nil {
return nil, err
}
batchArguments, err := batchParams.Serialize()
// Use plaintext encoding to enforce size constraints.
plaintextBatchArguments, err := batchParams.Serialize(BatchTypeLegacy)
if err != nil {
return nil, err
}
appendSequencerBatchID := d.ctcABI.Methods[appendSequencerBatchMethodName].ID
batchCallData := append(appendSequencerBatchID, batchArguments...)
plaintextCalldata := append(appendSequencerBatchID, plaintextBatchArguments...)
// Continue pruning until calldata size is less than configured max.
calldataSize := uint64(len(batchCallData))
if calldataSize > d.cfg.MaxTxSize {
// Continue pruning until plaintext calldata size is less than
// configured max.
plaintextCalldataSize := uint64(len(plaintextCalldata))
if plaintextCalldataSize > d.cfg.MaxTxSize {
oldLen := len(batchElements)
newBatchElementsLen := (oldLen * 9) / 10
batchElements = batchElements[:newBatchElementsLen]
log.Info(name+" pruned batch", "old_num_txs", oldLen, "new_num_txs", newBatchElementsLen)
log.Info(name+" pruned batch",
"plaintext_size", plaintextCalldataSize,
"max_tx_size", d.cfg.MaxTxSize,
"old_num_txs", oldLen,
"new_num_txs", newBatchElementsLen)
pruneCount++
continue
} else if calldataSize < d.cfg.MinTxSize {
} else if plaintextCalldataSize < d.cfg.MinTxSize {
log.Info(name+" batch tx size below minimum",
"size", calldataSize, "min_tx_size", d.cfg.MinTxSize)
"plaintext_size", plaintextCalldataSize,
"min_tx_size", d.cfg.MinTxSize,
"num_txs", len(batchElements))
return nil, nil
}
d.metrics.NumElementsPerBatch().Observe(float64(len(batchElements)))
d.metrics.BatchPruneCount.Set(float64(pruneCount))
log.Info(name+" batch constructed", "num_txs", len(batchElements), "length", len(batchCallData))
// Finally, encode the batch using the configured batch type.
var calldata = plaintextCalldata
if d.cfg.BatchType != BatchTypeLegacy {
batchArguments, err := batchParams.Serialize(d.cfg.BatchType)
if err != nil {
return nil, err
}
calldata = append(appendSequencerBatchID, batchArguments...)
}
log.Info(name+" batch constructed", "num_txs", len(batchElements), "length", len(calldata))
opts, err := bind.NewKeyedTransactorWithChainID(
d.cfg.PrivKey, d.cfg.ChainID,
......@@ -243,7 +261,7 @@ func (d *Driver) CraftBatchTx(
opts.Nonce = nonce
opts.NoSend = true
tx, err := d.rawCtcContract.RawTransact(opts, batchCallData)
tx, err := d.rawCtcContract.RawTransact(opts, calldata)
switch {
case err == nil:
return tx, nil
......@@ -258,7 +276,7 @@ func (d *Driver) CraftBatchTx(
log.Warn(d.cfg.Name + " eth_maxPriorityFeePerGas is unsupported " +
"by current backend, using fallback gasTipCap")
opts.GasTipCap = drivers.FallbackGasTipCap
return d.rawCtcContract.RawTransact(opts, batchCallData)
return d.rawCtcContract.RawTransact(opts, calldata)
default:
return nil, err
......
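To make the pruning loop above concrete: each pass trims the batch to 90% of its element count until the plaintext encoding fits under `MaxTxSize`, and only then is the (possibly compressed) final calldata built. A self-contained sketch follows; the 50-byte per-element size and 1000-byte max are illustrative stand-ins, while the shrink factor matches the diff:

```go
package main

import "fmt"

// Sketch of the prune-until-fits loop in CraftBatchTx. The per-element size
// and max are stand-ins; the (elems * 9) / 10 shrink matches the diff above.
func main() {
	const maxTxSize = 1000
	elems := 30 // 30 * 50 = 1500 plaintext bytes, too large
	pruneCount := 0
	for {
		plaintextSize := elems * 50
		if plaintextSize > maxTxSize {
			elems = (elems * 9) / 10
			pruneCount++
			continue
		}
		// Prints: kept 18 elements after 4 prunes (900 bytes)
		fmt.Printf("kept %d elements after %d prunes (%d bytes)\n",
			elems, pruneCount, plaintextSize)
		break
	}
}
```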
......@@ -47,6 +47,25 @@ type BatchContext struct {
BlockNumber uint64 `json:"block_number"`
}
// IsMarkerContext returns true if the BatchContext is a marker context used to
// specify the encoding format. This is only valid if called on the first
// BatchContext in the calldata.
func (c BatchContext) IsMarkerContext() bool {
return c.Timestamp == 0
}
// MarkerBatchType returns the BatchType specified by a marker BatchContext.
// The return value is only valid if called on the first BatchContext in the
// calldata and IsMarkerContext returns true.
func (c BatchContext) MarkerBatchType() BatchType {
switch c.BlockNumber {
case 0:
return BatchTypeZlib
default:
return BatchTypeLegacy
}
}
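A minimal, self-contained sketch of the convention these two helpers implement: a first context with a zero timestamp is a marker rather than real data, and its block number selects the encoding. Types are redeclared locally so the snippet runs on its own:

```go
package main

import "fmt"

// Local redeclarations mirroring the diff, for a standalone example.
type BatchContext struct {
	Timestamp   uint64
	BlockNumber uint64
}

type BatchType int8

const (
	BatchTypeLegacy BatchType = -1
	BatchTypeZlib   BatchType = 0
)

// detectBatchType applies IsMarkerContext/MarkerBatchType to the first
// context: Timestamp == 0 marks a typed batch, BlockNumber 0 means zlib.
func detectBatchType(contexts []BatchContext) BatchType {
	if len(contexts) > 0 && contexts[0].Timestamp == 0 && contexts[0].BlockNumber == 0 {
		return BatchTypeZlib
	}
	return BatchTypeLegacy
}

func main() {
	legacy := []BatchContext{{Timestamp: 12345, BlockNumber: 100}}
	typed := []BatchContext{{Timestamp: 0, BlockNumber: 0}, {Timestamp: 12345, BlockNumber: 100}}
	fmt.Println(detectBatchType(legacy)) // -1 (legacy)
	fmt.Println(detectBatchType(typed))  // 0 (zlib)
}
```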
// Write encodes the BatchContext into a 16-byte stream using the following
// encoding:
// - num_sequenced_txs: 3 bytes
......@@ -83,13 +102,34 @@ func (c *BatchContext) Read(r io.Reader) error {
return readUint64(r, &c.BlockNumber, 5)
}
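For reference, a runnable sketch of the full 16-byte context layout. The 3-byte num_sequenced_txs and 5-byte block_number widths appear in the surrounding diff; the remaining widths (3 bytes for num_subsequent_queue_txs, 5 for timestamp, so that 3+3+5+5 = 16) are assumptions consistent with the stated stream size:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// writeUintN appends the bottom n bytes of val, big-endian, mirroring the
// writeUint64 helper referenced in the diff.
func writeUintN(buf *bytes.Buffer, val uint64, n int) {
	var tmp [8]byte
	binary.BigEndian.PutUint64(tmp[:], val)
	buf.Write(tmp[8-n:])
}

func main() {
	var buf bytes.Buffer
	writeUintN(&buf, 2, 3)          // num_sequenced_txs:        3 bytes
	writeUintN(&buf, 1, 3)          // num_subsequent_queue_txs: 3 bytes (assumed)
	writeUintN(&buf, 1700000000, 5) // timestamp:                5 bytes (assumed)
	writeUintN(&buf, 15000000, 5)   // block_number:             5 bytes
	fmt.Println(len(buf.Bytes()))   // 16
}
```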
// BatchType represents the type of batch being
// submitted. When the first context in the batch
// has a timestamp of 0, the blocknumber is interpreted
// as an enum that represents the type
// BatchType represents the type of batch being submitted. When the first
// context in the batch has a timestamp of 0, the blocknumber is interpreted as
// an enum that represents the type.
type BatchType int8
// Implements the Stringer interface for BatchType
const (
// BatchTypeLegacy represents the legacy batch type.
BatchTypeLegacy BatchType = -1
// BatchTypeZlib represents a batch type where the transaction data is
// compressed using zlib.
BatchTypeZlib BatchType = 0
)
// BatchTypeFromString returns the BatchType enum based on a human readable
// string.
func BatchTypeFromString(s string) BatchType {
switch s {
case "zlib", "ZLIB":
return BatchTypeZlib
case "legacy", "LEGACY":
return BatchTypeLegacy
default:
return BatchTypeLegacy
}
}
// String implements the Stringer interface for BatchType.
func (b BatchType) String() string {
switch b {
case BatchTypeLegacy:
......@@ -101,27 +141,26 @@ func (b BatchType) String() string {
}
}
// BatchTypeFromString returns the BatchType
// enum based on a human readable string
func BatchTypeFromString(s string) BatchType {
switch s {
case "zlib", "ZLIB":
return BatchTypeZlib
case "legacy", "LEGACY":
return BatchTypeLegacy
// MarkerContext returns the marker context, if any, for the given batch type.
func (b BatchType) MarkerContext() *BatchContext {
switch b {
// No marker context for legacy encoding.
case BatchTypeLegacy:
return nil
// Zlib marker context sets block number equal to zero.
case BatchTypeZlib:
return &BatchContext{
Timestamp: 0,
BlockNumber: 0,
}
default:
return BatchTypeLegacy
return nil
}
}
const (
// BatchTypeLegacy represents the legacy batch type
BatchTypeLegacy BatchType = -1
// BatchTypeZlib represents a batch type where the
// transaction data is compressed using zlib
BatchTypeZlib BatchType = 0
)
// AppendSequencerBatchParams holds the raw data required to submit a batch of
// L2 txs to the L1 CTC contract. Rather than encoding the objects using the
// standard ABI encoding, a custom encoding is used and provided in the call data to
......@@ -146,9 +185,6 @@ type AppendSequencerBatchParams struct {
// Txs contains all sequencer txs that will be recorded in the L1 CTC
// contract.
Txs []*CachedTx
// The type of the batch
Type BatchType
}
// Write encodes the AppendSequencerBatchParams using the following format:
......@@ -173,7 +209,11 @@ type AppendSequencerBatchParams struct {
//
// Note that writing to a bytes.Buffer cannot
// error, so errors are ignored here
func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error {
func (p *AppendSequencerBatchParams) Write(
w *bytes.Buffer,
batchType BatchType,
) error {
_ = writeUint64(w, p.ShouldStartAtElement, 5)
_ = writeUint64(w, p.TotalElementsToAppend, 3)
......@@ -190,10 +230,10 @@ func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error {
// copy the contexts so as not to mutate the struct
// when it is a typed batch
contexts := make([]BatchContext, 0, len(p.Contexts)+1)
if p.Type == BatchTypeZlib {
// All zero values for the single batch context
// is desired here as blocknumber 0 means it is a zlib batch
contexts = append(contexts, BatchContext{})
// Add the marker context, if any, for non-legacy encodings.
markerContext := batchType.MarkerContext()
if markerContext != nil {
contexts = append(contexts, *markerContext)
}
contexts = append(contexts, p.Contexts...)
......@@ -203,7 +243,7 @@ func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error {
context.Write(w)
}
switch p.Type {
switch batchType {
case BatchTypeLegacy:
// Write each length-prefixed tx.
for _, tx := range p.Txs {
......@@ -225,7 +265,7 @@ func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error {
}
default:
return fmt.Errorf("Unknown batch type: %s", p.Type)
return fmt.Errorf("Unknown batch type: %s", batchType)
}
return nil
......@@ -233,9 +273,12 @@ func (p *AppendSequencerBatchParams) Write(w *bytes.Buffer) error {
// Serialize performs the same encoding as Write, but returns the resulting
// bytes slice.
func (p *AppendSequencerBatchParams) Serialize() ([]byte, error) {
func (p *AppendSequencerBatchParams) Serialize(
batchType BatchType,
) ([]byte, error) {
var buf bytes.Buffer
if err := p.Write(&buf); err != nil {
if err := p.Write(&buf, batchType); err != nil {
return nil, err
}
return buf.Bytes(), nil
......@@ -266,6 +309,9 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
return err
}
// Assume that it is a legacy batch at first; this will be overwritten if
// we detect a marker context.
var batchType = BatchTypeLegacy
// Ensure that contexts is never nil
p.Contexts = make([]BatchContext, 0)
for i := uint64(0); i < numContexts; i++ {
......@@ -274,30 +320,33 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
return err
}
if i == 0 && batchContext.IsMarkerContext() {
batchType = batchContext.MarkerBatchType()
continue
}
p.Contexts = append(p.Contexts, batchContext)
}
// Assume that it is a legacy batch at first
p.Type = BatchTypeLegacy
// Handle backwards compatible batch types
if len(p.Contexts) > 0 && p.Contexts[0].Timestamp == 0 {
switch p.Contexts[0].BlockNumber {
case 0:
// zlib compressed transaction data
p.Type = BatchTypeZlib
// remove the first dummy context
p.Contexts = p.Contexts[1:]
numContexts--
zr, err := zlib.NewReader(r)
if err != nil {
return err
}
defer zr.Close()
// Define a closure to clean up the reader used by the specified encoding.
var closeReader func() error
switch batchType {
// The legacy serialization does not require closing, so we instantiate a
// dummy closure.
case BatchTypeLegacy:
closeReader = func() error { return nil }
r = bufio.NewReader(zr)
// The zlib serialization requires decompression before reading the
// plaintext bytes, and also requires proper cleanup.
case BatchTypeZlib:
zr, err := zlib.NewReader(r)
if err != nil {
return err
}
closeReader = zr.Close
r = bufio.NewReader(zr)
}
// Deserialize any transactions. Since the number of txs is omitted
......@@ -315,7 +364,7 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
if len(p.Txs) == 0 && len(p.Contexts) != 0 {
return ErrMalformedBatch
}
return nil
return closeReader()
} else if err != nil {
return err
}
......@@ -327,7 +376,6 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
p.Txs = append(p.Txs, NewCachedTx(tx))
}
}
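The zlib branch above wraps the remaining stream in a decompressing reader before the length-prefixed txs are parsed, with `closeReader` deferring cleanup to the end of Read. A self-contained sketch of that round trip; the 3-byte length prefix mirrors the legacy tx framing and should be read as an assumption:

```go
package main

import (
	"bufio"
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
)

func main() {
	txs := [][]byte{[]byte("tx-one"), []byte("tx-two-longer")}

	// Encode: length-prefixed txs written through a zlib writer.
	var buf bytes.Buffer
	zw := zlib.NewWriter(&buf)
	for _, tx := range txs {
		n := len(tx)
		zw.Write([]byte{byte(n >> 16), byte(n >> 8), byte(n)}) // 3-byte prefix (assumed)
		zw.Write(tx)
	}
	zw.Close()

	// Decode: wrap the stream in a zlib reader plus bufio, as in Read above.
	zr, err := zlib.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer zr.Close()
	r := bufio.NewReader(zr)
	for {
		var pre [3]byte
		if _, err := io.ReadFull(r, pre[:]); err == io.EOF {
			break // clean end of the compressed tx section
		} else if err != nil {
			panic(err)
		}
		n := int(pre[0])<<16 | int(pre[1])<<8 | int(pre[2])
		tx := make([]byte, n)
		if _, err := io.ReadFull(r, tx); err != nil {
			panic(err)
		}
		fmt.Printf("decoded tx: %s\n", tx)
	}
}
```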
// writeUint64 writes the bottom `n` bytes of `val` to `w`.
......
......@@ -119,7 +119,6 @@ func testAppendSequencerBatchParamsEncodeDecode(
TotalElementsToAppend: test.TotalElementsToAppend,
Contexts: test.Contexts,
Txs: nil,
Type: sequencer.BatchTypeLegacy,
}
// Decode the batch from the test string.
......@@ -133,7 +132,6 @@ func testAppendSequencerBatchParamsEncodeDecode(
} else {
require.Nil(t, err)
}
require.Equal(t, params.Type, sequencer.BatchTypeLegacy)
// Assert that the decoded params match the expected params. The
// transactions are compared separately (via hash), since the internal
......@@ -149,7 +147,7 @@ func testAppendSequencerBatchParamsEncodeDecode(
// Finally, encode the decoded object and assert it matches the original
// hex string.
paramsBytes, err := params.Serialize()
paramsBytes, err := params.Serialize(sequencer.BatchTypeLegacy)
// Return early when testing error cases, no need to reserialize again
if test.Error {
......@@ -161,17 +159,14 @@ func testAppendSequencerBatchParamsEncodeDecode(
require.Equal(t, test.HexEncoding, hex.EncodeToString(paramsBytes))
// Serialize the batches in compressed form
params.Type = sequencer.BatchTypeZlib
compressedParamsBytes, err := params.Serialize()
compressedParamsBytes, err := params.Serialize(sequencer.BatchTypeZlib)
require.Nil(t, err)
// Deserialize the compressed batch
var paramsCompressed sequencer.AppendSequencerBatchParams
err = paramsCompressed.Read(bytes.NewReader(compressedParamsBytes))
require.Nil(t, err)
require.Equal(t, paramsCompressed.Type, sequencer.BatchTypeZlib)
expParams.Type = sequencer.BatchTypeZlib
decompressedTxs := paramsCompressed.Txs
paramsCompressed.Txs = nil
......@@ -189,3 +184,71 @@ func compareTxs(t *testing.T, a []*l2types.Transaction, b []*sequencer.CachedTx)
require.Equal(t, txA.Hash(), b[i].Tx().Hash())
}
}
// TestMarkerContext asserts that each batch type returns the correct marker
// context.
func TestMarkerContext(t *testing.T) {
batchTypes := []sequencer.BatchType{
sequencer.BatchTypeLegacy,
sequencer.BatchTypeZlib,
}
for _, batchType := range batchTypes {
t.Run(batchType.String(), func(t *testing.T) {
markerContext := batchType.MarkerContext()
if batchType == sequencer.BatchTypeLegacy {
require.Nil(t, markerContext)
} else {
require.NotNil(t, markerContext)
// All marker contexts MUST have a zero timestamp.
require.Equal(t, uint64(0), markerContext.Timestamp)
// Currently all other fields besides block number are defined
// as zero.
require.Equal(t, uint64(0), markerContext.NumSequencedTxs)
require.Equal(t, uint64(0), markerContext.NumSubsequentQueueTxs)
// Assert that the block number for each batch type is set to
// the correct constant.
switch batchType {
case sequencer.BatchTypeZlib:
require.Equal(t, uint64(0), markerContext.BlockNumber)
default:
t.Fatalf("unknown batch type")
}
// Ensure MarkerBatchType produces the expected BatchType.
require.Equal(t, batchType, markerContext.MarkerBatchType())
}
})
}
}
// TestIsMarkerContext asserts that IsMarkerContext returns true iff the
// timestamp is zero.
func TestIsMarkerContext(t *testing.T) {
batchContext := sequencer.BatchContext{
NumSequencedTxs: 1,
NumSubsequentQueueTxs: 2,
Timestamp: 3,
BlockNumber: 4,
}
require.False(t, batchContext.IsMarkerContext())
batchContext = sequencer.BatchContext{
NumSequencedTxs: 0,
NumSubsequentQueueTxs: 0,
Timestamp: 3,
BlockNumber: 0,
}
require.False(t, batchContext.IsMarkerContext())
batchContext = sequencer.BatchContext{
NumSequencedTxs: 1,
NumSubsequentQueueTxs: 2,
Timestamp: 0,
BlockNumber: 4,
}
require.True(t, batchContext.IsMarkerContext())
}
......@@ -231,6 +231,7 @@ func TestClearPendingTxClearingTxConfirms(t *testing.T) {
return &types.Receipt{
TxHash: txHash,
BlockNumber: big.NewInt(int64(testBlockNumber)),
Status: types.ReceiptStatusSuccessful,
}, nil
},
})
......@@ -296,6 +297,7 @@ func TestClearPendingTxMultipleConfs(t *testing.T) {
return &types.Receipt{
TxHash: txHash,
BlockNumber: big.NewInt(int64(testBlockNumber)),
Status: types.ReceiptStatusSuccessful,
}, nil
},
}, numConfs)
......
......@@ -215,6 +215,18 @@ func (s *Service) eventLoop() {
receipt, err := s.txMgr.Send(
s.ctx, updateGasPrice, s.cfg.Driver.SendTransaction,
)
// Record the confirmation time and gas used if we receive a
// receipt, as this indicates the transaction confirmed. We record
// these metrics here as the transaction may have reverted, and will
// abort below.
if receipt != nil {
batchConfirmationTime := time.Since(batchConfirmationStart) /
time.Millisecond
s.metrics.BatchConfirmationTimeMs().Set(float64(batchConfirmationTime))
s.metrics.SubmissionGasUsedWei().Set(float64(receipt.GasUsed))
}
if err != nil {
log.Error(name+" unable to publish batch tx",
"err", err)
......@@ -225,11 +237,7 @@ func (s *Service) eventLoop() {
// The transaction was successfully submitted.
log.Info(name+" batch tx successfully published",
"tx_hash", receipt.TxHash)
batchConfirmationTime := time.Since(batchConfirmationStart) /
time.Millisecond
s.metrics.BatchConfirmationTimeMs().Set(float64(batchConfirmationTime))
s.metrics.BatchesSubmitted().Inc()
s.metrics.SubmissionGasUsedWei().Set(float64(receipt.GasUsed))
s.metrics.SubmissionTimestamp().Set(float64(time.Now().UnixNano() / 1e6))
case err := <-s.ctx.Done():
......
......@@ -2,6 +2,7 @@ package txmgr
import (
"context"
"errors"
"math/big"
"strings"
"sync"
......@@ -12,6 +13,9 @@ import (
"github.com/ethereum/go-ethereum/log"
)
// ErrReverted signals that a mined transaction reverted.
var ErrReverted = errors.New("transaction reverted")
// UpdateGasPriceSendTxFunc defines a function signature for publishing a
// desired tx with a specific gas price. Implementations of this signature
// should also return promptly when the context is canceled.
......@@ -225,6 +229,9 @@ func (m *SimpleTxManager) Send(
// The transaction has confirmed.
case receipt := <-receiptChan:
if receipt.Status == types.ReceiptStatusFailed {
return receipt, ErrReverted
}
return receipt, nil
}
}
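Because Send now pairs a non-nil receipt with `ErrReverted`, callers can record receipt-derived metrics before surfacing the revert, which is how the batch submitter's event loop uses it above. A hedged, self-contained sketch with the sentinel and receipt type redeclared locally:

```go
package main

import (
	"errors"
	"fmt"
)

// Redeclared locally so the sketch stands alone; the real sentinel lives in
// the txmgr package shown in the diff.
var ErrReverted = errors.New("transaction reverted")

type receipt struct{ GasUsed uint64 }

// send simulates SimpleTxManager.Send confirming a reverted transaction:
// the receipt is returned alongside ErrReverted.
func send() (*receipt, error) {
	return &receipt{GasUsed: 21000}, ErrReverted
}

func main() {
	rcpt, err := send()
	// Record receipt metrics even on revert, since the tx did confirm.
	if rcpt != nil {
		fmt.Println("gas used:", rcpt.GasUsed)
	}
	if errors.Is(err, ErrReverted) {
		fmt.Println("mined but reverted: count as failed submission")
	}
}
```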
......@@ -288,7 +295,10 @@ func waitMined(
// tipHeight. The equation is rewritten in this form to avoid
// underflows.
if txHeight+numConfirmations <= tipHeight+1 {
log.Info("Transaction confirmed", "txHash", txHash)
reverted := receipt.Status == types.ReceiptStatusFailed
log.Info("Transaction confirmed",
"txHash", txHash,
"reverted", reverted)
return receipt, nil
}
......
......@@ -98,6 +98,7 @@ func (g *gasPricer) sample() (*big.Int, *big.Int) {
type minedTxInfo struct {
gasFeeCap *big.Int
blockNumber uint64
reverted bool
}
// mockBackend implements txmgr.ReceiptSource that tracks mined transactions
......@@ -123,6 +124,20 @@ func newMockBackend() *mockBackend {
// TransactionReceipt with a matching txHash will result in a non-nil receipt.
// If a nil txHash is supplied this has the effect of mining an empty block.
func (b *mockBackend) mine(txHash *common.Hash, gasFeeCap *big.Int) {
b.mineWithStatus(txHash, gasFeeCap, false)
}
// mineWithStatus records a (txHash, gasFeeCap) pair as confirmed, but also
// includes the option to specify whether or not the transaction reverted.
// Subsequent calls to TransactionReceipt with a matching txHash will result in
// a non-nil receipt. If a nil txHash is supplied this has the effect of mining
// an empty block.
func (b *mockBackend) mineWithStatus(
txHash *common.Hash,
gasFeeCap *big.Int,
revert bool,
) {
b.mu.Lock()
defer b.mu.Unlock()
......@@ -131,6 +146,7 @@ func (b *mockBackend) mine(txHash *common.Hash, gasFeeCap *big.Int) {
b.minedTxs[*txHash] = minedTxInfo{
gasFeeCap: gasFeeCap,
blockNumber: b.blockHeight,
reverted: revert,
}
}
}
......@@ -160,12 +176,18 @@ func (b *mockBackend) TransactionReceipt(
return nil, nil
}
var status = types.ReceiptStatusSuccessful
if txInfo.reverted {
status = types.ReceiptStatusFailed
}
// Return the gas fee cap for the transaction in the GasUsed field so that
// we can assert the proper tx confirmed in our tests.
return &types.Receipt{
TxHash: txHash,
GasUsed: txInfo.gasFeeCap.Uint64(),
BlockNumber: big.NewInt(int64(txInfo.blockNumber)),
Status: status,
}, nil
}
......@@ -201,6 +223,39 @@ func TestTxMgrConfirmAtMinGasPrice(t *testing.T) {
require.Equal(t, gasPricer.expGasFeeCap().Uint64(), receipt.GasUsed)
}
// TestTxMgrFailsForRevertedTxn asserts that Send returns ErrReverted if the
// confirmed transaction reverts during execution, and returns the resulting
// receipt.
func TestTxMgrFailsForRevertedTxn(t *testing.T) {
t.Parallel()
h := newTestHarness()
gasPricer := newGasPricer(1)
updateGasPrice := func(ctx context.Context) (*types.Transaction, error) {
gasTipCap, gasFeeCap := gasPricer.sample()
return types.NewTx(&types.DynamicFeeTx{
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
}), nil
}
sendTx := func(ctx context.Context, tx *types.Transaction) error {
if gasPricer.shouldMine(tx.GasFeeCap()) {
txHash := tx.Hash()
h.backend.mineWithStatus(&txHash, tx.GasFeeCap(), true)
}
return nil
}
ctx := context.Background()
receipt, err := h.mgr.Send(ctx, updateGasPrice, sendTx)
require.Equal(t, txmgr.ErrReverted, err)
require.NotNil(t, receipt)
require.Equal(t, gasPricer.expGasFeeCap().Uint64(), receipt.GasUsed)
}
// TestTxMgrNeverConfirmCancel asserts that a Send can be canceled even if no
// transaction is mined. This is done to ensure that the tx mgr can properly
// abort on shutdown, even if a txn is in the process of being published.
......@@ -519,6 +574,7 @@ func (b *failingBackend) TransactionReceipt(
return &types.Receipt{
TxHash: txHash,
BlockNumber: big.NewInt(1),
Status: types.ReceiptStatusSuccessful,
}, nil
}
......
......@@ -17,6 +17,7 @@ import (
bsscore "github.com/ethereum-optimism/optimism/go/bss-core"
"github.com/ethereum-optimism/optimism/go/bss-core/dial"
"github.com/ethereum-optimism/optimism/go/bss-core/drivers"
"github.com/ethereum-optimism/optimism/go/bss-core/txmgr"
"github.com/ethereum-optimism/optimism/go/teleportr/bindings/deposit"
"github.com/ethereum-optimism/optimism/go/teleportr/db"
......@@ -346,7 +347,16 @@ func (s *Server) HandleEstimate(
gasTipCap, err := s.l1Client.SuggestGasTipCap(ctx)
if err != nil {
rpcErrorsTotal.WithLabelValues("suggest_gas_tip_cap").Inc()
return err
// If the request failed because the backend does not support
// eth_maxPriorityFeePerGas, fallback to using the default constant.
// Currently Alchemy is the only backend provider that exposes this
// method, so in the event their API is unreachable we can fallback to a
// degraded mode of operation. This also applies to our test
// environments, as hardhat doesn't support the query either.
if !drivers.IsMaxPriorityFeePerGasNotFoundError(err) {
return err
}
gasTipCap = drivers.FallbackGasTipCap
}
header, err := s.l1Client.HeaderByNumber(ctx, nil)
......
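The same fallback pattern as a self-contained sketch. The predicate and constant stand in for the `drivers.IsMaxPriorityFeePerGasNotFoundError` and `drivers.FallbackGasTipCap` helpers referenced in the diff; their exact behavior and the 1.5 gwei value are assumptions here:

```go
package main

import (
	"errors"
	"fmt"
	"math/big"
	"strings"
)

// Assumed stand-ins for the drivers helpers named in the diff.
var fallbackGasTipCap = big.NewInt(1500000000) // 1.5 gwei (assumed)

func isMaxPriorityFeeNotFound(err error) bool {
	return err != nil &&
		strings.Contains(err.Error(), "eth_maxPriorityFeePerGas")
}

// suggestGasTipCap simulates a backend (e.g. hardhat) that does not support
// the eth_maxPriorityFeePerGas query.
func suggestGasTipCap() (*big.Int, error) {
	return nil, errors.New("Method eth_maxPriorityFeePerGas not found")
}

func main() {
	gasTipCap, err := suggestGasTipCap()
	if err != nil {
		if !isMaxPriorityFeeNotFound(err) {
			panic(err) // unrelated failure: propagate
		}
		gasTipCap = fallbackGasTipCap // degraded mode: use the constant
	}
	fmt.Println("gasTipCap:", gasTipCap)
}
```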
......@@ -26,7 +26,7 @@ $ make geth
### Running a Sequencer
Running a sequencer that ingests L1 to L2 transactions requires running the
[Data Transport Layer](https://github.com/ethereum-optimism/data-transport-layer).
[Data Transport Layer](https://github.com/ethereum-optimism/optimism/tree/develop/packages/data-transport-layer).
The data transport layer is responsible for indexing transactions
from layer one Ethereum. It is possible to run a local development sequencer
without the data transport layer by turning off the sync service. To turn on
......
......@@ -15,5 +15,6 @@ FROM alpine:3.13
RUN apk add --no-cache ca-certificates jq curl
COPY --from=builder /go/batch-submitter/batch-submitter /usr/local/bin/
WORKDIR /usr/local/bin
COPY ./ops/scripts/batch-submitter.sh .
ENTRYPOINT ["batch-submitter"]
......@@ -2,7 +2,7 @@ FROM golang:1.15-alpine3.13 as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
ADD ./go/gas-oracle /gas-oracle
COPY ./go/gas-oracle /gas-oracle
RUN cd /gas-oracle && make gas-oracle
FROM alpine:3.13
......@@ -10,5 +10,6 @@ FROM alpine:3.13
RUN apk add --no-cache ca-certificates jq curl
COPY --from=builder /gas-oracle/gas-oracle /usr/local/bin/
WORKDIR /usr/local/bin/
COPY ./ops/scripts/gas-oracle.sh .
ENTRYPOINT ["gas-oracle"]
......@@ -15,6 +15,7 @@ FROM alpine:3.13
RUN apk add --no-cache ca-certificates jq curl
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/
WORKDIR /usr/local/bin/
EXPOSE 8545 8546 8547
COPY ./ops/scripts/geth.sh .
ENTRYPOINT ["geth"]
FROM golang:1.16 as builder
ADD ./go/l2geth-exporter /app/
COPY ./go/l2geth-exporter /app/
WORKDIR /app/
RUN make build
FROM alpine:latest
FROM alpine:3.13
RUN apk --no-cache add ca-certificates
WORKDIR /root/
COPY --from=builder /app/l2geth-exporter /usr/local/bin/
......
FROM golang:1.16 as builder
ADD ./go/op-exporter /app/
COPY ./go/op-exporter /app/
WORKDIR /app/
RUN make build
FROM alpine:latest
FROM alpine:3.13
RUN apk --no-cache add ca-certificates
WORKDIR /root/
COPY --from=builder /app/op-exporter /usr/local/bin/
......
......@@ -6,7 +6,12 @@
# when used with typescript/hardhat: https://github.com/nomiclabs/hardhat/issues/1219
FROM node:16.13-buster-slim as base
RUN apt-get update -y && apt-get install -y git curl jq python3
RUN apt-get update -y && apt-get install -y --no-install-recommends git \
curl \
jq \
python3 \
ca-certificates \
&& rm -rf /var/lib/apt/lists/*
# copy over the needed configs to run the dep installation
# note: this approach can be a bit unhandy to maintain, but it allows
......@@ -22,7 +27,7 @@ COPY packages/message-relayer/package.json ./packages/message-relayer/package.js
COPY packages/replica-healthcheck/package.json ./packages/replica-healthcheck/package.json
COPY integration-tests/package.json ./integration-tests/package.json
RUN yarn install --frozen-lockfile
RUN yarn install --frozen-lockfile && yarn cache clean
COPY ./packages ./packages
COPY ./integration-tests ./integration-tests
......
......@@ -8,7 +8,7 @@ COPY [ \
# install deps
WORKDIR /hardhat
RUN yarn install
RUN yarn install && yarn cache clean
# bring in dockerenv so that hardhat launches with host = 0.0.0.0 instead of 127.0.0.1
# so that it's accessible from other boxes as well
......
......@@ -23,7 +23,7 @@ import { SomeContract } from "@eth-optimism/contracts/path/to/SomeContract.sol";
```
Note that the `/path/to/SomeContract.sol` is the path to the target contract within the [contracts folder](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts/contracts) inside of this package.
For example, the [L1CrossDomainMessenger](/contracts/L1/messaging/L1CrossDomainMessenger.sol) contract is located at `/contracts/L1/messaging/L1CrossDomainMessenger.sol`, relative to this README.
For example, the [L1CrossDomainMessenger](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts/contracts/L1/messaging/L1CrossDomainMessenger.sol) contract is located at `packages/contracts/contracts/L1/messaging/L1CrossDomainMessenger.sol`, relative to this README.
You would therefore import the contract as:
......
import { ethers } from 'ethers'
import { task } from 'hardhat/config'
import * as types from 'hardhat/internal/core/params/argumentTypes'
import { SequencerBatch } from '@eth-optimism/core-utils'
import { BatchType, SequencerBatch } from '@eth-optimism/core-utils'
import { names } from '../src/address-names'
import { getContractFromArtifact } from '../src/deploy-utils'
......@@ -51,7 +51,31 @@ task('fetch-batches')
for (const event of events) {
const tx = await provider.getTransaction(event.transactionHash)
const batch = (SequencerBatch as any).fromHex(tx.data)
batches.push(batch.toJSON())
// Add an extra field to the resulting json
// so that the serialization sizes can be observed
const json = batch.toJSON()
json.sizes = {
legacy: 0,
zlib: 0,
}
// Create a copy of the batch to serialize in
// the alternative format
const copy = (SequencerBatch as any).fromHex(tx.data)
if (batch.type === BatchType.ZLIB) {
copy.type = BatchType.LEGACY
json.sizes.legacy = copy.encode().length
json.sizes.zlib = batch.encode().length
} else {
copy.type = BatchType.ZLIB
json.sizes.zlib = copy.encode().length
json.sizes.legacy = batch.encode().length
}
json.compressionRatio = json.sizes.zlib / json.sizes.legacy
batches.push(json)
}
}
......
......@@ -19,7 +19,7 @@ We run two sub-services, the [`L1IngestionService`](./src/services/l1-ingestion/
See an example config at [.env.example](.env.example); copy into a `.env` file before running.
`L1_TRANSPORT__L1_RPC_ENDPOINT` can be the JSON RPC endpoint of any L1 Ethereum node. `L1_TRANSPORT__ADDRESS_MANAGER` should be the contract address of the Address Manager on the corresponding network; find their values in the [Regenesis repo](https://github.com/ethereum-optimism/regenesis).
`L1_TRANSPORT__L1_RPC_ENDPOINT` can be the JSON RPC endpoint of any L1 Ethereum node. `L1_TRANSPORT__ADDRESS_MANAGER` should be the contract address of the Address Manager on the corresponding network; find their values in the [contracts package](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts/deployments).
### Building and usage
......@@ -47,7 +47,7 @@ Here's the list of environment variables you can change:
| Variable | Default | Description |
| ------------------------------------------------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| DATA_TRANSPORT_LAYER__DB_PATH | ./db | Path to the database for this service. |
| DATA_TRANSPORT_LAYER__ADDRESS_MANAGER | - | Address of the AddressManager contract on L1. See [regenesis](https://github.com/ethereum-optimism/regenesis) repo to find this address for mainnet or kovan. |
| DATA_TRANSPORT_LAYER__ADDRESS_MANAGER | - | Address of the AddressManager contract on L1. See [contracts](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts/deployments) package to find this address for mainnet or kovan. |
| DATA_TRANSPORT_LAYER__POLLING_INTERVAL | 5000 | Period of time between execution loops. |
| DATA_TRANSPORT_LAYER__DANGEROUSLY_CATCH_ALL_ERRORS | false | If true, will catch all errors without throwing. |
| DATA_TRANSPORT_LAYER__CONFIRMATIONS | 12 | Number of confirmations to wait before accepting transactions as "canonical". |
......
# @eth-optimism/smock
## NOTICE
Smock v1 is being deprecated.
Please migrate to [Smock v2](https://github.com/defi-wonderland/smock).
You can find an archive of the Smock v1 codebase at [optimism@d337713c91](https://github.com/ethereum-optimism/optimism/tree/d337713c91c6634f546b8d6572392c0784ab8217/packages/smock).