Commit 22ce313e authored by mergify[bot], committed by GitHub

Merge branch 'develop' into feat/lwd-cdm

parents 8d284916 2c26ac35
......@@ -26,6 +26,7 @@
/op-chain-ops @ethereum-optimism/go-reviewers
/op-e2e @ethereum-optimism/go-reviewers
/op-node @ethereum-optimism/go-reviewers
/op-node/rollup @protolambda @trianglesphere
/op-proposer @ethereum-optimism/go-reviewers
/op-service @ethereum-optimism/go-reviewers
......
......@@ -163,7 +163,7 @@ func NewBatchSubmitterWithSigner(cfg Config, addr common.Address, signer SignerF
done: make(chan struct{}),
log: l,
state: NewChannelManager(l, cfg.ChannelTimeout),
// TODO: this context only exists because the even loop doesn't reach done
// TODO: this context only exists because the event loop doesn't reach done
// if the tx manager is blocking forever due to e.g. insufficient balance.
ctx: ctx,
cancel: cancel,
......
......@@ -14,12 +14,12 @@ import (
// version 1 messages have a value and the most significant
// byte of the nonce is a 1
type CrossDomainMessage struct {
Nonce *big.Int
Sender *common.Address
Target *common.Address
Value *big.Int
GasLimit *big.Int
Data []byte
Nonce *big.Int `json:"nonce"`
Sender *common.Address `json:"sender"`
Target *common.Address `json:"target"`
Value *big.Int `json:"value"`
GasLimit *big.Int `json:"gasLimit"`
Data []byte `json:"data"`
}
// NewCrossDomainMessage creates a CrossDomainMessage.
......
......@@ -76,7 +76,7 @@ func MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger *com
withdrawal.Target,
value,
new(big.Int),
withdrawal.Data,
[]byte(withdrawal.Data),
)
if err != nil {
return nil, fmt.Errorf("cannot abi encode relayMessage: %w", err)
......
......@@ -2,11 +2,13 @@ package crossdomain
import (
"errors"
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
)
......@@ -28,7 +30,7 @@ type Withdrawal struct {
Target *common.Address `json:"target"`
Value *big.Int `json:"value"`
GasLimit *big.Int `json:"gasLimit"`
Data []byte `json:"data"`
Data hexutil.Bytes `json:"data"`
}
// NewWithdrawal will create a Withdrawal
......@@ -44,7 +46,7 @@ func NewWithdrawal(
Target: target,
Value: value,
GasLimit: gasLimit,
Data: data,
Data: hexutil.Bytes(data),
}
}
......@@ -58,9 +60,9 @@ func (w *Withdrawal) Encode() ([]byte, error) {
{Name: "gasLimit", Type: Uint256Type},
{Name: "data", Type: BytesType},
}
enc, err := args.Pack(w.Nonce, w.Sender, w.Target, w.Value, w.GasLimit, w.Data)
enc, err := args.Pack(w.Nonce, w.Sender, w.Target, w.Value, w.GasLimit, []byte(w.Data))
if err != nil {
return nil, err
return nil, fmt.Errorf("cannot encode withdrawal: %w", err)
}
return enc, nil
}
......@@ -110,7 +112,7 @@ func (w *Withdrawal) Decode(data []byte) error {
w.Target = &target
w.Value = value
w.GasLimit = gasLimit
w.Data = msgData
w.Data = hexutil.Bytes(msgData)
return nil
}
......@@ -150,6 +152,6 @@ func (w *Withdrawal) WithdrawalTransaction() bindings.TypesWithdrawalTransaction
Target: *w.Target,
Value: w.Value,
GasLimit: w.GasLimit,
Data: w.Data,
Data: []byte(w.Data),
}
}
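
The Data field switches from []byte to hexutil.Bytes (and CrossDomainMessage gains JSON tags) so that message and withdrawal data round-trips through JSON as 0x-prefixed hex rather than base64, while the []byte conversions keep the ABI packing calls unchanged. A minimal standalone sketch of the difference — the struct names here are illustrative, not the repo's types:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// rawData uses a plain []byte, which encoding/json serializes as base64.
type rawData struct {
	Data []byte `json:"data"`
}

// hexData uses hexutil.Bytes, which serializes as 0x-prefixed hex.
type hexData struct {
	Data hexutil.Bytes `json:"data"`
}

func main() {
	payload := []byte{0xde, 0xad, 0xbe, 0xef}
	a, _ := json.Marshal(rawData{Data: payload})
	b, _ := json.Marshal(hexData{Data: hexutil.Bytes(payload)})
	fmt.Println(string(a)) // {"data":"3q2+7w=="}
	fmt.Println(string(b)) // {"data":"0xdeadbeef"}
}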
......@@ -26,9 +26,10 @@ type L2Sequencer struct {
func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, eng L2API, cfg *rollup.Config, seqConfDepth uint64) *L2Sequencer {
ver := NewL2Verifier(t, log, l1, eng, cfg)
attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, eng)
return &L2Sequencer{
L2Verifier: *ver,
sequencer: driver.NewSequencer(log, cfg, l1, eng, ver.derivation, metrics.NoopMetrics),
sequencer: driver.NewSequencer(log, cfg, eng, ver.derivation, attrBuilder, metrics.NoopMetrics),
l1OriginSelector: driver.NewL1OriginSelector(log, cfg, l1, seqConfDepth),
seqOldOrigin: false,
failL2GossipUnsafeBlock: nil,
......
package doc
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/ethereum-optimism/optimism/op-node/metrics"
"github.com/olekukonko/tablewriter"
"github.com/urfave/cli"
)
var Subcommands = cli.Commands{
{
Name: "metrics",
Usage: "Dumps a list of supported metrics to stdout",
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Value: "markdown",
Usage: "Output format (json|markdown)",
},
},
Action: func(ctx *cli.Context) error {
m := metrics.NewMetrics("default")
supportedMetrics := m.Document()
format := ctx.String("format")
if format != "markdown" && format != "json" {
return fmt.Errorf("invalid format: %s", format)
}
if format == "json" {
enc := json.NewEncoder(os.Stdout)
return enc.Encode(supportedMetrics)
}
table := tablewriter.NewWriter(os.Stdout)
table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
table.SetCenterSeparator("|")
table.SetAutoWrapText(false)
table.SetHeader([]string{"Metric", "Description", "Labels", "Type"})
var data [][]string
for _, metric := range supportedMetrics {
labels := strings.Join(metric.Labels, ",")
data = append(data, []string{metric.Name, metric.Help, labels, metric.Type})
}
table.AppendBulk(data)
table.Render()
return nil
},
},
}
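
The JSON output of this new command is just the []DocumentedMetric slice returned by m.Document() (the DocumentedMetric type is added in op-service/metrics later in this diff). A hedged sketch of consuming that output on stdin — the `op-node doc metrics --format=json` invocation assumes the usual binary name, and the import path is the op-service/metrics package shown below in this diff:

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/ethereum-optimism/optimism/op-service/metrics"
)

// Reads the JSON emitted by `doc metrics --format=json` from stdin and prints
// a one-line summary per documented metric.
func main() {
	var docs []metrics.DocumentedMetric
	if err := json.NewDecoder(os.Stdin).Decode(&docs); err != nil {
		fmt.Fprintln(os.Stderr, "decode:", err)
		os.Exit(1)
	}
	for _, d := range docs {
		fmt.Printf("%s [%s] labels=%v: %s\n", d.Name, d.Type, d.Labels, d.Help)
	}
}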
......@@ -9,6 +9,8 @@ import (
"syscall"
"time"
"github.com/ethereum-optimism/optimism/op-node/cmd/doc"
"github.com/urfave/cli"
opnode "github.com/ethereum-optimism/optimism/op-node"
......@@ -68,6 +70,10 @@ func main() {
Name: "genesis",
Subcommands: genesis.Subcommands,
},
{
Name: "doc",
Subcommands: doc.Subcommands,
},
}
err := app.Run(os.Args)
......
......@@ -23,6 +23,7 @@ require (
github.com/libp2p/go-libp2p-testing v0.12.0
github.com/multiformats/go-multiaddr v0.7.0
github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/olekukonko/tablewriter v0.0.5
github.com/prometheus/client_golang v1.13.0
github.com/stretchr/testify v1.8.1
github.com/urfave/cli v1.22.9
......@@ -115,7 +116,6 @@ require (
github.com/multiformats/go-multistream v0.3.3 // indirect
github.com/multiformats/go-varint v0.0.6 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
......
package metrics
import (
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
// CacheMetrics implements the Metrics interface in the caching package,
......@@ -34,16 +34,16 @@ func (m *CacheMetrics) CacheGet(typeLabel string, hit bool) {
}
}
func NewCacheMetrics(registry prometheus.Registerer, ns string, name string, displayName string) *CacheMetrics {
func NewCacheMetrics(factory metrics.Factory, ns string, name string, displayName string) *CacheMetrics {
return &CacheMetrics{
SizeVec: promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
SizeVec: factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: name + "_size",
Help: displayName + " cache size",
}, []string{
"type",
}),
GetVec: promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
GetVec: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Name: name + "_get",
Help: displayName + " lookups, hitting or not",
......@@ -51,7 +51,7 @@ func NewCacheMetrics(registry prometheus.Registerer, ns string, name string, dis
"type",
"hit",
}),
AddVec: promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
AddVec: factory.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Name: name + "_add",
Help: displayName + " additions, evicting previous values or not",
......
......@@ -4,8 +4,9 @@ import (
"fmt"
"time"
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type EventMetrics struct {
......@@ -18,14 +19,14 @@ func (e *EventMetrics) RecordEvent() {
e.LastTime.Set(float64(time.Now().Unix()))
}
func NewEventMetrics(registry prometheus.Registerer, ns string, name string, displayName string) *EventMetrics {
func NewEventMetrics(factory metrics.Factory, ns string, name string, displayName string) *EventMetrics {
return &EventMetrics{
Total: promauto.With(registry).NewCounter(prometheus.CounterOpts{
Total: factory.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: fmt.Sprintf("%s_total", name),
Help: fmt.Sprintf("Count of %s events", displayName),
}),
LastTime: promauto.With(registry).NewGauge(prometheus.GaugeOpts{
LastTime: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: fmt.Sprintf("last_%s_unix", name),
Help: fmt.Sprintf("Timestamp of last %s event", displayName),
......
......@@ -23,17 +23,32 @@ type SystemConfigL2Fetcher interface {
SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (eth.SystemConfig, error)
}
// FetchingAttributesBuilder fetches inputs for the building of L2 payload attributes on the fly.
type FetchingAttributesBuilder struct {
cfg *rollup.Config
l1 L1ReceiptsFetcher
l2 SystemConfigL2Fetcher
}
func NewFetchingAttributesBuilder(cfg *rollup.Config, l1 L1ReceiptsFetcher, l2 SystemConfigL2Fetcher) *FetchingAttributesBuilder {
return &FetchingAttributesBuilder{
cfg: cfg,
l1: l1,
l2: l2,
}
}
// PreparePayloadAttributes prepares a PayloadAttributes template that is ready to build a L2 block with deposits only, on top of the given l2Parent, with the given epoch as L1 origin.
// The template defaults to NoTxPool=true, and no sequencer transactions: the caller has to modify the template to add transactions,
// by setting NoTxPool=false as sequencer, or by appending batch transactions as verifier.
// The severity of the error is returned; a crit=false error means there was a temporary issue, like a failed RPC or time-out.
// A crit=true error means the input arguments are inconsistent or invalid.
func PreparePayloadAttributes(ctx context.Context, cfg *rollup.Config, dl L1ReceiptsFetcher, l2 SystemConfigL2Fetcher, l2Parent eth.L2BlockRef, timestamp uint64, epoch eth.BlockID) (attrs *eth.PayloadAttributes, err error) {
func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Context, l2Parent eth.L2BlockRef, epoch eth.BlockID) (attrs *eth.PayloadAttributes, err error) {
var l1Info eth.BlockInfo
var depositTxs []hexutil.Bytes
var seqNumber uint64
sysConfig, err := l2.SystemConfigByL2Hash(ctx, l2Parent.Hash)
sysConfig, err := ba.l2.SystemConfigByL2Hash(ctx, l2Parent.Hash)
if err != nil {
return nil, NewTemporaryError(fmt.Errorf("failed to retrieve L2 parent block: %w", err))
}
......@@ -42,7 +57,7 @@ func PreparePayloadAttributes(ctx context.Context, cfg *rollup.Config, dl L1Rece
// case we need to fetch all transaction receipts from the L1 origin block so we can scan for
// user deposits.
if l2Parent.L1Origin.Number != epoch.Number {
info, receipts, err := dl.FetchReceipts(ctx, epoch.Hash)
info, receipts, err := ba.l1.FetchReceipts(ctx, epoch.Hash)
if err != nil {
return nil, NewTemporaryError(fmt.Errorf("failed to fetch L1 block info and receipts: %w", err))
}
......@@ -52,13 +67,13 @@ func PreparePayloadAttributes(ctx context.Context, cfg *rollup.Config, dl L1Rece
epoch, info.ParentHash(), l2Parent.L1Origin))
}
deposits, err := DeriveDeposits(receipts, cfg.DepositContractAddress)
deposits, err := DeriveDeposits(receipts, ba.cfg.DepositContractAddress)
if err != nil {
// deposits may never be ignored. Failing to process them is a critical error.
return nil, NewCriticalError(fmt.Errorf("failed to derive some deposits: %w", err))
}
// apply sysCfg changes
if err := UpdateSystemConfigWithL1Receipts(&sysConfig, receipts, cfg); err != nil {
if err := UpdateSystemConfigWithL1Receipts(&sysConfig, receipts, ba.cfg); err != nil {
return nil, NewCriticalError(fmt.Errorf("failed to apply derived L1 sysCfg updates: %w", err))
}
......@@ -69,7 +84,7 @@ func PreparePayloadAttributes(ctx context.Context, cfg *rollup.Config, dl L1Rece
if l2Parent.L1Origin.Hash != epoch.Hash {
return nil, NewResetError(fmt.Errorf("cannot create new block with L1 origin %s in conflict with L1 origin %s", epoch, l2Parent.L1Origin))
}
info, err := dl.InfoByHash(ctx, epoch.Hash)
info, err := ba.l1.InfoByHash(ctx, epoch.Hash)
if err != nil {
return nil, NewTemporaryError(fmt.Errorf("failed to fetch L1 block info: %w", err))
}
......@@ -78,6 +93,13 @@ func PreparePayloadAttributes(ctx context.Context, cfg *rollup.Config, dl L1Rece
seqNumber = l2Parent.SequenceNumber + 1
}
// Sanity check the L1 origin was correctly selected to maintain the time invariant between L1 and L2
nextL2Time := l2Parent.Time + ba.cfg.BlockTime
if nextL2Time < l1Info.Time() {
return nil, NewResetError(fmt.Errorf("cannot build L2 block on top %s for time %d before L1 origin %s at time %d",
l2Parent, nextL2Time, eth.ToBlockID(l1Info), l1Info.Time()))
}
l1InfoTx, err := L1InfoDepositBytes(seqNumber, l1Info, sysConfig)
if err != nil {
return nil, NewCriticalError(fmt.Errorf("failed to create l1InfoTx: %w", err))
......@@ -88,7 +110,7 @@ func PreparePayloadAttributes(ctx context.Context, cfg *rollup.Config, dl L1Rece
txs = append(txs, depositTxs...)
return &eth.PayloadAttributes{
Timestamp: hexutil.Uint64(timestamp),
Timestamp: hexutil.Uint64(nextL2Time),
PrevRandao: eth.Bytes32(l1Info.MixDigest()),
SuggestedFeeRecipient: predeploys.SequencerFeeVaultAddr,
Transactions: txs,
......
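
The refactor replaces the free PreparePayloadAttributes function with a method on FetchingAttributesBuilder: callers no longer pass a timestamp, because the builder targets l2Parent.Time + cfg.BlockTime itself and resets if that would fall before the L1 origin's time. A minimal calling sketch under assumed conditions — the import paths follow the op-node layout at the time of this commit, and the fetcher arguments are placeholders supplied by the caller:

package example

import (
	"context"
	"time"

	"github.com/ethereum-optimism/optimism/op-node/eth"
	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)

// buildAttributes shows the new call pattern: construct the builder once, then
// call PreparePayloadAttributes without a timestamp argument.
func buildAttributes(ctx context.Context, cfg *rollup.Config,
	l1 derive.L1ReceiptsFetcher, l2 derive.SystemConfigL2Fetcher,
	l2Parent eth.L2BlockRef, epoch eth.BlockID) (*eth.PayloadAttributes, error) {
	builder := derive.NewFetchingAttributesBuilder(cfg, l1, l2)

	// The builder derives the target timestamp as l2Parent.Time + cfg.BlockTime.
	fetchCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()
	return builder.PreparePayloadAttributes(fetchCtx, l2Parent, epoch)
}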
......@@ -23,22 +23,24 @@ import (
// This stage can be reset by clearing it's batch buffer.
// This stage does not need to retain any references to L1 blocks.
type AttributesBuilder interface {
PreparePayloadAttributes(ctx context.Context, l2Parent eth.L2BlockRef, epoch eth.BlockID) (attrs *eth.PayloadAttributes, err error)
}
type AttributesQueue struct {
log log.Logger
config *rollup.Config
dl L1ReceiptsFetcher
eng SystemConfigL2Fetcher
prev *BatchQueue
batch *BatchData
log log.Logger
config *rollup.Config
builder AttributesBuilder
prev *BatchQueue
batch *BatchData
}
func NewAttributesQueue(log log.Logger, cfg *rollup.Config, l1Fetcher L1ReceiptsFetcher, eng SystemConfigL2Fetcher, prev *BatchQueue) *AttributesQueue {
func NewAttributesQueue(log log.Logger, cfg *rollup.Config, builder AttributesBuilder, prev *BatchQueue) *AttributesQueue {
return &AttributesQueue{
log: log,
config: cfg,
dl: l1Fetcher,
eng: eng,
prev: prev,
log: log,
config: cfg,
builder: builder,
prev: prev,
}
}
......@@ -74,9 +76,13 @@ func (aq *AttributesQueue) createNextAttributes(ctx context.Context, batch *Batc
if batch.ParentHash != l2SafeHead.Hash {
return nil, NewResetError(fmt.Errorf("valid batch has bad parent hash %s, expected %s", batch.ParentHash, l2SafeHead.Hash))
}
// sanity check timestamp
if expected := l2SafeHead.Time + aq.config.BlockTime; expected != batch.Timestamp {
return nil, NewResetError(fmt.Errorf("valid batch has bad timestamp %d, expected %d", batch.Timestamp, expected))
}
fetchCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
attrs, err := PreparePayloadAttributes(fetchCtx, aq.config, aq.dl, aq.eng, l2SafeHead, batch.Timestamp, batch.Epoch())
attrs, err := aq.builder.PreparePayloadAttributes(fetchCtx, l2SafeHead, batch.Epoch())
if err != nil {
return nil, err
}
......
......@@ -40,6 +40,7 @@ func TestAttributesQueue(t *testing.T) {
safeHead := testutils.RandomL2BlockRef(rng)
safeHead.L1Origin = l1Info.ID()
safeHead.Time = l1Info.InfoTime
batch := &BatchData{BatchV1{
ParentHash: safeHead.Hash,
......@@ -75,11 +76,12 @@ func TestAttributesQueue(t *testing.T) {
NoTxPool: true,
GasLimit: (*eth.Uint64Quantity)(&expectedL1Cfg.GasLimit),
}
attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, l2Fetcher)
aq := NewAttributesQueue(testlog.Logger(t, log.LvlError), cfg, l1Fetcher, l2Fetcher, nil)
aq := NewAttributesQueue(testlog.Logger(t, log.LvlError), cfg, attrBuilder, nil)
actual, err := aq.createNextAttributes(context.Background(), batch, safeHead)
require.Nil(t, err)
require.NoError(t, err)
require.Equal(t, attrs, *actual)
}
......@@ -43,12 +43,12 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1CfgFetcher := &testutils.MockL2Client{}
l1CfgFetcher.ExpectSystemConfigByL2Hash(l2Parent.Hash, testSysCfg, nil)
defer l1CfgFetcher.AssertExpectations(t)
l2Time := l2Parent.Time + cfg.BlockTime
l1Info := testutils.RandomBlockInfo(rng)
l1Info.InfoNum = l2Parent.L1Origin.Number + 1
epoch := l1Info.ID()
l1Fetcher.ExpectFetchReceipts(epoch.Hash, l1Info, nil, nil)
_, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l1CfgFetcher, l2Parent, l2Time, epoch)
attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, l1CfgFetcher)
_, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch)
require.NotNil(t, err, "inconsistent L1 origin error expected")
require.ErrorIs(t, err, ErrReset, "inconsistent L1 origin transition must be handled like a critical error with reorg")
})
......@@ -60,11 +60,11 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1CfgFetcher := &testutils.MockL2Client{}
l1CfgFetcher.ExpectSystemConfigByL2Hash(l2Parent.Hash, testSysCfg, nil)
defer l1CfgFetcher.AssertExpectations(t)
l2Time := l2Parent.Time + cfg.BlockTime
l1Info := testutils.RandomBlockInfo(rng)
l1Info.InfoNum = l2Parent.L1Origin.Number
epoch := l1Info.ID()
_, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l1CfgFetcher, l2Parent, l2Time, epoch)
attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, l1CfgFetcher)
_, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch)
require.NotNil(t, err, "inconsistent L1 origin error expected")
require.ErrorIs(t, err, ErrReset, "inconsistent L1 origin transition must be handled like a critical error with reorg")
})
......@@ -76,12 +76,12 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1CfgFetcher := &testutils.MockL2Client{}
l1CfgFetcher.ExpectSystemConfigByL2Hash(l2Parent.Hash, testSysCfg, nil)
defer l1CfgFetcher.AssertExpectations(t)
l2Time := l2Parent.Time + cfg.BlockTime
epoch := l2Parent.L1Origin
epoch.Number += 1
mockRPCErr := errors.New("mock rpc error")
l1Fetcher.ExpectFetchReceipts(epoch.Hash, nil, nil, mockRPCErr)
_, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l1CfgFetcher, l2Parent, l2Time, epoch)
attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, l1CfgFetcher)
_, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch)
require.ErrorIs(t, err, mockRPCErr, "mock rpc error expected")
require.ErrorIs(t, err, ErrTemporary, "rpc errors should not be critical, it is not necessary to reorg")
})
......@@ -93,11 +93,11 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1CfgFetcher := &testutils.MockL2Client{}
l1CfgFetcher.ExpectSystemConfigByL2Hash(l2Parent.Hash, testSysCfg, nil)
defer l1CfgFetcher.AssertExpectations(t)
l2Time := l2Parent.Time + cfg.BlockTime
epoch := l2Parent.L1Origin
mockRPCErr := errors.New("mock rpc error")
l1Fetcher.ExpectInfoByHash(epoch.Hash, nil, mockRPCErr)
_, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l1CfgFetcher, l2Parent, l2Time, epoch)
attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, l1CfgFetcher)
_, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch)
require.ErrorIs(t, err, mockRPCErr, "mock rpc error expected")
require.ErrorIs(t, err, ErrTemporary, "rpc errors should not be critical, it is not necessary to reorg")
})
......@@ -109,7 +109,6 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1CfgFetcher := &testutils.MockL2Client{}
l1CfgFetcher.ExpectSystemConfigByL2Hash(l2Parent.Hash, testSysCfg, nil)
defer l1CfgFetcher.AssertExpectations(t)
l2Time := l2Parent.Time + cfg.BlockTime
l1Info := testutils.RandomBlockInfo(rng)
l1Info.InfoParentHash = l2Parent.L1Origin.Hash
l1Info.InfoNum = l2Parent.L1Origin.Number + 1
......@@ -117,7 +116,8 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1InfoTx, err := L1InfoDepositBytes(0, l1Info, testSysCfg)
require.NoError(t, err)
l1Fetcher.ExpectFetchReceipts(epoch.Hash, l1Info, nil, nil)
attrs, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l1CfgFetcher, l2Parent, l2Time, epoch)
attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, l1CfgFetcher)
attrs, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch)
require.NoError(t, err)
require.NotNil(t, attrs)
require.Equal(t, l2Parent.Time+cfg.BlockTime, uint64(attrs.Timestamp))
......@@ -135,7 +135,6 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1CfgFetcher := &testutils.MockL2Client{}
l1CfgFetcher.ExpectSystemConfigByL2Hash(l2Parent.Hash, testSysCfg, nil)
defer l1CfgFetcher.AssertExpectations(t)
l2Time := l2Parent.Time + cfg.BlockTime
l1Info := testutils.RandomBlockInfo(rng)
l1Info.InfoParentHash = l2Parent.L1Origin.Hash
l1Info.InfoNum = l2Parent.L1Origin.Number + 1
......@@ -157,7 +156,8 @@ func TestPreparePayloadAttributes(t *testing.T) {
l2Txs := append(append(make([]eth.Data, 0), l1InfoTx), usedDepositTxs...)
l1Fetcher.ExpectFetchReceipts(epoch.Hash, l1Info, receipts, nil)
attrs, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l1CfgFetcher, l2Parent, l2Time, epoch)
attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, l1CfgFetcher)
attrs, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch)
require.NoError(t, err)
require.NotNil(t, attrs)
require.Equal(t, l2Parent.Time+cfg.BlockTime, uint64(attrs.Timestamp))
......@@ -175,7 +175,6 @@ func TestPreparePayloadAttributes(t *testing.T) {
l1CfgFetcher := &testutils.MockL2Client{}
l1CfgFetcher.ExpectSystemConfigByL2Hash(l2Parent.Hash, testSysCfg, nil)
defer l1CfgFetcher.AssertExpectations(t)
l2Time := l2Parent.Time + cfg.BlockTime
l1Info := testutils.RandomBlockInfo(rng)
l1Info.InfoHash = l2Parent.L1Origin.Hash
l1Info.InfoNum = l2Parent.L1Origin.Number
......@@ -185,7 +184,8 @@ func TestPreparePayloadAttributes(t *testing.T) {
require.NoError(t, err)
l1Fetcher.ExpectInfoByHash(epoch.Hash, l1Info, nil)
attrs, err := PreparePayloadAttributes(context.Background(), cfg, l1Fetcher, l1CfgFetcher, l2Parent, l2Time, epoch)
attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, l1CfgFetcher)
attrs, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch)
require.NoError(t, err)
require.NotNil(t, attrs)
require.Equal(t, l2Parent.Time+cfg.BlockTime, uint64(attrs.Timestamp))
......
......@@ -74,7 +74,8 @@ func NewDerivationPipeline(log log.Logger, cfg *rollup.Config, l1Fetcher L1Fetch
bank := NewChannelBank(log, cfg, frameQueue, l1Fetcher)
chInReader := NewChannelInReader(log, bank)
batchQueue := NewBatchQueue(log, cfg, chInReader)
attributesQueue := NewAttributesQueue(log, cfg, l1Fetcher, engine, batchQueue)
attrBuilder := NewFetchingAttributesBuilder(cfg, l1Fetcher, engine)
attributesQueue := NewAttributesQueue(log, cfg, attrBuilder, batchQueue)
// Step stages
eng := NewEngineQueue(log, cfg, engine, metrics, attributesQueue, l1Fetcher)
......
......@@ -27,6 +27,17 @@ var (
ConfigUpdateEventVersion0 = common.Hash{}
)
var (
// A left-padded uint256 equal to 32.
oneWordUint = common.Hash{31: 32}
// A left-padded uint256 equal to 64.
twoWordUint = common.Hash{31: 64}
// 24 zero bytes (the padding for a uint64 in a 32 byte word)
uint64Padding = make([]byte, 24)
// 12 zero bytes (the padding for an Ethereum address in a 32 byte word)
addressPadding = make([]byte, 12)
)
// UpdateSystemConfigWithL1Receipts filters all L1 receipts to find config updates and applies the config updates to the given sysCfg
func UpdateSystemConfigWithL1Receipts(sysCfg *eth.SystemConfig, receipts []*types.Receipt, cfg *rollup.Config) error {
var result error
......@@ -69,50 +80,94 @@ func ProcessSystemConfigUpdateLogEvent(destSysCfg *eth.SystemConfig, ev *types.L
}
// indexed 1
updateType := ev.Topics[2]
// unindexed data
// Create a reader of the unindexed data
reader := bytes.NewReader(ev.Data)
// Counter for the number of bytes read from `reader` via `readWord`
countReadBytes := 0
// Helper function to read a word from the log data reader
readWord := func() (b [32]byte) {
if _, err := reader.Read(b[:]); err != nil {
// If there is an error reading the next 32 bytes from the reader, return an empty
// 32 byte array. We always check that the number of bytes read (`countReadBytes`)
// is equal to the expected amount at the end of each switch case.
return b
}
countReadBytes += 32
return b
}
// Attempt to read unindexed data
switch updateType {
case SystemConfigUpdateBatcher:
if len(ev.Data) != 32*3 {
return fmt.Errorf("expected 32*3 bytes in batcher hash update, but got %d bytes", len(ev.Data))
// Read the pointer, it should always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected offset to point to length location, but got %s", word)
}
if x := common.BytesToHash(ev.Data[:32]); x != (common.Hash{31: 32}) {
return fmt.Errorf("expected offset to point to length location, but got %s", x)
// Read the length, it should also always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected length to be 32 bytes, but got %s", word)
}
if x := common.BytesToHash(ev.Data[32:64]); x != (common.Hash{31: 32}) {
return fmt.Errorf("expected length of 1 bytes32, but got %s", x)
// Indexing `word` directly is always safe here, it is guaranteed to be 32 bytes in length.
// Check that the batcher address is correctly zero-padded.
word := readWord()
if !bytes.Equal(word[:12], addressPadding) {
return fmt.Errorf("expected version 0 batcher hash with zero padding, but got %x", word)
}
if !bytes.Equal(ev.Data[64:64+12], make([]byte, 12)) {
return fmt.Errorf("expected version 0 batcher hash with zero padding, but got %x", ev.Data)
destSysCfg.BatcherAddr.SetBytes(word[12:])
if countReadBytes != 32*3 {
return NewCriticalError(fmt.Errorf("expected 32*3 bytes in batcher hash update, but got %d bytes", len(ev.Data)))
}
destSysCfg.BatcherAddr.SetBytes(ev.Data[64+12:])
return nil
case SystemConfigUpdateGasConfig: // left padded uint8
if len(ev.Data) != 32*4 {
return fmt.Errorf("expected 32*4 bytes in GPO params update data, but got %d", len(ev.Data))
case SystemConfigUpdateGasConfig:
// Read the pointer, it should always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected offset to point to length location, but got %s", word)
}
if x := common.BytesToHash(ev.Data[:32]); x != (common.Hash{31: 32}) {
return fmt.Errorf("expected offset to point to length location, but got %s", x)
// Read the length, it should always equal 64.
if word := readWord(); word != twoWordUint {
return fmt.Errorf("expected length to be 64 bytes, but got %s", word)
}
if x := common.BytesToHash(ev.Data[32:64]); x != (common.Hash{31: 64}) {
return fmt.Errorf("expected length of 2 bytes32, but got %s", x)
// Set the system config's overhead and scalar values to the values read from the log
destSysCfg.Overhead = readWord()
destSysCfg.Scalar = readWord()
if countReadBytes != 32*4 {
return NewCriticalError(fmt.Errorf("expected 32*4 bytes in GPO params update data, but got %d", len(ev.Data)))
}
copy(destSysCfg.Overhead[:], ev.Data[64:96])
copy(destSysCfg.Scalar[:], ev.Data[96:128])
return nil
case SystemConfigUpdateGasLimit:
if len(ev.Data) != 32*3 {
return fmt.Errorf("expected 32*3 bytes in gas limit update, but got %d bytes", len(ev.Data))
// Read the pointer, it should always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected offset to point to length location, but got %s", word)
}
if x := common.BytesToHash(ev.Data[:32]); x != (common.Hash{31: 32}) {
return fmt.Errorf("expected offset to point to length location, but got %s", x)
// Read the length, it should also always equal 32.
if word := readWord(); word != oneWordUint {
return fmt.Errorf("expected length to be 32 bytes, but got %s", word)
}
if x := common.BytesToHash(ev.Data[32:64]); x != (common.Hash{31: 32}) {
return fmt.Errorf("expected length of 1 bytes32, but got %s", x)
// Indexing `word` directly is always safe here, it is guaranteed to be 32 bytes in length.
// Check that the gas limit is correctly zero-padded.
word := readWord()
if !bytes.Equal(word[:24], uint64Padding) {
return fmt.Errorf("expected zero padding for gaslimit, but got %x", word)
}
if !bytes.Equal(ev.Data[64:64+24], make([]byte, 24)) {
return fmt.Errorf("expected zero padding for gaslimit, but got %x", ev.Data)
destSysCfg.GasLimit = binary.BigEndian.Uint64(word[24:])
if countReadBytes != 32*3 {
return NewCriticalError(fmt.Errorf("expected 32*3 bytes in gas limit update, but got %d bytes", len(ev.Data)))
}
destSysCfg.GasLimit = binary.BigEndian.Uint64(ev.Data[64+24:])
return nil
case SystemConfigUpdateUnsafeBlockSigner:
// Ignored in derivation. This configurable applies to runtime configuration outside of the derivation.
......
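
The rewritten parser walks the unindexed event data word by word: an ABI offset word (always 32), a length word (32 for single-word payloads, 64 for the two-word GPO update), then the zero-padded payload, with countReadBytes verified at the end. A standalone sketch (not the repo's code, illustrative address) that builds a batcher-address update payload in that layout and decodes it with the same checks:

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// Builds the three-word layout of a batcher-address config update
// (offset=32, length=32, left-padded address) and decodes it word by word.
func main() {
	batcher := common.HexToAddress("0x000000000000000000000000000000000000bEEF")

	offset := common.Hash{31: 32} // ABI offset pointing at the length word
	length := common.Hash{31: 32} // payload length: one 32-byte word
	var payload common.Hash
	copy(payload[12:], batcher.Bytes()) // 12 zero padding bytes, then the 20-byte address

	data := append(append(append([]byte{}, offset[:]...), length[:]...), payload[:]...)

	reader := bytes.NewReader(data)
	readWord := func() (b [32]byte) {
		_, _ = reader.Read(b[:])
		return b
	}
	if w := readWord(); w != offset {
		panic("expected offset word of 32")
	}
	if w := readWord(); w != length {
		panic("expected length word of 32")
	}
	word := readWord()
	if !bytes.Equal(word[:12], make([]byte, 12)) {
		panic("expected zero padding before the address")
	}
	var addr common.Address
	addr.SetBytes(word[12:])
	fmt.Println("decoded batcher address:", addr) // ...bEEF
}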
......@@ -89,8 +89,8 @@ func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, ne
findL1Origin := NewL1OriginSelector(log, cfg, l1, driverCfg.SequencerConfDepth)
verifConfDepth := NewConfDepth(driverCfg.VerifierConfDepth, l1State.L1Head, l1)
derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l2, metrics)
sequencer := NewSequencer(log, cfg, l1, l2, derivationPipeline, metrics)
attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, l2)
sequencer := NewSequencer(log, cfg, l2, derivationPipeline, attrBuilder, metrics)
return &Driver{
l1State: l1State,
derivation: derivationPipeline,
......
......@@ -37,10 +37,10 @@ type Sequencer struct {
log log.Logger
config *rollup.Config
l1 Downloader
l2 derive.Engine
engineState EngineState
attrBuilder derive.AttributesBuilder
buildingOnto eth.L2BlockRef
buildingID eth.PayloadID
buildingStartTime time.Time
......@@ -48,14 +48,14 @@ type Sequencer struct {
metrics SequencerMetrics
}
func NewSequencer(log log.Logger, cfg *rollup.Config, l1 Downloader, l2 derive.Engine, engineState EngineState, metrics SequencerMetrics) *Sequencer {
func NewSequencer(log log.Logger, cfg *rollup.Config, l2 derive.Engine, engineState EngineState, attributesBuilder derive.AttributesBuilder, metrics SequencerMetrics) *Sequencer {
return &Sequencer{
log: log,
config: cfg,
l1: l1,
l2: l2,
metrics: metrics,
engineState: engineState,
attrBuilder: attributesBuilder,
}
}
......@@ -75,7 +75,7 @@ func (d *Sequencer) StartBuildingBlock(ctx context.Context, l1Origin eth.L1Block
fetchCtx, cancel := context.WithTimeout(ctx, time.Second*20)
defer cancel()
attrs, err := derive.PreparePayloadAttributes(fetchCtx, d.config, d.l1, d.l2, l2Head, l2Head.Time+d.config.BlockTime, l1Origin.ID())
attrs, err := d.attrBuilder.PreparePayloadAttributes(fetchCtx, l2Head, l1Origin.ID())
if err != nil {
return err
}
......
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type Factory interface {
NewCounter(opts prometheus.CounterOpts) prometheus.Counter
NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec
NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge
NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec
NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram
NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec
NewSummary(opts prometheus.SummaryOpts) prometheus.Summary
NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec
Document() []DocumentedMetric
}
type DocumentedMetric struct {
Type string `json:"type"`
Name string `json:"name"`
Help string `json:"help"`
Labels []string `json:"labels"`
}
type documentor struct {
metrics []DocumentedMetric
factory promauto.Factory
}
func With(registry *prometheus.Registry) Factory {
return &documentor{
factory: promauto.With(registry),
}
}
func (d *documentor) NewCounter(opts prometheus.CounterOpts) prometheus.Counter {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "counter",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
})
return d.factory.NewCounter(opts)
}
func (d *documentor) NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "counter",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
Labels: labelNames,
})
return d.factory.NewCounterVec(opts, labelNames)
}
func (d *documentor) NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "gauge",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
})
return d.factory.NewGauge(opts)
}
func (d *documentor) NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "gauge",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
Labels: labelNames,
})
return d.factory.NewGaugeVec(opts, labelNames)
}
func (d *documentor) NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "histogram",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
})
return d.factory.NewHistogram(opts)
}
func (d *documentor) NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "histogram",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
Labels: labelNames,
})
return d.factory.NewHistogramVec(opts, labelNames)
}
func (d *documentor) NewSummary(opts prometheus.SummaryOpts) prometheus.Summary {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "summary",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
})
return d.factory.NewSummary(opts)
}
func (d *documentor) NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec {
d.metrics = append(d.metrics, DocumentedMetric{
Type: "summary",
Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
Help: opts.Help,
Labels: labelNames,
})
return d.factory.NewSummaryVec(opts, labelNames)
}
func (d *documentor) Document() []DocumentedMetric {
return d.metrics
}
func fullName(ns, subsystem, name string) string {
out := ns
if subsystem != "" {
out += "_" + subsystem
}
return out + "_" + name
}
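
This Factory is what the CacheMetrics and EventMetrics constructors above now accept in place of a bare prometheus.Registerer: metrics.With wraps promauto and records a DocumentedMetric for every collector it creates, which is what `doc metrics` ultimately dumps. A small wiring sketch; the namespace and metric names are illustrative:

package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-service/metrics"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	registry := prometheus.NewRegistry()

	// The documenting factory registers collectors on the registry and records
	// a DocumentedMetric entry for each one it creates.
	factory := metrics.With(registry)

	requests := factory.NewCounterVec(prometheus.CounterOpts{
		Namespace: "op_node",
		Name:      "example_requests_total",
		Help:      "Example counter created through the documenting factory",
	}, []string{"method"})
	requests.WithLabelValues("eth_blockNumber").Inc()

	// Everything created via the factory is self-documenting.
	for _, d := range factory.Document() {
		fmt.Printf("%s (%s) labels=%v: %s\n", d.Name, d.Type, d.Labels, d.Help)
	}
}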
GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 261344)
GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_1() (gas: 75851)
GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 348151)
GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 112583)
GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_0() (gas: 348173)
GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 112604)
GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 348207)
GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 112639)
GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_0() (gas: 348229)
GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 112660)
GasBenchMark_L1StandardBridge_Finalize:test_finalizeETHWithdrawal_benchmark() (gas: 40502)
GasBenchMark_L2OutputOracle:test_proposeL2Output_benchmark() (gas: 88513)
GasBenchMark_OptimismPortal:test_depositTransaction_benchmark() (gas: 74998)
......@@ -17,7 +17,7 @@ Bytes_Test:test_toNibbles_expectedResult128Bytes_works() (gas: 129885)
Bytes_Test:test_toNibbles_expectedResult5Bytes_works() (gas: 6132)
Bytes_Test:test_toNibbles_zeroLengthInput_works() (gas: 966)
CrossDomainMessenger_BaseGas_Test:test_baseGas_succeeds() (gas: 20120)
CrossDomainOwnableThroughPortal_Test:test_depositTransaction_crossDomainOwner_succeeds() (gas: 61876)
CrossDomainOwnableThroughPortal_Test:test_depositTransaction_crossDomainOwner_succeeds() (gas: 61882)
CrossDomainOwnable_Test:test_onlyOwner_notOwner_reverts() (gas: 10530)
CrossDomainOwnable_Test:test_onlyOwner_succeeds() (gas: 34861)
CrossDomainOwnable2_Test:test_onlyOwner_notMessenger_reverts() (gas: 8416)
......@@ -92,11 +92,11 @@ L1ERC721Bridge_Test:test_finalizeBridgeERC721_notFromRemoteMessenger_reverts() (
L1ERC721Bridge_Test:test_finalizeBridgeERC721_notViaLocalMessenger_reverts() (gas: 16093)
L1ERC721Bridge_Test:test_finalizeBridgeERC721_selfToken_reverts() (gas: 17593)
L1ERC721Bridge_Test:test_finalizeBridgeERC721_succeeds() (gas: 323814)
L1StandardBridge_DepositERC20To_Test:test_depositERC20To_succeeds() (gas: 575959)
L1StandardBridge_DepositERC20_Test:test_depositERC20_succeeds() (gas: 573786)
L1StandardBridge_DepositERC20To_Test:test_depositERC20To_succeeds() (gas: 576276)
L1StandardBridge_DepositERC20_Test:test_depositERC20_succeeds() (gas: 574103)
L1StandardBridge_DepositERC20_TestFail:test_depositERC20_notEoa_reverts() (gas: 22320)
L1StandardBridge_DepositETHTo_Test:test_depositETHTo_succeeds() (gas: 324712)
L1StandardBridge_DepositETH_Test:test_depositETH_succeeds() (gas: 367539)
L1StandardBridge_DepositETHTo_Test:test_depositETHTo_succeeds() (gas: 324839)
L1StandardBridge_DepositETH_Test:test_depositETH_succeeds() (gas: 367666)
L1StandardBridge_DepositETH_TestFail:test_depositETH_notEoa_reverts() (gas: 40780)
L1StandardBridge_FinalizeBridgeETH_TestFail:test_finalizeBridgeETH_incorrectValue_reverts() (gas: 34207)
L1StandardBridge_FinalizeBridgeETH_TestFail:test_finalizeBridgeETH_sendToMessenger_reverts() (gas: 34288)
......@@ -105,9 +105,9 @@ L1StandardBridge_FinalizeERC20Withdrawal_Test:test_finalizeERC20Withdrawal_succe
L1StandardBridge_FinalizeERC20Withdrawal_TestFail:test_finalizeERC20Withdrawal_notMessenger_reverts() (gas: 31148)
L1StandardBridge_FinalizeERC20Withdrawal_TestFail:test_finalizeERC20Withdrawal_notOtherBridge_reverts() (gas: 31504)
L1StandardBridge_FinalizeETHWithdrawal_Test:test_finalizeETHWithdrawal_succeeds() (gas: 58686)
L1StandardBridge_Getter_Test:test_getters_succeeds() (gas: 31449)
L1StandardBridge_Getter_Test:test_getters_succeeds() (gas: 32151)
L1StandardBridge_Initialize_Test:test_initialize_succeeds() (gas: 22005)
L1StandardBridge_Receive_Test:test_receive_succeeds() (gas: 514475)
L1StandardBridge_Receive_Test:test_receive_succeeds() (gas: 519995)
L2CrossDomainMessenger_Test:test_messageVersion_succeeds() (gas: 8389)
L2CrossDomainMessenger_Test:test_pause_notOwner_reverts() (gas: 10837)
L2CrossDomainMessenger_Test:test_pause_succeeds() (gas: 31846)
......@@ -168,11 +168,11 @@ L2StandardBridge_Test:test_finalizeBridgeETH_sendToMessenger_reverts() (gas: 239
L2StandardBridge_Test:test_finalizeBridgeETH_sendToSelf_reverts() (gas: 23893)
L2StandardBridge_Test:test_finalizeDeposit_succeeds() (gas: 89473)
L2StandardBridge_Test:test_initialize_succeeds() (gas: 24270)
L2StandardBridge_Test:test_receive_succeeds() (gas: 131905)
L2StandardBridge_Test:test_withdrawTo_succeeds() (gas: 344660)
L2StandardBridge_Test:test_withdraw_insufficientValue_reverts() (gas: 19630)
L2StandardBridge_Test:test_receive_succeeds() (gas: 141940)
L2StandardBridge_Test:test_withdrawTo_succeeds() (gas: 344914)
L2StandardBridge_Test:test_withdraw_insufficientValue_reverts() (gas: 19627)
L2StandardBridge_Test:test_withdraw_notEOA_reverts() (gas: 251798)
L2StandardBridge_Test:test_withdraw_succeeds() (gas: 343975)
L2StandardBridge_Test:test_withdraw_succeeds() (gas: 344228)
L2ToL1MessagePasserTest:test_burn_succeeds() (gas: 112572)
L2ToL1MessagePasserTest:test_initiateWithdrawal_fromContract_succeeds() (gas: 70423)
L2ToL1MessagePasserTest:test_initiateWithdrawal_fromEOA_succeeds() (gas: 75874)
......
......@@ -91,15 +91,22 @@ contract L1StandardBridge is StandardBridge, Semver {
);
/**
* @custom:semver 1.0.0
* @custom:semver 1.1.0
*
* @param _messenger Address of the L1CrossDomainMessenger.
*/
constructor(address payable _messenger)
Semver(1, 0, 0)
Semver(1, 1, 0)
StandardBridge(_messenger, payable(Predeploys.L2_STANDARD_BRIDGE))
{}
/**
* @notice Allows EOAs to bridge ETH by sending directly to the bridge.
*/
receive() external payable override onlyEOA {
_initiateETHDeposit(msg.sender, msg.sender, RECEIVE_DEFAULT_GAS_LIMIT, bytes(""));
}
/**
* @custom:legacy
* @notice Finalizes a withdrawal of ERC20 tokens from L2.
......@@ -261,7 +268,7 @@ contract L1StandardBridge is StandardBridge, Semver {
address _from,
address _to,
uint32 _minGasLimit,
bytes calldata _extraData
bytes memory _extraData
) internal {
emit ETHDepositInitiated(_from, _to, msg.value, _extraData);
_initiateBridgeETH(_from, _to, msg.value, _minGasLimit, _extraData);
......@@ -285,7 +292,7 @@ contract L1StandardBridge is StandardBridge, Semver {
address _to,
uint256 _amount,
uint32 _minGasLimit,
bytes calldata _extraData
bytes memory _extraData
) internal {
emit ERC20DepositInitiated(_l1Token, _l2Token, _from, _to, _amount, _extraData);
_initiateBridgeERC20(_l1Token, _l2Token, _from, _to, _amount, _minGasLimit, _extraData);
......
......@@ -59,15 +59,29 @@ contract L2StandardBridge is StandardBridge, Semver {
);
/**
* @custom:semver 1.0.0
* @custom:semver 1.1.0
*
* @param _otherBridge Address of the L1StandardBridge.
*/
constructor(address payable _otherBridge)
Semver(1, 0, 0)
Semver(1, 1, 0)
StandardBridge(payable(Predeploys.L2_CROSS_DOMAIN_MESSENGER), _otherBridge)
{}
/**
* @notice Allows EOAs to bridge ETH by sending directly to the bridge.
*/
receive() external payable override onlyEOA {
_initiateWithdrawal(
Predeploys.LEGACY_ERC20_ETH,
msg.sender,
msg.sender,
msg.value,
RECEIVE_DEFAULT_GAS_LIMIT,
bytes("")
);
}
/**
* @custom:legacy
* @notice Initiates a withdrawal from L2 to L1.
......@@ -165,7 +179,7 @@ contract L2StandardBridge is StandardBridge, Semver {
address _to,
uint256 _amount,
uint32 _minGasLimit,
bytes calldata _extraData
bytes memory _extraData
) internal {
address l1Token = OptimismMintableERC20(_l2Token).l1Token();
if (_l2Token == Predeploys.LEGACY_ERC20_ETH) {
......
......@@ -16,7 +16,7 @@ contract L1StandardBridge_Getter_Test is Bridge_Initializer {
assert(L1Bridge.OTHER_BRIDGE() == L2Bridge);
assert(L1Bridge.messenger() == L1Messenger);
assert(L1Bridge.MESSENGER() == L1Messenger);
assertEq(L1Bridge.version(), "1.0.0");
assertEq(L1Bridge.version(), "1.1.0");
}
}
......@@ -38,6 +38,10 @@ contract L1StandardBridge_Receive_Test is Bridge_Initializer {
function test_receive_succeeds() external {
assertEq(address(op).balance, 0);
// The legacy event must be emitted for backwards compatibility
vm.expectEmit(true, true, true, true);
emit ETHDepositInitiated(alice, alice, 100, hex"");
vm.expectEmit(true, true, true, true);
emit ETHBridgeInitiated(alice, alice, 100, hex"");
......
......@@ -33,6 +33,9 @@ contract L2StandardBridge_Test is Bridge_Initializer {
// TODO: withdrawal hash should be computed correctly
// TODO: events from each contract
vm.expectEmit(true, true, true, true);
emit WithdrawalInitiated(address(0), Predeploys.LEGACY_ERC20_ETH, alice, alice, 100, hex"");
vm.prank(alice, alice);
(bool success, ) = address(L2Bridge).call{ value: 100 }(hex"");
assertEq(success, true);
......
......@@ -164,11 +164,10 @@ abstract contract StandardBridge {
}
/**
* @notice Allows EOAs to deposit ETH by sending directly to the bridge.
* @notice Allows EOAs to bridge ETH by sending directly to the bridge.
* Must be implemented by contracts that inherit.
*/
receive() external payable onlyEOA {
_initiateBridgeETH(msg.sender, msg.sender, msg.value, RECEIVE_DEFAULT_GAS_LIMIT, bytes(""));
}
receive() external payable virtual;
/**
* @custom:legacy
......@@ -401,7 +400,7 @@ abstract contract StandardBridge {
address _to,
uint256 _amount,
uint32 _minGasLimit,
bytes calldata _extraData
bytes memory _extraData
) internal {
if (_isOptimismMintableERC20(_localToken)) {
require(
......
......@@ -29,7 +29,7 @@
"coverage:lcov": "yarn build:differential && yarn build:fuzz && forge coverage --report lcov",
"gas-snapshot": "yarn build:differential && yarn build:fuzz && forge snapshot --no-match-test 'testDiff|testFuzz|invariant'",
"storage-snapshot": "./scripts/storage-snapshot.sh",
"validate-spacers": "hardhat validate-spacers",
"validate-spacers": "hardhat compile && hardhat validate-spacers",
"slither": "./scripts/slither.sh",
"clean": "rm -rf ./dist ./artifacts ./forge-artifacts ./cache ./tsconfig.tsbuildinfo ./tsconfig.build.tsbuildinfo ./src/contract-artifacts.ts ./test-case-generator/fuzz",
"lint:ts:check": "eslint . --max-warnings=0",
......
......@@ -56,7 +56,7 @@ interface RequiredDeployConfig {
sequencerWindowSize: number
/**
* Number of seconds (w.r.t. L1 time) that a frame can be valid when included in L1.
* Number of L1 blocks that a frame stays valid when included in L1.
*/
channelTimeout: number
......
......@@ -328,7 +328,7 @@ const check = {
signer
)
await assertSemver(L2StandardBridge, 'L2StandardBridge')
await assertSemver(L2StandardBridge, 'L2StandardBridge', '1.1.0')
const OTHER_BRIDGE = await L2StandardBridge.OTHER_BRIDGE()
assert(OTHER_BRIDGE !== hre.ethers.constants.AddressZero)
......
......@@ -479,7 +479,7 @@ transaction must also be signed by a recognized batch submitter account.
[channel-timeout]: glossary.md#channel-timeout
The channel timeout is a duration (in seconds) during which [channel frames][channel-frame may land on L1 within
The channel timeout is a duration (in L1 blocks) during which [channel frames][channel-frame] may land on L1 within
[batcher transactions][batcher-transaction].
The acceptable time range for the frames of a [channel][channel] is `[channel_id.timestamp, channel_id.timestamp +
......