Commit 81c51c8d authored by Andreas Bigger

Merge branch 'develop' into refcell/missing-events

parents 42291afc 5354f467
---
'@eth-optimism/contracts-bedrock': patch
---
Added a test for large deposit gaps
@@ -14,12 +14,12 @@ import (
 // version 1 messages have a value and the most significant
 // byte of the nonce is a 1
 type CrossDomainMessage struct {
-	Nonce    *big.Int
-	Sender   *common.Address
-	Target   *common.Address
-	Value    *big.Int
-	GasLimit *big.Int
-	Data     []byte
+	Nonce    *big.Int        `json:"nonce"`
+	Sender   *common.Address `json:"sender"`
+	Target   *common.Address `json:"target"`
+	Value    *big.Int        `json:"value"`
+	GasLimit *big.Int        `json:"gasLimit"`
+	Data     []byte          `json:"data"`
 }
 // NewCrossDomainMessage creates a CrossDomainMessage.
...
@@ -76,7 +76,7 @@ func MigrateWithdrawal(withdrawal *LegacyWithdrawal, l1CrossDomainMessenger *com
 		withdrawal.Target,
 		value,
 		new(big.Int),
-		withdrawal.Data,
+		[]byte(withdrawal.Data),
 	)
 	if err != nil {
 		return nil, fmt.Errorf("cannot abi encode relayMessage: %w", err)
...
@@ -2,11 +2,13 @@ package crossdomain
 import (
 	"errors"
+	"fmt"
 	"math/big"
 	"github.com/ethereum-optimism/optimism/op-bindings/bindings"
 	"github.com/ethereum/go-ethereum/accounts/abi"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/crypto"
 )
@@ -28,7 +30,7 @@ type Withdrawal struct {
 	Target   *common.Address `json:"target"`
 	Value    *big.Int        `json:"value"`
 	GasLimit *big.Int        `json:"gasLimit"`
-	Data     []byte          `json:"data"`
+	Data     hexutil.Bytes   `json:"data"`
 }
 // NewWithdrawal will create a Withdrawal
@@ -44,7 +46,7 @@ func NewWithdrawal(
 		Target:   target,
 		Value:    value,
 		GasLimit: gasLimit,
-		Data:     data,
+		Data:     hexutil.Bytes(data),
 	}
 }
@@ -58,9 +60,9 @@ func (w *Withdrawal) Encode() ([]byte, error) {
 		{Name: "gasLimit", Type: Uint256Type},
 		{Name: "data", Type: BytesType},
 	}
-	enc, err := args.Pack(w.Nonce, w.Sender, w.Target, w.Value, w.GasLimit, w.Data)
+	enc, err := args.Pack(w.Nonce, w.Sender, w.Target, w.Value, w.GasLimit, []byte(w.Data))
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("cannot encode withdrawal: %w", err)
 	}
 	return enc, nil
 }
@@ -110,7 +112,7 @@ func (w *Withdrawal) Decode(data []byte) error {
 	w.Target = &target
 	w.Value = value
 	w.GasLimit = gasLimit
-	w.Data = msgData
+	w.Data = hexutil.Bytes(msgData)
 	return nil
 }
@@ -150,6 +152,6 @@ func (w *Withdrawal) WithdrawalTransaction() bindings.TypesWithdrawalTransaction
 		Target:   *w.Target,
 		Value:    w.Value,
 		GasLimit: w.GasLimit,
-		Data:     w.Data,
+		Data:     []byte(w.Data),
 	}
 }
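An aside on the `hexutil.Bytes` change above: it only affects JSON encoding, not ABI packing (the explicit `[]byte(...)` conversions keep `Pack` and the withdrawal transaction identical). A minimal, standalone sketch of the difference, assuming nothing beyond go-ethereum's `hexutil` package:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	raw := []byte{0xde, 0xad, 0xbe, 0xef}

	// encoding/json renders a plain []byte as base64...
	asBytes, _ := json.Marshal(struct {
		Data []byte `json:"data"`
	}{raw})
	// ...while hexutil.Bytes renders it as a 0x-prefixed hex string.
	asHex, _ := json.Marshal(struct {
		Data hexutil.Bytes `json:"data"`
	}{raw})

	fmt.Println(string(asBytes)) // {"data":"3q2+7w=="}
	fmt.Println(string(asHex))   // {"data":"0xdeadbeef"}
}
```

The hex form is the conventional JSON representation for calldata, which is presumably the motivation for switching the struct field.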
package doc

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"

	"github.com/ethereum-optimism/optimism/op-node/metrics"
	"github.com/olekukonko/tablewriter"
	"github.com/urfave/cli"
)

var Subcommands = cli.Commands{
	{
		Name:  "metrics",
		Usage: "Dumps a list of supported metrics to stdout",
		Flags: []cli.Flag{
			cli.StringFlag{
				Name:  "format",
				Value: "markdown",
				Usage: "Output format (json|markdown)",
			},
		},
		Action: func(ctx *cli.Context) error {
			m := metrics.NewMetrics("default")
			supportedMetrics := m.Document()
			format := ctx.String("format")

			if format != "markdown" && format != "json" {
				return fmt.Errorf("invalid format: %s", format)
			}

			if format == "json" {
				enc := json.NewEncoder(os.Stdout)
				return enc.Encode(supportedMetrics)
			}

			table := tablewriter.NewWriter(os.Stdout)
			table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
			table.SetCenterSeparator("|")
			table.SetAutoWrapText(false)
			table.SetHeader([]string{"Metric", "Description", "Labels", "Type"})

			var data [][]string
			for _, metric := range supportedMetrics {
				labels := strings.Join(metric.Labels, ",")
				data = append(data, []string{metric.Name, metric.Help, labels, metric.Type})
			}
			table.AppendBulk(data)
			table.Render()
			return nil
		},
	},
}
@@ -9,6 +9,8 @@ import (
 	"syscall"
 	"time"
+	"github.com/ethereum-optimism/optimism/op-node/cmd/doc"
 	"github.com/urfave/cli"
 	opnode "github.com/ethereum-optimism/optimism/op-node"
@@ -68,6 +70,10 @@ func main() {
 			Name:        "genesis",
 			Subcommands: genesis.Subcommands,
 		},
+		{
+			Name:        "doc",
+			Subcommands: doc.Subcommands,
+		},
 	}
 	err := app.Run(os.Args)
...
@@ -23,6 +23,7 @@ require (
 	github.com/libp2p/go-libp2p-testing v0.12.0
 	github.com/multiformats/go-multiaddr v0.7.0
 	github.com/multiformats/go-multiaddr-dns v0.3.1
+	github.com/olekukonko/tablewriter v0.0.5
 	github.com/prometheus/client_golang v1.13.0
 	github.com/stretchr/testify v1.8.1
 	github.com/urfave/cli v1.22.9
@@ -115,7 +116,6 @@ require (
 	github.com/multiformats/go-multistream v0.3.3 // indirect
 	github.com/multiformats/go-varint v0.0.6 // indirect
 	github.com/nxadm/tail v1.4.8 // indirect
-	github.com/olekukonko/tablewriter v0.0.5 // indirect
 	github.com/onsi/ginkgo v1.16.5 // indirect
 	github.com/opencontainers/runtime-spec v1.0.2 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
...
 package metrics
 import (
+	"github.com/ethereum-optimism/optimism/op-service/metrics"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
 )
 // CacheMetrics implements the Metrics interface in the caching package,
@@ -34,16 +34,16 @@ func (m *CacheMetrics) CacheGet(typeLabel string, hit bool) {
 	}
 }
-func NewCacheMetrics(registry prometheus.Registerer, ns string, name string, displayName string) *CacheMetrics {
+func NewCacheMetrics(factory metrics.Factory, ns string, name string, displayName string) *CacheMetrics {
 	return &CacheMetrics{
-		SizeVec: promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{
+		SizeVec: factory.NewGaugeVec(prometheus.GaugeOpts{
 			Namespace: ns,
 			Name:      name + "_size",
 			Help:      displayName + " cache size",
 		}, []string{
 			"type",
 		}),
-		GetVec: promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
+		GetVec: factory.NewCounterVec(prometheus.CounterOpts{
 			Namespace: ns,
 			Name:      name + "_get",
 			Help:      displayName + " lookups, hitting or not",
@@ -51,7 +51,7 @@ func NewCacheMetrics(registry prometheus.Registerer, ns string, name string, dis
 			"type",
 			"hit",
 		}),
-		AddVec: promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
+		AddVec: factory.NewCounterVec(prometheus.CounterOpts{
 			Namespace: ns,
 			Name:      name + "_add",
 			Help:      displayName + " additions, evicting previous values or not",
...
@@ -4,8 +4,9 @@ import (
 	"fmt"
 	"time"
+	"github.com/ethereum-optimism/optimism/op-service/metrics"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
 )
 type EventMetrics struct {
@@ -18,14 +19,14 @@ func (e *EventMetrics) RecordEvent() {
 	e.LastTime.Set(float64(time.Now().Unix()))
 }
-func NewEventMetrics(registry prometheus.Registerer, ns string, name string, displayName string) *EventMetrics {
+func NewEventMetrics(factory metrics.Factory, ns string, name string, displayName string) *EventMetrics {
 	return &EventMetrics{
-		Total: promauto.With(registry).NewCounter(prometheus.CounterOpts{
+		Total: factory.NewCounter(prometheus.CounterOpts{
 			Namespace: ns,
 			Name:      fmt.Sprintf("%s_total", name),
 			Help:      fmt.Sprintf("Count of %s events", displayName),
 		}),
-		LastTime: promauto.With(registry).NewGauge(prometheus.GaugeOpts{
+		LastTime: factory.NewGauge(prometheus.GaugeOpts{
 			Namespace: ns,
 			Name:      fmt.Sprintf("last_%s_unix", name),
 			Help:      fmt.Sprintf("Timestamp of last %s event", displayName),
...
@@ -27,6 +27,17 @@ var (
 	ConfigUpdateEventVersion0 = common.Hash{}
 )
+var (
+	// A left-padded uint256 equal to 32.
+	oneWordUint = common.Hash{31: 32}
+	// A left-padded uint256 equal to 64.
+	twoWordUint = common.Hash{31: 64}
+	// 24 zero bytes (the padding for a uint64 in a 32 byte word)
+	uint64Padding = make([]byte, 24)
+	// 12 zero bytes (the padding for an Ethereum address in a 32 byte word)
+	addressPadding = make([]byte, 12)
+)
 // UpdateSystemConfigWithL1Receipts filters all L1 receipts to find config updates and applies the config updates to the given sysCfg
 func UpdateSystemConfigWithL1Receipts(sysCfg *eth.SystemConfig, receipts []*types.Receipt, cfg *rollup.Config) error {
 	var result error
@@ -69,50 +80,94 @@ func ProcessSystemConfigUpdateLogEvent(destSysCfg *eth.SystemConfig, ev *types.L
 	}
 	// indexed 1
 	updateType := ev.Topics[2]
-	// unindexed data
+	// Create a reader of the unindexed data
+	reader := bytes.NewReader(ev.Data)
+
+	// Counter for the number of bytes read from `reader` via `readWord`
+	countReadBytes := 0
+
+	// Helper function to read a word from the log data reader
+	readWord := func() (b [32]byte) {
+		if _, err := reader.Read(b[:]); err != nil {
+			// If there is an error reading the next 32 bytes from the reader, return an empty
+			// 32 byte array. We always check that the number of bytes read (`countReadBytes`)
+			// is equal to the expected amount at the end of each switch case.
+			return b
+		}
+		countReadBytes += 32
+		return b
+	}
+
+	// Attempt to read unindexed data
 	switch updateType {
 	case SystemConfigUpdateBatcher:
-		if len(ev.Data) != 32*3 {
-			return fmt.Errorf("expected 32*3 bytes in batcher hash update, but got %d bytes", len(ev.Data))
-		}
-		if x := common.BytesToHash(ev.Data[:32]); x != (common.Hash{31: 32}) {
-			return fmt.Errorf("expected offset to point to length location, but got %s", x)
-		}
-		if x := common.BytesToHash(ev.Data[32:64]); x != (common.Hash{31: 32}) {
-			return fmt.Errorf("expected length of 1 bytes32, but got %s", x)
-		}
-		if !bytes.Equal(ev.Data[64:64+12], make([]byte, 12)) {
-			return fmt.Errorf("expected version 0 batcher hash with zero padding, but got %x", ev.Data)
-		}
-		destSysCfg.BatcherAddr.SetBytes(ev.Data[64+12:])
+		// Read the pointer, it should always equal 32.
+		if word := readWord(); word != oneWordUint {
+			return fmt.Errorf("expected offset to point to length location, but got %s", word)
+		}
+
+		// Read the length, it should also always equal 32.
+		if word := readWord(); word != oneWordUint {
+			return fmt.Errorf("expected length to be 32 bytes, but got %s", word)
+		}
+
+		// Indexing `word` directly is always safe here, it is guaranteed to be 32 bytes in length.
+		// Check that the batcher address is correctly zero-padded.
+		word := readWord()
+		if !bytes.Equal(word[:12], addressPadding) {
+			return fmt.Errorf("expected version 0 batcher hash with zero padding, but got %x", word)
+		}
+		destSysCfg.BatcherAddr.SetBytes(word[12:])
+
+		if countReadBytes != 32*3 {
+			return NewCriticalError(fmt.Errorf("expected 32*3 bytes in batcher hash update, but got %d bytes", len(ev.Data)))
+		}
 		return nil
-	case SystemConfigUpdateGasConfig: // left padded uint8
-		if len(ev.Data) != 32*4 {
-			return fmt.Errorf("expected 32*4 bytes in GPO params update data, but got %d", len(ev.Data))
-		}
-		if x := common.BytesToHash(ev.Data[:32]); x != (common.Hash{31: 32}) {
-			return fmt.Errorf("expected offset to point to length location, but got %s", x)
-		}
-		if x := common.BytesToHash(ev.Data[32:64]); x != (common.Hash{31: 64}) {
-			return fmt.Errorf("expected length of 2 bytes32, but got %s", x)
-		}
-		copy(destSysCfg.Overhead[:], ev.Data[64:96])
-		copy(destSysCfg.Scalar[:], ev.Data[96:128])
+	case SystemConfigUpdateGasConfig:
+		// Read the pointer, it should always equal 32.
+		if word := readWord(); word != oneWordUint {
+			return fmt.Errorf("expected offset to point to length location, but got %s", word)
+		}
+
+		// Read the length, it should always equal 64.
+		if word := readWord(); word != twoWordUint {
+			return fmt.Errorf("expected length to be 64 bytes, but got %s", word)
+		}
+
+		// Set the system config's overhead and scalar values to the values read from the log
+		destSysCfg.Overhead = readWord()
+		destSysCfg.Scalar = readWord()
+
+		if countReadBytes != 32*4 {
+			return NewCriticalError(fmt.Errorf("expected 32*4 bytes in GPO params update data, but got %d", len(ev.Data)))
+		}
 		return nil
 	case SystemConfigUpdateGasLimit:
-		if len(ev.Data) != 32*3 {
-			return fmt.Errorf("expected 32*3 bytes in gas limit update, but got %d bytes", len(ev.Data))
-		}
-		if x := common.BytesToHash(ev.Data[:32]); x != (common.Hash{31: 32}) {
-			return fmt.Errorf("expected offset to point to length location, but got %s", x)
-		}
-		if x := common.BytesToHash(ev.Data[32:64]); x != (common.Hash{31: 32}) {
-			return fmt.Errorf("expected length of 1 bytes32, but got %s", x)
-		}
-		if !bytes.Equal(ev.Data[64:64+24], make([]byte, 24)) {
-			return fmt.Errorf("expected zero padding for gaslimit, but got %x", ev.Data)
-		}
-		destSysCfg.GasLimit = binary.BigEndian.Uint64(ev.Data[64+24:])
+		// Read the pointer, it should always equal 32.
+		if word := readWord(); word != oneWordUint {
+			return fmt.Errorf("expected offset to point to length location, but got %s", word)
+		}
+
+		// Read the length, it should also always equal 32.
+		if word := readWord(); word != oneWordUint {
+			return fmt.Errorf("expected length to be 32 bytes, but got %s", word)
+		}
+
+		// Indexing `word` directly is always safe here, it is guaranteed to be 32 bytes in length.
+		// Check that the gas limit is correctly zero-padded.
+		word := readWord()
+		if !bytes.Equal(word[:24], uint64Padding) {
+			return fmt.Errorf("expected zero padding for gaslimit, but got %x", word)
+		}
+		destSysCfg.GasLimit = binary.BigEndian.Uint64(word[24:])
+
+		if countReadBytes != 32*3 {
+			return NewCriticalError(fmt.Errorf("expected 32*3 bytes in gas limit update, but got %d bytes", len(ev.Data)))
+		}
 		return nil
 	case SystemConfigUpdateUnsafeBlockSigner:
 		// Ignored in derivation. This configurable applies to runtime configuration outside of the derivation.
...
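For context on the `readWord` walk above: the unindexed data of a `ConfigUpdate` event is ABI-encoded `bytes`, i.e. an offset word, a length word, and then the payload padded out to full 32-byte words. An illustrative sketch of the 96-byte batcher-update payload that the first case expects (the batcher address here is invented for the example):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical batcher address, for illustration only.
	batcher := common.HexToAddress("0x1234000000000000000000000000000000005678")

	var data []byte
	offset := common.Hash{31: 32} // offset word: points just past itself, i.e. 32
	length := common.Hash{31: 32} // length word: one 32-byte value follows
	data = append(data, offset[:]...)
	data = append(data, length[:]...)
	// The address occupies the low 20 bytes of the value word, after 12 zero bytes of padding.
	data = append(data, common.BytesToHash(batcher.Bytes()).Bytes()...)

	fmt.Println(len(data) == 32*3)                   // true: exactly the three words readWord consumes
	fmt.Println(common.BytesToAddress(data[64+12:])) // recovers the batcher address
}
```

The gas-limit case follows the same shape with 24 bytes of padding before a uint64, and the gas-config case carries two value words (overhead and scalar), hence the length word of 64.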
package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type Factory interface {
	NewCounter(opts prometheus.CounterOpts) prometheus.Counter
	NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec
	NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge
	NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec
	NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram
	NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec
	NewSummary(opts prometheus.SummaryOpts) prometheus.Summary
	NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec
	Document() []DocumentedMetric
}

type DocumentedMetric struct {
	Type   string   `json:"type"`
	Name   string   `json:"name"`
	Help   string   `json:"help"`
	Labels []string `json:"labels"`
}

type documentor struct {
	metrics []DocumentedMetric
	factory promauto.Factory
}

func With(registry *prometheus.Registry) Factory {
	return &documentor{
		factory: promauto.With(registry),
	}
}

func (d *documentor) NewCounter(opts prometheus.CounterOpts) prometheus.Counter {
	d.metrics = append(d.metrics, DocumentedMetric{
		Type: "counter",
		Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
		Help: opts.Help,
	})
	return d.factory.NewCounter(opts)
}

func (d *documentor) NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec {
	d.metrics = append(d.metrics, DocumentedMetric{
		Type:   "counter",
		Name:   fullName(opts.Namespace, opts.Subsystem, opts.Name),
		Help:   opts.Help,
		Labels: labelNames,
	})
	return d.factory.NewCounterVec(opts, labelNames)
}

func (d *documentor) NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge {
	d.metrics = append(d.metrics, DocumentedMetric{
		Type: "gauge",
		Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
		Help: opts.Help,
	})
	return d.factory.NewGauge(opts)
}

func (d *documentor) NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec {
	d.metrics = append(d.metrics, DocumentedMetric{
		Type:   "gauge",
		Name:   fullName(opts.Namespace, opts.Subsystem, opts.Name),
		Help:   opts.Help,
		Labels: labelNames,
	})
	return d.factory.NewGaugeVec(opts, labelNames)
}

func (d *documentor) NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram {
	d.metrics = append(d.metrics, DocumentedMetric{
		Type: "histogram",
		Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
		Help: opts.Help,
	})
	return d.factory.NewHistogram(opts)
}

func (d *documentor) NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec {
	d.metrics = append(d.metrics, DocumentedMetric{
		Type:   "histogram",
		Name:   fullName(opts.Namespace, opts.Subsystem, opts.Name),
		Help:   opts.Help,
		Labels: labelNames,
	})
	return d.factory.NewHistogramVec(opts, labelNames)
}

func (d *documentor) NewSummary(opts prometheus.SummaryOpts) prometheus.Summary {
	d.metrics = append(d.metrics, DocumentedMetric{
		Type: "summary",
		Name: fullName(opts.Namespace, opts.Subsystem, opts.Name),
		Help: opts.Help,
	})
	return d.factory.NewSummary(opts)
}

func (d *documentor) NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec {
	d.metrics = append(d.metrics, DocumentedMetric{
		Type:   "summary",
		Name:   fullName(opts.Namespace, opts.Subsystem, opts.Name),
		Help:   opts.Help,
		Labels: labelNames,
	})
	return d.factory.NewSummaryVec(opts, labelNames)
}

func (d *documentor) Document() []DocumentedMetric {
	return d.metrics
}

func fullName(ns, subsystem, name string) string {
	out := ns
	if subsystem != "" {
		out += "_" + subsystem
	}
	return out + "_" + name
}
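A minimal sketch of how the documenting factory is meant to be wired in, mirroring the `NewCacheMetrics`/`NewEventMetrics` changes above (the namespace and metric name are made up for illustration):

```go
package main

import (
	"encoding/json"
	"os"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/ethereum-optimism/optimism/op-service/metrics"
)

func main() {
	registry := prometheus.NewRegistry()
	factory := metrics.With(registry)

	// Metrics created through the factory register as usual and are also
	// recorded in the DocumentedMetric list.
	counter := factory.NewCounter(prometheus.CounterOpts{
		Namespace: "example", // hypothetical namespace
		Name:      "things_done_total",
		Help:      "Count of things done",
	})
	counter.Inc()

	// Essentially what the new `doc metrics --format=json` subcommand prints.
	_ = json.NewEncoder(os.Stdout).Encode(factory.Document())
}
```

Because every constructor goes through the factory, `Document()` stays in sync with whatever the service actually registers, which is what the `doc metrics` subcommand relies on.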
 # @eth-optimism/ci-builder
+## 0.4.0
+### Minor Changes
+- 05cc935b2: Bump foundry to 2ff99025abade470a795724c10648c800a41025e
 ## 0.3.8
 ### Patch Changes
...
@@ -16,7 +16,7 @@ WORKDIR /opt/foundry
 # Only diff from upstream docker image is this clone instead
 # of COPY. We select a specific commit to use.
 RUN git clone https://github.com/foundry-rs/foundry.git . \
-    && git checkout c06b53287dc23c4e5b1b3e57c937a90114bbe166
+    && git checkout 2ff99025abade470a795724c10648c800a41025e
 RUN source $HOME/.profile && \
     cargo build --release && \
...
 {
   "name": "@eth-optimism/ci-builder",
-  "version": "0.3.8",
+  "version": "0.4.0",
   "scripts": {},
   "license": "MIT",
   "dependencies": {}
...
 # @eth-optimism/foundry
+## 0.2.0
+### Minor Changes
+- 05cc935b2: Bump foundry to 2ff99025abade470a795724c10648c800a41025e
 ## 0.1.3
 ### Patch Changes
...
@@ -9,7 +9,7 @@ WORKDIR /opt/foundry
 # Only diff from upstream docker image is this clone instead
 # of COPY. We select a specific commit to use.
 RUN git clone https://github.com/foundry-rs/foundry.git . \
-    && git checkout f540aa9ebde88dce720140b332412089c2ee85b6
+    && git checkout 2ff99025abade470a795724c10648c800a41025e
 RUN source $HOME/.profile && cargo build --release \
     && strip /opt/foundry/target/release/forge \
...
 {
   "name": "@eth-optimism/foundry",
-  "version": "0.1.3",
+  "version": "0.2.0",
   "scripts": {},
   "license": "MIT",
   "dependencies": {}
...
@@ -370,14 +370,14 @@ RLPWriter_Test:test_writeUint_smallint3_succeeds() (gas: 7311)
 RLPWriter_Test:test_writeUint_smallint4_succeeds() (gas: 7312)
 RLPWriter_Test:test_writeUint_smallint_succeeds() (gas: 7290)
 RLPWriter_Test:test_writeUint_zero_succeeds() (gas: 7802)
-ResourceMetering_Test:test_meter_initialResourceParams_succeeds() (gas: 8965)
-ResourceMetering_Test:test_meter_updateNoGasDelta_succeeds() (gas: 2008101)
-ResourceMetering_Test:test_meter_updateOneEmptyBlock_succeeds() (gas: 18152)
-ResourceMetering_Test:test_meter_updateParamsNoChange_succeeds() (gas: 13911)
-ResourceMetering_Test:test_meter_updateTenEmptyBlocks_succeeds() (gas: 20900)
-ResourceMetering_Test:test_meter_updateTwoEmptyBlocks_succeeds() (gas: 20923)
-ResourceMetering_Test:test_meter_useMax_succeeds() (gas: 8017204)
-ResourceMetering_Test:test_meter_useMoreThanMax_reverts() (gas: 16023)
+ResourceMetering_Test:test_meter_initialResourceParams_succeeds() (gas: 8983)
+ResourceMetering_Test:test_meter_updateNoGasDelta_succeeds() (gas: 2008119)
+ResourceMetering_Test:test_meter_updateOneEmptyBlock_succeeds() (gas: 18148)
+ResourceMetering_Test:test_meter_updateParamsNoChange_succeeds() (gas: 13859)
+ResourceMetering_Test:test_meter_updateTenEmptyBlocks_succeeds() (gas: 20918)
+ResourceMetering_Test:test_meter_updateTwoEmptyBlocks_succeeds() (gas: 20941)
+ResourceMetering_Test:test_meter_useMax_succeeds() (gas: 8017151)
+ResourceMetering_Test:test_meter_useMoreThanMax_reverts() (gas: 16045)
 Semver_Test:test_behindProxy_succeeds() (gas: 506725)
 Semver_Test:test_version_succeeds() (gas: 9396)
 SequencerFeeVault_Test:test_constructor_succeeds() (gas: 5504)
...
@@ -195,7 +195,12 @@
 =======================
 | Name | Type | Slot | Offset | Bytes | Contract |
-|------|------|------|--------|-------|----------|
+|-----------|-------------------------------------------------|------|--------|-------|----------------------------------|
+| name      | string                                          | 0    | 0      | 32    | contracts/vendor/WETH9.sol:WETH9 |
+| symbol    | string                                          | 1    | 0      | 32    | contracts/vendor/WETH9.sol:WETH9 |
+| decimals  | uint8                                           | 2    | 0      | 1     | contracts/vendor/WETH9.sol:WETH9 |
+| balanceOf | mapping(address => uint256)                     | 3    | 0      | 32    | contracts/vendor/WETH9.sol:WETH9 |
+| allowance | mapping(address => mapping(address => uint256)) | 4    | 0      | 32    | contracts/vendor/WETH9.sol:WETH9 |
 =======================
 ➡ contracts/universal/ProxyAdmin.sol:ProxyAdmin
...
@@ -57,7 +57,7 @@ We work on this repository with a combination of [Hardhat](https://hardhat.org)
 1. Install Foundry by following [the instructions located here](https://getfoundry.sh/).
    A specific version must be used.
    ```shell
-   foundryup -C c06b53287dc23c4e5b1b3e57c937a90114bbe166
+   foundryup -C 2ff99025abade470a795724c10648c800a41025e
    ```
 2. Install node modules with yarn (v1) and Node.js (16+):
...
@@ -108,4 +108,18 @@ contract ResourceMetering_Test is CommonTest {
         vm.expectRevert("ResourceMetering: cannot buy more gas than available gas limit");
         meter.use(target * elasticity + 1);
     }
+    // Demonstrates that the resource metering arithmetic can tolerate very large gaps between
+    // deposits.
+    function testFuzz_meter_largeBlockDiff_succeeds(uint64 _amount, uint256 _blockDiff) external {
+        // This test fails if the following line is commented out.
+        // At 12 seconds per block, this number is effectively unreachable.
+        vm.assume(_blockDiff < 433576281058164217753225238677900874458691);
+
+        uint64 target = uint64(uint256(meter.TARGET_RESOURCE_LIMIT()));
+        uint64 elasticity = uint64(uint256(meter.ELASTICITY_MULTIPLIER()));
+        vm.assume(_amount < target * elasticity);
+
+        vm.roll(initialBlockNum + _blockDiff);
+        meter.use(_amount);
+    }
 }
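A quick sanity check on the `vm.assume` bound in the new fuzz test, using big-integer arithmetic (the 12-second block time is the one quoted in the test comment):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// The bound used in testFuzz_meter_largeBlockDiff_succeeds.
	blocks, _ := new(big.Int).SetString("433576281058164217753225238677900874458691", 10)

	seconds := new(big.Int).Mul(blocks, big.NewInt(12))        // 12 seconds per block
	years := new(big.Int).Div(seconds, big.NewInt(31_536_000)) // 365-day years

	fmt.Println(years) // roughly 1.6e35 years, so the bound is unreachable in practice
}
```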
@@ -48,26 +48,31 @@ task('wait-for-final-batch', 'Waits for the final batch to be submitted')
   const wait = async (contract: Contract) => {
     let height = await l2Provider.getBlockNumber()
     let totalElements = await contract.getTotalElements()
-    // The genesis block was not batch submitted so subtract 1 from the height
-    // when comparing with the total elements
-    while (totalElements !== height - 1) {
+    console.log(`  - height: ${height}`)
+    console.log(`  - totalElements: ${totalElements}`)
+
+    while (totalElements.toNumber() !== height) {
       console.log('Total elements does not match')
-      console.log(`  - real height: ${height}`)
-      console.log(`  - height: ${height - 1}`)
+      console.log(`  - height: ${height}`)
       console.log(`  - totalElements: ${totalElements}`)
+      console.log(
+        `Waiting for ${height - totalElements} elements to be submitted`
+      )
       totalElements = await contract.getTotalElements()
      height = await l2Provider.getBlockNumber()
-      await sleep(2 * 1000)
+      await sleep(5 * 1000)
    }
  }
  console.log('Waiting for the CanonicalTransactionChain...')
  await wait(CanonicalTransactionChain)
  console.log('All transaction batches have been submitted')
+  console.log()
  console.log('Waiting for the StateCommitmentChain...')
  await wait(StateCommitmentChain)
  console.log('All state root batches have been submitted')
+  console.log()
  console.log('All batches have been submitted')
 })
@@ -22,6 +22,7 @@ task('wait-for-final-deposit', 'Waits for the final deposit to be ingested')
   const l1Provider = new hre.ethers.providers.StaticJsonRpcProvider(
     args.l1RpcUrl
   )
+
   const l2Provider = new hre.ethers.providers.StaticJsonRpcProvider(
     args.l2RpcUrl
   )
@@ -63,6 +64,9 @@ task('wait-for-final-deposit', 'Waits for the final deposit to be ingested')
   console.log(`DTL shutoff block ${dtlShutoffBlock.toString()}`)
+
+  let pending = await CanonicalTransactionChain.getNumPendingQueueElements()
+  console.log(`${pending} deposits must be batch submitted`)
   // Now query the number of queue elements in the CTC
   const queueLength = await CanonicalTransactionChain.getQueueLength()
   console.log(`Total number of deposits: ${queueLength}`)
@@ -80,11 +84,10 @@ task('wait-for-final-deposit', 'Waits for the final deposit to be ingested')
       if (tx.queueOrigin === 'l1') {
         const queueIndex = BigNumber.from(tx.queueIndex).toNumber()
-        if (queueIndex === queueLength) {
+        if (queueIndex === queueLength - 1) {
           break
         }
         if (queueIndex < queueLength) {
-          console.log()
           throw new Error(
             `Missed the final deposit. queueIndex ${queueIndex}, queueLength ${queueLength}`
           )
@@ -94,4 +97,6 @@ task('wait-for-final-deposit', 'Waits for the final deposit to be ingested')
   }
   console.log('Final deposit has been ingested by l2geth')
+  pending = await CanonicalTransactionChain.getNumPendingQueueElements()
+  console.log(`${pending} deposits must be batch submitted`)
 })
@@ -3,6 +3,10 @@
 - **Chain ID**: 420
 - **Public RPC**: https://goerli.optimism.io
 - **Block Explorer**: https://goerli-optimism.etherscan.io/
+
+**Note:** This list is out of date, now that Goerli is on bedrock.
+[The valid list is here](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts-bedrock/deployments/goerli).
+
 ## Layer 1 Contracts
 <table>
 <tr>
...
 # data transport layer
+## 0.5.51
+### Patch Changes
+- 4396e187d: Fixes a bug in the DTL that would cause it to not be able to sync beyond the deposit shutoff block.
 ## 0.5.50
 ### Patch Changes
...
 {
   "private": true,
   "name": "@eth-optimism/data-transport-layer",
-  "version": "0.5.50",
+  "version": "0.5.51",
   "description": "[Optimism] Service for shuttling data from L1 into L2",
   "main": "dist/index",
   "types": "dist/index",
...
@@ -268,9 +268,8 @@ export class L1IngestionService extends BaseService<L1IngestionServiceOptions> {
       )
     }
-      // I prefer to do this in serial to avoid non-determinism. We could have a discussion about
-      // using Promise.all if necessary, but I don't see a good reason to do so unless parsing is
-      // really, really slow for all event types.
+      // We should not sync TransactionEnqueued events beyond the deposit shutoff block.
+      if (depositTargetL1Block >= highestSyncedL1Block) {
       await this._syncEvents(
         'CanonicalTransactionChain',
         'TransactionEnqueued',
@@ -278,6 +277,7 @@ export class L1IngestionService extends BaseService<L1IngestionServiceOptions> {
         depositTargetL1Block,
         handleEventsTransactionEnqueued
       )
+      }
       await this._syncEvents(
         'CanonicalTransactionChain',
...