Commit ce62df90 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into jg/batcher_l2_reorg_tests

parents bb77ca44 29787f64
---
'@eth-optimism/atst': minor
---
Make type parsing more intuitive
---
"@eth-optimism/l2geth-exporter": patch
---
build(deps): bump golang.org/x/crypto from 0.0.0-20220307211146-efcb8507fb70 to 0.1.0 in /l2geth-exporter
---
"@eth-optimism/gas-oracle": patch
---
build(deps): bump golang.org/x/net from 0.0.0-20211112202133-69e39bad7dc2 to 0.7.0 in /gas-oracle
---
"@eth-optimism/gas-oracle": patch
---
build(deps): bump golang.org/x/sys from 0.0.0-20220310020820-b874c991c1a5 to 0.1.0 in /gas-oracle
---
'@eth-optimism/sdk': major
'@eth-optimism/contracts-bedrock': minor
---
Moves `FINALIZATION_PERIOD_SECONDS` from the `OptimismPortal` to the `L2OutputOracle` & ensures the `CHALLENGER` key cannot delete finalized outputs.
---
"@eth-optimism/l2geth-exporter": patch
---
build(deps): bump golang.org/x/sys from 0.0.0-20220310020820-b874c991c1a5 to 0.1.0 in /l2geth-exporter
---
'@eth-optimism/chain-mon': minor
---
Added a withdrawal monitoring service
---
'@eth-optimism/atst': patch
---
Add new atst package
---
"@eth-optimism/batch-submitter-service": patch
---
build(deps): bump golang.org/x/crypto from 0.0.0-20220307211146-efcb8507fb70 to 0.1.0 in /batch-submitter
---
'@eth-optimism/atst': patch
---
Release ATST
...@@ -525,8 +525,7 @@ jobs: ...@@ -525,8 +525,7 @@ jobs:
# constraint that gotestsum does not currently (nor likely will) accept files from different packages when building. # constraint that gotestsum does not currently (nor likely will) accept files from different packages when building.
OP_TESTLOG_DISABLE_COLOR=true OP_E2E_DISABLE_PARALLEL=true OP_E2E_USE_HTTP=<<parameters.use_http>> gotestsum \ OP_TESTLOG_DISABLE_COLOR=true OP_E2E_DISABLE_PARALLEL=true OP_E2E_USE_HTTP=<<parameters.use_http>> gotestsum \
--format=standard-verbose --junitfile=/tmp/test-results/<<parameters.module>>_http_<<parameters.use_http>>.xml \ --format=standard-verbose --junitfile=/tmp/test-results/<<parameters.module>>_http_<<parameters.use_http>>.xml \
./... \ -- -timeout=20m ./...
-- -timeout=20m
working_directory: <<parameters.module>> working_directory: <<parameters.module>>
- store_test_results: - store_test_results:
path: /tmp/test-results path: /tmp/test-results
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
/packages/migration-data @ethereum-optimism/legacy-reviewers /packages/migration-data @ethereum-optimism/legacy-reviewers
/packages/replica-healthcheck @ethereum-optimism/legacy-reviewers /packages/replica-healthcheck @ethereum-optimism/legacy-reviewers
/packages/sdk @ethereum-optimism/ecopod /packages/sdk @ethereum-optimism/ecopod
/packages/atst @ethereum-optimism/ecopod
# Bedrock codebases # Bedrock codebases
/bedrock-devnet @ethereum-optimism/go-reviewers /bedrock-devnet @ethereum-optimism/go-reviewers
......
# @eth-optimism/batch-submitter-service # @eth-optimism/batch-submitter-service
## 0.1.15
### Patch Changes
- 1d8d50c42: build(deps): bump golang.org/x/crypto from 0.0.0-20220307211146-efcb8507fb70 to 0.1.0 in /batch-submitter
## 0.1.14 ## 0.1.14
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/batch-submitter-service", "name": "@eth-optimism/batch-submitter-service",
"version": "0.1.14", "version": "0.1.15",
"private": true, "private": true,
"devDependencies": {} "devDependencies": {}
} }
# @eth-optimism/gas-oracle # @eth-optimism/gas-oracle
## 0.1.13
### Patch Changes
- 9b61c84c9: build(deps): bump golang.org/x/net from 0.0.0-20211112202133-69e39bad7dc2 to 0.7.0 in /gas-oracle
- f13b31e04: build(deps): bump golang.org/x/sys from 0.0.0-20220310020820-b874c991c1a5 to 0.1.0 in /gas-oracle
## 0.1.12 ## 0.1.12
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/gas-oracle", "name": "@eth-optimism/gas-oracle",
"version": "0.1.12", "version": "0.1.13",
"private": true, "private": true,
"devDependencies": {} "devDependencies": {}
} }
...@@ -191,6 +191,6 @@ require ( ...@@ -191,6 +191,6 @@ require (
nhooyr.io/websocket v1.8.7 // indirect nhooyr.io/websocket v1.8.7 // indirect
) )
replace github.com/ethereum/go-ethereum v1.11.2 => github.com/ethereum-optimism/op-geth v1.11.2-aea0402.0.20230227230209-0705cf1b7df9 replace github.com/ethereum/go-ethereum v1.11.2 => github.com/ethereum-optimism/op-geth v1.11.2-aea0402.0.20230301232322-c407b2a217b7
//replace github.com/ethereum/go-ethereum v1.11.2 => ../go-ethereum //replace github.com/ethereum/go-ethereum v1.11.2 => ../go-ethereum
...@@ -217,8 +217,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 ...@@ -217,8 +217,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc=
github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs=
github.com/ethereum-optimism/op-geth v1.11.2-aea0402.0.20230227230209-0705cf1b7df9 h1:O13fqCZYW+HiGVs+UFKtMUHnCMpWR7XcyTPijm9IAiY= github.com/ethereum-optimism/op-geth v1.11.2-aea0402.0.20230301232322-c407b2a217b7 h1:bkttBXCRDv2Mp4VoGBglr4BjS7icIuN8HS5ZFpeKfvE=
github.com/ethereum-optimism/op-geth v1.11.2-aea0402.0.20230227230209-0705cf1b7df9/go.mod h1:/tjlXxOaovIyuF0l6+wCzr6AtDb3lYWTymmpQAQcqu8= github.com/ethereum-optimism/op-geth v1.11.2-aea0402.0.20230301232322-c407b2a217b7/go.mod h1:/tjlXxOaovIyuF0l6+wCzr6AtDb3lYWTymmpQAQcqu8=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
......
...@@ -4,7 +4,7 @@ go 1.17 ...@@ -4,7 +4,7 @@ go 1.17
replace ( replace (
github.com/ethereum/go-ethereum v1.10.26 => github.com/ethereum-optimism/op-geth v0.0.0-20230214215134-401b7fd3309b github.com/ethereum/go-ethereum v1.10.26 => github.com/ethereum-optimism/op-geth v0.0.0-20230214215134-401b7fd3309b
github.com/ethereum/go-ethereum v1.11.2 => github.com/ethereum-optimism/op-geth v1.11.2-aea0402.0.20230227230209-0705cf1b7df9 github.com/ethereum/go-ethereum v1.11.2 => github.com/ethereum-optimism/op-geth v1.11.2-aea0402.0.20230301232322-c407b2a217b7
) )
require ( require (
......
...@@ -30,10 +30,10 @@ ...@@ -30,10 +30,10 @@
"devDependencies": { "devDependencies": {
"@babel/eslint-parser": "^7.5.4", "@babel/eslint-parser": "^7.5.4",
"@eth-optimism/contracts": "^0.5.40", "@eth-optimism/contracts": "^0.5.40",
"@eth-optimism/contracts-bedrock": "0.12.1", "@eth-optimism/contracts-bedrock": "0.13.0",
"@eth-optimism/contracts-periphery": "^1.0.7", "@eth-optimism/contracts-periphery": "^1.0.7",
"@eth-optimism/core-utils": "0.12.0", "@eth-optimism/core-utils": "0.12.0",
"@eth-optimism/sdk": "1.10.4", "@eth-optimism/sdk": "2.0.0",
"@ethersproject/abstract-provider": "^5.7.0", "@ethersproject/abstract-provider": "^5.7.0",
"@ethersproject/providers": "^5.7.0", "@ethersproject/providers": "^5.7.0",
"@ethersproject/transactions": "^5.7.0", "@ethersproject/transactions": "^5.7.0",
......
# @eth-optimism/l2geth-exporter # @eth-optimism/l2geth-exporter
## 0.0.8
### Patch Changes
- e085354a8: build(deps): bump golang.org/x/crypto from 0.0.0-20220307211146-efcb8507fb70 to 0.1.0 in /l2geth-exporter
- 9bee5c8cc: build(deps): bump golang.org/x/sys from 0.0.0-20220310020820-b874c991c1a5 to 0.1.0 in /l2geth-exporter
## 0.0.7 ## 0.0.7
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/l2geth-exporter", "name": "@eth-optimism/l2geth-exporter",
"version": "0.0.7", "version": "0.0.8",
"private": true, "private": true,
"devDependencies": {} "devDependencies": {}
} }
...@@ -18,6 +18,15 @@ type ChannelConfig struct { ...@@ -18,6 +18,15 @@ type ChannelConfig struct {
// The maximum number of L1 blocks that the inclusion transactions of a // The maximum number of L1 blocks that the inclusion transactions of a
// channel's frames can span. // channel's frames can span.
ChannelTimeout uint64 ChannelTimeout uint64
// Builder Config
// MaxChannelDuration is the maximum duration (in #L1-blocks) to keep the
// channel open. This allows control over how long a channel is kept open
// during times of low transaction volume.
//
// If 0, duration checks are disabled.
MaxChannelDuration uint64
// The batcher tx submission safety margin (in #L1-blocks) to subtract from // The batcher tx submission safety margin (in #L1-blocks) to subtract from
// a channel's timeout and sequencing window, to guarantee safe inclusion of // a channel's timeout and sequencing window, to guarantee safe inclusion of
// a channel on L1. // a channel on L1.
...@@ -50,12 +59,17 @@ func (c ChannelConfig) InputThreshold() uint64 { ...@@ -50,12 +59,17 @@ func (c ChannelConfig) InputThreshold() uint64 {
type channelBuilder struct { type channelBuilder struct {
cfg ChannelConfig cfg ChannelConfig
// L1 block timestamp of combined channel & sequencing window timeout. 0 if // L1 block number timeout of combined
// no timeout set yet. // - channel duration timeout,
// - consensus channel timeout,
// - sequencing window timeout.
// 0 if no block number timeout set yet.
timeout uint64 timeout uint64
// reason for currently set timeout
timeoutReason error
// marked as full if a) max RLP input bytes, b) max num frames or c) max // Reason for the channel being full. Set by setFullErr so it's always
// allowed frame index (uint16) has been reached // guaranteed to be a ChannelFullError wrapping the specific reason.
fullErr error fullErr error
// current channel // current channel
co *derive.ChannelOut co *derive.ChannelOut
...@@ -102,28 +116,6 @@ func (c *channelBuilder) Reset() error { ...@@ -102,28 +116,6 @@ func (c *channelBuilder) Reset() error {
return c.co.Reset() return c.co.Reset()
} }
// FramePublished calculates the submission timeout of this channel from the
// given frame inclusion L1-block number. If an older frame tx has already been
// seen, the timeout is not updated.
func (c *channelBuilder) FramePublished(l1BlockNum uint64) {
timeout := l1BlockNum + c.cfg.ChannelTimeout - c.cfg.SubSafetyMargin
c.updateTimeout(timeout)
}
// TimedOut returns whether the passed block number is after the channel timeout
// block. If no block timeout is set yet, it returns false.
func (c *channelBuilder) TimedOut(blockNum uint64) bool {
return c.timeout != 0 && blockNum >= c.timeout
}
// CheckTimeout checks if the channel is timed out at the given block number and
// in this case marks the channel as full with reason ErrChannelTimedOut.
func (c *channelBuilder) CheckTimeout(blockNum uint64) {
if !c.IsFull() && c.TimedOut(blockNum) {
c.setFullErr(ErrChannelTimedOut)
}
}
// AddBlock adds a block to the channel compression pipeline. IsFull should be // AddBlock adds a block to the channel compression pipeline. IsFull should be
// called afterwards to test whether the channel is full. If full, a new channel // called afterwards to test whether the channel is full. If full, a new channel
// must be started. // must be started.
...@@ -151,7 +143,7 @@ func (c *channelBuilder) AddBlock(block *types.Block) error { ...@@ -151,7 +143,7 @@ func (c *channelBuilder) AddBlock(block *types.Block) error {
c.blocks = append(c.blocks, block) c.blocks = append(c.blocks, block)
c.updateSwTimeout(batch) c.updateSwTimeout(batch)
if c.InputTargetReached() { if c.inputTargetReached() {
c.setFullErr(ErrInputTargetReached) c.setFullErr(ErrInputTargetReached)
// Adding this block still worked, so don't return error, just mark as full // Adding this block still worked, so don't return error, just mark as full
} }
...@@ -159,25 +151,76 @@ func (c *channelBuilder) AddBlock(block *types.Block) error { ...@@ -159,25 +151,76 @@ func (c *channelBuilder) AddBlock(block *types.Block) error {
return nil return nil
} }
// Timeout management
// RegisterL1Block should be called whenever a new L1-block is seen.
//
// It ensures proper tracking of all possible timeouts (max channel duration,
// close to consensus channel timeout, close to end of sequencing window).
func (c *channelBuilder) RegisterL1Block(l1BlockNum uint64) {
c.updateDurationTimeout(l1BlockNum)
c.checkTimeout(l1BlockNum)
}
// FramePublished should be called whenever a frame of this channel has been
// published, with the L1-block number of the block that the frame was included
// in.
func (c *channelBuilder) FramePublished(l1BlockNum uint64) {
timeout := l1BlockNum + c.cfg.ChannelTimeout - c.cfg.SubSafetyMargin
c.updateTimeout(timeout, ErrChannelTimeoutClose)
}
// updateDurationTimeout updates the block timeout with the channel duration
// timeout derived from the given L1-block number. The timeout is only moved
// forward if the derived timeout is earlier than the currently set timeout.
//
// It does nothing if the max channel duration is set to 0.
func (c *channelBuilder) updateDurationTimeout(l1BlockNum uint64) {
if c.cfg.MaxChannelDuration == 0 {
return
}
timeout := l1BlockNum + c.cfg.MaxChannelDuration
c.updateTimeout(timeout, ErrMaxDurationReached)
}
// updateSwTimeout updates the block timeout with the sequencer window timeout // updateSwTimeout updates the block timeout with the sequencer window timeout
// derived from the batch's origin L1 block. The timeout is only moved forward // derived from the batch's origin L1 block. The timeout is only moved forward
// if the derived sequencer window timeout is earlier than the current. // if the derived sequencer window timeout is earlier than the currently set
// timeout.
func (c *channelBuilder) updateSwTimeout(batch *derive.BatchData) { func (c *channelBuilder) updateSwTimeout(batch *derive.BatchData) {
timeout := uint64(batch.EpochNum) + c.cfg.SeqWindowSize - c.cfg.SubSafetyMargin timeout := uint64(batch.EpochNum) + c.cfg.SeqWindowSize - c.cfg.SubSafetyMargin
c.updateTimeout(timeout) c.updateTimeout(timeout, ErrSeqWindowClose)
} }
// updateTimeout updates the timeout block to the given block number if it is // updateTimeout updates the timeout block to the given block number if it is
// earlier then the current block timeout, or if it still unset. // earlier than the current block timeout, or if it is still unset.
func (c *channelBuilder) updateTimeout(timeoutBlockNum uint64) { //
// If the timeout is updated, the provided reason will be set as the channel
// full error reason in case the timeout is hit in the future.
func (c *channelBuilder) updateTimeout(timeoutBlockNum uint64, reason error) {
if c.timeout == 0 || c.timeout > timeoutBlockNum { if c.timeout == 0 || c.timeout > timeoutBlockNum {
c.timeout = timeoutBlockNum c.timeout = timeoutBlockNum
c.timeoutReason = reason
} }
} }
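For intuition, here is a minimal standalone sketch (illustrative values and simplified types, not the batcher's actual structs) of the earliest-wins rule that `updateTimeout` implements: whichever candidate timeout block is smallest wins, and its reason becomes the eventual channel-full error.

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errMaxDurationReached  = errors.New("max channel duration reached")
	errChannelTimeoutClose = errors.New("close to channel timeout")
	errSeqWindowClose      = errors.New("close to sequencer window timeout")
)

// tracker mirrors the earliest-wins bookkeeping: keep the smallest candidate
// timeout block number and remember why it was set.
type tracker struct {
	timeout uint64
	reason  error
}

func (t *tracker) update(timeoutBlock uint64, reason error) {
	if t.timeout == 0 || t.timeout > timeoutBlock {
		t.timeout = timeoutBlock
		t.reason = reason
	}
}

func main() {
	// Assumed values: MaxChannelDuration=10, SeqWindowSize=15, ChannelTimeout=40,
	// SubSafetyMargin=4; channel opened at L1 block 100 with origin epoch 95,
	// first frame included at block 105.
	var t tracker
	t.update(100+10, errMaxDurationReached)    // duration timeout: 110
	t.update(95+15-4, errSeqWindowClose)       // sequencing window timeout: 106
	t.update(105+40-4, errChannelTimeoutClose) // frame-based channel timeout: 141

	fmt.Println(t.timeout, t.reason) // 106 close to sequencer window timeout
}
```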
// InputTargetReached says whether the target amount of input data has been // checkTimeout checks if the channel is timed out at the given block number and
// in this case marks the channel as full, if it wasn't full already.
func (c *channelBuilder) checkTimeout(blockNum uint64) {
if !c.IsFull() && c.TimedOut(blockNum) {
c.setFullErr(c.timeoutReason)
}
}
// TimedOut returns whether the passed block number is after the timeout block
// number. If no block timeout is set yet, it returns false.
func (c *channelBuilder) TimedOut(blockNum uint64) bool {
return c.timeout != 0 && blockNum >= c.timeout
}
// inputTargetReached says whether the target amount of input data has been
// reached in this channel builder. No more blocks can be added afterwards. // reached in this channel builder. No more blocks can be added afterwards.
func (c *channelBuilder) InputTargetReached() bool { func (c *channelBuilder) inputTargetReached() bool {
return uint64(c.co.InputBytes()) >= c.cfg.InputThreshold() return uint64(c.co.InputBytes()) >= c.cfg.InputThreshold()
} }
...@@ -190,14 +233,16 @@ func (c *channelBuilder) IsFull() bool { ...@@ -190,14 +233,16 @@ func (c *channelBuilder) IsFull() bool {
// FullErr returns the reason why the channel is full. If not full yet, it // FullErr returns the reason why the channel is full. If not full yet, it
// returns nil. // returns nil.
// //
// It returns a ChannelFullError wrapping one of four possible reasons for the // It returns a ChannelFullError wrapping one of six possible reasons for the
// channel being full: // channel being full:
// - ErrInputTargetReached if the target amount of input data has been reached, // - ErrInputTargetReached if the target amount of input data has been reached,
// - derive.MaxRLPBytesPerChannel if the general maximum amount of input data // - derive.MaxRLPBytesPerChannel if the general maximum amount of input data
// would have been exceeded by the latest AddBlock call, // would have been exceeded by the latest AddBlock call,
// - ErrMaxFrameIndex if the maximum number of frames has been generated // - ErrMaxFrameIndex if the maximum number of frames has been generated
// (uint16), // (uint16),
// - ErrChannelTimedOut if the batcher channel timeout has been reached. // - ErrMaxDurationReached if the maximum channel duration has been reached,
// - ErrChannelTimeoutClose if the consensus channel timeout is too close,
// - ErrSeqWindowClose if the end of the sequencer window is too close.
func (c *channelBuilder) FullErr() error { func (c *channelBuilder) FullErr() error {
return c.fullErr return c.fullErr
} }
...@@ -210,9 +255,9 @@ func (c *channelBuilder) setFullErr(err error) { ...@@ -210,9 +255,9 @@ func (c *channelBuilder) setFullErr(err error) {
// after AddBlock and before iterating over available frames with HasFrame and // after AddBlock and before iterating over available frames with HasFrame and
// NextFrame. // NextFrame.
// //
// If the input data target hasn't been reached yet, it will conservatively only // If the channel isn't full yet, it will conservatively only
// pull readily available frames from the compression output. // pull readily available frames from the compression output.
// If the target has been reached, the channel is closed and all remaining // If it is full, the channel is closed and all remaining
// frames will be created, possibly with a small leftover frame. // frames will be created, possibly with a small leftover frame.
func (c *channelBuilder) OutputFrames() error { func (c *channelBuilder) OutputFrames() error {
if c.IsFull() { if c.IsFull() {
...@@ -320,7 +365,9 @@ func (c *channelBuilder) PushFrame(id txID, frame []byte) { ...@@ -320,7 +365,9 @@ func (c *channelBuilder) PushFrame(id txID, frame []byte) {
var ( var (
ErrInputTargetReached = errors.New("target amount of input data reached") ErrInputTargetReached = errors.New("target amount of input data reached")
ErrMaxFrameIndex = errors.New("max frame index reached (uint16)") ErrMaxFrameIndex = errors.New("max frame index reached (uint16)")
ErrChannelTimedOut = errors.New("channel timed out") ErrMaxDurationReached = errors.New("max channel duration reached")
ErrChannelTimeoutClose = errors.New("close to channel timeout")
ErrSeqWindowClose = errors.New("close to sequencer window timeout")
) )
type ChannelFullError struct { type ChannelFullError struct {
......
...@@ -188,9 +188,6 @@ func (s *channelManager) nextTxData() ([]byte, txID, error) { ...@@ -188,9 +188,6 @@ func (s *channelManager) nextTxData() ([]byte, txID, error) {
// It currently only uses one frame per transaction. If the pending channel is // It currently only uses one frame per transaction. If the pending channel is
// full, it only returns the remaining frames of this channel until it got // full, it only returns the remaining frames of this channel until it got
// successfully fully sent to L1. It returns io.EOF if there's no pending frame. // successfully fully sent to L1. It returns io.EOF if there's no pending frame.
//
// It currently ignores the l1Head provided and doesn't track channel timeouts
// or the sequencer window span yet.
func (s *channelManager) TxData(l1Head eth.BlockID) ([]byte, txID, error) { func (s *channelManager) TxData(l1Head eth.BlockID) ([]byte, txID, error) {
dataPending := s.pendingChannel != nil && s.pendingChannel.HasFrame() dataPending := s.pendingChannel != nil && s.pendingChannel.HasFrame()
s.log.Debug("Requested tx data", "l1Head", l1Head, "data_pending", dataPending, "blocks_pending", len(s.blocks)) s.log.Debug("Requested tx data", "l1Head", l1Head, "data_pending", dataPending, "blocks_pending", len(s.blocks))
...@@ -211,12 +208,15 @@ func (s *channelManager) TxData(l1Head eth.BlockID) ([]byte, txID, error) { ...@@ -211,12 +208,15 @@ func (s *channelManager) TxData(l1Head eth.BlockID) ([]byte, txID, error) {
return nil, txID{}, err return nil, txID{}, err
} }
s.checkTimeout(l1Head)
if err := s.processBlocks(); err != nil { if err := s.processBlocks(); err != nil {
return nil, txID{}, err return nil, txID{}, err
} }
// Register current L1 head only after all pending blocks have been
// processed. Even if a timeout is triggered now, it is better to include
// all pending blocks in this channel for submission.
s.registerL1Block(l1Head)
if err := s.pendingChannel.OutputFrames(); err != nil { if err := s.pendingChannel.OutputFrames(); err != nil {
return nil, txID{}, fmt.Errorf("creating frames with channel builder: %w", err) return nil, txID{}, fmt.Errorf("creating frames with channel builder: %w", err)
} }
...@@ -239,14 +239,13 @@ func (s *channelManager) ensurePendingChannel(l1Head eth.BlockID) error { ...@@ -239,14 +239,13 @@ func (s *channelManager) ensurePendingChannel(l1Head eth.BlockID) error {
return nil return nil
} }
// checkTimeout checks the block timeout on the pending channel. // registerL1Block registers the given block at the pending channel.
func (s *channelManager) checkTimeout(l1Head eth.BlockID) { func (s *channelManager) registerL1Block(l1Head eth.BlockID) {
s.pendingChannel.CheckTimeout(l1Head.Number) s.pendingChannel.RegisterL1Block(l1Head.Number)
ferr := s.pendingChannel.FullErr() s.log.Debug("new L1-block registered at channel builder",
s.log.Debug("timeout triggered",
"l1Head", l1Head, "l1Head", l1Head,
"timed_out", errors.Is(ferr, ErrChannelTimedOut), "channel_full", s.pendingChannel.IsFull(),
"full_reason", ferr, "full_reason", s.pendingChannel.FullErr(),
) )
} }
......
...@@ -47,6 +47,16 @@ type CLIConfig struct { ...@@ -47,6 +47,16 @@ type CLIConfig struct {
// RollupRpc is the HTTP provider URL for the L2 rollup node. // RollupRpc is the HTTP provider URL for the L2 rollup node.
RollupRpc string RollupRpc string
// MaxChannelDuration is the maximum duration (in #L1-blocks) to keep a
// channel open. This allows batcher transactions to be sent more eagerly
// during times of low L2 transaction volume. Note that the effective
// L1-block distance between batcher transactions is then MaxChannelDuration
// + NumConfirmations because the batcher waits for NumConfirmations blocks
// after sending a batcher tx and only then starts a new channel.
//
// If 0, duration checks are disabled.
MaxChannelDuration uint64
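// For example (illustrative, assumed values): with MaxChannelDuration = 6 and
// NumConfirmations = 2, batcher transactions end up roughly 8 L1 blocks apart
// during periods of low L2 activity.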
// The batcher tx submission safety margin (in #L1-blocks) to subtract from // The batcher tx submission safety margin (in #L1-blocks) to subtract from
// a channel's timeout and sequencing window, to guarantee safe inclusion of // a channel's timeout and sequencing window, to guarantee safe inclusion of
// a channel on L1. // a channel on L1.
...@@ -143,6 +153,7 @@ func NewConfig(ctx *cli.Context) CLIConfig { ...@@ -143,6 +153,7 @@ func NewConfig(ctx *cli.Context) CLIConfig {
ResubmissionTimeout: ctx.GlobalDuration(flags.ResubmissionTimeoutFlag.Name), ResubmissionTimeout: ctx.GlobalDuration(flags.ResubmissionTimeoutFlag.Name),
/* Optional Flags */ /* Optional Flags */
MaxChannelDuration: ctx.GlobalUint64(flags.MaxChannelDurationFlag.Name),
MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeBytesFlag.Name), MaxL1TxSize: ctx.GlobalUint64(flags.MaxL1TxSizeBytesFlag.Name),
TargetL1TxSize: ctx.GlobalUint64(flags.TargetL1TxSizeBytesFlag.Name), TargetL1TxSize: ctx.GlobalUint64(flags.TargetL1TxSizeBytesFlag.Name),
TargetNumFrames: ctx.GlobalInt(flags.TargetNumFramesFlag.Name), TargetNumFrames: ctx.GlobalInt(flags.TargetNumFramesFlag.Name),
......
...@@ -90,6 +90,7 @@ func NewBatchSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger) (*BatchSubmitte ...@@ -90,6 +90,7 @@ func NewBatchSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger) (*BatchSubmitte
Channel: ChannelConfig{ Channel: ChannelConfig{
SeqWindowSize: rcfg.SeqWindowSize, SeqWindowSize: rcfg.SeqWindowSize,
ChannelTimeout: rcfg.ChannelTimeout, ChannelTimeout: rcfg.ChannelTimeout,
MaxChannelDuration: cfg.MaxChannelDuration,
SubSafetyMargin: cfg.SubSafetyMargin, SubSafetyMargin: cfg.SubSafetyMargin,
MaxFrameSize: cfg.MaxL1TxSize - 1, // subtract 1 byte for version MaxFrameSize: cfg.MaxL1TxSize - 1, // subtract 1 byte for version
TargetFrameSize: cfg.TargetL1TxSize - 1, // subtract 1 byte for version TargetFrameSize: cfg.TargetL1TxSize - 1, // subtract 1 byte for version
......
...@@ -75,6 +75,12 @@ var ( ...@@ -75,6 +75,12 @@ var (
/* Optional flags */ /* Optional flags */
MaxChannelDurationFlag = cli.Uint64Flag{
Name: "max-channel-duration",
Usage: "The maximum duration of L1-blocks to keep a channel open. 0 to disable.",
Value: 0,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "MAX_CHANNEL_DURATION"),
}
MaxL1TxSizeBytesFlag = cli.Uint64Flag{ MaxL1TxSizeBytesFlag = cli.Uint64Flag{
Name: "max-l1-tx-size-bytes", Name: "max-l1-tx-size-bytes",
Usage: "The maximum size of a batch tx submitted to L1.", Usage: "The maximum size of a batch tx submitted to L1.",
...@@ -96,7 +102,7 @@ var ( ...@@ -96,7 +102,7 @@ var (
ApproxComprRatioFlag = cli.Float64Flag{ ApproxComprRatioFlag = cli.Float64Flag{
Name: "approx-compr-ratio", Name: "approx-compr-ratio",
Usage: "The approximate compression ratio (<= 1.0)", Usage: "The approximate compression ratio (<= 1.0)",
Value: 1.0, Value: 0.4,
EnvVar: opservice.PrefixEnvVar(envVarPrefix, "APPROX_COMPR_RATIO"), EnvVar: opservice.PrefixEnvVar(envVarPrefix, "APPROX_COMPR_RATIO"),
} }
StoppedFlag = cli.BoolFlag{ StoppedFlag = cli.BoolFlag{
...@@ -135,6 +141,7 @@ var requiredFlags = []cli.Flag{ ...@@ -135,6 +141,7 @@ var requiredFlags = []cli.Flag{
} }
var optionalFlags = []cli.Flag{ var optionalFlags = []cli.Flag{
MaxChannelDurationFlag,
MaxL1TxSizeBytesFlag, MaxL1TxSizeBytesFlag,
TargetL1TxSizeBytesFlag, TargetL1TxSizeBytesFlag,
TargetNumFramesFlag, TargetNumFramesFlag,
......
...@@ -323,11 +323,12 @@ func TestMigration(t *testing.T) { ...@@ -323,11 +323,12 @@ func TestMigration(t *testing.T) {
L1EthRpc: forkedL1URL, L1EthRpc: forkedL1URL,
L2EthRpc: gethNode.WSEndpoint(), L2EthRpc: gethNode.WSEndpoint(),
RollupRpc: rollupNode.HTTPEndpoint(), RollupRpc: rollupNode.HTTPEndpoint(),
MaxChannelDuration: 1,
MaxL1TxSize: 120_000, MaxL1TxSize: 120_000,
TargetL1TxSize: 624, TargetL1TxSize: 100_000,
TargetNumFrames: 1, TargetNumFrames: 1,
ApproxComprRatio: 1.0, ApproxComprRatio: 0.4,
SubSafetyMargin: testSafetyMargin(deployCfg), SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond, PollInterval: 50 * time.Millisecond,
NumConfirmations: 1, NumConfirmations: 1,
ResubmissionTimeout: 5 * time.Second, ResubmissionTimeout: 5 * time.Second,
......
...@@ -531,11 +531,12 @@ func (cfg SystemConfig) Start() (*System, error) { ...@@ -531,11 +531,12 @@ func (cfg SystemConfig) Start() (*System, error) {
L1EthRpc: sys.Nodes["l1"].WSEndpoint(), L1EthRpc: sys.Nodes["l1"].WSEndpoint(),
L2EthRpc: sys.Nodes["sequencer"].WSEndpoint(), L2EthRpc: sys.Nodes["sequencer"].WSEndpoint(),
RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(), RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(),
MaxChannelDuration: 1,
MaxL1TxSize: 120_000, MaxL1TxSize: 120_000,
TargetL1TxSize: 160, //624, TargetL1TxSize: 100_000,
TargetNumFrames: 1, TargetNumFrames: 1,
ApproxComprRatio: 1.0, ApproxComprRatio: 0.4,
SubSafetyMargin: testSafetyMargin(cfg.DeployConfig), SubSafetyMargin: 4,
PollInterval: 50 * time.Millisecond, PollInterval: 50 * time.Millisecond,
NumConfirmations: 1, NumConfirmations: 1,
ResubmissionTimeout: 5 * time.Second, ResubmissionTimeout: 5 * time.Second,
...@@ -575,24 +576,3 @@ func hexPriv(in *ecdsa.PrivateKey) string { ...@@ -575,24 +576,3 @@ func hexPriv(in *ecdsa.PrivateKey) string {
b := e2eutils.EncodePrivKey(in) b := e2eutils.EncodePrivKey(in)
return hexutil.Encode(b) return hexutil.Encode(b)
} }
// returns a safety margin that heuristically leads to a short channel lifetime
// of netChannelDuration. In current testing setups, we want channels to close
// quickly to have a low latency. We don't optimize for gas consumption.
func testSafetyMargin(cfg *genesis.DeployConfig) uint64 {
// target channel duration after first frame is included on L1
const netChannelDuration = 2
// The sequencing window timeout starts from the L1 origin, whereas the
// channel timeout starts from the first L1 inclusion block of any frame.
// So to have comparable values, the sws is converted to an effective
// sequencing window from the first L1 inclusion block, assuming that L2
// blocks are quickly included on L1.
// So we subtract 1 block distance from the origin block and 1 block for
// minging the first frame.
openChannelSeqWindow := cfg.SequencerWindowSize - 2
if openChannelSeqWindow > cfg.ChannelTimeout {
return cfg.ChannelTimeout - netChannelDuration
} else {
return openChannelSeqWindow - netChannelDuration
}
}
...@@ -367,9 +367,9 @@ func TestFinalize(t *testing.T) { ...@@ -367,9 +367,9 @@ func TestFinalize(t *testing.T) {
l2Seq := sys.Clients["sequencer"] l2Seq := sys.Clients["sequencer"]
// as configured in the extra geth lifecycle in testing setup // as configured in the extra geth lifecycle in testing setup
finalizedDistance := uint64(8) const finalizedDistance = 8
// Wait enough time for L1 to finalize and L2 to confirm its data in finalized L1 blocks // Wait enough time for L1 to finalize and L2 to confirm its data in finalized L1 blocks
<-time.After(time.Duration((finalizedDistance+4)*cfg.DeployConfig.L1BlockTime) * time.Second) time.Sleep(time.Duration((finalizedDistance+6)*cfg.DeployConfig.L1BlockTime) * time.Second)
// fetch the finalized head of geth // fetch the finalized head of geth
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
...@@ -883,7 +883,7 @@ func TestWithdrawals(t *testing.T) { ...@@ -883,7 +883,7 @@ func TestWithdrawals(t *testing.T) {
require.Nil(t, err) require.Nil(t, err)
// Get l2BlockNumber for proof generation // Get l2BlockNumber for proof generation
ctx, cancel = context.WithTimeout(context.Background(), 30*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) ctx, cancel = context.WithTimeout(context.Background(), 40*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
defer cancel() defer cancel()
blockNumber, err := withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, receipt.BlockNumber) blockNumber, err := withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, receipt.BlockNumber)
require.Nil(t, err) require.Nil(t, err)
......
...@@ -415,7 +415,6 @@ func TestMixedDepositValidity(t *testing.T) { ...@@ -415,7 +415,6 @@ func TestMixedDepositValidity(t *testing.T) {
// TestMixedWithdrawalValidity makes a number of withdrawal transactions and ensures ones with modified parameters are // TestMixedWithdrawalValidity makes a number of withdrawal transactions and ensures ones with modified parameters are
// rejected while unmodified ones are accepted. This runs test cases in different systems. // rejected while unmodified ones are accepted. This runs test cases in different systems.
func TestMixedWithdrawalValidity(t *testing.T) { func TestMixedWithdrawalValidity(t *testing.T) {
parallel(t)
// Setup our logger handler // Setup our logger handler
if !verboseGethNodes { if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler()) log.Root().SetHandler(log.DiscardHandler())
...@@ -425,7 +424,7 @@ func TestMixedWithdrawalValidity(t *testing.T) { ...@@ -425,7 +424,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
for i := 0; i <= 8; i++ { for i := 0; i <= 8; i++ {
i := i // avoid loop var capture i := i // avoid loop var capture
t.Run(fmt.Sprintf("withdrawal test#%d", i+1), func(t *testing.T) { t.Run(fmt.Sprintf("withdrawal test#%d", i+1), func(t *testing.T) {
t.Parallel() parallel(t)
// Create our system configuration, funding all accounts we created for L1/L2, and start it // Create our system configuration, funding all accounts we created for L1/L2, and start it
cfg := DefaultSystemConfig(t) cfg := DefaultSystemConfig(t)
cfg.DeployConfig.FinalizationPeriodSeconds = 6 cfg.DeployConfig.FinalizationPeriodSeconds = 6
...@@ -528,7 +527,7 @@ func TestMixedWithdrawalValidity(t *testing.T) { ...@@ -528,7 +527,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
transactor.ExpectedL2Nonce = transactor.ExpectedL2Nonce + 1 transactor.ExpectedL2Nonce = transactor.ExpectedL2Nonce + 1
// Wait for the finalization period, then we can finalize this withdrawal. // Wait for the finalization period, then we can finalize this withdrawal.
ctx, cancel = context.WithTimeout(context.Background(), 25*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) ctx, cancel = context.WithTimeout(context.Background(), 40*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
blockNumber, err := withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, receipt.BlockNumber) blockNumber, err := withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, receipt.BlockNumber)
cancel() cancel()
require.Nil(t, err) require.Nil(t, err)
...@@ -658,7 +657,7 @@ func TestMixedWithdrawalValidity(t *testing.T) { ...@@ -658,7 +657,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
require.Equal(t, types.ReceiptStatusSuccessful, proveReceipt.Status) require.Equal(t, types.ReceiptStatusSuccessful, proveReceipt.Status)
// Wait for finalization and then create the Finalized Withdrawal Transaction // Wait for finalization and then create the Finalized Withdrawal Transaction
ctx, cancel = context.WithTimeout(context.Background(), 40*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) ctx, cancel = context.WithTimeout(context.Background(), 45*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
defer cancel() defer cancel()
_, err = withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, header.Number) _, err = withdrawals.WaitForFinalizationPeriod(ctx, l1Client, predeploys.DevOptimismPortalAddr, header.Number)
require.Nil(t, err) require.Nil(t, err)
......
...@@ -66,6 +66,7 @@ var Goerli = rollup.Config{ ...@@ -66,6 +66,7 @@ var Goerli = rollup.Config{
BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000000000420"), BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000000000420"),
DepositContractAddress: common.HexToAddress("0x5b47E1A08Ea6d985D6649300584e6722Ec4B1383"), DepositContractAddress: common.HexToAddress("0x5b47E1A08Ea6d985D6649300584e6722Ec4B1383"),
L1SystemConfigAddress: common.HexToAddress("0xAe851f927Ee40dE99aaBb7461C00f9622ab91d60"), L1SystemConfigAddress: common.HexToAddress("0xAe851f927Ee40dE99aaBb7461C00f9622ab91d60"),
RegolithTime: u64Ptr(1679079600),
} }
var NetworksByName = map[string]rollup.Config{ var NetworksByName = map[string]rollup.Config{
...@@ -97,3 +98,7 @@ func GetRollupConfig(name string) (rollup.Config, error) { ...@@ -97,3 +98,7 @@ func GetRollupConfig(name string) (rollup.Config, error) {
return network, nil return network, nil
} }
func u64Ptr(v uint64) *uint64 {
return &v
}
FROM ethereum/client-go:v1.10.22 FROM ethereum/client-go:v1.11.2
RUN apk add --no-cache jq RUN apk add --no-cache jq
......
FROM ethereumoptimism/op-geth:optimism-history FROM ethereumoptimism/op-geth:optimism
RUN apk add --no-cache jq RUN apk add --no-cache jq
......
...@@ -17,6 +17,7 @@ services: ...@@ -17,6 +17,7 @@ services:
dockerfile: Dockerfile.l1 dockerfile: Dockerfile.l1
ports: ports:
- "8545:8545" - "8545:8545"
- "7060:6060"
volumes: volumes:
- "l1_data:/db" - "l1_data:/db"
- "${PWD}/../.devnet/genesis-l1.json:/genesis.json" - "${PWD}/../.devnet/genesis-l1.json:/genesis.json"
...@@ -28,6 +29,7 @@ services: ...@@ -28,6 +29,7 @@ services:
dockerfile: Dockerfile.l2 dockerfile: Dockerfile.l2
ports: ports:
- "9545:8545" - "9545:8545"
- "8060:6060"
volumes: volumes:
- "l2_data:/db" - "l2_data:/db"
- "${PWD}/../.devnet/genesis-l2.json:/genesis.json" - "${PWD}/../.devnet/genesis-l2.json:/genesis.json"
...@@ -120,11 +122,12 @@ services: ...@@ -120,11 +122,12 @@ services:
OP_BATCHER_L1_ETH_RPC: http://l1:8545 OP_BATCHER_L1_ETH_RPC: http://l1:8545
OP_BATCHER_L2_ETH_RPC: http://l2:8545 OP_BATCHER_L2_ETH_RPC: http://l2:8545
OP_BATCHER_ROLLUP_RPC: http://op-node:8545 OP_BATCHER_ROLLUP_RPC: http://op-node:8545
OP_BATCHER_MAX_CHANNEL_DURATION: 1
OP_BATCHER_MAX_L1_TX_SIZE_BYTES: 120000 OP_BATCHER_MAX_L1_TX_SIZE_BYTES: 120000
OP_BATCHER_TARGET_L1_TX_SIZE_BYTES: 624 OP_BATCHER_TARGET_L1_TX_SIZE_BYTES: 100000
OP_BATCHER_TARGET_NUM_FRAMES: 1 OP_BATCHER_TARGET_NUM_FRAMES: 1
OP_BATCHER_APPROX_COMPR_RATIO: 1.0 OP_BATCHER_APPROX_COMPR_RATIO: 0.4
OP_BATCHER_SUB_SAFETY_MARGIN: 6 # SWS is 15, ChannelTimeout is 40 OP_BATCHER_SUB_SAFETY_MARGIN: 4 # SWS is 15, ChannelTimeout is 40
OP_BATCHER_POLL_INTERVAL: 1s OP_BATCHER_POLL_INTERVAL: 1s
OP_BATCHER_NUM_CONFIRMATIONS: 1 OP_BATCHER_NUM_CONFIRMATIONS: 1
OP_BATCHER_SAFE_ABORT_NONCE_TOO_LOW_COUNT: 3 OP_BATCHER_SAFE_ABORT_NONCE_TOO_LOW_COUNT: 3
......
...@@ -65,4 +65,7 @@ exec geth \ ...@@ -65,4 +65,7 @@ exec geth \
--authrpc.vhosts="*" \ --authrpc.vhosts="*" \
--authrpc.jwtsecret=/config/jwt-secret.txt \ --authrpc.jwtsecret=/config/jwt-secret.txt \
--gcmode=archive \ --gcmode=archive \
--metrics \
--metrics.addr=0.0.0.0 \
--metrics.port=6060 \
"$@" "$@"
# @eth-optimism/actor-tests # @eth-optimism/actor-tests
## 0.0.22
### Patch Changes
- Updated dependencies [cb19e2f9c]
- @eth-optimism/sdk@2.0.0
- @eth-optimism/contracts-bedrock@0.13.0
## 0.0.21 ## 0.0.21
### Patch Changes ### Patch Changes
......
{ {
"name": "@eth-optimism/actor-tests", "name": "@eth-optimism/actor-tests",
"version": "0.0.21", "version": "0.0.22",
"description": "A library and suite of tests to stress test Optimism Bedrock.", "description": "A library and suite of tests to stress test Optimism Bedrock.",
"license": "MIT", "license": "MIT",
"author": "", "author": "",
...@@ -18,9 +18,9 @@ ...@@ -18,9 +18,9 @@
"test:coverage": "yarn test" "test:coverage": "yarn test"
}, },
"dependencies": { "dependencies": {
"@eth-optimism/contracts-bedrock": "0.12.1", "@eth-optimism/contracts-bedrock": "0.13.0",
"@eth-optimism/core-utils": "^0.12.0", "@eth-optimism/core-utils": "^0.12.0",
"@eth-optimism/sdk": "^1.10.4", "@eth-optimism/sdk": "^2.0.0",
"@types/chai": "^4.2.18", "@types/chai": "^4.2.18",
"@types/chai-as-promised": "^7.1.4", "@types/chai-as-promised": "^7.1.4",
"async-mutex": "^0.3.2", "async-mutex": "^0.3.2",
......
# @eth-optimism/atst
## 0.1.0
### Minor Changes
- a312af15d: Make type parsing more intuitive
- 82a033fed: Fix string type that should be `0x${string}`
### Patch Changes
- 11bb01851: Add new atst package
- 7c37d262a: Release ATST
...@@ -30,6 +30,8 @@ yarn add @eth-optimism/atst @wagmi/core ethers@5.7.0 ...@@ -30,6 +30,8 @@ yarn add @eth-optimism/atst @wagmi/core ethers@5.7.0
## Basic usage ## Basic usage
Note: all functions are fully tested. The tests are a great place to find usage examples.
### Basic Setup ### Basic Setup
ATST uses `@wagmi/core` under the hood. See their documentation for more information. ATST uses `@wagmi/core` under the hood. See their documentation for more information.
...@@ -251,6 +253,24 @@ const preparedTx = await prepareWriteAttestation(about, key, 'hello world') ...@@ -251,6 +253,24 @@ const preparedTx = await prepareWriteAttestation(about, key, 'hello world')
await writeAttestation(preparedTx) await writeAttestation(preparedTx)
``` ```
### getEvents
To query attestation events, use `getEvents` with a provider and any filters you want to apply:
```typescript
const events = await getEvents({
creator,
about,
key,
value,
provider: new ethers.providers.JsonRpcProvider('http://localhost:8545'),
fromBlockOrBlockhash,
toBlock,
})
```
Set `key`, `about`, `creator`, or `value` to `null` to omit that filter.
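For example, passing only `creator` (and leaving `about`, `key`, and `value` as `null`) returns every `AttestationCreated` event for attestations made by that creator.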
## Tutorial ## Tutorial
For a tutorial on using the attestation station in general, see our tutorial as well as other Optimism related tutorials in our [optimism-tutorial](https://github.com/ethereum-optimism/optimism-tutorial/tree/main/ecosystem/attestation-station#key-values) repo For a tutorial on using the attestation station in general, see our tutorial as well as other Optimism related tutorials in our [optimism-tutorial](https://github.com/ethereum-optimism/optimism-tutorial/tree/main/ecosystem/attestation-station#key-values) repo
{ {
"name": "@eth-optimism/atst", "name": "@eth-optimism/atst",
"version": "0.0.0", "version": "0.1.0",
"type": "module", "type": "module",
"main": "dist/index.js", "main": "dist/index.js",
"types": "src/index.ts", "types": "src/index.ts",
......
...@@ -26,6 +26,7 @@ cli ...@@ -26,6 +26,7 @@ cli
}) })
.example( .example(
() => () =>
// note: the private key in the write example is that of the first default test account when running anvil
`atst read --key "optimist.base-uri" --about 0x2335022c740d17c2837f9C884Bfe4fFdbf0A95D5 --creator 0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3` `atst read --key "optimist.base-uri" --about 0x2335022c740d17c2837f9C884Bfe4fFdbf0A95D5 --creator 0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3`
) )
.action(async (options: ReadOptions) => { .action(async (options: ReadOptions) => {
...@@ -72,6 +73,8 @@ cli ...@@ -72,6 +73,8 @@ cli
`atst write --key "optimist.base-uri" --about 0x2335022c740d17c2837f9C884Bfe4fFdbf0A95D5 --value "my attestation" --private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 --rpc-url http://localhost:8545` `atst write --key "optimist.base-uri" --about 0x2335022c740d17c2837f9C884Bfe4fFdbf0A95D5 --value "my attestation" --private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 --rpc-url http://localhost:8545`
) )
.action(async (options: WriteOptions) => { .action(async (options: WriteOptions) => {
const spinner = logger.spinner()
spinner.start('Writing attestation...')
const { write } = await import('./commands/write') const { write } = await import('./commands/write')
// TODO use the native api to do this instead of parsing the raw args // TODO use the native api to do this instead of parsing the raw args
...@@ -86,9 +89,16 @@ cli ...@@ -86,9 +89,16 @@ cli
: options.contract : options.contract
await write({ ...options, about, privateKey, contract }) await write({ ...options, about, privateKey, contract })
.then((res) => {
spinner.succeed('Attestation written!')
logger.info(`Attestation hash: ${res}`)
})
.catch((e) => {
logger.error(e)
spinner.fail('Attestation failed!')
})
}) })
cli.help()
cli.version(packageJson.version) cli.version(packageJson.version)
void (async () => { void (async () => {
......
...@@ -10,6 +10,7 @@ export { ...@@ -10,6 +10,7 @@ export {
readAttestationString, readAttestationString,
} from './lib/readAttestation' } from './lib/readAttestation'
export { readAttestations } from './lib/readAttestations' export { readAttestations } from './lib/readAttestations'
export { getEvents } from './lib/getEvents'
export { prepareWriteAttestation } from './lib/prepareWriteAttestation' export { prepareWriteAttestation } from './lib/prepareWriteAttestation'
export { prepareWriteAttestations } from './lib/prepareWriteAttestations' export { prepareWriteAttestations } from './lib/prepareWriteAttestations'
export { writeAttestation } from './lib/writeAttestation' export { writeAttestation } from './lib/writeAttestation'
...@@ -23,6 +24,9 @@ export { ...@@ -23,6 +24,9 @@ export {
parseString, parseString,
} from './lib/parseAttestationBytes' } from './lib/parseAttestationBytes'
// types // types
export type { AttestationCreatedEvent } from './types/AttestationCreatedEvent'
export type { AttestationReadParams } from './types/AttestationReadParams' export type { AttestationReadParams } from './types/AttestationReadParams'
export type { DataTypeOption } from './types/DataTypeOption' export type { DataTypeOption } from './types/DataTypeOption'
export type { WagmiBytes } from './types/WagmiBytes' export type { WagmiBytes } from './types/WagmiBytes'
// react
export * from './react'
import { ethers } from 'ethers' import { ethers } from 'ethers'
export const encodeRawKey = (rawKey: string) => { import { WagmiBytes } from '../types/WagmiBytes'
export const encodeRawKey = (rawKey: string): WagmiBytes => {
if (rawKey.length < 32) { if (rawKey.length < 32) {
return ethers.utils.formatBytes32String(rawKey) return ethers.utils.formatBytes32String(rawKey) as WagmiBytes
} }
const hash = ethers.utils.keccak256(ethers.utils.toUtf8Bytes(rawKey)) const hash = ethers.utils.keccak256(ethers.utils.toUtf8Bytes(rawKey))
return hash.slice(0, 64) + 'ff' return (hash.slice(0, 64) + 'ff') as WagmiBytes
} }
import { ethers } from 'ethers'
import { describe, it, expect } from 'vitest'
import { getEvents } from './getEvents'
describe(getEvents.name, () => {
it('should get events on goerli', async () => {
const key = 'animalfarm.school.attended'
const creator = '0xBCf86Fd70a0183433763ab0c14E7a760194f3a9F'
expect(
await getEvents({
creator,
about: '0x00000000000000000000000000000000000060A7',
key,
provider: new ethers.providers.JsonRpcProvider(
'https://goerli.optimism.io'
),
})
).toMatchInlineSnapshot(`
[
{
"address": "0xEE36eaaD94d1Cc1d0eccaDb55C38bFfB6Be06C77",
"args": [
"0xBCf86Fd70a0183433763ab0c14E7a760194f3a9F",
"0x00000000000000000000000000000000000060A7",
"0x616e696d616c6661726d2e7363686f6f6c2e617474656e646564000000000000",
"0x01",
],
"blockHash": "0x75feb3572d4b7d682cf632bf64df72c8d9c336dedcf8df1c88f755d529ec1b85",
"blockNumber": 3463240,
"data": "0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000010100000000000000000000000000000000000000000000000000000000000000",
"decode": [Function],
"event": "AttestationCreated",
"eventSignature": "AttestationCreated(address,address,bytes32,bytes)",
"getBlock": [Function],
"getTransaction": [Function],
"getTransactionReceipt": [Function],
"logIndex": 0,
"removeListener": [Function],
"removed": false,
"topics": [
"0x28710dfecab43d1e29e02aa56b2e1e610c0bae19135c9cf7a83a1adb6df96d85",
"0x000000000000000000000000bcf86fd70a0183433763ab0c14e7a760194f3a9f",
"0x00000000000000000000000000000000000000000000000000000000000060a7",
"0x616e696d616c6661726d2e7363686f6f6c2e617474656e646564000000000000",
],
"transactionHash": "0x0e77a32b2558f39e60c3e81bd6efd811cf4b3bd80a4f666d042a221ea63c93ab",
"transactionIndex": 0,
},
{
"address": "0xEE36eaaD94d1Cc1d0eccaDb55C38bFfB6Be06C77",
"args": [
"0xBCf86Fd70a0183433763ab0c14E7a760194f3a9F",
"0x00000000000000000000000000000000000060A7",
"0x616e696d616c6661726d2e7363686f6f6c2e617474656e646564000000000000",
"0x01",
],
"blockHash": "0xdb11b4b06e5866be931667b8c62dca182240b9256a3d8c64c1c247107aa33752",
"blockNumber": 4105095,
"data": "0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000010100000000000000000000000000000000000000000000000000000000000000",
"decode": [Function],
"event": "AttestationCreated",
"eventSignature": "AttestationCreated(address,address,bytes32,bytes)",
"getBlock": [Function],
"getTransaction": [Function],
"getTransactionReceipt": [Function],
"logIndex": 0,
"removeListener": [Function],
"removed": false,
"topics": [
"0x28710dfecab43d1e29e02aa56b2e1e610c0bae19135c9cf7a83a1adb6df96d85",
"0x000000000000000000000000bcf86fd70a0183433763ab0c14e7a760194f3a9f",
"0x00000000000000000000000000000000000000000000000000000000000060a7",
"0x616e696d616c6661726d2e7363686f6f6c2e617474656e646564000000000000",
],
"transactionHash": "0x61f59bd4dfe54272d9369effe3ae57a0ef2584161fcf2bbd55f5596002e759bd",
"transactionIndex": 1,
},
]
`)
})
it('should get events on mainnet', async () => {
const creator = '0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3'
const about = '0x2335022c740d17c2837f9C884Bfe4fFdbf0A95D5'
const key = 'optimist.base-uri'
expect(
await getEvents({
creator,
about,
key,
provider: new ethers.providers.JsonRpcProvider('http://localhost:8545'),
})
).toMatchInlineSnapshot(`
[
{
"address": "0xEE36eaaD94d1Cc1d0eccaDb55C38bFfB6Be06C77",
"args": [
"0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3",
"0x2335022c740d17c2837f9C884Bfe4fFdbf0A95D5",
"0x6f7074696d6973742e626173652d757269000000000000000000000000000000",
"0x68747470733a2f2f73746f726167656170692e666c65656b2e636f2f33336630633965392d666437392d343634622d613431642d3634343238313961316230352d6275636b65742f6f7074696d6973742d6e66742f61747472696275746573",
],
"blockHash": "0x5b5f34cb7a72eb6aaf6d8af873f210278738573386c88f85c605067c10d67ee3",
"blockNumber": 50135778,
"data": "0x0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000005f68747470733a2f2f73746f726167656170692e666c65656b2e636f2f33336630633965392d666437392d343634622d613431642d3634343238313961316230352d6275636b65742f6f7074696d6973742d6e66742f6174747269627574657300",
"decode": [Function],
"event": "AttestationCreated",
"eventSignature": "AttestationCreated(address,address,bytes32,bytes)",
"getBlock": [Function],
"getTransaction": [Function],
"getTransactionReceipt": [Function],
"logIndex": 1,
"removeListener": [Function],
"removed": false,
"topics": [
"0x28710dfecab43d1e29e02aa56b2e1e610c0bae19135c9cf7a83a1adb6df96d85",
"0x00000000000000000000000060c5c9c98bcbd0b0f2fd89b24c16e533baa8cda3",
"0x0000000000000000000000002335022c740d17c2837f9c884bfe4ffdbf0a95d5",
"0x6f7074696d6973742e626173652d757269000000000000000000000000000000",
],
"transactionHash": "0x265c98ce12e0836616efd3ea2130df9647729574feb40d5607e4031ff9aace01",
"transactionIndex": 0,
},
{
"address": "0xEE36eaaD94d1Cc1d0eccaDb55C38bFfB6Be06C77",
"args": [
"0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3",
"0x2335022c740d17c2837f9C884Bfe4fFdbf0A95D5",
"0x6f7074696d6973742e626173652d757269000000000000000000000000000000",
"0x68747470733a2f2f6173736574732e6f7074696d69736d2e696f2f34613630393636312d363737342d343431662d396664622d3435336664626238393933312d6275636b65742f6f7074696d6973742d6e66742f61747472696275746573",
],
"blockHash": "0x889ad6bb2eb7aee0c095c1f6cc11f5a7a65917d7bc06500dad3213fb031f1e9c",
"blockNumber": 50141511,
"data": "0x0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000005e68747470733a2f2f6173736574732e6f7074696d69736d2e696f2f34613630393636312d363737342d343431662d396664622d3435336664626238393933312d6275636b65742f6f7074696d6973742d6e66742f617474726962757465730000",
"decode": [Function],
"event": "AttestationCreated",
"eventSignature": "AttestationCreated(address,address,bytes32,bytes)",
"getBlock": [Function],
"getTransaction": [Function],
"getTransactionReceipt": [Function],
"logIndex": 1,
"removeListener": [Function],
"removed": false,
"topics": [
"0x28710dfecab43d1e29e02aa56b2e1e610c0bae19135c9cf7a83a1adb6df96d85",
"0x00000000000000000000000060c5c9c98bcbd0b0f2fd89b24c16e533baa8cda3",
"0x0000000000000000000000002335022c740d17c2837f9c884bfe4ffdbf0a95d5",
"0x6f7074696d6973742e626173652d757269000000000000000000000000000000",
],
"transactionHash": "0xf4c0fc1ceec42831252c90b7d5c1e7a5bd6d9642d07c80afc8b525211852ee03",
"transactionIndex": 0,
},
{
"address": "0xEE36eaaD94d1Cc1d0eccaDb55C38bFfB6Be06C77",
"args": [
"0x60c5C9c98bcBd0b0F2fD89B24c16e533BaA8CdA3",
"0x2335022c740d17c2837f9C884Bfe4fFdbf0A95D5",
"0x6f7074696d6973742e626173652d757269000000000000000000000000000000",
"0x68747470733a2f2f6173736574732e6f7074696d69736d2e696f2f34613630393636312d363737342d343431662d396664622d3435336664626238393933312d6275636b65742f6f7074696d6973742d6e66742f61747472696275746573",
],
"blockHash": "0x120931c24234d03af66b9b21fcaf3b97242ed8f0c0418a9b16fc5cc1a804e917",
"blockNumber": 50141837,
"data": "0x0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000005e68747470733a2f2f6173736574732e6f7074696d69736d2e696f2f34613630393636312d363737342d343431662d396664622d3435336664626238393933312d6275636b65742f6f7074696d6973742d6e66742f617474726962757465730000",
"decode": [Function],
"event": "AttestationCreated",
"eventSignature": "AttestationCreated(address,address,bytes32,bytes)",
"getBlock": [Function],
"getTransaction": [Function],
"getTransactionReceipt": [Function],
"logIndex": 1,
"removeListener": [Function],
"removed": false,
"topics": [
"0x28710dfecab43d1e29e02aa56b2e1e610c0bae19135c9cf7a83a1adb6df96d85",
"0x00000000000000000000000060c5c9c98bcbd0b0f2fd89b24c16e533baa8cda3",
"0x0000000000000000000000002335022c740d17c2837f9c884bfe4ffdbf0a95d5",
"0x6f7074696d6973742e626173652d757269000000000000000000000000000000",
],
"transactionHash": "0xfaf727afe431a920448636b80864dfeeef690903756f9c3041eb625ffcc82f11",
"transactionIndex": 0,
},
]
`)
})
})
import { ethers } from 'ethers'
import { Address } from 'wagmi'
import { ATTESTATION_STATION_ADDRESS } from '../constants/attestationStationAddress'
import { abi } from '../lib/abi'
import { AttestationCreatedEvent } from '../types/AttestationCreatedEvent'
import { encodeRawKey } from './encodeRawKey'
export const getEvents = async ({
creator = null,
about = null,
key = null,
value = null,
provider,
fromBlockOrBlockhash,
toBlock,
}: {
creator?: Address | null
about?: Address | null
key?: string | null
value?: string | null
provider: ethers.providers.JsonRpcProvider
fromBlockOrBlockhash?: ethers.providers.BlockTag | undefined
toBlock?: ethers.providers.BlockTag | undefined
}) => {
const contract = new ethers.Contract(
ATTESTATION_STATION_ADDRESS,
abi,
provider
)
return contract.queryFilter(
contract.filters.AttestationCreated(
creator,
about,
key && encodeRawKey(key),
value
),
fromBlockOrBlockhash,
toBlock
) as Promise<AttestationCreatedEvent[]>
}
@@ -41,8 +41,11 @@ describe(parseAttestationBytes.name, () => {
   })

   it('should work for raw bytes', () => {
-    const bytes = '0x420'
-    expect(parseAttestationBytes(bytes, 'bytes')).toBe(bytes)
+    expect(parseAttestationBytes('0x420', 'bytes')).toMatchInlineSnapshot(
+      '"0x420"'
+    )
+    expect(parseAttestationBytes('0x', 'string')).toMatchInlineSnapshot('""')
+    expect(parseAttestationBytes('0x0', 'string')).toMatchInlineSnapshot('""')
   })

   it('should return raw bytes for invalid type', () => {
@@ -57,12 +60,16 @@ describe('parseFoo', () => {
     const str = 'Hello World'
     const bytes = BigNumber.from(toUtf8Bytes(str)).toHexString() as WagmiBytes
     expect(parseString(bytes)).toBe(str)
+    expect(parseString('0x')).toMatchInlineSnapshot('""')
+    expect(parseString('0x0')).toMatchInlineSnapshot('""')
+    expect(parseString('0x0')).toMatchInlineSnapshot('""')
   })

   it('works for numbers', () => {
     const num = 123
     const bytes = BigNumber.from(num).toHexString() as WagmiBytes
     expect(parseNumber(bytes)).toEqual(BigNumber.from(num))
+    expect(parseNumber('0x')).toEqual(BigNumber.from(0))
   })

   it('works for addresses', () => {
@@ -74,5 +81,8 @@ describe('parseFoo', () => {
   it('works for booleans', () => {
     const bytes = BigNumber.from(1).toHexString() as WagmiBytes
     expect(parseBool(bytes)).toBe(true)
+    expect(parseBool('0x')).toBe(false)
+    expect(parseBool('0x0')).toBe(false)
+    expect(parseBool('0x00000')).toBe(false)
   })
 })
@@ -10,7 +10,7 @@ import { ParseBytesReturn } from '../types/ParseBytesReturn'
  * Parses a string attestion
  */
 export const parseString = (rawAttestation: WagmiBytes): string => {
-  rawAttestation = rawAttestation === '0x' ? '0x0' : rawAttestation
+  rawAttestation = rawAttestation === '0x0' ? '0x' : rawAttestation
   return rawAttestation ? toUtf8String(rawAttestation) : ''
 }
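The direction of this normalization matters: ethers' `toUtf8String` rejects odd-length hex data, so rewriting `'0x'` to `'0x0'` (the old behavior) threw on empty attestations, while rewriting `'0x0'` to `'0x'` decodes cleanly. A minimal sketch of the two cases (illustrative, not taken from this commit):

```ts
import { toUtf8String } from 'ethers/lib/utils.js'

// New normalization: '0x0' is rewritten to '0x', which decodes to ''.
console.log(toUtf8String('0x')) // ''

// The old normalization went the other way ('0x' -> '0x0') and then threw,
// because '0x0' is odd-length hex:
// toUtf8String('0x0') // Error: hex data is odd-length
```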
......
@@ -39,4 +39,3 @@ describe(readAttestation.name, () => {
     )
   })
 })
 import { Address } from '@wagmi/core'
 import { BigNumber } from 'ethers'
-import { isAddress, isHexString, toUtf8Bytes } from 'ethers/lib/utils.js'
+import {
+  hexlify,
+  isAddress,
+  isHexString,
+  toUtf8Bytes,
+} from 'ethers/lib/utils.js'
 import { WagmiBytes } from '../types/WagmiBytes'

 export const stringifyAttestationBytes = (
   bytes: WagmiBytes | string | Address | number | boolean | BigNumber
-) => {
+): WagmiBytes => {
   bytes = bytes === '0x' ? '0x0' : bytes
   if (BigNumber.isBigNumber(bytes)) {
-    return bytes.toHexString()
+    return bytes.toHexString() as WagmiBytes
   }
   if (typeof bytes === 'number') {
-    return BigNumber.from(bytes).toHexString()
+    return BigNumber.from(bytes).toHexString() as WagmiBytes
   }
   if (typeof bytes === 'boolean') {
     return bytes ? '0x1' : '0x0'
@@ -21,10 +26,10 @@ export const stringifyAttestationBytes = (
     return bytes
   }
   if (isHexString(bytes)) {
-    return bytes
+    return bytes as WagmiBytes
   }
   if (typeof bytes === 'string') {
-    return toUtf8Bytes(bytes)
+    return hexlify(toUtf8Bytes(bytes)) as WagmiBytes
   }
   throw new Error(`unrecognized bytes type ${bytes satisfies never}`)
 }
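For context, a minimal sketch of the new string branch (illustrative only; the relative import path and values are assumptions, not taken from this commit):

```ts
import { toUtf8String } from 'ethers/lib/utils.js'

import { stringifyAttestationBytes } from './stringifyAttestationBytes'

// Plain strings are now hexlified, so the helper returns a 0x-prefixed hex
// string (matching the new WagmiBytes return type) instead of a Uint8Array.
const hex = stringifyAttestationBytes('hello')
console.log(hex) // '0x68656c6c6f'
console.log(toUtf8String(hex)) // 'hello'
```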
+/* eslint-disable prefer-arrow/prefer-arrow-functions */
 // Generated by @wagmi/cli@0.1.10 on 2/26/2023 at 11:08:05 AM
 import {
   useNetwork,
@@ -177,11 +178,7 @@ export function useAttestationStationRead<
       chainId as keyof typeof attestationStationAddress
     ],
     ...config,
-  } as UseContractReadConfig<
-    typeof attestationStationABI,
-    TFunctionName,
-    TSelectData
-  >)
+  } as UseContractReadConfig<typeof attestationStationABI, TFunctionName, TSelectData>)
 }

 /**
@@ -213,11 +210,7 @@ export function useAttestationStationAttestations<
     ],
     functionName: 'attestations',
     ...config,
-  } as UseContractReadConfig<
-    typeof attestationStationABI,
-    'attestations',
-    TSelectData
-  >)
+  } as UseContractReadConfig<typeof attestationStationABI, 'attestations', TSelectData>)
 }

 /**
@@ -245,11 +238,7 @@ export function useAttestationStationVersion<
     ],
     functionName: 'version',
     ...config,
-  } as UseContractReadConfig<
-    typeof attestationStationABI,
-    'version',
-    TSelectData
-  >)
+  } as UseContractReadConfig<typeof attestationStationABI, 'version', TSelectData>)
 }

 /**
@@ -358,10 +347,7 @@ export function usePrepareAttestationStationWrite<TFunctionName extends string>(
       chainId as keyof typeof attestationStationAddress
     ],
     ...config,
-  } as UsePrepareContractWriteConfig<
-    typeof attestationStationABI,
-    TFunctionName
-  >)
+  } as UsePrepareContractWriteConfig<typeof attestationStationABI, TFunctionName>)
 }

 /**
@@ -438,8 +424,5 @@ export function useAttestationStationAttestationCreatedEvent(
     ],
     eventName: 'AttestationCreated',
     ...config,
-  } as UseContractEventConfig<
-    typeof attestationStationABI,
-    'AttestationCreated'
-  >)
+  } as UseContractEventConfig<typeof attestationStationABI, 'AttestationCreated'>)
 }
import type { Event } from 'ethers'
interface TypedEvent<TArgsArray extends Array<any> = any, TArgsObject = any>
extends Event {
args: TArgsArray & TArgsObject
}
export interface AttestationCreatedEventObject {
creator: string
about: string
key: string
val: string
}
export type AttestationCreatedEvent = TypedEvent<
[string, string, string, string],
AttestationCreatedEventObject
>
 import { defineConfig } from '@wagmi/cli'
 import { hardhat, react } from '@wagmi/cli/plugins'
 import * as chains from 'wagmi/chains'
-import {ATTESTATION_STATION_ADDRESS} from '@eth-optimism/atst'
+
+export const ATTESTATION_STATION_ADDRESS =
+  '0xEE36eaaD94d1Cc1d0eccaDb55C38bFfB6Be06C77'

 export default defineConfig({
   out: 'src/react.ts',
@@ -14,7 +16,7 @@ export default defineConfig({
         [chains.optimism.id]: ATTESTATION_STATION_ADDRESS,
         [chains.optimismGoerli.id]: ATTESTATION_STATION_ADDRESS,
         [chains.foundry.id]: ATTESTATION_STATION_ADDRESS,
-      }
+      },
     },
   }),
   react(),
......
# @eth-optimism/drippie-mon

## 0.2.0

### Minor Changes

- 282bda091: Added a withdrawal monitoring service

### Patch Changes

- Updated dependencies [cb19e2f9c]
  - @eth-optimism/sdk@2.0.0

## 0.1.3

### Patch Changes
......
 {
   "private": true,
   "name": "@eth-optimism/chain-mon",
-  "version": "0.1.3",
+  "version": "0.2.0",
   "description": "[Optimism] Chain monitoring services",
   "main": "dist/index",
   "types": "dist/index",
@@ -35,7 +35,7 @@
     "@eth-optimism/common-ts": "0.8.0",
     "@eth-optimism/contracts-periphery": "1.0.7",
     "@eth-optimism/core-utils": "0.12.0",
-    "@eth-optimism/sdk": "1.10.4",
+    "@eth-optimism/sdk": "2.0.0",
     "ethers": "^5.7.0",
     "@types/dateformat": "^5.0.0",
     "chai-as-promised": "^7.1.1",
......
# @eth-optimism/contracts-bedrock

## 0.13.0

### Minor Changes

- cb19e2f9c: Moves `FINALIZATION_PERIOD_SECONDS` from the `OptimismPortal` to the `L2OutputOracle` & ensures the `CHALLENGER` key cannot delete finalized outputs.

## 0.12.1

### Patch Changes
......
 {
   "name": "@eth-optimism/contracts-bedrock",
-  "version": "0.12.1",
+  "version": "0.13.0",
   "description": "Contracts for Optimism Specs",
   "main": "dist/index",
   "types": "dist/index",
......
@@ -53,7 +53,7 @@
     "url": "https://github.com/ethereum-optimism/optimism.git"
   },
   "devDependencies": {
-    "@eth-optimism/contracts-bedrock": "0.12.1",
+    "@eth-optimism/contracts-bedrock": "0.13.0",
     "@eth-optimism/core-utils": "^0.12.0",
     "@eth-optimism/hardhat-deploy-config": "^0.2.5",
     "@ethersproject/hardware-wallets": "^5.7.0",
......
# @eth-optimism/fault-detector

## 0.6.1

### Patch Changes

- Updated dependencies [cb19e2f9c]
  - @eth-optimism/sdk@2.0.0

## 0.6.0

### Minor Changes
......
 {
   "private": true,
   "name": "@eth-optimism/fault-detector",
-  "version": "0.6.0",
+  "version": "0.6.1",
   "description": "[Optimism] Service for detecting faulty L2 output proposals",
   "main": "dist/index",
   "types": "dist/index",
@@ -50,7 +50,7 @@
     "@eth-optimism/common-ts": "^0.8.0",
     "@eth-optimism/contracts": "^0.5.40",
     "@eth-optimism/core-utils": "^0.12.0",
-    "@eth-optimism/sdk": "^1.10.2",
+    "@eth-optimism/sdk": "^2.0.0",
     "@ethersproject/abstract-provider": "^5.7.0"
   }
 }
# @eth-optimism/message-relayer

## 0.5.31

### Patch Changes

- Updated dependencies [cb19e2f9c]
  - @eth-optimism/sdk@2.0.0

## 0.5.30

### Patch Changes
......
 {
   "private": true,
   "name": "@eth-optimism/message-relayer",
-  "version": "0.5.30",
+  "version": "0.5.31",
   "description": "[Optimism] Service for automatically relaying L2 to L1 transactions",
   "main": "dist/index",
   "types": "dist/index",
@@ -33,7 +33,7 @@
   "dependencies": {
     "@eth-optimism/common-ts": "0.8.0",
     "@eth-optimism/core-utils": "0.12.0",
-    "@eth-optimism/sdk": "1.10.4",
+    "@eth-optimism/sdk": "2.0.0",
     "ethers": "^5.7.0"
   },
   "devDependencies": {
......
# @eth-optimism/sdk

## 2.0.0

### Major Changes

- cb19e2f9c: Moves `FINALIZATION_PERIOD_SECONDS` from the `OptimismPortal` to the `L2OutputOracle` & ensures the `CHALLENGER` key cannot delete finalized outputs.

### Patch Changes

- Updated dependencies [cb19e2f9c]
  - @eth-optimism/contracts-bedrock@0.13.0

## 1.10.4

### Patch Changes
......
 {
   "name": "@eth-optimism/sdk",
-  "version": "1.10.4",
+  "version": "2.0.0",
   "description": "[Optimism] Tools for working with Optimism",
   "main": "dist/index",
   "types": "dist/index",
@@ -50,7 +50,7 @@
   "dependencies": {
     "@eth-optimism/contracts": "0.5.40",
     "@eth-optimism/core-utils": "0.12.0",
-    "@eth-optimism/contracts-bedrock": "0.12.1",
+    "@eth-optimism/contracts-bedrock": "0.13.0",
     "lodash": "^4.17.21",
     "merkletreejs": "^0.2.27",
     "rlp": "^2.2.7"
......