Commit 831f9f62 authored by Andreas Bigger

Merge branch 'develop' into refcell/p2p_cli

parents 5e65978e 962ac1d9
# Batch Decoding Tool
The batch decoding tool is a utility to aid in debugging the batch submitter & the op-node
by inspecting the batches that were submitted to L1.
## Design Philosophy
The `batch_decoder` tool is designed to be simple & flexible. It offloads as much data analysis
as possible to other tools and is built around manipulating JSON on disk. The first stage fetches
all transactions sent to a batch inbox address; in the same step, those transactions are decoded into
frames & information about them is recorded. After the transactions are fetched, a second step
re-assembles the frames into channels without touching the network.
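Concretely, a debugging session is just the two subcommands run in order; a minimal sketch (see the Commands section below for the actual flags):
```
batch_decoder fetch ...       # stage 1: pulls inbox transactions from L1, needs an RPC endpoint
batch_decoder reassemble ...  # stage 2: rebuilds channels purely from the cached JSON on disk
```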
## Commands
### Fetch
`batch_decoder fetch` pulls all L1 transactions sent to the batch inbox address in a given L1 block
range and stores them on disk at a specified path as JSON files, where each file is named after the
transaction hash.
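A sketch of a typical invocation follows. The flag names here are illustrative assumptions (this commit only shows the `reassemble` flags), so check `batch_decoder fetch --help` for the exact set; the inbox address and cache path match the defaults used elsewhere in this commit:
```
# Assumed flag names; verify against `batch_decoder fetch --help`
batch_decoder fetch \
  --inbox 0xff00000000000000000000000000000000000420 \
  --start $START_BLOCK --end $END_BLOCK \
  --out /tmp/batch_decoder/transactions_cache \
  --l1 $L1_RPC_URL
```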
### Reassemble
`batch_decoder reassemble` goes through all of the frames found in the cache & turns them
into channels. It then stores the channels, with metadata, on disk, where each file is named after the Channel ID.
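For example, using the defaults from the `reassemble` CLI flags added in this commit:
```
batch_decoder reassemble \
  --inbox 0xff00000000000000000000000000000000000420 \
  --in /tmp/batch_decoder/transactions_cache \
  --out /tmp/batch_decoder/channel_cache
```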
## JQ Cheat Sheet
`jq` is a really useful utility for manipulating JSON files.
```
# Pretty print a JSON file
jq . $JSON_FILE
# Print the number of valid & invalid transactions
jq .valid_data $TX_DIR/* | sort | uniq -c
# Select all transactions that have invalid data & then print the transaction hash
jq "select(.valid_data == false)|.tx.hash" $TX_DIR
# Select all channels that are not ready and then get the id and inclusion block & tx hash of the first frame.
jq "select(.is_ready == false)|[.id, .frames[0].inclusion_block, .frames[0].transaction_hash]" $CHANNEL_DIR
```
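The channel files written by `reassemble` can be queried the same way; for example, using the `invalid_frames` and `id` fields of `ChannelWithMetadata` (defined later in this commit):
```
# Print the IDs of channels that contain an invalid frame
jq "select(.invalid_frames == true)|.id" $CHANNEL_DIR/*
```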
## Roadmap
- Parallel transaction fetching (CLI-3563)
- Create force-close channel tx data from channel ID (CLI-3564)
- Pull the batches out of channels & store that information inside the ChannelWithMetadata (CLI-3565)
- Transaction Bytes used
- Total uncompressed (different from tx bytes) + compressed bytes
- Invert ChannelWithMetadata so block numbers/hashes are mapped to channels they are submitted in (CLI-3560)
......@@ -16,11 +16,12 @@ import (
"github.com/ethereum/go-ethereum/ethclient"
)
type TransactionWithMeta struct {
type TransactionWithMetadata struct {
TxIndex uint64 `json:"tx_index"`
InboxAddr common.Address `json:"inbox_address"`
BlockNumber uint64 `json:"block_number"`
BlockHash common.Hash `json:"block_hash"`
BlockTime uint64 `json:"block_time"`
ChainId uint64 `json:"chain_id"`
Sender common.Address `json:"sender"`
ValidSender bool `json:"valid_sender"`
......@@ -38,6 +39,9 @@ type Config struct {
OutDirectory string
}
// Batches fetches & stores all transactions sent to the batch inbox address in
// the given block range (inclusive to exclusive).
// The transactions & metadata are written to the out directory.
func Batches(client *ethclient.Client, config Config) (totalValid, totalInvalid int) {
if err := os.MkdirAll(config.OutDirectory, 0750); err != nil {
log.Fatal(err)
......@@ -53,13 +57,15 @@ func Batches(client *ethclient.Client, config Config) (totalValid, totalInvalid
return
}
// fetchBatchesPerBlock gets a block & then parses all of the transactions in the block.
func fetchBatchesPerBlock(client *ethclient.Client, number *big.Int, signer types.Signer, config Config) (validBatchCount, invalidBatchCount int) {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
block, err := client.BlockByNumber(ctx, number)
if err != nil {
log.Fatal(err)
}
fmt.Println("Fetched block: ", number)
for i, tx := range block.Transactions() {
if tx.To() != nil && *tx.To() == config.BatchInbox {
sender, err := signer.Sender(tx)
......@@ -88,13 +94,14 @@ func fetchBatchesPerBlock(client *ethclient.Client, number *big.Int, signer type
invalidBatchCount += 1
}
txm := &TransactionWithMeta{
txm := &TransactionWithMetadata{
Tx: tx,
Sender: sender,
ValidSender: validSender,
TxIndex: uint64(i),
BlockNumber: block.NumberU64(),
BlockHash: block.Hash(),
BlockTime: block.Time(),
ChainId: config.ChainID.Uint64(),
InboxAddr: config.BatchInbox,
Frames: frames,
......
......@@ -8,6 +8,7 @@ import (
"time"
"github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/fetch"
"github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/reassemble"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/urfave/cli"
......@@ -59,7 +60,7 @@ func main() {
if err != nil {
log.Fatal(err)
}
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
chainID, err := client.ChainID(ctx)
if err != nil {
......@@ -82,6 +83,36 @@ func main() {
return nil
},
},
{
Name: "reassemble",
Usage: "Reassembles channels from fetched batches",
Flags: []cli.Flag{
cli.StringFlag{
Name: "inbox",
Value: "0xff00000000000000000000000000000000000420",
Usage: "Batch Inbox Address",
},
cli.StringFlag{
Name: "in",
Value: "/tmp/batch_decoder/transactions_cache",
Usage: "Cache directory for the found transactions",
},
cli.StringFlag{
Name: "out",
Value: "/tmp/batch_decoder/channel_cache",
Usage: "Cache directory for the found channels",
},
},
Action: func(cliCtx *cli.Context) error {
config := reassemble.Config{
BatchInbox: common.HexToAddress(cliCtx.String("inbox")),
InDirectory: cliCtx.String("in"),
OutDirectory: cliCtx.String("out"),
}
reassemble.Channels(config)
return nil
},
},
}
if err := app.Run(os.Args); err != nil {
......
package reassemble
import (
"encoding/json"
"fmt"
"io"
"log"
"os"
"path"
"sort"
"github.com/ethereum-optimism/optimism/op-node/cmd/batch_decoder/fetch"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum/go-ethereum/common"
)
type ChannelWithMetadata struct {
ID derive.ChannelID `json:"id"`
IsReady bool `json:"is_ready"`
InvalidFrames bool `json:"invalid_frames"`
InvalidBatches bool `json:"invalid_batches"`
Frames []FrameWithMetadata `json:"frames"`
Batches []derive.BatchV1 `json:"batches"`
}
type FrameWithMetadata struct {
TxHash common.Hash `json:"transaction_hash"`
InclusionBlock uint64 `json:"inclusion_block"`
Timestamp uint64 `json:"timestamp"`
BlockHash common.Hash `json:"block_hash"`
Frame derive.Frame `json:"frame"`
}
type Config struct {
BatchInbox common.Address
InDirectory string
OutDirectory string
}
// Channels loads all transactions from the given input directory that are submitted to the
// specified batch inbox and then re-assembles all channels & writes the re-assembled channels
// to the out directory.
func Channels(config Config) {
if err := os.MkdirAll(config.OutDirectory, 0750); err != nil {
log.Fatal(err)
}
txns := loadTransactions(config.InDirectory, config.BatchInbox)
// Sort first by block number then by transaction index inside the block number range.
// This is to match the order they are processed in derivation.
sort.Slice(txns, func(i, j int) bool {
if txns[i].BlockNumber == txns[j].BlockNumber {
return txns[i].TxIndex < txns[j].TxIndex
} else {
return txns[i].BlockNumber < txns[j].BlockNumber
}
})
frames := transactionsToFrames(txns)
framesByChannel := make(map[derive.ChannelID][]FrameWithMetadata)
for _, frame := range frames {
framesByChannel[frame.Frame.ID] = append(framesByChannel[frame.Frame.ID], frame)
}
for id, frames := range framesByChannel {
ch := processFrames(id, frames)
filename := path.Join(config.OutDirectory, fmt.Sprintf("%s.json", id.String()))
if err := writeChannel(ch, filename); err != nil {
log.Fatal(err)
}
}
}
func writeChannel(ch ChannelWithMetadata, filename string) error {
file, err := os.Create(filename)
if err != nil {
log.Fatal(err)
}
defer file.Close()
enc := json.NewEncoder(file)
return enc.Encode(ch)
}
func processFrames(id derive.ChannelID, frames []FrameWithMetadata) ChannelWithMetadata {
ch := derive.NewChannel(id, eth.L1BlockRef{Number: frames[0].InclusionBlock})
invalidFrame := false
for _, frame := range frames {
if ch.IsReady() {
fmt.Printf("Channel %v is ready despite having more frames\n", id.String())
invalidFrame = true
break
}
if err := ch.AddFrame(frame.Frame, eth.L1BlockRef{Number: frame.InclusionBlock}); err != nil {
fmt.Printf("Error adding to channel %v. Err: %v\n", id.String(), err)
invalidFrame = true
}
}
var batches []derive.BatchV1
invalidBatches := false
if ch.IsReady() {
br, err := derive.BatchReader(ch.Reader(), eth.L1BlockRef{})
if err == nil {
for batch, err := br(); err != io.EOF; batch, err = br() {
if err != nil {
fmt.Printf("Error reading batch for channel %v. Err: %v\n", id.String(), err)
invalidBatches = true
} else {
batches = append(batches, batch.Batch.BatchV1)
}
}
} else {
fmt.Printf("Error creating batch reader for channel %v. Err: %v\n", id.String(), err)
}
} else {
fmt.Printf("Channel %v is not ready\n", id.String())
}
return ChannelWithMetadata{
ID: id,
Frames: frames,
IsReady: ch.IsReady(),
InvalidFrames: invalidFrame,
InvalidBatches: invalidBatches,
Batches: batches,
}
}
func transactionsToFrames(txns []fetch.TransactionWithMetadata) []FrameWithMetadata {
var out []FrameWithMetadata
for _, tx := range txns {
for _, frame := range tx.Frames {
fm := FrameWithMetadata{
TxHash: tx.Tx.Hash(),
InclusionBlock: tx.BlockNumber,
BlockHash: tx.BlockHash,
Timestamp: tx.BlockTime,
Frame: frame,
}
out = append(out, fm)
}
}
return out
}
func loadTransactions(dir string, inbox common.Address) []fetch.TransactionWithMetadata {
files, err := os.ReadDir(dir)
if err != nil {
log.Fatal(err)
}
var out []fetch.TransactionWithMetadata
for _, file := range files {
f := path.Join(dir, file.Name())
txm := loadTransactionsFile(f)
if txm.InboxAddr == inbox && txm.ValidSender {
out = append(out, txm)
}
}
return out
}
func loadTransactionsFile(file string) fetch.TransactionWithMetadata {
f, err := os.Open(file)
if err != nil {
log.Fatal(err)
}
defer f.Close()
dec := json.NewDecoder(f)
var txm fetch.TransactionWithMetadata
if err := dec.Decode(&txm); err != nil {
log.Fatalf("Failed to decode %v. Err: %v\n", file, err)
}
return txm
}
......@@ -104,7 +104,7 @@ type EngineQueue struct {
finalizedL1 eth.L1BlockRef
safeAttributes []*eth.PayloadAttributes
safeAttributes *eth.PayloadAttributes
unsafePayloads PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps
// Tracks which L2 blocks were last derived from which L1 block. At most finalityLookback large.
......@@ -167,11 +167,6 @@ func (eq *EngineQueue) AddUnsafePayload(payload *eth.ExecutionPayload) {
eq.log.Trace("Next unsafe payload to process", "next", p.ID(), "timestamp", uint64(p.Timestamp))
}
func (eq *EngineQueue) AddSafeAttributes(attributes *eth.PayloadAttributes) {
eq.log.Trace("Adding next safe attributes", "timestamp", attributes.Timestamp)
eq.safeAttributes = append(eq.safeAttributes, attributes)
}
func (eq *EngineQueue) Finalize(l1Origin eth.L1BlockRef) {
if l1Origin.Number < eq.finalizedL1.Number {
eq.log.Error("ignoring old L1 finalized block signal! Is the L1 provider corrupted?", "prev_finalized_l1", eq.finalizedL1, "signaled_finalized_l1", l1Origin)
......@@ -212,27 +207,27 @@ func (eq *EngineQueue) Step(ctx context.Context) error {
if eq.needForkchoiceUpdate {
return eq.tryUpdateEngine(ctx)
}
if len(eq.safeAttributes) > 0 {
if eq.safeAttributes != nil {
return eq.tryNextSafeAttributes(ctx)
}
outOfData := false
if len(eq.safeAttributes) == 0 {
newOrigin := eq.prev.Origin()
// Check if the L2 unsafe head origin is consistent with the new origin
if err := eq.verifyNewL1Origin(ctx, newOrigin); err != nil {
return err
}
eq.origin = newOrigin
eq.postProcessSafeL2() // make sure we track the last L2 safe head for every new L1 block
if next, err := eq.prev.NextAttributes(ctx, eq.safeHead); err == io.EOF {
outOfData = true
} else if err != nil {
return err
} else {
eq.safeAttributes = append(eq.safeAttributes, next)
return NotEnoughData
}
newOrigin := eq.prev.Origin()
// Check if the L2 unsafe head origin is consistent with the new origin
if err := eq.verifyNewL1Origin(ctx, newOrigin); err != nil {
return err
}
eq.origin = newOrigin
eq.postProcessSafeL2() // make sure we track the last L2 safe head for every new L1 block
if next, err := eq.prev.NextAttributes(ctx, eq.safeHead); err == io.EOF {
outOfData = true
} else if err != nil {
return err
} else {
eq.safeAttributes = next
eq.log.Debug("Adding next safe attributes", "safe_head", eq.safeHead, "next", eq.safeAttributes)
return NotEnoughData
}
if eq.unsafePayloads.Len() > 0 {
return eq.tryNextUnsafePayload(ctx)
}
......@@ -459,7 +454,7 @@ func (eq *EngineQueue) consolidateNextSafeAttributes(ctx context.Context) error
}
return NewTemporaryError(fmt.Errorf("failed to get existing unsafe payload to compare against derived attributes from L1: %w", err))
}
if err := AttributesMatchBlock(eq.safeAttributes[0], eq.safeHead.Hash, payload, eq.log); err != nil {
if err := AttributesMatchBlock(eq.safeAttributes, eq.safeHead.Hash, payload, eq.log); err != nil {
eq.log.Warn("L2 reorg: existing unsafe block does not match derived attributes from L1", "err", err)
// geth cannot wind back a chain without reorging to a new, previously non-canonical, block
return eq.forceNextSafeAttributes(ctx)
......@@ -472,7 +467,7 @@ func (eq *EngineQueue) consolidateNextSafeAttributes(ctx context.Context) error
eq.needForkchoiceUpdate = true
eq.metrics.RecordL2Ref("l2_safe", ref)
// unsafe head stays the same, we did not reorg the chain.
eq.safeAttributes = eq.safeAttributes[1:]
eq.safeAttributes = nil
eq.postProcessSafeL2()
eq.logSyncProgress("reconciled with L1")
......@@ -481,10 +476,10 @@ func (eq *EngineQueue) consolidateNextSafeAttributes(ctx context.Context) error
// forceNextSafeAttributes inserts the provided attributes, reorging away any conflicting unsafe chain.
func (eq *EngineQueue) forceNextSafeAttributes(ctx context.Context) error {
if len(eq.safeAttributes) == 0 {
if eq.safeAttributes == nil {
return nil
}
attrs := eq.safeAttributes[0]
attrs := eq.safeAttributes
errType, err := eq.StartPayload(ctx, eq.safeHead, attrs, true)
if err == nil {
_, errType, err = eq.ConfirmPayload(ctx)
......@@ -513,7 +508,7 @@ func (eq *EngineQueue) forceNextSafeAttributes(ctx context.Context) error {
return NewCriticalError(fmt.Errorf("failed to process block with only deposit transactions: %w", err))
}
// drop the payload without inserting it
eq.safeAttributes = eq.safeAttributes[1:]
eq.safeAttributes = nil
// suppress the error b/c we want to retry with the next batch from the batch queue
// If there is no valid batch the node will eventually force a deposit only block. If
// the deposit only block fails, this will return the critical error above.
......@@ -523,7 +518,7 @@ func (eq *EngineQueue) forceNextSafeAttributes(ctx context.Context) error {
return NewCriticalError(fmt.Errorf("unknown InsertHeadBlock error type %d: %w", errType, err))
}
}
eq.safeAttributes = eq.safeAttributes[1:]
eq.safeAttributes = nil
eq.logSyncProgress("processed safe block derived from L1")
return nil
......
......@@ -50,7 +50,6 @@ type EngineQueueStage interface {
SetUnsafeHead(head eth.L2BlockRef)
Finalize(l1Origin eth.L1BlockRef)
AddSafeAttributes(attributes *eth.PayloadAttributes)
AddUnsafePayload(payload *eth.ExecutionPayload)
Step(context.Context) error
}
......
......@@ -16,9 +16,9 @@
# atst
atst is a typescript sdk and cli around the attestation station
atst is a typescript / javascript sdk and cli around AttestationStation
### Visit [Docs](https://community.optimism.io/docs/governance/attestation-station/) for general documentation on the attestation station!
**Visit [Docs](https://community.optimism.io/docs/governance/attestation-station/) for general documentation on AttestationStation.**
## Getting started
......@@ -28,42 +28,50 @@ Install
npm install @eth-optimism/atst wagmi @wagmi/core ethers@5.7.0
```
## atst typescript sdk
## atst typescript/javascript sdk
The typescript sdk provides a clean [wagmi](https://wagmi.sh/) based interface for reading and writing to the attestation station
The typescript sdk provides a clean [wagmi](https://wagmi.sh/) based interface for reading and writing to AttestationStation.
### See [sdk docs](https://github.com/ethereum-optimism/optimism/blob/develop/packages/atst/docs/sdk.md) for usage instructions
**See [sdk docs](https://github.com/ethereum-optimism/optimism/blob/develop/packages/atst/docs/sdk.md) for usage instructions.**
## atst cli
The cli provides a convenient command-line interface for interacting with the attestation station contract
The cli provides a convenient command-line interface for interacting with the AttestationStation contract
![preview](./assets/preview.gif)
## React API
**See [cli docs](https://github.com/ethereum-optimism/optimism/blob/develop/packages/atst/docs/cli.md) for usage instructions.**
For react hooks we recommend using the [wagmi cli](https://wagmi.sh/cli/getting-started) with the [etherscan plugin](https://wagmi.sh/cli/plugins/etherscan) and [react plugin](https://wagmi.sh/cli/plugins/react) to automatically generate react hooks around the attestation station.
## React API
Use `createKey` and `createValue` to convert your raw keys and values into bytes that can be used in the attestation station contract calls
For react hooks we recommend using the [wagmi cli](https://wagmi.sh/cli/getting-started) with the [etherscan plugin](https://wagmi.sh/cli/plugins/etherscan) and [react plugin](https://wagmi.sh/cli/plugins/react) to automatically generate react hooks around AttestationStation.
Use `parseString`, `parseBool`, `parseAddress` and `parseNumber` to convert values returned by attestation station to their correct data type.
Use `createKey` and `createValue` to convert your raw keys and values into bytes that can be used in AttestationStation contract calls
For convenience we also export the hooks here.
Use `parseString`, `parseBool`, `parseAddress` and `parseNumber` to convert values returned by AttestationStation to their correct data type.
`useAttestationStationAttestation` - Reads attestations with useContractRead
For convenience we also [export the hooks here](https://github.com/ethereum-optimism/optimism/blob/develop/packages/atst/src/index.ts):
- `useAttestationStationAttestation` - Reads attestations with useContractRead
- `useAttestationStationVersion` - Reads attestation version
- `useAttestationStationAttest` - Wraps useContractWrite with AttestationStation abi calling attest
- `usePrepareAttestationStationAttest` - Wraps usePrepare with AttestationStation abi calling attest
- `useAttestationStationAttestationCreatedEvent` - Wraps useContractEvents for Created events
`useAttestationStationVersion` - Reads attestation version
Also, some more hooks are exported by the cli, but these are likely the only ones you need.
`useAttestationStationAttest` - Wraps useContractWrite with attestation station abi calling attest
## Contributing
`usePrepareAttestationStationAttest` - Wraps usePrepare with attestation station abi calling attest
Please see our [contributing.md](https://github.com/ethereum-optimism/optimism/blob/develop/CONTRIBUTING.md). No contribution is too small.
`useAttestationStationAttestationCreatedEvent` - Wraps useContractEvents for Created events
Having your contribution denied feels bad.
Please consider [opening an issue](https://github.com/ethereum-optimism/optimism/issues) before adding any new features or APIs.
Also, some more hooks are exported by the cli, but these are likely the only ones you need.
## Contributing
## Getting help
Please see our [contributing.md](docs/contributing.md). No contribution is too small.
If you have any problems, these resources could help you:
Having your contribution denied feels bad. Please consider opening an issue before adding any new features or APIs.
- [sdk documentation](https://github.com/ethereum-optimism/optimism/blob/develop/packages/atst/docs/sdk.md)
- [cli documentation](https://github.com/ethereum-optimism/optimism/blob/develop/packages/atst/docs/cli.md)
- [Optimism Discord](https://discord-gateway.optimism.io/)
- [Telegram group](https://t.me/+zwpJ8Ohqgl8yNjNh)
......@@ -63,9 +63,9 @@ npx atst read --key "optimist.base-uri" --about 0x2335022c740d17c2837f9C884Bfe4f
Example:
```bash
atst write --key "optimist.base-uri" \
npx atst write --key "optimist.base-uri" \
--about 0x2335022c740d17c2837f9C884Bfe4fFdbf0A95D5 \
--value "my attestation" \
--private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 \
--rpc-url http://localhost:8545
--rpc-url http://goerli.optimism.io
```
# atst sdk docs
# AttestationStation sdk docs
Typescript sdk for interacting with the ATST based on [@wagmi/core](https://wagmi.sh/core/getting-started)
......@@ -150,7 +150,7 @@ const attestation = await readAttestation(
about, // Address: The about topic of the attestation
key, // string: The key of the attestation
dataType, // Optional, the data type of the attestation, 'string' | 'bytes' | 'number' | 'bool' | 'address'
contractAddress // Optional address: the contract address of the attestation station
contractAddress // Optional address: the contract address of AttestationStation
)
```
......@@ -193,7 +193,7 @@ These definitions allow you to communicate with AttestationStation, but are not
#### `ATTESTATION_STATION_ADDRESS`
The deployment address for the attestation station, currently deployed with create2 on Optimism and Optimism Goerli, is `0xEE36eaaD94d1Cc1d0eccaDb55C38bFfB6Be06C77`.
The deployment address for AttestationStation, currently deployed with create2 on Optimism and Optimism Goerli, is `0xEE36eaaD94d1Cc1d0eccaDb55C38bFfB6Be06C77`.
```typescript
import { ATTESTATION_STATION_ADDRESS } from '@eth-optimism/atst'
......@@ -201,7 +201,7 @@ import { ATTESTATION_STATION_ADDRESS } from '@eth-optimism/atst'
#### `abi`
The abi of the attestation station contract
The abi of the AttestationStation contract
```typescript
import { abi } from '@eth-optimism/atst'
......@@ -265,24 +265,26 @@ const bigNumberAttestation = stringifyAttestationBytes(
)
```
**Note:** `writeAttestation` already does this for you so this is only needed if using a library other than the attestation station.
**Note:** `writeAttestation` already does this for you so this is only needed if using a library other than `atst`.
### React API
For react hooks we recommend using the [wagmi cli](https://wagmi.sh/cli/getting-started) with the [etherscan plugin](https://wagmi.sh/cli/plugins/etherscan) and [react plugin](https://wagmi.sh/cli/plugins/react) to automatically generate react hooks around the attestation station.
For react hooks we recommend using the [wagmi cli](https://wagmi.sh/cli/getting-started) with the [etherscan plugin](https://wagmi.sh/cli/plugins/etherscan) and [react plugin](https://wagmi.sh/cli/plugins/react) to automatically generate react hooks around AttestationStation.
Use `createKey` and `createValue` to convert your raw keys and values into bytes that can be used in the attestation station contract calls.
Use `parseString`, `parseBool`, `parseAddress` and `parseNumber` to convert values returned by attestation station to their correct data type.
Use `createKey` and `createValue` to convert your raw keys and values into bytes that can be used in AttestationStation contract calls.
Use `parseString`, `parseBool`, `parseAddress` and `parseNumber` to convert values returned by AttestationStation to their correct data type.
For convenience we also [export the hooks](../src/react.ts) here:
For convenience we also [export the hooks here](../src/react.ts):
- `useAttestationStationAttestation` - Reads attestations with useContractRead
- `useAttestationStationVersion` - Reads attestation version
- `useAttestationStationAttest` - Wraps useContractWrite with attestation station abi calling attest
- `usePrepareAttestationStationAttest` - Wraps usePrepare with attestation station abi calling attest
- `useAttestationStationAttest` - Wraps useContractWrite with AttestationStation abi calling attest
- `usePrepareAttestationStationAttest` - Wraps usePrepare with AttestationStation abi calling attest
- `useAttestationStationAttestationCreatedEvent` - Wraps useContractEvents for Created events
## Tutorial
For a tutorial on using the attestation station in general, see our tutorial as well as other Optimism-related tutorials in our [optimism-tutorial](https://github.com/ethereum-optimism/optimism-tutorial/tree/main/ecosystem/attestation-station#key-values) repo.
- [General atst tutorial](https://github.com/ethereum-optimism/optimism-tutorial/tree/main/ecosystem/attestation-station).
- [React atst starter](https://github.com/ethereum-optimism/optimism-starter).
......@@ -59,5 +59,15 @@
"@wagmi/core": "^0.9.2",
"@wagmi/cli": "~0.1.5",
"wagmi": "~0.11.0"
}
},
"keywords": [
"react",
"hooks",
"eth",
"ethereum",
"dapps",
"web3",
"optimism",
"attestation"
]
}
......@@ -8,27 +8,22 @@ import "./CommonTest.t.sol";
import { CrossDomainMessenger } from "../universal/CrossDomainMessenger.sol";
import { ResourceMetering } from "../L1/ResourceMetering.sol";
uint128 constant INITIAL_BASE_FEE = 1_000_000_000;
// Free function for setting the prevBaseFee param in the OptimismPortal.
function setPrevBaseFee(
Vm _vm,
address _op,
uint128 _prevBaseFee
) {
_vm.store(
address(_op),
bytes32(uint256(1)),
bytes32(
abi.encode(
ResourceMetering.ResourceParams({
prevBaseFee: _prevBaseFee,
prevBoughtGas: 0,
prevBlockNum: uint64(block.number)
})
)
)
);
_vm.store(address(_op), bytes32(uint256(1)), bytes32((block.number << 192) | _prevBaseFee));
}
contract SetPrevBaseFee_Test is Portal_Initializer {
function test_setPrevBaseFee_succeeds() external {
setPrevBaseFee(vm, address(op), 100 gwei);
(uint128 prevBaseFee, , uint64 prevBlockNum) = op.params();
assertEq(uint256(prevBaseFee), 100 gwei);
assertEq(uint256(prevBlockNum), block.number);
}
}
// Tests for obtaining pure gas cost estimates for commonly used functions.
......@@ -37,6 +32,8 @@ function setPrevBaseFee(
// In order to achieve this we make no assertions, and handle everything else in the setUp()
// function.
contract GasBenchMark_OptimismPortal is Portal_Initializer {
uint128 INITIAL_BASE_FEE;
// Reusable default values for a test withdrawal
Types.WithdrawalTransaction _defaultTx;
......@@ -76,7 +73,7 @@ contract GasBenchMark_OptimismPortal is Portal_Initializer {
}
// Get the system into a nice ready-to-use state.
function setUp() public override {
function setUp() public virtual override {
// Configure the oracle to return the output root we've prepared.
vm.warp(oracle.computeL2Timestamp(_proposedBlockNumber) + 1);
vm.prank(oracle.PROPOSER());
......@@ -88,6 +85,9 @@ contract GasBenchMark_OptimismPortal is Portal_Initializer {
oracle.FINALIZATION_PERIOD_SECONDS() +
1
);
INITIAL_BASE_FEE = op.INITIAL_BASE_FEE();
// Fund the portal so that we can withdraw ETH.
vm.deal(address(op), 0xFFFFFFFF);
}
......@@ -124,45 +124,83 @@ contract GasBenchMark_OptimismPortal is Portal_Initializer {
}
contract GasBenchMark_L1CrossDomainMessenger is Messenger_Initializer {
uint128 INITIAL_BASE_FEE;
function setUp() public virtual override {
super.setUp();
INITIAL_BASE_FEE = op.INITIAL_BASE_FEE();
}
function test_sendMessage_benchmark_0() external {
vm.pauseGasMetering();
setPrevBaseFee(vm, address(op), INITIAL_BASE_FEE);
// The amount of data typically sent during a bridge deposit.
bytes
memory data = hex"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff";
vm.resumeGasMetering();
L1Messenger.sendMessage(bob, data, uint32(100));
}
function test_sendMessage_benchmark_1() external {
setPrevBaseFee(vm, address(op), INITIAL_BASE_FEE);
vm.pauseGasMetering();
setPrevBaseFee(vm, address(op), 10 gwei);
// The amount of data typically sent during a bridge deposit.
bytes
memory data = hex"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff";
vm.resumeGasMetering();
L1Messenger.sendMessage(bob, data, uint32(100));
}
}
contract GasBenchMark_L1StandardBridge_Deposit is Bridge_Initializer {
uint128 INITIAL_BASE_FEE;
function setUp() public virtual override {
super.setUp();
INITIAL_BASE_FEE = op.INITIAL_BASE_FEE();
deal(address(L1Token), alice, 100000, true);
vm.startPrank(alice, alice);
L1Token.approve(address(L1Bridge), type(uint256).max);
}
function test_depositETH_benchmark_0() external {
vm.pauseGasMetering();
setPrevBaseFee(vm, address(op), INITIAL_BASE_FEE);
vm.resumeGasMetering();
L1Bridge.depositETH{ value: 500 }(50000, hex"");
}
function test_depositETH_benchmark_1() external {
setPrevBaseFee(vm, address(op), INITIAL_BASE_FEE);
vm.pauseGasMetering();
setPrevBaseFee(vm, address(op), 10 gwei);
vm.resumeGasMetering();
L1Bridge.depositETH{ value: 500 }(50000, hex"");
}
function test_depositERC20_benchmark_0() external {
L1Bridge.depositETH{ value: 500 }(50000, hex"");
vm.pauseGasMetering();
setPrevBaseFee(vm, address(op), INITIAL_BASE_FEE);
vm.resumeGasMetering();
L1Bridge.bridgeERC20({
_localToken: address(L1Token),
_remoteToken: address(L2Token),
_amount: 100,
_minGasLimit: 100_000,
_extraData: hex""
});
}
function test_depositERC20_benchmark_1() external {
setPrevBaseFee(vm, address(op), INITIAL_BASE_FEE);
L1Bridge.depositETH{ value: 500 }(50000, hex"");
vm.pauseGasMetering();
setPrevBaseFee(vm, address(op), 10 gwei);
vm.resumeGasMetering();
L1Bridge.bridgeERC20({
_localToken: address(L1Token),
_remoteToken: address(L2Token),
_amount: 100,
_minGasLimit: 100_000,
_extraData: hex""
});
}
}
......
......@@ -156,8 +156,8 @@ contract L2OutputOracle_Initializer is CommonTest {
contract Portal_Initializer is L2OutputOracle_Initializer {
// Test target
OptimismPortal opImpl;
OptimismPortal op;
OptimismPortal internal opImpl;
OptimismPortal internal op;
event WithdrawalFinalized(bytes32 indexed withdrawalHash, bool success);
event WithdrawalProven(
......@@ -177,14 +177,14 @@ contract Portal_Initializer is L2OutputOracle_Initializer {
abi.encodeWithSelector(OptimismPortal.initialize.selector, false)
);
op = OptimismPortal(payable(address(proxy)));
vm.label(address(op), "OptimismPortal");
}
}
contract Messenger_Initializer is L2OutputOracle_Initializer {
OptimismPortal op;
AddressManager addressManager;
L1CrossDomainMessenger L1Messenger;
L2CrossDomainMessenger L2Messenger =
contract Messenger_Initializer is Portal_Initializer {
AddressManager internal addressManager;
L1CrossDomainMessenger internal L1Messenger;
L2CrossDomainMessenger internal L2Messenger =
L2CrossDomainMessenger(Predeploys.L2_CROSS_DOMAIN_MESSENGER);
event SentMessage(
......@@ -220,17 +220,11 @@ contract Messenger_Initializer is L2OutputOracle_Initializer {
bytes data
);
event WithdrawalFinalized(bytes32 indexed, bool success);
event WhatHappened(bool success, bytes returndata);
function setUp() public virtual override {
super.setUp();
// Deploy the OptimismPortal
op = new OptimismPortal({ _l2Oracle: oracle, _guardian: guardian, _paused: false });
vm.label(address(op), "OptimismPortal");
// Deploy the address manager
vm.prank(multisig);
addressManager = new AddressManager();
......
......@@ -41,7 +41,7 @@ contract StandardBridgeTester is StandardBridge {
contract LegacyMintable is ERC20, ILegacyMintableERC20 {
constructor(string memory _name, string memory _ticker) ERC20(_name, _ticker) {}
function l1Token() external view returns (address) {
function l1Token() external pure returns (address) {
return address(0);
}
......@@ -55,7 +55,7 @@ contract LegacyMintable is ERC20, ILegacyMintableERC20 {
* check. Allows for testing against code that has been deployed,
* assuming a different compiler version is not a problem.
*/
function supportsInterface(bytes4 _interfaceId) external view returns (bool) {
function supportsInterface(bytes4 _interfaceId) external pure returns (bool) {
bytes4 firstSupportedInterface = bytes4(keccak256("supportsInterface(bytes4)")); // ERC165
bytes4 secondSupportedInterface = ILegacyMintableERC20.l1Token.selector ^
ILegacyMintableERC20.mint.selector ^
......
......@@ -141,16 +141,6 @@ abstract contract CrossDomainMessenger is
*/
uint64 public constant MIN_GAS_CALLDATA_OVERHEAD = 16;
/**
* @notice Minimum amount of gas required to relay a message.
*/
uint256 internal constant RELAY_GAS_REQUIRED = 45_000;
/**
* @notice Amount of gas held in reserve to guarantee that relay execution completes.
*/
uint256 internal constant RELAY_GAS_BUFFER = RELAY_GAS_REQUIRED - 5000;
/**
* @notice Address of the paired CrossDomainMessenger contract on the other chain.
*/
......@@ -367,16 +357,11 @@ abstract contract CrossDomainMessenger is
"CrossDomainMessenger: message has already been relayed"
);
require(
gasleft() >= _minGasLimit + RELAY_GAS_REQUIRED,
"CrossDomainMessenger: insufficient gas to relay message"
);
xDomainMsgSender = _sender;
bool success = SafeCall.call(_target, gasleft() - RELAY_GAS_BUFFER, _value, _message);
bool success = SafeCall.callWithMinGas(_target, _minGasLimit, _value, _message);
xDomainMsgSender = Constants.DEFAULT_L2_SENDER;
if (success == true) {
if (success) {
successfulMessages[versionedHash] = true;
emit RelayedMessage(versionedHash);
} else {
......