Commit dcdcc757 authored by Kelvin Fichter

feat: update message-relayer to use the SDK

Removes all message relaying utilities found inside the message-relayer
package. All of these utilities are now present in the SDK. Also
re-implements the message-relayer service with the utilities found in
the SDK.
parent 4da19032
---
'@eth-optimism/message-relayer': minor
'@eth-optimism/integration-tests': patch
---
Removes message relaying utilities from the Message Relayer, to be replaced by the SDK
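For reference, the removed helpers map onto a small set of CrossChainMessenger calls from @eth-optimism/sdk. The sketch below is illustrative only (the wiring helper is hypothetical and not part of this commit); the same SDK calls appear throughout the diff that follows.

import { providers, Wallet } from 'ethers'
import { CrossChainMessenger } from '@eth-optimism/sdk'

// Hypothetical wiring helper; the relayer service and the integration tests
// build the same object from their own options.
const makeMessenger = async (
  l1Wallet: Wallet,
  l2Provider: providers.Provider
): Promise<CrossChainMessenger> => {
  return new CrossChainMessenger({
    l1SignerOrProvider: l1Wallet,
    l2SignerOrProvider: l2Provider,
    l1ChainId: (await l1Wallet.provider.getNetwork()).chainId,
  })
}

// Rough mapping from the removed utilities to the SDK surface used below:
//   getMessagesAndProofsForL2Transaction(...)   ->  messenger.getMessagesByTransaction(txHash)
//   manual state root batch / fraud proof poll  ->  messenger.getMessageStatus(message)
//   l1CrossDomainMessenger.relayMessage(...)    ->  messenger.finalizeMessage(message)
//   result.wait()                               ->  messenger.waitForMessageReceipt(message)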
...@@ -30,7 +30,6 @@
  "devDependencies": {
    "@eth-optimism/contracts": "0.5.14",
    "@eth-optimism/core-utils": "0.7.7",
-   "@eth-optimism/message-relayer": "0.2.18",
    "@eth-optimism/sdk": "0.2.2",
    "@ethersproject/abstract-provider": "^5.5.1",
    "@ethersproject/providers": "^5.5.3",
...
...@@ -3,8 +3,7 @@ import { Contract, utils, Wallet, providers } from 'ethers'
import { TransactionResponse } from '@ethersproject/providers'
import { getContractFactory, predeploys } from '@eth-optimism/contracts'
import { sleep } from '@eth-optimism/core-utils'
-import { getMessagesAndProofsForL2Transaction } from '@eth-optimism/message-relayer'
-import { CrossChainMessenger } from '@eth-optimism/sdk'
+import { CrossChainMessenger, MessageStatus } from '@eth-optimism/sdk'
/* Imports: Internal */
import {
...@@ -21,7 +20,6 @@ import {
  getL1Bridge,
  getL2Bridge,
  envConfig,
- DEFAULT_TEST_GAS_L1,
} from './utils'
import {
  CrossDomainMessagePair,
...@@ -184,67 +182,48 @@ export class OptimismEnv {
    tx = await tx
    await tx.wait()
-    let messagePairs = []
-    while (true) {
-      try {
-        messagePairs = await getMessagesAndProofsForL2Transaction(
-          l1Provider,
-          l2Provider,
-          this.scc.address,
-          predeploys.L2CrossDomainMessenger,
-          tx.hash
-        )
-        break
-      } catch (err) {
-        if (err.message.includes('unable to find state root batch for tx')) {
-          await sleep(5000)
-        } else {
-          throw err
-        }
-      }
+    const messages = await this.messenger.getMessagesByTransaction(tx)
+    if (messages.length === 0) {
+      return
    }
-    for (const { message, proof } of messagePairs) {
-      while (true) {
+    for (const message of messages) {
+      let status: MessageStatus
+      while (
+        status !== MessageStatus.READY_FOR_RELAY &&
+        status !== MessageStatus.RELAYED
+      ) {
+        status = await this.messenger.getMessageStatus(message)
+        await sleep(1000)
+      }
+      let relayed = false
+      while (!relayed) {
        try {
-          const result = await this.l1Messenger
-            .connect(this.l1Wallet)
-            .relayMessage(
-              message.target,
-              message.sender,
-              message.message,
-              message.messageNonce,
-              proof,
-              {
-                gasLimit: DEFAULT_TEST_GAS_L1 * 10,
-              }
-            )
-          await result.wait()
-          break
+          await this.messenger.finalizeMessage(message)
+          relayed = true
        } catch (err) {
-          if (err.message.includes('execution failed due to an exception')) {
-            await sleep(5000)
-          } else if (err.message.includes('Nonce too low')) {
-            await sleep(5000)
-          } else if (err.message.includes('transaction was replaced')) {
-            // this happens when we run tests in parallel
-            await sleep(5000)
-          } else if (
+          if (
+            err.message.includes('Nonce too low') ||
+            err.message.includes('transaction was replaced') ||
            err.message.includes(
              'another transaction with same nonce in the queue'
            )
          ) {
-            // this happens when we run tests in parallel
+            // Sometimes happens when we run tests in parallel.
            await sleep(5000)
          } else if (
            err.message.includes('message has already been received')
          ) {
-            break
+            // Message already relayed, this is fine.
+            relayed = true
          } else {
            throw err
          }
        }
      }
+      await this.messenger.waitForMessageReceipt(message)
    }
  }
}
...@@ -67,6 +67,7 @@
    "@codechecks/client": "^0.1.11",
    "@defi-wonderland/smock": "^2.0.2",
    "@eth-optimism/smock": "1.1.10",
+   "@nomiclabs/ethereumjs-vm": "^4.2.2",
    "@nomiclabs/hardhat-ethers": "^2.0.2",
    "@nomiclabs/hardhat-etherscan": "^2.1.6",
    "@nomiclabs/hardhat-waffle": "^2.0.1",
...
...@@ -5,7 +5,7 @@ import * as Sentry from '@sentry/node'
import * as dotenv from 'dotenv'
import Config from 'bcfg'
-import { MessageRelayerService } from '../service'
+import { MessageRelayerService } from '../src'
dotenv.config()
...@@ -59,14 +59,6 @@ const main = async () => {
    'get-logs-interval',
    parseInt(env.GET_LOGS_INTERVAL, 10) || 2000
  )
-  const L2_BLOCK_OFFSET = config.uint(
-    'l2-start-offset',
-    parseInt(env.L2_BLOCK_OFFSET, 10) || 1
-  )
-  const L1_START_OFFSET = config.uint(
-    'l1-start-offset',
-    parseInt(env.L1_BLOCK_OFFSET, 10) || 1
-  )
  const FROM_L2_TRANSACTION_INDEX = config.uint(
    'from-l2-transaction-index',
    parseInt(env.FROM_L2_TRANSACTION_INDEX, 10) || 0
...@@ -102,15 +94,11 @@ const main = async () => {
  }
  const service = new MessageRelayerService({
-   l1RpcProvider: l1Provider,
    l2RpcProvider: l2Provider,
-   addressManagerAddress: ADDRESS_MANAGER_ADDRESS,
    l1Wallet: wallet,
    relayGasLimit: RELAY_GAS_LIMIT,
    fromL2TransactionIndex: FROM_L2_TRANSACTION_INDEX,
    pollingInterval: POLLING_INTERVAL,
-   l2BlockOffset: L2_BLOCK_OFFSET,
-   l1StartOffset: L1_START_OFFSET,
    getLogsInterval: GET_LOGS_INTERVAL,
    logger,
  })
...
...@@ -7,19 +7,14 @@
  "files": [
    "dist/*"
  ],
- "bin": {
-   "withdraw": "./src/exec/withdraw.ts"
- },
  "scripts": {
-   "start": "ts-node ./src/exec/run.ts",
+   "start": "ts-node ./bin/run.ts",
    "build": "tsc -p ./tsconfig.build.json",
    "clean": "rimraf dist/ ./tsconfig.build.tsbuildinfo",
    "lint": "yarn lint:fix && yarn lint:check",
    "pre-commit": "lint-staged",
    "lint:fix": "yarn lint:check --fix",
-   "lint:check": "eslint . --max-warnings=0",
-   "test": "hardhat test --show-stack-traces",
-   "test:coverage": "nyc hardhat test && nyc merge .nyc_output coverage.json"
+   "lint:check": "eslint . --max-warnings=0"
  },
  "keywords": [
    "optimism",
...@@ -35,28 +30,19 @@
  },
  "dependencies": {
    "@eth-optimism/common-ts": "0.2.1",
-   "@eth-optimism/contracts": "0.5.14",
    "@eth-optimism/core-utils": "0.7.7",
+   "@eth-optimism/sdk": "^0.2.1",
    "@sentry/node": "^6.3.1",
    "bcfg": "^0.1.6",
    "dotenv": "^10.0.0",
-   "ethers": "^5.5.4",
-   "merkletreejs": "^0.2.18",
-   "rlp": "^2.2.6"
+   "ethers": "^5.5.4"
  },
  "devDependencies": {
-   "@eth-optimism/smock": "1.1.10",
-   "@nomiclabs/ethereumjs-vm": "^4",
    "@nomiclabs/hardhat-ethers": "^2.0.2",
    "@nomiclabs/hardhat-waffle": "^2.0.1",
-   "@types/chai": "^4.2.18",
-   "@types/chai-as-promised": "^7.1.4",
-   "@types/mocha": "^8.2.2",
    "@typescript-eslint/eslint-plugin": "^4.26.0",
    "@typescript-eslint/parser": "^4.26.0",
    "babel-eslint": "^10.1.0",
-   "chai": "^4.3.4",
-   "chai-as-promised": "^7.1.1",
    "eslint": "^7.27.0",
    "eslint-config-prettier": "^8.3.0",
    "eslint-plugin-import": "^2.23.4",
...@@ -68,8 +54,6 @@
    "ethereum-waffle": "^3.3.0",
    "hardhat": "^2.3.0",
    "lint-staged": "11.0.0",
-   "lodash": "^4.17.21",
-   "mocha": "^8.4.0",
    "prettier": "^2.3.1",
    "ts-node": "^10.0.0",
    "typescript": "^4.3.5"
...
#!/usr/bin/env ts-node
/**
* Utility that will relay all L2 => L1 messages created within a given L2 transaction.
*/
/* Imports: External */
import { ethers } from 'ethers'
import { predeploys, getContractInterface } from '@eth-optimism/contracts'
import { sleep } from '@eth-optimism/core-utils'
import dotenv from 'dotenv'
/* Imports: Internal */
import { getMessagesAndProofsForL2Transaction } from '../relay-tx'
dotenv.config()
const l1RpcProviderUrl = process.env.WITHDRAW__L1_RPC_URL
const l2RpcProviderUrl = process.env.WITHDRAW__L2_RPC_URL
const l1PrivateKey = process.env.WITHDRAW__L1_PRIVATE_KEY
const l1StateCommitmentChainAddress =
process.env.WITHDRAW__STATE_COMMITMENT_CHAIN_ADDRESS
const l1CrossDomainMessengerAddress =
process.env.WITHDRAW__L1_CROSS_DOMAIN_MESSENGER_ADDRESS
const main = async () => {
const l2TransactionHash = process.argv[2]
if (l2TransactionHash === undefined) {
throw new Error(`must provide l2 transaction hash`)
}
const l1RpcProvider = new ethers.providers.JsonRpcProvider(l1RpcProviderUrl)
const l1Wallet = new ethers.Wallet(l1PrivateKey, l1RpcProvider)
const l1WalletBalance = await l1Wallet.getBalance()
console.log(`relayer address: ${l1Wallet.address}`)
console.log(`relayer balance: ${ethers.utils.formatEther(l1WalletBalance)}`)
const l1CrossDomainMessenger = new ethers.Contract(
l1CrossDomainMessengerAddress,
getContractInterface('L1CrossDomainMessenger'),
l1Wallet
)
console.log(`searching for messages in transaction: ${l2TransactionHash}`)
let messagePairs = []
while (true) {
try {
messagePairs = await getMessagesAndProofsForL2Transaction(
l1RpcProviderUrl,
l2RpcProviderUrl,
l1StateCommitmentChainAddress,
predeploys.L2CrossDomainMessenger,
l2TransactionHash
)
break
} catch (err) {
if (err.message.includes('unable to find state root batch for tx')) {
console.log(`no state root batch for tx yet, trying again in 5s...`)
await sleep(5000)
} else {
throw err
}
}
}
console.log(`found ${messagePairs.length} messages`)
for (let i = 0; i < messagePairs.length; i++) {
console.log(`relaying message ${i + 1}/${messagePairs.length}`)
const { message, proof } = messagePairs[i]
while (true) {
try {
const result = await l1CrossDomainMessenger.relayMessage(
message.target,
message.sender,
message.message,
message.messageNonce,
proof
)
await result.wait()
console.log(
`relayed message ${i + 1}/${messagePairs.length}! L1 tx hash: ${
result.hash
}`
)
break
} catch (err) {
if (err.message.includes('execution failed due to an exception')) {
console.log(`fraud proof may not be elapsed, trying again in 5s...`)
await sleep(5000)
} else if (err.message.includes('message has already been received')) {
console.log(
`message ${i + 1}/${
messagePairs.length
} was relayed by someone else`
)
break
} else {
throw err
}
}
}
}
}
main()
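With this script removed, the same one-off relay can be driven through the SDK. The following is a hedged replacement sketch, reusing the WITHDRAW__* environment variables above; the retry policy is illustrative, and the MessageStatus checks stand in for the old string matching on 'unable to find state root batch' and 'execution failed due to an exception'.

import { ethers } from 'ethers'
import { sleep } from '@eth-optimism/core-utils'
import { CrossChainMessenger, MessageStatus } from '@eth-optimism/sdk'

const main = async () => {
  const l2TransactionHash = process.argv[2]
  if (l2TransactionHash === undefined) {
    throw new Error(`must provide l2 transaction hash`)
  }

  const l1Provider = new ethers.providers.JsonRpcProvider(
    process.env.WITHDRAW__L1_RPC_URL
  )
  const l2Provider = new ethers.providers.JsonRpcProvider(
    process.env.WITHDRAW__L2_RPC_URL
  )
  const l1Wallet = new ethers.Wallet(
    process.env.WITHDRAW__L1_PRIVATE_KEY,
    l1Provider
  )

  const messenger = new CrossChainMessenger({
    l1SignerOrProvider: l1Wallet,
    l2SignerOrProvider: l2Provider,
    l1ChainId: (await l1Provider.getNetwork()).chainId,
  })

  const messages = await messenger.getMessagesByTransaction(l2TransactionHash)
  console.log(`found ${messages.length} messages`)

  for (const message of messages) {
    // Wait for the state root batch to be published and the challenge window to pass.
    let status = await messenger.getMessageStatus(message)
    while (
      status !== MessageStatus.READY_FOR_RELAY &&
      status !== MessageStatus.RELAYED
    ) {
      console.log(`message not ready yet, trying again in 5s...`)
      await sleep(5000)
      status = await messenger.getMessageStatus(message)
    }

    if (status !== MessageStatus.RELAYED) {
      await messenger.finalizeMessage(message)
      await messenger.waitForMessageReceipt(message)
    }
  }
}

main()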
-export * from './relay-tx'
+export * from './service'
/* Imports: External */
import { ethers, Transaction } from 'ethers'
import {
fromHexString,
remove0x,
toHexString,
toRpcHexString,
} from '@eth-optimism/core-utils'
import { getContractInterface, predeploys } from '@eth-optimism/contracts'
import * as rlp from 'rlp'
import { MerkleTree } from 'merkletreejs'
// Number of blocks added to the L2 chain before the first L2 transaction. Genesis blocks are
// added to the chain to initialize the system. However, they create a discrepancy between the
// L2 block number and the index of the transaction that corresponds to that block number. For
// example, if there's 1 genesis block, then the transaction with an index of 0 corresponds to
// the block with index 1.
const NUM_L2_GENESIS_BLOCKS = 1
interface StateRootBatchHeader {
batchIndex: ethers.BigNumber
batchRoot: string
batchSize: ethers.BigNumber
prevTotalElements: ethers.BigNumber
extraData: string
}
interface StateRootBatch {
header: StateRootBatchHeader
stateRoots: string[]
}
interface CrossDomainMessage {
target: string
sender: string
message: string
messageNonce: number
}
interface CrossDomainMessageProof {
stateRoot: string
stateRootBatchHeader: StateRootBatchHeader
stateRootProof: {
index: number
siblings: string[]
}
stateTrieWitness: string
storageTrieWitness: string
}
interface CrossDomainMessagePair {
message: CrossDomainMessage
proof: CrossDomainMessageProof
}
interface StateTrieProof {
accountProof: string
storageProof: string
}
/**
* Finds all L2 => L1 messages triggered by a given L2 transaction, if the message exists.
*
* @param l2RpcProvider L2 RPC provider.
* @param l2CrossDomainMessengerAddress Address of the L2CrossDomainMessenger.
* @param l2TransactionHash Hash of the L2 transaction to find a message for.
* @returns Messages associated with the transaction.
*/
export const getMessagesByTransactionHash = async (
l2RpcProvider: ethers.providers.JsonRpcProvider,
l2CrossDomainMessengerAddress: string,
l2TransactionHash: string
): Promise<CrossDomainMessage[]> => {
// Complain if we can't find the given transaction.
const transaction = await l2RpcProvider.getTransaction(l2TransactionHash)
if (transaction === null) {
throw new Error(`unable to find tx with hash: ${l2TransactionHash}`)
}
const l2CrossDomainMessenger = new ethers.Contract(
l2CrossDomainMessengerAddress,
getContractInterface('L2CrossDomainMessenger'),
l2RpcProvider
)
// Find all SentMessage events created in the same block as the given transaction. This is
// reliable because we should only have one transaction per block.
const sentMessageEvents = await l2CrossDomainMessenger.queryFilter(
l2CrossDomainMessenger.filters.SentMessage(),
transaction.blockNumber,
transaction.blockNumber
)
// Decode the messages and turn them into a nicer struct.
const sentMessages = sentMessageEvents.map((sentMessageEvent) => {
return {
target: sentMessageEvent.args.target,
sender: sentMessageEvent.args.sender,
message: sentMessageEvent.args.message, // decoded message
messageNonce: sentMessageEvent.args.messageNonce.toNumber(),
}
})
return sentMessages
}
/**
* Encodes a cross domain message.
*
* @param message Message to encode.
* @returns Encoded message.
*/
const encodeCrossDomainMessage = (message: CrossDomainMessage): string => {
return getContractInterface('L2CrossDomainMessenger').encodeFunctionData(
'relayMessage',
[message.target, message.sender, message.message, message.messageNonce]
)
}
/**
* Finds the StateBatchAppended event associated with a given L2 transaction.
*
* @param l1RpcProvider L1 RPC provider.
* @param l1StateCommitmentChainAddress Address of the L1StateCommitmentChain.
* @param l2TransactionIndex Index of the L2 transaction to find a StateBatchAppended event for.
* @returns StateBatchAppended event for the given transaction or null if no such event exists.
*/
export const getStateBatchAppendedEventByTransactionIndex = async (
l1RpcProvider: ethers.providers.JsonRpcProvider,
l1StateCommitmentChainAddress: string,
l2TransactionIndex: number
): Promise<ethers.Event | null> => {
const l1StateCommitmentChain = new ethers.Contract(
l1StateCommitmentChainAddress,
getContractInterface('StateCommitmentChain'),
l1RpcProvider
)
const getStateBatchAppendedEventByBatchIndex = async (
index: number
): Promise<ethers.Event | null> => {
const eventQueryResult = await l1StateCommitmentChain.queryFilter(
l1StateCommitmentChain.filters.StateBatchAppended(index)
)
if (eventQueryResult.length === 0) {
return null
} else {
return eventQueryResult[0]
}
}
const isEventHi = (event: ethers.Event, index: number) => {
const prevTotalElements = event.args._prevTotalElements.toNumber()
return index < prevTotalElements
}
const isEventLo = (event: ethers.Event, index: number) => {
const prevTotalElements = event.args._prevTotalElements.toNumber()
const batchSize = event.args._batchSize.toNumber()
return index >= prevTotalElements + batchSize
}
const totalBatches: ethers.BigNumber =
await l1StateCommitmentChain.getTotalBatches()
if (totalBatches.eq(0)) {
return null
}
let lowerBound = 0
let upperBound = totalBatches.toNumber() - 1
let batchEvent: ethers.Event | null =
await getStateBatchAppendedEventByBatchIndex(upperBound)
if (isEventLo(batchEvent, l2TransactionIndex)) {
// Upper bound is too low, means this transaction doesn't have a corresponding state batch yet.
return null
} else if (!isEventHi(batchEvent, l2TransactionIndex)) {
// Upper bound is not too low and also not too high. This means the upper bound event is the
// one we're looking for! Return it.
return batchEvent
}
// Binary search to find the right event. The above checks will guarantee that the event does
// exist and that we'll find it during this search.
while (lowerBound < upperBound) {
const middleOfBounds = Math.floor((lowerBound + upperBound) / 2)
batchEvent = await getStateBatchAppendedEventByBatchIndex(middleOfBounds)
if (isEventHi(batchEvent, l2TransactionIndex)) {
upperBound = middleOfBounds
} else if (isEventLo(batchEvent, l2TransactionIndex)) {
lowerBound = middleOfBounds
} else {
break
}
}
return batchEvent
}
/**
* Finds the full state root batch associated with a given transaction index.
*
* @param l1RpcProvider L1 RPC provider.
* @param l1StateCommitmentChainAddress Address of the L1StateCommitmentChain.
* @param l2TransactionIndex Index of the L2 transaction to find a state root batch for.
* @returns State root batch associated with the given transaction index or null if no state root
* batch exists.
*/
export const getStateRootBatchByTransactionIndex = async (
l1RpcProvider: ethers.providers.JsonRpcProvider,
l1StateCommitmentChainAddress: string,
l2TransactionIndex: number
): Promise<StateRootBatch | null> => {
const l1StateCommitmentChain = new ethers.Contract(
l1StateCommitmentChainAddress,
getContractInterface('StateCommitmentChain'),
l1RpcProvider
)
const stateBatchAppendedEvent =
await getStateBatchAppendedEventByTransactionIndex(
l1RpcProvider,
l1StateCommitmentChainAddress,
l2TransactionIndex
)
if (stateBatchAppendedEvent === null) {
return null
}
const stateBatchTransaction = await stateBatchAppendedEvent.getTransaction()
const [stateRoots] = l1StateCommitmentChain.interface.decodeFunctionData(
'appendStateBatch',
stateBatchTransaction.data
)
return {
header: {
batchIndex: stateBatchAppendedEvent.args._batchIndex,
batchRoot: stateBatchAppendedEvent.args._batchRoot,
batchSize: stateBatchAppendedEvent.args._batchSize,
prevTotalElements: stateBatchAppendedEvent.args._prevTotalElements,
extraData: stateBatchAppendedEvent.args._extraData,
},
stateRoots,
}
}
/**
* Generates a Merkle proof (using the particular scheme we use within Lib_MerkleTree).
*
* @param leaves Leaves of the merkle tree.
* @param index Index to generate a proof for.
* @returns Merkle proof sibling leaves, as hex strings.
*/
export const getMerkleTreeProof = (
leaves: string[],
index: number
): string[] => {
// Our specific Merkle tree implementation requires that the number of leaves is a power of 2.
// If the number of given leaves is less than a power of 2, we need to round up to the next
// available power of 2. We fill the remaining space with the hash of bytes32(0).
const correctedTreeSize = Math.pow(2, Math.ceil(Math.log2(leaves.length)))
const parsedLeaves = []
for (let i = 0; i < correctedTreeSize; i++) {
if (i < leaves.length) {
parsedLeaves.push(leaves[i])
} else {
parsedLeaves.push(ethers.utils.keccak256('0x' + '00'.repeat(32)))
}
}
// merkletreejs prefers things to be Buffers.
const bufLeaves = parsedLeaves.map(fromHexString)
const tree = new MerkleTree(bufLeaves, (el: Buffer | string): Buffer => {
return fromHexString(ethers.utils.keccak256(el))
})
const proof = tree.getProof(bufLeaves[index], index).map((element: any) => {
return toHexString(element.data)
})
return proof
}
/**
* Generates a Merkle-Patricia trie proof for a given account and storage slot.
*
* @param l2RpcProvider L2 RPC provider.
* @param blockNumber Block number to generate the proof at.
* @param address Address to generate the proof for.
* @param slot Storage slot to generate the proof for.
* @returns Account proof and storage proof.
*/
const getStateTrieProof = async (
l2RpcProvider: ethers.providers.JsonRpcProvider,
blockNumber: number,
address: string,
slot: string
): Promise<StateTrieProof> => {
const proof = await l2RpcProvider.send('eth_getProof', [
address,
[slot],
toRpcHexString(blockNumber),
])
return {
accountProof: toHexString(rlp.encode(proof.accountProof)),
storageProof: toHexString(rlp.encode(proof.storageProof[0].proof)),
}
}
/**
* Finds all L2 => L1 messages sent in a given L2 transaction and generates proofs for each of
* those messages.
*
* @param l1RpcProvider L1 RPC provider.
* @param l2RpcProvider L2 RPC provider.
* @param l1StateCommitmentChainAddress Address of the StateCommitmentChain.
* @param l2CrossDomainMessengerAddress Address of the L2CrossDomainMessenger.
* @param l2TransactionHash L2 transaction hash to generate a relay transaction for.
* @returns An array of messages sent in the transaction and a proof of inclusion for each.
*/
export const getMessagesAndProofsForL2Transaction = async (
l1RpcProvider: ethers.providers.JsonRpcProvider | string,
l2RpcProvider: ethers.providers.JsonRpcProvider | string,
l1StateCommitmentChainAddress: string,
l2CrossDomainMessengerAddress: string,
l2TransactionHash: string
): Promise<CrossDomainMessagePair[]> => {
if (typeof l1RpcProvider === 'string') {
l1RpcProvider = new ethers.providers.JsonRpcProvider(l1RpcProvider)
}
if (typeof l2RpcProvider === 'string') {
l2RpcProvider = new ethers.providers.JsonRpcProvider(l2RpcProvider)
}
const l2Transaction = await l2RpcProvider.getTransaction(l2TransactionHash)
if (l2Transaction === null) {
throw new Error(`unable to find tx with hash: ${l2TransactionHash}`)
}
// Need to find the state batch for the given transaction. If no state batch has been published
// yet then we will not be able to generate a proof.
const batch = await getStateRootBatchByTransactionIndex(
l1RpcProvider,
l1StateCommitmentChainAddress,
l2Transaction.blockNumber - NUM_L2_GENESIS_BLOCKS
)
if (batch === null) {
throw new Error(
`unable to find state root batch for tx with hash: ${l2TransactionHash}`
)
}
// Adjust the transaction index based on the number of L2 genesis blocks we have. "Index" here
// refers to the position of the transaction within the *Canonical Transaction Chain*.
const l2TransactionIndex = l2Transaction.blockNumber - NUM_L2_GENESIS_BLOCKS
// Here the index refers to the position of the state root that corresponds to this transaction
// within the batch of state roots in which that state root was published.
const txIndexInBatch =
l2TransactionIndex - batch.header.prevTotalElements.toNumber()
// Find every message that was sent during this transaction. We'll then attach a proof for each.
const messages = await getMessagesByTransactionHash(
l2RpcProvider,
l2CrossDomainMessengerAddress,
l2TransactionHash
)
const messagePairs: CrossDomainMessagePair[] = []
for (const message of messages) {
// We need to calculate the specific storage slot that demonstrates that this message was
// actually included in the L2 chain. The following calculation is based on the fact that
// messages are stored in the following mapping on L2:
// https://github.com/ethereum-optimism/optimism/blob/c84d3450225306abbb39b4e7d6d82424341df2be/packages/contracts/contracts/L2/predeploys/OVM_L2ToL1MessagePasser.sol#L23
// You can read more about how Solidity storage slots are computed for mappings here:
// https://docs.soliditylang.org/en/v0.8.4/internals/layout_in_storage.html#mappings-and-dynamic-arrays
const messageSlot = ethers.utils.keccak256(
ethers.utils.keccak256(
encodeCrossDomainMessage(message) +
remove0x(l2CrossDomainMessengerAddress)
) + '00'.repeat(32)
)
// We need a Merkle trie proof for the given storage slot. This allows us to prove to L1 that
// the message was actually sent on L2.
const stateTrieProof = await getStateTrieProof(
l2RpcProvider,
l2Transaction.blockNumber,
predeploys.OVM_L2ToL1MessagePasser,
messageSlot
)
// State roots are published in batches to L1 and correspond 1:1 to transactions. We compute a
// Merkle root for these state roots so that we only need to store the minimum amount of
// information on-chain. So we need to create a Merkle proof for the specific state root that
// corresponds to this transaction.
const stateRootMerkleProof = getMerkleTreeProof(
batch.stateRoots,
txIndexInBatch
)
// We now have enough information to create the message proof.
const proof: CrossDomainMessageProof = {
stateRoot: batch.stateRoots[txIndexInBatch],
stateRootBatchHeader: batch.header,
stateRootProof: {
index: txIndexInBatch,
siblings: stateRootMerkleProof,
},
stateTrieWitness: stateTrieProof.accountProof,
storageTrieWitness: stateTrieProof.storageProof,
}
messagePairs.push({
message,
proof,
})
}
return messagePairs
}
/**
* Allows for proof generation of pre-regenesis L2->L1 messages by retrieving proofs from
* the genesis state (block 0) of the post-regenesis chain. This is required because the
* history is wiped during regenesis, so old inclusion proofs would no longer work.
*
* @param l1RpcProvider L1 RPC provider.
* @param l2RpcProvider L2 RPC provider of the POST-REGENESIS chain.
* @param legacyL2Transaction A PRE-REGENESIS L2 transaction which sent some L2->L1 messages.
* @param legacyMessages The L2->L1 messages which were sent by the legacy L2 transaction.
* @param l1StateCommitmentChainAddress Address of the POST-REGENESIS StateCommitmentChain.
* @param l2CrossDomainMessengerAddress Address of the L2CrossDomainMessenger.
* @returns An array of messages sent in the transaction and a proof of inclusion for each.
*/
export const getLegacyProofsForL2Transaction = async (
l1RpcProvider: ethers.providers.JsonRpcProvider | string,
l2RpcProvider: ethers.providers.JsonRpcProvider | string,
legacyL2Transaction: Transaction,
legacyMessages: CrossDomainMessage[],
l1StateCommitmentChainAddress: string,
l2CrossDomainMessengerAddress: string
): Promise<CrossDomainMessagePair[]> => {
if (typeof l1RpcProvider === 'string') {
l1RpcProvider = new ethers.providers.JsonRpcProvider(l1RpcProvider)
}
if (typeof l2RpcProvider === 'string') {
l2RpcProvider = new ethers.providers.JsonRpcProvider(l2RpcProvider)
}
// We will use the first ever batch submitted on the new chain, because the genesis state
// already contains all of those state roots and that's the earliest we'll be able to relay
// the withdrawal. This is 1 and not 0 because we don't commit the genesis state.
const postRegenesisBlockToRelayFrom = 1
const batch = await getStateRootBatchByTransactionIndex(
l1RpcProvider,
l1StateCommitmentChainAddress,
postRegenesisBlockToRelayFrom - NUM_L2_GENESIS_BLOCKS
)
if (batch === null) {
throw new Error(
`unable to find first state root batch for legacy withdrawal: ${
legacyL2Transaction?.hash || legacyL2Transaction
}`
)
}
// Here the index refers to the position of the state root that corresponds to this transaction
// within the batch of state roots in which that state root was published.
// Since this is a legacy TX, we get it from 0 always.
// (see comment on `postRegenesisBlockToRelayFrom` above)
const txIndexInBatch = 0
const messagePairs: CrossDomainMessagePair[] = []
for (const message of legacyMessages) {
// We need to calculate the specific storage slot that demonstrates that this message was
// actually included in the L2 chain. The following calculation is based on the fact that
// messages are stored in the following mapping on L2:
// https://github.com/ethereum-optimism/optimism/blob/c84d3450225306abbb39b4e7d6d82424341df2be/packages/contracts/contracts/L2/predeploys/OVM_L2ToL1MessagePasser.sol#L23
// You can read more about how Solidity storage slots are computed for mappings here:
// https://docs.soliditylang.org/en/v0.8.4/internals/layout_in_storage.html#mappings-and-dynamic-arrays
const messageSlot = ethers.utils.keccak256(
ethers.utils.keccak256(
encodeCrossDomainMessage(message) +
remove0x(l2CrossDomainMessengerAddress)
) + '00'.repeat(32)
)
// We need a Merkle trie proof for the given storage slot. This allows us to prove to L1 that
// the message was actually sent on L2.
// Because this is a legacy message, we just get it from index 0.
const stateTrieProof = await getStateTrieProof(
l2RpcProvider,
postRegenesisBlockToRelayFrom,
predeploys.OVM_L2ToL1MessagePasser,
messageSlot
)
// State roots are published in batches to L1 and correspond 1:1 to transactions. We compute a
// Merkle root for these state roots so that we only need to store the minimum amount of
// information on-chain. So we need to create a Merkle proof for the specific state root that
// corresponds to this transaction.
const stateRootMerkleProof = getMerkleTreeProof(
batch.stateRoots,
txIndexInBatch
)
// We now have enough information to create the message proof.
const proof: CrossDomainMessageProof = {
stateRoot: batch.stateRoots[txIndexInBatch],
stateRootBatchHeader: batch.header,
stateRootProof: {
index: txIndexInBatch,
siblings: stateRootMerkleProof,
},
stateTrieWitness: stateTrieProof.accountProof,
storageTrieWitness: stateTrieProof.storageProof,
}
messagePairs.push({
message,
proof,
})
}
return messagePairs
}
/* Imports: External */
-import { Contract, ethers, Wallet, BigNumber, providers } from 'ethers'
-import * as rlp from 'rlp'
-import { MerkleTree } from 'merkletreejs'
-import { fromHexString, sleep } from '@eth-optimism/core-utils'
+import { Wallet } from 'ethers'
+import { sleep } from '@eth-optimism/core-utils'
import { Logger, BaseService, Metrics } from '@eth-optimism/common-ts'
import {
-  loadContract,
-  loadContractFromManager,
-  predeploys,
-} from '@eth-optimism/contracts'
-
-/* Imports: Internal */
-import { StateRootBatchHeader, SentMessage, SentMessageProof } from './types'
+  CrossChainMessenger,
+  MessageStatus,
+  ProviderLike,
+} from '@eth-optimism/sdk'

interface MessageRelayerOptions {
-  // Providers for interacting with L1 and L2.
-  l1RpcProvider: providers.StaticJsonRpcProvider
-  l2RpcProvider: providers.StaticJsonRpcProvider
-
-  // Address of the AddressManager contract, used to resolve the various addresses we'll need
-  // within this service.
-  addressManagerAddress: string
+  /**
+   * Provider for interacting with L2.
+   */
+  l2RpcProvider: ProviderLike

-  // Wallet instance, used to sign and send the L1 relay transactions.
+  /**
+   * Wallet used to interact with L1.
+   */
  l1Wallet: Wallet

-  // Max gas to relay messages with.
-  relayGasLimit: number
+  /**
+   * Gas to relay transactions with. If not provided, will use the estimated gas for the relay
+   * transaction.
+   */
+  relayGasLimit?: number

-  // Height of the L2 transaction to start searching for L2->L1 messages.
+  /**
+   * Index of the first L2 transaction to start processing from.
+   */
  fromL2TransactionIndex?: number

-  // Interval in seconds to wait between loops.
+  /**
+   * Waiting interval between loops when the service is at the tip.
+   */
  pollingInterval?: number

-  // Number of blocks that L2 is "ahead" of transaction indices. Can happen if blocks are created
-  // on L2 after the genesis but before the first state commitment is published.
-  l2BlockOffset?: number
-
-  // L1 block to start querying events from. Recommended to set to the StateCommitmentChain deploy height
-  l1StartOffset?: number
-
-  // Number of blocks within each getLogs query - max is 2000
+  /**
+   * Size of the block range to query when looking for new SentMessage events.
+   */
  getLogsInterval?: number

-  // A custom logger to transport logs via; default STDOUT
+  /**
+   * Logger to transport logs. Defaults to STDOUT.
+   */
  logger?: Logger

-  // A custom metrics tracker to manage metrics; default undefined
+  /**
+   * Metrics object to use. Defaults to no metrics.
+   */
  metrics?: Metrics
}

-const optionSettings = {
-  relayGasLimit: { default: 4_000_000 },
-  fromL2TransactionIndex: { default: 0 },
-  pollingInterval: { default: 5000 },
-  l2BlockOffset: { default: 1 },
-  l1StartOffset: { default: 0 },
-  getLogsInterval: { default: 2000 },
-}
-
export class MessageRelayerService extends BaseService<MessageRelayerOptions> {
  constructor(options: MessageRelayerOptions) {
-    super('Message_Relayer', options, optionSettings)
+    super('Message_Relayer', options, {
+      relayGasLimit: {
+        default: 4_000_000,
+      },
+      fromL2TransactionIndex: {
+        default: 0,
+      },
+      pollingInterval: {
+        default: 5000,
+      },
+      getLogsInterval: {
+        default: 2000,
+      },
+    })
  }

  private state: {
-    lastFinalizedTxHeight: number
-    nextUnfinalizedTxHeight: number
-    lastQueriedL1Block: number
-    eventCache: ethers.Event[]
-    Lib_AddressManager: Contract
-    StateCommitmentChain: Contract
-    L1CrossDomainMessenger: Contract
-    L2CrossDomainMessenger: Contract
-    OVM_L2ToL1MessagePasser: Contract
-  }
+    messenger: CrossChainMessenger
+    highestCheckedL2Tx: number
+  } = {} as any

  protected async _init(): Promise<void> {
    this.logger.info('Initializing message relayer', {
      relayGasLimit: this.options.relayGasLimit,
      fromL2TransactionIndex: this.options.fromL2TransactionIndex,
      pollingInterval: this.options.pollingInterval,
-      l2BlockOffset: this.options.l2BlockOffset,
      getLogsInterval: this.options.getLogsInterval,
    })

-    // Need to improve this, sorry.
-    this.state = {} as any
-
-    const address = await this.options.l1Wallet.getAddress()
-    this.logger.info('Using L1 EOA', { address })
-
-    this.state.Lib_AddressManager = loadContract(
-      'Lib_AddressManager',
-      this.options.addressManagerAddress,
-      this.options.l1RpcProvider
-    )
-
-    this.logger.info('Connecting to StateCommitmentChain...')
-    this.state.StateCommitmentChain = await loadContractFromManager({
-      name: 'StateCommitmentChain',
-      Lib_AddressManager: this.state.Lib_AddressManager,
-      provider: this.options.l1RpcProvider,
-    })
-    this.logger.info('Connected to StateCommitmentChain', {
-      address: this.state.StateCommitmentChain.address,
-    })
-
-    this.logger.info('Connecting to L1CrossDomainMessenger...')
-    this.state.L1CrossDomainMessenger = await loadContractFromManager({
-      name: 'L1CrossDomainMessenger',
-      proxy: 'Proxy__OVM_L1CrossDomainMessenger',
-      Lib_AddressManager: this.state.Lib_AddressManager,
-      provider: this.options.l1RpcProvider,
-    })
-    this.logger.info('Connected to L1CrossDomainMessenger', {
-      address: this.state.L1CrossDomainMessenger.address,
-    })
-
-    this.logger.info('Connecting to L2CrossDomainMessenger...')
-    this.state.L2CrossDomainMessenger = await loadContractFromManager({
-      name: 'L2CrossDomainMessenger',
-      Lib_AddressManager: this.state.Lib_AddressManager,
-      provider: this.options.l2RpcProvider,
-    })
-    this.logger.info('Connected to L2CrossDomainMessenger', {
-      address: this.state.L2CrossDomainMessenger.address,
-    })
-
-    this.logger.info('Connecting to OVM_L2ToL1MessagePasser...')
-    this.state.OVM_L2ToL1MessagePasser = loadContract(
-      'OVM_L2ToL1MessagePasser',
-      predeploys.OVM_L2ToL1MessagePasser,
-      this.options.l2RpcProvider
-    )
-    this.logger.info('Connected to OVM_L2ToL1MessagePasser', {
-      address: this.state.OVM_L2ToL1MessagePasser.address,
-    })
-
-    this.logger.info('Connected to all contracts.')
-
-    this.state.lastQueriedL1Block = this.options.l1StartOffset
-    this.state.eventCache = []
-
-    this.state.lastFinalizedTxHeight = this.options.fromL2TransactionIndex || 0
-    this.state.nextUnfinalizedTxHeight =
-      this.options.fromL2TransactionIndex || 0
+    const l1Network = await this.options.l1Wallet.provider.getNetwork()
+    const l1ChainId = l1Network.chainId
+    this.state.messenger = new CrossChainMessenger({
+      l1SignerOrProvider: this.options.l1Wallet,
+      l2SignerOrProvider: this.options.l2RpcProvider,
+      l1ChainId,
+    })
+
+    this.state.highestCheckedL2Tx = this.options.fromL2TransactionIndex || 1
  }

  protected async _start(): Promise<void> {
...@@ -153,102 +98,84 @@ export class MessageRelayerService extends BaseService<MessageRelayerOptions> {
      await sleep(this.options.pollingInterval)
      try {
-        this.logger.info('Checking for newly finalized transactions...')
-        if (
-          !(await this._isTransactionFinalized(
-            this.state.nextUnfinalizedTxHeight
-          ))
-        ) {
-          this.logger.info('Did not find any newly finalized transactions', {
-            retryAgainInS: Math.floor(this.options.pollingInterval / 1000),
-          })
-          continue
-        }
-        this.state.lastFinalizedTxHeight = this.state.nextUnfinalizedTxHeight
-        while (
-          await this._isTransactionFinalized(this.state.nextUnfinalizedTxHeight)
-        ) {
-          const size = (
-            await this._getStateBatchHeader(this.state.nextUnfinalizedTxHeight)
-          ).batch.batchSize.toNumber()
-          this.logger.info(
-            'Found a batch of finalized transaction(s), checking for more...',
-            { batchSize: size }
-          )
-          this.state.nextUnfinalizedTxHeight += size
-          // Only deal with ~1000 transactions at a time so we can limit the amount of stuff we
-          // need to keep in memory. We operate on full batches at a time so the actual amount
-          // depends on the size of the batches we're processing.
-          const numTransactionsToProcess =
-            this.state.nextUnfinalizedTxHeight -
-            this.state.lastFinalizedTxHeight
-          if (numTransactionsToProcess > 1000) {
-            break
-          }
-        }
-        this.logger.info('Found finalized transactions', {
-          totalNumber:
-            this.state.nextUnfinalizedTxHeight -
-            this.state.lastFinalizedTxHeight,
-        })
-        const messages = await this._getSentMessages(
-          this.state.lastFinalizedTxHeight,
-          this.state.nextUnfinalizedTxHeight
-        )
-        for (const message of messages) {
-          this.logger.info('Found a message sent during transaction', {
-            index: message.parentTransactionIndex,
-          })
-          if (await this._wasMessageRelayed(message)) {
-            this.logger.info('Message has already been relayed, skipping.')
-            continue
-          }
-          this.logger.info(
-            'Message not yet relayed. Attempting to generate a proof...'
-          )
-          const proof = await this._getMessageProof(message)
-          this.logger.info(
-            'Successfully generated a proof. Attempting to relay to Layer 1...'
-          )
-          await this._relayMessageToL1(message, proof)
-        }
-        if (messages.length === 0) {
-          this.logger.info('Did not find any L2->L1 messages', {
-            retryAgainInS: Math.floor(this.options.pollingInterval / 1000),
-          })
-        } else {
-          // Clear the event cache to avoid keeping every single event in memory and eventually
-          // getting OOM killed. Messages are already sorted in ascending order so the last message
-          // will have the highest batch index.
-          const lastMessage = messages[messages.length - 1]
-          // Find the batch corresponding to the last processed message.
-          const lastProcessedBatch = await this._getStateBatchHeader(
-            lastMessage.parentTransactionIndex
-          )
-          // Remove any events from the cache for batches that should've been processed by now.
-          this.state.eventCache = this.state.eventCache.filter((event) => {
-            return event.args._batchIndex > lastProcessedBatch.batch.batchIndex
-          })
-        }
-        this.logger.info(
-          'Finished searching through newly finalized transactions',
-          {
-            retryAgainInS: Math.floor(this.options.pollingInterval / 1000),
-          }
-        )
+        // Loop strategy is as follows:
+        // 1. Get the current L2 tip
+        // 2. While we're not at the tip:
+        //    2.1. Get the transaction for the next L2 block to parse.
+        //    2.2. Find any messages sent in the L2 block.
+        //    2.3. Make sure all messages are ready to be relayed.
+        //    3.4. Relay the messages.
+        const l2BlockNumber =
+          await this.state.messenger.l2Provider.getBlockNumber()
+        while (this.state.highestCheckedL2Tx <= l2BlockNumber) {
+          this.logger.info(`checking L2 block ${this.state.highestCheckedL2Tx}`)
+          const block =
+            await this.state.messenger.l2Provider.getBlockWithTransactions(
+              this.state.highestCheckedL2Tx
+            )
+          // Should never happen.
+          if (block.transactions.length !== 1) {
+            throw new Error(
+              `got an unexpected number of transactions in block: ${block.number}`
+            )
+          }
+          const messages = await this.state.messenger.getMessagesByTransaction(
+            block.transactions[0].hash
+          )
+          // No messages in this transaction so we can move on to the next one.
+          if (messages.length === 0) {
+            this.state.highestCheckedL2Tx++
+            continue
+          }
+          // Make sure that all messages sent within the transaction are finalized. If any messages
+          // are not finalized, then we're going to break the loop which will trigger the sleep and
+          // wait for a few seconds before we check again to see if this transaction is finalized.
+          let isFinalized = true
+          for (const message of messages) {
+            const status = await this.state.messenger.getMessageStatus(message)
+            if (
+              status === MessageStatus.IN_CHALLENGE_PERIOD ||
+              status === MessageStatus.STATE_ROOT_NOT_PUBLISHED
+            ) {
+              isFinalized = false
+            }
+          }
+          if (!isFinalized) {
+            this.logger.info(
+              `tx not yet finalized, waiting: ${this.state.highestCheckedL2Tx}`
+            )
+            break
+          } else {
+            this.logger.info(
+              `tx is finalized, relaying: ${this.state.highestCheckedL2Tx}`
+            )
+          }
+          // If we got here then all messages in the transaction are finalized. Now we can relay
+          // each message to L1.
+          for (const message of messages) {
+            try {
+              await this.state.messenger.finalizeMessage(message)
+            } catch (err) {
+              if (err.message.includes('message has already been received')) {
+                // It's fine, the message was relayed by someone else
+              } else {
+                throw err
+              }
+            }
+            await this.state.messenger.waitForMessageReceipt(message)
+          }
+          // All messages have been relayed so we can move on to the next block.
+          this.state.highestCheckedL2Tx++
+        }
      } catch (err) {
        this.logger.error('Caught an unhandled error', {
          message: err.toString(),
...@@ -258,288 +185,4 @@ export class MessageRelayerService extends BaseService<MessageRelayerOptions> {
      }
    }
  }
private async _getStateBatchHeader(height: number): Promise<
| {
batch: StateRootBatchHeader
stateRoots: string[]
}
| undefined
> {
const getStateBatchAppendedEventForIndex = (
txIndex: number
): ethers.Event => {
return this.state.eventCache.find((cachedEvent) => {
const prevTotalElements = cachedEvent.args._prevTotalElements.toNumber()
const batchSize = cachedEvent.args._batchSize.toNumber()
// Height should be within the bounds of the batch.
return (
txIndex >= prevTotalElements &&
txIndex < prevTotalElements + batchSize
)
})
}
let startingBlock = this.state.lastQueriedL1Block
while (
startingBlock < (await this.options.l1RpcProvider.getBlockNumber())
) {
this.state.lastQueriedL1Block = startingBlock
this.logger.info('Querying events', {
startingBlock,
endBlock: startingBlock + this.options.getLogsInterval,
})
const events: ethers.Event[] =
await this.state.StateCommitmentChain.queryFilter(
this.state.StateCommitmentChain.filters.StateBatchAppended(),
startingBlock,
startingBlock + this.options.getLogsInterval
)
this.state.eventCache = this.state.eventCache.concat(events)
startingBlock += this.options.getLogsInterval
// We need to stop syncing early once we find the event we're looking for to avoid putting
// *all* events into memory at the same time. Otherwise we'll get OOM killed.
if (getStateBatchAppendedEventForIndex(height) !== undefined) {
break
}
}
const event = getStateBatchAppendedEventForIndex(height)
if (event === undefined) {
return undefined
}
const transaction = await this.options.l1RpcProvider.getTransaction(
event.transactionHash
)
const [stateRoots] =
this.state.StateCommitmentChain.interface.decodeFunctionData(
'appendStateBatch',
transaction.data
)
return {
batch: {
batchIndex: event.args._batchIndex,
batchRoot: event.args._batchRoot,
batchSize: event.args._batchSize,
prevTotalElements: event.args._prevTotalElements,
extraData: event.args._extraData,
},
stateRoots,
}
}
private async _isTransactionFinalized(height: number): Promise<boolean> {
this.logger.info('Checking if tx is finalized', { height })
const header = await this._getStateBatchHeader(height)
if (header === undefined) {
this.logger.info('No state batch header found.')
return false
} else {
this.logger.info('Got state batch header', { header })
}
return !(await this.state.StateCommitmentChain.insideFraudProofWindow(
header.batch
))
}
/**
* Returns all sent message events between some start height (inclusive) and an end height
* (exclusive).
*
* @param startHeight Start height to start finding messages from.
* @param endHeight End height to finish finding messages at.
* @returns All sent messages between start and end height, sorted by transaction index in
* ascending order.
*/
private async _getSentMessages(
startHeight: number,
endHeight: number
): Promise<SentMessage[]> {
const filter = this.state.L2CrossDomainMessenger.filters.SentMessage()
const events = await this.state.L2CrossDomainMessenger.queryFilter(
filter,
startHeight + this.options.l2BlockOffset,
endHeight + this.options.l2BlockOffset - 1
)
const messages = events.map((event) => {
const encodedMessage =
this.state.L2CrossDomainMessenger.interface.encodeFunctionData(
'relayMessage',
[
event.args.target,
event.args.sender,
event.args.message,
event.args.messageNonce,
]
)
return {
target: event.args.target,
sender: event.args.sender,
message: event.args.message,
messageNonce: event.args.messageNonce,
encodedMessage,
encodedMessageHash: ethers.utils.keccak256(encodedMessage),
parentTransactionIndex: event.blockNumber - this.options.l2BlockOffset,
parentTransactionHash: event.transactionHash,
}
})
// Sort in ascending order based on tx index and return.
return messages.sort((a, b) => {
return a.parentTransactionIndex - b.parentTransactionIndex
})
}
private async _wasMessageRelayed(message: SentMessage): Promise<boolean> {
return this.state.L1CrossDomainMessenger.successfulMessages(
message.encodedMessageHash
)
}
private async _getMessageProof(
message: SentMessage
): Promise<SentMessageProof> {
const messageSlot = ethers.utils.keccak256(
ethers.utils.keccak256(
message.encodedMessage +
this.state.L2CrossDomainMessenger.address.slice(2)
) + '00'.repeat(32)
)
// TODO: Complain if the proof doesn't exist.
const proof = await this.options.l2RpcProvider.send('eth_getProof', [
this.state.OVM_L2ToL1MessagePasser.address,
[messageSlot],
'0x' +
BigNumber.from(
message.parentTransactionIndex + this.options.l2BlockOffset
)
.toHexString()
.slice(2)
.replace(/^0+/, ''),
])
// TODO: Complain if the batch doesn't exist.
const header = await this._getStateBatchHeader(
message.parentTransactionIndex
)
const elements = []
for (
let i = 0;
i < Math.pow(2, Math.ceil(Math.log2(header.stateRoots.length)));
i++
) {
if (i < header.stateRoots.length) {
elements.push(header.stateRoots[i])
} else {
elements.push(ethers.utils.keccak256('0x' + '00'.repeat(32)))
}
}
const hash = (el: Buffer | string): Buffer => {
return Buffer.from(ethers.utils.keccak256(el).slice(2), 'hex')
}
const leaves = elements.map((element) => {
return fromHexString(element)
})
const tree = new MerkleTree(leaves, hash)
const index =
message.parentTransactionIndex - header.batch.prevTotalElements.toNumber()
const treeProof = tree.getProof(leaves[index], index).map((element) => {
return element.data
})
return {
stateRoot: header.stateRoots[index],
stateRootBatchHeader: header.batch,
stateRootProof: {
index,
siblings: treeProof,
},
stateTrieWitness: rlp.encode(proof.accountProof),
storageTrieWitness: rlp.encode(proof.storageProof[0].proof),
}
}
private async _relayMessageToL1(
message: SentMessage,
proof: SentMessageProof
): Promise<void> {
try {
this.logger.info('Dry-run, checking to make sure proof would succeed...')
await this.state.L1CrossDomainMessenger.connect(
this.options.l1Wallet
).callStatic.relayMessage(
message.target,
message.sender,
message.message,
message.messageNonce,
proof,
{
gasLimit: this.options.relayGasLimit,
}
)
this.logger.info('Proof should succeed. Submitting for real this time...')
} catch (err) {
this.logger.error('Proof would fail, skipping', {
message: err.toString(),
stack: err.stack,
code: err.code,
})
return
}
const result = await this.state.L1CrossDomainMessenger.connect(
this.options.l1Wallet
).relayMessage(
message.target,
message.sender,
message.message,
message.messageNonce,
proof,
{
gasLimit: this.options.relayGasLimit,
}
)
this.logger.info('Relay message transaction sent', {
transactionHash: result,
})
try {
const receipt = await result.wait()
this.logger.info('Relay message included in block', {
transactionHash: receipt.transactionHash,
blockNumber: receipt.blockNumber,
gasUsed: receipt.gasUsed.toString(),
confirmations: receipt.confirmations,
status: receipt.status,
})
} catch (err) {
this.logger.error('Real relay attempt failed, skipping.', {
message: err.toString(),
stack: err.stack,
code: err.code,
})
return
}
this.logger.info('Message successfully relayed to Layer 1!')
}
}
import { BigNumber } from 'ethers'
export interface StateRootBatchHeader {
batchIndex: BigNumber
batchRoot: string
batchSize: BigNumber
prevTotalElements: BigNumber
extraData: string
}
export interface SentMessage {
target: string
sender: string
message: string
messageNonce: number
encodedMessage: string
encodedMessageHash: string
parentTransactionIndex: number
parentTransactionHash: string
}
export interface SentMessageProof {
stateRoot: string
stateRootBatchHeader: StateRootBatchHeader
stateRootProof: StateRootProof
stateTrieWitness: string | Buffer
storageTrieWitness: string | Buffer
}
export interface StateRootProof {
index: number
siblings: string[]
}
/* External Imports */
import chai = require('chai')
import Mocha from 'mocha'
import { solidity } from 'ethereum-waffle'
import chaiAsPromised from 'chai-as-promised'
chai.use(solidity)
chai.use(chaiAsPromised)
const should = chai.should()
const expect = chai.expect
export { should, expect, Mocha }
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.9;
contract MockL2CrossDomainMessenger {
struct MessageData {
address target;
address sender;
bytes message;
uint256 messageNonce;
}
event SentMessage(
address indexed target,
address sender,
bytes message,
uint256 messageNonce,
uint256 gasLimit);
function emitSentMessageEvent(
MessageData memory _message
)
public
{
emit SentMessage(
_message.target,
_message.sender,
_message.message,
_message.messageNonce,
0
);
}
function emitMultipleSentMessageEvents(
MessageData[] memory _messages
)
public
{
for (uint256 i = 0; i < _messages.length; i++) {
emitSentMessageEvent(
_messages[i]
);
}
}
function doNothing() public {}
}
/* Imports: External */
import hre from 'hardhat'
import { Contract, Signer } from 'ethers'
import { getContractFactory } from '@eth-optimism/contracts'
import { smockit } from '@eth-optimism/smock'
import { toPlainObject } from 'lodash'
/* Imports: Internal */
import { expect } from '../setup'
import {
getMerkleTreeProof,
getMessagesAndProofsForL2Transaction,
getStateRootBatchByTransactionIndex,
getStateBatchAppendedEventByTransactionIndex,
getMessagesByTransactionHash,
} from '../../src/relay-tx'
describe('relay transaction generation functions', () => {
const ethers = (hre as any).ethers
const l1RpcProvider = ethers.provider
const l2RpcProvider = ethers.provider
let signer1: Signer
before(async () => {
;[signer1] = await ethers.getSigners()
})
let MockL2CrossDomainMessenger: Contract
beforeEach(async () => {
const factory = await ethers.getContractFactory(
'MockL2CrossDomainMessenger'
)
MockL2CrossDomainMessenger = await factory.deploy()
})
let StateCommitmentChain: Contract
beforeEach(async () => {
const factory1 = getContractFactory('Lib_AddressManager')
const factory2 = getContractFactory('ChainStorageContainer')
const factory3 = getContractFactory('StateCommitmentChain')
const mockBondManager = await smockit(getContractFactory('BondManager'))
const mockCanonicalTransactionChain = await smockit(
getContractFactory('CanonicalTransactionChain')
)
mockBondManager.smocked.isCollateralized.will.return.with(true)
mockCanonicalTransactionChain.smocked.getTotalElements.will.return.with(
999999
)
const AddressManager = await factory1.connect(signer1).deploy()
const ChainStorageContainer = await factory2
.connect(signer1)
.deploy(AddressManager.address, 'StateCommitmentChain')
StateCommitmentChain = await factory3
.connect(signer1)
.deploy(AddressManager.address, 0, 0)
await AddressManager.setAddress(
'ChainStorageContainer-SCC-batches',
ChainStorageContainer.address
)
await AddressManager.setAddress(
'StateCommitmentChain',
StateCommitmentChain.address
)
await AddressManager.setAddress('BondManager', mockBondManager.address)
await AddressManager.setAddress(
'CanonicalTransactionChain',
mockCanonicalTransactionChain.address
)
})
describe('getMessageByTransactionHash', () => {
it('should throw an error if a transaction with the given hash does not exist', async () => {
await expect(
getMessagesByTransactionHash(
l2RpcProvider,
MockL2CrossDomainMessenger.address,
ethers.constants.HashZero
)
).to.be.rejected
})
it('should return null if the transaction did not emit a SentMessage event', async () => {
const tx = await MockL2CrossDomainMessenger.doNothing()
expect(
await getMessagesByTransactionHash(
l2RpcProvider,
MockL2CrossDomainMessenger.address,
tx.hash
)
).to.deep.equal([])
})
it('should return the parsed event if the transaction emitted exactly one SentMessage event', async () => {
const message = {
target: ethers.constants.AddressZero,
sender: ethers.constants.AddressZero,
message: '0x',
messageNonce: 0,
}
const tx = await MockL2CrossDomainMessenger.emitSentMessageEvent(message)
expect(
await getMessagesByTransactionHash(
l2RpcProvider,
MockL2CrossDomainMessenger.address,
tx.hash
)
).to.deep.equal([message])
})
it('should return the parsed events if the transaction emitted more than one SentMessage event', async () => {
const messages = [
{
target: ethers.constants.AddressZero,
sender: ethers.constants.AddressZero,
message: '0x',
messageNonce: 0,
},
{
target: ethers.constants.AddressZero,
sender: ethers.constants.AddressZero,
message: '0x',
messageNonce: 1,
},
]
const tx = await MockL2CrossDomainMessenger.emitMultipleSentMessageEvents(
messages
)
expect(
await getMessagesByTransactionHash(
l2RpcProvider,
MockL2CrossDomainMessenger.address,
tx.hash
)
).to.deep.equal(messages)
})
})
describe('getStateBatchAppendedEventByTransactionIndex', () => {
it('should return null when there are no batches yet', async () => {
expect(
await getStateBatchAppendedEventByTransactionIndex(
l1RpcProvider,
StateCommitmentChain.address,
0
)
).to.equal(null)
})
it('should return null if a batch for the index does not exist', async () => {
// Should have a total of 1 element now.
await StateCommitmentChain.appendStateBatch(
[ethers.constants.HashZero],
0
)
expect(
await getStateBatchAppendedEventByTransactionIndex(
l1RpcProvider,
StateCommitmentChain.address,
1 // Index 0 is ok but 1 should return null
)
).to.equal(null)
})
it('should return the batch if the index is part of the first batch', async () => {
// 5 elements
await StateCommitmentChain.appendStateBatch(
[
ethers.constants.HashZero,
ethers.constants.HashZero,
ethers.constants.HashZero,
ethers.constants.HashZero,
ethers.constants.HashZero,
],
0
)
// Add another 5 so we have two batches and can isolate tests against the first.
await StateCommitmentChain.appendStateBatch(
[
ethers.constants.HashZero,
ethers.constants.HashZero,
ethers.constants.HashZero,
ethers.constants.HashZero,
ethers.constants.HashZero,
],
5
)
const event = await getStateBatchAppendedEventByTransactionIndex(
l1RpcProvider,
StateCommitmentChain.address,
1
)
expect(toPlainObject(event.args)).to.deep.include({
_batchIndex: ethers.BigNumber.from(0),
_batchSize: ethers.BigNumber.from(5),
_prevTotalElements: ethers.BigNumber.from(0),
})
})
it('should return the batch if the index is part of the last batch', async () => {
// 5 elements
await StateCommitmentChain.appendStateBatch(
[
ethers.constants.HashZero,
ethers.constants.HashZero,
ethers.constants.HashZero,
ethers.constants.HashZero,
ethers.constants.HashZero,
],
0
)
// Add another 5 so we have two batches and can isolate tests against the second.
await StateCommitmentChain.appendStateBatch(
[
ethers.constants.HashZero,
ethers.constants.HashZero,
ethers.constants.HashZero,
ethers.constants.HashZero,
ethers.constants.HashZero,
],
5
)
const event = await getStateBatchAppendedEventByTransactionIndex(
l1RpcProvider,
StateCommitmentChain.address,
7
)
expect(toPlainObject(event.args)).to.deep.include({
_batchIndex: ethers.BigNumber.from(1),
_batchSize: ethers.BigNumber.from(5),
_prevTotalElements: ethers.BigNumber.from(5),
})
})
for (const numBatches of [1, 2, 8]) {
const elementsPerBatch = 8
describe(`when there are ${numBatches} batch(es) of ${elementsPerBatch} elements each`, () => {
const totalElements = numBatches * elementsPerBatch
beforeEach(async () => {
for (let i = 0; i < numBatches; i++) {
await StateCommitmentChain.appendStateBatch(
new Array(elementsPerBatch).fill(ethers.constants.HashZero),
i * elementsPerBatch
)
}
})
for (let i = 0; i < totalElements; i += elementsPerBatch) {
it(`should be able to get the correct event for the ${i}th/st/rd/whatever element`, async () => {
const event = await getStateBatchAppendedEventByTransactionIndex(
l1RpcProvider,
StateCommitmentChain.address,
i
)
expect(toPlainObject(event.args)).to.deep.include({
_batchIndex: ethers.BigNumber.from(i / elementsPerBatch),
_batchSize: ethers.BigNumber.from(elementsPerBatch),
_prevTotalElements: ethers.BigNumber.from(i),
})
})
}
})
}
})
describe('getStateRootBatchByTransactionIndex', () => {
it('should return null if a batch for the index does not exist', async () => {
// Should have a total of 1 element now.
await StateCommitmentChain.appendStateBatch(
[ethers.constants.HashZero],
0
)
expect(
await getStateRootBatchByTransactionIndex(
l1RpcProvider,
StateCommitmentChain.address,
1 // Index 0 is ok but 1 should return null
)
).to.equal(null)
})
it('should return the full batch for a given index when it exists', async () => {
// Should have a total of 1 element now.
await StateCommitmentChain.appendStateBatch(
[ethers.constants.HashZero],
0
)
const batch = await getStateRootBatchByTransactionIndex(
l1RpcProvider,
StateCommitmentChain.address,
0 // Index 0 is ok but 1 should return null
)
expect(batch.header).to.deep.include({
batchIndex: ethers.BigNumber.from(0),
batchSize: ethers.BigNumber.from(1),
prevTotalElements: ethers.BigNumber.from(0),
})
expect(batch.stateRoots).to.deep.equal([ethers.constants.HashZero])
})
})
describe('makeRelayTransactionData', () => {
it('should throw an error if the transaction does not exist', async () => {
await expect(
getMessagesAndProofsForL2Transaction(
l1RpcProvider,
l2RpcProvider,
StateCommitmentChain.address,
MockL2CrossDomainMessenger.address,
ethers.constants.HashZero
)
).to.be.rejected
})
it('should throw an error if the transaction did not send a message', async () => {
const tx = await MockL2CrossDomainMessenger.doNothing()
await expect(
getMessagesAndProofsForL2Transaction(
l1RpcProvider,
l2RpcProvider,
StateCommitmentChain.address,
MockL2CrossDomainMessenger.address,
tx.hash
)
).to.be.rejected
})
it('should throw an error if the corresponding state batch has not been submitted', async () => {
const tx = await MockL2CrossDomainMessenger.emitSentMessageEvent({
target: ethers.constants.AddressZero,
sender: ethers.constants.AddressZero,
message: '0x',
messageNonce: 0,
})
await expect(
getMessagesAndProofsForL2Transaction(
l1RpcProvider,
l2RpcProvider,
StateCommitmentChain.address,
MockL2CrossDomainMessenger.address,
tx.hash
)
).to.be.rejected
})
// Unfortunately this is hard to test here because hardhat doesn't support eth_getProof.
// Because this function is embedded into the message relayer, we should be able to use
// integration tests to sufficiently test this.
it.skip('should otherwise return the encoded transaction data', () => {
// TODO?
})
})
})
describe('getMerkleTreeProof', () => {
let leaves: string[] = [
'the',
'quick',
'brown',
'fox',
'jumps',
'over',
'the',
'lazy',
'dog',
]
const index: number = 4
it('should generate a merkle tree proof from an odd number of leaves at the correct index', () => {
const expectedProof = [
'0x6f766572',
'0x123268ec1a3f9aac2bc68e899fe4329eefef783c76265722508b8abbfbf11440',
'0x12aaa1b2e09f26e14d86aa3b157b94cfeabe815e44b6742d00c47441a576b12d',
'0x297d90df3f77f93eefdeab4e9f6e9a074b41a3508f9d265e92e9b5449c7b11c8',
]
expect(getMerkleTreeProof(leaves, index)).to.deep.equal(expectedProof)
})
it('should generate a merkle tree proof from an even number of leaves at the correct index', () => {
const expectedProof = [
'0x6f766572',
'0x09e430fa7b513203dd9c74afd734267a73f64299d9dac61ef09e96c3b3b3fe96',
'0x12aaa1b2e09f26e14d86aa3b157b94cfeabe815e44b6742d00c47441a576b12d',
]
leaves = leaves.slice(0, leaves.length - 2)
expect(getMerkleTreeProof(leaves, index)).to.deep.equal(expectedProof)
})
})
...@@ -2311,7 +2311,7 @@
    "@nodelib/fs.scandir" "2.1.5"
    fastq "^1.6.0"

-"@nomiclabs/ethereumjs-vm@^4":
+"@nomiclabs/ethereumjs-vm@^4.2.2":
  version "4.2.2"
  resolved "https://registry.yarnpkg.com/@nomiclabs/ethereumjs-vm/-/ethereumjs-vm-4.2.2.tgz#2f8817113ca0fb6c44c1b870d0a809f0e026a6cc"
  integrity sha512-8WmX94mMcJaZ7/m7yBbyuS6B+wuOul+eF+RY9fBpGhNaUpyMR/vFIcDojqcWQ4Yafe1tMKY5LDu2yfT4NZgV4Q==
...