Commit e6748fc8 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into patch-1

parents 3e9d64ea 5a35df9d
---
'@eth-optimism/contracts-bedrock': minor
'@eth-optimism/contracts': minor
'@eth-optimism/sdk': minor
---
Update SDK contract addresses for Bedrock
@@ -293,7 +293,7 @@ jobs:
   contracts-bedrock-coverage:
     docker:
-      - image: ethereumoptimism/ci-builder:latest
+      - image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
     resource_class: large
     steps:
       - checkout
@@ -553,7 +553,7 @@ jobs:
   sdk-next-tests:
     docker:
-      - image: ethereumoptimism/ci-builder:latest
+      - image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
     resource_class: large
     steps:
       - checkout
@@ -700,7 +700,7 @@ jobs:
   atst-tests:
     docker:
-      - image: ethereumoptimism/ci-builder:latest
+      - image: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest
     resource_class: large
     steps:
       - checkout
...
@@ -5,12 +5,14 @@
   "main": "index.js",
   "scripts": {
     "dev": "vuepress dev src",
-    "build": "vuepress build src"
+    "build": "vuepress build src",
+    "preview": "yarn build && serve -s src/.vuepress/dist -p 8080"
   },
   "license": "MIT",
   "devDependencies": {
     "@vuepress/plugin-medium-zoom": "^1.8.2",
     "@vuepress/plugin-pwa": "^1.9.7",
+    "serve": "^14.2.0",
     "vuepress": "^1.8.2",
     "vuepress-plugin-plausible-analytics": "^0.2.1",
     "vuepress-theme-hope": "^1.22.0"
...
@@ -13,7 +13,12 @@ export default ({ router }) => {
   // the refresh button. For more details see:
   // https://linear.app/optimism/issue/FE-1003/investigate-archive-issue-on-docs
   const registerAutoReload = () => {
-    event.$on('sw-updated', e => e.skipWaiting().then(() => {
-      location.reload(true);
-    }))
+    event.$on('sw-updated', e => {
+      e.skipWaiting().then(() =>
+        {
+          if (typeof location !== 'undefined')
+            location.reload(true);
+        }
+      )
+    })
   }
This diff is collapsed.
@@ -368,6 +368,7 @@ func (s *SyncClient) onRangeRequest(ctx context.Context, req rangeRequest) {
 		}
 		if _, ok := s.inFlight[num]; ok {
+			log.Debug("request still in-flight, not rescheduling sync request", "num", num)
 			continue // request still in flight
 		}
 		pr := peerRequest{num: num, complete: new(atomic.Bool)}
...
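For context on the added `log.Debug` line: `onRangeRequest` already skips any block number that has an in-flight request, and the new statement simply makes that skip visible when debugging stalled range syncs. Below is a minimal, self-contained Go sketch of the same de-duplication pattern; `scheduleRange` and its types are hypothetical stand-ins for illustration only, not the real `SyncClient` scheduler (which also handles quarantine, rate limits, and peer selection).

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// peerRequest mirrors the shape used above: a block number plus a flag that is
// flipped once a peer finishes (or fails) the request.
type peerRequest struct {
	num      uint64
	complete *atomic.Bool
}

// scheduleRange skips block numbers that already have an in-flight request,
// logging the skip instead of queueing duplicate work.
func scheduleRange(inFlight map[uint64]*peerRequest, start, end uint64) []*peerRequest {
	var scheduled []*peerRequest
	for num := start; num < end; num++ {
		if _, ok := inFlight[num]; ok {
			fmt.Println("request still in-flight, not rescheduling sync request", "num", num)
			continue // request still in flight
		}
		pr := &peerRequest{num: num, complete: new(atomic.Bool)}
		inFlight[num] = pr
		scheduled = append(scheduled, pr)
	}
	return scheduled
}

func main() {
	// Block 12 is already being fetched; blocks 10, 11, 13, 14 get scheduled.
	inFlight := map[uint64]*peerRequest{12: {num: 12, complete: new(atomic.Bool)}}
	for _, pr := range scheduleRange(inFlight, 10, 15) {
		fmt.Println("scheduled block", pr.num)
	}
}
```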
@@ -3,7 +3,9 @@ package p2p
 import (
 	"context"
 	"math/big"
+	"sync"
 	"testing"
+	"time"
 
 	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/network"
@@ -29,7 +31,42 @@ func (fn mockPayloadFn) PayloadByNumber(_ context.Context, number uint64) (*eth.
 var _ L2Chain = mockPayloadFn(nil)
 
-func setupSyncTestData(length uint64) (*rollup.Config, map[uint64]*eth.ExecutionPayload, func(i uint64) eth.L2BlockRef) {
+type syncTestData struct {
+	sync.RWMutex
+	payloads map[uint64]*eth.ExecutionPayload
+}
+
+func (s *syncTestData) getPayload(i uint64) (payload *eth.ExecutionPayload, ok bool) {
+	s.RLock()
+	defer s.RUnlock()
+	payload, ok = s.payloads[i]
+	return payload, ok
+}
+
+func (s *syncTestData) deletePayload(i uint64) {
+	s.Lock()
+	defer s.Unlock()
+	delete(s.payloads, i)
+}
+
+func (s *syncTestData) addPayload(payload *eth.ExecutionPayload) {
+	s.Lock()
+	defer s.Unlock()
+	s.payloads[uint64(payload.BlockNumber)] = payload
+}
+
+func (s *syncTestData) getBlockRef(i uint64) eth.L2BlockRef {
+	s.RLock()
+	defer s.RUnlock()
+	return eth.L2BlockRef{
+		Hash:       s.payloads[i].BlockHash,
+		Number:     uint64(s.payloads[i].BlockNumber),
+		ParentHash: s.payloads[i].ParentHash,
+		Time:       uint64(s.payloads[i].Timestamp),
+	}
+}
+
+func setupSyncTestData(length uint64) (*rollup.Config, *syncTestData) {
 	// minimal rollup config to build mock blocks & verify their time.
 	cfg := &rollup.Config{
 		Genesis: rollup.Genesis{
@@ -57,15 +94,7 @@ func setupSyncTestData(length uint64) (*rollup.Config, *syncTestData) {
 		payloads[i] = payload
 	}
 
-	l2Ref := func(i uint64) eth.L2BlockRef {
-		return eth.L2BlockRef{
-			Hash:       payloads[i].BlockHash,
-			Number:     uint64(payloads[i].BlockNumber),
-			ParentHash: payloads[i].ParentHash,
-			Time:       uint64(payloads[i].Timestamp),
-		}
-	}
-	return cfg, payloads, l2Ref
+	return cfg, &syncTestData{payloads: payloads}
 }
 
 func TestSinglePeerSync(t *testing.T) {
@@ -73,11 +102,11 @@ func TestSinglePeerSync(t *testing.T) {
 	log := testlog.Logger(t, log.LvlError)
 
-	cfg, payloads, l2Ref := setupSyncTestData(25)
+	cfg, payloads := setupSyncTestData(25)
 
 	// Serving payloads: just load them from the map, if they exist
 	servePayload := mockPayloadFn(func(n uint64) (*eth.ExecutionPayload, error) {
-		p, ok := payloads[n]
+		p, ok := payloads.getPayload(n)
 		if !ok {
 			return nil, ethereum.NotFound
 		}
@@ -116,13 +145,13 @@ func TestSinglePeerSync(t *testing.T) {
 	defer cl.Close()
 
 	// request to start syncing between 10 and 20
-	require.NoError(t, cl.RequestL2Range(ctx, l2Ref(10), l2Ref(20)))
+	require.NoError(t, cl.RequestL2Range(ctx, payloads.getBlockRef(10), payloads.getBlockRef(20)))
 	// and wait for the sync results to come in (in reverse order)
 	for i := uint64(19); i > 10; i-- {
 		p := <-received
 		require.Equal(t, uint64(p.BlockNumber), i, "expecting payloads in order")
-		exp, ok := payloads[uint64(p.BlockNumber)]
+		exp, ok := payloads.getPayload(uint64(p.BlockNumber))
 		require.True(t, ok, "expecting known payload")
 		require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
 	}
@@ -131,14 +160,14 @@ func TestMultiPeerSync(t *testing.T) {
 func TestMultiPeerSync(t *testing.T) {
 	t.Parallel() // Takes a while, but can run in parallel
 
-	log := testlog.Logger(t, log.LvlError)
+	log := testlog.Logger(t, log.LvlDebug)
 
-	cfg, payloads, l2Ref := setupSyncTestData(100)
+	cfg, payloads := setupSyncTestData(100)
 
 	setupPeer := func(ctx context.Context, h host.Host) (*SyncClient, chan *eth.ExecutionPayload) {
 		// Serving payloads: just load them from the map, if they exist
 		servePayload := mockPayloadFn(func(n uint64) (*eth.ExecutionPayload, error) {
-			p, ok := payloads[n]
+			p, ok := payloads.getPayload(n)
 			if !ok {
 				return nil, ethereum.NotFound
 			}
@@ -190,23 +219,25 @@ func TestMultiPeerSync(t *testing.T) {
 	clC.Start()
 	defer clC.Close()
 
-	// request to start syncing between 10 and 100
-	require.NoError(t, clA.RequestL2Range(ctx, l2Ref(10), l2Ref(90)))
+	// request to start syncing between 10 and 90
+	require.NoError(t, clA.RequestL2Range(ctx, payloads.getBlockRef(10), payloads.getBlockRef(90)))
 	// With such large range to request we are going to hit the rate-limits of B and C,
 	// but that means we'll balance the work between the peers.
-	p := <-recvA
-	exp, ok := payloads[uint64(p.BlockNumber)]
-	require.True(t, ok, "expecting known payload")
-	require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
+	for i := uint64(89); i > 10; i-- { // wait for all payloads
+		p := <-recvA
+		exp, ok := payloads.getPayload(uint64(p.BlockNumber))
+		require.True(t, ok, "expecting known payload")
+		require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
+	}
 
 	// now see if B can sync a range, and fill the gap with a re-request
-	bl25 := payloads[25] // temporarily remove it from the available payloads. This will create a gap
-	delete(payloads, uint64(25))
-	require.NoError(t, clB.RequestL2Range(ctx, l2Ref(20), l2Ref(30)))
+	bl25, _ := payloads.getPayload(25) // temporarily remove it from the available payloads. This will create a gap
+	payloads.deletePayload(25)
+	require.NoError(t, clB.RequestL2Range(ctx, payloads.getBlockRef(20), payloads.getBlockRef(30)))
 	for i := uint64(29); i > 25; i-- {
 		p := <-recvB
-		exp, ok := payloads[uint64(p.BlockNumber)]
+		exp, ok := payloads.getPayload(uint64(p.BlockNumber))
 		require.True(t, ok, "expecting known payload")
 		require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
 	}
@@ -215,13 +246,19 @@ func TestMultiPeerSync(t *testing.T) {
 	// client: WARN failed p2p sync request num=25 err="peer failed to serve request with code 1"
 	require.Zero(t, len(recvB), "there is a gap, should not see other payloads yet")
 	// Add back the block
-	payloads[25] = bl25
+	payloads.addPayload(bl25)
+	// race-condition fix: the request for 25 is expected to error, but is marked as complete in the peer-loop.
+	// But the re-request checks the status in the main loop, and it may thus look like it's still in-flight,
+	// and thus not run the new request.
+	// Wait till the failed request is recognized as marked as done, so the re-request actually runs.
+	for !clB.inFlight[25].Load() {
+		time.Sleep(time.Second)
+	}
 	// And request a range again, 25 is there now, and 21-24 should follow quickly (some may already have been fetched and wait in quarantine)
-	require.NoError(t, clB.RequestL2Range(ctx, l2Ref(20), l2Ref(26)))
+	require.NoError(t, clB.RequestL2Range(ctx, payloads.getBlockRef(20), payloads.getBlockRef(26)))
 	for i := uint64(25); i > 20; i-- {
 		p := <-recvB
-		exp, ok := payloads[uint64(p.BlockNumber)]
+		exp, ok := payloads.getPayload(uint64(p.BlockNumber))
 		require.True(t, ok, "expecting known payload")
 		require.Equal(t, exp.BlockHash, p.BlockHash, "expecting the correct payload")
 	}
...
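A side note on the workaround above: the `for !clB.inFlight[25].Load() { time.Sleep(time.Second) }` loop is an unbounded busy-wait, so if the flag never flips the test hangs until the suite's global timeout. testify's `require.Eventually` expresses the same poll with an explicit deadline. The sketch below only illustrates that alternative under the assumption that a bounded wait is acceptable; it is not what this commit does, and the goroutine stands in for the peer loop that eventually marks the failed request as done.

```go
package p2p_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// TestEventuallyFlag demonstrates a bounded wait on an atomic flag: poll until
// it flips, but fail the test after a deadline instead of hanging forever.
func TestEventuallyFlag(t *testing.T) {
	done := new(atomic.Bool)

	// Something asynchronous eventually marks the work as complete, standing in
	// for the peer loop marking the request for block 25 as done.
	go func() {
		time.Sleep(100 * time.Millisecond)
		done.Store(true)
	}()

	require.Eventually(t, func() bool { return done.Load() },
		5*time.Second, 10*time.Millisecond,
		"expected the flag to be set before the deadline")
}
```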
@@ -187,7 +187,7 @@ func NewL2OutputSubmitter(cfg Config, l log.Logger, m metrics.Metricer) (*L2Outp
 	l2ooContract, err := bindings.NewL2OutputOracleCaller(cfg.L2OutputOracleAddr, cfg.L1Client)
 	if err != nil {
 		cancel()
-		return nil, err
+		return nil, fmt.Errorf("failed to create L2OO at address %s: %w", cfg.L2OutputOracleAddr, err)
 	}
 
 	cCtx, cCancel := context.WithTimeout(ctx, cfg.NetworkTimeout)
...
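The proposer change above replaces a bare `return nil, err` with a wrapped error so the offending `L2OutputOracleAddr` shows up in the failure message (this assumes `fmt` is imported in that file, which the excerpt does not show). Because the wrap uses `%w`, callers can still match the underlying cause with `errors.Is`/`errors.As`. A small stand-alone illustration, with a hypothetical `newCaller` in place of `bindings.NewL2OutputOracleCaller`:

```go
package main

import (
	"errors"
	"fmt"
)

var errNoCode = errors.New("no contract code at given address")

// newCaller is a stand-in for bindings.NewL2OutputOracleCaller failing; it
// always returns the underlying error here so the wrapping is visible.
func newCaller(addr string) error {
	// Wrap with %w: the message gains context (the address) while the
	// original error remains available to errors.Is / errors.As.
	return fmt.Errorf("failed to create L2OO at address %s: %w", addr, errNoCode)
}

func main() {
	err := newCaller("0xE6Dfba0953616Bacab0c9A8ecb3a9BBa77FC15c0")
	fmt.Println(err)                       // contextualized message
	fmt.Println(errors.Is(err, errNoCode)) // true: the cause is preserved
}
```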
// SPDX-License-Identifier: MIT
pragma solidity 0.8.15;

import { Test } from "forge-std/Test.sol";
import { StdInvariant } from "forge-std/StdInvariant.sol";
import { Encoding } from "../../libraries/Encoding.sol";
import { Hashing } from "../../libraries/Hashing.sol";

contract Hash_CrossDomainHasher {
    bool public failedCrossDomainHashHighVersion;
    bool public failedCrossDomainHashV0;
    bool public failedCrossDomainHashV1;

    /**
     * @notice Takes the necessary parameters to perform a cross domain hash with a randomly
     *         generated version. Only schema versions 0 and 1 are supported and all others should revert.
     */
    function hashCrossDomainMessageHighVersion(
        uint16 _version,
        uint240 _nonce,
        address _sender,
        address _target,
        uint256 _value,
        uint256 _gasLimit,
        bytes memory _data
    ) external {
        // generate the versioned nonce
        uint256 encodedNonce = Encoding.encodeVersionedNonce(_nonce, _version);

        // hash the cross domain message. we don't need to store the result since the function
        // validates and should revert if an invalid version (>1) is encoded
        Hashing.hashCrossDomainMessage(encodedNonce, _sender, _target, _value, _gasLimit, _data);

        // check that execution never makes it this far for an invalid version
        if (_version > 1) {
            failedCrossDomainHashHighVersion = true;
        }
    }

    /**
     * @notice Takes the necessary parameters to perform a cross domain hash using the v0 schema
     *         and compares the output of a call to the unversioned function to the v0 function directly
     */
    function hashCrossDomainMessageV0(
        uint240 _nonce,
        address _sender,
        address _target,
        uint256 _value,
        uint256 _gasLimit,
        bytes memory _data
    ) external {
        // generate the versioned nonce with the version set to 0
        uint256 encodedNonce = Encoding.encodeVersionedNonce(_nonce, 0);

        // hash the cross domain message using the unversioned and versioned functions for
        // comparison
        bytes32 sampleHash1 = Hashing.hashCrossDomainMessage(
            encodedNonce,
            _sender,
            _target,
            _value,
            _gasLimit,
            _data
        );
        bytes32 sampleHash2 = Hashing.hashCrossDomainMessageV0(
            _target,
            _sender,
            _data,
            encodedNonce
        );

        // check that the output of both functions matches
        if (sampleHash1 != sampleHash2) {
            failedCrossDomainHashV0 = true;
        }
    }

    /**
     * @notice Takes the necessary parameters to perform a cross domain hash using the v1 schema
     *         and compares the output of a call to the unversioned function to the v1 function directly
     */
    function hashCrossDomainMessageV1(
        uint240 _nonce,
        address _sender,
        address _target,
        uint256 _value,
        uint256 _gasLimit,
        bytes memory _data
    ) external {
        // generate the versioned nonce with the version set to 1
        uint256 encodedNonce = Encoding.encodeVersionedNonce(_nonce, 1);

        // hash the cross domain message using the unversioned and versioned functions for
        // comparison
        bytes32 sampleHash1 = Hashing.hashCrossDomainMessage(
            encodedNonce,
            _sender,
            _target,
            _value,
            _gasLimit,
            _data
        );
        bytes32 sampleHash2 = Hashing.hashCrossDomainMessageV1(
            encodedNonce,
            _sender,
            _target,
            _value,
            _gasLimit,
            _data
        );

        // check that the output of both functions matches
        if (sampleHash1 != sampleHash2) {
            failedCrossDomainHashV1 = true;
        }
    }
}

contract Hashing_Invariant is StdInvariant, Test {
    Hash_CrossDomainHasher internal actor;

    function setUp() public {
        // Create a hasher actor.
        actor = new Hash_CrossDomainHasher();

        targetContract(address(actor));

        bytes4[] memory selectors = new bytes4[](3);
        selectors[0] = actor.hashCrossDomainMessageHighVersion.selector;
        selectors[1] = actor.hashCrossDomainMessageV0.selector;
        selectors[2] = actor.hashCrossDomainMessageV1.selector;
        FuzzSelector memory selector = FuzzSelector({ addr: address(actor), selectors: selectors });
        targetSelector(selector);
    }

    /**
     * @custom:invariant `hashCrossDomainMessage` reverts if `version` is > `1`.
     *
     * The `hashCrossDomainMessage` function should always revert if the `version` passed is > `1`.
     */
    function invariant_hash_xdomain_msg_high_version() external {
        // ASSERTION: hashCrossDomainMessage should always revert for a version > 1,
        // so the actor's failure flag must never be set.
        assertFalse(actor.failedCrossDomainHashHighVersion());
    }

    /**
     * @custom:invariant `version` = `0`: `hashCrossDomainMessage` and `hashCrossDomainMessageV0`
     *                   are equivalent.
     *
     * If the version passed is 0, `hashCrossDomainMessage` and `hashCrossDomainMessageV0` should be
     * equivalent.
     */
    function invariant_hash_xdomain_msg_0() external {
        // ASSERTION: A call to hashCrossDomainMessage and hashCrossDomainMessageV0
        // should always match when the version passed is 0
        assertFalse(actor.failedCrossDomainHashV0());
    }

    /**
     * @custom:invariant `version` = `1`: `hashCrossDomainMessage` and `hashCrossDomainMessageV1`
     *                   are equivalent.
     *
     * If the version passed is 1, `hashCrossDomainMessage` and `hashCrossDomainMessageV1` should be
     * equivalent.
     */
    function invariant_hash_xdomain_msg_1() external {
        // ASSERTION: A call to hashCrossDomainMessage and hashCrossDomainMessageV1
        // should always match when the version passed is 1
        assertFalse(actor.failedCrossDomainHashV1());
    }
}
# `Hashing` Invariants

## `hashCrossDomainMessage` reverts if `version` is > `1`.
**Test:** [`Hashing.t.sol#L141`](../contracts/test/invariants/Hashing.t.sol#L141)

The `hashCrossDomainMessage` function should always revert if the `version` passed is > `1`.

## `version` = `0`: `hashCrossDomainMessage` and `hashCrossDomainMessageV0` are equivalent.
**Test:** [`Hashing.t.sol#L153`](../contracts/test/invariants/Hashing.t.sol#L153)

If the version passed is 0, `hashCrossDomainMessage` and `hashCrossDomainMessageV0` should be equivalent.

## `version` = `1`: `hashCrossDomainMessage` and `hashCrossDomainMessageV1` are equivalent.
**Test:** [`Hashing.t.sol#L166`](../contracts/test/invariants/Hashing.t.sol#L166)

If the version passed is 1, `hashCrossDomainMessage` and `hashCrossDomainMessageV1` should be equivalent.

## `hashCrossDomainMessage` reverts if `version` is > `1`.
**Test:** [`FuzzHashing.sol#L120`](../contracts/echidna/FuzzHashing.sol#L120)
...
@@ -3,6 +3,20 @@ import {
   getDeployedContractDefinition,
 } from '@eth-optimism/contracts'
 import { predeploys as bedrockPredeploys } from '@eth-optimism/contracts-bedrock'
+import portalArtifactsMainnet from '@eth-optimism/contracts-bedrock/deployments/mainnet/OptimismPortalProxy.json'
+import portalArtifactsGoerli from '@eth-optimism/contracts-bedrock/deployments/goerli/OptimismPortalProxy.json'
+import l2OutputOracleArtifactsMainnet from '@eth-optimism/contracts-bedrock/deployments/mainnet/L2OutputOracleProxy.json'
+import l2OutputOracleArtifactsGoerli from '@eth-optimism/contracts-bedrock/deployments/goerli/L2OutputOracleProxy.json'
+
+const portalAddresses = {
+  mainnet: portalArtifactsMainnet,
+  goerli: portalArtifactsGoerli,
+}
+
+const l2OutputOracleAddresses = {
+  mainnet: l2OutputOracleArtifactsMainnet,
+  goerli: l2OutputOracleArtifactsGoerli,
+}
 
 import {
   L1ChainID,
@@ -64,6 +78,7 @@ export const DEFAULT_L2_CONTRACT_ADDRESSES: OEL2ContractsLike = {
  * @returns The L1 contracts for the given network.
  */
 const getL1ContractsByNetworkName = (network: string): OEL1ContractsLike => {
+  // TODO this doesn't code split and makes the sdk artifacts way too big
   const getDeployedAddress = (name: string) => {
     return getDeployedContractDefinition(name, network).address
   }
@@ -77,8 +92,8 @@ const getL1ContractsByNetworkName = (network: string): OEL1ContractsLike => {
     StateCommitmentChain: getDeployedAddress('StateCommitmentChain'),
     CanonicalTransactionChain: getDeployedAddress('CanonicalTransactionChain'),
     BondManager: getDeployedAddress('BondManager'),
-    OptimismPortal: '0x5b47E1A08Ea6d985D6649300584e6722Ec4B1383' as const,
-    L2OutputOracle: '0xE6Dfba0953616Bacab0c9A8ecb3a9BBa77FC15c0' as const,
+    OptimismPortal: portalAddresses[network].address,
+    L2OutputOracle: l2OutputOracleAddresses[network].address,
   }
 }
@@ -109,6 +124,7 @@ export const CONTRACT_ADDRESSES: {
     CanonicalTransactionChain:
       '0xCf7Ed3AccA5a467e9e704C703E8D87F634fB0Fc9' as const,
     BondManager: '0x5FC8d32690cc91D4c39d9d3abcBD16989F875707' as const,
+    // FIXME
     OptimismPortal: '0x0000000000000000000000000000000000000000' as const,
     L2OutputOracle: '0x0000000000000000000000000000000000000000' as const,
   },
...