Commit 42e79775 authored by tre

Merge branch 'develop' of https://github.com/ethereum-optimism/optimism into develop

parents 269ed961 64cf3657
---
'@eth-optimism/contracts-periphery': patch
---
Change the type of the auth id on the Faucet contracts from bytes to bytes32
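In practice, callers now pass keccak256(abi.encodePacked(...)) of what was previously supplied as raw bytes, and the Faucet keys its timeouts and nonces mappings on that bytes32 value, as the diffs below show. A minimal, self-contained Solidity sketch of the id derivation (illustrative only; the contract name is hypothetical):

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

// Illustrative sketch only: how a caller derives the auth id before and after
// this change. The hashing mirrors the keccak256(abi.encodePacked(...)) calls
// in the updated Faucet tests below.
contract AuthIdExample {
    // Old scheme: the id was raw bytes, e.g. the packed recipient address.
    function oldId(address recipient) external pure returns (bytes memory) {
        return abi.encodePacked(recipient);
    }

    // New scheme: the id is the bytes32 keccak256 hash of that same encoding,
    // matching the bytes32 id field on Faucet.AuthParameters.
    function newId(address recipient) external pure returns (bytes32) {
        return keccak256(abi.encodePacked(recipient));
    }
}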
......@@ -70,9 +70,6 @@ func NewConfig(ctx *cli.Context, blockTime uint64) (*p2p.Config, error) {
return nil, fmt.Errorf("failed to load p2p topic scoring options: %w", err)
}
conf.ConnGater = p2p.DefaultConnGater
conf.ConnMngr = p2p.DefaultConnManager
conf.EnableReqRespSync = ctx.GlobalBool(flags.SyncReqRespFlag.Name)
return conf, nil
......
......@@ -106,9 +106,6 @@ type Config struct {
// Underlying store that hosts connection-gater and peerstore data.
Store ds.Batching
ConnGater func(conf *Config) (connmgr.ConnectionGater, error)
ConnMngr func(conf *Config) (connmgr.ConnManager, error)
EnableReqRespSync bool
}
......@@ -193,12 +190,6 @@ func (conf *Config) Check() error {
if conf.PeersLo == 0 || conf.PeersHi == 0 || conf.PeersLo > conf.PeersHi {
return fmt.Errorf("peers lo/hi tides are invalid: %d, %d", conf.PeersLo, conf.PeersHi)
}
if conf.ConnMngr == nil {
return errors.New("need a connection manager")
}
if conf.ConnGater == nil {
return errors.New("need a connection gater")
}
if conf.MeshD <= 0 || conf.MeshD > maxMeshParam {
return fmt.Errorf("mesh D param must not be 0 or exceed %d, but got %d", maxMeshParam, conf.MeshD)
}
......
......@@ -144,12 +144,12 @@ func (conf *Config) Host(log log.Logger, reporter metrics.Reporter) (host.Host,
return nil, fmt.Errorf("failed to set up peerstore with pub key: %w", err)
}
connGtr, err := conf.ConnGater(conf)
connGtr, err := DefaultConnGater(conf)
if err != nil {
return nil, fmt.Errorf("failed to open connection gater: %w", err)
}
connMngr, err := conf.ConnMngr(conf)
connMngr, err := DefaultConnManager(conf)
if err != nil {
return nil, fmt.Errorf("failed to open connection manager: %w", err)
}
......
......@@ -12,12 +12,10 @@ import (
ds "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/sync"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/connmgr"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
tswarm "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices"
......@@ -54,10 +52,6 @@ func TestingConfig(t *testing.T) *Config {
TimeoutAccept: time.Second * 2,
TimeoutDial: time.Second * 2,
Store: sync.MutexWrap(ds.NewMapDatastore()),
ConnGater: func(conf *Config) (connmgr.ConnectionGater, error) {
return tswarm.DefaultMockConnectionGater(), nil
},
ConnMngr: DefaultConnManager,
}
}
......@@ -113,8 +107,6 @@ func TestP2PFull(t *testing.T) {
TimeoutAccept: time.Second * 2,
TimeoutDial: time.Second * 2,
Store: sync.MutexWrap(ds.NewMapDatastore()),
ConnGater: DefaultConnGater,
ConnMngr: DefaultConnManager,
}
// copy config A, and change the settings for B
confB := confA
......@@ -262,8 +254,6 @@ func TestDiscovery(t *testing.T) {
TimeoutDial: time.Second * 2,
Store: sync.MutexWrap(ds.NewMapDatastore()),
DiscoveryDB: discDBA,
ConnGater: DefaultConnGater,
ConnMngr: DefaultConnManager,
}
// copy config A, and change the settings for B
confB := confA
......
......@@ -12,14 +12,6 @@ const config: DeployConfig = {
'0x70997970c51812dc3a010c7d01b50e0d17dc79c8',
optimistAllowlistCoinbaseQuestAttestor:
'0x70997970c51812dc3a010c7d01b50e0d17dc79c8',
faucetAdmin: '',
faucetName: '',
githubFamAdmin: '',
githubFamName: '',
githubFamVersion: '',
optimistFamAdmin: '',
optimistFamName: '',
optimistFamVersion: '',
}
export default config
......@@ -80,7 +80,7 @@ contract AdminFaucetAuthModuleTest is Test {
uint256 _eip712Chainid,
address _eip712VerifyingContract,
address recipient,
bytes memory id,
bytes32 id,
bytes32 nonce
) internal view returns (bytes memory) {
AdminFaucetAuthModule.Proof memory proof = AdminFaucetAuthModule.Proof(
......@@ -114,7 +114,7 @@ contract AdminFaucetAuthModuleTest is Test {
block.chainid,
address(adminFam),
fundsReceiver,
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
nonce
);
......@@ -122,7 +122,7 @@ contract AdminFaucetAuthModuleTest is Test {
assertEq(
adminFam.verify(
Faucet.DripParameters(payable(fundsReceiver), nonce),
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
proof
),
true
......@@ -142,7 +142,7 @@ contract AdminFaucetAuthModuleTest is Test {
block.chainid,
address(adminFam),
fundsReceiver,
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
nonce
);
......@@ -150,7 +150,7 @@ contract AdminFaucetAuthModuleTest is Test {
assertEq(
adminFam.verify(
Faucet.DripParameters(payable(fundsReceiver), nonce),
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
proof
),
false
......@@ -172,7 +172,7 @@ contract AdminFaucetAuthModuleTest is Test {
block.chainid,
address(adminFam),
fundsReceiver,
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
nonce
);
......@@ -180,7 +180,7 @@ contract AdminFaucetAuthModuleTest is Test {
assertEq(
adminFam.verify(
Faucet.DripParameters(payable(fundsReceiver), nonce),
abi.encodePacked(randomAddress),
keccak256(abi.encodePacked(randomAddress)),
proof
),
false
......
......@@ -9,7 +9,7 @@ import { FaucetHelper } from "../testing/helpers/FaucetHelper.sol";
contract Faucet_Initializer is Test {
event Drip(
string indexed authModule,
bytes indexed userId,
bytes32 indexed userId,
uint256 amount,
address indexed recipient
);
......@@ -103,7 +103,7 @@ contract Faucet_Initializer is Test {
uint256 _eip712Chainid,
address _eip712VerifyingContract,
address recipient,
bytes memory id,
bytes32 id,
bytes32 nonce
) internal view returns (bytes memory) {
AdminFaucetAuthModule.Proof memory proof = AdminFaucetAuthModule.Proof(
......@@ -140,14 +140,18 @@ contract FaucetTest is Faucet_Initializer {
block.chainid,
address(optimistNftFam),
fundsReceiver,
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
nonce
);
vm.prank(nonAdmin);
faucet.drip(
Faucet.DripParameters(payable(fundsReceiver), nonce),
Faucet.AuthParameters(optimistNftFam, abi.encodePacked(fundsReceiver), signature)
Faucet.AuthParameters(
optimistNftFam,
keccak256(abi.encodePacked(fundsReceiver)),
signature
)
);
}
......@@ -161,7 +165,7 @@ contract FaucetTest is Faucet_Initializer {
block.chainid,
address(optimistNftFam),
fundsReceiver,
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
nonce
);
......@@ -169,7 +173,11 @@ contract FaucetTest is Faucet_Initializer {
vm.expectRevert("Faucet: drip parameters could not be verified by security module");
faucet.drip(
Faucet.DripParameters(payable(fundsReceiver), nonce),
Faucet.AuthParameters(optimistNftFam, abi.encodePacked(fundsReceiver), signature)
Faucet.AuthParameters(
optimistNftFam,
keccak256(abi.encodePacked(fundsReceiver)),
signature
)
);
}
......@@ -183,7 +191,7 @@ contract FaucetTest is Faucet_Initializer {
block.chainid,
address(optimistNftFam),
fundsReceiver,
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
nonce
);
......@@ -191,7 +199,11 @@ contract FaucetTest is Faucet_Initializer {
vm.prank(nonAdmin);
faucet.drip(
Faucet.DripParameters(payable(fundsReceiver), nonce),
Faucet.AuthParameters(optimistNftFam, abi.encodePacked(fundsReceiver), signature)
Faucet.AuthParameters(
optimistNftFam,
keccak256(abi.encodePacked(fundsReceiver)),
signature
)
);
uint256 recipientBalanceAfter = address(fundsReceiver).balance;
assertEq(
......@@ -211,7 +223,7 @@ contract FaucetTest is Faucet_Initializer {
block.chainid,
address(githubFam),
fundsReceiver,
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
nonce
);
......@@ -219,7 +231,7 @@ contract FaucetTest is Faucet_Initializer {
vm.prank(nonAdmin);
faucet.drip(
Faucet.DripParameters(payable(fundsReceiver), nonce),
Faucet.AuthParameters(githubFam, abi.encodePacked(fundsReceiver), signature)
Faucet.AuthParameters(githubFam, keccak256(abi.encodePacked(fundsReceiver)), signature)
);
uint256 recipientBalanceAfter = address(fundsReceiver).balance;
assertEq(
......@@ -239,17 +251,22 @@ contract FaucetTest is Faucet_Initializer {
block.chainid,
address(githubFam),
fundsReceiver,
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
nonce
);
vm.expectEmit(true, true, true, true, address(faucet));
emit Drip("GithubModule", abi.encodePacked(fundsReceiver), .05 ether, fundsReceiver);
emit Drip(
"GithubModule",
keccak256(abi.encodePacked(fundsReceiver)),
.05 ether,
fundsReceiver
);
vm.prank(nonAdmin);
faucet.drip(
Faucet.DripParameters(payable(fundsReceiver), nonce),
Faucet.AuthParameters(githubFam, abi.encodePacked(fundsReceiver), signature)
Faucet.AuthParameters(githubFam, keccak256(abi.encodePacked(fundsReceiver)), signature)
);
}
......@@ -263,14 +280,14 @@ contract FaucetTest is Faucet_Initializer {
block.chainid,
address(githubFam),
fundsReceiver,
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
nonce
);
vm.startPrank(faucetContractAdmin);
faucet.drip(
Faucet.DripParameters(payable(fundsReceiver), nonce),
Faucet.AuthParameters(githubFam, abi.encodePacked(fundsReceiver), signature)
Faucet.AuthParameters(githubFam, keccak256(abi.encodePacked(fundsReceiver)), signature)
);
faucet.configure(githubFam, Faucet.ModuleConfig("GithubModule", false, 1 days, .05 ether));
......@@ -278,7 +295,7 @@ contract FaucetTest is Faucet_Initializer {
vm.expectRevert("Faucet: provided auth module is not supported by this faucet");
faucet.drip(
Faucet.DripParameters(payable(fundsReceiver), nonce),
Faucet.AuthParameters(githubFam, abi.encodePacked(fundsReceiver), signature)
Faucet.AuthParameters(githubFam, keccak256(abi.encodePacked(fundsReceiver)), signature)
);
vm.stopPrank();
}
......@@ -293,20 +310,20 @@ contract FaucetTest is Faucet_Initializer {
block.chainid,
address(githubFam),
fundsReceiver,
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
nonce
);
vm.startPrank(faucetContractAdmin);
faucet.drip(
Faucet.DripParameters(payable(fundsReceiver), nonce),
Faucet.AuthParameters(githubFam, abi.encodePacked(fundsReceiver), signature)
Faucet.AuthParameters(githubFam, keccak256(abi.encodePacked(fundsReceiver)), signature)
);
vm.expectRevert("Faucet: nonce has already been used");
faucet.drip(
Faucet.DripParameters(payable(fundsReceiver), nonce),
Faucet.AuthParameters(githubFam, abi.encodePacked(fundsReceiver), signature)
Faucet.AuthParameters(githubFam, keccak256(abi.encodePacked(fundsReceiver)), signature)
);
vm.stopPrank();
}
......@@ -321,14 +338,14 @@ contract FaucetTest is Faucet_Initializer {
block.chainid,
address(githubFam),
fundsReceiver,
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
nonce0
);
vm.startPrank(faucetContractAdmin);
faucet.drip(
Faucet.DripParameters(payable(fundsReceiver), nonce0),
Faucet.AuthParameters(githubFam, abi.encodePacked(fundsReceiver), signature0)
Faucet.AuthParameters(githubFam, keccak256(abi.encodePacked(fundsReceiver)), signature0)
);
bytes32 nonce1 = faucetHelper.consumeNonce();
......@@ -339,14 +356,14 @@ contract FaucetTest is Faucet_Initializer {
block.chainid,
address(githubFam),
fundsReceiver,
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
nonce1
);
vm.expectRevert("Faucet: auth cannot be used yet because timeout has not elapsed");
faucet.drip(
Faucet.DripParameters(payable(fundsReceiver), nonce1),
Faucet.AuthParameters(githubFam, abi.encodePacked(fundsReceiver), signature1)
Faucet.AuthParameters(githubFam, keccak256(abi.encodePacked(fundsReceiver)), signature1)
);
vm.stopPrank();
}
......@@ -361,14 +378,14 @@ contract FaucetTest is Faucet_Initializer {
block.chainid,
address(githubFam),
fundsReceiver,
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
nonce0
);
vm.startPrank(faucetContractAdmin);
faucet.drip(
Faucet.DripParameters(payable(fundsReceiver), nonce0),
Faucet.AuthParameters(githubFam, abi.encodePacked(fundsReceiver), signature0)
Faucet.AuthParameters(githubFam, keccak256(abi.encodePacked(fundsReceiver)), signature0)
);
bytes32 nonce1 = faucetHelper.consumeNonce();
......@@ -379,14 +396,14 @@ contract FaucetTest is Faucet_Initializer {
block.chainid,
address(githubFam),
fundsReceiver,
abi.encodePacked(fundsReceiver),
keccak256(abi.encodePacked(fundsReceiver)),
nonce1
);
vm.warp(startingTimestamp + 1 days + 1 seconds);
faucet.drip(
Faucet.DripParameters(payable(fundsReceiver), nonce1),
Faucet.AuthParameters(githubFam, abi.encodePacked(fundsReceiver), signature1)
Faucet.AuthParameters(githubFam, keccak256(abi.encodePacked(fundsReceiver)), signature1)
);
vm.stopPrank();
}
......
......@@ -16,7 +16,7 @@ contract FaucetHelper {
* @notice EIP712 typehash for the Proof type.
*/
bytes32 public constant PROOF_TYPEHASH =
keccak256("Proof(address recipient,bytes32 nonce,bytes id)");
keccak256("Proof(address recipient,bytes32 nonce,bytes32 id)");
/**
* @notice EIP712 typehash for the EIP712Domain type that is included as part of the signature.
......
......@@ -31,7 +31,7 @@ contract Faucet {
*/
event Drip(
string indexed authModule,
bytes indexed userId,
bytes32 indexed userId,
uint256 amount,
address indexed recipient
);
......@@ -49,7 +49,7 @@ contract Faucet {
*/
struct AuthParameters {
IFaucetAuthModule module;
bytes id;
bytes32 id;
bytes proof;
}
......@@ -76,12 +76,12 @@ contract Faucet {
/**
* @notice Mapping of authentication IDs to the next timestamp at which they can be used.
*/
mapping(IFaucetAuthModule => mapping(bytes => uint256)) public timeouts;
mapping(IFaucetAuthModule => mapping(bytes32 => uint256)) public timeouts;
/**
* @notice Maps from id to nonces to whether or not they have been used.
*/
mapping(bytes => mapping(bytes32 => bool)) public nonces;
mapping(bytes32 => mapping(bytes32 => bool)) public nonces;
/**
* @notice Modifier that makes a function admin privileged.
......
......@@ -21,7 +21,7 @@ contract AdminFaucetAuthModule is IFaucetAuthModule, EIP712 {
* @notice EIP712 typehash for the Proof type.
*/
bytes32 public constant PROOF_TYPEHASH =
keccak256("Proof(address recipient,bytes32 nonce,bytes id)");
keccak256("Proof(address recipient,bytes32 nonce,bytes32 id)");
/**
* @notice Struct that represents a proof that verifies the admin.
......@@ -33,7 +33,7 @@ contract AdminFaucetAuthModule is IFaucetAuthModule, EIP712 {
struct Proof {
address recipient;
bytes32 nonce;
bytes id;
bytes32 id;
}
/**
......@@ -54,7 +54,7 @@ contract AdminFaucetAuthModule is IFaucetAuthModule, EIP712 {
*/
function verify(
Faucet.DripParameters memory _params,
bytes memory _id,
bytes32 _id,
bytes memory _proof
) external view returns (bool) {
// Generate a EIP712 typed data hash to compare against the proof.
......
......@@ -17,7 +17,7 @@ interface IFaucetAuthModule {
*/
function verify(
Faucet.DripParameters memory _params,
bytes memory _id,
bytes32 _id,
bytes memory _proof
) external view returns (bool);
}
/* Imports: External */
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { HardhatRuntimeEnvironment } from 'hardhat/types'
import '@nomiclabs/hardhat-ethers'
import '@eth-optimism/hardhat-deploy-config'
import 'hardhat-deploy'
import type { DeployConfig } from '../../src'
const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
const deployConfig = hre.deployConfig as DeployConfig
const { deployer } = await hre.getNamedAccounts()
console.log('Deploying Faucet')
const { deploy } = await hre.deployments.deterministic('Faucet', {
salt: hre.ethers.utils.solidityKeccak256(['string'], ['Faucet']),
from: deployer,
args: [deployConfig.faucetAdmin],
log: true,
})
const result = await deploy()
console.log(`Faucet deployed to ${result.address}`)
}
deployFn.tags = ['Faucet', 'FaucetEnvironment']
export default deployFn
/* Imports: External */
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { HardhatRuntimeEnvironment } from 'hardhat/types'
import '@nomiclabs/hardhat-ethers'
import '@eth-optimism/hardhat-deploy-config'
import 'hardhat-deploy'
import type { DeployConfig } from '../../../src'
const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
const deployConfig = hre.deployConfig as DeployConfig
const { deployer } = await hre.getNamedAccounts()
const { deploy } = await hre.deployments.deterministic(
'AdminFaucetAuthModule',
{
salt: hre.ethers.utils.solidityKeccak256(
['string'],
['AdminFaucetAuthModule']
),
from: deployer,
args: [
deployConfig.githubFamAdmin,
deployConfig.githubFamName,
deployConfig.githubFamVersion,
],
log: true,
}
)
await deploy()
}
deployFn.tags = ['Faucet', 'FaucetEnvironment']
export default deployFn
/* Imports: External */
import { DeployFunction } from 'hardhat-deploy/dist/types'
import { HardhatRuntimeEnvironment } from 'hardhat/types'
import '@nomiclabs/hardhat-ethers'
import '@eth-optimism/hardhat-deploy-config'
import 'hardhat-deploy'
import type { DeployConfig } from '../../../src'
const deployFn: DeployFunction = async (hre: HardhatRuntimeEnvironment) => {
const deployConfig = hre.deployConfig as DeployConfig
const { deployer } = await hre.getNamedAccounts()
const { deploy } = await hre.deployments.deterministic(
'AdminFaucetAuthModule',
{
salt: hre.ethers.utils.solidityKeccak256(
['string'],
['AdminFaucetAuthModule']
),
from: deployer,
args: [
deployConfig.optimistFamAdmin,
deployConfig.optimistFamName,
deployConfig.optimistFamVersion,
],
log: true,
}
)
await deploy()
}
deployFn.tags = ['Faucet', 'FaucetEnvironment']
export default deployFn
......@@ -54,46 +54,6 @@ export interface DeployConfig {
*/
optimistAllowlistCoinbaseQuestAttestor: string
/**
* Address of privileged account for the Faucet contract.
*/
faucetAdmin: string
/**
* Name of Faucet contract.
*/
faucetName: string
/**
* Address of admin account for the Github FaucetAuthModule.
*/
githubFamAdmin: string
/**
* Name of Github FaucetAuthModule contract, used for the EIP712 domain separator.
*/
githubFamName: string
/**
* Version of Github FaucetAuthModule contract, used for the EIP712 domain separator.
*/
githubFamVersion: string
/**
* Address of admin account for Optimist FaucetAuthModule.
*/
optimistFamAdmin: string
/**
* Name of Optimist FaucetAuthModule contract, used for the EIP712 domain separator.
*/
optimistFamName: string
/**
* Version of Optimist FaucetAuthModule contract, used for the EIP712 domain separator.
*/
optimistFamVersion: string
/**
* Address of the owner of the proxies on L2. There will be a ProxyAdmin deployed as a predeploy
* after bedrock, so the owner of proxies should be updated to that after the upgrade.
......@@ -138,30 +98,7 @@ export const configSpec: DeployConfigSpec<DeployConfig> = {
optimistAllowlistCoinbaseQuestAttestor: {
type: 'address',
},
faucetAdmin: {
type: 'address',
},
faucetName: {
type: 'string',
},
githubFamAdmin: {
type: 'address',
},
githubFamName: {
type: 'string',
},
githubFamVersion: {
type: 'string',
},
optimistFamAdmin: {
type: 'address',
},
optimistFamName: {
type: 'string',
},
optimistFamVersion: {
type: 'string',
},
l2ProxyOwnerAddress: {
type: 'address',
},
......
import { Contract, BigNumber } from 'ethers'
import { Logger } from '@eth-optimism/common-ts'
export interface OutputOracle<TSubmissionEventArgs> {
contract: Contract
......@@ -39,7 +40,7 @@ const getCache = (
} => {
if (!caches[address]) {
caches[address] = {
highestBlock: 0,
highestBlock: -1,
eventCache: new Map(),
}
}
......@@ -54,15 +55,28 @@ const getCache = (
* @param filter Event filter to use.
*/
export const updateOracleCache = async <TSubmissionEventArgs>(
oracle: OutputOracle<TSubmissionEventArgs>
oracle: OutputOracle<TSubmissionEventArgs>,
logger?: Logger
): Promise<void> => {
const cache = getCache(oracle.contract.address)
let currentBlock = cache.highestBlock
const endingBlock = await oracle.contract.provider.getBlockNumber()
let step = endingBlock - currentBlock
const endBlock = await oracle.contract.provider.getBlockNumber()
logger?.info('visiting uncached oracle events for range', {
node: 'l1',
cachedUntilBlock: cache.highestBlock,
latestBlock: endBlock,
})
let failures = 0
while (currentBlock < endingBlock) {
let currentBlock = cache.highestBlock + 1
let step = endBlock - currentBlock
while (currentBlock < endBlock) {
try {
logger?.info('polling events for range', {
node: 'l1',
startBlock: currentBlock,
blockRangeSize: step,
})
const events = await oracle.contract.queryFilter(
oracle.filter,
currentBlock,
......@@ -83,7 +97,13 @@ export const updateOracleCache = async <TSubmissionEventArgs>(
// Update the current block and increase the step size for the next iteration.
currentBlock += step
step = Math.ceil(step * 2)
} catch {
} catch (err) {
logger?.error('error fetching events', {
err,
node: 'l1',
section: 'getLogs',
})
// Might happen if we're querying too large an event range.
step = Math.floor(step / 2)
......@@ -97,13 +117,15 @@ export const updateOracleCache = async <TSubmissionEventArgs>(
// We've failed 3 times in a row, we're probably stuck.
if (failures >= 3) {
logger?.fatal('unable to fetch oracle events', { err })
throw new Error('failed to update event cache')
}
}
}
// Update the highest block.
cache.highestBlock = endingBlock
cache.highestBlock = endBlock
logger?.info('done caching oracle events')
}
/**
......@@ -115,7 +137,8 @@ export const updateOracleCache = async <TSubmissionEventArgs>(
*/
export const findEventForStateBatch = async <TSubmissionEventArgs>(
oracle: OutputOracle<TSubmissionEventArgs>,
index: number
index: number,
logger?: Logger
): Promise<PartialEvent> => {
const cache = getCache(oracle.contract.address)
......@@ -125,10 +148,12 @@ export const findEventForStateBatch = async <TSubmissionEventArgs>(
}
// Update the event cache if we don't have the event.
await updateOracleCache(oracle)
logger?.info('event not cached for index. warming cache...', { index })
await updateOracleCache(oracle, logger)
// Event better be in cache now!
if (cache.eventCache[index] === undefined) {
logger?.fatal('expected event for index!', { index })
throw new Error(`unable to find event for batch ${index}`)
}
......@@ -143,7 +168,8 @@ export const findEventForStateBatch = async <TSubmissionEventArgs>(
*/
export const findFirstUnfinalizedStateBatchIndex = async <TSubmissionEventArgs>(
oracle: OutputOracle<TSubmissionEventArgs>,
fpw: number
fpw: number,
logger?: Logger
): Promise<number> => {
const latestBlock = await oracle.contract.provider.getBlock('latest')
const totalBatches = (await oracle.getTotalElements()).toNumber()
......@@ -153,7 +179,7 @@ export const findFirstUnfinalizedStateBatchIndex = async <TSubmissionEventArgs>(
let hi = totalBatches
while (lo !== hi) {
const mid = Math.floor((lo + hi) / 2)
const event = await findEventForStateBatch(oracle, mid)
const event = await findEventForStateBatch(oracle, mid, logger)
const block = await oracle.contract.provider.getBlock(event.blockNumber)
if (block.timestamp + fpw < latestBlock.timestamp) {
......
......@@ -72,7 +72,7 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
startBatchIndex: {
validator: validators.num,
default: -1,
desc: 'Batch index to start checking from',
desc: 'Batch index to start checking from. For bedrock chains, this is the L2 height to start from',
public: true,
},
bedrock: {
......@@ -219,6 +219,8 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
// We use this a lot, a bit cleaner to pull out to the top level of the state object.
this.state.fpw = await this.state.messenger.getChallengePeriodSeconds()
this.logger.info(`fault proof window is ${this.state.fpw} seconds`)
if (this.options.bedrock) {
const oo = this.state.messenger.contracts.l1.L2OutputOracle
this.state.oo = {
......@@ -238,25 +240,25 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
}
// Populate the event cache.
this.logger.info(`warming event cache, this might take a while...`)
await updateOracleCache(this.state.oo)
this.logger.info('warming event cache, this might take a while...')
await updateOracleCache(this.state.oo, this.logger)
// Figure out where to start syncing from.
if (this.options.startBatchIndex === -1) {
this.logger.info(`finding appropriate starting height`)
this.logger.info('finding appropriate starting unfinalized batch')
const firstUnfinalized = await findFirstUnfinalizedStateBatchIndex(
this.state.oo,
this.state.fpw
this.state.fpw,
this.logger
)
// We may not have any unfinalized batches in the case where no batches have been submitted
// for the entire duration of the FPW. We generally do not expect this to happen on mainnet,
// but it happens often on testnets because the FPW is very short.
if (firstUnfinalized === undefined) {
this.logger.info(`no unfinalized batches found, starting from latest`)
this.state.currentBatchIndex = (
await this.state.oo.getTotalElements()
).toNumber()
this.logger.info('no unfinalized batches found. skipping all batches.')
const totalBatches = await this.state.oo.getTotalElements()
this.state.currentBatchIndex = totalBatches.toNumber() - 1
} else {
this.state.currentBatchIndex = firstUnfinalized
}
......@@ -264,8 +266,8 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
this.state.currentBatchIndex = this.options.startBatchIndex
}
this.logger.info('starting height', {
startBatchIndex: this.state.currentBatchIndex,
this.logger.info('starting batch', {
batchIndex: this.state.currentBatchIndex,
})
// Set the initial metrics.
......@@ -285,49 +287,50 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
let latestBatchIndex: number
try {
latestBatchIndex = (await this.state.oo.getTotalElements()).toNumber()
const totalBatches = await this.state.oo.getTotalElements()
latestBatchIndex = totalBatches.toNumber() - 1
} catch (err) {
this.logger.error(`got error when connecting to node`, {
this.logger.error('failed to query total # of batches', {
error: err,
node: 'l1',
section: 'getTotalBatches',
section: 'getTotalElements',
})
this.metrics.nodeConnectionFailures.inc({
layer: 'l1',
section: 'getTotalBatches',
section: 'getTotalElements',
})
await sleep(15000)
return
}
if (this.state.currentBatchIndex >= latestBatchIndex) {
if (this.state.currentBatchIndex > latestBatchIndex) {
this.logger.info('batch index is ahead of the oracle. waiting...', {
batchIndex: this.state.currentBatchIndex,
latestBatchIndex,
})
await sleep(15000)
return
} else {
this.metrics.highestBatchIndex.set(
{
type: 'known',
},
latestBatchIndex
)
}
this.logger.info(`checking batch`, {
this.metrics.highestBatchIndex.set({ type: 'known' }, latestBatchIndex)
this.logger.info('checking batch', {
batchIndex: this.state.currentBatchIndex,
latestIndex: latestBatchIndex,
latestBatchIndex,
})
let event: PartialEvent
try {
event = await findEventForStateBatch(
this.state.oo,
this.state.currentBatchIndex
this.state.currentBatchIndex,
this.logger
)
} catch (err) {
this.logger.error(`got error when connecting to node`, {
this.logger.error('failed to fetch event associated with batch', {
error: err,
node: 'l1',
section: 'findEventForStateBatch',
batchIndex: this.state.currentBatchIndex,
})
this.metrics.nodeConnectionFailures.inc({
layer: 'l1',
......@@ -341,7 +344,7 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
try {
latestBlock = await this.options.l2RpcProvider.getBlockNumber()
} catch (err) {
this.logger.error(`got error when connecting to node`, {
this.logger.error('failed to query L2 block height', {
error: err,
node: 'l2',
section: 'getBlockNumber',
......@@ -355,27 +358,29 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
}
if (this.options.bedrock) {
if (latestBlock < event.args.l2BlockNumber.toNumber()) {
this.logger.info(`node is behind, waiting for sync`, {
batchEnd: event.args.l2BlockNumber.toNumber(),
latestBlock,
const outputBlockNumber = event.args.l2BlockNumber.toNumber()
if (latestBlock < outputBlockNumber) {
this.logger.info('L2 node is behind, waiting for sync...', {
l2BlockHeight: latestBlock,
outputBlock: outputBlockNumber,
})
return
}
let targetBlock: any
let outputBlock: any
try {
targetBlock = await (
outputBlock = await (
this.options.l2RpcProvider as ethers.providers.JsonRpcProvider
).send('eth_getBlockByNumber', [
toRpcHexString(event.args.l2BlockNumber.toNumber()),
toRpcHexString(outputBlockNumber),
false,
])
} catch (err) {
this.logger.error(`got error when connecting to node`, {
this.logger.error('failed to fetch output block', {
error: err,
node: 'l2',
section: 'getBlock',
block: outputBlockNumber,
})
this.metrics.nodeConnectionFailures.inc({
layer: 'l2',
......@@ -392,13 +397,14 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
).send('eth_getProof', [
this.state.messenger.contracts.l2.BedrockMessagePasser.address,
[],
toRpcHexString(event.args.l2BlockNumber.toNumber()),
toRpcHexString(outputBlockNumber),
])
} catch (err) {
this.logger.error(`got error when connecting to node`, {
this.logger.error('failed to fetch message passer proof', {
error: err,
node: 'l2',
section: 'getProof',
block: outputBlockNumber,
})
this.metrics.nodeConnectionFailures.inc({
layer: 'l2',
......@@ -412,22 +418,22 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
['uint256', 'bytes32', 'bytes32', 'bytes32'],
[
0,
targetBlock.stateRoot,
outputBlock.stateRoot,
messagePasserProofResponse.storageHash,
targetBlock.hash,
outputBlock.hash,
]
)
if (outputRoot !== event.args.outputRoot) {
this.state.diverged = true
this.metrics.isCurrentlyMismatched.set(1)
this.logger.error(`state root mismatch`, {
blockNumber: targetBlock.number,
this.logger.error('state root mismatch', {
blockNumber: outputBlock.number,
expectedStateRoot: event.args.outputRoot,
actualStateRoot: outputRoot,
finalizationTime: dateformat(
new Date(
(ethers.BigNumber.from(targetBlock.timestamp).toNumber() +
(ethers.BigNumber.from(outputBlock.timestamp).toNumber() +
this.state.fpw) *
1000
),
......@@ -443,7 +449,7 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
event.transactionHash
)
} catch (err) {
this.logger.error(`got error when connecting to node`, {
this.logger.error('failed to acquire batch transaction', {
error: err,
node: 'l1',
section: 'getTransaction',
......@@ -466,9 +472,10 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
const batchEnd = batchStart + batchSize
if (latestBlock < batchEnd) {
this.logger.info(`node is behind, waiting for sync`, {
batchEnd,
latestBlock,
this.logger.info('L2 node is behind. waiting for sync...', {
batchBlockStart: batchStart,
batchBlockEnd: batchEnd,
l2BlockHeight: latestBlock,
})
return
}
......@@ -487,7 +494,7 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
false,
])
} catch (err) {
this.logger.error(`got error when connecting to node`, {
this.logger.error('failed to query for blocks in batch', {
error: err,
node: 'l2',
section: 'getBlockRange',
......@@ -507,7 +514,7 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
if (blocks[i].stateRoot !== stateRoot) {
this.state.diverged = true
this.metrics.isCurrentlyMismatched.set(1)
this.logger.error(`state root mismatch`, {
this.logger.error('state root mismatch', {
blockNumber: blocks[i].number,
expectedStateRoot: blocks[i].stateRoot,
actualStateRoot: stateRoot,
......@@ -533,9 +540,7 @@ export class FaultDetector extends BaseServiceV2<Options, Metrics, State> {
timeMs: elapsedMs,
})
this.metrics.highestBatchIndex.set(
{
type: 'checked',
},
{ type: 'checked' },
this.state.currentBatchIndex
)
......