Commit a8134283 authored by mergify[bot], committed by GitHub

Merge branch 'develop' into feat/doc-metrics

parents 23db9514 c1d44d70
---
'@eth-optimism/contracts-bedrock': patch
---
Added a test for large deposit gaps
@@ -8,7 +8,7 @@ There are plenty of ways to contribute, in particular we appreciate support in t
 - Fixing and responding to existing issues. You can start off with those tagged ["good first issue"](https://github.com/ethereum-optimism/optimism/contribute) which are meant as introductory issues for external contributors.
 - Improving the [community site](https://community.optimism.io/), [documentation](https://github.com/ethereum-optimism/community-hub) and [tutorials](https://github.com/ethereum-optimism/optimism-tutorial).
 - Become an "Optimizer" and answer questions in the [Optimism Discord](https://discord.optimism.io).
-- Get involved in the protocol design process by proposing changes or new features or write parts of the spec yourself in the [optimistic-specs repo](https://github.com/ethereum-optimism/optimistic-specs).
+- Get involved in the protocol design process by proposing changes or new features or write parts of the spec yourself in the [specs subdirectory](./specs/).
 
 Note that we have a [Code of Conduct](https://github.com/ethereum-optimism/.github/blob/master/CODE_OF_CONDUCT.md), please follow it in all your interactions with the project.
...
@@ -15,7 +15,7 @@ import (
 var (
 	// UnsafeBlockSignerAddressSystemConfigStorageSlot is the storage slot identifier of the unsafeBlockSigner
-	// `address` storage value in the SystemConfig L1 contract.
+	// `address` storage value in the SystemConfig L1 contract. Computed as `keccak256("systemconfig.unsafeblocksigner")`
 	UnsafeBlockSignerAddressSystemConfigStorageSlot = common.HexToHash("0x65a7ed542fb37fe237fdfbdd70b31598523fe5b32879e307bae27a0bd9581c08")
 )
...
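The new comment ties the constant to a preimage, which is easy to check offline. A minimal Go sketch (not part of the diff) that recomputes the slot, assuming the standard go-ethereum `crypto` package:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Recompute the storage slot from the string named in the comment and print it.
	// If the comment is accurate, this should print
	// 0x65a7ed542fb37fe237fdfbdd70b31598523fe5b32879e307bae27a0bd9581c08.
	slot := crypto.Keccak256Hash([]byte("systemconfig.unsafeblocksigner"))
	fmt.Println(slot.Hex())
}
```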
@@ -27,6 +27,17 @@ var (
 	ConfigUpdateEventVersion0 = common.Hash{}
 )
 
+var (
+	// A left-padded uint256 equal to 32.
+	oneWordUint = common.Hash{31: 32}
+	// A left-padded uint256 equal to 64.
+	twoWordUint = common.Hash{31: 64}
+	// 24 zero bytes (the padding for a uint64 in a 32 byte word)
+	uint64Padding = make([]byte, 24)
+	// 12 zero bytes (the padding for an Ethereum address in a 32 byte word)
+	addressPadding = make([]byte, 12)
+)
+
 // UpdateSystemConfigWithL1Receipts filters all L1 receipts to find config updates and applies the config updates to the given sysCfg
 func UpdateSystemConfigWithL1Receipts(sysCfg *eth.SystemConfig, receipts []*types.Receipt, cfg *rollup.Config) error {
 	var result error
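For readers less used to Go's indexed composite literals, a small illustrative sketch (not part of the diff) showing that `oneWordUint` and `twoWordUint` are the left-padded big-endian word encodings described in the comments above:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// common.Hash{31: 32} sets only the last byte of the 32-byte array to 0x20,
	// which is the left-padded (big-endian) uint256 encoding of the number 32.
	oneWordUint := common.Hash{31: 32}
	twoWordUint := common.Hash{31: 64}

	fmt.Println(oneWordUint == common.BigToHash(big.NewInt(32))) // true
	fmt.Println(twoWordUint == common.BigToHash(big.NewInt(64))) // true
}
```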
@@ -69,50 +80,94 @@ func ProcessSystemConfigUpdateLogEvent(destSysCfg *eth.SystemConfig, ev *types.L
 	}
 	// indexed 1
 	updateType := ev.Topics[2]
-	// unindexed data
+
+	// Create a reader of the unindexed data
+	reader := bytes.NewReader(ev.Data)
+
+	// Counter for the number of bytes read from `reader` via `readWord`
+	countReadBytes := 0
+
+	// Helper function to read a word from the log data reader
+	readWord := func() (b [32]byte) {
+		if _, err := reader.Read(b[:]); err != nil {
+			// If there is an error reading the next 32 bytes from the reader, return an empty
+			// 32 byte array. We always check that the number of bytes read (`countReadBytes`)
+			// is equal to the expected amount at the end of each switch case.
+			return b
+		}
+		countReadBytes += 32
+		return b
+	}
+
+	// Attempt to read unindexed data
 	switch updateType {
 	case SystemConfigUpdateBatcher:
-		if len(ev.Data) != 32*3 {
-			return fmt.Errorf("expected 32*3 bytes in batcher hash update, but got %d bytes", len(ev.Data))
-		}
-		if x := common.BytesToHash(ev.Data[:32]); x != (common.Hash{31: 32}) {
-			return fmt.Errorf("expected offset to point to length location, but got %s", x)
-		}
-		if x := common.BytesToHash(ev.Data[32:64]); x != (common.Hash{31: 32}) {
-			return fmt.Errorf("expected length of 1 bytes32, but got %s", x)
-		}
-		if !bytes.Equal(ev.Data[64:64+12], make([]byte, 12)) {
-			return fmt.Errorf("expected version 0 batcher hash with zero padding, but got %x", ev.Data)
-		}
-		destSysCfg.BatcherAddr.SetBytes(ev.Data[64+12:])
+		// Read the pointer, it should always equal 32.
+		if word := readWord(); word != oneWordUint {
+			return fmt.Errorf("expected offset to point to length location, but got %s", word)
+		}
+
+		// Read the length, it should also always equal 32.
+		if word := readWord(); word != oneWordUint {
+			return fmt.Errorf("expected length to be 32 bytes, but got %s", word)
+		}
+
+		// Indexing `word` directly is always safe here, it is guaranteed to be 32 bytes in length.
+		// Check that the batcher address is correctly zero-padded.
+		word := readWord()
+		if !bytes.Equal(word[:12], addressPadding) {
+			return fmt.Errorf("expected version 0 batcher hash with zero padding, but got %x", word)
+		}
+		destSysCfg.BatcherAddr.SetBytes(word[12:])
+
+		if countReadBytes != 32*3 {
+			return NewCriticalError(fmt.Errorf("expected 32*3 bytes in batcher hash update, but got %d bytes", len(ev.Data)))
+		}
+
 		return nil
-	case SystemConfigUpdateGasConfig: // left padded uint8
-		if len(ev.Data) != 32*4 {
-			return fmt.Errorf("expected 32*4 bytes in GPO params update data, but got %d", len(ev.Data))
-		}
-		if x := common.BytesToHash(ev.Data[:32]); x != (common.Hash{31: 32}) {
-			return fmt.Errorf("expected offset to point to length location, but got %s", x)
-		}
-		if x := common.BytesToHash(ev.Data[32:64]); x != (common.Hash{31: 64}) {
-			return fmt.Errorf("expected length of 2 bytes32, but got %s", x)
-		}
-		copy(destSysCfg.Overhead[:], ev.Data[64:96])
-		copy(destSysCfg.Scalar[:], ev.Data[96:128])
+	case SystemConfigUpdateGasConfig:
+		// Read the pointer, it should always equal 32.
+		if word := readWord(); word != oneWordUint {
+			return fmt.Errorf("expected offset to point to length location, but got %s", word)
+		}
+
+		// Read the length, it should always equal 64.
+		if word := readWord(); word != twoWordUint {
+			return fmt.Errorf("expected length to be 64 bytes, but got %s", word)
+		}
+
+		// Set the system config's overhead and scalar values to the values read from the log
+		destSysCfg.Overhead = readWord()
+		destSysCfg.Scalar = readWord()
+
+		if countReadBytes != 32*4 {
+			return NewCriticalError(fmt.Errorf("expected 32*4 bytes in GPO params update data, but got %d", len(ev.Data)))
+		}
+
 		return nil
 	case SystemConfigUpdateGasLimit:
-		if len(ev.Data) != 32*3 {
-			return fmt.Errorf("expected 32*3 bytes in gas limit update, but got %d bytes", len(ev.Data))
-		}
-		if x := common.BytesToHash(ev.Data[:32]); x != (common.Hash{31: 32}) {
-			return fmt.Errorf("expected offset to point to length location, but got %s", x)
-		}
-		if x := common.BytesToHash(ev.Data[32:64]); x != (common.Hash{31: 32}) {
-			return fmt.Errorf("expected length of 1 bytes32, but got %s", x)
-		}
-		if !bytes.Equal(ev.Data[64:64+24], make([]byte, 24)) {
-			return fmt.Errorf("expected zero padding for gaslimit, but got %x", ev.Data)
-		}
-		destSysCfg.GasLimit = binary.BigEndian.Uint64(ev.Data[64+24:])
+		// Read the pointer, it should always equal 32.
+		if word := readWord(); word != oneWordUint {
+			return fmt.Errorf("expected offset to point to length location, but got %s", word)
+		}
+
+		// Read the length, it should also always equal 32.
+		if word := readWord(); word != oneWordUint {
+			return fmt.Errorf("expected length to be 32 bytes, but got %s", word)
+		}
+
+		// Indexing `word` directly is always safe here, it is guaranteed to be 32 bytes in length.
+		// Check that the gas limit is correctly zero-padded.
+		word := readWord()
+		if !bytes.Equal(word[:24], uint64Padding) {
+			return fmt.Errorf("expected zero padding for gaslimit, but got %x", word)
+		}
+		destSysCfg.GasLimit = binary.BigEndian.Uint64(word[24:])
+
+		if countReadBytes != 32*3 {
+			return NewCriticalError(fmt.Errorf("expected 32*3 bytes in gas limit update, but got %d bytes", len(ev.Data)))
+		}
+
		return nil
 	case SystemConfigUpdateUnsafeBlockSigner:
 		// Ignored in derivation. This configurable applies to runtime configuration outside of the derivation.
...
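The offset and length checks above correspond to the ABI encoding of a single dynamic `bytes` argument: an offset word (always 32), a length word (32 for the batcher hash and gas limit, 64 for the GPO params), then the payload words. A small illustrative Go sketch (not part of the diff; the helper name is hypothetical) that builds the 96-byte data the batcher-hash branch accepts:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

// buildBatcherUpdateData builds unindexed log data in the shape the batcher-hash
// branch checks for: an offset word equal to 32, a length word equal to 32, and
// the batcher address left-padded to a full 32-byte word.
func buildBatcherUpdateData(batcher common.Address) []byte {
	data := make([]byte, 0, 32*3)
	data = append(data, common.BigToHash(big.NewInt(32)).Bytes()...)    // offset
	data = append(data, common.BigToHash(big.NewInt(32)).Bytes()...)    // length
	data = append(data, common.BytesToHash(batcher.Bytes()).Bytes()...) // left-padded address
	return data
}

func main() {
	data := buildBatcherUpdateData(common.HexToAddress("0x000000000000000000000000000000000000dEaD"))
	fmt.Println(len(data)) // 96, i.e. 32*3, matching the countReadBytes check
}
```

The gas-limit branch expects the same two prefix words followed by a single word whose first 24 bytes are zero, and the GPO branch expects a 64-byte payload holding the overhead and scalar words.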
 # @eth-optimism/ci-builder
+## 0.4.0
+### Minor Changes
+- 05cc935b2: Bump foundry to 2ff99025abade470a795724c10648c800a41025e
 ## 0.3.8
 ### Patch Changes
...
@@ -16,7 +16,7 @@ WORKDIR /opt/foundry
 # Only diff from upstream docker image is this clone instead
 # of COPY. We select a specific commit to use.
 RUN git clone https://github.com/foundry-rs/foundry.git . \
-    && git checkout c06b53287dc23c4e5b1b3e57c937a90114bbe166
+    && git checkout 2ff99025abade470a795724c10648c800a41025e
 
 RUN source $HOME/.profile && \
     cargo build --release && \
...
 {
   "name": "@eth-optimism/ci-builder",
-  "version": "0.3.8",
+  "version": "0.4.0",
   "scripts": {},
   "license": "MIT",
   "dependencies": {}
...
 # @eth-optimism/foundry
+## 0.2.0
+### Minor Changes
+- 05cc935b2: Bump foundry to 2ff99025abade470a795724c10648c800a41025e
 ## 0.1.3
 ### Patch Changes
...
@@ -9,7 +9,7 @@ WORKDIR /opt/foundry
 # Only diff from upstream docker image is this clone instead
 # of COPY. We select a specific commit to use.
 RUN git clone https://github.com/foundry-rs/foundry.git . \
-    && git checkout f540aa9ebde88dce720140b332412089c2ee85b6
+    && git checkout 2ff99025abade470a795724c10648c800a41025e
 
 RUN source $HOME/.profile && cargo build --release \
     && strip /opt/foundry/target/release/forge \
...
 {
   "name": "@eth-optimism/foundry",
-  "version": "0.1.3",
+  "version": "0.2.0",
   "scripts": {},
   "license": "MIT",
   "dependencies": {}
...
@@ -368,14 +368,14 @@ RLPWriter_Test:test_writeUint_smallint3_succeeds() (gas: 7311)
 RLPWriter_Test:test_writeUint_smallint4_succeeds() (gas: 7312)
 RLPWriter_Test:test_writeUint_smallint_succeeds() (gas: 7290)
 RLPWriter_Test:test_writeUint_zero_succeeds() (gas: 7802)
-ResourceMetering_Test:test_meter_initialResourceParams_succeeds() (gas: 8965)
-ResourceMetering_Test:test_meter_updateNoGasDelta_succeeds() (gas: 2008101)
-ResourceMetering_Test:test_meter_updateOneEmptyBlock_succeeds() (gas: 18152)
-ResourceMetering_Test:test_meter_updateParamsNoChange_succeeds() (gas: 13911)
-ResourceMetering_Test:test_meter_updateTenEmptyBlocks_succeeds() (gas: 20900)
-ResourceMetering_Test:test_meter_updateTwoEmptyBlocks_succeeds() (gas: 20923)
-ResourceMetering_Test:test_meter_useMax_succeeds() (gas: 8017204)
-ResourceMetering_Test:test_meter_useMoreThanMax_reverts() (gas: 16023)
+ResourceMetering_Test:test_meter_initialResourceParams_succeeds() (gas: 8983)
+ResourceMetering_Test:test_meter_updateNoGasDelta_succeeds() (gas: 2008119)
+ResourceMetering_Test:test_meter_updateOneEmptyBlock_succeeds() (gas: 18148)
+ResourceMetering_Test:test_meter_updateParamsNoChange_succeeds() (gas: 13859)
+ResourceMetering_Test:test_meter_updateTenEmptyBlocks_succeeds() (gas: 20918)
+ResourceMetering_Test:test_meter_updateTwoEmptyBlocks_succeeds() (gas: 20941)
+ResourceMetering_Test:test_meter_useMax_succeeds() (gas: 8017151)
+ResourceMetering_Test:test_meter_useMoreThanMax_reverts() (gas: 16045)
 Semver_Test:test_behindProxy_succeeds() (gas: 506725)
 Semver_Test:test_version_succeeds() (gas: 9396)
 SequencerFeeVault_Test:test_constructor_succeeds() (gas: 5504)
...
@@ -57,7 +57,7 @@ We work on this repository with a combination of [Hardhat](https://hardhat.org)
 1. Install Foundry by following [the instructions located here](https://getfoundry.sh/).
    A specific version must be used.
    ```shell
-   foundryup -C c06b53287dc23c4e5b1b3e57c937a90114bbe166
+   foundryup -C 2ff99025abade470a795724c10648c800a41025e
    ```
 2. Install node modules with yarn (v1) and Node.js (16+):
...
@@ -188,6 +188,23 @@ contract SystemDictator is OwnableUpgradeable {
             config.proxyAddressConfig.l1StandardBridgeProxy,
             ProxyAdmin.ProxyType.CHUGSPLASH
         );
+
+        // Upgrade and initialize the SystemConfig so the Sequencer can start up.
+        config.globalConfig.proxyAdmin.upgradeAndCall(
+            payable(config.proxyAddressConfig.systemConfigProxy),
+            address(config.implementationAddressConfig.systemConfigImpl),
+            abi.encodeCall(
+                SystemConfig.initialize,
+                (
+                    config.systemConfigConfig.owner,
+                    config.systemConfigConfig.overhead,
+                    config.systemConfigConfig.scalar,
+                    config.systemConfigConfig.batcherHash,
+                    config.systemConfigConfig.gasLimit,
+                    config.systemConfigConfig.unsafeBlockSigner
+                )
+            )
+        );
     }
 
     /**
@@ -343,23 +360,6 @@ contract SystemDictator is OwnableUpgradeable {
             address(config.implementationAddressConfig.l1ERC721BridgeImpl)
         );
-
-        // Upgrade and initialize the SystemConfig.
-        config.globalConfig.proxyAdmin.upgradeAndCall(
-            payable(config.proxyAddressConfig.systemConfigProxy),
-            address(config.implementationAddressConfig.systemConfigImpl),
-            abi.encodeCall(
-                SystemConfig.initialize,
-                (
-                    config.systemConfigConfig.owner,
-                    config.systemConfigConfig.overhead,
-                    config.systemConfigConfig.scalar,
-                    config.systemConfigConfig.batcherHash,
-                    config.systemConfigConfig.gasLimit,
-                    config.systemConfigConfig.unsafeBlockSigner
-                )
-            )
-        );
         // Pause the L1CrossDomainMessenger, chance to check that everything is OK.
         L1CrossDomainMessenger(config.proxyAddressConfig.l1CrossDomainMessengerProxy).pause();
     }
...
@@ -108,4 +108,18 @@ contract ResourceMetering_Test is CommonTest {
         vm.expectRevert("ResourceMetering: cannot buy more gas than available gas limit");
         meter.use(target * elasticity + 1);
     }
+
+    // Demonstrates that the resource metering arithmetic can tolerate very large gaps between
+    // deposits.
+    function testFuzz_meter_largeBlockDiff_succeeds(uint64 _amount, uint256 _blockDiff) external {
+        // This test fails if the following line is commented out.
+        // At 12 seconds per block, this number is effectively unreachable.
+        vm.assume(_blockDiff < 433576281058164217753225238677900874458691);
+
+        uint64 target = uint64(uint256(meter.TARGET_RESOURCE_LIMIT()));
+        uint64 elasticity = uint64(uint256(meter.ELASTICITY_MULTIPLIER()));
+        vm.assume(_amount < target * elasticity);
+
+        vm.roll(initialBlockNum + _blockDiff);
+        meter.use(_amount);
+    }
 }
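As a rough sanity check on the bound in the new fuzz test (an informal estimate, assuming 12-second L1 blocks): roughly 4.3e41 blocks is about 5.2e42 seconds, on the order of 1e35 years, so the `vm.assume` constraint only rules out block gaps that could never occur in practice.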
@@ -28,6 +28,7 @@ const deployFn: DeployFunction = async (hre) => {
     L1StandardBridgeProxyWithSigner,
     L1ERC721BridgeProxy,
     L1ERC721BridgeProxyWithSigner,
+    SystemConfigProxy,
   ] = await getContractsFromArtifacts(hre, [
     {
       name: 'SystemDictatorProxy',
@@ -61,6 +62,11 @@ const deployFn: DeployFunction = async (hre) => {
       name: 'L1ERC721BridgeProxy',
       signerOrProvider: deployer,
     },
+    {
+      name: 'SystemConfigProxy',
+      iface: 'SystemConfig',
+      signerOrProvider: deployer,
+    },
   ])
 
   // If we have the key for the controller then we don't need to wait for external txns.
@@ -251,6 +257,36 @@ const deployFn: DeployFunction = async (hre) => {
          getDeploymentAddress(hre, 'Proxy__OVM_L1StandardBridge')
        )) === 1
      )
+
+      // Check the SystemConfig was initialized properly.
+      await assertContractVariable(
+        SystemConfigProxy,
+        'owner',
+        hre.deployConfig.finalSystemOwner
+      )
+      await assertContractVariable(
+        SystemConfigProxy,
+        'overhead',
+        hre.deployConfig.gasPriceOracleOverhead
+      )
+      await assertContractVariable(
+        SystemConfigProxy,
+        'scalar',
+        hre.deployConfig.gasPriceOracleScalar
+      )
+      await assertContractVariable(
+        SystemConfigProxy,
+        'batcherHash',
+        ethers.utils.hexZeroPad(
+          hre.deployConfig.batchSenderAddress.toLowerCase(),
+          32
+        )
+      )
+      await assertContractVariable(
+        SystemConfigProxy,
+        'gasLimit',
+        hre.deployConfig.l2GenesisBlockGasLimit
+      )
     },
   })
...
@@ -30,7 +30,6 @@ const deployFn: DeployFunction = async (hre) => {
     OptimismPortal,
     OptimismMintableERC20Factory,
     L1ERC721Bridge,
-    SystemConfigProxy,
   ] = await getContractsFromArtifacts(hre, [
     {
       name: 'SystemDictatorProxy',
@@ -78,11 +77,6 @@ const deployFn: DeployFunction = async (hre) => {
       iface: 'L1ERC721Bridge',
       signerOrProvider: deployer,
     },
-    {
-      name: 'SystemConfigProxy',
-      iface: 'SystemConfig',
-      signerOrProvider: deployer,
-    },
   ])
 
   // If we have the key for the controller then we don't need to wait for external txns.
@@ -286,40 +280,6 @@ const deployFn: DeployFunction = async (hre) => {
        'messenger',
        L1CrossDomainMessenger.address
      )
-
-      // Check the SystemConfig was initialized properly.
-      await assertContractVariable(
-        SystemConfigProxy,
-        'owner',
-        hre.deployConfig.finalSystemOwner
-      )
-      await assertContractVariable(
-        SystemConfigProxy,
-        'overhead',
-        hre.deployConfig.gasPriceOracleOverhead
-      )
-      await assertContractVariable(
-        SystemConfigProxy,
-        'scalar',
-        hre.deployConfig.gasPriceOracleScalar
-      )
-      await assertContractVariable(
-        SystemConfigProxy,
-        'batcherHash',
-        ethers.utils.hexZeroPad(
-          hre.deployConfig.batchSenderAddress.toLowerCase(),
-          32
-        )
-      )
-      await assertContractVariable(
-        SystemConfigProxy,
-        'gasLimit',
-        hre.deployConfig.l2GenesisBlockGasLimit
-      )
     },
   })
...
@@ -48,26 +48,31 @@ task('wait-for-final-batch', 'Waits for the final batch to be submitted')
     const wait = async (contract: Contract) => {
       let height = await l2Provider.getBlockNumber()
       let totalElements = await contract.getTotalElements()
-      // The genesis block was not batch submitted so subtract 1 from the height
-      // when comparing with the total elements
-      while (totalElements !== height - 1) {
+      console.log(`  - height: ${height}`)
+      console.log(`  - totalElements: ${totalElements}`)
+
+      while (totalElements.toNumber() !== height) {
         console.log('Total elements does not match')
-        console.log(`  - real height: ${height}`)
-        console.log(`  - height: ${height - 1}`)
+        console.log(`  - height: ${height}`)
         console.log(`  - totalElements: ${totalElements}`)
+        console.log(
+          `Waiting for ${height - totalElements} elements to be submitted`
+        )
         totalElements = await contract.getTotalElements()
         height = await l2Provider.getBlockNumber()
-        await sleep(2 * 1000)
+        await sleep(5 * 1000)
       }
     }
 
     console.log('Waiting for the CanonicalTransactionChain...')
     await wait(CanonicalTransactionChain)
     console.log('All transaction batches have been submitted')
+    console.log()
 
     console.log('Waiting for the StateCommitmentChain...')
     await wait(StateCommitmentChain)
     console.log('All state root batches have been submitted')
+    console.log()
 
     console.log('All batches have been submitted')
   })
@@ -22,6 +22,7 @@ task('wait-for-final-deposit', 'Waits for the final deposit to be ingested')
     const l1Provider = new hre.ethers.providers.StaticJsonRpcProvider(
       args.l1RpcUrl
     )
+
     const l2Provider = new hre.ethers.providers.StaticJsonRpcProvider(
       args.l2RpcUrl
     )
@@ -63,6 +64,9 @@ task('wait-for-final-deposit', 'Waits for the final deposit to be ingested')
     console.log(`DTL shutoff block ${dtlShutoffBlock.toString()}`)
 
+    let pending = await CanonicalTransactionChain.getNumPendingQueueElements()
+    console.log(`${pending} deposits must be batch submitted`)
+
     // Now query the number of queue elements in the CTC
     const queueLength = await CanonicalTransactionChain.getQueueLength()
     console.log(`Total number of deposits: ${queueLength}`)
@@ -80,11 +84,10 @@ task('wait-for-final-deposit', 'Waits for the final deposit to be ingested')
       if (tx.queueOrigin === 'l1') {
         const queueIndex = BigNumber.from(tx.queueIndex).toNumber()
-        if (queueIndex === queueLength) {
+        if (queueIndex === queueLength - 1) {
           break
         }
         if (queueIndex < queueLength) {
-          console.log()
           throw new Error(
             `Missed the final deposit. queueIndex ${queueIndex}, queueLength ${queueLength}`
           )
@@ -94,4 +97,6 @@ task('wait-for-final-deposit', 'Waits for the final deposit to be ingested')
     }
 
     console.log('Final deposit has been ingested by l2geth')
+    pending = await CanonicalTransactionChain.getNumPendingQueueElements()
+    console.log(`${pending} deposits must be batch submitted`)
   })
@@ -3,6 +3,10 @@
 - **Chain ID**: 420
 - **Public RPC**: https://goerli.optimism.io
 - **Block Explorer**: https://goerli-optimism.etherscan.io/
+
+**Note:** This list is out of date, now that Goerli is on bedrock.
+[The valid list is here](https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts-bedrock/deployments/goerli).
+
 ## Layer 1 Contracts
 <table>
   <tr>
...
 # data transport layer
+## 0.5.51
+### Patch Changes
+- 4396e187d: Fixes a bug in the DTL that would cause it to not be able to sync beyond the deposit shutoff block.
 ## 0.5.50
 ### Patch Changes
...
 {
   "private": true,
   "name": "@eth-optimism/data-transport-layer",
-  "version": "0.5.50",
+  "version": "0.5.51",
   "description": "[Optimism] Service for shuttling data from L1 into L2",
   "main": "dist/index",
   "types": "dist/index",
...
@@ -268,9 +268,8 @@ export class L1IngestionService extends BaseService<L1IngestionServiceOptions> {
       )
     }
 
-    // I prefer to do this in serial to avoid non-determinism. We could have a discussion about
-    // using Promise.all if necessary, but I don't see a good reason to do so unless parsing is
-    // really, really slow for all event types.
+    // We should not sync TransactionEnqueued events beyond the deposit shutoff block.
+    if (depositTargetL1Block >= highestSyncedL1Block) {
     await this._syncEvents(
       'CanonicalTransactionChain',
       'TransactionEnqueued',
@@ -278,6 +277,7 @@ export class L1IngestionService extends BaseService<L1IngestionServiceOptions> {
       depositTargetL1Block,
       handleEventsTransactionEnqueued
     )
+    }
 
     await this._syncEvents(
       'CanonicalTransactionChain',
...
@@ -4,10 +4,10 @@
 The batch submitter, also referred to as the batcher, is the entity submitting the L2 sequencer data to L1,
 to make it available for verifiers.
 
-[derivation-spec]: ./derivation.md
+[derivation spec]: derivation.md
 
-The format of the data transactions is defined in the [derivation spec]: the data is constructed from L2 blocks
-in the reverse order as it is derived from data into L2 blocks.
+The format of the data transactions is defined in the [derivation spec]:
+the data is constructed from L2 blocks in the reverse order as it is derived from data into L2 blocks.
 
 The timing, operation and transaction signing is implementation-specific: any data can be submitted at any time,
 but only the data that matches the [derivation spec] rules will be valid from the verifier perspective.
...
@@ -263,7 +263,7 @@ The rest of the diagram is conceptually distinct from the first part and illustr
 channels have been reordered.
 
 The first line shows batcher transactions. Note that in this case, there exists an ordering of the batches that makes
-all frames within the channels appear contiguously. This is not true in true in general. For instance, in the second
+all frames within the channels appear contiguously. This is not true in general. For instance, in the second
 transaction, the position of `A1` and `B0` could have been inverted for exactly the same result — no changes needed in
 the rest of the diagram.
...