Commit 787db60c authored by mergify[bot], committed by GitHub

Merge branch 'develop' into refcell/fix/pscores

parents 80b42f67 8dd0248b
---
'@eth-optimism/batch-submitter-service': patch
---
Allow deposit only batches
---
'@eth-optimism/contracts-bedrock': patch
---
Added a constructor to the System Dictator
......@@ -518,6 +518,10 @@ jobs:
patterns: packages
# Note: The below needs to be manually configured whenever we
# add a new package to CI.
- run:
name: Check common-ts
command: npx depcheck
working_directory: packages/common-ts
- run:
name: Check contracts
command: npx depcheck
......
......@@ -66,6 +66,7 @@ You'll need the following:
* [Yarn](https://classic.yarnpkg.com/en/docs/install)
* [Docker](https://docs.docker.com/get-docker/)
* [Docker Compose](https://docs.docker.com/compose/install/)
* [Go](https://go.dev/dl/)
* [Foundry](https://getfoundry.sh)
### Setup
......
......@@ -6,6 +6,7 @@ import (
"github.com/ethereum-optimism/optimism/batch-submitter/drivers/sequencer"
l2common "github.com/ethereum-optimism/optimism/l2geth/common"
"github.com/ethereum-optimism/optimism/l2geth/core/types"
l2types "github.com/ethereum-optimism/optimism/l2geth/core/types"
"github.com/stretchr/testify/require"
)
......@@ -47,3 +48,76 @@ func TestBatchElementFromBlock(t *testing.T) {
require.False(t, element.IsSequencerTx())
require.Nil(t, element.Tx)
}
func TestGenSequencerParams(t *testing.T) {
tx := types.NewTransaction(0, l2common.Address{}, big.NewInt(0), 0, big.NewInt(0), []byte{})
shouldStartAtElement := uint64(1)
blockOffset := uint64(1)
batches := []sequencer.BatchElement{
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 1, BlockNumber: 1, Tx: sequencer.NewCachedTx(tx)},
}
params, err := sequencer.GenSequencerBatchParams(shouldStartAtElement, blockOffset, batches)
require.NoError(t, err)
require.Equal(t, uint64(0), params.ShouldStartAtElement)
require.Equal(t, uint64(len(batches)), params.TotalElementsToAppend)
require.Equal(t, len(batches), len(params.Contexts))
// There is only 1 sequencer tx
require.Equal(t, 1, len(params.Txs))
// There are 2 contexts
// The first context contains the deposit
context1 := params.Contexts[0]
require.Equal(t, uint64(0), context1.NumSequencedTxs)
require.Equal(t, uint64(1), context1.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context1.Timestamp)
require.Equal(t, uint64(1), context1.BlockNumber)
// The second context contains the sequencer tx
context2 := params.Contexts[1]
require.Equal(t, uint64(1), context2.NumSequencedTxs)
require.Equal(t, uint64(0), context2.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context2.Timestamp)
require.Equal(t, uint64(1), context2.BlockNumber)
}
func TestGenSequencerParamsOnlyDeposits(t *testing.T) {
shouldStartAtElement := uint64(1)
blockOffset := uint64(1)
batches := []sequencer.BatchElement{
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 1, BlockNumber: 1},
{Timestamp: 2, BlockNumber: 2},
}
params, err := sequencer.GenSequencerBatchParams(shouldStartAtElement, blockOffset, batches)
require.NoError(t, err)
// Deposits are packed into the same context when their
// timestamps and block numbers are the same
require.Equal(t, uint64(0), params.ShouldStartAtElement)
require.Equal(t, uint64(len(batches)), params.TotalElementsToAppend)
// Two deposits share the same timestamp and block number, so they go into the
// same context. One deposit has a different timestamp and block number, so it
// goes into a different context. Therefore there are 2 contexts.
require.Equal(t, 2, len(params.Contexts))
// No sequencer txs
require.Equal(t, 0, len(params.Txs))
// The first context contains the two deposits that share a
// timestamp and block number
context1 := params.Contexts[0]
require.Equal(t, uint64(0), context1.NumSequencedTxs)
require.Equal(t, uint64(2), context1.NumSubsequentQueueTxs)
require.Equal(t, uint64(1), context1.Timestamp)
require.Equal(t, uint64(1), context1.BlockNumber)
context2 := params.Contexts[1]
require.Equal(t, uint64(0), context2.NumSequencedTxs)
require.Equal(t, uint64(1), context2.NumSubsequentQueueTxs)
require.Equal(t, uint64(2), context2.Timestamp)
require.Equal(t, uint64(2), context2.BlockNumber)
}
......@@ -222,11 +222,6 @@ func (p *AppendSequencerBatchParams) Write(
return ErrMalformedBatch
}
// There must be transactions if there are contexts
if len(p.Txs) == 0 && len(p.Contexts) != 0 {
return ErrMalformedBatch
}
// copy the contexts as to not malleate the struct
// when it is a typed batch
contexts := make([]BatchContext, 0, len(p.Contexts)+1)
......@@ -361,9 +356,6 @@ func (p *AppendSequencerBatchParams) Read(r io.Reader) error {
if len(p.Contexts) == 0 && len(p.Txs) != 0 {
return ErrMalformedBatch
}
if len(p.Txs) == 0 && len(p.Contexts) != 0 {
return ErrMalformedBatch
}
return closeReader()
} else if err != nil {
return err
......
......@@ -46,7 +46,7 @@
}
],
"txs": [],
"error": true
"error": false
},
{
"name": "multiple-contexts-no-txs",
......@@ -80,7 +80,7 @@
}
],
"txs": [],
"error": true
"error": false
},
{
"name": "complex",
......
......@@ -166,6 +166,7 @@ module.exports = {
children: [
"/docs/build/tutorials/add-attr.md",
"/docs/build/tutorials/new-precomp.md",
"/docs/build/tutorials/predeploys.md"
]
} // End of tutorials
],
......
......@@ -32,6 +32,11 @@
<i class="fab fa-discord"></i> Discord community
</a>
</li>
<li>
<a href="https://wkf.ms/3XTdpLl" target="_blank" rel="noopener noreferrer">
<i class="far fa-comment-dots"></i> Get support for going live
</a>
</li>
</ul>
</div>
</div>
......
---
title: Modifying Predeployed Contracts
lang: en-US
---
::: warning 🚧 OP Stack Hacks are modifications you can make to the OP Stack that are *not* currently intended for production use
OP Stack Hacks are not for the faint of heart. Do not expect significant developer support for them; be prepared to get your hands dirty and to debug issues on your own.
:::
OP Stack blockchains have a number of [predeployed contracts](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/src/constants.ts) that provide important functionality.
Most of those contracts are proxies that can be upgraded using the `proxyAdminOwner` account that was configured when the network was initially deployed.
The predeploys are controlled from a predeploy called [`ProxyAdmin`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol), whose address is `0x4200000000000000000000000000000000000018`.
The function to call is [`upgrade(address,address)`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol#L211-L229).
The first parameter is the proxy to upgrade, and the second is the address of a new implementation.
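In `cast` terms, every such upgrade has the shape sketched below; `$ADMIN_KEY`, `$SOME_PROXY`, and `$NEW_IMPL` are placeholder variables for this illustration only, and the concrete values used for the `L1BlockNumber` example appear in the steps that follow.
```sh
# Generic shape of a predeploy upgrade through the ProxyAdmin predeploy.
# $ADMIN_KEY, $SOME_PROXY, and $NEW_IMPL are placeholders for this sketch.
cast send --private-key $ADMIN_KEY \
  0x4200000000000000000000000000000000000018 \
  "upgrade(address,address)" $SOME_PROXY $NEW_IMPL
```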
For example, the legacy `L1BlockNumber` contract is at `0x420...013`.
To disable this contract, we'll set its implementation to `0x00...00`.
We do this using the [Foundry](https://book.getfoundry.sh/) command `cast`.
1. We'll need several constants.
- Set these addresses as variables in your terminal.
```sh
L1BLOCKNUM=0x4200000000000000000000000000000000000013
PROXY_ADMIN=0x4200000000000000000000000000000000000018
ZERO_ADDR=0x0000000000000000000000000000000000000000
```
- Set `PRIVKEY` to the private key of your ADMIN account (the `proxyAdminOwner`); an optional ownership check is sketched after this step.
- Set `ETH_RPC_URL`. If you're on the computer that runs the blockchain, use this command.
```sh
export ETH_RPC_URL=http://localhost:8545
```
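Before sending any transactions, you can optionally check that `PRIVKEY` really controls the `ProxyAdmin` predeploy. This sketch assumes the standard `owner()` getter inherited from the `Ownable` pattern; the two printed addresses should match.
```sh
# Address derived from the admin private key
cast wallet address --private-key $PRIVKEY
# Current owner of the ProxyAdmin predeploy; should print the same address
cast call $PROXY_ADMIN "owner()(address)"
```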
1. Verify `L1BlockNumber` works correctly.
See that when you call the contract you get a block number, and twelve seconds later you get the next one (block time on L1 is twelve seconds).
```sh
cast call $L1BLOCKNUM 'number()' | cast --to-dec
sleep 12 && cast call $L1BLOCKNUM 'number()' | cast --to-dec
```
1. Get the current implementation for the contract.
```sh
L1BLOCKNUM_IMPLEMENTATION=`cast call $L1BLOCKNUM "implementation()" | sed 's/000000000000000000000000//'`
echo $L1BLOCKNUM_IMPLEMENTATION
```
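If you prefer to skip the `sed` cleanup, `cast` can decode the raw 32-byte return value into an address when the return type is included in the signature; this is just an alternative to the command above.
```sh
# Same query, with cast decoding the padded word into an address
L1BLOCKNUM_IMPLEMENTATION=`cast call $L1BLOCKNUM "implementation()(address)"`
echo $L1BLOCKNUM_IMPLEMENTATION
```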
1. Change the implementation to the zero address.
```sh
cast send --private-key $PRIVKEY $PROXY_ADMIN "upgrade(address,address)" $L1BLOCKNUM $ZERO_ADDR
```
1. See that the implementation is address zero, and that calling it fails.
```sh
cast call $L1BLOCKNUM 'implementation()'
cast call $L1BLOCKNUM 'number()'
```
1. Fix the predeploy by returning it to the previous implementation, and verify it works.
```sh
cast send --private-key $PRIVKEY $PROXY_ADMIN "upgrade(address,address)" $L1BLOCKNUM $L1BLOCKNUM_IMPLEMENTATION
cast call $L1BLOCKNUM 'number()' | cast --to-dec
```
\ No newline at end of file
......@@ -185,7 +185,7 @@ func (m *Metrics) RecordLatestL1Block(l1ref eth.L1BlockRef) {
m.RecordL1Ref("latest", l1ref)
}
// RecordL2BlockLoaded should be called when a new L2 block was loaded into the
// RecordL2BlocksLoaded should be called when a new L2 block was loaded into the
// channel manager (but not processed yet).
func (m *Metrics) RecordL2BlocksLoaded(l2ref eth.L2BlockRef) {
m.RecordL2Ref(StageLoaded, l2ref)
......
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package legacy_bindings
import (
"errors"
"math/big"
"strings"
ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
)
// Reference imports to suppress errors if they are not otherwise used.
var (
_ = errors.New
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
_ = event.NewSubscription
)
// LibOVMCodecQueueElement is an auto generated low-level Go binding around an user-defined struct.
type LibOVMCodecQueueElement struct {
TransactionHash [32]byte
Timestamp *big.Int
BlockNumber *big.Int
}
// CanonicalTransactionChainMetaData contains all meta data concerning the CanonicalTransactionChain contract.
var CanonicalTransactionChainMetaData = &bind.MetaData{
ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_libAddressManager\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_maxTransactionGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_l2GasDiscountDivisor\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_enqueueGasCost\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l2GasDiscountDivisor\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"enqueueGasCost\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"enqueueL2GasPrepaid\",\"type\":\"uint256\"}],\"name\":\"L2GasParamsUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_startingQueueIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_numQueueElements\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_totalElements\",\"type\":\"uint256\"}],\"name\":\"QueueBatchAppended\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_startingQueueIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_numQueueElements\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_totalElements\",\"type\":\"uint256\"}],\"name\":\"SequencerBatchAppended\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"_batchIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"_batchRoot\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_batchSize\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_prevTotalElements\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"_extraData\",\"type\":\"bytes\"}],\"name\":\"TransactionBatchAppended\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_l1TxOrigin\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"_queueIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_timestamp\",\"type\":\"uint256\"}],\"name\":\"TransactionEnqueued\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"MAX_ROLLUP_TX_SIZE\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MIN_ROLLUP_TX_GAS\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"appendSequencerBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"batches\",\"outputs\":[{\"internalType\":\"contractIChainStorageContainer\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\
"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"enqueue\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"enqueueGasCost\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"enqueueL2GasPrepaid\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLastBlockNumber\",\"outputs\":[{\"internalType\":\"uint40\",\"name\":\"\",\"type\":\"uint40\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLastTimestamp\",\"outputs\":[{\"internalType\":\"uint40\",\"name\":\"\",\"type\":\"uint40\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getNextQueueIndex\",\"outputs\":[{\"internalType\":\"uint40\",\"name\":\"\",\"type\":\"uint40\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getNumPendingQueueElements\",\"outputs\":[{\"internalType\":\"uint40\",\"name\":\"\",\"type\":\"uint40\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_index\",\"type\":\"uint256\"}],\"name\":\"getQueueElement\",\"outputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"transactionHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint40\",\"name\":\"timestamp\",\"type\":\"uint40\"},{\"internalType\":\"uint40\",\"name\":\"blockNumber\",\"type\":\"uint40\"}],\"internalType\":\"structLib_OVMCodec.QueueElement\",\"name\":\"_element\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getQueueLength\",\"outputs\":[{\"internalType\":\"uint40\",\"name\":\"\",\"type\":\"uint40\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTotalBatches\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"_totalBatches\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTotalElements\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"_totalElements\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l2GasDiscountDivisor\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"libAddressManager\",\"outputs\":[{\"internalType\":\"contractLib_AddressManager\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"maxTransactionGasLimit\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"_name\",\"type\":\"string\"}],\"name\":\"resolve\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l2GasDiscountDivisor\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_enqueueGasCost\",\"type\":\"uint256\"}],\"name\":\"setGasParams\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
Bin: "0x608060405234801561001057600080fd5b5060405162001a9838038062001a9883398101604081905261003191610072565b600080546001600160a01b0319166001600160a01b03861617905560048390556002829055600181905561006581836100bd565b600355506100ea92505050565b6000806000806080858703121561008857600080fd5b84516001600160a01b038116811461009f57600080fd5b60208601516040870151606090970151919890975090945092505050565b60008160001904831182151516156100e557634e487b7160e01b600052601160045260246000fd5b500290565b61199e80620000fa6000396000f3fe608060405234801561001057600080fd5b506004361061016c5760003560e01c8063876ed5cb116100cd578063d0f8934411610081578063e654b1fb11610066578063e654b1fb146102c0578063edcc4a45146102c9578063f722b41a146102dc57600080fd5b8063d0f89344146102b0578063e561dddc146102b857600080fd5b8063b8f77005116100b2578063b8f7700514610297578063ccf987c81461029f578063cfdf677e146102a857600080fd5b8063876ed5cb146102855780638d38c6c11461028e57600080fd5b80635ae6256d1161012457806378f4b2f21161010957806378f4b2f2146102645780637a167a8a1461026e5780637aa63a861461027d57600080fd5b80635ae6256d146102475780636fee07e01461024f57600080fd5b80632a7f18be116101555780632a7f18be146101d25780633789977014610216578063461a44781461023457600080fd5b80630b3dfa9714610171578063299ca4781461018d575b600080fd5b61017a60035481565b6040519081526020015b60405180910390f35b6000546101ad9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610184565b6101e56101e03660046113e5565b6102e4565b604080518251815260208084015164ffffffffff908116918301919091529282015190921690820152606001610184565b61021e610362565b60405164ffffffffff9091168152602001610184565b6101ad6102423660046114c1565b610376565b61021e610423565b61026261025d366004611537565b610437565b005b61017a620186a081565b60055464ffffffffff1661021e565b61017a610899565b61017a61c35081565b61017a60045481565b60065461021e565b61017a60025481565b6101ad6108b4565b6102626108dc565b61017a610df8565b61017a60015481565b6102626102d73660046115a4565b610e7f565b61021e611016565b604080516060810182526000808252602082018190529181019190915260068281548110610314576103146115c6565b6000918252602091829020604080516060810182526002909302909101805483526001015464ffffffffff808216948401949094526501000000000090049092169181019190915292915050565b60008061036d611032565b50949350505050565b600080546040517fbf40fac100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091169063bf40fac1906103cd908590600401611660565b60206040518083038186803b1580156103e557600080fd5b505afa1580156103f9573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061041d919061167a565b92915050565b60008061042e611032565b95945050505050565b61c350815111156104cf576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603d60248201527f5472616e73616374696f6e20646174612073697a652065786365656473206d6160448201527f78696d756d20666f7220726f6c6c7570207472616e73616374696f6e2e00000060648201526084015b60405180910390fd5b600454821115610561576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603d60248201527f5472616e73616374696f6e20676173206c696d69742065786365656473206d6160448201527f78696d756d20666f7220726f6c6c7570207472616e73616374696f6e2e00000060648201526084016104c6565b620186a08210156105f4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602960248201527f5472616e73616374696f6e20676173206c696d697420746f6f206c6f7720746f60448201527f20656e71756575652e000000000000000000000000000000000000000000000060648201526084016104c65
65b6003548211156106dc5760006002546003548461061191906116c6565b61061b91906116dd565b905060005a90508181116106b1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e73756666696369656e742067617320666f72204c322072617465206c696d60448201527f6974696e67206275726e2e00000000000000000000000000000000000000000060648201526084016104c6565b60005b825a6106c090846116c6565b10156106d857806106d081611718565b9150506106b4565b5050505b6000333214156106ed575033610706565b5033731111000000000000000000000000000000001111015b60008185858560405160200161071f9493929190611751565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152828252805160209182012060608401835280845264ffffffffff42811692850192835243811693850193845260068054600181810183556000838152975160029092027ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f81019290925594517ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d4090910180549651841665010000000000027fffffffffffffffffffffffffffffffffffffffffffff0000000000000000000090971691909316179490941790559154919350610825916116c6565b9050808673ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167f4b388aecf9fa6cc92253704e5975a6129a4f735bdbd99567df4ed0094ee4ceb58888426040516108899392919061179a565b60405180910390a4505050505050565b6000806108a4611032565b50505064ffffffffff1692915050565b60006108d760405180606001604052806021815260200161194860219139610376565b905090565b60043560d81c60093560e890811c90600c35901c6108f8610899565b8364ffffffffff161461098d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603d60248201527f41637475616c20626174636820737461727420696e64657820646f6573206e6f60448201527f74206d6174636820657870656374656420737461727420696e6465782e00000060648201526084016104c6565b6109cb6040518060400160405280600d81526020017f4f564d5f53657175656e63657200000000000000000000000000000000000000815250610376565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610a85576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602d60248201527f46756e6374696f6e2063616e206f6e6c792062652063616c6c6564206279207460448201527f68652053657175656e6365722e0000000000000000000000000000000000000060648201526084016104c6565b6000610a9762ffffff831660106117c3565b610aa290600f611800565b905064ffffffffff8116361015610b3b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f4e6f7420656e6f756768204261746368436f6e74657874732070726f7669646560448201527f642e00000000000000000000000000000000000000000000000000000000000060648201526084016104c6565b6005546040805160808101825260008082526020820181905291810182905260608101829052909164ffffffffff169060005b8562ffffff168163ffffffff161015610bcc576000610b928263ffffffff166110ed565b8051909350839150610ba49086611818565b9450826020015184610bb69190611840565b9350508080610bc490611860565b915050610b6e565b5060065464ffffffffff83161115610c8c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604260248201527f417474656d7074656420746f20617070656e64206d6f726520656c656d656e7460448201527f73207468616e2061726520617661696c61626c6520696e20746865207175657560648201527f652e000000000000000000000000000000000000000000000000000000000000608482015260a4016104c6565b6000610c9d8462ffffff8916611884565b63ffffffff169050600080836020015160001415610cc657505060408201516060830151610d37565b60006006610cd56001886118a9565b64ffffffffff1681548110610cec57
610cec6115c6565b6000918252602091829020604080516060810182526002909302909101805483526001015464ffffffffff808216948401859052650100000000009091041691018190529093509150505b610d5b610d456001436116c6565b408a62ffffff168564ffffffffff168585611174565b7f602f1aeac0ca2e7a13e281a9ef0ad7838542712ce16780fa2ecffd351f05f899610d8684876118a9565b84610d8f610899565b6040805164ffffffffff94851681529390921660208401529082015260600160405180910390a15050600580547fffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000001664ffffffffff949094169390931790925550505050505050565b6000610e026108b4565b73ffffffffffffffffffffffffffffffffffffffff16631f7b6d326040518163ffffffff1660e01b815260040160206040518083038186803b158015610e4757600080fd5b505afa158015610e5b573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906108d791906118c7565b60008054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b815260040160206040518083038186803b158015610ee557600080fd5b505afa158015610ef9573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610f1d919061167a565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610fb1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f6e6c792063616c6c61626c6520627920746865204275726e2041646d696e2e60448201526064016104c6565b60018190556002829055610fc581836117c3565b60038190556002546001546040805192835260208301919091528101919091527fc6ed75e96b8b18b71edc1a6e82a9d677f8268c774a262c624eeb2cf0a8b3e07e9060600160405180910390a15050565b6005546006546000916108d79164ffffffffff909116906118a9565b60008060008060006110426108b4565b73ffffffffffffffffffffffffffffffffffffffff1663ccf8f9696040518163ffffffff1660e01b815260040160206040518083038186803b15801561108757600080fd5b505afa15801561109b573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906110bf91906118e0565b64ffffffffff602882901c811697605083901c82169750607883901c8216965060a09290921c169350915050565b6111186040518060800160405280600081526020016000815260200160008152602001600081525090565b60006111256010846117c3565b61113090600f611800565b60408051608081018252823560e890811c82526003840135901c6020820152600683013560d890811c92820192909252600b90920135901c60608201529392505050565b600061117e6108b4565b905060008061118b611032565b50509150915060006040518060a001604052808573ffffffffffffffffffffffffffffffffffffffff16631f7b6d326040518163ffffffff1660e01b815260040160206040518083038186803b1580156111e457600080fd5b505afa1580156111f8573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061121c91906118c7565b81526020018a81526020018981526020018464ffffffffff16815260200160405180602001604052806000815250815250905080600001517f127186556e7be68c7e31263195225b4de02820707889540969f62c05cf73525e82602001518360400151846060015185608001516040516112999493929190611922565b60405180910390a260006112ac8261139f565b905060006112e78360400151866112c39190611840565b6112cd8b87611840565b602890811b9190911760508b901b1760788a901b17901b90565b6040517f2015276c000000000000000000000000000000000000000000000000000000008152600481018490527fffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000008216602482015290915073ffffffffffffffffffffffffffffffffffffffff871690632015276c90604401600060405180830381600087803b15801561137a57600080fd5b505af115801561138e573d6000803e3d6000fd5b505050505050505050505050505050565b600081602001518260400151836060015184608001516040516020016113c89493929190611922565b604051602081830303815290604
052805190602001209050919050565b6000602082840312156113f757600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600067ffffffffffffffff80841115611448576114486113fe565b604051601f85017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190828211818310171561148e5761148e6113fe565b816040528093508581528686860111156114a757600080fd5b858560208301376000602087830101525050509392505050565b6000602082840312156114d357600080fd5b813567ffffffffffffffff8111156114ea57600080fd5b8201601f810184136114fb57600080fd5b61150a8482356020840161142d565b949350505050565b73ffffffffffffffffffffffffffffffffffffffff8116811461153457600080fd5b50565b60008060006060848603121561154c57600080fd5b833561155781611512565b925060208401359150604084013567ffffffffffffffff81111561157a57600080fd5b8401601f8101861361158b57600080fd5b61159a8682356020840161142d565b9150509250925092565b600080604083850312156115b757600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6000815180845260005b8181101561161b576020818501810151868301820152016115ff565b8181111561162d576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60208152600061167360208301846115f5565b9392505050565b60006020828403121561168c57600080fd5b815161167381611512565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000828210156116d8576116d8611697565b500390565b600082611713577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82141561174a5761174a611697565b5060010190565b600073ffffffffffffffffffffffffffffffffffffffff80871683528086166020840152508360408301526080606083015261179060808301846115f5565b9695505050505050565b8381526060602082015260006117b360608301856115f5565b9050826040830152949350505050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04831182151516156117fb576117fb611697565b500290565b6000821982111561181357611813611697565b500190565b600063ffffffff80831681851680830382111561183757611837611697565b01949350505050565b600064ffffffffff80831681851680830382111561183757611837611697565b600063ffffffff8083168181141561187a5761187a611697565b6001019392505050565b600063ffffffff838116908316818110156118a1576118a1611697565b039392505050565b600064ffffffffff838116908316818110156118a1576118a1611697565b6000602082840312156118d957600080fd5b5051919050565b6000602082840312156118f257600080fd5b81517fffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000008116811461167357600080fd5b84815283602082015282604082015260806060820152600061179060808301846115f556fe436861696e53746f72616765436f6e7461696e65722d4354432d62617463686573a264697066735822122071f9046c41835cfaa3b888bb4aa8b907bdd46588ad69741847a96bf3fcaad90264736f6c63430008090033",
}
// CanonicalTransactionChainABI is the input ABI used to generate the binding from.
// Deprecated: Use CanonicalTransactionChainMetaData.ABI instead.
var CanonicalTransactionChainABI = CanonicalTransactionChainMetaData.ABI
// CanonicalTransactionChainBin is the compiled bytecode used for deploying new contracts.
// Deprecated: Use CanonicalTransactionChainMetaData.Bin instead.
var CanonicalTransactionChainBin = CanonicalTransactionChainMetaData.Bin
// DeployCanonicalTransactionChain deploys a new Ethereum contract, binding an instance of CanonicalTransactionChain to it.
func DeployCanonicalTransactionChain(auth *bind.TransactOpts, backend bind.ContractBackend, _libAddressManager common.Address, _maxTransactionGasLimit *big.Int, _l2GasDiscountDivisor *big.Int, _enqueueGasCost *big.Int) (common.Address, *types.Transaction, *CanonicalTransactionChain, error) {
parsed, err := CanonicalTransactionChainMetaData.GetAbi()
if err != nil {
return common.Address{}, nil, nil, err
}
if parsed == nil {
return common.Address{}, nil, nil, errors.New("GetABI returned nil")
}
address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(CanonicalTransactionChainBin), backend, _libAddressManager, _maxTransactionGasLimit, _l2GasDiscountDivisor, _enqueueGasCost)
if err != nil {
return common.Address{}, nil, nil, err
}
return address, tx, &CanonicalTransactionChain{CanonicalTransactionChainCaller: CanonicalTransactionChainCaller{contract: contract}, CanonicalTransactionChainTransactor: CanonicalTransactionChainTransactor{contract: contract}, CanonicalTransactionChainFilterer: CanonicalTransactionChainFilterer{contract: contract}}, nil
}
// CanonicalTransactionChain is an auto generated Go binding around an Ethereum contract.
type CanonicalTransactionChain struct {
CanonicalTransactionChainCaller // Read-only binding to the contract
CanonicalTransactionChainTransactor // Write-only binding to the contract
CanonicalTransactionChainFilterer // Log filterer for contract events
}
// CanonicalTransactionChainCaller is an auto generated read-only Go binding around an Ethereum contract.
type CanonicalTransactionChainCaller struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// CanonicalTransactionChainTransactor is an auto generated write-only Go binding around an Ethereum contract.
type CanonicalTransactionChainTransactor struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// CanonicalTransactionChainFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
type CanonicalTransactionChainFilterer struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// CanonicalTransactionChainSession is an auto generated Go binding around an Ethereum contract,
// with pre-set call and transact options.
type CanonicalTransactionChainSession struct {
Contract *CanonicalTransactionChain // Generic contract binding to set the session for
CallOpts bind.CallOpts // Call options to use throughout this session
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
}
// CanonicalTransactionChainCallerSession is an auto generated read-only Go binding around an Ethereum contract,
// with pre-set call options.
type CanonicalTransactionChainCallerSession struct {
Contract *CanonicalTransactionChainCaller // Generic contract caller binding to set the session for
CallOpts bind.CallOpts // Call options to use throughout this session
}
// CanonicalTransactionChainTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
// with pre-set transact options.
type CanonicalTransactionChainTransactorSession struct {
Contract *CanonicalTransactionChainTransactor // Generic contract transactor binding to set the session for
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
}
// CanonicalTransactionChainRaw is an auto generated low-level Go binding around an Ethereum contract.
type CanonicalTransactionChainRaw struct {
Contract *CanonicalTransactionChain // Generic contract binding to access the raw methods on
}
// CanonicalTransactionChainCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
type CanonicalTransactionChainCallerRaw struct {
Contract *CanonicalTransactionChainCaller // Generic read-only contract binding to access the raw methods on
}
// CanonicalTransactionChainTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
type CanonicalTransactionChainTransactorRaw struct {
Contract *CanonicalTransactionChainTransactor // Generic write-only contract binding to access the raw methods on
}
// NewCanonicalTransactionChain creates a new instance of CanonicalTransactionChain, bound to a specific deployed contract.
func NewCanonicalTransactionChain(address common.Address, backend bind.ContractBackend) (*CanonicalTransactionChain, error) {
contract, err := bindCanonicalTransactionChain(address, backend, backend, backend)
if err != nil {
return nil, err
}
return &CanonicalTransactionChain{CanonicalTransactionChainCaller: CanonicalTransactionChainCaller{contract: contract}, CanonicalTransactionChainTransactor: CanonicalTransactionChainTransactor{contract: contract}, CanonicalTransactionChainFilterer: CanonicalTransactionChainFilterer{contract: contract}}, nil
}
// NewCanonicalTransactionChainCaller creates a new read-only instance of CanonicalTransactionChain, bound to a specific deployed contract.
func NewCanonicalTransactionChainCaller(address common.Address, caller bind.ContractCaller) (*CanonicalTransactionChainCaller, error) {
contract, err := bindCanonicalTransactionChain(address, caller, nil, nil)
if err != nil {
return nil, err
}
return &CanonicalTransactionChainCaller{contract: contract}, nil
}
// NewCanonicalTransactionChainTransactor creates a new write-only instance of CanonicalTransactionChain, bound to a specific deployed contract.
func NewCanonicalTransactionChainTransactor(address common.Address, transactor bind.ContractTransactor) (*CanonicalTransactionChainTransactor, error) {
contract, err := bindCanonicalTransactionChain(address, nil, transactor, nil)
if err != nil {
return nil, err
}
return &CanonicalTransactionChainTransactor{contract: contract}, nil
}
// NewCanonicalTransactionChainFilterer creates a new log filterer instance of CanonicalTransactionChain, bound to a specific deployed contract.
func NewCanonicalTransactionChainFilterer(address common.Address, filterer bind.ContractFilterer) (*CanonicalTransactionChainFilterer, error) {
contract, err := bindCanonicalTransactionChain(address, nil, nil, filterer)
if err != nil {
return nil, err
}
return &CanonicalTransactionChainFilterer{contract: contract}, nil
}
// bindCanonicalTransactionChain binds a generic wrapper to an already deployed contract.
func bindCanonicalTransactionChain(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
parsed, err := abi.JSON(strings.NewReader(CanonicalTransactionChainABI))
if err != nil {
return nil, err
}
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (_CanonicalTransactionChain *CanonicalTransactionChainRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
return _CanonicalTransactionChain.Contract.CanonicalTransactionChainCaller.contract.Call(opts, result, method, params...)
}
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (_CanonicalTransactionChain *CanonicalTransactionChainRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
return _CanonicalTransactionChain.Contract.CanonicalTransactionChainTransactor.contract.Transfer(opts)
}
// Transact invokes the (paid) contract method with params as input values.
func (_CanonicalTransactionChain *CanonicalTransactionChainRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
return _CanonicalTransactionChain.Contract.CanonicalTransactionChainTransactor.contract.Transact(opts, method, params...)
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
return _CanonicalTransactionChain.Contract.contract.Call(opts, result, method, params...)
}
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (_CanonicalTransactionChain *CanonicalTransactionChainTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
return _CanonicalTransactionChain.Contract.contract.Transfer(opts)
}
// Transact invokes the (paid) contract method with params as input values.
func (_CanonicalTransactionChain *CanonicalTransactionChainTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
return _CanonicalTransactionChain.Contract.contract.Transact(opts, method, params...)
}
// MAXROLLUPTXSIZE is a free data retrieval call binding the contract method 0x876ed5cb.
//
// Solidity: function MAX_ROLLUP_TX_SIZE() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) MAXROLLUPTXSIZE(opts *bind.CallOpts) (*big.Int, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "MAX_ROLLUP_TX_SIZE")
if err != nil {
return *new(*big.Int), err
}
out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
return out0, err
}
// MAXROLLUPTXSIZE is a free data retrieval call binding the contract method 0x876ed5cb.
//
// Solidity: function MAX_ROLLUP_TX_SIZE() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) MAXROLLUPTXSIZE() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.MAXROLLUPTXSIZE(&_CanonicalTransactionChain.CallOpts)
}
// MAXROLLUPTXSIZE is a free data retrieval call binding the contract method 0x876ed5cb.
//
// Solidity: function MAX_ROLLUP_TX_SIZE() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) MAXROLLUPTXSIZE() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.MAXROLLUPTXSIZE(&_CanonicalTransactionChain.CallOpts)
}
// MINROLLUPTXGAS is a free data retrieval call binding the contract method 0x78f4b2f2.
//
// Solidity: function MIN_ROLLUP_TX_GAS() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) MINROLLUPTXGAS(opts *bind.CallOpts) (*big.Int, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "MIN_ROLLUP_TX_GAS")
if err != nil {
return *new(*big.Int), err
}
out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
return out0, err
}
// MINROLLUPTXGAS is a free data retrieval call binding the contract method 0x78f4b2f2.
//
// Solidity: function MIN_ROLLUP_TX_GAS() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) MINROLLUPTXGAS() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.MINROLLUPTXGAS(&_CanonicalTransactionChain.CallOpts)
}
// MINROLLUPTXGAS is a free data retrieval call binding the contract method 0x78f4b2f2.
//
// Solidity: function MIN_ROLLUP_TX_GAS() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) MINROLLUPTXGAS() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.MINROLLUPTXGAS(&_CanonicalTransactionChain.CallOpts)
}
// Batches is a free data retrieval call binding the contract method 0xcfdf677e.
//
// Solidity: function batches() view returns(address)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) Batches(opts *bind.CallOpts) (common.Address, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "batches")
if err != nil {
return *new(common.Address), err
}
out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)
return out0, err
}
// Batches is a free data retrieval call binding the contract method 0xcfdf677e.
//
// Solidity: function batches() view returns(address)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) Batches() (common.Address, error) {
return _CanonicalTransactionChain.Contract.Batches(&_CanonicalTransactionChain.CallOpts)
}
// Batches is a free data retrieval call binding the contract method 0xcfdf677e.
//
// Solidity: function batches() view returns(address)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) Batches() (common.Address, error) {
return _CanonicalTransactionChain.Contract.Batches(&_CanonicalTransactionChain.CallOpts)
}
// EnqueueGasCost is a free data retrieval call binding the contract method 0xe654b1fb.
//
// Solidity: function enqueueGasCost() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) EnqueueGasCost(opts *bind.CallOpts) (*big.Int, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "enqueueGasCost")
if err != nil {
return *new(*big.Int), err
}
out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
return out0, err
}
// EnqueueGasCost is a free data retrieval call binding the contract method 0xe654b1fb.
//
// Solidity: function enqueueGasCost() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) EnqueueGasCost() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.EnqueueGasCost(&_CanonicalTransactionChain.CallOpts)
}
// EnqueueGasCost is a free data retrieval call binding the contract method 0xe654b1fb.
//
// Solidity: function enqueueGasCost() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) EnqueueGasCost() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.EnqueueGasCost(&_CanonicalTransactionChain.CallOpts)
}
// EnqueueL2GasPrepaid is a free data retrieval call binding the contract method 0x0b3dfa97.
//
// Solidity: function enqueueL2GasPrepaid() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) EnqueueL2GasPrepaid(opts *bind.CallOpts) (*big.Int, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "enqueueL2GasPrepaid")
if err != nil {
return *new(*big.Int), err
}
out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
return out0, err
}
// EnqueueL2GasPrepaid is a free data retrieval call binding the contract method 0x0b3dfa97.
//
// Solidity: function enqueueL2GasPrepaid() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) EnqueueL2GasPrepaid() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.EnqueueL2GasPrepaid(&_CanonicalTransactionChain.CallOpts)
}
// EnqueueL2GasPrepaid is a free data retrieval call binding the contract method 0x0b3dfa97.
//
// Solidity: function enqueueL2GasPrepaid() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) EnqueueL2GasPrepaid() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.EnqueueL2GasPrepaid(&_CanonicalTransactionChain.CallOpts)
}
// GetLastBlockNumber is a free data retrieval call binding the contract method 0x5ae6256d.
//
// Solidity: function getLastBlockNumber() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) GetLastBlockNumber(opts *bind.CallOpts) (*big.Int, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "getLastBlockNumber")
if err != nil {
return *new(*big.Int), err
}
out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
return out0, err
}
// GetLastBlockNumber is a free data retrieval call binding the contract method 0x5ae6256d.
//
// Solidity: function getLastBlockNumber() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) GetLastBlockNumber() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.GetLastBlockNumber(&_CanonicalTransactionChain.CallOpts)
}
// GetLastBlockNumber is a free data retrieval call binding the contract method 0x5ae6256d.
//
// Solidity: function getLastBlockNumber() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) GetLastBlockNumber() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.GetLastBlockNumber(&_CanonicalTransactionChain.CallOpts)
}
// GetLastTimestamp is a free data retrieval call binding the contract method 0x37899770.
//
// Solidity: function getLastTimestamp() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) GetLastTimestamp(opts *bind.CallOpts) (*big.Int, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "getLastTimestamp")
if err != nil {
return *new(*big.Int), err
}
out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
return out0, err
}
// GetLastTimestamp is a free data retrieval call binding the contract method 0x37899770.
//
// Solidity: function getLastTimestamp() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) GetLastTimestamp() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.GetLastTimestamp(&_CanonicalTransactionChain.CallOpts)
}
// GetLastTimestamp is a free data retrieval call binding the contract method 0x37899770.
//
// Solidity: function getLastTimestamp() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) GetLastTimestamp() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.GetLastTimestamp(&_CanonicalTransactionChain.CallOpts)
}
// GetNextQueueIndex is a free data retrieval call binding the contract method 0x7a167a8a.
//
// Solidity: function getNextQueueIndex() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) GetNextQueueIndex(opts *bind.CallOpts) (*big.Int, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "getNextQueueIndex")
if err != nil {
return *new(*big.Int), err
}
out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
return out0, err
}
// GetNextQueueIndex is a free data retrieval call binding the contract method 0x7a167a8a.
//
// Solidity: function getNextQueueIndex() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) GetNextQueueIndex() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.GetNextQueueIndex(&_CanonicalTransactionChain.CallOpts)
}
// GetNextQueueIndex is a free data retrieval call binding the contract method 0x7a167a8a.
//
// Solidity: function getNextQueueIndex() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) GetNextQueueIndex() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.GetNextQueueIndex(&_CanonicalTransactionChain.CallOpts)
}
// GetNumPendingQueueElements is a free data retrieval call binding the contract method 0xf722b41a.
//
// Solidity: function getNumPendingQueueElements() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) GetNumPendingQueueElements(opts *bind.CallOpts) (*big.Int, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "getNumPendingQueueElements")
if err != nil {
return *new(*big.Int), err
}
out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
return out0, err
}
// GetNumPendingQueueElements is a free data retrieval call binding the contract method 0xf722b41a.
//
// Solidity: function getNumPendingQueueElements() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) GetNumPendingQueueElements() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.GetNumPendingQueueElements(&_CanonicalTransactionChain.CallOpts)
}
// GetNumPendingQueueElements is a free data retrieval call binding the contract method 0xf722b41a.
//
// Solidity: function getNumPendingQueueElements() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) GetNumPendingQueueElements() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.GetNumPendingQueueElements(&_CanonicalTransactionChain.CallOpts)
}
// GetQueueElement is a free data retrieval call binding the contract method 0x2a7f18be.
//
// Solidity: function getQueueElement(uint256 _index) view returns((bytes32,uint40,uint40) _element)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) GetQueueElement(opts *bind.CallOpts, _index *big.Int) (LibOVMCodecQueueElement, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "getQueueElement", _index)
if err != nil {
return *new(LibOVMCodecQueueElement), err
}
out0 := *abi.ConvertType(out[0], new(LibOVMCodecQueueElement)).(*LibOVMCodecQueueElement)
return out0, err
}
// GetQueueElement is a free data retrieval call binding the contract method 0x2a7f18be.
//
// Solidity: function getQueueElement(uint256 _index) view returns((bytes32,uint40,uint40) _element)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) GetQueueElement(_index *big.Int) (LibOVMCodecQueueElement, error) {
return _CanonicalTransactionChain.Contract.GetQueueElement(&_CanonicalTransactionChain.CallOpts, _index)
}
// GetQueueElement is a free data retrieval call binding the contract method 0x2a7f18be.
//
// Solidity: function getQueueElement(uint256 _index) view returns((bytes32,uint40,uint40) _element)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) GetQueueElement(_index *big.Int) (LibOVMCodecQueueElement, error) {
return _CanonicalTransactionChain.Contract.GetQueueElement(&_CanonicalTransactionChain.CallOpts, _index)
}
// GetQueueLength is a free data retrieval call binding the contract method 0xb8f77005.
//
// Solidity: function getQueueLength() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) GetQueueLength(opts *bind.CallOpts) (*big.Int, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "getQueueLength")
if err != nil {
return *new(*big.Int), err
}
out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
return out0, err
}
// GetQueueLength is a free data retrieval call binding the contract method 0xb8f77005.
//
// Solidity: function getQueueLength() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) GetQueueLength() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.GetQueueLength(&_CanonicalTransactionChain.CallOpts)
}
// GetQueueLength is a free data retrieval call binding the contract method 0xb8f77005.
//
// Solidity: function getQueueLength() view returns(uint40)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) GetQueueLength() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.GetQueueLength(&_CanonicalTransactionChain.CallOpts)
}
// GetTotalBatches is a free data retrieval call binding the contract method 0xe561dddc.
//
// Solidity: function getTotalBatches() view returns(uint256 _totalBatches)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) GetTotalBatches(opts *bind.CallOpts) (*big.Int, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "getTotalBatches")
if err != nil {
return *new(*big.Int), err
}
out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
return out0, err
}
// GetTotalBatches is a free data retrieval call binding the contract method 0xe561dddc.
//
// Solidity: function getTotalBatches() view returns(uint256 _totalBatches)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) GetTotalBatches() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.GetTotalBatches(&_CanonicalTransactionChain.CallOpts)
}
// GetTotalBatches is a free data retrieval call binding the contract method 0xe561dddc.
//
// Solidity: function getTotalBatches() view returns(uint256 _totalBatches)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) GetTotalBatches() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.GetTotalBatches(&_CanonicalTransactionChain.CallOpts)
}
// GetTotalElements is a free data retrieval call binding the contract method 0x7aa63a86.
//
// Solidity: function getTotalElements() view returns(uint256 _totalElements)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) GetTotalElements(opts *bind.CallOpts) (*big.Int, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "getTotalElements")
if err != nil {
return *new(*big.Int), err
}
out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
return out0, err
}
// GetTotalElements is a free data retrieval call binding the contract method 0x7aa63a86.
//
// Solidity: function getTotalElements() view returns(uint256 _totalElements)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) GetTotalElements() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.GetTotalElements(&_CanonicalTransactionChain.CallOpts)
}
// GetTotalElements is a free data retrieval call binding the contract method 0x7aa63a86.
//
// Solidity: function getTotalElements() view returns(uint256 _totalElements)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) GetTotalElements() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.GetTotalElements(&_CanonicalTransactionChain.CallOpts)
}
// L2GasDiscountDivisor is a free data retrieval call binding the contract method 0xccf987c8.
//
// Solidity: function l2GasDiscountDivisor() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) L2GasDiscountDivisor(opts *bind.CallOpts) (*big.Int, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "l2GasDiscountDivisor")
if err != nil {
return *new(*big.Int), err
}
out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
return out0, err
}
// L2GasDiscountDivisor is a free data retrieval call binding the contract method 0xccf987c8.
//
// Solidity: function l2GasDiscountDivisor() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) L2GasDiscountDivisor() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.L2GasDiscountDivisor(&_CanonicalTransactionChain.CallOpts)
}
// L2GasDiscountDivisor is a free data retrieval call binding the contract method 0xccf987c8.
//
// Solidity: function l2GasDiscountDivisor() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) L2GasDiscountDivisor() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.L2GasDiscountDivisor(&_CanonicalTransactionChain.CallOpts)
}
// LibAddressManager is a free data retrieval call binding the contract method 0x299ca478.
//
// Solidity: function libAddressManager() view returns(address)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) LibAddressManager(opts *bind.CallOpts) (common.Address, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "libAddressManager")
if err != nil {
return *new(common.Address), err
}
out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)
return out0, err
}
// LibAddressManager is a free data retrieval call binding the contract method 0x299ca478.
//
// Solidity: function libAddressManager() view returns(address)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) LibAddressManager() (common.Address, error) {
return _CanonicalTransactionChain.Contract.LibAddressManager(&_CanonicalTransactionChain.CallOpts)
}
// LibAddressManager is a free data retrieval call binding the contract method 0x299ca478.
//
// Solidity: function libAddressManager() view returns(address)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) LibAddressManager() (common.Address, error) {
return _CanonicalTransactionChain.Contract.LibAddressManager(&_CanonicalTransactionChain.CallOpts)
}
// MaxTransactionGasLimit is a free data retrieval call binding the contract method 0x8d38c6c1.
//
// Solidity: function maxTransactionGasLimit() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) MaxTransactionGasLimit(opts *bind.CallOpts) (*big.Int, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "maxTransactionGasLimit")
if err != nil {
return *new(*big.Int), err
}
out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
return out0, err
}
// MaxTransactionGasLimit is a free data retrieval call binding the contract method 0x8d38c6c1.
//
// Solidity: function maxTransactionGasLimit() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) MaxTransactionGasLimit() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.MaxTransactionGasLimit(&_CanonicalTransactionChain.CallOpts)
}
// MaxTransactionGasLimit is a free data retrieval call binding the contract method 0x8d38c6c1.
//
// Solidity: function maxTransactionGasLimit() view returns(uint256)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) MaxTransactionGasLimit() (*big.Int, error) {
return _CanonicalTransactionChain.Contract.MaxTransactionGasLimit(&_CanonicalTransactionChain.CallOpts)
}
// Resolve is a free data retrieval call binding the contract method 0x461a4478.
//
// Solidity: function resolve(string _name) view returns(address)
func (_CanonicalTransactionChain *CanonicalTransactionChainCaller) Resolve(opts *bind.CallOpts, _name string) (common.Address, error) {
var out []interface{}
err := _CanonicalTransactionChain.contract.Call(opts, &out, "resolve", _name)
if err != nil {
return *new(common.Address), err
}
out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)
return out0, err
}
// Resolve is a free data retrieval call binding the contract method 0x461a4478.
//
// Solidity: function resolve(string _name) view returns(address)
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) Resolve(_name string) (common.Address, error) {
return _CanonicalTransactionChain.Contract.Resolve(&_CanonicalTransactionChain.CallOpts, _name)
}
// Resolve is a free data retrieval call binding the contract method 0x461a4478.
//
// Solidity: function resolve(string _name) view returns(address)
func (_CanonicalTransactionChain *CanonicalTransactionChainCallerSession) Resolve(_name string) (common.Address, error) {
return _CanonicalTransactionChain.Contract.Resolve(&_CanonicalTransactionChain.CallOpts, _name)
}
// AppendSequencerBatch is a paid mutator transaction binding the contract method 0xd0f89344.
//
// Solidity: function appendSequencerBatch() returns()
func (_CanonicalTransactionChain *CanonicalTransactionChainTransactor) AppendSequencerBatch(opts *bind.TransactOpts) (*types.Transaction, error) {
return _CanonicalTransactionChain.contract.Transact(opts, "appendSequencerBatch")
}
// AppendSequencerBatch is a paid mutator transaction binding the contract method 0xd0f89344.
//
// Solidity: function appendSequencerBatch() returns()
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) AppendSequencerBatch() (*types.Transaction, error) {
return _CanonicalTransactionChain.Contract.AppendSequencerBatch(&_CanonicalTransactionChain.TransactOpts)
}
// AppendSequencerBatch is a paid mutator transaction binding the contract method 0xd0f89344.
//
// Solidity: function appendSequencerBatch() returns()
func (_CanonicalTransactionChain *CanonicalTransactionChainTransactorSession) AppendSequencerBatch() (*types.Transaction, error) {
return _CanonicalTransactionChain.Contract.AppendSequencerBatch(&_CanonicalTransactionChain.TransactOpts)
}
// Enqueue is a paid mutator transaction binding the contract method 0x6fee07e0.
//
// Solidity: function enqueue(address _target, uint256 _gasLimit, bytes _data) returns()
func (_CanonicalTransactionChain *CanonicalTransactionChainTransactor) Enqueue(opts *bind.TransactOpts, _target common.Address, _gasLimit *big.Int, _data []byte) (*types.Transaction, error) {
return _CanonicalTransactionChain.contract.Transact(opts, "enqueue", _target, _gasLimit, _data)
}
// Enqueue is a paid mutator transaction binding the contract method 0x6fee07e0.
//
// Solidity: function enqueue(address _target, uint256 _gasLimit, bytes _data) returns()
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) Enqueue(_target common.Address, _gasLimit *big.Int, _data []byte) (*types.Transaction, error) {
return _CanonicalTransactionChain.Contract.Enqueue(&_CanonicalTransactionChain.TransactOpts, _target, _gasLimit, _data)
}
// Enqueue is a paid mutator transaction binding the contract method 0x6fee07e0.
//
// Solidity: function enqueue(address _target, uint256 _gasLimit, bytes _data) returns()
func (_CanonicalTransactionChain *CanonicalTransactionChainTransactorSession) Enqueue(_target common.Address, _gasLimit *big.Int, _data []byte) (*types.Transaction, error) {
return _CanonicalTransactionChain.Contract.Enqueue(&_CanonicalTransactionChain.TransactOpts, _target, _gasLimit, _data)
}
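Enqueue is the mutator counterpart: it submits a signed L1 transaction that queues an L1-to-L2 message. A hedged sketch, assuming the same package as the bindings with crypto/ecdsa also imported; the helper name, target address, gas limit, and calldata are illustrative:

// enqueueL1ToL2Tx is a sketch of sending an enqueue transaction.
func enqueueL1ToL2Tx(ctc *CanonicalTransactionChain, key *ecdsa.PrivateKey, chainID *big.Int) (*types.Transaction, error) {
	// Wrap the key into signed TransactOpts; nonce, gas price, and gas limit are
	// estimated by the binding unless set explicitly on opts.
	opts, err := bind.NewKeyedTransactorWithChainID(key, chainID)
	if err != nil {
		return nil, err
	}
	target := common.HexToAddress("0x0000000000000000000000000000000000000042") // placeholder L2 target
	l2GasLimit := big.NewInt(500000)                                             // illustrative L2 gas limit
	data := []byte{0xde, 0xad}                                                   // calldata forwarded to the L2 target
	return ctc.Enqueue(opts, target, l2GasLimit, data)
}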
// SetGasParams is a paid mutator transaction binding the contract method 0xedcc4a45.
//
// Solidity: function setGasParams(uint256 _l2GasDiscountDivisor, uint256 _enqueueGasCost) returns()
func (_CanonicalTransactionChain *CanonicalTransactionChainTransactor) SetGasParams(opts *bind.TransactOpts, _l2GasDiscountDivisor *big.Int, _enqueueGasCost *big.Int) (*types.Transaction, error) {
return _CanonicalTransactionChain.contract.Transact(opts, "setGasParams", _l2GasDiscountDivisor, _enqueueGasCost)
}
// SetGasParams is a paid mutator transaction binding the contract method 0xedcc4a45.
//
// Solidity: function setGasParams(uint256 _l2GasDiscountDivisor, uint256 _enqueueGasCost) returns()
func (_CanonicalTransactionChain *CanonicalTransactionChainSession) SetGasParams(_l2GasDiscountDivisor *big.Int, _enqueueGasCost *big.Int) (*types.Transaction, error) {
return _CanonicalTransactionChain.Contract.SetGasParams(&_CanonicalTransactionChain.TransactOpts, _l2GasDiscountDivisor, _enqueueGasCost)
}
// SetGasParams is a paid mutator transaction binding the contract method 0xedcc4a45.
//
// Solidity: function setGasParams(uint256 _l2GasDiscountDivisor, uint256 _enqueueGasCost) returns()
func (_CanonicalTransactionChain *CanonicalTransactionChainTransactorSession) SetGasParams(_l2GasDiscountDivisor *big.Int, _enqueueGasCost *big.Int) (*types.Transaction, error) {
return _CanonicalTransactionChain.Contract.SetGasParams(&_CanonicalTransactionChain.TransactOpts, _l2GasDiscountDivisor, _enqueueGasCost)
}
// CanonicalTransactionChainL2GasParamsUpdatedIterator is returned from FilterL2GasParamsUpdated and is used to iterate over the raw logs and unpacked data for L2GasParamsUpdated events raised by the CanonicalTransactionChain contract.
type CanonicalTransactionChainL2GasParamsUpdatedIterator struct {
Event *CanonicalTransactionChainL2GasParamsUpdated // Event containing the contract specifics and raw log
contract *bind.BoundContract // Generic contract to use for unpacking event data
event string // Event name to use for unpacking event data
logs chan types.Log // Log channel receiving the found contract events
sub ethereum.Subscription // Subscription for errors, completion and termination
done bool // Whether the subscription completed delivering logs
fail error // Occurred error to stop iteration
}
// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *CanonicalTransactionChainL2GasParamsUpdatedIterator) Next() bool {
// If the iterator failed, stop iterating
if it.fail != nil {
return false
}
// If the iterator completed, deliver directly whatever's available
if it.done {
select {
case log := <-it.logs:
it.Event = new(CanonicalTransactionChainL2GasParamsUpdated)
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
default:
return false
}
}
// Iterator still in progress, wait for either a data or an error event
select {
case log := <-it.logs:
it.Event = new(CanonicalTransactionChainL2GasParamsUpdated)
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
case err := <-it.sub.Err():
it.done = true
it.fail = err
return it.Next()
}
}
// Error returns any retrieval or parsing error that occurred during filtering.
func (it *CanonicalTransactionChainL2GasParamsUpdatedIterator) Error() error {
return it.fail
}
// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *CanonicalTransactionChainL2GasParamsUpdatedIterator) Close() error {
it.sub.Unsubscribe()
return nil
}
// CanonicalTransactionChainL2GasParamsUpdated represents a L2GasParamsUpdated event raised by the CanonicalTransactionChain contract.
type CanonicalTransactionChainL2GasParamsUpdated struct {
L2GasDiscountDivisor *big.Int
EnqueueGasCost *big.Int
EnqueueL2GasPrepaid *big.Int
Raw types.Log // Blockchain specific contextual infos
}
// FilterL2GasParamsUpdated is a free log retrieval operation binding the contract event 0xc6ed75e96b8b18b71edc1a6e82a9d677f8268c774a262c624eeb2cf0a8b3e07e.
//
// Solidity: event L2GasParamsUpdated(uint256 l2GasDiscountDivisor, uint256 enqueueGasCost, uint256 enqueueL2GasPrepaid)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) FilterL2GasParamsUpdated(opts *bind.FilterOpts) (*CanonicalTransactionChainL2GasParamsUpdatedIterator, error) {
logs, sub, err := _CanonicalTransactionChain.contract.FilterLogs(opts, "L2GasParamsUpdated")
if err != nil {
return nil, err
}
return &CanonicalTransactionChainL2GasParamsUpdatedIterator{contract: _CanonicalTransactionChain.contract, event: "L2GasParamsUpdated", logs: logs, sub: sub}, nil
}
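A minimal sketch of how the iterator returned by FilterL2GasParamsUpdated is typically consumed, assuming the same package as the bindings with fmt also imported; the helper name and block range are illustrative:

// printGasParamUpdates is a sketch of draining a historical log filter.
func printGasParamUpdates(f *CanonicalTransactionChainFilterer, from, to uint64) error {
	it, err := f.FilterL2GasParamsUpdated(&bind.FilterOpts{Start: from, End: &to})
	if err != nil {
		return err
	}
	defer it.Close()
	for it.Next() {
		ev := it.Event
		fmt.Printf("L2GasParamsUpdated: divisor=%v enqueueGasCost=%v enqueueL2GasPrepaid=%v\n",
			ev.L2GasDiscountDivisor, ev.EnqueueGasCost, ev.EnqueueL2GasPrepaid)
	}
	// Next returns false both when the range is exhausted and on error,
	// so always check Error afterwards.
	return it.Error()
}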
// WatchL2GasParamsUpdated is a free log subscription operation binding the contract event 0xc6ed75e96b8b18b71edc1a6e82a9d677f8268c774a262c624eeb2cf0a8b3e07e.
//
// Solidity: event L2GasParamsUpdated(uint256 l2GasDiscountDivisor, uint256 enqueueGasCost, uint256 enqueueL2GasPrepaid)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) WatchL2GasParamsUpdated(opts *bind.WatchOpts, sink chan<- *CanonicalTransactionChainL2GasParamsUpdated) (event.Subscription, error) {
logs, sub, err := _CanonicalTransactionChain.contract.WatchLogs(opts, "L2GasParamsUpdated")
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case log := <-logs:
// New log arrived, parse the event and forward to the user
event := new(CanonicalTransactionChainL2GasParamsUpdated)
if err := _CanonicalTransactionChain.contract.UnpackLog(event, "L2GasParamsUpdated", log); err != nil {
return err
}
event.Raw = log
select {
case sink <- event:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
// ParseL2GasParamsUpdated is a log parse operation binding the contract event 0xc6ed75e96b8b18b71edc1a6e82a9d677f8268c774a262c624eeb2cf0a8b3e07e.
//
// Solidity: event L2GasParamsUpdated(uint256 l2GasDiscountDivisor, uint256 enqueueGasCost, uint256 enqueueL2GasPrepaid)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) ParseL2GasParamsUpdated(log types.Log) (*CanonicalTransactionChainL2GasParamsUpdated, error) {
event := new(CanonicalTransactionChainL2GasParamsUpdated)
if err := _CanonicalTransactionChain.contract.UnpackLog(event, "L2GasParamsUpdated", log); err != nil {
return nil, err
}
event.Raw = log
return event, nil
}
// CanonicalTransactionChainQueueBatchAppendedIterator is returned from FilterQueueBatchAppended and is used to iterate over the raw logs and unpacked data for QueueBatchAppended events raised by the CanonicalTransactionChain contract.
type CanonicalTransactionChainQueueBatchAppendedIterator struct {
Event *CanonicalTransactionChainQueueBatchAppended // Event containing the contract specifics and raw log
contract *bind.BoundContract // Generic contract to use for unpacking event data
event string // Event name to use for unpacking event data
logs chan types.Log // Log channel receiving the found contract events
sub ethereum.Subscription // Subscription for errors, completion and termination
done bool // Whether the subscription completed delivering logs
fail error // Occurred error to stop iteration
}
// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *CanonicalTransactionChainQueueBatchAppendedIterator) Next() bool {
// If the iterator failed, stop iterating
if it.fail != nil {
return false
}
// If the iterator completed, deliver directly whatever's available
if it.done {
select {
case log := <-it.logs:
it.Event = new(CanonicalTransactionChainQueueBatchAppended)
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
default:
return false
}
}
// Iterator still in progress, wait for either a data or an error event
select {
case log := <-it.logs:
it.Event = new(CanonicalTransactionChainQueueBatchAppended)
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
case err := <-it.sub.Err():
it.done = true
it.fail = err
return it.Next()
}
}
// Error returns any retrieval or parsing error that occurred during filtering.
func (it *CanonicalTransactionChainQueueBatchAppendedIterator) Error() error {
return it.fail
}
// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *CanonicalTransactionChainQueueBatchAppendedIterator) Close() error {
it.sub.Unsubscribe()
return nil
}
// CanonicalTransactionChainQueueBatchAppended represents a QueueBatchAppended event raised by the CanonicalTransactionChain contract.
type CanonicalTransactionChainQueueBatchAppended struct {
StartingQueueIndex *big.Int
NumQueueElements *big.Int
TotalElements *big.Int
Raw types.Log // Blockchain specific contextual infos
}
// FilterQueueBatchAppended is a free log retrieval operation binding the contract event 0x64d7f508348c70dea42d5302a393987e4abc20e45954ab3f9d320207751956f0.
//
// Solidity: event QueueBatchAppended(uint256 _startingQueueIndex, uint256 _numQueueElements, uint256 _totalElements)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) FilterQueueBatchAppended(opts *bind.FilterOpts) (*CanonicalTransactionChainQueueBatchAppendedIterator, error) {
logs, sub, err := _CanonicalTransactionChain.contract.FilterLogs(opts, "QueueBatchAppended")
if err != nil {
return nil, err
}
return &CanonicalTransactionChainQueueBatchAppendedIterator{contract: _CanonicalTransactionChain.contract, event: "QueueBatchAppended", logs: logs, sub: sub}, nil
}
// WatchQueueBatchAppended is a free log subscription operation binding the contract event 0x64d7f508348c70dea42d5302a393987e4abc20e45954ab3f9d320207751956f0.
//
// Solidity: event QueueBatchAppended(uint256 _startingQueueIndex, uint256 _numQueueElements, uint256 _totalElements)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) WatchQueueBatchAppended(opts *bind.WatchOpts, sink chan<- *CanonicalTransactionChainQueueBatchAppended) (event.Subscription, error) {
logs, sub, err := _CanonicalTransactionChain.contract.WatchLogs(opts, "QueueBatchAppended")
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case log := <-logs:
// New log arrived, parse the event and forward to the user
event := new(CanonicalTransactionChainQueueBatchAppended)
if err := _CanonicalTransactionChain.contract.UnpackLog(event, "QueueBatchAppended", log); err != nil {
return err
}
event.Raw = log
select {
case sink <- event:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
// ParseQueueBatchAppended is a log parse operation binding the contract event 0x64d7f508348c70dea42d5302a393987e4abc20e45954ab3f9d320207751956f0.
//
// Solidity: event QueueBatchAppended(uint256 _startingQueueIndex, uint256 _numQueueElements, uint256 _totalElements)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) ParseQueueBatchAppended(log types.Log) (*CanonicalTransactionChainQueueBatchAppended, error) {
event := new(CanonicalTransactionChainQueueBatchAppended)
if err := _CanonicalTransactionChain.contract.UnpackLog(event, "QueueBatchAppended", log); err != nil {
return nil, err
}
event.Raw = log
return event, nil
}
// CanonicalTransactionChainSequencerBatchAppendedIterator is returned from FilterSequencerBatchAppended and is used to iterate over the raw logs and unpacked data for SequencerBatchAppended events raised by the CanonicalTransactionChain contract.
type CanonicalTransactionChainSequencerBatchAppendedIterator struct {
Event *CanonicalTransactionChainSequencerBatchAppended // Event containing the contract specifics and raw log
contract *bind.BoundContract // Generic contract to use for unpacking event data
event string // Event name to use for unpacking event data
logs chan types.Log // Log channel receiving the found contract events
sub ethereum.Subscription // Subscription for errors, completion and termination
done bool // Whether the subscription completed delivering logs
fail error // Occurred error to stop iteration
}
// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *CanonicalTransactionChainSequencerBatchAppendedIterator) Next() bool {
// If the iterator failed, stop iterating
if it.fail != nil {
return false
}
// If the iterator completed, deliver directly whatever's available
if it.done {
select {
case log := <-it.logs:
it.Event = new(CanonicalTransactionChainSequencerBatchAppended)
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
default:
return false
}
}
// Iterator still in progress, wait for either a data or an error event
select {
case log := <-it.logs:
it.Event = new(CanonicalTransactionChainSequencerBatchAppended)
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
case err := <-it.sub.Err():
it.done = true
it.fail = err
return it.Next()
}
}
// Error returns any retrieval or parsing error that occurred during filtering.
func (it *CanonicalTransactionChainSequencerBatchAppendedIterator) Error() error {
return it.fail
}
// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *CanonicalTransactionChainSequencerBatchAppendedIterator) Close() error {
it.sub.Unsubscribe()
return nil
}
// CanonicalTransactionChainSequencerBatchAppended represents a SequencerBatchAppended event raised by the CanonicalTransactionChain contract.
type CanonicalTransactionChainSequencerBatchAppended struct {
StartingQueueIndex *big.Int
NumQueueElements *big.Int
TotalElements *big.Int
Raw types.Log // Blockchain specific contextual infos
}
// FilterSequencerBatchAppended is a free log retrieval operation binding the contract event 0x602f1aeac0ca2e7a13e281a9ef0ad7838542712ce16780fa2ecffd351f05f899.
//
// Solidity: event SequencerBatchAppended(uint256 _startingQueueIndex, uint256 _numQueueElements, uint256 _totalElements)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) FilterSequencerBatchAppended(opts *bind.FilterOpts) (*CanonicalTransactionChainSequencerBatchAppendedIterator, error) {
logs, sub, err := _CanonicalTransactionChain.contract.FilterLogs(opts, "SequencerBatchAppended")
if err != nil {
return nil, err
}
return &CanonicalTransactionChainSequencerBatchAppendedIterator{contract: _CanonicalTransactionChain.contract, event: "SequencerBatchAppended", logs: logs, sub: sub}, nil
}
// WatchSequencerBatchAppended is a free log subscription operation binding the contract event 0x602f1aeac0ca2e7a13e281a9ef0ad7838542712ce16780fa2ecffd351f05f899.
//
// Solidity: event SequencerBatchAppended(uint256 _startingQueueIndex, uint256 _numQueueElements, uint256 _totalElements)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) WatchSequencerBatchAppended(opts *bind.WatchOpts, sink chan<- *CanonicalTransactionChainSequencerBatchAppended) (event.Subscription, error) {
logs, sub, err := _CanonicalTransactionChain.contract.WatchLogs(opts, "SequencerBatchAppended")
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case log := <-logs:
// New log arrived, parse the event and forward to the user
event := new(CanonicalTransactionChainSequencerBatchAppended)
if err := _CanonicalTransactionChain.contract.UnpackLog(event, "SequencerBatchAppended", log); err != nil {
return err
}
event.Raw = log
select {
case sink <- event:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
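WatchSequencerBatchAppended requires a subscription-capable backend (for example, a websocket client). A hedged sketch of a consumer loop, assuming the same package as the bindings with context and fmt also imported; the helper name is illustrative:

// followSequencerBatches is a sketch of consuming a live event subscription.
func followSequencerBatches(ctx context.Context, f *CanonicalTransactionChainFilterer) error {
	sink := make(chan *CanonicalTransactionChainSequencerBatchAppended)
	sub, err := f.WatchSequencerBatchAppended(&bind.WatchOpts{Context: ctx}, sink)
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()
	for {
		select {
		case ev := <-sink:
			fmt.Printf("sequencer batch: startIdx=%v queueElems=%v total=%v\n",
				ev.StartingQueueIndex, ev.NumQueueElements, ev.TotalElements)
		case err := <-sub.Err():
			// Transport or decoding errors surface here; the subscription is dead.
			return err
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}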
// ParseSequencerBatchAppended is a log parse operation binding the contract event 0x602f1aeac0ca2e7a13e281a9ef0ad7838542712ce16780fa2ecffd351f05f899.
//
// Solidity: event SequencerBatchAppended(uint256 _startingQueueIndex, uint256 _numQueueElements, uint256 _totalElements)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) ParseSequencerBatchAppended(log types.Log) (*CanonicalTransactionChainSequencerBatchAppended, error) {
event := new(CanonicalTransactionChainSequencerBatchAppended)
if err := _CanonicalTransactionChain.contract.UnpackLog(event, "SequencerBatchAppended", log); err != nil {
return nil, err
}
event.Raw = log
return event, nil
}
// CanonicalTransactionChainTransactionBatchAppendedIterator is returned from FilterTransactionBatchAppended and is used to iterate over the raw logs and unpacked data for TransactionBatchAppended events raised by the CanonicalTransactionChain contract.
type CanonicalTransactionChainTransactionBatchAppendedIterator struct {
Event *CanonicalTransactionChainTransactionBatchAppended // Event containing the contract specifics and raw log
contract *bind.BoundContract // Generic contract to use for unpacking event data
event string // Event name to use for unpacking event data
logs chan types.Log // Log channel receiving the found contract events
sub ethereum.Subscription // Subscription for errors, completion and termination
done bool // Whether the subscription completed delivering logs
fail error // Occurred error to stop iteration
}
// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *CanonicalTransactionChainTransactionBatchAppendedIterator) Next() bool {
// If the iterator failed, stop iterating
if it.fail != nil {
return false
}
// If the iterator completed, deliver directly whatever's available
if it.done {
select {
case log := <-it.logs:
it.Event = new(CanonicalTransactionChainTransactionBatchAppended)
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
default:
return false
}
}
// Iterator still in progress, wait for either a data or an error event
select {
case log := <-it.logs:
it.Event = new(CanonicalTransactionChainTransactionBatchAppended)
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
case err := <-it.sub.Err():
it.done = true
it.fail = err
return it.Next()
}
}
// Error returns any retrieval or parsing error that occurred during filtering.
func (it *CanonicalTransactionChainTransactionBatchAppendedIterator) Error() error {
return it.fail
}
// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *CanonicalTransactionChainTransactionBatchAppendedIterator) Close() error {
it.sub.Unsubscribe()
return nil
}
// CanonicalTransactionChainTransactionBatchAppended represents a TransactionBatchAppended event raised by the CanonicalTransactionChain contract.
type CanonicalTransactionChainTransactionBatchAppended struct {
BatchIndex *big.Int
BatchRoot [32]byte
BatchSize *big.Int
PrevTotalElements *big.Int
ExtraData []byte
Raw types.Log // Blockchain specific contextual infos
}
// FilterTransactionBatchAppended is a free log retrieval operation binding the contract event 0x127186556e7be68c7e31263195225b4de02820707889540969f62c05cf73525e.
//
// Solidity: event TransactionBatchAppended(uint256 indexed _batchIndex, bytes32 _batchRoot, uint256 _batchSize, uint256 _prevTotalElements, bytes _extraData)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) FilterTransactionBatchAppended(opts *bind.FilterOpts, _batchIndex []*big.Int) (*CanonicalTransactionChainTransactionBatchAppendedIterator, error) {
var _batchIndexRule []interface{}
for _, _batchIndexItem := range _batchIndex {
_batchIndexRule = append(_batchIndexRule, _batchIndexItem)
}
logs, sub, err := _CanonicalTransactionChain.contract.FilterLogs(opts, "TransactionBatchAppended", _batchIndexRule)
if err != nil {
return nil, err
}
return &CanonicalTransactionChainTransactionBatchAppendedIterator{contract: _CanonicalTransactionChain.contract, event: "TransactionBatchAppended", logs: logs, sub: sub}, nil
}
// WatchTransactionBatchAppended is a free log subscription operation binding the contract event 0x127186556e7be68c7e31263195225b4de02820707889540969f62c05cf73525e.
//
// Solidity: event TransactionBatchAppended(uint256 indexed _batchIndex, bytes32 _batchRoot, uint256 _batchSize, uint256 _prevTotalElements, bytes _extraData)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) WatchTransactionBatchAppended(opts *bind.WatchOpts, sink chan<- *CanonicalTransactionChainTransactionBatchAppended, _batchIndex []*big.Int) (event.Subscription, error) {
var _batchIndexRule []interface{}
for _, _batchIndexItem := range _batchIndex {
_batchIndexRule = append(_batchIndexRule, _batchIndexItem)
}
logs, sub, err := _CanonicalTransactionChain.contract.WatchLogs(opts, "TransactionBatchAppended", _batchIndexRule)
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case log := <-logs:
// New log arrived, parse the event and forward to the user
event := new(CanonicalTransactionChainTransactionBatchAppended)
if err := _CanonicalTransactionChain.contract.UnpackLog(event, "TransactionBatchAppended", log); err != nil {
return err
}
event.Raw = log
select {
case sink <- event:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
// ParseTransactionBatchAppended is a log parse operation binding the contract event 0x127186556e7be68c7e31263195225b4de02820707889540969f62c05cf73525e.
//
// Solidity: event TransactionBatchAppended(uint256 indexed _batchIndex, bytes32 _batchRoot, uint256 _batchSize, uint256 _prevTotalElements, bytes _extraData)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) ParseTransactionBatchAppended(log types.Log) (*CanonicalTransactionChainTransactionBatchAppended, error) {
event := new(CanonicalTransactionChainTransactionBatchAppended)
if err := _CanonicalTransactionChain.contract.UnpackLog(event, "TransactionBatchAppended", log); err != nil {
return nil, err
}
event.Raw = log
return event, nil
}
// CanonicalTransactionChainTransactionEnqueuedIterator is returned from FilterTransactionEnqueued and is used to iterate over the raw logs and unpacked data for TransactionEnqueued events raised by the CanonicalTransactionChain contract.
type CanonicalTransactionChainTransactionEnqueuedIterator struct {
Event *CanonicalTransactionChainTransactionEnqueued // Event containing the contract specifics and raw log
contract *bind.BoundContract // Generic contract to use for unpacking event data
event string // Event name to use for unpacking event data
logs chan types.Log // Log channel receiving the found contract events
sub ethereum.Subscription // Subscription for errors, completion and termination
done bool // Whether the subscription completed delivering logs
fail error // Occurred error to stop iteration
}
// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *CanonicalTransactionChainTransactionEnqueuedIterator) Next() bool {
// If the iterator failed, stop iterating
if it.fail != nil {
return false
}
// If the iterator completed, deliver directly whatever's available
if it.done {
select {
case log := <-it.logs:
it.Event = new(CanonicalTransactionChainTransactionEnqueued)
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
default:
return false
}
}
// Iterator still in progress, wait for either a data or an error event
select {
case log := <-it.logs:
it.Event = new(CanonicalTransactionChainTransactionEnqueued)
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
case err := <-it.sub.Err():
it.done = true
it.fail = err
return it.Next()
}
}
// Error returns any retrieval or parsing error that occurred during filtering.
func (it *CanonicalTransactionChainTransactionEnqueuedIterator) Error() error {
return it.fail
}
// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *CanonicalTransactionChainTransactionEnqueuedIterator) Close() error {
it.sub.Unsubscribe()
return nil
}
// CanonicalTransactionChainTransactionEnqueued represents a TransactionEnqueued event raised by the CanonicalTransactionChain contract.
type CanonicalTransactionChainTransactionEnqueued struct {
L1TxOrigin common.Address
Target common.Address
GasLimit *big.Int
Data []byte
QueueIndex *big.Int
Timestamp *big.Int
Raw types.Log // Blockchain specific contextual infos
}
// FilterTransactionEnqueued is a free log retrieval operation binding the contract event 0x4b388aecf9fa6cc92253704e5975a6129a4f735bdbd99567df4ed0094ee4ceb5.
//
// Solidity: event TransactionEnqueued(address indexed _l1TxOrigin, address indexed _target, uint256 _gasLimit, bytes _data, uint256 indexed _queueIndex, uint256 _timestamp)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) FilterTransactionEnqueued(opts *bind.FilterOpts, _l1TxOrigin []common.Address, _target []common.Address, _queueIndex []*big.Int) (*CanonicalTransactionChainTransactionEnqueuedIterator, error) {
var _l1TxOriginRule []interface{}
for _, _l1TxOriginItem := range _l1TxOrigin {
_l1TxOriginRule = append(_l1TxOriginRule, _l1TxOriginItem)
}
var _targetRule []interface{}
for _, _targetItem := range _target {
_targetRule = append(_targetRule, _targetItem)
}
var _queueIndexRule []interface{}
for _, _queueIndexItem := range _queueIndex {
_queueIndexRule = append(_queueIndexRule, _queueIndexItem)
}
logs, sub, err := _CanonicalTransactionChain.contract.FilterLogs(opts, "TransactionEnqueued", _l1TxOriginRule, _targetRule, _queueIndexRule)
if err != nil {
return nil, err
}
return &CanonicalTransactionChainTransactionEnqueuedIterator{contract: _CanonicalTransactionChain.contract, event: "TransactionEnqueued", logs: logs, sub: sub}, nil
}
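Because _l1TxOrigin, _target, and _queueIndex are indexed, they can be filtered server-side by topic. A sketch, under the same same-package assumption as the earlier examples, that pulls only the enqueues sent to one target; passing nil for a topic slice leaves that position unconstrained:

// enqueuesForTarget is a sketch of filtering by an indexed event parameter.
func enqueuesForTarget(f *CanonicalTransactionChainFilterer, target common.Address, from, to uint64) ([]*CanonicalTransactionChainTransactionEnqueued, error) {
	it, err := f.FilterTransactionEnqueued(
		&bind.FilterOpts{Start: from, End: &to},
		nil,                      // any _l1TxOrigin
		[]common.Address{target}, // only this _target
		nil,                      // any _queueIndex
	)
	if err != nil {
		return nil, err
	}
	defer it.Close()
	var events []*CanonicalTransactionChainTransactionEnqueued
	for it.Next() {
		// it.Event is freshly allocated on every Next call, so the pointer is safe to keep.
		events = append(events, it.Event)
	}
	return events, it.Error()
}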
// WatchTransactionEnqueued is a free log subscription operation binding the contract event 0x4b388aecf9fa6cc92253704e5975a6129a4f735bdbd99567df4ed0094ee4ceb5.
//
// Solidity: event TransactionEnqueued(address indexed _l1TxOrigin, address indexed _target, uint256 _gasLimit, bytes _data, uint256 indexed _queueIndex, uint256 _timestamp)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) WatchTransactionEnqueued(opts *bind.WatchOpts, sink chan<- *CanonicalTransactionChainTransactionEnqueued, _l1TxOrigin []common.Address, _target []common.Address, _queueIndex []*big.Int) (event.Subscription, error) {
var _l1TxOriginRule []interface{}
for _, _l1TxOriginItem := range _l1TxOrigin {
_l1TxOriginRule = append(_l1TxOriginRule, _l1TxOriginItem)
}
var _targetRule []interface{}
for _, _targetItem := range _target {
_targetRule = append(_targetRule, _targetItem)
}
var _queueIndexRule []interface{}
for _, _queueIndexItem := range _queueIndex {
_queueIndexRule = append(_queueIndexRule, _queueIndexItem)
}
logs, sub, err := _CanonicalTransactionChain.contract.WatchLogs(opts, "TransactionEnqueued", _l1TxOriginRule, _targetRule, _queueIndexRule)
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case log := <-logs:
// New log arrived, parse the event and forward to the user
event := new(CanonicalTransactionChainTransactionEnqueued)
if err := _CanonicalTransactionChain.contract.UnpackLog(event, "TransactionEnqueued", log); err != nil {
return err
}
event.Raw = log
select {
case sink <- event:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
// ParseTransactionEnqueued is a log parse operation binding the contract event 0x4b388aecf9fa6cc92253704e5975a6129a4f735bdbd99567df4ed0094ee4ceb5.
//
// Solidity: event TransactionEnqueued(address indexed _l1TxOrigin, address indexed _target, uint256 _gasLimit, bytes _data, uint256 indexed _queueIndex, uint256 _timestamp)
func (_CanonicalTransactionChain *CanonicalTransactionChainFilterer) ParseTransactionEnqueued(log types.Log) (*CanonicalTransactionChainTransactionEnqueued, error) {
event := new(CanonicalTransactionChainTransactionEnqueued)
if err := _CanonicalTransactionChain.contract.UnpackLog(event, "TransactionEnqueued", log); err != nil {
return nil, err
}
event.Raw = log
return event, nil
}
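The Parse helpers are useful when a receipt is already in hand. A sketch under the same same-package assumption, with fmt also imported, that scans a receipt for the first TransactionEnqueued log; a stricter caller would also check lg.Address against the CTC address:

// firstEnqueuedInReceipt is a sketch of decoding an event from a transaction receipt.
func firstEnqueuedInReceipt(f *CanonicalTransactionChainFilterer, rcpt *types.Receipt) (*CanonicalTransactionChainTransactionEnqueued, error) {
	for _, lg := range rcpt.Logs {
		ev, err := f.ParseTransactionEnqueued(*lg)
		if err != nil {
			// Log does not decode as TransactionEnqueued; skip it.
			continue
		}
		return ev, nil
	}
	return nil, fmt.Errorf("no TransactionEnqueued log in receipt")
}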
......@@ -3,10 +3,6 @@ package ether
import (
"fmt"
"math/big"
"sync"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
......@@ -46,9 +42,6 @@ var (
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000006"): true,
}
// maxSlot is the maximum possible storage slot.
maxSlot = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
// sequencerEntrypointAddr is the address of the OVM sequencer entrypoint contract.
sequencerEntrypointAddr = common.HexToAddress("0x4200000000000000000000000000000000000005")
)
......@@ -61,11 +54,9 @@ type accountData struct {
address common.Address
}
type DBFactory func() (*state.StateDB, error)
// MigrateBalances migrates all balances in the LegacyERC20ETH contract into state. It performs checks
// in parallel with mutations in order to reduce overall migration time.
func MigrateBalances(mutableDB *state.StateDB, dbFactory DBFactory, addresses []common.Address, allowances []*crossdomain.Allowance, chainID int, noCheck bool) error {
func MigrateBalances(mutableDB *state.StateDB, dbFactory util.DBFactory, addresses []common.Address, allowances []*crossdomain.Allowance, chainID int, noCheck bool) error {
// Chain params to use for integrity checking.
params := crossdomain.ParamsByChainID[chainID]
if params == nil {
......@@ -75,7 +66,7 @@ func MigrateBalances(mutableDB *state.StateDB, dbFactory DBFactory, addresses []
return doMigration(mutableDB, dbFactory, addresses, allowances, params.ExpectedSupplyDelta, noCheck)
}
func doMigration(mutableDB *state.StateDB, dbFactory DBFactory, addresses []common.Address, allowances []*crossdomain.Allowance, expDiff *big.Int, noCheck bool) error {
func doMigration(mutableDB *state.StateDB, dbFactory util.DBFactory, addresses []common.Address, allowances []*crossdomain.Allowance, expDiff *big.Int, noCheck bool) error {
// We'll need to maintain a list of all addresses that we've seen along with all of the storage
// slots based on the witness data.
slotsAddrs := make(map[common.Hash]common.Address)
......@@ -103,226 +94,109 @@ func doMigration(mutableDB *state.StateDB, dbFactory DBFactory, addresses []comm
slotsAddrs[entrySK] = sequencerEntrypointAddr
slotsInp[entrySK] = BalanceSlot
// WaitGroup to wait on each iteration job to finish.
var wg sync.WaitGroup
// Channel to receive storage slot keys and values from each iteration job.
outCh := make(chan accountData)
// Channel to receive errors from each iteration job.
errCh := make(chan error, checkJobs)
// Channel to cancel all iteration jobs.
cancelCh := make(chan struct{})
// Define a worker function to iterate over each partition.
worker := func(start, end common.Hash) {
// Decrement the WaitGroup when the function returns.
defer wg.Done()
db, err := dbFactory()
if err != nil {
log.Crit("cannot get database", "err", err)
}
// Create a new storage trie. Each trie returned by db.StorageTrie
// is a copy, so this is safe for concurrent use.
st, err := db.StorageTrie(predeploys.LegacyERC20ETHAddr)
if err != nil {
// Should never happen, so explode if it does.
log.Crit("cannot get storage trie for LegacyERC20ETHAddr", "err", err)
}
if st == nil {
// Should never happen, so explode if it does.
log.Crit("nil storage trie for LegacyERC20ETHAddr")
}
it := trie.NewIterator(st.NodeIterator(start.Bytes()))
// Below code is largely based on db.ForEachStorage. We can't use that
// because it doesn't allow us to specify a start and end key.
for it.Next() {
select {
case <-cancelCh:
// If one of the workers encounters an error, cancel all of them.
return
default:
break
}
// Channel that gets closed when the collector is done.
doneCh := make(chan struct{})
// Use the raw (i.e., secure hashed) key to check if we've reached
// the end of the partition. Use > rather than >= here to account for
// the fact that the values returned by PartitionKeys are inclusive.
// Duplicate addresses that may be returned by this iteration are
// filtered out in the collector.
if new(big.Int).SetBytes(it.Key).Cmp(end.Big()) > 0 {
return
}
// Create a map of accounts we've seen so that we can filter out duplicates.
seenAccounts := make(map[common.Address]bool)
// Skip if the value is empty.
rawValue := it.Value
if len(rawValue) == 0 {
continue
}
// Keep track of the total migrated supply.
totalFound := new(big.Int)
// Get the preimage.
rawKey := st.GetKey(it.Key)
if rawKey == nil {
// Should never happen, so explode if it does.
log.Crit("cannot get preimage for storage key", "key", it.Key)
}
key := common.BytesToHash(rawKey)
// Kick off a background process to collect
// values from the channel and add them to the map.
var count int
progress := util.ProgressLogger(1000, "Migrated OVM_ETH storage slot")
go func() {
defer func() { doneCh <- struct{}{} }()
// Parse the raw value.
_, content, _, err := rlp.Split(rawValue)
if err != nil {
// Should never happen, so explode if it does.
log.Crit("mal-formed data in state: %v", err)
}
for account := range outCh {
progress()
// We can safely ignore specific slots (totalSupply, name, symbol).
if ignoredSlots[key] {
// Filter out duplicate accounts. See the below note about keyspace iteration for
// why we may have to filter out duplicates.
if seenAccounts[account.address] {
log.Info("skipping duplicate account during iteration", "addr", account.address)
continue
}
slotType, ok := slotsInp[key]
if !ok {
if noCheck {
log.Error("ignoring unknown storage slot in state", "slot", key.String())
} else {
errCh <- fmt.Errorf("unknown storage slot in state: %s", key.String())
return
}
}
// No accounts should have a balance in state. If they do, bail.
addr, ok := slotsAddrs[key]
if !ok {
log.Crit("could not find address in map - should never happen")
}
bal := db.GetBalance(addr)
if bal.Sign() != 0 {
log.Error(
"account has non-zero balance in state - should never happen",
"addr", addr,
"balance", bal.String(),
)
if !noCheck {
errCh <- fmt.Errorf("account has non-zero balance in state - should never happen: %s", addr.String())
return
}
}
// Accumulate addresses and total supply.
totalFound = new(big.Int).Add(totalFound, account.balance)
// Add balances to the total found.
switch slotType {
case BalanceSlot:
// Convert the value to a common.Hash, then send to the channel.
value := common.BytesToHash(content)
outCh <- accountData{
balance: value.Big(),
legacySlot: key,
address: addr,
}
case AllowanceSlot:
// Allowance slot.
continue
default:
// Should never happen.
if noCheck {
log.Error("unknown slot type", "slot", key, "type", slotType)
} else {
log.Crit("unknown slot type %d, should never happen", slotType)
}
}
mutableDB.SetBalance(account.address, account.balance)
mutableDB.SetState(predeploys.LegacyERC20ETHAddr, account.legacySlot, common.Hash{})
count++
seenAccounts[account.address] = true
}
}
for i := 0; i < checkJobs; i++ {
wg.Add(1)
// Partition the keyspace per worker.
start, end := PartitionKeyspace(i, checkJobs)
// Kick off our worker.
go worker(start, end)
}
// Make a channel to track when collector process completes.
collectorClosedCh := make(chan struct{})
// Make a channel to cancel the collector process.
collectorCancelCh := make(chan struct{})
// Keep track of the last error seen.
var lastErr error
// The cancel channel can be closed if any of the workers returns an error.
// We wrap the close in a sync.Once to ensure that it's only closed once.
var cancelOnce sync.Once
}()
// Create a map of accounts we've seen so that we can filter out duplicates.
seenAccounts := make(map[common.Address]bool)
err := util.IterateState(dbFactory, predeploys.LegacyERC20ETHAddr, func(db *state.StateDB, key, value common.Hash) error {
// We can safely ignore specific slots (totalSupply, name, symbol).
if ignoredSlots[key] {
return nil
}
// Keep track of the total migrated supply.
totalFound := new(big.Int)
slotType, ok := slotsInp[key]
if !ok {
log.Error("unknown storage slot in state", "slot", key.String())
if !noCheck {
return fmt.Errorf("unknown storage slot in state: %s", key.String())
}
}
// Kick off another background process to collect
// values from the channel and add them to the map.
var count int
progress := util.ProgressLogger(1000, "Migrated OVM_ETH storage slot")
go func() {
defer func() {
collectorClosedCh <- struct{}{}
}()
for {
select {
case account := <-outCh:
progress()
// Filter out duplicate accounts. See the below note about keyspace iteration for
// why we may have to filter out duplicates.
if seenAccounts[account.address] {
log.Info("skipping duplicate account during iteration", "addr", account.address)
continue
}
// Accumulate addresses and total supply.
totalFound = new(big.Int).Add(totalFound, account.balance)
mutableDB.SetBalance(account.address, account.balance)
mutableDB.SetState(predeploys.LegacyERC20ETHAddr, account.legacySlot, common.Hash{})
count++
seenAccounts[account.address] = true
case err := <-errCh:
cancelOnce.Do(func() {
close(cancelCh)
lastErr = err
})
case <-collectorCancelCh:
// Explicitly drain the error channel. Since the error channel is buffered, it's possible
// for the wg.Wait() call below to unblock and cancel this goroutine before the error gets
// processed by the case statement above.
for len(errCh) > 0 {
err := <-errCh
if lastErr == nil {
lastErr = err
}
}
return
// No accounts should have a balance in state. If they do, bail.
addr, ok := slotsAddrs[key]
if !ok {
log.Crit("could not find address in map - should never happen")
}
bal := db.GetBalance(addr)
if bal.Sign() != 0 {
log.Error(
"account has non-zero balance in state - should never happen",
"addr", addr,
"balance", bal.String(),
)
if !noCheck {
return fmt.Errorf("account has non-zero balance in state - should never happen: %s", addr.String())
}
}
}()
// Wait for the workers to finish.
wg.Wait()
// Add balances to the total found.
switch slotType {
case BalanceSlot:
// Send the data to the channel.
outCh <- accountData{
balance: value.Big(),
legacySlot: key,
address: addr,
}
case AllowanceSlot:
// Allowance slot. Do nothing here.
default:
// Should never happen.
if noCheck {
log.Error("unknown slot type", "slot", key, "type", slotType)
} else {
log.Crit("unknown slot type %d, should never happen", slotType)
}
}
// Close the collector, and wait for it to finish.
close(collectorCancelCh)
<-collectorClosedCh
return nil
}, checkJobs)
// If we saw an error, return it.
if lastErr != nil {
return lastErr
if err != nil {
return err
}
// Close the outCh to cancel the collector. The collector will signal that it's done
// using doneCh. Any values waiting to be read from outCh will be read before the
// collector exits.
close(outCh)
<-doneCh
// Log how many slots were iterated over.
log.Info("Iterated legacy balances", "count", count)
......@@ -368,33 +242,3 @@ func doMigration(mutableDB *state.StateDB, dbFactory DBFactory, addresses []comm
return nil
}
// PartitionKeyspace divides the key space into partitions by dividing the maximum keyspace
// by count then multiplying by i. This will leave some slots left over, which we handle below. It
// returns the start and end keys for the partition as a common.Hash. Note that the returned range
// of keys is inclusive, i.e., [start, end] NOT [start, end).
func PartitionKeyspace(i int, count int) (common.Hash, common.Hash) {
if i < 0 || count < 0 {
panic("i and count must be greater than 0")
}
if i > count-1 {
panic("i must be less than count - 1")
}
// Divide the key space into partitions by dividing the key space by the number
// of jobs. This will leave some slots left over, which we handle below.
partSize := new(big.Int).Div(maxSlot.Big(), big.NewInt(int64(count)))
start := common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i)), partSize))
var end common.Hash
if i < count-1 {
// If this is not the last partition, use the next partition's start key as the end.
end = common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i+1)), partSize))
} else {
// If this is the last partition, use the max slot as the end.
end = maxSlot
}
return start, end
}
package ether
import (
"fmt"
"math/big"
"math/rand"
"testing"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
......@@ -190,7 +191,7 @@ func TestMigrateBalances(t *testing.T) {
}
}
func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Address]*big.Int, allowances map[common.Address]common.Address) (*state.StateDB, DBFactory) {
func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Address]*big.Int, allowances map[common.Address]common.Address) (*state.StateDB, util.DBFactory) {
memDB := rawdb.NewMemoryDatabase()
db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
......@@ -283,85 +284,6 @@ func TestMigrateBalancesRandomMissing(t *testing.T) {
}
}
func TestPartitionKeyspace(t *testing.T) {
tests := []struct {
i int
count int
expected [2]common.Hash
}{
{
i: 0,
count: 1,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 1,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
},
},
{
i: 1,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
},
{
i: 2,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
}
for _, tt := range tests {
t.Run(fmt.Sprintf("i %d, count %d", tt.i, tt.count), func(t *testing.T) {
start, end := PartitionKeyspace(tt.i, tt.count)
require.Equal(t, tt.expected[0], start)
require.Equal(t, tt.expected[1], end)
})
}
t.Run("panics on invalid i or count", func(t *testing.T) {
require.Panics(t, func() {
PartitionKeyspace(1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(0, -1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, -1)
})
})
}
func randAddr(t *testing.T) common.Address {
var addr common.Address
_, err := rand.Read(addr[:])
......
package util
import (
"fmt"
"math/big"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
var (
// maxSlot is the maximum possible storage slot.
maxSlot = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
)
type DBFactory func() (*state.StateDB, error)
type StateCallback func(db *state.StateDB, key, value common.Hash) error
func IterateState(dbFactory DBFactory, address common.Address, cb StateCallback, workers int) error {
if workers <= 0 {
panic("workers must be greater than 0")
}
// WaitGroup to wait for all workers to finish.
var wg sync.WaitGroup
// Channel to receive errors from each iteration job.
errCh := make(chan error, workers)
// Channel to cancel all iteration jobs.
cancelCh := make(chan struct{})
worker := func(start, end common.Hash) {
// Decrement the WaitGroup when the function returns.
defer wg.Done()
db, err := dbFactory()
if err != nil {
// Should never happen, so explode if it does.
log.Crit("cannot create state db", "err", err)
}
st, err := db.StorageTrie(address)
if err != nil {
// Should never happen, so explode if it does.
log.Crit("cannot get storage trie", "address", address, "err", err)
}
// st can be nil if the account doesn't exist.
if st == nil {
errCh <- fmt.Errorf("account does not exist: %s", address.Hex())
return
}
it := trie.NewIterator(st.NodeIterator(start.Bytes()))
// Below code is largely based on db.ForEachStorage. We can't use that
// because it doesn't allow us to specify a start and end key.
for it.Next() {
select {
case <-cancelCh:
// If one of the workers encounters an error, cancel all of them.
return
default:
break
}
// Use the raw (i.e., secure hashed) key to check if we've reached
// the end of the partition. Use > rather than >= here to account for
// the fact that the values returned by PartitionKeyspace are inclusive.
// Duplicate addresses that may be returned by this iteration are
// filtered out in the collector.
if new(big.Int).SetBytes(it.Key).Cmp(end.Big()) > 0 {
return
}
// Skip if the value is empty.
rawValue := it.Value
if len(rawValue) == 0 {
continue
}
// Get the preimage.
rawKey := st.GetKey(it.Key)
if rawKey == nil {
// Should never happen, so explode if it does.
log.Crit("cannot get preimage for storage key", "key", it.Key)
}
key := common.BytesToHash(rawKey)
// Parse the raw value.
_, content, _, err := rlp.Split(rawValue)
if err != nil {
// Should never happen, so explode if it does.
log.Crit("mal-formed data in state: %v", err)
}
value := common.BytesToHash(content)
// Call the callback with the DB, key, and value. Errors get
// bubbled up to the errCh.
if err := cb(db, key, value); err != nil {
errCh <- err
return
}
}
}
for i := 0; i < workers; i++ {
wg.Add(1)
// Partition the keyspace per worker.
start, end := PartitionKeyspace(i, workers)
// Kick off our worker.
go worker(start, end)
}
wg.Wait()
for len(errCh) > 0 {
err := <-errCh
if err != nil {
return err
}
}
return nil
}
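A minimal usage sketch for IterateState, assuming it sits in this util package (so DBFactory, state, common, and sync are already in scope) and that a DBFactory reopening the relevant state root is available; the helper name and worker count are illustrative. Note that the callback runs on every worker goroutine, so any shared state it touches must be synchronized:

// countStorageSlots is a sketch of driving IterateState with a concurrency-safe callback.
func countStorageSlots(factory DBFactory, addr common.Address) (int, error) {
	var mu sync.Mutex
	count := 0
	err := IterateState(factory, addr, func(db *state.StateDB, key, value common.Hash) error {
		// Guard the shared counter; the callback is invoked from several goroutines.
		mu.Lock()
		count++
		mu.Unlock()
		return nil
	}, 4)
	return count, err
}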
// PartitionKeyspace divides the key space into partitions by dividing the maximum keyspace
// by count then multiplying by i. This will leave some slots left over, which we handle below. It
// returns the start and end keys for the partition as a common.Hash. Note that the returned range
// of keys is inclusive, i.e., [start, end] NOT [start, end).
func PartitionKeyspace(i int, count int) (common.Hash, common.Hash) {
if i < 0 || count < 0 {
panic("i and count must be greater than 0")
}
if i > count-1 {
panic("i must be less than count - 1")
}
// Divide the key space into partitions by dividing the key space by the number
// of jobs. This will leave some slots left over, which we handle below.
partSize := new(big.Int).Div(maxSlot.Big(), big.NewInt(int64(count)))
start := common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i)), partSize))
var end common.Hash
if i < count-1 {
// If this is not the last partition, use the next partition's start key as the end.
end = common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i+1)), partSize))
} else {
// If this is the last partition, use the max slot as the end.
end = maxSlot
}
return start, end
}
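Concretely, for count = 2 the partition size is maxSlot / 2 = 0x7fff…ff, so worker 0 scans [0x00, 0x7fff…ff] and worker 1 scans [0x7fff…ff, 0xffff…ff], which are exactly the values expected by the test table below. Because both ends are inclusive, a slot landing exactly on a boundary can be visited by two workers, which is why consumers of IterateState are expected to tolerate the occasional duplicate key.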
package util
import (
crand "crypto/rand"
"fmt"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/trie"
"github.com/stretchr/testify/require"
)
var testAddr = common.Address{0: 0xff}
func TestStateIteratorWorkers(t *testing.T) {
_, factory, _ := setupRandTest(t)
for i := -1; i <= 0; i++ {
require.Panics(t, func() {
_ = IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
return nil
}, i)
})
}
}
func TestStateIteratorNonexistentAccount(t *testing.T) {
_, factory, _ := setupRandTest(t)
require.ErrorContains(t, IterateState(factory, common.Address{}, func(db *state.StateDB, key, value common.Hash) error {
return nil
}, 1), "account does not exist")
}
func TestStateIteratorRandomOK(t *testing.T) {
for i := 0; i < 100; i++ {
hashes, factory, workerCount := setupRandTest(t)
seenHashes := make(map[common.Hash]bool)
hashCh := make(chan common.Hash)
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
for hash := range hashCh {
seenHashes[hash] = true
}
}()
require.NoError(t, IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
hashCh <- key
return nil
}, workerCount))
close(hashCh)
<-doneCh
// Perform a less or equal check here in case of duplicates. The map check below will assert
// that all of the hashes are accounted for.
require.LessOrEqual(t, len(seenHashes), len(hashes))
// Every hash we put into state should have been iterated over.
for _, hash := range hashes {
require.Contains(t, seenHashes, hash)
}
}
}
func TestStateIteratorRandomError(t *testing.T) {
for i := 0; i < 100; i++ {
hashes, factory, workerCount := setupRandTest(t)
failHash := hashes[rand.Intn(len(hashes))]
require.ErrorContains(t, IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
if key == failHash {
return fmt.Errorf("test error")
}
return nil
}, workerCount), "test error")
}
}
func TestPartitionKeyspace(t *testing.T) {
tests := []struct {
i int
count int
expected [2]common.Hash
}{
{
i: 0,
count: 1,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 1,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
},
},
{
i: 1,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
},
{
i: 2,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
}
for _, tt := range tests {
t.Run(fmt.Sprintf("i %d, count %d", tt.i, tt.count), func(t *testing.T) {
start, end := PartitionKeyspace(tt.i, tt.count)
require.Equal(t, tt.expected[0], start)
require.Equal(t, tt.expected[1], end)
})
}
t.Run("panics on invalid i or count", func(t *testing.T) {
require.Panics(t, func() {
PartitionKeyspace(1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(0, -1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, -1)
})
})
}
func setupRandTest(t *testing.T) ([]common.Hash, DBFactory, int) {
memDB := rawdb.NewMemoryDatabase()
db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
require.NoError(t, err)
hashCount := rand.Intn(100)
if hashCount == 0 {
hashCount = 1
}
hashes := make([]common.Hash, hashCount)
db.CreateAccount(testAddr)
for j := 0; j < hashCount; j++ {
hashes[j] = randHash(t)
db.SetState(testAddr, hashes[j], hashes[j])
}
root, err := db.Commit(false)
require.NoError(t, err)
err = db.Database().TrieDB().Commit(root, true)
require.NoError(t, err)
factory := func() (*state.StateDB, error) {
return state.New(root, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
}
workerCount := rand.Intn(64)
if workerCount == 0 {
workerCount = 1
}
return hashes, factory, workerCount
}
func randHash(t *testing.T) common.Hash {
var h common.Hash
_, err := crand.Read(h[:])
require.NoError(t, err)
return h
}
......@@ -14,6 +14,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-proposer/metrics"
"github.com/ethereum-optimism/optimism/op-proposer/proposer"
opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
......@@ -60,7 +61,7 @@ func NewL2Proposer(t Testing, log log.Logger, cfg *ProposerCfg, l1 *ethclient.Cl
SignerFnFactory: signer,
}
dr, err := proposer.NewL2OutputSubmitter(proposerCfg, log)
dr, err := proposer.NewL2OutputSubmitter(proposerCfg, log, metrics.NoopMetrics)
require.NoError(t, err)
return &L2Proposer{
......
......@@ -15,6 +15,7 @@ import (
batchermetrics "github.com/ethereum-optimism/optimism/op-batcher/metrics"
"github.com/ethereum-optimism/optimism/op-node/chaincfg"
"github.com/ethereum-optimism/optimism/op-node/sources"
proposermetrics "github.com/ethereum-optimism/optimism/op-proposer/metrics"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
......@@ -277,6 +278,7 @@ func TestMigration(t *testing.T) {
L2EngineAddr: gethNode.HTTPAuthEndpoint(),
L2EngineJWTSecret: testingJWTSecret,
},
L2Sync: &node.PreparedL2SyncEndpoint{Client: nil, TrustRPC: false},
Driver: driver.Config{
VerifierConfDepth: 0,
SequencerConfDepth: 0,
......@@ -362,7 +364,7 @@ func TestMigration(t *testing.T) {
Format: "text",
},
PrivateKey: hexPriv(secrets.Proposer),
}, lgr.New("module", "proposer"))
}, lgr.New("module", "proposer"), proposermetrics.NoopMetrics)
require.NoError(t, err)
t.Cleanup(func() {
proposer.Stop()
......
......@@ -37,6 +37,7 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/driver"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-node/testlog"
proposermetrics "github.com/ethereum-optimism/optimism/op-proposer/metrics"
l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
)
......@@ -193,6 +194,9 @@ type SystemConfig struct {
// If the proposer can make proposals for L2 blocks derived from L1 blocks which are not finalized on L1 yet.
NonFinalizedProposals bool
// Explicitly disable batcher, for tests that rely on unsafe L2 payloads
DisableBatcher bool
}
type System struct {
......@@ -417,6 +421,10 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
L2EngineAddr: l2EndpointConfig,
L2EngineJWTSecret: cfg.JWTSecret,
}
rollupCfg.L2Sync = &rollupNode.PreparedL2SyncEndpoint{
Client: nil,
TrustRPC: false,
}
}
// Geth Clients
......@@ -572,7 +580,7 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
Format: "text",
},
PrivateKey: hexPriv(cfg.Secrets.Proposer),
}, sys.cfg.Loggers["proposer"])
}, sys.cfg.Loggers["proposer"], proposermetrics.NoopMetrics)
if err != nil {
return nil, fmt.Errorf("unable to setup l2 output submitter: %w", err)
}
......@@ -606,8 +614,11 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) {
return nil, fmt.Errorf("failed to setup batch submitter: %w", err)
}
if err := sys.BatchSubmitter.Start(); err != nil {
return nil, fmt.Errorf("unable to start batch submitter: %w", err)
// Batcher may be enabled later
if !sys.cfg.DisableBatcher {
if err := sys.BatchSubmitter.Start(); err != nil {
return nil, fmt.Errorf("unable to start batch submitter: %w", err)
}
}
return sys, nil
......
......@@ -649,7 +649,7 @@ func TestSystemMockP2P(t *testing.T) {
require.Contains(t, received, receiptVerif.BlockHash)
}
// TestSystemMockP2P sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that
// TestSystemRPCAltSync sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that
// the nodes can sync L2 blocks before they are confirmed on L1.
//
// Test steps:
......@@ -660,24 +660,28 @@ func TestSystemMockP2P(t *testing.T) {
// 6. Wait for the RPC sync method to grab the block from the sequencer over RPC and insert it into the verifier's unsafe chain.
// 7. Wait for the verifier to sync the unsafe chain into the safe chain.
// 8. Verify that the TX is included in the verifier's safe chain.
func TestSystemMockAltSync(t *testing.T) {
func TestSystemRPCAltSync(t *testing.T) {
parallel(t)
if !verboseGethNodes {
log.Root().SetHandler(log.DiscardHandler())
}
cfg := DefaultSystemConfig(t)
// slow down L1 blocks so we can see the L2 blocks arrive well before the L1 blocks do.
// Keep the seq window small so the L2 chain is started quick
cfg.DeployConfig.L1BlockTime = 10
// the default is nil, but this may change in the future.
// This test must ensure the blocks are not synced via Gossip, but instead via the alt RPC based sync.
cfg.P2PTopology = nil
// Disable batcher, so there will not be any L1 data to sync from
cfg.DisableBatcher = true
var published, received []common.Hash
var published, received []string
seqTracer, verifTracer := new(FnTracer), new(FnTracer)
// The sequencer still publishes the blocks to the tracer, even if they do not reach the network due to disabled P2P
seqTracer.OnPublishL2PayloadFn = func(ctx context.Context, payload *eth.ExecutionPayload) {
published = append(published, payload.BlockHash)
published = append(published, payload.ID().String())
}
// Blocks are now received via the RPC based alt-sync method
verifTracer.OnUnsafeL2PayloadFn = func(ctx context.Context, from peer.ID, payload *eth.ExecutionPayload) {
received = append(received, payload.BlockHash)
received = append(received, payload.ID().String())
}
cfg.Nodes["sequencer"].Tracer = seqTracer
cfg.Nodes["verifier"].Tracer = verifTracer
......@@ -687,8 +691,8 @@ func TestSystemMockAltSync(t *testing.T) {
role: "sequencer",
action: func(sCfg *SystemConfig, system *System) {
rpc, _ := system.Nodes["sequencer"].Attach() // never errors
cfg.Nodes["verifier"].L2Sync = &rollupNode.L2SyncRPCConfig{
Rpc: client.NewBaseRPCClient(rpc),
cfg.Nodes["verifier"].L2Sync = &rollupNode.PreparedL2SyncEndpoint{
Client: client.NewBaseRPCClient(rpc),
}
},
})
......@@ -726,7 +730,7 @@ func TestSystemMockAltSync(t *testing.T) {
require.Equal(t, receiptSeq, receiptVerif)
// Verify that the tx was received via RPC sync (P2P is disabled)
require.Contains(t, received, receiptVerif.BlockHash)
require.Contains(t, received, eth.BlockID{Hash: receiptVerif.BlockHash, Number: receiptVerif.BlockNumber.Uint64()}.String())
// Verify that everything that was received was published
require.GreaterOrEqual(t, len(published), len(received))
......
......@@ -175,6 +175,13 @@ var (
EnvVar: prefixEnvVar("L2_BACKUP_UNSAFE_SYNC_RPC"),
Required: false,
}
BackupL2UnsafeSyncRPCTrustRPC = cli.StringFlag{
Name: "l2.backup-unsafe-sync-rpc.trustrpc",
Usage: "Like l1.trustrpc, configure if response data from the RPC needs to be verified, e.g. blockhash computation." +
"This does not include checks if the blockhash is part of the canonical chain.",
EnvVar: prefixEnvVar("L2_BACKUP_UNSAFE_SYNC_RPC_TRUST_RPC"),
Required: false,
}
)
var requiredFlags = []cli.Flag{
......@@ -207,6 +214,7 @@ var optionalFlags = []cli.Flag{
HeartbeatMonikerFlag,
HeartbeatURLFlag,
BackupL2UnsafeSyncRPC,
BackupL2UnsafeSyncRPCTrustRPC,
}
// Flags contains the list of configuration options available to the binary.
......
......@@ -20,7 +20,9 @@ type L2EndpointSetup interface {
}
type L2SyncEndpointSetup interface {
Setup(ctx context.Context, log log.Logger) (cl client.RPC, err error)
// Setup an RPC client to another L2 node to sync L2 blocks from.
// It may return a nil client with nil error if RPC based sync is not enabled.
Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, err error)
Check() error
}
......@@ -82,45 +84,45 @@ func (p *PreparedL2Endpoints) Setup(ctx context.Context, log log.Logger) (client
// L2SyncEndpointConfig contains configuration for the fallback sync endpoint
type L2SyncEndpointConfig struct {
// Address of the L2 RPC to use for backup sync
// Address of the L2 RPC to use for backup sync, may be empty if RPC alt-sync is disabled.
L2NodeAddr string
TrustRPC bool
}
var _ L2SyncEndpointSetup = (*L2SyncEndpointConfig)(nil)
func (cfg *L2SyncEndpointConfig) Setup(ctx context.Context, log log.Logger) (client.RPC, error) {
// Setup creates an RPC client to sync from.
// It will return nil without error if no sync method is configured.
func (cfg *L2SyncEndpointConfig) Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, err error) {
if cfg.L2NodeAddr == "" {
return nil, false, nil
}
l2Node, err := client.NewRPC(ctx, log, cfg.L2NodeAddr)
if err != nil {
return nil, err
return nil, false, err
}
return l2Node, nil
return l2Node, cfg.TrustRPC, nil
}
func (cfg *L2SyncEndpointConfig) Check() error {
if cfg.L2NodeAddr == "" {
return errors.New("empty L2 Node Address")
}
// empty addr is valid, as it is optional.
return nil
}
type L2SyncRPCConfig struct {
// RPC endpoint to use for syncing
Rpc client.RPC
type PreparedL2SyncEndpoint struct {
// RPC endpoint to use for syncing, may be nil if RPC alt-sync is disabled.
Client client.RPC
TrustRPC bool
}
var _ L2SyncEndpointSetup = (*L2SyncRPCConfig)(nil)
var _ L2SyncEndpointSetup = (*PreparedL2SyncEndpoint)(nil)
func (cfg *L2SyncRPCConfig) Setup(ctx context.Context, log log.Logger) (client.RPC, error) {
return cfg.Rpc, nil
func (cfg *PreparedL2SyncEndpoint) Setup(ctx context.Context, log log.Logger) (cl client.RPC, trust bool, err error) {
return cfg.Client, cfg.TrustRPC, nil
}
func (cfg *L2SyncRPCConfig) Check() error {
if cfg.Rpc == nil {
return errors.New("rpc cannot be nil")
}
func (cfg *PreparedL2SyncEndpoint) Check() error {
return nil
}
......
......@@ -80,6 +80,9 @@ func (cfg *Config) Check() error {
if err := cfg.L2.Check(); err != nil {
return fmt.Errorf("l2 endpoint config error: %w", err)
}
if err := cfg.L2Sync.Check(); err != nil {
return fmt.Errorf("sync config error: %w", err)
}
if err := cfg.Rollup.Check(); err != nil {
return fmt.Errorf("rollup config error: %w", err)
}
......
......@@ -33,6 +33,7 @@ type OpNode struct {
l1Source *sources.L1Client // L1 Client to fetch data from
l2Driver *driver.Driver // L2 Engine to Sync
l2Source *sources.EngineClient // L2 Execution Engine RPC bindings
rpcSync *sources.SyncClient // Alt-sync RPC client, optional (may be nil)
server *rpcServer // RPC server hosting the rollup-node API
p2pNode *p2p.NodeP2P // P2P node functionality
p2pSigner p2p.Signer // p2p gossip application messages will be signed with this signer
......@@ -86,6 +87,9 @@ func (n *OpNode) init(ctx context.Context, cfg *Config, snapshotLog log.Logger)
if err := n.initL2(ctx, cfg, snapshotLog); err != nil {
return err
}
if err := n.initRPCSync(ctx, cfg); err != nil {
return err
}
if err := n.initP2PSigner(ctx, cfg); err != nil {
return err
}
......@@ -197,29 +201,27 @@ func (n *OpNode) initL2(ctx context.Context, cfg *Config, snapshotLog log.Logger
return err
}
var syncClient *sources.SyncClient
// If the L2 sync config is present, use it to create a sync client
if cfg.L2Sync != nil {
if err := cfg.L2Sync.Check(); err != nil {
log.Info("L2 sync config is not present, skipping L2 sync client setup", "err", err)
} else {
rpcSyncClient, err := cfg.L2Sync.Setup(ctx, n.log)
if err != nil {
return fmt.Errorf("failed to setup L2 execution-engine RPC client for backup sync: %w", err)
}
n.l2Driver = driver.NewDriver(&cfg.Driver, &cfg.Rollup, n.l2Source, n.l1Source, n, n, n.log, snapshotLog, n.metrics)
// The sync client's RPC is always trusted
config := sources.SyncClientDefaultConfig(&cfg.Rollup, true)
return nil
}
syncClient, err = sources.NewSyncClient(n.OnUnsafeL2Payload, rpcSyncClient, n.log, n.metrics.L2SourceCache, config)
if err != nil {
return fmt.Errorf("failed to create sync client: %w", err)
}
}
func (n *OpNode) initRPCSync(ctx context.Context, cfg *Config) error {
rpcSyncClient, trustRPC, err := cfg.L2Sync.Setup(ctx, n.log)
if err != nil {
return fmt.Errorf("failed to setup L2 execution-engine RPC client for backup sync: %w", err)
}
if rpcSyncClient == nil { // if no RPC client is configured to sync from, then don't add the RPC sync client
return nil
}
n.l2Driver = driver.NewDriver(&cfg.Driver, &cfg.Rollup, n.l2Source, n.l1Source, syncClient, n, n.log, snapshotLog, n.metrics)
config := sources.SyncClientDefaultConfig(&cfg.Rollup, trustRPC)
syncClient, err := sources.NewSyncClient(n.OnUnsafeL2Payload, rpcSyncClient, n.log, n.metrics.L2SourceCache, config)
if err != nil {
return fmt.Errorf("failed to create sync client: %w", err)
}
n.rpcSync = syncClient
return nil
}
......@@ -292,11 +294,12 @@ func (n *OpNode) Start(ctx context.Context) error {
}
// If the backup unsafe sync client is enabled, start its event loop
if n.l2Driver.L2SyncCl != nil {
if err := n.l2Driver.L2SyncCl.Start(); err != nil {
if n.rpcSync != nil {
if err := n.rpcSync.Start(); err != nil {
n.log.Error("Could not start the backup sync client", "err", err)
return err
}
n.log.Info("Started L2-RPC sync service")
}
return nil
......@@ -375,6 +378,14 @@ func (n *OpNode) OnUnsafeL2Payload(ctx context.Context, from peer.ID, payload *e
return nil
}
func (n *OpNode) RequestL2Range(ctx context.Context, start, end uint64) error {
if n.rpcSync != nil {
return n.rpcSync.RequestL2Range(ctx, start, end)
}
n.log.Debug("ignoring request to sync L2 range, no sync method available")
return nil
}
func (n *OpNode) P2P() p2p.Node {
return n.p2pNode
}
......@@ -413,8 +424,8 @@ func (n *OpNode) Close() error {
}
// If the L2 sync client is present & running, close it.
if n.l2Driver.L2SyncCl != nil {
if err := n.l2Driver.L2SyncCl.Close(); err != nil {
if n.rpcSync != nil {
if err := n.rpcSync.Close(); err != nil {
result = multierror.Append(result, fmt.Errorf("failed to close L2 engine backup sync client cleanly: %w", err))
}
}
......
......@@ -107,7 +107,7 @@ type EngineQueue struct {
// The queued-up attributes
safeAttributesParent eth.L2BlockRef
safeAttributes *eth.PayloadAttributes
unsafePayloads PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps
unsafePayloads *PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps and duplicates
// Tracks which L2 blocks where last derived from which L1 block. At most finalityLookback large.
finalityData []FinalityData
......@@ -127,18 +127,14 @@ var _ EngineControl = (*EngineQueue)(nil)
// NewEngineQueue creates a new EngineQueue, which should be Reset(origin) before use.
func NewEngineQueue(log log.Logger, cfg *rollup.Config, engine Engine, metrics Metrics, prev NextAttributesProvider, l1Fetcher L1Fetcher) *EngineQueue {
return &EngineQueue{
log: log,
cfg: cfg,
engine: engine,
metrics: metrics,
finalityData: make([]FinalityData, 0, finalityLookback),
unsafePayloads: PayloadsQueue{
MaxSize: maxUnsafePayloadsMemory,
SizeFn: payloadMemSize,
blockNos: make(map[uint64]bool),
},
prev: prev,
l1Fetcher: l1Fetcher,
log: log,
cfg: cfg,
engine: engine,
metrics: metrics,
finalityData: make([]FinalityData, 0, finalityLookback),
unsafePayloads: NewPayloadsQueue(maxUnsafePayloadsMemory, payloadMemSize),
prev: prev,
l1Fetcher: l1Fetcher,
}
}
......@@ -682,7 +678,8 @@ func (eq *EngineQueue) Reset(ctx context.Context, _ eth.L1BlockRef, _ eth.System
return io.EOF
}
// GetUnsafeQueueGap retrieves the current [start, end] range of the gap between the tip of the unsafe priority queue and the unsafe head.
// GetUnsafeQueueGap retrieves the current [start, end) range (incl. start, excl. end)
// of the gap between the tip of the unsafe priority queue and the unsafe head.
// If there is no gap, the difference between end and start will be 0.
func (eq *EngineQueue) GetUnsafeQueueGap(expectedNumber uint64) (start uint64, end uint64) {
// The start of the gap is always the unsafe head + 1
......@@ -691,9 +688,11 @@ func (eq *EngineQueue) GetUnsafeQueueGap(expectedNumber uint64) (start uint64, e
// If the priority queue is empty, the end is the first block number at the top of the priority queue
// Otherwise, the end is the expected block number
if first := eq.unsafePayloads.Peek(); first != nil {
// Don't include the payload we already have in the sync range
end = first.ID().Number
} else {
end = expectedNumber
// Include the expected payload in the sync range
end = expectedNumber + 1
}
return start, end
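A quick worked example of the half-open semantics above, as an illustrative sketch only: gap mirrors the empty-queue branch with assumed block numbers, it is not the real EngineQueue method.

package main

import "fmt"

// gap mimics GetUnsafeQueueGap when no unsafe payloads are queued:
// the range starts right after the unsafe head and includes the expected block.
func gap(unsafeHead, expectedNumber uint64) (start, end uint64) {
	start = unsafeHead + 1    // first missing block
	end = expectedNumber + 1  // end is exclusive, so the expected block is covered
	return
}

func main() {
	s, e := gap(10, 15)
	fmt.Println(s, e, e-s) // 11 16 5 -> blocks 11..15 need to be requested
}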
......
......@@ -5,6 +5,8 @@ import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum-optimism/optimism/op-node/eth"
)
......@@ -48,8 +50,8 @@ func (pq *payloadsByNumber) Pop() any {
}
const (
// ~580 bytes per payload, with some margin for overhead
payloadMemFixedCost uint64 = 600
// ~580 bytes per payload, with some margin for overhead like map data
payloadMemFixedCost uint64 = 800
// 24 bytes per tx overhead (size of slice header in memory)
payloadTxMemOverhead uint64 = 24
)
......@@ -72,15 +74,25 @@ func payloadMemSize(p *eth.ExecutionPayload) uint64 {
// without the need to use heap.Push/heap.Pop as caller.
// PayloadsQueue maintains a MaxSize by counting and tracking sizes of added eth.ExecutionPayload entries.
// When the size grows too large, the first (lowest block-number) payload is removed from the queue.
// PayloadsQueue allows entries with same block number, or even full duplicates.
// PayloadsQueue allows entries with the same block number, but does not allow duplicate blocks.
type PayloadsQueue struct {
pq payloadsByNumber
currentSize uint64
MaxSize uint64
blockNos map[uint64]bool
blockHashes map[common.Hash]struct{}
SizeFn func(p *eth.ExecutionPayload) uint64
}
func NewPayloadsQueue(maxSize uint64, sizeFn func(p *eth.ExecutionPayload) uint64) *PayloadsQueue {
return &PayloadsQueue{
pq: nil,
currentSize: 0,
MaxSize: maxSize,
blockHashes: make(map[common.Hash]struct{}),
SizeFn: sizeFn,
}
}
func (upq *PayloadsQueue) Len() int {
return len(upq.pq)
}
......@@ -100,8 +112,8 @@ func (upq *PayloadsQueue) Push(p *eth.ExecutionPayload) error {
if p == nil {
return errors.New("cannot add nil payload")
}
if upq.blockNos[p.ID().Number] {
return errors.New("cannot add duplicate payload")
if _, ok := upq.blockHashes[p.BlockHash]; ok {
return fmt.Errorf("cannot add duplicate payload %s", p.ID())
}
size := upq.SizeFn(p)
if size > upq.MaxSize {
......@@ -115,7 +127,7 @@ func (upq *PayloadsQueue) Push(p *eth.ExecutionPayload) error {
for upq.currentSize > upq.MaxSize {
upq.Pop()
}
upq.blockNos[p.ID().Number] = true
upq.blockHashes[p.BlockHash] = struct{}{}
return nil
}
......@@ -137,7 +149,7 @@ func (upq *PayloadsQueue) Pop() *eth.ExecutionPayload {
}
ps := heap.Pop(&upq.pq).(payloadAndSize) // nosemgrep
upq.currentSize -= ps.size
// remove the key from the blockNos map
delete(upq.blockNos, ps.payload.ID().Number)
// remove the key from the block hashes map
delete(upq.blockHashes, ps.payload.BlockHash)
return ps.payload
}
......@@ -4,6 +4,7 @@ import (
"container/heap"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/op-node/eth"
......@@ -74,20 +75,17 @@ func TestPayloadMemSize(t *testing.T) {
}
func TestPayloadsQueue(t *testing.T) {
pq := PayloadsQueue{
MaxSize: payloadMemFixedCost * 3,
SizeFn: payloadMemSize,
blockNos: make(map[uint64]bool),
}
pq := NewPayloadsQueue(payloadMemFixedCost*3, payloadMemSize)
require.Equal(t, 0, pq.Len())
require.Equal(t, (*eth.ExecutionPayload)(nil), pq.Peek())
require.Equal(t, (*eth.ExecutionPayload)(nil), pq.Pop())
a := &eth.ExecutionPayload{BlockNumber: 3}
b := &eth.ExecutionPayload{BlockNumber: 4}
c := &eth.ExecutionPayload{BlockNumber: 5}
d := &eth.ExecutionPayload{BlockNumber: 6}
bAlt := &eth.ExecutionPayload{BlockNumber: 4}
a := &eth.ExecutionPayload{BlockNumber: 3, BlockHash: common.Hash{3}}
b := &eth.ExecutionPayload{BlockNumber: 4, BlockHash: common.Hash{4}}
c := &eth.ExecutionPayload{BlockNumber: 5, BlockHash: common.Hash{5}}
d := &eth.ExecutionPayload{BlockNumber: 6, BlockHash: common.Hash{6}}
bAlt := &eth.ExecutionPayload{BlockNumber: 4, BlockHash: common.Hash{0xff}}
bDup := &eth.ExecutionPayload{BlockNumber: 4, BlockHash: common.Hash{4}}
require.NoError(t, pq.Push(b))
require.Equal(t, pq.Len(), 1)
require.Equal(t, pq.Peek(), b)
......@@ -130,7 +128,9 @@ func TestPayloadsQueue(t *testing.T) {
require.Equal(t, pq.Peek(), a)
// No duplicates allowed
require.Error(t, pq.Push(bAlt))
require.Error(t, pq.Push(bDup))
// But reorg data allowed
require.NoError(t, pq.Push(bAlt))
require.NoError(t, pq.Push(d))
require.Equal(t, pq.Len(), 3)
......
......@@ -10,7 +10,6 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/sources"
)
type Metrics interface {
......@@ -82,8 +81,19 @@ type Network interface {
PublishL2Payload(ctx context.Context, payload *eth.ExecutionPayload) error
}
type AltSync interface {
// RequestL2Range informs the sync source that the given range of L2 blocks is missing,
// and should be retrieved from any available alternative syncing source.
// The start of the range is inclusive, the end is exclusive.
// The sync results should be returned back to the driver via the OnUnsafeL2Payload(ctx, payload) method.
// The latest requested range should always take priority over previous requests.
// There may be overlaps in requested ranges.
// An error may be returned if the scheduling fails immediately, e.g. a context timeout.
RequestL2Range(ctx context.Context, start, end uint64) error
}
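As a sketch of how small an AltSync implementation can be, the following hypothetical no-op source (assuming the driver package's existing log import) accepts every range request without scheduling any work, which mirrors how the node behaves when no sync source is configured:

package driver

import (
	"context"

	"github.com/ethereum/go-ethereum/log"
)

// noopAltSync is an illustrative, hypothetical AltSync: it logs and discards every request.
type noopAltSync struct {
	log log.Logger
}

func (s *noopAltSync) RequestL2Range(ctx context.Context, start, end uint64) error {
	// Returning nil signals the request was accepted, even though nothing will be fetched.
	s.log.Debug("ignoring L2 range request, no alt-sync source configured", "start", start, "end", end)
	return nil
}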
// NewDriver composes an events handler that tracks L1 state, triggers L2 derivation, and optionally sequences new L2 blocks.
func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, syncClient *sources.SyncClient, network Network, log log.Logger, snapshotLog log.Logger, metrics Metrics) *Driver {
func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, altSync AltSync, network Network, log log.Logger, snapshotLog log.Logger, metrics Metrics) *Driver {
l1State := NewL1State(log, metrics)
sequencerConfDepth := NewConfDepth(driverCfg.SequencerConfDepth, l1State.L1Head, l1)
findL1Origin := NewL1OriginSelector(log, cfg, sequencerConfDepth)
......@@ -115,6 +125,6 @@ func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, sy
l1SafeSig: make(chan eth.L1BlockRef, 10),
l1FinalizedSig: make(chan eth.L1BlockRef, 10),
unsafeL2Payloads: make(chan *eth.ExecutionPayload, 10),
L2SyncCl: syncClient,
altSync: altSync,
}
}
......@@ -16,7 +16,6 @@ import (
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-service/backoff"
)
......@@ -64,8 +63,8 @@ type Driver struct {
l1SafeSig chan eth.L1BlockRef
l1FinalizedSig chan eth.L1BlockRef
// Backup unsafe sync client
L2SyncCl *sources.SyncClient
// Interface to signal the L2 block range to sync.
altSync AltSync
// L2 Signals:
......@@ -200,11 +199,12 @@ func (s *Driver) eventLoop() {
sequencerTimer.Reset(delay)
}
// Create a ticker to check if there is a gap in the engine queue every 15 seconds
// If there is, we send requests to the backup RPC to retrieve the missing payloads
// and add them to the unsafe queue.
altSyncTicker := time.NewTicker(15 * time.Second)
// Create a ticker to check if there is a gap in the engine queue. Whenever
// there is, we send requests to sync source to retrieve the missing payloads.
syncCheckInterval := time.Duration(s.config.BlockTime) * time.Second * 2
altSyncTicker := time.NewTicker(syncCheckInterval)
defer altSyncTicker.Stop()
lastUnsafeL2 := s.derivation.UnsafeL2Head()
for {
// If we are sequencing, and the L1 state is ready, update the trigger for the next sequencer action.
......@@ -220,6 +220,13 @@ func (s *Driver) eventLoop() {
sequencerCh = nil
}
// If the engine is not ready, or if the L2 head is actively changing, then reset the alt-sync:
// there is no need to request L2 blocks when we are syncing already.
if head := s.derivation.UnsafeL2Head(); head != lastUnsafeL2 || !s.derivation.EngineReady() {
lastUnsafeL2 = head
altSyncTicker.Reset(syncCheckInterval)
}
select {
case <-sequencerCh:
payload, err := s.sequencer.RunNextSequencerAction(ctx)
......@@ -237,10 +244,12 @@ func (s *Driver) eventLoop() {
}
planSequencerAction() // schedule the next sequencer action to keep the sequencing looping
case <-altSyncTicker.C:
// Check if there is a gap in the current unsafe payload queue. If there is, attempt to fetch
// missing payloads from the backup RPC (if it is configured).
if s.L2SyncCl != nil {
s.checkForGapInUnsafeQueue(ctx)
// Check if there is a gap in the current unsafe payload queue.
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
err := s.checkForGapInUnsafeQueue(ctx)
cancel()
if err != nil {
s.log.Warn("failed to check for unsafe L2 blocks to sync", "err", err)
}
case payload := <-s.unsafeL2Payloads:
s.snapshot("New unsafe payload")
......@@ -462,35 +471,29 @@ type hashAndErrorChannel struct {
err chan error
}
// checkForGapInUnsafeQueue checks if there is a gap in the unsafe queue and attempts to retrieve the missing payloads from the backup RPC.
// WARNING: The sync client's attempt to retrieve the missing payloads is not guaranteed to succeed, and it will fail silently (besides
// emitting warning logs) if the requests fail.
func (s *Driver) checkForGapInUnsafeQueue(ctx context.Context) {
// checkForGapInUnsafeQueue checks if there is a gap in the unsafe queue and attempts to retrieve the missing payloads from an alt-sync method.
// WARNING: This is only an outgoing signal, the blocks are not guaranteed to be retrieved.
// Results are received through OnUnsafeL2Payload.
func (s *Driver) checkForGapInUnsafeQueue(ctx context.Context) error {
// subtract genesis time from wall clock to get the time elapsed since genesis, and then divide that
// difference by the block time to get the expected L2 block number at the current time. If the
// unsafe head does not have this block number, then there is a gap in the queue.
wallClock := uint64(time.Now().Unix())
genesisTimestamp := s.config.Genesis.L2Time
if wallClock < genesisTimestamp {
s.log.Debug("nothing to sync, did not reach genesis L2 time yet", "genesis", genesisTimestamp)
return nil
}
wallClockGenesisDiff := wallClock - genesisTimestamp
expectedL2Block := wallClockGenesisDiff / s.config.BlockTime
// Note: round down, we should not request blocks into the future.
blocksSinceGenesis := wallClockGenesisDiff / s.config.BlockTime
expectedL2Block := s.config.Genesis.L2.Number + blocksSinceGenesis
start, end := s.derivation.GetUnsafeQueueGap(expectedL2Block)
size := end - start
// Check if there is a gap between the unsafe head and the expected L2 block number at the current time.
if size > 0 {
s.log.Warn("Gap in payload queue tip and expected unsafe chain detected", "start", start, "end", end, "size", size)
s.log.Info("Attempting to fetch missing payloads from backup RPC", "start", start, "end", end, "size", size)
// Attempt to fetch the missing payloads from the backup unsafe sync RPC concurrently.
// Concurrent requests are safe here due to the engine queue being a priority queue.
for blockNumber := start; blockNumber <= end; blockNumber++ {
select {
case s.L2SyncCl.FetchUnsafeBlock <- blockNumber:
// Do nothing- the block number was successfully sent into the channel
default:
return // If the channel is full, return and wait for the next iteration of the event loop
}
}
if end > start {
s.log.Debug("requesting missing unsafe L2 block range", "start", start, "end", end, "size", end-start)
return s.altSync.RequestL2Range(ctx, start, end)
}
return nil
}
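To make the expected-block arithmetic above concrete, a standalone sketch with assumed values (genesis L2 time 1000, 2-second blocks, genesis L2 number 0, wall clock 1101):

package main

import "fmt"

func main() {
	// Assumed example values, not taken from any real chain config.
	var genesisL2Time, blockTime, genesisL2Number, wallClock uint64 = 1000, 2, 0, 1101
	// Integer division rounds down, so blocks are never requested into the future.
	blocksSinceGenesis := (wallClock - genesisL2Time) / blockTime // 50
	expectedL2Block := genesisL2Number + blocksSinceGenesis
	fmt.Println(expectedL2Block) // 50
}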
......@@ -136,6 +136,7 @@ func NewL2EndpointConfig(ctx *cli.Context, log log.Logger) (*node.L2EndpointConf
func NewL2SyncEndpointConfig(ctx *cli.Context) *node.L2SyncEndpointConfig {
return &node.L2SyncEndpointConfig{
L2NodeAddr: ctx.GlobalString(flags.BackupL2UnsafeSyncRPC.Name),
TrustRPC: ctx.GlobalBool(flags.BackupL2UnsafeSyncRPCTrustRPC.Name),
}
}
......
......@@ -3,12 +3,17 @@ package sources
import (
"context"
"errors"
"fmt"
"io"
"sync"
"time"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-node/sources/caching"
"github.com/ethereum-optimism/optimism/op-service/backoff"
"github.com/ethereum/go-ethereum/log"
"github.com/libp2p/go-libp2p/core/peer"
)
......@@ -18,23 +23,29 @@ var ErrNoUnsafeL2PayloadChannel = errors.New("unsafeL2Payloads channel must not
// RpcSyncPeer is a mock PeerID for the RPC sync client.
var RpcSyncPeer peer.ID = "ALT_RPC_SYNC"
// receivePayload queues the received payload for processing.
// This may return an error if there's no capacity for the payload.
type receivePayload = func(ctx context.Context, from peer.ID, payload *eth.ExecutionPayload) error
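An illustrative sketch (hypothetical helper, not part of this diff, assuming the package's existing context/errors/eth/peer imports): a receivePayload implementation backed by a bounded channel that returns an error when the queue is full instead of blocking, matching the "no capacity" case described above.

func newChannelReceiver(buf int) (receivePayload, <-chan *eth.ExecutionPayload) {
	ch := make(chan *eth.ExecutionPayload, buf)
	recv := func(ctx context.Context, from peer.ID, payload *eth.ExecutionPayload) error {
		select {
		case ch <- payload:
			// Payload queued for processing.
			return nil
		default:
			// Bounded queue is full; report back-pressure to the caller.
			return errors.New("unsafe payload queue is full")
		}
	}
	return recv, ch
}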
type SyncClientInterface interface {
type RPCSync interface {
io.Closer
// Start starts an additional worker syncing job
Start() error
Close() error
fetchUnsafeBlockFromRpc(ctx context.Context, blockNumber uint64)
// RequestL2Range signals that the given range should be fetched, implementing the alt-sync interface.
RequestL2Range(ctx context.Context, start, end uint64) error
}
type SyncClient struct {
*L2Client
FetchUnsafeBlock chan uint64
done chan struct{}
receivePayload receivePayload
wg sync.WaitGroup
}
var _ SyncClientInterface = (*SyncClient)(nil)
requests chan uint64
resCtx context.Context
resCancel context.CancelFunc
receivePayload receivePayload
wg sync.WaitGroup
}
type SyncClientConfig struct {
L2ClientConfig
......@@ -51,41 +62,92 @@ func NewSyncClient(receiver receivePayload, client client.RPC, log log.Logger, m
if err != nil {
return nil, err
}
// This resource context is shared between all workers that may be started
resCtx, resCancel := context.WithCancel(context.Background())
return &SyncClient{
L2Client: l2Client,
FetchUnsafeBlock: make(chan uint64, 128),
done: make(chan struct{}),
receivePayload: receiver,
L2Client: l2Client,
resCtx: resCtx,
resCancel: resCancel,
requests: make(chan uint64, 128),
receivePayload: receiver,
}, nil
}
// Start starts up the state loop.
// The loop will have been started if err is not nil.
// Start starts the syncing background work. This may not be called after Close().
func (s *SyncClient) Start() error {
// TODO(CLI-3635): we can start multiple event loop runners as workers, to parallelize the work
s.wg.Add(1)
go s.eventLoop()
return nil
}
// Close sends a signal to the event loop to stop.
// Close sends a signal to close all concurrent syncing work.
func (s *SyncClient) Close() error {
s.done <- struct{}{}
s.resCancel()
s.wg.Wait()
return nil
}
func (s *SyncClient) RequestL2Range(ctx context.Context, start, end uint64) error {
// Drain previous requests now that we have new information
for len(s.requests) > 0 {
select { // in case requests is being read at the same time, don't block on draining it.
case <-s.requests:
default:
break
}
}
// TODO(CLI-3635): optimize the by-range fetching with the Engine API payloads-by-range method.
s.log.Info("Scheduling to fetch missing payloads from backup RPC", "start", start, "end", end, "size", end-start)
for i := start; i < end; i++ {
select {
case s.requests <- i:
case <-ctx.Done():
return ctx.Err()
}
}
return nil
}
// eventLoop is the main event loop for the sync client.
func (s *SyncClient) eventLoop() {
defer s.wg.Done()
s.log.Info("Starting sync client event loop")
backoffStrategy := &backoff.ExponentialStrategy{
Min: 1000,
Max: 20_000,
MaxJitter: 250,
}
for {
select {
case <-s.done:
case <-s.resCtx.Done():
s.log.Debug("Shutting down RPC sync worker")
return
case blockNumber := <-s.FetchUnsafeBlock:
s.fetchUnsafeBlockFromRpc(context.Background(), blockNumber)
case reqNum := <-s.requests:
err := backoff.DoCtx(s.resCtx, 5, backoffStrategy, func() error {
// Limit the maximum time for fetching payloads
ctx, cancel := context.WithTimeout(s.resCtx, time.Second*10)
defer cancel()
// We are only fetching one block at a time here.
return s.fetchUnsafeBlockFromRpc(ctx, reqNum)
})
if err != nil {
if err == s.resCtx.Err() {
return
}
s.log.Error("failed syncing L2 block via RPC", "err", err, "num", reqNum)
// Reschedule at end of queue
select {
case s.requests <- reqNum:
default:
// drop syncing job if we are too busy with sync jobs already.
}
}
}
}
}
......@@ -95,28 +157,22 @@ func (s *SyncClient) eventLoop() {
//
// Post Shanghai hardfork, the engine API's `PayloadBodiesByRange` method will be much more efficient, but for now,
// the `eth_getBlockByNumber` method is more widely available.
func (s *SyncClient) fetchUnsafeBlockFromRpc(ctx context.Context, blockNumber uint64) {
func (s *SyncClient) fetchUnsafeBlockFromRpc(ctx context.Context, blockNumber uint64) error {
s.log.Info("Requesting unsafe payload from backup RPC", "block number", blockNumber)
payload, err := s.PayloadByNumber(ctx, blockNumber)
if err != nil {
s.log.Warn("Failed to convert block to execution payload", "block number", blockNumber, "err", err)
return
}
// Signature validation is not necessary here since the backup RPC is trusted.
if _, ok := payload.CheckBlockHash(); !ok {
s.log.Warn("Received invalid payload from backup RPC; invalid block hash", "payload", payload.ID())
return
return fmt.Errorf("failed to fetch payload by number (%d): %w", blockNumber, err)
}
// Note: the underlying RPC client used for syncing verifies the execution payload blockhash, if set to untrusted.
s.log.Info("Received unsafe payload from backup RPC", "payload", payload.ID())
// Send the retrieved payload to the `unsafeL2Payloads` channel.
if err = s.receivePayload(ctx, RpcSyncPeer, payload); err != nil {
s.log.Warn("Failed to send payload into the driver's unsafeL2Payloads channel", "payload", payload.ID(), "err", err)
return
return fmt.Errorf("failed to send payload %s into the driver's unsafeL2Payloads channel: %w", payload.ID(), err)
} else {
s.log.Info("Sent received payload into the driver's unsafeL2Payloads channel", "payload", payload.ID())
s.log.Debug("Sent received payload into the driver's unsafeL2Payloads channel", "payload", payload.ID())
return nil
}
}
package metrics
import (
"context"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/prometheus/client_golang/prometheus"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
)
const Namespace = "op_proposer"
type Metricer interface {
RecordInfo(version string)
RecordUp()
// Records all L1 and L2 block events
opmetrics.RefMetricer
RecordL2BlocksProposed(l2ref eth.L2BlockRef)
}
type Metrics struct {
ns string
registry *prometheus.Registry
factory opmetrics.Factory
opmetrics.RefMetrics
Info prometheus.GaugeVec
Up prometheus.Gauge
}
var _ Metricer = (*Metrics)(nil)
func NewMetrics(procName string) *Metrics {
if procName == "" {
procName = "default"
}
ns := Namespace + "_" + procName
registry := opmetrics.NewRegistry()
factory := opmetrics.With(registry)
return &Metrics{
ns: ns,
registry: registry,
factory: factory,
RefMetrics: opmetrics.MakeRefMetrics(ns, factory),
Info: *factory.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "info",
Help: "Pseudo-metric tracking version and config info",
}, []string{
"version",
}),
Up: factory.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "up",
Help: "1 if the op-proposer has finished starting up",
}),
}
}
func (m *Metrics) Serve(ctx context.Context, host string, port int) error {
return opmetrics.ListenAndServe(ctx, m.registry, host, port)
}
func (m *Metrics) StartBalanceMetrics(ctx context.Context,
l log.Logger, client *ethclient.Client, account common.Address) {
opmetrics.LaunchBalanceMetrics(ctx, l, m.registry, m.ns, client, account)
}
// RecordInfo sets a pseudo-metric that contains versioning and
// config info for the op-proposer.
func (m *Metrics) RecordInfo(version string) {
m.Info.WithLabelValues(version).Set(1)
}
// RecordUp sets the up metric to 1.
func (m *Metrics) RecordUp() {
prometheus.MustRegister()
m.Up.Set(1)
}
const (
BlockProposed = "proposed"
)
// RecordL2BlocksProposed should be called when a new L2 block is proposed
func (m *Metrics) RecordL2BlocksProposed(l2ref eth.L2BlockRef) {
m.RecordL2Ref(BlockProposed, l2ref)
}
package metrics
import (
"github.com/ethereum-optimism/optimism/op-node/eth"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
)
type noopMetrics struct{ opmetrics.NoopRefMetrics }
var NoopMetrics Metricer = new(noopMetrics)
func (*noopMetrics) RecordInfo(version string) {}
func (*noopMetrics) RecordUp() {}
func (*noopMetrics) RecordL2BlocksProposed(l2ref eth.L2BlockRef) {}
......@@ -24,9 +24,9 @@ import (
"github.com/ethereum-optimism/optimism/op-bindings/bindings"
"github.com/ethereum-optimism/optimism/op-node/eth"
"github.com/ethereum-optimism/optimism/op-node/sources"
"github.com/ethereum-optimism/optimism/op-proposer/metrics"
opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
oplog "github.com/ethereum-optimism/optimism/op-service/log"
opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics"
oppprof "github.com/ethereum-optimism/optimism/op-service/pprof"
oprpc "github.com/ethereum-optimism/optimism/op-service/rpc"
"github.com/ethereum-optimism/optimism/op-service/txmgr"
......@@ -49,9 +49,10 @@ func Main(version string, cliCtx *cli.Context) error {
}
l := oplog.NewLogger(cfg.LogConfig)
m := metrics.NewMetrics("default")
l.Info("Initializing L2 Output Submitter")
l2OutputSubmitter, err := NewL2OutputSubmitterFromCLIConfig(cfg, l)
l2OutputSubmitter, err := NewL2OutputSubmitterFromCLIConfig(cfg, l, m)
if err != nil {
l.Error("Unable to create the L2 Output Submitter", "error", err)
return err
......@@ -78,17 +79,15 @@ func Main(version string, cliCtx *cli.Context) error {
}()
}
registry := opmetrics.NewRegistry()
metricsCfg := cfg.MetricsConfig
if metricsCfg.Enabled {
l.Info("starting metrics server", "addr", metricsCfg.ListenAddr, "port", metricsCfg.ListenPort)
go func() {
if err := opmetrics.ListenAndServe(ctx, registry, metricsCfg.ListenAddr, metricsCfg.ListenPort); err != nil {
if err := m.Serve(ctx, metricsCfg.ListenAddr, metricsCfg.ListenPort); err != nil {
l.Error("error starting metrics server", err)
}
}()
addr := l2OutputSubmitter.from
opmetrics.LaunchBalanceMetrics(ctx, l, registry, "", l2OutputSubmitter.l1Client, addr)
m.StartBalanceMetrics(ctx, l, l2OutputSubmitter.l1Client, l2OutputSubmitter.from)
}
rpcCfg := cfg.RPCConfig
......@@ -98,6 +97,9 @@ func Main(version string, cliCtx *cli.Context) error {
return fmt.Errorf("error starting RPC server: %w", err)
}
m.RecordInfo(version)
m.RecordUp()
interruptChannel := make(chan os.Signal, 1)
signal.Notify(interruptChannel, []os.Signal{
os.Interrupt,
......@@ -117,6 +119,7 @@ type L2OutputSubmitter struct {
wg sync.WaitGroup
done chan struct{}
log log.Logger
metr metrics.Metricer
ctx context.Context
cancel context.CancelFunc
......@@ -143,7 +146,7 @@ type L2OutputSubmitter struct {
}
// NewL2OutputSubmitterFromCLIConfig creates a new L2 Output Submitter given the CLI Config
func NewL2OutputSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger) (*L2OutputSubmitter, error) {
func NewL2OutputSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger, m metrics.Metricer) (*L2OutputSubmitter, error) {
signer, fromAddress, err := opcrypto.SignerFactoryFromConfig(l, cfg.PrivateKey, cfg.Mnemonic, cfg.L2OutputHDPath, cfg.SignerConfig)
if err != nil {
return nil, err
......@@ -185,11 +188,11 @@ func NewL2OutputSubmitterFromCLIConfig(cfg CLIConfig, l log.Logger) (*L2OutputSu
SignerFnFactory: signer,
}
return NewL2OutputSubmitter(proposerCfg, l)
return NewL2OutputSubmitter(proposerCfg, l, m)
}
// NewL2OutputSubmitter creates a new L2 Output Submitter
func NewL2OutputSubmitter(cfg Config, l log.Logger) (*L2OutputSubmitter, error) {
func NewL2OutputSubmitter(cfg Config, l log.Logger, m metrics.Metricer) (*L2OutputSubmitter, error) {
ctx, cancel := context.WithCancel(context.Background())
cCtx, cCancel := context.WithTimeout(ctx, defaultDialTimeout)
......@@ -228,6 +231,7 @@ func NewL2OutputSubmitter(cfg Config, l log.Logger) (*L2OutputSubmitter, error)
log: l,
ctx: ctx,
cancel: cancel,
metr: m,
l1Client: cfg.L1Client,
rollupClient: cfg.RollupClient,
......@@ -413,9 +417,9 @@ func (l *L2OutputSubmitter) loop() {
l.log.Error("Failed to send proposal transaction", "err", err)
cancel()
break
} else {
cancel()
}
l.metr.RecordL2BlocksProposed(output.BlockRef)
cancel()
case <-l.done:
return
......
......@@ -16,7 +16,7 @@ WORKDIR /opt/foundry
# Only diff from upstream docker image is this clone instead
# of COPY. We select a specific commit to use.
RUN git clone https://github.com/foundry-rs/foundry.git . \
&& git checkout 8f3fca9c608d58981daaffe11e7f8076644cb753
&& git checkout da2392e58bb8a7fefeba46b40c4df1afad8ccd22
RUN source $HOME/.profile && \
cargo build --release && \
......
ignores: [
"@babel/eslint-parser",
"@typescript-eslint/parser",
"eslint-plugin-import",
"eslint-plugin-unicorn",
"eslint-plugin-jsdoc",
"eslint-plugin-prefer-arrow",
"eslint-plugin-react",
"@typescript-eslint/eslint-plugin",
"eslint-config-prettier",
"eslint-plugin-prettier"
]
......@@ -17,7 +17,7 @@
"lint": "yarn lint:fix && yarn lint:check",
"pre-commit": "lint-staged",
"test": "ts-mocha test/*.spec.ts",
"test:coverage": "echo 'no coverage'"
"test:coverage": "nyc ts-mocha test/*.spec.ts && nyc merge .nyc_output coverage.json"
},
"keywords": [
"optimism",
......@@ -48,8 +48,7 @@
"pino": "^6.11.3",
"pino-multi-stream": "^5.3.0",
"pino-sentry": "^0.7.0",
"prom-client": "^13.1.0",
"qs": "^6.10.5"
"prom-client": "^13.1.0"
},
"devDependencies": {
"@ethersproject/abstract-provider": "^5.7.0",
......
......@@ -3,11 +3,11 @@ import request from 'supertest'
import chai = require('chai')
const expect = chai.expect
import { Logger, Metrics, createMetricsServer } from '../src'
import { Logger, LegacyMetrics, createMetricsServer } from '../src'
describe('Metrics', () => {
it('should serve metrics', async () => {
const metrics = new Metrics({
const metrics = new LegacyMetrics({
prefix: 'test_metrics',
})
const registry = metrics.registry
......
......@@ -155,6 +155,33 @@ contract SystemDictator is OwnableUpgradeable {
currentStep++;
}
/**
* @notice Constructor required to ensure that the implementation of the SystemDictator is
* initialized upon deployment.
*/
constructor() {
// Using this shorter variable as an alias for address(0) just prevents us from having to
// use a new line for every single parameter.
address zero = address(0);
initialize(
DeployConfig(
GlobalConfig(AddressManager(zero), ProxyAdmin(zero), zero, zero),
ProxyAddressConfig(zero, zero, zero, zero, zero, zero, zero),
ImplementationAddressConfig(
L2OutputOracle(zero),
OptimismPortal(payable(zero)),
L1CrossDomainMessenger(zero),
L1StandardBridge(payable(zero)),
OptimismMintableERC20Factory(zero),
L1ERC721Bridge(zero),
PortalSender(zero),
SystemConfig(zero)
),
SystemConfigConfig(zero, 0, 0, bytes32(0), 0, zero)
)
);
}
/**
* @param _config System configuration.
*/
......
......@@ -139,16 +139,20 @@ contract Bytes_slice_Test is Test {
vm.assume(_length <= _input.length - _start);
// Grab the free memory pointer before the slice operation
uint256 initPtr;
uint64 initPtr;
assembly {
initPtr := mload(0x40)
}
uint64 expectedPtr = uint64(initPtr + 0x20 + ((_length + 0x1f) & ~uint256(0x1f)));
// Ensure that all memory outside of the expected range is safe.
vm.expectSafeMemory(initPtr, expectedPtr);
// Slice the input bytes array from `_start` to `_start + _length`
bytes memory slice = Bytes.slice(_input, _start, _length);
// Grab the free memory pointer after the slice operation
uint256 finalPtr;
uint64 finalPtr;
assembly {
finalPtr := mload(0x40)
}
......@@ -165,10 +169,11 @@ contract Bytes_slice_Test is Test {
// Note that we use a slightly less efficient, but equivalent method of rounding
// up `_length` to the next multiple of 32 than is used in the `slice` function.
// This is to diff test the method used in `slice`.
assertEq(finalPtr, initPtr + 0x20 + (((_length + 0x1F) >> 5) << 5));
uint64 _expectedPtr = uint64(initPtr + 0x20 + (((_length + 0x1F) >> 5) << 5));
assertEq(finalPtr, _expectedPtr);
// Sanity check for equivalence of the rounding methods.
assertEq(((_length + 0x1F) >> 5) << 5, (_length + 0x1F) & ~uint256(0x1F));
assertEq(_expectedPtr, expectedPtr);
}
// The slice length should be equal to `_length`
......
......@@ -12,7 +12,7 @@
"l2OutputOracleStartingTimestamp": -1,
"l2OutputOracleProposer": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8",
"l2OutputOracleChallenger": "0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65",
"l2GenesisBlockGasLimit": "0xE4E1C0",
"l2GenesisBlockGasLimit": "0x1c9c380",
"l1BlockTime": 3,
"cliqueSignerAddress": "0xca062b0fd91172d89bcd4bb084ac4e21972cc467",
"baseFeeVaultRecipient": "0xBcd4042DE499D14e55001CcbB24a551F3b954096",
......
......@@ -28,7 +28,7 @@
"governanceTokenSymbol": "OP",
"governanceTokenOwner": "0x038a8825A3C3B0c08d52Cc76E5E361953Cf6Dc76",
"l2GenesisBlockGasLimit": "0x17D7840",
"l2GenesisBlockGasLimit": "0x1c9c380",
"l2GenesisBlockCoinbase": "0x4200000000000000000000000000000000000011",
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
......
......@@ -40,7 +40,7 @@
"governanceTokenName": "Optimism",
"governanceTokenOwner": "ADMIN",
"l2GenesisBlockGasLimit": "0x17D7840",
"l2GenesisBlockGasLimit": "0x1c9c380",
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
"l2GenesisRegolithTimeOffset": "0x0",
......
......@@ -77,7 +77,7 @@
"dotenv": "^16.0.0",
"ds-test": "https://github.com/dapphub/ds-test.git#9310e879db8ba3ea6d5c6489a579118fd264a3f5",
"ethereum-waffle": "^3.0.0",
"forge-std": "https://github.com/foundry-rs/forge-std.git#fd86115ed6aba8e234ee0fb86c12fe35eff0b2a0",
"forge-std": "https://github.com/foundry-rs/forge-std.git#46264e9788017fc74f9f58b7efa0bc6e1df6d410",
"glob": "^7.1.6",
"hardhat-deploy": "^0.11.4",
"solhint": "^3.3.7",
......
......@@ -22,6 +22,7 @@
"scripts": {
"build": "yarn build:contracts && yarn copy:contracts && yarn autogen:artifacts && yarn build:typescript",
"build:typescript": "tsc -p ./tsconfig.json",
"build:bindings": "./scripts/legacy-bindings.sh ./../../op-bindings/legacy-bindings",
"build:contracts": "hardhat compile --show-stack-traces",
"autogen:markdown": "ts-node scripts/generate-markdown.ts",
"autogen:artifacts": "ts-node scripts/generate-artifacts.ts && ts-node scripts/generate-deployed-artifacts.ts",
......
#!/bin/bash
OUTDIR="$1"
if [ ! -d "$OUTDIR" ]; then
echo "Must pass output directory"
exit 1
fi
CONTRACTS=("CanonicalTransactionChain")
PKG=legacy_bindings
for contract in ${CONTRACTS[@]}; do
TMPFILE=$(mktemp)
npx hardhat inspect $contract bytecode > "$TMPFILE"
ABI=$(npx hardhat inspect $contract abi)
outfile="$OUTDIR/$contract.go"
echo "$ABI" | abigen --abi - --pkg "$PKG" --bin "$TMPFILE" --type $contract --out "$outfile"
rm "$TMPFILE"
done
......@@ -11471,14 +11471,14 @@ forever-agent@~0.6.1:
resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
integrity sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=
"forge-std@https://github.com/foundry-rs/forge-std.git#46264e9788017fc74f9f58b7efa0bc6e1df6d410":
version "1.5.2"
resolved "https://github.com/foundry-rs/forge-std.git#46264e9788017fc74f9f58b7efa0bc6e1df6d410"
"forge-std@https://github.com/foundry-rs/forge-std.git#53331f4cb2e313466f72440f3e73af048c454d02":
version "1.2.0"
resolved "https://github.com/foundry-rs/forge-std.git#53331f4cb2e313466f72440f3e73af048c454d02"
"forge-std@https://github.com/foundry-rs/forge-std.git#fd86115ed6aba8e234ee0fb86c12fe35eff0b2a0":
version "1.4.0"
resolved "https://github.com/foundry-rs/forge-std.git#fd86115ed6aba8e234ee0fb86c12fe35eff0b2a0"
form-data@^2.2.0:
version "2.5.1"
resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.5.1.tgz#f2cbec57b5e59e23716e128fe44d4e5dd23895f4"
......@@ -17871,13 +17871,6 @@ qs@^6.10.3, qs@^6.9.4:
dependencies:
side-channel "^1.0.4"
qs@^6.10.5:
version "6.10.5"
resolved "https://registry.yarnpkg.com/qs/-/qs-6.10.5.tgz#974715920a80ff6a262264acd2c7e6c2a53282b4"
integrity sha512-O5RlPh0VFtR78y79rgcgKK4wbAI0C5zGVLztOIdpWX6ep368q5Hv6XRxDvXuZ9q3C6v+e3n8UfZZJw7IIG27eQ==
dependencies:
side-channel "^1.0.4"
qs@^6.4.0, qs@^6.7.0:
version "6.10.1"
resolved "https://registry.yarnpkg.com/qs/-/qs-6.10.1.tgz#4931482fa8d647a5aab799c5271d2133b981fb6a"
......