Commit 9f795d06 authored by Andreas Bigger

Merge branch 'develop' into refcell/feat/txcreation

parents 3922f5f2 aeda9d95
---
'@eth-optimism/contracts-bedrock': patch
---
Added a constructor to the System Dictator
...@@ -518,6 +518,10 @@ jobs:
patterns: packages
# Note: The below needs to be manually configured whenever we
# add a new package to CI.
- run:
name: Check common-ts
command: npx depcheck
working_directory: packages/common-ts
- run:
name: Check contracts
command: npx depcheck
...
...@@ -166,6 +166,7 @@ module.exports = {
children: [
"/docs/build/tutorials/add-attr.md",
"/docs/build/tutorials/new-precomp.md",
"/docs/build/tutorials/predeploys.md"
]
} // End of tutorials
],
...
...@@ -32,6 +32,11 @@
<i class="fab fa-discord"></i> Discord community
</a>
</li>
<li>
<a href="https://wkf.ms/3XTdpLl" target="_blank" rel="noopener noreferrer">
<i class="far fa-comment-dots"></i> Get support for going live
</a>
</li>
</ul>
</div>
</div>
...
---
title: Modifying Predeployed Contracts
lang: en-US
---
::: warning 🚧 OP Stack Hacks are explicitly things that you can do with the OP Stack that are *not* currently intended for production use
OP Stack Hacks are not for the faint of heart. You will not be able to receive significant developer support for OP Stack Hacks — be prepared to get your hands dirty and to work without support.
:::
OP Stack blockchains have a number of [predeployed contracts](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/src/constants.ts) that provide important functionality.
Most of those contracts are proxies that can be upgraded using the `proxyAdminOwner` account that was configured when the network was initially deployed.
The predeploys are controlled from a predeploy called [`ProxyAdmin`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol), whose address is `0x4200000000000000000000000000000000000018`.
The function to call is [`upgrade(address,address)`](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/ProxyAdmin.sol#L211-L229).
The first parameter is the proxy to upgrade, and the second is the address of a new implementation.
For example, the legacy `L1BlockNumber` contract is at `0x420...013`.
To disable this contract, we'll set its implementation to the zero address (`0x00...00`).
We do this using the [Foundry](https://book.getfoundry.sh/) command `cast`.
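In other words, an upgrade is a single transaction from the admin account to the `ProxyAdmin` predeploy. As a rough sketch (here `$PROXY` and `$NEW_IMPL` are placeholders; the numbered steps below use the real values):
```sh
# Sketch only: general shape of a predeploy upgrade through the ProxyAdmin
# predeploy. $PROXY is the proxy being upgraded and $NEW_IMPL is the new
# implementation address; both are placeholders here.
cast send --private-key $PRIVKEY \
  0x4200000000000000000000000000000000000018 \
  "upgrade(address,address)" $PROXY $NEW_IMPL
```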
1. We'll need several constants.
- Set these addresses as variables in your terminal.
```sh
L1BLOCKNUM=0x4200000000000000000000000000000000000013
PROXY_ADMIN=0x4200000000000000000000000000000000000018
ZERO_ADDR=0x0000000000000000000000000000000000000000
```
- Set `PRIVKEY` to the private key of your ADMIN account.
- Set `ETH_RPC_URL`. If you're on the computer that runs the blockchain, use this command.
```sh
export ETH_RPC_URL=http://localhost:8545
```
1. Verify `L1BlockNumber` works correctly.
See that when you call the contract you get a block number, and twelve seconds later you get the next one (block time on L1 is twelve seconds).
```sh
cast call $L1BLOCKNUM 'number()' | cast --to-dec
sleep 12 && cast call $L1BLOCKNUM 'number()' | cast --to-dec
```
1. Get the current implementation for the contract.
```sh
# implementation() returns a full 32-byte ABI-encoded word; the sed strips the
# 24 hex characters (12 bytes) of zero padding to leave a plain 20-byte address.
L1BLOCKNUM_IMPLEMENTATION=`cast call $L1BLOCKNUM "implementation()" | sed 's/000000000000000000000000//'`
echo $L1BLOCKNUM_IMPLEMENTATION
```
1. Change the implementation to the zero address.
```sh
cast send --private-key $PRIVKEY $PROXY_ADMIN "upgrade(address,address)" $L1BLOCKNUM $ZERO_ADDR
```
1. See that the implementation is address zero, and that calling it fails.
```sh
cast call $L1BLOCKNUM 'implementation()'
cast call $L1BLOCKNUM 'number()'
```
1. Fix the predeploy by returning it to the previous implementation, and verify it works.
```sh
cast send --private-key $PRIVKEY $PROXY_ADMIN "upgrade(address,address)" $L1BLOCKNUM $L1BLOCKNUM_IMPLEMENTATION
cast call $L1BLOCKNUM 'number()' | cast --to-dec
```
\ No newline at end of file
package ether
import (
"fmt"
"math/big"
"math/rand"
"testing"
"github.com/ethereum-optimism/optimism/op-chain-ops/util"
"github.com/ethereum-optimism/optimism/op-bindings/predeploys"
"github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
...@@ -190,7 +191,7 @@ func TestMigrateBalances(t *testing.T) {
}
}
func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Address]*big.Int, allowances map[common.Address]common.Address) (*state.StateDB, DBFactory) {
func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Address]*big.Int, allowances map[common.Address]common.Address) (*state.StateDB, util.DBFactory) {
memDB := rawdb.NewMemoryDatabase()
db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
...@@ -283,85 +284,6 @@ func TestMigrateBalancesRandomMissing(t *testing.T) {
}
}
func TestPartitionKeyspace(t *testing.T) {
tests := []struct {
i int
count int
expected [2]common.Hash
}{
{
i: 0,
count: 1,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 1,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
},
},
{
i: 1,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
},
{
i: 2,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
}
for _, tt := range tests {
t.Run(fmt.Sprintf("i %d, count %d", tt.i, tt.count), func(t *testing.T) {
start, end := PartitionKeyspace(tt.i, tt.count)
require.Equal(t, tt.expected[0], start)
require.Equal(t, tt.expected[1], end)
})
}
t.Run("panics on invalid i or count", func(t *testing.T) {
require.Panics(t, func() {
PartitionKeyspace(1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(0, -1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, -1)
})
})
}
func randAddr(t *testing.T) common.Address {
var addr common.Address
_, err := rand.Read(addr[:])
...
package util
import (
"fmt"
"math/big"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
var (
// maxSlot is the maximum possible storage slot.
maxSlot = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
)
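// DBFactory returns a fresh StateDB over the same underlying database. Each
// iteration worker calls it to get its own, independent view of state.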
type DBFactory func() (*state.StateDB, error)
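// StateCallback is invoked once per non-empty storage slot visited by
// IterateState, with the preimage of the slot key and its decoded value.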
type StateCallback func(db *state.StateDB, key, value common.Hash) error
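// IterateState walks the storage of the given account across `workers`
// parallel partitions of the keyspace (see PartitionKeyspace) and calls cb for
// each slot. It panics if workers <= 0 and returns the first error reported by
// any worker.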
func IterateState(dbFactory DBFactory, address common.Address, cb StateCallback, workers int) error {
if workers <= 0 {
panic("workers must be greater than 0")
}
// WaitGroup to wait for all workers to finish.
var wg sync.WaitGroup
// Channel to receive errors from each iteration job.
errCh := make(chan error, workers)
// Channel to cancel all iteration jobs.
cancelCh := make(chan struct{})
worker := func(start, end common.Hash) {
// Decrement the WaitGroup when the function returns.
defer wg.Done()
db, err := dbFactory()
if err != nil {
// Should never happen, so explode if it does.
log.Crit("cannot create state db", "err", err)
}
st, err := db.StorageTrie(address)
if err != nil {
// Should never happen, so explode if it does.
log.Crit("cannot get storage trie", "address", address, "err", err)
}
// st can be nil if the account doesn't exist.
if st == nil {
errCh <- fmt.Errorf("account does not exist: %s", address.Hex())
return
}
it := trie.NewIterator(st.NodeIterator(start.Bytes()))
// Below code is largely based on db.ForEachStorage. We can't use that
// because it doesn't allow us to specify a start and end key.
for it.Next() {
select {
case <-cancelCh:
// If one of the workers encounters an error, cancel all of them.
return
default:
break
}
// Use the raw (i.e., secure hashed) key to check if we've reached
// the end of the partition. Use > rather than >= here to account for
// the fact that the values returned by PartitionKeys are inclusive.
// Duplicate addresses that may be returned by this iteration are
// filtered out in the collector.
if new(big.Int).SetBytes(it.Key).Cmp(end.Big()) > 0 {
return
}
// Skip if the value is empty.
rawValue := it.Value
if len(rawValue) == 0 {
continue
}
// Get the preimage.
rawKey := st.GetKey(it.Key)
if rawKey == nil {
// Should never happen, so explode if it does.
log.Crit("cannot get preimage for storage key", "key", it.Key)
}
key := common.BytesToHash(rawKey)
// Parse the raw value.
_, content, _, err := rlp.Split(rawValue)
if err != nil {
// Should never happen, so explode if it does.
log.Crit("mal-formed data in state: %v", err)
}
value := common.BytesToHash(content)
// Call the callback with the DB, key, and value. Errors get
// bubbled up to the errCh.
if err := cb(db, key, value); err != nil {
errCh <- err
return
}
}
}
for i := 0; i < workers; i++ {
wg.Add(1)
// Partition the keyspace per worker.
start, end := PartitionKeyspace(i, workers)
// Kick off our worker.
go worker(start, end)
}
wg.Wait()
for len(errCh) > 0 {
err := <-errCh
if err != nil {
return err
}
}
return nil
}
// PartitionKeyspace divides the key space into partitions by dividing the maximum keyspace
// by count then multiplying by i. This will leave some slots left over, which we handle below. It
// returns the start and end keys for the partition as a common.Hash. Note that the returned range
// of keys is inclusive, i.e., [start, end] NOT [start, end).
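// For example, count = 3 yields the inclusive partitions [0x00, 0x55..55],
// [0x55..55, 0xaa..aa], and [0xaa..aa, 0xff..ff]; adjacent partitions share
// their boundary slot, and any resulting duplicates are left to callers to
// filter out.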
func PartitionKeyspace(i int, count int) (common.Hash, common.Hash) {
if i < 0 || count < 0 {
panic("i and count must be greater than 0")
}
if i > count-1 {
panic("i must be less than count - 1")
}
// Divide the key space into partitions by dividing the key space by the number
// of jobs. This will leave some slots left over, which we handle below.
partSize := new(big.Int).Div(maxSlot.Big(), big.NewInt(int64(count)))
start := common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i)), partSize))
var end common.Hash
if i < count-1 {
// If this is not the last partition, use the next partition's start key as the end.
end = common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i+1)), partSize))
} else {
// If this is the last partition, use the max slot as the end.
end = maxSlot
}
return start, end
}
package util
import (
crand "crypto/rand"
"fmt"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/trie"
"github.com/stretchr/testify/require"
)
var testAddr = common.Address{0: 0xff}
func TestStateIteratorWorkers(t *testing.T) {
_, factory, _ := setupRandTest(t)
for i := -1; i <= 0; i++ {
require.Panics(t, func() {
_ = IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
return nil
}, i)
})
}
}
func TestStateIteratorNonexistentAccount(t *testing.T) {
_, factory, _ := setupRandTest(t)
require.ErrorContains(t, IterateState(factory, common.Address{}, func(db *state.StateDB, key, value common.Hash) error {
return nil
}, 1), "account does not exist")
}
func TestStateIteratorRandomOK(t *testing.T) {
for i := 0; i < 100; i++ {
hashes, factory, workerCount := setupRandTest(t)
seenHashes := make(map[common.Hash]bool)
hashCh := make(chan common.Hash)
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
for hash := range hashCh {
seenHashes[hash] = true
}
}()
require.NoError(t, IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
hashCh <- key
return nil
}, workerCount))
close(hashCh)
<-doneCh
// Perform a less or equal check here in case of duplicates. The map check below will assert
// that all of the hashes are accounted for.
require.LessOrEqual(t, len(seenHashes), len(hashes))
// Every hash we put into state should have been iterated over.
for _, hash := range hashes {
require.Contains(t, seenHashes, hash)
}
}
}
func TestStateIteratorRandomError(t *testing.T) {
for i := 0; i < 100; i++ {
hashes, factory, workerCount := setupRandTest(t)
failHash := hashes[rand.Intn(len(hashes))]
require.ErrorContains(t, IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
if key == failHash {
return fmt.Errorf("test error")
}
return nil
}, workerCount), "test error")
}
}
func TestPartitionKeyspace(t *testing.T) {
tests := []struct {
i int
count int
expected [2]common.Hash
}{
{
i: 0,
count: 1,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 1,
count: 2,
expected: [2]common.Hash{
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
{
i: 0,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x00"),
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
},
},
{
i: 1,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
},
},
{
i: 2,
count: 3,
expected: [2]common.Hash{
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
},
},
}
for _, tt := range tests {
t.Run(fmt.Sprintf("i %d, count %d", tt.i, tt.count), func(t *testing.T) {
start, end := PartitionKeyspace(tt.i, tt.count)
require.Equal(t, tt.expected[0], start)
require.Equal(t, tt.expected[1], end)
})
}
t.Run("panics on invalid i or count", func(t *testing.T) {
require.Panics(t, func() {
PartitionKeyspace(1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, 1)
})
require.Panics(t, func() {
PartitionKeyspace(0, -1)
})
require.Panics(t, func() {
PartitionKeyspace(-1, -1)
})
})
}
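// setupRandTest commits a random number of storage slots under testAddr to an
// in-memory StateDB and returns the slot keys, a DBFactory over the committed
// root, and a random worker count.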
func setupRandTest(t *testing.T) ([]common.Hash, DBFactory, int) {
memDB := rawdb.NewMemoryDatabase()
db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
require.NoError(t, err)
hashCount := rand.Intn(100)
if hashCount == 0 {
hashCount = 1
}
hashes := make([]common.Hash, hashCount)
db.CreateAccount(testAddr)
for j := 0; j < hashCount; j++ {
hashes[j] = randHash(t)
db.SetState(testAddr, hashes[j], hashes[j])
}
root, err := db.Commit(false)
require.NoError(t, err)
err = db.Database().TrieDB().Commit(root, true)
require.NoError(t, err)
factory := func() (*state.StateDB, error) {
return state.New(root, state.NewDatabaseWithConfig(memDB, &trie.Config{
Preimages: true,
Cache: 1024,
}), nil)
}
workerCount := rand.Intn(64)
if workerCount == 0 {
workerCount = 1
}
return hashes, factory, workerCount
}
func randHash(t *testing.T) common.Hash {
var h common.Hash
_, err := crand.Read(h[:])
require.NoError(t, err)
return h
}
ignores: [
"@babel/eslint-parser",
"@typescript-eslint/parser",
"eslint-plugin-import",
"eslint-plugin-unicorn",
"eslint-plugin-jsdoc",
"eslint-plugin-prefer-arrow",
"eslint-plugin-react",
"@typescript-eslint/eslint-plugin",
"eslint-config-prettier",
"eslint-plugin-prettier"
]
...@@ -17,7 +17,7 @@
"lint": "yarn lint:fix && yarn lint:check",
"pre-commit": "lint-staged",
"test": "ts-mocha test/*.spec.ts",
"test:coverage": "echo 'no coverage'"
"test:coverage": "nyc ts-mocha test/*.spec.ts && nyc merge .nyc_output coverage.json"
},
"keywords": [
"optimism",
...@@ -48,8 +48,7 @@
"pino": "^6.11.3",
"pino-multi-stream": "^5.3.0",
"pino-sentry": "^0.7.0",
"prom-client": "^13.1.0",
"prom-client": "^13.1.0"
"qs": "^6.10.5"
},
"devDependencies": {
"@ethersproject/abstract-provider": "^5.7.0",
...
...@@ -3,11 +3,11 @@ import request from 'supertest'
import chai = require('chai')
const expect = chai.expect
import { Logger, Metrics, createMetricsServer } from '../src'
import { Logger, LegacyMetrics, createMetricsServer } from '../src'
describe('Metrics', () => {
it('shoud serve metrics', async () => {
const metrics = new Metrics({
const metrics = new LegacyMetrics({
prefix: 'test_metrics',
})
const registry = metrics.registry
...
...@@ -155,6 +155,33 @@ contract SystemDictator is OwnableUpgradeable {
currentStep++;
}
/**
* @notice Constructor required to ensure that the implementation of the SystemDictator is
* initialized upon deployment.
*/
constructor() {
// Using this shorter variable as an alias for address(0) just prevents us from having
// to use a new line for every single parameter.
address zero = address(0);
initialize(
DeployConfig(
GlobalConfig(AddressManager(zero), ProxyAdmin(zero), zero, zero),
ProxyAddressConfig(zero, zero, zero, zero, zero, zero, zero),
ImplementationAddressConfig(
L2OutputOracle(zero),
OptimismPortal(payable(zero)),
L1CrossDomainMessenger(zero),
L1StandardBridge(payable(zero)),
OptimismMintableERC20Factory(zero),
L1ERC721Bridge(zero),
PortalSender(zero),
SystemConfig(zero)
),
SystemConfigConfig(zero, 0, 0, bytes32(0), 0, zero)
)
);
}
/**
* @param _config System configuration.
*/
...
...@@ -12,7 +12,7 @@
"l2OutputOracleStartingTimestamp": -1,
"l2OutputOracleProposer": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8",
"l2OutputOracleChallenger": "0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65",
"l2GenesisBlockGasLimit": "0xE4E1C0",
"l2GenesisBlockGasLimit": "0x1c9c380",
"l1BlockTime": 3,
"cliqueSignerAddress": "0xca062b0fd91172d89bcd4bb084ac4e21972cc467",
"baseFeeVaultRecipient": "0xBcd4042DE499D14e55001CcbB24a551F3b954096",
...
...@@ -28,7 +28,7 @@
"governanceTokenSymbol": "OP",
"governanceTokenOwner": "0x038a8825A3C3B0c08d52Cc76E5E361953Cf6Dc76",
"l2GenesisBlockGasLimit": "0x17D7840",
"l2GenesisBlockGasLimit": "0x1c9c380",
"l2GenesisBlockCoinbase": "0x4200000000000000000000000000000000000011",
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
...
...@@ -40,7 +40,7 @@
"governanceTokenName": "Optimism",
"governanceTokenOwner": "ADMIN",
"l2GenesisBlockGasLimit": "0x17D7840",
"l2GenesisBlockGasLimit": "0x1c9c380",
"l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
"l2GenesisRegolithTimeOffset": "0x0",
...
...@@ -22,6 +22,7 @@
"scripts": {
"build": "yarn build:contracts && yarn copy:contracts && yarn autogen:artifacts && yarn build:typescript",
"build:typescript": "tsc -p ./tsconfig.json",
"build:bindings": "./scripts/legacy-bindings.sh ./../../op-bindings/legacy-bindings",
"build:contracts": "hardhat compile --show-stack-traces",
"autogen:markdown": "ts-node scripts/generate-markdown.ts",
"autogen:artifacts": "ts-node scripts/generate-artifacts.ts && ts-node scripts/generate-deployed-artifacts.ts",
...
#!/bin/bash

# Generates Go bindings for legacy contracts and writes them into the output
# directory given as the first argument.
OUTDIR="$1"
if [ ! -d "$OUTDIR" ]; then
  echo "Must pass output directory"
  exit 1
fi

# Contracts to generate bindings for, and the Go package name for the output.
CONTRACTS=("CanonicalTransactionChain")
PKG=legacy_bindings

for contract in "${CONTRACTS[@]}"; do
  # Dump the contract's bytecode to a temp file and capture its ABI, then feed
  # both to abigen to produce the Go bindings.
  TMPFILE=$(mktemp)
  npx hardhat inspect "$contract" bytecode > "$TMPFILE"
  ABI=$(npx hardhat inspect "$contract" abi)
  outfile="$OUTDIR/$contract.go"
  echo "$ABI" | abigen --abi - --pkg "$PKG" --bin "$TMPFILE" --type "$contract" --out "$outfile"
  rm "$TMPFILE"
done
...@@ -17871,13 +17871,6 @@ qs@^6.10.3, qs@^6.9.4:
dependencies:
side-channel "^1.0.4"
qs@^6.10.5:
version "6.10.5"
resolved "https://registry.yarnpkg.com/qs/-/qs-6.10.5.tgz#974715920a80ff6a262264acd2c7e6c2a53282b4"
integrity sha512-O5RlPh0VFtR78y79rgcgKK4wbAI0C5zGVLztOIdpWX6ep368q5Hv6XRxDvXuZ9q3C6v+e3n8UfZZJw7IIG27eQ==
dependencies:
side-channel "^1.0.4"
qs@^6.4.0, qs@^6.7.0:
version "6.10.1"
resolved "https://registry.yarnpkg.com/qs/-/qs-6.10.1.tgz#4931482fa8d647a5aab799c5271d2133b981fb6a"
...