exchain / nebula
Commit 33d10cd0 (unverified)
Authored Mar 21, 2023 by mergify[bot]; committed by GitHub on Mar 21, 2023
Merge branch 'develop' into sc/cmn-test-coverage

Parents: 3c580424, 822f588a
Showing 8 changed files with 497 additions and 322 deletions (+497, -322)
.changeset/cuddly-turkeys-burn.md                                     +5    -0
.changeset/pink-chicken-hear.md                                       +5    -0
op-chain-ops/ether/migrate.go                                         +82   -238
op-chain-ops/ether/migrate_test.go                                    +3    -81
op-chain-ops/util/state_iterator.go                                   +161  -0
op-chain-ops/util/state_iterator_test.go                              +211  -0
packages/contracts-bedrock/contracts/deployment/SystemDictator.sol    +27   -0
packages/sdk/src/utils/contracts.ts                                   +3    -3
.changeset/cuddly-turkeys-burn.md (new file, mode 100644)

---
'@eth-optimism/sdk': patch
---

Have the SDK automatically create Standard and ETH bridges when L1StandardBridge is provided.
.changeset/pink-chicken-hear.md (new file, mode 100644)

---
'@eth-optimism/contracts-bedrock': patch
---

Added a constructor to the SystemDictator.
op-chain-ops/ether/migrate.go
...
...
@@ -3,10 +3,6 @@ package ether
 import (
     "fmt"
     "math/big"
-    "sync"
-
-    "github.com/ethereum/go-ethereum/rlp"
-    "github.com/ethereum/go-ethereum/trie"
 
     "github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
     "github.com/ethereum-optimism/optimism/op-chain-ops/util"
...
...
@@ -46,9 +42,6 @@ var (
common
.
HexToHash
(
"0x0000000000000000000000000000000000000000000000000000000000000006"
)
:
true
,
}
// maxSlot is the maximum possible storage slot.
maxSlot
=
common
.
HexToHash
(
"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
)
// sequencerEntrypointAddr is the address of the OVM sequencer entrypoint contract.
sequencerEntrypointAddr
=
common
.
HexToAddress
(
"0x4200000000000000000000000000000000000005"
)
)
...
...
@@ -61,11 +54,9 @@ type accountData struct {
     address common.Address
 }
 
-type DBFactory func() (*state.StateDB, error)
-
 // MigrateBalances migrates all balances in the LegacyERC20ETH contract into state. It performs checks
 // in parallel with mutations in order to reduce overall migration time.
-func MigrateBalances(mutableDB *state.StateDB, dbFactory DBFactory, addresses []common.Address, allowances []*crossdomain.Allowance, chainID int, noCheck bool) error {
+func MigrateBalances(mutableDB *state.StateDB, dbFactory util.DBFactory, addresses []common.Address, allowances []*crossdomain.Allowance, chainID int, noCheck bool) error {
     // Chain params to use for integrity checking.
     params := crossdomain.ParamsByChainID[chainID]
     if params == nil {
// Chain params to use for integrity checking.
params
:=
crossdomain
.
ParamsByChainID
[
chainID
]
if
params
==
nil
{
...
...
@@ -75,7 +66,7 @@ func MigrateBalances(mutableDB *state.StateDB, dbFactory DBFactory, addresses []
return
doMigration
(
mutableDB
,
dbFactory
,
addresses
,
allowances
,
params
.
ExpectedSupplyDelta
,
noCheck
)
}
func
doMigration
(
mutableDB
*
state
.
StateDB
,
dbFactory
DBFactory
,
addresses
[]
common
.
Address
,
allowances
[]
*
crossdomain
.
Allowance
,
expDiff
*
big
.
Int
,
noCheck
bool
)
error
{
func
doMigration
(
mutableDB
*
state
.
StateDB
,
dbFactory
util
.
DBFactory
,
addresses
[]
common
.
Address
,
allowances
[]
*
crossdomain
.
Allowance
,
expDiff
*
big
.
Int
,
noCheck
bool
)
error
{
// We'll need to maintain a list of all addresses that we've seen along with all of the storage
// slots based on the witness data.
slotsAddrs
:=
make
(
map
[
common
.
Hash
]
common
.
Address
)
...
...
@@ -103,226 +94,109 @@ func doMigration(mutableDB *state.StateDB, dbFactory DBFactory, addresses []comm
slotsAddrs
[
entrySK
]
=
sequencerEntrypointAddr
slotsInp
[
entrySK
]
=
BalanceSlot
// WaitGroup to wait on each iteration job to finish.
var
wg
sync
.
WaitGroup
// Channel to receive storage slot keys and values from each iteration job.
outCh
:=
make
(
chan
accountData
)
// Channel to receive errors from each iteration job.
errCh
:=
make
(
chan
error
,
checkJobs
)
// Channel to cancel all iteration jobs.
cancelCh
:=
make
(
chan
struct
{})
// Define a worker function to iterate over each partition.
worker
:=
func
(
start
,
end
common
.
Hash
)
{
// Decrement the WaitGroup when the function returns.
defer
wg
.
Done
()
db
,
err
:=
dbFactory
()
if
err
!=
nil
{
log
.
Crit
(
"cannot get database"
,
"err"
,
err
)
}
// Create a new storage trie. Each trie returned by db.StorageTrie
// is a copy, so this is safe for concurrent use.
st
,
err
:=
db
.
StorageTrie
(
predeploys
.
LegacyERC20ETHAddr
)
if
err
!=
nil
{
// Should never happen, so explode if it does.
log
.
Crit
(
"cannot get storage trie for LegacyERC20ETHAddr"
,
"err"
,
err
)
}
if
st
==
nil
{
// Should never happen, so explode if it does.
log
.
Crit
(
"nil storage trie for LegacyERC20ETHAddr"
)
}
it
:=
trie
.
NewIterator
(
st
.
NodeIterator
(
start
.
Bytes
()))
// Below code is largely based on db.ForEachStorage. We can't use that
// because it doesn't allow us to specify a start and end key.
for
it
.
Next
()
{
select
{
case
<-
cancelCh
:
// If one of the workers encounters an error, cancel all of them.
return
default
:
break
}
// Channel that gets closed when the collector is done.
doneCh
:=
make
(
chan
struct
{})
// Use the raw (i.e., secure hashed) key to check if we've reached
// the end of the partition. Use > rather than >= here to account for
// the fact that the values returned by PartitionKeys are inclusive.
// Duplicate addresses that may be returned by this iteration are
// filtered out in the collector.
if
new
(
big
.
Int
)
.
SetBytes
(
it
.
Key
)
.
Cmp
(
end
.
Big
())
>
0
{
return
}
// Create a map of accounts we've seen so that we can filter out duplicates.
seenAccounts
:=
make
(
map
[
common
.
Address
]
bool
)
// Skip if the value is empty.
rawValue
:=
it
.
Value
if
len
(
rawValue
)
==
0
{
continue
}
// Keep track of the total migrated supply.
totalFound
:=
new
(
big
.
Int
)
// Get the preimage.
rawKey
:=
st
.
GetKey
(
it
.
Key
)
if
rawKey
==
nil
{
// Should never happen, so explode if it does.
log
.
Crit
(
"cannot get preimage for storage key"
,
"key"
,
it
.
Key
)
}
key
:=
common
.
BytesToHash
(
rawKey
)
// Kick off a background process to collect
// values from the channel and add them to the map.
var
count
int
progress
:=
util
.
ProgressLogger
(
1000
,
"Migrated OVM_ETH storage slot"
)
go
func
()
{
defer
func
()
{
doneCh
<-
struct
{}{}
}()
// Parse the raw value.
_
,
content
,
_
,
err
:=
rlp
.
Split
(
rawValue
)
if
err
!=
nil
{
// Should never happen, so explode if it does.
log
.
Crit
(
"mal-formed data in state: %v"
,
err
)
}
for
account
:=
range
outCh
{
progress
()
// We can safely ignore specific slots (totalSupply, name, symbol).
if
ignoredSlots
[
key
]
{
// Filter out duplicate accounts. See the below note about keyspace iteration for
// why we may have to filter out duplicates.
if
seenAccounts
[
account
.
address
]
{
log
.
Info
(
"skipping duplicate account during iteration"
,
"addr"
,
account
.
address
)
continue
}
slotType
,
ok
:=
slotsInp
[
key
]
if
!
ok
{
if
noCheck
{
log
.
Error
(
"ignoring unknown storage slot in state"
,
"slot"
,
key
.
String
())
}
else
{
errCh
<-
fmt
.
Errorf
(
"unknown storage slot in state: %s"
,
key
.
String
())
return
}
}
// No accounts should have a balance in state. If they do, bail.
addr
,
ok
:=
slotsAddrs
[
key
]
if
!
ok
{
log
.
Crit
(
"could not find address in map - should never happen"
)
}
bal
:=
db
.
GetBalance
(
addr
)
if
bal
.
Sign
()
!=
0
{
log
.
Error
(
"account has non-zero balance in state - should never happen"
,
"addr"
,
addr
,
"balance"
,
bal
.
String
(),
)
if
!
noCheck
{
errCh
<-
fmt
.
Errorf
(
"account has non-zero balance in state - should never happen: %s"
,
addr
.
String
())
return
}
}
// Accumulate addresses and total supply.
totalFound
=
new
(
big
.
Int
)
.
Add
(
totalFound
,
account
.
balance
)
// Add balances to the total found.
switch
slotType
{
case
BalanceSlot
:
// Convert the value to a common.Hash, then send to the channel.
value
:=
common
.
BytesToHash
(
content
)
outCh
<-
accountData
{
balance
:
value
.
Big
(),
legacySlot
:
key
,
address
:
addr
,
}
case
AllowanceSlot
:
// Allowance slot.
continue
default
:
// Should never happen.
if
noCheck
{
log
.
Error
(
"unknown slot type"
,
"slot"
,
key
,
"type"
,
slotType
)
}
else
{
log
.
Crit
(
"unknown slot type %d, should never happen"
,
slotType
)
}
}
mutableDB
.
SetBalance
(
account
.
address
,
account
.
balance
)
mutableDB
.
SetState
(
predeploys
.
LegacyERC20ETHAddr
,
account
.
legacySlot
,
common
.
Hash
{})
count
++
seenAccounts
[
account
.
address
]
=
true
}
}
for
i
:=
0
;
i
<
checkJobs
;
i
++
{
wg
.
Add
(
1
)
// Partition the keyspace per worker.
start
,
end
:=
PartitionKeyspace
(
i
,
checkJobs
)
// Kick off our worker.
go
worker
(
start
,
end
)
}
// Make a channel to track when collector process completes.
collectorClosedCh
:=
make
(
chan
struct
{})
// Make a channel to cancel the collector process.
collectorCancelCh
:=
make
(
chan
struct
{})
// Keep track of the last error seen.
var
lastErr
error
// The cancel channel can be closed if any of the workers returns an error.
// We wrap the close in a sync.Once to ensure that it's only closed once.
var
cancelOnce
sync
.
Once
}()
// Create a map of accounts we've seen so that we can filter out duplicates.
seenAccounts
:=
make
(
map
[
common
.
Address
]
bool
)
err
:=
util
.
IterateState
(
dbFactory
,
predeploys
.
LegacyERC20ETHAddr
,
func
(
db
*
state
.
StateDB
,
key
,
value
common
.
Hash
)
error
{
// We can safely ignore specific slots (totalSupply, name, symbol).
if
ignoredSlots
[
key
]
{
return
nil
}
// Keep track of the total migrated supply.
totalFound
:=
new
(
big
.
Int
)
slotType
,
ok
:=
slotsInp
[
key
]
if
!
ok
{
log
.
Error
(
"unknown storage slot in state"
,
"slot"
,
key
.
String
())
if
!
noCheck
{
return
fmt
.
Errorf
(
"unknown storage slot in state: %s"
,
key
.
String
())
}
}
// Kick off another background process to collect
// values from the channel and add them to the map.
var
count
int
progress
:=
util
.
ProgressLogger
(
1000
,
"Migrated OVM_ETH storage slot"
)
go
func
()
{
defer
func
()
{
collectorClosedCh
<-
struct
{}{}
}()
for
{
select
{
case
account
:=
<-
outCh
:
progress
()
// Filter out duplicate accounts. See the below note about keyspace iteration for
// why we may have to filter out duplicates.
if
seenAccounts
[
account
.
address
]
{
log
.
Info
(
"skipping duplicate account during iteration"
,
"addr"
,
account
.
address
)
continue
}
// Accumulate addresses and total supply.
totalFound
=
new
(
big
.
Int
)
.
Add
(
totalFound
,
account
.
balance
)
mutableDB
.
SetBalance
(
account
.
address
,
account
.
balance
)
mutableDB
.
SetState
(
predeploys
.
LegacyERC20ETHAddr
,
account
.
legacySlot
,
common
.
Hash
{})
count
++
seenAccounts
[
account
.
address
]
=
true
case
err
:=
<-
errCh
:
cancelOnce
.
Do
(
func
()
{
close
(
cancelCh
)
lastErr
=
err
})
case
<-
collectorCancelCh
:
// Explicitly drain the error channel. Since the error channel is buffered, it's possible
// for the wg.Wait() call below to unblock and cancel this goroutine before the error gets
// processed by the case statement above.
for
len
(
errCh
)
>
0
{
err
:=
<-
errCh
if
lastErr
==
nil
{
lastErr
=
err
}
}
return
// No accounts should have a balance in state. If they do, bail.
addr
,
ok
:=
slotsAddrs
[
key
]
if
!
ok
{
log
.
Crit
(
"could not find address in map - should never happen"
)
}
bal
:=
db
.
GetBalance
(
addr
)
if
bal
.
Sign
()
!=
0
{
log
.
Error
(
"account has non-zero balance in state - should never happen"
,
"addr"
,
addr
,
"balance"
,
bal
.
String
(),
)
if
!
noCheck
{
return
fmt
.
Errorf
(
"account has non-zero balance in state - should never happen: %s"
,
addr
.
String
())
}
}
}()
// Wait for the workers to finish.
wg
.
Wait
()
// Add balances to the total found.
switch
slotType
{
case
BalanceSlot
:
// Send the data to the channel.
outCh
<-
accountData
{
balance
:
value
.
Big
(),
legacySlot
:
key
,
address
:
addr
,
}
case
AllowanceSlot
:
// Allowance slot. Do nothing here.
default
:
// Should never happen.
if
noCheck
{
log
.
Error
(
"unknown slot type"
,
"slot"
,
key
,
"type"
,
slotType
)
}
else
{
log
.
Crit
(
"unknown slot type %d, should never happen"
,
slotType
)
}
}
// Close the collector, and wait for it to finish.
close
(
collectorCancelCh
)
<-
collectorClosedCh
return
nil
},
checkJobs
)
// If we saw an error, return it.
if
lastErr
!=
nil
{
return
lastErr
if
err
!=
nil
{
return
err
}
// Close the outCh to cancel the collector. The collector will signal that it's done
// using doneCh. Any values waiting to be read from outCh will be read before the
// collector exits.
close
(
outCh
)
<-
doneCh
// Log how many slots were iterated over.
log
.
Info
(
"Iterated legacy balances"
,
"count"
,
count
)
...
...
@@ -368,33 +242,3 @@ func doMigration(mutableDB *state.StateDB, dbFactory DBFactory, addresses []comm
     return nil
 }
-
-// PartitionKeyspace divides the key space into partitions by dividing the maximum keyspace
-// by count then multiplying by i. This will leave some slots left over, which we handle below. It
-// returns the start and end keys for the partition as a common.Hash. Note that the returned range
-// of keys is inclusive, i.e., [start, end] NOT [start, end).
-func PartitionKeyspace(i int, count int) (common.Hash, common.Hash) {
-    if i < 0 || count < 0 {
-        panic("i and count must be greater than 0")
-    }
-
-    if i > count-1 {
-        panic("i must be less than count - 1")
-    }
-
-    // Divide the key space into partitions by dividing the key space by the number
-    // of jobs. This will leave some slots left over, which we handle below.
-    partSize := new(big.Int).Div(maxSlot.Big(), big.NewInt(int64(count)))
-
-    start := common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i)), partSize))
-    var end common.Hash
-    if i < count-1 {
-        // If this is not the last partition, use the next partition's start key as the end.
-        end = common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i+1)), partSize))
-    } else {
-        // If this is the last partition, use the max slot as the end.
-        end = maxSlot
-    }
-
-    return start, end
-}
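The net effect of the hunks above is that doMigration no longer owns a worker pool, a cancel channel, and a sync.Once; the parallel trie walk moves into util.IterateState (added below), and doMigration keeps only a collector goroutine fed through outCh. A minimal, self-contained Go sketch of that close-then-wait shutdown pattern (illustrative only, not repository code; the result type and values are made up):

package main

import "fmt"

// result stands in for the accountData values the real collector receives.
type result struct {
    addr    string
    balance int64
}

func main() {
    outCh := make(chan result)
    doneCh := make(chan struct{})

    // Collector: mirrors the goroutine in the new doMigration.
    var total int64
    go func() {
        defer func() { doneCh <- struct{}{} }()
        for r := range outCh {
            total += r.balance // analogous to accumulating totalFound
        }
    }()

    // Producer: stand-in for util.IterateState invoking the callback per slot.
    for i := int64(1); i <= 5; i++ {
        outCh <- result{addr: fmt.Sprintf("0x%02x", i), balance: i}
    }

    // Same shutdown order as the new code: close the channel so the collector's
    // range loop ends, then wait for it to signal completion on doneCh.
    close(outCh)
    <-doneCh
    fmt.Println("total:", total) // total: 15
}

Because outCh is only closed after the producer returns, every queued value is drained before the collector exits, which is exactly the guarantee the comment above close(outCh) describes.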
op-chain-ops/ether/migrate_test.go

 package ether
 
 import (
     "fmt"
     "math/big"
     "math/rand"
     "testing"
 
+    "github.com/ethereum-optimism/optimism/op-chain-ops/util"
+
     "github.com/ethereum-optimism/optimism/op-bindings/predeploys"
     "github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain"
...
...
@@ -190,7 +191,7 @@ func TestMigrateBalances(t *testing.T) {
     }
 }
 
-func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Address]*big.Int, allowances map[common.Address]common.Address) (*state.StateDB, DBFactory) {
+func makeLegacyETH(t *testing.T, totalSupply *big.Int, balances map[common.Address]*big.Int, allowances map[common.Address]common.Address) (*state.StateDB, util.DBFactory) {
     memDB := rawdb.NewMemoryDatabase()
     db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{
         Preimages: true,
...
...
@@ -283,85 +284,6 @@ func TestMigrateBalancesRandomMissing(t *testing.T) {
     }
 }
 
-func TestPartitionKeyspace(t *testing.T) {
-    tests := []struct {
-        i        int
-        count    int
-        expected [2]common.Hash
-    }{
-        {
-            i:     0,
-            count: 1,
-            expected: [2]common.Hash{
-                common.HexToHash("0x00"),
-                common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
-            },
-        },
-        {
-            i:     0,
-            count: 2,
-            expected: [2]common.Hash{
-                common.HexToHash("0x00"),
-                common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
-            },
-        },
-        {
-            i:     1,
-            count: 2,
-            expected: [2]common.Hash{
-                common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
-                common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
-            },
-        },
-        {
-            i:     0,
-            count: 3,
-            expected: [2]common.Hash{
-                common.HexToHash("0x00"),
-                common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
-            },
-        },
-        {
-            i:     1,
-            count: 3,
-            expected: [2]common.Hash{
-                common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
-                common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
-            },
-        },
-        {
-            i:     2,
-            count: 3,
-            expected: [2]common.Hash{
-                common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
-                common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
-            },
-        },
-    }
-
-    for _, tt := range tests {
-        t.Run(fmt.Sprintf("i %d, count %d", tt.i, tt.count), func(t *testing.T) {
-            start, end := PartitionKeyspace(tt.i, tt.count)
-            require.Equal(t, tt.expected[0], start)
-            require.Equal(t, tt.expected[1], end)
-        })
-    }
-
-    t.Run("panics on invalid i or count", func(t *testing.T) {
-        require.Panics(t, func() {
-            PartitionKeyspace(1, 1)
-        })
-        require.Panics(t, func() {
-            PartitionKeyspace(-1, 1)
-        })
-        require.Panics(t, func() {
-            PartitionKeyspace(0, -1)
-        })
-        require.Panics(t, func() {
-            PartitionKeyspace(-1, -1)
-        })
-    })
-}
-
 func randAddr(t *testing.T) common.Address {
     var addr common.Address
     _, err := rand.Read(addr[:])
...
...
op-chain-ops/util/state_iterator.go (new file, mode 100644)
package util

import (
    "fmt"
    "math/big"
    "sync"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/ethereum/go-ethereum/trie"
)

var (
    // maxSlot is the maximum possible storage slot.
    maxSlot = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
)

type DBFactory func() (*state.StateDB, error)

type StateCallback func(db *state.StateDB, key, value common.Hash) error

func IterateState(dbFactory DBFactory, address common.Address, cb StateCallback, workers int) error {
    if workers <= 0 {
        panic("workers must be greater than 0")
    }

    // WaitGroup to wait for all workers to finish.
    var wg sync.WaitGroup
    // Channel to receive errors from each iteration job.
    errCh := make(chan error, workers)
    // Channel to cancel all iteration jobs.
    cancelCh := make(chan struct{})

    worker := func(start, end common.Hash) {
        // Decrement the WaitGroup when the function returns.
        defer wg.Done()

        db, err := dbFactory()
        if err != nil {
            // Should never happen, so explode if it does.
            log.Crit("cannot create state db", "err", err)
        }

        st, err := db.StorageTrie(address)
        if err != nil {
            // Should never happen, so explode if it does.
            log.Crit("cannot get storage trie", "address", address, "err", err)
        }
        // st can be nil if the account doesn't exist.
        if st == nil {
            errCh <- fmt.Errorf("account does not exist: %s", address.Hex())
            return
        }

        it := trie.NewIterator(st.NodeIterator(start.Bytes()))

        // Below code is largely based on db.ForEachStorage. We can't use that
        // because it doesn't allow us to specify a start and end key.
        for it.Next() {
            select {
            case <-cancelCh:
                // If one of the workers encounters an error, cancel all of them.
                return
            default:
                break
            }

            // Use the raw (i.e., secure hashed) key to check if we've reached
            // the end of the partition. Use > rather than >= here to account for
            // the fact that the values returned by PartitionKeys are inclusive.
            // Duplicate addresses that may be returned by this iteration are
            // filtered out in the collector.
            if new(big.Int).SetBytes(it.Key).Cmp(end.Big()) > 0 {
                return
            }

            // Skip if the value is empty.
            rawValue := it.Value
            if len(rawValue) == 0 {
                continue
            }

            // Get the preimage.
            rawKey := st.GetKey(it.Key)
            if rawKey == nil {
                // Should never happen, so explode if it does.
                log.Crit("cannot get preimage for storage key", "key", it.Key)
            }
            key := common.BytesToHash(rawKey)

            // Parse the raw value.
            _, content, _, err := rlp.Split(rawValue)
            if err != nil {
                // Should never happen, so explode if it does.
                log.Crit("mal-formed data in state: %v", err)
            }
            value := common.BytesToHash(content)

            // Call the callback with the DB, key, and value. Errors get
            // bubbled up to the errCh.
            if err := cb(db, key, value); err != nil {
                errCh <- err
                return
            }
        }
    }

    for i := 0; i < workers; i++ {
        wg.Add(1)

        // Partition the keyspace per worker.
        start, end := PartitionKeyspace(i, workers)

        // Kick off our worker.
        go worker(start, end)
    }

    wg.Wait()

    for len(errCh) > 0 {
        err := <-errCh
        if err != nil {
            return err
        }
    }

    return nil
}

// PartitionKeyspace divides the key space into partitions by dividing the maximum keyspace
// by count then multiplying by i. This will leave some slots left over, which we handle below. It
// returns the start and end keys for the partition as a common.Hash. Note that the returned range
// of keys is inclusive, i.e., [start, end] NOT [start, end).
func PartitionKeyspace(i int, count int) (common.Hash, common.Hash) {
    if i < 0 || count < 0 {
        panic("i and count must be greater than 0")
    }

    if i > count-1 {
        panic("i must be less than count - 1")
    }

    // Divide the key space into partitions by dividing the key space by the number
    // of jobs. This will leave some slots left over, which we handle below.
    partSize := new(big.Int).Div(maxSlot.Big(), big.NewInt(int64(count)))

    start := common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i)), partSize))
    var end common.Hash
    if i < count-1 {
        // If this is not the last partition, use the next partition's start key as the end.
        end = common.BigToHash(new(big.Int).Mul(big.NewInt(int64(i+1)), partSize))
    } else {
        // If this is the last partition, use the max slot as the end.
        end = maxSlot
    }

    return start, end
}
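PartitionKeyspace above hands each worker an inclusive [start, end] slice of the storage-slot keyspace. A short, hedged sketch (not part of the commit; it assumes a module that can import op-chain-ops/util) that prints the boundaries IterateState would give four workers; because the ranges are inclusive, a slot that lands exactly on a boundary can be visited by two workers, which is why callers such as MigrateBalances deduplicate accounts in their collector:

package main

import (
    "fmt"

    "github.com/ethereum-optimism/optimism/op-chain-ops/util"
)

func main() {
    const workers = 4
    for i := 0; i < workers; i++ {
        // PartitionKeyspace returns the inclusive slot range assigned to worker i.
        start, end := util.PartitionKeyspace(i, workers)
        fmt.Printf("worker %d: [%s, %s]\n", i, start.Hex(), end.Hex())
    }
}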
op-chain-ops/util/state_iterator_test.go (new file, mode 100644)
package util

import (
    crand "crypto/rand"
    "fmt"
    "math/rand"
    "testing"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/trie"
    "github.com/stretchr/testify/require"
)

var testAddr = common.Address{0: 0xff}

func TestStateIteratorWorkers(t *testing.T) {
    _, factory, _ := setupRandTest(t)

    for i := -1; i <= 0; i++ {
        require.Panics(t, func() {
            _ = IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
                return nil
            }, i)
        })
    }
}

func TestStateIteratorNonexistentAccount(t *testing.T) {
    _, factory, _ := setupRandTest(t)

    require.ErrorContains(t, IterateState(factory, common.Address{}, func(db *state.StateDB, key, value common.Hash) error {
        return nil
    }, 1), "account does not exist")
}

func TestStateIteratorRandomOK(t *testing.T) {
    for i := 0; i < 100; i++ {
        hashes, factory, workerCount := setupRandTest(t)

        seenHashes := make(map[common.Hash]bool)
        hashCh := make(chan common.Hash)
        doneCh := make(chan struct{})
        go func() {
            defer close(doneCh)
            for hash := range hashCh {
                seenHashes[hash] = true
            }
        }()

        require.NoError(t, IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
            hashCh <- key
            return nil
        }, workerCount))

        close(hashCh)
        <-doneCh

        // Perform a less or equal check here in case of duplicates. The map check below will assert
        // that all of the hashes are accounted for.
        require.LessOrEqual(t, len(seenHashes), len(hashes))

        // Every hash we put into state should have been iterated over.
        for _, hash := range hashes {
            require.Contains(t, seenHashes, hash)
        }
    }
}

func TestStateIteratorRandomError(t *testing.T) {
    for i := 0; i < 100; i++ {
        hashes, factory, workerCount := setupRandTest(t)

        failHash := hashes[rand.Intn(len(hashes))]

        require.ErrorContains(t, IterateState(factory, testAddr, func(db *state.StateDB, key, value common.Hash) error {
            if key == failHash {
                return fmt.Errorf("test error")
            }
            return nil
        }, workerCount), "test error")
    }
}

func TestPartitionKeyspace(t *testing.T) {
    tests := []struct {
        i        int
        count    int
        expected [2]common.Hash
    }{
        {
            i:     0,
            count: 1,
            expected: [2]common.Hash{
                common.HexToHash("0x00"),
                common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
            },
        },
        {
            i:     0,
            count: 2,
            expected: [2]common.Hash{
                common.HexToHash("0x00"),
                common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
            },
        },
        {
            i:     1,
            count: 2,
            expected: [2]common.Hash{
                common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
                common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
            },
        },
        {
            i:     0,
            count: 3,
            expected: [2]common.Hash{
                common.HexToHash("0x00"),
                common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
            },
        },
        {
            i:     1,
            count: 3,
            expected: [2]common.Hash{
                common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
                common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
            },
        },
        {
            i:     2,
            count: 3,
            expected: [2]common.Hash{
                common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
                common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
            },
        },
    }

    for _, tt := range tests {
        t.Run(fmt.Sprintf("i %d, count %d", tt.i, tt.count), func(t *testing.T) {
            start, end := PartitionKeyspace(tt.i, tt.count)
            require.Equal(t, tt.expected[0], start)
            require.Equal(t, tt.expected[1], end)
        })
    }

    t.Run("panics on invalid i or count", func(t *testing.T) {
        require.Panics(t, func() {
            PartitionKeyspace(1, 1)
        })
        require.Panics(t, func() {
            PartitionKeyspace(-1, 1)
        })
        require.Panics(t, func() {
            PartitionKeyspace(0, -1)
        })
        require.Panics(t, func() {
            PartitionKeyspace(-1, -1)
        })
    })
}

func setupRandTest(t *testing.T) ([]common.Hash, DBFactory, int) {
    memDB := rawdb.NewMemoryDatabase()
    db, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(memDB, &trie.Config{
        Preimages: true,
        Cache:     1024,
    }), nil)
    require.NoError(t, err)

    hashCount := rand.Intn(100)
    if hashCount == 0 {
        hashCount = 1
    }

    hashes := make([]common.Hash, hashCount)
    db.CreateAccount(testAddr)
    for j := 0; j < hashCount; j++ {
        hashes[j] = randHash(t)
        db.SetState(testAddr, hashes[j], hashes[j])
    }

    root, err := db.Commit(false)
    require.NoError(t, err)

    err = db.Database().TrieDB().Commit(root, true)
    require.NoError(t, err)

    factory := func() (*state.StateDB, error) {
        return state.New(root, state.NewDatabaseWithConfig(memDB, &trie.Config{
            Preimages: true,
            Cache:     1024,
        }), nil)
    }

    workerCount := rand.Intn(64)
    if workerCount == 0 {
        workerCount = 1
    }

    return hashes, factory, workerCount
}

func randHash(t *testing.T) common.Hash {
    var h common.Hash
    _, err := crand.Read(h[:])
    require.NoError(t, err)
    return h
}
packages/contracts-bedrock/contracts/deployment/SystemDictator.sol
...
...
@@ -155,6 +155,33 @@ contract SystemDictator is OwnableUpgradeable {
         currentStep++;
     }
 
+    /**
+     * @notice Constructor required to ensure that the implementation of the SystemDictator is
+     *         initialized upon deployment.
+     */
+    constructor() {
+        // Using this shorter variable as an alias for address(0) just prevents us from having
+        // to use a new line for every single parameter.
+        address zero = address(0);
+        initialize(
+            DeployConfig(
+                GlobalConfig(AddressManager(zero), ProxyAdmin(zero), zero, zero),
+                ProxyAddressConfig(zero, zero, zero, zero, zero, zero, zero),
+                ImplementationAddressConfig(
+                    L2OutputOracle(zero),
+                    OptimismPortal(payable(zero)),
+                    L1CrossDomainMessenger(zero),
+                    L1StandardBridge(payable(zero)),
+                    OptimismMintableERC20Factory(zero),
+                    L1ERC721Bridge(zero),
+                    PortalSender(zero),
+                    SystemConfig(zero)
+                ),
+                SystemConfigConfig(zero, 0, 0, bytes32(0), 0, zero)
+            )
+        );
+    }
+
     /**
      * @param _config System configuration.
      */
...
...
packages/sdk/src/utils/contracts.ts
...
...
@@ -165,19 +165,19 @@ export const getBridgeAdapters = (
   }
 ): BridgeAdapters => {
   const adapterData: BridgeAdapterData = {
-    ...(CONTRACT_ADDRESSES[l2ChainId]
+    ...(CONTRACT_ADDRESSES[l2ChainId] || opts?.contracts?.l1?.L1StandardBridge
       ? {
           Standard: {
             Adapter: StandardBridgeAdapter,
             l1Bridge:
-              opts.contracts?.l1?.L1StandardBridge ||
+              opts?.contracts?.l1?.L1StandardBridge ||
               CONTRACT_ADDRESSES[l2ChainId].l1.L1StandardBridge,
             l2Bridge: predeploys.L2StandardBridge,
           },
           ETH: {
             Adapter: ETHBridgeAdapter,
             l1Bridge:
-              opts.contracts?.l1?.L1StandardBridge ||
+              opts?.contracts?.l1?.L1StandardBridge ||
               CONTRACT_ADDRESSES[l2ChainId].l1.L1StandardBridge,
             l2Bridge: predeploys.L2StandardBridge,
           },
...
...