exchain / nebula - Commits - 9eac826b

Commit 9eac826b (Unverified)
Authored Aug 21, 2023 by mergify[bot]; committed by GitHub on Aug 21, 2023.
Merge branch 'develop' into dependabot/go_modules/gorm.io/gorm-1.25.4

Parents: c307af69, e9bb919d
Showing 19 changed files with 163 additions and 119 deletions (+163 -119).
  go.mod                                            +0   -1
  go.sum                                            +0   -2
  op-challenger/config/config.go                    +8   -16
  op-challenger/fault/cannon/executor.go            +1   -3
  op-challenger/fault/cannon/executor_test.go       +11  -9
  op-challenger/fault/cannon/provider.go            +4   -3
  op-challenger/fault/cannon/provider_test.go       +16  -1
  op-challenger/scripts/alphabet/init_game.sh       +2   -2
  op-e2e/e2eutils/disputegame/helper.go             +3   -3
  op-e2e/e2eutils/wait/waits.go                     +2   -1
  op-e2e/faultproof_test.go                         +0   -1
  op-heartbeat/metrics.go                           +4   -5
  op-node/p2p/gossip.go                             +4   -4
  op-node/p2p/host.go                               +0   -3
  op-node/sources/caching/cache.go                  +8   -8
  op-node/sources/eth_client.go                     +12  -12
  op-node/sources/l1_client.go                      +3   -3
  op-node/sources/l2_client.go                      +6   -6
  packages/contracts-bedrock/scripts/Deploy.s.sol   +79  -36
go.mod
@@ -17,7 +17,6 @@ require (
 	github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8
 	github.com/google/uuid v1.3.0
 	github.com/hashicorp/go-multierror v1.1.1
-	github.com/hashicorp/golang-lru v1.0.2
 	github.com/hashicorp/golang-lru/v2 v2.0.2
 	github.com/holiman/uint256 v1.2.3
 	github.com/ipfs/go-datastore v0.6.0
go.sum
@@ -337,8 +337,6 @@ github.com/hashicorp/go-bexpr v0.1.11/go.mod h1:f03lAo0duBlDIUMGCuad8oLcgejw4m7U
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
-github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU=
 github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
op-challenger/config/config.go
View file @
9eac826b
...
@@ -35,27 +35,19 @@ const (
...
@@ -35,27 +35,19 @@ const (
TraceTypeAlphabet
TraceType
=
"alphabet"
TraceTypeAlphabet
TraceType
=
"alphabet"
TraceTypeCannon
TraceType
=
"cannon"
TraceTypeCannon
TraceType
=
"cannon"
// Devnet game IDs
// Mainnet games
DevnetGameIDAlphabet
=
uint8
(
0
)
CannonFaultGameID
=
0
DevnetGameIDCannon
=
uint8
(
1
)
//
Mainnet game ID
s
//
Devnet game
s
MainnetGameIDFault
=
uint8
(
0
)
AlphabetFaultGameID
=
255
)
)
var
TraceTypes
=
[]
TraceType
{
TraceTypeAlphabet
,
TraceTypeCannon
}
var
TraceTypes
=
[]
TraceType
{
TraceTypeAlphabet
,
TraceTypeCannon
}
// GameIdToString maps game IDs to their string representation on a per-network basis.
// GameIdToString maps game IDs to their string representation.
var
GameIdToString
=
map
[
uint64
]
map
[
uint8
]
string
{
var
GameIdToString
=
map
[
uint8
]
string
{
// Mainnet
CannonFaultGameID
:
"Cannon"
,
1
:
{
AlphabetFaultGameID
:
"Alphabet"
,
MainnetGameIDFault
:
"fault-cannon"
,
},
// Devnet
900
:
{
DevnetGameIDAlphabet
:
"fault-alphabet"
,
DevnetGameIDCannon
:
"fault-cannon"
,
},
}
}
func
(
t
TraceType
)
String
()
string
{
func
(
t
TraceType
)
String
()
string
{
...
...
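For context, a minimal standalone sketch of how the new flat mapping is consumed. The constants and map restate what the diff above introduces; the lookup itself is illustrative and not code from this commit.

package main

import "fmt"

// Restated from the diff: game types are now identified by a single uint8
// across networks instead of a per-chain-ID nested map.
const (
	CannonFaultGameID   = 0
	AlphabetFaultGameID = 255
)

var GameIdToString = map[uint8]string{
	CannonFaultGameID:   "Cannon",
	AlphabetFaultGameID: "Alphabet",
}

func main() {
	// A lookup no longer needs the L1 chain ID as an outer key.
	fmt.Println(GameIdToString[AlphabetFaultGameID]) // Alphabet
}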
op-challenger/fault/cannon/executor.go
View file @
9eac826b
...
@@ -39,7 +39,6 @@ type Executor struct {
...
@@ -39,7 +39,6 @@ type Executor struct {
rollupConfig
string
rollupConfig
string
l2Genesis
string
l2Genesis
string
absolutePreState
string
absolutePreState
string
dataDir
string
snapshotFreq
uint
snapshotFreq
uint
selectSnapshot
snapshotSelect
selectSnapshot
snapshotSelect
cmdExecutor
cmdExecutor
cmdExecutor
cmdExecutor
...
@@ -57,7 +56,6 @@ func NewExecutor(logger log.Logger, cfg *config.Config, inputs LocalGameInputs)
...
@@ -57,7 +56,6 @@ func NewExecutor(logger log.Logger, cfg *config.Config, inputs LocalGameInputs)
rollupConfig
:
cfg
.
CannonRollupConfigPath
,
rollupConfig
:
cfg
.
CannonRollupConfigPath
,
l2Genesis
:
cfg
.
CannonL2GenesisPath
,
l2Genesis
:
cfg
.
CannonL2GenesisPath
,
absolutePreState
:
cfg
.
CannonAbsolutePreState
,
absolutePreState
:
cfg
.
CannonAbsolutePreState
,
dataDir
:
cfg
.
CannonDatadir
,
snapshotFreq
:
cfg
.
CannonSnapshotFreq
,
snapshotFreq
:
cfg
.
CannonSnapshotFreq
,
selectSnapshot
:
findStartingSnapshot
,
selectSnapshot
:
findStartingSnapshot
,
cmdExecutor
:
runCmd
,
cmdExecutor
:
runCmd
,
...
@@ -71,7 +69,7 @@ func (e *Executor) GenerateProof(ctx context.Context, dir string, i uint64) erro
...
@@ -71,7 +69,7 @@ func (e *Executor) GenerateProof(ctx context.Context, dir string, i uint64) erro
return
fmt
.
Errorf
(
"find starting snapshot: %w"
,
err
)
return
fmt
.
Errorf
(
"find starting snapshot: %w"
,
err
)
}
}
proofDir
:=
filepath
.
Join
(
dir
,
proofsDir
)
proofDir
:=
filepath
.
Join
(
dir
,
proofsDir
)
dataDir
:=
filepath
.
Join
(
e
.
dataD
ir
,
preimagesDir
)
dataDir
:=
filepath
.
Join
(
d
ir
,
preimagesDir
)
lastGeneratedState
:=
filepath
.
Join
(
dir
,
finalState
)
lastGeneratedState
:=
filepath
.
Join
(
dir
,
finalState
)
args
:=
[]
string
{
args
:=
[]
string
{
"run"
,
"run"
,
...
...
op-challenger/fault/cannon/executor_test.go
View file @
9eac826b
...
@@ -22,7 +22,9 @@ const execTestCannonPrestate = "/foo/pre.json"
...
@@ -22,7 +22,9 @@ const execTestCannonPrestate = "/foo/pre.json"
func
TestGenerateProof
(
t
*
testing
.
T
)
{
func
TestGenerateProof
(
t
*
testing
.
T
)
{
input
:=
"starting.json"
input
:=
"starting.json"
cfg
:=
config
.
NewConfig
(
common
.
Address
{
0xbb
},
"http://localhost:8888"
,
config
.
TraceTypeCannon
,
true
)
cfg
:=
config
.
NewConfig
(
common
.
Address
{
0xbb
},
"http://localhost:8888"
,
config
.
TraceTypeCannon
,
true
)
cfg
.
CannonDatadir
=
t
.
TempDir
()
tempDir
:=
t
.
TempDir
()
dir
:=
filepath
.
Join
(
tempDir
,
"gameDir"
)
cfg
.
CannonDatadir
=
tempDir
cfg
.
CannonAbsolutePreState
=
"pre.json"
cfg
.
CannonAbsolutePreState
=
"pre.json"
cfg
.
CannonBin
=
"./bin/cannon"
cfg
.
CannonBin
=
"./bin/cannon"
cfg
.
CannonServer
=
"./bin/op-program"
cfg
.
CannonServer
=
"./bin/op-program"
...
@@ -58,7 +60,7 @@ func TestGenerateProof(t *testing.T) {
...
@@ -58,7 +60,7 @@ func TestGenerateProof(t *testing.T) {
}
}
return
nil
return
nil
}
}
err
:=
executor
.
GenerateProof
(
context
.
Background
(),
cfg
.
CannonData
dir
,
proofAt
)
err
:=
executor
.
GenerateProof
(
context
.
Background
(),
dir
,
proofAt
)
require
.
NoError
(
t
,
err
)
require
.
NoError
(
t
,
err
)
return
binary
,
subcommand
,
args
return
binary
,
subcommand
,
args
}
}
...
@@ -68,15 +70,15 @@ func TestGenerateProof(t *testing.T) {
...
@@ -68,15 +70,15 @@ func TestGenerateProof(t *testing.T) {
cfg
.
CannonRollupConfigPath
=
""
cfg
.
CannonRollupConfigPath
=
""
cfg
.
CannonL2GenesisPath
=
""
cfg
.
CannonL2GenesisPath
=
""
binary
,
subcommand
,
args
:=
captureExec
(
t
,
cfg
,
150
_000_000
)
binary
,
subcommand
,
args
:=
captureExec
(
t
,
cfg
,
150
_000_000
)
require
.
DirExists
(
t
,
filepath
.
Join
(
cfg
.
CannonData
dir
,
preimagesDir
))
require
.
DirExists
(
t
,
filepath
.
Join
(
dir
,
preimagesDir
))
require
.
DirExists
(
t
,
filepath
.
Join
(
cfg
.
CannonData
dir
,
proofsDir
))
require
.
DirExists
(
t
,
filepath
.
Join
(
dir
,
proofsDir
))
require
.
DirExists
(
t
,
filepath
.
Join
(
cfg
.
CannonData
dir
,
snapsDir
))
require
.
DirExists
(
t
,
filepath
.
Join
(
dir
,
snapsDir
))
require
.
Equal
(
t
,
cfg
.
CannonBin
,
binary
)
require
.
Equal
(
t
,
cfg
.
CannonBin
,
binary
)
require
.
Equal
(
t
,
"run"
,
subcommand
)
require
.
Equal
(
t
,
"run"
,
subcommand
)
require
.
Equal
(
t
,
input
,
args
[
"--input"
])
require
.
Equal
(
t
,
input
,
args
[
"--input"
])
require
.
Contains
(
t
,
args
,
"--meta"
)
require
.
Contains
(
t
,
args
,
"--meta"
)
require
.
Equal
(
t
,
""
,
args
[
"--meta"
])
require
.
Equal
(
t
,
""
,
args
[
"--meta"
])
require
.
Equal
(
t
,
filepath
.
Join
(
cfg
.
CannonData
dir
,
finalState
),
args
[
"--output"
])
require
.
Equal
(
t
,
filepath
.
Join
(
dir
,
finalState
),
args
[
"--output"
])
require
.
Equal
(
t
,
"=150000000"
,
args
[
"--proof-at"
])
require
.
Equal
(
t
,
"=150000000"
,
args
[
"--proof-at"
])
require
.
Equal
(
t
,
"=150000001"
,
args
[
"--stop-at"
])
require
.
Equal
(
t
,
"=150000001"
,
args
[
"--stop-at"
])
require
.
Equal
(
t
,
"%500"
,
args
[
"--snapshot-at"
])
require
.
Equal
(
t
,
"%500"
,
args
[
"--snapshot-at"
])
...
@@ -86,9 +88,9 @@ func TestGenerateProof(t *testing.T) {
...
@@ -86,9 +88,9 @@ func TestGenerateProof(t *testing.T) {
require
.
Equal
(
t
,
"--server"
,
args
[
cfg
.
CannonServer
])
require
.
Equal
(
t
,
"--server"
,
args
[
cfg
.
CannonServer
])
require
.
Equal
(
t
,
cfg
.
L1EthRpc
,
args
[
"--l1"
])
require
.
Equal
(
t
,
cfg
.
L1EthRpc
,
args
[
"--l1"
])
require
.
Equal
(
t
,
cfg
.
CannonL2
,
args
[
"--l2"
])
require
.
Equal
(
t
,
cfg
.
CannonL2
,
args
[
"--l2"
])
require
.
Equal
(
t
,
filepath
.
Join
(
cfg
.
CannonData
dir
,
preimagesDir
),
args
[
"--datadir"
])
require
.
Equal
(
t
,
filepath
.
Join
(
dir
,
preimagesDir
),
args
[
"--datadir"
])
require
.
Equal
(
t
,
filepath
.
Join
(
cfg
.
CannonData
dir
,
proofsDir
,
"%d.json"
),
args
[
"--proof-fmt"
])
require
.
Equal
(
t
,
filepath
.
Join
(
dir
,
proofsDir
,
"%d.json"
),
args
[
"--proof-fmt"
])
require
.
Equal
(
t
,
filepath
.
Join
(
cfg
.
CannonData
dir
,
snapsDir
,
"%d.json"
),
args
[
"--snapshot-fmt"
])
require
.
Equal
(
t
,
filepath
.
Join
(
dir
,
snapsDir
,
"%d.json"
),
args
[
"--snapshot-fmt"
])
require
.
Equal
(
t
,
cfg
.
CannonNetwork
,
args
[
"--network"
])
require
.
Equal
(
t
,
cfg
.
CannonNetwork
,
args
[
"--network"
])
require
.
NotContains
(
t
,
args
,
"--rollup.config"
)
require
.
NotContains
(
t
,
args
,
"--rollup.config"
)
require
.
NotContains
(
t
,
args
,
"--l2.genesis"
)
require
.
NotContains
(
t
,
args
,
"--l2.genesis"
)
...
...
op-challenger/fault/cannon/provider.go
View file @
9eac826b
...
@@ -64,13 +64,14 @@ func NewTraceProvider(ctx context.Context, logger log.Logger, cfg *config.Config
...
@@ -64,13 +64,14 @@ func NewTraceProvider(ctx context.Context, logger log.Logger, cfg *config.Config
if
err
!=
nil
{
if
err
!=
nil
{
return
nil
,
fmt
.
Errorf
(
"fetch local game inputs: %w"
,
err
)
return
nil
,
fmt
.
Errorf
(
"fetch local game inputs: %w"
,
err
)
}
}
return
NewTraceProviderFromInputs
(
logger
,
cfg
,
localInputs
),
nil
return
NewTraceProviderFromInputs
(
logger
,
cfg
,
gameAddr
.
Hex
(),
localInputs
),
nil
}
}
func
NewTraceProviderFromInputs
(
logger
log
.
Logger
,
cfg
*
config
.
Config
,
localInputs
LocalGameInputs
)
*
CannonTraceProvider
{
func
NewTraceProviderFromInputs
(
logger
log
.
Logger
,
cfg
*
config
.
Config
,
gameDirName
string
,
localInputs
LocalGameInputs
)
*
CannonTraceProvider
{
dir
:=
filepath
.
Join
(
cfg
.
CannonDatadir
,
gameDirName
)
return
&
CannonTraceProvider
{
return
&
CannonTraceProvider
{
logger
:
logger
,
logger
:
logger
,
dir
:
cfg
.
CannonData
dir
,
dir
:
dir
,
prestate
:
cfg
.
CannonAbsolutePreState
,
prestate
:
cfg
.
CannonAbsolutePreState
,
generator
:
NewExecutor
(
logger
,
cfg
,
localInputs
),
generator
:
NewExecutor
(
logger
,
cfg
,
localInputs
),
}
}
...
...
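A small sketch of the directory layout this change produces: each dispute game now gets its own subdirectory under the shared Cannon datadir, keyed by the game contract address. The paths below are illustrative placeholders, not values from this commit.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// cfg.CannonDatadir and the game address are stand-ins; the real code
	// passes gameAddr.Hex() as the per-game directory name.
	cannonDatadir := "/data/cannon"
	gameDirName := "0x1234abcd"
	gameDir := filepath.Join(cannonDatadir, gameDirName)
	fmt.Println(gameDir) // /data/cannon/0x1234abcd
}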
op-challenger/fault/cannon/provider_test.go
View file @
9eac826b
...
@@ -11,6 +11,7 @@ import (
...
@@ -11,6 +11,7 @@ import (
"testing"
"testing"
"github.com/ethereum-optimism/optimism/cannon/mipsevm"
"github.com/ethereum-optimism/optimism/cannon/mipsevm"
"github.com/ethereum-optimism/optimism/op-challenger/config"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types"
"github.com/ethereum-optimism/optimism/op-challenger/fault/types"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum-optimism/optimism/op-node/testlog"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common"
...
@@ -149,7 +150,6 @@ func TestGetStepData(t *testing.T) {
...
@@ -149,7 +150,6 @@ func TestGetStepData(t *testing.T) {
func
TestAbsolutePreState
(
t
*
testing
.
T
)
{
func
TestAbsolutePreState
(
t
*
testing
.
T
)
{
dataDir
:=
t
.
TempDir
()
dataDir
:=
t
.
TempDir
()
_
=
os
.
Mkdir
(
dataDir
,
0
o777
)
prestate
:=
"state.json"
prestate
:=
"state.json"
...
@@ -189,6 +189,21 @@ func TestAbsolutePreState(t *testing.T) {
...
@@ -189,6 +189,21 @@ func TestAbsolutePreState(t *testing.T) {
})
})
}
}
func
TestUseGameSpecificSubdir
(
t
*
testing
.
T
)
{
tempDir
:=
t
.
TempDir
()
dataDir
:=
filepath
.
Join
(
tempDir
,
"data"
)
setupPreState
(
t
,
tempDir
,
"state.json"
)
logger
:=
testlog
.
Logger
(
t
,
log
.
LvlInfo
)
cfg
:=
&
config
.
Config
{
CannonAbsolutePreState
:
filepath
.
Join
(
tempDir
,
"state.json"
),
CannonDatadir
:
dataDir
,
}
gameDirName
:=
"gameSubdir"
localInputs
:=
LocalGameInputs
{}
provider
:=
NewTraceProviderFromInputs
(
logger
,
cfg
,
gameDirName
,
localInputs
)
require
.
Equal
(
t
,
filepath
.
Join
(
dataDir
,
gameDirName
),
provider
.
dir
,
"should use game specific subdir"
)
}
func
setupPreState
(
t
*
testing
.
T
,
dataDir
string
,
filename
string
)
{
func
setupPreState
(
t
*
testing
.
T
,
dataDir
string
,
filename
string
)
{
srcDir
:=
filepath
.
Join
(
"test_data"
)
srcDir
:=
filepath
.
Join
(
"test_data"
)
path
:=
filepath
.
Join
(
srcDir
,
filename
)
path
:=
filepath
.
Join
(
srcDir
,
filename
)
...
...
op-challenger/scripts/alphabet/init_game.sh
View file @
9eac826b
...
@@ -78,8 +78,8 @@ cast call $L2_OUTPUT_ORACLE_PROXY "getL2Output(uint256)" $PRIOR_INDEX
...
@@ -78,8 +78,8 @@ cast call $L2_OUTPUT_ORACLE_PROXY "getL2Output(uint256)" $PRIOR_INDEX
echo
"Getting the l2 output at index
$INDEX
"
echo
"Getting the l2 output at index
$INDEX
"
cast call
$L2_OUTPUT_ORACLE_PROXY
"getL2Output(uint256)"
$INDEX
cast call
$L2_OUTPUT_ORACLE_PROXY
"getL2Output(uint256)"
$INDEX
# (Alphabet) Fault game type =
0
# (Alphabet) Fault game type =
255
GAME_TYPE
=
0
GAME_TYPE
=
255
# Root claim commits to the entire trace.
# Root claim commits to the entire trace.
# Alphabet game claim construction: keccak256(abi.encode(trace_index, trace[trace_index]))
# Alphabet game claim construction: keccak256(abi.encode(trace_index, trace[trace_index]))
...
...
op-e2e/e2eutils/disputegame/helper.go
View file @
9eac826b
...
@@ -26,8 +26,8 @@ import (
...
@@ -26,8 +26,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/require"
)
)
const
alphabetGameType
uint8
=
0
const
alphabetGameType
uint8
=
255
const
cannonGameType
uint8
=
1
const
cannonGameType
uint8
=
0
const
alphabetGameDepth
=
4
const
alphabetGameDepth
=
4
const
lastAlphabetTraceIndex
=
1
<<
alphabetGameDepth
-
1
const
lastAlphabetTraceIndex
=
1
<<
alphabetGameDepth
-
1
...
@@ -170,7 +170,7 @@ func (h *FactoryHelper) StartCannonGameWithCorrectRoot(ctx context.Context, roll
...
@@ -170,7 +170,7 @@ func (h *FactoryHelper) StartCannonGameWithCorrectRoot(ctx context.Context, roll
L2Claim
:
challengedOutput
.
OutputRoot
,
L2Claim
:
challengedOutput
.
OutputRoot
,
L2BlockNumber
:
challengedOutput
.
L2BlockNumber
,
L2BlockNumber
:
challengedOutput
.
L2BlockNumber
,
}
}
provider
:=
cannon
.
NewTraceProviderFromInputs
(
testlog
.
Logger
(
h
.
t
,
log
.
LvlInfo
)
.
New
(
"role"
,
"CorrectTrace"
),
cfg
,
inputs
)
provider
:=
cannon
.
NewTraceProviderFromInputs
(
testlog
.
Logger
(
h
.
t
,
log
.
LvlInfo
)
.
New
(
"role"
,
"CorrectTrace"
),
cfg
,
"correct"
,
inputs
)
rootClaim
,
err
:=
provider
.
Get
(
ctx
,
math
.
MaxUint64
)
rootClaim
,
err
:=
provider
.
Get
(
ctx
,
math
.
MaxUint64
)
h
.
require
.
NoError
(
err
,
"Compute correct root hash"
)
h
.
require
.
NoError
(
err
,
"Compute correct root hash"
)
...
...
op-e2e/e2eutils/wait/waits.go
View file @
9eac826b
...
@@ -56,7 +56,8 @@ func (s *jsonRawString) UnmarshalJSON(input []byte) error {
...
@@ -56,7 +56,8 @@ func (s *jsonRawString) UnmarshalJSON(input []byte) error {
// printDebugTrace logs debug_traceTransaction output to aid in debugging unexpected receipt statuses
// printDebugTrace logs debug_traceTransaction output to aid in debugging unexpected receipt statuses
func
printDebugTrace
(
ctx
context
.
Context
,
client
*
ethclient
.
Client
,
txHash
common
.
Hash
)
{
func
printDebugTrace
(
ctx
context
.
Context
,
client
*
ethclient
.
Client
,
txHash
common
.
Hash
)
{
var
trace
jsonRawString
var
trace
jsonRawString
options
:=
map
[
string
]
string
{}
options
:=
map
[
string
]
any
{}
options
[
"enableReturnData"
]
=
true
err
:=
client
.
Client
()
.
CallContext
(
ctx
,
&
trace
,
"debug_traceTransaction"
,
hexutil
.
Bytes
(
txHash
.
Bytes
()),
options
)
err
:=
client
.
Client
()
.
CallContext
(
ctx
,
&
trace
,
"debug_traceTransaction"
,
hexutil
.
Bytes
(
txHash
.
Bytes
()),
options
)
if
err
!=
nil
{
if
err
!=
nil
{
fmt
.
Printf
(
"TxTrace unavailable: %v
\n
"
,
err
)
fmt
.
Printf
(
"TxTrace unavailable: %v
\n
"
,
err
)
...
...
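The switch from map[string]string to map[string]any matters because the tracer option is a boolean. A short sketch of the options payload that now accompanies debug_traceTransaction; the field name comes from the diff, everything else is illustrative.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A string-valued map could only carry "true" as a string; with
	// map[string]any the flag is serialized as a real JSON boolean.
	options := map[string]any{"enableReturnData": true}
	b, _ := json.Marshal(options)
	fmt.Println(string(b)) // {"enableReturnData":true}
}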
op-e2e/faultproof_test.go
View file @
9eac826b
...
@@ -50,7 +50,6 @@ func TestMultipleAlphabetGames(t *testing.T) {
...
@@ -50,7 +50,6 @@ func TestMultipleAlphabetGames(t *testing.T) {
}
}
func
TestMultipleCannonGames
(
t
*
testing
.
T
)
{
func
TestMultipleCannonGames
(
t
*
testing
.
T
)
{
t
.
Skip
(
"Cannon provider doesn't currently isolate different game traces"
)
InitParallel
(
t
)
InitParallel
(
t
)
ctx
:=
context
.
Background
()
ctx
:=
context
.
Background
()
...
...
op-heartbeat/metrics.go
View file @
9eac826b
...
@@ -6,7 +6,7 @@ import (
...
@@ -6,7 +6,7 @@ import (
"sync/atomic"
"sync/atomic"
"time"
"time"
lru
"github.com/hashicorp/golang-lru"
lru
"github.com/hashicorp/golang-lru
/v2
"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promauto"
...
@@ -31,7 +31,7 @@ type metrics struct {
...
@@ -31,7 +31,7 @@ type metrics struct {
// Groups heartbeats per unique IP, version and chain ID combination.
// Groups heartbeats per unique IP, version and chain ID combination.
// string(IP ++ version ++ chainID) -> *heartbeatEntry
// string(IP ++ version ++ chainID) -> *heartbeatEntry
heartbeatUsers
*
lru
.
Cache
heartbeatUsers
*
lru
.
Cache
[
string
,
*
heartbeatEntry
]
}
}
type
heartbeatEntry
struct
{
type
heartbeatEntry
struct
{
...
@@ -42,7 +42,7 @@ type heartbeatEntry struct {
...
@@ -42,7 +42,7 @@ type heartbeatEntry struct {
}
}
func
NewMetrics
(
r
*
prometheus
.
Registry
)
Metrics
{
func
NewMetrics
(
r
*
prometheus
.
Registry
)
Metrics
{
lruCache
,
_
:=
lru
.
New
(
UsersCacheSize
)
lruCache
,
_
:=
lru
.
New
[
string
,
*
heartbeatEntry
]
(
UsersCacheSize
)
m
:=
&
metrics
{
m
:=
&
metrics
{
heartbeats
:
promauto
.
With
(
r
)
.
NewCounterVec
(
prometheus
.
CounterOpts
{
heartbeats
:
promauto
.
With
(
r
)
.
NewCounterVec
(
prometheus
.
CounterOpts
{
Namespace
:
MetricsNamespace
,
Namespace
:
MetricsNamespace
,
...
@@ -89,7 +89,7 @@ func (m *metrics) RecordHeartbeat(payload heartbeat.Payload, ip string) {
...
@@ -89,7 +89,7 @@ func (m *metrics) RecordHeartbeat(payload heartbeat.Payload, ip string) {
key
:=
fmt
.
Sprintf
(
"%s;%s;%s"
,
ip
,
version
,
chainID
)
key
:=
fmt
.
Sprintf
(
"%s;%s;%s"
,
ip
,
version
,
chainID
)
now
:=
time
.
Now
()
now
:=
time
.
Now
()
previous
,
ok
,
_
:=
m
.
heartbeatUsers
.
PeekOrAdd
(
key
,
&
heartbeatEntry
{
Time
:
now
,
Count
:
1
})
entry
,
ok
,
_
:=
m
.
heartbeatUsers
.
PeekOrAdd
(
key
,
&
heartbeatEntry
{
Time
:
now
,
Count
:
1
})
if
!
ok
{
if
!
ok
{
// if it's a new entry, observe it and exit.
// if it's a new entry, observe it and exit.
m
.
sameIP
.
WithLabelValues
(
chainID
,
version
)
.
Observe
(
1
)
m
.
sameIP
.
WithLabelValues
(
chainID
,
version
)
.
Observe
(
1
)
...
@@ -97,7 +97,6 @@ func (m *metrics) RecordHeartbeat(payload heartbeat.Payload, ip string) {
...
@@ -97,7 +97,6 @@ func (m *metrics) RecordHeartbeat(payload heartbeat.Payload, ip string) {
return
return
}
}
entry
:=
previous
.
(
*
heartbeatEntry
)
if
now
.
Sub
(
entry
.
Time
)
<
MinHeartbeatInterval
{
if
now
.
Sub
(
entry
.
Time
)
<
MinHeartbeatInterval
{
// if the span is still going, then add it up
// if the span is still going, then add it up
atomic
.
AddUint64
(
&
entry
.
Count
,
1
)
atomic
.
AddUint64
(
&
entry
.
Count
,
1
)
...
...
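Several files in this commit follow the same pattern: golang-lru v2 is generic, so a cache is instantiated with explicit key and value types and callers drop their type assertions. A minimal sketch of the v2 API as used here; the size and key are illustrative.

package main

import (
	"fmt"
	"time"

	lru "github.com/hashicorp/golang-lru/v2"
)

type heartbeatEntry struct {
	Time  time.Time
	Count uint64
}

func main() {
	// lru.New takes the key/value types; PeekOrAdd returns a typed previous
	// value, so the old previous.(*heartbeatEntry) assertion is unnecessary.
	cache, _ := lru.New[string, *heartbeatEntry](16)
	entry, existed, _ := cache.PeekOrAdd("ip;version;chainID", &heartbeatEntry{Time: time.Now(), Count: 1})
	fmt.Println(existed, entry) // false <nil> on the first insert
}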
op-node/p2p/gossip.go
@@ -10,7 +10,7 @@ import (
 	"time"

 	"github.com/golang/snappy"
-	lru "github.com/hashicorp/golang-lru"
+	lru "github.com/hashicorp/golang-lru/v2"
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	pb "github.com/libp2p/go-libp2p-pubsub/pb"
 	"github.com/libp2p/go-libp2p/core/host"
@@ -242,7 +242,7 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti
 	// Seen block hashes per block height
 	// uint64 -> *seenBlocks
-	blockHeightLRU, err := lru.New(1000)
+	blockHeightLRU, err := lru.New[uint64, *seenBlocks](1000)
 	if err != nil {
 		panic(fmt.Errorf("failed to set up block height LRU cache: %w", err))
 	}
@@ -315,7 +315,7 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti
 			blockHeightLRU.Add(uint64(payload.BlockNumber), seen)
 		}

-		if count, hasSeen := seen.(*seenBlocks).hasSeen(payload.BlockHash); count > 5 {
+		if count, hasSeen := seen.hasSeen(payload.BlockHash); count > 5 {
 			// [REJECT] if more than 5 blocks have been seen with the same block height
 			log.Warn("seen too many different blocks at same height", "height", payload.BlockNumber)
 			return pubsub.ValidationReject
@@ -327,7 +327,7 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti
 		// mark it as seen. (note: with concurrent validation more than 5 blocks may be marked as seen still,
 		// but validator concurrency is limited anyway)
-		seen.(*seenBlocks).markSeen(payload.BlockHash)
+		seen.markSeen(payload.BlockHash)

 		// remember the decoded payload for later usage in topic subscriber.
 		message.ValidatorData = &payload
op-node/p2p/host.go
@@ -186,9 +186,6 @@ func (conf *Config) Host(log log.Logger, reporter metrics.Reporter, metrics Host
 	tcpTransport := libp2p.Transport(
 		tcp.NewTCPTransport,
 		tcp.WithConnectionTimeout(time.Minute*60)) // break unused connections
-	if err != nil {
-		return nil, fmt.Errorf("failed to create TCP transport: %w", err)
-	}
 	// TODO: technically we can also run the node on websocket and QUIC transports. Maybe in the future?
 	var nat lconf.NATManagerC // disabled if nil
op-node/sources/caching/cache.go
 package caching

-import lru "github.com/hashicorp/golang-lru"
+import lru "github.com/hashicorp/golang-lru/v2"

 type Metrics interface {
 	CacheAdd(label string, cacheSize int, evicted bool)
@@ -8,13 +8,13 @@ type Metrics interface {
 }

 // LRUCache wraps hashicorp *lru.Cache and tracks cache metrics
-type LRUCache struct {
+type LRUCache[K comparable, V any] struct {
 	m     Metrics
 	label string
-	inner *lru.Cache
+	inner *lru.Cache[K, V]
 }

-func (c *LRUCache) Get(key any) (value any, ok bool) {
+func (c *LRUCache[K, V]) Get(key K) (value V, ok bool) {
 	value, ok = c.inner.Get(key)
 	if c.m != nil {
 		c.m.CacheGet(c.label, ok)
@@ -22,7 +22,7 @@ func (c *LRUCache) Get(key any) (value any, ok bool) {
 	return value, ok
 }

-func (c *LRUCache) Add(key, value any) (evicted bool) {
+func (c *LRUCache[K, V]) Add(key K, value V) (evicted bool) {
 	evicted = c.inner.Add(key, value)
 	if c.m != nil {
 		c.m.CacheAdd(c.label, c.inner.Len(), evicted)
@@ -32,10 +32,10 @@ func (c *LRUCache) Add(key, value any) (evicted bool) {
 // NewLRUCache creates a LRU cache with the given metrics, labeling the cache adds/gets.
 // Metrics are optional: no metrics will be tracked if m == nil.
-func NewLRUCache(m Metrics, label string, maxSize int) *LRUCache {
+func NewLRUCache[K comparable, V any](m Metrics, label string, maxSize int) *LRUCache[K, V] {
 	// no errors if the size is positive
-	cache, _ := lru.New(maxSize)
-	return &LRUCache{
+	cache, _ := lru.New[K, V](maxSize)
+	return &LRUCache[K, V]{
 		m:     m,
 		label: label,
 		inner: cache,
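A short caller-side sketch of why the generic wrapper removes assertions downstream: Get now returns a value of the declared type (and that type's zero value on a miss). The key and value types below are illustrative, not taken from the commit.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// The typed cache returns ([]byte, bool) directly, which is why the
	// clients below can simply `return header, nil` after the ok check
	// instead of asserting an `any` value.
	c, _ := lru.New[string, []byte](4)
	c.Add("block", []byte{0x01})
	if v, ok := c.Get("block"); ok {
		fmt.Println(v) // [1]
	}
}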
op-node/sources/eth_client.go
@@ -106,19 +106,19 @@ type EthClient struct {
 	// cache receipts in bundles per block hash
 	// We cache the receipts fetching job to not lose progress when we have to retry the `Fetch` call
 	// common.Hash -> *receiptsFetchingJob
-	receiptsCache *caching.LRUCache
+	receiptsCache *caching.LRUCache[common.Hash, *receiptsFetchingJob]

 	// cache transactions in bundles per block hash
 	// common.Hash -> types.Transactions
-	transactionsCache *caching.LRUCache
+	transactionsCache *caching.LRUCache[common.Hash, types.Transactions]

 	// cache block headers of blocks by hash
 	// common.Hash -> *HeaderInfo
-	headersCache *caching.LRUCache
+	headersCache *caching.LRUCache[common.Hash, eth.BlockInfo]

 	// cache payloads by hash
 	// common.Hash -> *eth.ExecutionPayload
-	payloadsCache *caching.LRUCache
+	payloadsCache *caching.LRUCache[common.Hash, *eth.ExecutionPayload]

 	// availableReceiptMethods tracks which receipt methods can be used for fetching receipts
 	// This may be modified concurrently, but we don't lock since it's a single
@@ -172,10 +172,10 @@ func NewEthClient(client client.RPC, log log.Logger, metrics caching.Metrics, co
 		mustBePostMerge:         config.MustBePostMerge,
 		provKind:                config.RPCProviderKind,
 		log:                     log,
-		receiptsCache:           caching.NewLRUCache(metrics, "receipts", config.ReceiptsCacheSize),
-		transactionsCache:       caching.NewLRUCache(metrics, "txs", config.TransactionsCacheSize),
-		headersCache:            caching.NewLRUCache(metrics, "headers", config.HeadersCacheSize),
-		payloadsCache:           caching.NewLRUCache(metrics, "payloads", config.PayloadsCacheSize),
+		receiptsCache:           caching.NewLRUCache[common.Hash, *receiptsFetchingJob](metrics, "receipts", config.ReceiptsCacheSize),
+		transactionsCache:       caching.NewLRUCache[common.Hash, types.Transactions](metrics, "txs", config.TransactionsCacheSize),
+		headersCache:            caching.NewLRUCache[common.Hash, eth.BlockInfo](metrics, "headers", config.HeadersCacheSize),
+		payloadsCache:           caching.NewLRUCache[common.Hash, *eth.ExecutionPayload](metrics, "payloads", config.PayloadsCacheSize),
 		availableReceiptMethods: AvailableReceiptsFetchingMethods(config.RPCProviderKind),
 		lastMethodsReset:        time.Now(),
 		methodResetDuration:     config.MethodResetDuration,
@@ -292,7 +292,7 @@ func (s *EthClient) ChainID(ctx context.Context) (*big.Int, error) {
 func (s *EthClient) InfoByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, error) {
 	if header, ok := s.headersCache.Get(hash); ok {
-		return header.(eth.BlockInfo), nil
+		return header, nil
 	}
 	return s.headerCall(ctx, "eth_getBlockByHash", hashID(hash))
 }
@@ -310,7 +310,7 @@ func (s *EthClient) InfoByLabel(ctx context.Context, label eth.BlockLabel) (eth.
 func (s *EthClient) InfoAndTxsByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, types.Transactions, error) {
 	if header, ok := s.headersCache.Get(hash); ok {
 		if txs, ok := s.transactionsCache.Get(hash); ok {
-			return header.(eth.BlockInfo), txs.(types.Transactions), nil
+			return header, txs, nil
 		}
 	}
 	return s.blockCall(ctx, "eth_getBlockByHash", hashID(hash))
@@ -328,7 +328,7 @@ func (s *EthClient) InfoAndTxsByLabel(ctx context.Context, label eth.BlockLabel)
 func (s *EthClient) PayloadByHash(ctx context.Context, hash common.Hash) (*eth.ExecutionPayload, error) {
 	if payload, ok := s.payloadsCache.Get(hash); ok {
-		return payload.(*eth.ExecutionPayload), nil
+		return payload, nil
 	}
 	return s.payloadCall(ctx, "eth_getBlockByHash", hashID(hash))
 }
@@ -354,7 +354,7 @@ func (s *EthClient) FetchReceipts(ctx context.Context, blockHash common.Hash) (e
 	// The underlying fetcher uses the receipts hash to verify receipt integrity.
 	var job *receiptsFetchingJob
 	if v, ok := s.receiptsCache.Get(blockHash); ok {
-		job = v.(*receiptsFetchingJob)
+		job = v
 	} else {
 		txHashes := eth.TransactionsToHashes(txs)
 		job = NewReceiptsFetchingJob(s, s.client, s.maxBatchSize, eth.ToBlockID(info), info.ReceiptHash(), txHashes)
op-node/sources/l1_client.go
@@ -56,7 +56,7 @@ type L1Client struct {
 	// cache L1BlockRef by hash
 	// common.Hash -> eth.L1BlockRef
-	l1BlockRefsCache *caching.LRUCache
+	l1BlockRefsCache *caching.LRUCache[common.Hash, eth.L1BlockRef]
 }

 // NewL1Client wraps a RPC with bindings to fetch L1 data, while logging errors, tracking metrics (optional), and caching.
@@ -68,7 +68,7 @@ func NewL1Client(client client.RPC, log log.Logger, metrics caching.Metrics, con
 	return &L1Client{
 		EthClient:        ethClient,
-		l1BlockRefsCache: caching.NewLRUCache(metrics, "blockrefs", config.L1BlockRefsCacheSize),
+		l1BlockRefsCache: caching.NewLRUCache[common.Hash, eth.L1BlockRef](metrics, "blockrefs", config.L1BlockRefsCacheSize),
 	}, nil
 }
@@ -105,7 +105,7 @@ func (s *L1Client) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1Bl
 // We cache the block reference by hash as it is safe to assume collision will not occur.
 func (s *L1Client) L1BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L1BlockRef, error) {
 	if v, ok := s.l1BlockRefsCache.Get(hash); ok {
-		return v.(eth.L1BlockRef), nil
+		return v, nil
 	}
 	info, err := s.InfoByHash(ctx, hash)
 	if err != nil {
op-node/sources/l2_client.go
@@ -68,11 +68,11 @@ type L2Client struct {
 	// cache L2BlockRef by hash
 	// common.Hash -> eth.L2BlockRef
-	l2BlockRefsCache *caching.LRUCache
+	l2BlockRefsCache *caching.LRUCache[common.Hash, eth.L2BlockRef]

 	// cache SystemConfig by L2 hash
 	// common.Hash -> eth.SystemConfig
-	systemConfigsCache *caching.LRUCache
+	systemConfigsCache *caching.LRUCache[common.Hash, eth.SystemConfig]
 }

 // NewL2Client constructs a new L2Client instance. The L2Client is a thin wrapper around the EthClient with added functions
@@ -87,8 +87,8 @@ func NewL2Client(client client.RPC, log log.Logger, metrics caching.Metrics, con
 	return &L2Client{
 		EthClient:          ethClient,
 		rollupCfg:          config.RollupCfg,
-		l2BlockRefsCache:   caching.NewLRUCache(metrics, "blockrefs", config.L2BlockRefsCacheSize),
-		systemConfigsCache: caching.NewLRUCache(metrics, "systemconfigs", config.L1ConfigsCacheSize),
+		l2BlockRefsCache:   caching.NewLRUCache[common.Hash, eth.L2BlockRef](metrics, "blockrefs", config.L2BlockRefsCacheSize),
+		systemConfigsCache: caching.NewLRUCache[common.Hash, eth.SystemConfig](metrics, "systemconfigs", config.L1ConfigsCacheSize),
 	}, nil
 }
@@ -131,7 +131,7 @@ func (s *L2Client) L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2Bl
 // The returned BlockRef may not be in the canonical chain.
 func (s *L2Client) L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L2BlockRef, error) {
 	if ref, ok := s.l2BlockRefsCache.Get(hash); ok {
-		return ref.(eth.L2BlockRef), nil
+		return ref, nil
 	}
 	payload, err := s.PayloadByHash(ctx, hash)
@@ -151,7 +151,7 @@ func (s *L2Client) L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.
 // The returned [eth.SystemConfig] may not be in the canonical chain when the hash is not canonical.
 func (s *L2Client) SystemConfigByL2Hash(ctx context.Context, hash common.Hash) (eth.SystemConfig, error) {
 	if ref, ok := s.systemConfigsCache.Get(hash); ok {
-		return ref.(eth.SystemConfig), nil
+		return ref, nil
 	}
 	payload, err := s.PayloadByHash(ctx, hash)
packages/contracts-bedrock/scripts/Deploy.s.sol
@@ -76,7 +76,8 @@ contract Deploy is Deployer {
         initializeL2OutputOracle();
         initializeOptimismPortal();

-        setFaultGameImplementation();
+        setAlphabetFaultGameImplementation();
+        setCannonFaultGameImplementation();

         transferProxyAdminOwnership();
         transferDisputeGameFactoryOwnership();
@@ -759,47 +760,89 @@ contract Deploy is Deployer {
     }

     /// @notice Sets the implementation for the `FAULT` game type in the `DisputeGameFactory`
-    function setFaultGameImplementation() public onlyDevnet broadcast {
-        // Create the absolute prestate dump
-        string memory filePath = string.concat(vm.projectRoot(), "/../../op-program/bin/prestate-proof.json");
-        bytes32 mipsAbsolutePrestate;
-        string[] memory commands = new string[](3);
-        commands[0] = "bash";
-        commands[1] = "-c";
-        commands[2] = string.concat("[[ -f ", filePath, " ]] && echo \"present\"");
-        if (vm.ffi(commands).length == 0) {
-            revert("Cannon prestate dump not found, generate it with `make cannon-prestate` in the monorepo root.");
-        }
-        commands[2] = string.concat("cat ", filePath, " | jq -r .pre");
-        mipsAbsolutePrestate = abi.decode(vm.ffi(commands), (bytes32));
-        console.log("Absolute prestate: %s", vm.toString(mipsAbsolutePrestate));
-
-        DisputeGameFactory factory = DisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy"));
-        for (uint8 i; i < 2; i++) {
-            Claim absolutePrestate =
-                Claim.wrap(i == 0 ? bytes32(cfg.faultGameAbsolutePrestate()) : mipsAbsolutePrestate);
-            IBigStepper faultVm =
-                IBigStepper(i == 0 ? address(new AlphabetVM(absolutePrestate)) : mustGetAddress("Mips"));
-            GameType gameType = GameType.wrap(i);
-            if (address(factory.gameImpls(gameType)) == address(0)) {
-                factory.setImplementation(
-                    gameType,
-                    new FaultDisputeGame({
-                        _gameType: gameType,
-                        _absolutePrestate: absolutePrestate,
-                        _maxGameDepth: i == 0 ? 4 : cfg.faultGameMaxDepth(), // The max depth of the alphabet game is always 4
-                        _gameDuration: Duration.wrap(uint64(cfg.faultGameMaxDuration())),
-                        _vm: faultVm,
-                        _l2oo: L2OutputOracle(mustGetAddress("L2OutputOracleProxy")),
-                        _blockOracle: BlockOracle(mustGetAddress("BlockOracle"))
-                    })
-                );
-
-                console.log(
-                    "DisputeGameFactoryProxy: set `FaultDisputeGame` implementation (Backend: %s | GameType: %s)",
-                    i == 0 ? "AlphabetVM" : "MIPS",
-                    vm.toString(i)
-                );
-            }
-        }
-    }
+    function setCannonFaultGameImplementation() public onlyDevnet broadcast {
+        DisputeGameFactory factory = DisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy"));
+
+        Claim mipsAbsolutePrestate;
+        if (block.chainid == Chains.LocalDevnet || block.chainid == Chains.GethDevnet) {
+            // Fetch the absolute prestate dump
+            string memory filePath = string.concat(vm.projectRoot(), "/../../op-program/bin/prestate-proof.json");
+            string[] memory commands = new string[](3);
+            commands[0] = "bash";
+            commands[1] = "-c";
+            commands[2] = string.concat("[[ -f ", filePath, " ]] && echo \"present\"");
+            if (vm.ffi(commands).length == 0) {
+                revert("Cannon prestate dump not found, generate it with `make cannon-prestate` in the monorepo root.");
+            }
+            commands[2] = string.concat("cat ", filePath, " | jq -r .pre");
+            mipsAbsolutePrestate = Claim.wrap(abi.decode(vm.ffi(commands), (bytes32)));
+            console.log(
+                "[Cannon Dispute Game] Using devnet MIPS Absolute prestate: %s",
+                vm.toString(Claim.unwrap(mipsAbsolutePrestate))
+            );
+        } else {
+            console.log(
+                "[Cannon Dispute Game] Using absolute prestate from config: %s", cfg.faultGameAbsolutePrestate()
+            );
+            mipsAbsolutePrestate = Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate()));
+        }
+
+        // Set the Cannon FaultDisputeGame implementation in the factory.
+        _setFaultGameImplementation(
+            factory, GameTypes.FAULT, mipsAbsolutePrestate, IBigStepper(mustGetAddress("Mips")), cfg.faultGameMaxDepth()
+        );
+    }
+
+    /// @notice Sets the implementation for the alphabet game type in the `DisputeGameFactory`
+    function setAlphabetFaultGameImplementation() public onlyDevnet broadcast {
+        DisputeGameFactory factory = DisputeGameFactory(mustGetAddress("DisputeGameFactoryProxy"));
+
+        // Set the Alphabet FaultDisputeGame implementation in the factory.
+        Claim alphabetAbsolutePrestate = Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate()));
+        _setFaultGameImplementation(
+            factory,
+            GameType.wrap(255),
+            alphabetAbsolutePrestate,
+            IBigStepper(new AlphabetVM(alphabetAbsolutePrestate)),
+            4 // The max game depth of the alphabet game is always 4.
+        );
+    }
+
+    /// @notice Sets the implementation for the given fault game type in the `DisputeGameFactory`.
+    function _setFaultGameImplementation(
+        DisputeGameFactory _factory,
+        GameType _gameType,
+        Claim _absolutePrestate,
+        IBigStepper _faultVm,
+        uint256 _maxGameDepth
+    )
+        internal
+    {
+        if (address(_factory.gameImpls(_gameType)) == address(0)) {
+            _factory.setImplementation(
+                _gameType,
+                new FaultDisputeGame({
+                    _gameType: _gameType,
+                    _absolutePrestate: _absolutePrestate,
+                    _maxGameDepth: _maxGameDepth,
+                    _gameDuration: Duration.wrap(uint64(cfg.faultGameMaxDuration())),
+                    _vm: _faultVm,
+                    _l2oo: L2OutputOracle(mustGetAddress("L2OutputOracleProxy")),
+                    _blockOracle: BlockOracle(mustGetAddress("BlockOracle"))
+                })
+            );
+
+            uint8 rawGameType = GameType.unwrap(_gameType);
+            console.log(
+                "DisputeGameFactoryProxy: set `FaultDisputeGame` implementation (Backend: %s | GameType: %s)",
+                rawGameType == 0 ? "Cannon" : "Alphabet",
+                vm.toString(rawGameType)
+            );
+        } else {
+            console.log(
+                "[WARN] DisputeGameFactoryProxy: `FaultDisputeGame` implementation already set for game type: %s",
+                vm.toString(GameType.unwrap(_gameType))
+            );
+        }
+    }
 }