Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
N
nebula
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
exchain
nebula
Commits
f740ac21
Unverified
Commit
f740ac21
authored
Sep 29, 2023
by
mergify[bot]
Committed by
GitHub
Sep 29, 2023
Browse files
Options
Browse Files
Download
Plain Diff
Merge branch 'develop' into dependabot/npm_and_yarn/types/express-4.17.18
parents
d656473a
d2fafe41
Changes
30
Expand all
Hide whitespace changes
Inline
Side-by-side
Showing
30 changed files
with
821 additions
and
600 deletions
+821
-600
bigint.go
indexer/bigint/bigint.go
+0
-32
bigint_test.go
indexer/bigint/bigint_test.go
+0
-43
blocks.go
indexer/database/blocks.go
+62
-22
mocks.go
indexer/database/mocks.go
+3
-1
setup.go
indexer/e2e_tests/setup.go
+20
-12
etl.go
indexer/etl/etl.go
+6
-5
metrics.go
indexer/etl/metrics.go
+1
-6
indexer.go
indexer/indexer.go
+2
-1
metrics.go
indexer/node/metrics.go
+1
-0
bridge.go
indexer/processors/bridge.go
+137
-152
l1_bridge_processor.go
indexer/processors/bridge/l1_bridge_processor.go
+36
-12
l2_bridge_processor.go
indexer/processors/bridge/l2_bridge_processor.go
+33
-14
legacy_bridge_processor.go
indexer/processors/bridge/legacy_bridge_processor.go
+49
-27
metrics.go
indexer/processors/bridge/metrics.go
+221
-0
legacy_standard_bridge.go
indexer/processors/contracts/legacy_standard_bridge.go
+8
-4
config.go
op-challenger/config/config.go
+1
-1
flags.go
op-challenger/flags/flags.go
+1
-1
agent.go
op-challenger/game/fault/agent.go
+1
-1
solver.go
op-challenger/game/fault/solver/solver.go
+4
-4
monitor_test.go
op-challenger/game/monitor_test.go
+27
-20
l2_batcher_test.go
op-e2e/actions/l2_batcher_test.go
+1
-1
l2_engine.go
op-e2e/actions/l2_engine.go
+18
-5
helper.go
op-e2e/e2eutils/challenger/helper.go
+1
-1
blocks.go
op-e2e/e2eutils/wait/blocks.go
+59
-0
waits.go
op-e2e/e2eutils/wait/waits.go
+9
-43
system_adminrpc_test.go
op-e2e/system_adminrpc_test.go
+13
-10
cli.go
op-service/txmgr/cli.go
+47
-22
cli_test.go
op-service/txmgr/cli_test.go
+2
-2
package.json
package.json
+1
-1
pnpm-lock.yaml
pnpm-lock.yaml
+57
-157
No files found.
indexer/bigint/bigint.go
View file @
f740ac21
...
...
@@ -25,35 +25,3 @@ func Clamp(start, end *big.Int, size uint64) *big.Int {
func
Matcher
(
num
int64
)
func
(
*
big
.
Int
)
bool
{
return
func
(
bi
*
big
.
Int
)
bool
{
return
bi
.
Int64
()
==
num
}
}
type
Range
struct
{
Start
*
big
.
Int
End
*
big
.
Int
}
// Grouped will return a slice of inclusive ranges from (start, end),
// capped to the supplied size from `(start, end)`.
func
Grouped
(
start
,
end
*
big
.
Int
,
size
uint64
)
[]
Range
{
if
end
.
Cmp
(
start
)
<
0
||
size
==
0
{
return
nil
}
bigMaxDiff
:=
big
.
NewInt
(
int64
(
size
-
1
))
groups
:=
[]
Range
{}
for
start
.
Cmp
(
end
)
<=
0
{
diff
:=
new
(
big
.
Int
)
.
Sub
(
end
,
start
)
switch
{
case
diff
.
Uint64
()
+
1
<=
size
:
// re-use allocated diff as the next start
groups
=
append
(
groups
,
Range
{
start
,
end
})
start
=
diff
.
Add
(
end
,
One
)
default
:
// re-use allocated diff as the next start
end
:=
new
(
big
.
Int
)
.
Add
(
start
,
bigMaxDiff
)
groups
=
append
(
groups
,
Range
{
start
,
end
})
start
=
diff
.
Add
(
end
,
One
)
}
}
return
groups
}
indexer/bigint/bigint_test.go
View file @
f740ac21
...
...
@@ -27,46 +27,3 @@ func TestClamp(t *testing.T) {
require
.
False
(
t
,
end
==
result
)
require
.
Equal
(
t
,
uint64
(
5
),
result
.
Uint64
())
}
func
TestGrouped
(
t
*
testing
.
T
)
{
// base cases
require
.
Nil
(
t
,
Grouped
(
One
,
Zero
,
1
))
require
.
Nil
(
t
,
Grouped
(
Zero
,
One
,
0
))
// Same Start/End
group
:=
Grouped
(
One
,
One
,
1
)
require
.
Len
(
t
,
group
,
1
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
Start
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
End
)
Three
,
Five
:=
big
.
NewInt
(
3
),
big
.
NewInt
(
5
)
// One at a time
group
=
Grouped
(
One
,
Three
,
1
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
End
)
require
.
Equal
(
t
,
int64
(
1
),
group
[
0
]
.
End
.
Int64
())
require
.
Equal
(
t
,
int64
(
2
),
group
[
1
]
.
Start
.
Int64
())
require
.
Equal
(
t
,
int64
(
2
),
group
[
1
]
.
End
.
Int64
())
require
.
Equal
(
t
,
int64
(
3
),
group
[
2
]
.
Start
.
Int64
())
require
.
Equal
(
t
,
int64
(
3
),
group
[
2
]
.
End
.
Int64
())
// Split groups
group
=
Grouped
(
One
,
Five
,
3
)
require
.
Len
(
t
,
group
,
2
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
Start
)
require
.
Equal
(
t
,
int64
(
3
),
group
[
0
]
.
End
.
Int64
())
require
.
Equal
(
t
,
int64
(
4
),
group
[
1
]
.
Start
.
Int64
())
require
.
Equal
(
t
,
Five
,
group
[
1
]
.
End
)
// Encompasses the range
group
=
Grouped
(
One
,
Five
,
5
)
require
.
Len
(
t
,
group
,
1
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
Start
,
Zero
)
require
.
Equal
(
t
,
Five
,
group
[
0
]
.
End
)
// Size larger than the entire range
group
=
Grouped
(
One
,
Five
,
100
)
require
.
Len
(
t
,
group
,
1
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
Start
,
Zero
)
require
.
Equal
(
t
,
Five
,
group
[
0
]
.
End
)
}
indexer/database/blocks.go
View file @
f740ac21
...
...
@@ -2,8 +2,10 @@ package database
import
(
"errors"
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/indexer/bigint"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
...
...
@@ -51,7 +53,7 @@ type BlocksView interface {
L2BlockHeaderWithFilter
(
BlockHeader
)
(
*
L2BlockHeader
,
error
)
L2LatestBlockHeader
()
(
*
L2BlockHeader
,
error
)
Latest
Epoch
(
)
(
*
Epoch
,
error
)
Latest
ObservedEpoch
(
*
big
.
Int
,
uint64
)
(
*
Epoch
,
error
)
}
type
BlocksDB
interface
{
...
...
@@ -155,36 +157,74 @@ type Epoch struct {
L2BlockHeader
L2BlockHeader
`gorm:"embedded"`
}
// LatestEpoch return the latest epoch, seen on L1 & L2. In other words
// this returns the latest indexed L1 block that has a corresponding
// indexed L2 block with a matching L1Origin (equal timestamps).
// LatestObservedEpoch return the marker for latest epoch, observed on L1 & L2, within
// the specified bounds. In other words this returns the latest indexed L1 block that has
// a corresponding indexed L2 block with a matching L1Origin (equal timestamps).
//
// If `fromL1Height` (inclusive) is not specified, the search will start from genesis and
// continue all the way to latest indexed heights if `maxL1Range == 0`.
//
// For more, see the protocol spec:
// - https://github.com/ethereum-optimism/optimism/blob/develop/specs/derivation.md
func
(
db
*
blocksDB
)
Latest
Epoch
(
)
(
*
Epoch
,
error
)
{
latestL1Header
,
err
:=
db
.
L1LatestBlockHeader
()
if
err
!=
nil
{
return
nil
,
err
}
else
if
latestL1Header
==
nil
{
return
nil
,
nil
func
(
db
*
blocksDB
)
Latest
ObservedEpoch
(
fromL1Height
*
big
.
Int
,
maxL1Range
uint64
)
(
*
Epoch
,
error
)
{
// We use timestamps since that translates to both L1 & L2
var
fromTimestamp
,
toTimestamp
uint64
if
fromL1Height
==
nil
{
fromL1Height
=
bigint
.
Zero
}
latestL2Header
,
err
:=
db
.
L2LatestBlockHeader
()
if
err
!=
nil
{
return
nil
,
err
}
else
if
latestL2Header
==
nil
{
return
nil
,
nil
// Lower Bound (the default `fromTimestamp = 0` suffices genesis representation)
if
fromL1Height
.
BitLen
()
>
0
{
var
header
L1BlockHeader
result
:=
db
.
gorm
.
Where
(
"number = ?"
,
fromL1Height
)
.
Take
(
&
header
)
if
result
.
Error
!=
nil
{
if
errors
.
Is
(
result
.
Error
,
gorm
.
ErrRecordNotFound
)
{
return
nil
,
nil
}
return
nil
,
result
.
Error
}
fromTimestamp
=
header
.
Timestamp
}
minTime
:=
latestL1Header
.
Timestamp
if
latestL2Header
.
Timestamp
<
minTime
{
minTime
=
latestL2Header
.
Timestamp
// Upper Bound (lowest timestamp indexed between L1/L2 bounded by `maxL1Range`)
{
l1QueryFilter
:=
fmt
.
Sprintf
(
"timestamp >= %d"
,
fromTimestamp
)
if
maxL1Range
>
0
{
maxHeight
:=
new
(
big
.
Int
)
.
Add
(
fromL1Height
,
big
.
NewInt
(
int64
(
maxL1Range
)))
l1QueryFilter
=
fmt
.
Sprintf
(
"%s AND number <= %d"
,
l1QueryFilter
,
maxHeight
)
}
var
l1Header
L1BlockHeader
result
:=
db
.
gorm
.
Where
(
l1QueryFilter
)
.
Order
(
"timestamp DESC"
)
.
Take
(
&
l1Header
)
if
result
.
Error
!=
nil
{
if
errors
.
Is
(
result
.
Error
,
gorm
.
ErrRecordNotFound
)
{
return
nil
,
nil
}
return
nil
,
result
.
Error
}
toTimestamp
=
l1Header
.
Timestamp
var
l2Header
L2BlockHeader
result
=
db
.
gorm
.
Where
(
"timestamp <= ?"
,
toTimestamp
)
.
Order
(
"timestamp DESC"
)
.
Take
(
&
l2Header
)
if
result
.
Error
!=
nil
{
if
errors
.
Is
(
result
.
Error
,
gorm
.
ErrRecordNotFound
)
{
return
nil
,
nil
}
return
nil
,
result
.
Error
}
if
l2Header
.
Timestamp
<
toTimestamp
{
toTimestamp
=
l2Header
.
Timestamp
}
}
//
This is a faster query than doing an INNER JOIN between l1_block_headers and l2_block_headers
// which requires a full table scan to compute the resulting table.
l1Query
:=
db
.
gorm
.
Table
(
"l1_block_headers"
)
.
Where
(
"timestamp
<= ?"
,
minTime
)
l2Query
:=
db
.
gorm
.
Table
(
"l2_block_headers"
)
.
Where
(
"timestamp
<= ?"
,
minTime
)
//
Search for the latest indexed epoch within range. This is a faster query than doing an INNER JOIN between
//
l1_block_headers and l2_block_headers
which requires a full table scan to compute the resulting table.
l1Query
:=
db
.
gorm
.
Table
(
"l1_block_headers"
)
.
Where
(
"timestamp
>= ? AND timestamp <= ?"
,
fromTimestamp
,
toTimestamp
)
l2Query
:=
db
.
gorm
.
Table
(
"l2_block_headers"
)
.
Where
(
"timestamp
>= ? AND timestamp <= ?"
,
fromTimestamp
,
toTimestamp
)
query
:=
db
.
gorm
.
Raw
(
`SELECT * FROM (?) AS l1_block_headers, (?) AS l2_block_headers
WHERE l1_block_headers.timestamp = l2_block_headers.timestamp
ORDER BY l2_block_headers.number DESC LIMIT 1`
,
l1Query
,
l2Query
)
...
...
indexer/database/mocks.go
View file @
f740ac21
package
database
import
(
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/mock"
...
...
@@ -51,7 +53,7 @@ func (m *MockBlocksView) L2LatestBlockHeader() (*L2BlockHeader, error) {
return
args
.
Get
(
0
)
.
(
*
L2BlockHeader
),
args
.
Error
(
1
)
}
func
(
m
*
MockBlocksView
)
Latest
Epoch
(
)
(
*
Epoch
,
error
)
{
func
(
m
*
MockBlocksView
)
Latest
ObservedEpoch
(
*
big
.
Int
,
uint64
)
(
*
Epoch
,
error
)
{
args
:=
m
.
Called
()
return
args
.
Get
(
0
)
.
(
*
Epoch
),
args
.
Error
(
1
)
}
...
...
indexer/e2e_tests/setup.go
View file @
f740ac21
...
...
@@ -41,21 +41,24 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
dbUser
:=
os
.
Getenv
(
"DB_USER"
)
dbName
:=
setupTestDatabase
(
t
)
// Discard the Global Logger as each component
// has its own configured logger
// Rollup System Configuration. Unless specified,
// omit logs emitted by the various components. Maybe
// we can eventually dump these logs to a temp file
log
.
Root
()
.
SetHandler
(
log
.
DiscardHandler
())
// Rollup System Configuration and Start
opCfg
:=
op_e2e
.
DefaultSystemConfig
(
t
)
opCfg
.
DeployConfig
.
FinalizationPeriodSeconds
=
2
if
len
(
os
.
Getenv
(
"ENABLE_ROLLUP_LOGS"
))
==
0
{
t
.
Log
(
"set env 'ENABLE_ROLLUP_LOGS' to show rollup logs"
)
for
name
,
logger
:=
range
opCfg
.
Loggers
{
t
.
Logf
(
"discarding logs for %s"
,
name
)
logger
.
SetHandler
(
log
.
DiscardHandler
())
}
}
// Rollup Start
opSys
,
err
:=
opCfg
.
Start
(
t
)
require
.
NoError
(
t
,
err
)
t
.
Cleanup
(
func
()
{
opSys
.
Close
()
})
// E2E tests can run on the order of magnitude of minutes. Once
// the system is running, mark this test for Parallel execution
t
.
Parallel
()
// Indexer Configuration and Start
indexerCfg
:=
config
.
Config
{
DB
:
config
.
DBConfig
{
...
...
@@ -86,8 +89,14 @@ func createE2ETestSuite(t *testing.T) E2ETestSuite {
MetricsServer
:
config
.
ServerConfig
{
Host
:
"127.0.0.1"
,
Port
:
0
},
}
// Emit debug log levels
db
,
err
:=
database
.
NewDB
(
testlog
.
Logger
(
t
,
log
.
LvlDebug
)
.
New
(
"role"
,
"db"
),
indexerCfg
.
DB
)
// E2E tests can run on the order of magnitude of minutes. Once
// the system is running, mark this test for Parallel execution
t
.
Parallel
()
// provide a DB for the unit test. disable logging
silentLog
:=
testlog
.
Logger
(
t
,
log
.
LvlInfo
)
silentLog
.
SetHandler
(
log
.
DiscardHandler
())
db
,
err
:=
database
.
NewDB
(
silentLog
,
indexerCfg
.
DB
)
require
.
NoError
(
t
,
err
)
t
.
Cleanup
(
func
()
{
db
.
Close
()
})
...
...
@@ -138,7 +147,6 @@ func setupTestDatabase(t *testing.T) string {
User
:
user
,
Password
:
""
,
}
// NewDB will create the database schema
silentLog
:=
log
.
New
()
silentLog
.
SetHandler
(
log
.
DiscardHandler
())
...
...
indexer/etl/etl.go
View file @
f740ac21
...
...
@@ -71,10 +71,10 @@ func (etl *ETL) Start(ctx context.Context) error {
etl
.
log
.
Error
(
"error querying for headers"
,
"err"
,
err
)
}
else
if
len
(
newHeaders
)
==
0
{
etl
.
log
.
Warn
(
"no new headers. processor unexpectedly at head..."
)
}
else
{
headers
=
newHeaders
etl
.
metrics
.
RecordBatchHeaders
(
len
(
newHeaders
))
}
headers
=
newHeaders
etl
.
metrics
.
RecordBatchHeaders
(
len
(
newHeaders
))
}
// only clear the reference if we were able to process this batch
...
...
@@ -107,7 +107,7 @@ func (etl *ETL) processBatch(headers []types.Header) error {
headersWithLog
:=
make
(
map
[
common
.
Hash
]
bool
,
len
(
headers
))
logs
,
err
:=
etl
.
EthClient
.
FilterLogs
(
ethereum
.
FilterQuery
{
FromBlock
:
firstHeader
.
Number
,
ToBlock
:
lastHeader
.
Number
,
Addresses
:
etl
.
contracts
})
if
err
!=
nil
{
batchLog
.
Info
(
"
unable
to extract logs"
,
"err"
,
err
)
batchLog
.
Info
(
"
failed
to extract logs"
,
"err"
,
err
)
return
err
}
if
len
(
logs
)
>
0
{
...
...
@@ -118,7 +118,8 @@ func (etl *ETL) processBatch(headers []types.Header) error {
log
:=
logs
[
i
]
if
_
,
ok
:=
headerMap
[
log
.
BlockHash
];
!
ok
{
// NOTE. Definitely an error state if the none of the headers were re-orged out in between
// the blocks and logs retrieval operations. However, we need to gracefully handle reorgs
// the blocks and logs retrieval operations. Unlikely as long as the confirmation depth has
// been appropriately set or when we get to natively handling reorgs.
batchLog
.
Error
(
"log found with block hash not in the batch"
,
"block_hash"
,
logs
[
i
]
.
BlockHash
,
"log_index"
,
logs
[
i
]
.
Index
)
return
errors
.
New
(
"parsed log with a block hash not in the batch"
)
}
...
...
indexer/etl/metrics.go
View file @
f740ac21
...
...
@@ -16,7 +16,6 @@ type Metricer interface {
RecordInterval
()
(
done
func
(
err
error
))
// Batch Extraction
RecordBatchFailure
()
RecordBatchLatestHeight
(
height
*
big
.
Int
)
RecordBatchHeaders
(
size
int
)
RecordBatchLog
(
contractAddress
common
.
Address
)
...
...
@@ -108,17 +107,13 @@ func (m *etlMetrics) RecordInterval() func(error) {
timer
:=
prometheus
.
NewTimer
(
m
.
intervalDuration
)
return
func
(
err
error
)
{
if
err
!=
nil
{
m
.
RecordBatchFailure
()
m
.
batchFailures
.
Inc
()
}
timer
.
ObserveDuration
()
}
}
func
(
m
*
etlMetrics
)
RecordBatchFailure
()
{
m
.
batchFailures
.
Inc
()
}
func
(
m
*
etlMetrics
)
RecordBatchLatestHeight
(
height
*
big
.
Int
)
{
m
.
batchLatestHeight
.
Set
(
float64
(
height
.
Uint64
()))
}
...
...
indexer/indexer.go
View file @
f740ac21
...
...
@@ -20,6 +20,7 @@ import (
"github.com/ethereum-optimism/optimism/indexer/etl"
"github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum-optimism/optimism/indexer/processors"
"github.com/ethereum-optimism/optimism/indexer/processors/bridge"
"github.com/ethereum-optimism/optimism/op-service/httputil"
"github.com/ethereum-optimism/optimism/op-service/metrics"
)
...
...
@@ -82,7 +83,7 @@ func NewIndexer(
}
// Bridge
bridgeProcessor
,
err
:=
processors
.
NewBridgeProcessor
(
log
,
db
,
l1Etl
,
chainConfig
)
bridgeProcessor
,
err
:=
processors
.
NewBridgeProcessor
(
log
,
db
,
bridge
.
NewMetrics
(
metricsRegistry
),
l1Etl
,
chainConfig
)
if
err
!=
nil
{
return
nil
,
err
}
...
...
indexer/node/metrics.go
View file @
f740ac21
...
...
@@ -7,6 +7,7 @@ import (
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/rpc"
"github.com/prometheus/client_golang/prometheus"
)
...
...
indexer/processors/bridge.go
View file @
f740ac21
This diff is collapsed.
Click to expand it.
indexer/processors/bridge/l1_bridge_processor.go
View file @
f740ac21
...
...
@@ -8,6 +8,7 @@ import (
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/processors/contracts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
...
...
@@ -17,7 +18,7 @@ import (
// 1. OptimismPortal
// 2. L1CrossDomainMessenger
// 3. L1StandardBridge
func
L1ProcessInitiatedBridgeEvents
(
log
log
.
Logger
,
db
*
database
.
DB
,
l1Contracts
config
.
L1Contracts
,
fromHeight
,
toHeight
*
big
.
Int
)
error
{
func
L1ProcessInitiatedBridgeEvents
(
log
log
.
Logger
,
db
*
database
.
DB
,
metrics
L1Metricer
,
l1Contracts
config
.
L1Contracts
,
fromHeight
,
toHeight
*
big
.
Int
)
error
{
// (1) OptimismPortal
optimismPortalTxDeposits
,
err
:=
contracts
.
OptimismPortalTransactionDepositEvents
(
l1Contracts
.
OptimismPortalProxy
,
db
,
fromHeight
,
toHeight
)
if
err
!=
nil
{
...
...
@@ -44,6 +45,7 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
if
err
:=
db
.
BridgeTransactions
.
StoreL1TransactionDeposits
(
transactionDeposits
);
err
!=
nil
{
return
err
}
metrics
.
RecordL1TransactionDeposits
(
len
(
transactionDeposits
))
}
// (2) L1CrossDomainMessenger
...
...
@@ -56,7 +58,7 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
}
sentMessages
:=
make
(
map
[
logKey
]
*
contracts
.
CrossDomainMessengerSentMessageEvent
,
len
(
crossDomainSentMessages
))
l1B
ridgeMessages
:=
make
([]
database
.
L1BridgeMessage
,
len
(
crossDomainSentMessages
))
b
ridgeMessages
:=
make
([]
database
.
L1BridgeMessage
,
len
(
crossDomainSentMessages
))
for
i
:=
range
crossDomainSentMessages
{
sentMessage
:=
crossDomainSentMessages
[
i
]
sentMessages
[
logKey
{
sentMessage
.
Event
.
BlockHash
,
sentMessage
.
Event
.
LogIndex
}]
=
&
sentMessage
...
...
@@ -68,12 +70,13 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
return
fmt
.
Errorf
(
"expected TransactionDeposit preceding SentMessage event. tx_hash = %s"
,
sentMessage
.
Event
.
TransactionHash
.
String
())
}
l1B
ridgeMessages
[
i
]
=
database
.
L1BridgeMessage
{
TransactionSourceHash
:
portalDeposit
.
DepositTx
.
SourceHash
,
BridgeMessage
:
sentMessage
.
BridgeMessage
}
b
ridgeMessages
[
i
]
=
database
.
L1BridgeMessage
{
TransactionSourceHash
:
portalDeposit
.
DepositTx
.
SourceHash
,
BridgeMessage
:
sentMessage
.
BridgeMessage
}
}
if
len
(
l1B
ridgeMessages
)
>
0
{
if
err
:=
db
.
BridgeMessages
.
StoreL1BridgeMessages
(
l1B
ridgeMessages
);
err
!=
nil
{
if
len
(
b
ridgeMessages
)
>
0
{
if
err
:=
db
.
BridgeMessages
.
StoreL1BridgeMessages
(
b
ridgeMessages
);
err
!=
nil
{
return
err
}
metrics
.
RecordL1CrossDomainSentMessages
(
len
(
bridgeMessages
))
}
// (3) L1StandardBridge
...
...
@@ -85,7 +88,8 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
log
.
Info
(
"detected bridge deposits"
,
"size"
,
len
(
initiatedBridges
))
}
l1BridgeDeposits
:=
make
([]
database
.
L1BridgeDeposit
,
len
(
initiatedBridges
))
bridgedTokens
:=
make
(
map
[
common
.
Address
]
int
)
bridgeDeposits
:=
make
([]
database
.
L1BridgeDeposit
,
len
(
initiatedBridges
))
for
i
:=
range
initiatedBridges
{
initiatedBridge
:=
initiatedBridges
[
i
]
...
...
@@ -102,15 +106,19 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
}
initiatedBridge
.
BridgeTransfer
.
CrossDomainMessageHash
=
&
sentMessage
.
BridgeMessage
.
MessageHash
l1BridgeDeposits
[
i
]
=
database
.
L1BridgeDeposit
{
bridgedTokens
[
initiatedBridge
.
BridgeTransfer
.
TokenPair
.
LocalTokenAddress
]
++
bridgeDeposits
[
i
]
=
database
.
L1BridgeDeposit
{
TransactionSourceHash
:
portalDeposit
.
DepositTx
.
SourceHash
,
BridgeTransfer
:
initiatedBridge
.
BridgeTransfer
,
}
}
if
len
(
l1B
ridgeDeposits
)
>
0
{
if
err
:=
db
.
BridgeTransfers
.
StoreL1BridgeDeposits
(
l1B
ridgeDeposits
);
err
!=
nil
{
if
len
(
b
ridgeDeposits
)
>
0
{
if
err
:=
db
.
BridgeTransfers
.
StoreL1BridgeDeposits
(
b
ridgeDeposits
);
err
!=
nil
{
return
err
}
for
tokenAddr
,
size
:=
range
bridgedTokens
{
metrics
.
RecordL1InitiatedBridgeTransfers
(
tokenAddr
,
size
)
}
}
return
nil
...
...
@@ -121,7 +129,7 @@ func L1ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
// 1. OptimismPortal (Bedrock prove & finalize steps)
// 2. L1CrossDomainMessenger (relayMessage marker)
// 3. L1StandardBridge (no-op, since this is simply a wrapper over the L1CrossDomainMessenger)
func
L1ProcessFinalizedBridgeEvents
(
log
log
.
Logger
,
db
*
database
.
DB
,
l1Contracts
config
.
L1Contracts
,
fromHeight
,
toHeight
*
big
.
Int
)
error
{
func
L1ProcessFinalizedBridgeEvents
(
log
log
.
Logger
,
db
*
database
.
DB
,
metrics
L1Metricer
,
l1Contracts
config
.
L1Contracts
,
fromHeight
,
toHeight
*
big
.
Int
)
error
{
// (1) OptimismPortal (proven withdrawals)
provenWithdrawals
,
err
:=
contracts
.
OptimismPortalWithdrawalProvenEvents
(
l1Contracts
.
OptimismPortalProxy
,
db
,
fromHeight
,
toHeight
)
if
err
!=
nil
{
...
...
@@ -146,6 +154,9 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
return
err
}
}
if
len
(
provenWithdrawals
)
>
0
{
metrics
.
RecordL1ProvenWithdrawals
(
len
(
provenWithdrawals
))
}
// (2) OptimismPortal (finalized withdrawals)
finalizedWithdrawals
,
err
:=
contracts
.
OptimismPortalWithdrawalFinalizedEvents
(
l1Contracts
.
OptimismPortalProxy
,
db
,
fromHeight
,
toHeight
)
...
...
@@ -171,6 +182,9 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
return
err
}
}
if
len
(
finalizedWithdrawals
)
>
0
{
metrics
.
RecordL1FinalizedWithdrawals
(
len
(
finalizedWithdrawals
))
}
// (3) L1CrossDomainMessenger
crossDomainRelayedMessages
,
err
:=
contracts
.
CrossDomainMessengerRelayedMessageEvents
(
"l1"
,
l1Contracts
.
L1CrossDomainMessengerProxy
,
db
,
fromHeight
,
toHeight
)
...
...
@@ -198,6 +212,9 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
return
err
}
}
if
len
(
crossDomainRelayedMessages
)
>
0
{
metrics
.
RecordL1CrossDomainRelayedMessages
(
len
(
crossDomainRelayedMessages
))
}
// (4) L1StandardBridge
finalizedBridges
,
err
:=
contracts
.
StandardBridgeFinalizedEvents
(
"l1"
,
l1Contracts
.
L1StandardBridgeProxy
,
db
,
fromHeight
,
toHeight
)
...
...
@@ -208,6 +225,7 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
log
.
Info
(
"detected finalized bridge withdrawals"
,
"size"
,
len
(
finalizedBridges
))
}
finalizedTokens
:=
make
(
map
[
common
.
Address
]
int
)
for
i
:=
range
finalizedBridges
{
// Nothing actionable on the database. However, we can treat the relayed message
// as an invariant by ensuring we can query for a deposit by the same hash
...
...
@@ -218,8 +236,7 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
return
fmt
.
Errorf
(
"expected RelayedMessage following BridgeFinalized event. tx_hash = %s"
,
finalizedBridge
.
Event
.
TransactionHash
.
String
())
}
// Since the message hash is computed from the relayed message, this ensures the deposit fields must match. For good measure,
// we may choose to make sure `withdrawal.BridgeTransfer` matches with the finalized bridge
// Since the message hash is computed from the relayed message, this ensures the deposit fields must match
withdrawal
,
err
:=
db
.
BridgeTransfers
.
L2BridgeWithdrawalWithFilter
(
database
.
BridgeTransfer
{
CrossDomainMessageHash
:
&
relayedMessage
.
MessageHash
})
if
err
!=
nil
{
return
err
...
...
@@ -227,6 +244,13 @@ func L1ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l1Contracts
log
.
Error
(
"missing L2StandardBridge withdrawal on L1 finalization"
,
"tx_hash"
,
finalizedBridge
.
Event
.
TransactionHash
.
String
())
return
fmt
.
Errorf
(
"missing L2StandardBridge withdrawal on L1 finalization. tx_hash: %s"
,
finalizedBridge
.
Event
.
TransactionHash
.
String
())
}
finalizedTokens
[
finalizedBridge
.
BridgeTransfer
.
TokenPair
.
LocalTokenAddress
]
++
}
if
len
(
finalizedBridges
)
>
0
{
for
tokenAddr
,
size
:=
range
finalizedTokens
{
metrics
.
RecordL1FinalizedBridgeTransfers
(
tokenAddr
,
size
)
}
}
// a-ok!
...
...
indexer/processors/bridge/l2_bridge_processor.go
View file @
f740ac21
...
...
@@ -9,6 +9,7 @@ import (
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/processors/contracts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
...
...
@@ -17,7 +18,7 @@ import (
// 1. OptimismPortal
// 2. L2CrossDomainMessenger
// 3. L2StandardBridge
func
L2ProcessInitiatedBridgeEvents
(
log
log
.
Logger
,
db
*
database
.
DB
,
l2Contracts
config
.
L2Contracts
,
fromHeight
,
toHeight
*
big
.
Int
)
error
{
func
L2ProcessInitiatedBridgeEvents
(
log
log
.
Logger
,
db
*
database
.
DB
,
metrics
L2Metricer
,
l2Contracts
config
.
L2Contracts
,
fromHeight
,
toHeight
*
big
.
Int
)
error
{
// (1) L2ToL1MessagePasser
l2ToL1MPMessagesPassed
,
err
:=
contracts
.
L2ToL1MessagePasserMessagePassedEvents
(
l2Contracts
.
L2ToL1MessagePasser
,
db
,
fromHeight
,
toHeight
)
if
err
!=
nil
{
...
...
@@ -44,6 +45,7 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
if
err
:=
db
.
BridgeTransactions
.
StoreL2TransactionWithdrawals
(
transactionWithdrawals
);
err
!=
nil
{
return
err
}
metrics
.
RecordL2TransactionWithdrawals
(
len
(
transactionWithdrawals
))
}
// (2) L2CrossDomainMessenger
...
...
@@ -56,7 +58,7 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
}
sentMessages
:=
make
(
map
[
logKey
]
*
contracts
.
CrossDomainMessengerSentMessageEvent
,
len
(
crossDomainSentMessages
))
l2B
ridgeMessages
:=
make
([]
database
.
L2BridgeMessage
,
len
(
crossDomainSentMessages
))
b
ridgeMessages
:=
make
([]
database
.
L2BridgeMessage
,
len
(
crossDomainSentMessages
))
for
i
:=
range
crossDomainSentMessages
{
sentMessage
:=
crossDomainSentMessages
[
i
]
sentMessages
[
logKey
{
sentMessage
.
Event
.
BlockHash
,
sentMessage
.
Event
.
LogIndex
}]
=
&
sentMessage
...
...
@@ -68,13 +70,13 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
return
fmt
.
Errorf
(
"expected MessagePassedEvent preceding SentMessage. tx_hash = %s"
,
sentMessage
.
Event
.
TransactionHash
.
String
())
}
l2B
ridgeMessages
[
i
]
=
database
.
L2BridgeMessage
{
TransactionWithdrawalHash
:
messagePassed
.
WithdrawalHash
,
BridgeMessage
:
sentMessage
.
BridgeMessage
}
b
ridgeMessages
[
i
]
=
database
.
L2BridgeMessage
{
TransactionWithdrawalHash
:
messagePassed
.
WithdrawalHash
,
BridgeMessage
:
sentMessage
.
BridgeMessage
}
}
if
len
(
l2BridgeMessages
)
>
0
{
if
err
:=
db
.
BridgeMessages
.
StoreL2BridgeMessages
(
l2BridgeMessages
);
err
!=
nil
{
if
len
(
bridgeMessages
)
>
0
{
if
err
:=
db
.
BridgeMessages
.
StoreL2BridgeMessages
(
bridgeMessages
);
err
!=
nil
{
return
err
}
metrics
.
RecordL2CrossDomainSentMessages
(
len
(
bridgeMessages
))
}
// (3) L2StandardBridge
...
...
@@ -86,7 +88,8 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
log
.
Info
(
"detected bridge withdrawals"
,
"size"
,
len
(
initiatedBridges
))
}
l2BridgeWithdrawals
:=
make
([]
database
.
L2BridgeWithdrawal
,
len
(
initiatedBridges
))
bridgedTokens
:=
make
(
map
[
common
.
Address
]
int
)
bridgeWithdrawals
:=
make
([]
database
.
L2BridgeWithdrawal
,
len
(
initiatedBridges
))
for
i
:=
range
initiatedBridges
{
initiatedBridge
:=
initiatedBridges
[
i
]
...
...
@@ -103,13 +106,19 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
}
initiatedBridge
.
BridgeTransfer
.
CrossDomainMessageHash
=
&
sentMessage
.
BridgeMessage
.
MessageHash
l2BridgeWithdrawals
[
i
]
=
database
.
L2BridgeWithdrawal
{
TransactionWithdrawalHash
:
messagePassed
.
WithdrawalHash
,
BridgeTransfer
:
initiatedBridge
.
BridgeTransfer
}
bridgedTokens
[
initiatedBridge
.
BridgeTransfer
.
TokenPair
.
LocalTokenAddress
]
++
bridgeWithdrawals
[
i
]
=
database
.
L2BridgeWithdrawal
{
TransactionWithdrawalHash
:
messagePassed
.
WithdrawalHash
,
BridgeTransfer
:
initiatedBridge
.
BridgeTransfer
,
}
}
if
len
(
l2BridgeWithdrawals
)
>
0
{
if
err
:=
db
.
BridgeTransfers
.
StoreL2BridgeWithdrawals
(
l2BridgeWithdrawals
);
err
!=
nil
{
if
len
(
bridgeWithdrawals
)
>
0
{
if
err
:=
db
.
BridgeTransfers
.
StoreL2BridgeWithdrawals
(
bridgeWithdrawals
);
err
!=
nil
{
return
err
}
for
tokenAddr
,
size
:=
range
bridgedTokens
{
metrics
.
RecordL2InitiatedBridgeTransfers
(
tokenAddr
,
size
)
}
}
// a-ok!
...
...
@@ -122,7 +131,7 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
// 2. L2StandardBridge (no-op, since this is simply a wrapper over the L2CrossDomainMEssenger)
//
// NOTE: Unlike L1, there's no L2ToL1MessagePasser stage since transaction deposits are apart of the block derivation process.
func
L2ProcessFinalizedBridgeEvents
(
log
log
.
Logger
,
db
*
database
.
DB
,
l2Contracts
config
.
L2Contracts
,
fromHeight
,
toHeight
*
big
.
Int
)
error
{
func
L2ProcessFinalizedBridgeEvents
(
log
log
.
Logger
,
db
*
database
.
DB
,
metrics
L2Metricer
,
l2Contracts
config
.
L2Contracts
,
fromHeight
,
toHeight
*
big
.
Int
)
error
{
// (1) L2CrossDomainMessenger
crossDomainRelayedMessages
,
err
:=
contracts
.
CrossDomainMessengerRelayedMessageEvents
(
"l2"
,
l2Contracts
.
L2CrossDomainMessenger
,
db
,
fromHeight
,
toHeight
)
if
err
!=
nil
{
...
...
@@ -149,6 +158,9 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
return
err
}
}
if
len
(
relayedMessages
)
>
0
{
metrics
.
RecordL2CrossDomainRelayedMessages
(
len
(
relayedMessages
))
}
// (2) L2StandardBridge
finalizedBridges
,
err
:=
contracts
.
StandardBridgeFinalizedEvents
(
"l2"
,
l2Contracts
.
L2StandardBridge
,
db
,
fromHeight
,
toHeight
)
...
...
@@ -159,6 +171,7 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
log
.
Info
(
"detected finalized bridge deposits"
,
"size"
,
len
(
finalizedBridges
))
}
finalizedTokens
:=
make
(
map
[
common
.
Address
]
int
)
for
i
:=
range
finalizedBridges
{
// Nothing actionable on the database. However, we can treat the relayed message
// as an invariant by ensuring we can query for a deposit by the same hash
...
...
@@ -169,8 +182,7 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
return
fmt
.
Errorf
(
"expected RelayedMessage following BridgeFinalized event. tx_hash = %s"
,
finalizedBridge
.
Event
.
TransactionHash
.
String
())
}
// Since the message hash is computed from the relayed message, this ensures the withdrawal fields must match. For good measure,
// we may choose to make sure `deposit.BridgeTransfer` matches with the finalized bridge
// Since the message hash is computed from the relayed message, this ensures the withdrawal fields must match
deposit
,
err
:=
db
.
BridgeTransfers
.
L1BridgeDepositWithFilter
(
database
.
BridgeTransfer
{
CrossDomainMessageHash
:
&
relayedMessage
.
MessageHash
})
if
err
!=
nil
{
return
err
...
...
@@ -178,6 +190,13 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, l2Contracts
log
.
Error
(
"missing L1StandardBridge deposit on L2 finalization"
,
"tx_hash"
,
finalizedBridge
.
Event
.
TransactionHash
.
String
())
return
errors
.
New
(
"missing L1StandardBridge deposit on L2 finalization"
)
}
finalizedTokens
[
finalizedBridge
.
BridgeTransfer
.
TokenPair
.
LocalTokenAddress
]
++
}
if
len
(
finalizedBridges
)
>
0
{
for
tokenAddr
,
size
:=
range
finalizedTokens
{
metrics
.
RecordL2FinalizedBridgeTransfers
(
tokenAddr
,
size
)
}
}
// a-ok!
...
...
indexer/processors/bridge/legacy_bridge_processor.go
View file @
f740ac21
This diff is collapsed.
Click to expand it.
indexer/processors/bridge/metrics.go
0 → 100644
View file @
f740ac21
package
bridge
import
(
"math/big"
"github.com/ethereum-optimism/optimism/op-service/metrics"
"github.com/ethereum/go-ethereum/common"
"github.com/prometheus/client_golang/prometheus"
)
// MetricsNamespace is the prometheus namespace shared by every metric
// emitted by the bridge processor.
var MetricsNamespace = "bridge"
// L1Metricer records progress and event counts observed while indexing
// L1-side bridge activity.
type L1Metricer interface {
	// RecordLatestIndexedL1Height sets the most recently processed L1 block height.
	RecordLatestIndexedL1Height(height *big.Int)

	// RecordL1TransactionDeposits adds the number of newly indexed deposit transactions.
	RecordL1TransactionDeposits(size int)
	// RecordL1ProvenWithdrawals adds the number of newly proven withdrawals seen on L1.
	RecordL1ProvenWithdrawals(size int)
	// RecordL1FinalizedWithdrawals adds the number of newly finalized withdrawals seen on L1.
	RecordL1FinalizedWithdrawals(size int)

	// RecordL1CrossDomainSentMessages adds the number of cross-domain messages sent from L1.
	RecordL1CrossDomainSentMessages(size int)
	// RecordL1CrossDomainRelayedMessages adds the number of cross-domain messages relayed on L1.
	RecordL1CrossDomainRelayedMessages(size int)

	// RecordL1InitiatedBridgeTransfers adds the number of token transfers initiated on L1
	// for the given local token address.
	RecordL1InitiatedBridgeTransfers(token common.Address, size int)
	// RecordL1FinalizedBridgeTransfers adds the number of token transfers finalized on L1
	// for the given local token address.
	RecordL1FinalizedBridgeTransfers(token common.Address, size int)
}
// L2Metricer records progress and event counts observed while indexing
// L2-side bridge activity.
type L2Metricer interface {
	// RecordLatestIndexedL2Height sets the most recently processed L2 block height.
	RecordLatestIndexedL2Height(height *big.Int)

	// RecordL2TransactionWithdrawals adds the number of newly indexed withdrawal transactions.
	RecordL2TransactionWithdrawals(size int)

	// RecordL2CrossDomainSentMessages adds the number of cross-domain messages sent from L2.
	RecordL2CrossDomainSentMessages(size int)
	// RecordL2CrossDomainRelayedMessages adds the number of cross-domain messages relayed on L2.
	RecordL2CrossDomainRelayedMessages(size int)

	// RecordL2InitiatedBridgeTransfers adds the number of token transfers initiated on L2
	// for the given local token address.
	RecordL2InitiatedBridgeTransfers(token common.Address, size int)
	// RecordL2FinalizedBridgeTransfers adds the number of token transfers finalized on L2
	// for the given local token address.
	RecordL2FinalizedBridgeTransfers(token common.Address, size int)
}
// Metricer is the full metrics surface of the bridge processor: both the
// L1 and L2 recorders plus interval bookkeeping for the processing loop.
type Metricer interface {
	L1Metricer
	L2Metricer

	// RecordInterval marks the start of a processing-loop iteration and
	// returns a callback to invoke when the iteration completes, passing
	// the iteration's terminal error (nil on success).
	RecordInterval() (done func(err error))
}
// bridgeMetrics is the prometheus-backed implementation of Metricer.
type bridgeMetrics struct {
	// processing-loop bookkeeping
	intervalTick     prometheus.Counter
	intervalDuration prometheus.Histogram
	intervalFailures prometheus.Counter

	// latest indexed block heights, per chain
	latestL1Height prometheus.Gauge
	latestL2Height prometheus.Gauge

	// deposit/withdrawal transaction counters
	txDeposits    prometheus.Counter
	txWithdrawals prometheus.Counter

	// withdrawal lifecycle counters (L1 side)
	provenWithdrawals    prometheus.Counter
	finalizedWithdrawals prometheus.Counter

	// cross-domain message counters, labeled by chain ("l1"/"l2")
	sentMessages    *prometheus.CounterVec
	relayedMessages *prometheus.CounterVec

	// token transfer counters, labeled by chain and token address
	initiatedBridgeTransfers *prometheus.CounterVec
	finalizedBridgeTransfers *prometheus.CounterVec
}
func
NewMetrics
(
registry
*
prometheus
.
Registry
)
Metricer
{
factory
:=
metrics
.
With
(
registry
)
return
&
bridgeMetrics
{
intervalTick
:
factory
.
NewCounter
(
prometheus
.
CounterOpts
{
Namespace
:
MetricsNamespace
,
Name
:
"intervals_total"
,
Help
:
"number of times processing loop has run"
,
}),
intervalDuration
:
factory
.
NewHistogram
(
prometheus
.
HistogramOpts
{
Namespace
:
MetricsNamespace
,
Name
:
"interval_seconds"
,
Help
:
"duration elapsed in the processing loop"
,
}),
intervalFailures
:
factory
.
NewCounter
(
prometheus
.
CounterOpts
{
Namespace
:
MetricsNamespace
,
Name
:
"failures_total"
,
Help
:
"number of failures encountered"
,
}),
latestL1Height
:
factory
.
NewGauge
(
prometheus
.
GaugeOpts
{
Namespace
:
MetricsNamespace
,
Subsystem
:
"l1"
,
Name
:
"height"
,
Help
:
"the latest processed l1 block height"
,
}),
latestL2Height
:
factory
.
NewGauge
(
prometheus
.
GaugeOpts
{
Namespace
:
MetricsNamespace
,
Subsystem
:
"l2"
,
Name
:
"height"
,
Help
:
"the latest processed l2 block height"
,
}),
txDeposits
:
factory
.
NewCounter
(
prometheus
.
CounterOpts
{
Namespace
:
MetricsNamespace
,
Name
:
"tx_deposits"
,
Help
:
"number of processed transactions deposited from l1"
,
}),
txWithdrawals
:
factory
.
NewCounter
(
prometheus
.
CounterOpts
{
Namespace
:
MetricsNamespace
,
Name
:
"tx_withdrawals"
,
Help
:
"number of processed transactions withdrawn from l2"
,
}),
provenWithdrawals
:
factory
.
NewCounter
(
prometheus
.
CounterOpts
{
Namespace
:
MetricsNamespace
,
Name
:
"proven_withdrawals"
,
Help
:
"number of proven tx withdrawals on l1"
,
}),
finalizedWithdrawals
:
factory
.
NewCounter
(
prometheus
.
CounterOpts
{
Namespace
:
MetricsNamespace
,
Name
:
"finalized_withdrawals"
,
Help
:
"number of finalized tx withdrawals on l1"
,
}),
sentMessages
:
factory
.
NewCounterVec
(
prometheus
.
CounterOpts
{
Namespace
:
MetricsNamespace
,
Name
:
"sent_messages"
,
Help
:
"number of bridged messages between l1 and l2"
,
},
[]
string
{
"chain"
,
}),
relayedMessages
:
factory
.
NewCounterVec
(
prometheus
.
CounterOpts
{
Namespace
:
MetricsNamespace
,
Name
:
"relayed_messages"
,
Help
:
"number of relayed messages between l1 and l2"
,
},
[]
string
{
"chain"
,
}),
initiatedBridgeTransfers
:
factory
.
NewCounterVec
(
prometheus
.
CounterOpts
{
Namespace
:
MetricsNamespace
,
Name
:
"initiated_token_transfers"
,
Help
:
"number of bridged tokens between l1 and l2"
,
},
[]
string
{
"chain"
,
"token_address"
,
}),
finalizedBridgeTransfers
:
factory
.
NewCounterVec
(
prometheus
.
CounterOpts
{
Namespace
:
MetricsNamespace
,
Name
:
"finalized_token_transfers"
,
Help
:
"number of finalized token transfers between l1 and l2"
,
},
[]
string
{
"chain"
,
"token_address"
,
}),
}
}
func
(
m
*
bridgeMetrics
)
RecordInterval
()
func
(
error
)
{
m
.
intervalTick
.
Inc
()
timer
:=
prometheus
.
NewTimer
(
m
.
intervalDuration
)
return
func
(
err
error
)
{
timer
.
ObserveDuration
()
if
err
!=
nil
{
m
.
intervalFailures
.
Inc
()
}
}
}
// L1Metricer
// RecordLatestIndexedL1Height sets the l1 height gauge to the given block height.
func (m *bridgeMetrics) RecordLatestIndexedL1Height(height *big.Int) {
	m.latestL1Height.Set(float64(height.Uint64()))
}
// RecordL1TransactionDeposits increments the deposit transaction counter by size.
func (m *bridgeMetrics) RecordL1TransactionDeposits(size int) {
	m.txDeposits.Add(float64(size))
}
// RecordL1ProvenWithdrawals increments the proven-withdrawal counter by size.
func (m *bridgeMetrics) RecordL1ProvenWithdrawals(size int) {
	m.provenWithdrawals.Add(float64(size))
}
// RecordL1FinalizedWithdrawals increments the finalized-withdrawal counter by size.
func (m *bridgeMetrics) RecordL1FinalizedWithdrawals(size int) {
	m.finalizedWithdrawals.Add(float64(size))
}
// RecordL1CrossDomainSentMessages increments the sent-message counter for the "l1" chain label.
func (m *bridgeMetrics) RecordL1CrossDomainSentMessages(size int) {
	m.sentMessages.WithLabelValues("l1").Add(float64(size))
}
// RecordL1CrossDomainRelayedMessages increments the relayed-message counter for the "l1" chain label.
func (m *bridgeMetrics) RecordL1CrossDomainRelayedMessages(size int) {
	m.relayedMessages.WithLabelValues("l1").Add(float64(size))
}
// RecordL1InitiatedBridgeTransfers increments the initiated-transfer counter
// for the "l1" chain label and the given token address.
func (m *bridgeMetrics) RecordL1InitiatedBridgeTransfers(tokenAddr common.Address, size int) {
	m.initiatedBridgeTransfers.WithLabelValues("l1", tokenAddr.String()).Add(float64(size))
}
// RecordL1FinalizedBridgeTransfers increments the finalized-transfer counter
// for the "l1" chain label and the given token address.
func (m *bridgeMetrics) RecordL1FinalizedBridgeTransfers(tokenAddr common.Address, size int) {
	m.finalizedBridgeTransfers.WithLabelValues("l1", tokenAddr.String()).Add(float64(size))
}
// L2Metricer
// RecordLatestIndexedL2Height sets the l2 height gauge to the given block height.
func (m *bridgeMetrics) RecordLatestIndexedL2Height(height *big.Int) {
	m.latestL2Height.Set(float64(height.Uint64()))
}
// RecordL2TransactionWithdrawals increments the withdrawal transaction counter by size.
func (m *bridgeMetrics) RecordL2TransactionWithdrawals(size int) {
	m.txWithdrawals.Add(float64(size))
}
// RecordL2CrossDomainSentMessages increments the sent-message counter for the "l2" chain label.
func (m *bridgeMetrics) RecordL2CrossDomainSentMessages(size int) {
	m.sentMessages.WithLabelValues("l2").Add(float64(size))
}
// RecordL2CrossDomainRelayedMessages increments the relayed-message counter for the "l2" chain label.
func (m *bridgeMetrics) RecordL2CrossDomainRelayedMessages(size int) {
	m.relayedMessages.WithLabelValues("l2").Add(float64(size))
}
// RecordL2InitiatedBridgeTransfers increments the initiated-transfer counter
// for the "l2" chain label and the given token address.
func (m *bridgeMetrics) RecordL2InitiatedBridgeTransfers(tokenAddr common.Address, size int) {
	m.initiatedBridgeTransfers.WithLabelValues("l2", tokenAddr.String()).Add(float64(size))
}
// RecordL2FinalizedBridgeTransfers increments the finalized-transfer counter
// for the "l2" chain label and the given token address.
func (m *bridgeMetrics) RecordL2FinalizedBridgeTransfers(tokenAddr common.Address, size int) {
	m.finalizedBridgeTransfers.WithLabelValues("l2", tokenAddr.String()).Add(float64(size))
}
indexer/processors/contracts/legacy_standard_bridge.go
View file @
f740ac21
...
...
@@ -15,21 +15,23 @@ type LegacyBridgeEvent struct {
}
func
L1StandardBridgeLegacyDepositInitiatedEvents
(
contractAddress
common
.
Address
,
db
*
database
.
DB
,
fromHeight
,
toHeight
*
big
.
Int
)
([]
LegacyBridgeEvent
,
error
)
{
// The L1StandardBridge ABI contains the legacy events
l1StandardBridgeAbi
,
err
:=
bindings
.
L1StandardBridgeMetaData
.
GetAbi
()
if
err
!=
nil
{
return
nil
,
err
}
// The L1StandardBridge contains the legacy events
ethDepositEventAbi
:=
l1StandardBridgeAbi
.
Events
[
"ETHDepositInitiated"
]
erc20DepositEventAbi
:=
l1StandardBridgeAbi
.
Events
[
"ERC20DepositInitiated"
]
// Grab both ETH & ERC20 Events
ethDepositEvents
,
err
:=
db
.
ContractEvents
.
L1ContractEventsWithFilter
(
database
.
ContractEvent
{
ContractAddress
:
contractAddress
,
EventSignature
:
ethDepositEventAbi
.
ID
},
fromHeight
,
toHeight
)
contractEventFilter
:=
database
.
ContractEvent
{
ContractAddress
:
contractAddress
,
EventSignature
:
ethDepositEventAbi
.
ID
}
ethDepositEvents
,
err
:=
db
.
ContractEvents
.
L1ContractEventsWithFilter
(
contractEventFilter
,
fromHeight
,
toHeight
)
if
err
!=
nil
{
return
nil
,
err
}
erc20DepositEvents
,
err
:=
db
.
ContractEvents
.
L1ContractEventsWithFilter
(
database
.
ContractEvent
{
ContractAddress
:
contractAddress
,
EventSignature
:
erc20DepositEventAbi
.
ID
},
fromHeight
,
toHeight
)
contractEventFilter
.
EventSignature
=
erc20DepositEventAbi
.
ID
erc20DepositEvents
,
err
:=
db
.
ContractEvents
.
L1ContractEventsWithFilter
(
contractEventFilter
,
fromHeight
,
toHeight
)
if
err
!=
nil
{
return
nil
,
err
}
...
...
@@ -81,13 +83,15 @@ func L1StandardBridgeLegacyDepositInitiatedEvents(contractAddress common.Address
}
func
L2StandardBridgeLegacyWithdrawalInitiatedEvents
(
contractAddress
common
.
Address
,
db
*
database
.
DB
,
fromHeight
,
toHeight
*
big
.
Int
)
([]
LegacyBridgeEvent
,
error
)
{
// The L2StandardBridge ABI contains the legacy events
l2StandardBridgeAbi
,
err
:=
bindings
.
L2StandardBridgeMetaData
.
GetAbi
()
if
err
!=
nil
{
return
nil
,
err
}
withdrawalInitiatedEventAbi
:=
l2StandardBridgeAbi
.
Events
[
"WithdrawalInitiated"
]
withdrawalEvents
,
err
:=
db
.
ContractEvents
.
L2ContractEventsWithFilter
(
database
.
ContractEvent
{
ContractAddress
:
contractAddress
,
EventSignature
:
withdrawalInitiatedEventAbi
.
ID
},
fromHeight
,
toHeight
)
contractEventFilter
:=
database
.
ContractEvent
{
ContractAddress
:
contractAddress
,
EventSignature
:
withdrawalInitiatedEventAbi
.
ID
}
withdrawalEvents
,
err
:=
db
.
ContractEvents
.
L2ContractEventsWithFilter
(
contractEventFilter
,
fromHeight
,
toHeight
)
if
err
!=
nil
{
return
nil
,
err
}
...
...
op-challenger/config/config.go
View file @
f740ac21
...
...
@@ -144,7 +144,7 @@ func NewConfig(
TraceType
:
traceType
,
TxMgrConfig
:
txmgr
.
NewCLIConfig
(
l1EthRpc
),
TxMgrConfig
:
txmgr
.
NewCLIConfig
(
l1EthRpc
,
txmgr
.
DefaultChallengerFlagValues
),
MetricsConfig
:
opmetrics
.
DefaultCLIConfig
(),
PprofConfig
:
oppprof
.
DefaultCLIConfig
(),
...
...
op-challenger/flags/flags.go
View file @
f740ac21
...
...
@@ -174,7 +174,7 @@ var optionalFlags = []cli.Flag{
func
init
()
{
optionalFlags
=
append
(
optionalFlags
,
oplog
.
CLIFlags
(
envVarPrefix
)
...
)
optionalFlags
=
append
(
optionalFlags
,
txmgr
.
CLIFlags
(
envVarPrefix
)
...
)
optionalFlags
=
append
(
optionalFlags
,
txmgr
.
CLIFlags
WithDefaults
(
envVarPrefix
,
txmgr
.
DefaultChallengerFlagValues
)
...
)
optionalFlags
=
append
(
optionalFlags
,
opmetrics
.
CLIFlags
(
envVarPrefix
)
...
)
optionalFlags
=
append
(
optionalFlags
,
oppprof
.
CLIFlags
(
envVarPrefix
)
...
)
...
...
op-challenger/game/fault/agent.go
View file @
f740ac21
...
...
@@ -152,10 +152,10 @@ func (a *Agent) tryResolveClaims(ctx context.Context) error {
resolvableClaims
=
append
(
resolvableClaims
,
int64
(
claim
.
ContractIndex
))
}
}
a
.
log
.
Info
(
"Resolving claims"
,
"numClaims"
,
len
(
resolvableClaims
))
if
len
(
resolvableClaims
)
==
0
{
return
errNoResolvableClaims
}
a
.
log
.
Info
(
"Resolving claims"
,
"numClaims"
,
len
(
resolvableClaims
))
var
wg
sync
.
WaitGroup
wg
.
Add
(
len
(
resolvableClaims
))
...
...
op-challenger/game/fault/solver/solver.go
View file @
f740ac21
...
...
@@ -35,10 +35,6 @@ func (s *claimSolver) NextMove(ctx context.Context, claim types.Claim, game type
if
claim
.
Depth
()
==
s
.
gameDepth
{
return
nil
,
types
.
ErrGameDepthReached
}
agree
,
err
:=
s
.
agreeWithClaim
(
ctx
,
claim
.
ClaimData
)
if
err
!=
nil
{
return
nil
,
err
}
// Before challenging this claim, first check that the move wasn't warranted.
// If the parent claim is on a dishonest path, then we would have moved against it anyways. So we don't move.
...
...
@@ -57,6 +53,10 @@ func (s *claimSolver) NextMove(ctx context.Context, claim types.Claim, game type
}
}
agree
,
err
:=
s
.
agreeWithClaim
(
ctx
,
claim
.
ClaimData
)
if
err
!=
nil
{
return
nil
,
err
}
if
agree
{
return
s
.
defend
(
ctx
,
claim
)
}
else
{
...
...
op-challenger/game/monitor_test.go
View file @
f740ac21
...
...
@@ -58,22 +58,27 @@ func TestMonitorGames(t *testing.T) {
go
func
()
{
headerNotSent
:=
true
waitErr
:=
wait
.
For
(
context
.
Background
(),
100
*
time
.
Millisecond
,
func
()
(
bool
,
error
)
{
for
{
if
len
(
sched
.
scheduled
)
>=
1
{
return
true
,
nil
break
}
if
mockHeadSource
.
sub
==
nil
{
return
false
,
nil
continue
}
if
headerNotSent
{
mockHeadSource
.
sub
.
headers
<-
&
ethtypes
.
Header
{
select
{
case
mockHeadSource
.
sub
.
headers
<-
&
ethtypes
.
Header
{
Number
:
big
.
NewInt
(
1
),
}
:
headerNotSent
=
false
case
<-
ctx
.
Done
()
:
break
default
:
}
headerNotSent
=
false
}
return
false
,
nil
}
)
require
.
NoError
(
t
,
waitErr
)
// Just to avoid a tight loop
time
.
Sleep
(
100
*
time
.
Millisecond
)
}
mockHeadSource
.
err
=
fmt
.
Errorf
(
"eth subscribe test error"
)
cancel
()
}()
...
...
@@ -94,27 +99,29 @@ func TestMonitorGames(t *testing.T) {
defer
cancel
()
go
func
()
{
headerNotSent
:=
true
waitErr
:=
wait
.
For
(
context
.
Background
(),
100
*
time
.
Millisecond
,
func
()
(
bool
,
error
)
{
return
mockHeadSource
.
sub
!=
nil
,
nil
})
require
.
NoError
(
t
,
waitErr
)
mockHeadSource
.
sub
.
errChan
<-
fmt
.
Errorf
(
"test error"
)
waitErr
=
wait
.
For
(
context
.
Background
(),
100
*
time
.
Millisecond
,
func
()
(
bool
,
error
)
{
for
{
if
len
(
sched
.
scheduled
)
>=
1
{
return
true
,
nil
break
}
if
mockHeadSource
.
sub
==
nil
{
return
false
,
nil
continue
}
if
headerNotSent
{
mockHeadSource
.
sub
.
headers
<-
&
ethtypes
.
Header
{
Number
:
big
.
NewInt
(
1
),
}
headerNotSent
=
false
select
{
case
mockHeadSource
.
sub
.
headers
<-
&
ethtypes
.
Header
{
Number
:
big
.
NewInt
(
1
),
}
:
case
<-
ctx
.
Done
()
:
break
default
:
}
return
false
,
nil
})
// Just to avoid a tight loop
time
.
Sleep
(
100
*
time
.
Millisecond
)
}
require
.
NoError
(
t
,
waitErr
)
mockHeadSource
.
err
=
fmt
.
Errorf
(
"eth subscribe test error"
)
cancel
()
...
...
@@ -122,7 +129,7 @@ func TestMonitorGames(t *testing.T) {
err
:=
monitor
.
MonitorGames
(
ctx
)
require
.
NoError
(
t
,
err
)
require
.
Len
(
t
,
sched
.
scheduled
,
1
)
require
.
NotEmpty
(
t
,
sched
.
scheduled
)
// We might get more than one update scheduled.
require
.
Equal
(
t
,
[]
common
.
Address
{
addr1
,
addr2
},
sched
.
scheduled
[
0
])
})
}
...
...
op-e2e/actions/l2_batcher_test.go
View file @
f740ac21
...
...
@@ -504,7 +504,7 @@ func TestBigL2Txs(gt *testing.T) {
if
miner
.
l1GasPool
.
Gas
()
<
tx
.
Gas
()
{
// fill the L1 block with batcher txs until we run out of gas
break
}
log
.
Info
(
"including batcher tx"
,
"nonce"
,
tx
)
log
.
Info
(
"including batcher tx"
,
"nonce"
,
tx
.
Nonce
()
)
miner
.
IncludeTx
(
t
,
tx
)
txs
=
txs
[
1
:
]
}
...
...
op-e2e/actions/l2_engine.go
View file @
f740ac21
package
actions
import
(
"context"
"errors"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi"
"github.com/stretchr/testify/require"
...
...
@@ -176,12 +179,22 @@ func (e *L2Engine) ActL2IncludeTx(from common.Address) Action {
return
}
i
:=
e
.
engineApi
.
PendingIndices
(
from
)
txs
,
q
:=
e
.
eth
.
TxPool
()
.
ContentFrom
(
from
)
require
.
Greaterf
(
t
,
uint64
(
len
(
txs
)),
i
,
"no pending txs from %s, and have %d unprocessable queued txs from this account"
,
from
,
len
(
q
))
var
i
uint64
var
txs
[]
*
types
.
Transaction
var
q
[]
*
types
.
Transaction
// Wait for the tx to be in the pending tx queue
ctx
,
cancel
:=
context
.
WithTimeout
(
context
.
Background
(),
30
*
time
.
Second
)
defer
cancel
()
err
:=
wait
.
For
(
ctx
,
time
.
Second
,
func
()
(
bool
,
error
)
{
i
=
e
.
engineApi
.
PendingIndices
(
from
)
txs
,
q
=
e
.
eth
.
TxPool
()
.
ContentFrom
(
from
)
return
uint64
(
len
(
txs
))
>
i
,
nil
})
require
.
NoError
(
t
,
err
,
"no pending txs from %s, and have %d unprocessable queued txs from this account: %w"
,
from
,
len
(
q
),
err
)
tx
:=
txs
[
i
]
err
:
=
e
.
engineApi
.
IncludeTx
(
tx
,
from
)
err
=
e
.
engineApi
.
IncludeTx
(
tx
,
from
)
if
errors
.
Is
(
err
,
engineapi
.
ErrNotBuildingBlock
)
{
t
.
InvalidAction
(
err
.
Error
())
}
else
if
errors
.
Is
(
err
,
engineapi
.
ErrUsesTooMuchGas
)
{
...
...
op-e2e/e2eutils/challenger/helper.go
View file @
f740ac21
...
...
@@ -197,7 +197,7 @@ func (h *Helper) WaitForGameDataDeletion(ctx context.Context, games ...GameAddr)
if
err
!=
nil
{
return
false
,
fmt
.
Errorf
(
"failed to check dir %v is deleted: %w"
,
dir
,
err
)
}
h
.
t
.
Error
f
(
"Game data directory %v not yet deleted"
,
dir
)
h
.
t
.
Log
f
(
"Game data directory %v not yet deleted"
,
dir
)
return
false
,
nil
}
return
true
,
nil
...
...
op-e2e/e2eutils/wait/blocks.go
0 → 100644
View file @
f740ac21
package
wait
import
(
"context"
"fmt"
"math/big"
"time"
"github.com/ethereum/go-ethereum/core/types"
)
// BlockCaller is a subset of the [ethclient.Client] interface
// encompassing methods that query for block information.
type BlockCaller interface {
	// BlockByNumber returns the block at the given height; callers in this
	// package pass nil to request the latest block.
	BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error)
	// BlockNumber returns the current chain head height.
	BlockNumber(ctx context.Context) (uint64, error)
}
func
ForBlock
(
ctx
context
.
Context
,
client
BlockCaller
,
n
uint64
)
error
{
for
{
if
ctx
.
Done
()
!=
nil
{
return
ctx
.
Err
()
}
height
,
err
:=
client
.
BlockNumber
(
ctx
)
if
err
!=
nil
{
return
err
}
if
height
<
n
{
time
.
Sleep
(
500
*
time
.
Millisecond
)
continue
}
break
}
return
nil
}
func
ForBlockWithTimestamp
(
ctx
context
.
Context
,
client
BlockCaller
,
target
uint64
)
error
{
_
,
err
:=
AndGet
(
ctx
,
time
.
Second
,
func
()
(
uint64
,
error
)
{
head
,
err
:=
client
.
BlockByNumber
(
ctx
,
nil
)
if
err
!=
nil
{
return
0
,
err
}
return
head
.
Time
(),
nil
},
func
(
actual
uint64
)
bool
{
return
actual
>=
target
})
return
err
}
func
ForNextBlock
(
ctx
context
.
Context
,
client
BlockCaller
)
error
{
current
,
err
:=
client
.
BlockNumber
(
ctx
)
// Long timeout so we don't have to care what the block time is. If the test passes this will complete early anyway.
ctx
,
cancel
:=
context
.
WithTimeout
(
ctx
,
60
*
time
.
Second
)
defer
cancel
()
if
err
!=
nil
{
return
fmt
.
Errorf
(
"get starting block number: %w"
,
err
)
}
return
ForBlock
(
ctx
,
client
,
current
+
1
)
}
op-e2e/e2eutils/wait/waits.go
View file @
f740ac21
...
...
@@ -69,59 +69,25 @@ func printDebugTrace(ctx context.Context, client *ethclient.Client, txHash commo
fmt
.
Printf
(
"TxTrace: %v
\n
"
,
trace
)
}
func
ForBlock
(
ctx
context
.
Context
,
client
*
ethclient
.
Client
,
n
uint64
)
error
{
func
For
(
ctx
context
.
Context
,
rate
time
.
Duration
,
cb
func
()
(
bool
,
error
))
error
{
tick
:=
time
.
NewTicker
(
rate
)
defer
tick
.
Stop
()
for
{
height
,
err
:=
client
.
BlockNumber
(
ctx
)
// Perform the first check before any waiting.
done
,
err
:=
cb
()
if
err
!=
nil
{
return
err
}
if
height
<
n
{
time
.
Sleep
(
500
*
time
.
Millisecond
)
continue
}
break
}
return
nil
}
func
ForBlockWithTimestamp
(
ctx
context
.
Context
,
client
*
ethclient
.
Client
,
target
uint64
)
error
{
_
,
err
:=
AndGet
(
ctx
,
time
.
Second
,
func
()
(
uint64
,
error
)
{
head
,
err
:=
client
.
BlockByNumber
(
ctx
,
nil
)
if
err
!=
nil
{
return
0
,
err
if
done
{
return
nil
}
return
head
.
Time
(),
nil
},
func
(
actual
uint64
)
bool
{
return
actual
>=
target
})
return
err
}
func
ForNextBlock
(
ctx
context
.
Context
,
client
*
ethclient
.
Client
)
error
{
current
,
err
:=
client
.
BlockNumber
(
ctx
)
if
err
!=
nil
{
return
fmt
.
Errorf
(
"get starting block number: %w"
,
err
)
}
return
ForBlock
(
ctx
,
client
,
current
+
1
)
}
func
For
(
ctx
context
.
Context
,
rate
time
.
Duration
,
cb
func
()
(
bool
,
error
))
error
{
tick
:=
time
.
NewTicker
(
rate
)
defer
tick
.
Stop
()
for
{
select
{
case
<-
ctx
.
Done
()
:
return
ctx
.
Err
()
case
<-
tick
.
C
:
done
,
err
:=
cb
()
if
err
!=
nil
{
return
err
}
if
done
{
return
nil
}
// Allow loop to continue for next retry
}
}
}
...
...
op-e2e/system_adminrpc_test.go
View file @
f740ac21
...
...
@@ -5,6 +5,7 @@ import (
"testing"
"time"
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait"
"github.com/ethereum-optimism/optimism/op-node/client"
"github.com/ethereum-optimism/optimism/op-node/node"
"github.com/ethereum-optimism/optimism/op-node/sources"
...
...
@@ -34,10 +35,11 @@ func TestStopStartSequencer(t *testing.T) {
require
.
NoError
(
t
,
err
)
require
.
True
(
t
,
active
,
"sequencer should be active"
)
blockBefore
:=
latestBlock
(
t
,
l2Seq
)
time
.
Sleep
(
time
.
Duration
(
cfg
.
DeployConfig
.
L2BlockTime
+
1
)
*
time
.
Second
)
blockAfter
:=
latestBlock
(
t
,
l2Seq
)
require
.
Greaterf
(
t
,
blockAfter
,
blockBefore
,
"Chain did not advance"
)
require
.
NoError
(
t
,
wait
.
ForNextBlock
(
ctx
,
l2Seq
),
"Chain did not advance after starting sequencer"
,
)
ctx
,
cancel
=
context
.
WithTimeout
(
context
.
Background
(),
5
*
time
.
Second
)
defer
cancel
()
...
...
@@ -50,9 +52,9 @@ func TestStopStartSequencer(t *testing.T) {
require
.
NoError
(
t
,
err
)
require
.
False
(
t
,
active
,
"sequencer should be inactive"
)
blockBefore
=
latestBlock
(
t
,
l2Seq
)
blockBefore
:
=
latestBlock
(
t
,
l2Seq
)
time
.
Sleep
(
time
.
Duration
(
cfg
.
DeployConfig
.
L2BlockTime
+
1
)
*
time
.
Second
)
blockAfter
=
latestBlock
(
t
,
l2Seq
)
blockAfter
:
=
latestBlock
(
t
,
l2Seq
)
require
.
Equal
(
t
,
blockAfter
,
blockBefore
,
"Chain advanced after stopping sequencer"
)
ctx
,
cancel
=
context
.
WithTimeout
(
context
.
Background
(),
5
*
time
.
Second
)
...
...
@@ -66,10 +68,11 @@ func TestStopStartSequencer(t *testing.T) {
require
.
NoError
(
t
,
err
)
require
.
True
(
t
,
active
,
"sequencer should be active again"
)
blockBefore
=
latestBlock
(
t
,
l2Seq
)
time
.
Sleep
(
time
.
Duration
(
cfg
.
DeployConfig
.
L2BlockTime
+
1
)
*
time
.
Second
)
blockAfter
=
latestBlock
(
t
,
l2Seq
)
require
.
Greater
(
t
,
blockAfter
,
blockBefore
,
"Chain did not advance after starting sequencer"
)
require
.
NoError
(
t
,
wait
.
ForNextBlock
(
ctx
,
l2Seq
),
"Chain did not advance after starting sequencer"
,
)
}
func
TestPersistSequencerStateWhenChanged
(
t
*
testing
.
T
)
{
...
...
op-service/txmgr/cli.go
View file @
f740ac21
...
...
@@ -48,17 +48,42 @@ var (
}
)
// DefaultFlagValues holds the default values for the txmgr CLI flags,
// allowing each consuming service (e.g. batcher vs challenger) to supply
// its own defaults.
type DefaultFlagValues struct {
	// NumConfirmations is the default number of confirmations to wait after sending a tx.
	NumConfirmations uint64
	// SafeAbortNonceTooLowCount is the default number of ErrNonceTooLow observations before giving up.
	SafeAbortNonceTooLowCount uint64
	// ResubmissionTimeout is the default wait before resubmitting a transaction.
	ResubmissionTimeout time.Duration
	// NetworkTimeout is the default timeout for network operations.
	NetworkTimeout time.Duration
	// TxSendTimeout is the default timeout for sending transactions (0 disables it).
	TxSendTimeout time.Duration
	// TxNotInMempoolTimeout is the default timeout for aborting a send if the tx never reaches the mempool.
	TxNotInMempoolTimeout time.Duration
	// ReceiptQueryInterval is the default polling frequency for receipts.
	ReceiptQueryInterval time.Duration
}
var
(
defaultNumConfirmations
=
uint64
(
10
)
defaultSafeAbortNonceTooLowCount
=
uint64
(
3
)
defaultResubmissionTimeout
=
48
*
time
.
Second
defaultNetworkTimeout
=
10
*
time
.
Second
defaultTxSendTimeout
=
0
*
time
.
Second
defaultTxNotInMempoolTimeout
=
2
*
time
.
Minute
defaultReceiptQueryInterval
=
12
*
time
.
Second
DefaultBatcherFlagValues
=
DefaultFlagValues
{
NumConfirmations
:
uint64
(
10
),
SafeAbortNonceTooLowCount
:
uint64
(
3
),
ResubmissionTimeout
:
48
*
time
.
Second
,
NetworkTimeout
:
10
*
time
.
Second
,
TxSendTimeout
:
0
*
time
.
Second
,
TxNotInMempoolTimeout
:
2
*
time
.
Minute
,
ReceiptQueryInterval
:
12
*
time
.
Second
,
}
DefaultChallengerFlagValues
=
DefaultFlagValues
{
NumConfirmations
:
uint64
(
3
),
SafeAbortNonceTooLowCount
:
uint64
(
3
),
ResubmissionTimeout
:
24
*
time
.
Second
,
NetworkTimeout
:
10
*
time
.
Second
,
TxSendTimeout
:
2
*
time
.
Minute
,
TxNotInMempoolTimeout
:
1
*
time
.
Minute
,
ReceiptQueryInterval
:
12
*
time
.
Second
,
}
)
// CLIFlags returns the txmgr CLI flags using the batcher's default values.
func CLIFlags(envPrefix string) []cli.Flag {
	return CLIFlagsWithDefaults(envPrefix, DefaultBatcherFlagValues)
}
func
CLIFlagsWithDefaults
(
envPrefix
string
,
defaults
DefaultFlagValues
)
[]
cli
.
Flag
{
prefixEnvVars
:=
func
(
name
string
)
[]
string
{
return
opservice
.
PrefixEnvVar
(
envPrefix
,
name
)
}
...
...
@@ -81,43 +106,43 @@ func CLIFlags(envPrefix string) []cli.Flag {
&
cli
.
Uint64Flag
{
Name
:
NumConfirmationsFlagName
,
Usage
:
"Number of confirmations which we will wait after sending a transaction"
,
Value
:
defaultNumConfirmations
,
Value
:
default
s
.
NumConfirmations
,
EnvVars
:
prefixEnvVars
(
"NUM_CONFIRMATIONS"
),
},
&
cli
.
Uint64Flag
{
Name
:
SafeAbortNonceTooLowCountFlagName
,
Usage
:
"Number of ErrNonceTooLow observations required to give up on a tx at a particular nonce without receiving confirmation"
,
Value
:
defaultSafeAbortNonceTooLowCount
,
Value
:
default
s
.
SafeAbortNonceTooLowCount
,
EnvVars
:
prefixEnvVars
(
"SAFE_ABORT_NONCE_TOO_LOW_COUNT"
),
},
&
cli
.
DurationFlag
{
Name
:
ResubmissionTimeoutFlagName
,
Usage
:
"Duration we will wait before resubmitting a transaction to L1"
,
Value
:
defaultResubmissionTimeout
,
Value
:
default
s
.
ResubmissionTimeout
,
EnvVars
:
prefixEnvVars
(
"RESUBMISSION_TIMEOUT"
),
},
&
cli
.
DurationFlag
{
Name
:
NetworkTimeoutFlagName
,
Usage
:
"Timeout for all network operations"
,
Value
:
defaultNetworkTimeout
,
Value
:
default
s
.
NetworkTimeout
,
EnvVars
:
prefixEnvVars
(
"NETWORK_TIMEOUT"
),
},
&
cli
.
DurationFlag
{
Name
:
TxSendTimeoutFlagName
,
Usage
:
"Timeout for sending transactions. If 0 it is disabled."
,
Value
:
defaultTxSendTimeout
,
Value
:
default
s
.
TxSendTimeout
,
EnvVars
:
prefixEnvVars
(
"TXMGR_TX_SEND_TIMEOUT"
),
},
&
cli
.
DurationFlag
{
Name
:
TxNotInMempoolTimeoutFlagName
,
Usage
:
"Timeout for aborting a tx send if the tx does not make it to the mempool."
,
Value
:
defaultTxNotInMempoolTimeout
,
Value
:
default
s
.
TxNotInMempoolTimeout
,
EnvVars
:
prefixEnvVars
(
"TXMGR_TX_NOT_IN_MEMPOOL_TIMEOUT"
),
},
&
cli
.
DurationFlag
{
Name
:
ReceiptQueryIntervalFlagName
,
Usage
:
"Frequency to poll for receipts"
,
Value
:
defaultReceiptQueryInterval
,
Value
:
default
s
.
ReceiptQueryInterval
,
EnvVars
:
prefixEnvVars
(
"TXMGR_RECEIPT_QUERY_INTERVAL"
),
},
},
client
.
CLIFlags
(
envPrefix
)
...
)
...
...
@@ -140,16 +165,16 @@ type CLIConfig struct {
TxNotInMempoolTimeout
time
.
Duration
}
func
NewCLIConfig
(
l1RPCURL
string
)
CLIConfig
{
func
NewCLIConfig
(
l1RPCURL
string
,
defaults
DefaultFlagValues
)
CLIConfig
{
return
CLIConfig
{
L1RPCURL
:
l1RPCURL
,
NumConfirmations
:
defaultNumConfirmations
,
SafeAbortNonceTooLowCount
:
defaultSafeAbortNonceTooLowCount
,
ResubmissionTimeout
:
defaultResubmissionTimeout
,
NetworkTimeout
:
defaultNetworkTimeout
,
TxSendTimeout
:
defaultTxSendTimeout
,
TxNotInMempoolTimeout
:
defaultTxNotInMempoolTimeout
,
ReceiptQueryInterval
:
defaultReceiptQueryInterval
,
NumConfirmations
:
default
s
.
NumConfirmations
,
SafeAbortNonceTooLowCount
:
default
s
.
SafeAbortNonceTooLowCount
,
ResubmissionTimeout
:
default
s
.
ResubmissionTimeout
,
NetworkTimeout
:
default
s
.
NetworkTimeout
,
TxSendTimeout
:
default
s
.
TxSendTimeout
,
TxNotInMempoolTimeout
:
default
s
.
TxNotInMempoolTimeout
,
ReceiptQueryInterval
:
default
s
.
ReceiptQueryInterval
,
SignerCLIConfig
:
client
.
NewCLIConfig
(),
}
}
...
...
op-service/txmgr/cli_test.go
View file @
f740ac21
...
...
@@ -13,12 +13,12 @@ var (
func
TestDefaultCLIOptionsMatchDefaultConfig
(
t
*
testing
.
T
)
{
cfg
:=
configForArgs
()
defaultCfg
:=
NewCLIConfig
(
l1EthRpcValue
)
defaultCfg
:=
NewCLIConfig
(
l1EthRpcValue
,
DefaultBatcherFlagValues
)
require
.
Equal
(
t
,
defaultCfg
,
cfg
)
}
func
TestDefaultConfigIsValid
(
t
*
testing
.
T
)
{
cfg
:=
NewCLIConfig
(
l1EthRpcValue
)
cfg
:=
NewCLIConfig
(
l1EthRpcValue
,
DefaultBatcherFlagValues
)
require
.
NoError
(
t
,
cfg
.
Check
())
}
...
...
package.json
View file @
f740ac21
...
...
@@ -49,7 +49,7 @@
"
chai
"
:
"
^4.3.9
"
,
"
depcheck
"
:
"
^1.4.3
"
,
"
doctoc
"
:
"
^2.2.0
"
,
"
eslint
"
:
"
^8.
43
.0
"
,
"
eslint
"
:
"
^8.
50
.0
"
,
"
eslint-config-prettier
"
:
"
^8.3.0
"
,
"
eslint-config-standard
"
:
"
^16.0.3
"
,
"
eslint-plugin-import
"
:
"
^2.26.0
"
,
...
...
pnpm-lock.yaml
View file @
f740ac21
This diff is collapsed.
Click to expand it.
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment