Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
N
nebula
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
exchain
nebula
Commits
787f6857
Unverified
Commit
787f6857
authored
Sep 10, 2023
by
mergify[bot]
Committed by
GitHub
Sep 10, 2023
Browse files
Options
Browse Files
Download
Plain Diff
Merge branch 'develop' into dependabot/go_modules/op-ufm/github.com/supranational/blst-0.3.11
parents
31f9a978
5eaaee10
Changes
23
Expand all
Hide whitespace changes
Inline
Side-by-side
Showing
23 changed files
with
439 additions
and
243 deletions
+439
-243
go.mod
cannon/example/claim/go.mod
+1
-1
go.sum
cannon/example/claim/go.sum
+2
-2
go.mod
go.mod
+2
-2
go.sum
go.sum
+4
-4
bigint.go
indexer/bigint/bigint.go
+60
-0
bigint_test.go
indexer/bigint/bigint_test.go
+72
-0
blocks.go
indexer/database/blocks.go
+30
-12
bridge_messages.go
indexer/database/bridge_messages.go
+2
-2
bridge_transactions.go
indexer/database/bridge_transactions.go
+42
-18
bridge_transfers.go
indexer/database/bridge_transfers.go
+2
-2
contract_events.go
indexer/database/contract_events.go
+2
-2
db.go
indexer/database/db.go
+9
-2
mocks.go
indexer/database/mocks.go
+1
-0
l1_etl_test.go
indexer/etl/l1_etl_test.go
+3
-3
20230523_create_schema.sql
indexer/migrations/20230523_create_schema.sql
+39
-2
bigint.go
indexer/node/bigint.go
+0
-26
bigint_test.go
indexer/node/bigint_test.go
+0
-31
header_traversal.go
indexer/node/header_traversal.go
+4
-3
header_traversal_test.go
indexer/node/header_traversal_test.go
+11
-10
mocks.go
indexer/node/mocks.go
+1
-0
bridge.go
indexer/processors/bridge.go
+88
-60
l1_bridge_processor.go
indexer/processors/bridge/l1_bridge_processor.go
+38
-38
l2_bridge_processor.go
indexer/processors/bridge/l2_bridge_processor.go
+26
-23
No files found.
cannon/example/claim/go.mod
View file @
787f6857
...
@@ -6,7 +6,7 @@ require github.com/ethereum-optimism/optimism v0.0.0
...
@@ -6,7 +6,7 @@ require github.com/ethereum-optimism/optimism v0.0.0
require (
require (
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/sys v0.1
1
.0 // indirect
golang.org/x/sys v0.1
2
.0 // indirect
)
)
replace github.com/ethereum-optimism/optimism v0.0.0 => ../../..
replace github.com/ethereum-optimism/optimism v0.0.0 => ../../..
cannon/example/claim/go.sum
View file @
787f6857
...
@@ -3,6 +3,6 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
...
@@ -3,6 +3,6 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/sys v0.1
1.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM
=
golang.org/x/sys v0.1
2.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o
=
golang.org/x/sys v0.1
1
.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1
2
.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
go.mod
View file @
787f6857
...
@@ -41,7 +41,7 @@ require (
...
@@ -41,7 +41,7 @@ require (
golang.org/x/crypto v0.12.0
golang.org/x/crypto v0.12.0
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df
golang.org/x/sync v0.3.0
golang.org/x/sync v0.3.0
golang.org/x/term v0.1
1
.0
golang.org/x/term v0.1
2
.0
golang.org/x/time v0.3.0
golang.org/x/time v0.3.0
gorm.io/driver/postgres v1.5.2
gorm.io/driver/postgres v1.5.2
gorm.io/gorm v1.25.4
gorm.io/gorm v1.25.4
...
@@ -195,7 +195,7 @@ require (
...
@@ -195,7 +195,7 @@ require (
go.uber.org/zap v1.24.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sys v0.1
1
.0 // indirect
golang.org/x/sys v0.1
2
.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/tools v0.9.3 // indirect
golang.org/x/tools v0.9.3 // indirect
google.golang.org/protobuf v1.30.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
...
...
go.sum
View file @
787f6857
...
@@ -1008,13 +1008,13 @@ golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBc
...
@@ -1008,13 +1008,13 @@ golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1
1.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM
=
golang.org/x/sys v0.1
2.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o
=
golang.org/x/sys v0.1
1
.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1
2
.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1
1.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0
=
golang.org/x/term v0.1
2.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU
=
golang.org/x/term v0.1
1.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPI
U=
golang.org/x/term v0.1
2.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgv
U=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
...
...
indexer/bigint/bigint.go
0 → 100644
View file @
787f6857
package
bigint
import
"math/big"
var
(
Zero
=
big
.
NewInt
(
0
)
One
=
big
.
NewInt
(
1
)
)
// Clamp returns a new big.Int for `end` to which `end - start` <= size.
// @note (start, end) is an inclusive range
func
Clamp
(
start
,
end
*
big
.
Int
,
size
uint64
)
*
big
.
Int
{
temp
:=
new
(
big
.
Int
)
count
:=
temp
.
Sub
(
end
,
start
)
.
Uint64
()
+
1
if
count
<=
size
{
return
end
}
// we re-use the allocated temp as the new end
temp
.
Add
(
start
,
big
.
NewInt
(
int64
(
size
-
1
)))
return
temp
}
// Matcher returns an inner comparison function result for a big.Int
func
Matcher
(
num
int64
)
func
(
*
big
.
Int
)
bool
{
return
func
(
bi
*
big
.
Int
)
bool
{
return
bi
.
Int64
()
==
num
}
}
type
Range
struct
{
Start
*
big
.
Int
End
*
big
.
Int
}
// Grouped will return a slice of inclusive ranges from (start, end),
// capped to the supplied size from `(start, end)`.
func
Grouped
(
start
,
end
*
big
.
Int
,
size
uint64
)
[]
Range
{
if
end
.
Cmp
(
start
)
<
0
||
size
==
0
{
return
nil
}
bigMaxDiff
:=
big
.
NewInt
(
int64
(
size
-
1
))
groups
:=
[]
Range
{}
for
start
.
Cmp
(
end
)
<=
0
{
diff
:=
new
(
big
.
Int
)
.
Sub
(
end
,
start
)
switch
{
case
diff
.
Uint64
()
+
1
<=
size
:
// re-use allocated diff as the next start
groups
=
append
(
groups
,
Range
{
start
,
end
})
start
=
diff
.
Add
(
end
,
One
)
default
:
// re-use allocated diff as the next start
end
:=
new
(
big
.
Int
)
.
Add
(
start
,
bigMaxDiff
)
groups
=
append
(
groups
,
Range
{
start
,
end
})
start
=
diff
.
Add
(
end
,
One
)
}
}
return
groups
}
indexer/bigint/bigint_test.go
0 → 100644
View file @
787f6857
package
bigint
import
(
"math/big"
"testing"
"github.com/stretchr/testify/require"
)
func
TestClamp
(
t
*
testing
.
T
)
{
start
:=
big
.
NewInt
(
1
)
end
:=
big
.
NewInt
(
10
)
// When the (start, end) bounds are within range
// the same end pointer should be returned
// larger range
result
:=
Clamp
(
start
,
end
,
20
)
require
.
True
(
t
,
end
==
result
)
// exact range
result
=
Clamp
(
start
,
end
,
10
)
require
.
True
(
t
,
end
==
result
)
// smaller range
result
=
Clamp
(
start
,
end
,
5
)
require
.
False
(
t
,
end
==
result
)
require
.
Equal
(
t
,
uint64
(
5
),
result
.
Uint64
())
}
func
TestGrouped
(
t
*
testing
.
T
)
{
// base cases
require
.
Nil
(
t
,
Grouped
(
One
,
Zero
,
1
))
require
.
Nil
(
t
,
Grouped
(
Zero
,
One
,
0
))
// Same Start/End
group
:=
Grouped
(
One
,
One
,
1
)
require
.
Len
(
t
,
group
,
1
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
Start
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
End
)
Three
,
Five
:=
big
.
NewInt
(
3
),
big
.
NewInt
(
5
)
// One at a time
group
=
Grouped
(
One
,
Three
,
1
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
End
)
require
.
Equal
(
t
,
int64
(
1
),
group
[
0
]
.
End
.
Int64
())
require
.
Equal
(
t
,
int64
(
2
),
group
[
1
]
.
Start
.
Int64
())
require
.
Equal
(
t
,
int64
(
2
),
group
[
1
]
.
End
.
Int64
())
require
.
Equal
(
t
,
int64
(
3
),
group
[
2
]
.
Start
.
Int64
())
require
.
Equal
(
t
,
int64
(
3
),
group
[
2
]
.
End
.
Int64
())
// Split groups
group
=
Grouped
(
One
,
Five
,
3
)
require
.
Len
(
t
,
group
,
2
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
Start
)
require
.
Equal
(
t
,
int64
(
3
),
group
[
0
]
.
End
.
Int64
())
require
.
Equal
(
t
,
int64
(
4
),
group
[
1
]
.
Start
.
Int64
())
require
.
Equal
(
t
,
Five
,
group
[
1
]
.
End
)
// Encompasses the range
group
=
Grouped
(
One
,
Five
,
5
)
require
.
Len
(
t
,
group
,
1
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
Start
,
Zero
)
require
.
Equal
(
t
,
Five
,
group
[
0
]
.
End
)
// Size larger than the entire range
group
=
Grouped
(
One
,
Five
,
100
)
require
.
Len
(
t
,
group
,
1
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
Start
,
Zero
)
require
.
Equal
(
t
,
Five
,
group
[
0
]
.
End
)
}
indexer/database/blocks.go
View file @
787f6857
package
database
package
database
import
(
import
(
"context"
"errors"
"errors"
"math/big"
"math/big"
...
@@ -104,17 +103,17 @@ func newBlocksDB(db *gorm.DB) BlocksDB {
...
@@ -104,17 +103,17 @@ func newBlocksDB(db *gorm.DB) BlocksDB {
// L1
// L1
func
(
db
*
blocksDB
)
StoreL1BlockHeaders
(
headers
[]
L1BlockHeader
)
error
{
func
(
db
*
blocksDB
)
StoreL1BlockHeaders
(
headers
[]
L1BlockHeader
)
error
{
result
:=
db
.
gorm
.
Create
(
&
headers
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
headers
,
batchInsertSize
)
return
result
.
Error
return
result
.
Error
}
}
func
(
db
*
blocksDB
)
StoreLegacyStateBatches
(
stateBatches
[]
LegacyStateBatch
)
error
{
func
(
db
*
blocksDB
)
StoreLegacyStateBatches
(
stateBatches
[]
LegacyStateBatch
)
error
{
result
:=
db
.
gorm
.
Create
(
stateBatches
)
result
:=
db
.
gorm
.
Create
InBatches
(
stateBatches
,
batchInsertSize
)
return
result
.
Error
return
result
.
Error
}
}
func
(
db
*
blocksDB
)
StoreOutputProposals
(
outputs
[]
OutputProposal
)
error
{
func
(
db
*
blocksDB
)
StoreOutputProposals
(
outputs
[]
OutputProposal
)
error
{
result
:=
db
.
gorm
.
Create
(
outputs
)
result
:=
db
.
gorm
.
Create
InBatches
(
outputs
,
batchInsertSize
)
return
result
.
Error
return
result
.
Error
}
}
...
@@ -180,7 +179,7 @@ func (db *blocksDB) OutputProposal(index *big.Int) (*OutputProposal, error) {
...
@@ -180,7 +179,7 @@ func (db *blocksDB) OutputProposal(index *big.Int) (*OutputProposal, error) {
// L2
// L2
func
(
db
*
blocksDB
)
StoreL2BlockHeaders
(
headers
[]
L2BlockHeader
)
error
{
func
(
db
*
blocksDB
)
StoreL2BlockHeaders
(
headers
[]
L2BlockHeader
)
error
{
result
:=
db
.
gorm
.
Create
(
&
headers
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
headers
,
batchInsertSize
)
return
result
.
Error
return
result
.
Error
}
}
...
@@ -211,7 +210,6 @@ func (db *blocksDB) L2LatestBlockHeader() (*L2BlockHeader, error) {
...
@@ -211,7 +210,6 @@ func (db *blocksDB) L2LatestBlockHeader() (*L2BlockHeader, error) {
return
nil
,
result
.
Error
return
nil
,
result
.
Error
}
}
result
.
Logger
.
Info
(
context
.
Background
(),
"number "
,
l2Header
.
Number
)
return
&
l2Header
,
nil
return
&
l2Header
,
nil
}
}
...
@@ -229,12 +227,32 @@ type Epoch struct {
...
@@ -229,12 +227,32 @@ type Epoch struct {
// For more, see the protocol spec:
// For more, see the protocol spec:
// - https://github.com/ethereum-optimism/optimism/blob/develop/specs/derivation.md
// - https://github.com/ethereum-optimism/optimism/blob/develop/specs/derivation.md
func
(
db
*
blocksDB
)
LatestEpoch
()
(
*
Epoch
,
error
)
{
func
(
db
*
blocksDB
)
LatestEpoch
()
(
*
Epoch
,
error
)
{
// Since L1 blocks occur less frequently than L2, we do a INNER JOIN from L1 on
latestL1Header
,
err
:=
db
.
L1LatestBlockHeader
()
// L2 for a faster query. Per the protocol, the L2 block that starts a new epoch
if
err
!=
nil
{
// will have a matching timestamp with the L1 origin.
return
nil
,
err
query
:=
db
.
gorm
.
Table
(
"l1_block_headers"
)
.
Order
(
"l1_block_headers.timestamp DESC"
)
}
else
if
latestL1Header
==
nil
{
query
=
query
.
Joins
(
"INNER JOIN l2_block_headers ON l2_block_headers.timestamp = l1_block_headers.timestamp"
)
return
nil
,
nil
query
=
query
.
Select
(
"*"
)
}
latestL2Header
,
err
:=
db
.
L2LatestBlockHeader
()
if
err
!=
nil
{
return
nil
,
err
}
else
if
latestL2Header
==
nil
{
return
nil
,
nil
}
minTime
:=
latestL1Header
.
Timestamp
if
latestL2Header
.
Timestamp
<
minTime
{
minTime
=
latestL2Header
.
Timestamp
}
// This is a faster query than doing an INNER JOIN between l1_block_headers and l2_block_headers
// which requires a full table scan to compute the resulting table.
l1Query
:=
db
.
gorm
.
Table
(
"l1_block_headers"
)
.
Where
(
"timestamp <= ?"
,
minTime
)
l2Query
:=
db
.
gorm
.
Table
(
"l2_block_headers"
)
.
Where
(
"timestamp <= ?"
,
minTime
)
query
:=
db
.
gorm
.
Raw
(
`SELECT * FROM (?) AS l1_block_headers, (?) AS l2_block_headers
WHERE l1_block_headers.timestamp = l2_block_headers.timestamp
ORDER BY l2_block_headers.number DESC LIMIT 1`
,
l1Query
,
l2Query
)
var
epoch
Epoch
var
epoch
Epoch
result
:=
query
.
Take
(
&
epoch
)
result
:=
query
.
Take
(
&
epoch
)
...
...
indexer/database/bridge_messages.go
View file @
787f6857
...
@@ -72,7 +72,7 @@ func newBridgeMessagesDB(db *gorm.DB) BridgeMessagesDB {
...
@@ -72,7 +72,7 @@ func newBridgeMessagesDB(db *gorm.DB) BridgeMessagesDB {
*/
*/
func
(
db
bridgeMessagesDB
)
StoreL1BridgeMessages
(
messages
[]
L1BridgeMessage
)
error
{
func
(
db
bridgeMessagesDB
)
StoreL1BridgeMessages
(
messages
[]
L1BridgeMessage
)
error
{
result
:=
db
.
gorm
.
Create
(
&
messages
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
messages
,
batchInsertSize
)
return
result
.
Error
return
result
.
Error
}
}
...
@@ -111,7 +111,7 @@ func (db bridgeMessagesDB) MarkRelayedL1BridgeMessage(messageHash common.Hash, r
...
@@ -111,7 +111,7 @@ func (db bridgeMessagesDB) MarkRelayedL1BridgeMessage(messageHash common.Hash, r
*/
*/
func
(
db
bridgeMessagesDB
)
StoreL2BridgeMessages
(
messages
[]
L2BridgeMessage
)
error
{
func
(
db
bridgeMessagesDB
)
StoreL2BridgeMessages
(
messages
[]
L2BridgeMessage
)
error
{
result
:=
db
.
gorm
.
Create
(
&
messages
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
messages
,
batchInsertSize
)
return
result
.
Error
return
result
.
Error
}
}
...
...
indexer/database/bridge_transactions.go
View file @
787f6857
...
@@ -80,7 +80,7 @@ func newBridgeTransactionsDB(db *gorm.DB) BridgeTransactionsDB {
...
@@ -80,7 +80,7 @@ func newBridgeTransactionsDB(db *gorm.DB) BridgeTransactionsDB {
*/
*/
func
(
db
*
bridgeTransactionsDB
)
StoreL1TransactionDeposits
(
deposits
[]
L1TransactionDeposit
)
error
{
func
(
db
*
bridgeTransactionsDB
)
StoreL1TransactionDeposits
(
deposits
[]
L1TransactionDeposit
)
error
{
result
:=
db
.
gorm
.
Create
(
&
deposits
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
deposits
,
batchInsertSize
)
return
result
.
Error
return
result
.
Error
}
}
...
@@ -114,7 +114,7 @@ func (db *bridgeTransactionsDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
...
@@ -114,7 +114,7 @@ func (db *bridgeTransactionsDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
l1Query
:=
db
.
gorm
.
Table
(
"((?) UNION (?) UNION (?)) AS latest_bridge_events"
,
l1DepositQuery
.
Limit
(
1
),
l1ProvenQuery
,
l1FinalizedQuery
)
l1Query
:=
db
.
gorm
.
Table
(
"((?) UNION (?) UNION (?)) AS latest_bridge_events"
,
l1DepositQuery
.
Limit
(
1
),
l1ProvenQuery
,
l1FinalizedQuery
)
l1Query
=
l1Query
.
Joins
(
"INNER JOIN l1_block_headers ON l1_block_headers.hash = latest_bridge_events.block_hash"
)
l1Query
=
l1Query
.
Joins
(
"INNER JOIN l1_block_headers ON l1_block_headers.hash = latest_bridge_events.block_hash"
)
l1Query
=
l1Query
.
Order
(
"l
1_block_headers.number
DESC"
)
.
Select
(
"l1_block_headers.*"
)
l1Query
=
l1Query
.
Order
(
"l
atest_bridge_events.timestamp
DESC"
)
.
Select
(
"l1_block_headers.*"
)
var
l1Header
L1BlockHeader
var
l1Header
L1BlockHeader
result
:=
l1Query
.
Take
(
&
l1Header
)
result
:=
l1Query
.
Take
(
&
l1Header
)
...
@@ -133,7 +133,7 @@ func (db *bridgeTransactionsDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
...
@@ -133,7 +133,7 @@ func (db *bridgeTransactionsDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
*/
*/
func
(
db
*
bridgeTransactionsDB
)
StoreL2TransactionWithdrawals
(
withdrawals
[]
L2TransactionWithdrawal
)
error
{
func
(
db
*
bridgeTransactionsDB
)
StoreL2TransactionWithdrawals
(
withdrawals
[]
L2TransactionWithdrawal
)
error
{
result
:=
db
.
gorm
.
Create
(
&
withdrawals
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
withdrawals
,
batchInsertSize
)
return
result
.
Error
return
result
.
Error
}
}
...
@@ -185,23 +185,47 @@ func (db *bridgeTransactionsDB) MarkL2TransactionWithdrawalFinalizedEvent(withdr
...
@@ -185,23 +185,47 @@ func (db *bridgeTransactionsDB) MarkL2TransactionWithdrawalFinalizedEvent(withdr
}
}
func
(
db
*
bridgeTransactionsDB
)
L2LatestBlockHeader
()
(
*
L2BlockHeader
,
error
)
{
func
(
db
*
bridgeTransactionsDB
)
L2LatestBlockHeader
()
(
*
L2BlockHeader
,
error
)
{
// L2: Inclusion of the latest deposit
// L2: Latest Withdrawal, Latest L2 Header of indexed deposit epoch
l1DepositQuery
:=
db
.
gorm
.
Table
(
"l1_transaction_deposits"
)
.
Order
(
"l1_transaction_deposits.timestamp DESC"
)
var
latestWithdrawalHeader
,
latestL2DepositHeader
*
L2BlockHeader
l1DepositQuery
=
l1DepositQuery
.
Joins
(
"INNER JOIN l1_contract_events ON l1_contract_events.guid = l1_transaction_deposits.initiated_l1_event_guid"
)
l1DepositQuery
=
l1DepositQuery
.
Select
(
"l1_contract_events.*"
)
var
withdrawHeader
L2BlockHeader
withdrawalQuery
:=
db
.
gorm
.
Table
(
"l2_transaction_withdrawals"
)
.
Order
(
"timestamp DESC"
)
.
Limit
(
1
)
l2Query
:=
db
.
gorm
.
Table
(
"(?) AS l1_deposit_events"
,
l1DepositQuery
)
withdrawalQuery
=
withdrawalQuery
.
Joins
(
"INNER JOIN l2_contract_events ON l2_contract_events.guid = l2_transaction_withdrawals.initiated_l2_event_guid"
)
l2Query
=
l2Query
.
Joins
(
"INNER JOIN l2_block_headers ON l2_block_headers.timestamp = l1_deposit_events.timestamp"
)
withdrawalQuery
=
withdrawalQuery
.
Joins
(
"INNER JOIN l2_block_headers ON l2_block_headers.hash = l2_contract_events.block_hash"
)
l2Query
=
l2Query
.
Order
(
"l2_block_headers.timestamp DESC"
)
.
Select
(
"l2_block_headers.*"
)
result
:=
withdrawalQuery
.
Select
(
"l2_block_headers.*"
)
.
Take
(
&
withdrawHeader
)
if
result
.
Error
!=
nil
&&
!
errors
.
Is
(
result
.
Error
,
gorm
.
ErrRecordNotFound
)
{
return
nil
,
result
.
Error
}
else
if
!
errors
.
Is
(
result
.
Error
,
gorm
.
ErrRecordNotFound
)
{
latestWithdrawalHeader
=
&
withdrawHeader
}
var
l2Header
L2BlockHeader
// Check for any deposits that may have been included after the latest withdrawal. However, since the bridge
result
:=
l2Query
.
Take
(
&
l2Header
)
// processor only inserts entries when the corresponding epoch has been indexed on both L1 and L2, we can
if
result
.
Error
!=
nil
{
// simply look for the latest L2 block with at <= time of the latest L1 deposit.
if
errors
.
Is
(
result
.
Error
,
gorm
.
ErrRecordNotFound
)
{
var
l1Deposit
L1TransactionDeposit
return
nil
,
nil
result
=
db
.
gorm
.
Table
(
"l1_transaction_deposits"
)
.
Order
(
"timestamp DESC"
)
.
Limit
(
1
)
.
Take
(
&
l1Deposit
)
}
if
result
.
Error
!=
nil
&&
!
errors
.
Is
(
result
.
Error
,
gorm
.
ErrRecordNotFound
)
{
return
nil
,
result
.
Error
return
nil
,
result
.
Error
}
else
if
!
errors
.
Is
(
result
.
Error
,
gorm
.
ErrRecordNotFound
)
{
var
l2DepositHeader
L2BlockHeader
result
:=
db
.
gorm
.
Table
(
"l2_block_headers"
)
.
Order
(
"timestamp DESC"
)
.
Limit
(
1
)
.
Where
(
"timestamp <= ?"
,
l1Deposit
.
Tx
.
Timestamp
)
.
Take
(
&
l2DepositHeader
)
if
result
.
Error
!=
nil
&&
!
errors
.
Is
(
result
.
Error
,
gorm
.
ErrRecordNotFound
)
{
return
nil
,
result
.
Error
}
else
if
!
errors
.
Is
(
result
.
Error
,
gorm
.
ErrRecordNotFound
)
{
latestL2DepositHeader
=
&
l2DepositHeader
}
}
}
return
&
l2Header
,
nil
// compare
if
latestWithdrawalHeader
==
nil
{
return
latestL2DepositHeader
,
nil
}
else
if
latestL2DepositHeader
==
nil
{
return
latestWithdrawalHeader
,
nil
}
if
latestWithdrawalHeader
.
Timestamp
>=
latestL2DepositHeader
.
Timestamp
{
return
latestWithdrawalHeader
,
nil
}
else
{
return
latestL2DepositHeader
,
nil
}
}
}
indexer/database/bridge_transfers.go
View file @
787f6857
...
@@ -89,7 +89,7 @@ func newBridgeTransfersDB(db *gorm.DB) BridgeTransfersDB {
...
@@ -89,7 +89,7 @@ func newBridgeTransfersDB(db *gorm.DB) BridgeTransfersDB {
*/
*/
func
(
db
*
bridgeTransfersDB
)
StoreL1BridgeDeposits
(
deposits
[]
L1BridgeDeposit
)
error
{
func
(
db
*
bridgeTransfersDB
)
StoreL1BridgeDeposits
(
deposits
[]
L1BridgeDeposit
)
error
{
result
:=
db
.
gorm
.
Create
(
&
deposits
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
deposits
,
batchInsertSize
)
return
result
.
Error
return
result
.
Error
}
}
...
@@ -202,7 +202,7 @@ l1_bridge_deposits.timestamp, cross_domain_message_hash, local_token_address, re
...
@@ -202,7 +202,7 @@ l1_bridge_deposits.timestamp, cross_domain_message_hash, local_token_address, re
*/
*/
func
(
db
*
bridgeTransfersDB
)
StoreL2BridgeWithdrawals
(
withdrawals
[]
L2BridgeWithdrawal
)
error
{
func
(
db
*
bridgeTransfersDB
)
StoreL2BridgeWithdrawals
(
withdrawals
[]
L2BridgeWithdrawal
)
error
{
result
:=
db
.
gorm
.
Create
(
&
withdrawals
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
withdrawals
,
batchInsertSize
)
return
result
.
Error
return
result
.
Error
}
}
...
...
indexer/database/contract_events.go
View file @
787f6857
...
@@ -109,7 +109,7 @@ func newContractEventsDB(db *gorm.DB) ContractEventsDB {
...
@@ -109,7 +109,7 @@ func newContractEventsDB(db *gorm.DB) ContractEventsDB {
// L1
// L1
func
(
db
*
contractEventsDB
)
StoreL1ContractEvents
(
events
[]
L1ContractEvent
)
error
{
func
(
db
*
contractEventsDB
)
StoreL1ContractEvents
(
events
[]
L1ContractEvent
)
error
{
result
:=
db
.
gorm
.
Create
(
&
events
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
events
,
batchInsertSize
)
return
result
.
Error
return
result
.
Error
}
}
...
@@ -176,7 +176,7 @@ func (db *contractEventsDB) L1LatestContractEventWithFilter(filter ContractEvent
...
@@ -176,7 +176,7 @@ func (db *contractEventsDB) L1LatestContractEventWithFilter(filter ContractEvent
// L2
// L2
func
(
db
*
contractEventsDB
)
StoreL2ContractEvents
(
events
[]
L2ContractEvent
)
error
{
func
(
db
*
contractEventsDB
)
StoreL2ContractEvents
(
events
[]
L2ContractEvent
)
error
{
result
:=
db
.
gorm
.
Create
(
&
events
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
events
,
batchInsertSize
)
return
result
.
Error
return
result
.
Error
}
}
...
...
indexer/database/db.go
View file @
787f6857
...
@@ -12,6 +12,14 @@ import (
...
@@ -12,6 +12,14 @@ import (
"gorm.io/gorm/logger"
"gorm.io/gorm/logger"
)
)
var
(
// The postgres parameter counter for a given query is stored via a uint16,
// resulting in a parameter limit of 65535. In order to avoid reaching this limit
// we'll utilize a batch size of 3k for inserts, well below as long as the the number
// of columns < 20.
batchInsertSize
int
=
3
_000
)
type
DB
struct
{
type
DB
struct
{
gorm
*
gorm
.
DB
gorm
*
gorm
.
DB
...
@@ -31,8 +39,7 @@ func NewDB(dbConfig config.DBConfig) (*DB, error) {
...
@@ -31,8 +39,7 @@ func NewDB(dbConfig config.DBConfig) (*DB, error) {
dsn
+=
fmt
.
Sprintf
(
" password=%s"
,
dbConfig
.
Password
)
dsn
+=
fmt
.
Sprintf
(
" password=%s"
,
dbConfig
.
Password
)
}
}
gorm
,
err
:=
gorm
.
Open
(
postgres
.
Open
(
dsn
),
&
gorm
.
Config
{
gorm
,
err
:=
gorm
.
Open
(
postgres
.
Open
(
dsn
),
&
gorm
.
Config
{
// The indexer will explicitly manage the transaction
// The indexer will explicitly manage the transactions
// flow processing blocks
SkipDefaultTransaction
:
true
,
SkipDefaultTransaction
:
true
,
// We may choose to create an adapter such that the
// We may choose to create an adapter such that the
...
...
indexer/database/mocks.go
View file @
787f6857
...
@@ -4,6 +4,7 @@ import (
...
@@ -4,6 +4,7 @@ import (
"math/big"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/mock"
)
)
...
...
indexer/etl/l1_etl_test.go
View file @
787f6857
...
@@ -10,6 +10,7 @@ import (
...
@@ -10,6 +10,7 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/indexer/bigint"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/node"
"github.com/ethereum-optimism/optimism/indexer/node"
...
@@ -17,7 +18,7 @@ import (
...
@@ -17,7 +18,7 @@ import (
"testing"
"testing"
)
)
func
Test
_L1ETL_
Construction
(
t
*
testing
.
T
)
{
func
Test
L1ETL
Construction
(
t
*
testing
.
T
)
{
etlMetrics
:=
NewMetrics
(
metrics
.
NewRegistry
(),
"l1"
)
etlMetrics
:=
NewMetrics
(
metrics
.
NewRegistry
(),
"l1"
)
type
testSuite
struct
{
type
testSuite
struct
{
...
@@ -39,11 +40,10 @@ func Test_L1ETL_Construction(t *testing.T) {
...
@@ -39,11 +40,10 @@ func Test_L1ETL_Construction(t *testing.T) {
db
:=
database
.
NewMockDB
()
db
:=
database
.
NewMockDB
()
testStart
:=
big
.
NewInt
(
100
)
testStart
:=
big
.
NewInt
(
100
)
db
.
MockBlocks
.
On
(
"L1LatestBlockHeader"
)
.
Return
(
nil
,
nil
)
db
.
MockBlocks
.
On
(
"L1LatestBlockHeader"
)
.
Return
(
nil
,
nil
)
client
.
On
(
"BlockHeaderByNumber"
,
mock
.
MatchedBy
(
client
.
On
(
"BlockHeaderByNumber"
,
mock
.
MatchedBy
(
node
.
BigInt
Matcher
(
100
)))
.
Return
(
bigint
.
Matcher
(
100
)))
.
Return
(
&
types
.
Header
{
&
types
.
Header
{
ParentHash
:
common
.
HexToHash
(
"0x69"
),
ParentHash
:
common
.
HexToHash
(
"0x69"
),
},
nil
)
},
nil
)
...
...
indexer/migrations/20230523_create_schema.sql
View file @
787f6857
CREATE
DOMAIN
UINT256
AS
NUMERIC
DO
$$
CHECK
(
VALUE
>=
0
AND
VALUE
<
2
^
256
and
SCALE
(
VALUE
)
=
0
);
BEGIN
IF
NOT
EXISTS
(
SELECT
1
FROM
pg_type
WHERE
typname
=
'uint256'
)
THEN
CREATE
DOMAIN
UINT256
AS
NUMERIC
CHECK
(
VALUE
>=
0
AND
VALUE
<
2
^
256
and
SCALE
(
VALUE
)
=
0
);
END
IF
;
END
$$
;
/**
/**
* BLOCK DATA
* BLOCK DATA
...
@@ -16,6 +21,8 @@ CREATE TABLE IF NOT EXISTS l1_block_headers (
...
@@ -16,6 +21,8 @@ CREATE TABLE IF NOT EXISTS l1_block_headers (
-- Raw Data
-- Raw Data
rlp_bytes
VARCHAR
NOT
NULL
rlp_bytes
VARCHAR
NOT
NULL
);
);
CREATE
INDEX
IF
NOT
EXISTS
l1_block_headers_timestamp
ON
l1_block_headers
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l1_block_headers_number
ON
l1_block_headers
(
number
);
CREATE
TABLE
IF
NOT
EXISTS
l2_block_headers
(
CREATE
TABLE
IF
NOT
EXISTS
l2_block_headers
(
-- Searchable fields
-- Searchable fields
...
@@ -27,6 +34,8 @@ CREATE TABLE IF NOT EXISTS l2_block_headers (
...
@@ -27,6 +34,8 @@ CREATE TABLE IF NOT EXISTS l2_block_headers (
-- Raw Data
-- Raw Data
rlp_bytes
VARCHAR
NOT
NULL
rlp_bytes
VARCHAR
NOT
NULL
);
);
CREATE
INDEX
IF
NOT
EXISTS
l2_block_headers_timestamp
ON
l2_block_headers
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l2_block_headers_number
ON
l2_block_headers
(
number
);
/**
/**
* EVENT DATA
* EVENT DATA
...
@@ -45,6 +54,9 @@ CREATE TABLE IF NOT EXISTS l1_contract_events (
...
@@ -45,6 +54,9 @@ CREATE TABLE IF NOT EXISTS l1_contract_events (
-- Raw Data
-- Raw Data
rlp_bytes
VARCHAR
NOT
NULL
rlp_bytes
VARCHAR
NOT
NULL
);
);
CREATE
INDEX
IF
NOT
EXISTS
l1_contract_events_timestamp
ON
l1_contract_events
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l1_contract_events_block_hash
ON
l1_contract_events
(
block_hash
);
CREATE
INDEX
IF
NOT
EXISTS
l1_contract_events_event_signature
ON
l1_contract_events
(
event_signature
);
CREATE
TABLE
IF
NOT
EXISTS
l2_contract_events
(
CREATE
TABLE
IF
NOT
EXISTS
l2_contract_events
(
-- Searchable fields
-- Searchable fields
...
@@ -59,6 +71,9 @@ CREATE TABLE IF NOT EXISTS l2_contract_events (
...
@@ -59,6 +71,9 @@ CREATE TABLE IF NOT EXISTS l2_contract_events (
-- Raw Data
-- Raw Data
rlp_bytes
VARCHAR
NOT
NULL
rlp_bytes
VARCHAR
NOT
NULL
);
);
CREATE
INDEX
IF
NOT
EXISTS
l2_contract_events_timestamp
ON
l2_contract_events
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l2_contract_events_block_hash
ON
l2_contract_events
(
block_hash
);
CREATE
INDEX
IF
NOT
EXISTS
l2_contract_events_event_signature
ON
l2_contract_events
(
event_signature
);
-- Tables that index finalization markers for L2 blocks.
-- Tables that index finalization markers for L2 blocks.
...
@@ -79,6 +94,7 @@ CREATE TABLE IF NOT EXISTS output_proposals (
...
@@ -79,6 +94,7 @@ CREATE TABLE IF NOT EXISTS output_proposals (
output_proposed_guid
VARCHAR
NOT
NULL
UNIQUE
REFERENCES
l1_contract_events
(
guid
)
ON
DELETE
CASCADE
output_proposed_guid
VARCHAR
NOT
NULL
UNIQUE
REFERENCES
l1_contract_events
(
guid
)
ON
DELETE
CASCADE
);
);
/**
/**
* BRIDGING DATA
* BRIDGING DATA
*/
*/
...
@@ -118,6 +134,10 @@ CREATE TABLE IF NOT EXISTS l1_transaction_deposits (
...
@@ -118,6 +134,10 @@ CREATE TABLE IF NOT EXISTS l1_transaction_deposits (
data
VARCHAR
NOT
NULL
,
data
VARCHAR
NOT
NULL
,
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
);
);
CREATE
INDEX
IF
NOT
EXISTS
l1_transaction_deposits_timestamp
ON
l1_transaction_deposits
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l1_transaction_deposits_initiated_l1_event_guid
ON
l1_transaction_deposits
(
initiated_l1_event_guid
);
CREATE
INDEX
IF
NOT
EXISTS
l1_transaction_deposits_from_address
ON
l1_transaction_deposits
(
from_address
);
CREATE
TABLE
IF
NOT
EXISTS
l2_transaction_withdrawals
(
CREATE
TABLE
IF
NOT
EXISTS
l2_transaction_withdrawals
(
withdrawal_hash
VARCHAR
PRIMARY
KEY
,
withdrawal_hash
VARCHAR
PRIMARY
KEY
,
nonce
UINT256
NOT
NULL
UNIQUE
,
nonce
UINT256
NOT
NULL
UNIQUE
,
...
@@ -136,6 +156,9 @@ CREATE TABLE IF NOT EXISTS l2_transaction_withdrawals (
...
@@ -136,6 +156,9 @@ CREATE TABLE IF NOT EXISTS l2_transaction_withdrawals (
data
VARCHAR
NOT
NULL
,
data
VARCHAR
NOT
NULL
,
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
);
);
CREATE
INDEX
IF
NOT
EXISTS
l2_transaction_withdrawals_timestamp
ON
l2_transaction_withdrawals
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l2_transaction_withdrawals_initiated_l2_event_guid
ON
l2_transaction_withdrawals
(
initiated_l2_event_guid
);
CREATE
INDEX
IF
NOT
EXISTS
l2_transaction_withdrawals_from_address
ON
l2_transaction_withdrawals
(
from_address
);
-- CrossDomainMessenger
-- CrossDomainMessenger
CREATE
TABLE
IF
NOT
EXISTS
l1_bridge_messages
(
CREATE
TABLE
IF
NOT
EXISTS
l1_bridge_messages
(
...
@@ -154,6 +177,10 @@ CREATE TABLE IF NOT EXISTS l1_bridge_messages(
...
@@ -154,6 +177,10 @@ CREATE TABLE IF NOT EXISTS l1_bridge_messages(
data
VARCHAR
NOT
NULL
,
data
VARCHAR
NOT
NULL
,
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
);
);
CREATE
INDEX
IF
NOT
EXISTS
l1_bridge_messages_timestamp
ON
l1_bridge_messages
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l1_bridge_messages_transaction_source_hash
ON
l1_bridge_messages
(
transaction_source_hash
);
CREATE
INDEX
IF
NOT
EXISTS
l1_bridge_messages_from_address
ON
l1_bridge_messages
(
from_address
);
CREATE
TABLE
IF
NOT
EXISTS
l2_bridge_messages
(
CREATE
TABLE
IF
NOT
EXISTS
l2_bridge_messages
(
message_hash
VARCHAR
PRIMARY
KEY
,
message_hash
VARCHAR
PRIMARY
KEY
,
nonce
UINT256
NOT
NULL
UNIQUE
,
nonce
UINT256
NOT
NULL
UNIQUE
,
...
@@ -170,6 +197,9 @@ CREATE TABLE IF NOT EXISTS l2_bridge_messages(
...
@@ -170,6 +197,9 @@ CREATE TABLE IF NOT EXISTS l2_bridge_messages(
data
VARCHAR
NOT
NULL
,
data
VARCHAR
NOT
NULL
,
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
);
);
CREATE
INDEX
IF
NOT
EXISTS
l2_bridge_messages_timestamp
ON
l2_bridge_messages
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l2_bridge_messages_transaction_withdrawal_hash
ON
l2_bridge_messages
(
transaction_withdrawal_hash
);
CREATE
INDEX
IF
NOT
EXISTS
l2_bridge_messages_from_address
ON
l2_bridge_messages
(
from_address
);
-- StandardBridge
-- StandardBridge
CREATE
TABLE
IF
NOT
EXISTS
l1_bridge_deposits
(
CREATE
TABLE
IF
NOT
EXISTS
l1_bridge_deposits
(
...
@@ -185,6 +215,10 @@ CREATE TABLE IF NOT EXISTS l1_bridge_deposits (
...
@@ -185,6 +215,10 @@ CREATE TABLE IF NOT EXISTS l1_bridge_deposits (
data
VARCHAR
NOT
NULL
,
data
VARCHAR
NOT
NULL
,
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
);
);
CREATE
INDEX
IF
NOT
EXISTS
l1_bridge_deposits_timestamp
ON
l1_bridge_deposits
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l1_bridge_deposits_cross_domain_message_hash
ON
l1_bridge_deposits
(
cross_domain_message_hash
);
CREATE
INDEX
IF
NOT
EXISTS
l1_bridge_deposits_from_address
ON
l1_bridge_deposits
(
from_address
);
CREATE
TABLE
IF
NOT
EXISTS
l2_bridge_withdrawals
(
CREATE
TABLE
IF
NOT
EXISTS
l2_bridge_withdrawals
(
transaction_withdrawal_hash
VARCHAR
PRIMARY
KEY
REFERENCES
l2_transaction_withdrawals
(
withdrawal_hash
)
ON
DELETE
CASCADE
,
transaction_withdrawal_hash
VARCHAR
PRIMARY
KEY
REFERENCES
l2_transaction_withdrawals
(
withdrawal_hash
)
ON
DELETE
CASCADE
,
cross_domain_message_hash
VARCHAR
NOT
NULL
UNIQUE
REFERENCES
l2_bridge_messages
(
message_hash
)
ON
DELETE
CASCADE
,
cross_domain_message_hash
VARCHAR
NOT
NULL
UNIQUE
REFERENCES
l2_bridge_messages
(
message_hash
)
ON
DELETE
CASCADE
,
...
@@ -198,3 +232,6 @@ CREATE TABLE IF NOT EXISTS l2_bridge_withdrawals (
...
@@ -198,3 +232,6 @@ CREATE TABLE IF NOT EXISTS l2_bridge_withdrawals (
data
VARCHAR
NOT
NULL
,
data
VARCHAR
NOT
NULL
,
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
);
);
CREATE
INDEX
IF
NOT
EXISTS
l2_bridge_withdrawals_timestamp
ON
l2_bridge_withdrawals
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l2_bridge_withdrawals_cross_domain_message_hash
ON
l2_bridge_withdrawals
(
cross_domain_message_hash
);
CREATE
INDEX
IF
NOT
EXISTS
l2_bridge_withdrawals_from_address
ON
l2_bridge_withdrawals
(
from_address
);
indexer/node/bigint.go
deleted
100644 → 0
View file @
31f9a978
package
node
import
"math/big"
// Commonly used big.Int constants, allocated once at package load.
var (
	bigZero = big.NewInt(0)
	bigOne  = big.NewInt(1)
)
// returns a new big.Int for `end` to which `end - start` <= size.
// @note (start, end) is an inclusive range
func
clampBigInt
(
start
,
end
*
big
.
Int
,
size
uint64
)
*
big
.
Int
{
temp
:=
new
(
big
.
Int
)
count
:=
temp
.
Sub
(
end
,
start
)
.
Uint64
()
+
1
if
count
<=
size
{
return
end
}
// we re-use the allocated temp as the new end
temp
.
Add
(
start
,
big
.
NewInt
(
int64
(
size
-
1
)))
return
temp
}
// returns an inner comparison function result for a big.Int
func
BigIntMatcher
(
num
int64
)
func
(
*
big
.
Int
)
bool
{
return
func
(
bi
*
big
.
Int
)
bool
{
return
bi
.
Int64
()
==
num
}
}
indexer/node/bigint_test.go
deleted
100644 → 0
View file @
31f9a978
package
node
import
(
"math/big"
"testing"
"github.com/stretchr/testify/assert"
)
func
TestClampBigInt
(
t
*
testing
.
T
)
{
assert
.
True
(
t
,
true
)
start
:=
big
.
NewInt
(
1
)
end
:=
big
.
NewInt
(
10
)
// When the (start, end) bounds are within range
// the same end pointer should be returned
// larger range
result
:=
clampBigInt
(
start
,
end
,
20
)
assert
.
True
(
t
,
end
==
result
)
// exact range
result
=
clampBigInt
(
start
,
end
,
10
)
assert
.
True
(
t
,
end
==
result
)
// smaller range
result
=
clampBigInt
(
start
,
end
,
5
)
assert
.
False
(
t
,
end
==
result
)
assert
.
Equal
(
t
,
uint64
(
5
),
result
.
Uint64
())
}
indexer/node/header_traversal.go
View file @
787f6857
...
@@ -5,6 +5,7 @@ import (
...
@@ -5,6 +5,7 @@ import (
"fmt"
"fmt"
"math/big"
"math/big"
"github.com/ethereum-optimism/optimism/indexer/bigint"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types"
)
)
...
@@ -55,12 +56,12 @@ func (f *HeaderTraversal) NextFinalizedHeaders(maxSize uint64) ([]types.Header,
...
@@ -55,12 +56,12 @@ func (f *HeaderTraversal) NextFinalizedHeaders(maxSize uint64) ([]types.Header,
}
}
}
}
nextHeight
:=
bigZero
nextHeight
:=
big
int
.
Zero
if
f
.
lastHeader
!=
nil
{
if
f
.
lastHeader
!=
nil
{
nextHeight
=
new
(
big
.
Int
)
.
Add
(
f
.
lastHeader
.
Number
,
bigOne
)
nextHeight
=
new
(
big
.
Int
)
.
Add
(
f
.
lastHeader
.
Number
,
big
int
.
One
)
}
}
endHeight
=
clampBigInt
(
nextHeight
,
endHeight
,
maxSize
)
endHeight
=
bigint
.
Clamp
(
nextHeight
,
endHeight
,
maxSize
)
headers
,
err
:=
f
.
ethClient
.
BlockHeadersByRange
(
nextHeight
,
endHeight
)
headers
,
err
:=
f
.
ethClient
.
BlockHeadersByRange
(
nextHeight
,
endHeight
)
if
err
!=
nil
{
if
err
!=
nil
{
return
nil
,
fmt
.
Errorf
(
"error querying blocks by range: %w"
,
err
)
return
nil
,
fmt
.
Errorf
(
"error querying blocks by range: %w"
,
err
)
...
...
indexer/node/header_traversal_test.go
View file @
787f6857
...
@@ -4,6 +4,7 @@ import (
...
@@ -4,6 +4,7 @@ import (
"math/big"
"math/big"
"testing"
"testing"
"github.com/ethereum-optimism/optimism/indexer/bigint"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/require"
...
@@ -37,7 +38,7 @@ func TestHeaderTraversalNextFinalizedHeadersNoOp(t *testing.T) {
...
@@ -37,7 +38,7 @@ func TestHeaderTraversalNextFinalizedHeadersNoOp(t *testing.T) {
// start from block 10 as the latest fetched block
// start from block 10 as the latest fetched block
lastHeader
:=
&
types
.
Header
{
Number
:
big
.
NewInt
(
10
)}
lastHeader
:=
&
types
.
Header
{
Number
:
big
.
NewInt
(
10
)}
headerTraversal
:=
NewHeaderTraversal
(
client
,
lastHeader
,
bigZero
)
headerTraversal
:=
NewHeaderTraversal
(
client
,
lastHeader
,
big
int
.
Zero
)
// no new headers when matched with head
// no new headers when matched with head
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
lastHeader
,
nil
)
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
lastHeader
,
nil
)
...
@@ -50,12 +51,12 @@ func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
...
@@ -50,12 +51,12 @@ func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
client
:=
new
(
MockEthClient
)
client
:=
new
(
MockEthClient
)
// start from genesis
// start from genesis
headerTraversal
:=
NewHeaderTraversal
(
client
,
nil
,
bigZero
)
headerTraversal
:=
NewHeaderTraversal
(
client
,
nil
,
big
int
.
Zero
)
// blocks [0..4]
// blocks [0..4]
headers
:=
makeHeaders
(
5
,
nil
)
headers
:=
makeHeaders
(
5
,
nil
)
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
headers
[
4
],
nil
)
.
Times
(
1
)
// Times so that we can override next
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
headers
[
4
],
nil
)
.
Times
(
1
)
// Times so that we can override next
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
BigIntMatcher
(
0
)),
mock
.
MatchedBy
(
BigInt
Matcher
(
4
)))
.
Return
(
headers
,
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
bigint
.
Matcher
(
0
)),
mock
.
MatchedBy
(
bigint
.
Matcher
(
4
)))
.
Return
(
headers
,
nil
)
headers
,
err
:=
headerTraversal
.
NextFinalizedHeaders
(
5
)
headers
,
err
:=
headerTraversal
.
NextFinalizedHeaders
(
5
)
require
.
NoError
(
t
,
err
)
require
.
NoError
(
t
,
err
)
require
.
Len
(
t
,
headers
,
5
)
require
.
Len
(
t
,
headers
,
5
)
...
@@ -63,7 +64,7 @@ func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
...
@@ -63,7 +64,7 @@ func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
// blocks [5..9]
// blocks [5..9]
headers
=
makeHeaders
(
5
,
&
headers
[
len
(
headers
)
-
1
])
headers
=
makeHeaders
(
5
,
&
headers
[
len
(
headers
)
-
1
])
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
headers
[
4
],
nil
)
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
headers
[
4
],
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
BigIntMatcher
(
5
)),
mock
.
MatchedBy
(
BigInt
Matcher
(
9
)))
.
Return
(
headers
,
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
bigint
.
Matcher
(
5
)),
mock
.
MatchedBy
(
bigint
.
Matcher
(
9
)))
.
Return
(
headers
,
nil
)
headers
,
err
=
headerTraversal
.
NextFinalizedHeaders
(
5
)
headers
,
err
=
headerTraversal
.
NextFinalizedHeaders
(
5
)
require
.
NoError
(
t
,
err
)
require
.
NoError
(
t
,
err
)
require
.
Len
(
t
,
headers
,
5
)
require
.
Len
(
t
,
headers
,
5
)
...
@@ -73,21 +74,21 @@ func TestHeaderTraversalNextFinalizedHeadersMaxSize(t *testing.T) {
...
@@ -73,21 +74,21 @@ func TestHeaderTraversalNextFinalizedHeadersMaxSize(t *testing.T) {
client
:=
new
(
MockEthClient
)
client
:=
new
(
MockEthClient
)
// start from genesis
// start from genesis
headerTraversal
:=
NewHeaderTraversal
(
client
,
nil
,
bigZero
)
headerTraversal
:=
NewHeaderTraversal
(
client
,
nil
,
big
int
.
Zero
)
// 100 "available" headers
// 100 "available" headers
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
types
.
Header
{
Number
:
big
.
NewInt
(
100
)},
nil
)
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
types
.
Header
{
Number
:
big
.
NewInt
(
100
)},
nil
)
// clamped by the supplied size
// clamped by the supplied size
headers
:=
makeHeaders
(
5
,
nil
)
headers
:=
makeHeaders
(
5
,
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
BigIntMatcher
(
0
)),
mock
.
MatchedBy
(
BigInt
Matcher
(
4
)))
.
Return
(
headers
,
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
bigint
.
Matcher
(
0
)),
mock
.
MatchedBy
(
bigint
.
Matcher
(
4
)))
.
Return
(
headers
,
nil
)
headers
,
err
:=
headerTraversal
.
NextFinalizedHeaders
(
5
)
headers
,
err
:=
headerTraversal
.
NextFinalizedHeaders
(
5
)
require
.
NoError
(
t
,
err
)
require
.
NoError
(
t
,
err
)
require
.
Len
(
t
,
headers
,
5
)
require
.
Len
(
t
,
headers
,
5
)
// clamped by the supplied size. FinalizedHeight == 100
// clamped by the supplied size. FinalizedHeight == 100
headers
=
makeHeaders
(
10
,
&
headers
[
len
(
headers
)
-
1
])
headers
=
makeHeaders
(
10
,
&
headers
[
len
(
headers
)
-
1
])
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
BigIntMatcher
(
5
)),
mock
.
MatchedBy
(
BigInt
Matcher
(
14
)))
.
Return
(
headers
,
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
bigint
.
Matcher
(
5
)),
mock
.
MatchedBy
(
bigint
.
Matcher
(
14
)))
.
Return
(
headers
,
nil
)
headers
,
err
=
headerTraversal
.
NextFinalizedHeaders
(
10
)
headers
,
err
=
headerTraversal
.
NextFinalizedHeaders
(
10
)
require
.
NoError
(
t
,
err
)
require
.
NoError
(
t
,
err
)
require
.
Len
(
t
,
headers
,
10
)
require
.
Len
(
t
,
headers
,
10
)
...
@@ -97,12 +98,12 @@ func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
...
@@ -97,12 +98,12 @@ func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
client
:=
new
(
MockEthClient
)
client
:=
new
(
MockEthClient
)
// start from genesis
// start from genesis
headerTraversal
:=
NewHeaderTraversal
(
client
,
nil
,
bigZero
)
headerTraversal
:=
NewHeaderTraversal
(
client
,
nil
,
big
int
.
Zero
)
// blocks [0..4]
// blocks [0..4]
headers
:=
makeHeaders
(
5
,
nil
)
headers
:=
makeHeaders
(
5
,
nil
)
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
headers
[
4
],
nil
)
.
Times
(
1
)
// Times so that we can override next
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
headers
[
4
],
nil
)
.
Times
(
1
)
// Times so that we can override next
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
BigIntMatcher
(
0
)),
mock
.
MatchedBy
(
BigInt
Matcher
(
4
)))
.
Return
(
headers
,
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
bigint
.
Matcher
(
0
)),
mock
.
MatchedBy
(
bigint
.
Matcher
(
4
)))
.
Return
(
headers
,
nil
)
headers
,
err
:=
headerTraversal
.
NextFinalizedHeaders
(
5
)
headers
,
err
:=
headerTraversal
.
NextFinalizedHeaders
(
5
)
require
.
NoError
(
t
,
err
)
require
.
NoError
(
t
,
err
)
require
.
Len
(
t
,
headers
,
5
)
require
.
Len
(
t
,
headers
,
5
)
...
@@ -110,7 +111,7 @@ func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
...
@@ -110,7 +111,7 @@ func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
// blocks [5..9]. Next batch is not chained correctly (starts again from genesis)
// blocks [5..9]. Next batch is not chained correctly (starts again from genesis)
headers
=
makeHeaders
(
5
,
nil
)
headers
=
makeHeaders
(
5
,
nil
)
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
types
.
Header
{
Number
:
big
.
NewInt
(
9
)},
nil
)
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
types
.
Header
{
Number
:
big
.
NewInt
(
9
)},
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
BigIntMatcher
(
5
)),
mock
.
MatchedBy
(
BigInt
Matcher
(
9
)))
.
Return
(
headers
,
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
bigint
.
Matcher
(
5
)),
mock
.
MatchedBy
(
bigint
.
Matcher
(
9
)))
.
Return
(
headers
,
nil
)
headers
,
err
=
headerTraversal
.
NextFinalizedHeaders
(
5
)
headers
,
err
=
headerTraversal
.
NextFinalizedHeaders
(
5
)
require
.
Nil
(
t
,
headers
)
require
.
Nil
(
t
,
headers
)
require
.
Equal
(
t
,
ErrHeaderTraversalAndProviderMismatchedState
,
err
)
require
.
Equal
(
t
,
ErrHeaderTraversalAndProviderMismatchedState
,
err
)
...
...
indexer/node/mocks.go
View file @
787f6857
...
@@ -6,6 +6,7 @@ import (
...
@@ -6,6 +6,7 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/mock"
)
)
...
...
indexer/processors/bridge.go
View file @
787f6857
...
@@ -5,6 +5,7 @@ import (
...
@@ -5,6 +5,7 @@ import (
"errors"
"errors"
"math/big"
"math/big"
"github.com/ethereum-optimism/optimism/indexer/bigint"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/etl"
"github.com/ethereum-optimism/optimism/indexer/etl"
...
@@ -20,8 +21,6 @@ type BridgeProcessor struct {
...
@@ -20,8 +21,6 @@ type BridgeProcessor struct {
l1Etl
*
etl
.
L1ETL
l1Etl
*
etl
.
L1ETL
chainConfig
config
.
ChainConfig
chainConfig
config
.
ChainConfig
// NOTE: We'll need this processor to handle for reorgs events.
LatestL1Header
*
types
.
Header
LatestL1Header
*
types
.
Header
LatestL2Header
*
types
.
Header
LatestL2Header
*
types
.
Header
}
}
...
@@ -42,7 +41,7 @@ func NewBridgeProcessor(log log.Logger, db *database.DB, l1Etl *etl.L1ETL, chain
...
@@ -42,7 +41,7 @@ func NewBridgeProcessor(log log.Logger, db *database.DB, l1Etl *etl.L1ETL, chain
if
latestL1Header
==
nil
&&
latestL2Header
==
nil
{
if
latestL1Header
==
nil
&&
latestL2Header
==
nil
{
log
.
Info
(
"no indexed state, starting from rollup genesis"
)
log
.
Info
(
"no indexed state, starting from rollup genesis"
)
}
else
{
}
else
{
l1Height
,
l2Height
:=
big
.
NewInt
(
0
),
big
.
NewInt
(
0
)
l1Height
,
l2Height
:=
big
int
.
Zero
,
bigint
.
Zero
if
latestL1Header
!=
nil
{
if
latestL1Header
!=
nil
{
l1Height
=
latestL1Header
.
Number
l1Height
=
latestL1Header
.
Number
l1Header
=
latestL1Header
.
RLPHeader
.
Header
()
l1Header
=
latestL1Header
.
RLPHeader
.
Header
()
...
@@ -51,7 +50,7 @@ func NewBridgeProcessor(log log.Logger, db *database.DB, l1Etl *etl.L1ETL, chain
...
@@ -51,7 +50,7 @@ func NewBridgeProcessor(log log.Logger, db *database.DB, l1Etl *etl.L1ETL, chain
l2Height
=
latestL2Header
.
Number
l2Height
=
latestL2Header
.
Number
l2Header
=
latestL2Header
.
RLPHeader
.
Header
()
l2Header
=
latestL2Header
.
RLPHeader
.
Header
()
}
}
log
.
Info
(
"detected latest indexed state"
,
"l1_block_number"
,
l1Height
,
"l2_block_number"
,
l2Height
)
log
.
Info
(
"detected latest indexed
bridge
state"
,
"l1_block_number"
,
l1Height
,
"l2_block_number"
,
l2Height
)
}
}
return
&
BridgeProcessor
{
log
,
db
,
l1Etl
,
chainConfig
,
l1Header
,
l2Header
},
nil
return
&
BridgeProcessor
{
log
,
db
,
l1Etl
,
chainConfig
,
l1Header
,
l2Header
},
nil
...
@@ -69,6 +68,9 @@ func (b *BridgeProcessor) Start(ctx context.Context) error {
...
@@ -69,6 +68,9 @@ func (b *BridgeProcessor) Start(ctx context.Context) error {
// serves as this shared marker.
// serves as this shared marker.
l1EtlUpdates
:=
b
.
l1Etl
.
Notify
()
l1EtlUpdates
:=
b
.
l1Etl
.
Notify
()
startup
:=
make
(
chan
interface
{},
1
)
startup
<-
nil
b
.
log
.
Info
(
"starting bridge processor..."
)
b
.
log
.
Info
(
"starting bridge processor..."
)
for
{
for
{
select
{
select
{
...
@@ -76,81 +78,107 @@ func (b *BridgeProcessor) Start(ctx context.Context) error {
...
@@ -76,81 +78,107 @@ func (b *BridgeProcessor) Start(ctx context.Context) error {
b
.
log
.
Info
(
"stopping bridge processor"
)
b
.
log
.
Info
(
"stopping bridge processor"
)
return
nil
return
nil
// Fire off independently on startup to check for any
// new data or if we've indexed new L1 data.
case
<-
startup
:
case
<-
l1EtlUpdates
:
case
<-
l1EtlUpdates
:
latestEpoch
,
err
:=
b
.
db
.
Blocks
.
LatestEpoch
()
}
if
err
!=
nil
{
return
err
}
else
if
latestEpoch
==
nil
{
if
b
.
LatestL1Header
!=
nil
||
b
.
LatestL2Header
!=
nil
{
// Once we have some indexed state `latestEpoch` can never return nil
b
.
log
.
Error
(
"bridge events indexed, but no indexed epoch returned"
,
"latest_bridge_l1_block_number"
,
b
.
LatestL1Header
.
Number
)
return
errors
.
New
(
"bridge events indexed, but no indexed epoch returned"
)
}
b
.
log
.
Warn
(
"no indexed epochs available. waiting..."
)
latestEpoch
,
err
:=
b
.
db
.
Blocks
.
LatestEpoch
()
continue
if
err
!=
nil
{
return
err
}
else
if
latestEpoch
==
nil
{
if
b
.
LatestL1Header
!=
nil
||
b
.
LatestL2Header
!=
nil
{
// Once we have some indexed state `latestEpoch` can never return nil
b
.
log
.
Error
(
"bridge events indexed, but no indexed epoch returned"
,
"latest_bridge_l1_block_number"
,
b
.
LatestL1Header
.
Number
)
return
errors
.
New
(
"bridge events indexed, but no indexed epoch returned"
)
}
}
// Integrity Checks
b
.
log
.
Warn
(
"no indexed epochs available. waiting..."
)
continue
}
if
b
.
LatestL1Header
!=
nil
&&
latestEpoch
.
L1BlockHeader
.
Hash
==
b
.
LatestL1Header
.
Hash
()
{
// Integrity Checks
b
.
log
.
Warn
(
"all available epochs indexed"
,
"latest_bridge_l1_block_number"
,
b
.
LatestL1Header
.
Number
)
continue
}
if
b
.
LatestL1Header
!=
nil
&&
latestEpoch
.
L1BlockHeader
.
Number
.
Cmp
(
b
.
LatestL1Header
.
Number
)
<=
0
{
b
.
log
.
Error
(
"non-increasing l1 block height observed"
,
"latest_bridge_l1_block_number"
,
b
.
LatestL1Header
.
Number
,
"latest_epoch_number"
,
latestEpoch
.
L1BlockHeader
.
Number
)
return
errors
.
New
(
"non-increasing l1 block heght observed"
)
}
if
b
.
LatestL2Header
!=
nil
&&
latestEpoch
.
L2BlockHeader
.
Number
.
Cmp
(
b
.
LatestL2Header
.
Number
)
<=
0
{
b
.
log
.
Error
(
"non-increasing l2 block height observed"
,
"latest_bridge_l2_block_number"
,
b
.
LatestL2Header
.
Number
,
"latest_epoch_number"
,
latestEpoch
.
L2BlockHeader
.
Number
)
return
errors
.
New
(
"non-increasing l2 block heght observed"
)
}
// Process Bridge Events
if
b
.
LatestL1Header
!=
nil
&&
latestEpoch
.
L1BlockHeader
.
Hash
==
b
.
LatestL1Header
.
Hash
()
{
b
.
log
.
Warn
(
"all available epochs indexed"
,
"latest_bridge_l1_block_number"
,
b
.
LatestL1Header
.
Number
)
continue
}
if
b
.
LatestL1Header
!=
nil
&&
latestEpoch
.
L1BlockHeader
.
Number
.
Cmp
(
b
.
LatestL1Header
.
Number
)
<=
0
{
b
.
log
.
Error
(
"decreasing l1 block height observed"
,
"latest_bridge_l1_block_number"
,
b
.
LatestL1Header
.
Number
,
"latest_epoch_number"
,
latestEpoch
.
L1BlockHeader
.
Number
)
return
errors
.
New
(
"decreasing l1 block heght observed"
)
}
if
b
.
LatestL2Header
!=
nil
&&
latestEpoch
.
L2BlockHeader
.
Number
.
Cmp
(
b
.
LatestL2Header
.
Number
)
<=
0
{
b
.
log
.
Error
(
"decreasing l2 block height observed"
,
"latest_bridge_l2_block_number"
,
b
.
LatestL2Header
.
Number
,
"latest_epoch_number"
,
latestEpoch
.
L2BlockHeader
.
Number
)
return
errors
.
New
(
"decreasing l2 block heght observed"
)
}
toL1Height
,
toL2Height
:=
latestEpoch
.
L1BlockHeader
.
Number
,
latestEpoch
.
L2BlockHeader
.
Number
// Process Bridge Events
fromL1Height
,
fromL2Height
:=
big
.
NewInt
(
0
),
big
.
NewInt
(
0
)
if
b
.
LatestL1Header
!=
nil
{
fromL1Height
=
new
(
big
.
Int
)
.
Add
(
b
.
LatestL1Header
.
Number
,
big
.
NewInt
(
1
))
}
if
b
.
LatestL2Header
!=
nil
{
fromL2Height
=
new
(
big
.
Int
)
.
Add
(
b
.
LatestL2Header
.
Number
,
big
.
NewInt
(
1
))
}
batchLog
:=
b
.
log
.
New
(
"epoch_start_number"
,
fromL1Height
,
"epoch_end_number"
,
toL1Height
)
toL1Height
,
toL2Height
:=
latestEpoch
.
L1BlockHeader
.
Number
,
latestEpoch
.
L2BlockHeader
.
Number
batchLog
.
Info
(
"scanning for new bridge events"
)
fromL1Height
,
fromL2Height
:=
big
.
NewInt
(
int64
(
b
.
chainConfig
.
L1StartingHeight
)),
bigint
.
Zero
err
=
b
.
db
.
Transaction
(
func
(
tx
*
database
.
DB
)
error
{
if
b
.
LatestL1Header
!=
nil
{
l1BridgeLog
:=
b
.
log
.
New
(
"from_l1_block_number"
,
fromL1Height
,
"to_l1_block_number"
,
toL1Height
)
fromL1Height
=
new
(
big
.
Int
)
.
Add
(
b
.
LatestL1Header
.
Number
,
bigint
.
One
)
l2BridgeLog
:=
b
.
log
.
New
(
"from_l2_block_number"
,
fromL2Height
,
"to_l2_block_number"
,
toL2Height
)
}
if
b
.
LatestL2Header
!=
nil
{
fromL2Height
=
new
(
big
.
Int
)
.
Add
(
b
.
LatestL2Header
.
Number
,
bigint
.
One
)
}
// First, find all possible initiated bridge events
batchLog
:=
b
.
log
.
New
(
"epoch_start_number"
,
fromL1Height
,
"epoch_end_number"
,
toL1Height
)
if
err
:=
bridge
.
L1ProcessInitiatedBridgeEvents
(
l1BridgeLog
,
tx
,
b
.
chainConfig
,
fromL1Height
,
toL1Height
);
err
!=
nil
{
batchLog
.
Info
(
"unobserved epochs"
)
err
=
b
.
db
.
Transaction
(
func
(
tx
*
database
.
DB
)
error
{
l1BridgeLog
:=
b
.
log
.
New
(
"bridge"
,
"l1"
)
l2BridgeLog
:=
b
.
log
.
New
(
"bridge"
,
"l2"
)
// In the event where we have a large number of un-observed blocks, group the block range
// on the order of 10k blocks at a time. If this turns out to be a bottleneck, we can
// parallelize these operations for significant improvements as well
l1BlockGroups
:=
bigint
.
Grouped
(
fromL1Height
,
toL1Height
,
10
_000
)
l2BlockGroups
:=
bigint
.
Grouped
(
fromL2Height
,
toL2Height
,
10
_000
)
// First, find all possible initiated bridge events
for
_
,
group
:=
range
l1BlockGroups
{
log
:=
l1BridgeLog
.
New
(
"from_block_number"
,
group
.
Start
,
"to_block_number"
,
group
.
End
)
log
.
Info
(
"scanning for initiated bridge events"
)
if
err
:=
bridge
.
L1ProcessInitiatedBridgeEvents
(
log
,
tx
,
b
.
chainConfig
,
group
.
Start
,
group
.
End
);
err
!=
nil
{
return
err
return
err
}
}
if
err
:=
bridge
.
L2ProcessInitiatedBridgeEvents
(
l2BridgeLog
,
tx
,
fromL2Height
,
toL2Height
);
err
!=
nil
{
}
for
_
,
group
:=
range
l2BlockGroups
{
log
:=
l2BridgeLog
.
New
(
"from_block_number"
,
group
.
Start
,
"to_block_number"
,
group
.
End
)
log
.
Info
(
"scanning for initiated bridge events"
)
if
err
:=
bridge
.
L2ProcessInitiatedBridgeEvents
(
log
,
tx
,
group
.
Start
,
group
.
End
);
err
!=
nil
{
return
err
return
err
}
}
}
// Now that all initiated events have been indexed, it is ensured that all finalization can find their counterpart.
// Now all finalization events can find their counterpart.
if
err
:=
bridge
.
L1ProcessFinalizedBridgeEvents
(
l1BridgeLog
,
tx
,
b
.
chainConfig
,
fromL1Height
,
toL1Height
);
err
!=
nil
{
for
_
,
group
:=
range
l1BlockGroups
{
log
:=
l1BridgeLog
.
New
(
"from_block_number"
,
group
.
Start
,
"to_block_number"
,
group
.
End
)
log
.
Info
(
"scanning for finalized bridge events"
)
if
err
:=
bridge
.
L1ProcessFinalizedBridgeEvents
(
log
,
tx
,
b
.
chainConfig
,
group
.
Start
,
group
.
End
);
err
!=
nil
{
return
err
return
err
}
}
if
err
:=
bridge
.
L2ProcessFinalizedBridgeEvents
(
l2BridgeLog
,
tx
,
fromL2Height
,
toL2Height
);
err
!=
nil
{
}
for
_
,
group
:=
range
l2BlockGroups
{
log
:=
l2BridgeLog
.
New
(
"from_block_number"
,
group
.
Start
,
"to_block_number"
,
group
.
End
)
log
.
Info
(
"scanning for finalized bridge events"
)
if
err
:=
bridge
.
L2ProcessFinalizedBridgeEvents
(
log
,
tx
,
group
.
Start
,
group
.
End
);
err
!=
nil
{
return
err
return
err
}
}
// a-ok
return
nil
})
if
err
!=
nil
{
// Try again on a subsequent interval
batchLog
.
Error
(
"unable to index new bridge events"
,
"err"
,
err
)
}
else
{
batchLog
.
Info
(
"done indexing bridge events"
,
"latest_l1_block_number"
,
toL1Height
,
"latest_l2_block_number"
,
toL2Height
)
b
.
LatestL1Header
=
latestEpoch
.
L1BlockHeader
.
RLPHeader
.
Header
()
b
.
LatestL2Header
=
latestEpoch
.
L2BlockHeader
.
RLPHeader
.
Header
()
}
}
// a-ok
return
nil
})
if
err
!=
nil
{
// Try again on a subsequent interval
batchLog
.
Error
(
"failed to index bridge events"
,
"err"
,
err
)
}
else
{
batchLog
.
Info
(
"indexed bridge events"
,
"latest_l1_block_number"
,
toL1Height
,
"latest_l2_block_number"
,
toL2Height
)
b
.
LatestL1Header
=
latestEpoch
.
L1BlockHeader
.
RLPHeader
.
Header
()
b
.
LatestL2Header
=
latestEpoch
.
L2BlockHeader
.
RLPHeader
.
Header
()
}
}
}
}
}
}
indexer/processors/bridge/l1_bridge_processor.go
View file @
787f6857
This diff is collapsed.
Click to expand it.
indexer/processors/bridge/l2_bridge_processor.go
View file @
787f6857
...
@@ -23,6 +23,9 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
...
@@ -23,6 +23,9 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
if
err
!=
nil
{
if
err
!=
nil
{
return
err
return
err
}
}
if
len
(
l2ToL1MPMessagesPassed
)
>
0
{
log
.
Info
(
"detected transaction withdrawals"
,
"size"
,
len
(
l2ToL1MPMessagesPassed
))
}
messagesPassed
:=
make
(
map
[
logKey
]
*
contracts
.
L2ToL1MessagePasserMessagePassed
,
len
(
l2ToL1MPMessagesPassed
))
messagesPassed
:=
make
(
map
[
logKey
]
*
contracts
.
L2ToL1MessagePasserMessagePassed
,
len
(
l2ToL1MPMessagesPassed
))
transactionWithdrawals
:=
make
([]
database
.
L2TransactionWithdrawal
,
len
(
l2ToL1MPMessagesPassed
))
transactionWithdrawals
:=
make
([]
database
.
L2TransactionWithdrawal
,
len
(
l2ToL1MPMessagesPassed
))
...
@@ -37,9 +40,7 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
...
@@ -37,9 +40,7 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
Tx
:
messagePassed
.
Tx
,
Tx
:
messagePassed
.
Tx
,
}
}
}
}
if
len
(
messagesPassed
)
>
0
{
if
len
(
messagesPassed
)
>
0
{
log
.
Info
(
"detected transaction withdrawals"
,
"size"
,
len
(
transactionWithdrawals
))
if
err
:=
db
.
BridgeTransactions
.
StoreL2TransactionWithdrawals
(
transactionWithdrawals
);
err
!=
nil
{
if
err
:=
db
.
BridgeTransactions
.
StoreL2TransactionWithdrawals
(
transactionWithdrawals
);
err
!=
nil
{
return
err
return
err
}
}
...
@@ -50,8 +51,8 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
...
@@ -50,8 +51,8 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
if
err
!=
nil
{
if
err
!=
nil
{
return
err
return
err
}
}
if
len
(
crossDomainSentMessages
)
>
len
(
messagesPassed
)
{
if
len
(
crossDomainSentMessages
)
>
0
{
return
fmt
.
Errorf
(
"missing L2ToL1MP withdrawal for each cross-domain message. withdrawals: %d, messages: %d"
,
len
(
messagesPassed
)
,
len
(
crossDomainSentMessages
))
log
.
Info
(
"detected sent messages"
,
"size"
,
len
(
crossDomainSentMessages
))
}
}
sentMessages
:=
make
(
map
[
logKey
]
*
contracts
.
CrossDomainMessengerSentMessageEvent
,
len
(
crossDomainSentMessages
))
sentMessages
:=
make
(
map
[
logKey
]
*
contracts
.
CrossDomainMessengerSentMessageEvent
,
len
(
crossDomainSentMessages
))
...
@@ -63,14 +64,14 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
...
@@ -63,14 +64,14 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
// extract the withdrawal hash from the previous MessagePassed event
// extract the withdrawal hash from the previous MessagePassed event
messagePassed
,
ok
:=
messagesPassed
[
logKey
{
sentMessage
.
Event
.
BlockHash
,
sentMessage
.
Event
.
LogIndex
-
1
}]
messagePassed
,
ok
:=
messagesPassed
[
logKey
{
sentMessage
.
Event
.
BlockHash
,
sentMessage
.
Event
.
LogIndex
-
1
}]
if
!
ok
{
if
!
ok
{
return
fmt
.
Errorf
(
"expected MessagePassedEvent preceding SentMessage. tx_hash = %s"
,
sentMessage
.
Event
.
TransactionHash
)
log
.
Error
(
"expected MessagePassedEvent preceding SentMessage"
,
"tx_hash"
,
sentMessage
.
Event
.
TransactionHash
.
String
())
return
fmt
.
Errorf
(
"expected MessagePassedEvent preceding SentMessage. tx_hash = %s"
,
sentMessage
.
Event
.
TransactionHash
.
String
())
}
}
l2BridgeMessages
[
i
]
=
database
.
L2BridgeMessage
{
TransactionWithdrawalHash
:
messagePassed
.
WithdrawalHash
,
BridgeMessage
:
sentMessage
.
BridgeMessage
}
l2BridgeMessages
[
i
]
=
database
.
L2BridgeMessage
{
TransactionWithdrawalHash
:
messagePassed
.
WithdrawalHash
,
BridgeMessage
:
sentMessage
.
BridgeMessage
}
}
}
if
len
(
l2BridgeMessages
)
>
0
{
if
len
(
l2BridgeMessages
)
>
0
{
log
.
Info
(
"detected L2CrossDomainMessenger messages"
,
"size"
,
len
(
l2BridgeMessages
))
if
err
:=
db
.
BridgeMessages
.
StoreL2BridgeMessages
(
l2BridgeMessages
);
err
!=
nil
{
if
err
:=
db
.
BridgeMessages
.
StoreL2BridgeMessages
(
l2BridgeMessages
);
err
!=
nil
{
return
err
return
err
}
}
...
@@ -81,8 +82,8 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
...
@@ -81,8 +82,8 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
if
err
!=
nil
{
if
err
!=
nil
{
return
err
return
err
}
}
if
len
(
initiatedBridges
)
>
len
(
crossDomainSentMessages
)
{
if
len
(
initiatedBridges
)
>
0
{
return
fmt
.
Errorf
(
"missing cross-domain message for each initiated bridge event. messages: %d, bridges: %d"
,
len
(
crossDomainSentMessages
)
,
len
(
initiatedBridges
))
log
.
Info
(
"detected bridge withdrawals"
,
"size"
,
len
(
initiatedBridges
))
}
}
l2BridgeWithdrawals
:=
make
([]
database
.
L2BridgeWithdrawal
,
len
(
initiatedBridges
))
l2BridgeWithdrawals
:=
make
([]
database
.
L2BridgeWithdrawal
,
len
(
initiatedBridges
))
...
@@ -92,11 +93,13 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
...
@@ -92,11 +93,13 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
// extract the cross domain message hash & deposit source hash from the following events
// extract the cross domain message hash & deposit source hash from the following events
messagePassed
,
ok
:=
messagesPassed
[
logKey
{
initiatedBridge
.
Event
.
BlockHash
,
initiatedBridge
.
Event
.
LogIndex
+
1
}]
messagePassed
,
ok
:=
messagesPassed
[
logKey
{
initiatedBridge
.
Event
.
BlockHash
,
initiatedBridge
.
Event
.
LogIndex
+
1
}]
if
!
ok
{
if
!
ok
{
return
fmt
.
Errorf
(
"expected MessagePassed following BridgeInitiated event. tx_hash = %s"
,
initiatedBridge
.
Event
.
TransactionHash
)
log
.
Error
(
"expected MessagePassed following BridgeInitiated event"
,
"tx_hash"
,
initiatedBridge
.
Event
.
TransactionHash
.
String
())
return
fmt
.
Errorf
(
"expected MessagePassed following BridgeInitiated event. tx_hash = %s"
,
initiatedBridge
.
Event
.
TransactionHash
.
String
())
}
}
sentMessage
,
ok
:=
sentMessages
[
logKey
{
initiatedBridge
.
Event
.
BlockHash
,
initiatedBridge
.
Event
.
LogIndex
+
2
}]
sentMessage
,
ok
:=
sentMessages
[
logKey
{
initiatedBridge
.
Event
.
BlockHash
,
initiatedBridge
.
Event
.
LogIndex
+
2
}]
if
!
ok
{
if
!
ok
{
return
fmt
.
Errorf
(
"expected SentMessage following MessagePassed event. tx_hash = %s"
,
initiatedBridge
.
Event
.
TransactionHash
)
log
.
Error
(
"expected SentMessage following MessagePassed event"
,
"tx_hash"
,
initiatedBridge
.
Event
.
TransactionHash
.
String
())
return
fmt
.
Errorf
(
"expected SentMessage following MessagePassed event. tx_hash = %s"
,
initiatedBridge
.
Event
.
TransactionHash
.
String
())
}
}
initiatedBridge
.
BridgeTransfer
.
CrossDomainMessageHash
=
&
sentMessage
.
BridgeMessage
.
MessageHash
initiatedBridge
.
BridgeTransfer
.
CrossDomainMessageHash
=
&
sentMessage
.
BridgeMessage
.
MessageHash
...
@@ -104,7 +107,6 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
...
@@ -104,7 +107,6 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
}
}
if
len
(
l2BridgeWithdrawals
)
>
0
{
if
len
(
l2BridgeWithdrawals
)
>
0
{
log
.
Info
(
"detected L2StandardBridge withdrawals"
,
"size"
,
len
(
l2BridgeWithdrawals
))
if
err
:=
db
.
BridgeTransfers
.
StoreL2BridgeWithdrawals
(
l2BridgeWithdrawals
);
err
!=
nil
{
if
err
:=
db
.
BridgeTransfers
.
StoreL2BridgeWithdrawals
(
l2BridgeWithdrawals
);
err
!=
nil
{
return
err
return
err
}
}
...
@@ -121,11 +123,14 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
...
@@ -121,11 +123,14 @@ func L2ProcessInitiatedBridgeEvents(log log.Logger, db *database.DB, fromHeight
//
//
// NOTE: Unlike L1, there's no L2ToL1MessagePasser stage since transaction deposits are apart of the block derivation process.
// NOTE: Unlike L1, there's no L2ToL1MessagePasser stage since transaction deposits are apart of the block derivation process.
func
L2ProcessFinalizedBridgeEvents
(
log
log
.
Logger
,
db
*
database
.
DB
,
fromHeight
*
big
.
Int
,
toHeight
*
big
.
Int
)
error
{
func
L2ProcessFinalizedBridgeEvents
(
log
log
.
Logger
,
db
*
database
.
DB
,
fromHeight
*
big
.
Int
,
toHeight
*
big
.
Int
)
error
{
// (1) L2CrossDomainMessenger
relayedMessage
// (1) L2CrossDomainMessenger
crossDomainRelayedMessages
,
err
:=
contracts
.
CrossDomainMessengerRelayedMessageEvents
(
"l2"
,
predeploys
.
L2CrossDomainMessengerAddr
,
db
,
fromHeight
,
toHeight
)
crossDomainRelayedMessages
,
err
:=
contracts
.
CrossDomainMessengerRelayedMessageEvents
(
"l2"
,
predeploys
.
L2CrossDomainMessengerAddr
,
db
,
fromHeight
,
toHeight
)
if
err
!=
nil
{
if
err
!=
nil
{
return
err
return
err
}
}
if
len
(
crossDomainRelayedMessages
)
>
0
{
log
.
Info
(
"detected relayed messages"
,
"size"
,
len
(
crossDomainRelayedMessages
))
}
relayedMessages
:=
make
(
map
[
logKey
]
*
contracts
.
CrossDomainMessengerRelayedMessageEvent
,
len
(
crossDomainRelayedMessages
))
relayedMessages
:=
make
(
map
[
logKey
]
*
contracts
.
CrossDomainMessengerRelayedMessageEvent
,
len
(
crossDomainRelayedMessages
))
for
i
:=
range
crossDomainRelayedMessages
{
for
i
:=
range
crossDomainRelayedMessages
{
...
@@ -135,26 +140,23 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, fromHeight
...
@@ -135,26 +140,23 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, fromHeight
if
err
!=
nil
{
if
err
!=
nil
{
return
err
return
err
}
else
if
message
==
nil
{
}
else
if
message
==
nil
{
log
.
Error
(
"missing indexed L1CrossDomainMessenger message"
,
"
message_hash"
,
relayed
.
MessageHash
,
"tx_hash"
,
relayed
.
Event
.
TransactionHash
)
log
.
Error
(
"missing indexed L1CrossDomainMessenger message"
,
"
tx_hash"
,
relayed
.
Event
.
TransactionHash
.
String
()
)
return
fmt
.
Errorf
(
"missing indexed L1CrossDomainMessager message
"
)
return
fmt
.
Errorf
(
"missing indexed L1CrossDomainMessager message
. tx_hash = %s"
,
relayed
.
Event
.
TransactionHash
.
String
()
)
}
}
if
err
:=
db
.
BridgeMessages
.
MarkRelayedL1BridgeMessage
(
relayed
.
MessageHash
,
relayed
.
Event
.
GUID
);
err
!=
nil
{
if
err
:=
db
.
BridgeMessages
.
MarkRelayedL1BridgeMessage
(
relayed
.
MessageHash
,
relayed
.
Event
.
GUID
);
err
!=
nil
{
log
.
Error
(
"failed to relay cross domain message"
,
"err"
,
err
,
"tx_hash"
,
relayed
.
Event
.
TransactionHash
.
String
())
return
err
return
err
}
}
}
}
if
len
(
crossDomainRelayedMessages
)
>
0
{
// (2) L2StandardBridge
log
.
Info
(
"relayed L1CrossDomainMessenger messages"
,
"size"
,
len
(
crossDomainRelayedMessages
))
}
// (2) L2StandardBridge BridgeFinalized
finalizedBridges
,
err
:=
contracts
.
StandardBridgeFinalizedEvents
(
"l2"
,
predeploys
.
L2StandardBridgeAddr
,
db
,
fromHeight
,
toHeight
)
finalizedBridges
,
err
:=
contracts
.
StandardBridgeFinalizedEvents
(
"l2"
,
predeploys
.
L2StandardBridgeAddr
,
db
,
fromHeight
,
toHeight
)
if
err
!=
nil
{
if
err
!=
nil
{
return
err
return
err
}
}
if
len
(
finalizedBridges
)
>
len
(
crossDomainRelayedMessages
)
{
if
len
(
finalizedBridges
)
>
0
{
return
fmt
.
Errorf
(
"missing cross-domain message for each finalized bridge event. messages: %d, bridges: %d"
,
len
(
crossDomainRelayedMessages
)
,
len
(
finalizedBridges
))
log
.
Info
(
"detected finalized bridge deposits"
,
"size"
,
len
(
finalizedBridges
))
}
}
for
i
:=
range
finalizedBridges
{
for
i
:=
range
finalizedBridges
{
...
@@ -163,7 +165,8 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, fromHeight
...
@@ -163,7 +165,8 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, fromHeight
finalizedBridge
:=
finalizedBridges
[
i
]
finalizedBridge
:=
finalizedBridges
[
i
]
relayedMessage
,
ok
:=
relayedMessages
[
logKey
{
finalizedBridge
.
Event
.
BlockHash
,
finalizedBridge
.
Event
.
LogIndex
+
1
}]
relayedMessage
,
ok
:=
relayedMessages
[
logKey
{
finalizedBridge
.
Event
.
BlockHash
,
finalizedBridge
.
Event
.
LogIndex
+
1
}]
if
!
ok
{
if
!
ok
{
return
fmt
.
Errorf
(
"expected RelayedMessage following BridgeFinalized event. tx_hash = %s"
,
finalizedBridge
.
Event
.
TransactionHash
)
log
.
Error
(
"expected RelayedMessage following BridgeFinalized event"
,
"tx_hash"
,
finalizedBridge
.
Event
.
TransactionHash
.
String
())
return
fmt
.
Errorf
(
"expected RelayedMessage following BridgeFinalized event. tx_hash = %s"
,
finalizedBridge
.
Event
.
TransactionHash
.
String
())
}
}
// Since the message hash is computed from the relayed message, this ensures the withdrawal fields must match. For good measure,
// Since the message hash is computed from the relayed message, this ensures the withdrawal fields must match. For good measure,
...
@@ -172,7 +175,7 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, fromHeight
...
@@ -172,7 +175,7 @@ func L2ProcessFinalizedBridgeEvents(log log.Logger, db *database.DB, fromHeight
if
err
!=
nil
{
if
err
!=
nil
{
return
err
return
err
}
else
if
deposit
==
nil
{
}
else
if
deposit
==
nil
{
log
.
Error
(
"missing L1StandardBridge deposit on L2 finalization"
,
"tx_hash"
,
finalizedBridge
.
Event
.
TransactionHash
)
log
.
Error
(
"missing L1StandardBridge deposit on L2 finalization"
,
"tx_hash"
,
finalizedBridge
.
Event
.
TransactionHash
.
String
()
)
return
errors
.
New
(
"missing L1StandardBridge deposit on L2 finalization"
)
return
errors
.
New
(
"missing L1StandardBridge deposit on L2 finalization"
)
}
}
}
}
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment