Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
N
nebula
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
exchain
nebula
Commits
69256aa1
Commit
69256aa1
authored
Sep 09, 2023
by
Hamdi Allam
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
indexer.db.batching
parent
e52924b9
Changes
17
Hide whitespace changes
Inline
Side-by-side
Showing
17 changed files
with
218 additions
and
93 deletions
+218
-93
bigint.go
indexer/bigint/bigint.go
+60
-0
bigint_test.go
indexer/bigint/bigint_test.go
+72
-0
blocks.go
indexer/database/blocks.go
+5
-4
bridge_messages.go
indexer/database/bridge_messages.go
+2
-2
bridge_transactions.go
indexer/database/bridge_transactions.go
+2
-2
bridge_transfers.go
indexer/database/bridge_transfers.go
+2
-2
contract_events.go
indexer/database/contract_events.go
+2
-2
db.go
indexer/database/db.go
+9
-2
mocks.go
indexer/database/mocks.go
+1
-0
l1_etl_test.go
indexer/etl/l1_etl_test.go
+3
-3
20230523_create_schema.sql
indexer/migrations/20230523_create_schema.sql
+39
-2
bigint.go
indexer/node/bigint.go
+0
-26
bigint_test.go
indexer/node/bigint_test.go
+0
-31
header_traversal.go
indexer/node/header_traversal.go
+4
-3
header_traversal_test.go
indexer/node/header_traversal_test.go
+11
-10
mocks.go
indexer/node/mocks.go
+1
-0
bridge.go
indexer/processors/bridge.go
+5
-4
No files found.
indexer/bigint/bigint.go
0 → 100644
View file @
69256aa1
package
bigint
import
"math/big"
var (
	// Zero and One are shared big.Int constants, allocated once to avoid
	// repeated big.NewInt calls at every use site. Callers must treat them
	// as read-only.
	Zero = big.NewInt(0)
	One  = big.NewInt(1)
)
// Clamp returns a new big.Int for `end` to which `end - start` <= size.
// @note (start, end) is an inclusive range
func
Clamp
(
start
,
end
*
big
.
Int
,
size
uint64
)
*
big
.
Int
{
temp
:=
new
(
big
.
Int
)
count
:=
temp
.
Sub
(
end
,
start
)
.
Uint64
()
+
1
if
count
<=
size
{
return
end
}
// we re-use the allocated temp as the new end
temp
.
Add
(
start
,
big
.
NewInt
(
int64
(
size
-
1
)))
return
temp
}
// Matcher returns an inner comparison function result for a big.Int
func
Matcher
(
num
int64
)
func
(
*
big
.
Int
)
bool
{
return
func
(
bi
*
big
.
Int
)
bool
{
return
bi
.
Int64
()
==
num
}
}
type
Range
struct
{
Start
*
big
.
Int
End
*
big
.
Int
}
// Grouped will return a slice of inclusive ranges from (start, end),
// capped to the supplied size from `(start, end)`.
func
Grouped
(
start
,
end
*
big
.
Int
,
size
uint64
)
[]
Range
{
if
end
.
Cmp
(
start
)
<
0
||
size
==
0
{
return
nil
}
bigMaxDiff
:=
big
.
NewInt
(
int64
(
size
-
1
))
groups
:=
[]
Range
{}
for
start
.
Cmp
(
end
)
<=
0
{
diff
:=
new
(
big
.
Int
)
.
Sub
(
end
,
start
)
switch
{
case
diff
.
Uint64
()
+
1
<=
size
:
// re-use allocated diff as the next start
groups
=
append
(
groups
,
Range
{
start
,
end
})
start
=
diff
.
Add
(
end
,
One
)
default
:
// re-use allocated diff as the next start
end
:=
new
(
big
.
Int
)
.
Add
(
start
,
bigMaxDiff
)
groups
=
append
(
groups
,
Range
{
start
,
end
})
start
=
diff
.
Add
(
end
,
One
)
}
}
return
groups
}
indexer/bigint/bigint_test.go
0 → 100644
View file @
69256aa1
package
bigint
import
(
"math/big"
"testing"
"github.com/stretchr/testify/require"
)
// TestClamp exercises Clamp across three size regimes: a size larger than
// the range, a size exactly covering the range, and a size smaller than
// the range. For the first two, the assertions check pointer identity —
// Clamp's contract is to return the caller's `end` pointer unchanged when
// no clamping is needed.
func TestClamp(t *testing.T) {
	start := big.NewInt(1)
	end := big.NewInt(10)

	// When the (start, end) bounds are within range
	// the same end pointer should be returned

	// larger range
	result := Clamp(start, end, 20)
	// `==` on *big.Int compares pointers, deliberately — not values
	require.True(t, end == result)

	// exact range
	result = Clamp(start, end, 10)
	require.True(t, end == result)

	// smaller range
	result = Clamp(start, end, 5)
	// a clamped result must be a freshly allocated value...
	require.False(t, end == result)
	// ...equal to start+size-1 = 5
	require.Equal(t, uint64(5), result.Uint64())
}
func
TestGrouped
(
t
*
testing
.
T
)
{
// base cases
require
.
Nil
(
t
,
Grouped
(
One
,
Zero
,
1
))
require
.
Nil
(
t
,
Grouped
(
Zero
,
One
,
0
))
// Same Start/End
group
:=
Grouped
(
One
,
One
,
1
)
require
.
Len
(
t
,
group
,
1
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
Start
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
End
)
Three
,
Five
:=
big
.
NewInt
(
3
),
big
.
NewInt
(
5
)
// One at a time
group
=
Grouped
(
One
,
Three
,
1
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
End
)
require
.
Equal
(
t
,
int64
(
1
),
group
[
0
]
.
End
.
Int64
())
require
.
Equal
(
t
,
int64
(
2
),
group
[
1
]
.
Start
.
Int64
())
require
.
Equal
(
t
,
int64
(
2
),
group
[
1
]
.
End
.
Int64
())
require
.
Equal
(
t
,
int64
(
3
),
group
[
2
]
.
Start
.
Int64
())
require
.
Equal
(
t
,
int64
(
3
),
group
[
2
]
.
End
.
Int64
())
// Split groups
group
=
Grouped
(
One
,
Five
,
3
)
require
.
Len
(
t
,
group
,
2
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
Start
)
require
.
Equal
(
t
,
int64
(
3
),
group
[
0
]
.
End
.
Int64
())
require
.
Equal
(
t
,
int64
(
4
),
group
[
1
]
.
Start
.
Int64
())
require
.
Equal
(
t
,
Five
,
group
[
1
]
.
End
)
// Encompasses the range
group
=
Grouped
(
One
,
Five
,
5
)
require
.
Len
(
t
,
group
,
1
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
Start
,
Zero
)
require
.
Equal
(
t
,
Five
,
group
[
0
]
.
End
)
// Size larger than the entire range
group
=
Grouped
(
One
,
Five
,
100
)
require
.
Len
(
t
,
group
,
1
)
require
.
Equal
(
t
,
One
,
group
[
0
]
.
Start
,
Zero
)
require
.
Equal
(
t
,
Five
,
group
[
0
]
.
End
)
}
indexer/database/blocks.go
View file @
69256aa1
...
...
@@ -104,17 +104,17 @@ func newBlocksDB(db *gorm.DB) BlocksDB {
// L1
func
(
db
*
blocksDB
)
StoreL1BlockHeaders
(
headers
[]
L1BlockHeader
)
error
{
result
:=
db
.
gorm
.
Create
(
&
headers
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
headers
,
batchInsertSize
)
return
result
.
Error
}
func
(
db
*
blocksDB
)
StoreLegacyStateBatches
(
stateBatches
[]
LegacyStateBatch
)
error
{
result
:=
db
.
gorm
.
Create
(
stateBatches
)
result
:=
db
.
gorm
.
Create
InBatches
(
stateBatches
,
batchInsertSize
)
return
result
.
Error
}
func
(
db
*
blocksDB
)
StoreOutputProposals
(
outputs
[]
OutputProposal
)
error
{
result
:=
db
.
gorm
.
Create
(
outputs
)
result
:=
db
.
gorm
.
Create
InBatches
(
outputs
,
batchInsertSize
)
return
result
.
Error
}
...
...
@@ -180,7 +180,7 @@ func (db *blocksDB) OutputProposal(index *big.Int) (*OutputProposal, error) {
// L2
func
(
db
*
blocksDB
)
StoreL2BlockHeaders
(
headers
[]
L2BlockHeader
)
error
{
result
:=
db
.
gorm
.
Create
(
&
headers
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
headers
,
batchInsertSize
)
return
result
.
Error
}
...
...
@@ -234,6 +234,7 @@ func (db *blocksDB) LatestEpoch() (*Epoch, error) {
// will have a matching timestamp with the L1 origin.
query
:=
db
.
gorm
.
Table
(
"l1_block_headers"
)
.
Order
(
"l1_block_headers.timestamp DESC"
)
query
=
query
.
Joins
(
"INNER JOIN l2_block_headers ON l2_block_headers.timestamp = l1_block_headers.timestamp"
)
query
=
query
.
Order
(
"l2_block_headers.number DESC"
)
query
=
query
.
Select
(
"*"
)
var
epoch
Epoch
...
...
indexer/database/bridge_messages.go
View file @
69256aa1
...
...
@@ -72,7 +72,7 @@ func newBridgeMessagesDB(db *gorm.DB) BridgeMessagesDB {
*/
func
(
db
bridgeMessagesDB
)
StoreL1BridgeMessages
(
messages
[]
L1BridgeMessage
)
error
{
result
:=
db
.
gorm
.
Create
(
&
messages
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
messages
,
batchInsertSize
)
return
result
.
Error
}
...
...
@@ -111,7 +111,7 @@ func (db bridgeMessagesDB) MarkRelayedL1BridgeMessage(messageHash common.Hash, r
*/
func
(
db
bridgeMessagesDB
)
StoreL2BridgeMessages
(
messages
[]
L2BridgeMessage
)
error
{
result
:=
db
.
gorm
.
Create
(
&
messages
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
messages
,
batchInsertSize
)
return
result
.
Error
}
...
...
indexer/database/bridge_transactions.go
View file @
69256aa1
...
...
@@ -80,7 +80,7 @@ func newBridgeTransactionsDB(db *gorm.DB) BridgeTransactionsDB {
*/
func
(
db
*
bridgeTransactionsDB
)
StoreL1TransactionDeposits
(
deposits
[]
L1TransactionDeposit
)
error
{
result
:=
db
.
gorm
.
Create
(
&
deposits
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
deposits
,
batchInsertSize
)
return
result
.
Error
}
...
...
@@ -133,7 +133,7 @@ func (db *bridgeTransactionsDB) L1LatestBlockHeader() (*L1BlockHeader, error) {
*/
func
(
db
*
bridgeTransactionsDB
)
StoreL2TransactionWithdrawals
(
withdrawals
[]
L2TransactionWithdrawal
)
error
{
result
:=
db
.
gorm
.
Create
(
&
withdrawals
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
withdrawals
,
batchInsertSize
)
return
result
.
Error
}
...
...
indexer/database/bridge_transfers.go
View file @
69256aa1
...
...
@@ -89,7 +89,7 @@ func newBridgeTransfersDB(db *gorm.DB) BridgeTransfersDB {
*/
func
(
db
*
bridgeTransfersDB
)
StoreL1BridgeDeposits
(
deposits
[]
L1BridgeDeposit
)
error
{
result
:=
db
.
gorm
.
Create
(
&
deposits
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
deposits
,
batchInsertSize
)
return
result
.
Error
}
...
...
@@ -202,7 +202,7 @@ l1_bridge_deposits.timestamp, cross_domain_message_hash, local_token_address, re
*/
func
(
db
*
bridgeTransfersDB
)
StoreL2BridgeWithdrawals
(
withdrawals
[]
L2BridgeWithdrawal
)
error
{
result
:=
db
.
gorm
.
Create
(
&
withdrawals
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
withdrawals
,
batchInsertSize
)
return
result
.
Error
}
...
...
indexer/database/contract_events.go
View file @
69256aa1
...
...
@@ -109,7 +109,7 @@ func newContractEventsDB(db *gorm.DB) ContractEventsDB {
// L1
func
(
db
*
contractEventsDB
)
StoreL1ContractEvents
(
events
[]
L1ContractEvent
)
error
{
result
:=
db
.
gorm
.
Create
(
&
events
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
events
,
batchInsertSize
)
return
result
.
Error
}
...
...
@@ -176,7 +176,7 @@ func (db *contractEventsDB) L1LatestContractEventWithFilter(filter ContractEvent
// L2
func
(
db
*
contractEventsDB
)
StoreL2ContractEvents
(
events
[]
L2ContractEvent
)
error
{
result
:=
db
.
gorm
.
Create
(
&
events
)
result
:=
db
.
gorm
.
Create
InBatches
(
&
events
,
batchInsertSize
)
return
result
.
Error
}
...
...
indexer/database/db.go
View file @
69256aa1
...
...
@@ -12,6 +12,14 @@ import (
"gorm.io/gorm/logger"
)
var
(
// The postgres parameter counter for a given query is stored via a uint16,
// resulting in a parameter limit of 65535. In order to avoid reaching this limit
// we'll utilize a batch size of 3k for inserts, well below the limit as long as the number
// of columns < 20.
batchInsertSize
int
=
3
_000
)
type
DB
struct
{
gorm
*
gorm
.
DB
...
...
@@ -31,8 +39,7 @@ func NewDB(dbConfig config.DBConfig) (*DB, error) {
dsn
+=
fmt
.
Sprintf
(
" password=%s"
,
dbConfig
.
Password
)
}
gorm
,
err
:=
gorm
.
Open
(
postgres
.
Open
(
dsn
),
&
gorm
.
Config
{
// The indexer will explicitly manage the transaction
// flow processing blocks
// The indexer will explicitly manage the transactions
SkipDefaultTransaction
:
true
,
// We may choose to create an adapter such that the
...
...
indexer/database/mocks.go
View file @
69256aa1
...
...
@@ -4,6 +4,7 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/mock"
)
...
...
indexer/etl/l1_etl_test.go
View file @
69256aa1
...
...
@@ -10,6 +10,7 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/ethereum-optimism/optimism/indexer/bigint"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/node"
...
...
@@ -17,7 +18,7 @@ import (
"testing"
)
func
Test
_L1ETL_
Construction
(
t
*
testing
.
T
)
{
func
Test
L1ETL
Construction
(
t
*
testing
.
T
)
{
etlMetrics
:=
NewMetrics
(
metrics
.
NewRegistry
(),
"l1"
)
type
testSuite
struct
{
...
...
@@ -39,11 +40,10 @@ func Test_L1ETL_Construction(t *testing.T) {
db
:=
database
.
NewMockDB
()
testStart
:=
big
.
NewInt
(
100
)
db
.
MockBlocks
.
On
(
"L1LatestBlockHeader"
)
.
Return
(
nil
,
nil
)
client
.
On
(
"BlockHeaderByNumber"
,
mock
.
MatchedBy
(
node
.
BigInt
Matcher
(
100
)))
.
Return
(
bigint
.
Matcher
(
100
)))
.
Return
(
&
types
.
Header
{
ParentHash
:
common
.
HexToHash
(
"0x69"
),
},
nil
)
...
...
indexer/migrations/20230523_create_schema.sql
View file @
69256aa1
CREATE
DOMAIN
UINT256
AS
NUMERIC
CHECK
(
VALUE
>=
0
AND
VALUE
<
2
^
256
and
SCALE
(
VALUE
)
=
0
);
DO
$$
BEGIN
IF
NOT
EXISTS
(
SELECT
1
FROM
pg_type
WHERE
typname
=
'uint256'
)
THEN
CREATE
DOMAIN
UINT256
AS
NUMERIC
CHECK
(
VALUE
>=
0
AND
VALUE
<
2
^
256
and
SCALE
(
VALUE
)
=
0
);
END
IF
;
END
$$
;
/**
* BLOCK DATA
...
...
@@ -16,6 +21,8 @@ CREATE TABLE IF NOT EXISTS l1_block_headers (
-- Raw Data
rlp_bytes
VARCHAR
NOT
NULL
);
CREATE
INDEX
IF
NOT
EXISTS
l1_block_headers_timestamp
ON
l1_block_headers
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l1_block_headers_number
ON
l1_block_headers
(
number
);
CREATE
TABLE
IF
NOT
EXISTS
l2_block_headers
(
-- Searchable fields
...
...
@@ -27,6 +34,8 @@ CREATE TABLE IF NOT EXISTS l2_block_headers (
-- Raw Data
rlp_bytes
VARCHAR
NOT
NULL
);
CREATE
INDEX
IF
NOT
EXISTS
l2_block_headers_timestamp
ON
l2_block_headers
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l2_block_headers_number
ON
l2_block_headers
(
number
);
/**
* EVENT DATA
...
...
@@ -45,6 +54,9 @@ CREATE TABLE IF NOT EXISTS l1_contract_events (
-- Raw Data
rlp_bytes
VARCHAR
NOT
NULL
);
CREATE
INDEX
IF
NOT
EXISTS
l1_contract_events_timestamp
ON
l1_contract_events
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l1_contract_events_block_hash
ON
l1_contract_events
(
block_hash
);
CREATE
INDEX
IF
NOT
EXISTS
l1_contract_events_event_signature
ON
l1_contract_events
(
event_signature
);
CREATE
TABLE
IF
NOT
EXISTS
l2_contract_events
(
-- Searchable fields
...
...
@@ -59,6 +71,9 @@ CREATE TABLE IF NOT EXISTS l2_contract_events (
-- Raw Data
rlp_bytes
VARCHAR
NOT
NULL
);
CREATE
INDEX
IF
NOT
EXISTS
l2_contract_events_timestamp
ON
l2_contract_events
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l2_contract_events_block_hash
ON
l2_contract_events
(
block_hash
);
CREATE
INDEX
IF
NOT
EXISTS
l2_contract_events_event_signature
ON
l2_contract_events
(
event_signature
);
-- Tables that index finalization markers for L2 blocks.
...
...
@@ -79,6 +94,7 @@ CREATE TABLE IF NOT EXISTS output_proposals (
output_proposed_guid
VARCHAR
NOT
NULL
UNIQUE
REFERENCES
l1_contract_events
(
guid
)
ON
DELETE
CASCADE
);
/**
* BRIDGING DATA
*/
...
...
@@ -118,6 +134,10 @@ CREATE TABLE IF NOT EXISTS l1_transaction_deposits (
data
VARCHAR
NOT
NULL
,
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
);
CREATE
INDEX
IF
NOT
EXISTS
l1_transaction_deposits_timestamp
ON
l1_transaction_deposits
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l1_transaction_deposits_initiated_l1_event_guid
ON
l1_transaction_deposits
(
initiated_l1_event_guid
);
CREATE
INDEX
IF
NOT
EXISTS
l1_transaction_deposits_from_address
ON
l1_transaction_deposits
(
from_address
);
CREATE
TABLE
IF
NOT
EXISTS
l2_transaction_withdrawals
(
withdrawal_hash
VARCHAR
PRIMARY
KEY
,
nonce
UINT256
NOT
NULL
UNIQUE
,
...
...
@@ -136,6 +156,9 @@ CREATE TABLE IF NOT EXISTS l2_transaction_withdrawals (
data
VARCHAR
NOT
NULL
,
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
);
CREATE
INDEX
IF
NOT
EXISTS
l2_transaction_withdrawals_timestamp
ON
l2_transaction_withdrawals
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l2_transaction_withdrawals_initiated_l2_event_guid
ON
l2_transaction_withdrawals
(
initiated_l2_event_guid
);
CREATE
INDEX
IF
NOT
EXISTS
l2_transaction_withdrawals_from_address
ON
l2_transaction_withdrawals
(
from_address
);
-- CrossDomainMessenger
CREATE
TABLE
IF
NOT
EXISTS
l1_bridge_messages
(
...
...
@@ -154,6 +177,10 @@ CREATE TABLE IF NOT EXISTS l1_bridge_messages(
data
VARCHAR
NOT
NULL
,
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
);
CREATE
INDEX
IF
NOT
EXISTS
l1_bridge_messages_timestamp
ON
l1_bridge_messages
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l1_bridge_messages_transaction_source_hash
ON
l1_bridge_messages
(
transaction_source_hash
);
CREATE
INDEX
IF
NOT
EXISTS
l1_bridge_messages_from_address
ON
l1_bridge_messages
(
from_address
);
CREATE
TABLE
IF
NOT
EXISTS
l2_bridge_messages
(
message_hash
VARCHAR
PRIMARY
KEY
,
nonce
UINT256
NOT
NULL
UNIQUE
,
...
...
@@ -170,6 +197,9 @@ CREATE TABLE IF NOT EXISTS l2_bridge_messages(
data
VARCHAR
NOT
NULL
,
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
);
CREATE
INDEX
IF
NOT
EXISTS
l2_bridge_messages_timestamp
ON
l2_bridge_messages
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l2_bridge_messages_transaction_withdrawal_hash
ON
l2_bridge_messages
(
transaction_withdrawal_hash
);
CREATE
INDEX
IF
NOT
EXISTS
l2_bridge_messages_from_address
ON
l2_bridge_messages
(
from_address
);
-- StandardBridge
CREATE
TABLE
IF
NOT
EXISTS
l1_bridge_deposits
(
...
...
@@ -185,6 +215,10 @@ CREATE TABLE IF NOT EXISTS l1_bridge_deposits (
data
VARCHAR
NOT
NULL
,
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
);
CREATE
INDEX
IF
NOT
EXISTS
l1_bridge_deposits_timestamp
ON
l1_bridge_deposits
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l1_bridge_deposits_cross_domain_message_hash
ON
l1_bridge_deposits
(
cross_domain_message_hash
);
CREATE
INDEX
IF
NOT
EXISTS
l1_bridge_deposits_from_address
ON
l1_bridge_deposits
(
from_address
);
CREATE
TABLE
IF
NOT
EXISTS
l2_bridge_withdrawals
(
transaction_withdrawal_hash
VARCHAR
PRIMARY
KEY
REFERENCES
l2_transaction_withdrawals
(
withdrawal_hash
)
ON
DELETE
CASCADE
,
cross_domain_message_hash
VARCHAR
NOT
NULL
UNIQUE
REFERENCES
l2_bridge_messages
(
message_hash
)
ON
DELETE
CASCADE
,
...
...
@@ -198,3 +232,6 @@ CREATE TABLE IF NOT EXISTS l2_bridge_withdrawals (
data
VARCHAR
NOT
NULL
,
timestamp
INTEGER
NOT
NULL
CHECK
(
timestamp
>
0
)
);
CREATE
INDEX
IF
NOT
EXISTS
l2_bridge_withdrawals_timestamp
ON
l2_bridge_withdrawals
(
timestamp
);
CREATE
INDEX
IF
NOT
EXISTS
l2_bridge_withdrawals_cross_domain_message_hash
ON
l2_bridge_withdrawals
(
cross_domain_message_hash
);
CREATE
INDEX
IF
NOT
EXISTS
l2_bridge_withdrawals_from_address
ON
l2_bridge_withdrawals
(
from_address
);
indexer/node/bigint.go
deleted
100644 → 0
View file @
e52924b9
package
node
import
"math/big"
// bigZero and bigOne are shared big.Int constants, allocated once so callers
// avoid repeated big.NewInt allocations. Treat as read-only.
var bigZero = big.NewInt(0)
var bigOne = big.NewInt(1)
// returns a new big.Int for `end` to which `end - start` <= size.
// @note (start, end) is an inclusive range
func
clampBigInt
(
start
,
end
*
big
.
Int
,
size
uint64
)
*
big
.
Int
{
temp
:=
new
(
big
.
Int
)
count
:=
temp
.
Sub
(
end
,
start
)
.
Uint64
()
+
1
if
count
<=
size
{
return
end
}
// we re-use the allocated temp as the new end
temp
.
Add
(
start
,
big
.
NewInt
(
int64
(
size
-
1
)))
return
temp
}
// returns an inner comparison function result for a big.Int
func
BigIntMatcher
(
num
int64
)
func
(
*
big
.
Int
)
bool
{
return
func
(
bi
*
big
.
Int
)
bool
{
return
bi
.
Int64
()
==
num
}
}
indexer/node/bigint_test.go
deleted
100644 → 0
View file @
e52924b9
package
node
import
(
"math/big"
"testing"
"github.com/stretchr/testify/assert"
)
func
TestClampBigInt
(
t
*
testing
.
T
)
{
assert
.
True
(
t
,
true
)
start
:=
big
.
NewInt
(
1
)
end
:=
big
.
NewInt
(
10
)
// When the (start, end) bounds are within range
// the same end pointer should be returned
// larger range
result
:=
clampBigInt
(
start
,
end
,
20
)
assert
.
True
(
t
,
end
==
result
)
// exact range
result
=
clampBigInt
(
start
,
end
,
10
)
assert
.
True
(
t
,
end
==
result
)
// smaller range
result
=
clampBigInt
(
start
,
end
,
5
)
assert
.
False
(
t
,
end
==
result
)
assert
.
Equal
(
t
,
uint64
(
5
),
result
.
Uint64
())
}
indexer/node/header_traversal.go
View file @
69256aa1
...
...
@@ -5,6 +5,7 @@ import (
"fmt"
"math/big"
"github.com/ethereum-optimism/optimism/indexer/bigint"
"github.com/ethereum/go-ethereum/core/types"
)
...
...
@@ -55,12 +56,12 @@ func (f *HeaderTraversal) NextFinalizedHeaders(maxSize uint64) ([]types.Header,
}
}
nextHeight
:=
bigZero
nextHeight
:=
big
int
.
Zero
if
f
.
lastHeader
!=
nil
{
nextHeight
=
new
(
big
.
Int
)
.
Add
(
f
.
lastHeader
.
Number
,
bigOne
)
nextHeight
=
new
(
big
.
Int
)
.
Add
(
f
.
lastHeader
.
Number
,
big
int
.
One
)
}
endHeight
=
clampBigInt
(
nextHeight
,
endHeight
,
maxSize
)
endHeight
=
bigint
.
Clamp
(
nextHeight
,
endHeight
,
maxSize
)
headers
,
err
:=
f
.
ethClient
.
BlockHeadersByRange
(
nextHeight
,
endHeight
)
if
err
!=
nil
{
return
nil
,
fmt
.
Errorf
(
"error querying blocks by range: %w"
,
err
)
...
...
indexer/node/header_traversal_test.go
View file @
69256aa1
...
...
@@ -4,6 +4,7 @@ import (
"math/big"
"testing"
"github.com/ethereum-optimism/optimism/indexer/bigint"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
...
...
@@ -37,7 +38,7 @@ func TestHeaderTraversalNextFinalizedHeadersNoOp(t *testing.T) {
// start from block 10 as the latest fetched block
lastHeader
:=
&
types
.
Header
{
Number
:
big
.
NewInt
(
10
)}
headerTraversal
:=
NewHeaderTraversal
(
client
,
lastHeader
,
bigZero
)
headerTraversal
:=
NewHeaderTraversal
(
client
,
lastHeader
,
big
int
.
Zero
)
// no new headers when matched with head
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
lastHeader
,
nil
)
...
...
@@ -50,12 +51,12 @@ func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
client
:=
new
(
MockEthClient
)
// start from genesis
headerTraversal
:=
NewHeaderTraversal
(
client
,
nil
,
bigZero
)
headerTraversal
:=
NewHeaderTraversal
(
client
,
nil
,
big
int
.
Zero
)
// blocks [0..4]
headers
:=
makeHeaders
(
5
,
nil
)
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
headers
[
4
],
nil
)
.
Times
(
1
)
// Times so that we can override next
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
BigIntMatcher
(
0
)),
mock
.
MatchedBy
(
BigInt
Matcher
(
4
)))
.
Return
(
headers
,
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
bigint
.
Matcher
(
0
)),
mock
.
MatchedBy
(
bigint
.
Matcher
(
4
)))
.
Return
(
headers
,
nil
)
headers
,
err
:=
headerTraversal
.
NextFinalizedHeaders
(
5
)
require
.
NoError
(
t
,
err
)
require
.
Len
(
t
,
headers
,
5
)
...
...
@@ -63,7 +64,7 @@ func TestHeaderTraversalNextFinalizedHeadersCursored(t *testing.T) {
// blocks [5..9]
headers
=
makeHeaders
(
5
,
&
headers
[
len
(
headers
)
-
1
])
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
headers
[
4
],
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
BigIntMatcher
(
5
)),
mock
.
MatchedBy
(
BigInt
Matcher
(
9
)))
.
Return
(
headers
,
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
bigint
.
Matcher
(
5
)),
mock
.
MatchedBy
(
bigint
.
Matcher
(
9
)))
.
Return
(
headers
,
nil
)
headers
,
err
=
headerTraversal
.
NextFinalizedHeaders
(
5
)
require
.
NoError
(
t
,
err
)
require
.
Len
(
t
,
headers
,
5
)
...
...
@@ -73,21 +74,21 @@ func TestHeaderTraversalNextFinalizedHeadersMaxSize(t *testing.T) {
client
:=
new
(
MockEthClient
)
// start from genesis
headerTraversal
:=
NewHeaderTraversal
(
client
,
nil
,
bigZero
)
headerTraversal
:=
NewHeaderTraversal
(
client
,
nil
,
big
int
.
Zero
)
// 100 "available" headers
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
types
.
Header
{
Number
:
big
.
NewInt
(
100
)},
nil
)
// clamped by the supplied size
headers
:=
makeHeaders
(
5
,
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
BigIntMatcher
(
0
)),
mock
.
MatchedBy
(
BigInt
Matcher
(
4
)))
.
Return
(
headers
,
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
bigint
.
Matcher
(
0
)),
mock
.
MatchedBy
(
bigint
.
Matcher
(
4
)))
.
Return
(
headers
,
nil
)
headers
,
err
:=
headerTraversal
.
NextFinalizedHeaders
(
5
)
require
.
NoError
(
t
,
err
)
require
.
Len
(
t
,
headers
,
5
)
// clamped by the supplied size. FinalizedHeight == 100
headers
=
makeHeaders
(
10
,
&
headers
[
len
(
headers
)
-
1
])
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
BigIntMatcher
(
5
)),
mock
.
MatchedBy
(
BigInt
Matcher
(
14
)))
.
Return
(
headers
,
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
bigint
.
Matcher
(
5
)),
mock
.
MatchedBy
(
bigint
.
Matcher
(
14
)))
.
Return
(
headers
,
nil
)
headers
,
err
=
headerTraversal
.
NextFinalizedHeaders
(
10
)
require
.
NoError
(
t
,
err
)
require
.
Len
(
t
,
headers
,
10
)
...
...
@@ -97,12 +98,12 @@ func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
client
:=
new
(
MockEthClient
)
// start from genesis
headerTraversal
:=
NewHeaderTraversal
(
client
,
nil
,
bigZero
)
headerTraversal
:=
NewHeaderTraversal
(
client
,
nil
,
big
int
.
Zero
)
// blocks [0..4]
headers
:=
makeHeaders
(
5
,
nil
)
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
headers
[
4
],
nil
)
.
Times
(
1
)
// Times so that we can override next
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
BigIntMatcher
(
0
)),
mock
.
MatchedBy
(
BigInt
Matcher
(
4
)))
.
Return
(
headers
,
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
bigint
.
Matcher
(
0
)),
mock
.
MatchedBy
(
bigint
.
Matcher
(
4
)))
.
Return
(
headers
,
nil
)
headers
,
err
:=
headerTraversal
.
NextFinalizedHeaders
(
5
)
require
.
NoError
(
t
,
err
)
require
.
Len
(
t
,
headers
,
5
)
...
...
@@ -110,7 +111,7 @@ func TestHeaderTraversalMismatchedProviderStateError(t *testing.T) {
// blocks [5..9]. Next batch is not chained correctly (starts again from genesis)
headers
=
makeHeaders
(
5
,
nil
)
client
.
On
(
"BlockHeaderByNumber"
,
(
*
big
.
Int
)(
nil
))
.
Return
(
&
types
.
Header
{
Number
:
big
.
NewInt
(
9
)},
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
BigIntMatcher
(
5
)),
mock
.
MatchedBy
(
BigInt
Matcher
(
9
)))
.
Return
(
headers
,
nil
)
client
.
On
(
"BlockHeadersByRange"
,
mock
.
MatchedBy
(
bigint
.
Matcher
(
5
)),
mock
.
MatchedBy
(
bigint
.
Matcher
(
9
)))
.
Return
(
headers
,
nil
)
headers
,
err
=
headerTraversal
.
NextFinalizedHeaders
(
5
)
require
.
Nil
(
t
,
headers
)
require
.
Equal
(
t
,
ErrHeaderTraversalAndProviderMismatchedState
,
err
)
...
...
indexer/node/mocks.go
View file @
69256aa1
...
...
@@ -6,6 +6,7 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/stretchr/testify/mock"
)
...
...
indexer/processors/bridge.go
View file @
69256aa1
...
...
@@ -5,6 +5,7 @@ import (
"errors"
"math/big"
"github.com/ethereum-optimism/optimism/indexer/bigint"
"github.com/ethereum-optimism/optimism/indexer/config"
"github.com/ethereum-optimism/optimism/indexer/database"
"github.com/ethereum-optimism/optimism/indexer/etl"
...
...
@@ -42,7 +43,7 @@ func NewBridgeProcessor(log log.Logger, db *database.DB, l1Etl *etl.L1ETL, chain
if
latestL1Header
==
nil
&&
latestL2Header
==
nil
{
log
.
Info
(
"no indexed state, starting from rollup genesis"
)
}
else
{
l1Height
,
l2Height
:=
big
.
NewInt
(
0
),
big
.
NewInt
(
0
)
l1Height
,
l2Height
:=
big
int
.
Zero
,
bigint
.
Zero
if
latestL1Header
!=
nil
{
l1Height
=
latestL1Header
.
Number
l1Header
=
latestL1Header
.
RLPHeader
.
Header
()
...
...
@@ -109,12 +110,12 @@ func (b *BridgeProcessor) Start(ctx context.Context) error {
// Process Bridge Events
toL1Height
,
toL2Height
:=
latestEpoch
.
L1BlockHeader
.
Number
,
latestEpoch
.
L2BlockHeader
.
Number
fromL1Height
,
fromL2Height
:=
big
.
NewInt
(
0
),
big
.
NewInt
(
0
)
fromL1Height
,
fromL2Height
:=
big
int
.
Zero
,
bigint
.
Zero
if
b
.
LatestL1Header
!=
nil
{
fromL1Height
=
new
(
big
.
Int
)
.
Add
(
b
.
LatestL1Header
.
Number
,
big
.
NewInt
(
1
)
)
fromL1Height
=
new
(
big
.
Int
)
.
Add
(
b
.
LatestL1Header
.
Number
,
big
int
.
One
)
}
if
b
.
LatestL2Header
!=
nil
{
fromL2Height
=
new
(
big
.
Int
)
.
Add
(
b
.
LatestL2Header
.
Number
,
big
.
NewInt
(
1
)
)
fromL2Height
=
new
(
big
.
Int
)
.
Add
(
b
.
LatestL2Header
.
Number
,
big
int
.
One
)
}
batchLog
:=
b
.
log
.
New
(
"epoch_start_number"
,
fromL1Height
,
"epoch_end_number"
,
toL1Height
)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment