exchain / nebula · Commits · cd1a0def

Commit cd1a0def authored Jun 02, 2023 by Hamdi Allam
introduce u256 database type to conform to domain type. utilize hexutil.Bytes for all byte arrays
parent f988b4e9

Showing 4 changed files with 103 additions and 34 deletions (+103 −34):

  indexer/database/blocks.go                      +27  −21
  indexer/database/bridge.go                       +8  −10
  indexer/database/types.go                       +65   −0
  indexer/migrations/20230523_create_schema.sql    +3   −3
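At a glance, the models drop the driver-level wrappers (pgtype.Numeric, sql.NullInt64/NullString, raw []byte) in favor of the new U256 type, plain pointers for nullable columns, and hexutil.Bytes for byte arrays. As a rough caller-side sketch (illustrative only, not part of the commit; it assumes go-ethereum's core/types package), populating the new field types looks like this:

    // Illustrative only: building a BlockHeader row from a go-ethereum header.
    func exampleBlockHeader(h *types.Header) BlockHeader {
        return BlockHeader{
            Hash:       h.Hash(),
            ParentHash: h.ParentHash,
            Number:     U256{Int: h.Number}, // stored through the UINT256 numeric domain
            Timestamp:  h.Time,
        }
    }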
indexer/database/blocks.go (view file @ cd1a0def)
 package database

 import (
-    "database/sql"
+    "context"
     "errors"
     "math/big"

     "github.com/ethereum/go-ethereum/common"
-    "github.com/jackc/pgtype"
     "gorm.io/gorm"
 )
@@ -18,7 +17,7 @@ import (
 type BlockHeader struct {
     Hash       common.Hash `gorm:"primaryKey;serializer:json"`
     ParentHash common.Hash `gorm:"serializer:json"`
-    Number     pgtype.Numeric
+    Number     U256
     Timestamp  uint64
 }
@@ -30,13 +29,16 @@ type L2BlockHeader struct {
     BlockHeader

     // Marked when the proposed output is finalized on L1.
     // All bedrock blocks will have `LegacyStateBatchIndex == NULL`
     L1BlockHash           *common.Hash `gorm:"serializer:json"`
-    LegacyStateBatchIndex sql.NullInt64
+    LegacyStateBatchIndex *uint64
 }

 type LegacyStateBatch struct {
-    Index     uint64      `gorm:"primaryKey"`
+    // `default:0` is added since gorm would interpret 0 as NULL,
+    // violating the primary key constraint.
+    Index     uint64      `gorm:"primaryKey;default:0"`
     Root      common.Hash `gorm:"serializer:json"`
     Size      uint64
     PrevTotal uint64
@@ -44,8 +46,8 @@ type LegacyStateBatch struct {
 }

 type BlocksView interface {
-    LatestL1BlockHeight() (*big.Int, error)
-    LatestL2BlockHeight() (*big.Int, error)
+    FinalizedL1BlockHeight() (*big.Int, error)
+    FinalizedL2BlockHeight() (*big.Int, error)
 }

 type BlocksDB interface {
@@ -81,18 +83,21 @@ func (db *blocksDB) StoreLegacyStateBatch(stateBatch *LegacyStateBatch) error {
     // Even though transaction control flow is managed, could we benefit
     // from a nested transaction here?
+    // Handle edge case where gorm interprets the nil representation of uint256
+    // as a NULL insertion. This causes issues with the non-null constraint as a
+    // primary key
     result := db.gorm.Create(stateBatch)
     if result.Error != nil {
         return result.Error
     }

-    // Mark this index & l1 block hash for all applicable l2 blocks
-    l2Headers := make([]L2BlockHeader, stateBatch.Size)
+    // Mark this state batch index & l1 block hash for all applicable l2 blocks
+    l2Headers := make([]*L2BlockHeader, stateBatch.Size)

     // [start, end] range is inclusive. Since `PrevTotal` is the index of the prior batch, no
     // need to subtract one when adding the size
-    startHeight := pgtype.Numeric{Int: big.NewInt(int64(stateBatch.PrevTotal + 1)), Status: pgtype.Present}
-    endHeight := pgtype.Numeric{Int: big.NewInt(int64(stateBatch.PrevTotal + stateBatch.Size)), Status: pgtype.Present}
+    startHeight := U256{Int: big.NewInt(int64(stateBatch.PrevTotal + 1))}
+    endHeight := U256{Int: big.NewInt(int64(stateBatch.PrevTotal + stateBatch.Size))}
     result = db.gorm.Where("number BETWEEN ? AND ?", &startHeight, &endHeight).Find(&l2Headers)
     if result.Error != nil {
         return result.Error
@@ -101,7 +106,7 @@ func (db *blocksDB) StoreLegacyStateBatch(stateBatch *LegacyStateBatch) error {
     }

     for _, header := range l2Headers {
-        header.LegacyStateBatchIndex = sql.NullInt64{Int64: int64(stateBatch.Index), Valid: true}
+        header.LegacyStateBatchIndex = &stateBatch.Index
         header.L1BlockHash = &stateBatch.L1BlockHash
     }
@@ -109,14 +114,14 @@ func (db *blocksDB) StoreLegacyStateBatch(stateBatch *LegacyStateBatch) error {
     return result.Error
 }

-func (db *blocksDB) LatestL1BlockHeight() (*big.Int, error) {
-    var latestHeader L1BlockHeader
-    result := db.gorm.Order("number desc").First(&latestHeader)
+func (db *blocksDB) FinalizedL1BlockHeight() (*big.Int, error) {
+    var l1Header L1BlockHeader
+    result := db.gorm.Order("number DESC").Take(&l1Header)
     if result.Error != nil {
         return nil, result.Error
     }

-    return latestHeader.Number.Int, nil
+    return l1Header.Number.Int, nil
 }

 // L2
@@ -126,14 +131,15 @@ func (db *blocksDB) StoreL2BlockHeaders(headers []*L2BlockHeader) error {
     return result.Error
 }

-func (db *blocksDB) LatestL2BlockHeight() (*big.Int, error) {
-    var latestHeader L2BlockHeader
-    result := db.gorm.Order("number desc").First(&latestHeader)
+func (db *blocksDB) FinalizedL2BlockHeight() (*big.Int, error) {
+    var l2Header L2BlockHeader
+    result := db.gorm.Order("number DESC").Take(&l2Header)
     if result.Error != nil {
         return nil, result.Error
     }

-    return latestHeader.Number.Int, nil
+    result.Logger.Info(context.Background(), "number ", l2Header.Number)
+    return l2Header.Number.Int, nil
 }

 func (db *blocksDB) MarkFinalizedL1RootForL2Block(l2Root, l1Root common.Hash) error {
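Since the nullable columns are now plain pointers and the height getters were renamed to Finalized*, caller code changes shape slightly. A hedged sketch of the caller side (illustrative only, not part of the commit; it assumes the same package plus the fmt import):

    // Illustrative only: reading the finalized L2 height and a nullable column.
    func finalizedHeightExample(db *blocksDB, header *L2BlockHeader) error {
        height, err := db.FinalizedL2BlockHeight()
        if err != nil {
            // gorm's Take() surfaces gorm.ErrRecordNotFound when no rows exist yet
            return err
        }

        // NULL is simply a nil pointer instead of sql.NullInt64 with Valid == false
        if header.LegacyStateBatchIndex != nil {
            fmt.Println("legacy batch", *header.LegacyStateBatchIndex, "finalized height", height)
        }
        return nil
    }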
indexer/database/bridge.go (view file @ cd1a0def)
 package database

 import (
-    "database/sql"
     "github.com/ethereum/go-ethereum/common"
-    "github.com/jackc/pgtype"
+    "github.com/ethereum/go-ethereum/common/hexutil"
     "gorm.io/gorm"
 )
@@ -15,8 +13,8 @@ import (
 type Transaction struct {
     FromAddress common.Address `gorm:"serializer:json"`
     ToAddress   common.Address `gorm:"serializer:json"`
-    Amount      pgtype.Numeric
-    Data        []byte
+    Amount      U256
+    Data        hexutil.Bytes `gorm:"serializer:json"`
 }

 type TokenPair struct {
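With the JSON serializer, hexutil.Bytes round-trips as a 0x-prefixed hex string, which is what lets the data column become a VARCHAR in the migration below. A small standalone sketch (illustrative, not part of the commit):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/ethereum/go-ethereum/common/hexutil"
    )

    func main() {
        data := hexutil.Bytes{0xde, 0xad, 0xbe, 0xef}
        out, _ := json.Marshal(data)
        fmt.Println(string(out)) // "0xdeadbeef"

        var back hexutil.Bytes
        _ = json.Unmarshal(out, &back) // decodes the hex string back into the original bytes
    }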
@@ -42,8 +40,8 @@ type Withdrawal struct {
     InitiatedL2EventGUID string

     WithdrawalHash       common.Hash `gorm:"serializer:json"`
-    ProvenL1EventGUID    sql.NullString
-    FinalizedL1EventGUID sql.NullString
+    ProvenL1EventGUID    *string
+    FinalizedL1EventGUID *string

     Tx        Transaction `gorm:"embedded"`
     TokenPair TokenPair   `gorm:"embedded"`
@@ -96,7 +94,7 @@ func (db *bridgeDB) DepositsByAddress(address common.Address) ([]*DepositWithTra
     joinQuery := depositsQuery.Joins("left join l1_contract_events transaction_hash as l1_transaction_hash ON deposit.initiated_l1_event_guid = l1_contract_events.guid")

     deposits := []DepositWithTransactionHash{}
-    result := joinQuery.Scan(&deposits)
+    result := joinQuery.Limit(100).Scan(&deposits)
     if result.Error != nil {
         return nil, result.Error
     }
@@ -119,7 +117,7 @@ func (db *bridgeDB) MarkProvenWithdrawalEvent(guid, provenL1EventGuid string) er
     var withdrawal Withdrawal
     result := db.gorm.First(&withdrawal, "guid = ?", guid)
     if result.Error == nil {
-        withdrawal.ProvenL1EventGUID = sql.NullString{String: provenL1EventGuid, Valid: true}
+        withdrawal.ProvenL1EventGUID = &provenL1EventGuid
         db.gorm.Save(&withdrawal)
     }
@@ -130,7 +128,7 @@ func (db *bridgeDB) MarkFinalizedWithdrawalEvent(guid, finalizedL1EventGuid stri
     var withdrawal Withdrawal
     result := db.gorm.First(&withdrawal, "guid = ?", guid)
     if result.Error == nil {
-        withdrawal.FinalizedL1EventGUID = sql.NullString{String: finalizedL1EventGuid, Valid: true}
+        withdrawal.FinalizedL1EventGUID = &finalizedL1EventGuid
         db.gorm.Save(&withdrawal)
     }
indexer/database/types.go (new file, mode 100644; view file @ cd1a0def)
package database

import (
    "database/sql/driver"
    "errors"
    "math/big"

    "github.com/jackc/pgtype"
)

var u256BigIntOverflow = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
var big10 = big.NewInt(10)

var ErrU256Overflow = errors.New("number exceeds u256")
var ErrU256ContainsDecimal = errors.New("number contains fractional digits")
var ErrU256NotNull = errors.New("number cannot be null")

// U256 is a wrapper over big.Int that conforms to the database U256 numeric domain type
type U256 struct {
    Int *big.Int
}

// Scan implements the database/sql Scanner interface.
func (u256 *U256) Scan(src interface{}) error {
    // deserialize as a numeric
    var numeric pgtype.Numeric
    err := numeric.Scan(src)
    if err != nil {
        return err
    } else if numeric.Exp < 0 {
        return ErrU256ContainsDecimal
    } else if numeric.Status == pgtype.Null {
        return ErrU256NotNull
    }

    // factor in the powers of 10
    num := numeric.Int
    if numeric.Exp > 0 {
        factor := new(big.Int).Exp(big10, big.NewInt(int64(numeric.Exp)), nil)
        num.Mul(num, factor)
    }

    // check bounds before setting the u256
    if num.Cmp(u256BigIntOverflow) >= 0 {
        return ErrU256Overflow
    } else {
        u256.Int = num
    }

    return nil
}

// Value implements the database/sql/driver Valuer interface.
func (u256 U256) Value() (driver.Value, error) {
    // check bounds
    if u256.Int == nil {
        return nil, ErrU256NotNull
    } else if u256.Int.Cmp(u256BigIntOverflow) >= 0 {
        return nil, ErrU256Overflow
    }

    // simply encode as a numeric with no Exp set (non-decimal)
    numeric := pgtype.Numeric{Int: u256.Int, Status: pgtype.Present}
    return numeric.Value()
}
indexer/migrations/20230523_create_schema.sql (view file @ cd1a0def)
@@ -22,7 +22,7 @@ CREATE TABLE IF NOT EXISTS l2_block_headers (
     -- Finalization information
     l1_block_hash            VARCHAR REFERENCES l1_block_headers(hash),
-    legacy_state_batch_index INTEGER
+    legacy_state_batch_index INTEGER REFERENCES legacy_state_batches(index)
 );

 CREATE TABLE IF NOT EXISTS legacy_state_batches (
@@ -73,7 +73,7 @@ CREATE TABLE IF NOT EXISTS deposits (
     l1_token_address VARCHAR NOT NULL,
     l2_token_address VARCHAR NOT NULL,
     amount           UINT256,
-    data             BYTEA NOT NULL
+    data             VARCHAR NOT NULL
 );

 CREATE TABLE IF NOT EXISTS withdrawals (
@@ -95,5 +95,5 @@ CREATE TABLE IF NOT EXISTS withdrawals (
     l1_token_address VARCHAR NOT NULL,
     l2_token_address VARCHAR NOT NULL,
     amount           UINT256,
-    data             BYTEA NOT NULL
+    data             VARCHAR NOT NULL
 );