exchain / nebula / Commits / 9bef9c39

Commit 9bef9c39 authored Mar 11, 2025 by vicotor
update log
parent 3f98cfec
Showing 9 changed files with 58 additions and 68 deletions (+58 -68)
Files changed (9):
    exchain/chaindb/chaindb.go              +32  -29
    exchain/mockengine/engine.go            +9   -5
    exchain/mockengine/process.go           +4   -6
    init.sh                                 +1   -0
    metadb/groupdb/leveldb.go               +2   -8
    metadb/storagedb/leveldb.go             +3   -10
    op-node/cmd/genesis/cmd.go              +5   -2
    op-node/node/node.go                    +2   -2
    op-node/rollup/derive/payload_util.go   +0   -6
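Taken together, the diffs below drop the package-level logrus logger (log "github.com/sirupsen/logrus") in favor of a go-ethereum log.Logger that callers pass in, and convert printf-style calls to structured key/value logging. A minimal sketch of the two styles, outside the nebula codebase (the "module"/"example" context and the height value are illustrative only, not taken from the diff):

// Sketch only: old logrus call vs. new injected go-ethereum logger call.
package main

import (
    ethlog "github.com/ethereum/go-ethereum/log"
    "github.com/sirupsen/logrus"
)

func main() {
    height := uint64(42) // illustrative value

    // Old style: package-level logrus logger, printf-style formatting.
    logrus.Infof("save block with block number %d", height)

    // New style: a log.Logger value provided by the caller, key/value context.
    logger := ethlog.New("module", "example")
    logger.Info("save block", "block number", height)
}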
exchain/chaindb/chaindb.go  View file @ 9bef9c39

@@ -5,6 +5,7 @@ import (
     "fmt"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/event"
+    "github.com/ethereum/go-ethereum/log"
     "github.com/exchain/go-exchain/exchain"
     nebulav1 "github.com/exchain/go-exchain/exchain/protocol/gen/go/nebula/v1"
     "github.com/exchain/go-exchain/exchain/wrapper"
@@ -13,7 +14,6 @@ import (
     "github.com/exchain/go-exchain/op-service/grouptask"
     "github.com/golang/protobuf/proto"
     "github.com/holiman/uint256"
-    log "github.com/sirupsen/logrus"
     "sync"
     "sync/atomic"
     "time"
@@ -55,8 +55,9 @@ const (
     ExChainBlockFinalized ExChainBlockLabel = -2
 )
 
-func NewChainDB(database metadb.Database) ChainDB {
+func NewChainDB(log log.Logger, database metadb.Database) ChainDB {
     chain := &chaindb{
+        log:        log,
         database:   database,
         cache:      memdb.NewMemDB(),
         toSaveData: make(chan chainData, 1000000),
@@ -78,6 +79,7 @@ type chainData struct {
 }
 
 type chaindb struct {
+    log          log.Logger
     cache        metadb.CacheKV
     txCache      *lru.Cache
     receiptCache *lru.Cache
@@ -247,7 +249,7 @@ func (m *chaindb) chainDataSaveTask() {
             // 7. save chain height
             block := data.block
-            log.Infof("save block with block number %d", block.Height())
+            m.log.Info("save block", "block number", block.Height())
             blockHash := block.Hash()
             blockHeight := uint256.NewInt(block.Height())
@@ -258,24 +260,25 @@ func (m *chaindb) chainDataSaveTask() {
                 bnk := blockNumKey(blockHash)
                 m.database.Put([]byte(bnk), blockHeight.Bytes())
                 t2 := time.Now()
-                log.Debugf("save block number key cost %d ms", t2.Sub(t1).Milliseconds())
+                m.log.Debug("save block number key", "cost", t2.Sub(t1).Milliseconds())
             }
             {
                 t1 := time.Now()
                 // save block header data with number
                 hk := blockHeaderKey(blockHeight)
                 if dh, err := proto.Marshal(header); err != nil {
-                    log.Error("save block header", "err", err)
-                    panic(fmt.Sprintf("save block header with err %v", err))
+                    m.log.Error("marshal block header failed", "err", err)
+                    // todo: vicotor to handle error.
+                    panic(fmt.Sprintf("marshal block header failed with err %v", err))
                 } else {
                     if err := m.database.Put([]byte(hk), dh); err != nil {
-                        log.Error("save block header", "err", err)
-                        panic(fmt.Sprintf("save block header with err %v", err))
+                        m.log.Error("save block header failed", "err", err)
+                        panic(fmt.Sprintf("save block header failed with err %v", err))
                     }
                 }
                 t2 := time.Now()
-                log.Debugf("save block header cost %d ms", t2.Sub(t1).Milliseconds())
+                m.log.Debug("save block header", "cost", t2.Sub(t1).Milliseconds())
                 // remove from cache.
                 m.cache.Delete(hk)
             }
@@ -285,35 +288,35 @@ func (m *chaindb) chainDataSaveTask() {
                 bodyk := blockBodyKey(blockHeight)
                 dbody, err := proto.Marshal(block.Block())
                 if err != nil {
-                    log.Error("save block body", "err", err)
-                    panic(fmt.Sprintf("save block body with err %v", err))
+                    m.log.Error("marshal block data failed", "err", err)
+                    panic(fmt.Sprintf("marshal block data failed with err %v", err))
                 }
                 if err := m.database.Put([]byte(bodyk), dbody); err != nil {
-                    log.Error("save block body", "err", err)
-                    panic(fmt.Sprintf("save block body with err %v", err))
+                    m.log.Error("save block body failed", "err", err)
+                    panic(fmt.Sprintf("save block body failed with err %v", err))
                 }
                 // remove from cache.
                 m.cache.Delete(bodyk)
                 t2 := time.Now()
-                log.Debugf("save block body with number %d, cost %d ms, size = %d", header.Height, t2.Sub(t1).Milliseconds(), len(dbody))
+                m.log.Debug("save block body", "cost", t2.Sub(t1).Milliseconds(), "number", blockHeight.String(), "blk size", len(dbody))
             }
             if data.receipts != nil && len(data.receipts.Receipts) > 0 {
                 t1 := time.Now()
                 // save block receipts
                 t2 := time.Now()
-                log.Debugf("save block receipts convert cost %d ms", t2.Sub(t1).Milliseconds())
+                m.log.Debug("save block receipts", "convert cost", t2.Sub(t1).Milliseconds())
                 t1 = time.Now()
                 receiptsk := blockReceiptsKey(blockHeight)
                 dreceipts, err := proto.Marshal(data.receipts)
                 if err != nil {
-                    log.Error("save block receipts", "err", err)
+                    m.log.Error("marshal block receipts", "err", err)
                     panic(err)
                 }
                 if err := m.database.Put([]byte(receiptsk), dreceipts); err != nil {
-                    log.Error("save block receipts", "err", err)
+                    m.log.Error("save block receipts", "err", err)
                 } else {
                     // remove from cache.
                     m.cache.Delete(receiptsk)
@@ -349,21 +352,21 @@ func (m *chaindb) chainDataSaveTask() {
                     t1 := time.Now()
                     seq := sequence(data.receipts.Receipts)
                     t2 := time.Now()
-                    log.Debugf("save block txentry sequence cost %d ms", t2.Sub(t1).Milliseconds())
+                    m.log.Debug("save block txentry sequence cost", t2.Sub(t1).Milliseconds())
                     grouptask.DoMultiTasks(4, handler, seq...)
                 }
             }
             t2 := time.Now()
-            log.Debugf("save block txentry batch put cost %d ms", t2.Sub(t1).Milliseconds())
+            m.log.Debug("save block txentry batch put cost", t2.Sub(t1).Milliseconds())
             // save txhash -> entry with multi routine
             if err := batch.Write(); err != nil {
-                log.Errorf("save block txentry error %s", err)
+                m.log.Error("save block txentry batch write error", "err", err)
                 panic(err)
             }
             t3 := time.Now()
-            log.Debugf("save block txentry cost %d ms", t3.Sub(t1).Milliseconds())
+            m.log.Debug("save block txentry batch write cost", t3.Sub(t1).Milliseconds())
         }
         {
             // save latest height as string
@@ -371,7 +374,7 @@ func (m *chaindb) chainDataSaveTask() {
             m.database.Put([]byte(k), []byte(blockHeight.String())) // ethdb save height string.
         }
-        log.Infof("save block with block number %s finished", blockHeight.String())
+        m.log.Info("save block finished", "block number", block.Height())
     case <-tm.C:
         if m.startHeight != nil {
             h := m.CurrentHeight()
@@ -433,12 +436,12 @@ func (m *chaindb) getBlockReceipts(num *uint256.Int) *nebulav1.TransactionReceip
     k := blockReceiptsKey(num)
     d, err := m.database.Get([]byte(k))
     if err != nil {
-        log.Error("GetBlockReceipts failed, ", err)
+        m.log.Error("GetBlockReceipts failed, ", err)
         return nil
     }
     receipts := new(nebulav1.TransactionReceiptList)
     if err := proto.Unmarshal(d, receipts); err != nil {
-        log.Error("GetBlockReceipts failed, ", err)
+        m.log.Error("GetBlockReceipts failed, ", err)
         return nil
     }
     return receipts
@@ -531,20 +534,20 @@ func (m *chaindb) ResetHeight(height *uint256.Int, clear bool) error {
             for _, r := range receipts.Receipts {
                 txhash := common.BytesToHash(r.Hash)
                 if err := batch.Delete([]byte(txEntryKey(txhash))); err != nil {
-                    log.Debugf("delete tx entry %s failed, err:%s", txhash.String(), err)
+                    m.log.Debug("delete tx entry failed", "txhash", txhash.String(), "err", err)
                 }
                 if err := batch.Delete([]byte(transactionKey(txhash))); err != nil {
-                    log.Debugf("delete tx %s failed, err:%s", txhash.String(), err)
+                    m.log.Debug("delete tx failed", "txhash", txhash.String(), "err", err)
                 }
                 if err := batch.Delete([]byte(receiptKey(txhash))); err != nil {
-                    log.Debugf("delete tx receipt %s failed, err:%s", txhash.String(), err)
+                    m.log.Debug("delete tx receipt failed", "txhash", txhash.String(), "err", err)
                 }
             }
         }
         batch.Delete([]byte(blockHeaderKey(newh)))
         batch.Delete([]byte(blockBodyKey(newh)))
-        log.Debugf("reset height delete data for height %s", newh.String())
+        m.log.Debug("reset height delete data for height", "height", newh.String())
         batch.Put([]byte(chainHeightKey()), []byte(newh.String()))
     }
     batch.Put([]byte(chainHeightKey()), []byte(height.String()))
@@ -568,7 +571,7 @@ func (m *chaindb) SaveBlockHeader(header *nebulav1.BlockHeader) error {
         return err
     }
     if err := m.database.Put([]byte(hk), dh); err != nil {
-        log.Error("database save block header", "err", err)
+        m.log.Error("database save block header", "err", err)
        return err
     }
     return nil
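The chaindb.go change follows a plain constructor-injection pattern: the logger arrives as the first argument of NewChainDB, is stored on the chaindb struct, and every call site switches from the package-level log.* functions to m.log.*. A condensed sketch of the same pattern with simplified, hypothetical types (the real ChainDB interface and metadb.Database are not reproduced here):

// Sketch only: constructor-injected logger stored on the struct, as in chaindb.go.
package main

import "github.com/ethereum/go-ethereum/log"

// store is a hypothetical stand-in for the chaindb struct.
type store struct {
    log log.Logger // injected, replaces the old package-level logrus logger
}

// newStore mirrors the NewChainDB(log, database) shape, minus the database.
func newStore(logger log.Logger) *store {
    return &store{log: logger}
}

func (s *store) saveBlock(height uint64) {
    // Structured call, matching the new call sites in the diff.
    s.log.Info("save block", "block number", height)
}

func main() {
    s := newStore(log.Root())
    s.saveBlock(7)
}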
exchain/mockengine/engine.go  View file @ 9bef9c39

@@ -2,14 +2,16 @@ package mockengine
 
 import (
     "fmt"
+    "github.com/ethereum/go-ethereum/log"
     "github.com/exchain/go-exchain/exchain"
     "github.com/exchain/go-exchain/exchain/chaindb"
     nebulav1 "github.com/exchain/go-exchain/exchain/protocol/gen/go/nebula/v1"
     "github.com/exchain/go-exchain/exchain/wrapper"
-    log "github.com/sirupsen/logrus"
 )
 
 type MockEngine struct {
+    log      log.Logger
+    dataroot string
     chain    chaindb.ChainDB
 }
@@ -34,7 +36,7 @@ func (m MockEngine) NewPayload(params exchain.PayloadParams) (exchain.ExecutionR
     }
     receipts, err := m.ProcessTx(header, params.Transactions)
     if err != nil {
-        log.WithError(err).Error("failed to process txs")
+        m.log.Error("failed to process tx", "err", err)
         return exchain.ExecutionResult{}, err
     }
@@ -70,8 +72,10 @@ func (m MockEngine) ProcessPayload(block *nebulav1.Block) (exchain.ExecutionResu
     }, nil
 }
 
-func NewEngine(chain chaindb.ChainDB) exchain.Engine {
+func NewEngine(dataroot string, log log.Logger, chain chaindb.ChainDB) exchain.Engine {
     return &MockEngine{
+        dataroot: dataroot,
+        log:      log,
         chain:    chain,
     }
 }
exchain/mockengine/process.go  View file @ 9bef9c39

@@ -5,7 +5,6 @@ import (
     nebulav1 "github.com/exchain/go-exchain/exchain/protocol/gen/go/nebula/v1"
     "github.com/exchain/go-exchain/exchain/wrapper"
     "github.com/holiman/uint256"
-    log "github.com/sirupsen/logrus"
 )
 
 func (m MockEngine) ProcessTx(header *nebulav1.BlockHeader, txs *nebulav1.TransactionList) (*nebulav1.TransactionReceiptList, error) {
@@ -27,11 +26,10 @@ func (m MockEngine) ProcessTx(header *nebulav1.BlockHeader, txs *nebulav1.Transa
                 DepositR: &nebulav1.DepositReceipt{},
             }
             deposit := tx.GetDepositTx()
-            log.WithFields(log.Fields{
-                "from":  common.BytesToAddress(deposit.User),
-                "coin":  string(deposit.Coin),
-                "value": new(uint256.Int).SetBytes(deposit.Amount).String(),
-            }).Info("Process deposit tx")
+            m.log.Debug("Process deposit tx",
+                "from", common.BytesToAddress(deposit.User),
+                "coin", string(deposit.Coin),
+                "value", new(uint256.Int).SetBytes(deposit.Amount).String())
         case nebulav1.TxType_LimitTx:
             receipt.Content = &nebulav1.TransactionReceipt_LimitR{
                 LimitR: &nebulav1.LimitOrderReceipt{},
init.sh  View file @ 9bef9c39

 #!/bin/bash
+rm -rf node
 ./opnode genesis init --data-dir node --genesis ./deployer/genesis.json
metadb/groupdb/leveldb.go  View file @ 9bef9c39

@@ -4,7 +4,6 @@ import (
     "sync"
 
     "github.com/exchain/go-exchain/metadb"
-    log "github.com/sirupsen/logrus"
     "github.com/syndtr/goleveldb/leveldb"
     "github.com/syndtr/goleveldb/leveldb/errors"
     "github.com/syndtr/goleveldb/leveldb/filter"
@@ -20,16 +19,12 @@ type levelDB struct {
     db       *leveldb.DB // levelDB instance
     quitLock sync.Mutex  // Mutex protecting the quit channel access
-    logger   *log.Entry  // Contextual logger tracking the database path
 }
 
 // newLevelDB returns a wrapped levelDB object. The namespace is the prefix that the
 // metrics reporting should use for surfacing internal stats.
 // The customize function allows the caller to modify the leveldb options.
 func newLevelDB(file string) (*levelDB, error) {
-    logger := log.WithField("dbpath", file)
     // Open the ethdb and recover any potential corruptions
     db, err := leveldb.OpenFile(file, &opt.Options{
         Filter: filter.NewBloomFilter(10),
@@ -47,7 +42,6 @@ func newLevelDB(file string) (*levelDB, error) {
     ldb := &levelDB{
         fn: file,
         db: db,
-        logger: logger,
     }
     return ldb, nil
metadb/storagedb/leveldb.go  View file @ 9bef9c39

 package storagedb
 
 import (
+    "fmt"
     "sync"
     "time"
 
     "github.com/exchain/go-exchain/metadb"
-    log "github.com/sirupsen/logrus"
     "github.com/syndtr/goleveldb/leveldb"
     "github.com/syndtr/goleveldb/leveldb/errors"
@@ -42,7 +42,6 @@ type LevelDB struct {
     quitLock sync.Mutex      // Mutex protecting the quit channel access
     quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
-    logger   *log.Entry      // Contextual logger tracking the database path
 }
 
 func (db *LevelDB) Put(key []byte, value []byte) error {
@@ -75,8 +74,6 @@ func (db *LevelDB) NewSnapshot() (metadb.Snapshot, error) {
     return &snapshot{db: snap}, nil
 }
 
 func newLevelDB(file string) *LevelDB {
-    logger := log.WithField("dbpath", file)
     // Open the ethdb and recover any potential corruptions
     db, err := leveldb.OpenFile(file, &opt.Options{
         Filter: filter.NewBloomFilter(10),
@@ -88,13 +85,12 @@ func newLevelDB(file string) *LevelDB {
         db, err = leveldb.RecoverFile(file, nil)
     }
     if err != nil {
-        log.WithError(err).Fatalf("Failed to open LevelDB database at %s", file)
+        panic(fmt.Errorf("Failed to open LevelDB database at %s with err:%v", file, err))
     }
     // Assemble the wrapper with all the registered metrics
     ldb := &LevelDB{
         fn: file,
         db: db,
-        logger:   logger,
         quitChan: make(chan chan error),
     }
@@ -124,13 +120,11 @@ func New(file string, cache int, handles int) (*LevelDB, error) {
 // The customize function allows the caller to modify the leveldb options.
 func NewCustom(file string, customize func(options *opt.Options)) (*LevelDB, error) {
     options := configureOptions(customize)
-    logger := log.WithField("module", "leveldb")
     usedCache := options.GetBlockCacheCapacity() + options.GetWriteBuffer()*2
     logCtx := []interface{}{"cache", usedCache, "handles", options.GetOpenFilesCacheCapacity()}
     if options.ReadOnly {
         logCtx = append(logCtx, "readonly", "true")
     }
-    logger.Info("Allocated cache and file handles ", "logCtx ", logCtx)
     // Open the ethdb and recover any potential corruptions
     db, err := leveldb.OpenFile(file, &opt.Options{
@@ -147,7 +141,6 @@ func NewCustom(file string, customize func(options *opt.Options)) (*LevelDB, err
     ldb := &LevelDB{
         fn: file,
         db: db,
-        logger:   logger,
         quitChan: make(chan chan error),
     }
@@ -178,7 +171,7 @@ func (db *LevelDB) Close() error {
         errc := make(chan error)
         db.quitChan <- errc
         if err := <-errc; err != nil {
-            db.logger.Error("Metrics collection failed", "err", err)
+            // db.logger.Error("Metrics collection failed", "err", err)
         }
         db.quitChan = nil
     }
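With the contextual logrus logger removed from metadb/storagedb/leveldb.go, an unrecoverable open error now panics with a formatted error instead of calling Fatalf. A minimal sketch of that failure path using goleveldb directly; the path and the helper name are placeholders, and the real newLevelDB also sets bloom-filter options and falls back to leveldb.RecoverFile on corruption:

// Sketch only: fail fast on an unrecoverable LevelDB open error, as the new code path does.
package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb"
)

// mustOpen is a hypothetical helper, not part of the project.
func mustOpen(file string) *leveldb.DB {
    db, err := leveldb.OpenFile(file, nil)
    if err != nil {
        // Same shape as the replacement line in the diff: panic with a wrapped error.
        panic(fmt.Errorf("Failed to open LevelDB database at %s with err:%v", file, err))
    }
    return db
}

func main() {
    db := mustOpen("/tmp/example-leveldb") // placeholder path
    defer db.Close()
    db.Put([]byte("k"), []byte("v"), nil)
}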
op-node/cmd/genesis/cmd.go  View file @ 9bef9c39

@@ -8,6 +8,7 @@ import (
     "github.com/exchain/go-exchain/exchain/mockengine"
     "github.com/exchain/go-exchain/exchain/wrapper"
     "github.com/exchain/go-exchain/metadb/groupdb"
+    processengine "github.com/exchain/process/engine"
     "time"
 
     "github.com/exchain/go-exchain/op-service/ioutil"
@@ -250,8 +251,10 @@ var Subcommands = cli.Commands{
         logger.Error("Failed to create genesis block", "err", err)
         return err
     }
-    chain := chaindb.NewChainDB(database)
-    engine := mockengine.NewEngine(chain)
+    chain := chaindb.NewChainDB(logger, database)
+    processengine.NewEngine(chain)
+    // todo
+    engine := mockengine.NewEngine(dataDir, logger, chain)
     if err = genblk.Commit(engine, chain); err != nil {
         logger.Error("Failed to commit genesis block", "err", err)
     }
op-node/node/node.go  View file @ 9bef9c39

@@ -399,9 +399,9 @@ func (n *OpNode) initL1BeaconAPI(ctx context.Context, cfg *Config) error {
 func (n *OpNode) initL2(ctx context.Context, cfg *Config) error {
     var err error
     n.db = groupdb.NewGroupDB(n.cfg.NodeDataPath, "chain")
-    chain := chaindb.NewChainDB(n.db)
+    chain := chaindb.NewChainDB(n.log, n.db)
     n.engineIns = processengine.NewEngine(chain)
-    n.engineIns = mockengine.NewEngine(chain)
+    n.engineIns = mockengine.NewEngine(n.cfg.NodeDataPath, n.log, chain)
     n.l2Source = engine.NewEngineAPI(&n.cfg.Rollup, chain, n.engineIns)
     if n.engineIns == nil {
         return errors.New("failed to create L2 engine")
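Both op-node/cmd/genesis/cmd.go and op-node/node/node.go now thread the node logger through the chain database and the mock engine constructors. A self-contained sketch of that wiring with stand-in types; the interfaces and constructors below only mirror the shapes shown in the diff and are simplified assumptions, not the real exchain APIs:

// Sketch only: wiring a logger through the chain DB and engine constructors.
package main

import "github.com/ethereum/go-ethereum/log"

// Stand-ins for chaindb.ChainDB and exchain.Engine.
type ChainDB interface{ CurrentHeight() uint64 }
type Engine interface{ Name() string }

type chain struct{ log log.Logger }

func (c *chain) CurrentHeight() uint64 { return 0 }

// Mirrors the new NewChainDB(log, database) shape, with the database omitted.
func NewChainDB(logger log.Logger) ChainDB { return &chain{log: logger} }

type mockEngine struct {
    log      log.Logger
    dataroot string
    chain    ChainDB
}

func (m *mockEngine) Name() string { return "mock" }

// Mirrors the new NewEngine(dataroot, log, chain) shape from the diff.
func NewEngine(dataroot string, logger log.Logger, c ChainDB) Engine {
    return &mockEngine{dataroot: dataroot, log: logger, chain: c}
}

func main() {
    logger := log.Root()
    c := NewChainDB(logger)
    e := NewEngine("/tmp/nodedata", logger, c) // placeholder data dir
    logger.Info("wired L2 engine", "engine", e.Name(), "height", c.CurrentHeight())
}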
op-node/rollup/derive/payload_util.go  View file @ 9bef9c39

@@ -6,7 +6,6 @@ import (
     nebulav1 "github.com/exchain/go-exchain/exchain/protocol/gen/go/nebula/v1"
     "github.com/exchain/go-exchain/op-node/rollup"
     "github.com/exchain/go-exchain/op-service/eth"
-    log "github.com/sirupsen/logrus"
 )
 
 // PayloadToBlockRef extracts the essential L2BlockRef information from an execution payload,
@@ -21,11 +20,6 @@ func PayloadToBlockRef(rollupCfg *rollup.Config, payload *eth.ExecutionPayload)
     }
     l1Origin = genesis.L1
     sequenceNumber = 0
-    log.WithFields(log.Fields{
-        "blockNumber": payload.BlockNumber,
-        "blockHash":   payload.BlockHash,
-        "l1info":      l1Origin,
-    }).Debug("using genesis L1 info for L2 block")
 } else {
     header := payload.Payload.Header
     l1Origin = eth.BlockID{Hash: common.BytesToHash(header.L1Hash), Number: header.L1Height}