exchain / nebula, commit dead005d (Unverified)
Authored Nov 01, 2024 by protolambda; committed by GitHub, Nov 01, 2024
op-supervisor: improve logging, add update signals to trigger worker routines (#12770)
Parent: 33a03313
Showing 5 changed files with 62 additions and 22 deletions (+62 -22)
backend.go          op-supervisor/supervisor/backend/backend.go                       +43  -8
worker.go           op-supervisor/supervisor/backend/cross/worker.go                   +2  -3
worker_test.go      op-supervisor/supervisor/backend/cross/worker_test.go              +1  -1
update.go           op-supervisor/supervisor/backend/db/update.go                       +8  -5
chain_processor.go  op-supervisor/supervisor/backend/processors/chain_processor.go     +8  -5
op-supervisor/supervisor/backend/backend.go
@@ -117,13 +117,6 @@ func (su *SupervisorBackend) initResources(ctx context.Context, cfg *config.Conf
         }
     }
-    // for each chain initialize a chain processor service
-    for _, chainID := range chains {
-        logProcessor := processors.NewLogProcessor(chainID, su.chainDBs)
-        chainProcessor := processors.NewChainProcessor(su.logger, chainID, logProcessor, su.chainDBs)
-        su.chainProcessors[chainID] = chainProcessor
-    }
     // initialize all cross-unsafe processors
     for _, chainID := range chains {
         worker := cross.NewCrossUnsafeWorker(su.logger, chainID, su.chainDBs)
@@ -134,6 +127,13 @@ func (su *SupervisorBackend) initResources(ctx context.Context, cfg *config.Conf
         worker := cross.NewCrossSafeWorker(su.logger, chainID, su.chainDBs)
         su.crossSafeProcessors[chainID] = worker
     }
+    // For each chain initialize a chain processor service,
+    // after cross-unsafe workers are ready to receive updates
+    for _, chainID := range chains {
+        logProcessor := processors.NewLogProcessor(chainID, su.chainDBs)
+        chainProcessor := processors.NewChainProcessor(su.logger, chainID, logProcessor, su.chainDBs, su.onIndexedLocalUnsafeData)
+        su.chainProcessors[chainID] = chainProcessor
+    }
     // the config has some RPC connections to attach to the chain-processors
     for _, rpc := range cfg.L2RPCs {
@@ -145,6 +145,36 @@ func (su *SupervisorBackend) initResources(ctx context.Context, cfg *config.Conf
     return nil
 }
 
+// onIndexedLocalUnsafeData is called by the event indexing workers.
+// This signals to cross-unsafe workers that there's data to index.
+func (su *SupervisorBackend) onIndexedLocalUnsafeData() {
+    su.mu.RLock()
+    defer su.mu.RUnlock()
+    // We signal all workers, since dependencies on a chain may be unblocked
+    // by new data on other chains.
+    // Busy workers don't block processing.
+    // The signal is picked up only if the worker is running in the background.
+    for _, w := range su.crossUnsafeProcessors {
+        w.OnNewData()
+    }
+}
+
+// onNewLocalSafeData is called by the safety-indexing.
+// This signals to cross-safe workers that there's data to index.
+func (su *SupervisorBackend) onNewLocalSafeData() {
+    su.mu.RLock()
+    defer su.mu.RUnlock()
+    // We signal all workers, since dependencies on a chain may be unblocked
+    // by new data on other chains.
+    // Busy workers don't block processing.
+    // The signal is picked up only if the worker is running in the background.
+    for _, w := range su.crossSafeProcessors {
+        w.OnNewData()
+    }
+}
+
 // openChainDBs initializes all the DB resources of a specific chain.
 // It is a sub-task of initResources.
 func (su *SupervisorBackend) openChainDBs(chainID types.ChainID) error {
@@ -443,7 +473,12 @@ func (su *SupervisorBackend) UpdateLocalSafe(ctx context.Context, chainID types.
     su.mu.RLock()
     defer su.mu.RUnlock()
-    return su.chainDBs.UpdateLocalSafe(chainID, derivedFrom, lastDerived)
+    err := su.chainDBs.UpdateLocalSafe(chainID, derivedFrom, lastDerived)
+    if err != nil {
+        return err
+    }
+    su.onNewLocalSafeData()
+    return nil
 }
 
 func (su *SupervisorBackend) UpdateFinalizedL1(ctx context.Context, chainID types.ChainID, finalized eth.BlockRef) error {
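Both new helpers fan a single signal out to every cross-worker rather than only to the chain that produced the data, because a dependency tracked for one chain may be unblocked by new data on another. Below is a minimal, self-contained sketch of that fan-out shape; Backend, signaler, and ChainID are illustrative stand-ins, not the real op-supervisor types.

package main

import (
	"fmt"
	"sync"
)

type ChainID uint64

// signaler is a stand-in for the cross-unsafe/cross-safe workers' OnNewData.
type signaler interface {
	OnNewData()
}

// Backend is a stand-in for SupervisorBackend.
type Backend struct {
	mu      sync.RWMutex
	workers map[ChainID]signaler
}

// onNewData mirrors onIndexedLocalUnsafeData/onNewLocalSafeData: hold the read
// lock and poke every worker, because new data on one chain may unblock
// dependencies tracked by the workers of other chains.
func (b *Backend) onNewData() {
	b.mu.RLock()
	defer b.mu.RUnlock()
	for _, w := range b.workers {
		w.OnNewData()
	}
}

type printSignaler struct{ id ChainID }

func (p printSignaler) OnNewData() { fmt.Println("poked worker for chain", p.id) }

func main() {
	b := &Backend{workers: map[ChainID]signaler{
		1: printSignaler{1},
		2: printSignaler{2},
	}}
	b.onNewData()
}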
op-supervisor/supervisor/backend/cross/worker.go
@@ -71,7 +71,7 @@ func (s *Worker) worker() {
             return
         }
         if errors.Is(err, types.ErrFuture) {
-            s.log.Debug("Failed to process work", "err", err)
+            s.log.Debug("Worker awaits more data", "err", err)
         } else {
             s.log.Warn("Failed to process work", "err", err)
         }
@@ -92,14 +92,13 @@ func (s *Worker) worker() {
     }
 }
 
-func (s *Worker) OnNewData() error {
+func (s *Worker) OnNewData() {
     // signal that we have something to process
     select {
     case s.poke <- struct{}{}:
     default:
         // already requested an update
     }
-    return nil
 }
 
 func (s *Worker) Close() {
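The poke channel is used as a level-triggered flag: if a signal is already pending, the send would block, so OnNewData falls through the default case and the caller never waits on a busy worker; dropping the error return reflects that the call cannot fail. Here is a hedged sketch of a worker loop consuming such a channel, a simplified stand-in for the real Worker that keeps only the poke channel and a Close-style shutdown.

package main

import (
	"fmt"
	"time"
)

type Worker struct {
	poke chan struct{} // capacity 1: full means "there is work to do"
	done chan struct{}
}

func NewWorker() *Worker {
	w := &Worker{poke: make(chan struct{}, 1), done: make(chan struct{})}
	go w.loop()
	return w
}

// OnNewData signals that there is something to process; it never blocks.
func (w *Worker) OnNewData() {
	select {
	case w.poke <- struct{}{}:
	default: // already requested an update
	}
}

func (w *Worker) Close() { close(w.done) }

func (w *Worker) loop() {
	for {
		select {
		case <-w.poke:
			fmt.Println("processing work") // placeholder for the cross-safety work
		case <-w.done:
			return
		}
	}
}

func main() {
	w := NewWorker()
	w.OnNewData()
	w.OnNewData() // coalesces with the first signal if the loop has not run yet
	time.Sleep(50 * time.Millisecond)
	w.Close()
}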
op-supervisor/supervisor/backend/cross/worker_test.go
@@ -55,7 +55,7 @@ func TestWorker(t *testing.T) {
         return count == 1
     }, 2*time.Second, 100*time.Millisecond)
     // when OnNewData is called, the worker runs again
-    require.NoError(t, w.OnNewData())
+    w.OnNewData()
     require.Eventually(t, func() bool {
         return count == 2
     }, 2*time.Second, 100*time.Millisecond)
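With OnNewData no longer returning an error, the test calls it directly and keeps asserting the effect asynchronously: require.Eventually polls until the background worker has observed the signal. A minimal sketch of that polling style follows, using an atomic counter and a bare channel in place of the real test fixture.

package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestSignalIsEventuallyObserved(t *testing.T) {
	var count atomic.Int64
	poke := make(chan struct{}, 1)
	defer close(poke)
	go func() {
		for range poke {
			count.Add(1) // stand-in for one worker run
		}
	}()

	// Signal without blocking, then poll until the worker has run once.
	select {
	case poke <- struct{}{}:
	default:
	}
	require.Eventually(t, func() bool {
		return count.Load() == 1
	}, 2*time.Second, 100*time.Millisecond)
}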
op-supervisor/supervisor/backend/db/update.go
@@ -27,11 +27,11 @@ func (db *ChainsDB) SealBlock(chain types.ChainID, block eth.BlockRef) error {
     if !ok {
         return fmt.Errorf("cannot SealBlock: %w: %v", types.ErrUnknownChain, chain)
     }
-    db.logger.Debug("Updating local unsafe", "chain", chain, "block", block)
     err := logDB.SealBlock(block.ParentHash, block.ID(), block.Time)
     if err != nil {
         return fmt.Errorf("failed to seal block %v: %w", block, err)
     }
+    db.logger.Info("Updated local unsafe", "chain", chain, "block", block)
     return nil
 }
@@ -57,8 +57,8 @@ func (db *ChainsDB) UpdateCrossUnsafe(chain types.ChainID, crossUnsafe types.Blo
     if !ok {
         return fmt.Errorf("cannot UpdateCrossUnsafe: %w: %s", types.ErrUnknownChain, chain)
     }
-    db.logger.Debug("Updating cross unsafe", "chain", chain, "crossUnsafe", crossUnsafe)
     v.Set(crossUnsafe)
+    db.logger.Info("Updated cross-unsafe", "chain", chain, "crossUnsafe", crossUnsafe)
     return nil
 }
@@ -67,8 +67,11 @@ func (db *ChainsDB) UpdateCrossSafe(chain types.ChainID, l1View eth.BlockRef, la
     if !ok {
         return fmt.Errorf("cannot UpdateCrossSafe: %w: %s", types.ErrUnknownChain, chain)
     }
-    db.logger.Debug("Updating cross safe", "chain", chain, "l1View", l1View, "lastCrossDerived", lastCrossDerived)
-    return crossDB.AddDerived(l1View, lastCrossDerived)
+    if err := crossDB.AddDerived(l1View, lastCrossDerived); err != nil {
+        return err
+    }
+    db.logger.Info("Updated cross-safe", "chain", chain, "l1View", l1View, "lastCrossDerived", lastCrossDerived)
+    return nil
 }
 
 func (db *ChainsDB) UpdateFinalizedL1(finalized eth.BlockRef) error {
@@ -79,7 +82,7 @@ func (db *ChainsDB) UpdateFinalizedL1(finalized eth.BlockRef) error {
     if v := db.finalizedL1.Value; v.Number > finalized.Number {
         return fmt.Errorf("cannot rewind finalized L1 head from %s to %s", v, finalized)
     }
-    db.logger.Debug("Updating finalized L1", "finalizedL1", finalized)
     db.finalizedL1.Value = finalized
+    db.logger.Info("Updated finalized L1", "finalizedL1", finalized)
     return nil
 }
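Each update function now performs the write first, returns the error if it fails, and only then emits a past-tense Info log; UpdateCrossSafe in particular stops returning the AddDerived result inline so the success log can sit between the write and the return. A small hedged sketch of that shape, with a hypothetical exampleDB and put standing in for the ChainsDB internals:

package main

import (
	"fmt"
	"log/slog"
)

// exampleDB is a hypothetical stand-in for ChainsDB.
type exampleDB struct {
	logger *slog.Logger
	store  map[string]string
}

// put stands in for the underlying write (e.g. crossDB.AddDerived).
func (db *exampleDB) put(k, v string) error {
	if db.store == nil {
		return fmt.Errorf("store not initialized")
	}
	db.store[k] = v
	return nil
}

func (db *exampleDB) updateCrossSafe(chain, l1View, lastCrossDerived string) error {
	if err := db.put(l1View, lastCrossDerived); err != nil {
		return err // propagate instead of logging a success
	}
	// Info log only after the write succeeded, phrased in the past tense.
	db.logger.Info("Updated cross-safe", "chain", chain, "l1View", l1View, "lastCrossDerived", lastCrossDerived)
	return nil
}

func main() {
	db := &exampleDB{logger: slog.Default(), store: map[string]string{}}
	if err := db.updateCrossSafe("chainA", "l1:100", "l2:42"); err != nil {
		panic(err)
	}
}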
op-supervisor/supervisor/backend/processors/chain_processor.go
@@ -55,8 +55,8 @@ type ChainProcessor struct {
     // channel with capacity of 1, full if there is work to do
     newHead chan struct{}
-    // channel with capacity of 1, to signal work complete if running in synchroneous mode
-    out chan struct{}
+    // to signal to the other services that new indexed data is available
+    onIndexed func()
     // lifetime management of the chain processor
     ctx context.Context
@@ -64,7 +64,7 @@ type ChainProcessor struct {
     wg sync.WaitGroup
 }
 
-func NewChainProcessor(log log.Logger, chain types.ChainID, processor LogProcessor, rewinder DatabaseRewinder) *ChainProcessor {
+func NewChainProcessor(log log.Logger, chain types.ChainID, processor LogProcessor, rewinder DatabaseRewinder, onIndexed func()) *ChainProcessor {
     ctx, cancel := context.WithCancel(context.Background())
     out := &ChainProcessor{
         log: log.New("chain", chain),
@@ -73,7 +73,7 @@ func NewChainProcessor(log log.Logger, chain types.ChainID, processor LogProcess
         processor: processor,
         rewinder:  rewinder,
         newHead:   make(chan struct{}, 1),
-        out:       make(chan struct{}, 1),
+        onIndexed: onIndexed,
         ctx:       ctx,
         cancel:    cancel,
     }
@@ -134,7 +134,7 @@ func (s *ChainProcessor) work() {
     target := s.nextNum()
     if err := s.update(target); err != nil {
         if errors.Is(err, ethereum.NotFound) {
-            s.log.Info("Cannot find next block yet", "target", target, "err", err)
+            s.log.Debug("Event-indexer cannot find next block yet", "target", target, "err", err)
         } else if errors.Is(err, types.ErrNoRPCSource) {
             s.log.Warn("No RPC source configured, cannot process new blocks")
         } else {
@@ -192,7 +192,10 @@ func (s *ChainProcessor) update(nextNum uint64) error {
             // If no logs were written successfully then the rewind wouldn't have done anything anyway.
             s.log.Error("Failed to rewind after error processing block", "block", next, "err", err)
         }
         return err
     }
+    s.log.Info("Indexed block events", "block", next, "txs", len(receipts))
+    s.onIndexed()
     return nil
 }
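chain_processor.go drops the capacity-1 out channel from the ChainProcessor struct and its constructor, and instead injects an onIndexed func() callback through NewChainProcessor, invoked after a block's events have been written; backend.go supplies su.onIndexedLocalUnsafeData for it. A minimal sketch of that constructor-injected callback, using a hypothetical Indexer type rather than the real ChainProcessor:

package main

import "fmt"

// Indexer is a hypothetical stand-in for ChainProcessor.
type Indexer struct {
	onIndexed func() // to signal to the other services that new indexed data is available
}

func NewIndexer(onIndexed func()) *Indexer {
	return &Indexer{onIndexed: onIndexed}
}

func (ix *Indexer) indexBlock(n uint64) error {
	// ... fetch the block and write its receipts/logs to the database ...
	fmt.Println("indexed block", n)
	// Fire the callback only after the block was indexed successfully.
	ix.onIndexed()
	return nil
}

func main() {
	ix := NewIndexer(func() { fmt.Println("signal: new local-unsafe data to cross-check") })
	_ = ix.indexBlock(42)
}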