vicotor / mybee / Commits / c6910097

Unverified commit c6910097, authored Jun 19, 2021 by acud, committed by GitHub on Jun 19, 2021
feat: unreserve on-demand (#2071)
parent 726b61f2

Showing 21 changed files with 1084 additions and 483 deletions (+1084 −483)
.github/patches/postagereserve_gc.patch   +1   −1
.github/workflows/beekeeper.yml           +0   −21
pkg/debugapi/postage.go                   +5   −4
pkg/localstore/gc.go                      +155 −1
pkg/localstore/gc_test.go                 +201 −4
pkg/localstore/localstore.go              +41  −11
pkg/localstore/metrics.go                 +29  −0
pkg/localstore/mode_put.go                +30  −8
pkg/localstore/mode_set.go                +0   −7
pkg/localstore/mode_set_test.go           +1   −1
pkg/localstore/reserve.go                 +46  −20
pkg/localstore/reserve_test.go            +251 −99
pkg/node/node.go                          +15  −4
pkg/postage/batchstore/export_test.go     +5   −19
pkg/postage/batchstore/mock/store.go      +3   −1
pkg/postage/batchstore/reserve.go         +103 −6
pkg/postage/batchstore/reserve_test.go    +131 −246
pkg/postage/batchstore/store.go           +52  −20
pkg/postage/batchstore/store_test.go      +7   −6
pkg/postage/interface.go                  +3   −0
pkg/postage/reservestate.go               +5   −4
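In short, the commit makes the localstore track its reserve size and evict from the reserve on demand once a capacity is reached, instead of relying on the CI-only capacity patch. Before the file-by-file diff, here is a minimal, runnable sketch of the new control flow; the store/put/evictReserve names are illustrative stand-ins, not the commit's API — the real logic lives in pkg/localstore/gc.go below:

package main

import "fmt"

// sketch types; the real fields live on localstore's DB
type store struct {
	reserveSize     uint64
	reserveCapacity uint64
	target          uint64
	trigger         chan struct{}
	// unreserve mimics the postage.UnreserveIteratorFn plumbing: the
	// batchstore calls back with (batchID, radius) pairs until told to stop
	unreserve func(cb func(batchID []byte, radius uint8) (stop bool, err error)) error
}

func (s *store) put(n uint64) {
	s.reserveSize += n
	if s.reserveSize >= s.reserveCapacity {
		select {
		case s.trigger <- struct{}{}: // wake the eviction worker
		default: // an eviction run is already pending
		}
	}
}

func (s *store) evictReserve() (evicted uint64, done bool) {
	_ = s.unreserve(func(batchID []byte, radius uint8) (bool, error) {
		evicted++ // stand-in for UnreserveBatch unpinning chunks
		s.reserveSize--
		if s.reserveSize <= s.target {
			done = true
			return true, nil
		}
		return false, nil
	})
	return evicted, done
}

func main() {
	s := &store{reserveCapacity: 10, target: 7, trigger: make(chan struct{}, 1)}
	s.unreserve = func(cb func([]byte, uint8) (bool, error)) error {
		for i := 0; ; i++ { // endless mock batch queue
			if stop, _ := cb([]byte{byte(i)}, 2); stop {
				return nil
			}
		}
	}
	s.put(10) // hits capacity, queues a trigger
	<-s.trigger
	evicted, done := s.evictReserve()
	fmt.Println("evicted", evicted, "done", done) // evicted 3 done true
}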
.github/patches/postagereserve_gc.patch

 48c48
 < var Capacity = exp2(22)
 ---
-> var Capacity = exp2(10)
+> var Capacity = exp2(6)
.github/workflows/beekeeper.yml

@@ -110,25 +110,6 @@ jobs:
         run: |
           beekeeper delete bee-cluster --cluster-name local-clef
           make beelocal ACTION=uninstall
-      - name: Apply patches
-        run: |
-          patch pkg/postage/batchstore/reserve.go .github/patches/postagereserve_gc.patch
-      - name: Prepare testing cluster (storage incentives setup)
-        run: |
-          timeout 10m make beelocal OPTS='ci skip-vet'
-      - name: Set kube config
-        run: |
-          mkdir -p ~/.kube
-          cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
-      - name: Set testing cluster (storage incentives setup)
-        run: |
-          timeout 10m make deploylocal BEEKEEPER_CLUSTER=local-gc
-      - name: Test pingpong
-        id: pingpong-3
-        run: until beekeeper check --cluster-name local-gc --checks ci-pingpong; do echo "waiting for pingpong..."; sleep .3; done
-      - name: Test gc
-        id: gc-chunk-1
-        run: beekeeper check --cluster-name local-gc --checks=ci-gc
       - name: Retag Docker image and push for cache
         if: success()
         run: |

@@ -170,8 +151,6 @@ jobs:
           if ${{ steps.settlements-2.outcome=='failure' }}; then FAILED=settlements-2; fi
           if ${{ steps.pss.outcome=='failure' }}; then FAILED=pss; fi
           if ${{ steps.soc.outcome=='failure' }}; then FAILED=soc; fi
-          if ${{ steps.pingpong-3.outcome=='failure' }}; then FAILED=pingpong-3; fi
-          if ${{ steps.gc-chunk-1.outcome=='failure' }}; then FAILED=gc-chunk-1; fi
           KEYS=$(curl -sSf -X POST https://eu.relay.tunshell.com/api/sessions)
           curl -sSf -X POST -H "Content-Type: application/json" -d "{\"text\": \"**${RUN_TYPE}** ${{ github.head_ref }}\nFailed -> \`${FAILED}\`\nDebug -> \`sh <(curl -sSf https://lets.tunshell.com/init.sh) L $(echo $KEYS | jq -r .peer2_key) \${TUNSHELL_SECRET} eu.relay.tunshell.com\`\"}" https://beehive.ethswarm.org/hooks/${{ secrets.WEBHOOK_KEY }}
           echo "Failed test: ${FAILED}"
pkg/debugapi/postage.go

@@ -12,10 +12,11 @@ import (
 )
 
 type reserveStateResponse struct {
 	Radius        uint8          `json:"radius"`
-	Available     int64          `json:"available"`
-	Outer         *bigint.BigInt `json:"outer"` // lower value limit for outer layer = the further half of chunks
-	Inner         *bigint.BigInt `json:"inner"`
+	StorageRadius uint8          `json:"storageRadius"`
+	Available     int64          `json:"available"`
+	Outer         *bigint.BigInt `json:"outer"` // lower value limit for outer layer = the further half of chunks
+	Inner         *bigint.BigInt `json:"inner"`
 }
 
 type chainStateResponse struct {
pkg/localstore/gc.go

@@ -38,6 +38,19 @@ var (
 	// gcBatchSize limits the number of chunks in a single
 	// transaction on garbage collection.
 	gcBatchSize uint64 = 2000
+
+	// reserveCollectionRatio is the ratio of the cache to evict from
+	// the reserve every time it hits the limit. If the cache size is
+	// 1000 chunks then we will evict 500 chunks from the reserve, this is
+	// not to overwhelm the cache with too many chunks which it will flush
+	// anyway.
+	reserveCollectionRatio = 0.5
+
+	// reserveEvictionBatch limits the number of chunks collected in
+	// a single reserve eviction run.
+	reserveEvictionBatch uint64 = 200
+
+	// maxPurgeablePercentageOfReserve is a ceiling on the share of the
+	// reserve to evict in case the cache size is bigger than the reserve
+	maxPurgeablePercentageOfReserve = 0.1
 )
 
 // collectGarbageWorker is a long running function that waits for

@@ -104,8 +117,11 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
 	if err != nil {
 		return 0, true, err
 	}
+	if gcSize == target {
+		return 0, true, nil
+	}
 	db.metrics.GCSize.Set(float64(gcSize))
+	defer func() {
+		db.logger.Debugf("gc collected %d, target %d, startSize %d", collectedCount, target, gcSize)
+	}()
 
 	done = true
 	first := true
 	start := time.Now()

@@ -208,6 +224,15 @@ func (db *DB) gcTarget() (target uint64) {
 	return uint64(float64(db.cacheCapacity) * gcTargetRatio)
 }
 
+func (db *DB) reserveEvictionTarget() (target uint64) {
+	targetCache := db.reserveCapacity - uint64(float64(db.cacheCapacity)*reserveCollectionRatio)
+	targetCeiling := db.reserveCapacity - uint64(float64(db.reserveCapacity)*maxPurgeablePercentageOfReserve)
+	if targetCeiling > targetCache {
+		return targetCeiling
+	}
+	return targetCache
+}
+
 // triggerGarbageCollection signals collectGarbageWorker
 // to call collectGarbage.
 func (db *DB) triggerGarbageCollection() {

@@ -218,6 +243,16 @@ func (db *DB) triggerGarbageCollection() {
 	}
 }
 
+// triggerReserveEviction signals reserveEvictionWorker
+// to evict from the reserve.
+func (db *DB) triggerReserveEviction() {
+	select {
+	case db.reserveEvictionTrigger <- struct{}{}:
+	case <-db.close:
+	default:
+	}
+}
+
 // incGCSizeInBatch changes gcSize field value
 // by change which can be negative. This function
 // must be called under batchMu lock.

@@ -243,6 +278,7 @@ func (db *DB) incGCSizeInBatch(batch *leveldb.Batch, change int64) (err error) {
 		}
 		newSize = gcSize - c
 	}
+	db.logger.Debugf("inc gc size %d change %d", gcSize, change)
 	db.gcSize.PutInBatch(batch, newSize)
 	db.metrics.GCSize.Set(float64(newSize))

@@ -253,6 +289,122 @@ func (db *DB) incGCSizeInBatch(batch *leveldb.Batch, change int64) (err error) {
 	return nil
 }
 
+// incReserveSizeInBatch changes reserveSize field value
+// by change which can be negative. This function
+// must be called under batchMu lock.
+func (db *DB) incReserveSizeInBatch(batch *leveldb.Batch, change int64) (err error) {
+	if change == 0 {
+		return nil
+	}
+	reserveSize, err := db.reserveSize.Get()
+	if err != nil && !errors.Is(err, leveldb.ErrNotFound) {
+		return err
+	}
+
+	var newSize uint64
+	if change > 0 {
+		newSize = reserveSize + uint64(change)
+	} else {
+		// 'change' is an int64 and is negative
+		// a conversion is needed with correct sign
+		c := uint64(-change)
+		if c > reserveSize {
+			// protect uint64 underflow
+			return nil
+		}
+		newSize = reserveSize - c
+	}
+	db.logger.Debugf("inc reserve size in batch %d old %d change %d", newSize, reserveSize, change)
+	db.reserveSize.PutInBatch(batch, newSize)
+	db.metrics.ReserveSize.Set(float64(newSize))
+
+	// trigger garbage collection if we reached the capacity
+	if newSize >= db.reserveCapacity {
+		db.triggerReserveEviction()
+	}
+	return nil
+}
+
+func (db *DB) reserveEvictionWorker() {
+	defer close(db.reserveEvictionWorkerDone)
+	for {
+		select {
+		case <-db.reserveEvictionTrigger:
+			evictedCount, done, err := db.evictReserve()
+			if err != nil {
+				db.logger.Errorf("localstore: evict reserve: %v", err)
+			}
+			if !done {
+				db.triggerReserveEviction()
+			}
+			if testHookEviction != nil {
+				testHookEviction(evictedCount)
+			}
+		case <-db.close:
+			return
+		}
+	}
+}
+
+func (db *DB) evictReserve() (totalEvicted uint64, done bool, err error) {
+	var target uint64
+	db.metrics.EvictReserveCounter.Inc()
+	defer func(start time.Time) {
+		if err != nil {
+			db.metrics.EvictReserveErrorCounter.Inc()
+		}
+		totalTimeMetric(db.metrics.TotalTimeEvictReserve, start)
+	}(time.Now())
+
+	target = db.reserveEvictionTarget()
+
+	db.batchMu.Lock()
+	defer db.batchMu.Unlock()
+
+	reserveSizeStart, err := db.reserveSize.Get()
+	if err != nil {
+		return 0, false, err
+	}
+	if reserveSizeStart == target {
+		return 0, true, nil
+	}
+
+	// if we don't get any entries at all then there's no use
+	// of triggering subsequent runs in case we're not done
+	totalCallbacks := 0
+	err = db.unreserveFunc(func(batchID []byte, radius uint8) (bool, error) {
+		totalCallbacks++
+		e, err := db.UnreserveBatch(batchID, radius)
+		if err != nil {
+			return true, err
+		}
+		totalEvicted += e
+		if reserveSizeStart-totalEvicted <= target {
+			done = true
+			return true, nil
+		}
+		if totalEvicted >= reserveEvictionBatch {
+			// stop collecting when we reach the eviction
+			// batch size so that we can avoid lock contention
+			// on localstore.
+			return true, nil
+		}
+		return false, nil
+	})
+	if err != nil {
+		return 0, false, err
+	}
+
+	if totalCallbacks == 0 {
+		// if we did not get any items from the batchstore
+		// it means there's no point of triggering a subsequent
+		// round
+		done = true
+	}
+	db.logger.Debugf("reserve evicted %d done %t size %d callbacks %d", totalEvicted, done, reserveSizeStart, totalCallbacks)
+	return totalEvicted, done, nil
+}
+
 // testHookCollectGarbage is a hook that can provide
 // information when a garbage collection run is done
 // and how many items it removed.

@@ -264,3 +416,5 @@ var testHookCollectGarbage func(collectedCount uint64)
 var testHookGCIteratorDone func()
 
 var withinRadiusFn func(*DB, shed.Item) bool
+
+var testHookEviction func(count uint64)
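For intuition on the two bounds in reserveEvictionTarget: targetCache aims to free half a cache's worth of chunks per run, while targetCeiling caps any single purge at 10% of the reserve; the higher target (i.e. the smaller eviction) wins. A standalone check, mirroring the function above with hypothetical capacities rather than values from the commit:

package main

import "fmt"

const (
	reserveCollectionRatio          = 0.5
	maxPurgeablePercentageOfReserve = 0.1
)

// mirrors (db *DB) reserveEvictionTarget from the diff above,
// with the capacities passed in for illustration
func reserveEvictionTarget(reserveCapacity, cacheCapacity uint64) uint64 {
	targetCache := reserveCapacity - uint64(float64(cacheCapacity)*reserveCollectionRatio)
	targetCeiling := reserveCapacity - uint64(float64(reserveCapacity)*maxPurgeablePercentageOfReserve)
	if targetCeiling > targetCache {
		return targetCeiling
	}
	return targetCache
}

func main() {
	// hypothetical capacities, chosen only to show both branches
	fmt.Println(reserveEvictionTarget(4000, 1000)) // ceiling wins: 3600 (purge 400)
	fmt.Println(reserveEvictionTarget(4000, 200))  // cache ratio wins: 3900 (purge 100)
}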
pkg/localstore/gc_test.go

@@ -28,6 +28,7 @@ import (
 	"time"
 
 	"github.com/ethersphere/bee/pkg/logging"
+	"github.com/ethersphere/bee/pkg/postage"
 	"github.com/ethersphere/bee/pkg/shed"
 	"github.com/ethersphere/bee/pkg/storage"
 	"github.com/ethersphere/bee/pkg/swarm"

@@ -279,7 +280,8 @@ func TestGCAfterPin(t *testing.T) {
 	chunkCount := 50
 
 	db := newTestDB(t, &Options{
 		Capacity: 100,
+		ReserveCapacity: 100,
 	})
 
 	pinAddrs := make([]swarm.Address, 0)

@@ -596,7 +598,8 @@ func TestSetTestHookCollectGarbage(t *testing.T) {
 func TestPinAfterMultiGC(t *testing.T) {
 	t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))
 	db := newTestDB(t, &Options{
 		Capacity: 10,
+		ReserveCapacity: 10,
 	})
 
 	pinnedChunks := make([]swarm.Address, 0)

@@ -715,7 +718,8 @@ func TestPinSyncAndAccessPutSetChunkMultipleTimes(t *testing.T) {
 		}
 	}))
 	db := newTestDB(t, &Options{
 		Capacity: 10,
+		ReserveCapacity: 100,
 	})
 	closed = db.close

@@ -959,9 +963,202 @@ func setTestHookGCIteratorDone(h func()) (reset func()) {
 func unreserveChunkBatch(t *testing.T, db *DB, radius uint8, chs ...swarm.Chunk) {
 	t.Helper()
 	for _, ch := range chs {
-		err := db.UnreserveBatch(ch.Stamp().BatchID(), radius)
+		_, err := db.UnreserveBatch(ch.Stamp().BatchID(), radius)
 		if err != nil {
 			t.Fatal(err)
 		}
 	}
 }
+
+func setTestHookEviction(h func(count uint64)) (reset func()) {
+	current := testHookEviction
+	reset = func() { testHookEviction = current }
+	testHookEviction = h
+	return reset
+}
+
+// TestReserveEvictionWorker tests that the reserve
+// eviction works correctly once the reserve hits the
+// capacity. The necessary items are then moved into the
+// gc index.
+func TestReserveEvictionWorker(t *testing.T) {
+	var (
+		chunkCount = 10
+		batchIDs   [][]byte
+		db         *DB
+		addrs      []swarm.Address
+		closed     chan struct{}
+		mtx        sync.Mutex
+	)
+	testHookEvictionChan := make(chan uint64)
+	t.Cleanup(setTestHookEviction(func(count uint64) {
+		if count == 0 {
+			return
+		}
+		select {
+		case testHookEvictionChan <- count:
+		case <-closed:
+		}
+	}))
+	t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return true }))
+
+	unres := func(f postage.UnreserveIteratorFn) error {
+		mtx.Lock()
+		defer mtx.Unlock()
+		for i := 0; i < len(batchIDs); i++ {
+			// pop an element from batchIDs, call the Unreserve
+			item := batchIDs[i]
+			// here we mock the behavior of the batchstore
+			// that would call the localstore back with the
+			// batch IDs and the radiuses from the FIFO queue
+			stop, err := f(item, 2)
+			if err != nil {
+				return err
+			}
+			if stop {
+				return nil
+			}
+			stop, err = f(item, 4)
+			if err != nil {
+				return err
+			}
+			if stop {
+				return nil
+			}
+		}
+		batchIDs = nil
+		return nil
+	}
+
+	testHookCollectGarbageChan := make(chan uint64)
+	t.Cleanup(setTestHookCollectGarbage(func(collectedCount uint64) {
+		// don't trigger if we haven't collected anything - this may
+		// result in a race condition when we inspect the gcsize below,
+		// causing the database to shut down while the cleanup happens
+		// before the correct signal has been communicated here.
+		if collectedCount == 0 {
+			return
+		}
+		select {
+		case testHookCollectGarbageChan <- collectedCount:
+		case <-db.close:
+		}
+	}))
+
+	db = newTestDB(t, &Options{
+		Capacity:        10,
+		ReserveCapacity: 10,
+		UnreserveFunc:   unres,
+	})
+
+	// insert 10 chunks that fall into the reserve, then
+	// expect first one to be evicted
+	for i := 0; i < chunkCount; i++ {
+		ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false)
+		_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = db.Set(context.Background(), storage.ModeSetSync, ch.Address())
+		if err != nil {
+			t.Fatal(err)
+		}
+		mtx.Lock()
+		addrs = append(addrs, ch.Address())
+		batchIDs = append(batchIDs, ch.Stamp().BatchID())
+		mtx.Unlock()
+	}
+
+	evictTarget := db.reserveEvictionTarget()
+
+	for {
+		select {
+		case <-testHookEvictionChan:
+		case <-time.After(10 * time.Second):
+			t.Fatal("eviction timeout")
+		}
+		reserveSize, err := db.reserveSize.Get()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if reserveSize == evictTarget {
+			break
+		}
+	}
+
+	t.Run("pull index count", newItemsCountTest(db.pullIndex, chunkCount))
+	t.Run("postage index count", newItemsCountTest(db.postageIndexIndex, chunkCount))
+	t.Run("postage radius count", newItemsCountTest(db.postageRadiusIndex, 1))
+	t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
+	t.Run("gc size", newIndexGCSizeTest(db))
+	t.Run("all chunks should be accessible", func(t *testing.T) {
+		for _, a := range addrs {
+			if _, err := db.Get(context.Background(), storage.ModeGetRequest, a); err != nil {
+				t.Errorf("got error %v, want none", err)
+			}
+		}
+	})
+
+	for i := 0; i < chunkCount-1; i++ {
+		ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 3).WithBatch(2, 3, 2, false)
+		_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = db.Set(context.Background(), storage.ModeSetSync, ch.Address())
+		if err != nil {
+			t.Fatal(err)
+		}
+		mtx.Lock()
+		addrs = append(addrs, ch.Address())
+		batchIDs = append(batchIDs, ch.Stamp().BatchID())
+		mtx.Unlock()
+	}
+
+	for {
+		select {
+		case <-testHookEvictionChan:
+		case <-time.After(10 * time.Second):
+			t.Fatal("eviction timeout")
+		}
+		reserveSize, err := db.reserveSize.Get()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if reserveSize == evictTarget {
+			break
+		}
+	}
+
+	gcTarget := db.gcTarget()
+
+	for {
+		select {
+		case <-testHookCollectGarbageChan:
+		case <-time.After(10 * time.Second):
+			t.Error("collect garbage timeout")
+		}
+		gcSize, err := db.gcSize.Get()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if gcSize == gcTarget {
+			break
+		}
+	}
+
+	t.Run("9/10 of the first chunks should be accessible", func(t *testing.T) {
+		has := 0
+		for _, a := range addrs[:10] {
+			if _, err := db.Get(context.Background(), storage.ModeGetRequest, a); err == nil {
+				has++
+			}
+		}
+		if has != 9 {
+			t.Errorf("got %d chunks, want 9", has)
+		}
+	})
+}
pkg/localstore/localstore.go

@@ -106,13 +106,24 @@ type DB struct {
 	// field that stores number of items in gc index
 	gcSize shed.Uint64Field
 
+	// field that stores the size of the reserve
+	reserveSize shed.Uint64Field
+
 	// garbage collection is triggered when gcSize exceeds
 	// the cacheCapacity value
 	cacheCapacity uint64
 
+	// the size of the reserve in chunks
+	reserveCapacity uint64
+
+	unreserveFunc func(postage.UnreserveIteratorFn) error
+
 	// triggers garbage collection event loop
 	collectGarbageTrigger chan struct{}
 
+	// triggers reserve eviction event loop
+	reserveEvictionTrigger chan struct{}
+
 	// a buffered channel acting as a semaphore
 	// to limit the maximal number of goroutines
 	// created by Getters to call updateGC function

@@ -142,7 +153,8 @@ type DB struct {
 	// protect Close method from exiting before
 	// garbage collection and gc size write workers
 	// are done
 	collectGarbageWorkerDone chan struct{}
+	reserveEvictionWorkerDone chan struct{}
 
 	// wait for all subscriptions to finish before closing
 	// underlying leveldb to prevent possible panics from

@@ -159,6 +171,11 @@ type Options struct {
 	// Capacity is a limit that triggers garbage collection when
 	// number of items in gcIndex equals or exceeds it.
 	Capacity uint64
+	// ReserveCapacity is the capacity of the reserve.
+	ReserveCapacity uint64
+	// UnreserveFunc is an iterator needed to facilitate reserve
+	// eviction once ReserveCapacity is reached.
+	UnreserveFunc func(postage.UnreserveIteratorFn) error
 	// OpenFilesLimit defines the upper bound of open files that the
 	// the localstore should maintain at any point of time. It is
 	// passed on to the shed constructor.

@@ -184,24 +201,29 @@ func New(path string, baseKey []byte, ss storage.StateStorer, o *Options, logger
 	if o == nil {
 		// default options
 		o = &Options{
-			Capacity: defaultCacheCapacity,
+			Capacity:        defaultCacheCapacity,
+			ReserveCapacity: uint64(batchstore.Capacity),
 		}
 	}
 
 	db = &DB{
-		stateStore:    ss,
-		cacheCapacity: o.Capacity,
-		baseKey:       baseKey,
-		tags:          o.Tags,
+		stateStore:      ss,
+		cacheCapacity:   o.Capacity,
+		reserveCapacity: o.ReserveCapacity,
+		unreserveFunc:   o.UnreserveFunc,
+		baseKey:         baseKey,
+		tags:            o.Tags,
 		// channel collectGarbageTrigger
 		// needs to be buffered with the size of 1
 		// to signal another event if it
 		// is triggered during already running function
-		collectGarbageTrigger:    make(chan struct{}, 1),
-		close:                    make(chan struct{}),
-		collectGarbageWorkerDone: make(chan struct{}),
-		metrics:                  newMetrics(),
-		logger:                   logger,
+		collectGarbageTrigger:     make(chan struct{}, 1),
+		reserveEvictionTrigger:    make(chan struct{}, 1),
+		close:                     make(chan struct{}),
+		collectGarbageWorkerDone:  make(chan struct{}),
+		reserveEvictionWorkerDone: make(chan struct{}),
+		metrics:                   newMetrics(),
+		logger:                    logger,
 	}
 	if db.cacheCapacity == 0 {
 		db.cacheCapacity = defaultCacheCapacity

@@ -264,6 +286,12 @@ func New(path string, baseKey []byte, ss storage.StateStorer, o *Options, logger
 		return nil, err
 	}
 
+	// reserve size
+	db.reserveSize, err = db.shed.NewUint64Field("reserve-size")
+	if err != nil {
+		return nil, err
+	}
+
 	// Index storing actual chunk address, data and bin id.
 	headerSize := 16 + postage.StampSize
 	db.retrievalDataIndex, err = db.shed.NewIndex("Address->StoreTimestamp|BinID|BatchID|BatchIndex|Sig|Data", shed.IndexFuncs{

@@ -523,6 +551,7 @@ func New(path string, baseKey []byte, ss storage.StateStorer, o *Options, logger
 	// start garbage collection worker
 	go db.collectGarbageWorker()
+	go db.reserveEvictionWorker()
 
 	return db, nil
 }

@@ -538,6 +567,7 @@ func (db *DB) Close() (err error) {
 		// wait for gc worker to
 		// return before closing the shed
 		<-db.collectGarbageWorkerDone
+		<-db.reserveEvictionWorkerDone
 		close(done)
 	}()
 	select {
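Options.ReserveCapacity and Options.UnreserveFunc are supplied by the node wiring (pkg/node/node.go in this commit, whose diff is not shown on this page), which hands the localstore the batchstore's unreserve iterator. A hedged sketch of that contract, with a stub queue standing in for the batchstore; the type names here mirror the diff but the wiring is illustrative:

package main

import "fmt"

// stand-in for postage.UnreserveIteratorFn from the diff:
// func(batchID []byte, radius uint8) (bool, error)
type UnreserveIteratorFn func(batchID []byte, radius uint8) (bool, error)

type Options struct {
	Capacity        uint64
	ReserveCapacity uint64
	UnreserveFunc   func(UnreserveIteratorFn) error
}

func main() {
	// a stub batchstore: replays one queued (batch, radius) pair
	queue := [][]byte{{0x01}}
	opts := &Options{
		Capacity:        1_000_000,
		ReserveCapacity: 1 << 22, // mirrors batchstore.Capacity = exp2(22)
		UnreserveFunc: func(f UnreserveIteratorFn) error {
			for _, id := range queue {
				if stop, err := f(id, 2); err != nil || stop {
					return err
				}
			}
			return nil
		},
	}
	// the localstore calls this from evictReserve; here we just invoke it
	_ = opts.UnreserveFunc(func(id []byte, radius uint8) (bool, error) {
		fmt.Printf("unreserve batch %x up to radius %d\n", id, radius)
		return false, nil
	})
}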
pkg/localstore/metrics.go

@@ -59,6 +59,11 @@ type metrics struct {
 	GCSize                  prometheus.Gauge
 	GCStoreTimeStamps       prometheus.Gauge
 	GCStoreAccessTimeStamps prometheus.Gauge
+
+	ReserveSize              prometheus.Gauge
+	EvictReserveCounter      prometheus.Counter
+	EvictReserveErrorCounter prometheus.Counter
+	TotalTimeEvictReserve    prometheus.Counter
 }
 
 func newMetrics() metrics {

@@ -343,6 +348,30 @@ func newMetrics() metrics {
 			Name:      "gc_access_time_stamp",
 			Help:      "Access timestamp in Garbage collection iteration.",
 		}),
+		ReserveSize: prometheus.NewGauge(prometheus.GaugeOpts{
+			Namespace: m.Namespace,
+			Subsystem: subsystem,
+			Name:      "reserve_size",
+			Help:      "Number of elements in reserve.",
+		}),
+		EvictReserveCounter: prometheus.NewCounter(prometheus.CounterOpts{
+			Namespace: m.Namespace,
+			Subsystem: subsystem,
+			Name:      "evict_reserve_count",
+			Help:      "number of times the evict reserve worker was invoked",
+		}),
+		EvictReserveErrorCounter: prometheus.NewCounter(prometheus.CounterOpts{
+			Namespace: m.Namespace,
+			Subsystem: subsystem,
+			Name:      "evict_reserve_err_count",
+			Help:      "number of times evict reserve got an error",
+		}),
+		TotalTimeEvictReserve: prometheus.NewCounter(prometheus.CounterOpts{
+			Namespace: m.Namespace,
+			Subsystem: subsystem,
+			Name:      "evict_reserve_total_time",
+			Help:      "total time spent evicting from reserve",
+		}),
 	}
 }
pkg/localstore/mode_put.go

@@ -219,6 +219,19 @@ func (db *DB) putRequest(batch *leveldb.Batch, binIDs map[uint8]uint64, item she
 	if err != nil {
 		return false, 0, err
 	}
+
+	radius, err := db.postageRadiusIndex.Get(item)
+	if err != nil {
+		if !errors.Is(err, leveldb.ErrNotFound) {
+			return false, 0, err
+		}
+	} else {
+		if db.po(swarm.NewAddress(item.Address)) >= radius.Radius {
+			if err := db.incReserveSizeInBatch(batch, -1); err != nil {
+				return false, 0, err
+			}
+		}
+	}
 
 	item.StoreTimestamp = now()

@@ -353,6 +366,18 @@ func (db *DB) putSync(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.I
 	if err != nil {
 		return false, 0, err
 	}
+	radius, err := db.postageRadiusIndex.Get(item)
+	if err != nil {
+		if !errors.Is(err, leveldb.ErrNotFound) {
+			return false, 0, err
+		}
+	} else {
+		if db.po(swarm.NewAddress(item.Address)) >= radius.Radius {
+			if err := db.incReserveSizeInBatch(batch, -1); err != nil {
+				return false, 0, err
+			}
+		}
+	}
 
 	item.StoreTimestamp = now()

@@ -393,15 +418,12 @@ func (db *DB) putSync(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.I
 // preserveOrCache is a helper function used to add chunks to either a pinned reserve or gc cache
 // (the retrieval access index and the gc index)
 func (db *DB) preserveOrCache(batch *leveldb.Batch, item shed.Item, forcePin, forceCache bool) (gcSizeChange int64, err error) {
-	// item needs to be populated with Radius
-	item2, err := db.postageRadiusIndex.Get(item)
-	if err != nil {
-		// if there's an error, assume the chunk needs to be GCd
-		forceCache = true
-	} else {
-		item.Radius = item2.Radius
-	}
 	if !forceCache && (withinRadiusFn(db, item) || forcePin) {
+		if !forcePin {
+			if err := db.incReserveSizeInBatch(batch, 1); err != nil {
+				return 0, err
+			}
+		}
 		return db.setPin(batch, item)
 	}
pkg/localstore/mode_set.go

@@ -19,7 +19,6 @@ package localstore
 import (
 	"context"
 	"errors"
-	"fmt"
 	"time"
 
 	"github.com/ethersphere/bee/pkg/shed"

@@ -193,12 +192,6 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address) (gcSizeChange in
 	} else {
 		item.AccessTimestamp = i1.AccessTimestamp
 	}
-	// item needs to be populated with Radius
-	item2, err := db.postageRadiusIndex.Get(item)
-	if err != nil {
-		return 0, fmt.Errorf("postage chunks index: %w", err)
-	}
-	item.Radius = item2.Radius
 
 	return db.preserveOrCache(batch, item, false, false)
 }
pkg/localstore/mode_set_test.go

@@ -71,7 +71,7 @@ func TestModeSetRemove_WithSync(t *testing.T) {
 	var chs []swarm.Chunk
 	for i := 0; i < tc.count; i++ {
 		ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false)
-		err := db.UnreserveBatch(ch.Stamp().BatchID(), 2)
+		_, err := db.UnreserveBatch(ch.Stamp().BatchID(), 2)
 		if err != nil {
 			t.Fatal(err)
 		}
pkg/localstore/reserve.go

@@ -16,29 +16,26 @@ import (
 // UnreserveBatch atomically unpins chunks of a batch in proximity order upto and including po.
 // Unpinning will result in all chunks with pincounter 0 to be put in the gc index
 // so if a chunk was only pinned by the reserve, unreserving it will make it gc-able.
-func (db *DB) UnreserveBatch(id []byte, radius uint8) error {
+func (db *DB) UnreserveBatch(id []byte, radius uint8) (evicted uint64, err error) {
+	db.batchMu.Lock()
+	defer db.batchMu.Unlock()
+
 	var (
 		item = shed.Item{
 			BatchID: id,
 		}
-		batch = new(leveldb.Batch)
+		batch     = new(leveldb.Batch)
+		oldRadius = radius
 	)
 	i, err := db.postageRadiusIndex.Get(item)
 	if err != nil {
 		if !errors.Is(err, leveldb.ErrNotFound) {
-			return err
+			return 0, err
 		}
-		item.Radius = radius
-		if err := db.postageRadiusIndex.PutInBatch(batch, item); err != nil {
-			return err
-		}
-		return db.shed.WriteBatch(batch)
+	} else {
+		oldRadius = i.Radius
 	}
-	oldRadius := i.Radius
-	var gcSizeChange int64 // number to add or subtract from gcSize
+	var (
+		gcSizeChange      int64 // number to add or subtract from gcSize and reserveSize
+		reserveSizeChange uint64
+	)
 	unpin := func(item shed.Item) (stop bool, err error) {
 		addr := swarm.NewAddress(item.Address)
 		c, err := db.setUnpin(batch, addr)

@@ -50,6 +47,13 @@ func (db *DB) UnreserveBatch(id []byte, radius uint8) (evicted uint64, err error) {
 			// a dirty shutdown
 			db.logger.Tracef("unreserve set unpin chunk %s: %v", addr.String(), err)
 		}
-	}
+	} else {
+		// we need to do this because a user might pin a chunk on top of
+		// the reserve pinning. when we unpin due to an unreserve call, then
+		// we should logically deduct the chunk anyway from the reserve size,
+		// otherwise the reserve size leaks, since c returned from setUnpin
+		// will be zero.
+		reserveSizeChange++
+	}
 	gcSizeChange += c

@@ -60,38 +64,60 @@ func (db *DB) UnreserveBatch(id []byte, radius uint8) (evicted uint64, err error) {
 	for bin := oldRadius; bin < radius; bin++ {
 		err := db.postageChunksIndex.Iterate(unpin, &shed.IterateOptions{Prefix: append(id, bin)})
 		if err != nil {
-			return err
+			return 0, err
 		}
 		// adjust gcSize
 		if err := db.incGCSizeInBatch(batch, gcSizeChange); err != nil {
-			return err
+			return 0, err
 		}
 		item.Radius = bin
 		if err := db.postageRadiusIndex.PutInBatch(batch, item); err != nil {
-			return err
+			return 0, err
 		}
 		if bin == swarm.MaxPO {
 			if err := db.postageRadiusIndex.DeleteInBatch(batch, item); err != nil {
-				return err
+				return 0, err
 			}
 		}
 		if err := db.shed.WriteBatch(batch); err != nil {
-			return err
+			return 0, err
 		}
+		db.logger.Debugf("unreserveBatch gc change %d reserve size change %d", gcSizeChange, reserveSizeChange)
 		batch = new(leveldb.Batch)
 		gcSizeChange = 0
 	}
 
+	if radius != swarm.MaxPO+1 {
+		item.Radius = radius
+		if err := db.postageRadiusIndex.PutInBatch(batch, item); err != nil {
+			return 0, err
+		}
+		if err := db.shed.WriteBatch(batch); err != nil {
+			return 0, err
+		}
+	}
+
 	gcSize, err := db.gcSize.Get()
 	if err != nil && !errors.Is(err, leveldb.ErrNotFound) {
-		return err
+		return 0, err
 	}
+
+	if reserveSizeChange > 0 {
+		batch = new(leveldb.Batch)
+		if err := db.incReserveSizeInBatch(batch, -int64(reserveSizeChange)); err != nil {
+			return 0, err
+		}
+		if err := db.shed.WriteBatch(batch); err != nil {
+			return 0, err
+		}
+	}
+
 	// trigger garbage collection if we reached the capacity
 	if gcSize >= db.cacheCapacity {
 		db.triggerGarbageCollection()
 	}
-	return nil
+
+	return reserveSizeChange, nil
 }
 
 func withinRadius(db *DB, item shed.Item) bool {
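The unpin callback's accounting is the subtle part of this file: setUnpin's gcSizeChange only counts chunks whose pin counter reached zero, while reserveSizeChange counts every chunk the reserve releases, so user-pinned chunks don't leak reserve size; UnreserveBatch returns reserveSizeChange as the evicted count that evictReserve accumulates. A toy model of that bookkeeping (illustrative only, with setUnpin reduced to a counter decrement):

package main

import "fmt"

// Toy model: pinCounts holds per-chunk pin counters; the reserve holds one
// pin on each chunk. unreserve releases that pin for every chunk of a batch.
func unreserve(pinCounts []int) (gcSizeChange int64, reserveSizeChange uint64) {
	for i := range pinCounts {
		pinCounts[i]-- // setUnpin: the reserve releases its pin
		if pinCounts[i] == 0 {
			gcSizeChange++ // pin counter hit zero: chunk becomes gc-able
		}
		// deduct from the reserve either way; a chunk a user also pinned
		// stays out of the gc index but still left the reserve
		reserveSizeChange++
	}
	return gcSizeChange, reserveSizeChange
}

func main() {
	// three chunks pinned only by the reserve, one also pinned by a user
	gc, evicted := unreserve([]int{1, 1, 1, 2})
	fmt.Println(gc, evicted) // 3 4 — UnreserveBatch would return evicted=4
}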
pkg/localstore/reserve_test.go
View file @
c6910097
...
@@ -7,13 +7,14 @@ package localstore
...
@@ -7,13 +7,14 @@ package localstore
import
(
import
(
"context"
"context"
"errors"
"errors"
"sync"
"testing"
"testing"
"time"
"time"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/syndtr/goleveldb/leveldb"
)
)
// TestDB_ReserveGC_AllOutOfRadius tests that when all chunks fall outside of
// TestDB_ReserveGC_AllOutOfRadius tests that when all chunks fall outside of
...
@@ -30,21 +31,19 @@ func TestDB_ReserveGC_AllOutOfRadius(t *testing.T) {
...
@@ -30,21 +31,19 @@ func TestDB_ReserveGC_AllOutOfRadius(t *testing.T) {
case
<-
closed
:
case
<-
closed
:
}
}
}))
}))
t
.
Cleanup
(
setWithinRadiusFunc
(
func
(
*
DB
,
shed
.
Item
)
bool
{
return
false
}))
db
:=
newTestDB
(
t
,
&
Options
{
db
:=
newTestDB
(
t
,
&
Options
{
Capacity
:
100
,
Capacity
:
100
,
ReserveCapacity
:
200
,
})
})
closed
=
db
.
close
closed
=
db
.
close
addrs
:=
make
([]
swarm
.
Address
,
0
)
addrs
:=
make
([]
swarm
.
Address
,
0
)
for
i
:=
0
;
i
<
chunkCount
;
i
++
{
for
i
:=
0
;
i
<
chunkCount
;
i
++
{
ch
:=
generateTestRandomChunkAt
(
swarm
.
NewAddress
(
db
.
baseKey
),
2
)
.
WithBatch
(
3
,
3
,
2
,
false
)
ch
:=
generateTestRandomChunkAt
(
swarm
.
NewAddress
(
db
.
baseKey
),
2
)
.
WithBatch
(
5
,
3
,
2
,
false
)
err
:=
db
.
UnreserveBatch
(
ch
.
Stamp
()
.
BatchID
(),
4
)
_
,
err
:=
db
.
Put
(
context
.
Background
(),
storage
.
ModePutUpload
,
ch
)
if
err
!=
nil
{
t
.
Fatal
(
err
)
}
_
,
err
=
db
.
Put
(
context
.
Background
(),
storage
.
ModePutUpload
,
ch
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -79,7 +78,7 @@ func TestDB_ReserveGC_AllOutOfRadius(t *testing.T) {
...
@@ -79,7 +78,7 @@ func TestDB_ReserveGC_AllOutOfRadius(t *testing.T) {
// postageRadiusIndex gets removed only when the batches are called with evict on MaxPO+1
// postageRadiusIndex gets removed only when the batches are called with evict on MaxPO+1
// therefore, the expected index count here is larger than one would expect.
// therefore, the expected index count here is larger than one would expect.
t
.
Run
(
"postage radius index count"
,
newItemsCountTest
(
db
.
postageRadiusIndex
,
chunkCount
))
t
.
Run
(
"postage radius index count"
,
newItemsCountTest
(
db
.
postageRadiusIndex
,
0
))
t
.
Run
(
"gc index count"
,
newItemsCountTest
(
db
.
gcIndex
,
int
(
gcTarget
)))
t
.
Run
(
"gc index count"
,
newItemsCountTest
(
db
.
gcIndex
,
int
(
gcTarget
)))
...
@@ -124,9 +123,36 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
...
@@ -124,9 +123,36 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
case
<-
closed
:
case
<-
closed
:
}
}
}))
}))
var
(
batchIDs
[][]
byte
unreserveCalled
bool
mtx
sync
.
Mutex
)
unres
:=
func
(
f
postage
.
UnreserveIteratorFn
)
error
{
mtx
.
Lock
()
defer
mtx
.
Unlock
()
unreserveCalled
=
true
for
i
:=
0
;
i
<
len
(
batchIDs
);
i
++
{
// pop an element from batchIDs, call the Unreserve
item
:=
batchIDs
[
i
]
// here we mock the behavior of the batchstore
// that would call the localstore back with the
// batch IDs and the radiuses from the FIFO queue
stop
,
err
:=
f
(
item
,
4
)
if
err
!=
nil
{
return
err
}
if
stop
{
return
nil
}
}
return
nil
}
db
:=
newTestDB
(
t
,
&
Options
{
db
:=
newTestDB
(
t
,
&
Options
{
Capacity
:
100
,
Capacity
:
100
,
ReserveCapacity
:
151
,
UnreserveFunc
:
unres
,
})
})
closed
=
db
.
close
closed
=
db
.
close
...
@@ -134,11 +160,7 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
...
@@ -134,11 +160,7 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
for
i
:=
0
;
i
<
chunkCount
;
i
++
{
for
i
:=
0
;
i
<
chunkCount
;
i
++
{
ch
:=
generateTestRandomChunkAt
(
swarm
.
NewAddress
(
db
.
baseKey
),
2
)
.
WithBatch
(
2
,
3
,
2
,
false
)
ch
:=
generateTestRandomChunkAt
(
swarm
.
NewAddress
(
db
.
baseKey
),
2
)
.
WithBatch
(
2
,
3
,
2
,
false
)
err
:=
db
.
UnreserveBatch
(
ch
.
Stamp
()
.
BatchID
(),
2
)
_
,
err
:=
db
.
Put
(
context
.
Background
(),
storage
.
ModePutUpload
,
ch
)
if
err
!=
nil
{
t
.
Fatal
(
err
)
}
_
,
err
=
db
.
Put
(
context
.
Background
(),
storage
.
ModePutUpload
,
ch
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -146,8 +168,10 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
...
@@ -146,8 +168,10 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
mtx
.
Lock
()
addrs
=
append
(
addrs
,
ch
.
Address
())
addrs
=
append
(
addrs
,
ch
.
Address
())
batchIDs
=
append
(
batchIDs
,
ch
.
Stamp
()
.
BatchID
())
mtx
.
Unlock
()
}
}
select
{
select
{
...
@@ -160,7 +184,7 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
...
@@ -160,7 +184,7 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
t
.
Run
(
"postage chunks index count"
,
newItemsCountTest
(
db
.
postageChunksIndex
,
chunkCount
))
t
.
Run
(
"postage chunks index count"
,
newItemsCountTest
(
db
.
postageChunksIndex
,
chunkCount
))
t
.
Run
(
"postage radius index count"
,
newItemsCountTest
(
db
.
postageRadiusIndex
,
chunkCount
))
t
.
Run
(
"postage radius index count"
,
newItemsCountTest
(
db
.
postageRadiusIndex
,
0
))
t
.
Run
(
"gc index count"
,
newItemsCountTest
(
db
.
gcIndex
,
0
))
t
.
Run
(
"gc index count"
,
newItemsCountTest
(
db
.
gcIndex
,
0
))
...
@@ -174,37 +198,84 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
...
@@ -174,37 +198,84 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
}
}
}
}
})
})
mtx
.
Lock
()
defer
mtx
.
Unlock
()
if
unreserveCalled
{
t
.
Fatal
(
"unreserveCalled but should not have"
)
}
}
}
// TestDB_ReserveGC_Unreserve tests that after calling UnreserveBatch
// TestDB_ReserveGC_Unreserve tests that after calling UnreserveBatch
// with a certain radius change, the correct chunks get put into the
// with a certain radius change, the correct chunks get put into the
// GC index and eventually get garbage collected.
// GC index and eventually get garbage collected.
// batch radius, none get collected.
func
TestDB_ReserveGC_Unreserve
(
t
*
testing
.
T
)
{
func
TestDB_ReserveGC_Unreserve
(
t
*
testing
.
T
)
{
chunkCount
:=
1
5
0
chunkCount
:=
1
0
0
var
closed
chan
struct
{}
var
closed
chan
struct
{}
testHookCollectGarbageChan
:=
make
(
chan
uint64
)
testHookCollectGarbageChan
:=
make
(
chan
uint64
)
testHookEvictChan
:=
make
(
chan
uint64
)
t
.
Cleanup
(
setTestHookCollectGarbage
(
func
(
collectedCount
uint64
)
{
t
.
Cleanup
(
setTestHookCollectGarbage
(
func
(
collectedCount
uint64
)
{
select
{
select
{
case
testHookCollectGarbageChan
<-
collectedCount
:
case
testHookCollectGarbageChan
<-
collectedCount
:
case
<-
closed
:
case
<-
closed
:
}
}
}))
}))
t
.
Cleanup
(
setTestHookEviction
(
func
(
collectedCount
uint64
)
{
select
{
case
testHookEvictChan
<-
collectedCount
:
case
<-
closed
:
}
}))
var
(
mtx
sync
.
Mutex
batchIDs
[][]
byte
addrs
[]
swarm
.
Address
)
unres
:=
func
(
f
postage
.
UnreserveIteratorFn
)
error
{
mtx
.
Lock
()
defer
mtx
.
Unlock
()
for
i
:=
0
;
i
<
len
(
batchIDs
);
i
++
{
// pop an element from batchIDs, call the Unreserve
item
:=
batchIDs
[
i
]
// here we mock the behavior of the batchstore
// that would call the localstore back with the
// batch IDs and the radiuses from the FIFO queue
stop
,
err
:=
f
(
item
,
2
)
if
err
!=
nil
{
return
err
}
if
stop
{
return
nil
}
stop
,
err
=
f
(
item
,
4
)
if
err
!=
nil
{
return
err
}
if
stop
{
return
nil
}
}
batchIDs
=
nil
return
nil
}
db
:=
newTestDB
(
t
,
&
Options
{
db
:=
newTestDB
(
t
,
&
Options
{
Capacity
:
100
,
Capacity
:
100
,
// once reaching 150 in the reserve, we will evict
// half the size of the cache from the reserve, so 50 chunks
ReserveCapacity
:
100
,
UnreserveFunc
:
unres
,
})
})
closed
=
db
.
close
closed
=
db
.
close
// put the first chunkCount chunks within radius
// put chunksCount chunks within radius. this
// will cause reserve eviction of 10 chunks into
// the cache. gc of the cache is still not triggered
for
i
:=
0
;
i
<
chunkCount
;
i
++
{
for
i
:=
0
;
i
<
chunkCount
;
i
++
{
ch
:=
generateTestRandomChunkAt
(
swarm
.
NewAddress
(
db
.
baseKey
),
2
)
.
WithBatch
(
2
,
3
,
2
,
false
)
ch
:=
generateTestRandomChunkAt
(
swarm
.
NewAddress
(
db
.
baseKey
),
2
)
.
WithBatch
(
2
,
3
,
2
,
false
)
err
:=
db
.
UnreserveBatch
(
ch
.
Stamp
()
.
BatchID
(),
2
)
_
,
err
:=
db
.
Put
(
context
.
Background
(),
storage
.
ModePutUpload
,
ch
)
if
err
!=
nil
{
t
.
Fatal
(
err
)
}
_
,
err
=
db
.
Put
(
context
.
Background
(),
storage
.
ModePutUpload
,
ch
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -212,51 +283,62 @@ func TestDB_ReserveGC_Unreserve(t *testing.T) {
...
@@ -212,51 +283,62 @@ func TestDB_ReserveGC_Unreserve(t *testing.T) {
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
mtx
.
Lock
()
batchIDs
=
append
(
batchIDs
,
ch
.
Stamp
()
.
BatchID
())
addrs
=
append
(
addrs
,
ch
.
Address
())
mtx
.
Unlock
()
}
}
var
po4Chs
[]
swarm
.
Chunk
// wait for the first eviction to finish, otherwise
for
i
:=
0
;
i
<
chunkCount
;
i
++
{
// we collect some of the next chunks that get added
ch
:=
generateTestRandomChunkAt
(
swarm
.
NewAddress
(
db
.
baseKey
),
4
)
.
WithBatch
(
2
,
3
,
2
,
false
)
// which results in inconsistencies
err
:=
db
.
UnreserveBatch
(
ch
.
Stamp
()
.
BatchID
(),
2
)
evictTarget
:=
db
.
reserveEvictionTarget
()
if
err
!=
nil
{
t
.
Fatal
(
err
)
for
{
select
{
case
<-
testHookEvictChan
:
case
<-
time
.
After
(
10
*
time
.
Second
)
:
t
.
Fatal
(
"collect garbage timeout"
)
}
}
_
,
err
=
db
.
Put
(
context
.
Background
(),
storage
.
ModePutUpload
,
ch
)
resSize
,
err
:=
db
.
reserveSize
.
Get
(
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
err
=
db
.
Set
(
context
.
Background
(),
storage
.
ModeSetSync
,
ch
.
Address
())
if
resSize
==
evictTarget
{
if
err
!=
nil
{
break
t
.
Fatal
(
err
)
}
}
po4Chs
=
append
(
po4Chs
,
ch
)
}
}
var
gcChs
[]
swarm
.
Chunk
// insert another 90, this will trigger gc
for
i
:=
0
;
i
<
100
;
i
++
{
for
i
:=
0
;
i
<
90
;
i
++
{
gcch
:=
generateTestRandomChunkAt
(
swarm
.
NewAddress
(
db
.
baseKey
),
2
)
.
WithBatch
(
2
,
3
,
2
,
false
)
ch
:=
generateTestRandomChunkAt
(
swarm
.
NewAddress
(
db
.
baseKey
),
2
)
.
WithBatch
(
2
,
3
,
2
,
false
)
err
:=
db
.
UnreserveBatch
(
gcch
.
Stamp
()
.
BatchID
(),
2
)
_
,
err
:=
db
.
Put
(
context
.
Background
(),
storage
.
ModePutUpload
,
ch
)
if
err
!=
nil
{
t
.
Fatal
(
err
)
}
_
,
err
=
db
.
Put
(
context
.
Background
(),
storage
.
ModePutUpload
,
gcch
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
err
=
db
.
Set
(
context
.
Background
(),
storage
.
ModeSetSync
,
gc
ch
.
Address
())
err
=
db
.
Set
(
context
.
Background
(),
storage
.
ModeSetSync
,
ch
.
Address
())
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
gcChs
=
append
(
gcChs
,
gcch
)
mtx
.
Lock
()
batchIDs
=
append
(
batchIDs
,
ch
.
Stamp
()
.
BatchID
())
addrs
=
append
(
addrs
,
ch
.
Address
())
mtx
.
Unlock
()
}
}
// radius increases from 2 to 3, chunk is in PO 2, therefore it should be
for
{
// GCd
select
{
for
_
,
ch
:=
range
gcChs
{
case
<-
testHookEvictChan
:
err
:=
db
.
UnreserveBatch
(
ch
.
Stamp
()
.
BatchID
(),
3
)
case
<-
time
.
After
(
10
*
time
.
Second
)
:
t
.
Fatal
(
"collect garbage timeout"
)
}
resSize
,
err
:=
db
.
reserveSize
.
Get
()
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
if
resSize
==
evictTarget
{
break
}
}
}
gcTarget
:=
db
.
gcTarget
()
gcTarget
:=
db
.
gcTarget
()
...
@@ -275,21 +357,21 @@ func TestDB_ReserveGC_Unreserve(t *testing.T) {
...
@@ -275,21 +357,21 @@ func TestDB_ReserveGC_Unreserve(t *testing.T) {
break
break
}
}
}
}
t
.
Run
(
"pull index count"
,
newItemsCountTest
(
db
.
pullIndex
,
chunkCount
*
2
+
9
0
))
t
.
Run
(
"pull index count"
,
newItemsCountTest
(
db
.
pullIndex
,
chunkCount
+
90
-
1
0
))
t
.
Run
(
"postage chunks index count"
,
newItemsCountTest
(
db
.
postageChunksIndex
,
chunkCount
*
2
+
9
0
))
t
.
Run
(
"postage chunks index count"
,
newItemsCountTest
(
db
.
postageChunksIndex
,
chunkCount
+
90
-
1
0
))
// postageRadiusIndex gets removed only when the batches are called with evict on MaxPO+1
// postageRadiusIndex gets removed only when the batches are called with evict on MaxPO+1
// therefore, the expected index count here is larger than one would expect.
// therefore, the expected index count here is larger than one would expect.
t
.
Run
(
"postage radius index count"
,
newItemsCountTest
(
db
.
postageRadiusIndex
,
chunkCount
*
2
+
100
))
t
.
Run
(
"postage radius index count"
,
newItemsCountTest
(
db
.
postageRadiusIndex
,
chunkCount
))
t
.
Run
(
"gc index count"
,
newItemsCountTest
(
db
.
gcIndex
,
90
))
t
.
Run
(
"gc index count"
,
newItemsCountTest
(
db
.
gcIndex
,
90
))
t
.
Run
(
"gc size"
,
newIndexGCSizeTest
(
db
))
t
.
Run
(
"gc size"
,
newIndexGCSizeTest
(
db
))
t
.
Run
(
"first ten unreserved chunks should not be accessible"
,
func
(
t
*
testing
.
T
)
{
t
.
Run
(
"first ten unreserved chunks should not be accessible"
,
func
(
t
*
testing
.
T
)
{
for
_
,
ch
:=
range
gcCh
s
[
:
10
]
{
for
_
,
a
:=
range
addr
s
[
:
10
]
{
_
,
err
:=
db
.
Get
(
context
.
Background
(),
storage
.
ModeGetRequest
,
ch
.
Address
()
)
_
,
err
:=
db
.
Get
(
context
.
Background
(),
storage
.
ModeGetRequest
,
a
)
if
err
==
nil
{
if
err
==
nil
{
t
.
Error
(
"got no error, want NotFound"
)
t
.
Error
(
"got no error, want NotFound"
)
}
}
...
@@ -297,17 +379,8 @@ func TestDB_ReserveGC_Unreserve(t *testing.T) {
...
@@ -297,17 +379,8 @@ func TestDB_ReserveGC_Unreserve(t *testing.T) {
})
})
t
.
Run
(
"the rest should be accessible"
,
func
(
t
*
testing
.
T
)
{
t
.
Run
(
"the rest should be accessible"
,
func
(
t
*
testing
.
T
)
{
for
_
,
ch
:=
range
gcChs
[
10
:
]
{
for
_
,
a
:=
range
addrs
[
10
:
]
{
_
,
err
:=
db
.
Get
(
context
.
Background
(),
storage
.
ModeGetRequest
,
ch
.
Address
())
_
,
err
:=
db
.
Get
(
context
.
Background
(),
storage
.
ModeGetRequest
,
a
)
if
err
!=
nil
{
t
.
Errorf
(
"got error %v but want none"
,
err
)
}
}
})
t
.
Run
(
"po 4 chunks accessible"
,
func
(
t
*
testing
.
T
)
{
for
_
,
ch
:=
range
po4Chs
{
_
,
err
:=
db
.
Get
(
context
.
Background
(),
storage
.
ModeGetRequest
,
ch
.
Address
())
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Errorf
(
"got error %v but want none"
,
err
)
t
.
Errorf
(
"got error %v but want none"
,
err
)
}
}
...
@@ -318,30 +391,79 @@ func TestDB_ReserveGC_Unreserve(t *testing.T) {
...
@@ -318,30 +391,79 @@ func TestDB_ReserveGC_Unreserve(t *testing.T) {
// TestDB_ReserveGC_EvictMaxPO tests that when unreserving a batch at
// TestDB_ReserveGC_EvictMaxPO tests that when unreserving a batch at
// swarm.MaxPO+1 results in the correct behaviour.
// swarm.MaxPO+1 results in the correct behaviour.
func
TestDB_ReserveGC_EvictMaxPO
(
t
*
testing
.
T
)
{
func
TestDB_ReserveGC_EvictMaxPO
(
t
*
testing
.
T
)
{
chunkCount
:=
150
var
closed
chan
struct
{}
var
(
testHookCollectGarbageChan
:=
make
(
chan
uint64
)
mtx
sync
.
Mutex
batchIDs
[][]
byte
addrs
[]
swarm
.
Address
chunkCount
=
100
testHookCollectGarbageChan
=
make
(
chan
uint64
)
testHookEvictChan
=
make
(
chan
uint64
)
closed
chan
struct
{}
)
t
.
Cleanup
(
setTestHookCollectGarbage
(
func
(
collectedCount
uint64
)
{
t
.
Cleanup
(
setTestHookCollectGarbage
(
func
(
collectedCount
uint64
)
{
if
collectedCount
==
0
{
return
}
select
{
select
{
case
testHookCollectGarbageChan
<-
collectedCount
:
case
testHookCollectGarbageChan
<-
collectedCount
:
case
<-
closed
:
case
<-
closed
:
}
}
}))
}))
t
.
Cleanup
(
setTestHookEviction
(
func
(
collectedCount
uint64
)
{
if
collectedCount
==
0
{
return
}
select
{
case
testHookEvictChan
<-
collectedCount
:
case
<-
closed
:
}
}))
unres
:=
func
(
f
postage
.
UnreserveIteratorFn
)
error
{
mtx
.
Lock
()
defer
mtx
.
Unlock
()
i
:=
0
defer
func
()
{
batchIDs
=
batchIDs
[
i
:
]
}()
for
i
=
0
;
i
<
len
(
batchIDs
);
i
++
{
// pop an element from batchIDs, call the Unreserve
item
:=
batchIDs
[
i
]
// here we mock the behavior of the batchstore
// that would call the localstore back with the
// batch IDs and the radiuses from the FIFO queue
stop
,
err
:=
f
(
item
,
2
)
if
err
!=
nil
{
return
err
}
if
stop
{
return
nil
}
stop
,
err
=
f
(
item
,
swarm
.
MaxPO
+
1
)
if
err
!=
nil
{
return
err
}
if
stop
{
return
nil
}
}
return
nil
}
db
:=
newTestDB
(
t
,
&
Options
{
db
:=
newTestDB
(
t
,
&
Options
{
Capacity
:
100
,
Capacity
:
100
,
// once reaching 100 in the reserve, we will evict
// half the size of the cache from the reserve, so 50 chunks
ReserveCapacity
:
100
,
UnreserveFunc
:
unres
,
})
})
closed
=
db
.
close
closed
=
db
.
close
// put the first chunkCount chunks within radius
// put the first chunkCount chunks within radius
for
i
:=
0
;
i
<
chunkCount
;
i
++
{
for
i
:=
0
;
i
<
chunkCount
;
i
++
{
ch
:=
generateTestRandomChunkAt
(
swarm
.
NewAddress
(
db
.
baseKey
),
2
)
.
WithBatch
(
2
,
3
,
2
,
false
)
ch
:=
generateTestRandomChunkAt
(
swarm
.
NewAddress
(
db
.
baseKey
),
2
)
.
WithBatch
(
2
,
3
,
2
,
false
)
err
:=
db
.
UnreserveBatch
(
ch
.
Stamp
()
.
BatchID
(),
2
)
_
,
err
:=
db
.
Put
(
context
.
Background
(),
storage
.
ModePutUpload
,
ch
)
if
err
!=
nil
{
t
.
Fatal
(
err
)
}
_
,
err
=
db
.
Put
(
context
.
Background
(),
storage
.
ModePutUpload
,
ch
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -349,31 +471,69 @@ func TestDB_ReserveGC_EvictMaxPO(t *testing.T) {
...
@@ -349,31 +471,69 @@ func TestDB_ReserveGC_EvictMaxPO(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
+		mtx.Lock()
+		batchIDs = append(batchIDs, ch.Stamp().BatchID())
+		addrs = append(addrs, ch.Address())
+		mtx.Unlock()
 	}

-	var gcChs []swarm.Chunk
-	for i := 0; i < 100; i++ {
-		gcch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false)
-		err := db.UnreserveBatch(gcch.Stamp().BatchID(), 2)
-		if err != nil {
-			t.Fatal(err)
-		}
-		_, err = db.Put(context.Background(), storage.ModePutUpload, gcch)
-		if err != nil {
-			t.Fatal(err)
-		}
-		err = db.Set(context.Background(), storage.ModeSetSync, gcch.Address())
-		if err != nil {
-			t.Fatal(err)
-		}
-		gcChs = append(gcChs, gcch)
-	}
+	// wait for the first eviction to finish, otherwise
+	// we collect some of the next chunks that get added
+	// which results in inconsistencies
+	evictTarget := db.reserveEvictionTarget()
+	for {
+		select {
+		case <-testHookEvictChan:
+		case <-time.After(10 * time.Second):
+			t.Fatal("collect garbage timeout")
+		}
+		resSize, err := db.reserveSize.Get()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if resSize == evictTarget {
+			break
+		}
+	}
+	// this is zero because we call eviction with max PO on the first 10 batches
+	// but the next 90 batches were not called with unreserve yet. this means that
+	// although the next 90 chunks exist in the store, their according batch radius
+	// still isn't persisted, since the localstore still is not aware of their
+	// batch radiuses. the same goes for the check after the gc actually evicts the
+	// ten chunks out of the cache (we still expect a zero for postage radius for the
+	// same reason)
+	t.Run("postage radius index count", newItemsCountTest(db.postageRadiusIndex, 0))
+	for i := 0; i < 90; i++ {
+		ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false)
+		_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = db.Set(context.Background(), storage.ModeSetSync, ch.Address())
+		if err != nil {
+			t.Fatal(err)
+		}
+		mtx.Lock()
+		batchIDs = append(batchIDs, ch.Stamp().BatchID())
+		addrs = append(addrs, ch.Address())
+		mtx.Unlock()
+	}
-	for _, ch := range gcChs {
-		err := db.UnreserveBatch(ch.Stamp().BatchID(), swarm.MaxPO+1)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
+	for {
+		select {
+		case <-testHookEvictChan:
+		case <-time.After(10 * time.Second):
+			t.Fatal("collect garbage timeout")
+		}
+		resSize, err := db.reserveSize.Get()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if resSize == evictTarget {
+			break
+		}
+	}
 	gcTarget := db.gcTarget()
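Aside on the numbers used above: the test repeatedly waits on testHookEvictChan until db.reserveSize drops to db.reserveEvictionTarget(). The target's actual formula lives in pkg/localstore and is not part of this diff; going only by the comment in the test options ("once reaching 100 in the reserve, we will evict half the size of the cache from the reserve, so 50 chunks"), a sketch of the assumed arithmetic would be:

// Sketch only: assumed from the test comment, not taken from this diff.
// With ReserveCapacity = 100 and cache Capacity = 100, the reserve is
// evicted down to 100 - 100/2 = 50 chunks once it fills up.
func reserveEvictionTarget(reserveCapacity, cacheCapacity uint64) uint64 {
	return reserveCapacity - cacheCapacity/2
}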
...
@@ -392,19 +552,19 @@ func TestDB_ReserveGC_EvictMaxPO(t *testing.T) {
 			break
 		}
 	}

-	t.Run("pull index count", newItemsCountTest(db.pullIndex, chunkCount+90))
+	t.Run("pull index count", newItemsCountTest(db.pullIndex, chunkCount+90-10))
-	t.Run("postage chunks index count", newItemsCountTest(db.postageChunksIndex, chunkCount+90))
+	t.Run("postage chunks index count", newItemsCountTest(db.postageChunksIndex, chunkCount+90-10))
-	t.Run("postage radius index count", newItemsCountTest(db.postageRadiusIndex, chunkCount))
+	t.Run("postage radius index count", newItemsCountTest(db.postageRadiusIndex, 0))
 	t.Run("gc index count", newItemsCountTest(db.gcIndex, 90))
 	t.Run("gc size", newIndexGCSizeTest(db))

 	t.Run("first ten unreserved chunks should not be accessible", func(t *testing.T) {
-		for _, ch := range gcChs[:10] {
-			_, err := db.Get(context.Background(), storage.ModeGetRequest, ch.Address())
+		for _, a := range addrs[:10] {
+			_, err := db.Get(context.Background(), storage.ModeGetRequest, a)
 			if err == nil {
 				t.Error("got no error, want NotFound")
 			}
...
@@ -412,19 +572,11 @@ func TestDB_ReserveGC_EvictMaxPO(t *testing.T) {
 	})

 	t.Run("the rest should be accessible", func(t *testing.T) {
-		for _, ch := range gcChs[10:] {
-			_, err := db.Get(context.Background(), storage.ModeGetRequest, ch.Address())
+		for _, a := range addrs[10:] {
+			_, err := db.Get(context.Background(), storage.ModeGetRequest, a)
 			if err != nil {
 				t.Errorf("got error %v but want none", err)
 			}
 		}
 	})
-	t.Run("batches for the all evicted batches should be evicted", func(t *testing.T) {
-		for _, ch := range gcChs {
-			item := shed.Item{BatchID: ch.Stamp().BatchID()}
-			if _, err := db.postageRadiusIndex.Get(item); !errors.Is(err, leveldb.ErrNotFound) {
-				t.Fatalf("wanted ErrNotFound but got %v", err)
-			}
-		}
-	})
 }
pkg/node/node.go
View file @ c6910097
...
@@ -64,6 +64,7 @@ import (
 	"github.com/ethersphere/bee/pkg/shed"
 	"github.com/ethersphere/bee/pkg/steward"
 	"github.com/ethersphere/bee/pkg/storage"
+	"github.com/ethersphere/bee/pkg/swarm"
 	"github.com/ethersphere/bee/pkg/tags"
 	"github.com/ethersphere/bee/pkg/topology"
 	"github.com/ethersphere/bee/pkg/topology/kademlia"
...
@@ -349,6 +350,17 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo
 	b.p2pService = p2ps
 	b.p2pHalter = p2ps
+	var unreserveFn func([]byte, uint8) (uint64, error)
+	var evictFn = func(b []byte) error {
+		_, err := unreserveFn(b, swarm.MaxPO+1)
+		return err
+	}
+	batchStore, err := batchstore.New(stateStore, evictFn, logger)
+	if err != nil {
+		return nil, fmt.Errorf("batchstore: %w", err)
+	}
 	// localstore depends on batchstore
 	var path string
...
@@ -358,6 +370,8 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo
 	}
 	lo := &localstore.Options{
 		Capacity:           o.CacheCapacity,
+		ReserveCapacity:    uint64(batchstore.Capacity),
+		UnreserveFunc:      batchStore.Unreserve,
 		OpenFilesLimit:     o.DBOpenFilesLimit,
 		BlockCacheCapacity: o.DBBlockCacheCapacity,
 		WriteBufferSize:    o.DBWriteBufferSize,
...
@@ -369,11 +383,8 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo
 		return nil, fmt.Errorf("localstore: %w", err)
 	}
 	b.localstoreCloser = storer
-	batchStore, err := batchstore.New(stateStore, storer.UnreserveBatch)
-	if err != nil {
-		return nil, fmt.Errorf("batchstore: %w", err)
-	}
+	unreserveFn = storer.UnreserveBatch
 	validStamp := postage.ValidStamp(batchStore)
 	post, err := postage.NewService(stateStore, batchStore, chainID)
 	if err != nil {
...
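The wiring above breaks a construction cycle: the batchstore needs the localstore's UnreserveBatch for eviction, while the localstore now needs the batchstore's Unreserve. The commit resolves this by declaring a function variable up front, handing the batchstore a closure over it, and binding it only after the localstore exists. A minimal standalone sketch of the same pattern (all names here are illustrative, not taken from the commit):

package main

import "fmt"

// late-bound function variable: declared before either component exists,
// assigned only once the second component is constructed.
var unreserveFn func(id []byte, radius uint8) (uint64, error)

// the first component only captures a closure over unreserveFn.
func newEvictFn() func(id []byte) error {
	return func(id []byte) error {
		_, err := unreserveFn(id, 32) // e.g. swarm.MaxPO+1 in the commit
		return err
	}
}

// stand-in for the localstore, constructed afterwards.
type localstore struct{}

func (l *localstore) UnreserveBatch(id []byte, radius uint8) (uint64, error) {
	fmt.Printf("unreserve %x up to radius %d\n", id, radius)
	return 0, nil
}

func main() {
	evict := newEvictFn()           // batchstore side gets its callback first
	ls := &localstore{}             // localstore is built afterwards
	unreserveFn = ls.UnreserveBatch // late binding closes the cycle
	_ = evict([]byte{0xbe, 0xef})
}

The caveat is the same as in the commit: calling the closure before the variable is bound would dereference a nil function, so construction order still matters.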
pkg/postage/batchstore/export_test.go
View file @ c6910097
...
@@ -6,7 +6,6 @@ package batchstore
 import (
 	"fmt"
-	"math/big"

 	"github.com/ethersphere/bee/pkg/postage"
 )
...
@@ -20,24 +19,11 @@ var BatchKey = batchKey
 // power of 2 function
 var Exp2 = exp2

-// iterates through all batches
-func IterateAll(bs postage.Storer, f func(b *postage.Batch) (bool, error)) error {
-	s := bs.(*store)
-	return s.store.Iterate(batchKeyPrefix, func(key []byte, _ []byte) (bool, error) {
-		b, err := s.Get(key[len(key)-32:])
-		if err != nil {
-			return true, err
-		}
-		return f(b)
-	})
-}
-
-// GetReserve extracts the inner limit and depth of reserve
-func GetReserve(si postage.Storer) (*big.Int, uint8) {
-	s, _ := si.(*store)
-	return s.rs.Inner, s.rs.Radius
-}
-
 func (s *store) String() string {
 	return fmt.Sprintf("inner=%d,outer=%d", s.rs.Inner.Uint64(), s.rs.Outer.Uint64())
 }
+
+func SetUnreserveFunc(s postage.Storer, fn func([]byte, uint8) error) {
+	st := s.(*store)
+	st.unreserveFn = fn
+}
pkg/postage/batchstore/mock/store.go
View file @ c6910097
...
@@ -140,7 +140,9 @@ func (bs *BatchStore) GetReserveState() *postage.ReserveState {
 	}
 	return rs
 }

+func (bs *BatchStore) Unreserve(_ postage.UnreserveIteratorFn) error {
+	panic("not implemented")
+}

 func (bs *BatchStore) SetRadiusSetter(r postage.RadiusSetter) {
 	panic("not implemented")
 }
...
pkg/postage/batchstore/reserve.go
View file @ c6910097
...
@@ -28,11 +28,14 @@ package batchstore
 import (
 	"bytes"
+	"encoding/binary"
 	"errors"
 	"fmt"
 	"math/big"
+	"strings"

 	"github.com/ethersphere/bee/pkg/postage"
+	"github.com/ethersphere/bee/pkg/storage"
 	"github.com/ethersphere/bee/pkg/swarm"
 )
...
@@ -54,6 +57,10 @@ type reserveState struct {
 	// it defines the proximity order of chunks which we
 	// would like to guarantee that all chunks are stored
 	Radius uint8 `json:"radius"`
+	// StorageRadius is the de-facto storage radius tracked
+	// by monitoring the events communicated to the localstore
+	// reserve eviction worker.
+	StorageRadius uint8 `json:"storageRadius"`
 	// Available capacity of the reserve which can still be used.
 	Available int64    `json:"available"`
 	Outer     *big.Int `json:"outer"` // lower value limit for outer layer = the further half of chunks
...
@@ -61,9 +68,63 @@ type reserveState struct {
 }

 // unreserve is called when the batchstore decides not to reserve a batch on a PO
-// i.e. chunk of the batch in bins [0 upto PO] (closed interval) are unreserved
-func (s *store) unreserve(b *postage.Batch, radius uint8) error {
-	return s.unreserveFunc(b.ID, radius)
-}
+// i.e. chunk of the batch in bins [0 upto PO] (closed interval) are unreserved.
+// this adds the batch at the mentioned PO to the unreserve fifo queue, that can be
+// dequeued by the localstore once the storage fills up.
+func (s *store) unreserve(b []byte, radius uint8) error {
+	c := s.queueIdx
+	c++
+	v := make([]byte, 8)
+	binary.BigEndian.PutUint64(v, c)
+	i := &UnreserveItem{BatchID: b, Radius: radius}
+	if err := s.store.Put(fmt.Sprintf("%s_%s", unreserveQueueKey, string(v)), i); err != nil {
+		return err
+	}
+	if err := s.putQueueCardinality(c); err != nil {
+		return err
+	}
+	s.queueIdx = c
+	return nil
+}
+
+func (s *store) Unreserve(cb postage.UnreserveIteratorFn) error {
+	var entries []string // entries to clean up
+	defer func() {
+		for _, v := range entries {
+			if err := s.store.Delete(v); err != nil {
+				s.logger.Errorf("batchstore: unreserve entry delete: %v", err)
+				return
+			}
+		}
+	}()
+	return s.store.Iterate(unreserveQueueKey, func(key, val []byte) (bool, error) {
+		if !strings.HasPrefix(string(key), unreserveQueueKey) {
+			return true, nil
+		}
+		v := &UnreserveItem{}
+		err := v.UnmarshalBinary(val)
+		if err != nil {
+			return true, err
+		}
+		stop, err := cb(v.BatchID, v.Radius)
+		if err != nil {
+			return true, err
+		}
+		s.rsMtx.Lock()
+		defer s.rsMtx.Unlock()
+		if s.rs.StorageRadius+1 < v.Radius {
+			s.rs.StorageRadius = v.Radius - 1
+			if err = s.store.Put(reserveStateKey, s.rs); err != nil {
+				return true, err
+			}
+		}
+		entries = append(entries, string(key))
+		if stop {
+			return true, nil
+		}
+		return false, nil
+	})
+}

 // evictExpired is called when PutChainState is called (and there is 'settlement')
...
@@ -112,10 +173,11 @@ func (s *store) evictExpired() error {
 	}
 	// unreserve batch fully
-	err = s.unreserve(b, swarm.MaxPO+1)
+	err = s.evictFn(b.ID)
 	if err != nil {
 		return true, err
 	}
 	s.rs.Available += multiplier * exp2(b.Radius-s.rs.Radius-1)

 	// if batch has no value then delete it
...
@@ -250,7 +312,7 @@ func (s *store) update(b *postage.Batch, oldDepth uint8, oldValue *big.Int) erro
 	capacityChange, reserveRadius := s.rs.change(oldValue, newValue, oldDepth, newDepth)
 	s.rs.Available += capacityChange

-	if err := s.unreserve(b, reserveRadius); err != nil {
+	if err := s.unreserveFn(b.ID, reserveRadius); err != nil {
 		return err
 	}
 	err := s.evictOuter(b)
...
@@ -293,7 +355,7 @@ func (s *store) evictOuter(last *postage.Batch) error {
 		// unreserve outer PO of the lowest priority batch until capacity is back to positive
 		s.rs.Available += exp2(b.Depth - s.rs.Radius - 1)
 		s.rs.Outer.Set(b.Value)
-		return false, s.unreserve(b, s.rs.Radius)
+		return false, s.unreserveFn(b.ID, s.rs.Radius)
 	})
 	if err != nil {
 		return err
...
@@ -310,6 +372,41 @@ func (s *store) evictOuter(last *postage.Batch) error {
 	return s.store.Put(reserveStateKey, s.rs)
 }

+func (s *store) getQueueCardinality() (val uint64, err error) {
+	err = s.store.Get(ureserveQueueCardinalityKey, &val)
+	if errors.Is(err, storage.ErrNotFound) {
+		return 0, nil
+	}
+	return val, err
+}
+
+func (s *store) putQueueCardinality(val uint64) error {
+	return s.store.Put(ureserveQueueCardinalityKey, val)
+}
+
+type UnreserveItem struct {
+	BatchID []byte
+	Radius  uint8
+}
+
+func (u *UnreserveItem) MarshalBinary() ([]byte, error) {
+	out := make([]byte, 32+1) // 32 byte batch ID + 1 byte uint8 radius
+	copy(out, u.BatchID)
+	out[32] = u.Radius
+	return out, nil
+}
+
+func (u *UnreserveItem) UnmarshalBinary(b []byte) error {
+	if len(b) != 33 {
+		return errors.New("invalid unreserve item length")
+	}
+	u.BatchID = make([]byte, 32)
+	copy(u.BatchID, b[:32])
+	u.Radius = b[32]
+	return nil
+}
+
 // exp2 returns the e-th power of 2
 func exp2(e uint8) int64 {
 	if e == 0 {
...
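The queue keys above end in an 8-byte big-endian counter, so a lexicographic iteration over the unreserveQueueKey prefix replays items in insertion (FIFO) order, and each stored value is the 33-byte UnreserveItem (32-byte batch ID plus one radius byte). A minimal, self-contained sketch of that ordering and layout, with an in-memory map standing in for the state store (the queue type and key prefix usage here are illustrative, not from the commit):

package main

import (
	"encoding/binary"
	"fmt"
	"sort"
)

// UnreserveIteratorFn mirrors the callback type added in pkg/postage/interface.go.
type UnreserveIteratorFn func(id []byte, radius uint8) (bool, error)

// queue is an in-memory stand-in for the state store: each key ends in an
// 8-byte big-endian counter, so lexicographically sorted keys replay
// insertion (FIFO) order, which is what the batchstore relies on.
type queue map[string][]byte

func (q queue) iterate(cb UnreserveIteratorFn) error {
	keys := make([]string, 0, len(q))
	for k := range q {
		keys = append(keys, k)
	}
	sort.Strings(keys) // big-endian counters sort oldest-first
	for _, k := range keys {
		v := q[k] // 33 bytes: 32-byte batch ID + 1-byte radius
		stop, err := cb(v[:32], v[32])
		if err != nil || stop {
			return err
		}
	}
	return nil
}

func main() {
	q := queue{}
	for i := uint64(1); i <= 3; i++ {
		key := make([]byte, 8)
		binary.BigEndian.PutUint64(key, i)
		item := append(make([]byte, 32), uint8(i+1)) // zero batch ID + radius
		q["batchstore_unreserve_queue_"+string(key)] = item
	}
	_ = q.iterate(func(id []byte, radius uint8) (bool, error) {
		fmt.Printf("unreserve batch %x... up to radius %d\n", id[:4], radius)
		return false, nil // keep draining
	})
}

Big-endian (rather than little-endian) encoding is what makes the counter sort correctly as a byte string; with little-endian keys, entry 256 would sort before entry 2.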
pkg/postage/batchstore/reserve_test.go
View file @ c6910097
...
@@ -5,9 +5,9 @@
 package batchstore_test

 import (
+	"bytes"
 	"encoding/hex"
 	"errors"
-	"fmt"
 	"io/ioutil"
 	"math/big"
 	"math/rand"
...
@@ -23,167 +23,6 @@ import (
 	"github.com/ethersphere/bee/pkg/swarm"
 )

-// random advance on the blockchain
-func newBlockAdvance() uint64 {
-	return uint64(rand.Intn(3) + 1)
-}
-
-// initial depth of a new batch
-func newBatchDepth(depth uint8) uint8 {
-	return depth + uint8(rand.Intn(10)) + 4
-}
-
-// the factor to increase the batch depth with
-func newDilutionFactor() int {
-	return rand.Intn(3) + 1
-}
-
-// new value on top of value based on random period and price
-func newValue(price, value *big.Int) *big.Int {
-	period := rand.Intn(100) + 1000
-	v := new(big.Int).Mul(price, big.NewInt(int64(period)))
-	return v.Add(v, value)
-}
-
-// TestBatchStoreUnreserve is testing the correct behaviour of the reserve.
-// the following assumptions are tested on each modification of the batches (top up, depth increase, price change)
-// - reserve exceeds capacity
-// - value-consistency of unreserved POs
-func TestBatchStoreUnreserveEvents(t *testing.T) {
-	defer func(i int64, d uint8) {
-		batchstore.Capacity = i
-		batchstore.DefaultDepth = d
-	}(batchstore.Capacity, batchstore.DefaultDepth)
-	batchstore.DefaultDepth = 5
-	batchstore.Capacity = batchstore.Exp2(16)
-
-	bStore, unreserved := setupBatchStore(t)
-	bStore.SetRadiusSetter(noopRadiusSetter{})
-	batches := make(map[string]*postage.Batch)
-
-	t.Run("new batches only", func(t *testing.T) {
-		// iterate starting from batchstore.DefaultDepth to maxPO
-		_, radius := batchstore.GetReserve(bStore)
-		for step := 0; radius < swarm.MaxPO; step++ {
-			cs, err := nextChainState(bStore)
-			if err != nil {
-				t.Fatal(err)
-			}
-			var b *postage.Batch
-			if b, err = createBatch(bStore, cs, radius); err != nil {
-				t.Fatal(err)
-			}
-			batches[string(b.ID)] = b
-			if radius, err = checkReserve(bStore, unreserved); err != nil {
-				t.Fatal(err)
-			}
-		}
-	})
-	t.Run("top up batches", func(t *testing.T) {
-		n := 0
-		for id := range batches {
-			b, err := bStore.Get([]byte(id))
-			if err != nil {
-				if errors.Is(err, storage.ErrNotFound) {
-					continue
-				}
-				t.Fatal(err)
-			}
-			cs, err := nextChainState(bStore)
-			if err != nil {
-				t.Fatal(err)
-			}
-			if err = topUp(bStore, cs, b); err != nil {
-				t.Fatal(err)
-			}
-			if _, err = checkReserve(bStore, unreserved); err != nil {
-				t.Fatal(err)
-			}
-			n++
-			if n > len(batches)/5 {
-				break
-			}
-		}
-	})
-	t.Run("dilute batches", func(t *testing.T) {
-		n := 0
-		for id := range batches {
-			b, err := bStore.Get([]byte(id))
-			if err != nil {
-				if errors.Is(err, storage.ErrNotFound) {
-					continue
-				}
-				t.Fatal(err)
-			}
-			cs, err := nextChainState(bStore)
-			if err != nil {
-				t.Fatal(err)
-			}
-			if err = increaseDepth(bStore, cs, b); err != nil {
-				t.Fatal(err)
-			}
-			if _, err = checkReserve(bStore, unreserved); err != nil {
-				t.Fatal(err)
-			}
-			n++
-			if n > len(batches)/5 {
-				break
-			}
-		}
-	})
-}
-
-func TestBatchStoreUnreserveAll(t *testing.T) {
-	defer func(i int64, d uint8) {
-		batchstore.Capacity = i
-		batchstore.DefaultDepth = d
-	}(batchstore.Capacity, batchstore.DefaultDepth)
-	batchstore.DefaultDepth = 5
-	batchstore.Capacity = batchstore.Exp2(16)
-
-	bStore, unreserved := setupBatchStore(t)
-	bStore.SetRadiusSetter(noopRadiusSetter{})
-
-	var batches [][]byte
-	// iterate starting from batchstore.DefaultDepth to maxPO
-	_, depth := batchstore.GetReserve(bStore)
-	for step := 0; depth < swarm.MaxPO; step++ {
-		cs, err := nextChainState(bStore)
-		if err != nil {
-			t.Fatal(err)
-		}
-		event := rand.Intn(6)
-		// 0: dilute, 1: topup, 2,3,4,5: create
-		var b *postage.Batch
-		if event < 2 && len(batches) > 10 {
-			for {
-				n := rand.Intn(len(batches))
-				b, err = bStore.Get(batches[n])
-				if err != nil {
-					if errors.Is(err, storage.ErrNotFound) {
-						continue
-					}
-					t.Fatal(err)
-				}
-				break
-			}
-			if event == 0 {
-				if err = increaseDepth(bStore, cs, b); err != nil {
-					t.Fatal(err)
-				}
-			} else if err = topUp(bStore, cs, b); err != nil {
-				t.Fatal(err)
-			}
-		} else if b, err = createBatch(bStore, cs, depth); err != nil {
-			t.Fatal(err)
-		} else {
-			batches = append(batches, b.ID)
-		}
-		if depth, err = checkReserve(bStore, unreserved); err != nil {
-			t.Fatal(err)
-		}
-	}
-}
-
 func setupBatchStore(t *testing.T) (postage.Storer, map[string]uint8) {
 	t.Helper()
 	// we cannot use the mock statestore here since the iterator is not giving the right order
...
@@ -214,8 +53,14 @@ func setupBatchStore(t *testing.T) (postage.Storer, map[string]uint8) {
 		unreserved[hex.EncodeToString(batchID)] = radius
 		return nil
 	}
-	bStore, _ := batchstore.New(stateStore, unreserveFunc)
+	evictFn := func(b []byte) error {
+		return unreserveFunc(b, swarm.MaxPO+1)
+	}
+	bStore, _ := batchstore.New(stateStore, evictFn, logger)
 	bStore.SetRadiusSetter(noopRadiusSetter{})
+	batchstore.SetUnreserveFunc(bStore, unreserveFunc)

 	// initialise chainstate
 	err = bStore.PutChainState(&postage.ChainState{
...
@@ -229,89 +74,6 @@ func setupBatchStore(t *testing.T) (postage.Storer, map[string]uint8) {
 	return bStore, unreserved
 }

-func nextChainState(bStore postage.Storer) (*postage.ChainState, error) {
-	cs := bStore.GetChainState()
-	// random advance on the blockchain
-	advance := newBlockAdvance()
-	cs = &postage.ChainState{
-		Block:        advance + cs.Block,
-		CurrentPrice: cs.CurrentPrice, // settle although no price change
-		TotalAmount:  cs.TotalAmount.Add(cs.TotalAmount, new(big.Int).Mul(cs.CurrentPrice, big.NewInt(int64(advance)))),
-	}
-	return cs, bStore.PutChainState(cs)
-}
-
-// creates a test batch with random value and depth and adds it to the batchstore
-func createBatch(bStore postage.Storer, cs *postage.ChainState, depth uint8) (*postage.Batch, error) {
-	b := postagetest.MustNewBatch()
-	b.Depth = newBatchDepth(depth)
-	value := newValue(cs.CurrentPrice, cs.TotalAmount)
-	b.Value = big.NewInt(0)
-	return b, bStore.Put(b, value, b.Depth)
-}
-
-// tops up a batch with random amount
-func topUp(bStore postage.Storer, cs *postage.ChainState, b *postage.Batch) error {
-	value := newValue(cs.CurrentPrice, b.Value)
-	return bStore.Put(b, value, b.Depth)
-}
-
-// dilutes the batch with random factor
-func increaseDepth(bStore postage.Storer, cs *postage.ChainState, b *postage.Batch) error {
-	diff := newDilutionFactor()
-	value := new(big.Int).Sub(b.Value, cs.TotalAmount)
-	value.Div(value, big.NewInt(int64(1<<diff)))
-	value.Add(value, cs.TotalAmount)
-	return bStore.Put(b, value, b.Depth+uint8(diff))
-}
-
-// checkReserve is testing the correct behaviour of the reserve.
-// the following assumptions are tested on each modification of the batches (top up, depth increase, price change)
-// - reserve exceeds capacity
-// - value-consistency of unreserved POs
-func checkReserve(bStore postage.Storer, unreserved map[string]uint8) (uint8, error) {
-	var size int64
-	count := 0
-	outer := big.NewInt(0)
-	inner := big.NewInt(0)
-	limit, depth := batchstore.GetReserve(bStore)
-	// checking all batches
-	err := batchstore.IterateAll(bStore, func(b *postage.Batch) (bool, error) {
-		count++
-		bDepth, found := unreserved[hex.EncodeToString(b.ID)]
-		if !found {
-			return true, fmt.Errorf("batch not unreserved")
-		}
-		if b.Value.Cmp(limit) >= 0 {
-			if bDepth < depth-1 || bDepth > depth {
-				return true, fmt.Errorf("incorrect reserve radius. expected %d or %d. got %d", depth-1, depth, bDepth)
-			}
-			if bDepth == depth {
-				if inner.Cmp(b.Value) < 0 {
-					inner.Set(b.Value)
-				}
-			} else if outer.Cmp(b.Value) > 0 || outer.Cmp(big.NewInt(0)) == 0 {
-				outer.Set(b.Value)
-			}
-			if outer.Cmp(big.NewInt(0)) != 0 && outer.Cmp(inner) <= 0 {
-				return true, fmt.Errorf("inconsistent reserve radius: %d <= %d", outer.Uint64(), inner.Uint64())
-			}
-			size += batchstore.Exp2(b.Depth - bDepth - 1)
-		} else if bDepth != swarm.MaxPO {
-			return true, fmt.Errorf("batch below limit expected to be fully unreserved. got found=%v, radius=%d", found, bDepth)
-		}
-		return false, nil
-	})
-	if err != nil {
-		return 0, err
-	}
-	if size > batchstore.Capacity {
-		return 0, fmt.Errorf("reserve size beyond capacity. max %d, got %d", batchstore.Capacity, size)
-	}
-	return depth, nil
-}
-
 // TestBatchStore_Unreserve tests that the unreserve
 // hook is called with the correct batch IDs and correct
 // Radius as a result of batches coming in from chain events.
...
@@ -542,6 +304,7 @@ func TestBatchStore_Unreserve(t *testing.T) {
 	} {
 		t.Run(tc.desc, func(t *testing.T) {
 			store, unreserved := setupBatchStore(t)
 			store.SetRadiusSetter(noopRadiusSetter{})
 			batches := addBatch(t, store,
 				depthValue(initBatchDepth, 3),
...
@@ -945,3 +708,125 @@ func checkUnreserved(t *testing.T, unreserved map[string]uint8, batches []*posta
 	}
 }

+func TestUnreserveItemMarshaling(t *testing.T) {
+	v1 := batchstore.UnreserveItem{BatchID: make([]byte, 32), Radius: 5}
+	_, err := rand.Read(v1.BatchID)
+	if err != nil {
+		t.Fatal(err)
+	}
+	v, _ := v1.MarshalBinary()
+	v2 := &batchstore.UnreserveItem{}
+	err = v2.UnmarshalBinary(v)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(v1.BatchID, v2.BatchID) {
+		t.Fatalf("batch ID not equal got %x want %x", v2.BatchID, v1.BatchID)
+	}
+	if v1.Radius != v2.Radius {
+		t.Fatalf("radius mismatch got %d want %d", v2.Radius, v1.Radius)
+	}
+}
+
+func TestUnreserveItemSequence(t *testing.T) {
+	defer func(i int64, d uint8) {
+		batchstore.Capacity = i
+		batchstore.DefaultDepth = d
+	}(batchstore.Capacity, batchstore.DefaultDepth)
+	batchstore.DefaultDepth = 5
+	batchstore.Capacity = batchstore.Exp2(5) // 32 chunks
+	initBatchDepth := uint8(8)
+
+	dir, err := ioutil.TempDir("", "batchstore_test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Cleanup(func() {
+		if err := os.RemoveAll(dir); err != nil {
+			t.Fatal(err)
+		}
+	})
+	logger := logging.New(ioutil.Discard, 0)
+	stateStore, err := leveldb.NewStateStore(dir, logger)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Cleanup(func() {
+		if err := stateStore.Close(); err != nil {
+			t.Fatal(err)
+		}
+	})
+
+	// set mock unreserve call
+	unreserved := []batchstore.UnreserveItem{}
+	unreserveFunc := func(batchID []byte, radius uint8) error {
+		v := batchstore.UnreserveItem{BatchID: batchID, Radius: radius}
+		unreserved = append(unreserved, v)
+		return nil
+	}
+	evictFn := func(b []byte) error {
+		return unreserveFunc(b, swarm.MaxPO+1)
+	}
+	bStore, _ := batchstore.New(stateStore, evictFn, logger)
+	bStore.SetRadiusSetter(noopRadiusSetter{})
+	batchstore.SetUnreserveFunc(bStore, unreserveFunc)
+
+	// initialise chainstate
+	err = bStore.PutChainState(&postage.ChainState{
+		Block:        0,
+		TotalAmount:  big.NewInt(0),
+		CurrentPrice: big.NewInt(1),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	batches := addBatch(t, bStore,
+		depthValue(initBatchDepth, 2),
+		depthValue(initBatchDepth, 3),
+		depthValue(initBatchDepth, 4),
+		depthValue(initBatchDepth, 5),
+	)
+	batch2 := addBatch(t, bStore,
+		depthValue(initBatchDepth, 8),
+	)
+	if l := len(unreserved); l != 7 {
+		t.Fatalf("expected 7 unreserve events got %d", l)
+	}
+	// check the initial unreserve calls
+	for i, batch := range batches {
+		ur := unreserved[i]
+		if !bytes.Equal(batch.ID, ur.BatchID) {
+			t.Fatalf("wrong batchID in sequence %d, got %x want %x", i, ur.BatchID, batch.ID)
+		}
+		if ur.Radius != 4 {
+			t.Fatalf("wrong radius in sequence %d got %d want %d", i, ur.Radius, 4)
+		}
+	}
+	// next event is the new batch
+	if !bytes.Equal(unreserved[4].BatchID, batch2[0].ID) {
+		t.Fatal("batch mismatch")
+	}
+	if unreserved[4].Radius != 4 {
+		t.Fatal("radius mismatch")
+	}
+	// now the 2 cheapest batches with higher radius
+	if !bytes.Equal(unreserved[5].BatchID, batches[0].ID) {
+		t.Fatal("batch mismatch")
+	}
+	if unreserved[5].Radius != 5 {
+		t.Fatal("radius mismatch")
+	}
+	if !bytes.Equal(unreserved[6].BatchID, batches[1].ID) {
+		t.Fatal("batch mismatch")
+	}
+	if unreserved[6].Radius != 5 {
+		t.Fatal("radius mismatch")
+	}
+}
pkg/postage/batchstore/store.go
View file @ c6910097
...
@@ -10,34 +10,44 @@ import (
 	"fmt"
 	"math/big"
 	"strings"
+	"sync"

+	"github.com/ethersphere/bee/pkg/logging"
 	"github.com/ethersphere/bee/pkg/postage"
 	"github.com/ethersphere/bee/pkg/storage"
 )

 const (
 	batchKeyPrefix  = "batchstore_batch_"
 	valueKeyPrefix  = "batchstore_value_"
 	chainStateKey   = "batchstore_chainstate"
 	reserveStateKey = "batchstore_reservestate"
+	unreserveQueueKey           = "batchstore_unreserve_queue_"
+	ureserveQueueCardinalityKey = "batchstore_queue_cardinality"
 )

 type unreserveFn func(batchID []byte, radius uint8) error
+type evictFn func(batchID []byte) error

 // store implements postage.Storer
 type store struct {
 	store storage.StateStorer // State store backend to persist batches.
 	cs    *postage.ChainState // the chain state

-	rs            *reserveState // the reserve state
-	unreserveFunc unreserveFn   // unreserve function
-	metrics       metrics       // metrics
+	rsMtx       sync.Mutex
+	rs          *reserveState // the reserve state
+	unreserveFn unreserveFn   // unreserve function
+	evictFn     evictFn       // evict function
+	queueIdx    uint64        // unreserve queue cardinality
+	metrics     metrics       // metrics
+	logger      logging.Logger

 	radiusSetter postage.RadiusSetter // setter for radius notifications
 }

 // New constructs a new postage batch store.
 // It initialises both chain state and reserve state from the persistent state store
-func New(st storage.StateStorer, unreserveFunc unreserveFn) (postage.Storer, error) {
+func New(st storage.StateStorer, ev evictFn, logger logging.Logger) (postage.Storer, error) {
 	cs := &postage.ChainState{}
 	err := st.Get(chainStateKey, cs)
 	if err != nil {
...
@@ -63,23 +73,33 @@ func New(st storage.StateStorer, ev evictFn, logger logging.Logger) (postage.Sto
 			Available: Capacity,
 		}
 	}
 	s := &store{
 		store:   st,
 		cs:      cs,
 		rs:      rs,
-		unreserveFunc: unreserveFunc,
+		evictFn: ev,
 		metrics: newMetrics(),
+		logger:  logger,
 	}
+	s.unreserveFn = s.unreserve
+	if s.queueIdx, err = s.getQueueCardinality(); err != nil {
+		return nil, err
+	}

 	return s, nil
 }

 func (s *store) GetReserveState() *postage.ReserveState {
+	s.rsMtx.Lock()
+	defer s.rsMtx.Unlock()
 	return &postage.ReserveState{
 		Radius:        s.rs.Radius,
+		StorageRadius: s.rs.StorageRadius,
 		Available:     s.rs.Available,
 		Outer:         new(big.Int).Set(s.rs.Outer),
 		Inner:         new(big.Int).Set(s.rs.Inner),
 	}
 }
...
@@ -90,7 +110,15 @@ func (s *store) Get(id []byte) (*postage.Batch, error) {
 	if err != nil {
 		return nil, fmt.Errorf("get batch %s: %w", hex.EncodeToString(id), err)
 	}
-	b.Radius = s.rs.radius(s.rs.tier(b.Value))
+	s.rsMtx.Lock()
+	defer s.rsMtx.Unlock()
+	if s.rs.StorageRadius < s.rs.Radius {
+		b.Radius = s.rs.StorageRadius
+	} else {
+		b.Radius = s.rs.radius(s.rs.tier(b.Value))
+	}
 	return b, nil
 }
...
@@ -114,7 +142,9 @@ func (s *store) Put(b *postage.Batch, value *big.Int, depth uint8) error {
 	}
 	if s.radiusSetter != nil {
+		s.rsMtx.Lock()
 		s.radiusSetter.SetRadius(s.rs.Radius)
+		s.rsMtx.Unlock()
 	}
 	return s.store.Put(batchKey(b.ID), b)
 }
...
@@ -150,7 +180,9 @@ func (s *store) PutChainState(cs *postage.ChainState) error {
 	// this needs to be improved, since we can miss some calls on
 	// startup. the same goes for the other call to radiusSetter
 	if s.radiusSetter != nil {
+		s.rsMtx.Lock()
 		s.radiusSetter.SetRadius(s.rs.Radius)
+		s.rsMtx.Unlock()
 	}
 	return s.store.Put(chainStateKey, cs)
...
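One detail worth noting in New above: queueIdx is restored from the persisted queue cardinality, so unreserve-queue keys keep increasing across restarts instead of colliding with entries that are still pending. A tiny standalone sketch of that persistence rule (the in-memory map stands in for the StateStorer; the counter type and key name are illustrative, not from the commit):

package main

import (
	"fmt"
	"sync"
)

// counter persists a monotonic index so that queue keys allocated after a
// restart continue where the previous run left off.
type counter struct {
	mu    sync.Mutex
	store map[string]uint64 // stand-in for the persistent state store
}

func (c *counter) next(key string) uint64 {
	c.mu.Lock()
	defer c.mu.Unlock()
	n := c.store[key] + 1 // a missing key reads as zero, like ErrNotFound -> 0
	c.store[key] = n      // persist before handing the index out
	return n
}

func main() {
	c := &counter{store: map[string]uint64{}}
	for i := 0; i < 3; i++ {
		fmt.Println("allocated queue index:", c.next("queue_cardinality"))
	}
}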
pkg/postage/batchstore/store_test.go
View file @ c6910097
...
@@ -18,13 +18,14 @@ import (
 	"github.com/ethersphere/bee/pkg/storage"
 )

-func unreserve([]byte, uint8) error { return nil }
+var noopEvictFn = func([]byte) error { return nil }

 func TestBatchStoreGet(t *testing.T) {
 	testBatch := postagetest.MustNewBatch()
 	key := batchstore.BatchKey(testBatch.ID)

 	stateStore := mock.NewStateStore()
-	batchStore, _ := batchstore.New(stateStore, nil)
+	batchStore, _ := batchstore.New(stateStore, nil, logging.New(ioutil.Discard, 0))
 	stateStorePut(t, stateStore, key, testBatch)
 	got := batchStoreGetBatch(t, batchStore, testBatch.ID)
...
@@ -36,7 +37,7 @@ func TestBatchStorePut(t *testing.T) {
 	key := batchstore.BatchKey(testBatch.ID)

 	stateStore := mock.NewStateStore()
-	batchStore, _ := batchstore.New(stateStore, unreserve)
+	batchStore, _ := batchstore.New(stateStore, nil, logging.New(ioutil.Discard, 0))
 	batchStore.SetRadiusSetter(noopRadiusSetter{})
 	batchStorePutBatch(t, batchStore, testBatch)
...
@@ -49,7 +50,7 @@ func TestBatchStoreGetChainState(t *testing.T) {
 	testChainState := postagetest.NewChainState()

 	stateStore := mock.NewStateStore()
-	batchStore, _ := batchstore.New(stateStore, nil)
+	batchStore, _ := batchstore.New(stateStore, nil, logging.New(ioutil.Discard, 0))
 	batchStore.SetRadiusSetter(noopRadiusSetter{})
 	err := batchStore.PutChainState(testChainState)
...
@@ -64,7 +65,7 @@ func TestBatchStorePutChainState(t *testing.T) {
 	testChainState := postagetest.NewChainState()

 	stateStore := mock.NewStateStore()
-	batchStore, _ := batchstore.New(stateStore, nil)
+	batchStore, _ := batchstore.New(stateStore, nil, logging.New(ioutil.Discard, 0))
 	batchStore.SetRadiusSetter(noopRadiusSetter{})
 	batchStorePutChainState(t, batchStore, testChainState)
...
@@ -89,7 +90,7 @@ func TestBatchStoreReset(t *testing.T) {
 	}
 	defer stateStore.Close()

-	batchStore, _ := batchstore.New(stateStore, func([]byte, uint8) error { return nil })
+	batchStore, _ := batchstore.New(stateStore, noopEvictFn, logger)
 	batchStore.SetRadiusSetter(noopRadiusSetter{})
 	err = batchStore.Put(testBatch, big.NewInt(15), 8)
 	if err != nil {
...
pkg/postage/interface.go
View file @ c6910097
...
@@ -23,6 +23,8 @@ type EventUpdater interface {
 	TransactionEnd() error
 }

+type UnreserveIteratorFn func(id []byte, radius uint8) (bool, error)

 // Storer represents the persistence layer for batches on the current (highest
 // available) block.
 type Storer interface {
...
@@ -32,6 +34,7 @@ type Storer interface {
 	GetChainState() *ChainState
 	GetReserveState() *ReserveState
 	SetRadiusSetter(RadiusSetter)
+	Unreserve(UnreserveIteratorFn) error

 	Reset() error
 }
...
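The new Unreserve method pulls rather than pushes: the Storer replays queued (batch ID, radius) pairs into the callback, and the callback's boolean return stops the iteration early once the caller has reclaimed enough space. A minimal test double honouring that contract, using only the types in this file, might look like this (recordingStore is illustrative, not part of the commit):

// recordingStore replays a fixed set of events into the callback and
// honours the stop signal, mirroring the contract of Storer.Unreserve.
type recordingStore struct {
	events []struct {
		id     []byte
		radius uint8
	}
}

func (r *recordingStore) Unreserve(fn UnreserveIteratorFn) error {
	for _, e := range r.events {
		stop, err := fn(e.id, e.radius)
		if err != nil || stop {
			return err
		}
	}
	return nil
}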
pkg/postage/reservestate.go
View file @ c6910097
...
@@ -7,8 +7,9 @@ package postage

 import "math/big"

 type ReserveState struct {
 	Radius        uint8
+	StorageRadius uint8
 	Available     int64
 	Outer         *big.Int // lower value limit for outer layer = the further half of chunks
 	Inner         *big.Int
 }