Commit c6910097 authored by acud, committed by GitHub

feat: unreserve on-demand (#2071)

parent 726b61f2
48c48
< var Capacity = exp2(22)
---
> var Capacity = exp2(10)
> var Capacity = exp2(6)
......@@ -110,25 +110,6 @@ jobs:
run: |
beekeeper delete bee-cluster --cluster-name local-clef
make beelocal ACTION=uninstall
- name: Apply patches
run: |
patch pkg/postage/batchstore/reserve.go .github/patches/postagereserve_gc.patch
- name: Prepare testing cluster (storage incentives setup)
run: |
timeout 10m make beelocal OPTS='ci skip-vet'
- name: Set kube config
run: |
mkdir -p ~/.kube
cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
- name: Set testing cluster (storage incentives setup)
run: |
timeout 10m make deploylocal BEEKEEPER_CLUSTER=local-gc
- name: Test pingpong
id: pingpong-3
run: until beekeeper check --cluster-name local-gc --checks ci-pingpong; do echo "waiting for pingpong..."; sleep .3; done
- name: Test gc
id: gc-chunk-1
run: beekeeper check --cluster-name local-gc --checks=ci-gc
- name: Retag Docker image and push for cache
if: success()
run: |
......@@ -170,8 +151,6 @@ jobs:
if ${{ steps.settlements-2.outcome=='failure' }}; then FAILED=settlements-2; fi
if ${{ steps.pss.outcome=='failure' }}; then FAILED=pss; fi
if ${{ steps.soc.outcome=='failure' }}; then FAILED=soc; fi
if ${{ steps.pingpong-3.outcome=='failure' }}; then FAILED=pingpong-3; fi
if ${{ steps.gc-chunk-1.outcome=='failure' }}; then FAILED=gc-chunk-1; fi
KEYS=$(curl -sSf -X POST https://eu.relay.tunshell.com/api/sessions)
curl -sSf -X POST -H "Content-Type: application/json" -d "{\"text\": \"**${RUN_TYPE}** ${{ github.head_ref }}\nFailed -> \`${FAILED}\`\nDebug -> \`sh <(curl -sSf https://lets.tunshell.com/init.sh) L $(echo $KEYS | jq -r .peer2_key) \${TUNSHELL_SECRET} eu.relay.tunshell.com\`\"}" https://beehive.ethswarm.org/hooks/${{ secrets.WEBHOOK_KEY }}
echo "Failed test: ${FAILED}"
......
......@@ -13,6 +13,7 @@ import (
type reserveStateResponse struct {
Radius uint8 `json:"radius"`
StorageRadius uint8 `json:"storageRadius"`
Available int64 `json:"available"`
Outer *bigint.BigInt `json:"outer"` // lower value limit for outer layer = the further half of chunks
Inner *bigint.BigInt `json:"inner"`
......
......@@ -38,6 +38,19 @@ var (
// gcBatchSize limits the number of chunks in a single
// transaction on garbage collection.
gcBatchSize uint64 = 2000
// reserveCollectionRatio is the fraction of the cache size to evict from
// the reserve every time the reserve hits its limit. If the cache size is
// 1000 chunks then we will evict 500 chunks from the reserve, so as not
// to overwhelm the cache with more chunks than it would flush
// anyway.
reserveCollectionRatio = 0.5
// reserveEvictionBatch limits the number of chunks collected in
// a single reserve eviction run.
reserveEvictionBatch uint64 = 200
// maxPurgeablePercentageOfReserve is a ceiling on the fraction of the
// reserve to evict in a single run, in case the cache size is bigger than the reserve
maxPurgeablePercentageOfReserve = 0.1
)
// collectGarbageWorker is a long running function that waits for
......@@ -104,8 +117,11 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
if err != nil {
return 0, true, err
}
if gcSize == target {
return 0, true, nil
}
db.metrics.GCSize.Set(float64(gcSize))
defer func() { db.logger.Debugf("gc collected %d, target %d, startSize %d", collectedCount, target, gcSize) }()
done = true
first := true
start := time.Now()
......@@ -208,6 +224,15 @@ func (db *DB) gcTarget() (target uint64) {
return uint64(float64(db.cacheCapacity) * gcTargetRatio)
}
func (db *DB) reserveEvictionTarget() (target uint64) {
targetCache := db.reserveCapacity - uint64(float64(db.cacheCapacity)*reserveCollectionRatio)
targetCeiling := db.reserveCapacity - uint64(float64(db.reserveCapacity)*maxPurgeablePercentageOfReserve)
if targetCeiling > targetCache {
return targetCeiling
}
return targetCache
}
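For orientation, here is a minimal standalone sketch (not part of the change) of the eviction-target arithmetic above, using the two ratios from the var block and hypothetical capacities:

```go
package main

import "fmt"

// ratios copied from the diff; the capacities passed below are hypothetical
const (
	reserveCollectionRatio          = 0.5
	maxPurgeablePercentageOfReserve = 0.1
)

// reserveEvictionTarget mirrors the function above: evict down to either
// (reserve - half the cache) or (reserve - 10% of the reserve), whichever
// leaves the reserve larger, i.e. whichever evicts fewer chunks.
func reserveEvictionTarget(reserveCapacity, cacheCapacity uint64) uint64 {
	targetCache := reserveCapacity - uint64(float64(cacheCapacity)*reserveCollectionRatio)
	targetCeiling := reserveCapacity - uint64(float64(reserveCapacity)*maxPurgeablePercentageOfReserve)
	if targetCeiling > targetCache {
		return targetCeiling
	}
	return targetCache
}

func main() {
	fmt.Println(reserveEvictionTarget(1000, 100))  // 950: half the cache (50 chunks) is evicted
	fmt.Println(reserveEvictionTarget(1000, 1000)) // 900: the 10% ceiling wins, only 100 chunks evicted
}
```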
// triggerGarbageCollection signals collectGarbageWorker
// to call collectGarbage.
func (db *DB) triggerGarbageCollection() {
......@@ -218,6 +243,16 @@ func (db *DB) triggerGarbageCollection() {
}
}
// triggerReserveEviction signals reserveEvictionWorker
// to call evictReserve.
func (db *DB) triggerReserveEviction() {
select {
case db.reserveEvictionTrigger <- struct{}{}:
case <-db.close:
default:
}
}
// incGCSizeInBatch changes gcSize field value
// by change which can be negative. This function
// must be called under batchMu lock.
......@@ -243,6 +278,7 @@ func (db *DB) incGCSizeInBatch(batch *leveldb.Batch, change int64) (err error) {
}
newSize = gcSize - c
}
db.logger.Debugf("inc gc size %d change %d", gcSize, change)
db.gcSize.PutInBatch(batch, newSize)
db.metrics.GCSize.Set(float64(newSize))
......@@ -253,6 +289,122 @@ func (db *DB) incGCSizeInBatch(batch *leveldb.Batch, change int64) (err error) {
return nil
}
// incReserveSizeInBatch changes reserveSize field value
// by change which can be negative. This function
// must be called under batchMu lock.
func (db *DB) incReserveSizeInBatch(batch *leveldb.Batch, change int64) (err error) {
if change == 0 {
return nil
}
reserveSize, err := db.reserveSize.Get()
if err != nil && !errors.Is(err, leveldb.ErrNotFound) {
return err
}
var newSize uint64
if change > 0 {
newSize = reserveSize + uint64(change)
} else {
// 'change' is an int64 and is negative
// a conversion is needed with correct sign
c := uint64(-change)
if c > reserveSize {
// protect against uint64 underflow
return nil
}
newSize = reserveSize - c
}
db.logger.Debugf("inc reserve size in batch %d old %d change %d", newSize, reserveSize, change)
db.reserveSize.PutInBatch(batch, newSize)
db.metrics.ReserveSize.Set(float64(newSize))
// trigger reserve eviction if we reached the reserve capacity
if newSize >= db.reserveCapacity {
db.triggerReserveEviction()
}
return nil
}
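A tiny sketch (not part of the change) of the sign handling in incReserveSizeInBatch, with hypothetical sizes; it shows why the negative delta is converted explicitly and guarded before subtracting from the unsigned counter:

```go
package main

import "fmt"

// applyChange mirrors the guard above: a negative delta is negated into a
// uint64 and checked so the counter can never wrap around.
func applyChange(size uint64, change int64) uint64 {
	if change >= 0 {
		return size + uint64(change)
	}
	c := uint64(-change)
	if c > size {
		// protect against uint64 underflow, as the code above does
		return size
	}
	return size - c
}

func main() {
	fmt.Println(applyChange(10, -3)) // 7
	fmt.Println(applyChange(2, -5))  // 2: guarded; a naive subtraction would wrap around
}
```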
func (db *DB) reserveEvictionWorker() {
defer close(db.reserveEvictionWorkerDone)
for {
select {
case <-db.reserveEvictionTrigger:
evictedCount, done, err := db.evictReserve()
if err != nil {
db.logger.Errorf("localstore: evict reserve: %v", err)
}
if !done {
db.triggerReserveEviction()
}
if testHookEviction != nil {
testHookEviction(evictedCount)
}
case <-db.close:
return
}
}
}
func (db *DB) evictReserve() (totalEvicted uint64, done bool, err error) {
var target uint64
db.metrics.EvictReserveCounter.Inc()
defer func(start time.Time) {
if err != nil {
db.metrics.EvictReserveErrorCounter.Inc()
}
totalTimeMetric(db.metrics.TotalTimeEvictReserve, start)
}(time.Now())
target = db.reserveEvictionTarget()
db.batchMu.Lock()
defer db.batchMu.Unlock()
reserveSizeStart, err := db.reserveSize.Get()
if err != nil {
return 0, false, err
}
if reserveSizeStart == target {
return 0, true, nil
}
// if we don't get any entries at all then there's no use
// in triggering subsequent runs even if we're not done
totalCallbacks := 0
err = db.unreserveFunc(func(batchID []byte, radius uint8) (bool, error) {
totalCallbacks++
e, err := db.UnreserveBatch(batchID, radius)
if err != nil {
return true, err
}
totalEvicted += e
if reserveSizeStart-totalEvicted <= target {
done = true
return true, nil
}
if totalEvicted >= reserveEvictionBatch {
// stop collecting when we reach the eviction
// batch size so that we can avoid lock contention
// on localstore.
return true, nil
}
return false, nil
})
if err != nil {
return 0, false, err
}
if totalCallbacks == 0 {
// if we did not get any items from the batchstore
// it means there's no point in triggering a subsequent
// round
done = true
}
db.logger.Debugf("reserve evicted %d done %t size %d callbacks %d", totalEvicted, done, reserveSizeStart, totalCallbacks)
return totalEvicted, done, nil
}
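A minimal sketch (not part of the change) of the iterator contract that evictReserve drives: the batchstore replays queued (batch ID, radius) events into a callback, which returns stop=true once enough has been evicted. The names and the in-memory queue below are illustrative stand-ins for postage.UnreserveIteratorFn and the batchstore FIFO:

```go
package main

import "fmt"

// unreserveEvent mirrors the (batch ID, radius) pairs the batchstore queues up.
type unreserveEvent struct {
	batchID []byte
	radius  uint8
}

// iteratorFn mirrors the shape of the iterator callback:
// return stop=true to end the current run early.
type iteratorFn func(batchID []byte, radius uint8) (stop bool, err error)

// unreserve replays queued events into the callback until it asks to stop.
func unreserve(queue []unreserveEvent, cb iteratorFn) error {
	for _, e := range queue {
		stop, err := cb(e.batchID, e.radius)
		if err != nil {
			return err
		}
		if stop {
			return nil
		}
	}
	return nil
}

func main() {
	queue := []unreserveEvent{
		{batchID: []byte("batch-a"), radius: 2},
		{batchID: []byte("batch-a"), radius: 4},
		{batchID: []byte("batch-b"), radius: 2},
	}
	var evicted, target uint64 = 0, 3
	_ = unreserve(queue, func(id []byte, radius uint8) (bool, error) {
		evicted += 2 // pretend each callback evicts two chunks
		fmt.Printf("unreserved %s up to radius %d, evicted so far %d\n", id, radius, evicted)
		return evicted >= target, nil // stop once the target is met, as evictReserve does above
	})
}
```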
// testHookCollectGarbage is a hook that can provide
// information when a garbage collection run is done
// and how many items it removed.
......@@ -264,3 +416,5 @@ var testHookCollectGarbage func(collectedCount uint64)
var testHookGCIteratorDone func()
var withinRadiusFn func(*DB, shed.Item) bool
var testHookEviction func(count uint64)
......@@ -28,6 +28,7 @@ import (
"time"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
......@@ -280,6 +281,7 @@ func TestGCAfterPin(t *testing.T) {
db := newTestDB(t, &Options{
Capacity: 100,
ReserveCapacity: 100,
})
pinAddrs := make([]swarm.Address, 0)
......@@ -597,6 +599,7 @@ func TestPinAfterMultiGC(t *testing.T) {
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))
db := newTestDB(t, &Options{
Capacity: 10,
ReserveCapacity: 10,
})
pinnedChunks := make([]swarm.Address, 0)
......@@ -716,6 +719,7 @@ func TestPinSyncAndAccessPutSetChunkMultipleTimes(t *testing.T) {
}))
db := newTestDB(t, &Options{
Capacity: 10,
ReserveCapacity: 100,
})
closed = db.close
......@@ -959,9 +963,202 @@ func setTestHookGCIteratorDone(h func()) (reset func()) {
func unreserveChunkBatch(t *testing.T, db *DB, radius uint8, chs ...swarm.Chunk) {
t.Helper()
for _, ch := range chs {
err := db.UnreserveBatch(ch.Stamp().BatchID(), radius)
_, err := db.UnreserveBatch(ch.Stamp().BatchID(), radius)
if err != nil {
t.Fatal(err)
}
}
}
func setTestHookEviction(h func(count uint64)) (reset func()) {
current := testHookEviction
reset = func() { testHookEviction = current }
testHookEviction = h
return reset
}
// TestReserveEvictionWorker tests that the reserve
// eviction works correctly once the reserve hits the
// capacity. The necessary items are then moved into the
// gc index.
func TestReserveEvictionWorker(t *testing.T) {
var (
chunkCount = 10
batchIDs [][]byte
db *DB
addrs []swarm.Address
closed chan struct{}
mtx sync.Mutex
)
testHookEvictionChan := make(chan uint64)
t.Cleanup(setTestHookEviction(func(count uint64) {
if count == 0 {
return
}
select {
case testHookEvictionChan <- count:
case <-closed:
}
}))
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return true }))
unres := func(f postage.UnreserveIteratorFn) error {
mtx.Lock()
defer mtx.Unlock()
for i := 0; i < len(batchIDs); i++ {
// pop an element from batchIDs, call the Unreserve
item := batchIDs[i]
// here we mock the behavior of the batchstore
// that would call the localstore back with the
// batch IDs and the radiuses from the FIFO queue
stop, err := f(item, 2)
if err != nil {
return err
}
if stop {
return nil
}
stop, err = f(item, 4)
if err != nil {
return err
}
if stop {
return nil
}
}
batchIDs = nil
return nil
}
testHookCollectGarbageChan := make(chan uint64)
t.Cleanup(setTestHookCollectGarbage(func(collectedCount uint64) {
// don't trigger if we haven't collected anything - this may
// result in a race condition when we inspect the gcsize below,
// causing the database to shut down and the cleanup to happen
// before the correct signal has been communicated here.
if collectedCount == 0 {
return
}
select {
case testHookCollectGarbageChan <- collectedCount:
case <-db.close:
}
}))
db = newTestDB(t, &Options{
Capacity: 10,
ReserveCapacity: 10,
UnreserveFunc: unres,
})
// insert 10 chunks that fall into the reserve, then
// expect the first one to be evicted
for i := 0; i < chunkCount; i++ {
ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
err = db.Set(context.Background(), storage.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
mtx.Lock()
addrs = append(addrs, ch.Address())
batchIDs = append(batchIDs, ch.Stamp().BatchID())
mtx.Unlock()
}
evictTarget := db.reserveEvictionTarget()
for {
select {
case <-testHookEvictionChan:
case <-time.After(10 * time.Second):
t.Fatal("eviction timeout")
}
reserveSize, err := db.reserveSize.Get()
if err != nil {
t.Fatal(err)
}
if reserveSize == evictTarget {
break
}
}
t.Run("pull index count", newItemsCountTest(db.pullIndex, chunkCount))
t.Run("postage index count", newItemsCountTest(db.postageIndexIndex, chunkCount))
t.Run("postage radius count", newItemsCountTest(db.postageRadiusIndex, 1))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
t.Run("gc size", newIndexGCSizeTest(db))
t.Run("all chunks should be accessible", func(t *testing.T) {
for _, a := range addrs {
if _, err := db.Get(context.Background(), storage.ModeGetRequest, a); err != nil {
t.Errorf("got error %v, want none", err)
}
}
})
for i := 0; i < chunkCount-1; i++ {
ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 3).WithBatch(2, 3, 2, false)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
err = db.Set(context.Background(), storage.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
mtx.Lock()
addrs = append(addrs, ch.Address())
batchIDs = append(batchIDs, ch.Stamp().BatchID())
mtx.Unlock()
}
for {
select {
case <-testHookEvictionChan:
case <-time.After(10 * time.Second):
t.Fatal("eviction timeout")
}
reserveSize, err := db.reserveSize.Get()
if err != nil {
t.Fatal(err)
}
if reserveSize == evictTarget {
break
}
}
gcTarget := db.gcTarget()
for {
select {
case <-testHookCollectGarbageChan:
case <-time.After(10 * time.Second):
t.Error("collect garbage timeout")
}
gcSize, err := db.gcSize.Get()
if err != nil {
t.Fatal(err)
}
if gcSize == gcTarget {
break
}
}
t.Run("9/10 of the first chunks should be accessible", func(t *testing.T) {
has := 0
for _, a := range addrs[:10] {
if _, err := db.Get(context.Background(), storage.ModeGetRequest, a); err == nil {
has++
}
}
if has != 9 {
t.Errorf("got %d chunks, want 9", has)
}
})
}
......@@ -106,13 +106,24 @@ type DB struct {
// field that stores number of items in gc index
gcSize shed.Uint64Field
// field that stores the size of the reserve
reserveSize shed.Uint64Field
// garbage collection is triggered when gcSize exceeds
// the cacheCapacity value
cacheCapacity uint64
// the capacity of the reserve in chunks
reserveCapacity uint64
unreserveFunc func(postage.UnreserveIteratorFn) error
// triggers garbage collection event loop
collectGarbageTrigger chan struct{}
// triggers reserve eviction event loop
reserveEvictionTrigger chan struct{}
// a buffered channel acting as a semaphore
// to limit the maximal number of goroutines
// created by Getters to call updateGC function
......@@ -143,6 +154,7 @@ type DB struct {
// garbage collection and gc size write workers
// are done
collectGarbageWorkerDone chan struct{}
reserveEvictionWorkerDone chan struct{}
// wait for all subscriptions to finish before closing
// underlying leveldb to prevent possible panics from
......@@ -159,6 +171,11 @@ type Options struct {
// Capacity is a limit that triggers garbage collection when
// number of items in gcIndex equals or exceeds it.
Capacity uint64
// ReserveCapacity is the capacity of the reserve.
ReserveCapacity uint64
// UnreserveFunc is an iterator needed to facilitate reserve
// eviction once ReserveCapacity is reached.
UnreserveFunc func(postage.UnreserveIteratorFn) error
// OpenFilesLimit defines the upper bound of open files that the
// localstore should maintain at any point of time. It is
// passed on to the shed constructor.
......@@ -185,12 +202,15 @@ func New(path string, baseKey []byte, ss storage.StateStorer, o *Options, logger
// default options
o = &Options{
Capacity: defaultCacheCapacity,
ReserveCapacity: uint64(batchstore.Capacity),
}
}
db = &DB{
stateStore: ss,
cacheCapacity: o.Capacity,
reserveCapacity: o.ReserveCapacity,
unreserveFunc: o.UnreserveFunc,
baseKey: baseKey,
tags: o.Tags,
// channel collectGarbageTrigger
......@@ -198,8 +218,10 @@ func New(path string, baseKey []byte, ss storage.StateStorer, o *Options, logger
// to signal another event if it
// is triggered during already running function
collectGarbageTrigger: make(chan struct{}, 1),
reserveEvictionTrigger: make(chan struct{}, 1),
close: make(chan struct{}),
collectGarbageWorkerDone: make(chan struct{}),
reserveEvictionWorkerDone: make(chan struct{}),
metrics: newMetrics(),
logger: logger,
}
......@@ -264,6 +286,12 @@ func New(path string, baseKey []byte, ss storage.StateStorer, o *Options, logger
return nil, err
}
// reserve size
db.reserveSize, err = db.shed.NewUint64Field("reserve-size")
if err != nil {
return nil, err
}
// Index storing actual chunk address, data and bin id.
headerSize := 16 + postage.StampSize
db.retrievalDataIndex, err = db.shed.NewIndex("Address->StoreTimestamp|BinID|BatchID|BatchIndex|Sig|Data", shed.IndexFuncs{
......@@ -523,6 +551,7 @@ func New(path string, baseKey []byte, ss storage.StateStorer, o *Options, logger
// start garbage collection and reserve eviction workers
go db.collectGarbageWorker()
go db.reserveEvictionWorker()
return db, nil
}
......@@ -538,6 +567,7 @@ func (db *DB) Close() (err error) {
// wait for gc and reserve eviction workers to
// return before closing the shed
<-db.collectGarbageWorkerDone
<-db.reserveEvictionWorkerDone
close(done)
}()
select {
......
......@@ -59,6 +59,11 @@ type metrics struct {
GCSize prometheus.Gauge
GCStoreTimeStamps prometheus.Gauge
GCStoreAccessTimeStamps prometheus.Gauge
ReserveSize prometheus.Gauge
EvictReserveCounter prometheus.Counter
EvictReserveErrorCounter prometheus.Counter
TotalTimeEvictReserve prometheus.Counter
}
func newMetrics() metrics {
......@@ -343,6 +348,30 @@ func newMetrics() metrics {
Name: "gc_access_time_stamp",
Help: "Access timestamp in Garbage collection iteration.",
}),
ReserveSize: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: m.Namespace,
Subsystem: subsystem,
Name: "reserve_size",
Help: "Number of elements in reserve.",
}),
EvictReserveCounter: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: m.Namespace,
Subsystem: subsystem,
Name: "evict_reserve_count",
Help: "number of times the evict reserve worker was invoked",
}),
EvictReserveErrorCounter: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: m.Namespace,
Subsystem: subsystem,
Name: "evict_reserve_err_count",
Help: "number of times evict reserve got an error",
}),
TotalTimeEvictReserve: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: m.Namespace,
Subsystem: subsystem,
Name: "evict_reserve_total_time",
Help: "total time spent evicting from reserve",
}),
}
}
......
......@@ -219,6 +219,19 @@ func (db *DB) putRequest(batch *leveldb.Batch, binIDs map[uint8]uint64, item she
if err != nil {
return false, 0, err
}
radius, err := db.postageRadiusIndex.Get(item)
if err != nil {
if !errors.Is(err, leveldb.ErrNotFound) {
return false, 0, err
}
} else {
if db.po(swarm.NewAddress(item.Address)) >= radius.Radius {
if err := db.incReserveSizeInBatch(batch, -1); err != nil {
return false, 0, err
}
}
}
}
item.StoreTimestamp = now()
......@@ -353,6 +366,18 @@ func (db *DB) putSync(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.I
if err != nil {
return false, 0, err
}
radius, err := db.postageRadiusIndex.Get(item)
if err != nil {
if !errors.Is(err, leveldb.ErrNotFound) {
return false, 0, err
}
} else {
if db.po(swarm.NewAddress(item.Address)) >= radius.Radius {
if err := db.incReserveSizeInBatch(batch, -1); err != nil {
return false, 0, err
}
}
}
}
item.StoreTimestamp = now()
......@@ -393,15 +418,12 @@ func (db *DB) putSync(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.I
// preserveOrCache is a helper function used to add chunks to either a pinned reserve or gc cache
// (the retrieval access index and the gc index)
func (db *DB) preserveOrCache(batch *leveldb.Batch, item shed.Item, forcePin, forceCache bool) (gcSizeChange int64, err error) {
// item needs to be populated with Radius
item2, err := db.postageRadiusIndex.Get(item)
if err != nil {
// if there's an error, assume the chunk needs to be GCd
forceCache = true
} else {
item.Radius = item2.Radius
}
if !forceCache && (withinRadiusFn(db, item) || forcePin) {
if !forcePin {
if err := db.incReserveSizeInBatch(batch, 1); err != nil {
return 0, err
}
}
return db.setPin(batch, item)
}
......
......@@ -19,7 +19,6 @@ package localstore
import (
"context"
"errors"
"fmt"
"time"
"github.com/ethersphere/bee/pkg/shed"
......@@ -193,12 +192,6 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address) (gcSizeChange in
} else {
item.AccessTimestamp = i1.AccessTimestamp
}
// item needs to be populated with Radius
item2, err := db.postageRadiusIndex.Get(item)
if err != nil {
return 0, fmt.Errorf("postage chunks index: %w", err)
}
item.Radius = item2.Radius
return db.preserveOrCache(batch, item, false, false)
}
......
......@@ -71,7 +71,7 @@ func TestModeSetRemove_WithSync(t *testing.T) {
var chs []swarm.Chunk
for i := 0; i < tc.count; i++ {
ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false)
err := db.UnreserveBatch(ch.Stamp().BatchID(), 2)
_, err := db.UnreserveBatch(ch.Stamp().BatchID(), 2)
if err != nil {
t.Fatal(err)
}
......
......@@ -16,29 +16,26 @@ import (
// UnreserveBatch atomically unpins chunks of a batch in proximity order up to and including po.
// Unpinning will result in all chunks with pincounter 0 being put in the gc index
// so if a chunk was only pinned by the reserve, unreserving it will make it gc-able.
func (db *DB) UnreserveBatch(id []byte, radius uint8) error {
db.batchMu.Lock()
defer db.batchMu.Unlock()
func (db *DB) UnreserveBatch(id []byte, radius uint8) (evicted uint64, err error) {
var (
item = shed.Item{
BatchID: id,
}
batch = new(leveldb.Batch)
oldRadius = radius
)
i, err := db.postageRadiusIndex.Get(item)
if err != nil {
if !errors.Is(err, leveldb.ErrNotFound) {
return err
}
item.Radius = radius
if err := db.postageRadiusIndex.PutInBatch(batch, item); err != nil {
return err
return 0, err
}
return db.shed.WriteBatch(batch)
} else {
oldRadius = i.Radius
}
oldRadius := i.Radius
var gcSizeChange int64 // number to add or subtract from gcSize
var (
gcSizeChange int64 // number to add or subtract from gcSize and reserveSize
reserveSizeChange uint64
)
unpin := func(item shed.Item) (stop bool, err error) {
addr := swarm.NewAddress(item.Address)
c, err := db.setUnpin(batch, addr)
......@@ -50,6 +47,13 @@ func (db *DB) UnreserveBatch(id []byte, radius uint8) error {
// a dirty shutdown
db.logger.Tracef("unreserve set unpin chunk %s: %v", addr.String(), err)
}
} else {
// we need to do this because a user might pin a chunk on top of
// the reserve pinning. when we unpin due to an unreserve call, we
// should still logically deduct the chunk from the reserve size,
// otherwise the reserve size leaks, since c returned from setUnpin
// will be zero.
reserveSizeChange++
}
gcSizeChange += c
......@@ -60,38 +64,60 @@ func (db *DB) UnreserveBatch(id []byte, radius uint8) error {
for bin := oldRadius; bin < radius; bin++ {
err := db.postageChunksIndex.Iterate(unpin, &shed.IterateOptions{Prefix: append(id, bin)})
if err != nil {
return err
return 0, err
}
// adjust gcSize
if err := db.incGCSizeInBatch(batch, gcSizeChange); err != nil {
return err
return 0, err
}
item.Radius = bin
if err := db.postageRadiusIndex.PutInBatch(batch, item); err != nil {
return err
return 0, err
}
if bin == swarm.MaxPO {
if err := db.postageRadiusIndex.DeleteInBatch(batch, item); err != nil {
return err
return 0, err
}
}
if err := db.shed.WriteBatch(batch); err != nil {
return err
return 0, err
}
db.logger.Debugf("unreserveBatch gc change %d reserve size change %d", gcSizeChange, reserveSizeChange)
batch = new(leveldb.Batch)
gcSizeChange = 0
}
if radius != swarm.MaxPO+1 {
item.Radius = radius
if err := db.postageRadiusIndex.PutInBatch(batch, item); err != nil {
return 0, err
}
if err := db.shed.WriteBatch(batch); err != nil {
return 0, err
}
}
gcSize, err := db.gcSize.Get()
if err != nil && !errors.Is(err, leveldb.ErrNotFound) {
return err
return 0, err
}
if reserveSizeChange > 0 {
batch = new(leveldb.Batch)
if err := db.incReserveSizeInBatch(batch, -int64(reserveSizeChange)); err != nil {
return 0, err
}
if err := db.shed.WriteBatch(batch); err != nil {
return 0, err
}
}
// trigger garbage collection if we reached the capacity
if gcSize >= db.cacheCapacity {
db.triggerGarbageCollection()
}
return nil
return reserveSizeChange, nil
}
func withinRadius(db *DB, item shed.Item) bool {
......
......@@ -7,13 +7,14 @@ package localstore
import (
"context"
"errors"
"sync"
"testing"
"time"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/syndtr/goleveldb/leveldb"
)
// TestDB_ReserveGC_AllOutOfRadius tests that when all chunks fall outside of
......@@ -30,21 +31,19 @@ func TestDB_ReserveGC_AllOutOfRadius(t *testing.T) {
case <-closed:
}
}))
t.Cleanup(setWithinRadiusFunc(func(*DB, shed.Item) bool { return false }))
db := newTestDB(t, &Options{
Capacity: 100,
ReserveCapacity: 200,
})
closed = db.close
addrs := make([]swarm.Address, 0)
for i := 0; i < chunkCount; i++ {
ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(3, 3, 2, false)
err := db.UnreserveBatch(ch.Stamp().BatchID(), 4)
if err != nil {
t.Fatal(err)
}
_, err = db.Put(context.Background(), storage.ModePutUpload, ch)
ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(5, 3, 2, false)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
......@@ -79,7 +78,7 @@ func TestDB_ReserveGC_AllOutOfRadius(t *testing.T) {
// postageRadiusIndex gets removed only when the batches are called with evict on MaxPO+1
// therefore, the expected index count here is larger than one would expect.
t.Run("postage radius index count", newItemsCountTest(db.postageRadiusIndex, chunkCount))
t.Run("postage radius index count", newItemsCountTest(db.postageRadiusIndex, 0))
t.Run("gc index count", newItemsCountTest(db.gcIndex, int(gcTarget)))
......@@ -124,9 +123,36 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
case <-closed:
}
}))
var (
batchIDs [][]byte
unreserveCalled bool
mtx sync.Mutex
)
unres := func(f postage.UnreserveIteratorFn) error {
mtx.Lock()
defer mtx.Unlock()
unreserveCalled = true
for i := 0; i < len(batchIDs); i++ {
// pop an element from batchIDs, call the Unreserve
item := batchIDs[i]
// here we mock the behavior of the batchstore
// that would call the localstore back with the
// batch IDs and the radiuses from the FIFO queue
stop, err := f(item, 4)
if err != nil {
return err
}
if stop {
return nil
}
}
return nil
}
db := newTestDB(t, &Options{
Capacity: 100,
ReserveCapacity: 151,
UnreserveFunc: unres,
})
closed = db.close
......@@ -134,11 +160,7 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
for i := 0; i < chunkCount; i++ {
ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false)
err := db.UnreserveBatch(ch.Stamp().BatchID(), 2)
if err != nil {
t.Fatal(err)
}
_, err = db.Put(context.Background(), storage.ModePutUpload, ch)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
......@@ -146,8 +168,10 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
if err != nil {
t.Fatal(err)
}
mtx.Lock()
addrs = append(addrs, ch.Address())
batchIDs = append(batchIDs, ch.Stamp().BatchID())
mtx.Unlock()
}
select {
......@@ -160,7 +184,7 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
t.Run("postage chunks index count", newItemsCountTest(db.postageChunksIndex, chunkCount))
t.Run("postage radius index count", newItemsCountTest(db.postageRadiusIndex, chunkCount))
t.Run("postage radius index count", newItemsCountTest(db.postageRadiusIndex, 0))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 0))
......@@ -174,37 +198,84 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) {
}
}
})
mtx.Lock()
defer mtx.Unlock()
if unreserveCalled {
t.Fatal("unreserveCalled but should not have")
}
}
// TestDB_ReserveGC_Unreserve tests that after calling UnreserveBatch
// with a certain radius change, the correct chunks get put into the
// GC index and eventually get garbage collected.
// batch radius, none get collected.
func TestDB_ReserveGC_Unreserve(t *testing.T) {
chunkCount := 150
chunkCount := 100
var closed chan struct{}
testHookCollectGarbageChan := make(chan uint64)
testHookEvictChan := make(chan uint64)
t.Cleanup(setTestHookCollectGarbage(func(collectedCount uint64) {
select {
case testHookCollectGarbageChan <- collectedCount:
case <-closed:
}
}))
t.Cleanup(setTestHookEviction(func(collectedCount uint64) {
select {
case testHookEvictChan <- collectedCount:
case <-closed:
}
}))
var (
mtx sync.Mutex
batchIDs [][]byte
addrs []swarm.Address
)
unres := func(f postage.UnreserveIteratorFn) error {
mtx.Lock()
defer mtx.Unlock()
for i := 0; i < len(batchIDs); i++ {
// pop an element from batchIDs, call the Unreserve
item := batchIDs[i]
// here we mock the behavior of the batchstore
// that would call the localstore back with the
// batch IDs and the radiuses from the FIFO queue
stop, err := f(item, 2)
if err != nil {
return err
}
if stop {
return nil
}
stop, err = f(item, 4)
if err != nil {
return err
}
if stop {
return nil
}
}
batchIDs = nil
return nil
}
db := newTestDB(t, &Options{
Capacity: 100,
// once the reserve reaches its capacity of 100 chunks, we evict
// down to the eviction target (90, since the 10% ceiling applies), i.e. 10 chunks
ReserveCapacity: 100,
UnreserveFunc: unres,
})
closed = db.close
// put the first chunkCount chunks within radius
// put chunkCount chunks within radius. this
// will cause reserve eviction of 10 chunks into
// the cache. gc of the cache is still not triggered
for i := 0; i < chunkCount; i++ {
ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false)
err := db.UnreserveBatch(ch.Stamp().BatchID(), 2)
if err != nil {
t.Fatal(err)
}
_, err = db.Put(context.Background(), storage.ModePutUpload, ch)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
......@@ -212,51 +283,62 @@ func TestDB_ReserveGC_Unreserve(t *testing.T) {
if err != nil {
t.Fatal(err)
}
mtx.Lock()
batchIDs = append(batchIDs, ch.Stamp().BatchID())
addrs = append(addrs, ch.Address())
mtx.Unlock()
}
var po4Chs []swarm.Chunk
for i := 0; i < chunkCount; i++ {
ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 4).WithBatch(2, 3, 2, false)
err := db.UnreserveBatch(ch.Stamp().BatchID(), 2)
if err != nil {
t.Fatal(err)
// wait for the first eviction to finish, otherwise
// we collect some of the next chunks that get added
// which results in inconsistencies
evictTarget := db.reserveEvictionTarget()
for {
select {
case <-testHookEvictChan:
case <-time.After(10 * time.Second):
t.Fatal("collect garbage timeout")
}
_, err = db.Put(context.Background(), storage.ModePutUpload, ch)
resSize, err := db.reserveSize.Get()
if err != nil {
t.Fatal(err)
}
err = db.Set(context.Background(), storage.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
if resSize == evictTarget {
break
}
po4Chs = append(po4Chs, ch)
}
var gcChs []swarm.Chunk
for i := 0; i < 100; i++ {
gcch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false)
err := db.UnreserveBatch(gcch.Stamp().BatchID(), 2)
if err != nil {
t.Fatal(err)
}
_, err = db.Put(context.Background(), storage.ModePutUpload, gcch)
// insert another 90, this will trigger gc
for i := 0; i < 90; i++ {
ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
err = db.Set(context.Background(), storage.ModeSetSync, gcch.Address())
err = db.Set(context.Background(), storage.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
gcChs = append(gcChs, gcch)
mtx.Lock()
batchIDs = append(batchIDs, ch.Stamp().BatchID())
addrs = append(addrs, ch.Address())
mtx.Unlock()
}
// radius increases from 2 to 3, chunk is in PO 2, therefore it should be
// GCd
for _, ch := range gcChs {
err := db.UnreserveBatch(ch.Stamp().BatchID(), 3)
for {
select {
case <-testHookEvictChan:
case <-time.After(10 * time.Second):
t.Fatal("collect garbage timeout")
}
resSize, err := db.reserveSize.Get()
if err != nil {
t.Fatal(err)
}
if resSize == evictTarget {
break
}
}
gcTarget := db.gcTarget()
......@@ -275,21 +357,21 @@ func TestDB_ReserveGC_Unreserve(t *testing.T) {
break
}
}
t.Run("pull index count", newItemsCountTest(db.pullIndex, chunkCount*2+90))
t.Run("pull index count", newItemsCountTest(db.pullIndex, chunkCount+90-10))
t.Run("postage chunks index count", newItemsCountTest(db.postageChunksIndex, chunkCount*2+90))
t.Run("postage chunks index count", newItemsCountTest(db.postageChunksIndex, chunkCount+90-10))
// postageRadiusIndex gets removed only when the batches are called with evict on MaxPO+1
// therefore, the expected index count here is larger than one would expect.
t.Run("postage radius index count", newItemsCountTest(db.postageRadiusIndex, chunkCount*2+100))
t.Run("postage radius index count", newItemsCountTest(db.postageRadiusIndex, chunkCount))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 90))
t.Run("gc size", newIndexGCSizeTest(db))
t.Run("first ten unreserved chunks should not be accessible", func(t *testing.T) {
for _, ch := range gcChs[:10] {
_, err := db.Get(context.Background(), storage.ModeGetRequest, ch.Address())
for _, a := range addrs[:10] {
_, err := db.Get(context.Background(), storage.ModeGetRequest, a)
if err == nil {
t.Error("got no error, want NotFound")
}
......@@ -297,17 +379,8 @@ func TestDB_ReserveGC_Unreserve(t *testing.T) {
})
t.Run("the rest should be accessible", func(t *testing.T) {
for _, ch := range gcChs[10:] {
_, err := db.Get(context.Background(), storage.ModeGetRequest, ch.Address())
if err != nil {
t.Errorf("got error %v but want none", err)
}
}
})
t.Run("po 4 chunks accessible", func(t *testing.T) {
for _, ch := range po4Chs {
_, err := db.Get(context.Background(), storage.ModeGetRequest, ch.Address())
for _, a := range addrs[10:] {
_, err := db.Get(context.Background(), storage.ModeGetRequest, a)
if err != nil {
t.Errorf("got error %v but want none", err)
}
......@@ -318,30 +391,79 @@ func TestDB_ReserveGC_Unreserve(t *testing.T) {
// TestDB_ReserveGC_EvictMaxPO tests that unreserving a batch at
// swarm.MaxPO+1 results in the correct behaviour.
func TestDB_ReserveGC_EvictMaxPO(t *testing.T) {
chunkCount := 150
var closed chan struct{}
testHookCollectGarbageChan := make(chan uint64)
var (
mtx sync.Mutex
batchIDs [][]byte
addrs []swarm.Address
chunkCount = 100
testHookCollectGarbageChan = make(chan uint64)
testHookEvictChan = make(chan uint64)
closed chan struct{}
)
t.Cleanup(setTestHookCollectGarbage(func(collectedCount uint64) {
if collectedCount == 0 {
return
}
select {
case testHookCollectGarbageChan <- collectedCount:
case <-closed:
}
}))
t.Cleanup(setTestHookEviction(func(collectedCount uint64) {
if collectedCount == 0 {
return
}
select {
case testHookEvictChan <- collectedCount:
case <-closed:
}
}))
unres := func(f postage.UnreserveIteratorFn) error {
mtx.Lock()
defer mtx.Unlock()
i := 0
defer func() { batchIDs = batchIDs[i:] }()
for i = 0; i < len(batchIDs); i++ {
// pop an element from batchIDs, call the Unreserve
item := batchIDs[i]
// here we mock the behavior of the batchstore
// that would call the localstore back with the
// batch IDs and the radiuses from the FIFO queue
stop, err := f(item, 2)
if err != nil {
return err
}
if stop {
return nil
}
stop, err = f(item, swarm.MaxPO+1)
if err != nil {
return err
}
if stop {
return nil
}
}
return nil
}
db := newTestDB(t, &Options{
Capacity: 100,
// once the reserve reaches its capacity of 100 chunks, we evict
// down to the eviction target (90, since the 10% ceiling applies), i.e. 10 chunks
ReserveCapacity: 100,
UnreserveFunc: unres,
})
closed = db.close
// put the first chunkCount chunks within radius
for i := 0; i < chunkCount; i++ {
ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false)
err := db.UnreserveBatch(ch.Stamp().BatchID(), 2)
if err != nil {
t.Fatal(err)
}
_, err = db.Put(context.Background(), storage.ModePutUpload, ch)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
......@@ -349,31 +471,69 @@ func TestDB_ReserveGC_EvictMaxPO(t *testing.T) {
if err != nil {
t.Fatal(err)
}
mtx.Lock()
batchIDs = append(batchIDs, ch.Stamp().BatchID())
addrs = append(addrs, ch.Address())
mtx.Unlock()
}
var gcChs []swarm.Chunk
for i := 0; i < 100; i++ {
gcch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false)
err := db.UnreserveBatch(gcch.Stamp().BatchID(), 2)
// wait for the first eviction to finish, otherwise
// we collect some of the next chunks that get added
// which results in inconsistencies
evictTarget := db.reserveEvictionTarget()
for {
select {
case <-testHookEvictChan:
case <-time.After(10 * time.Second):
t.Fatal("collect garbage timeout")
}
resSize, err := db.reserveSize.Get()
if err != nil {
t.Fatal(err)
}
_, err = db.Put(context.Background(), storage.ModePutUpload, gcch)
if resSize == evictTarget {
break
}
}
// this is zero because we call eviction with max PO on the first 10 batches
// but the next 90 batches were not called with unreserve yet. this means that
// although the next 90 chunks exist in the store, their corresponding batch radius
// still isn't persisted, since the localstore still is not aware of their
// batch radiuses. the same goes for the check after the gc actually evicts the
// ten chunks out of the cache (we still expect a zero for postage radius for the
// same reason)
t.Run("postage radius index count", newItemsCountTest(db.postageRadiusIndex, 0))
for i := 0; i < 90; i++ {
ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false)
_, err := db.Put(context.Background(), storage.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
err = db.Set(context.Background(), storage.ModeSetSync, gcch.Address())
err = db.Set(context.Background(), storage.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
gcChs = append(gcChs, gcch)
mtx.Lock()
batchIDs = append(batchIDs, ch.Stamp().BatchID())
addrs = append(addrs, ch.Address())
mtx.Unlock()
}
for _, ch := range gcChs {
err := db.UnreserveBatch(ch.Stamp().BatchID(), swarm.MaxPO+1)
for {
select {
case <-testHookEvictChan:
case <-time.After(10 * time.Second):
t.Fatal("collect garbage timeout")
}
resSize, err := db.reserveSize.Get()
if err != nil {
t.Fatal(err)
}
if resSize == evictTarget {
break
}
}
gcTarget := db.gcTarget()
......@@ -392,19 +552,19 @@ func TestDB_ReserveGC_EvictMaxPO(t *testing.T) {
break
}
}
t.Run("pull index count", newItemsCountTest(db.pullIndex, chunkCount+90))
t.Run("pull index count", newItemsCountTest(db.pullIndex, chunkCount+90-10))
t.Run("postage chunks index count", newItemsCountTest(db.postageChunksIndex, chunkCount+90))
t.Run("postage chunks index count", newItemsCountTest(db.postageChunksIndex, chunkCount+90-10))
t.Run("postage radius index count", newItemsCountTest(db.postageRadiusIndex, chunkCount))
t.Run("postage radius index count", newItemsCountTest(db.postageRadiusIndex, 0))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 90))
t.Run("gc size", newIndexGCSizeTest(db))
t.Run("first ten unreserved chunks should not be accessible", func(t *testing.T) {
for _, ch := range gcChs[:10] {
_, err := db.Get(context.Background(), storage.ModeGetRequest, ch.Address())
for _, a := range addrs[:10] {
_, err := db.Get(context.Background(), storage.ModeGetRequest, a)
if err == nil {
t.Error("got no error, want NotFound")
}
......@@ -412,19 +572,11 @@ func TestDB_ReserveGC_EvictMaxPO(t *testing.T) {
})
t.Run("the rest should be accessible", func(t *testing.T) {
for _, ch := range gcChs[10:] {
_, err := db.Get(context.Background(), storage.ModeGetRequest, ch.Address())
for _, a := range addrs[10:] {
_, err := db.Get(context.Background(), storage.ModeGetRequest, a)
if err != nil {
t.Errorf("got error %v but want none", err)
}
}
})
t.Run("batches for the all evicted batches should be evicted", func(t *testing.T) {
for _, ch := range gcChs {
item := shed.Item{BatchID: ch.Stamp().BatchID()}
if _, err := db.postageRadiusIndex.Get(item); !errors.Is(err, leveldb.ErrNotFound) {
t.Fatalf("wanted ErrNotFound but got %v", err)
}
}
})
}
......@@ -64,6 +64,7 @@ import (
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/steward"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/topology"
"github.com/ethersphere/bee/pkg/topology/kademlia"
......@@ -349,6 +350,17 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo
b.p2pService = p2ps
b.p2pHalter = p2ps
var unreserveFn func([]byte, uint8) (uint64, error)
var evictFn = func(b []byte) error {
_, err := unreserveFn(b, swarm.MaxPO+1)
return err
}
batchStore, err := batchstore.New(stateStore, evictFn, logger)
if err != nil {
return nil, fmt.Errorf("batchstore: %w", err)
}
// localstore depends on batchstore
var path string
......@@ -358,6 +370,8 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo
}
lo := &localstore.Options{
Capacity: o.CacheCapacity,
ReserveCapacity: uint64(batchstore.Capacity),
UnreserveFunc: batchStore.Unreserve,
OpenFilesLimit: o.DBOpenFilesLimit,
BlockCacheCapacity: o.DBBlockCacheCapacity,
WriteBufferSize: o.DBWriteBufferSize,
......@@ -369,11 +383,8 @@ func NewBee(addr string, publicKey *ecdsa.PublicKey, signer crypto.Signer, netwo
return nil, fmt.Errorf("localstore: %w", err)
}
b.localstoreCloser = storer
unreserveFn = storer.UnreserveBatch
batchStore, err := batchstore.New(stateStore, storer.UnreserveBatch)
if err != nil {
return nil, fmt.Errorf("batchstore: %w", err)
}
validStamp := postage.ValidStamp(batchStore)
post, err := postage.NewService(stateStore, batchStore, chainID)
if err != nil {
......
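A minimal sketch (not part of the change) of the late-binding pattern used above to break the construction cycle between the batchstore (which needs an evict function) and the localstore (which needs the batchstore's Unreserve). The types and the radius literal below are illustrative:

```go
package main

import "fmt"

type batchStore struct {
	evict func(id []byte) error
}

type localStore struct{}

func (l *localStore) UnreserveBatch(id []byte, radius uint8) (uint64, error) {
	fmt.Printf("unreserve batch %x up to radius %d\n", id, radius)
	return 0, nil
}

func main() {
	// forward declaration: captured by the closure before the localstore exists
	var unreserveFn func(id []byte, radius uint8) (uint64, error)

	bs := &batchStore{evict: func(id []byte) error {
		_, err := unreserveFn(id, 32) // standing in for swarm.MaxPO+1
		return err
	}}

	ls := &localStore{}
	unreserveFn = ls.UnreserveBatch // late binding completes the cycle

	if err := bs.evict([]byte{0x01}); err != nil {
		fmt.Println(err)
	}
}
```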
......@@ -6,7 +6,6 @@ package batchstore
import (
"fmt"
"math/big"
"github.com/ethersphere/bee/pkg/postage"
)
......@@ -20,24 +19,11 @@ var BatchKey = batchKey
// power of 2 function
var Exp2 = exp2
// iterates through all batches
func IterateAll(bs postage.Storer, f func(b *postage.Batch) (bool, error)) error {
s := bs.(*store)
return s.store.Iterate(batchKeyPrefix, func(key []byte, _ []byte) (bool, error) {
b, err := s.Get(key[len(key)-32:])
if err != nil {
return true, err
}
return f(b)
})
}
// GetReserve extracts the inner limit and depth of reserve
func GetReserve(si postage.Storer) (*big.Int, uint8) {
s, _ := si.(*store)
return s.rs.Inner, s.rs.Radius
}
func (s *store) String() string {
return fmt.Sprintf("inner=%d,outer=%d", s.rs.Inner.Uint64(), s.rs.Outer.Uint64())
}
func SetUnreserveFunc(s postage.Storer, fn func([]byte, uint8) error) {
st := s.(*store)
st.unreserveFn = fn
}
......@@ -140,7 +140,9 @@ func (bs *BatchStore) GetReserveState() *postage.ReserveState {
}
return rs
}
func (bs *BatchStore) Unreserve(_ postage.UnreserveIteratorFn) error {
panic("not implemented")
}
func (bs *BatchStore) SetRadiusSetter(r postage.RadiusSetter) {
panic("not implemented")
}
......
......@@ -28,11 +28,14 @@ package batchstore
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"math/big"
"strings"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
)
......@@ -54,6 +57,10 @@ type reserveState struct {
// it defines the proximity order of chunks which we
// would like to guarantee that all chunks are stored
Radius uint8 `json:"radius"`
// StorageRadius is the de-facto storage radius tracked
// by monitoring the events communicated to the localstore
// reserve eviction worker.
StorageRadius uint8 `json:"storageRadius"`
// Available capacity of the reserve which can still be used.
Available int64 `json:"available"`
Outer *big.Int `json:"outer"` // lower value limit for outer layer = the further half of chunks
......@@ -61,9 +68,63 @@ type reserveState struct {
}
// unreserve is called when the batchstore decides not to reserve a batch on a PO
// i.e. chunk of the batch in bins [0 upto PO] (closed interval) are unreserved
func (s *store) unreserve(b *postage.Batch, radius uint8) error {
return s.unreserveFunc(b.ID, radius)
// i.e. chunks of the batch in bins [0 up to PO] (closed interval) are unreserved.
// this adds the batch at the mentioned PO to the unreserve fifo queue, which can be
// dequeued by the localstore once the storage fills up.
func (s *store) unreserve(b []byte, radius uint8) error {
c := s.queueIdx
c++
v := make([]byte, 8)
binary.BigEndian.PutUint64(v, c)
i := &UnreserveItem{BatchID: b, Radius: radius}
if err := s.store.Put(fmt.Sprintf("%s_%s", unreserveQueueKey, string(v)), i); err != nil {
return err
}
if err := s.putQueueCardinality(c); err != nil {
return err
}
s.queueIdx = c
return nil
}
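A small sketch (not part of the change) of why the queued entries come back in insertion order: the cardinality counter is appended big-endian, so byte-wise key comparison under the statestore's prefix iteration matches numeric order. The prefix string and helper below are illustrative:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// queueKey builds a key in the spirit of the code above: a prefix, an
// underscore, then the 8-byte big-endian counter appended as raw bytes.
func queueKey(prefix string, idx uint64) []byte {
	v := make([]byte, 8)
	binary.BigEndian.PutUint64(v, idx)
	return append([]byte(prefix+"_"), v...)
}

func main() {
	k1 := queueKey("batchstore_unreserve_queue", 1)
	k2 := queueKey("batchstore_unreserve_queue", 256)
	// big-endian keeps numeric order under byte-wise comparison,
	// so prefix iteration yields the events in FIFO order
	fmt.Println(bytes.Compare(k1, k2) < 0) // true
}
```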
func (s *store) Unreserve(cb postage.UnreserveIteratorFn) error {
var entries []string // entries to clean up
defer func() {
for _, v := range entries {
if err := s.store.Delete(v); err != nil {
s.logger.Errorf("batchstore: unreserve entry delete: %v", err)
return
}
}
}()
return s.store.Iterate(unreserveQueueKey, func(key, val []byte) (bool, error) {
if !strings.HasPrefix(string(key), unreserveQueueKey) {
return true, nil
}
v := &UnreserveItem{}
err := v.UnmarshalBinary(val)
if err != nil {
return true, err
}
stop, err := cb(v.BatchID, v.Radius)
if err != nil {
return true, err
}
s.rsMtx.Lock()
defer s.rsMtx.Unlock()
if s.rs.StorageRadius+1 < v.Radius {
s.rs.StorageRadius = v.Radius - 1
if err = s.store.Put(reserveStateKey, s.rs); err != nil {
return true, err
}
}
entries = append(entries, string(key))
if stop {
return true, nil
}
return false, nil
})
}
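A small worked sketch (not part of the change) of the StorageRadius bookkeeping in Unreserve above, replaying hypothetical radius events through the same update rule:

```go
package main

import "fmt"

func main() {
	var storageRadius uint8
	for _, radius := range []uint8{2, 2, 4, 3} {
		// mirrors: if rs.StorageRadius+1 < radius { rs.StorageRadius = radius - 1 }
		if storageRadius+1 < radius {
			storageRadius = radius - 1
		}
		fmt.Printf("event radius %d -> storage radius %d\n", radius, storageRadius)
	}
	// output: 2 -> 1, 2 -> 1, 4 -> 3, 3 -> 3
}
```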
// evictExpired is called when PutChainState is called (and there is 'settlement')
......@@ -112,10 +173,11 @@ func (s *store) evictExpired() error {
}
// unreserve batch fully
err = s.unreserve(b, swarm.MaxPO+1)
err = s.evictFn(b.ID)
if err != nil {
return true, err
}
s.rs.Available += multiplier * exp2(b.Radius-s.rs.Radius-1)
// if batch has no value then delete it
......@@ -250,7 +312,7 @@ func (s *store) update(b *postage.Batch, oldDepth uint8, oldValue *big.Int) erro
capacityChange, reserveRadius := s.rs.change(oldValue, newValue, oldDepth, newDepth)
s.rs.Available += capacityChange
if err := s.unreserve(b, reserveRadius); err != nil {
if err := s.unreserveFn(b.ID, reserveRadius); err != nil {
return err
}
err := s.evictOuter(b)
......@@ -293,7 +355,7 @@ func (s *store) evictOuter(last *postage.Batch) error {
// unreserve outer PO of the lowest priority batch until capacity is back to positive
s.rs.Available += exp2(b.Depth - s.rs.Radius - 1)
s.rs.Outer.Set(b.Value)
return false, s.unreserve(b, s.rs.Radius)
return false, s.unreserveFn(b.ID, s.rs.Radius)
})
if err != nil {
return err
......@@ -310,6 +372,41 @@ func (s *store) evictOuter(last *postage.Batch) error {
return s.store.Put(reserveStateKey, s.rs)
}
func (s *store) getQueueCardinality() (val uint64, err error) {
err = s.store.Get(ureserveQueueCardinalityKey, &val)
if errors.Is(err, storage.ErrNotFound) {
return 0, nil
}
return val, err
}
func (s *store) putQueueCardinality(val uint64) error {
return s.store.Put(ureserveQueueCardinalityKey, val)
}
type UnreserveItem struct {
BatchID []byte
Radius uint8
}
func (u *UnreserveItem) MarshalBinary() ([]byte, error) {
out := make([]byte, 32+1) // 32 byte batch ID + 1 byte uint8 radius
copy(out, u.BatchID)
out[32] = u.Radius
return out, nil
}
func (u *UnreserveItem) UnmarshalBinary(b []byte) error {
if len(b) != 33 {
return errors.New("invalid unreserve item length")
}
u.BatchID = make([]byte, 32)
copy(u.BatchID, b[:32])
u.Radius = b[32]
return nil
}
// exp2 returns the e-th power of 2
func exp2(e uint8) int64 {
if e == 0 {
......
......@@ -5,9 +5,9 @@
package batchstore_test
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"math/big"
"math/rand"
......@@ -23,167 +23,6 @@ import (
"github.com/ethersphere/bee/pkg/swarm"
)
// random advance on the blockchain
func newBlockAdvance() uint64 {
return uint64(rand.Intn(3) + 1)
}
// initial depth of a new batch
func newBatchDepth(depth uint8) uint8 {
return depth + uint8(rand.Intn(10)) + 4
}
// the factor to increase the batch depth with
func newDilutionFactor() int {
return rand.Intn(3) + 1
}
// new value on top of value based on random period and price
func newValue(price, value *big.Int) *big.Int {
period := rand.Intn(100) + 1000
v := new(big.Int).Mul(price, big.NewInt(int64(period)))
return v.Add(v, value)
}
// TestBatchStoreUnreserve is testing the correct behaviour of the reserve.
// the following assumptions are tested on each modification of the batches (top up, depth increase, price change)
// - reserve exceeds capacity
// - value-consistency of unreserved POs
func TestBatchStoreUnreserveEvents(t *testing.T) {
defer func(i int64, d uint8) {
batchstore.Capacity = i
batchstore.DefaultDepth = d
}(batchstore.Capacity, batchstore.DefaultDepth)
batchstore.DefaultDepth = 5
batchstore.Capacity = batchstore.Exp2(16)
bStore, unreserved := setupBatchStore(t)
bStore.SetRadiusSetter(noopRadiusSetter{})
batches := make(map[string]*postage.Batch)
t.Run("new batches only", func(t *testing.T) {
// iterate starting from batchstore.DefaultDepth to maxPO
_, radius := batchstore.GetReserve(bStore)
for step := 0; radius < swarm.MaxPO; step++ {
cs, err := nextChainState(bStore)
if err != nil {
t.Fatal(err)
}
var b *postage.Batch
if b, err = createBatch(bStore, cs, radius); err != nil {
t.Fatal(err)
}
batches[string(b.ID)] = b
if radius, err = checkReserve(bStore, unreserved); err != nil {
t.Fatal(err)
}
}
})
t.Run("top up batches", func(t *testing.T) {
n := 0
for id := range batches {
b, err := bStore.Get([]byte(id))
if err != nil {
if errors.Is(err, storage.ErrNotFound) {
continue
}
t.Fatal(err)
}
cs, err := nextChainState(bStore)
if err != nil {
t.Fatal(err)
}
if err = topUp(bStore, cs, b); err != nil {
t.Fatal(err)
}
if _, err = checkReserve(bStore, unreserved); err != nil {
t.Fatal(err)
}
n++
if n > len(batches)/5 {
break
}
}
})
t.Run("dilute batches", func(t *testing.T) {
n := 0
for id := range batches {
b, err := bStore.Get([]byte(id))
if err != nil {
if errors.Is(err, storage.ErrNotFound) {
continue
}
t.Fatal(err)
}
cs, err := nextChainState(bStore)
if err != nil {
t.Fatal(err)
}
if err = increaseDepth(bStore, cs, b); err != nil {
t.Fatal(err)
}
if _, err = checkReserve(bStore, unreserved); err != nil {
t.Fatal(err)
}
n++
if n > len(batches)/5 {
break
}
}
})
}
func TestBatchStoreUnreserveAll(t *testing.T) {
defer func(i int64, d uint8) {
batchstore.Capacity = i
batchstore.DefaultDepth = d
}(batchstore.Capacity, batchstore.DefaultDepth)
batchstore.DefaultDepth = 5
batchstore.Capacity = batchstore.Exp2(16)
bStore, unreserved := setupBatchStore(t)
bStore.SetRadiusSetter(noopRadiusSetter{})
var batches [][]byte
// iterate starting from batchstore.DefaultDepth to maxPO
_, depth := batchstore.GetReserve(bStore)
for step := 0; depth < swarm.MaxPO; step++ {
cs, err := nextChainState(bStore)
if err != nil {
t.Fatal(err)
}
event := rand.Intn(6)
// 0: dilute, 1: topup, 2,3,4,5: create
var b *postage.Batch
if event < 2 && len(batches) > 10 {
for {
n := rand.Intn(len(batches))
b, err = bStore.Get(batches[n])
if err != nil {
if errors.Is(err, storage.ErrNotFound) {
continue
}
t.Fatal(err)
}
break
}
if event == 0 {
if err = increaseDepth(bStore, cs, b); err != nil {
t.Fatal(err)
}
} else if err = topUp(bStore, cs, b); err != nil {
t.Fatal(err)
}
} else if b, err = createBatch(bStore, cs, depth); err != nil {
t.Fatal(err)
} else {
batches = append(batches, b.ID)
}
if depth, err = checkReserve(bStore, unreserved); err != nil {
t.Fatal(err)
}
}
}
func setupBatchStore(t *testing.T) (postage.Storer, map[string]uint8) {
t.Helper()
// we cannot use the mock statestore here since the iterator is not giving the right order
......@@ -214,8 +53,14 @@ func setupBatchStore(t *testing.T) (postage.Storer, map[string]uint8) {
unreserved[hex.EncodeToString(batchID)] = radius
return nil
}
bStore, _ := batchstore.New(stateStore, unreserveFunc)
evictFn := func(b []byte) error {
return unreserveFunc(b, swarm.MaxPO+1)
}
bStore, _ := batchstore.New(stateStore, evictFn, logger)
bStore.SetRadiusSetter(noopRadiusSetter{})
batchstore.SetUnreserveFunc(bStore, unreserveFunc)
// initialise chainstate
err = bStore.PutChainState(&postage.ChainState{
......@@ -229,89 +74,6 @@ func setupBatchStore(t *testing.T) (postage.Storer, map[string]uint8) {
return bStore, unreserved
}
func nextChainState(bStore postage.Storer) (*postage.ChainState, error) {
cs := bStore.GetChainState()
// random advance on the blockchain
advance := newBlockAdvance()
cs = &postage.ChainState{
Block: advance + cs.Block,
CurrentPrice: cs.CurrentPrice,
// settle although no price change
TotalAmount: cs.TotalAmount.Add(cs.TotalAmount, new(big.Int).Mul(cs.CurrentPrice, big.NewInt(int64(advance)))),
}
return cs, bStore.PutChainState(cs)
}
// creates a test batch with random value and depth and adds it to the batchstore
func createBatch(bStore postage.Storer, cs *postage.ChainState, depth uint8) (*postage.Batch, error) {
b := postagetest.MustNewBatch()
b.Depth = newBatchDepth(depth)
value := newValue(cs.CurrentPrice, cs.TotalAmount)
b.Value = big.NewInt(0)
return b, bStore.Put(b, value, b.Depth)
}
// tops up a batch with random amount
func topUp(bStore postage.Storer, cs *postage.ChainState, b *postage.Batch) error {
value := newValue(cs.CurrentPrice, b.Value)
return bStore.Put(b, value, b.Depth)
}
// dilutes the batch with random factor
func increaseDepth(bStore postage.Storer, cs *postage.ChainState, b *postage.Batch) error {
diff := newDilutionFactor()
value := new(big.Int).Sub(b.Value, cs.TotalAmount)
value.Div(value, big.NewInt(int64(1<<diff)))
value.Add(value, cs.TotalAmount)
return bStore.Put(b, value, b.Depth+uint8(diff))
}
// checkReserve is testing the correct behaviour of the reserve.
// the following assumptions are tested on each modification of the batches (top up, depth increase, price change)
// - reserve exceeds capacity
// - value-consistency of unreserved POs
func checkReserve(bStore postage.Storer, unreserved map[string]uint8) (uint8, error) {
var size int64
count := 0
outer := big.NewInt(0)
inner := big.NewInt(0)
limit, depth := batchstore.GetReserve(bStore)
// checking all batches
err := batchstore.IterateAll(bStore, func(b *postage.Batch) (bool, error) {
count++
bDepth, found := unreserved[hex.EncodeToString(b.ID)]
if !found {
return true, fmt.Errorf("batch not unreserved")
}
if b.Value.Cmp(limit) >= 0 {
if bDepth < depth-1 || bDepth > depth {
return true, fmt.Errorf("incorrect reserve radius. expected %d or %d. got %d", depth-1, depth, bDepth)
}
if bDepth == depth {
if inner.Cmp(b.Value) < 0 {
inner.Set(b.Value)
}
} else if outer.Cmp(b.Value) > 0 || outer.Cmp(big.NewInt(0)) == 0 {
outer.Set(b.Value)
}
if outer.Cmp(big.NewInt(0)) != 0 && outer.Cmp(inner) <= 0 {
return true, fmt.Errorf("inconsistent reserve radius: %d <= %d", outer.Uint64(), inner.Uint64())
}
size += batchstore.Exp2(b.Depth - bDepth - 1)
} else if bDepth != swarm.MaxPO {
return true, fmt.Errorf("batch below limit expected to be fully unreserved. got found=%v, radius=%d", found, bDepth)
}
return false, nil
})
if err != nil {
return 0, err
}
if size > batchstore.Capacity {
return 0, fmt.Errorf("reserve size beyond capacity. max %d, got %d", batchstore.Capacity, size)
}
return depth, nil
}
// TestBatchStore_Unreserve tests that the unreserve
// hook is called with the correct batch IDs and correct
// Radius as a result of batches coming in from chain events.
......@@ -542,6 +304,7 @@ func TestBatchStore_Unreserve(t *testing.T) {
} {
t.Run(tc.desc, func(t *testing.T) {
store, unreserved := setupBatchStore(t)
store.SetRadiusSetter(noopRadiusSetter{})
batches := addBatch(t, store,
depthValue(initBatchDepth, 3),
......@@ -945,3 +708,125 @@ func checkUnreserved(t *testing.T, unreserved map[string]uint8, batches []*posta
}
}
}
func TestUnreserveItemMarshaling(t *testing.T) {
v1 := batchstore.UnreserveItem{BatchID: make([]byte, 32), Radius: 5}
_, err := rand.Read(v1.BatchID)
if err != nil {
t.Fatal(err)
}
v, _ := v1.MarshalBinary()
v2 := &batchstore.UnreserveItem{}
err = v2.UnmarshalBinary(v)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(v1.BatchID, v2.BatchID) {
t.Fatalf("batch ID not equal got %x want %x", v2.BatchID, v1.BatchID)
}
if v1.Radius != v2.Radius {
t.Fatalf("radius mismatch got %d want %d", v2.Radius, v1.Radius)
}
}
func TestUnreserveItemSequence(t *testing.T) {
defer func(i int64, d uint8) {
batchstore.Capacity = i
batchstore.DefaultDepth = d
}(batchstore.Capacity, batchstore.DefaultDepth)
batchstore.DefaultDepth = 5
batchstore.Capacity = batchstore.Exp2(5) // 32 chunks
initBatchDepth := uint8(8)
dir, err := ioutil.TempDir("", "batchstore_test")
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
if err := os.RemoveAll(dir); err != nil {
t.Fatal(err)
}
})
logger := logging.New(ioutil.Discard, 0)
stateStore, err := leveldb.NewStateStore(dir, logger)
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
if err := stateStore.Close(); err != nil {
t.Fatal(err)
}
})
// record every unreserve call so the eviction sequence can be asserted below
unreserved := []batchstore.UnreserveItem{}
unreserveFunc := func(batchID []byte, radius uint8) error {
v := batchstore.UnreserveItem{BatchID: batchID, Radius: radius}
unreserved = append(unreserved, v)
return nil
}
evictFn := func(b []byte) error {
return unreserveFunc(b, swarm.MaxPO+1)
}
bStore, _ := batchstore.New(stateStore, evictFn, logger)
bStore.SetRadiusSetter(noopRadiusSetter{})
batchstore.SetUnreserveFunc(bStore, unreserveFunc)
// initialise chainstate
err = bStore.PutChainState(&postage.ChainState{
Block: 0,
TotalAmount: big.NewInt(0),
CurrentPrice: big.NewInt(1),
})
if err != nil {
t.Fatal(err)
}
batches := addBatch(t, bStore,
depthValue(initBatchDepth, 2),
depthValue(initBatchDepth, 3),
depthValue(initBatchDepth, 4),
depthValue(initBatchDepth, 5),
)
batch2 := addBatch(t, bStore,
depthValue(initBatchDepth, 8),
)
if l := len(unreserved); l != 7 {
t.Fatalf("expected 7 unreserve events got %d", l)
}
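// Seven events are expected: the four initial batches are unreserved at
// radius 4, the newly added batch produces one more event at radius 4, and
// the two lowest-value batches are then pushed to radius 5 to bring the
// reserve back under the 32-chunk capacity.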
// check the initial unreserve calls
for i, batch := range batches {
ur := unreserved[i]
if !bytes.Equal(batch.ID, ur.BatchID) {
t.Fatalf("wrong batchID in sequence %d, got %x want %x", i, ur.BatchID, batch.ID)
}
if ur.Radius != 4 {
t.Fatalf("wrong radius in sequence %d got %d want %d", i, ur.Radius, 4)
}
}
// next event is the new batch
if !bytes.Equal(unreserved[4].BatchID, batch2[0].ID) {
t.Fatal("batch mismatch")
}
if unreserved[4].Radius != 4 {
t.Fatal("radius mismatch")
}
// then the two cheapest batches are unreserved again at a higher radius
if !bytes.Equal(unreserved[5].BatchID, batches[0].ID) {
t.Fatal("batch mismatch")
}
if unreserved[5].Radius != 5 {
t.Fatal("radius mismatch")
}
if !bytes.Equal(unreserved[6].BatchID, batches[1].ID) {
t.Fatal("batch mismatch")
}
if unreserved[6].Radius != 5 {
t.Fatal("radius mismatch")
}
}
......@@ -10,7 +10,9 @@ import (
"fmt"
"math/big"
"strings"
"sync"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/storage"
)
......@@ -20,24 +22,32 @@ const (
valueKeyPrefix = "batchstore_value_"
chainStateKey = "batchstore_chainstate"
reserveStateKey = "batchstore_reservestate"
unreserveQueueKey = "batchstore_unreserve_queue_"
unreserveQueueCardinalityKey = "batchstore_queue_cardinality"
)
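// How individual queue entries are keyed is not shown in this excerpt; a
// minimal sketch, assuming each item key is the queue prefix followed by a
// zero-padded, monotonically increasing index so that prefix iteration
// returns items in insertion order (helper name and encoding are
// assumptions, not the actual implementation):
//
//   func unreserveQueueItemKey(idx uint64) string {
//       return fmt.Sprintf("%s%016x", unreserveQueueKey, idx)
//   }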
type unreserveFn func(batchID []byte, radius uint8) error
type evictFn func(batchID []byte) error
// store implements postage.Storer
type store struct {
store storage.StateStorer // State store backend to persist batches.
cs *postage.ChainState // the chain state
rsMtx sync.Mutex
rs *reserveState // the reserve state
unreserveFunc unreserveFn // unreserve function
unreserveFn unreserveFn // unreserve function
evictFn evictFn // evict function
queueIdx uint64 // unreserve queue cardinality
metrics metrics // metrics
logger logging.Logger
radiusSetter postage.RadiusSetter // setter for radius notifications
}
// New constructs a new postage batch store.
// It initialises both chain state and reserve state from the persistent state store
func New(st storage.StateStorer, unreserveFunc unreserveFn) (postage.Storer, error) {
func New(st storage.StateStorer, ev evictFn, logger logging.Logger) (postage.Storer, error) {
cs := &postage.ChainState{}
err := st.Get(chainStateKey, cs)
if err != nil {
......@@ -63,20 +73,30 @@ func New(st storage.StateStorer, unreserveFunc unreserveFn) (postage.Storer, err
Available: Capacity,
}
}
s := &store{
store: st,
cs: cs,
rs: rs,
unreserveFunc: unreserveFunc,
evictFn: ev,
metrics: newMetrics(),
logger: logger,
}
s.unreserveFn = s.unreserve
if s.queueIdx, err = s.getQueueCardinality(); err != nil {
return nil, err
}
return s, nil
}
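// Illustrative wiring only, mirroring the tests in this change rather than
// the production call path:
//
//   stateStore := mock.NewStateStore()
//   logger := logging.New(ioutil.Discard, 0)
//   bs, err := batchstore.New(stateStore, func([]byte) error { return nil }, logger)
//   if err != nil {
//       // handle error
//   }
//   bs.SetRadiusSetter(noopRadiusSetter{})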
func (s *store) GetReserveState() *postage.ReserveState {
s.rsMtx.Lock()
defer s.rsMtx.Unlock()
return &postage.ReserveState{
Radius: s.rs.Radius,
StorageRadius: s.rs.StorageRadius,
Available: s.rs.Available,
Outer: new(big.Int).Set(s.rs.Outer),
Inner: new(big.Int).Set(s.rs.Inner),
......@@ -90,7 +110,15 @@ func (s *store) Get(id []byte) (*postage.Batch, error) {
if err != nil {
return nil, fmt.Errorf("get batch %s: %w", hex.EncodeToString(id), err)
}
s.rsMtx.Lock()
defer s.rsMtx.Unlock()
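// While the node's storage radius lags behind the reserve radius, report
// the storage radius for every batch; otherwise derive the batch radius
// from its value tier.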
if s.rs.StorageRadius < s.rs.Radius {
b.Radius = s.rs.StorageRadius
} else {
b.Radius = s.rs.radius(s.rs.tier(b.Value))
}
return b, nil
}
......@@ -114,7 +142,9 @@ func (s *store) Put(b *postage.Batch, value *big.Int, depth uint8) error {
}
if s.radiusSetter != nil {
s.rsMtx.Lock()
s.radiusSetter.SetRadius(s.rs.Radius)
s.rsMtx.Unlock()
}
return s.store.Put(batchKey(b.ID), b)
}
......@@ -150,7 +180,9 @@ func (s *store) PutChainState(cs *postage.ChainState) error {
// this needs to be improved, since we can miss some calls on
// startup. the same goes for the other call to radiusSetter
if s.radiusSetter != nil {
s.rsMtx.Lock()
s.radiusSetter.SetRadius(s.rs.Radius)
s.rsMtx.Unlock()
}
return s.store.Put(chainStateKey, cs)
......
......@@ -18,13 +18,14 @@ import (
"github.com/ethersphere/bee/pkg/storage"
)
func unreserve([]byte, uint8) error { return nil }
var noopEvictFn = func([]byte) error { return nil }
func TestBatchStoreGet(t *testing.T) {
testBatch := postagetest.MustNewBatch()
key := batchstore.BatchKey(testBatch.ID)
stateStore := mock.NewStateStore()
batchStore, _ := batchstore.New(stateStore, nil)
batchStore, _ := batchstore.New(stateStore, nil, logging.New(ioutil.Discard, 0))
stateStorePut(t, stateStore, key, testBatch)
got := batchStoreGetBatch(t, batchStore, testBatch.ID)
......@@ -36,7 +37,7 @@ func TestBatchStorePut(t *testing.T) {
key := batchstore.BatchKey(testBatch.ID)
stateStore := mock.NewStateStore()
batchStore, _ := batchstore.New(stateStore, unreserve)
batchStore, _ := batchstore.New(stateStore, nil, logging.New(ioutil.Discard, 0))
batchStore.SetRadiusSetter(noopRadiusSetter{})
batchStorePutBatch(t, batchStore, testBatch)
......@@ -49,7 +50,7 @@ func TestBatchStoreGetChainState(t *testing.T) {
testChainState := postagetest.NewChainState()
stateStore := mock.NewStateStore()
batchStore, _ := batchstore.New(stateStore, nil)
batchStore, _ := batchstore.New(stateStore, nil, logging.New(ioutil.Discard, 0))
batchStore.SetRadiusSetter(noopRadiusSetter{})
err := batchStore.PutChainState(testChainState)
......@@ -64,7 +65,7 @@ func TestBatchStorePutChainState(t *testing.T) {
testChainState := postagetest.NewChainState()
stateStore := mock.NewStateStore()
batchStore, _ := batchstore.New(stateStore, nil)
batchStore, _ := batchstore.New(stateStore, nil, logging.New(ioutil.Discard, 0))
batchStore.SetRadiusSetter(noopRadiusSetter{})
batchStorePutChainState(t, batchStore, testChainState)
......@@ -89,7 +90,7 @@ func TestBatchStoreReset(t *testing.T) {
}
defer stateStore.Close()
batchStore, _ := batchstore.New(stateStore, func([]byte, uint8) error { return nil })
batchStore, _ := batchstore.New(stateStore, noopEvictFn, logger)
batchStore.SetRadiusSetter(noopRadiusSetter{})
err = batchStore.Put(testBatch, big.NewInt(15), 8)
if err != nil {
......
......@@ -23,6 +23,8 @@ type EventUpdater interface {
TransactionEnd() error
}
type UnreserveIteratorFn func(id []byte, radius uint8) (bool, error)
// Storer represents the persistence layer for batches on the current (highest
// available) block.
type Storer interface {
......@@ -32,6 +34,7 @@ type Storer interface {
GetChainState() *ChainState
GetReserveState() *ReserveState
SetRadiusSetter(RadiusSetter)
Unreserve(UnreserveIteratorFn) error
Reset() error
}
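// A hedged sketch of how a consumer (for example the localstore) might drive
// Unreserve; the stop-on-true convention and the UnreserveBatch helper are
// illustrative assumptions, not confirmed by this change:
//
//   err := batchStore.Unreserve(func(id []byte, radius uint8) (bool, error) {
//       freed, err := store.UnreserveBatch(id, radius) // hypothetical eviction helper
//       if err != nil {
//           return true, err
//       }
//       totalFreed += freed
//       return totalFreed >= target, nil // stop once enough capacity is reclaimed
//   })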
......
......@@ -8,6 +8,7 @@ import "math/big"
type ReserveState struct {
Radius uint8
StorageRadius uint8
Available int64
Outer *big.Int // lower value limit for outer layer = the further half of chunks
Inner *big.Int
......