Commit af1a1015 authored by Anatolie Lupacescu, committed by GitHub

fix: include reserve capacity in estimate (#1767)

parent f9eee3cf
@@ -200,7 +200,7 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
 // gcTarget returns the absolute value for garbage collection
 // target value, calculated from db.capacity and gcTargetRatio.
 func (db *DB) gcTarget() (target uint64) {
-	return uint64(float64(db.capacity) * gcTargetRatio)
+	return uint64(float64(db.cacheCapacity) * gcTargetRatio)
 }
 // triggerGarbageCollection signals collectGarbageWorker
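For reference, the renamed gcTarget computes the collection target as a fixed fraction of the cache capacity alone, so chunks held in the postage batch reserve are never counted against the GC target. A minimal self-contained sketch, assuming gcTargetRatio = 0.9 (the ratio itself is not shown in this diff and is an assumption here):

package main

import "fmt"

// gcTargetRatio is assumed to be 0.9; the actual constant lives in
// pkg/localstore and is not part of this diff.
const gcTargetRatio = 0.9

// gcTarget mirrors the patched method: the target is a fraction of
// the cache capacity, not of the whole database.
func gcTarget(cacheCapacity uint64) uint64 {
	return uint64(float64(cacheCapacity) * gcTargetRatio)
}

func main() {
	// With defaultCacheCapacity = 1000000, GC aims to shrink the
	// cache down to 900000 chunks.
	fmt.Println(gcTarget(1000000)) // 900000
}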
@@ -242,7 +242,7 @@ func (db *DB) incGCSizeInBatch(batch *leveldb.Batch, change int64) (err error) {
 	db.metrics.GCSize.Set(float64(newSize))
 	// trigger garbage collection if we reached the capacity
-	if newSize >= db.capacity {
+	if newSize >= db.cacheCapacity {
 		db.triggerGarbageCollection()
 	}
 	return nil
@@ -343,7 +343,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
 	addrs := make([]swarm.Address, 0)
 	// upload random chunks just up to the capacity
-	for i := 0; i < int(db.capacity)-1; i++ {
+	for i := 0; i < int(db.cacheCapacity)-1; i++ {
 		ch := generateTestRandomChunk()
 		// call unreserve on the batch with radius 0 so that
 		// localstore is aware of the batch and the chunk can
@@ -26,6 +26,7 @@ import (
 	"github.com/ethersphere/bee/pkg/logging"
 	"github.com/ethersphere/bee/pkg/postage"
+	"github.com/ethersphere/bee/pkg/postage/batchstore"
 	"github.com/ethersphere/bee/pkg/shed"
 	"github.com/ethersphere/bee/pkg/storage"
 	"github.com/ethersphere/bee/pkg/swarm"
@@ -44,7 +45,7 @@
 var (
 	// Default value for Capacity DB option.
-	defaultCapacity uint64 = 1000000
+	defaultCacheCapacity uint64 = 1000000
 	// Limit the number of goroutines created by Getters
 	// that call updateGC function. Value 0 sets no limit.
 	maxParallelUpdateGC = 1000
@@ -99,8 +100,8 @@ type DB struct {
 	gcSize shed.Uint64Field
 	// garbage collection is triggered when gcSize exceeds
-	// the capacity value
-	capacity uint64
+	// the cacheCapacity value
+	cacheCapacity uint64
 	// triggers garbage collection event loop
 	collectGarbageTrigger chan struct{}
@@ -176,12 +177,12 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
 	if o == nil {
 		// default options
 		o = &Options{
-			Capacity: defaultCapacity,
+			Capacity: defaultCacheCapacity,
 		}
 	}
 	db = &DB{
-		capacity: o.Capacity,
+		cacheCapacity: o.Capacity,
 		baseKey: baseKey,
 		tags: o.Tags,
 		// channel collectGarbageTrigger
@@ -194,16 +195,16 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
 		metrics: newMetrics(),
 		logger: logger,
 	}
-	if db.capacity == 0 {
-		db.capacity = defaultCapacity
+	if db.cacheCapacity == 0 {
+		db.cacheCapacity = defaultCacheCapacity
 	}
-	capacityMB := float64(db.capacity*swarm.ChunkSize) * 9.5367431640625e-7
+	capacityMB := float64((db.cacheCapacity+uint64(batchstore.Capacity))*swarm.ChunkSize) * 9.5367431640625e-7
 	if capacityMB <= 1000 {
-		db.logger.Infof("database capacity: %d chunks (approximately %fMB)", db.capacity, capacityMB)
+		db.logger.Infof("database capacity: %d chunks (approximately %fMB)", db.cacheCapacity, capacityMB)
 	} else {
-		db.logger.Infof("database capacity: %d chunks (approximately %0.1fGB)", db.capacity, capacityMB/1000)
+		db.logger.Infof("database capacity: %d chunks (approximately %0.1fGB)", db.cacheCapacity, capacityMB/1000)
 	}
 	if maxParallelUpdateGC > 0 {
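The constant 9.5367431640625e-7 above is 1/2^20, i.e. a bytes-to-MiB conversion. The actual fix is in the capacityMB line: the size estimate now adds the postage batch reserve (batchstore.Capacity) to the cache capacity, while the chunk count printed by the log lines stays db.cacheCapacity alone. A worked sketch of the arithmetic, assuming swarm.ChunkSize = 4096 bytes and batchstore.Capacity = 2^23 chunks; both concrete values are assumptions, not part of this diff:

package main

import "fmt"

const (
	chunkSize       = 4096                // assumed swarm.ChunkSize in bytes
	cacheCapacity   = 1000000             // defaultCacheCapacity from this patch
	reserveCapacity = 1 << 23             // assumed batchstore.Capacity in chunks
	bytesToMB       = 9.5367431640625e-7  // 1 / 2^20: bytes -> MiB
)

func main() {
	// Without the reserve the default estimate comes out near 3.9GB;
	// including it brings the log in line with the on-disk footprint.
	capacityMB := float64((cacheCapacity+reserveCapacity)*chunkSize) * bytesToMB
	fmt.Printf("approximately %0.1fGB\n", capacityMB/1000) // approximately 36.7GB
}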
@@ -63,7 +63,7 @@ func TestDBCapacity(t *testing.T) {
 		Capacity: 500,
 	}
 	db := newTestDB(t, &lo)
-	if db.capacity != 500 {
+	if db.cacheCapacity != 500 {
 		t.Fatal("could not set db capacity")
 	}
 }
@@ -80,7 +80,7 @@ func (db *DB) UnreserveBatch(id []byte, radius uint8) error {
 		return err
 	}
 	// trigger garbage collection if we reached the capacity
-	if gcSize >= db.capacity {
+	if gcSize >= db.cacheCapacity {
 		db.triggerGarbageCollection()
 	}
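Both call sites touched by this commit follow the same pattern: compare a size counter against db.cacheCapacity and, once it is reached, signal the GC worker over the collectGarbageTrigger channel declared in the DB struct above. A minimal sketch of that pattern, using a non-blocking send on a buffered channel; the body of triggerGarbageCollection is not shown in this diff, so treat this shape as an assumption:

package main

import "fmt"

type DB struct {
	cacheCapacity         uint64
	collectGarbageTrigger chan struct{}
}

// triggerGarbageCollection wakes the GC worker without blocking;
// if a signal is already pending, this one is simply dropped.
func (db *DB) triggerGarbageCollection() {
	select {
	case db.collectGarbageTrigger <- struct{}{}:
	default:
	}
}

func main() {
	db := &DB{cacheCapacity: 500, collectGarbageTrigger: make(chan struct{}, 1)}
	gcSize := uint64(600)
	// the guard used by incGCSizeInBatch and UnreserveBatch
	if gcSize >= db.cacheCapacity {
		db.triggerGarbageCollection()
	}
	fmt.Println(len(db.collectGarbageTrigger)) // 1: one pending GC signal
}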