Commit af1a1015 authored by Anatolie Lupacescu, committed by GitHub

fix: include reserve capacity in estimate (#1767)

parent f9eee3cf
@@ -200,7 +200,7 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
 // gcTrigger retruns the absolute value for garbage collection
 // target value, calculated from db.capacity and gcTargetRatio.
 func (db *DB) gcTarget() (target uint64) {
-	return uint64(float64(db.capacity) * gcTargetRatio)
+	return uint64(float64(db.cacheCapacity) * gcTargetRatio)
 }

 // triggerGarbageCollection signals collectGarbageWorker
@@ -242,7 +242,7 @@ func (db *DB) incGCSizeInBatch(batch *leveldb.Batch, change int64) (err error) {
 	db.metrics.GCSize.Set(float64(newSize))
 	// trigger garbage collection if we reached the capacity
-	if newSize >= db.capacity {
+	if newSize >= db.cacheCapacity {
 		db.triggerGarbageCollection()
 	}
 	return nil
...
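Together these two hunks tie both ends of the garbage-collection cycle to the renamed cache capacity: collection is triggered once gcSize reaches db.cacheCapacity, and it evicts until gcSize falls to gcTarget. A minimal sketch of the arithmetic, assuming the default capacity of 1000000 chunks from this diff and a gcTargetRatio of 0.9 (the ratio's actual value is defined outside the changed hunks):

package main

import "fmt"

func main() {
	// defaultCacheCapacity appears later in this diff; gcTargetRatio = 0.9
	// is an assumption, since its definition is not shown here.
	const cacheCapacity uint64 = 1000000
	const gcTargetRatio = 0.9

	// gcTarget: collectGarbage evicts entries until gcSize drops to this.
	target := uint64(float64(cacheCapacity) * gcTargetRatio)
	fmt.Println("gc target:", target) // 900000 under these assumptions

	// incGCSizeInBatch triggers collection at gcSize >= cacheCapacity,
	// so one full cycle evicts roughly the difference.
	fmt.Println("evicted per cycle:", cacheCapacity-target) // 100000
}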
@@ -343,7 +343,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
 	addrs := make([]swarm.Address, 0)
 	// upload random chunks just up to the capacity
-	for i := 0; i < int(db.capacity)-1; i++ {
+	for i := 0; i < int(db.cacheCapacity)-1; i++ {
 		ch := generateTestRandomChunk()
 		// call unreserve on the batch with radius 0 so that
 		// localstore is aware of the batch and the chunk can
...
@@ -26,6 +26,7 @@ import (
 	"github.com/ethersphere/bee/pkg/logging"
 	"github.com/ethersphere/bee/pkg/postage"
+	"github.com/ethersphere/bee/pkg/postage/batchstore"
 	"github.com/ethersphere/bee/pkg/shed"
 	"github.com/ethersphere/bee/pkg/storage"
 	"github.com/ethersphere/bee/pkg/swarm"
@@ -44,7 +45,7 @@ var (
 var (
 	// Default value for Capacity DB option.
-	defaultCapacity uint64 = 1000000
+	defaultCacheCapacity uint64 = 1000000
 	// Limit the number of goroutines created by Getters
 	// that call updateGC function. Value 0 sets no limit.
 	maxParallelUpdateGC = 1000
@@ -99,8 +100,8 @@ type DB struct {
 	gcSize shed.Uint64Field
 	// garbage collection is triggered when gcSize exceeds
-	// the capacity value
-	capacity uint64
+	// the cacheCapacity value
+	cacheCapacity uint64
 	// triggers garbage collection event loop
 	collectGarbageTrigger chan struct{}
@@ -176,14 +177,14 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
 	if o == nil {
 		// default options
 		o = &Options{
-			Capacity: defaultCapacity,
+			Capacity: defaultCacheCapacity,
 		}
 	}
 	db = &DB{
-		capacity: o.Capacity,
+		cacheCapacity: o.Capacity,
 		baseKey: baseKey,
 		tags: o.Tags,
 		// channel collectGarbageTrigger
 		// needs to be buffered with the size of 1
 		// to signal another event if it
@@ -194,16 +195,16 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
 		metrics: newMetrics(),
 		logger: logger,
 	}
-	if db.capacity == 0 {
-		db.capacity = defaultCapacity
+	if db.cacheCapacity == 0 {
+		db.cacheCapacity = defaultCacheCapacity
 	}
-	capacityMB := float64(db.capacity*swarm.ChunkSize) * 9.5367431640625e-7
+	capacityMB := float64((db.cacheCapacity+uint64(batchstore.Capacity))*swarm.ChunkSize) * 9.5367431640625e-7
 	if capacityMB <= 1000 {
-		db.logger.Infof("database capacity: %d chunks (approximately %fMB)", db.capacity, capacityMB)
+		db.logger.Infof("database capacity: %d chunks (approximately %fMB)", db.cacheCapacity, capacityMB)
 	} else {
-		db.logger.Infof("database capacity: %d chunks (approximately %0.1fGB)", db.capacity, capacityMB/1000)
+		db.logger.Infof("database capacity: %d chunks (approximately %0.1fGB)", db.cacheCapacity, capacityMB/1000)
 	}
 	if maxParallelUpdateGC > 0 {
...
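The substantive change in New is the reported size estimate: it now adds the postage reserve capacity (batchstore.Capacity, hence the new import) to the cache capacity before converting chunks to bytes; the magic constant 9.5367431640625e-7 is exactly 1/2^20, i.e. a bytes-to-MiB conversion. A worked sketch of the estimate, assuming swarm.ChunkSize is 4096 and, purely for illustration, a reserve capacity of 2^22 chunks (batchstore.Capacity's real value is defined outside this diff):

package main

import "fmt"

func main() {
	const chunkSize = 4096               // assumed value of swarm.ChunkSize
	const cacheCapacity uint64 = 1000000 // defaultCacheCapacity in this diff
	// Illustrative assumption: the reserve holds 2^22 chunks. The actual
	// batchstore.Capacity lives in pkg/postage/batchstore.
	const reserveCapacity uint64 = 1 << 22

	// 9.5367431640625e-7 == 1/2^20, so this converts bytes to MiB.
	capacityMB := float64((cacheCapacity+reserveCapacity)*chunkSize) * 9.5367431640625e-7
	fmt.Printf("database size estimate: approximately %0.1fGB\n", capacityMB/1000)
	// prints roughly 20.3GB under these assumptions
}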
@@ -63,7 +63,7 @@ func TestDBCapacity(t *testing.T) {
 		Capacity: 500,
 	}
 	db := newTestDB(t, &lo)
-	if db.capacity != 500 {
+	if db.cacheCapacity != 500 {
 		t.Fatal("could not set db capacity")
 	}
 }
...
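The option keeps its public name: callers still set Options.Capacity, which New maps onto the internal cacheCapacity field, as the test above verifies. A hedged usage sketch built only from the New signature visible in the hunk headers (the import paths mirror the ones shown in this diff):

package example

import (
	"github.com/ethersphere/bee/pkg/localstore"
	"github.com/ethersphere/bee/pkg/logging"
)

// openSmallStore opens a localstore whose cache is capped at 500 chunks.
func openSmallStore(path string, baseKey []byte, logger logging.Logger) (*localstore.DB, error) {
	o := &localstore.Options{
		Capacity: 500, // stored as db.cacheCapacity internally
	}
	return localstore.New(path, baseKey, o, logger)
}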
@@ -80,7 +80,7 @@ func (db *DB) UnreserveBatch(id []byte, radius uint8) error {
 		return err
 	}
 	// trigger garbage collection if we reached the capacity
-	if gcSize >= db.capacity {
+	if gcSize >= db.cacheCapacity {
 		db.triggerGarbageCollection()
 	}
...
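An earlier context comment notes that collectGarbageTrigger "needs to be buffered with the size of 1" so a signal can be recorded even while the worker is busy. A plausible sketch of that trigger pattern, assuming triggerGarbageCollection (whose body is not part of this diff) does the usual non-blocking send:

package main

import "fmt"

// triggerGC performs a non-blocking send on a size-1 buffered channel:
// if a signal is already pending, the new one is dropped, which is safe
// because a single pending signal is enough to wake the GC worker.
func triggerGC(trigger chan struct{}) {
	select {
	case trigger <- struct{}{}:
	default:
	}
}

func main() {
	trigger := make(chan struct{}, 1)
	triggerGC(trigger)
	triggerGC(trigger) // coalesced with the already-pending signal
	fmt.Println("pending signals:", len(trigger)) // 1
}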