Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
M
mybee
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
vicotor
mybee
Commits
af1a1015
Unverified
Commit
af1a1015
authored
May 19, 2021
by
Anatolie Lupacescu
Committed by
GitHub
May 19, 2021
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
fix: include reserve capacity in estimate (#1767)
parent
f9eee3cf
Changes
5
Show whitespace changes
Inline
Side-by-side
Showing
5 changed files
with
18 additions
and
17 deletions
+18
-17
gc.go
pkg/localstore/gc.go
+2
-2
gc_test.go
pkg/localstore/gc_test.go
+1
-1
localstore.go
pkg/localstore/localstore.go
+13
-12
localstore_test.go
pkg/localstore/localstore_test.go
+1
-1
reserve.go
pkg/localstore/reserve.go
+1
-1
No files found.
pkg/localstore/gc.go
View file @
af1a1015
...
...
@@ -200,7 +200,7 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
// gcTarget returns the absolute value for garbage collection
// target value, calculated from db.capacity and gcTargetRatio.
func
(
db
*
DB
)
gcTarget
()
(
target
uint64
)
{
return
uint64
(
float64
(
db
.
capacity
)
*
gcTargetRatio
)
return
uint64
(
float64
(
db
.
cacheCapacity
)
*
gcTargetRatio
)
}
// triggerGarbageCollection signals collectGarbageWorker
...
...
@@ -242,7 +242,7 @@ func (db *DB) incGCSizeInBatch(batch *leveldb.Batch, change int64) (err error) {
db
.
metrics
.
GCSize
.
Set
(
float64
(
newSize
))
// trigger garbage collection if we reached the capacity
if
newSize
>=
db
.
capacity
{
if
newSize
>=
db
.
cacheCapacity
{
db
.
triggerGarbageCollection
()
}
return
nil
...
...
pkg/localstore/gc_test.go
View file @
af1a1015
...
...
@@ -343,7 +343,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
addrs
:=
make
([]
swarm
.
Address
,
0
)
// upload random chunks just up to the capacity
for
i
:=
0
;
i
<
int
(
db
.
capacity
)
-
1
;
i
++
{
for
i
:=
0
;
i
<
int
(
db
.
cacheCapacity
)
-
1
;
i
++
{
ch
:=
generateTestRandomChunk
()
// call unreserve on the batch with radius 0 so that
// localstore is aware of the batch and the chunk can
...
...
pkg/localstore/localstore.go
View file @
af1a1015
...
...
@@ -26,6 +26,7 @@ import (
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/postage/batchstore"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
...
...
@@ -44,7 +45,7 @@ var (
var
(
// Default value for Capacity DB option.
defaultCapacity
uint64
=
1000000
defaultCacheCapacity
uint64
=
1000000
// Limit the number of goroutines created by Getters
// that call updateGC function. Value 0 sets no limit.
maxParallelUpdateGC
=
1000
...
...
@@ -99,8 +100,8 @@ type DB struct {
gcSize
shed
.
Uint64Field
// garbage collection is triggered when gcSize exceeds
// the capacity value
capacity
uint64
// the cacheCapacity value
cacheCapacity
uint64
// triggers garbage collection event loop
collectGarbageTrigger
chan
struct
{}
...
...
@@ -176,12 +177,12 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
if
o
==
nil
{
// default options
o
=
&
Options
{
Capacity
:
defaultCapacity
,
Capacity
:
defaultCacheCapacity
,
}
}
db
=
&
DB
{
capacity
:
o
.
Capacity
,
cacheCapacity
:
o
.
Capacity
,
baseKey
:
baseKey
,
tags
:
o
.
Tags
,
// channel collectGarbageTrigger
...
...
@@ -194,16 +195,16 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
metrics
:
newMetrics
(),
logger
:
logger
,
}
if
db
.
capacity
==
0
{
db
.
capacity
=
defaultCapacity
if
db
.
cacheCapacity
==
0
{
db
.
cacheCapacity
=
defaultCacheCapacity
}
capacityMB
:=
float64
(
db
.
capacity
*
swarm
.
ChunkSize
)
*
9.5367431640625e-7
capacityMB
:=
float64
(
(
db
.
cacheCapacity
+
uint64
(
batchstore
.
Capacity
))
*
swarm
.
ChunkSize
)
*
9.5367431640625e-7
if
capacityMB
<=
1000
{
db
.
logger
.
Infof
(
"database capacity: %d chunks (approximately %fMB)"
,
db
.
capacity
,
capacityMB
)
db
.
logger
.
Infof
(
"database capacity: %d chunks (approximately %fMB)"
,
db
.
cacheCapacity
,
capacityMB
)
}
else
{
db
.
logger
.
Infof
(
"database capacity: %d chunks (approximately %0.1fGB)"
,
db
.
capacity
,
capacityMB
/
1000
)
db
.
logger
.
Infof
(
"database capacity: %d chunks (approximately %0.1fGB)"
,
db
.
cacheCapacity
,
capacityMB
/
1000
)
}
if
maxParallelUpdateGC
>
0
{
...
...
pkg/localstore/localstore_test.go
View file @
af1a1015
...
...
@@ -63,7 +63,7 @@ func TestDBCapacity(t *testing.T) {
Capacity
:
500
,
}
db
:=
newTestDB
(
t
,
&
lo
)
if
db
.
capacity
!=
500
{
if
db
.
cacheCapacity
!=
500
{
t
.
Fatal
(
"could not set db capacity"
)
}
}
...
...
pkg/localstore/reserve.go
View file @
af1a1015
...
...
@@ -80,7 +80,7 @@ func (db *DB) UnreserveBatch(id []byte, radius uint8) error {
return
err
}
// trigger garbage collection if we reached the capacity
if
gcSize
>=
db
.
capacity
{
if
gcSize
>=
db
.
cacheCapacity
{
db
.
triggerGarbageCollection
()
}
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment