Commit 2f677c40 authored by Anatolie Lupacescu, committed by GitHub

refactor: rename db-capacity flag to cache-capacity (#1704)

parent 59ef4d48
......@@ -21,7 +21,7 @@ import (
const (
optionNameDataDir = "data-dir"
optionNameDBCapacity = "db-capacity"
optionNameCacheCapacity = "cache-capacity"
optionNameDBOpenFilesLimit = "db-open-files-limit"
optionNameDBBlockCacheCapacity = "db-block-cache-capacity"
optionNameDBWriteBufferSize = "db-write-buffer-size"
......@@ -193,7 +193,7 @@ func (c *command) setHomeDir() (err error) {
func (c *command) setAllFlags(cmd *cobra.Command) {
cmd.Flags().String(optionNameDataDir, filepath.Join(c.homeDir, ".bee"), "data directory")
cmd.Flags().Uint64(optionNameDBCapacity, 1000000, fmt.Sprintf("db capacity in chunks, multiply by %d to get approximate capacity in bytes", swarm.ChunkSize))
cmd.Flags().Uint64(optionNameCacheCapacity, 1000000, fmt.Sprintf("cache capacity in chunks, multiply by %d to get approximate capacity in bytes", swarm.ChunkSize))
cmd.Flags().Uint64(optionNameDBOpenFilesLimit, 200, "number of open files allowed by database")
cmd.Flags().Uint64(optionNameDBBlockCacheCapacity, 32*1024*1024, "size of block cache of the database in bytes")
cmd.Flags().Uint64(optionNameDBWriteBufferSize, 32*1024*1024, "size of the database write buffer in bytes")
......
......@@ -118,7 +118,7 @@ Welcome to the Swarm.... Bzzz Bzzzz Bzzzz
b, err := node.NewBee(c.config.GetString(optionNameP2PAddr), signerConfig.address, *signerConfig.publicKey, signerConfig.signer, c.config.GetUint64(optionNameNetworkID), logger, signerConfig.libp2pPrivateKey, signerConfig.pssPrivateKey, node.Options{
DataDir: c.config.GetString(optionNameDataDir),
DBCapacity: c.config.GetUint64(optionNameDBCapacity),
CacheCapacity: c.config.GetUint64(optionNameCacheCapacity),
DBOpenFilesLimit: c.config.GetUint64(optionNameDBOpenFilesLimit),
DBBlockCacheCapacity: c.config.GetUint64(optionNameDBBlockCacheCapacity),
DBWriteBufferSize: c.config.GetUint64(optionNameDBWriteBufferSize),
......
......@@ -16,8 +16,8 @@ config: /etc/bee/bee.yaml
# cors-allowed-origins: []
## data directory (default "/home/<user>/.bee")
data-dir: /var/lib/bee
## db capacity in chunks, multiply by 4096 to get approximate capacity in bytes
# db-capacity: 1000000
## cache capacity in chunks, multiply by 4096 to get approximate capacity in bytes
# cache-capacity: 1000000
## number of open files allowed by database
# db-open-files-limit: 200
## size of block cache of the database in bytes
......
......@@ -22,7 +22,7 @@ services:
- BEE_CONFIG
- BEE_CORS_ALLOWED_ORIGINS
- BEE_DATA_DIR
- BEE_DB_CAPACITY
- BEE_CACHE_CAPACITY
- BEE_DB_OPEN_FILES_LIMIT
- BEE_DB_BLOCK_CACHE_CAPACITY
- BEE_DB_WRITE_BUFFER_SIZE
......
......@@ -23,8 +23,8 @@ BEE_CLEF_SIGNER_ENABLE=true
# BEE_CORS_ALLOWED_ORIGINS=[]
## data directory (default /home/<user>/.bee)
# BEE_DATA_DIR=/home/bee/.bee
## db capacity in chunks, multiply by 4096 to get approximate capacity in bytes
# BEE_DB_CAPACITY=1000000
## cache capacity in chunks, multiply by 4096 to get approximate capacity in bytes
# BEE_CACHE_CAPACITY=1000000
## number of open files allowed by database
# BEE_DB_OPEN_FILES_LIMIT=200
## size of block cache of the database in bytes
......
......@@ -16,8 +16,8 @@ config: /usr/local/etc/swarm-bee/bee.yaml
# cors-allowed-origins: []
## data directory (default "/home/<user>/.bee")
data-dir: /usr/local/var/lib/swarm-bee
## db capacity in chunks, multiply by 4096 to get approximate capacity in bytes
# db-capacity: 1000000
## cache capacity in chunks, multiply by 4096 to get approximate capacity in bytes
# cache-capacity: 1000000
## number of open files allowed by database
# db-open-files-limit: 200
## size of block cache of the database in bytes
......
......@@ -14,8 +14,8 @@ config: ./bee.yaml
# cors-allowed-origins: []
## data directory (default "/home/<user>/.bee")
data-dir: ./data
## db capacity in chunks, multiply by 4096 to get approximate capacity in bytes
# db-capacity: 1000000
## cache capacity in chunks, multiply by 4096 to get approximate capacity in bytes
# cache-capacity: 1000000
## debug HTTP API listen address (default ":1635")
# debug-api-addr: 127.0.0.1:1635
## enable debug HTTP API
......
......@@ -156,7 +156,7 @@ func testDBCollectGarbageWorker(t *testing.T) {
func TestPinGC(t *testing.T) {
chunkCount := 150
pinChunksCount := 50
dbCapacity := uint64(100)
cacheCapacity := uint64(100)
var closed chan struct{}
testHookCollectGarbageChan := make(chan uint64)
......@@ -177,7 +177,7 @@ func TestPinGC(t *testing.T) {
t.Cleanup(setWithinRadiusFunc(func(_ *DB, _ shed.Item) bool { return false }))
db := newTestDB(t, &Options{
Capacity: dbCapacity,
Capacity: cacheCapacity,
})
closed = db.close
......@@ -259,7 +259,7 @@ func TestPinGC(t *testing.T) {
})
t.Run("first chunks after pinned chunks should be removed", func(t *testing.T) {
for i := pinChunksCount; i < (int(dbCapacity) - int(gcTarget)); i++ {
for i := pinChunksCount; i < (int(cacheCapacity) - int(gcTarget)); i++ {
_, err := db.Get(context.Background(), storage.ModeGetRequest, addrs[i])
if !errors.Is(err, leveldb.ErrNotFound) {
t.Fatal(err)
......@@ -593,7 +593,7 @@ func TestPinAfterMultiGC(t *testing.T) {
pinnedChunks := make([]swarm.Address, 0)
// upload random chunks above db capacity to see if chunks are still pinned
// upload random chunks above cache capacity to see if chunks are still pinned
for i := 0; i < 20; i++ {
ch := generateTestRandomChunk()
// call unreserve on the batch with radius 0 so that
......
......@@ -58,13 +58,13 @@ func init() {
}
}
func TestDBCapacity(t *testing.T) {
func TestCacheCapacity(t *testing.T) {
lo := Options{
Capacity: 500,
}
db := newTestDB(t, &lo)
if db.cacheCapacity != 500 {
t.Fatal("could not set db capacity")
t.Fatal("could not set cache capacity")
}
}
......
......@@ -98,7 +98,7 @@ type Bee struct {
type Options struct {
DataDir string
DBCapacity uint64
CacheCapacity uint64
DBOpenFilesLimit uint64
DBWriteBufferSize uint64
DBBlockCacheCapacity uint64
......@@ -315,7 +315,7 @@ func NewBee(addr string, swarmAddress swarm.Address, publicKey ecdsa.PublicKey,
path = filepath.Join(o.DataDir, "localstore")
}
lo := &localstore.Options{
Capacity: o.DBCapacity,
Capacity: o.CacheCapacity,
OpenFilesLimit: o.DBOpenFilesLimit,
BlockCacheCapacity: o.DBBlockCacheCapacity,
WriteBufferSize: o.DBWriteBufferSize,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment