Commit 97420333 authored by acud, committed by GitHub

shed, localstore, cmd: expose leveldb configuration parameters to cli (#1454)

parent b22fda83
......@@ -19,6 +19,10 @@ import (
const (
optionNameDataDir = "data-dir"
optionNameDBCapacity = "db-capacity"
optionNameDBOpenFilesLimit = "db-open-files-limit"
optionNameDBBlockCacheCapacity = "db-block-cache-capacity"
optionNameDBWriteBufferSize = "db-write-buffer-size"
optionNameDBDisableSeeksCompaction = "db-disable-seeks-compaction"
optionNamePassword = "password"
optionNamePasswordFile = "password-file"
optionNameAPIAddr = "api-addr"
......@@ -180,6 +184,10 @@ func (c *command) setHomeDir() (err error) {
func (c *command) setAllFlags(cmd *cobra.Command) {
cmd.Flags().String(optionNameDataDir, filepath.Join(c.homeDir, ".bee"), "data directory")
cmd.Flags().Uint64(optionNameDBCapacity, 5000000, fmt.Sprintf("db capacity in chunks, multiply by %d to get approximate capacity in bytes", swarm.ChunkSize))
cmd.Flags().Uint64(optionNameDBOpenFilesLimit, 200, "number of open files allowed by database")
cmd.Flags().Uint64(optionNameDBBlockCacheCapacity, 32*1024*1024, "size of block cache of the database in bytes")
cmd.Flags().Uint64(optionNameDBWriteBufferSize, 32*1024*1024, "size of the database write buffer in bytes")
cmd.Flags().Bool(optionNameDBDisableSeeksCompaction, false, "disables db compactions triggered by seeks")
cmd.Flags().String(optionNamePassword, "", "password for decrypting keys")
cmd.Flags().String(optionNamePasswordFile, "", "path to a file that contains password for decrypting keys")
cmd.Flags().String(optionNameAPIAddr, ":1633", "HTTP API listen address")
......
......@@ -123,6 +123,10 @@ Welcome to the Swarm.... Bzzz Bzzzz Bzzzz
b, err := node.NewBee(c.config.GetString(optionNameP2PAddr), signerConfig.address, *signerConfig.publicKey, signerConfig.signer, c.config.GetUint64(optionNameNetworkID), logger, signerConfig.libp2pPrivateKey, signerConfig.pssPrivateKey, node.Options{
DataDir: c.config.GetString(optionNameDataDir),
DBCapacity: c.config.GetUint64(optionNameDBCapacity),
DBOpenFilesLimit: c.config.GetUint64(optionNameDBOpenFilesLimit),
DBBlockCacheCapacity: c.config.GetUint64(optionNameDBBlockCacheCapacity),
DBWriteBufferSize: c.config.GetUint64(optionNameDBWriteBufferSize),
DBDisableSeeksCompaction: c.config.GetBool(optionNameDBDisableSeeksCompaction),
APIAddr: c.config.GetString(optionNameAPIAddr),
DebugAPIAddr: debugAPIAddr,
Addr: c.config.GetString(optionNameP2PAddr),
......
......@@ -147,6 +147,19 @@ type Options struct {
// Capacity is a limit that triggers garbage collection when
// number of items in gcIndex equals or exceeds it.
Capacity uint64
// OpenFilesLimit defines the upper bound of open files that the
// the localstore should maintain at any point of time. It is
// passed on to the shed constructor.
OpenFilesLimit uint64
// BlockCacheCapacity defines the block cache capacity and is passed
// on to shed.
BlockCacheCapacity uint64
// WriteBuffer defines the size of writer buffer and is passed on to shed.
WriteBufferSize uint64
// DisableSeeksCompaction toggles the seek driven compactions feature on leveldb
// and is passed on to shed.
DisableSeeksCompaction bool
// MetricsPrefix defines a prefix for metrics names.
MetricsPrefix string
Tags *tags.Tags
......@@ -193,7 +206,14 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
db.updateGCSem = make(chan struct{}, maxParallelUpdateGC)
}
db.shed, err = shed.NewDB(path)
shedOpts := &shed.Options{
OpenFilesLimit: o.OpenFilesLimit,
BlockCacheCapacity: o.BlockCacheCapacity,
WriteBufferSize: o.WriteBufferSize,
DisableSeeksCompaction: o.DisableSeeksCompaction,
}
db.shed, err = shed.NewDB(path, shedOpts)
if err != nil {
return nil, err
}
......
......@@ -84,6 +84,10 @@ type Bee struct {
type Options struct {
DataDir string
DBCapacity uint64
DBOpenFilesLimit uint64
DBWriteBufferSize uint64
DBBlockCacheCapacity uint64
DBDisableSeeksCompaction bool
APIAddr string
DebugAPIAddr string
Addr string
......@@ -382,6 +386,10 @@ func NewBee(addr string, swarmAddress swarm.Address, publicKey ecdsa.PublicKey,
}
lo := &localstore.Options{
Capacity: o.DBCapacity,
OpenFilesLimit: o.DBOpenFilesLimit,
BlockCacheCapacity: o.DBBlockCacheCapacity,
WriteBufferSize: o.DBWriteBufferSize,
DisableSeeksCompaction: o.DBDisableSeeksCompaction,
}
storer, err := localstore.New(path, swarmAddress.Bytes(), lo, logger)
if err != nil {
......
......@@ -32,11 +32,19 @@ import (
)
// LevelDB tuning values for the shed database.
//
// The default* values are the fallbacks that NewDB applies when it is
// called with a nil *Options.
var (
	openFileLimit      = 128              // limit for LevelDB OpenFilesCacheCapacity
	blockCacheCapacity = 32 * 1024 * 1024 // LevelDB block cache size in bytes
	writeBuffer        = 32 * 1024 * 1024 // LevelDB write buffer size in bytes

	defaultOpenFilesLimit         = uint64(256)
	defaultBlockCacheCapacity     = uint64(32 * 1024 * 1024)
	defaultWriteBufferSize        = uint64(32 * 1024 * 1024)
	defaultDisableSeeksCompaction = false
)
// Options exposes the LevelDB parameters that can be tuned when the
// database is opened. NewDB substitutes the package defaults when it
// receives a nil *Options.
type Options struct {
	// BlockCacheCapacity is the LevelDB block cache size in bytes.
	BlockCacheCapacity uint64
	// WriteBufferSize is the LevelDB write buffer size in bytes.
	WriteBufferSize uint64
	// OpenFilesLimit bounds the number of files LevelDB may keep open;
	// it is passed on as OpenFilesCacheCapacity.
	OpenFilesLimit uint64
	// DisableSeeksCompaction turns off LevelDB compactions that are
	// triggered by seeks.
	DisableSeeksCompaction bool
}
// DB provides abstractions over LevelDB in order to
// implement complex structures using fields and ordered indexes.
// It provides a schema functionality to store fields and indexes
......@@ -50,16 +58,24 @@ type DB struct {
// NewDB constructs a new DB and validates the schema
// if it exists in database on the given path.
// metricsPrefix is used for metrics collection for the given DB.
func NewDB(path string) (db *DB, err error) {
func NewDB(path string, o *Options) (db *DB, err error) {
if o == nil {
o = &Options{
OpenFilesLimit: defaultOpenFilesLimit,
BlockCacheCapacity: defaultBlockCacheCapacity,
WriteBufferSize: defaultWriteBufferSize,
DisableSeeksCompaction: defaultDisableSeeksCompaction,
}
}
var ldb *leveldb.DB
if path == "" {
ldb, err = leveldb.Open(storage.NewMemStorage(), nil)
} else {
ldb, err = leveldb.OpenFile(path, &opt.Options{
OpenFilesCacheCapacity: openFileLimit,
BlockCacheCapacity: blockCacheCapacity,
DisableSeeksCompaction: true,
WriteBuffer: writeBuffer,
OpenFilesCacheCapacity: int(o.OpenFilesLimit),
BlockCacheCapacity: int(o.BlockCacheCapacity),
WriteBuffer: int(o.WriteBufferSize),
DisableSeeksCompaction: o.DisableSeeksCompaction,
})
}
......
......@@ -54,7 +54,7 @@ func TestDB_persistence(t *testing.T) {
}
defer os.RemoveAll(dir)
db, err := NewDB(dir)
db, err := NewDB(dir, nil)
if err != nil {
t.Fatal(err)
}
......@@ -72,7 +72,7 @@ func TestDB_persistence(t *testing.T) {
t.Fatal(err)
}
db2, err := NewDB(dir)
db2, err := NewDB(dir, nil)
if err != nil {
t.Fatal(err)
}
......@@ -94,7 +94,7 @@ func TestDB_persistence(t *testing.T) {
// be called to remove the data.
func newTestDB(t *testing.T) *DB {
t.Helper()
db, err := NewDB("")
db, err := NewDB("", nil)
if err != nil {
t.Fatal(err)
}
......
......@@ -51,7 +51,7 @@ type Store struct {
// and possible conflicts with schema from existing database is checked
// automatically.
func New(path string) (s *Store, err error) {
db, err := shed.NewDB(path)
db, err := shed.NewDB(path, nil)
if err != nil {
return nil, err
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment