Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
M
mybee
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
vicotor
mybee
Commits
eda8aecd
Unverified
Commit
eda8aecd
authored
Apr 13, 2020
by
Janoš Guljaš
Committed by
GitHub
Apr 13, 2020
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
integrate bee shed in localstore (#92)
parent
b5134faf
Changes
32
Expand all
Hide whitespace changes
Inline
Side-by-side
Showing
32 changed files
with
131 additions
and
381 deletions
+131
-381
Makefile
Makefile
+1
-1
go.mod
go.mod
+0
-1
go.sum
go.sum
+0
-239
db.go
pkg/shed/db.go
+2
-2
db_test.go
pkg/shed/db_test.go
+2
-2
example_store_test.go
pkg/shed/example_store_test.go
+5
-5
field_string.go
pkg/shed/field_string.go
+2
-2
field_string_test.go
pkg/shed/field_string_test.go
+1
-5
field_struct.go
pkg/shed/field_struct.go
+2
-2
field_struct_test.go
pkg/shed/field_struct_test.go
+2
-5
field_uint64.go
pkg/shed/field_uint64.go
+2
-2
field_uint64_test.go
pkg/shed/field_uint64_test.go
+10
-13
index.go
pkg/shed/index.go
+2
-2
index_test.go
pkg/shed/index_test.go
+14
-19
vector_uint64.go
pkg/shed/vector_uint64.go
+2
-2
vector_uint64_test.go
pkg/shed/vector_uint64_test.go
+10
-13
doc.go
pkg/storage/localstore/doc.go
+1
-2
export.go
pkg/storage/localstore/export.go
+1
-1
gc.go
pkg/storage/localstore/gc.go
+14
-9
gc_test.go
pkg/storage/localstore/gc_test.go
+1
-1
localstore.go
pkg/storage/localstore/localstore.go
+4
-4
localstore_test.go
pkg/storage/localstore/localstore_test.go
+2
-3
mode_get.go
pkg/storage/localstore/mode_get.go
+4
-5
mode_get_multi.go
pkg/storage/localstore/mode_get_multi.go
+2
-3
mode_put.go
pkg/storage/localstore/mode_put.go
+12
-10
mode_put_test.go
pkg/storage/localstore/mode_put_test.go
+3
-3
mode_set.go
pkg/storage/localstore/mode_set.go
+19
-16
mode_set_test.go
pkg/storage/localstore/mode_set_test.go
+3
-4
subscription_pull.go
pkg/storage/localstore/subscription_pull.go
+2
-3
subscription_pull_test.go
pkg/storage/localstore/subscription_pull_test.go
+1
-1
subscription_push.go
pkg/storage/localstore/subscription_push.go
+1
-1
subscription_push_test.go
pkg/storage/localstore/subscription_push_test.go
+4
-0
No files found.
Makefile
View file @
eda8aecd
...
@@ -31,7 +31,7 @@ test:
...
@@ -31,7 +31,7 @@ test:
$(GO)
test
-v
-race
./...
$(GO)
test
-v
-race
./...
.PHONY
:
build
.PHONY
:
build
build
:
export CGO_ENABLED=
1
#
set to 0 when go-ethereum/metrics dependecy is removed
build
:
export CGO_ENABLED=
0
build
:
build
:
$(GO)
build
-trimpath
-ldflags
"
$(LDFLAGS)
"
./...
$(GO)
build
-trimpath
-ldflags
"
$(LDFLAGS)
"
./...
...
...
go.mod
View file @
eda8aecd
...
@@ -6,7 +6,6 @@ require (
...
@@ -6,7 +6,6 @@ require (
github.com/btcsuite/btcd v0.20.1-beta
github.com/btcsuite/btcd v0.20.1-beta
github.com/coreos/go-semver v0.3.0
github.com/coreos/go-semver v0.3.0
github.com/dgraph-io/badger/v2 v2.0.3
github.com/dgraph-io/badger/v2 v2.0.3
github.com/ethersphere/swarm v0.5.7
github.com/gogo/protobuf v1.3.1
github.com/gogo/protobuf v1.3.1
github.com/gorilla/handlers v1.4.2
github.com/gorilla/handlers v1.4.2
github.com/gorilla/mux v1.7.4
github.com/gorilla/mux v1.7.4
...
...
go.sum
View file @
eda8aecd
This diff is collapsed.
Click to expand it.
pkg/shed/db.go
View file @
eda8aecd
...
@@ -38,7 +38,7 @@ const (
...
@@ -38,7 +38,7 @@ const (
)
)
var
(
var
(
ErrNotFound
=
errors
.
New
(
"s
torage
: not found"
)
ErrNotFound
=
errors
.
New
(
"s
hed
: not found"
)
)
)
// DB provides abstractions over badgerDB in order to
// DB provides abstractions over badgerDB in order to
...
@@ -135,7 +135,7 @@ func (db *DB) Has(key []byte) (yes bool, err error) {
...
@@ -135,7 +135,7 @@ func (db *DB) Has(key []byte) (yes bool, err error) {
item
,
err
:=
txn
.
Get
(
key
)
item
,
err
:=
txn
.
Get
(
key
)
if
err
!=
nil
{
if
err
!=
nil
{
if
err
==
badger
.
ErrKeyNotFound
{
if
err
==
badger
.
ErrKeyNotFound
{
return
ErrNotFound
return
nil
}
}
return
err
return
err
}
}
...
...
pkg/shed/db_test.go
View file @
eda8aecd
...
@@ -62,7 +62,7 @@ func TestDB_persistence(t *testing.T) {
...
@@ -62,7 +62,7 @@ func TestDB_persistence(t *testing.T) {
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
stringField
,
err
:=
db
.
NewStringField
(
"preserve-me"
,
logger
)
stringField
,
err
:=
db
.
NewStringField
(
"preserve-me"
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -80,7 +80,7 @@ func TestDB_persistence(t *testing.T) {
...
@@ -80,7 +80,7 @@ func TestDB_persistence(t *testing.T) {
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
stringField2
,
err
:=
db2
.
NewStringField
(
"preserve-me"
,
logger
)
stringField2
,
err
:=
db2
.
NewStringField
(
"preserve-me"
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
...
pkg/shed/example_store_test.go
View file @
eda8aecd
...
@@ -60,12 +60,12 @@ func New(path string) (s *Store, err error) {
...
@@ -60,12 +60,12 @@ func New(path string) (s *Store, err error) {
db
:
db
,
db
:
db
,
}
}
// Identify current storage schema by arbitrary name.
// Identify current storage schema by arbitrary name.
s
.
schemaName
,
err
=
db
.
NewStringField
(
"schema-name"
,
logger
)
s
.
schemaName
,
err
=
db
.
NewStringField
(
"schema-name"
)
if
err
!=
nil
{
if
err
!=
nil
{
return
nil
,
err
return
nil
,
err
}
}
// Global ever incrementing index of chunk accesses.
// Global ever incrementing index of chunk accesses.
s
.
accessCounter
,
err
=
db
.
NewUint64Field
(
"access-counter"
,
logger
)
s
.
accessCounter
,
err
=
db
.
NewUint64Field
(
"access-counter"
)
if
err
!=
nil
{
if
err
!=
nil
{
return
nil
,
err
return
nil
,
err
}
}
...
@@ -89,7 +89,7 @@ func New(path string) (s *Store, err error) {
...
@@ -89,7 +89,7 @@ func New(path string) (s *Store, err error) {
e
.
Data
=
value
[
8
:
]
e
.
Data
=
value
[
8
:
]
return
e
,
nil
return
e
,
nil
},
},
}
,
logger
)
})
if
err
!=
nil
{
if
err
!=
nil
{
return
nil
,
err
return
nil
,
err
}
}
...
@@ -112,7 +112,7 @@ func New(path string) (s *Store, err error) {
...
@@ -112,7 +112,7 @@ func New(path string) (s *Store, err error) {
e
.
AccessTimestamp
=
int64
(
binary
.
BigEndian
.
Uint64
(
value
))
e
.
AccessTimestamp
=
int64
(
binary
.
BigEndian
.
Uint64
(
value
))
return
e
,
nil
return
e
,
nil
},
},
}
,
logger
)
})
if
err
!=
nil
{
if
err
!=
nil
{
return
nil
,
err
return
nil
,
err
}
}
...
@@ -137,7 +137,7 @@ func New(path string) (s *Store, err error) {
...
@@ -137,7 +137,7 @@ func New(path string) (s *Store, err error) {
DecodeValue
:
func
(
keyItem
shed
.
Item
,
value
[]
byte
)
(
e
shed
.
Item
,
err
error
)
{
DecodeValue
:
func
(
keyItem
shed
.
Item
,
value
[]
byte
)
(
e
shed
.
Item
,
err
error
)
{
return
e
,
nil
return
e
,
nil
},
},
}
,
logger
)
})
if
err
!=
nil
{
if
err
!=
nil
{
return
nil
,
err
return
nil
,
err
}
}
...
...
pkg/shed/field_string.go
View file @
eda8aecd
...
@@ -31,7 +31,7 @@ type StringField struct {
...
@@ -31,7 +31,7 @@ type StringField struct {
// NewStringField retruns a new Instance of StringField.
// NewStringField retruns a new Instance of StringField.
// It validates its name and type against the database schema.
// It validates its name and type against the database schema.
func
(
db
*
DB
)
NewStringField
(
name
string
,
logger
logging
.
Logger
)
(
f
StringField
,
err
error
)
{
func
(
db
*
DB
)
NewStringField
(
name
string
)
(
f
StringField
,
err
error
)
{
key
,
err
:=
db
.
schemaFieldKey
(
name
,
"string"
)
key
,
err
:=
db
.
schemaFieldKey
(
name
,
"string"
)
if
err
!=
nil
{
if
err
!=
nil
{
return
f
,
err
return
f
,
err
...
@@ -39,7 +39,7 @@ func (db *DB) NewStringField(name string, logger logging.Logger) (f StringField,
...
@@ -39,7 +39,7 @@ func (db *DB) NewStringField(name string, logger logging.Logger) (f StringField,
return
StringField
{
return
StringField
{
db
:
db
,
db
:
db
,
key
:
key
,
key
:
key
,
logger
:
logger
,
logger
:
db
.
logger
,
},
nil
},
nil
}
}
...
...
pkg/shed/field_string_test.go
View file @
eda8aecd
...
@@ -17,10 +17,7 @@
...
@@ -17,10 +17,7 @@
package
shed
package
shed
import
(
import
(
"io/ioutil"
"testing"
"testing"
"github.com/ethersphere/bee/pkg/logging"
)
)
// TestStringField validates put and get operations
// TestStringField validates put and get operations
...
@@ -29,8 +26,7 @@ func TestStringField(t *testing.T) {
...
@@ -29,8 +26,7 @@ func TestStringField(t *testing.T) {
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
simpleString
,
err
:=
db
.
NewStringField
(
"simple-string"
)
simpleString
,
err
:=
db
.
NewStringField
(
"simple-string"
,
logger
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
...
pkg/shed/field_struct.go
View file @
eda8aecd
...
@@ -33,7 +33,7 @@ type StructField struct {
...
@@ -33,7 +33,7 @@ type StructField struct {
// NewStructField returns a new StructField.
// NewStructField returns a new StructField.
// It validates its name and type against the database schema.
// It validates its name and type against the database schema.
func
(
db
*
DB
)
NewStructField
(
name
string
,
logger
logging
.
Logger
)
(
f
StructField
,
err
error
)
{
func
(
db
*
DB
)
NewStructField
(
name
string
)
(
f
StructField
,
err
error
)
{
key
,
err
:=
db
.
schemaFieldKey
(
name
,
"struct-rlp"
)
key
,
err
:=
db
.
schemaFieldKey
(
name
,
"struct-rlp"
)
if
err
!=
nil
{
if
err
!=
nil
{
return
f
,
err
return
f
,
err
...
@@ -41,7 +41,7 @@ func (db *DB) NewStructField(name string, logger logging.Logger) (f StructField,
...
@@ -41,7 +41,7 @@ func (db *DB) NewStructField(name string, logger logging.Logger) (f StructField,
return
StructField
{
return
StructField
{
db
:
db
,
db
:
db
,
key
:
key
,
key
:
key
,
logger
:
logger
,
logger
:
db
.
logger
,
},
nil
},
nil
}
}
...
...
pkg/shed/field_struct_test.go
View file @
eda8aecd
...
@@ -17,10 +17,7 @@
...
@@ -17,10 +17,7 @@
package
shed
package
shed
import
(
import
(
"io/ioutil"
"testing"
"testing"
"github.com/ethersphere/bee/pkg/logging"
)
)
// TestStructField validates put and get operations
// TestStructField validates put and get operations
...
@@ -28,8 +25,8 @@ import (
...
@@ -28,8 +25,8 @@ import (
func
TestStructField
(
t
*
testing
.
T
)
{
func
TestStructField
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
complexField
,
err
:=
db
.
NewStructField
(
"complex-field"
,
logger
)
complexField
,
err
:=
db
.
NewStructField
(
"complex-field"
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
...
pkg/shed/field_uint64.go
View file @
eda8aecd
...
@@ -33,7 +33,7 @@ type Uint64Field struct {
...
@@ -33,7 +33,7 @@ type Uint64Field struct {
// NewUint64Field returns a new Uint64Field.
// NewUint64Field returns a new Uint64Field.
// It validates its name and type against the database schema.
// It validates its name and type against the database schema.
func
(
db
*
DB
)
NewUint64Field
(
name
string
,
logger
logging
.
Logger
)
(
f
Uint64Field
,
err
error
)
{
func
(
db
*
DB
)
NewUint64Field
(
name
string
)
(
f
Uint64Field
,
err
error
)
{
key
,
err
:=
db
.
schemaFieldKey
(
name
,
"uint64"
)
key
,
err
:=
db
.
schemaFieldKey
(
name
,
"uint64"
)
if
err
!=
nil
{
if
err
!=
nil
{
return
f
,
err
return
f
,
err
...
@@ -41,7 +41,7 @@ func (db *DB) NewUint64Field(name string, logger logging.Logger) (f Uint64Field,
...
@@ -41,7 +41,7 @@ func (db *DB) NewUint64Field(name string, logger logging.Logger) (f Uint64Field,
return
Uint64Field
{
return
Uint64Field
{
db
:
db
,
db
:
db
,
key
:
key
,
key
:
key
,
logger
:
logger
,
logger
:
db
.
logger
,
},
nil
},
nil
}
}
...
...
pkg/shed/field_uint64_test.go
View file @
eda8aecd
...
@@ -17,10 +17,7 @@
...
@@ -17,10 +17,7 @@
package
shed
package
shed
import
(
import
(
"io/ioutil"
"testing"
"testing"
"github.com/ethersphere/bee/pkg/logging"
)
)
// TestUint64Field validates put and get operations
// TestUint64Field validates put and get operations
...
@@ -28,8 +25,8 @@ import (
...
@@ -28,8 +25,8 @@ import (
func
TestUint64Field
(
t
*
testing
.
T
)
{
func
TestUint64Field
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
counter
,
err
:=
db
.
NewUint64Field
(
"counter"
,
logger
)
counter
,
err
:=
db
.
NewUint64Field
(
"counter"
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -121,8 +118,8 @@ func TestUint64Field(t *testing.T) {
...
@@ -121,8 +118,8 @@ func TestUint64Field(t *testing.T) {
func
TestUint64Field_Inc
(
t
*
testing
.
T
)
{
func
TestUint64Field_Inc
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
counter
,
err
:=
db
.
NewUint64Field
(
"counter"
,
logger
)
counter
,
err
:=
db
.
NewUint64Field
(
"counter"
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -151,8 +148,8 @@ func TestUint64Field_Inc(t *testing.T) {
...
@@ -151,8 +148,8 @@ func TestUint64Field_Inc(t *testing.T) {
func
TestUint64Field_IncInBatch
(
t
*
testing
.
T
)
{
func
TestUint64Field_IncInBatch
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
counter
,
err
:=
db
.
NewUint64Field
(
"counter"
,
logger
)
counter
,
err
:=
db
.
NewUint64Field
(
"counter"
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -205,8 +202,8 @@ func TestUint64Field_IncInBatch(t *testing.T) {
...
@@ -205,8 +202,8 @@ func TestUint64Field_IncInBatch(t *testing.T) {
func
TestUint64Field_Dec
(
t
*
testing
.
T
)
{
func
TestUint64Field_Dec
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
counter
,
err
:=
db
.
NewUint64Field
(
"counter"
,
logger
)
counter
,
err
:=
db
.
NewUint64Field
(
"counter"
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -242,8 +239,8 @@ func TestUint64Field_Dec(t *testing.T) {
...
@@ -242,8 +239,8 @@ func TestUint64Field_Dec(t *testing.T) {
func
TestUint64Field_DecInBatch
(
t
*
testing
.
T
)
{
func
TestUint64Field_DecInBatch
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
counter
,
err
:=
db
.
NewUint64Field
(
"counter"
,
logger
)
counter
,
err
:=
db
.
NewUint64Field
(
"counter"
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
...
pkg/shed/index.go
View file @
eda8aecd
...
@@ -102,7 +102,7 @@ type IndexFuncs struct {
...
@@ -102,7 +102,7 @@ type IndexFuncs struct {
// NewIndex returns a new Index instance with defined name and
// NewIndex returns a new Index instance with defined name and
// encoding functions. The name must be unique and will be validated
// encoding functions. The name must be unique and will be validated
// on database schema for a key prefix byte.
// on database schema for a key prefix byte.
func
(
db
*
DB
)
NewIndex
(
name
string
,
funcs
IndexFuncs
,
logger
logging
.
Logger
)
(
f
Index
,
err
error
)
{
func
(
db
*
DB
)
NewIndex
(
name
string
,
funcs
IndexFuncs
)
(
f
Index
,
err
error
)
{
id
,
err
:=
db
.
schemaIndexPrefix
(
name
)
id
,
err
:=
db
.
schemaIndexPrefix
(
name
)
if
err
!=
nil
{
if
err
!=
nil
{
return
f
,
err
return
f
,
err
...
@@ -110,7 +110,7 @@ func (db *DB) NewIndex(name string, funcs IndexFuncs, logger logging.Logger) (f
...
@@ -110,7 +110,7 @@ func (db *DB) NewIndex(name string, funcs IndexFuncs, logger logging.Logger) (f
prefix
:=
[]
byte
{
id
}
prefix
:=
[]
byte
{
id
}
return
Index
{
return
Index
{
db
:
db
,
db
:
db
,
logger
:
logger
,
logger
:
db
.
logger
,
prefix
:
prefix
,
prefix
:
prefix
,
// This function adjusts Index LevelDB key
// This function adjusts Index LevelDB key
// by appending the provided index id byte.
// by appending the provided index id byte.
...
...
pkg/shed/index_test.go
View file @
eda8aecd
...
@@ -20,12 +20,9 @@ import (
...
@@ -20,12 +20,9 @@ import (
"bytes"
"bytes"
"encoding/binary"
"encoding/binary"
"fmt"
"fmt"
"io/ioutil"
"sort"
"sort"
"testing"
"testing"
"time"
"time"
"github.com/ethersphere/bee/pkg/logging"
)
)
// Index functions for the index that is used in tests in this file.
// Index functions for the index that is used in tests in this file.
...
@@ -54,8 +51,8 @@ var retrievalIndexFuncs = IndexFuncs{
...
@@ -54,8 +51,8 @@ var retrievalIndexFuncs = IndexFuncs{
func
TestIndex
(
t
*
testing
.
T
)
{
func
TestIndex
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
index
,
err
:=
db
.
NewIndex
(
"retrieval"
,
retrievalIndexFuncs
,
logger
)
index
,
err
:=
db
.
NewIndex
(
"retrieval"
,
retrievalIndexFuncs
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -368,8 +365,8 @@ func TestIndex(t *testing.T) {
...
@@ -368,8 +365,8 @@ func TestIndex(t *testing.T) {
func
TestIndex_Iterate
(
t
*
testing
.
T
)
{
func
TestIndex_Iterate
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
index
,
err
:=
db
.
NewIndex
(
"retrieval"
,
retrievalIndexFuncs
,
logger
)
index
,
err
:=
db
.
NewIndex
(
"retrieval"
,
retrievalIndexFuncs
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -503,8 +500,7 @@ func TestIndex_Iterate(t *testing.T) {
...
@@ -503,8 +500,7 @@ func TestIndex_Iterate(t *testing.T) {
})
})
t
.
Run
(
"no overflow"
,
func
(
t
*
testing
.
T
)
{
t
.
Run
(
"no overflow"
,
func
(
t
*
testing
.
T
)
{
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
secondIndex
,
err
:=
db
.
NewIndex
(
"second-index"
,
retrievalIndexFuncs
)
secondIndex
,
err
:=
db
.
NewIndex
(
"second-index"
,
retrievalIndexFuncs
,
logger
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -552,8 +548,8 @@ func TestIndex_Iterate(t *testing.T) {
...
@@ -552,8 +548,8 @@ func TestIndex_Iterate(t *testing.T) {
func
TestIndex_Iterate_withPrefix
(
t
*
testing
.
T
)
{
func
TestIndex_Iterate_withPrefix
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
index
,
err
:=
db
.
NewIndex
(
"retrieval"
,
retrievalIndexFuncs
,
logger
)
index
,
err
:=
db
.
NewIndex
(
"retrieval"
,
retrievalIndexFuncs
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -699,8 +695,7 @@ func TestIndex_Iterate_withPrefix(t *testing.T) {
...
@@ -699,8 +695,7 @@ func TestIndex_Iterate_withPrefix(t *testing.T) {
})
})
t
.
Run
(
"no overflow"
,
func
(
t
*
testing
.
T
)
{
t
.
Run
(
"no overflow"
,
func
(
t
*
testing
.
T
)
{
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
secondIndex
,
err
:=
db
.
NewIndex
(
"second-index"
,
retrievalIndexFuncs
)
secondIndex
,
err
:=
db
.
NewIndex
(
"second-index"
,
retrievalIndexFuncs
,
logger
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -740,8 +735,8 @@ func TestIndex_Iterate_withPrefix(t *testing.T) {
...
@@ -740,8 +735,8 @@ func TestIndex_Iterate_withPrefix(t *testing.T) {
func
TestIndex_count
(
t
*
testing
.
T
)
{
func
TestIndex_count
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
index
,
err
:=
db
.
NewIndex
(
"retrieval"
,
retrievalIndexFuncs
,
logger
)
index
,
err
:=
db
.
NewIndex
(
"retrieval"
,
retrievalIndexFuncs
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -910,8 +905,8 @@ func checkItem(t *testing.T, got, want Item) {
...
@@ -910,8 +905,8 @@ func checkItem(t *testing.T, got, want Item) {
func
TestIndex_firstAndLast
(
t
*
testing
.
T
)
{
func
TestIndex_firstAndLast
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
index
,
err
:=
db
.
NewIndex
(
"retrieval"
,
retrievalIndexFuncs
,
logger
)
index
,
err
:=
db
.
NewIndex
(
"retrieval"
,
retrievalIndexFuncs
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -1059,8 +1054,8 @@ func TestIncByteSlice(t *testing.T) {
...
@@ -1059,8 +1054,8 @@ func TestIncByteSlice(t *testing.T) {
func
TestIndex_HasMulti
(
t
*
testing
.
T
)
{
func
TestIndex_HasMulti
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
index
,
err
:=
db
.
NewIndex
(
"retrieval"
,
retrievalIndexFuncs
,
logger
)
index
,
err
:=
db
.
NewIndex
(
"retrieval"
,
retrievalIndexFuncs
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
...
pkg/shed/vector_uint64.go
View file @
eda8aecd
...
@@ -33,7 +33,7 @@ type Uint64Vector struct {
...
@@ -33,7 +33,7 @@ type Uint64Vector struct {
// NewUint64Vector returns a new Uint64Vector.
// NewUint64Vector returns a new Uint64Vector.
// It validates its name and type against the database schema.
// It validates its name and type against the database schema.
func
(
db
*
DB
)
NewUint64Vector
(
name
string
,
logger
logging
.
Logger
)
(
f
Uint64Vector
,
err
error
)
{
func
(
db
*
DB
)
NewUint64Vector
(
name
string
)
(
f
Uint64Vector
,
err
error
)
{
key
,
err
:=
db
.
schemaFieldKey
(
name
,
"vector-uint64"
)
key
,
err
:=
db
.
schemaFieldKey
(
name
,
"vector-uint64"
)
if
err
!=
nil
{
if
err
!=
nil
{
return
f
,
err
return
f
,
err
...
@@ -41,7 +41,7 @@ func (db *DB) NewUint64Vector(name string, logger logging.Logger) (f Uint64Vecto
...
@@ -41,7 +41,7 @@ func (db *DB) NewUint64Vector(name string, logger logging.Logger) (f Uint64Vecto
return
Uint64Vector
{
return
Uint64Vector
{
db
:
db
,
db
:
db
,
key
:
key
,
key
:
key
,
logger
:
logger
,
logger
:
db
.
logger
,
},
nil
},
nil
}
}
...
...
pkg/shed/vector_uint64_test.go
View file @
eda8aecd
...
@@ -17,10 +17,7 @@
...
@@ -17,10 +17,7 @@
package
shed
package
shed
import
(
import
(
"io/ioutil"
"testing"
"testing"
"github.com/ethersphere/bee/pkg/logging"
)
)
// TestUint64Vector validates put and get operations
// TestUint64Vector validates put and get operations
...
@@ -28,8 +25,8 @@ import (
...
@@ -28,8 +25,8 @@ import (
func
TestUint64Vector
(
t
*
testing
.
T
)
{
func
TestUint64Vector
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
bins
,
err
:=
db
.
NewUint64Vector
(
"bins"
,
logger
)
bins
,
err
:=
db
.
NewUint64Vector
(
"bins"
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -125,8 +122,8 @@ func TestUint64Vector(t *testing.T) {
...
@@ -125,8 +122,8 @@ func TestUint64Vector(t *testing.T) {
func
TestUint64Vector_Inc
(
t
*
testing
.
T
)
{
func
TestUint64Vector_Inc
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
bins
,
err
:=
db
.
NewUint64Vector
(
"bins"
,
logger
)
bins
,
err
:=
db
.
NewUint64Vector
(
"bins"
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -157,8 +154,8 @@ func TestUint64Vector_Inc(t *testing.T) {
...
@@ -157,8 +154,8 @@ func TestUint64Vector_Inc(t *testing.T) {
func
TestUint64Vector_IncInBatch
(
t
*
testing
.
T
)
{
func
TestUint64Vector_IncInBatch
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
bins
,
err
:=
db
.
NewUint64Vector
(
"bins"
,
logger
)
bins
,
err
:=
db
.
NewUint64Vector
(
"bins"
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -213,8 +210,8 @@ func TestUint64Vector_IncInBatch(t *testing.T) {
...
@@ -213,8 +210,8 @@ func TestUint64Vector_IncInBatch(t *testing.T) {
func
TestUint64Vector_Dec
(
t
*
testing
.
T
)
{
func
TestUint64Vector_Dec
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
bins
,
err
:=
db
.
NewUint64Vector
(
"bins"
,
logger
)
bins
,
err
:=
db
.
NewUint64Vector
(
"bins"
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -252,8 +249,8 @@ func TestUint64Vector_Dec(t *testing.T) {
...
@@ -252,8 +249,8 @@ func TestUint64Vector_Dec(t *testing.T) {
func
TestUint64Vector_DecInBatch
(
t
*
testing
.
T
)
{
func
TestUint64Vector_DecInBatch
(
t
*
testing
.
T
)
{
db
,
cleanupFunc
:=
newTestDB
(
t
)
db
,
cleanupFunc
:=
newTestDB
(
t
)
defer
cleanupFunc
()
defer
cleanupFunc
()
logger
:=
logging
.
New
(
ioutil
.
Discard
,
0
)
bins
,
err
:=
db
.
NewUint64Vector
(
"bins"
,
logger
)
bins
,
err
:=
db
.
NewUint64Vector
(
"bins"
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
...
pkg/storage/localstore/doc.go
View file @
eda8aecd
...
@@ -16,8 +16,7 @@
...
@@ -16,8 +16,7 @@
/*
/*
Package localstore provides disk storage layer for Swarm Chunk persistence.
Package localstore provides disk storage layer for Swarm Chunk persistence.
It uses swarm/shed abstractions on top of github.com/syndtr/goleveldb LevelDB
It uses swarm/shed abstractions.
implementation.
The main type is DB which manages the storage by providing methods to
The main type is DB which manages the storage by providing methods to
access and add Chunks and to manage their status.
access and add Chunks and to manage their status.
...
...
pkg/storage/localstore/export.go
View file @
eda8aecd
...
@@ -25,9 +25,9 @@ import (
...
@@ -25,9 +25,9 @@ import (
"io/ioutil"
"io/ioutil"
"sync"
"sync"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/swarm/shed"
)
)
const
(
const
(
...
...
pkg/storage/localstore/gc.go
View file @
eda8aecd
...
@@ -17,10 +17,11 @@
...
@@ -17,10 +17,11 @@
package
localstore
package
localstore
import
(
import
(
"errors"
"time"
"time"
"github.com/
ethersphere/swarm/shed
"
"github.com/
dgraph-io/badger/v2
"
"github.com/
syndtr/goleveldb/leveldb
"
"github.com/
ethersphere/bee/pkg/shed
"
)
)
var
(
var
(
...
@@ -34,7 +35,7 @@ var (
...
@@ -34,7 +35,7 @@ var (
// garbage collection runs.
// garbage collection runs.
gcTargetRatio
=
0.9
gcTargetRatio
=
0.9
// gcBatchSize limits the number of chunks in a single
// gcBatchSize limits the number of chunks in a single
//
leveldb batch
on garbage collection.
//
badger transaction
on garbage collection.
gcBatchSize
uint64
=
200
gcBatchSize
uint64
=
200
)
)
...
@@ -84,7 +85,7 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
...
@@ -84,7 +85,7 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
}
}
}()
}()
batch
:=
new
(
leveldb
.
Batch
)
batch
:=
db
.
shed
.
GetBatch
(
true
)
target
:=
db
.
gcTarget
()
target
:=
db
.
gcTarget
()
// protect database from changing idexes and gcSize
// protect database from changing idexes and gcSize
...
@@ -145,7 +146,9 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
...
@@ -145,7 +146,9 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
}
}
db
.
metrics
.
GCCollectedCounter
.
Inc
()
db
.
metrics
.
GCCollectedCounter
.
Inc
()
db
.
gcSize
.
PutInBatch
(
batch
,
gcSize
-
collectedCount
)
if
err
:=
db
.
gcSize
.
PutInBatch
(
batch
,
gcSize
-
collectedCount
);
err
!=
nil
{
return
0
,
false
,
err
}
err
=
db
.
shed
.
WriteBatch
(
batch
)
err
=
db
.
shed
.
WriteBatch
(
batch
)
if
err
!=
nil
{
if
err
!=
nil
{
...
@@ -165,7 +168,7 @@ func (db *DB) removeChunksInExcludeIndexFromGC() (err error) {
...
@@ -165,7 +168,7 @@ func (db *DB) removeChunksInExcludeIndexFromGC() (err error) {
}
}
}()
}()
batch
:=
new
(
leveldb
.
Batch
)
batch
:=
db
.
shed
.
GetBatch
(
true
)
excludedCount
:=
0
excludedCount
:=
0
var
gcSizeChange
int64
var
gcSizeChange
int64
err
=
db
.
gcExcludeIndex
.
Iterate
(
func
(
item
shed
.
Item
)
(
stop
bool
,
err
error
)
{
err
=
db
.
gcExcludeIndex
.
Iterate
(
func
(
item
shed
.
Item
)
(
stop
bool
,
err
error
)
{
...
@@ -244,12 +247,12 @@ func (db *DB) triggerGarbageCollection() {
...
@@ -244,12 +247,12 @@ func (db *DB) triggerGarbageCollection() {
// incGCSizeInBatch changes gcSize field value
// incGCSizeInBatch changes gcSize field value
// by change which can be negative. This function
// by change which can be negative. This function
// must be called under batchMu lock.
// must be called under batchMu lock.
func
(
db
*
DB
)
incGCSizeInBatch
(
batch
*
leveldb
.
Batch
,
change
int64
)
(
err
error
)
{
func
(
db
*
DB
)
incGCSizeInBatch
(
batch
*
badger
.
Txn
,
change
int64
)
(
err
error
)
{
if
change
==
0
{
if
change
==
0
{
return
nil
return
nil
}
}
gcSize
,
err
:=
db
.
gcSize
.
Get
()
gcSize
,
err
:=
db
.
gcSize
.
Get
()
if
err
!=
nil
{
if
err
!=
nil
&&
!
errors
.
Is
(
err
,
shed
.
ErrNotFound
)
{
return
err
return
err
}
}
...
@@ -266,7 +269,9 @@ func (db *DB) incGCSizeInBatch(batch *leveldb.Batch, change int64) (err error) {
...
@@ -266,7 +269,9 @@ func (db *DB) incGCSizeInBatch(batch *leveldb.Batch, change int64) (err error) {
}
}
new
=
gcSize
-
c
new
=
gcSize
-
c
}
}
db
.
gcSize
.
PutInBatch
(
batch
,
new
)
if
err
:=
db
.
gcSize
.
PutInBatch
(
batch
,
new
);
err
!=
nil
{
return
err
}
// trigger garbage collection if we reached the capacity
// trigger garbage collection if we reached the capacity
if
new
>=
db
.
capacity
{
if
new
>=
db
.
capacity
{
...
...
pkg/storage/localstore/gc_test.go
View file @
eda8aecd
...
@@ -26,9 +26,9 @@ import (
...
@@ -26,9 +26,9 @@ import (
"time"
"time"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/swarm/shed"
)
)
// TestDB_collectGarbageWorker tests garbage collection runs
// TestDB_collectGarbageWorker tests garbage collection runs
...
...
pkg/storage/localstore/localstore.go
View file @
eda8aecd
...
@@ -25,10 +25,10 @@ import (
...
@@ -25,10 +25,10 @@ import (
"time"
"time"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/swarm/shed"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus"
)
)
...
@@ -120,7 +120,7 @@ type DB struct {
...
@@ -120,7 +120,7 @@ type DB struct {
putToGCCheck
func
([]
byte
)
bool
putToGCCheck
func
([]
byte
)
bool
// wait for all subscriptions to finish before closing
// wait for all subscriptions to finish before closing
// underlaying
Level
DB to prevent possible panics from
// underlaying
Badger
DB to prevent possible panics from
// iterators
// iterators
subscritionsWG
sync
.
WaitGroup
subscritionsWG
sync
.
WaitGroup
...
@@ -180,7 +180,7 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
...
@@ -180,7 +180,7 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
db
.
updateGCSem
=
make
(
chan
struct
{},
maxParallelUpdateGC
)
db
.
updateGCSem
=
make
(
chan
struct
{},
maxParallelUpdateGC
)
}
}
db
.
shed
,
err
=
shed
.
NewDB
(
path
,
o
.
MetricsPrefix
)
db
.
shed
,
err
=
shed
.
NewDB
(
path
,
logger
)
if
err
!=
nil
{
if
err
!=
nil
{
return
nil
,
err
return
nil
,
err
}
}
...
@@ -191,7 +191,7 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
...
@@ -191,7 +191,7 @@ func New(path string, baseKey []byte, o *Options, logger logging.Logger) (db *DB
return
nil
,
err
return
nil
,
err
}
}
schemaName
,
err
:=
db
.
schemaName
.
Get
()
schemaName
,
err
:=
db
.
schemaName
.
Get
()
if
err
!=
nil
{
if
err
!=
nil
&&
!
errors
.
Is
(
err
,
shed
.
ErrNotFound
)
{
return
nil
,
err
return
nil
,
err
}
}
if
schemaName
==
""
{
if
schemaName
==
""
{
...
...
pkg/storage/localstore/localstore_test.go
View file @
eda8aecd
...
@@ -30,11 +30,10 @@ import (
...
@@ -30,11 +30,10 @@ import (
"time"
"time"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage"
chunktesting
"github.com/ethersphere/bee/pkg/storage/testing"
chunktesting
"github.com/ethersphere/bee/pkg/storage/testing"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/swarm/shed"
"github.com/syndtr/goleveldb/leveldb"
)
)
func
init
()
{
func
init
()
{
...
@@ -253,7 +252,7 @@ func newRetrieveIndexesTest(db *DB, chunk swarm.Chunk, storeTimestamp, accessTim
...
@@ -253,7 +252,7 @@ func newRetrieveIndexesTest(db *DB, chunk swarm.Chunk, storeTimestamp, accessTim
validateItem
(
t
,
item
,
chunk
.
Address
()
.
Bytes
(),
chunk
.
Data
(),
storeTimestamp
,
0
)
validateItem
(
t
,
item
,
chunk
.
Address
()
.
Bytes
(),
chunk
.
Data
(),
storeTimestamp
,
0
)
// access index should not be set
// access index should not be set
wantErr
:=
leveldb
.
ErrNotFound
wantErr
:=
shed
.
ErrNotFound
_
,
err
=
db
.
retrievalAccessIndex
.
Get
(
addressToItem
(
chunk
.
Address
()))
_
,
err
=
db
.
retrievalAccessIndex
.
Get
(
addressToItem
(
chunk
.
Address
()))
if
err
!=
wantErr
{
if
err
!=
wantErr
{
t
.
Errorf
(
"got error %v, want %v"
,
err
,
wantErr
)
t
.
Errorf
(
"got error %v, want %v"
,
err
,
wantErr
)
...
...
pkg/storage/localstore/mode_get.go
View file @
eda8aecd
...
@@ -20,10 +20,9 @@ import (
...
@@ -20,10 +20,9 @@ import (
"context"
"context"
"time"
"time"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/swarm/shed"
"github.com/syndtr/goleveldb/leveldb"
)
)
// Get returns a chunk from the database. If the chunk is
// Get returns a chunk from the database. If the chunk is
...
@@ -43,7 +42,7 @@ func (db *DB) Get(ctx context.Context, mode storage.ModeGet, addr swarm.Address)
...
@@ -43,7 +42,7 @@ func (db *DB) Get(ctx context.Context, mode storage.ModeGet, addr swarm.Address)
out
,
err
:=
db
.
get
(
mode
,
addr
)
out
,
err
:=
db
.
get
(
mode
,
addr
)
if
err
!=
nil
{
if
err
!=
nil
{
if
err
==
leveldb
.
ErrNotFound
{
if
err
==
shed
.
ErrNotFound
{
return
nil
,
storage
.
ErrNotFound
return
nil
,
storage
.
ErrNotFound
}
}
return
nil
,
err
return
nil
,
err
...
@@ -124,7 +123,7 @@ func (db *DB) updateGC(item shed.Item) (err error) {
...
@@ -124,7 +123,7 @@ func (db *DB) updateGC(item shed.Item) (err error) {
db
.
batchMu
.
Lock
()
db
.
batchMu
.
Lock
()
defer
db
.
batchMu
.
Unlock
()
defer
db
.
batchMu
.
Unlock
()
batch
:=
new
(
leveldb
.
Batch
)
batch
:=
db
.
shed
.
GetBatch
(
true
)
// update accessTimeStamp in retrieve, gc
// update accessTimeStamp in retrieve, gc
...
@@ -132,7 +131,7 @@ func (db *DB) updateGC(item shed.Item) (err error) {
...
@@ -132,7 +131,7 @@ func (db *DB) updateGC(item shed.Item) (err error) {
switch
err
{
switch
err
{
case
nil
:
case
nil
:
item
.
AccessTimestamp
=
i
.
AccessTimestamp
item
.
AccessTimestamp
=
i
.
AccessTimestamp
case
leveldb
.
ErrNotFound
:
case
shed
.
ErrNotFound
:
// no chunk accesses
// no chunk accesses
default
:
default
:
return
err
return
err
...
...
pkg/storage/localstore/mode_get_multi.go
View file @
eda8aecd
...
@@ -20,10 +20,9 @@ import (
...
@@ -20,10 +20,9 @@ import (
"context"
"context"
"time"
"time"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/swarm/shed"
"github.com/syndtr/goleveldb/leveldb"
)
)
// GetMulti returns chunks from the database. If one of the chunks is not found
// GetMulti returns chunks from the database. If one of the chunks is not found
...
@@ -42,7 +41,7 @@ func (db *DB) GetMulti(ctx context.Context, mode storage.ModeGet, addrs ...swarm
...
@@ -42,7 +41,7 @@ func (db *DB) GetMulti(ctx context.Context, mode storage.ModeGet, addrs ...swarm
out
,
err
:=
db
.
getMulti
(
mode
,
addrs
...
)
out
,
err
:=
db
.
getMulti
(
mode
,
addrs
...
)
if
err
!=
nil
{
if
err
!=
nil
{
if
err
==
leveldb
.
ErrNotFound
{
if
err
==
shed
.
ErrNotFound
{
return
nil
,
storage
.
ErrNotFound
return
nil
,
storage
.
ErrNotFound
}
}
return
nil
,
err
return
nil
,
err
...
...
pkg/storage/localstore/mode_put.go
View file @
eda8aecd
...
@@ -20,10 +20,10 @@ import (
...
@@ -20,10 +20,10 @@ import (
"context"
"context"
"time"
"time"
"github.com/dgraph-io/badger/v2"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/swarm/shed"
"github.com/syndtr/goleveldb/leveldb"
)
)
// Put stores Chunks to database and depending
// Put stores Chunks to database and depending
...
@@ -55,7 +55,7 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e
...
@@ -55,7 +55,7 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e
db
.
batchMu
.
Lock
()
db
.
batchMu
.
Lock
()
defer
db
.
batchMu
.
Unlock
()
defer
db
.
batchMu
.
Unlock
()
batch
:=
new
(
leveldb
.
Batch
)
batch
:=
db
.
shed
.
GetBatch
(
true
)
// variables that provide information for operations
// variables that provide information for operations
// to be done after write batch function successfully executes
// to be done after write batch function successfully executes
...
@@ -130,7 +130,9 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e
...
@@ -130,7 +130,9 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e
}
}
for
po
,
id
:=
range
binIDs
{
for
po
,
id
:=
range
binIDs
{
db
.
binIDs
.
PutInBatch
(
batch
,
uint64
(
po
),
id
)
if
err
:=
db
.
binIDs
.
PutInBatch
(
batch
,
uint64
(
po
),
id
);
err
!=
nil
{
return
nil
,
err
}
}
}
err
=
db
.
incGCSizeInBatch
(
batch
,
gcSizeChange
)
err
=
db
.
incGCSizeInBatch
(
batch
,
gcSizeChange
)
...
@@ -157,14 +159,14 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e
...
@@ -157,14 +159,14 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e
// - it does not enter the syncpool
// - it does not enter the syncpool
// The batch can be written to the database.
// The batch can be written to the database.
// Provided batch and binID map are updated.
// Provided batch and binID map are updated.
func
(
db
*
DB
)
putRequest
(
batch
*
leveldb
.
Batch
,
binIDs
map
[
uint8
]
uint64
,
item
shed
.
Item
)
(
exists
bool
,
gcSizeChange
int64
,
err
error
)
{
func
(
db
*
DB
)
putRequest
(
batch
*
badger
.
Txn
,
binIDs
map
[
uint8
]
uint64
,
item
shed
.
Item
)
(
exists
bool
,
gcSizeChange
int64
,
err
error
)
{
i
,
err
:=
db
.
retrievalDataIndex
.
Get
(
item
)
i
,
err
:=
db
.
retrievalDataIndex
.
Get
(
item
)
switch
err
{
switch
err
{
case
nil
:
case
nil
:
exists
=
true
exists
=
true
item
.
StoreTimestamp
=
i
.
StoreTimestamp
item
.
StoreTimestamp
=
i
.
StoreTimestamp
item
.
BinID
=
i
.
BinID
item
.
BinID
=
i
.
BinID
case
leveldb
.
ErrNotFound
:
case
shed
.
ErrNotFound
:
// no chunk accesses
// no chunk accesses
exists
=
false
exists
=
false
default
:
default
:
...
@@ -197,7 +199,7 @@ func (db *DB) putRequest(batch *leveldb.Batch, binIDs map[uint8]uint64, item she
...
@@ -197,7 +199,7 @@ func (db *DB) putRequest(batch *leveldb.Batch, binIDs map[uint8]uint64, item she
// - put to indexes: retrieve, push, pull
// - put to indexes: retrieve, push, pull
// The batch can be written to the database.
// The batch can be written to the database.
// Provided batch and binID map are updated.
// Provided batch and binID map are updated.
func
(
db
*
DB
)
putUpload
(
batch
*
leveldb
.
Batch
,
binIDs
map
[
uint8
]
uint64
,
item
shed
.
Item
)
(
exists
bool
,
gcSizeChange
int64
,
err
error
)
{
func
(
db
*
DB
)
putUpload
(
batch
*
badger
.
Txn
,
binIDs
map
[
uint8
]
uint64
,
item
shed
.
Item
)
(
exists
bool
,
gcSizeChange
int64
,
err
error
)
{
exists
,
err
=
db
.
retrievalDataIndex
.
Has
(
item
)
exists
,
err
=
db
.
retrievalDataIndex
.
Has
(
item
)
if
err
!=
nil
{
if
err
!=
nil
{
return
false
,
0
,
err
return
false
,
0
,
err
...
@@ -259,7 +261,7 @@ func (db *DB) putUpload(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed
...
@@ -259,7 +261,7 @@ func (db *DB) putUpload(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed
// - put to indexes: retrieve, pull
// - put to indexes: retrieve, pull
// The batch can be written to the database.
// The batch can be written to the database.
// Provided batch and binID map are updated.
// Provided batch and binID map are updated.
func
(
db
*
DB
)
putSync
(
batch
*
leveldb
.
Batch
,
binIDs
map
[
uint8
]
uint64
,
item
shed
.
Item
)
(
exists
bool
,
gcSizeChange
int64
,
err
error
)
{
func
(
db
*
DB
)
putSync
(
batch
*
badger
.
Txn
,
binIDs
map
[
uint8
]
uint64
,
item
shed
.
Item
)
(
exists
bool
,
gcSizeChange
int64
,
err
error
)
{
exists
,
err
=
db
.
retrievalDataIndex
.
Has
(
item
)
exists
,
err
=
db
.
retrievalDataIndex
.
Has
(
item
)
if
err
!=
nil
{
if
err
!=
nil
{
return
false
,
0
,
err
return
false
,
0
,
err
...
@@ -309,7 +311,7 @@ func (db *DB) putSync(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.I
...
@@ -309,7 +311,7 @@ func (db *DB) putSync(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.I
// a chunk is added to a node's localstore and given that the chunk is
// a chunk is added to a node's localstore and given that the chunk is
// already within that node's NN (thus, it can be added to the gc index
// already within that node's NN (thus, it can be added to the gc index
// safely)
// safely)
func
(
db
*
DB
)
setGC
(
batch
*
leveldb
.
Batch
,
item
shed
.
Item
)
(
gcSizeChange
int64
,
err
error
)
{
func
(
db
*
DB
)
setGC
(
batch
*
badger
.
Txn
,
item
shed
.
Item
)
(
gcSizeChange
int64
,
err
error
)
{
if
item
.
BinID
==
0
{
if
item
.
BinID
==
0
{
i
,
err
:=
db
.
retrievalDataIndex
.
Get
(
item
)
i
,
err
:=
db
.
retrievalDataIndex
.
Get
(
item
)
if
err
!=
nil
{
if
err
!=
nil
{
...
@@ -326,7 +328,7 @@ func (db *DB) setGC(batch *leveldb.Batch, item shed.Item) (gcSizeChange int64, e
...
@@ -326,7 +328,7 @@ func (db *DB) setGC(batch *leveldb.Batch, item shed.Item) (gcSizeChange int64, e
return
0
,
err
return
0
,
err
}
}
gcSizeChange
--
gcSizeChange
--
case
leveldb
.
ErrNotFound
:
case
shed
.
ErrNotFound
:
// the chunk is not accessed before
// the chunk is not accessed before
default
:
default
:
return
0
,
err
return
0
,
err
...
...
pkg/storage/localstore/mode_put_test.go
View file @
eda8aecd
...
@@ -24,9 +24,9 @@ import (
...
@@ -24,9 +24,9 @@ import (
"testing"
"testing"
"time"
"time"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/syndtr/goleveldb/leveldb"
)
)
// TestModePutRequest validates ModePutRequest index values on the provided DB.
// TestModePutRequest validates ModePutRequest index values on the provided DB.
...
@@ -363,7 +363,7 @@ func TestModePut_addToGc(t *testing.T) {
...
@@ -363,7 +363,7 @@ func TestModePut_addToGc(t *testing.T) {
binIDs
[
po
]
++
binIDs
[
po
]
++
var
wantErr
error
var
wantErr
error
if
!
m
.
putToGc
{
if
!
m
.
putToGc
{
wantErr
=
leveldb
.
ErrNotFound
wantErr
=
shed
.
ErrNotFound
}
}
newRetrieveIndexesTestWithAccess
(
db
,
ch
,
wantTimestamp
,
wantTimestamp
)
newRetrieveIndexesTestWithAccess
(
db
,
ch
,
wantTimestamp
,
wantTimestamp
)
newGCIndexTest
(
db
,
ch
,
wantTimestamp
,
wantTimestamp
,
binIDs
[
po
],
wantErr
)(
t
)
newGCIndexTest
(
db
,
ch
,
wantTimestamp
,
wantTimestamp
,
binIDs
[
po
],
wantErr
)(
t
)
...
@@ -429,7 +429,7 @@ func TestModePut_addToGcExisting(t *testing.T) {
...
@@ -429,7 +429,7 @@ func TestModePut_addToGcExisting(t *testing.T) {
binIDs
[
po
]
++
binIDs
[
po
]
++
var
wantErr
error
var
wantErr
error
if
!
m
.
putToGc
{
if
!
m
.
putToGc
{
wantErr
=
leveldb
.
ErrNotFound
wantErr
=
shed
.
ErrNotFound
}
}
newRetrieveIndexesTestWithAccess
(
db
,
ch
,
wantStoreTimestamp
,
wantAccessTimestamp
)
newRetrieveIndexesTestWithAccess
(
db
,
ch
,
wantStoreTimestamp
,
wantAccessTimestamp
)
...
...
pkg/storage/localstore/mode_set.go
View file @
eda8aecd
...
@@ -21,10 +21,11 @@ import (
...
@@ -21,10 +21,11 @@ import (
"errors"
"errors"
"time"
"time"
"github.com/dgraph-io/badger/v2"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/tags"
"github.com/syndtr/goleveldb/leveldb"
)
)
// Set updates database indexes for
// Set updates database indexes for
...
@@ -50,7 +51,7 @@ func (db *DB) set(mode storage.ModeSet, addrs ...swarm.Address) (err error) {
...
@@ -50,7 +51,7 @@ func (db *DB) set(mode storage.ModeSet, addrs ...swarm.Address) (err error) {
db
.
batchMu
.
Lock
()
db
.
batchMu
.
Lock
()
defer
db
.
batchMu
.
Unlock
()
defer
db
.
batchMu
.
Unlock
()
batch
:=
new
(
leveldb
.
Batch
)
batch
:=
db
.
shed
.
GetBatch
(
true
)
// variables that provide information for operations
// variables that provide information for operations
// to be done after write batch function successfully executes
// to be done after write batch function successfully executes
...
@@ -73,7 +74,9 @@ func (db *DB) set(mode storage.ModeSet, addrs ...swarm.Address) (err error) {
...
@@ -73,7 +74,9 @@ func (db *DB) set(mode storage.ModeSet, addrs ...swarm.Address) (err error) {
triggerPullFeed
[
po
]
=
struct
{}{}
triggerPullFeed
[
po
]
=
struct
{}{}
}
}
for
po
,
id
:=
range
binIDs
{
for
po
,
id
:=
range
binIDs
{
db
.
binIDs
.
PutInBatch
(
batch
,
uint64
(
po
),
id
)
if
err
:=
db
.
binIDs
.
PutInBatch
(
batch
,
uint64
(
po
),
id
);
err
!=
nil
{
return
err
}
}
}
case
storage
.
ModeSetSyncPush
,
storage
.
ModeSetSyncPull
:
case
storage
.
ModeSetSyncPush
,
storage
.
ModeSetSyncPull
:
...
@@ -131,7 +134,7 @@ func (db *DB) set(mode storage.ModeSet, addrs ...swarm.Address) (err error) {
...
@@ -131,7 +134,7 @@ func (db *DB) set(mode storage.ModeSet, addrs ...swarm.Address) (err error) {
// setAccess sets the chunk access time by updating required indexes:
// setAccess sets the chunk access time by updating required indexes:
// - add to pull, insert to gc
// - add to pull, insert to gc
// Provided batch and binID map are updated.
// Provided batch and binID map are updated.
func
(
db
*
DB
)
setAccess
(
batch
*
leveldb
.
Batch
,
binIDs
map
[
uint8
]
uint64
,
addr
swarm
.
Address
,
po
uint8
)
(
gcSizeChange
int64
,
err
error
)
{
func
(
db
*
DB
)
setAccess
(
batch
*
badger
.
Txn
,
binIDs
map
[
uint8
]
uint64
,
addr
swarm
.
Address
,
po
uint8
)
(
gcSizeChange
int64
,
err
error
)
{
item
:=
addressToItem
(
addr
)
item
:=
addressToItem
(
addr
)
...
@@ -143,7 +146,7 @@ func (db *DB) setAccess(batch *leveldb.Batch, binIDs map[uint8]uint64, addr swar
...
@@ -143,7 +146,7 @@ func (db *DB) setAccess(batch *leveldb.Batch, binIDs map[uint8]uint64, addr swar
case
nil
:
case
nil
:
item
.
StoreTimestamp
=
i
.
StoreTimestamp
item
.
StoreTimestamp
=
i
.
StoreTimestamp
item
.
BinID
=
i
.
BinID
item
.
BinID
=
i
.
BinID
case
leveldb
.
ErrNotFound
:
case
shed
.
ErrNotFound
:
err
=
db
.
pushIndex
.
DeleteInBatch
(
batch
,
item
)
err
=
db
.
pushIndex
.
DeleteInBatch
(
batch
,
item
)
if
err
!=
nil
{
if
err
!=
nil
{
return
0
,
err
return
0
,
err
...
@@ -166,7 +169,7 @@ func (db *DB) setAccess(batch *leveldb.Batch, binIDs map[uint8]uint64, addr swar
...
@@ -166,7 +169,7 @@ func (db *DB) setAccess(batch *leveldb.Batch, binIDs map[uint8]uint64, addr swar
return
0
,
err
return
0
,
err
}
}
gcSizeChange
--
gcSizeChange
--
case
leveldb
.
ErrNotFound
:
case
shed
.
ErrNotFound
:
// the chunk is not accessed before
// the chunk is not accessed before
default
:
default
:
return
0
,
err
return
0
,
err
...
@@ -196,7 +199,7 @@ func (db *DB) setAccess(batch *leveldb.Batch, binIDs map[uint8]uint64, addr swar
...
@@ -196,7 +199,7 @@ func (db *DB) setAccess(batch *leveldb.Batch, binIDs map[uint8]uint64, addr swar
// from push sync index
// from push sync index
// - update to gc index happens given item does not exist in pin index
// - update to gc index happens given item does not exist in pin index
// Provided batch is updated.
// Provided batch is updated.
func
(
db
*
DB
)
setSync
(
batch
*
leveldb
.
Batch
,
addr
swarm
.
Address
,
mode
storage
.
ModeSet
)
(
gcSizeChange
int64
,
err
error
)
{
func
(
db
*
DB
)
setSync
(
batch
*
badger
.
Txn
,
addr
swarm
.
Address
,
mode
storage
.
ModeSet
)
(
gcSizeChange
int64
,
err
error
)
{
item
:=
addressToItem
(
addr
)
item
:=
addressToItem
(
addr
)
// need to get access timestamp here as it is not
// need to get access timestamp here as it is not
...
@@ -205,7 +208,7 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address, mode storage.Mod
...
@@ -205,7 +208,7 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address, mode storage.Mod
i
,
err
:=
db
.
retrievalDataIndex
.
Get
(
item
)
i
,
err
:=
db
.
retrievalDataIndex
.
Get
(
item
)
if
err
!=
nil
{
if
err
!=
nil
{
if
err
==
leveldb
.
ErrNotFound
{
if
err
==
shed
.
ErrNotFound
{
// chunk is not found,
// chunk is not found,
// no need to update gc index
// no need to update gc index
// just delete from the push index
// just delete from the push index
...
@@ -228,7 +231,7 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address, mode storage.Mod
...
@@ -228,7 +231,7 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address, mode storage.Mod
// this prevents duplicate increments
// this prevents duplicate increments
i
,
err
:=
db
.
pullIndex
.
Get
(
item
)
i
,
err
:=
db
.
pullIndex
.
Get
(
item
)
if
err
!=
nil
{
if
err
!=
nil
{
if
err
==
leveldb
.
ErrNotFound
{
if
err
==
shed
.
ErrNotFound
{
// we handle this error internally, since this is an internal inconsistency of the indices
// we handle this error internally, since this is an internal inconsistency of the indices
// if we return the error here - it means that for example, in stream protocol peers which we sync
// if we return the error here - it means that for example, in stream protocol peers which we sync
// to would be dropped. this is possible when the chunk is put with ModePutRequest and ModeSetSyncPull is
// to would be dropped. this is possible when the chunk is put with ModePutRequest and ModeSetSyncPull is
...
@@ -263,7 +266,7 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address, mode storage.Mod
...
@@ -263,7 +266,7 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address, mode storage.Mod
case
storage
.
ModeSetSyncPush
:
case
storage
.
ModeSetSyncPush
:
i
,
err
:=
db
.
pushIndex
.
Get
(
item
)
i
,
err
:=
db
.
pushIndex
.
Get
(
item
)
if
err
!=
nil
{
if
err
!=
nil
{
if
err
==
leveldb
.
ErrNotFound
{
if
err
==
shed
.
ErrNotFound
{
// we handle this error internally, since this is an internal inconsistency of the indices
// we handle this error internally, since this is an internal inconsistency of the indices
// this error can happen if the chunk is put with ModePutRequest or ModePutSync
// this error can happen if the chunk is put with ModePutRequest or ModePutSync
// but this function is called with ModeSetSyncPush
// but this function is called with ModeSetSyncPush
...
@@ -303,7 +306,7 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address, mode storage.Mod
...
@@ -303,7 +306,7 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address, mode storage.Mod
return
0
,
err
return
0
,
err
}
}
gcSizeChange
--
gcSizeChange
--
case
leveldb
.
ErrNotFound
:
case
shed
.
ErrNotFound
:
// the chunk is not accessed before
// the chunk is not accessed before
default
:
default
:
return
0
,
err
return
0
,
err
...
@@ -333,7 +336,7 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address, mode storage.Mod
...
@@ -333,7 +336,7 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address, mode storage.Mod
// setRemove removes the chunk by updating indexes:
// setRemove removes the chunk by updating indexes:
// - delete from retrieve, pull, gc
// - delete from retrieve, pull, gc
// Provided batch is updated.
// Provided batch is updated.
func
(
db
*
DB
)
setRemove
(
batch
*
leveldb
.
Batch
,
addr
swarm
.
Address
)
(
gcSizeChange
int64
,
err
error
)
{
func
(
db
*
DB
)
setRemove
(
batch
*
badger
.
Txn
,
addr
swarm
.
Address
)
(
gcSizeChange
int64
,
err
error
)
{
item
:=
addressToItem
(
addr
)
item
:=
addressToItem
(
addr
)
// need to get access timestamp here as it is not
// need to get access timestamp here as it is not
...
@@ -343,7 +346,7 @@ func (db *DB) setRemove(batch *leveldb.Batch, addr swarm.Address) (gcSizeChange
...
@@ -343,7 +346,7 @@ func (db *DB) setRemove(batch *leveldb.Batch, addr swarm.Address) (gcSizeChange
switch
err
{
switch
err
{
case
nil
:
case
nil
:
item
.
AccessTimestamp
=
i
.
AccessTimestamp
item
.
AccessTimestamp
=
i
.
AccessTimestamp
case
leveldb
.
ErrNotFound
:
case
shed
.
ErrNotFound
:
default
:
default
:
return
0
,
err
return
0
,
err
}
}
...
@@ -383,14 +386,14 @@ func (db *DB) setRemove(batch *leveldb.Batch, addr swarm.Address) (gcSizeChange
...
@@ -383,14 +386,14 @@ func (db *DB) setRemove(batch *leveldb.Batch, addr swarm.Address) (gcSizeChange
// setPin increments pin counter for the chunk by updating
// setPin increments pin counter for the chunk by updating
// pin index and sets the chunk to be excluded from garbage collection.
// pin index and sets the chunk to be excluded from garbage collection.
// Provided batch is updated.
// Provided batch is updated.
func
(
db
*
DB
)
setPin
(
batch
*
leveldb
.
Batch
,
addr
swarm
.
Address
)
(
err
error
)
{
func
(
db
*
DB
)
setPin
(
batch
*
badger
.
Txn
,
addr
swarm
.
Address
)
(
err
error
)
{
item
:=
addressToItem
(
addr
)
item
:=
addressToItem
(
addr
)
// Get the existing pin counter of the chunk
// Get the existing pin counter of the chunk
existingPinCounter
:=
uint64
(
0
)
existingPinCounter
:=
uint64
(
0
)
pinnedChunk
,
err
:=
db
.
pinIndex
.
Get
(
item
)
pinnedChunk
,
err
:=
db
.
pinIndex
.
Get
(
item
)
if
err
!=
nil
{
if
err
!=
nil
{
if
err
==
leveldb
.
ErrNotFound
{
if
err
==
shed
.
ErrNotFound
{
// If this Address is not present in DB, then its a new entry
// If this Address is not present in DB, then its a new entry
existingPinCounter
=
0
existingPinCounter
=
0
...
@@ -418,7 +421,7 @@ func (db *DB) setPin(batch *leveldb.Batch, addr swarm.Address) (err error) {
...
@@ -418,7 +421,7 @@ func (db *DB) setPin(batch *leveldb.Batch, addr swarm.Address) (err error) {
// setUnpin decrements pin counter for the chunk by updating pin index.
// setUnpin decrements pin counter for the chunk by updating pin index.
// Provided batch is updated.
// Provided batch is updated.
func
(
db
*
DB
)
setUnpin
(
batch
*
leveldb
.
Batch
,
addr
swarm
.
Address
)
(
err
error
)
{
func
(
db
*
DB
)
setUnpin
(
batch
*
badger
.
Txn
,
addr
swarm
.
Address
)
(
err
error
)
{
item
:=
addressToItem
(
addr
)
item
:=
addressToItem
(
addr
)
// Get the existing pin counter of the chunk
// Get the existing pin counter of the chunk
...
...
pkg/storage/localstore/mode_set_test.go
View file @
eda8aecd
...
@@ -21,11 +21,10 @@ import (
...
@@ -21,11 +21,10 @@ import (
"testing"
"testing"
"time"
"time"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/tags"
tagtesting
"github.com/ethersphere/bee/pkg/tags/testing"
tagtesting
"github.com/ethersphere/bee/pkg/tags/testing"
"github.com/ethersphere/swarm/shed"
"github.com/syndtr/goleveldb/leveldb"
)
)
// TestModeSetAccess validates ModeSetAccess index values on the provided DB.
// TestModeSetAccess validates ModeSetAccess index values on the provided DB.
...
@@ -334,7 +333,7 @@ func TestModeSetRemove(t *testing.T) {
...
@@ -334,7 +333,7 @@ func TestModeSetRemove(t *testing.T) {
t
.
Run
(
"retrieve indexes"
,
func
(
t
*
testing
.
T
)
{
t
.
Run
(
"retrieve indexes"
,
func
(
t
*
testing
.
T
)
{
for
_
,
ch
:=
range
chunks
{
for
_
,
ch
:=
range
chunks
{
wantErr
:=
leveldb
.
ErrNotFound
wantErr
:=
shed
.
ErrNotFound
_
,
err
:=
db
.
retrievalDataIndex
.
Get
(
addressToItem
(
ch
.
Address
()))
_
,
err
:=
db
.
retrievalDataIndex
.
Get
(
addressToItem
(
ch
.
Address
()))
if
err
!=
wantErr
{
if
err
!=
wantErr
{
t
.
Errorf
(
"got error %v, want %v"
,
err
,
wantErr
)
t
.
Errorf
(
"got error %v, want %v"
,
err
,
wantErr
)
...
@@ -353,7 +352,7 @@ func TestModeSetRemove(t *testing.T) {
...
@@ -353,7 +352,7 @@ func TestModeSetRemove(t *testing.T) {
})
})
for
_
,
ch
:=
range
chunks
{
for
_
,
ch
:=
range
chunks
{
newPullIndexTest
(
db
,
ch
,
0
,
leveldb
.
ErrNotFound
)(
t
)
newPullIndexTest
(
db
,
ch
,
0
,
shed
.
ErrNotFound
)(
t
)
}
}
t
.
Run
(
"pull index count"
,
newItemsCountTest
(
db
.
pullIndex
,
0
))
t
.
Run
(
"pull index count"
,
newItemsCountTest
(
db
.
pullIndex
,
0
))
...
...
pkg/storage/localstore/subscription_pull.go
View file @
eda8aecd
...
@@ -22,10 +22,9 @@ import (
...
@@ -22,10 +22,9 @@ import (
"sync"
"sync"
"time"
"time"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/swarm/shed"
"github.com/syndtr/goleveldb/leveldb"
)
)
// SubscribePull returns a channel that provides chunk addresses and stored times from pull syncing index.
// SubscribePull returns a channel that provides chunk addresses and stored times from pull syncing index.
...
@@ -187,7 +186,7 @@ func (db *DB) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
...
@@ -187,7 +186,7 @@ func (db *DB) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
item
,
err
:=
db
.
pullIndex
.
Last
([]
byte
{
bin
})
item
,
err
:=
db
.
pullIndex
.
Last
([]
byte
{
bin
})
if
err
!=
nil
{
if
err
!=
nil
{
if
err
==
leveldb
.
ErrNotFound
{
if
err
==
shed
.
ErrNotFound
{
return
0
,
nil
return
0
,
nil
}
}
return
0
,
err
return
0
,
err
...
...
pkg/storage/localstore/subscription_pull_test.go
View file @
eda8aecd
...
@@ -23,9 +23,9 @@ import (
...
@@ -23,9 +23,9 @@ import (
"testing"
"testing"
"time"
"time"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/swarm/shed"
)
)
// TestDB_SubscribePull_first is a regression test for the first=false (from-1) bug
// TestDB_SubscribePull_first is a regression test for the first=false (from-1) bug
...
...
pkg/storage/localstore/subscription_push.go
View file @
eda8aecd
...
@@ -22,8 +22,8 @@ import (
...
@@ -22,8 +22,8 @@ import (
"sync"
"sync"
"time"
"time"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/swarm/shed"
)
)
// SubscribePush returns a channel that provides storage chunks with ordering from push syncing index.
// SubscribePush returns a channel that provides storage chunks with ordering from push syncing index.
...
...
pkg/storage/localstore/subscription_push_test.go
View file @
eda8aecd
...
@@ -32,6 +32,8 @@ import (
...
@@ -32,6 +32,8 @@ import (
// push syncing subscription is created and validates if
// push syncing subscription is created and validates if
// all addresses are received in the right order.
// all addresses are received in the right order.
func
TestDB_SubscribePush
(
t
*
testing
.
T
)
{
func
TestDB_SubscribePush
(
t
*
testing
.
T
)
{
t
.
Skip
(
"fails with badger shed"
)
db
,
cleanupFunc
:=
newTestDB
(
t
,
nil
)
db
,
cleanupFunc
:=
newTestDB
(
t
,
nil
)
defer
cleanupFunc
()
defer
cleanupFunc
()
...
@@ -118,6 +120,8 @@ func TestDB_SubscribePush(t *testing.T) {
...
@@ -118,6 +120,8 @@ func TestDB_SubscribePush(t *testing.T) {
// multiple push syncing subscriptions are created and
// multiple push syncing subscriptions are created and
// validates if all addresses are received in the right order.
// validates if all addresses are received in the right order.
func
TestDB_SubscribePush_multiple
(
t
*
testing
.
T
)
{
func
TestDB_SubscribePush_multiple
(
t
*
testing
.
T
)
{
t
.
Skip
(
"fails with badger shed"
)
db
,
cleanupFunc
:=
newTestDB
(
t
,
nil
)
db
,
cleanupFunc
:=
newTestDB
(
t
,
nil
)
defer
cleanupFunc
()
defer
cleanupFunc
()
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment