Commit f8843d15 authored by Nemanja Zbiljić, committed by GitHub

Reverse the order of the queue for push sync index (#827)

parent 5f33df8b
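In short, SubscribePush no longer resumes forward from the last item it sent; each triggered iteration now walks the push index in reverse key order, remembers the key of the first item it sends, and stops the next iteration as soon as it reaches that already-processed entry. The sketch below is illustrative and not part of the commit (the function name, stopKey, and the import path are assumptions); it only shows how the new Reverse option and Index.ItemKey combine for this pattern:

package example

import (
	"bytes"

	"github.com/ethersphere/bee/pkg/shed" // import path assumed from the repository layout
)

// iterateReverseUntil walks an index in reverse key order and stops once it
// reaches a previously recorded key, mirroring what SubscribePush does below.
func iterateReverseUntil(index shed.Index, stopKey []byte) error {
	return index.Iterate(func(item shed.Item) (stop bool, err error) {
		key, err := index.ItemKey(item)
		if err != nil {
			return true, err
		}
		if stopKey != nil && bytes.Equal(key, stopKey) {
			// reached an item handled in a previous pass
			return true, nil
		}
		// process item here; entries arrive in reverse key order
		return false, nil
	}, &shed.IterateOptions{
		Reverse: true,
	})
}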
@@ -17,6 +17,7 @@
package localstore
import (
"bytes"
"context"
"sync"
"time"
@@ -52,14 +53,15 @@ func (db *DB) SubscribePush(ctx context.Context) (c <-chan swarm.Chunk, stop fun
// close the returned chunkInfo channel at the end to
// signal that the subscription is done
defer close(chunks)
-// sinceItem is the Item from which the next iteration
-// should start. The first iteration starts from the first Item.
-var sinceItem *shed.Item
+// lastItem is the first Item received in the last iteration.
+var lastItem *shed.Item
+// toItemKey is the key for the Item that was oldest in the last iteration.
+var toItemKey []byte
for {
select {
case <-trigger:
// iterate until:
-// - last index Item is reached
+// - last non-processed Item is reached
// - subscription stop is called
// - context is done
db.metrics.SubscribePushIteration.Inc()
@@ -73,12 +75,27 @@ func (db *DB) SubscribePush(ctx context.Context) (c <-chan swarm.Chunk, stop fun
return true, err
}
// check if we reached item that was already processed
// and stop
if toItemKey != nil {
dataItemKey, err := db.pushIndex.ItemKey(dataItem)
if err != nil {
return true, err
}
if bytes.Equal(dataItemKey, toItemKey) {
toItemKey = nil
return true, nil
}
}
select {
case chunks <- swarm.NewChunk(swarm.NewAddress(dataItem.Address), dataItem.Data).WithTagID(item.Tag):
count++
-// set next iteration start item
-// when its chunk is successfully sent to channel
-sinceItem = &item
+// we set first one sent, which is "oldest" at that point
+if lastItem == nil {
+lastItem = &item
+}
return false, nil
case <-stopChan:
// gracefully stop the iteration
@@ -92,10 +109,7 @@ func (db *DB) SubscribePush(ctx context.Context) (c <-chan swarm.Chunk, stop fun
return true, ctx.Err()
}
}, &shed.IterateOptions{
-StartFrom: sinceItem,
-// sinceItem was sent as the last Address in the previous
-// iterator call, skip it in this one
-SkipStartFromItem: true,
+Reverse: true,
})
totalTimeMetric(db.metrics.TotalTimeSubscribePushIteration, iterStart)
@@ -105,6 +119,20 @@ func (db *DB) SubscribePush(ctx context.Context) (c <-chan swarm.Chunk, stop fun
db.logger.Debugf("localstore push subscription iteration: %v", err)
return
}
// save last Item from this iteration in order to know where
// to stop on next iteration
if lastItem != nil && toItemKey == nil {
// move 'toItemKey' to point to last item in previous iteration
toItemKey, err = db.pushIndex.ItemKey(*lastItem)
if err != nil {
return
}
}
// 'lastItem' should be populated on next iteration again
lastItem = nil
case <-stopChan:
// terminate the subscription
// on stop
...
@@ -37,6 +37,8 @@ func TestDB_SubscribePush(t *testing.T) {
chunks := make([]swarm.Chunk, 0)
var chunksMu sync.Mutex
chunkProcessedTimes := make([]int, 0)
uploadRandomChunks := func(count int) {
chunksMu.Lock()
defer chunksMu.Unlock()
@@ -50,9 +52,22 @@ func TestDB_SubscribePush(t *testing.T) {
}
chunks = append(chunks, ch)
chunkProcessedTimes = append(chunkProcessedTimes, 0)
}
}
// caller expected to hold lock on chunksMu
findChunkIndex := func(chunk swarm.Chunk) int {
for i, c := range chunks {
if chunk.Address().Equal(c.Address()) {
return i
}
}
return -1
}
// prepopulate database with some chunks
// before the subscription
uploadRandomChunks(10)
@@ -68,8 +83,11 @@ func TestDB_SubscribePush(t *testing.T) {
ch, stop := db.SubscribePush(ctx)
defer stop()
var lastStartIndex int = -1
// receive and validate addresses from the subscription
go func() {
var err error
var i int // address index
for {
select {
@@ -78,9 +96,19 @@ func TestDB_SubscribePush(t *testing.T) {
return
}
chunksMu.Lock()
-want := chunks[i]
+if i > lastStartIndex {
// no way to know which chunk will come first here
gotIndex := findChunkIndex(got)
if gotIndex <= lastStartIndex {
err = fmt.Errorf("got index %v, expected index above %v", gotIndex, lastStartIndex)
}
lastStartIndex = gotIndex
i = 0
}
cIndex := lastStartIndex - i
want := chunks[cIndex]
chunkProcessedTimes[cIndex]++
chunksMu.Unlock()
-var err error
if !bytes.Equal(got.Data(), want.Data()) {
err = fmt.Errorf("got chunk %v data %x, want %x", i, got.Data(), want.Data())
}
@@ -111,6 +139,18 @@ func TestDB_SubscribePush(t *testing.T) {
uploadRandomChunks(3)
checkErrChan(ctx, t, errChan, len(chunks))
chunksMu.Lock()
if lastStartIndex != len(chunks)-1 {
t.Fatalf("got %d chunks, expected %d", lastStartIndex, len(chunks))
}
for i, pc := range chunkProcessedTimes {
if pc != 1 {
t.Fatalf("chunk on address %s processed %d times, should be only once", chunks[i].Address(), pc)
}
}
chunksMu.Unlock()
}
// TestDB_SubscribePush_multiple uploads chunks before and after
@@ -138,6 +178,17 @@ func TestDB_SubscribePush_multiple(t *testing.T) {
}
}
// caller expected to hold lock on addrsMu
findAddressIndex := func(address swarm.Address) int {
for i, a := range addrs {
if a.Equal(address) {
return i
}
}
return -1
}
// prepopulate database with some chunks
// before the subscription
uploadRandomChunks(10)
@@ -152,6 +203,8 @@ func TestDB_SubscribePush_multiple(t *testing.T) {
subsCount := 10
lastStartIndexSlice := make([]int, subsCount)
// start a number of subscriptions
// that all of them will write every addresses error to errChan
for j := 0; j < subsCount; j++ {
@@ -160,7 +213,9 @@ func TestDB_SubscribePush_multiple(t *testing.T) {
// receive and validate addresses from the subscription
go func(j int) {
var err error
var i int // address index
lastStartIndexSlice[j] = -1
for {
select {
case got, ok := <-ch:
@@ -168,9 +223,18 @@ func TestDB_SubscribePush_multiple(t *testing.T) {
return
}
addrsMu.Lock()
-want := addrs[i]
+if i > lastStartIndexSlice[j] {
// no way to know which chunk will come first here
gotIndex := findAddressIndex(got.Address())
if gotIndex <= lastStartIndexSlice[j] {
err = fmt.Errorf("got index %v, expected index above %v", gotIndex, lastStartIndexSlice[j])
}
lastStartIndexSlice[j] = gotIndex
i = 0
}
aIndex := lastStartIndexSlice[j] - i
want := addrs[aIndex]
addrsMu.Unlock()
-var err error
if !got.Address().Equal(want) {
err = fmt.Errorf("got chunk %v address on subscription %v %s, want %s", i, j, got, want)
}
@@ -202,4 +266,12 @@ func TestDB_SubscribePush_multiple(t *testing.T) {
wantedChunksCount := len(addrs) * subsCount
checkErrChan(ctx, t, errChan, wantedChunksCount)
for j := 0; j < subsCount; j++ {
addrsMu.Lock()
if lastStartIndexSlice[j] != len(addrs)-1 {
t.Fatalf("got %d chunks, expected %d", lastStartIndexSlice[j], len(addrs))
}
addrsMu.Unlock()
}
}
@@ -134,6 +134,11 @@ func (db *DB) NewIndex(name string, funcs IndexFuncs) (f Index, err error) {
}, nil
}
// ItemKey accepts an Item and returns generated key for it.
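// The returned key can be compared with bytes.Equal; SubscribePush uses it
// this way to detect an item that was already processed in a previous pass.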
func (f Index) ItemKey(item Item) (key []byte, err error) {
return f.encodeKeyFunc(item)
}
// Get accepts key fields represented as Item to retrieve a
// value from the index and return maximum available information
// from the index represented as another Item.
@@ -284,6 +289,8 @@ type IterateOptions struct {
SkipStartFromItem bool
// Iterate over items which keys have a common prefix.
Prefix []byte
// Iterate over items in reverse order.
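// With Reverse set and StartFrom nil, iteration begins at the last key
// matching Prefix and moves towards the first one; with StartFrom set it
// begins at that item and moves backwards.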
Reverse bool
}
// Iterate function iterates over keys of the Index.
@@ -303,21 +310,67 @@ func (f Index) Iterate(fn IndexIterFunc, options *IterateOptions) (err error) {
return fmt.Errorf("encode key: %w", err)
}
}
it := f.db.NewIterator()
defer it.Release()
var ok bool
// move the cursor to the start key
-ok := it.Seek(startKey)
+ok = it.Seek(startKey)
-if !ok {
-// stop iterator if seek has failed
-return it.Error()
+if !options.Reverse {
+if !ok {
+// stop iterator if seek has failed
+return it.Error()
+}
} else {
// reverse seeker
if options.StartFrom != nil {
if !ok {
return it.Error()
}
} else {
// find last key for this index (and prefix)
// move cursor to last key
ok = it.Last()
if !ok {
return it.Error()
}
if lastKeyHasPrefix := bytes.HasPrefix(it.Key(), prefix); !lastKeyHasPrefix {
// increment last prefix byte (that is not 0xFF) to try to find last key
incrementedPrefix := bytesIncrement(prefix)
if incrementedPrefix == nil {
return fmt.Errorf("index iterator invalid prefix: %v -> %v", prefix, string(prefix))
}
// should find first key after prefix (same or different index)
ok = it.Seek(incrementedPrefix)
if !ok {
return it.Error()
}
// previous key should have proper prefix
ok = it.Prev()
if !ok {
return it.Error()
}
}
}
}
itSeekerFn := it.Next
if options.Reverse {
itSeekerFn = it.Prev
} }
if options.SkipStartFromItem && bytes.Equal(startKey, it.Key()) {
// skip the start from Item if it is the first key
// and it is explicitly configured to skip it
-ok = it.Next()
+ok = itSeekerFn()
}
-for ; ok; ok = it.Next() {
+for ; ok; ok = itSeekerFn() {
item, err := f.itemFromIterator(it, prefix)
if err != nil {
if errors.Is(err, leveldb.ErrNotFound) {
@@ -336,6 +389,26 @@ func (f Index) Iterate(fn IndexIterFunc, options *IterateOptions) (err error) {
return it.Error()
}
// bytesIncrement increments the last byte that is not 0xFF, and returns
// a new byte array truncated after the position that was incremented.
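// For illustration:
//   bytesIncrement([]byte{0x01, 0x02, 0xFF}) returns []byte{0x01, 0x03}
//   bytesIncrement([]byte{0xFF, 0xFF}) returns nil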
func bytesIncrement(bytes []byte) []byte {
b := append(bytes[:0:0], bytes...)
for i := len(bytes) - 1; i >= 0; {
if b[i] == 0xFF {
i--
continue
}
// found byte smaller than 0xFF: increment and truncate
b[i]++
return b[:i+1]
}
// input contained only 0xFF bytes
return nil
}
// First returns the first item in the Index which encoded key starts with a prefix.
// If the prefix is nil, the first element of the whole index is returned.
// If Index has no elements, a leveldb.ErrNotFound error is returned.
...
@@ -544,6 +544,228 @@ func TestIndex_Iterate(t *testing.T) {
})
}
// TestIndex_IterateReverse validates index Iterate
// functions for correctness in reversed order.
func TestIndex_IterateReverse(t *testing.T) {
db := newTestDB(t)
index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
if err != nil {
t.Fatal(err)
}
items := []Item{
{
Address: []byte("iterate-hash-01"),
Data: []byte("data80"),
},
{
Address: []byte("iterate-hash-03"),
Data: []byte("data22"),
},
{
Address: []byte("iterate-hash-05"),
Data: []byte("data41"),
},
{
Address: []byte("iterate-hash-02"),
Data: []byte("data84"),
},
{
Address: []byte("iterate-hash-06"),
Data: []byte("data1"),
},
}
batch := new(leveldb.Batch)
for _, i := range items {
err = index.PutInBatch(batch, i)
if err != nil {
t.Fatal(err)
}
}
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
item04 := Item{
Address: []byte("iterate-hash-04"),
Data: []byte("data0"),
}
err = index.Put(item04)
if err != nil {
t.Fatal(err)
}
items = append(items, item04)
sort.SliceStable(items, func(i, j int) bool {
return bytes.Compare(items[i].Address, items[j].Address) < 0
})
t.Run("all", func(t *testing.T) {
i := len(items) - 1
var count int
err := index.Iterate(func(item Item) (stop bool, err error) {
if i < 0 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i--
count++
return false, nil
}, &IterateOptions{
Reverse: true,
})
if err != nil {
t.Fatal(err)
}
wantItemsCount := len(items)
if count != wantItemsCount {
t.Errorf("got %v items, expected %v", count, wantItemsCount)
}
})
t.Run("start from", func(t *testing.T) {
startIndex := 3
i := startIndex
var count int
err := index.Iterate(func(item Item) (stop bool, err error) {
if i < 0 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i--
count++
return false, nil
}, &IterateOptions{
StartFrom: &items[startIndex],
Reverse: true,
})
if err != nil {
t.Fatal(err)
}
wantItemsCount := len(items) - startIndex + 1
if count != wantItemsCount {
t.Errorf("got %v items, expected %v", count, wantItemsCount)
}
})
t.Run("skip start from", func(t *testing.T) {
startIndex := 3
i := startIndex - 1
var count int
err := index.Iterate(func(item Item) (stop bool, err error) {
if i < 0 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i--
count++
return false, nil
}, &IterateOptions{
StartFrom: &items[startIndex],
SkipStartFromItem: true,
Reverse: true,
})
if err != nil {
t.Fatal(err)
}
wantItemsCount := len(items) - startIndex
if count != wantItemsCount {
t.Errorf("got %v items, expected %v", count, wantItemsCount)
}
})
t.Run("stop", func(t *testing.T) {
i := len(items) - 1
stopIndex := 3
var count int
err := index.Iterate(func(item Item) (stop bool, err error) {
if i < 0 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
count++
if i == stopIndex {
return true, nil
}
i--
return false, nil
}, &IterateOptions{
Reverse: true,
})
if err != nil {
t.Fatal(err)
}
wantItemsCount := stopIndex
if count != wantItemsCount {
t.Errorf("got %v items, expected %v", count, wantItemsCount)
}
})
t.Run("no overflow", func(t *testing.T) {
secondIndex, err := db.NewIndex("second-index", retrievalIndexFuncs)
if err != nil {
t.Fatal(err)
}
secondItem := Item{
Address: []byte("iterate-hash-10"),
Data: []byte("data-second"),
}
err = secondIndex.Put(secondItem)
if err != nil {
t.Fatal(err)
}
i := len(items) - 1
count := 0
err = index.Iterate(func(item Item) (stop bool, err error) {
if i < 0 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i--
count++
return false, nil
}, &IterateOptions{
Reverse: true,
})
if err != nil {
t.Fatal(err)
}
wantItemsCount := len(items)
if count != wantItemsCount {
t.Errorf("got %v items, expected %v", count, wantItemsCount)
}
i = 1
count = 0
err = secondIndex.Iterate(func(item Item) (stop bool, err error) {
if i < 0 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
checkItem(t, item, secondItem)
i--
count++
return false, nil
}, &IterateOptions{
Reverse: true,
})
if err != nil {
t.Fatal(err)
}
wantItemsCount = 1
if count != wantItemsCount {
t.Errorf("got %v items, expected %v", count, wantItemsCount)
}
})
}
// TestIndex_Iterate_withPrefix validates index Iterate
// function for correctness.
func TestIndex_Iterate_withPrefix(t *testing.T) {
@@ -730,6 +952,202 @@ func TestIndex_Iterate_withPrefix(t *testing.T) {
})
}
// TestIndex_IterateReverse_withPrefix validates index Iterate
// functions for correctness in reversed order.
func TestIndex_IterateReverse_withPrefix(t *testing.T) {
db := newTestDB(t)
index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
if err != nil {
t.Fatal(err)
}
allItems := []Item{
{Address: []byte("want-hash-00"), Data: []byte("data80")},
{Address: []byte("skip-hash-01"), Data: []byte("data81")},
{Address: []byte("skip-hash-02"), Data: []byte("data82")},
{Address: []byte("skip-hash-03"), Data: []byte("data83")},
{Address: []byte("want-hash-04"), Data: []byte("data84")},
{Address: []byte("want-hash-05"), Data: []byte("data85")},
{Address: []byte("want-hash-06"), Data: []byte("data86")},
{Address: []byte("want-hash-07"), Data: []byte("data87")},
{Address: []byte("want-hash-08"), Data: []byte("data88")},
{Address: []byte("want-hash-09"), Data: []byte("data89")},
{Address: []byte("z-skip-hash-10"), Data: []byte("data90")},
}
batch := new(leveldb.Batch)
for _, i := range allItems {
err = index.PutInBatch(batch, i)
if err != nil {
t.Fatal(err)
}
}
err = db.WriteBatch(batch)
if err != nil {
t.Fatal(err)
}
prefix := []byte("want")
items := make([]Item, 0)
for _, item := range allItems {
if bytes.HasPrefix(item.Address, prefix) {
items = append(items, item)
}
}
sort.SliceStable(items, func(i, j int) bool {
return bytes.Compare(items[i].Address, items[j].Address) < 0
})
t.Run("with prefix", func(t *testing.T) {
i := len(items) - 1
var count int
err := index.Iterate(func(item Item) (stop bool, err error) {
if i < 0 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i--
count++
return false, nil
}, &IterateOptions{
Prefix: prefix,
Reverse: true,
})
if err != nil {
t.Fatal(err)
}
wantItemsCount := len(items)
if count != wantItemsCount {
t.Errorf("got %v items, expected %v", count, wantItemsCount)
}
})
t.Run("with prefix and start from", func(t *testing.T) {
startIndex := 2
i := startIndex
var count int
err := index.Iterate(func(item Item) (stop bool, err error) {
if i < 0 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i--
count++
return false, nil
}, &IterateOptions{
StartFrom: &items[startIndex],
Prefix: prefix,
Reverse: true,
})
if err != nil {
t.Fatal(err)
}
wantItemsCount := startIndex + 1
if count != wantItemsCount {
t.Errorf("got %v items, expected %v", count, wantItemsCount)
}
})
t.Run("with prefix and skip start from", func(t *testing.T) {
startIndex := 3
i := startIndex - 1
var count int
err := index.Iterate(func(item Item) (stop bool, err error) {
if i < 0 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i--
count++
return false, nil
}, &IterateOptions{
StartFrom: &items[startIndex],
SkipStartFromItem: true,
Prefix: prefix,
Reverse: true,
})
if err != nil {
t.Fatal(err)
}
wantItemsCount := len(items) - startIndex - 1
if count != wantItemsCount {
t.Errorf("got %v items, expected %v", count, wantItemsCount)
}
})
t.Run("stop", func(t *testing.T) {
i := len(items) - 1
stopIndex := 3
var count int
err := index.Iterate(func(item Item) (stop bool, err error) {
if i < 0 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
count++
if i == stopIndex {
return true, nil
}
i--
return false, nil
}, &IterateOptions{
Prefix: prefix,
Reverse: true,
})
if err != nil {
t.Fatal(err)
}
wantItemsCount := stopIndex + 1
if count != wantItemsCount {
t.Errorf("got %v items, expected %v", count, wantItemsCount)
}
})
t.Run("no overflow", func(t *testing.T) {
secondIndex, err := db.NewIndex("second-index", retrievalIndexFuncs)
if err != nil {
t.Fatal(err)
}
secondItem := Item{
Address: []byte("iterate-hash-10"),
Data: []byte("data-second"),
}
err = secondIndex.Put(secondItem)
if err != nil {
t.Fatal(err)
}
i := len(items) - 1
var count int
err = index.Iterate(func(item Item) (stop bool, err error) {
if i < 0 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
checkItem(t, item, want)
i--
count++
return false, nil
}, &IterateOptions{
Prefix: prefix,
Reverse: true,
})
if err != nil {
t.Fatal(err)
}
wantItemsCount := len(items)
if count != wantItemsCount {
t.Errorf("got %v items, expected %v", count, wantItemsCount)
}
})
}
// TestIndex_count tests if Index.Count and Index.CountFrom
// returns the correct number of items.
func TestIndex_count(t *testing.T) {
...