Commit a4ca719e authored by Esad Akar, committed by GitHub

Push/pull sync - neighborhood syncing (#1537)

* storer nodes send chunks to neighbor peers as the receipt is sent back

* pullsync: dont sync outside nn (#911)

* puller: cancel everything when peer moves out of depth

* pr comments

* adjust tests for puller changes

* recalc when peer moves out of depth

* beekeeper: shorter timeouts for debug

* revert change

* Revert "beekeeper: shorter timeouts for debug"

This reverts commit 756e9bc46bc18300133d62ea48f82f3b8479aecc.

* TestReplicateBeforeReceipt sleep fix

* increase retry-delay for pushsync with clef to 10s

* increase retry-delay for pushsync with clef to 15s

* set retry-delay for pushsync with clef to 10s and chunks-per-node to 1 for chunks test

* increase retry-delay for pushsync with clef to 15s

* revert to 5s, we solved it inside bee-local
Co-authored-by: acud <12988138+acud@users.noreply.github.com>
Co-authored-by: Ivan Vandot <ivan@vandot.rs>
parent dbb66820
...@@ -68,6 +68,18 @@ func (m *Mock) ClosestPeer(addr swarm.Address, skipPeers ...swarm.Address) (peer ...@@ -68,6 +68,18 @@ func (m *Mock) ClosestPeer(addr swarm.Address, skipPeers ...swarm.Address) (peer
panic("not implemented") // TODO: Implement panic("not implemented") // TODO: Implement
} }
// IsWithinDepth reports whether addr falls inside this node's neighborhood
// depth. Stub required to satisfy the topology.Driver interface; panics
// because no test exercising this mock calls it yet.
func (m *Mock) IsWithinDepth(adr swarm.Address) bool {
	panic("not implemented") // TODO: Implement
}
// EachNeighbor iterates over every peer within the neighborhood depth.
// Stub required to satisfy the topology.Driver interface; panics because
// no test exercising this mock calls it yet.
func (m *Mock) EachNeighbor(topology.EachPeerFunc) error {
	panic("not implemented") // TODO: Implement
}
// EachNeighborRev iterates over every peer within the neighborhood depth
// in reverse order. Stub required to satisfy the topology.Driver interface;
// panics because no test exercising this mock calls it yet.
func (m *Mock) EachNeighborRev(topology.EachPeerFunc) error {
	panic("not implemented") // TODO: Implement
}
// EachPeer iterates from closest bin to farthest // EachPeer iterates from closest bin to farthest
func (m *Mock) EachPeer(f topology.EachPeerFunc) error { func (m *Mock) EachPeer(f topology.EachPeerFunc) error {
m.mtx.Lock() m.mtx.Lock()
......
...@@ -428,7 +428,7 @@ func NewBee(addr string, swarmAddress swarm.Address, publicKey ecdsa.PublicKey, ...@@ -428,7 +428,7 @@ func NewBee(addr string, swarmAddress swarm.Address, publicKey ecdsa.PublicKey,
traversalService := traversal.NewService(ns) traversalService := traversal.NewService(ns)
pushSyncProtocol := pushsync.New(p2ps, storer, kad, tagService, pssService.TryUnwrap, logger, acc, pricer, signer, tracer) pushSyncProtocol := pushsync.New(swarmAddress, p2ps, storer, kad, tagService, pssService.TryUnwrap, logger, acc, pricer, signer, tracer)
// set the pushSyncer in the PSS // set the pushSyncer in the PSS
pssService.SetPushSyncer(pushSyncProtocol) pssService.SetPushSyncer(pushSyncProtocol)
......
...@@ -22,15 +22,12 @@ import ( ...@@ -22,15 +22,12 @@ import (
"github.com/ethersphere/bee/pkg/topology" "github.com/ethersphere/bee/pkg/topology"
) )
const defaultShallowBinPeers = 2
var ( var (
logMore = false // enable this to get more logging logMore = false // enable this to get more logging
) )
type Options struct { type Options struct {
Bins uint8 Bins uint8
ShallowBinPeers int
} }
type Puller struct { type Puller struct {
...@@ -53,20 +50,15 @@ type Puller struct { ...@@ -53,20 +50,15 @@ type Puller struct {
wg sync.WaitGroup wg sync.WaitGroup
bins uint8 // how many bins do we support bins uint8 // how many bins do we support
shallowBinPeers int // how many peers per bin do we want to sync with outside of depth
} }
func New(stateStore storage.StateStorer, topology topology.Driver, pullSync pullsync.Interface, logger logging.Logger, o Options) *Puller { func New(stateStore storage.StateStorer, topology topology.Driver, pullSync pullsync.Interface, logger logging.Logger, o Options) *Puller {
var ( var (
bins uint8 = swarm.MaxBins bins uint8 = swarm.MaxBins
shallowBinPeers int = defaultShallowBinPeers
) )
if o.Bins != 0 { if o.Bins != 0 {
bins = o.Bins bins = o.Bins
} }
if o.ShallowBinPeers != 0 {
shallowBinPeers = o.ShallowBinPeers
}
p := &Puller{ p := &Puller{
statestore: stateStore, statestore: stateStore,
...@@ -81,7 +73,6 @@ func New(stateStore storage.StateStorer, topology topology.Driver, pullSync pull ...@@ -81,7 +73,6 @@ func New(stateStore storage.StateStorer, topology topology.Driver, pullSync pull
wg: sync.WaitGroup{}, wg: sync.WaitGroup{},
bins: bins, bins: bins,
shallowBinPeers: shallowBinPeers,
} }
for i := uint8(0); i < bins; i++ { for i := uint8(0); i < bins; i++ {
...@@ -148,30 +139,21 @@ func (p *Puller) manage() { ...@@ -148,30 +139,21 @@ func (p *Puller) manage() {
if _, ok := bp[peerAddr.String()]; ok { if _, ok := bp[peerAddr.String()]; ok {
delete(peersDisconnected, peerAddr.String()) delete(peersDisconnected, peerAddr.String())
} }
syncing := len(bp) if po >= depth {
if po < depth { // within depth, sync everything
// outside of depth, sync peerPO bin only
if _, ok := bp[peerAddr.String()]; !ok { if _, ok := bp[peerAddr.String()]; !ok {
if syncing < p.shallowBinPeers { // we're not syncing with this peer yet, start doing so
// peer not syncing yet and we still need more peers in this bin
bp[peerAddr.String()] = newSyncPeer(peerAddr, p.bins) bp[peerAddr.String()] = newSyncPeer(peerAddr, p.bins)
peerEntry := peer{addr: peerAddr, po: po} peerEntry := peer{addr: peerAddr, po: po}
peersToSync = append(peersToSync, peerEntry) peersToSync = append(peersToSync, peerEntry)
}
} else { } else {
// already syncing, recalc // already syncing, recalc
peerEntry := peer{addr: peerAddr, po: po} peerEntry := peer{addr: peerAddr, po: po}
peersToRecalc = append(peersToRecalc, peerEntry) peersToRecalc = append(peersToRecalc, peerEntry)
} }
} else { } else {
// within depth, sync everything >= depth if _, ok := bp[peerAddr.String()]; ok {
if _, ok := bp[peerAddr.String()]; !ok { // already syncing, recalc so that existing streams get cleaned up
// we're not syncing with this peer yet, start doing so
bp[peerAddr.String()] = newSyncPeer(peerAddr, p.bins)
peerEntry := peer{addr: peerAddr, po: po}
peersToSync = append(peersToSync, peerEntry)
} else {
// already syncing, recalc
peerEntry := peer{addr: peerAddr, po: po} peerEntry := peer{addr: peerAddr, po: po}
peersToRecalc = append(peersToRecalc, peerEntry) peersToRecalc = append(peersToRecalc, peerEntry)
} }
...@@ -223,45 +205,34 @@ func (p *Puller) recalcPeer(ctx context.Context, peer swarm.Address, po, d uint8 ...@@ -223,45 +205,34 @@ func (p *Puller) recalcPeer(ctx context.Context, peer swarm.Address, po, d uint8
c := p.cursors[peer.String()] c := p.cursors[peer.String()]
p.cursorsMtx.Unlock() p.cursorsMtx.Unlock()
var want, dontWant []uint8
if po >= d { if po >= d {
// within depth // within depth
var want, dontWant []uint8
for i := d; i < p.bins; i++ { for i := d; i < p.bins; i++ {
if i == 0 { if i == 0 {
continue continue
} }
want = append(want, i) want = append(want, i)
} }
for i := uint8(0); i < d; i++ {
dontWant = append(dontWant, i)
}
for _, bin := range want { for _, bin := range want {
if !syncCtx.isBinSyncing(bin) { if !syncCtx.isBinSyncing(bin) {
p.syncPeerBin(ctx, syncCtx, peer, bin, c[bin]) p.syncPeerBin(ctx, syncCtx, peer, bin, c[bin])
} }
} }
syncCtx.cancelBins(dontWant...)
} else {
// outside of depth
var (
want = po
dontWant = []uint8{0} // never want bin 0
)
for i := uint8(0); i < p.bins; i++ { // cancel everything outside of depth
if i == want { for i := uint8(0); i < d; i++ {
continue dontWant = append(dontWant, i)
} }
} else {
// peer is outside depth. cancel everything
for i := uint8(0); i < p.bins; i++ {
dontWant = append(dontWant, i) dontWant = append(dontWant, i)
} }
if !syncCtx.isBinSyncing(want) {
p.syncPeerBin(ctx, syncCtx, peer, want, c[want])
} }
syncCtx.cancelBins(dontWant...) syncCtx.cancelBins(dontWant...)
}
} }
func (p *Puller) syncPeer(ctx context.Context, peer swarm.Address, po, d uint8) { func (p *Puller) syncPeer(ctx context.Context, peer swarm.Address, po, d uint8) {
...@@ -290,12 +261,6 @@ func (p *Puller) syncPeer(ctx context.Context, peer swarm.Address, po, d uint8) ...@@ -290,12 +261,6 @@ func (p *Puller) syncPeer(ctx context.Context, peer swarm.Address, po, d uint8)
c = cursors c = cursors
} }
// peer outside depth?
if po < d && po > 0 {
p.syncPeerBin(ctx, syncCtx, peer, po, c[po])
return
}
for bin, cur := range c { for bin, cur := range c {
if bin == 0 || uint8(bin) < d { if bin == 0 || uint8(bin) < d {
continue continue
......
...@@ -32,16 +32,40 @@ var ( ...@@ -32,16 +32,40 @@ var (
reply = mockps.NewReply // alias to make code more readable reply = mockps.NewReply // alias to make code more readable
) )
// test that adding one peer start syncing // test that adding one peer starts syncing
// then that adding another peer at the same po
// does not start another syncing session
func TestOneSync(t *testing.T) { func TestOneSync(t *testing.T) {
var (
addr = test.RandomAddress()
cursors = []uint64{1000, 1000, 1000}
liveReplies = []uint64{1}
)
puller, _, kad, pullsync := newPuller(opts{
kad: []mockk.Option{
mockk.WithEachPeerRevCalls(
mockk.AddrTuple{Addr: addr, PO: 1},
), mockk.WithDepth(1),
},
pullSync: []mockps.Option{mockps.WithCursors(cursors), mockps.WithLiveSyncReplies(liveReplies...)},
bins: 3,
})
defer puller.Close()
defer pullsync.Close()
time.Sleep(100 * time.Millisecond)
kad.Trigger()
waitCursorsCalled(t, pullsync, addr, false)
waitSyncCalled(t, pullsync, addr, false)
}
func TestNoSyncOutsideDepth(t *testing.T) {
var ( var (
addr = test.RandomAddress() addr = test.RandomAddress()
addr2 = test.RandomAddress() addr2 = test.RandomAddress()
cursors = []uint64{1000, 1000, 1000} cursors = []uint64{1000, 1000, 1000}
liveReplies = []uint64{1} liveReplies = []uint64{1}
shallowBinPeers = 1
) )
puller, _, kad, pullsync := newPuller(opts{ puller, _, kad, pullsync := newPuller(opts{
...@@ -53,7 +77,6 @@ func TestOneSync(t *testing.T) { ...@@ -53,7 +77,6 @@ func TestOneSync(t *testing.T) {
}, },
pullSync: []mockps.Option{mockps.WithCursors(cursors), mockps.WithLiveSyncReplies(liveReplies...)}, pullSync: []mockps.Option{mockps.WithCursors(cursors), mockps.WithLiveSyncReplies(liveReplies...)},
bins: 3, bins: 3,
shallowBinPeers: &shallowBinPeers,
}) })
defer puller.Close() defer puller.Close()
defer pullsync.Close() defer pullsync.Close()
...@@ -61,14 +84,14 @@ func TestOneSync(t *testing.T) { ...@@ -61,14 +84,14 @@ func TestOneSync(t *testing.T) {
kad.Trigger() kad.Trigger()
waitCursorsCalled(t, pullsync, addr, false) waitCursorsCalled(t, pullsync, addr, true)
waitCursorsCalled(t, pullsync, addr2, true) waitCursorsCalled(t, pullsync, addr2, true)
waitSyncCalled(t, pullsync, addr, false) waitSyncCalled(t, pullsync, addr, true)
waitSyncCalled(t, pullsync, addr2, true) waitSyncCalled(t, pullsync, addr2, true)
} }
func TestSyncFlow_PeerOutsideDepth_Live(t *testing.T) { func TestSyncFlow_PeerWithinDepth_Live(t *testing.T) {
addr := test.RandomAddress() addr := test.RandomAddress()
for _, tc := range []struct { for _, tc := range []struct {
...@@ -80,13 +103,13 @@ func TestSyncFlow_PeerOutsideDepth_Live(t *testing.T) { ...@@ -80,13 +103,13 @@ func TestSyncFlow_PeerOutsideDepth_Live(t *testing.T) {
expLiveCalls []c // expected live sync calls expLiveCalls []c // expected live sync calls
}{ }{
{ {
name: "cursor 0, 1 chunk on live", cursors: []uint64{0, 0, 0}, name: "cursor 0, 1 chunk on live", cursors: []uint64{0, 0},
intervals: "[[1 1]]", intervals: "[[1 1]]",
liveReplies: []uint64{1}, liveReplies: []uint64{1},
expLiveCalls: []c{call(1, 1, max), call(1, 2, max)}, expLiveCalls: []c{call(1, 1, max), call(1, 2, max)},
}, },
{ {
name: "cursor 0 - calls 1-1, 2-5, 6-10", cursors: []uint64{0, 0, 0}, name: "cursor 0 - calls 1-1, 2-5, 6-10", cursors: []uint64{0, 0},
intervals: "[[1 10]]", intervals: "[[1 10]]",
liveReplies: []uint64{1, 5, 10}, liveReplies: []uint64{1, 5, 10},
expLiveCalls: []c{call(1, 1, max), call(1, 2, max), call(1, 6, max), call(1, 11, max)}, expLiveCalls: []c{call(1, 1, max), call(1, 2, max), call(1, 6, max), call(1, 11, max)},
...@@ -97,7 +120,7 @@ func TestSyncFlow_PeerOutsideDepth_Live(t *testing.T) { ...@@ -97,7 +120,7 @@ func TestSyncFlow_PeerOutsideDepth_Live(t *testing.T) {
kad: []mockk.Option{ kad: []mockk.Option{
mockk.WithEachPeerRevCalls( mockk.WithEachPeerRevCalls(
mockk.AddrTuple{Addr: addr, PO: 1}, mockk.AddrTuple{Addr: addr, PO: 1},
), mockk.WithDepth(2), ), mockk.WithDepth(1),
}, },
pullSync: []mockps.Option{mockps.WithCursors(tc.cursors), mockps.WithLiveSyncReplies(tc.liveReplies...)}, pullSync: []mockps.Option{mockps.WithCursors(tc.cursors), mockps.WithLiveSyncReplies(tc.liveReplies...)},
bins: 5, bins: 5,
...@@ -121,7 +144,7 @@ func TestSyncFlow_PeerOutsideDepth_Live(t *testing.T) { ...@@ -121,7 +144,7 @@ func TestSyncFlow_PeerOutsideDepth_Live(t *testing.T) {
} }
} }
func TestSyncFlow_PeerOutsideDepth_Historical(t *testing.T) { func TestSyncFlow_PeerWithinDepth_Historical(t *testing.T) {
addr := test.RandomAddress() addr := test.RandomAddress()
for _, tc := range []struct { for _, tc := range []struct {
...@@ -132,7 +155,7 @@ func TestSyncFlow_PeerOutsideDepth_Historical(t *testing.T) { ...@@ -132,7 +155,7 @@ func TestSyncFlow_PeerOutsideDepth_Historical(t *testing.T) {
expLiveCalls []c // expected live sync calls expLiveCalls []c // expected live sync calls
}{ }{
{ {
name: "1,1 - 1 call", cursors: []uint64{0, 1, 3}, //the third cursor is to make sure we dont get a request for a bin we dont need name: "1,1 - 1 call", cursors: []uint64{0, 1}, //the third cursor is to make sure we dont get a request for a bin we dont need
intervals: "[[1 1]]", intervals: "[[1 1]]",
expCalls: []c{call(1, 1, 1)}, expCalls: []c{call(1, 1, 1)},
expLiveCalls: []c{call(1, 2, math.MaxUint64)}, expLiveCalls: []c{call(1, 2, math.MaxUint64)},
...@@ -173,7 +196,7 @@ func TestSyncFlow_PeerOutsideDepth_Historical(t *testing.T) { ...@@ -173,7 +196,7 @@ func TestSyncFlow_PeerOutsideDepth_Historical(t *testing.T) {
kad: []mockk.Option{ kad: []mockk.Option{
mockk.WithEachPeerRevCalls( mockk.WithEachPeerRevCalls(
mockk.AddrTuple{Addr: addr, PO: 1}, mockk.AddrTuple{Addr: addr, PO: 1},
), mockk.WithDepth(2), ), mockk.WithDepth(1),
}, },
pullSync: []mockps.Option{mockps.WithCursors(tc.cursors), mockps.WithAutoReply(), mockps.WithLiveSyncBlock()}, pullSync: []mockps.Option{mockps.WithCursors(tc.cursors), mockps.WithAutoReply(), mockps.WithLiveSyncBlock()},
bins: 5, bins: 5,
...@@ -197,7 +220,7 @@ func TestSyncFlow_PeerOutsideDepth_Historical(t *testing.T) { ...@@ -197,7 +220,7 @@ func TestSyncFlow_PeerOutsideDepth_Historical(t *testing.T) {
} }
} }
func TestSyncFlow_PeerWithinDepth_Live(t *testing.T) { func TestSyncFlow_PeerWithinDepth_Live2(t *testing.T) {
addr := test.RandomAddress() addr := test.RandomAddress()
for _, tc := range []struct { for _, tc := range []struct {
...@@ -265,8 +288,8 @@ func TestPeerDisconnected(t *testing.T) { ...@@ -265,8 +288,8 @@ func TestPeerDisconnected(t *testing.T) {
time.Sleep(50 * time.Millisecond) time.Sleep(50 * time.Millisecond)
kad.Trigger() kad.Trigger()
waitCursorsCalled(t, pullsync, addr, false) waitCursorsCalled(t, pullsync, addr, true)
waitLiveSyncCalled(t, pullsync, addr, false) waitLiveSyncCalled(t, pullsync, addr, true)
kad.ResetPeers() kad.ResetPeers()
kad.Trigger() kad.Trigger()
time.Sleep(50 * time.Millisecond) time.Sleep(50 * time.Millisecond)
...@@ -276,6 +299,20 @@ func TestPeerDisconnected(t *testing.T) { ...@@ -276,6 +299,20 @@ func TestPeerDisconnected(t *testing.T) {
} }
} }
// TestDepthChange tests that puller reacts correctly to
// depth changes signalled from kademlia.
// Due to the fact that the component does goroutine termination
// and new syncing sessions autonomously, the testing strategy is a bit
// more tricky than usual. The idea is that syncReplies basically allow us
// to somehow see through the inner workings of the syncing strategies.
// When a sync reply is specified with block=true, the protocol mock basically
// returns an interval back to the caller, as if we have successfully synced the
// requested interval. This in turn means that the interval would be persisted
// in the state store, allowing us to inspect for which bins intervals exist when
// we check which bins were synced or not (presence of the key in the db indicates
// the bin was synced). This also means that tweaking these tests needs to
// be done carefully and with the understanding of what each change does to
// the tested unit.
func TestDepthChange(t *testing.T) { func TestDepthChange(t *testing.T) {
var ( var (
addr = test.RandomAddress() addr = test.RandomAddress()
...@@ -318,11 +355,12 @@ func TestDepthChange(t *testing.T) { ...@@ -318,11 +355,12 @@ func TestDepthChange(t *testing.T) {
{ {
name: "peer moves out of depth", name: "peer moves out of depth",
cursors: []uint64{0, 0, 0, 0, 0}, cursors: []uint64{0, 0, 0, 0, 0},
binsNotSyncing: []uint8{1, 2, 4}, // only bins 3,4 are expected to sync binsNotSyncing: []uint8{1, 2, 3, 4}, // no bins should be syncing
binsSyncing: []uint8{3},
syncReplies: []mockps.SyncReply{ syncReplies: []mockps.SyncReply{
reply(3, 1, 1, false), reply(1, 1, 1, true),
reply(3, 2, 1, true), reply(2, 1, 1, true),
reply(3, 1, 1, true),
reply(4, 1, 1, true),
}, },
depths: []uint8{0, 1, 2, 3, 4}, depths: []uint8{0, 1, 2, 3, 4},
}, },
...@@ -345,7 +383,7 @@ func TestDepthChange(t *testing.T) { ...@@ -345,7 +383,7 @@ func TestDepthChange(t *testing.T) {
kad: []mockk.Option{ kad: []mockk.Option{
mockk.WithEachPeerRevCalls( mockk.WithEachPeerRevCalls(
mockk.AddrTuple{Addr: addr, PO: 3}, mockk.AddrTuple{Addr: addr, PO: 3},
), mockk.WithDepthCalls(tc.depths...), // peer moved from out of depth to depth ), mockk.WithDepthCalls(tc.depths...),
}, },
pullSync: []mockps.Option{mockps.WithCursors(tc.cursors), mockps.WithLateSyncReply(tc.syncReplies...)}, pullSync: []mockps.Option{mockps.WithCursors(tc.cursors), mockps.WithLateSyncReply(tc.syncReplies...)},
bins: 5, bins: 5,
...@@ -364,7 +402,7 @@ func TestDepthChange(t *testing.T) { ...@@ -364,7 +402,7 @@ func TestDepthChange(t *testing.T) {
// check the intervals // check the intervals
for _, b := range tc.binsSyncing { for _, b := range tc.binsSyncing {
checkIntervals(t, st, addr, interval, b) // getting errors here checkIntervals(t, st, addr, interval, b)
} }
for _, b := range tc.binsNotSyncing { for _, b := range tc.binsNotSyncing {
...@@ -548,7 +586,6 @@ type opts struct { ...@@ -548,7 +586,6 @@ type opts struct {
pullSync []mockps.Option pullSync []mockps.Option
kad []mockk.Option kad []mockk.Option
bins uint8 bins uint8
shallowBinPeers *int
} }
func newPuller(ops opts) (*puller.Puller, storage.StateStorer, *mockk.Mock, *mockps.PullSyncMock) { func newPuller(ops opts) (*puller.Puller, storage.StateStorer, *mockk.Mock, *mockps.PullSyncMock) {
...@@ -560,9 +597,6 @@ func newPuller(ops opts) (*puller.Puller, storage.StateStorer, *mockk.Mock, *moc ...@@ -560,9 +597,6 @@ func newPuller(ops opts) (*puller.Puller, storage.StateStorer, *mockk.Mock, *moc
o := puller.Options{ o := puller.Options{
Bins: ops.bins, Bins: ops.bins,
} }
if ops.shallowBinPeers != nil {
o.ShallowBinPeers = *ops.shallowBinPeers
}
return puller.New(s, kad, ps, logger, o), s, kad, ps return puller.New(s, kad, ps, logger, o), s, kad, ps
} }
......
...@@ -6,6 +6,7 @@ package mock ...@@ -6,6 +6,7 @@ package mock
import ( import (
"context" "context"
"fmt"
"math" "math"
"sync" "sync"
...@@ -156,7 +157,7 @@ func (p *PullSyncMock) SyncInterval(ctx context.Context, peer swarm.Address, bin ...@@ -156,7 +157,7 @@ func (p *PullSyncMock) SyncInterval(ctx context.Context, peer swarm.Address, bin
} }
return sr.topmost, 0, nil return sr.topmost, 0, nil
} }
panic("not found") panic(fmt.Sprintf("bin %d from %d to %d", bin, from, to))
} }
if isLive && p.blockLiveSync { if isLive && p.blockLiveSync {
...@@ -164,7 +165,6 @@ func (p *PullSyncMock) SyncInterval(ctx context.Context, peer swarm.Address, bin ...@@ -164,7 +165,6 @@ func (p *PullSyncMock) SyncInterval(ctx context.Context, peer swarm.Address, bin
<-p.quit <-p.quit
return 0, 1, context.Canceled return 0, 1, context.Canceled
} }
if isLive && len(p.liveSyncReplies) > 0 { if isLive && len(p.liveSyncReplies) > 0 {
if p.liveSyncCalls >= len(p.liveSyncReplies) { if p.liveSyncCalls >= len(p.liveSyncReplies) {
<-p.quit <-p.quit
......
...@@ -13,6 +13,8 @@ type metrics struct { ...@@ -13,6 +13,8 @@ type metrics struct {
TotalSent prometheus.Counter TotalSent prometheus.Counter
TotalReceived prometheus.Counter TotalReceived prometheus.Counter
TotalErrors prometheus.Counter TotalErrors prometheus.Counter
TotalReplicated prometheus.Counter
TotalReplicatedError prometheus.Counter
} }
func newMetrics() metrics { func newMetrics() metrics {
...@@ -37,6 +39,18 @@ func newMetrics() metrics { ...@@ -37,6 +39,18 @@ func newMetrics() metrics {
Name: "total_errors", Name: "total_errors",
Help: "Total no of time error received while sending chunk.", Help: "Total no of time error received while sending chunk.",
}), }),
TotalReplicated: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: m.Namespace,
Subsystem: subsystem,
Name: "total_replication",
Help: "Total no of successfully sent replication chunks.",
}),
TotalReplicatedError: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: m.Namespace,
Subsystem: subsystem,
Name: "total_replication_error",
Help: "Total no of failed replication chunks.",
}),
} }
} }
......
...@@ -40,6 +40,10 @@ const ( ...@@ -40,6 +40,10 @@ const (
maxPeers = 5 maxPeers = 5
) )
var (
ErrOutOfDepthReplication = errors.New("replication outside of the neighborhood")
)
type PushSyncer interface { type PushSyncer interface {
PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Receipt, error) PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Receipt, error)
} }
...@@ -50,9 +54,10 @@ type Receipt struct { ...@@ -50,9 +54,10 @@ type Receipt struct {
} }
type PushSync struct { type PushSync struct {
address swarm.Address
streamer p2p.StreamerDisconnecter streamer p2p.StreamerDisconnecter
storer storage.Putter storer storage.Putter
peerSuggester topology.ClosestPeerer topologyDriver topology.Driver
tagger *tags.Tags tagger *tags.Tags
unwrap func(swarm.Chunk) unwrap func(swarm.Chunk)
logger logging.Logger logger logging.Logger
...@@ -64,12 +69,15 @@ type PushSync struct { ...@@ -64,12 +69,15 @@ type PushSync struct {
} }
var timeToLive = 5 * time.Second // request time to live var timeToLive = 5 * time.Second // request time to live
var timeToWaitForPushsyncToNeighbor = 3 * time.Second // time to wait to get a receipt for a chunk
var nPeersToPushsync = 3 // number of peers to replicate to as receipt is sent upstream
func New(streamer p2p.StreamerDisconnecter, storer storage.Putter, closestPeerer topology.ClosestPeerer, tagger *tags.Tags, unwrap func(swarm.Chunk), logger logging.Logger, accounting accounting.Interface, pricer pricer.Interface, signer crypto.Signer, tracer *tracing.Tracer) *PushSync { func New(address swarm.Address, streamer p2p.StreamerDisconnecter, storer storage.Putter, topologyDriver topology.Driver, tagger *tags.Tags, unwrap func(swarm.Chunk), logger logging.Logger, accounting accounting.Interface, pricer pricer.Interface, signer crypto.Signer, tracer *tracing.Tracer) *PushSync {
ps := &PushSync{ ps := &PushSync{
address: address,
streamer: streamer, streamer: streamer,
storer: storer, storer: storer,
peerSuggester: closestPeerer, topologyDriver: topologyDriver,
tagger: tagger, tagger: tagger,
unwrap: unwrap, unwrap: unwrap,
logger: logger, logger: logger,
...@@ -126,19 +134,40 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) ...@@ -126,19 +134,40 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
return swarm.ErrInvalidChunk return swarm.ErrInvalidChunk
} }
span, _, ctx := ps.tracer.StartSpanFromContext(ctx, "pushsync-handler", ps.logger, opentracing.Tag{Key: "address", Value: chunk.Address().String()}) // Get price we charge for upstream peer read at headler.
defer span.Finish()
// Get price we charge for upstream peer read at headler
responseHeaders := stream.ResponseHeaders() responseHeaders := stream.ResponseHeaders()
price, err := headerutils.ParsePriceHeader(responseHeaders) price, err := headerutils.ParsePriceHeader(responseHeaders)
// if not found in returned header, compute the price we charge for this chunk.
if err != nil { if err != nil {
// if not found in returned header, compute the price we charge for this chunk and ps.logger.Warningf("pushsync: peer %v no price in previously issued response headers: %v", p.Address, err)
ps.logger.Warningf("push sync: peer %v no price in previously issued response headers: %v", p.Address, err)
price = ps.pricer.PriceForPeer(p.Address, chunk.Address()) price = ps.pricer.PriceForPeer(p.Address, chunk.Address())
} }
// if the peer is closer to the chunk, we were selected for replication. Return early.
if dcmp, _ := swarm.DistanceCmp(chunk.Address().Bytes(), p.Address.Bytes(), ps.address.Bytes()); dcmp == 1 {
if ps.topologyDriver.IsWithinDepth(chunk.Address()) {
_, err = ps.storer.Put(ctx, storage.ModePutSync, chunk)
if err != nil {
ps.logger.Errorf("pushsync: chunk store: %v", err)
}
return ps.accounting.Debit(p.Address, price)
}
return ErrOutOfDepthReplication
}
// forwarding replication
if ps.topologyDriver.IsWithinDepth(chunk.Address()) {
_, err = ps.storer.Put(ctx, storage.ModePutSync, chunk)
if err != nil {
ps.logger.Warningf("pushsync: within depth peer's attempt to store chunk failed: %v", err)
}
}
span, _, ctx := ps.tracer.StartSpanFromContext(ctx, "pushsync-handler", ps.logger, opentracing.Tag{Key: "address", Value: chunk.Address().String()})
defer span.Finish()
receipt, err := ps.pushToClosest(ctx, chunk) receipt, err := ps.pushToClosest(ctx, chunk)
if err != nil { if err != nil {
if errors.Is(err, topology.ErrWantSelf) { if errors.Is(err, topology.ErrWantSelf) {
...@@ -147,10 +176,89 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) ...@@ -147,10 +176,89 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
return fmt.Errorf("chunk store: %w", err) return fmt.Errorf("chunk store: %w", err)
} }
count := 0
// Push the chunk to some peers in the neighborhood in parallel for replication.
// Any errors here should NOT impact the rest of the handler.
err = ps.topologyDriver.EachNeighbor(func(peer swarm.Address, po uint8) (bool, bool, error) {
// skip forwarding peer
if peer.Equal(p.Address) {
return false, false, nil
}
if count == nPeersToPushsync {
return true, false, nil
}
count++
go func(peer swarm.Address) {
var err error
defer func() {
if err != nil {
ps.logger.Tracef("pushsync replication: %v", err)
ps.metrics.TotalReplicatedError.Inc()
} else {
ps.metrics.TotalReplicated.Inc()
}
}()
// price for neighborhood replication
const receiptPrice uint64 = 0
headers, err := headerutils.MakePricingHeaders(receiptPrice, chunk.Address())
if err != nil {
err = fmt.Errorf("make pricing headers: %w", err)
return
}
streamer, err := ps.streamer.NewStream(ctx, peer, headers, protocolName, protocolVersion, streamName)
if err != nil {
err = fmt.Errorf("new stream for peer %s: %w", peer.String(), err)
return
}
defer streamer.Close()
returnedHeaders := streamer.Headers()
_, returnedPrice, returnedIndex, err := headerutils.ParsePricingResponseHeaders(returnedHeaders)
if err != nil {
err = fmt.Errorf("push price headers read returned: %w", err)
return
}
// check if returned price matches presumed price, if not, return early.
if returnedPrice != receiptPrice {
err = ps.pricer.NotifyPeerPrice(peer, returnedPrice, returnedIndex)
return
}
w := protobuf.NewWriter(streamer)
ctx, cancel := context.WithTimeout(ctx, timeToWaitForPushsyncToNeighbor)
defer cancel()
err = w.WriteMsgWithContext(ctx, &pb.Delivery{
Address: chunk.Address().Bytes(),
Data: chunk.Data(),
})
if err != nil {
_ = streamer.Reset()
return
}
}(peer)
return false, false, nil
})
if err != nil {
ps.logger.Tracef("pushsync replication closest peer: %w", err)
}
signature, err := ps.signer.Sign(ch.Address) signature, err := ps.signer.Sign(ch.Address)
if err != nil { if err != nil {
return fmt.Errorf("receipt signature: %w", err) return fmt.Errorf("receipt signature: %w", err)
} }
// return back receipt
receipt := pb.Receipt{Address: chunk.Address().Bytes(), Signature: signature} receipt := pb.Receipt{Address: chunk.Address().Bytes(), Signature: signature}
if err := w.WriteMsgWithContext(ctx, &receipt); err != nil { if err := w.WriteMsgWithContext(ctx, &receipt); err != nil {
return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err) return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err)
...@@ -211,8 +319,8 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk) (rr *pb.R ...@@ -211,8 +319,8 @@ func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk) (rr *pb.R
defersFn() defersFn()
// find next closest peer // find the next closest peer
peer, err := ps.peerSuggester.ClosestPeer(ch.Address(), skipPeers...) peer, err := ps.topologyDriver.ClosestPeer(ch.Address(), skipPeers...)
if err != nil { if err != nil {
// ClosestPeer can return ErrNotFound in case we are not connected to any peers // ClosestPeer can return ErrNotFound in case we are not connected to any peers
// in which case we should return immediately. // in which case we should return immediately.
......
...@@ -7,8 +7,9 @@ package pushsync_test ...@@ -7,8 +7,9 @@ package pushsync_test
import ( import (
"bytes" "bytes"
"context" "context"
"fmt" "errors"
"io/ioutil" "io/ioutil"
"sync"
"testing" "testing"
"time" "time"
...@@ -26,6 +27,7 @@ import ( ...@@ -26,6 +27,7 @@ import (
"github.com/ethersphere/bee/pkg/pushsync" "github.com/ethersphere/bee/pkg/pushsync"
"github.com/ethersphere/bee/pkg/pushsync/pb" "github.com/ethersphere/bee/pkg/pushsync/pb"
statestore "github.com/ethersphere/bee/pkg/statestore/mock" statestore "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/storage"
testingc "github.com/ethersphere/bee/pkg/storage/testing" testingc "github.com/ethersphere/bee/pkg/storage/testing"
"github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags" "github.com/ethersphere/bee/pkg/tags"
...@@ -44,6 +46,9 @@ type pricerParameters struct { ...@@ -44,6 +46,9 @@ type pricerParameters struct {
var ( var (
defaultPrices = pricerParameters{price: fixedPrice, peerPrice: fixedPrice} defaultPrices = pricerParameters{price: fixedPrice, peerPrice: fixedPrice}
defaultSigner = cryptomock.New(cryptomock.WithSignFunc(func([]byte) ([]byte, error) {
return nil, nil
}))
) )
// TestSendChunkAndGetReceipt inserts a chunk as uploaded chunk in db. This triggers sending a chunk to the closest node // TestSendChunkAndGetReceipt inserts a chunk as uploaded chunk in db. This triggers sending a chunk to the closest node
...@@ -52,10 +57,6 @@ func TestSendChunkAndReceiveReceipt(t *testing.T) { ...@@ -52,10 +57,6 @@ func TestSendChunkAndReceiveReceipt(t *testing.T) {
// chunk data to upload // chunk data to upload
chunk := testingc.FixtureChunk("7000") chunk := testingc.FixtureChunk("7000")
signer := cryptomock.New(cryptomock.WithSignFunc(func([]byte) ([]byte, error) {
return nil, nil
}))
// create a pivot node and a mocked closest node // create a pivot node and a mocked closest node
pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000 pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000
closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") // binary 0110 -> po 1 closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") // binary 0110 -> po 1
...@@ -63,14 +64,14 @@ func TestSendChunkAndReceiveReceipt(t *testing.T) { ...@@ -63,14 +64,14 @@ func TestSendChunkAndReceiveReceipt(t *testing.T) {
// peer is the node responding to the chunk receipt message // peer is the node responding to the chunk receipt message
// mock should return ErrWantSelf since there's no one to forward to // mock should return ErrWantSelf since there's no one to forward to
psPeer, storerPeer, _, peerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, signer, mock.WithClosestPeerErr(topology.ErrWantSelf)) psPeer, storerPeer, _, peerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer.Close() defer storerPeer.Close()
recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode)) recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode))
// pivot node needs the streamer since the chunk is intercepted by // pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream // the chunk worker, then gets sent by opening a new stream
psPivot, storerPivot, _, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, signer, mock.WithClosestPeer(closestPeer)) psPivot, storerPivot, _, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer))
defer storerPivot.Close() defer storerPivot.Close()
// Trigger the sending of chunk to the closest node // Trigger the sending of chunk to the closest node
...@@ -117,21 +118,17 @@ func TestSendChunkAfterPriceUpdate(t *testing.T) { ...@@ -117,21 +118,17 @@ func TestSendChunkAfterPriceUpdate(t *testing.T) {
// peer is the node responding to the chunk receipt message // peer is the node responding to the chunk receipt message
// mock should return ErrWantSelf since there's no one to forward to // mock should return ErrWantSelf since there's no one to forward to
signer := cryptomock.New(cryptomock.WithSignFunc(func([]byte) ([]byte, error) {
return nil, nil
}))
serverPrice := uint64(17) serverPrice := uint64(17)
serverPrices := pricerParameters{price: serverPrice, peerPrice: fixedPrice} serverPrices := pricerParameters{price: serverPrice, peerPrice: fixedPrice}
psPeer, storerPeer, _, peerAccounting := createPushSyncNode(t, closestPeer, serverPrices, nil, nil, signer, mock.WithClosestPeerErr(topology.ErrWantSelf)) psPeer, storerPeer, _, peerAccounting := createPushSyncNode(t, closestPeer, serverPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer.Close() defer storerPeer.Close()
recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode)) recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode))
// pivot node needs the streamer since the chunk is intercepted by // pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream // the chunk worker, then gets sent by opening a new stream
psPivot, storerPivot, _, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, signer, mock.WithClosestPeer(closestPeer)) psPivot, storerPivot, _, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer))
defer storerPivot.Close() defer storerPivot.Close()
// Trigger the sending of chunk to the closest node // Trigger the sending of chunk to the closest node
...@@ -169,30 +166,127 @@ func TestSendChunkAfterPriceUpdate(t *testing.T) { ...@@ -169,30 +166,127 @@ func TestSendChunkAfterPriceUpdate(t *testing.T) {
} }
} }
// TestReplicateBeforeReceipt tests that a chunk is pushed and a receipt is received.
// Also the storer node initiates a pushsync to N closest nodes of the chunk as it's sending back the receipt.
// The second storer should only store it and not forward it. The balance of all nodes is tested.
func TestReplicateBeforeReceipt(t *testing.T) {

	// chunk data to upload
	chunk := testingc.FixtureChunk("7000") // base 0111

	// replication to neighborhood peers is free of charge
	neighborPrice := pricerParameters{price: 0, peerPrice: 0}

	// create a pivot node and a mocked closest node
	pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000
	closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") // binary 0110
	secondPeer := swarm.MustParseHexAddress("4000000000000000000000000000000000000000000000000000000000000000")  // binary 0100
	emptyPeer := swarm.MustParseHexAddress("5000000000000000000000000000000000000000000000000000000000000000")   // binary 0101, this peer should not get the chunk

	// node that is connected to secondPeer
	// its address is closer to the chunk than secondPeer but it will not receive the chunk
	_, storerEmpty, _, _ := createPushSyncNode(t, emptyPeer, defaultPrices, nil, nil, defaultSigner)
	defer storerEmpty.Close()

	// node that is connected to closestPeer
	// will receive chunk from closestPeer
	psSecond, storerSecond, _, secondAccounting := createPushSyncNode(t, secondPeer, neighborPrice, nil, nil, defaultSigner, mock.WithPeers(emptyPeer))
	defer storerSecond.Close()
	secondRecorder := streamtest.New(streamtest.WithProtocols(psSecond.Protocol()), streamtest.WithBaseAddr(closestPeer))

	// the storer node: stores the chunk itself (ErrWantSelf) and replicates it
	// to its peer, secondPeer, via secondRecorder
	psStorer, storerPeer, _, storerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, secondRecorder, nil, defaultSigner, mock.WithPeers(secondPeer), mock.WithClosestPeerErr(topology.ErrWantSelf))
	defer storerPeer.Close()
	recorder := streamtest.New(streamtest.WithProtocols(psStorer.Protocol()), streamtest.WithBaseAddr(pivotNode))

	// pivot node needs the streamer since the chunk is intercepted by
	// the chunk worker, then gets sent by opening a new stream
	psPivot, storerPivot, _, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer))
	defer storerPivot.Close()

	// Trigger the sending of chunk to the closest node
	receipt, err := psPivot.PushChunkToClosest(context.Background(), chunk)
	if err != nil {
		t.Fatal(err)
	}

	if !chunk.Address().Equal(receipt.Address) {
		t.Fatal("invalid receipt")
	}

	// this intercepts the outgoing delivery message
	waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), chunk.Data())

	// this intercepts the incoming receipt message
	waitOnRecordAndTest(t, closestPeer, recorder, chunk.Address(), nil)

	// sleep for a bit to allow the second peer to store the replicated chunk
	time.Sleep(time.Millisecond * 500)

	// this intercepts the outgoing delivery message from storer node to second storer node
	waitOnRecordAndTest(t, secondPeer, secondRecorder, chunk.Address(), chunk.Data())

	// this intercepts the incoming receipt message
	waitOnRecordAndTest(t, secondPeer, secondRecorder, chunk.Address(), nil)

	// emptyPeer is closer to the chunk than secondPeer, but replication must
	// not have been forwarded on to it
	_, err = storerEmpty.Get(context.Background(), storage.ModeGetSync, chunk.Address())
	if !errors.Is(err, storage.ErrNotFound) {
		t.Fatal(err)
	}

	// the pivot node paid the storer node for the chunk
	balance, err := pivotAccounting.Balance(closestPeer)
	if err != nil {
		t.Fatal(err)
	}
	if balance.Int64() != -int64(fixedPrice) {
		t.Fatalf("unexpected balance on pivot node. want %d got %d", -int64(fixedPrice), balance)
	}

	balance, err = storerAccounting.Balance(pivotNode)
	if err != nil {
		t.Fatal(err)
	}
	if balance.Int64() != int64(fixedPrice) {
		t.Fatalf("unexpected balance on storer node. want %d got %d", int64(fixedPrice), balance)
	}

	// replication between the storer and the second storer is free of charge
	balance, err = secondAccounting.Balance(closestPeer)
	if err != nil {
		t.Fatal(err)
	}
	if balance.Int64() != 0 {
		t.Fatalf("unexpected balance on second storer. want %d got %d", 0, balance)
	}

	balance, err = storerAccounting.Balance(secondPeer)
	if err != nil {
		t.Fatal(err)
	}
	if balance.Int64() != 0 {
		t.Fatalf("unexpected balance on storer node. want %d got %d", 0, balance)
	}
}
// PushChunkToClosest tests the sending of chunk to closest peer from the origination source perspective. // PushChunkToClosest tests the sending of chunk to closest peer from the origination source perspective.
// it also checks wether the tags are incremented properly if they are present // it also checks wether the tags are incremented properly if they are present
func TestPushChunkToClosest(t *testing.T) { func TestPushChunkToClosest(t *testing.T) {
// chunk data to upload // chunk data to upload
chunk := testingc.FixtureChunk("7000") chunk := testingc.FixtureChunk("7000")
signer := cryptomock.New(cryptomock.WithSignFunc(func([]byte) ([]byte, error) {
return nil, nil
}))
// create a pivot node and a mocked closest node // create a pivot node and a mocked closest node
pivotNode := swarm.MustParseHexAddress("0000") // base is 0000 pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000
closestPeer := swarm.MustParseHexAddress("6000") // binary 0110 -> po 1 closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") // binary 0110 -> po 1
callbackC := make(chan struct{}, 1) callbackC := make(chan struct{}, 1)
// peer is the node responding to the chunk receipt message // peer is the node responding to the chunk receipt message
// mock should return ErrWantSelf since there's no one to forward to // mock should return ErrWantSelf since there's no one to forward to
psPeer, storerPeer, _, peerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, chanFunc(callbackC), signer, mock.WithClosestPeerErr(topology.ErrWantSelf))
psPeer, storerPeer, _, peerAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, chanFunc(callbackC), defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer.Close() defer storerPeer.Close()
recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode)) recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()), streamtest.WithBaseAddr(pivotNode))
// pivot node needs the streamer since the chunk is intercepted by // pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream // the chunk worker, then gets sent by opening a new stream
psPivot, storerPivot, pivotTags, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, signer, mock.WithClosestPeer(closestPeer)) psPivot, storerPivot, pivotTags, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer))
defer storerPivot.Close() defer storerPivot.Close()
ta, err := pivotTags.Create(1) ta, err := pivotTags.Create(1)
...@@ -261,31 +355,27 @@ func TestPushChunkToClosest(t *testing.T) { ...@@ -261,31 +355,27 @@ func TestPushChunkToClosest(t *testing.T) {
} }
func TestPushChunkToNextClosest(t *testing.T) { func TestPushChunkToNextClosest(t *testing.T) {
// chunk data to upload // chunk data to upload
chunk := testingc.FixtureChunk("7000") chunk := testingc.FixtureChunk("7000")
signer := cryptomock.New(cryptomock.WithSignFunc(func([]byte) ([]byte, error) {
return nil, nil
}))
// create a pivot node and a mocked closest node // create a pivot node and a mocked closest node
pivotNode := swarm.MustParseHexAddress("0000") // base is 0000 pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000
peer1 := swarm.MustParseHexAddress("6000") peer1 := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000")
peer2 := swarm.MustParseHexAddress("5000") peer2 := swarm.MustParseHexAddress("5000000000000000000000000000000000000000000000000000000000000000")
peers := []swarm.Address{
peer1,
peer2,
}
// peer is the node responding to the chunk receipt message // peer is the node responding to the chunk receipt message
// mock should return ErrWantSelf since there's no one to forward to // mock should return ErrWantSelf since there's no one to forward to
psPeer1, storerPeer1, _, peerAccounting1 := createPushSyncNode(t, peer1, defaultPrices, nil, nil, signer, mock.WithClosestPeerErr(topology.ErrWantSelf)) psPeer1, storerPeer1, _, peerAccounting1 := createPushSyncNode(t, peer1, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer1.Close() defer storerPeer1.Close()
psPeer2, storerPeer2, _, peerAccounting2 := createPushSyncNode(t, peer2, defaultPrices, nil, nil, signer, mock.WithClosestPeerErr(topology.ErrWantSelf)) psPeer2, storerPeer2, _, peerAccounting2 := createPushSyncNode(t, peer2, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer2.Close() defer storerPeer2.Close()
var fail = true
var lock sync.Mutex
recorder := streamtest.New( recorder := streamtest.New(
streamtest.WithProtocols( streamtest.WithProtocols(
psPeer1.Protocol(), psPeer1.Protocol(),
...@@ -294,9 +384,12 @@ func TestPushChunkToNextClosest(t *testing.T) { ...@@ -294,9 +384,12 @@ func TestPushChunkToNextClosest(t *testing.T) {
streamtest.WithMiddlewares( streamtest.WithMiddlewares(
func(h p2p.HandlerFunc) p2p.HandlerFunc { func(h p2p.HandlerFunc) p2p.HandlerFunc {
return func(ctx context.Context, peer p2p.Peer, stream p2p.Stream) error { return func(ctx context.Context, peer p2p.Peer, stream p2p.Stream) error {
// NOTE: return error for peer1 // this hack is required to simulate first storer node failing
if peer1.Equal(peer.Address) { lock.Lock()
return fmt.Errorf("peer not reachable: %s", peer.Address.String()) defer lock.Unlock()
if fail {
fail = false
return errors.New("peer not reachable")
} }
if err := h(ctx, peer, stream); err != nil { if err := h(ctx, peer, stream); err != nil {
...@@ -313,7 +406,7 @@ func TestPushChunkToNextClosest(t *testing.T) { ...@@ -313,7 +406,7 @@ func TestPushChunkToNextClosest(t *testing.T) {
// pivot node needs the streamer since the chunk is intercepted by // pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream // the chunk worker, then gets sent by opening a new stream
psPivot, storerPivot, pivotTags, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, signer, mock.WithPeers(peers...)) psPivot, storerPivot, pivotTags, pivotAccounting := createPushSyncNode(t, pivotNode, defaultPrices, recorder, nil, defaultSigner, mock.WithPeers(peer1, peer2))
defer storerPivot.Close() defer storerPivot.Close()
ta, err := pivotTags.Create(1) ta, err := pivotTags.Create(1)
...@@ -351,7 +444,7 @@ func TestPushChunkToNextClosest(t *testing.T) { ...@@ -351,7 +444,7 @@ func TestPushChunkToNextClosest(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if ta2.Get(tags.StateSent) != 1 { if ta2.Get(tags.StateSent) != 2 {
t.Fatalf("tags error") t.Fatalf("tags error")
} }
...@@ -393,29 +486,25 @@ func TestHandler(t *testing.T) { ...@@ -393,29 +486,25 @@ func TestHandler(t *testing.T) {
// chunk data to upload // chunk data to upload
chunk := testingc.FixtureChunk("7000") chunk := testingc.FixtureChunk("7000")
signer := cryptomock.New(cryptomock.WithSignFunc(func([]byte) ([]byte, error) {
return nil, nil
}))
// create a pivot node and a mocked closest node // create a pivot node and a mocked closest node
pivotPeer := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") triggerPeer := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000")
triggerPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") pivotPeer := swarm.MustParseHexAddress("5000000000000000000000000000000000000000000000000000000000000000")
closestPeer := swarm.MustParseHexAddress("f000000000000000000000000000000000000000000000000000000000000000") closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000")
// Create the closest peer // Create the closest peer
psClosestPeer, closestStorerPeerDB, _, closestAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, signer, mock.WithClosestPeerErr(topology.ErrWantSelf)) psClosestPeer, closestStorerPeerDB, _, closestAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer closestStorerPeerDB.Close() defer closestStorerPeerDB.Close()
closestRecorder := streamtest.New(streamtest.WithProtocols(psClosestPeer.Protocol()), streamtest.WithBaseAddr(pivotPeer)) closestRecorder := streamtest.New(streamtest.WithProtocols(psClosestPeer.Protocol()), streamtest.WithBaseAddr(pivotPeer))
// creating the pivot peer // creating the pivot peer
psPivot, storerPivotDB, _, pivotAccounting := createPushSyncNode(t, pivotPeer, defaultPrices, closestRecorder, nil, signer, mock.WithClosestPeer(closestPeer)) psPivot, storerPivotDB, _, pivotAccounting := createPushSyncNode(t, pivotPeer, defaultPrices, closestRecorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer))
defer storerPivotDB.Close() defer storerPivotDB.Close()
pivotRecorder := streamtest.New(streamtest.WithProtocols(psPivot.Protocol()), streamtest.WithBaseAddr(triggerPeer)) pivotRecorder := streamtest.New(streamtest.WithProtocols(psPivot.Protocol()), streamtest.WithBaseAddr(triggerPeer))
// Creating the trigger peer // Creating the trigger peer
psTriggerPeer, triggerStorerDB, _, triggerAccounting := createPushSyncNode(t, triggerPeer, defaultPrices, pivotRecorder, nil, signer, mock.WithClosestPeer(pivotPeer)) psTriggerPeer, triggerStorerDB, _, triggerAccounting := createPushSyncNode(t, triggerPeer, defaultPrices, pivotRecorder, nil, defaultSigner, mock.WithClosestPeer(pivotPeer))
defer triggerStorerDB.Close() defer triggerStorerDB.Close()
receipt, err := psTriggerPeer.PushChunkToClosest(context.Background(), chunk) receipt, err := psTriggerPeer.PushChunkToClosest(context.Background(), chunk)
...@@ -485,28 +574,24 @@ func TestHandlerWithUpdate(t *testing.T) { ...@@ -485,28 +574,24 @@ func TestHandlerWithUpdate(t *testing.T) {
serverPrices := pricerParameters{price: serverPrice, peerPrice: fixedPrice} serverPrices := pricerParameters{price: serverPrice, peerPrice: fixedPrice}
// create a pivot node and a mocked closest node // create a pivot node and a mocked closest node
pivotPeer := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") triggerPeer := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000")
triggerPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") pivotPeer := swarm.MustParseHexAddress("5000000000000000000000000000000000000000000000000000000000000000")
closestPeer := swarm.MustParseHexAddress("f000000000000000000000000000000000000000000000000000000000000000") closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000")
signer := cryptomock.New(cryptomock.WithSignFunc(func([]byte) ([]byte, error) {
return nil, nil
}))
// Create the closest peer with default prices (10) // Create the closest peer with default prices (10)
psClosestPeer, closestStorerPeerDB, _, closestAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, signer, mock.WithClosestPeerErr(topology.ErrWantSelf)) psClosestPeer, closestStorerPeerDB, _, closestAccounting := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, defaultSigner, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer closestStorerPeerDB.Close() defer closestStorerPeerDB.Close()
closestRecorder := streamtest.New(streamtest.WithProtocols(psClosestPeer.Protocol()), streamtest.WithBaseAddr(pivotPeer)) closestRecorder := streamtest.New(streamtest.WithProtocols(psClosestPeer.Protocol()), streamtest.WithBaseAddr(pivotPeer))
// creating the pivot peer who will act as a forwarder node with a higher price (17) // creating the pivot peer who will act as a forwarder node with a higher price (17)
psPivot, storerPivotDB, _, pivotAccounting := createPushSyncNode(t, pivotPeer, serverPrices, closestRecorder, nil, signer, mock.WithClosestPeer(closestPeer)) psPivot, storerPivotDB, _, pivotAccounting := createPushSyncNode(t, pivotPeer, serverPrices, closestRecorder, nil, defaultSigner, mock.WithClosestPeer(closestPeer))
defer storerPivotDB.Close() defer storerPivotDB.Close()
pivotRecorder := streamtest.New(streamtest.WithProtocols(psPivot.Protocol()), streamtest.WithBaseAddr(triggerPeer)) pivotRecorder := streamtest.New(streamtest.WithProtocols(psPivot.Protocol()), streamtest.WithBaseAddr(triggerPeer))
// Creating the trigger peer with default price (10) // Creating the trigger peer with default price (10)
psTriggerPeer, triggerStorerDB, _, triggerAccounting := createPushSyncNode(t, triggerPeer, defaultPrices, pivotRecorder, nil, signer, mock.WithClosestPeer(pivotPeer)) psTriggerPeer, triggerStorerDB, _, triggerAccounting := createPushSyncNode(t, triggerPeer, defaultPrices, pivotRecorder, nil, defaultSigner, mock.WithClosestPeer(pivotPeer))
defer triggerStorerDB.Close() defer triggerStorerDB.Close()
receipt, err := psTriggerPeer.PushChunkToClosest(context.Background(), chunk) receipt, err := psTriggerPeer.PushChunkToClosest(context.Background(), chunk)
...@@ -582,7 +667,7 @@ func TestSignsReceipt(t *testing.T) { ...@@ -582,7 +667,7 @@ func TestSignsReceipt(t *testing.T) {
// create a pivot node and a mocked closest node // create a pivot node and a mocked closest node
pivotPeer := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") pivotPeer := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000")
closestPeer := swarm.MustParseHexAddress("f000000000000000000000000000000000000000000000000000000000000000") closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000")
// Create the closest peer // Create the closest peer
psClosestPeer, closestStorerPeerDB, _, _ := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, signer, mock.WithClosestPeerErr(topology.ErrWantSelf)) psClosestPeer, closestStorerPeerDB, _, _ := createPushSyncNode(t, closestPeer, defaultPrices, nil, nil, signer, mock.WithClosestPeerErr(topology.ErrWantSelf))
...@@ -639,7 +724,7 @@ func createPushSyncNode(t *testing.T, addr swarm.Address, prices pricerParameter ...@@ -639,7 +724,7 @@ func createPushSyncNode(t *testing.T, addr swarm.Address, prices pricerParameter
unwrap = func(swarm.Chunk) {} unwrap = func(swarm.Chunk) {}
} }
return pushsync.New(recorderDisconnecter, storer, mockTopology, mtag, unwrap, logger, mockAccounting, mockPricer, signer, nil), storer, mtag, mockAccounting return pushsync.New(addr, recorderDisconnecter, storer, mockTopology, mtag, unwrap, logger, mockAccounting, mockPricer, signer, nil), storer, mtag, mockAccounting
} }
func waitOnRecordAndTest(t *testing.T, peer swarm.Address, recorder *streamtest.Recorder, add swarm.Address, data []byte) { func waitOnRecordAndTest(t *testing.T, peer swarm.Address, recorder *streamtest.Recorder, add swarm.Address, data []byte) {
......
...@@ -16,6 +16,7 @@ type mock struct { ...@@ -16,6 +16,7 @@ type mock struct {
peers []swarm.Address peers []swarm.Address
closestPeer swarm.Address closestPeer swarm.Address
closestPeerErr error closestPeerErr error
peersErr error
addPeersErr error addPeersErr error
marshalJSONFunc func() ([]byte, error) marshalJSONFunc func() ([]byte, error)
mtx sync.Mutex mtx sync.Mutex
...@@ -64,11 +65,10 @@ func (d *mock) AddPeers(_ context.Context, addrs ...swarm.Address) error { ...@@ -64,11 +65,10 @@ func (d *mock) AddPeers(_ context.Context, addrs ...swarm.Address) error {
return d.addPeersErr return d.addPeersErr
} }
for _, addr := range addrs {
d.mtx.Lock() d.mtx.Lock()
d.peers = append(d.peers, addr) defer d.mtx.Unlock()
d.mtx.Unlock()
} d.peers = append(d.peers, addrs...)
return nil return nil
} }
...@@ -85,7 +85,7 @@ func (d *mock) Peers() []swarm.Address { ...@@ -85,7 +85,7 @@ func (d *mock) Peers() []swarm.Address {
return d.peers return d.peers
} }
func (d *mock) ClosestPeer(_ swarm.Address, skipPeers ...swarm.Address) (peerAddr swarm.Address, err error) { func (d *mock) ClosestPeer(addr swarm.Address, skipPeers ...swarm.Address) (peerAddr swarm.Address, err error) {
if len(skipPeers) == 0 { if len(skipPeers) == 0 {
if d.closestPeerErr != nil { if d.closestPeerErr != nil {
return d.closestPeer, d.closestPeerErr return d.closestPeer, d.closestPeerErr
...@@ -98,6 +98,10 @@ func (d *mock) ClosestPeer(_ swarm.Address, skipPeers ...swarm.Address) (peerAdd ...@@ -98,6 +98,10 @@ func (d *mock) ClosestPeer(_ swarm.Address, skipPeers ...swarm.Address) (peerAdd
d.mtx.Lock() d.mtx.Lock()
defer d.mtx.Unlock() defer d.mtx.Unlock()
if len(d.peers) == 0 {
return peerAddr, topology.ErrNotFound
}
skipPeer := false skipPeer := false
for _, p := range d.peers { for _, p := range d.peers {
for _, a := range skipPeers { for _, a := range skipPeers {
...@@ -111,9 +115,15 @@ func (d *mock) ClosestPeer(_ swarm.Address, skipPeers ...swarm.Address) (peerAdd ...@@ -111,9 +115,15 @@ func (d *mock) ClosestPeer(_ swarm.Address, skipPeers ...swarm.Address) (peerAdd
continue continue
} }
if peerAddr.IsZero() {
peerAddr = p peerAddr = p
} }
if cmp, _ := swarm.DistanceCmp(addr.Bytes(), p.Bytes(), peerAddr.Bytes()); cmp == 1 {
peerAddr = p
}
}
if peerAddr.IsZero() { if peerAddr.IsZero() {
return peerAddr, topology.ErrNotFound return peerAddr, topology.ErrNotFound
} }
...@@ -128,11 +138,27 @@ func (*mock) NeighborhoodDepth() uint8 { ...@@ -128,11 +138,27 @@ func (*mock) NeighborhoodDepth() uint8 {
return 0 return 0
} }
// IsWithinDepth reports whether addr falls within the mock's neighborhood.
// This mock has no notion of depth, so it always reports false.
func (m *mock) IsWithinDepth(addr swarm.Address) bool {
	return false
}
// EachNeighbor iterates over every known peer by delegating to EachPeer;
// the mock treats all of its peers as neighbors.
func (m *mock) EachNeighbor(f topology.EachPeerFunc) error {
	return m.EachPeer(f)
}
// EachNeighborRev is not supported by this mock and panics when called.
func (*mock) EachNeighborRev(topology.EachPeerFunc) error {
	panic("not implemented") // TODO: Implement
}
// EachPeer iterates from closest bin to farthest // EachPeer iterates from closest bin to farthest
func (d *mock) EachPeer(f topology.EachPeerFunc) (err error) { func (d *mock) EachPeer(f topology.EachPeerFunc) (err error) {
d.mtx.Lock() d.mtx.Lock()
defer d.mtx.Unlock() defer d.mtx.Unlock()
if d.peersErr != nil {
return d.peersErr
}
for i, p := range d.peers { for i, p := range d.peers {
_, _, err = f(p, uint8(i)) _, _, err = f(p, uint8(i))
if err != nil { if err != nil {
......
...@@ -25,6 +25,7 @@ type Driver interface { ...@@ -25,6 +25,7 @@ type Driver interface {
PeerAdder PeerAdder
ClosestPeerer ClosestPeerer
EachPeerer EachPeerer
EachNeighbor
NeighborhoodDepth() uint8 NeighborhoodDepth() uint8
SubscribePeersChange() (c <-chan struct{}, unsubscribe func()) SubscribePeersChange() (c <-chan struct{}, unsubscribe func())
io.Closer io.Closer
...@@ -51,6 +52,15 @@ type EachPeerer interface { ...@@ -51,6 +52,15 @@ type EachPeerer interface {
EachPeerRev(EachPeerFunc) error EachPeerRev(EachPeerFunc) error
} }
// EachNeighbor is the interface for iterating over, and testing membership of,
// the peers in a node's neighborhood.
type EachNeighbor interface {
	// EachNeighbor iterates from closest bin to farthest within the neighborhood.
	EachNeighbor(EachPeerFunc) error
	// EachNeighborRev iterates from farthest bin to closest within the neighborhood.
	EachNeighborRev(EachPeerFunc) error
	// IsWithinDepth checks if an address is within the neighborhood.
	IsWithinDepth(swarm.Address) bool
}
// EachPeerFunc is a callback that is called with a peer and its PO // EachPeerFunc is a callback that is called with a peer and its PO
type EachPeerFunc func(swarm.Address, uint8) (stop, jumpToNext bool, err error) type EachPeerFunc func(swarm.Address, uint8) (stop, jumpToNext bool, err error)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment