Commit 6eedd003 authored by Zahoor Mohamed's avatar Zahoor Mohamed Committed by GitHub

Fix getting tags by address in pusher (#334)

* Fix tags in pusher
parent d39c5638
...@@ -237,7 +237,7 @@ func NewBee(o Options) (*Bee, error) { ...@@ -237,7 +237,7 @@ func NewBee(o Options) (*Bee, error) {
ChunkPeerer: topologyDriver, ChunkPeerer: topologyDriver,
Logger: logger, Logger: logger,
}) })
tag := tags.NewTags() tagg := tags.NewTags()
if err = p2ps.AddProtocol(retrieve.Protocol()); err != nil { if err = p2ps.AddProtocol(retrieve.Protocol()); err != nil {
return nil, fmt.Errorf("retrieval service: %w", err) return nil, fmt.Errorf("retrieval service: %w", err)
...@@ -251,6 +251,7 @@ func NewBee(o Options) (*Bee, error) { ...@@ -251,6 +251,7 @@ func NewBee(o Options) (*Bee, error) {
Streamer: p2ps, Streamer: p2ps,
Storer: storer, Storer: storer,
ClosestPeerer: topologyDriver, ClosestPeerer: topologyDriver,
Tagger: tagg,
Logger: logger, Logger: logger,
}) })
...@@ -262,7 +263,7 @@ func NewBee(o Options) (*Bee, error) { ...@@ -262,7 +263,7 @@ func NewBee(o Options) (*Bee, error) {
Storer: storer, Storer: storer,
PeerSuggester: topologyDriver, PeerSuggester: topologyDriver,
PushSyncer: pushSyncProtocol, PushSyncer: pushSyncProtocol,
Tags: tag, Tagger: tagg,
Logger: logger, Logger: logger,
}) })
b.pusherCloser = pushSyncPusher b.pusherCloser = pushSyncPusher
...@@ -293,7 +294,7 @@ func NewBee(o Options) (*Bee, error) { ...@@ -293,7 +294,7 @@ func NewBee(o Options) (*Bee, error) {
if o.APIAddr != "" { if o.APIAddr != "" {
// API server // API server
apiService = api.New(api.Options{ apiService = api.New(api.Options{
Tags: tag, Tags: tagg,
Storer: ns, Storer: ns,
CORSAllowedOrigins: o.CORSAllowedOrigins, CORSAllowedOrigins: o.CORSAllowedOrigins,
Logger: logger, Logger: logger,
......
...@@ -21,8 +21,8 @@ import ( ...@@ -21,8 +21,8 @@ import (
type Service struct { type Service struct {
storer storage.Storer storer storage.Storer
pushSyncer pushsync.PushSyncer pushSyncer pushsync.PushSyncer
tag *tags.Tags
logger logging.Logger logger logging.Logger
tagg *tags.Tags
metrics metrics metrics metrics
quit chan struct{} quit chan struct{}
chunksWorkerQuitC chan struct{} chunksWorkerQuitC chan struct{}
...@@ -31,8 +31,8 @@ type Service struct { ...@@ -31,8 +31,8 @@ type Service struct {
type Options struct { type Options struct {
Storer storage.Storer Storer storage.Storer
PeerSuggester topology.ClosestPeerer PeerSuggester topology.ClosestPeerer
Tags *tags.Tags
PushSyncer pushsync.PushSyncer PushSyncer pushsync.PushSyncer
Tagger *tags.Tags
Logger logging.Logger Logger logging.Logger
} }
...@@ -42,7 +42,7 @@ func New(o Options) *Service { ...@@ -42,7 +42,7 @@ func New(o Options) *Service {
service := &Service{ service := &Service{
storer: o.Storer, storer: o.Storer,
pushSyncer: o.PushSyncer, pushSyncer: o.PushSyncer,
tag: o.Tags, tagg: o.Tagger,
logger: o.Logger, logger: o.Logger,
metrics: newMetrics(), metrics: newMetrics(),
quit: make(chan struct{}), quit: make(chan struct{}),
...@@ -131,7 +131,7 @@ LOOP: ...@@ -131,7 +131,7 @@ LOOP:
} }
return return
} }
s.setChunkAsSynced(ctx, ch.Address()) s.setChunkAsSynced(ctx, ch)
}(ctx, ch) }(ctx, ch)
case <-timer.C: case <-timer.C:
// initially timer is set to go off as well as every time we hit the end of push index // initially timer is set to go off as well as every time we hit the end of push index
...@@ -173,11 +173,15 @@ LOOP: ...@@ -173,11 +173,15 @@ LOOP:
} }
} }
func (s *Service) setChunkAsSynced(ctx context.Context, addr swarm.Address) { func (s *Service) setChunkAsSynced(ctx context.Context, ch swarm.Chunk) {
if err := s.storer.Set(ctx, storage.ModeSetSyncPush, addr); err != nil { if err := s.storer.Set(ctx, storage.ModeSetSyncPush, ch.Address()); err != nil {
s.logger.Errorf("pusher: error setting chunk as synced: %v", err) s.logger.Errorf("pusher: error setting chunk as synced: %v", err)
s.metrics.ErrorSettingChunkToSynced.Inc() s.metrics.ErrorSettingChunkToSynced.Inc()
} }
t, err := s.tagg.Get(ch.TagID())
if err == nil && t != nil {
t.Inc(tags.StateSynced)
}
} }
func (s *Service) Close() error { func (s *Service) Close() error {
......
...@@ -12,8 +12,6 @@ import ( ...@@ -12,8 +12,6 @@ import (
"testing" "testing"
"time" "time"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/localstore" "github.com/ethersphere/bee/pkg/localstore"
"github.com/ethersphere/bee/pkg/logging" "github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/pusher" "github.com/ethersphere/bee/pkg/pusher"
...@@ -21,6 +19,7 @@ import ( ...@@ -21,6 +19,7 @@ import (
pushsyncmock "github.com/ethersphere/bee/pkg/pushsync/mock" pushsyncmock "github.com/ethersphere/bee/pkg/pushsync/mock"
"github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/topology/mock" "github.com/ethersphere/bee/pkg/topology/mock"
) )
...@@ -47,8 +46,7 @@ func (s Store) Set(ctx context.Context, mode storage.ModeSet, addrs ...swarm.Add ...@@ -47,8 +46,7 @@ func (s Store) Set(ctx context.Context, mode storage.ModeSet, addrs ...swarm.Add
// TestSendChunkToPushSync sends a chunk to pushsync to be sent to its closest peer and get a receipt. // TestSendChunkToPushSync sends a chunk to pushsync to be sent to its closest peer and get a receipt.
// once the receipt is got this check to see if the localstore is updated to see if the chunk is set // once the receipt is got this check to see if the localstore is updated to see if the chunk is set
// as ModeSetSyncPush status. // as ModeSetSyncPush status.
func TestSendChunkToPushSync(t *testing.T) { func TestSendChunkToPushSyncWithTag(t *testing.T) {
chunk := createChunk()
// create a trigger and a closestpeer // create a trigger and a closestpeer
triggerPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") triggerPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000")
...@@ -60,14 +58,15 @@ func TestSendChunkToPushSync(t *testing.T) { ...@@ -60,14 +58,15 @@ func TestSendChunkToPushSync(t *testing.T) {
} }
return receipt, nil return receipt, nil
}) })
mtag := tags.NewTags() mtags, p, storer := createPusher(t, triggerPeer, pushSyncService, mock.WithClosestPeer(closestPeer))
tag, err := mtag.Create("name", 1, false) defer storer.Close()
ta, err := mtags.Create("test", 1, false)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
tag.Address = chunk.Address()
p, storer := createPusher(t, triggerPeer, pushSyncService, mtag, mock.WithClosestPeer(closestPeer)) chunk := createChunk().WithTagID(ta.Uid)
defer storer.Close()
_, err = storer.Put(context.Background(), storage.ModePutUpload, chunk) _, err = storer.Put(context.Background(), storage.ModePutUpload, chunk)
if err != nil { if err != nil {
...@@ -87,6 +86,11 @@ func TestSendChunkToPushSync(t *testing.T) { ...@@ -87,6 +86,11 @@ func TestSendChunkToPushSync(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if ta.Get(tags.StateSynced) != 1 {
t.Fatalf("tags error")
}
p.Close() p.Close()
} }
...@@ -105,15 +109,11 @@ func TestSendChunkToPushSyncWithoutTag(t *testing.T) { ...@@ -105,15 +109,11 @@ func TestSendChunkToPushSyncWithoutTag(t *testing.T) {
} }
return receipt, nil return receipt, nil
}) })
mtag := tags.NewTags()
_, err := mtag.Create("name", 1, false) _, p, storer := createPusher(t, triggerPeer, pushSyncService, mock.WithClosestPeer(closestPeer))
if err != nil {
t.Fatal(err)
}
p, storer := createPusher(t, triggerPeer, pushSyncService, mtag, mock.WithClosestPeer(closestPeer))
defer storer.Close() defer storer.Close()
_, err = storer.Put(context.Background(), storage.ModePutUpload, chunk) _, err := storer.Put(context.Background(), storage.ModePutUpload, chunk)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
...@@ -148,16 +148,10 @@ func TestSendChunkAndReceiveInvalidReceipt(t *testing.T) { ...@@ -148,16 +148,10 @@ func TestSendChunkAndReceiveInvalidReceipt(t *testing.T) {
return nil, errors.New("invalid receipt") return nil, errors.New("invalid receipt")
}) })
mtag := tags.NewTags() _, p, storer := createPusher(t, triggerPeer, pushSyncService, mock.WithClosestPeer(closestPeer))
tag, err := mtag.Create("name", 1, false)
if err != nil {
t.Fatal(err)
}
tag.Address = chunk.Address()
p, storer := createPusher(t, triggerPeer, pushSyncService, mtag, mock.WithClosestPeer(closestPeer))
defer storer.Close() defer storer.Close()
_, err = storer.Put(context.Background(), storage.ModePutUpload, chunk) _, err := storer.Put(context.Background(), storage.ModePutUpload, chunk)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
...@@ -195,17 +189,11 @@ func TestSendChunkAndTimeoutinReceivingReceipt(t *testing.T) { ...@@ -195,17 +189,11 @@ func TestSendChunkAndTimeoutinReceivingReceipt(t *testing.T) {
return nil, nil return nil, nil
}) })
mtag := tags.NewTags() _, p, storer := createPusher(t, triggerPeer, pushSyncService, mock.WithClosestPeer(closestPeer))
tag, err := mtag.Create("name", 1, false)
if err != nil {
t.Fatal(err)
}
tag.Address = chunk.Address()
p, storer := createPusher(t, triggerPeer, pushSyncService, mtag, mock.WithClosestPeer(closestPeer))
defer storer.Close() defer storer.Close()
defer p.Close() defer p.Close()
_, err = storer.Put(context.Background(), storage.ModePutUpload, chunk) _, err := storer.Put(context.Background(), storage.ModePutUpload, chunk)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
...@@ -229,10 +217,10 @@ func createChunk() swarm.Chunk { ...@@ -229,10 +217,10 @@ func createChunk() swarm.Chunk {
// chunk data to upload // chunk data to upload
chunkAddress := swarm.MustParseHexAddress("7000000000000000000000000000000000000000000000000000000000000000") chunkAddress := swarm.MustParseHexAddress("7000000000000000000000000000000000000000000000000000000000000000")
chunkData := []byte("1234") chunkData := []byte("1234")
return swarm.NewChunk(chunkAddress, chunkData) return swarm.NewChunk(chunkAddress, chunkData).WithTagID(666)
} }
func createPusher(t *testing.T, addr swarm.Address, pushSyncService pushsync.PushSyncer, tag *tags.Tags, mockOpts ...mock.Option) (*pusher.Service, *Store) { func createPusher(t *testing.T, addr swarm.Address, pushSyncService pushsync.PushSyncer, mockOpts ...mock.Option) (*tags.Tags, *pusher.Service, *Store) {
t.Helper() t.Helper()
logger := logging.New(ioutil.Discard, 0) logger := logging.New(ioutil.Discard, 0)
storer, err := localstore.New("", addr.Bytes(), nil, logger) storer, err := localstore.New("", addr.Bytes(), nil, logger)
...@@ -240,6 +228,7 @@ func createPusher(t *testing.T, addr swarm.Address, pushSyncService pushsync.Pus ...@@ -240,6 +228,7 @@ func createPusher(t *testing.T, addr swarm.Address, pushSyncService pushsync.Pus
t.Fatal(err) t.Fatal(err)
} }
mtags := tags.NewTags()
pusherStorer := &Store{ pusherStorer := &Store{
Storer: storer, Storer: storer,
modeSet: make(map[string]storage.ModeSet), modeSet: make(map[string]storage.ModeSet),
...@@ -247,8 +236,8 @@ func createPusher(t *testing.T, addr swarm.Address, pushSyncService pushsync.Pus ...@@ -247,8 +236,8 @@ func createPusher(t *testing.T, addr swarm.Address, pushSyncService pushsync.Pus
} }
peerSuggester := mock.NewTopologyDriver(mockOpts...) peerSuggester := mock.NewTopologyDriver(mockOpts...)
pusherService := pusher.New(pusher.Options{Storer: pusherStorer, Tags: tag, PushSyncer: pushSyncService, PeerSuggester: peerSuggester, Logger: logger}) pusherService := pusher.New(pusher.Options{Storer: pusherStorer, PushSyncer: pushSyncService, Tagger: mtags, PeerSuggester: peerSuggester, Logger: logger})
return pusherService, pusherStorer return mtags, pusherService, pusherStorer
} }
func checkIfModeSet(addr swarm.Address, mode storage.ModeSet, storer *Store) error { func checkIfModeSet(addr swarm.Address, mode storage.ModeSet, storer *Store) error {
......
...@@ -16,6 +16,7 @@ import ( ...@@ -16,6 +16,7 @@ import (
"github.com/ethersphere/bee/pkg/pushsync/pb" "github.com/ethersphere/bee/pkg/pushsync/pb"
"github.com/ethersphere/bee/pkg/storage" "github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/topology" "github.com/ethersphere/bee/pkg/topology"
) )
...@@ -37,6 +38,7 @@ type PushSync struct { ...@@ -37,6 +38,7 @@ type PushSync struct {
streamer p2p.Streamer streamer p2p.Streamer
storer storage.Putter storer storage.Putter
peerSuggester topology.ClosestPeerer peerSuggester topology.ClosestPeerer
tagg *tags.Tags
logger logging.Logger logger logging.Logger
metrics metrics metrics metrics
} }
...@@ -45,6 +47,7 @@ type Options struct { ...@@ -45,6 +47,7 @@ type Options struct {
Streamer p2p.Streamer Streamer p2p.Streamer
Storer storage.Putter Storer storage.Putter
ClosestPeerer topology.ClosestPeerer ClosestPeerer topology.ClosestPeerer
Tagger *tags.Tags
Logger logging.Logger Logger logging.Logger
} }
...@@ -55,6 +58,7 @@ func New(o Options) *PushSync { ...@@ -55,6 +58,7 @@ func New(o Options) *PushSync {
streamer: o.Streamer, streamer: o.Streamer,
storer: o.Storer, storer: o.Storer,
peerSuggester: o.ClosestPeerer, peerSuggester: o.ClosestPeerer,
tagg: o.Tagger,
logger: o.Logger, logger: o.Logger,
metrics: newMetrics(), metrics: newMetrics(),
} }
...@@ -134,7 +138,6 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) ...@@ -134,7 +138,6 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
defer streamer.Close() defer streamer.Close()
wc, rc := protobuf.NewWriterAndReader(streamer) wc, rc := protobuf.NewWriterAndReader(streamer)
if err := ps.sendChunkDelivery(wc, chunk); err != nil { if err := ps.sendChunkDelivery(wc, chunk); err != nil {
return fmt.Errorf("forward chunk to peer %s: %w", peer.String(), err) return fmt.Errorf("forward chunk to peer %s: %w", peer.String(), err)
} }
...@@ -233,8 +236,13 @@ func (ps *PushSync) PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Re ...@@ -233,8 +236,13 @@ func (ps *PushSync) PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Re
if err := ps.sendChunkDelivery(w, ch); err != nil { if err := ps.sendChunkDelivery(w, ch); err != nil {
return nil, fmt.Errorf("chunk deliver to peer %s: %w", peer.String(), err) return nil, fmt.Errorf("chunk deliver to peer %s: %w", peer.String(), err)
} }
receiptRTTTimer := time.Now() // if you manage to get a tag, just increment the respective counter
t, err := ps.tagg.Get(ch.TagID())
if err == nil && t != nil {
t.Inc(tags.StateSent)
}
receiptRTTTimer := time.Now()
receipt, err := ps.receiveReceipt(r) receipt, err := ps.receiveReceipt(r)
if err != nil { if err != nil {
return nil, fmt.Errorf("receive receipt from peer %s: %w", peer.String(), err) return nil, fmt.Errorf("receive receipt from peer %s: %w", peer.String(), err)
......
...@@ -17,6 +17,7 @@ import ( ...@@ -17,6 +17,7 @@ import (
"github.com/ethersphere/bee/pkg/pushsync" "github.com/ethersphere/bee/pkg/pushsync"
"github.com/ethersphere/bee/pkg/pushsync/pb" "github.com/ethersphere/bee/pkg/pushsync/pb"
"github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/topology" "github.com/ethersphere/bee/pkg/topology"
"github.com/ethersphere/bee/pkg/topology/mock" "github.com/ethersphere/bee/pkg/topology/mock"
) )
...@@ -35,14 +36,14 @@ func TestSendChunkAndReceiveReceipt(t *testing.T) { ...@@ -35,14 +36,14 @@ func TestSendChunkAndReceiveReceipt(t *testing.T) {
// peer is the node responding to the chunk receipt message // peer is the node responding to the chunk receipt message
// mock should return ErrWantSelf since there's no one to forward to // mock should return ErrWantSelf since there's no one to forward to
psPeer, storerPeer := createPushSyncNode(t, closestPeer, nil, mock.WithClosestPeerErr(topology.ErrWantSelf)) psPeer, storerPeer, _ := createPushSyncNode(t, closestPeer, nil, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer.Close() defer storerPeer.Close()
recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol())) recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()))
// pivot node needs the streamer since the chunk is intercepted by // pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream // the chunk worker, then gets sent by opening a new stream
psPivot, storerPivot := createPushSyncNode(t, pivotNode, recorder, mock.WithClosestPeer(closestPeer)) psPivot, storerPivot, _ := createPushSyncNode(t, pivotNode, recorder, mock.WithClosestPeer(closestPeer))
defer storerPivot.Close() defer storerPivot.Close()
// Trigger the sending of chunk to the closest node // Trigger the sending of chunk to the closest node
...@@ -63,6 +64,70 @@ func TestSendChunkAndReceiveReceipt(t *testing.T) { ...@@ -63,6 +64,70 @@ func TestSendChunkAndReceiveReceipt(t *testing.T) {
} }
// PushChunkToClosest tests the sending of chunk to closest peer from the origination source perspective.
// it also checks whether the tags are incremented properly if they are present
func TestPushChunkToClosest(t *testing.T) {
// chunk data to upload
chunkAddress := swarm.MustParseHexAddress("7000000000000000000000000000000000000000000000000000000000000000")
chunkData := []byte("1234")
// create a pivot node and a mocked closest node
pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000
closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") // binary 0110 -> po 1
// peer is the node responding to the chunk receipt message
// mock should return ErrWantSelf since there's no one to forward to
psPeer, storerPeer, _ := createPushSyncNode(t, closestPeer, nil, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer storerPeer.Close()
// record the peer's protocol so the delivery and receipt messages can be inspected below
recorder := streamtest.New(streamtest.WithProtocols(psPeer.Protocol()))
// pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream
psPivot, storerPivot, pivotTags := createPushSyncNode(t, pivotNode, recorder, mock.WithClosestPeer(closestPeer))
defer storerPivot.Close()
// create a tag (total count 1) and attach its uid to the chunk being pushed,
// so that PushChunkToClosest can find and increment it
ta, err := pivotTags.Create("test", 1, false)
if err != nil {
t.Fatal(err)
}
chunk := swarm.NewChunk(chunkAddress, chunkData).WithTagID(ta.Uid)
// sanity check: neither the Sent nor the Synced counter may be incremented before the push
ta1, err := pivotTags.Get(ta.Uid)
if err != nil {
t.Fatal(err)
}
if ta1.Get(tags.StateSent) != 0 || ta1.Get(tags.StateSynced) != 0 {
t.Fatalf("tags initialization error")
}
// Trigger the sending of chunk to the closest node
receipt, err := psPivot.PushChunkToClosest(context.Background(), chunk)
if err != nil {
t.Fatal(err)
}
// the receipt must acknowledge exactly the chunk that was pushed
if !chunk.Address().Equal(receipt.Address) {
t.Fatal("invalid receipt")
}
// this intercepts the outgoing delivery message
waitOnRecordAndTest(t, closestPeer, recorder, chunkAddress, chunkData)
// this intercepts the incoming receipt message
waitOnRecordAndTest(t, closestPeer, recorder, chunkAddress, nil)
// after the push completes, the tag's Sent counter must have been incremented exactly once
ta2, err := pivotTags.Get(ta.Uid)
if err != nil {
t.Fatal(err)
}
if ta2.Get(tags.StateSent) != 1 {
t.Fatalf("tags error")
}
}
// TestHandler expect a chunk from a node on a stream. It then stores the chunk in the local store and // TestHandler expect a chunk from a node on a stream. It then stores the chunk in the local store and
// sends back a receipt. This is tested by intercepting the incoming stream for proper messages. // sends back a receipt. This is tested by intercepting the incoming stream for proper messages.
// It also sends the chunk to the closest peer and receives a receipt. // It also sends the chunk to the closest peer and receives a receipt.
...@@ -81,19 +146,19 @@ func TestHandler(t *testing.T) { ...@@ -81,19 +146,19 @@ func TestHandler(t *testing.T) {
closestPeer := swarm.MustParseHexAddress("f000000000000000000000000000000000000000000000000000000000000000") closestPeer := swarm.MustParseHexAddress("f000000000000000000000000000000000000000000000000000000000000000")
// Create the closest peer // Create the closest peer
psClosestPeer, closestStorerPeerDB := createPushSyncNode(t, closestPeer, nil, mock.WithClosestPeerErr(topology.ErrWantSelf)) psClosestPeer, closestStorerPeerDB, _ := createPushSyncNode(t, closestPeer, nil, mock.WithClosestPeerErr(topology.ErrWantSelf))
defer closestStorerPeerDB.Close() defer closestStorerPeerDB.Close()
closestRecorder := streamtest.New(streamtest.WithProtocols(psClosestPeer.Protocol())) closestRecorder := streamtest.New(streamtest.WithProtocols(psClosestPeer.Protocol()))
// creating the pivot peer // creating the pivot peer
psPivot, storerPivotDB := createPushSyncNode(t, pivotPeer, closestRecorder, mock.WithClosestPeer(closestPeer)) psPivot, storerPivotDB, _ := createPushSyncNode(t, pivotPeer, closestRecorder, mock.WithClosestPeer(closestPeer))
defer storerPivotDB.Close() defer storerPivotDB.Close()
pivotRecorder := streamtest.New(streamtest.WithProtocols(psPivot.Protocol())) pivotRecorder := streamtest.New(streamtest.WithProtocols(psPivot.Protocol()))
// Creating the trigger peer // Creating the trigger peer
psTriggerPeer, triggerStorerDB := createPushSyncNode(t, triggerPeer, pivotRecorder, mock.WithClosestPeer(pivotPeer)) psTriggerPeer, triggerStorerDB, _ := createPushSyncNode(t, triggerPeer, pivotRecorder, mock.WithClosestPeer(pivotPeer))
defer triggerStorerDB.Close() defer triggerStorerDB.Close()
receipt, err := psTriggerPeer.PushChunkToClosest(context.Background(), chunk) receipt, err := psTriggerPeer.PushChunkToClosest(context.Background(), chunk)
...@@ -119,7 +184,7 @@ func TestHandler(t *testing.T) { ...@@ -119,7 +184,7 @@ func TestHandler(t *testing.T) {
waitOnRecordAndTest(t, pivotPeer, pivotRecorder, chunkAddress, nil) waitOnRecordAndTest(t, pivotPeer, pivotRecorder, chunkAddress, nil)
} }
func createPushSyncNode(t *testing.T, addr swarm.Address, recorder *streamtest.Recorder, mockOpts ...mock.Option) (*pushsync.PushSync, *localstore.DB) { func createPushSyncNode(t *testing.T, addr swarm.Address, recorder *streamtest.Recorder, mockOpts ...mock.Option) (*pushsync.PushSync, *localstore.DB, *tags.Tags) {
logger := logging.New(ioutil.Discard, 0) logger := logging.New(ioutil.Discard, 0)
storer, err := localstore.New("", addr.Bytes(), nil, logger) storer, err := localstore.New("", addr.Bytes(), nil, logger)
...@@ -128,15 +193,17 @@ func createPushSyncNode(t *testing.T, addr swarm.Address, recorder *streamtest.R ...@@ -128,15 +193,17 @@ func createPushSyncNode(t *testing.T, addr swarm.Address, recorder *streamtest.R
} }
mockTopology := mock.NewTopologyDriver(mockOpts...) mockTopology := mock.NewTopologyDriver(mockOpts...)
mtag := tags.NewTags()
ps := pushsync.New(pushsync.Options{ ps := pushsync.New(pushsync.Options{
Streamer: recorder, Streamer: recorder,
Storer: storer, Storer: storer,
Tagger: mtag,
ClosestPeerer: mockTopology, ClosestPeerer: mockTopology,
Logger: logger, Logger: logger,
}) })
return ps, storer return ps, storer, mtag
} }
func waitOnRecordAndTest(t *testing.T, peer swarm.Address, recorder *streamtest.Recorder, add swarm.Address, data []byte) { func waitOnRecordAndTest(t *testing.T, peer swarm.Address, recorder *streamtest.Recorder, add swarm.Address, data []byte) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment