Commit e0f50061 authored by acud's avatar acud Committed by GitHub

pushsync: package refactor (#1176)

* pushsync: reuse overlapping logic, improve instrumentation, don't bail on invalid receipt
parent 4abf0717
...@@ -26,7 +26,7 @@ type Service struct { ...@@ -26,7 +26,7 @@ type Service struct {
storer storage.Storer storer storage.Storer
pushSyncer pushsync.PushSyncer pushSyncer pushsync.PushSyncer
logger logging.Logger logger logging.Logger
tagg *tags.Tags tag *tags.Tags
tracer *tracing.Tracer tracer *tracing.Tracer
metrics metrics metrics metrics
quit chan struct{} quit chan struct{}
...@@ -42,7 +42,7 @@ func New(storer storage.Storer, peerSuggester topology.ClosestPeerer, pushSyncer ...@@ -42,7 +42,7 @@ func New(storer storage.Storer, peerSuggester topology.ClosestPeerer, pushSyncer
service := &Service{ service := &Service{
storer: storer, storer: storer,
pushSyncer: pushSyncer, pushSyncer: pushSyncer,
tagg: tagger, tag: tagger,
logger: logger, logger: logger,
tracer: tracer, tracer: tracer,
metrics: newMetrics(), metrics: newMetrics(),
...@@ -127,6 +127,8 @@ LOOP: ...@@ -127,6 +127,8 @@ LOOP:
var ( var (
err error err error
startTime = time.Now() startTime = time.Now()
t *tags.Tag
setSent bool
) )
defer func() { defer func() {
if err == nil { if err == nil {
...@@ -147,16 +149,36 @@ LOOP: ...@@ -147,16 +149,36 @@ LOOP:
// for now ignoring the receipt and checking only for error // for now ignoring the receipt and checking only for error
_, err = s.pushSyncer.PushChunkToClosest(ctx, ch) _, err = s.pushSyncer.PushChunkToClosest(ctx, ch)
if err != nil { if err != nil {
if !errors.Is(err, topology.ErrNotFound) { if errors.Is(err, topology.ErrWantSelf) {
logger.Debugf("pusher: error while sending chunk or receiving receipt: %v", err) // we are the closest ones - this is fine
// this is to make sure that the sent number does not diverge from the synced counter
// the edge case is on the uploader node, in the case where the uploader node is
// connected to other nodes, but is the closest one to the chunk.
setSent = true
} else {
return
} }
return
} }
err = s.setChunkAsSynced(ctx, ch) if err = s.storer.Set(ctx, storage.ModeSetSync, ch.Address()); err != nil {
if err != nil { err = fmt.Errorf("pusher: set sync: %w", err)
logger.Debugf("pusher: error setting chunk as synced: %v", err)
return return
} }
t, err = s.tag.Get(ch.TagID())
if err == nil && t != nil {
err = t.Inc(tags.StateSynced)
if err != nil {
err = fmt.Errorf("pusher: increment synced: %v", err)
return
}
if setSent {
err = t.Inc(tags.StateSent)
if err != nil {
err = fmt.Errorf("pusher: increment sent: %w", err)
return
}
}
}
}(ctx, ch) }(ctx, ch)
case <-timer.C: case <-timer.C:
// initially timer is set to go off as well as every time we hit the end of push index // initially timer is set to go off as well as every time we hit the end of push index
...@@ -209,21 +231,6 @@ LOOP: ...@@ -209,21 +231,6 @@ LOOP:
} }
} }
// setChunkAsSynced marks the chunk as synced in the local store and, when a
// tag is associated with the chunk, increments that tag's synced counter.
// A missing or unresolved tag is deliberately not an error: tag bookkeeping
// is best-effort and must not block the sync state transition.
func (s *Service) setChunkAsSynced(ctx context.Context, ch swarm.Chunk) error {
	if err := s.storer.Set(ctx, storage.ModeSetSync, ch.Address()); err != nil {
		return fmt.Errorf("set synced: %w", err)
	}
	t, err := s.tagg.Get(ch.TagID())
	if err == nil && t != nil {
		if err := t.Inc(tags.StateSynced); err != nil {
			// wrap for context, consistent with the set-sync error above
			return fmt.Errorf("increment synced: %w", err)
		}
	}
	return nil
}
func (s *Service) Close() error { func (s *Service) Close() error {
s.logger.Info("pusher shutting down") s.logger.Info("pusher shutting down")
close(s.quit) close(s.quit)
......
This diff is collapsed.
...@@ -96,8 +96,8 @@ func TestPushChunkToClosest(t *testing.T) { ...@@ -96,8 +96,8 @@ func TestPushChunkToClosest(t *testing.T) {
// chunk data to upload // chunk data to upload
chunk := testingc.FixtureChunk("7000") chunk := testingc.FixtureChunk("7000")
// create a pivot node and a mocked closest node // create a pivot node and a mocked closest node
pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000 pivotNode := swarm.MustParseHexAddress("0000") // base is 0000
closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") // binary 0110 -> po 1 closestPeer := swarm.MustParseHexAddress("6000") // binary 0110 -> po 1
callbackC := make(chan struct{}, 1) callbackC := make(chan struct{}, 1)
// peer is the node responding to the chunk receipt message // peer is the node responding to the chunk receipt message
// mock should return ErrWantSelf since there's no one to forward to // mock should return ErrWantSelf since there's no one to forward to
...@@ -181,10 +181,10 @@ func TestPushChunkToNextClosest(t *testing.T) { ...@@ -181,10 +181,10 @@ func TestPushChunkToNextClosest(t *testing.T) {
chunk := testingc.FixtureChunk("7000") chunk := testingc.FixtureChunk("7000")
// create a pivot node and a mocked closest node // create a pivot node and a mocked closest node
pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000 pivotNode := swarm.MustParseHexAddress("0000") // base is 0000
peer1 := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") peer1 := swarm.MustParseHexAddress("6000")
peer2 := swarm.MustParseHexAddress("5000000000000000000000000000000000000000000000000000000000000000") peer2 := swarm.MustParseHexAddress("5000")
peers := []swarm.Address{ peers := []swarm.Address{
peer1, peer1,
peer2, peer2,
...@@ -225,7 +225,6 @@ func TestPushChunkToNextClosest(t *testing.T) { ...@@ -225,7 +225,6 @@ func TestPushChunkToNextClosest(t *testing.T) {
// pivot node needs the streamer since the chunk is intercepted by // pivot node needs the streamer since the chunk is intercepted by
// the chunk worker, then gets sent by opening a new stream // the chunk worker, then gets sent by opening a new stream
psPivot, storerPivot, pivotTags, pivotAccounting := createPushSyncNode(t, pivotNode, recorder, nil, psPivot, storerPivot, pivotTags, pivotAccounting := createPushSyncNode(t, pivotNode, recorder, nil,
mock.WithClosestPeerErr(topology.ErrNotFound),
mock.WithPeers(peers...), mock.WithPeers(peers...),
) )
defer storerPivot.Close() defer storerPivot.Close()
......
...@@ -87,14 +87,18 @@ func (d *mock) Peers() []swarm.Address { ...@@ -87,14 +87,18 @@ func (d *mock) Peers() []swarm.Address {
func (d *mock) ClosestPeer(_ swarm.Address, skipPeers ...swarm.Address) (peerAddr swarm.Address, err error) { func (d *mock) ClosestPeer(_ swarm.Address, skipPeers ...swarm.Address) (peerAddr swarm.Address, err error) {
if len(skipPeers) == 0 { if len(skipPeers) == 0 {
return d.closestPeer, d.closestPeerErr if d.closestPeerErr != nil {
return d.closestPeer, d.closestPeerErr
}
if !d.closestPeer.Equal(swarm.ZeroAddress) {
return d.closestPeer, nil
}
} }
d.mtx.Lock() d.mtx.Lock()
defer d.mtx.Unlock() defer d.mtx.Unlock()
skipPeer := false skipPeer := false
for _, p := range d.peers { for _, p := range d.peers {
for _, a := range skipPeers { for _, a := range skipPeers {
if a.Equal(p) { if a.Equal(p) {
...@@ -113,7 +117,6 @@ func (d *mock) ClosestPeer(_ swarm.Address, skipPeers ...swarm.Address) (peerAdd ...@@ -113,7 +117,6 @@ func (d *mock) ClosestPeer(_ swarm.Address, skipPeers ...swarm.Address) (peerAdd
if peerAddr.IsZero() { if peerAddr.IsZero() {
return peerAddr, topology.ErrNotFound return peerAddr, topology.ErrNotFound
} }
return peerAddr, nil return peerAddr, nil
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment