Commit e0f50061 authored by acud, committed by GitHub

pushsync: package refactor (#1176)

* pushsync: reuse overlapping logic, improve instrumentation, don't bail on invalid receipt
parent 4abf0717
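For orientation, a condensed sketch of the per-chunk flow after this refactor, assembled from the pusher hunks below. The pushChunk helper is a hypothetical name introduced only for this illustration; metrics and tracing are omitted, but the types and calls are the ones visible in the diff.

package pusher

import (
	"context"
	"errors"
	"fmt"

	"github.com/ethersphere/bee/pkg/storage"
	"github.com/ethersphere/bee/pkg/swarm"
	"github.com/ethersphere/bee/pkg/tags"
	"github.com/ethersphere/bee/pkg/topology"
)

// pushChunk condenses the goroutine body from the diff: push the chunk,
// tolerate the we-are-closest case, mark the chunk as synced, and keep
// the tag counters consistent.
func (s *Service) pushChunk(ctx context.Context, ch swarm.Chunk) error {
	setSent := false
	if _, err := s.pushSyncer.PushChunkToClosest(ctx, ch); err != nil {
		if !errors.Is(err, topology.ErrWantSelf) {
			return err
		}
		// We are the closest node (the uploader-node edge case): still
		// count the chunk as sent so the sent counter does not diverge
		// from the synced counter.
		setSent = true
	}
	// Every successful path marks the chunk as synced.
	if err := s.storer.Set(ctx, storage.ModeSetSync, ch.Address()); err != nil {
		return fmt.Errorf("set sync: %w", err)
	}
	if t, err := s.tag.Get(ch.TagID()); err == nil && t != nil {
		if err := t.Inc(tags.StateSynced); err != nil {
			return fmt.Errorf("increment synced: %w", err)
		}
		if setSent {
			if err := t.Inc(tags.StateSent); err != nil {
				return fmt.Errorf("increment sent: %w", err)
			}
		}
	}
	return nil
}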
@@ -26,7 +26,7 @@ type Service struct {
 	storer storage.Storer
 	pushSyncer pushsync.PushSyncer
 	logger logging.Logger
-	tagg *tags.Tags
+	tag *tags.Tags
 	tracer *tracing.Tracer
 	metrics metrics
 	quit chan struct{}
@@ -42,7 +42,7 @@ func New(storer storage.Storer, peerSuggester topology.ClosestPeerer, pushSyncer
 	service := &Service{
 		storer: storer,
 		pushSyncer: pushSyncer,
-		tagg: tagger,
+		tag: tagger,
 		logger: logger,
 		tracer: tracer,
 		metrics: newMetrics(),
@@ -127,6 +127,8 @@ LOOP:
 				var (
 					err error
 					startTime = time.Now()
+					t *tags.Tag
+					setSent bool
 				)
 				defer func() {
 					if err == nil {
@@ -147,16 +149,36 @@ LOOP:
 				// for now ignoring the receipt and checking only for error
 				_, err = s.pushSyncer.PushChunkToClosest(ctx, ch)
 				if err != nil {
-					if !errors.Is(err, topology.ErrNotFound) {
-						logger.Debugf("pusher: error while sending chunk or receiving receipt: %v", err)
+					if errors.Is(err, topology.ErrWantSelf) {
+						// we are the closest ones - this is fine
+						// this is to make sure that the sent number does not diverge from the synced counter
+						// the edge case is on the uploader node, in the case where the uploader node is
+						// connected to other nodes, but is the closest one to the chunk.
+						setSent = true
+					} else {
+						return
 					}
-					return
 				}
-				err = s.setChunkAsSynced(ctx, ch)
-				if err != nil {
-					logger.Debugf("pusher: error setting chunk as synced: %v", err)
+				if err = s.storer.Set(ctx, storage.ModeSetSync, ch.Address()); err != nil {
+					err = fmt.Errorf("pusher: set sync: %w", err)
 					return
 				}
+				t, err = s.tag.Get(ch.TagID())
+				if err == nil && t != nil {
+					err = t.Inc(tags.StateSynced)
+					if err != nil {
+						err = fmt.Errorf("pusher: increment synced: %v", err)
+						return
+					}
+					if setSent {
+						err = t.Inc(tags.StateSent)
+						if err != nil {
+							err = fmt.Errorf("pusher: increment sent: %w", err)
+							return
+						}
+					}
+				}
 			}(ctx, ch)
 		case <-timer.C:
 			// initially timer is set to go off as well as every time we hit the end of push index
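The timer referred to here sets the retry cadence of the worker loop: it fires once immediately at startup, and is re-armed each time a full pass over the push index completes. A standalone sketch of that pattern (names are hypothetical, not code from the repository):

package main

import (
	"fmt"
	"time"
)

func main() {
	const retryInterval = 500 * time.Millisecond

	// A zero-duration timer fires immediately on startup.
	timer := time.NewTimer(0)
	defer timer.Stop()

	for pass := 1; pass <= 3; pass++ {
		<-timer.C
		// ... traverse the push index, handing chunks to push workers ...
		fmt.Println("completed pass", pass, "over the push index")

		// Re-arm after reaching the end of the index so that chunks
		// that failed to sync are retried on the next tick.
		timer.Reset(retryInterval)
	}
}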
@@ -209,21 +231,6 @@ LOOP:
 	}
 }
-
-func (s *Service) setChunkAsSynced(ctx context.Context, ch swarm.Chunk) error {
-	if err := s.storer.Set(ctx, storage.ModeSetSync, ch.Address()); err != nil {
-		return fmt.Errorf("set synced: %w", err)
-	}
-	t, err := s.tagg.Get(ch.TagID())
-	if err == nil && t != nil {
-		err = t.Inc(tags.StateSynced)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 func (s *Service) Close() error {
 	s.logger.Info("pusher shutting down")
 	close(s.quit)
This diff is collapsed.
@@ -96,8 +96,8 @@ func TestPushChunkToClosest(t *testing.T) {
 	// chunk data to upload
 	chunk := testingc.FixtureChunk("7000")
 	// create a pivot node and a mocked closest node
-	pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000
-	closestPeer := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000") // binary 0110 -> po 1
+	pivotNode := swarm.MustParseHexAddress("0000") // base is 0000
+	closestPeer := swarm.MustParseHexAddress("6000") // binary 0110 -> po 1
 	callbackC := make(chan struct{}, 1)
 	// peer is the node responding to the chunk receipt message
 	// mock should return ErrWantSelf since there's no one to forward to
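The po annotations in the comments above refer to proximity order: the number of leading bits two addresses have in common. A small illustrative check, assuming swarm.Proximity is available as elsewhere in the codebase:

package main

import (
	"fmt"

	"github.com/ethersphere/bee/pkg/swarm"
)

func main() {
	pivot := swarm.MustParseHexAddress("0000")
	peer1 := swarm.MustParseHexAddress("6000") // 0110... shares one leading bit with 0000...
	peer2 := swarm.MustParseHexAddress("5000") // 0101... also shares exactly one leading bit

	fmt.Println(swarm.Proximity(pivot.Bytes(), peer1.Bytes())) // 1
	fmt.Println(swarm.Proximity(pivot.Bytes(), peer2.Bytes())) // 1
}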
@@ -181,10 +181,10 @@ func TestPushChunkToNextClosest(t *testing.T) {
 	chunk := testingc.FixtureChunk("7000")
 	// create a pivot node and a mocked closest node
-	pivotNode := swarm.MustParseHexAddress("0000000000000000000000000000000000000000000000000000000000000000") // base is 0000
+	pivotNode := swarm.MustParseHexAddress("0000") // base is 0000
 
-	peer1 := swarm.MustParseHexAddress("6000000000000000000000000000000000000000000000000000000000000000")
-	peer2 := swarm.MustParseHexAddress("5000000000000000000000000000000000000000000000000000000000000000")
+	peer1 := swarm.MustParseHexAddress("6000")
+	peer2 := swarm.MustParseHexAddress("5000")
 	peers := []swarm.Address{
 		peer1,
 		peer2,
@@ -225,7 +225,6 @@ func TestPushChunkToNextClosest(t *testing.T) {
 	// pivot node needs the streamer since the chunk is intercepted by
 	// the chunk worker, then gets sent by opening a new stream
 	psPivot, storerPivot, pivotTags, pivotAccounting := createPushSyncNode(t, pivotNode, recorder, nil,
-		mock.WithClosestPeerErr(topology.ErrNotFound),
 		mock.WithPeers(peers...),
 	)
 	defer storerPivot.Close()
@@ -87,14 +87,18 @@ func (d *mock) Peers() []swarm.Address {
 func (d *mock) ClosestPeer(_ swarm.Address, skipPeers ...swarm.Address) (peerAddr swarm.Address, err error) {
 	if len(skipPeers) == 0 {
-		return d.closestPeer, d.closestPeerErr
+		if d.closestPeerErr != nil {
+			return d.closestPeer, d.closestPeerErr
+		}
+		if !d.closestPeer.Equal(swarm.ZeroAddress) {
+			return d.closestPeer, nil
+		}
 	}
 	d.mtx.Lock()
 	defer d.mtx.Unlock()
 	skipPeer := false
 	for _, p := range d.peers {
 		for _, a := range skipPeers {
 			if a.Equal(p) {
@@ -113,7 +117,6 @@ func (d *mock) ClosestPeer(_ swarm.Address, skipPeers ...swarm.Address) (peerAddr swarm.Address, err error) {
 	if peerAddr.IsZero() {
 		return peerAddr, topology.ErrNotFound
 	}
-
 	return peerAddr, nil
 }
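Together with the test change above (dropping mock.WithClosestPeerErr(topology.ErrNotFound) from the pivot node setup), the reworked mock now short-circuits only when an error or a non-zero closest peer was explicitly configured, and otherwise falls through to the peer list. A hypothetical test sketch of that behavior, assuming the mock is constructed via mock.NewTopologyDriver:

package mock_test

import (
	"testing"

	"github.com/ethersphere/bee/pkg/swarm"
	"github.com/ethersphere/bee/pkg/topology/mock"
)

func TestClosestPeerFallsThroughToPeerList(t *testing.T) {
	peer := swarm.MustParseHexAddress("6000")
	// No closestPeer and no closestPeerErr configured.
	driver := mock.NewTopologyDriver(mock.WithPeers(peer))

	// Previously this returned a zero address with a nil error; now the
	// mock selects from the peer list instead.
	got, err := driver.ClosestPeer(swarm.MustParseHexAddress("7000"))
	if err != nil {
		t.Fatal(err)
	}
	if !got.Equal(peer) {
		t.Fatalf("got %s, want %s", got, peer)
	}
}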