pkg/pusher/pusher.go: 15 changes (5 additions, 10 deletions)
@@ -281,10 +281,11 @@ func (s *Service) pushDeferred(ctx context.Context, logger log.Logger, op *Op) (
 		if s.shallowReceipt(op.identityAddress) {
 			return true, err
 		}
-		if err := s.storer.Report(ctx, op.Chunk, storage.ChunkSynced); err != nil {
-			loggerV1.Error(err, "pusher: failed to report sync status")
-			return true, err
-		}
+		loggerV1.Warning(
+			"pusher: shallow receipt received too many times for chunk, skipping sync",
+			"chunk_address", op.Chunk.Address(),
+		)
+		return false, s.storer.Report(ctx, op.Chunk, storage.ChunkCouldNotSync)
 	case err == nil:
 		if err := s.storer.Report(ctx, op.Chunk, storage.ChunkSynced); err != nil {
 			loggerV1.Error(err, "pusher: failed to report sync status")
@@ -331,12 +331,6 @@ func (s *Service) pushDirect(ctx context.Context, logger log.Logger, op *Op) err
 		if err != nil {
 			loggerV1.Error(err, "pusher: failed to store chunk")
 		}
-	case errors.Is(err, pushsync.ErrShallowReceipt):
-		if s.shallowReceipt(op.identityAddress) {
-			return err
-		}
-		// out of attempts for retry, swallow error
-		err = nil
 	case err != nil:
 		loggerV1.Error(err, "pusher: failed PushChunkToClosest")
 	}
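Both pusher.go hunks above rely on s.shallowReceipt(op.identityAddress), whose implementation is not part of this diff; it appears to return true while the chunk is still within its retry budget and false once shallow receipts have been seen too often. A minimal sketch of what such per-chunk bookkeeping could look like, assuming a mutex-guarded counter keyed by identity address (the names and the attempt limit are illustrative, not the actual implementation):

package pushersketch

import "sync"

// shallowReceiptTracker is an illustrative stand-in for the service's
// shallow-receipt bookkeeping; the real shallowReceipt method is not shown
// in this diff. It counts shallow receipts per identity address and reports
// whether another retry attempt is still allowed.
type shallowReceiptTracker struct {
	mu       sync.Mutex
	limit    int
	attempts map[string]int
}

func newShallowReceiptTracker(limit int) *shallowReceiptTracker {
	return &shallowReceiptTracker{limit: limit, attempts: make(map[string]int)}
}

// allow returns true while the chunk still has attempts left; once the limit
// is reached the caller stops retrying (and, in the deferred path above,
// reports storage.ChunkCouldNotSync instead).
func (t *shallowReceiptTracker) allow(identityAddress string) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.attempts[identityAddress]++
	return t.attempts[identityAddress] < t.limit
}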
pkg/pushsync/pushsync.go: 11 changes (9 additions, 2 deletions)
@@ -293,8 +293,15 @@ func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream)
 
 	switch receipt, err := ps.pushToClosest(ctx, chunk, false); {
 	case errors.Is(err, topology.ErrWantSelf):
-		stored, reason = true, "want self"
-		return store(ctx)
+		// Check whether storing the chunk here would yield a shallow receipt:
+		// the chunk may have been sent to the closest peer within the neighborhood,
+		// and even if that node is unreachable we may still want to sync the chunk.
+		if swarm.Proximity(ps.address.Bytes(), chunkAddress.Bytes()) >= rad {
+			stored, reason = true, "want self"
+			return store(ctx)
+		}
+		stored, reason = false, "chunk did not reach target neighborhood"
+		return ErrShallowReceipt
 	case err == nil:
 		ps.metrics.Forwarder.Inc()
 
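In the handler change above, swarm.Proximity compares the node's overlay address with the chunk address, and the chunk is only treated as "want self" when that proximity reaches rad (presumably the node's storage radius, obtained earlier in the handler and not shown in this hunk); otherwise the receipt would be shallow and ErrShallowReceipt is returned. A minimal sketch of the proximity-order idea, i.e. the number of leading bits two addresses share, capped at a maximum order (illustrative only; the real helper lives in the swarm package):

// maxPO caps the proximity order; addresses that agree on at least this many
// leading bits are considered maximally close.
const maxPO = 31

// proximity returns the number of leading bits that one and other share.
// A chunk with proximity(node, chunk) >= storage radius lies inside the
// node's neighborhood, so a receipt issued by this node is not shallow.
func proximity(one, other []byte) uint8 {
	b := maxPO/8 + 1
	if len(one) < b {
		b = len(one)
	}
	if len(other) < b {
		b = len(other)
	}
	for i := 0; i < b; i++ {
		oxo := one[i] ^ other[i]
		for j := 0; j < 8; j++ {
			if (oxo>>uint(7-j))&0x01 != 0 {
				return uint8(i*8 + j)
			}
		}
	}
	return maxPO
}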
pkg/storer/netstore.go: 5 changes (5 additions, 0 deletions)
@@ -39,6 +39,7 @@ func (db *DB) DirectUpload() PutterSession {
 		span.Finish()
 	}()
 
+	shallowRetryCount := pusher.DefaultRetryCount
 	for {
 		op := &pusher.Op{Chunk: ch, Err: make(chan error, 1), Direct: true, Span: span}
 		select {
@@ -59,6 +60,10 @@
 		case err := <-op.Err:
 			if errors.Is(err, pushsync.ErrShallowReceipt) {
 				logger.Debug("direct upload: shallow receipt received, retrying", "chunk", ch.Address())
+				shallowRetryCount--
+				if shallowRetryCount == 0 {
+					return err
+				}
 			} else if errors.Is(err, topology.ErrNotFound) {
 				logger.Debug("direct upload: no peers available, retrying", "chunk", ch.Address())
 			} else {
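The added shallowRetryCount bounds how many times a direct upload retries after a shallow receipt before the error is surfaced to the caller, with pusher.DefaultRetryCount as the budget. The same pattern in isolation, as a minimal sketch with a caller-supplied push function and a local sentinel error standing in for pushsync.ErrShallowReceipt (hypothetical helper, for illustration only):

package storersketch

import (
	"context"
	"errors"
)

// errShallowReceipt stands in for pushsync.ErrShallowReceipt in this sketch.
var errShallowReceipt = errors.New("pushsync: shallow receipt")

// pushWithBoundedRetry retries push while it keeps failing with a shallow
// receipt and gives up once the retry budget is spent, returning the last
// error. The real loop above additionally retries on topology.ErrNotFound
// and re-submits a fresh pusher.Op on every iteration.
func pushWithBoundedRetry(ctx context.Context, push func(context.Context) error, retries int) error {
	for {
		err := push(ctx)
		switch {
		case err == nil:
			return nil
		case errors.Is(err, errShallowReceipt):
			retries--
			if retries == 0 {
				return err
			}
			// still within budget: loop and push again
		default:
			return err
		}
	}
}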