From dcb2c0bb4116be0025d9ffa0bc18df70e383b4b4 Mon Sep 17 00:00:00 2001 From: Matt Joiner Date: Fri, 6 Nov 2020 08:39:56 +1100 Subject: [PATCH] Fix piece getting queued for hash multiple times Pieces could get queued for hash multiple times when we receive chunks if the piece starts getting hashed before we're done writing all the chunks out. This was only found because piece hashing currently only checks the incomplete data, which is missing: after the first piece hash passes, the data is marked complete, so the subsequently queued hash has nothing to read. --- peerconn.go | 3 ++- piece.go | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/peerconn.go b/peerconn.go index d975af4f..48acbc11 100644 --- a/peerconn.go +++ b/peerconn.go @@ -1354,7 +1354,8 @@ func (c *peer) receiveChunk(msg *pp.Message) error { c.onDirtiedPiece(pieceIndex(req.Index)) - if t.pieceAllDirty(pieceIndex(req.Index)) { + // We need to ensure the piece is only queued once, so only the last chunk writer gets this job. + if t.pieceAllDirty(pieceIndex(req.Index)) && piece.pendingWrites == 0 { t.queuePieceCheck(pieceIndex(req.Index)) // We don't pend all chunks here anymore because we don't want code dependent on the dirty // chunk status (such as the haveChunk call above) to have to check all the various other diff --git a/piece.go b/piece.go index 2e6a4259..56dedec6 100644 --- a/piece.go +++ b/piece.go @@ -55,6 +55,7 @@ type Piece struct { publicPieceState PieceState priority piecePriority + // This can be locked when the Client lock is taken, but probably not vice versa. pendingWritesMutex sync.Mutex pendingWrites int noPendingWrites sync.Cond -- 2.48.1