On rare occasions, reads fail in a loop, exhausting all the available file descriptors. It's not clear why; it could be an error in the filecache storage backend I'm using, or a logic error regarding when it's okay to retry the read.
// prevent thrashing with small caches and file and piece priorities.
log.Printf("error reading torrent %s piece %d offset %d, %d bytes: %v",
r.t.infoHash.HexString(), pi, po, len(b1), err)
- r.t.updateAllPieceCompletions()
- r.t.updateAllPiecePriorities()
+ if !r.t.updatePieceCompletion(pi) {
+ log.Printf("piece %d completion unchanged", pi)
+ }
r.t.cl.unlock()
}
}
pieceInclinationsPut.Add(1)
}
-func (t *Torrent) updatePieceCompletion(piece pieceIndex) {
+func (t *Torrent) updatePieceCompletion(piece pieceIndex) bool {
pcu := t.pieceCompleteUncached(piece)
p := &t.pieces[piece]
changed := t.completedPieces.Get(bitmap.BitIndex(piece)) != pcu.Complete || p.storageCompletionOk != pcu.Ok
if changed {
t.pieceCompletionChanged(piece)
}
+ return changed
}
// Non-blocking read. Client lock is not required.