publicPieceState PieceState
priority piecePriority
- availability int64
+ // Availability adjustment for this piece relative to len(Torrent.connsWithAllPieces). This is
+ // incremented for each peer that has this piece when Torrent.haveInfo is true and the Peer
+ // isn't recorded in Torrent.connsWithAllPieces.
+ relativeAvailability int
// This can be locked when the Client lock is taken, but probably not vice versa.
pendingWritesMutex sync.Mutex
return p.t.storage.Piece(p.Info())
}
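+// Flush invokes the torrent storage's optional Flush hook, if the storage
+// implementation provides one.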
+func (p *Piece) Flush() {
+ if p.t.storage.Flush != nil {
+ _ = p.t.storage.Flush()
+ }
+}
+
func (p *Piece) pendingChunkIndex(chunkIndex chunkIndexType) bool {
return !p.chunkIndexDirty(chunkIndex)
}
}
func (p *Piece) numDirtyChunks() chunkIndexType {
- return chunkIndexType(roaringBitmapRangeCardinality(
+ return chunkIndexType(roaringBitmapRangeCardinality[RequestIndex](
&p.t.dirtyChunks,
p.requestIndexOffset(),
p.t.pieceRequestIndexOffset(p.index+1)))
func (p *Piece) unpendChunkIndex(i chunkIndexType) {
p.t.dirtyChunks.Add(p.requestIndexOffset() + i)
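+ // The piece's dirty-chunk state changed, so refresh its request-order entry.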
+ p.t.updatePieceRequestOrder(p.index)
p.readerCond.Broadcast()
}
func (p *Piece) pendChunkIndex(i RequestIndex) {
p.t.dirtyChunks.Remove(p.requestIndexOffset() + i)
+ p.t.updatePieceRequestOrder(p.index)
}
func (p *Piece) numChunks() chunkIndexType {
if p.hashing {
target++
}
- //log.Printf("target: %d", target)
+ // log.Printf("target: %d", target)
p.t.queuePieceCheck(p.index)
for {
- //log.Printf("got %d verifies", p.numVerifies)
+ // log.Printf("got %d verifies", p.numVerifies)
if p.numVerifies >= target {
break
}
p.t.cl.lock()
defer p.t.cl.unlock()
p.priority = prio
- p.t.updatePiecePriority(p.index)
+ p.t.updatePiecePriority(p.index, "Piece.SetPriority")
}
func (p *Piece) purePriority() (ret piecePriority) {
}
func (p *Piece) uncachedPriority() (ret piecePriority) {
- if p.t.pieceComplete(p.index) || p.t.pieceQueuedForHash(p.index) || p.t.hashingPiece(p.index) {
+ if p.hashing || p.marking || p.t.pieceComplete(p.index) || p.queuedForHash() {
return PiecePriorityNone
}
return p.purePriority()
return p.t.PieceState(p.index)
}
-func (p *Piece) iterUndirtiedChunks(f func(chunkIndexType)) {
- // Use an iterator to jump between dirty bits.
- if true {
- it := p.t.dirtyChunks.Iterator()
- startIndex := p.requestIndexOffset()
- endIndex := startIndex + p.numChunks()
- it.AdvanceIfNeeded(startIndex)
- lastDirty := startIndex - 1
- for it.HasNext() {
- next := it.Next()
- if next >= endIndex {
- break
- }
- for index := lastDirty + 1; index < next; index++ {
- f(index - startIndex)
- }
- lastDirty = next
- }
- for index := lastDirty + 1; index < endIndex; index++ {
- f(index - startIndex)
- }
- return
- }
- // The original implementation.
- for i := chunkIndexType(0); i < p.numChunks(); i++ {
- if p.chunkIndexDirty(i) {
- continue
- }
- f(i)
- }
-}
-
func (p *Piece) requestIndexOffset() RequestIndex {
return p.t.pieceRequestIndexOffset(p.index)
}
+
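+// availability returns the number of connections believed to have this piece:
+// every connection known to have all pieces, plus this piece's relative
+// adjustment tracked in relativeAvailability.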
+func (p *Piece) availability() int {
+ return len(p.t.connsWithAllPieces) + p.relativeAvailability
+}