8 "github.com/RoaringBitmap/roaring"
9 "github.com/anacrolix/chansync"
10 "github.com/anacrolix/missinggo/v2/bitmap"
12 "github.com/anacrolix/torrent/metainfo"
13 pp "github.com/anacrolix/torrent/peer_protocol"
14 "github.com/anacrolix/torrent/storage"
type Piece struct {
	// The completed piece SHA1 hash, from the metainfo "pieces" field.
	hash  *metainfo.Hash
	t     *Torrent
	index pieceIndex
	files []*File

	readerCond chansync.BroadcastCond

	numVerifies         int64
	hashing             bool
	marking             bool
	storageCompletionOk bool

	publicPieceState PieceState
	priority         piecePriority
	// Availability adjustment for this piece relative to len(Torrent.connsWithAllPieces). This is
	// incremented for any piece a peer has while Torrent.haveInfo is true and the Peer isn't
	// recorded in Torrent.connsWithAllPieces.
	relativeAvailability int

	// This can be locked when the Client lock is taken, but probably not vice versa.
	pendingWritesMutex sync.Mutex
	pendingWrites      int
	noPendingWrites    sync.Cond

	// Connections that have written data to this piece since its last check.
	// This can include connections that have closed.
	dirtiers map[*Peer]struct{}

	undirtiedChunksIter undirtiedChunksIter
}

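// Identifies the piece as "<infohash hex>/<piece index>".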
func (p *Piece) String() string {
	return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
}

func (p *Piece) Info() metainfo.Piece {
	return p.t.info.Piece(int(p.index))
}

func (p *Piece) Storage() storage.Piece {
	return p.t.storage.Piece(p.Info())
}

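// A chunk is "pending" if it hasn't been marked dirty, i.e. its data still needs to be requested.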
func (p *Piece) pendingChunkIndex(chunkIndex chunkIndexType) bool {
	return !p.chunkIndexDirty(chunkIndex)
}

func (p *Piece) pendingChunk(cs ChunkSpec, chunkSize pp.Integer) bool {
	return p.pendingChunkIndex(chunkIndexFromChunkSpec(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
	return p.numDirtyChunks() != 0
}

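// Counts this piece's chunks that are marked dirty in the torrent-wide dirty-chunks bitmap.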
func (p *Piece) numDirtyChunks() chunkIndexType {
	return chunkIndexType(roaringBitmapRangeCardinality(
		&p.t.dirtyChunks,
		p.requestIndexOffset(),
		p.t.pieceRequestIndexOffset(p.index+1)))
}

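// Marks the chunk as dirty (its data has been received) and signals the piece's reader condition.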
func (p *Piece) unpendChunkIndex(i chunkIndexType) {
	p.t.dirtyChunks.Add(p.requestIndexOffset() + i)
	p.t.updatePieceRequestOrder(p.index)
	p.readerCond.Broadcast()
}

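// Clears the chunk's dirty state so that it will be requested again.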
func (p *Piece) pendChunkIndex(i RequestIndex) {
	p.t.dirtyChunks.Remove(p.requestIndexOffset() + i)
	p.t.updatePieceRequestOrder(p.index)
}

func (p *Piece) numChunks() chunkIndexType {
	return p.t.pieceNumChunks(p.index)
}

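// pendingWrites counts storage writes in flight for this piece; waitNoPendingWrites blocks until
// it drops to zero.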
func (p *Piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	p.pendingWrites++
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	if p.pendingWrites == 0 {
		panic("assertion")
	}
	p.pendingWrites--
	if p.pendingWrites == 0 {
		p.noPendingWrites.Broadcast()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) chunkIndexDirty(chunk chunkIndexType) bool {
	return p.t.dirtyChunks.Contains(p.requestIndexOffset() + chunk)
}

func (p *Piece) chunkIndexSpec(chunk chunkIndexType) ChunkSpec {
	return chunkIndexSpec(pp.Integer(chunk), p.length(), p.chunkSize())
}

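// The number of bytes received for this piece, accounting for the possibly shorter final chunk.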
func (p *Piece) numDirtyBytes() (ret pp.Integer) {
	// defer func() {
	// 	if ret > p.length() {
	// 		panic("too many dirty bytes")
	// 	}
	// }()
	numRegularDirtyChunks := p.numDirtyChunks()
	if p.chunkIndexDirty(p.numChunks() - 1) {
		// The final chunk may be shorter than the regular chunk size; count it separately.
		numRegularDirtyChunks--
		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
	}
	ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
	return
}

func (p *Piece) length() pp.Integer {
	return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
	return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() chunkIndexType {
	return p.numChunks() - 1
}

func (p *Piece) bytesLeft() (ret pp.Integer) {
	if p.t.pieceComplete(p.index) {
		return 0
	}
	return p.length() - p.numDirtyBytes()
}

// Forces the piece data to be rehashed.
func (p *Piece) VerifyData() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	target := p.numVerifies + 1
	if p.hashing {
		// A check is already underway; wait for one more after it completes.
		target++
	}
	// log.Printf("target: %d", target)
	p.t.queuePieceCheck(p.index)
	for p.numVerifies < target {
		// log.Printf("got %d verifies", p.numVerifies)
		p.t.cl.event.Wait()
	}
}

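// Reports whether the piece is currently queued for a hash check.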
func (p *Piece) queuedForHash() bool {
	return p.t.piecesQueuedForHash.Get(bitmap.BitIndex(p.index))
}

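// The byte offset of the start of this piece within the whole torrent.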
func (p *Piece) torrentBeginOffset() int64 {
	return int64(p.index) * p.t.info.PieceLength
}

func (p *Piece) torrentEndOffset() int64 {
	return p.torrentBeginOffset() + int64(p.length())
}

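// Sets the piece-specific priority and triggers a priority update for the piece.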
func (p *Piece) SetPriority(prio piecePriority) {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.priority = prio
	p.t.updatePiecePriority(p.index, "Piece.SetPriority")
}

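// The piece priority derived from file priorities, reader positions and the piece-specific
// priority, ignoring hashing and completion state.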
func (p *Piece) purePriority() (ret piecePriority) {
	for _, f := range p.files {
		ret.Raise(f.prio)
	}
	if p.t.readerNowPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityNow)
	}
	// if t._readerNowPieces.Contains(piece - 1) {
	// 	return PiecePriorityNext
	// }
	if p.t.readerReadaheadPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityReadahead)
	}
	ret.Raise(p.priority)
	return
}

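// Like purePriority, but returns PiecePriorityNone while the piece is hashing, being marked,
// queued for a hash check, or already complete.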
func (p *Piece) uncachedPriority() (ret piecePriority) {
	if p.hashing || p.marking || p.t.pieceComplete(p.index) || p.queuedForHash() {
		return PiecePriorityNone
	}
	return p.purePriority()
}

// Tells the Client to refetch the completion status from storage, updating priority etc. if
// necessary. Might be useful if you know the state of the piece data has changed externally.
func (p *Piece) UpdateCompletion() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.t.updatePieceCompletion(p.index)
}

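// The piece's completion state as last cached from storage.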
func (p *Piece) completion() (ret storage.Completion) {
	ret.Complete = p.t.pieceComplete(p.index)
	ret.Ok = p.storageCompletionOk
	return
}

func (p *Piece) allChunksDirty() bool {
	return p.numDirtyChunks() == p.numChunks()
}

func (p *Piece) State() PieceState {
	return p.t.PieceState(p.index)
}

func init() {
	gob.Register(undirtiedChunksIter{})
}

// Use an iterator to jump between dirty bits.
type undirtiedChunksIter struct {
	TorrentDirtyChunks *roaring.Bitmap
	StartRequestIndex  RequestIndex
	EndRequestIndex    RequestIndex
}

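// Calls f with each undirtied (pending) chunk index in [StartRequestIndex, EndRequestIndex),
// relative to the start of the piece.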
func (me *undirtiedChunksIter) Iter(f func(chunkIndexType)) {
	it := me.TorrentDirtyChunks.Iterator()
	startIndex := me.StartRequestIndex
	endIndex := me.EndRequestIndex
	it.AdvanceIfNeeded(startIndex)
	lastDirty := startIndex - 1
	for it.HasNext() {
		next := it.Next()
		if next >= endIndex {
			break
		}
		// Emit every index in the gap between the previous dirty bit and this one.
		for index := lastDirty + 1; index < next; index++ {
			f(index - startIndex)
		}
		lastDirty = next
	}
	// Emit the remaining undirtied indices after the last dirty bit in range.
	for index := lastDirty + 1; index < endIndex; index++ {
		f(index - startIndex)
	}
}

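// The torrent-wide request index of this piece's first chunk.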
func (p *Piece) requestIndexOffset() RequestIndex {
	return p.t.pieceRequestIndexOffset(p.index)
}

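// The number of connected peers believed to have this piece.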
func (p *Piece) availability() int {
	return len(p.t.connsWithAllPieces) + p.relativeAvailability
}