8 "github.com/RoaringBitmap/roaring"
9 "github.com/anacrolix/chansync"
10 "github.com/anacrolix/missinggo/v2/bitmap"
11 "github.com/anacrolix/torrent/metainfo"
12 pp "github.com/anacrolix/torrent/peer_protocol"
13 "github.com/anacrolix/torrent/storage"
17 // The completed piece SHA1 hash, from the metainfo "pieces" field.
	readerCond chansync.BroadcastCond

	numVerifies         int64
	hashing             bool
	marking             bool
	storageCompletionOk bool

	publicPieceState PieceState
	priority         piecePriority
	// Availability adjustment for this piece, relative to len(Torrent.connsWithAllPieces). This is
	// incremented for each Peer that has this piece when Torrent.haveInfo is true and the Peer
	// isn't recorded in Torrent.connsWithAllPieces.
	relativeAvailability int

	// This can be locked when the Client lock is taken, but probably not vice versa.
	pendingWritesMutex sync.Mutex
	pendingWrites      int
	noPendingWrites    sync.Cond

	// Connections that have written data to this piece since its last check.
	// This can include connections that have closed.
	dirtiers map[*Peer]struct{}

	undirtiedChunksIter undirtiedChunksIter
}

func (p *Piece) String() string {
	return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
}

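// Info returns the metainfo.Piece for this piece, derived from the torrent's
// info dictionary.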
func (p *Piece) Info() metainfo.Piece {
	return p.t.info.Piece(int(p.index))
}

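// Storage returns the piece's handle into the torrent's underlying storage
// implementation.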
func (p *Piece) Storage() storage.Piece {
	return p.t.storage.Piece(p.Info())
}

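// A chunk is pending if it still needs to be downloaded, i.e. its dirty bit
// is not set.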
func (p *Piece) pendingChunkIndex(chunkIndex chunkIndexType) bool {
	return !p.chunkIndexDirty(chunkIndex)
}

func (p *Piece) pendingChunk(cs ChunkSpec, chunkSize pp.Integer) bool {
	return p.pendingChunkIndex(chunkIndexFromChunkSpec(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
	return p.numDirtyChunks() != 0
}

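// The number of chunks written since the last hash check, counted from this
// piece's range of the torrent-wide dirty-chunk bitmap.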
func (p *Piece) numDirtyChunks() chunkIndexType {
	return chunkIndexType(roaringBitmapRangeCardinality(
		&p.t.dirtyChunks,
		p.requestIndexOffset(),
		p.t.pieceRequestIndexOffset(p.index+1)))
}

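// Records that chunk i has been written: marks it dirty, updates the piece's
// position in the request order, and wakes readers waiting for new data.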
func (p *Piece) unpendChunkIndex(i chunkIndexType) {
	p.t.dirtyChunks.Add(p.requestIndexOffset() + i)
	p.t.updatePieceRequestOrder(p.index)
	p.readerCond.Broadcast()
}

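// Clears the chunk's dirty bit, making it eligible to be requested again.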
func (p *Piece) pendChunkIndex(i RequestIndex) {
	p.t.dirtyChunks.Remove(p.requestIndexOffset() + i)
	p.t.updatePieceRequestOrder(p.index)
}

func (p *Piece) numChunks() chunkIndexType {
	return p.t.pieceNumChunks(p.index)
}

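// pendingWrites counts chunk writes handed to storage that haven't completed
// yet; hashing waits for it to drain (see waitNoPendingWrites).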
func (p *Piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	p.pendingWrites++
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	if p.pendingWrites == 0 {
		panic("decrementPendingWrites with no pending writes")
	}
	p.pendingWrites--
	if p.pendingWrites == 0 {
		p.noPendingWrites.Broadcast()
	}
	p.pendingWritesMutex.Unlock()
}

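// Blocks until every outstanding write to this piece's storage has completed.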
func (p *Piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
	p.pendingWritesMutex.Unlock()
}

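// A sketch of the intended pairing (hypothetical caller, not code from this
// file): wrap each storage write in increment/decrement so a hash check only
// reads settled data:
//
//	p.incrementPendingWrites()
//	defer p.decrementPendingWrites()
//	_, err := p.Storage().WriteAt(data, begin) // write one received chunk
//
// and before hashing:
//
//	p.waitNoPendingWrites()
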
func (p *Piece) chunkIndexDirty(chunk chunkIndexType) bool {
	return p.t.dirtyChunks.Contains(p.requestIndexOffset() + chunk)
}

func (p *Piece) chunkIndexSpec(chunk chunkIndexType) ChunkSpec {
	return chunkIndexSpec(pp.Integer(chunk), p.length(), p.chunkSize())
}

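// The number of bytes received and written to this piece, accounting for the
// final chunk possibly being shorter than chunkSize.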
func (p *Piece) numDirtyBytes() (ret pp.Integer) {
	// defer func() {
	// 	if ret > p.length() {
	// 		panic("too many dirty bytes")
	// 	}
	// }()
	numRegularDirtyChunks := p.numDirtyChunks()
	if p.chunkIndexDirty(p.numChunks() - 1) {
		numRegularDirtyChunks--
		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
	}
	ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
	return
}

func (p *Piece) length() pp.Integer {
	return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
	return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() chunkIndexType {
	return p.numChunks() - 1
}

func (p *Piece) bytesLeft() (ret pp.Integer) {
	if p.t.pieceComplete(p.index) {
		return 0
	}
	return p.length() - p.numDirtyBytes()
}

// Forces the piece data to be rehashed. Blocks until the hash check has
// completed.
func (p *Piece) VerifyData() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	target := p.numVerifies + 1
	if p.hashing {
		target++
	}
	// log.Printf("target: %d", target)
	p.t.queuePieceCheck(p.index)
	for {
		// log.Printf("got %d verifies", p.numVerifies)
		if p.numVerifies >= target {
			break
		}
		p.t.cl.event.Wait()
	}
}

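// Whether the piece is currently queued for a hash check by its Torrent.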
func (p *Piece) queuedForHash() bool {
	return p.t.piecesQueuedForHash.Get(bitmap.BitIndex(p.index))
}

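// The byte offset at which this piece's data begins within the whole torrent.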
func (p *Piece) torrentBeginOffset() int64 {
	return int64(p.index) * p.t.info.PieceLength
}

func (p *Piece) torrentEndOffset() int64 {
	return p.torrentBeginOffset() + int64(p.length())
}

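// SetPriority sets the piece's explicit priority. The effective priority is
// this raised by file priorities and any readers overlapping the piece (see
// purePriority).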
func (p *Piece) SetPriority(prio piecePriority) {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.priority = prio
	p.t.updatePiecePriority(p.index, "Piece.SetPriority")
}

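// The piece's priority ignoring transient state like hashing or completion:
// the maximum of its files' priorities, reader-driven priorities, and the
// explicitly set priority.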
func (p *Piece) purePriority() (ret piecePriority) {
	for _, f := range p.files {
		ret.Raise(f.prio)
	}
	if p.t.readerNowPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityNow)
	}
	// if t._readerNowPieces.Contains(piece - 1) {
	// 	return PiecePriorityNext
	// }
	if p.t.readerReadaheadPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityReadahead)
	}
	ret.Raise(p.priority)
	return
}

func (p *Piece) uncachedPriority() (ret piecePriority) {
	if p.hashing || p.marking || p.t.pieceComplete(p.index) || p.queuedForHash() {
		return PiecePriorityNone
	}
	return p.purePriority()
}

// Tells the Client to refetch the completion status from storage, updating priority etc. if
// necessary. Might be useful if you know the state of the piece data has changed externally.
func (p *Piece) UpdateCompletion() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.t.updatePieceCompletion(p.index)
}

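// The piece's completion state per storage, using the cached "ok" flag from
// the last storage check.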
func (p *Piece) completion() (ret storage.Completion) {
	ret.Complete = p.t.pieceComplete(p.index)
	ret.Ok = p.storageCompletionOk
	return
}

func (p *Piece) allChunksDirty() bool {
	return p.numDirtyChunks() == p.numChunks()
}

func (p *Piece) State() PieceState {
	return p.t.PieceState(p.index)
}

func init() {
	gob.Register(undirtiedChunksIter{})
}

// Iterates a piece's undirtied chunks by using a bitmap iterator to jump
// between dirty bits rather than testing every chunk individually.
type undirtiedChunksIter struct {
	TorrentDirtyChunks *roaring.Bitmap
	StartRequestIndex  RequestIndex
	EndRequestIndex    RequestIndex
}

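// Iter calls f with each undirtied chunk index in
// [StartRequestIndex, EndRequestIndex), relative to StartRequestIndex.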
func (me *undirtiedChunksIter) Iter(f func(chunkIndexType)) {
	it := me.TorrentDirtyChunks.Iterator()
	startIndex := me.StartRequestIndex
	endIndex := me.EndRequestIndex
	it.AdvanceIfNeeded(startIndex)
	// If startIndex is 0 this wraps, and lastDirty+1 wraps back to 0 below.
	lastDirty := startIndex - 1
	for it.HasNext() {
		next := it.Next()
		if next >= endIndex {
			break
		}
		// Yield the undirtied chunks between the previous dirty bit and this one.
		for index := lastDirty + 1; index < next; index++ {
			f(index - startIndex)
		}
		lastDirty = next
	}
	// Yield any undirtied chunks after the final in-range dirty bit.
	for index := lastDirty + 1; index < endIndex; index++ {
		f(index - startIndex)
	}
}

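// A usage sketch (hypothetical caller, not code from this file): enumerate
// the chunks of piece p that still need requesting:
//
//	iter := undirtiedChunksIter{
//		TorrentDirtyChunks: &p.t.dirtyChunks,
//		StartRequestIndex:  p.requestIndexOffset(),
//		EndRequestIndex:    p.t.pieceRequestIndexOffset(p.index + 1),
//	}
//	iter.Iter(func(ci chunkIndexType) {
//		// ci is relative to the piece; request chunk ci of p.
//	})
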
func (p *Piece) requestIndexOffset() RequestIndex {
	return p.t.pieceRequestIndexOffset(p.index)
}

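// The number of peer connections believed to have this piece: connections
// known to have every piece, plus this piece's relative adjustment.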
func (p *Piece) availability() int {
	return len(p.t.connsWithAllPieces) + p.relativeAvailability
}