8 "github.com/RoaringBitmap/roaring"
9 "github.com/anacrolix/chansync"
10 "github.com/anacrolix/missinggo/v2/bitmap"
12 "github.com/anacrolix/torrent/metainfo"
13 pp "github.com/anacrolix/torrent/peer_protocol"
14 "github.com/anacrolix/torrent/storage"
type Piece struct {
	// The completed piece SHA1 hash, from the metainfo "pieces" field.
	hash  *metainfo.Hash
	t     *Torrent
	index pieceIndex
	files []*File
	readerCond chansync.BroadcastCond
	numVerifies         int64
	hashing             bool
	marking             bool
	storageCompletionOk bool
	publicPieceState PieceState
	priority         piecePriority

	// This can be locked when the Client lock is taken, but probably not vice versa.
	pendingWritesMutex sync.Mutex
	pendingWrites      int
	noPendingWrites    sync.Cond

	// Connections that have written data to this piece since its last check.
	// This can include connections that have closed.
	dirtiers map[*Peer]struct{}
	undirtiedChunksIter undirtiedChunksIter
}

func (p *Piece) String() string {
	return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
}

func (p *Piece) Info() metainfo.Piece {
	return p.t.info.Piece(int(p.index))
}

func (p *Piece) Storage() storage.Piece {
	return p.t.storage.Piece(p.Info())
}

func (p *Piece) pendingChunkIndex(chunkIndex chunkIndexType) bool {
	return !p.chunkIndexDirty(chunkIndex)
}

func (p *Piece) pendingChunk(cs ChunkSpec, chunkSize pp.Integer) bool {
	return p.pendingChunkIndex(chunkIndexFromChunkSpec(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
	return p.numDirtyChunks() != 0
}

func (p *Piece) numDirtyChunks() chunkIndexType {
	return chunkIndexType(roaringBitmapRangeCardinality(
		&p.t.dirtyChunks,
		p.requestIndexOffset(),
		p.t.pieceRequestIndexOffset(p.index+1)))
}

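// Chunk dirtiness is tracked in a single torrent-wide roaring bitmap keyed by
// request index: this piece's chunks occupy the half-open range
// [p.requestIndexOffset(), p.t.pieceRequestIndexOffset(p.index+1)), so for a
// piece-relative chunk index c, p.chunkIndexDirty(c) amounts to
// p.t.dirtyChunks.Contains(p.requestIndexOffset() + c). For example
// (hypothetical numbers), with 16 chunks per regular piece, piece 3's chunks
// map to request indices 48 through 63.
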
func (p *Piece) unpendChunkIndex(i chunkIndexType) {
	p.t.dirtyChunks.Add(p.requestIndexOffset() + i)
	p.t.updatePieceRequestOrder(p.index)
	p.readerCond.Broadcast()
}

func (p *Piece) pendChunkIndex(i RequestIndex) {
	p.t.dirtyChunks.Remove(p.requestIndexOffset() + i)
	p.t.updatePieceRequestOrder(p.index)
}

func (p *Piece) numChunks() chunkIndexType {
	return p.t.pieceNumChunks(p.index)
}

func (p *Piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	p.pendingWrites++
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	if p.pendingWrites == 0 {
		panic("assertion")
	}
	p.pendingWrites--
	if p.pendingWrites == 0 {
		p.noPendingWrites.Broadcast()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
	p.pendingWritesMutex.Unlock()
}

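// Minimal sketch of how the pending-write counters are meant to pair up
// (illustrative only; the real call sites are the peer write path and the
// piece hasher elsewhere in this package):
//
//	p.incrementPendingWrites()
//	go func() {
//		defer p.decrementPendingWrites()
//		// ... hand the received chunk to p.Storage() ...
//	}()
//
//	// Before rehashing the piece, wait for in-flight chunk writes to land:
//	p.waitNoPendingWrites()
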
func (p *Piece) chunkIndexDirty(chunk chunkIndexType) bool {
	return p.t.dirtyChunks.Contains(p.requestIndexOffset() + chunk)
}

func (p *Piece) chunkIndexSpec(chunk chunkIndexType) ChunkSpec {
	return chunkIndexSpec(pp.Integer(chunk), p.length(), p.chunkSize())
}

func (p *Piece) numDirtyBytes() (ret pp.Integer) {
	// defer func() {
	// 	if ret > p.length() {
	// 		panic("too many dirty bytes")
	// 	}
	// }()
	numRegularDirtyChunks := p.numDirtyChunks()
	if p.chunkIndexDirty(p.numChunks() - 1) {
		numRegularDirtyChunks--
		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
	}
	ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
	return
}

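// Worked example with hypothetical numbers: for a piece of 40000 bytes and a
// chunk size of 16384, there are 3 chunks and the last one is
// 40000 - 2*16384 = 7232 bytes. If chunks 0 and 2 are dirty, the last chunk is
// counted at its true length, so numDirtyBytes returns 16384 + 7232 = 23616.
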
func (p *Piece) length() pp.Integer {
	return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
	return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() chunkIndexType {
	return p.numChunks() - 1
}

func (p *Piece) bytesLeft() (ret pp.Integer) {
	if p.t.pieceComplete(p.index) {
		return 0
	}
	return p.length() - p.numDirtyBytes()
}

// Forces the piece data to be rehashed.
func (p *Piece) VerifyData() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	target := p.numVerifies + 1
	if p.hashing {
		target++
	}
	// log.Printf("target: %d", target)
	p.t.queuePieceCheck(p.index)
	for {
		// log.Printf("got %d verifies", p.numVerifies)
		if p.numVerifies >= target {
			break
		}
		p.t.cl.event.Wait()
	}
}

func (p *Piece) queuedForHash() bool {
	return p.t.piecesQueuedForHash.Get(bitmap.BitIndex(p.index))
}

func (p *Piece) torrentBeginOffset() int64 {
	return int64(p.index) * p.t.info.PieceLength
}

func (p *Piece) torrentEndOffset() int64 {
	return p.torrentBeginOffset() + int64(p.length())
}

func (p *Piece) SetPriority(prio piecePriority) {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.priority = prio
	p.t.updatePiecePriority(p.index, "Piece.SetPriority")
}

func (p *Piece) purePriority() (ret piecePriority) {
	for _, f := range p.files {
		ret.Raise(f.prio)
	}
	if p.t.readerNowPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityNow)
	}
	// if t._readerNowPieces.Contains(piece - 1) {
	// 	return PiecePriorityNext
	// }
	if p.t.readerReadaheadPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityReadahead)
	}
	ret.Raise(p.priority)
	return
}

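// piecePriority.Raise keeps the higher of the current and proposed values, so
// purePriority is effectively a max over the contributing sources: file
// priorities, reader position (now/readahead), and the piece's own priority.
// For example (hypothetical), a piece covered by a file at PiecePriorityNormal
// that also falls inside a reader's readahead window comes out at
// PiecePriorityReadahead.
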
func (p *Piece) uncachedPriority() (ret piecePriority) {
	if p.hashing || p.marking || p.t.pieceComplete(p.index) || p.queuedForHash() {
		return PiecePriorityNone
	}
	return p.purePriority()
}

// Tells the Client to refetch the completion status from storage, updating priority etc. if
// necessary. Might be useful if you know the state of the piece data has changed externally.
func (p *Piece) UpdateCompletion() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.t.updatePieceCompletion(p.index)
}

func (p *Piece) completion() (ret storage.Completion) {
	ret.Complete = p.t.pieceComplete(p.index)
	ret.Ok = p.storageCompletionOk
	return
}

func (p *Piece) allChunksDirty() bool {
	return p.numDirtyChunks() == p.numChunks()
}

func (p *Piece) State() PieceState {
	return p.t.PieceState(p.index)
}

func init() {
	gob.Register(undirtiedChunksIter{})
}

// Use an iterator to jump between dirty bits.
type undirtiedChunksIter struct {
	TorrentDirtyChunks *roaring.Bitmap
	StartRequestIndex  RequestIndex
	EndRequestIndex    RequestIndex
}

func (me *undirtiedChunksIter) Iter(f func(chunkIndexType)) {
	it := me.TorrentDirtyChunks.Iterator()
	startIndex := me.StartRequestIndex
	endIndex := me.EndRequestIndex
	it.AdvanceIfNeeded(startIndex)
	lastDirty := startIndex - 1
	for it.HasNext() {
		next := it.Next()
		if next >= endIndex {
			break
		}
		for index := lastDirty + 1; index < next; index++ {
			f(index - startIndex)
		}
		lastDirty = next
	}
	for index := lastDirty + 1; index < endIndex; index++ {
		f(index - startIndex)
	}
}

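// Minimal sketch of driving the iterator for a piece (assumed wiring; the
// request-planning code that actually sets this up lives elsewhere in the
// package):
//
//	it := &p.undirtiedChunksIter
//	it.TorrentDirtyChunks = &p.t.dirtyChunks
//	it.StartRequestIndex = p.requestIndexOffset()
//	it.EndRequestIndex = it.StartRequestIndex + p.numChunks()
//	it.Iter(func(ci chunkIndexType) {
//		// ci is the piece-relative index of a chunk that is not yet dirty.
//	})
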
func (p *Piece) requestIndexOffset() RequestIndex {
	return p.t.pieceRequestIndexOffset(p.index)
}