7 "github.com/anacrolix/chansync"
8 "github.com/anacrolix/missinggo/v2/bitmap"
9 "github.com/anacrolix/torrent/metainfo"
10 pp "github.com/anacrolix/torrent/peer_protocol"
11 "github.com/anacrolix/torrent/storage"
type Piece struct {
	// The completed piece SHA1 hash, from the metainfo "pieces" field.
	hash  *metainfo.Hash
	t     *Torrent
	index pieceIndex
	files []*File

	readerCond chansync.BroadcastCond

	numVerifies         int64
	hashing             bool
	marking             bool
	storageCompletionOk bool

	publicPieceState PieceState
	priority         piecePriority
	// Availability adjustment for this piece relative to len(Torrent.connsWithAllPieces). This is
	// incremented for each peer that has this piece, when Torrent.haveInfo is true and the Peer
	// isn't recorded in Torrent.connsWithAllPieces.
	relativeAvailability int

	// This can be locked when the Client lock is taken, but probably not vice versa.
	pendingWritesMutex sync.Mutex
	pendingWrites      int
	noPendingWrites    sync.Cond

	// Connections that have written data to this piece since its last check.
	// This can include connections that have closed.
	dirtiers map[*Peer]struct{}
}

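// String identifies the piece as "<torrent infohash hex>/<piece index>".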
func (p *Piece) String() string {
	return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
}

func (p *Piece) Info() metainfo.Piece {
	return p.t.info.Piece(int(p.index))
}

func (p *Piece) Storage() storage.Piece {
	return p.t.storage.Piece(p.Info())
}

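// Flush invokes the storage implementation's Flush hook, if it provides one.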
func (p *Piece) Flush() {
	if p.t.storage.Flush != nil {
		_ = p.t.storage.Flush()
	}
}

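// A chunk is pending if it still needs to be requested, i.e. it hasn't been marked dirty.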
func (p *Piece) pendingChunkIndex(chunkIndex chunkIndexType) bool {
	return !p.chunkIndexDirty(chunkIndex)
}

func (p *Piece) pendingChunk(cs ChunkSpec, chunkSize pp.Integer) bool {
	return p.pendingChunkIndex(chunkIndexFromChunkSpec(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
	return p.numDirtyChunks() != 0
}

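// numDirtyChunks counts how many of this piece's chunks are marked dirty in the Torrent's
// dirty-chunk bitmap.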
func (p *Piece) numDirtyChunks() chunkIndexType {
	return chunkIndexType(roaringBitmapRangeCardinality[RequestIndex](
		&p.t.dirtyChunks,
		p.requestIndexOffset(),
		p.t.pieceRequestIndexOffset(p.index+1)))
}

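// unpendChunkIndex marks a chunk as dirty (its data has been written) and notifies waiting
// readers.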
func (p *Piece) unpendChunkIndex(i chunkIndexType) {
	p.t.dirtyChunks.Add(p.requestIndexOffset() + i)
	p.t.updatePieceRequestOrder(p.index)
	p.readerCond.Broadcast()
}

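// pendChunkIndex clears a chunk's dirty flag so that it becomes eligible to be requested again.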
func (p *Piece) pendChunkIndex(i RequestIndex) {
	p.t.dirtyChunks.Remove(p.requestIndexOffset() + i)
	p.t.updatePieceRequestOrder(p.index)
}

func (p *Piece) numChunks() chunkIndexType {
	return p.t.pieceNumChunks(p.index)
}

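// incrementPendingWrites records the start of a write of chunk data to storage; see
// waitNoPendingWrites.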
func (p *Piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	p.pendingWrites++
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	if p.pendingWrites == 0 {
		panic("assertion")
	}
	p.pendingWrites--
	if p.pendingWrites == 0 {
		p.noPendingWrites.Broadcast()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) chunkIndexDirty(chunk chunkIndexType) bool {
	return p.t.dirtyChunks.Contains(p.requestIndexOffset() + chunk)
}

func (p *Piece) chunkIndexSpec(chunk chunkIndexType) ChunkSpec {
	return chunkIndexSpec(pp.Integer(chunk), p.length(), p.chunkSize())
}

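// numDirtyBytes returns the number of bytes of this piece that are marked dirty, accounting for
// the final chunk possibly being shorter than the regular chunk size.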
func (p *Piece) numDirtyBytes() (ret pp.Integer) {
	// defer func() {
	// 	if ret > p.length() {
	// 		panic("too many dirty bytes")
	// 	}
	// }()
	numRegularDirtyChunks := p.numDirtyChunks()
	if p.chunkIndexDirty(p.numChunks() - 1) {
		numRegularDirtyChunks--
		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
	}
	ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
	return
}

func (p *Piece) length() pp.Integer {
	return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
	return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() chunkIndexType {
	return p.numChunks() - 1
}

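// bytesLeft returns the number of bytes of this piece that remain to be obtained.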
func (p *Piece) bytesLeft() (ret pp.Integer) {
	if p.t.pieceComplete(p.index) {
		return 0
	}
	return p.length() - p.numDirtyBytes()
}

// VerifyData forces the piece data to be rehashed, and blocks until the hash check completes.
func (p *Piece) VerifyData() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	target := p.numVerifies + 1
	if p.hashing {
		target++
	}
	// log.Printf("target: %d", target)
	p.t.queuePieceCheck(p.index)
	for {
		// log.Printf("got %d verifies", p.numVerifies)
		if p.numVerifies >= target {
			break
		}
		p.t.cl.event.Wait()
	}
}

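// queuedForHash reports whether the piece is queued for a hash check.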
func (p *Piece) queuedForHash() bool {
	return p.t.piecesQueuedForHash.Get(bitmap.BitIndex(p.index))
}

func (p *Piece) torrentBeginOffset() int64 {
	return int64(p.index) * p.t.info.PieceLength
}

func (p *Piece) torrentEndOffset() int64 {
	return p.torrentBeginOffset() + int64(p.length())
}

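// SetPriority sets the piece's own priority and triggers an update of its effective priority
// within the Torrent.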
func (p *Piece) SetPriority(prio piecePriority) {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.priority = prio
	p.t.updatePiecePriority(p.index, "Piece.SetPriority")
}

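// purePriority derives the piece's priority from its files, the positions of any readers, and
// the piece's own priority, ignoring completion and hashing state.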
func (p *Piece) purePriority() (ret piecePriority) {
	for _, f := range p.files {
		ret.Raise(f.prio)
	}
	if p.t.readerNowPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityNow)
	}
	// if t._readerNowPieces.Contains(piece - 1) {
	// 	return PiecePriorityNext
	// }
	if p.t.readerReadaheadPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityReadahead)
	}
	ret.Raise(p.priority)
	return
}

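// uncachedPriority recomputes the piece's effective priority: pieces that are complete, being
// hashed, being marked, or queued for a hash check get no priority.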
func (p *Piece) uncachedPriority() (ret piecePriority) {
	if p.hashing || p.marking || p.t.pieceComplete(p.index) || p.queuedForHash() {
		return PiecePriorityNone
	}
	return p.purePriority()
}

// Tells the Client to refetch the completion status from storage, updating priority etc. if
// necessary. Might be useful if you know the state of the piece data has changed externally.
func (p *Piece) UpdateCompletion() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.t.updatePieceCompletion(p.index)
}

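// completion reports the piece's completion state using the cached storage-completion flag.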
func (p *Piece) completion() (ret storage.Completion) {
	ret.Complete = p.t.pieceComplete(p.index)
	ret.Ok = p.storageCompletionOk
	return
}

func (p *Piece) allChunksDirty() bool {
	return p.numDirtyChunks() == p.numChunks()
}

func (p *Piece) State() PieceState {
	return p.t.PieceState(p.index)
}

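// requestIndexOffset returns the request index of this piece's first chunk within the
// Torrent-wide request index space.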
func (p *Piece) requestIndexOffset() RequestIndex {
	return p.t.pieceRequestIndexOffset(p.index)
}

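// availability returns the number of peer connections believed to have this piece.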
func (p *Piece) availability() int {
	return len(p.t.connsWithAllPieces) + p.relativeAvailability
}