7 "github.com/anacrolix/chansync"
8 g "github.com/anacrolix/generics"
9 "github.com/anacrolix/missinggo/v2/bitmap"
11 "github.com/anacrolix/torrent/metainfo"
12 pp "github.com/anacrolix/torrent/peer_protocol"
13 "github.com/anacrolix/torrent/storage"
14 infohash_v2 "github.com/anacrolix/torrent/types/infohash-v2"
	// The completed piece SHA1 hash, from the metainfo "pieces" field. Nil if the info is not V1
	// compatible.
	hash   *metainfo.Hash
	hashV2 g.Option[infohash_v2.T]
	t      *Torrent
	index  pieceIndex
	files  []*File

	readerCond chansync.BroadcastCond

	numVerifies         int64
	hashing             bool
	marking             bool
	storageCompletionOk bool

	publicPieceState PieceState
	priority         PiecePriority
	// Availability adjustment for this piece relative to len(Torrent.connsWithAllPieces). This is
	// incremented for each connected peer that has this piece while Torrent.haveInfo is true and
	// the Peer isn't recorded in Torrent.connsWithAllPieces.
	relativeAvailability int

	// This can be locked when the Client lock is taken, but probably not vice versa.
	pendingWritesMutex sync.Mutex
	pendingWrites      int
	noPendingWrites    sync.Cond

	// Connections that have written data to this piece since its last check.
	// This can include connections that have closed.
	dirtiers map[*Peer]struct{}
}

func (p *Piece) String() string {
	return fmt.Sprintf("%s/%d", p.t.canonicalShortInfohash().HexString(), p.index)
}

func (p *Piece) Info() metainfo.Piece {
	return p.t.info.Piece(int(p.index))
}

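// Storage returns a handle to this piece's data in storage, attaching the expected piece hash
// (v1 if present, otherwise v2) so the storage layer can verify the data.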
func (p *Piece) Storage() storage.Piece {
	var pieceHash g.Option[[]byte]
	if p.hash != nil {
		pieceHash.Set(p.hash.Bytes())
	} else if p.hashV2.Ok {
		pieceHash.Set(p.hashV2.Value.Bytes())
	}
	return p.t.storage.PieceWithHash(p.Info(), pieceHash)
}

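// Flush flushes the piece's storage, if the storage implementation provides the optional Flush
// hook.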
func (p *Piece) Flush() {
	if p.t.storage.Flush != nil {
		_ = p.t.storage.Flush()
	}
}

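// A chunk is pending if it still needs to be downloaded: that is, it hasn't been written
// ("dirtied") since the last hash check.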
func (p *Piece) pendingChunkIndex(chunkIndex chunkIndexType) bool {
	return !p.chunkIndexDirty(chunkIndex)
}

func (p *Piece) pendingChunk(cs ChunkSpec, chunkSize pp.Integer) bool {
	return p.pendingChunkIndex(chunkIndexFromChunkSpec(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
	return p.numDirtyChunks() != 0
}

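// The number of this piece's chunks written since the last hash check, counted over this
// piece's range of the Torrent-wide dirty-chunk bitmap.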
func (p *Piece) numDirtyChunks() chunkIndexType {
	return chunkIndexType(roaringBitmapRangeCardinality[RequestIndex](
		&p.t.dirtyChunks,
		p.requestIndexOffset(),
		p.t.pieceRequestIndexOffset(p.index+1)))
}

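// Unpending a chunk marks it dirty: it has been received and no longer needs to be requested.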
func (p *Piece) unpendChunkIndex(i chunkIndexType) {
	p.t.dirtyChunks.Add(p.requestIndexOffset() + i)
	p.t.updatePieceRequestOrderPiece(p.index)
	p.readerCond.Broadcast()
}

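// Pending a chunk marks it as needing to be downloaded again, for example after a failed hash
// check.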
func (p *Piece) pendChunkIndex(i RequestIndex) {
	p.t.dirtyChunks.Remove(p.requestIndexOffset() + i)
	p.t.updatePieceRequestOrderPiece(p.index)
}

func (p *Piece) numChunks() chunkIndexType {
	return p.t.pieceNumChunks(p.index)
}

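// pendingWrites counts in-flight storage writes for this piece, so that operations such as
// hashing can wait for the data they read to settle.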
func (p *Piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	p.pendingWrites++
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	if p.pendingWrites == 0 {
		panic("decrementPendingWrites called with no pending writes")
	}
	p.pendingWrites--
	if p.pendingWrites == 0 {
		p.noPendingWrites.Broadcast()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) chunkIndexDirty(chunk chunkIndexType) bool {
	return p.t.dirtyChunks.Contains(p.requestIndexOffset() + chunk)
}

func (p *Piece) chunkIndexSpec(chunk chunkIndexType) ChunkSpec {
	return chunkIndexSpec(pp.Integer(chunk), p.length(), p.chunkSize())
}

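// The number of bytes already written for this piece. The final chunk may be shorter than the
// regular chunk size, so it's counted separately.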
func (p *Piece) numDirtyBytes() (ret pp.Integer) {
	// defer func() {
	// 	if ret > p.length() {
	// 		panic("too many dirty bytes")
	// 	}
	// }()
	numRegularDirtyChunks := p.numDirtyChunks()
	if p.chunkIndexDirty(p.numChunks() - 1) {
		numRegularDirtyChunks--
		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
	}
	ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
	return
}

func (p *Piece) length() pp.Integer {
	return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
	return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() chunkIndexType {
	return p.numChunks() - 1
}

func (p *Piece) bytesLeft() (ret pp.Integer) {
	if p.t.pieceComplete(p.index) {
		return 0
	}
	return p.length() - p.numDirtyBytes()
}

// Forces the piece data to be rehashed. Blocks until the hash check completes.
func (p *Piece) VerifyData() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	target := p.numVerifies + 1
	if p.hashing {
		// A check is already underway and may not include the latest data, so wait for one more.
		target++
	}
	// log.Printf("target: %d", target)
	p.t.queuePieceCheck(p.index)
	for p.numVerifies < target {
		// log.Printf("got %d verifies", p.numVerifies)
		p.t.cl.event.Wait()
	}
}

func (p *Piece) queuedForHash() bool {
	return p.t.piecesQueuedForHash.Get(bitmap.BitIndex(p.index))
}

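// Byte offsets of this piece within the whole torrent. The end offset is computed from the
// usual piece size, so for the final piece it can extend past the end of the torrent data.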
func (p *Piece) torrentBeginOffset() int64 {
	return int64(p.index) * p.t.info.PieceLength
}

func (p *Piece) torrentEndOffset() int64 {
	return p.torrentBeginOffset() + int64(p.t.usualPieceSize())
}

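// SetPriority sets the piece-specific priority, which is combined with file and reader
// priorities to determine the effective priority.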
func (p *Piece) SetPriority(prio PiecePriority) {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.priority = prio
	p.t.updatePiecePriority(p.index, "Piece.SetPriority")
}

// This is the priority based only on piece, file and reader priorities.
func (p *Piece) purePriority() (ret PiecePriority) {
	for _, f := range p.files {
		ret.Raise(f.prio)
	}
	if p.t.readerNowPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityNow)
	}
	// if t._readerNowPieces.Contains(piece - 1) {
	// 	return PiecePriorityNext
	// }
	if p.t.readerReadaheadPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityReadahead)
	}
	ret.Raise(p.priority)
	return
}

func (p *Piece) ignoreForRequests() bool {
	return p.hashing || p.marking || !p.haveHash() || p.t.pieceComplete(p.index) || p.queuedForHash()
}

// The priority adjusted for piece state, such as completion and hashing.
func (p *Piece) effectivePriority() (ret PiecePriority) {
	if p.ignoreForRequests() {
		return PiecePriorityNone
	}
	return p.purePriority()
}

// Tells the Client to refetch the completion status from storage, updating priority etc. if
// necessary. Might be useful if you know the state of the piece data has changed externally.
func (p *Piece) UpdateCompletion() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.t.updatePieceCompletion(p.index)
}

func (p *Piece) completion() (ret storage.Completion) {
	ret.Complete = p.t.pieceComplete(p.index)
	ret.Ok = p.storageCompletionOk
	return
}

func (p *Piece) allChunksDirty() bool {
	return p.numDirtyChunks() == p.numChunks()
}

func (p *Piece) State() PieceState {
	return p.t.PieceState(p.index)
}

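// The index of this piece's first chunk in the Torrent-wide request/chunk index space.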
func (p *Piece) requestIndexOffset() RequestIndex {
	return p.t.pieceRequestIndexOffset(p.index)
}

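// How many connected peers have this piece: the peers known to have every piece, plus the
// per-piece relative adjustment.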
func (p *Piece) availability() int {
	return len(p.t.connsWithAllPieces) + p.relativeAvailability
}

// For v2 torrents, files are aligned to pieces, so there should always be only a single file for
// a given piece.
func (p *Piece) mustGetOnlyFile() *File {
	if len(p.files) != 1 {
		panic(len(p.files))
	}
	return p.files[0]
}

// Sets the v2 piece hash, queuing initial piece checks if appropriate.
func (p *Piece) setV2Hash(v2h [32]byte) {
	// See Torrent.onSetInfo. We want to trigger an initial check if appropriate, if we didn't yet
	// have a piece hash (can occur with v2 when we don't start with piece layers).
	if !p.hashV2.Set(v2h).Ok && p.hash == nil {
		p.t.updatePieceCompletion(p.index)
		p.t.queueInitialPieceCheck(p.index)
	}
}

// Can't do certain things if we don't know the piece hash.
func (p *Piece) haveHash() bool {
	return p.hash != nil || p.hashV2.Ok
}

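// Whether it's worth sending request-type messages for this piece to this peer: either we still
// need the piece's hashes (and the peer supports requesting them), or the piece itself is still
// wanted for requests.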
func pieceStateAllowsMessageWrites(p *Piece, pc *PeerConn) bool {
	return (pc.shouldRequestHashes() && !p.haveHash()) || !p.t.ignorePieceForRequests(p.index)
}