7 "github.com/anacrolix/missinggo/v2/bitmap"
9 "github.com/anacrolix/torrent/metainfo"
10 pp "github.com/anacrolix/torrent/peer_protocol"
11 "github.com/anacrolix/torrent/storage"
// Describes the importance of obtaining a particular piece.
type piecePriority byte

func (pp *piecePriority) Raise(maybe piecePriority) bool {
	if maybe > *pp {
		*pp = maybe
		return true
	}
	return false
}

// Priority for use in PriorityBitmap
func (me piecePriority) BitmapPriority() int {
	return -int(me)
}

const (
	PiecePriorityNone      piecePriority = iota // Not wanted. Must be the zero value.
	PiecePriorityNormal                         // Wanted.
	PiecePriorityHigh                           // Wanted a lot.
	PiecePriorityReadahead                      // May be required soon.
	// Succeeds a piece where a read occurred. Currently the same as Now,
	// apparently due to issues with caching.
	PiecePriorityNext
	PiecePriorityNow // A Reader is reading in this piece. Highest urgency.
)

type Piece struct {
	// The completed piece SHA1 hash, from the metainfo "pieces" field.
	hash  *metainfo.Hash
	t     *Torrent
	index pieceIndex
	files []*File
	// Chunks we've written to since the last check. The chunk offset and
	// length can be determined by the request chunkSize in use.
	_dirtyChunks bitmap.Bitmap

	numVerifies         int64
	hashing             bool
	marking             bool
	storageCompletionOk bool

	publicPieceState PieceState
	priority         piecePriority

	// This can be locked when the Client lock is taken, but probably not vice versa.
	pendingWritesMutex sync.Mutex
	pendingWrites      int
	noPendingWrites    sync.Cond

	// Connections that have written data to this piece since its last check.
	// This can include connections that have closed.
	dirtiers map[*Peer]struct{}
}

func (p *Piece) String() string {
	return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
}

func (p *Piece) Info() metainfo.Piece {
	return p.t.info.Piece(int(p.index))
}

func (p *Piece) Storage() storage.Piece {
	return p.t.storage.Piece(p.Info())
}

func (p *Piece) pendingChunkIndex(chunkIndex int) bool {
	return !p._dirtyChunks.Contains(chunkIndex)
}

func (p *Piece) pendingChunk(cs ChunkSpec, chunkSize pp.Integer) bool {
	return p.pendingChunkIndex(chunkIndex(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
	return p._dirtyChunks.Len() != 0
}

func (p *Piece) numDirtyChunks() pp.Integer {
	return pp.Integer(p._dirtyChunks.Len())
}

func (p *Piece) unpendChunkIndex(i int) {
	p._dirtyChunks.Add(i)
	p.t.tickleReaders()
}

func (p *Piece) pendChunkIndex(i int) {
	p._dirtyChunks.Remove(i)
}

func (p *Piece) numChunks() pp.Integer {
	return p.t.pieceNumChunks(p.index)
}

func (p *Piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	p.pendingWrites++
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	if p.pendingWrites == 0 {
		panic("assertion")
	}
	p.pendingWrites--
	if p.pendingWrites == 0 {
		p.noPendingWrites.Broadcast()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) chunkIndexDirty(chunk pp.Integer) bool {
	return p._dirtyChunks.Contains(bitmap.BitIndex(chunk))
}

func (p *Piece) chunkIndexSpec(chunk pp.Integer) ChunkSpec {
	return chunkIndexSpec(chunk, p.length(), p.chunkSize())
}

func (p *Piece) chunkIndexRequest(chunkIndex pp.Integer) Request {
	return Request{
		pp.Integer(p.index),
		chunkIndexSpec(chunkIndex, p.length(), p.chunkSize()),
	}
}

func (p *Piece) numDirtyBytes() (ret pp.Integer) {
	// defer func() {
	// 	if ret > p.length() {
	// 		panic("too many dirty bytes")
	// 	}
	// }()
	numRegularDirtyChunks := p.numDirtyChunks()
	if p.chunkIndexDirty(p.numChunks() - 1) {
		numRegularDirtyChunks--
		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
	}
	ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
	return
}

func (p *Piece) length() pp.Integer {
	return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
	return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() pp.Integer {
	return p.numChunks() - 1
}

func (p *Piece) bytesLeft() (ret pp.Integer) {
	if p.t.pieceComplete(p.index) {
		return 0
	}
	return p.length() - p.numDirtyBytes()
}

// Forces the piece data to be rehashed.
func (p *Piece) VerifyData() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	target := p.numVerifies + 1
	if p.hashing {
		target++
	}
	//log.Printf("target: %d", target)
	p.t.queuePieceCheck(p.index)
	for {
		//log.Printf("got %d verifies", p.numVerifies)
		if p.numVerifies >= target {
			break
		}
		p.t.cl.event.Wait()
	}
}

func (p *Piece) queuedForHash() bool {
	return p.t.piecesQueuedForHash.Get(bitmap.BitIndex(p.index))
}

func (p *Piece) torrentBeginOffset() int64 {
	return int64(p.index) * p.t.info.PieceLength
}

func (p *Piece) torrentEndOffset() int64 {
	return p.torrentBeginOffset() + int64(p.length())
}

func (p *Piece) SetPriority(prio piecePriority) {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.priority = prio
	p.t.updatePiecePriority(p.index)
}

func (p *Piece) uncachedPriority() (ret piecePriority) {
	if p.t.pieceComplete(p.index) || p.t.pieceQueuedForHash(p.index) || p.t.hashingPiece(p.index) {
		return PiecePriorityNone
	}
	for _, f := range p.files {
		ret.Raise(f.prio)
	}
	if p.t.readerNowPieces().Contains(int(p.index)) {
		ret.Raise(PiecePriorityNow)
	}
	// if t._readerNowPieces.Contains(piece - 1) {
	// 	return PiecePriorityNext
	// }
	if p.t.readerReadaheadPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityReadahead)
	}
	ret.Raise(p.priority)
	return
}

// Tells the Client to refetch the completion status from storage, updating priority etc. if
// necessary. Might be useful if you know the state of the piece data has changed externally.
func (p *Piece) UpdateCompletion() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.t.updatePieceCompletion(p.index)
}

func (p *Piece) completion() (ret storage.Completion) {
	ret.Complete = p.t.pieceComplete(p.index)
	ret.Ok = p.storageCompletionOk
	return
}

func (p *Piece) allChunksDirty() bool {
	return p._dirtyChunks.Len() == int(p.numChunks())
}

func (p *Piece) requestStrategyPiece() requestStrategyPiece {
	return p
}

func (p *Piece) dirtyChunks() bitmap.Bitmap {
	return p._dirtyChunks
}

func (p *Piece) State() PieceState {
	return p.t.PieceState(p.index)
}

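// The sketch below is illustrative only and is not part of the upstream file:
// it shows how the exported methods above are typically combined from caller
// code. It assumes the caller already holds a *Piece, normally obtained from a
// Torrent; everything it calls is defined in this file.
func examplePieceUsage(p *Piece) {
	// Request this piece ahead of normally-prioritised pieces.
	p.SetPriority(PiecePriorityHigh)

	// Force a re-hash, e.g. if the backing storage may have been modified
	// externally. This blocks until the verification completes.
	p.VerifyData()

	// Re-read completion from storage and report the piece's public state.
	p.UpdateCompletion()
	fmt.Printf("%v: %+v\n", p, p.State())
}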