7 "github.com/anacrolix/missinggo/v2/bitmap"
9 "github.com/anacrolix/torrent/metainfo"
10 pp "github.com/anacrolix/torrent/peer_protocol"
11 "github.com/anacrolix/torrent/storage"

// Describes the importance of obtaining a particular piece.
type piecePriority byte
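
// Raise bumps the priority to maybe if that is higher than the current
// value, reporting whether the priority changed.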
func (pp *piecePriority) Raise(maybe piecePriority) bool {
    if maybe > *pp {
        *pp = maybe
        return true
    }
    return false
}

// Priority for use in PriorityBitmap
func (me piecePriority) BitmapPriority() int {
    // PriorityBitmap treats lower values as more important, so negate.
    return -int(me)
}

const (
    PiecePriorityNone      piecePriority = iota // Not wanted. Must be the zero value.
    PiecePriorityNormal                         // Wanted.
    PiecePriorityHigh                           // Wanted a lot.
    PiecePriorityReadahead                      // May be required soon.
    // Succeeds a piece where a read occurred. Currently the same as Now,
    // apparently due to issues with caching.
    PiecePriorityNext
    PiecePriorityNow // A Reader is reading in this piece. Highest urgency.
)

type Piece struct {
    // The completed piece SHA1 hash, from the metainfo "pieces" field.
    hash  *metainfo.Hash
    t     *Torrent
    index pieceIndex
    files []*File
    // Chunks we've written to since the last check. The chunk offset and
    // length can be determined by the request chunkSize in use.
    _dirtyChunks bitmap.Bitmap

    numVerifies         int64
    hashing             bool
    storageCompletionOk bool
    publicPieceState    PieceState
    priority            piecePriority

    // This can be locked when the Client lock is taken, but probably not vice versa.
    pendingWritesMutex sync.Mutex
    pendingWrites      int
    noPendingWrites    sync.Cond

    // Connections that have written data to this piece since its last check.
    // This can include connections that have closed.
    dirtiers map[*peer]struct{}
}

func (p *Piece) String() string {
    return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
}

func (p *Piece) Info() metainfo.Piece {
    return p.t.info.Piece(int(p.index))
}

func (p *Piece) Storage() storage.Piece {
    return p.t.storage.Piece(p.Info())
}

func (p *Piece) pendingChunkIndex(chunkIndex int) bool {
    return !p._dirtyChunks.Contains(chunkIndex)
}

func (p *Piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
    return p.pendingChunkIndex(chunkIndex(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
    return p._dirtyChunks.Len() != 0
}

func (p *Piece) numDirtyChunks() pp.Integer {
    return pp.Integer(p._dirtyChunks.Len())
}
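
// unpendChunkIndex marks chunk i as dirty (received), so it is no longer
// considered pending for request purposes.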
func (p *Piece) unpendChunkIndex(i int) {
    p._dirtyChunks.Add(i)
}

func (p *Piece) pendChunkIndex(i int) {
    p._dirtyChunks.Remove(i)
}

func (p *Piece) numChunks() pp.Integer {
    return p.t.pieceNumChunks(p.index)
}
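
// incrementPendingWrites records that a write of received data to this
// piece's storage is in flight. waitNoPendingWrites blocks until all such
// writes have completed.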
func (p *Piece) incrementPendingWrites() {
    p.pendingWritesMutex.Lock()
    p.pendingWrites++
    p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
    p.pendingWritesMutex.Lock()
    if p.pendingWrites == 0 {
        panic("assertion")
    }
    p.pendingWrites--
    if p.pendingWrites == 0 {
        p.noPendingWrites.Broadcast()
    }
    p.pendingWritesMutex.Unlock()
}

func (p *Piece) waitNoPendingWrites() {
    p.pendingWritesMutex.Lock()
    for p.pendingWrites != 0 {
        p.noPendingWrites.Wait()
    }
    p.pendingWritesMutex.Unlock()
}

func (p *Piece) chunkIndexDirty(chunk pp.Integer) bool {
    return p._dirtyChunks.Contains(bitmap.BitIndex(chunk))
}

func (p *Piece) chunkIndexSpec(chunk pp.Integer) chunkSpec {
    return chunkIndexSpec(chunk, p.length(), p.chunkSize())
}
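
// chunkIndexRequest builds the wire-level request for the given chunk of
// this piece.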
func (p *Piece) chunkIndexRequest(chunkIndex pp.Integer) request {
    return request{
        pp.Integer(p.index),
        chunkIndexSpec(chunkIndex, p.length(), p.chunkSize()),
    }
}
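
// numDirtyBytes returns how many bytes of the piece have been written since
// the last hash check, allowing for a shorter final chunk.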
func (p *Piece) numDirtyBytes() (ret pp.Integer) {
    // defer func() {
    //     if ret > p.length() {
    //         panic("too many dirty bytes")
    //     }
    // }()
    numRegularDirtyChunks := p.numDirtyChunks()
    if p.chunkIndexDirty(p.numChunks() - 1) {
        numRegularDirtyChunks--
        ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
    }
    ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
    return
}

func (p *Piece) length() pp.Integer {
    return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
    return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() pp.Integer {
    return p.numChunks() - 1
}
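
// bytesLeft returns the number of bytes still needed for this piece, or zero
// if the piece is already complete.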
func (p *Piece) bytesLeft() (ret pp.Integer) {
    if p.t.pieceComplete(p.index) {
        return 0
    }
    return p.length() - p.numDirtyBytes()
}

// Forces the piece data to be rehashed.
func (p *Piece) VerifyData() {
    p.t.cl.lock()
    defer p.t.cl.unlock()
    target := p.numVerifies + 1
    if p.hashing {
        target++
    }
    //log.Printf("target: %d", target)
    p.t.queuePieceCheck(p.index)
    for p.numVerifies < target {
        //log.Printf("got %d verifies", p.numVerifies)
        p.t.cl.event.Wait()
    }
}
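
// queuedForHash reports whether the piece is currently waiting for a hash
// check.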
func (p *Piece) queuedForHash() bool {
    return p.t.piecesQueuedForHash.Get(bitmap.BitIndex(p.index))
}
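
// torrentBeginOffset returns the byte offset within the torrent at which
// this piece begins.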
func (p *Piece) torrentBeginOffset() int64 {
    return int64(p.index) * p.t.info.PieceLength
}

func (p *Piece) torrentEndOffset() int64 {
    return p.torrentBeginOffset() + int64(p.length())
}
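
// SetPriority sets the piece's own priority and recalculates its effective
// priority on the torrent.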
func (p *Piece) SetPriority(prio piecePriority) {
    p.t.cl.lock()
    defer p.t.cl.unlock()
    p.priority = prio
    p.t.updatePiecePriority(p.index)
}
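
// uncachedPriority recomputes the piece's effective priority from file
// priorities, reader positions, and the piece's own priority, without using
// any cached value.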
func (p *Piece) uncachedPriority() (ret piecePriority) {
    if p.t.pieceComplete(p.index) || p.t.pieceQueuedForHash(p.index) || p.t.hashingPiece(p.index) {
        return PiecePriorityNone
    }
    for _, f := range p.files {
        ret.Raise(f.prio)
    }
    if p.t.readerNowPieces().Contains(int(p.index)) {
        ret.Raise(PiecePriorityNow)
    }
    // if t._readerNowPieces.Contains(piece - 1) {
    //     return PiecePriorityNext
    // }
    if p.t.readerReadaheadPieces().Contains(bitmap.BitIndex(p.index)) {
        ret.Raise(PiecePriorityReadahead)
    }
    ret.Raise(p.priority)
    return
}

// Tells the Client to refetch the completion status from storage, updating priority etc. if
// necessary. Might be useful if you know the state of the piece data has changed externally.
func (p *Piece) UpdateCompletion() {
    p.t.cl.lock()
    defer p.t.cl.unlock()
    p.t.updatePieceCompletion(p.index)
}
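
// completion returns the piece's storage completion state. Ok reports
// whether that state could be determined.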
func (p *Piece) completion() (ret storage.Completion) {
    ret.Complete = p.t.pieceComplete(p.index)
    ret.Ok = p.storageCompletionOk
    return
}

func (p *Piece) allChunksDirty() bool {
    return p._dirtyChunks.Len() == int(p.numChunks())
}

func (p *Piece) requestStrategyPiece() requestStrategyPiece {
    // Assumes the Torrent exposes its request-strategy view via requestStrategyTorrent().
    return p.t.requestStrategyTorrent().Piece(int(p.index))
}

func (p *Piece) dirtyChunks() bitmap.Bitmap {
    return p._dirtyChunks
}
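
// State returns the piece's current public state.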
func (p *Piece) State() PieceState {
    return p.t.PieceState(p.index)
}