7 "github.com/anacrolix/chansync"
8 "github.com/anacrolix/missinggo/v2/bitmap"
10 "github.com/anacrolix/torrent/metainfo"
11 pp "github.com/anacrolix/torrent/peer_protocol"
12 "github.com/anacrolix/torrent/storage"
type Piece struct {
	// The completed piece SHA1 hash, from the metainfo "pieces" field.
	hash  *metainfo.Hash
	t     *Torrent
	index pieceIndex
	files []*File

	// Chunks we've written to since the last check. The chunk offset and
	// length can be determined by the request chunkSize in use.
	_dirtyChunks bitmap.Bitmap

	readerCond chansync.BroadcastCond

	numVerifies         int64
	hashing             bool
	storageCompletionOk bool

	publicPieceState PieceState
	priority         piecePriority

	// This can be locked when the Client lock is taken, but probably not vice versa.
	pendingWritesMutex sync.Mutex
	pendingWrites      int
	noPendingWrites    sync.Cond

	// Connections that have written data to this piece since its last check.
	// This can include connections that have closed.
	dirtiers map[*Peer]struct{}
}

func (p *Piece) String() string {
	return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
}

func (p *Piece) Info() metainfo.Piece {
	return p.t.info.Piece(int(p.index))
}

func (p *Piece) Storage() storage.Piece {
	return p.t.storage.Piece(p.Info())
}

// A chunk is pending if it has not been written since the last hash check
// (its dirty bit is unset).
func (p *Piece) pendingChunkIndex(chunkIndex int) bool {
	return !p._dirtyChunks.Contains(bitmap.BitIndex(chunkIndex))
}

func (p *Piece) pendingChunk(cs ChunkSpec, chunkSize pp.Integer) bool {
	return p.pendingChunkIndex(chunkIndex(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
	return p._dirtyChunks.Len() != 0
}

func (p *Piece) numDirtyChunks() pp.Integer {
	return pp.Integer(p._dirtyChunks.Len())
}

// Marks the chunk as written (dirty) and wakes any waiting readers.
func (p *Piece) unpendChunkIndex(i int) {
	p._dirtyChunks.Add(bitmap.BitIndex(i))
	p.readerCond.Broadcast()
}

// Marks the chunk as pending (not written) again.
func (p *Piece) pendChunkIndex(i int) {
	p._dirtyChunks.Remove(bitmap.BitIndex(i))
}

func (p *Piece) numChunks() pp.Integer {
	return p.t.pieceNumChunks(p.index)
}

func (p *Piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	p.pendingWrites++
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	if p.pendingWrites == 0 {
		// The counter must never go negative.
		panic("assertion")
	}
	p.pendingWrites--
	if p.pendingWrites == 0 {
		p.noPendingWrites.Broadcast()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
	p.pendingWritesMutex.Unlock()
}

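// A sketch of the intended pendingWrites protocol (illustrative only; the
// real call sites live elsewhere in this package): each in-flight storage
// write is bracketed by increment/decrement, and the hasher waits until the
// piece is quiescent before reading it back.
//
//	p.incrementPendingWrites()
//	go func() {
//		defer p.decrementPendingWrites()
//		// ... write chunk data to storage ...
//	}()
//
//	// Before hashing:
//	p.waitNoPendingWrites()
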
func (p *Piece) chunkIndexDirty(chunk pp.Integer) bool {
	return p._dirtyChunks.Contains(bitmap.BitIndex(chunk))
}

func (p *Piece) chunkIndexSpec(chunk pp.Integer) ChunkSpec {
	return chunkIndexSpec(chunk, p.length(), p.chunkSize())
}

func (p *Piece) numDirtyBytes() (ret pp.Integer) {
	// defer func() {
	// 	if ret > p.length() {
	// 		panic("too many dirty bytes")
	// 	}
	// }()
	numRegularDirtyChunks := p.numDirtyChunks()
	// The final chunk may be shorter than the rest, so count it separately
	// using its actual spec length.
	if p.chunkIndexDirty(p.numChunks() - 1) {
		numRegularDirtyChunks--
		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
	}
	ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
	return
}

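// Worked example for numDirtyBytes (hypothetical sizes): a 40 KiB piece with
// a 16 KiB chunk size has chunks of 16, 16, and 8 KiB. If chunks 0 and 2 are
// dirty, the short final chunk contributes its ChunkSpec length (8 KiB) and
// the remaining dirty chunk counts at full chunk size, giving 24 KiB.
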
func (p *Piece) length() pp.Integer {
	return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
	return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() pp.Integer {
	return p.numChunks() - 1
}

func (p *Piece) bytesLeft() (ret pp.Integer) {
	if p.t.pieceComplete(p.index) {
		return 0
	}
	return p.length() - p.numDirtyBytes()
}

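// Continuing the example above: with 24 KiB dirty in an incomplete 40 KiB
// piece, bytesLeft returns 16 KiB.
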
// Forces the piece data to be rehashed. Blocks until the new hash check
// completes.
func (p *Piece) VerifyData() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	target := p.numVerifies + 1
	if p.hashing {
		// A check is already underway; wait for the one after it.
		target++
	}
	p.t.queuePieceCheck(p.index)
	for p.numVerifies < target {
		p.t.cl.event.Wait()
	}
}

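// Illustrative caller (not from this file; assumes the exported
// Torrent.Piece accessor), e.g. after repairing piece data in storage
// out-of-band:
//
//	t.Piece(i).VerifyData() // blocks until the re-hash completes
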
func (p *Piece) queuedForHash() bool {
	return p.t.piecesQueuedForHash.Get(bitmap.BitIndex(p.index))
}

func (p *Piece) torrentBeginOffset() int64 {
	return int64(p.index) * p.t.info.PieceLength
}

func (p *Piece) torrentEndOffset() int64 {
	return p.torrentBeginOffset() + int64(p.length())
}

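// For example, with info.PieceLength = 262144 (256 KiB), piece 3 begins at
// torrent byte offset 3*262144 = 786432 and, assuming it is not the final
// (possibly shorter) piece, ends at 1048576.
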
func (p *Piece) SetPriority(prio piecePriority) {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.priority = prio
	p.t.updatePiecePriority(p.index)
}

// The maximum of the file, reader, and explicitly set priorities for this piece.
func (p *Piece) purePriority() (ret piecePriority) {
	for _, f := range p.files {
		ret.Raise(f.prio)
	}
	if p.t.readerNowPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityNow)
	}
	// if t._readerNowPieces.Contains(piece - 1) {
	// 	return PiecePriorityNext
	// }
	if p.t.readerReadaheadPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityReadahead)
	}
	ret.Raise(p.priority)
	return
}

func (p *Piece) uncachedPriority() (ret piecePriority) {
	if p.t.pieceComplete(p.index) || p.t.pieceQueuedForHash(p.index) || p.t.hashingPiece(p.index) {
		return PiecePriorityNone
	}
	return p.purePriority()
}

// Tells the Client to refetch the completion status from storage, updating priority etc. if
// necessary. Might be useful if you know the state of the piece data has changed externally.
func (p *Piece) UpdateCompletion() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.t.updatePieceCompletion(p.index)
}

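// Hypothetical use of UpdateCompletion (not a call site in this package):
// after writing verified piece data directly into the torrent's storage from
// another source, a caller could do:
//
//	t.Piece(i).UpdateCompletion()
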
func (p *Piece) completion() (ret storage.Completion) {
	ret.Complete = p.t.pieceComplete(p.index)
	ret.Ok = p.storageCompletionOk
	return
}

func (p *Piece) allChunksDirty() bool {
	return p._dirtyChunks.Len() == bitmap.BitRange(p.numChunks())
}

func (p *Piece) State() PieceState {
	return p.t.PieceState(p.index)
}

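// iterUndirtiedChunks calls f with the spec of every chunk that has not been
// written since the piece's last hash check, e.g. (illustrative only; the
// real request logic lives elsewhere in the package):
//
//	p.iterUndirtiedChunks(func(cs ChunkSpec) {
//		// cs describes a chunk still needed for this piece.
//	})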
func (p *Piece) iterUndirtiedChunks(f func(cs ChunkSpec)) {
	for i := pp.Integer(0); i < p.numChunks(); i++ {
		if p.chunkIndexDirty(i) {
			continue
		}
		f(p.chunkIndexSpec(i))
	}
}