package torrent

import (
	"encoding/gob"
	"fmt"
	"sync"

	"github.com/RoaringBitmap/roaring"
	"github.com/anacrolix/chansync"
	"github.com/anacrolix/missinggo/v2/bitmap"
	request_strategy "github.com/anacrolix/torrent/request-strategy"

	"github.com/anacrolix/torrent/metainfo"
	pp "github.com/anacrolix/torrent/peer_protocol"
	"github.com/anacrolix/torrent/storage"
)

type Piece struct {
	// The expected SHA1 hash of the piece once complete, from the metainfo "pieces" field.
	hash  *metainfo.Hash
	t     *Torrent
	index pieceIndex
	files []*File

	// Signalled when chunk data is written to this piece, waking waiting readers.
	readerCond chansync.BroadcastCond

	// The number of hash checks performed on this piece so far.
	numVerifies int64
	hashing     bool
	// Whether the piece is currently being marked complete or incomplete in storage.
	marking             bool
	storageCompletionOk bool

	publicPieceState PieceState
	priority         piecePriority
	availability     int64

	// This can be locked when the Client lock is taken, but probably not vice versa.
	pendingWritesMutex sync.Mutex
	pendingWrites      int
	// Signals pendingWrites reaching zero. Its L must be set to
	// &pendingWritesMutex when the Piece is initialized (done elsewhere in
	// this package).
	noPendingWrites sync.Cond

	// Connections that have written data to this piece since its last check.
	// This can include connections that have closed.
	dirtiers map[*Peer]struct{}
}
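
// Terminology note: a chunk is "pending" while it still needs to be
// requested, and "dirty" once its data has been received but not yet
// hash-checked, so pending is simply the complement of dirty within a piece.
// Dirty state is stored centrally in Torrent.dirtyChunks, keyed by
// torrent-wide request index (see requestIndexOffset below).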

func (p *Piece) String() string {
	return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
}

func (p *Piece) Info() metainfo.Piece {
	return p.t.info.Piece(int(p.index))
}

func (p *Piece) Storage() storage.Piece {
	return p.t.storage.Piece(p.Info())
}

// Whether the chunk still needs to be requested: the complement of dirty.
func (p *Piece) pendingChunkIndex(chunkIndex chunkIndexType) bool {
	return !p.chunkIndexDirty(chunkIndex)
}

func (p *Piece) pendingChunk(cs ChunkSpec, chunkSize pp.Integer) bool {
	return p.pendingChunkIndex(chunkIndexFromChunkSpec(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
	return p.numDirtyChunks() != 0
}

func (p *Piece) numDirtyChunks() chunkIndexType {
	return chunkIndexType(roaringBitmapRangeCardinality(
		&p.t.dirtyChunks,
		p.requestIndexOffset(),
		p.t.pieceRequestIndexOffset(p.index+1)))
}

// Marks the chunk as no longer pending by setting its bit in the torrent-wide
// dirty-chunks bitmap, and wakes any readers waiting on new data.
func (p *Piece) unpendChunkIndex(i chunkIndexType) {
	p.t.dirtyChunks.Add(p.requestIndexOffset() + i)
	p.readerCond.Broadcast()
}

// Marks the chunk as pending again by clearing its dirty bit.
func (p *Piece) pendChunkIndex(i RequestIndex) {
	p.t.dirtyChunks.Remove(p.requestIndexOffset() + i)
}
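
// Chunks are addressed torrent-wide by request index: the piece's offset into
// Torrent.dirtyChunks plus the chunk's index within the piece. For
// illustration only, assuming hypothetical 256 KiB pieces and 16 KiB chunks,
// there are 16 chunks per piece, so chunk 5 of piece 3 maps to request index:
//
//	3*16 + 5 == 53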

func (p *Piece) numChunks() chunkIndexType {
	return p.t.pieceNumChunks(p.index)
}

func (p *Piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	p.pendingWrites++
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	if p.pendingWrites == 0 {
		panic("assertion failed: pendingWrites underflow")
	}
	p.pendingWrites--
	if p.pendingWrites == 0 {
		p.noPendingWrites.Broadcast()
	}
	p.pendingWritesMutex.Unlock()
}

// Blocks until all outstanding writes to this piece have completed.
func (p *Piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
	p.pendingWritesMutex.Unlock()
}
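
// A sketch of the intended pairing, assuming a hypothetical asynchronous
// write path (the real call sites live elsewhere in this package): the count
// is raised before handing data to storage and lowered when the write
// completes, so a hash check can first wait for the piece to be quiescent.
//
//	p.incrementPendingWrites()
//	go func() {
//		defer p.decrementPendingWrites()
//		_, _ = p.Storage().WriteAt(data, off) // hypothetical data/off
//	}()
//	// ... later, before hashing:
//	p.waitNoPendingWrites()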

func (p *Piece) chunkIndexDirty(chunk chunkIndexType) bool {
	return p.t.dirtyChunks.Contains(p.requestIndexOffset() + chunk)
}

func (p *Piece) chunkIndexSpec(chunk chunkIndexType) ChunkSpec {
	return chunkIndexSpec(pp.Integer(chunk), p.length(), p.chunkSize())
}

// The number of bytes in this piece that have been received but not yet
// hash-checked. The last chunk may be shorter than the regular chunk size, so
// it is accounted for separately.
func (p *Piece) numDirtyBytes() (ret pp.Integer) {
	// defer func() {
	//	if ret > p.length() {
	//		panic("too many dirty bytes")
	//	}
	// }()
	numRegularDirtyChunks := p.numDirtyChunks()
	if p.chunkIndexDirty(p.numChunks() - 1) {
		numRegularDirtyChunks--
		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
	}
	ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
	return
}
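
// Worked example with hypothetical sizes: a 40 KiB piece with a 16 KiB chunk
// size has 3 chunks of lengths 16, 16 and 8 KiB. If chunks 0 and 2 are dirty,
// the last chunk contributes its true 8 KiB length and the remaining dirty
// chunk contributes one full chunk size:
//
//	ret = 8 KiB + 1*16 KiB = 24 KiB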

func (p *Piece) length() pp.Integer {
	return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
	return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() chunkIndexType {
	return p.numChunks() - 1
}

func (p *Piece) bytesLeft() (ret pp.Integer) {
	if p.t.pieceComplete(p.index) {
		return 0
	}
	return p.length() - p.numDirtyBytes()
}

// Forces the piece data to be rehashed. Blocks until a hash check begun after
// this call has completed.
func (p *Piece) VerifyData() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	target := p.numVerifies + 1
	if p.hashing {
		// A hash check is already underway; wait for one more after it.
		target++
	}
	p.t.queuePieceCheck(p.index)
	for p.numVerifies < target {
		p.t.cl.event.Wait()
	}
}
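
// Hypothetical usage, e.g. after importing piece data into storage out of
// band (client and torrent setup are assumed):
//
//	t := ... // a *Torrent with info available
//	for i := 0; i < t.NumPieces(); i++ {
//		t.Piece(i).VerifyData() // blocks until the rehash completes
//	}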

func (p *Piece) queuedForHash() bool {
	return p.t.piecesQueuedForHash.Get(bitmap.BitIndex(p.index))
}

func (p *Piece) torrentBeginOffset() int64 {
	return int64(p.index) * p.t.info.PieceLength
}

func (p *Piece) torrentEndOffset() int64 {
	return p.torrentBeginOffset() + int64(p.length())
}

func (p *Piece) SetPriority(prio piecePriority) {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.priority = prio
	p.t.updatePiecePriority(p.index)
}

// The piece's priority before accounting for hashing, marking or completion
// state: the highest of the file priorities, the reader-driven priorities,
// and the priority set directly on the piece.
func (p *Piece) purePriority() (ret piecePriority) {
	for _, f := range p.files {
		ret.Raise(f.prio)
	}
	if p.t.readerNowPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityNow)
	}
	// if t._readerNowPieces.Contains(piece - 1) {
	//	return PiecePriorityNext
	// }
	if p.t.readerReadaheadPieces().Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityReadahead)
	}
	ret.Raise(p.priority)
	return
}
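
// Raise keeps the maximum priority seen so far, so the result is the highest
// of all contributors. For example, if a file spanning the piece is set to
// PiecePriorityHigh and the piece also falls in a reader's readahead window,
// the greater of PiecePriorityHigh and PiecePriorityReadahead (the latter, in
// this package's ordering) is returned.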

// The priority the piece should have, or PiecePriorityNone while the piece is
// being hashed or marked, is queued for hashing, or is already complete.
func (p *Piece) uncachedPriority() (ret piecePriority) {
	if p.hashing || p.marking || p.t.pieceComplete(p.index) || p.queuedForHash() {
		return PiecePriorityNone
	}
	return p.purePriority()
}

// Tells the Client to refetch the completion status from storage, updating priority etc. if
// necessary. Might be useful if you know the state of the piece data has changed externally.
func (p *Piece) UpdateCompletion() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.t.updatePieceCompletion(p.index)
}

func (p *Piece) completion() (ret storage.Completion) {
	ret.Complete = p.t.pieceComplete(p.index)
	ret.Ok = p.storageCompletionOk
	return
}

func (p *Piece) allChunksDirty() bool {
	return p.numDirtyChunks() == p.numChunks()
}

func (p *Piece) State() PieceState {
	return p.t.PieceState(p.index)
}

// Registered so values containing a request_strategy.ChunksIter can be
// gob-encoded, presumably for capturing request-strategy inputs for offline
// analysis (an assumption based on the concrete type crossing that API).
func init() {
	gob.Register(undirtiedChunksIter{})
}

// Iterates over the chunks in a piece that are not dirty, using the
// torrent-wide dirty bitmap. Indices passed to the callback are relative to
// the start of the piece.
type undirtiedChunksIter struct {
	TorrentDirtyChunks *roaring.Bitmap
	StartRequestIndex  RequestIndex
	EndRequestIndex    RequestIndex
}

func (me undirtiedChunksIter) Iter(f func(chunkIndexType)) {
	it := me.TorrentDirtyChunks.Iterator()
	startIndex := me.StartRequestIndex
	endIndex := me.EndRequestIndex
	it.AdvanceIfNeeded(startIndex)
	lastDirty := startIndex - 1
	for it.HasNext() {
		next := it.Next()
		if next >= endIndex {
			break
		}
		// Emit the run of undirtied indices between the previous dirty bit
		// and this one.
		for index := lastDirty + 1; index < next; index++ {
			f(index - startIndex)
		}
		lastDirty = next
	}
	// Emit the tail after the last dirty bit in range.
	for index := lastDirty + 1; index < endIndex; index++ {
		f(index - startIndex)
	}
}
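
// Worked example (hypothetical values): with StartRequestIndex 100,
// EndRequestIndex 110 and dirty bits at 103 and 107, the iterator skips to
// 100, emits 0-2 before the first dirty bit, 4-6 between the two, and 8-9
// after the last one:
//
//	f(0) f(1) f(2) f(4) f(5) f(6) f(8) f(9)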

func (p *Piece) undirtiedChunksIter() request_strategy.ChunksIter {
	// Use an iterator to jump between dirty bits, rather than testing every
	// chunk individually.
	return undirtiedChunksIter{
		TorrentDirtyChunks: &p.t.dirtyChunks,
		StartRequestIndex:  p.requestIndexOffset(),
		EndRequestIndex:    p.requestIndexOffset() + p.numChunks(),
	}
}

func (p *Piece) iterUndirtiedChunks(f func(chunkIndexType)) {
	p.undirtiedChunksIter().Iter(f)
	// The original implementation, kept for reference:
	//
	//	for i := chunkIndexType(0); i < p.numChunks(); i++ {
	//		if p.chunkIndexDirty(i) {
	//			continue
	//		}
	//		f(i)
	//	}
}

// The torrent-wide request index of the first chunk in this piece.
func (p *Piece) requestIndexOffset() RequestIndex {
	return p.t.pieceRequestIndexOffset(p.index)
}