Sergey Matveev's repositories - btrtrc.git/blob - piece.go
Include completion known in PieceState
[btrtrc.git] / piece.go
1 package torrent
2
3 import (
4         "fmt"
5         "sync"
6
7         "github.com/anacrolix/missinggo/bitmap"
8
9         "github.com/anacrolix/torrent/metainfo"
10         pp "github.com/anacrolix/torrent/peer_protocol"
11         "github.com/anacrolix/torrent/storage"
12 )
13
// Describes the importance of obtaining a particular piece.
type piecePriority byte

// Raise bumps the priority up to maybe if it's higher than the current
// value, and reports whether a change was made. Lower or equal values
// are ignored.
//
// The receiver is named me (not pp) so it doesn't shadow the pp alias
// for the peer_protocol package used elsewhere in this file.
func (me *piecePriority) Raise(maybe piecePriority) bool {
	if maybe > *me {
		*me = maybe
		return true
	}
	return false
}

// BitmapPriority converts the priority for use in a PriorityBitmap,
// which treats lower values as more urgent — hence the negation.
func (me piecePriority) BitmapPriority() int {
	return -int(me)
}

const (
	PiecePriorityNone      piecePriority = iota // Not wanted. Must be the zero value.
	PiecePriorityNormal                         // Wanted.
	PiecePriorityHigh                           // Wanted a lot.
	PiecePriorityReadahead                      // May be required soon.
	// Succeeds a piece where a read occurred. Currently the same as Now,
	// apparently due to issues with caching.
	PiecePriorityNext
	PiecePriorityNow // A Reader is reading in this piece. Highest urgency.
)
40
// Piece tracks the state of a single piece within a Torrent: its
// expected hash, which chunks have been written, hashing progress, and
// the piece's download priority.
type Piece struct {
	// The completed piece SHA1 hash, from the metainfo "pieces" field.
	hash metainfo.Hash
	// The torrent this piece belongs to.
	t *Torrent
	// Zero-based index of this piece within the torrent.
	index int
	// Files that overlap this piece; consulted for priority in
	// uncachedPriority.
	files []*File
	// Chunks we've written to since the last check. The chunk offset and
	// length can be determined by the request chunkSize in use.
	dirtyChunks bitmap.Bitmap

	// True while the piece's hash is being computed.
	hashing bool
	// True once the piece has been hashed at least once.
	everHashed bool
	// Count of completed hash verifications; VerifyData waits on this.
	numVerifies int64
	// Whether the storage layer's completion check succeeded (see
	// completion()).
	storageCompletionOk bool

	// Cached state exposed to API consumers.
	publicPieceState PieceState
	// Priority set directly on the piece (e.g. via SetPriority);
	// combined with file and reader priorities in uncachedPriority.
	priority piecePriority

	// pendingWritesMutex guards pendingWrites; noPendingWrites is
	// broadcast when the count drops to zero.
	pendingWritesMutex sync.Mutex
	pendingWrites      int
	noPendingWrites    sync.Cond
}
63
64 func (p *Piece) String() string {
65         return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
66 }
67
68 func (p *Piece) Info() metainfo.Piece {
69         return p.t.info.Piece(p.index)
70 }
71
72 func (p *Piece) Storage() storage.Piece {
73         return p.t.storage.Piece(p.Info())
74 }
75
76 func (p *Piece) pendingChunkIndex(chunkIndex int) bool {
77         return !p.dirtyChunks.Contains(chunkIndex)
78 }
79
80 func (p *Piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
81         return p.pendingChunkIndex(chunkIndex(cs, chunkSize))
82 }
83
84 func (p *Piece) hasDirtyChunks() bool {
85         return p.dirtyChunks.Len() != 0
86 }
87
88 func (p *Piece) numDirtyChunks() (ret int) {
89         return p.dirtyChunks.Len()
90 }
91
92 func (p *Piece) unpendChunkIndex(i int) {
93         p.dirtyChunks.Add(i)
94 }
95
96 func (p *Piece) pendChunkIndex(i int) {
97         p.dirtyChunks.Remove(i)
98 }
99
100 func (p *Piece) numChunks() int {
101         return p.t.pieceNumChunks(p.index)
102 }
103
104 func (p *Piece) undirtiedChunkIndices() (ret bitmap.Bitmap) {
105         ret = p.dirtyChunks.Copy()
106         ret.FlipRange(0, p.numChunks())
107         return
108 }
109
110 func (p *Piece) incrementPendingWrites() {
111         p.pendingWritesMutex.Lock()
112         p.pendingWrites++
113         p.pendingWritesMutex.Unlock()
114 }
115
116 func (p *Piece) decrementPendingWrites() {
117         p.pendingWritesMutex.Lock()
118         if p.pendingWrites == 0 {
119                 panic("assertion")
120         }
121         p.pendingWrites--
122         if p.pendingWrites == 0 {
123                 p.noPendingWrites.Broadcast()
124         }
125         p.pendingWritesMutex.Unlock()
126 }
127
128 func (p *Piece) waitNoPendingWrites() {
129         p.pendingWritesMutex.Lock()
130         for p.pendingWrites != 0 {
131                 p.noPendingWrites.Wait()
132         }
133         p.pendingWritesMutex.Unlock()
134 }
135
136 func (p *Piece) chunkIndexDirty(chunk int) bool {
137         return p.dirtyChunks.Contains(chunk)
138 }
139
140 func (p *Piece) chunkIndexSpec(chunk int) chunkSpec {
141         return chunkIndexSpec(chunk, p.length(), p.chunkSize())
142 }
143
144 func (p *Piece) numDirtyBytes() (ret pp.Integer) {
145         // defer func() {
146         //      if ret > p.length() {
147         //              panic("too many dirty bytes")
148         //      }
149         // }()
150         numRegularDirtyChunks := p.numDirtyChunks()
151         if p.chunkIndexDirty(p.numChunks() - 1) {
152                 numRegularDirtyChunks--
153                 ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
154         }
155         ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
156         return
157 }
158
159 func (p *Piece) length() pp.Integer {
160         return p.t.pieceLength(p.index)
161 }
162
163 func (p *Piece) chunkSize() pp.Integer {
164         return p.t.chunkSize
165 }
166
167 func (p *Piece) lastChunkIndex() int {
168         return p.numChunks() - 1
169 }
170
171 func (p *Piece) bytesLeft() (ret pp.Integer) {
172         if p.t.pieceComplete(p.index) {
173                 return 0
174         }
175         return p.length() - p.numDirtyBytes()
176 }
177
// VerifyData queues a hash check of this piece and blocks until a full
// verification has completed after the call. Takes the client lock.
func (p *Piece) VerifyData() {
	p.t.cl.mu.Lock()
	defer p.t.cl.mu.Unlock()
	// Wait for one more verification than we've already seen. If a hash
	// is in flight right now, its result may predate our data, so wait
	// for a second, post-queue verification in that case.
	target := p.numVerifies + 1
	if p.hashing {
		target++
	}
	// log.Printf("target: %d", target)
	p.t.queuePieceCheck(p.index)
	for p.numVerifies < target {
		// log.Printf("got %d verifies", p.numVerifies)
		// The client event cond is signalled when numVerifies advances.
		p.t.cl.event.Wait()
	}
	// log.Print("done")
}
193
194 func (p *Piece) queuedForHash() bool {
195         return p.t.piecesQueuedForHash.Get(p.index)
196 }
197
198 func (p *Piece) torrentBeginOffset() int64 {
199         return int64(p.index) * p.t.info.PieceLength
200 }
201
202 func (p *Piece) torrentEndOffset() int64 {
203         return p.torrentBeginOffset() + int64(p.length())
204 }
205
206 func (p *Piece) SetPriority(prio piecePriority) {
207         p.t.cl.mu.Lock()
208         defer p.t.cl.mu.Unlock()
209         p.priority = prio
210         p.t.updatePiecePriority(p.index)
211 }
212
213 func (p *Piece) uncachedPriority() (ret piecePriority) {
214         if p.t.pieceComplete(p.index) {
215                 return PiecePriorityNone
216         }
217         for _, f := range p.files {
218                 ret.Raise(f.prio)
219         }
220         if p.t.readerNowPieces.Contains(p.index) {
221                 ret.Raise(PiecePriorityNow)
222         }
223         // if t.readerNowPieces.Contains(piece - 1) {
224         //      return PiecePriorityNext
225         // }
226         if p.t.readerReadaheadPieces.Contains(p.index) {
227                 ret.Raise(PiecePriorityReadahead)
228         }
229         ret.Raise(p.priority)
230         return
231 }
232
233 func (p *Piece) completion() (ret storage.Completion) {
234         ret.Complete = p.t.pieceComplete(p.index)
235         ret.Ok = p.storageCompletionOk
236         return
237 }