package torrent

import (
        "fmt"
        "sync"

        "github.com/anacrolix/missinggo/bitmap"

        "github.com/anacrolix/torrent/metainfo"
        pp "github.com/anacrolix/torrent/peer_protocol"
        "github.com/anacrolix/torrent/storage"
)

// Describes the importance of obtaining a particular piece.
type piecePriority byte

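// Raise bumps the priority to maybe if that is higher than the current
// value, and reports whether the value changed. A quick sketch (values
// here are illustrative):
//
//      var prio piecePriority             // PiecePriorityNone, the zero value
//      prio.Raise(PiecePriorityReadahead) // true: None -> Readahead
//      prio.Raise(PiecePriorityNormal)    // false: Normal < Readahead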
func (pp *piecePriority) Raise(maybe piecePriority) bool {
        if maybe > *pp {
                *pp = maybe
                return true
        }
        return false
}

// Priority for use in PriorityBitmap. Negated so that higher piece
// priorities come first in the bitmap's ascending order.
func (me piecePriority) BitmapPriority() int {
        return -int(me)
}

const (
        PiecePriorityNone      piecePriority = iota // Not wanted. Must be the zero value.
        PiecePriorityNormal                         // Wanted.
        PiecePriorityHigh                           // Wanted a lot.
        PiecePriorityReadahead                      // May be required soon.
        // Succeeds a piece where a read occurred. Currently the same as Now,
        // apparently due to issues with caching.
        PiecePriorityNext
        PiecePriorityNow // A Reader is reading in this piece. Highest urgency.
)

type Piece struct {
        // The completed piece SHA1 hash, from the metainfo "pieces" field.
        hash  metainfo.Hash
        t     *Torrent
        index int
        files []*File
        // Chunks we've written to since the last check. The chunk offset and
        // length can be determined by the request chunkSize in use.
        dirtyChunks bitmap.Bitmap

        hashing             bool
        numVerifies         int64
        storageCompletionOk bool

        publicPieceState PieceState
        priority         piecePriority

        pendingWritesMutex sync.Mutex
        pendingWrites      int
        noPendingWrites    sync.Cond

        // Connections that have written data to this piece since its last check.
        // This can include connections that have closed.
        dirtiers map[*connection]struct{}
}

func (p *Piece) String() string {
        return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
}

func (p *Piece) Info() metainfo.Piece {
        return p.t.info.Piece(p.index)
}

func (p *Piece) Storage() storage.Piece {
        return p.t.storage.Piece(p.Info())
}

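// A chunk is pending if we haven't written it since the piece's last hash
// check, i.e. it is not in dirtyChunks.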
func (p *Piece) pendingChunkIndex(chunkIndex int) bool {
        return !p.dirtyChunks.Contains(chunkIndex)
}

func (p *Piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
        return p.pendingChunkIndex(chunkIndex(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
        return p.dirtyChunks.Len() != 0
}

func (p *Piece) numDirtyChunks() int {
        return p.dirtyChunks.Len()
}

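// unpendChunkIndex records that a chunk has been written. tickleReaders
// presumably wakes blocked Readers so they notice newly available data
// without waiting for the whole piece to complete.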
func (p *Piece) unpendChunkIndex(i int) {
        p.dirtyChunks.Add(i)
        p.t.tickleReaders()
}

func (p *Piece) pendChunkIndex(i int) {
        p.dirtyChunks.Remove(i)
}

func (p *Piece) numChunks() int {
        return p.t.pieceNumChunks(p.index)
}

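// undirtiedChunkIndices returns the chunks still wanted for this piece:
// copy the dirty set, then flip every bit in [0, numChunks).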
func (p *Piece) undirtiedChunkIndices() (ret bitmap.Bitmap) {
        ret = p.dirtyChunks.Copy()
        ret.FlipRange(0, p.numChunks())
        return
}

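// Pending-write accounting. noPendingWrites.L is presumably set to
// &pendingWritesMutex wherever the Piece is initialized. A typical pairing
// around a storage write might look like (illustrative):
//
//      p.incrementPendingWrites()
//      defer p.decrementPendingWrites()
//      // ... write the chunk to storage ...
//
// waitNoPendingWrites then blocks, e.g. before hashing, until all in-flight
// writes have drained.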
func (p *Piece) incrementPendingWrites() {
        p.pendingWritesMutex.Lock()
        p.pendingWrites++
        p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
        p.pendingWritesMutex.Lock()
        if p.pendingWrites == 0 {
                panic("assertion: pendingWrites underflow")
        }
        p.pendingWrites--
        if p.pendingWrites == 0 {
                p.noPendingWrites.Broadcast()
        }
        p.pendingWritesMutex.Unlock()
}

func (p *Piece) waitNoPendingWrites() {
        p.pendingWritesMutex.Lock()
        for p.pendingWrites != 0 {
                p.noPendingWrites.Wait()
        }
        p.pendingWritesMutex.Unlock()
}

func (p *Piece) chunkIndexDirty(chunk int) bool {
        return p.dirtyChunks.Contains(chunk)
}

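// chunkIndexSpec maps a chunk index within this piece to its byte range.
// Roughly (only the final chunk may be shorter than chunkSize):
//
//      begin := pp.Integer(chunk) * chunkSize
//      length := min(chunkSize, pieceLength-begin)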
func (p *Piece) chunkIndexSpec(chunk int) chunkSpec {
        return chunkIndexSpec(chunk, p.length(), p.chunkSize())
}

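// numDirtyBytes totals the bytes written since the last check. Every dirty
// chunk counts a full chunkSize, except the final chunk, which is counted
// at its actual (possibly shorter) length.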
func (p *Piece) numDirtyBytes() (ret pp.Integer) {
        // defer func() {
        //      if ret > p.length() {
        //              panic("too many dirty bytes")
        //      }
        // }()
        numRegularDirtyChunks := p.numDirtyChunks()
        if p.chunkIndexDirty(p.numChunks() - 1) {
                numRegularDirtyChunks--
                ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
        }
        ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
        return
}

func (p *Piece) length() pp.Integer {
        return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
        return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() int {
        return p.numChunks() - 1
}

func (p *Piece) bytesLeft() (ret pp.Integer) {
        if p.t.pieceComplete(p.index) {
                return 0
        }
        return p.length() - p.numDirtyBytes()
}

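// VerifyData queues the piece for a hash check and blocks until a check
// that started after this call has completed. If a hash is already running,
// its result may predate our call, hence the extra target increment.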
func (p *Piece) VerifyData() {
        p.t.cl.mu.Lock()
        defer p.t.cl.mu.Unlock()
        target := p.numVerifies + 1
        if p.hashing {
                target++
        }
        // log.Printf("target: %d", target)
        p.t.queuePieceCheck(p.index)
        for p.numVerifies < target {
                // log.Printf("got %d verifies", p.numVerifies)
                p.t.cl.event.Wait()
        }
        // log.Print("done")
}

func (p *Piece) queuedForHash() bool {
        return p.t.piecesQueuedForHash.Get(p.index)
}

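// Byte offsets of this piece within the torrent's data as a whole. Every
// piece except possibly the last spans info.PieceLength bytes.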
func (p *Piece) torrentBeginOffset() int64 {
        return int64(p.index) * p.t.info.PieceLength
}

func (p *Piece) torrentEndOffset() int64 {
        return p.torrentBeginOffset() + int64(p.length())
}

func (p *Piece) SetPriority(prio piecePriority) {
        p.t.cl.mu.Lock()
        defer p.t.cl.mu.Unlock()
        p.priority = prio
        p.t.updatePiecePriority(p.index)
}

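// uncachedPriority recomputes the effective priority from scratch: file
// priorities, Reader positions, and any priority set directly on the piece.
// Completed pieces are never wanted.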
func (p *Piece) uncachedPriority() (ret piecePriority) {
        if p.t.pieceComplete(p.index) {
                return PiecePriorityNone
        }
        for _, f := range p.files {
                ret.Raise(f.prio)
        }
        if p.t.readerNowPieces.Contains(p.index) {
                ret.Raise(PiecePriorityNow)
        }
        // if t.readerNowPieces.Contains(piece - 1) {
        //      return PiecePriorityNext
        // }
        if p.t.readerReadaheadPieces.Contains(p.index) {
                ret.Raise(PiecePriorityReadahead)
        }
        ret.Raise(p.priority)
        return
}

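// completion reports the storage-backed completion state. Ok is false when
// storage could not determine completion.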
func (p *Piece) completion() (ret storage.Completion) {
        ret.Complete = p.t.pieceComplete(p.index)
        ret.Ok = p.storageCompletionOk
        return
}
241 }