package torrent

import (
	"fmt"
	"sync"

	"github.com/anacrolix/missinggo/bitmap"
	"github.com/anacrolix/torrent/metainfo"
	pp "github.com/anacrolix/torrent/peer_protocol"
	"github.com/anacrolix/torrent/storage"
)

// Describes the importance of obtaining a particular piece.
type piecePriority byte
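
// Raise bumps the priority to maybe if it is higher than the current value,
// and reports whether a change was made.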
func (pp *piecePriority) Raise(maybe piecePriority) bool {
	if maybe > *pp {
		*pp = maybe
		return true
	}
	return false
}

// Priority for use in PriorityBitmap
func (me piecePriority) BitmapPriority() int {
	return -int(me)
}

const (
	PiecePriorityNone      piecePriority = iota // Not wanted. Must be the zero value.
	PiecePriorityNormal                         // Wanted.
	PiecePriorityHigh                           // Wanted a lot.
	PiecePriorityReadahead                      // May be required soon.
	// Succeeds a piece where a read occurred. Currently the same as Now,
	// apparently due to issues with caching.
	PiecePriorityNext
	PiecePriorityNow // A Reader is reading in this piece. Highest urgency.
)
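
// Piece tracks the state of a single piece within a Torrent: its expected
// hash, which chunks have been written since the last check, verification
// and storage-completion state, and its priority.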
type Piece struct {
	// The completed piece SHA1 hash, from the metainfo "pieces" field.
	hash  metainfo.Hash
	t     *Torrent
	index pieceIndex
	files []*File
	// Chunks we've written to since the last check. The chunk offset and
	// length can be determined by the request chunkSize in use.
	dirtyChunks bitmap.Bitmap

	hashing             bool
	numVerifies         int64
	storageCompletionOk bool

	publicPieceState PieceState
	priority         piecePriority

	pendingWritesMutex sync.Mutex
	pendingWrites      int
	noPendingWrites    sync.Cond

	// Connections that have written data to this piece since its last check.
	// This can include connections that have closed.
	dirtiers map[*connection]struct{}
}
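
// String identifies the piece as "<infohash hex>/<piece index>".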
func (p *Piece) String() string {
	return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
}
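
// Info returns the metainfo-level description of this piece.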
func (p *Piece) Info() metainfo.Piece {
	return p.t.info.Piece(int(p.index))
}
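
// Storage returns the storage-layer handle used to read and write this
// piece's data.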
func (p *Piece) Storage() storage.Piece {
	return p.t.storage.Piece(p.Info())
}
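
// A chunk is pending if it hasn't been written (marked dirty) since the
// piece's last hash check.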
func (p *Piece) pendingChunkIndex(chunkIndex int) bool {
	return !p.dirtyChunks.Contains(chunkIndex)
}

func (p *Piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
	return p.pendingChunkIndex(chunkIndex(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
	return p.dirtyChunks.Len() != 0
}

func (p *Piece) numDirtyChunks() pp.Integer {
	return pp.Integer(p.dirtyChunks.Len())
}
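
// unpendChunkIndex marks chunk i as written (dirty) and tickles the
// Torrent's readers.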
func (p *Piece) unpendChunkIndex(i int) {
	p.dirtyChunks.Add(i)
	p.t.tickleReaders()
}
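
// pendChunkIndex clears the dirty bit for chunk i so that it counts as
// pending again.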
func (p *Piece) pendChunkIndex(i int) {
	p.dirtyChunks.Remove(i)
}

func (p *Piece) numChunks() pp.Integer {
	return p.t.pieceNumChunks(p.index)
}
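
// undirtiedChunkIndices returns the chunk indices that have not been
// written since the last check: the complement of dirtyChunks over this
// piece's chunks.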
func (p *Piece) undirtiedChunkIndices() (ret bitmap.Bitmap) {
	ret = p.dirtyChunks.Copy()
	ret.FlipRange(0, bitmap.BitIndex(p.numChunks()))
	return
}
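
// incrementPendingWrites records the start of an outstanding storage write
// to this piece.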
func (p *Piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	p.pendingWrites++
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	if p.pendingWrites == 0 {
		panic("assertion")
	}
	p.pendingWrites--
	if p.pendingWrites == 0 {
		p.noPendingWrites.Broadcast()
	}
	p.pendingWritesMutex.Unlock()
}
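
// waitNoPendingWrites blocks until all outstanding writes to this piece
// have completed.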
func (p *Piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) chunkIndexDirty(chunk pp.Integer) bool {
	return p.dirtyChunks.Contains(bitmap.BitIndex(chunk))
}

func (p *Piece) chunkIndexSpec(chunk pp.Integer) chunkSpec {
	return chunkIndexSpec(chunk, p.length(), p.chunkSize())
}
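
// numDirtyBytes counts the bytes written since the last check, accounting
// for the final chunk possibly being shorter than chunkSize.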
func (p *Piece) numDirtyBytes() (ret pp.Integer) {
	// defer func() {
	//	if ret > p.length() {
	//		panic("too many dirty bytes")
	//	}
	// }()
	numRegularDirtyChunks := p.numDirtyChunks()
	if p.chunkIndexDirty(p.numChunks() - 1) {
		numRegularDirtyChunks--
		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
	}
	ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
	return
}

func (p *Piece) length() pp.Integer {
	return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
	return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() pp.Integer {
	return p.numChunks() - 1
}
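
// bytesLeft returns the number of bytes still needed for this piece, or 0
// if the piece is already complete.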
func (p *Piece) bytesLeft() (ret pp.Integer) {
	if p.t.pieceComplete(p.index) {
		return 0
	}
	return p.length() - p.numDirtyBytes()
}
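
// VerifyData queues the piece for hashing and blocks until a verification
// that began after this call has completed.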
func (p *Piece) VerifyData() {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	target := p.numVerifies + 1
	if p.hashing {
		target++
	}
	// log.Printf("target: %d", target)
	p.t.queuePieceCheck(p.index)
	for p.numVerifies < target {
		// log.Printf("got %d verifies", p.numVerifies)
		p.t.cl.event.Wait()
	}
	// log.Print("done")
}

func (p *Piece) queuedForHash() bool {
	return p.t.piecesQueuedForHash.Get(bitmap.BitIndex(p.index))
}

func (p *Piece) torrentBeginOffset() int64 {
	return int64(p.index) * p.t.info.PieceLength
}

func (p *Piece) torrentEndOffset() int64 {
	return p.torrentBeginOffset() + int64(p.length())
}
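
// SetPriority sets an explicit priority for this piece and updates the
// torrent's piece priorities to reflect it.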
func (p *Piece) SetPriority(prio piecePriority) {
	p.t.cl.lock()
	defer p.t.cl.unlock()
	p.priority = prio
	p.t.updatePiecePriority(p.index)
}
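
// uncachedPriority recomputes the piece's effective priority from its
// completion and hashing state, the priorities of the files it spans,
// reader positions, and any explicitly set priority.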
func (p *Piece) uncachedPriority() (ret piecePriority) {
	if p.t.pieceComplete(p.index) || p.t.pieceQueuedForHash(p.index) || p.t.hashingPiece(p.index) {
		return PiecePriorityNone
	}
	for _, f := range p.files {
		ret.Raise(f.prio)
	}
	if p.t.readerNowPieces.Contains(int(p.index)) {
		ret.Raise(PiecePriorityNow)
	}
	// if t.readerNowPieces.Contains(piece - 1) {
	//	return PiecePriorityNext
	// }
	if p.t.readerReadaheadPieces.Contains(bitmap.BitIndex(p.index)) {
		ret.Raise(PiecePriorityReadahead)
	}
	ret.Raise(p.priority)
	return
}
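
// completion reports the storage-level completion state of the piece, and
// whether that state could be determined.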
func (p *Piece) completion() (ret storage.Completion) {
	ret.Complete = p.t.pieceComplete(p.index)
	ret.Ok = p.storageCompletionOk
	return
}