package torrent

import (
	"fmt"
	"sync"

	"github.com/anacrolix/missinggo/bitmap"

	"github.com/anacrolix/torrent/metainfo"
	pp "github.com/anacrolix/torrent/peer_protocol"
	"github.com/anacrolix/torrent/storage"
)

// piecePriority describes the importance of obtaining a particular piece.
type piecePriority byte

// Raise sets the priority to maybe if it is higher than the current value,
// returning true if it changed.
func (pp *piecePriority) Raise(maybe piecePriority) bool {
	if maybe > *pp {
		*pp = maybe
		return true
	}
	return false
}

// BitmapPriority returns the priority for use in a PriorityBitmap: the piece
// priority is negated so that more urgent pieces get lower values.
func (me piecePriority) BitmapPriority() int {
	return -int(me)
}

const (
	PiecePriorityNone      piecePriority = iota // Not wanted. Must be the zero value.
	PiecePriorityNormal                         // Wanted.
	PiecePriorityHigh                           // Wanted a lot.
	PiecePriorityReadahead                      // May be required soon.
	// Succeeds a piece where a read occurred. Currently the same as Now,
	// apparently due to issues with caching.
	PiecePriorityNext
	PiecePriorityNow // A Reader is reading in this piece. Highest urgency.
)
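
// Illustrative sketch (not used elsewhere) of how Raise and BitmapPriority
// compose: Raise only ever increases urgency, and BitmapPriority negates so
// that more urgent priorities order first.
func examplePiecePriority() int {
	prio := PiecePriorityNone
	prio.Raise(PiecePriorityReadahead) // prio is now PiecePriorityReadahead.
	prio.Raise(PiecePriorityNormal)    // No change: Normal < Readahead.
	return prio.BitmapPriority()       // -3, ordering ahead of Normal's -1.
}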

type Piece struct {
	// The completed piece SHA1 hash, from the metainfo "pieces" field.
	hash  metainfo.Hash
	t     *Torrent
	index int
	files []*File
	// Chunks we've written to since the last check. The chunk offset and
	// length can be determined by the request chunkSize in use.
	dirtyChunks bitmap.Bitmap

	// True while the piece's data is being hashed.
	hashing bool
	// Number of times the piece's hash has been checked.
	numVerifies int64
	// Whether the completion state reported by storage is known to be valid.
	// Replaces the former everHashed field.
	storageCompletionOk bool

	publicPieceState PieceState
	// Priority set explicitly on the piece; combined with other sources in
	// uncachedPriority.
	priority piecePriority

	pendingWritesMutex sync.Mutex
	pendingWrites      int
	noPendingWrites    sync.Cond

	// Connections that have written data to this piece since its last check.
	// This can include connections that have closed.
	dirtiers map[*connection]struct{}
}

func (p *Piece) String() string {
	return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
}

func (p *Piece) Info() metainfo.Piece {
	return p.t.info.Piece(p.index)
}

func (p *Piece) Storage() storage.Piece {
	return p.t.storage.Piece(p.Info())
}
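
// Illustrative sketch: reading a piece's bytes back out of its storage, for
// example before hashing. storage.Piece provides ReadAt; p.length() is the
// piece length from the torrent info.
func examplePieceBytes(p *Piece) ([]byte, error) {
	b := make([]byte, p.length())
	_, err := p.Storage().ReadAt(b, 0)
	return b, err
}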

func (p *Piece) pendingChunkIndex(chunkIndex int) bool {
	return !p.dirtyChunks.Contains(chunkIndex)
}

func (p *Piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
	return p.pendingChunkIndex(chunkIndex(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
	return p.dirtyChunks.Len() != 0
}

func (p *Piece) numDirtyChunks() (ret int) {
	return p.dirtyChunks.Len()
}

// unpendChunkIndex marks chunk i as dirty: it has been written, so it is no
// longer pending.
func (p *Piece) unpendChunkIndex(i int) {
	p.dirtyChunks.Add(i)
}

// pendChunkIndex marks chunk i as pending again, clearing its dirty state.
func (p *Piece) pendChunkIndex(i int) {
	p.dirtyChunks.Remove(i)
}

func (p *Piece) numChunks() int {
	return p.t.pieceNumChunks(p.index)
}

// undirtiedChunkIndices returns the chunks in [0, numChunks) that have not
// been written since the last check, i.e. those still pending.
func (p *Piece) undirtiedChunkIndices() (ret bitmap.Bitmap) {
	ret = p.dirtyChunks.Copy()
	ret.FlipRange(0, p.numChunks())
	return
}
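
// Illustrative sketch: turning the still-pending chunks of a piece into
// request specs, as a requesting loop might. Assumes bitmap's IterTyped
// iterator; chunkIndexSpec is defined below.
func exampleOutstandingChunkSpecs(p *Piece) (css []chunkSpec) {
	p.undirtiedChunkIndices().IterTyped(func(ci int) bool {
		css = append(css, p.chunkIndexSpec(ci))
		return true
	})
	return
}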

func (p *Piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	p.pendingWrites++
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	if p.pendingWrites == 0 {
		panic("assertion")
	}
	p.pendingWrites--
	if p.pendingWrites == 0 {
		p.noPendingWrites.Broadcast()
	}
	p.pendingWritesMutex.Unlock()
}

// waitNoPendingWrites blocks until all outstanding writes to the piece have
// completed.
func (p *Piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
	p.pendingWritesMutex.Unlock()
}
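
// Illustrative sketch of the intended pairing: a writer holds the pending
// write count up for the duration of a storage write, while the hasher calls
// waitNoPendingWrites before reading the piece back, so no write is in flight
// when the data is verified.
func exampleWriteChunk(p *Piece, off int64, b []byte) error {
	p.incrementPendingWrites()
	defer p.decrementPendingWrites()
	_, err := p.Storage().WriteAt(b, off)
	return err
}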

func (p *Piece) chunkIndexDirty(chunk int) bool {
	return p.dirtyChunks.Contains(chunk)
}

func (p *Piece) chunkIndexSpec(chunk int) chunkSpec {
	return chunkIndexSpec(chunk, p.length(), p.chunkSize())
}
func (p *Piece) numDirtyBytes() (ret pp.Integer) {
	// defer func() {
	// 	if ret > p.length() {
	// 		panic("too many dirty bytes")
	// 	}
	// }()
	numRegularDirtyChunks := p.numDirtyChunks()
	if p.chunkIndexDirty(p.numChunks() - 1) {
		numRegularDirtyChunks--
		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
	}
	ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
	return
}

func (p *Piece) length() pp.Integer {
	return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
	return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() int {
	return p.numChunks() - 1
}

func (p *Piece) bytesLeft() (ret pp.Integer) {
	if p.t.pieceComplete(p.index) {
		return 0
	}
	return p.length() - p.numDirtyBytes()
}

// VerifyData forces the piece to be re-hashed, blocking until at least one
// complete verification has finished after the call.
func (p *Piece) VerifyData() {
	p.t.cl.mu.Lock()
	defer p.t.cl.mu.Unlock()
	target := p.numVerifies + 1
	if p.hashing {
		target++
	}
	// log.Printf("target: %d", target)
	p.t.queuePieceCheck(p.index)
	for p.numVerifies < target {
		// log.Printf("got %d verifies", p.numVerifies)
		p.t.cl.event.Wait()
	}
	// log.Print("done")
}
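
// Illustrative sketch: force a synchronous re-check of a piece, then read its
// completion state. Taking the client lock here is an assumption, mirroring
// VerifyData and SetPriority.
func exampleVerifyAndCheck(p *Piece) storage.Completion {
	p.VerifyData() // Blocks until the queued hash check has run.
	p.t.cl.mu.Lock()
	defer p.t.cl.mu.Unlock()
	return p.completion()
}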

func (p *Piece) queuedForHash() bool {
	return p.t.piecesQueuedForHash.Get(p.index)
}

func (p *Piece) torrentBeginOffset() int64 {
	return int64(p.index) * p.t.info.PieceLength
}

func (p *Piece) torrentEndOffset() int64 {
	return p.torrentBeginOffset() + int64(p.length())
}

// SetPriority sets the piece's explicit priority and recomputes its effective
// priority.
func (p *Piece) SetPriority(prio piecePriority) {
	p.t.cl.mu.Lock()
	defer p.t.cl.mu.Unlock()
	p.priority = prio
	p.t.updatePiecePriority(p.index)
}

// uncachedPriority recomputes the piece's effective priority: None if the
// piece is complete, otherwise the highest of its files' priorities, the
// reader-driven priorities, and any priority set explicitly on the piece.
func (p *Piece) uncachedPriority() (ret piecePriority) {
	if p.t.pieceComplete(p.index) {
		return PiecePriorityNone
	}
	for _, f := range p.files {
		ret.Raise(f.prio)
	}
	if p.t.readerNowPieces.Contains(p.index) {
		ret.Raise(PiecePriorityNow)
	}
	// if t.readerNowPieces.Contains(piece - 1) {
	// 	return PiecePriorityNext
	// }
	if p.t.readerReadaheadPieces.Contains(p.index) {
		ret.Raise(PiecePriorityReadahead)
	}
	ret.Raise(p.priority)
	return
}

func (p *Piece) completion() (ret storage.Completion) {
	ret.Complete = p.t.pieceComplete(p.index)
	ret.Ok = p.storageCompletionOk
	return
}