package torrent

import (
	"fmt"
	"sync"

	"github.com/anacrolix/missinggo/bitmap"

	"github.com/anacrolix/torrent/metainfo"
	pp "github.com/anacrolix/torrent/peer_protocol"
	"github.com/anacrolix/torrent/storage"
)

// piecePriority describes the importance of obtaining a particular piece.
type piecePriority byte

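// Raise bumps the priority to maybe if that is higher than the current
// value.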
func (pp *piecePriority) Raise(maybe piecePriority) {
	if maybe > *pp {
		*pp = maybe
	}
}

// BitmapPriority returns the priority for use in a PriorityBitmap. The value
// is negated because PriorityBitmap treats lower values as more important.
func (me piecePriority) BitmapPriority() int {
	return -int(me)
}

const (
	PiecePriorityNone      piecePriority = iota // Not wanted.
	PiecePriorityNormal                         // Wanted.
	PiecePriorityHigh                           // Wanted a lot.
	PiecePriorityReadahead                      // May be required soon.
	// The piece immediately after one where a read occurred. Currently
	// treated the same as Now, apparently due to issues with caching.
	PiecePriorityNext
	PiecePriorityNow // A Reader is reading in this piece. Highest urgency.
)

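// Piece tracks the state of a single piece within a Torrent: its expected
// hash, which chunks have been written, and hashing/verification progress.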
type Piece struct {
	// The completed piece SHA1 hash, from the metainfo "pieces" field.
	hash  metainfo.Hash
	t     *Torrent
	index int
	// Chunks we've written to since the last check. The chunk offset and
	// length can be determined by the request chunkSize in use.
	dirtyChunks bitmap.Bitmap

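	// Hash and verification state. Assumption: these fields are guarded by
	// the owning client's lock (VerifyData below takes p.t.cl.mu).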
	hashing             bool
	everHashed          bool
	numVerifies         int64
	storageCompletionOk bool

	publicPieceState PieceState
	priority         piecePriority

	pendingWritesMutex sync.Mutex
	pendingWrites      int
	noPendingWrites    sync.Cond
}

func (p *Piece) String() string {
	return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
}

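// Info returns the metainfo.Piece for this piece's index.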
func (p *Piece) Info() metainfo.Piece {
	return p.t.info.Piece(p.index)
}

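// Storage returns the storage.Piece through which this piece's data is read
// and written.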
func (p *Piece) Storage() storage.Piece {
	return p.t.storage.Piece(p.Info())
}

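// A chunk is pending if we haven't written it since the last hash check:
// pending is simply the complement of dirty.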
func (p *Piece) pendingChunkIndex(chunkIndex int) bool {
	return !p.dirtyChunks.Contains(chunkIndex)
}

func (p *Piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
	return p.pendingChunkIndex(chunkIndex(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
	return p.dirtyChunks.Len() != 0
}

func (p *Piece) numDirtyChunks() int {
	return p.dirtyChunks.Len()
}

func (p *Piece) unpendChunkIndex(i int) {
	p.dirtyChunks.Add(i)
}

func (p *Piece) pendChunkIndex(i int) {
	p.dirtyChunks.Remove(i)
}

func (p *Piece) numChunks() int {
	return p.t.pieceNumChunks(p.index)
}

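// undirtiedChunkIndices returns the chunks that still need to be written,
// i.e. the complement of dirtyChunks over this piece's chunk count.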
func (p *Piece) undirtiedChunkIndices() (ret bitmap.Bitmap) {
	ret = p.dirtyChunks.Copy()
	ret.FlipRange(0, p.numChunks())
	return
}

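// pendingWrites counts chunk writes that have been issued but have not yet
// completed. Hashing waits for the count to drain to zero via
// waitNoPendingWrites.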
func (p *Piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	p.pendingWrites++
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	if p.pendingWrites == 0 {
		panic("decrementPendingWrites with no pending writes")
	}
	p.pendingWrites--
	if p.pendingWrites == 0 {
		p.noPendingWrites.Broadcast()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) chunkIndexDirty(chunk int) bool {
	return p.dirtyChunks.Contains(chunk)
}

func (p *Piece) chunkIndexSpec(chunk int) chunkSpec {
	return chunkIndexSpec(chunk, p.length(), p.chunkSize())
}

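// numDirtyBytes sums the bytes of all dirty chunks. The final chunk may be
// shorter than chunkSize, so it's counted separately by its actual length.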
func (p *Piece) numDirtyBytes() (ret pp.Integer) {
	// defer func() {
	// 	if ret > p.length() {
	// 		panic("too many dirty bytes")
	// 	}
	// }()
	numRegularDirtyChunks := p.numDirtyChunks()
	if p.chunkIndexDirty(p.numChunks() - 1) {
		numRegularDirtyChunks--
		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
	}
	ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
	return
}

func (p *Piece) length() pp.Integer {
	return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
	return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() int {
	return p.numChunks() - 1
}

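// bytesLeft returns how many bytes of this piece remain to be downloaded: 0
// if the piece is complete, otherwise its length minus the dirty bytes.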
func (p *Piece) bytesLeft() (ret pp.Integer) {
	if p.t.pieceComplete(p.index) {
		return 0
	}
	return p.length() - p.numDirtyBytes()
}

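// VerifyData queues a hash check for the piece and blocks until it has been
// verified at least once more. If a hash is already in flight, it waits for
// an additional verification, presumably because the in-flight check may not
// reflect the latest data.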
func (p *Piece) VerifyData() {
	p.t.cl.mu.Lock()
	defer p.t.cl.mu.Unlock()
	target := p.numVerifies + 1
	if p.hashing {
		target++
	}
	// log.Printf("target: %d", target)
	p.t.queuePieceCheck(p.index)
	for p.numVerifies < target {
		// log.Printf("got %d verifies", p.numVerifies)
		p.t.cl.event.Wait()
	}
	// log.Print("done")
}

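// queuedForHash reports whether the piece is waiting in the torrent's hash
// queue.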
func (p *Piece) queuedForHash() bool {
	return p.t.piecesQueuedForHash.Get(p.index)
}