package torrent

import (
	"fmt"
	"log"
	"sync"

	"github.com/anacrolix/missinggo/bitmap"

	"github.com/anacrolix/torrent/metainfo"
	pp "github.com/anacrolix/torrent/peer_protocol"
	"github.com/anacrolix/torrent/storage"
)

// piecePriority describes the importance of obtaining a particular piece.
type piecePriority byte

// Raise bumps the priority to maybe if that is higher than the current
// value. It never lowers the priority.
func (pp *piecePriority) Raise(maybe piecePriority) {
	if maybe > *pp {
		*pp = maybe
	}
}

const (
	PiecePriorityNone      piecePriority = iota // Not wanted.
	PiecePriorityNormal                         // Wanted.
	PiecePriorityReadahead                      // May be required soon.
	// Succeeds a piece where a read occurred. Currently the same as Now, apparently due to issues with caching.
	PiecePriorityNext
	PiecePriorityNow // A Reader is reading in this piece.
)
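
// Illustrative sketch, not part of the upstream file: Raise acts as a
// monotonic max, so wants from several sources (pending downloads, readers,
// readahead) can be layered without tracking which source was highest.
// examplePriorityRaise is a hypothetical name.
func examplePriorityRaise() piecePriority {
	prio := PiecePriorityNone
	prio.Raise(PiecePriorityNormal)    // wanted by a pending download
	prio.Raise(PiecePriorityReadahead) // may be required soon by a Reader
	prio.Raise(PiecePriorityNone)      // no effect; Raise never lowers
	return prio                        // PiecePriorityReadahead
}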

type Piece struct {
	// The completed piece SHA1 hash, from the metainfo "pieces" field.
	hash  metainfo.Hash
	t     *Torrent
	index int
	// Chunks we've written to since the last check. The chunk offset and
	// length can be determined by the request chunkSize in use.
	dirtyChunks bitmap.Bitmap

	hashing             bool
	everHashed          bool
	numVerifies         int64
	storageCompletionOk bool

	publicPieceState PieceState
	priority         piecePriority

	pendingWritesMutex sync.Mutex
	pendingWrites      int
	// Signalled when pendingWrites drops to zero. Its Locker is set to
	// pendingWritesMutex when the Torrent constructs its pieces.
	noPendingWrites sync.Cond
}

func (p *Piece) String() string {
	return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
}

func (p *Piece) Info() metainfo.Piece {
	return p.t.info.Piece(p.index)
}

func (p *Piece) Storage() storage.Piece {
	return p.t.storage.Piece(p.Info())
}

func (p *Piece) pendingChunkIndex(chunkIndex int) bool {
	return !p.dirtyChunks.Contains(chunkIndex)
}

func (p *Piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
	return p.pendingChunkIndex(chunkIndex(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
	return p.dirtyChunks.Len() != 0
}

func (p *Piece) numDirtyChunks() int {
	return p.dirtyChunks.Len()
}

// unpendChunkIndex marks chunk i as dirty: it has been written since the
// last hash check and no longer needs to be requested.
func (p *Piece) unpendChunkIndex(i int) {
	p.dirtyChunks.Add(i)
}

// pendChunkIndex marks chunk i as clean again, making it pending (wanted)
// once more.
func (p *Piece) pendChunkIndex(i int) {
	p.dirtyChunks.Remove(i)
}
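
// Illustrative sketch, not part of the upstream file: "dirty" means written
// since the last hash check, and "pending" is its complement, so a received
// chunk that later fails its piece hash makes a round trip through the same
// bitmap. exampleChunkRoundTrip is a hypothetical name.
func exampleChunkRoundTrip(p *Piece, i int) {
	p.unpendChunkIndex(i)      // chunk i arrived: stop requesting it
	_ = p.chunkIndexDirty(i)   // now true
	p.pendChunkIndex(i)        // piece hash failed: request chunk i again
	_ = p.pendingChunkIndex(i) // now true
}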

func (p *Piece) numChunks() int {
	return p.t.pieceNumChunks(p.index)
}

// undirtiedChunkIndices returns the chunks not written to since the last
// hash check, i.e. those still wanted.
func (p *Piece) undirtiedChunkIndices() (ret bitmap.Bitmap) {
	ret = p.dirtyChunks.Copy()
	ret.FlipRange(0, p.numChunks())
	return
}
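
// Illustrative sketch, not part of the upstream file: the complement bitmap
// is exactly the set of chunks still worth requesting. Assumes bitmap's
// IterTyped visits set bits; exampleRequestableSpecs is a hypothetical name.
func exampleRequestableSpecs(p *Piece) (specs []chunkSpec) {
	undirtied := p.undirtiedChunkIndices()
	undirtied.IterTyped(func(ci int) bool {
		specs = append(specs, p.chunkIndexSpec(ci))
		return true // continue iteration
	})
	return
}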

func (p *Piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	p.pendingWrites++
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	if p.pendingWrites == 0 {
		panic("assertion")
	}
	p.pendingWrites--
	if p.pendingWrites == 0 {
		p.noPendingWrites.Broadcast()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
	p.pendingWritesMutex.Unlock()
}
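
// Illustrative sketch, not part of the upstream file: writers bracket each
// storage write with the counter, and the hasher blocks until in-flight
// writes drain, so a piece is not hashed while a chunk write races it.
// exampleWriteThenHash is a hypothetical name; storage.Piece implements
// io.WriterAt.
func exampleWriteThenHash(p *Piece, b []byte) {
	p.incrementPendingWrites()
	go func() {
		defer p.decrementPendingWrites()
		p.Storage().WriteAt(b, 0) // errors ignored in this sketch
	}()
	p.waitNoPendingWrites() // returns once the write above completes
	// Safe to hash the piece's data now.
}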

func (p *Piece) chunkIndexDirty(chunk int) bool {
	return p.dirtyChunks.Contains(chunk)
}

func (p *Piece) chunkIndexSpec(chunk int) chunkSpec {
	return chunkIndexSpec(chunk, p.length(), p.chunkSize())
}

func (p *Piece) numDirtyBytes() (ret pp.Integer) {
	// defer func() {
	//	if ret > p.length() {
	//		panic("too many dirty bytes")
	//	}
	// }()
	numRegularDirtyChunks := p.numDirtyChunks()
	if p.chunkIndexDirty(p.numChunks() - 1) {
		numRegularDirtyChunks--
		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
	}
	ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
	return
}
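
// Worked example, not part of the upstream file: a 40 KiB piece with 16 KiB
// chunks has three chunks of 16, 16 and 8 KiB. With chunks 0 and 2 dirty,
// the final chunk contributes its actual 8 KiB spec length and the remaining
// regular chunk contributes a full chunkSize, giving 24 KiB of dirty bytes.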

func (p *Piece) length() pp.Integer {
	return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
	return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() int {
	return p.numChunks() - 1
}

func (p *Piece) bytesLeft() (ret pp.Integer) {
	if p.t.pieceComplete(p.index) {
		return 0
	}
	return p.length() - p.numDirtyBytes()
}

// VerifyData forces the piece to be re-hashed and blocks until a hash pass
// that started after this call has completed.
func (p *Piece) VerifyData() {
	p.t.cl.mu.Lock()
	defer p.t.cl.mu.Unlock()
	target := p.numVerifies + 1
	if p.hashing {
		// A hash pass is already underway; its result may predate this
		// call, so wait for one more.
		target++
	}
	log.Printf("target: %d", target)
	p.t.queuePieceCheck(p.index)
	for p.numVerifies < target {
		log.Printf("got %d verifies", p.numVerifies)
		p.t.cl.event.Wait()
	}
	log.Print("done")
}
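
// Illustrative sketch, not part of the upstream file: forcing a synchronous
// re-check of every piece, e.g. after the backing files were modified
// outside the client. Assumes the Torrent's unexported pieces slice;
// exampleReverify is a hypothetical name.
func exampleReverify(t *Torrent) {
	for i := range t.pieces {
		t.pieces[i].VerifyData()
	}
}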

func (p *Piece) queuedForHash() bool {
	return p.t.piecesQueuedForHash.Get(p.index)
}