package torrent

import (
	"fmt"
	"sync"

	"github.com/anacrolix/missinggo/bitmap"

	"github.com/anacrolix/torrent/metainfo"
	pp "github.com/anacrolix/torrent/peer_protocol"
	"github.com/anacrolix/torrent/storage"
)

// piecePriority describes the importance of obtaining a particular piece.
type piecePriority byte

func (pp *piecePriority) Raise(maybe piecePriority) {
	if maybe > *pp {
		*pp = maybe
	}
}

const (
	PiecePriorityNone      piecePriority = iota // Not wanted.
	PiecePriorityNormal                         // Wanted.
	PiecePriorityReadahead                      // May be required soon.
	// Succeeds a piece where a read occurred. Currently the same as Now, apparently due to issues with caching.
	PiecePriorityNext
	PiecePriorityNow // A Reader is reading in this piece.
)

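// Illustrative sketch, not part of the upstream file: Raise only ever
// increases a priority, so when several readers express interest in the same
// piece the highest demand wins. examplePriorityRaise is a hypothetical
// helper added here purely to demonstrate that behaviour.
func examplePriorityRaise() piecePriority {
	prio := PiecePriorityNone
	prio.Raise(PiecePriorityReadahead) // None -> Readahead
	prio.Raise(PiecePriorityNormal)    // no effect: Normal < Readahead
	return prio                        // PiecePriorityReadahead
}
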
type Piece struct {
	// The completed piece SHA1 hash, from the metainfo "pieces" field.
	hash  metainfo.Hash
	t     *Torrent
	index int
	// Chunks we've written to since the last check. The chunk offset and
	// length can be determined by the request chunkSize in use.
	dirtyChunks bitmap.Bitmap

	hashing             bool
	everHashed          bool
	numVerifies         int64
	storageCompletionOk bool

	publicPieceState PieceState
	priority         piecePriority

	pendingWritesMutex sync.Mutex
	pendingWrites      int
	noPendingWrites    sync.Cond
}

func (p *Piece) String() string {
	return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
}

func (p *Piece) Info() metainfo.Piece {
	return p.t.info.Piece(p.index)
}

func (p *Piece) Storage() storage.Piece {
	return p.t.storage.Piece(p.Info())
}

func (p *Piece) pendingChunkIndex(chunkIndex int) bool {
	return !p.dirtyChunks.Contains(chunkIndex)
}

func (p *Piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
	return p.pendingChunkIndex(chunkIndex(cs, chunkSize))
}

func (p *Piece) hasDirtyChunks() bool {
	return p.dirtyChunks.Len() != 0
}

func (p *Piece) numDirtyChunks() (ret int) {
	return p.dirtyChunks.Len()
}

func (p *Piece) unpendChunkIndex(i int) {
	p.dirtyChunks.Add(i)
}

func (p *Piece) pendChunkIndex(i int) {
	p.dirtyChunks.Remove(i)
}

func (p *Piece) numChunks() int {
	return p.t.pieceNumChunks(p.index)
}

func (p *Piece) undirtiedChunkIndices() (ret bitmap.Bitmap) {
	ret = p.dirtyChunks.Copy()
	ret.FlipRange(0, p.numChunks())
	return
}

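// Illustrative sketch, not part of the upstream file: the dirtyChunks bitmap
// is the per-piece record of which chunks have been written since the last
// hash check, and undirtiedChunkIndices is just its complement over the
// piece's chunk count. exampleDirtyChunkBookkeeping is a hypothetical helper
// showing that relationship; the surrounding client locking is ignored here.
func exampleDirtyChunkBookkeeping(p *Piece) {
	p.unpendChunkIndex(0) // record that chunk 0 has been written
	if !p.chunkIndexDirty(0) {
		panic("chunk 0 should now be dirty")
	}
	// The remaining chunks are still pending download.
	pending := p.undirtiedChunkIndices()
	_ = pending.Len() // == p.numChunks() - p.numDirtyChunks()
}
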
func (p *Piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	p.pendingWrites++
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	if p.pendingWrites == 0 {
		panic("assertion")
	}
	p.pendingWrites--
	if p.pendingWrites == 0 {
		p.noPendingWrites.Broadcast()
	}
	p.pendingWritesMutex.Unlock()
}

func (p *Piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
	p.pendingWritesMutex.Unlock()
}

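// Illustrative sketch, not part of the upstream file: the pendingWrites
// counter acts as a barrier between chunk writers and the hasher. A writer
// brackets its storage write with increment/decrement, and the hasher calls
// p.waitNoPendingWrites() before reading the piece back so it never hashes
// data that is still in flight. examplePendingWriteBarrier and its write
// callback are hypothetical; noPendingWrites.L is assumed to be set to
// &pendingWritesMutex when the Piece is set up elsewhere in the package,
// otherwise Wait would panic.
func examplePendingWriteBarrier(p *Piece, write func() error) error {
	p.incrementPendingWrites()
	defer p.decrementPendingWrites()
	return write() // e.g. copy a received chunk out to storage
}
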
func (p *Piece) chunkIndexDirty(chunk int) bool {
	return p.dirtyChunks.Contains(chunk)
}

func (p *Piece) chunkIndexSpec(chunk int) chunkSpec {
	return chunkIndexSpec(chunk, p.length(), p.chunkSize())
}

func (p *Piece) numDirtyBytes() (ret pp.Integer) {
	// defer func() {
	// 	if ret > p.length() {
	// 		panic("too many dirty bytes")
	// 	}
	// }()
	numRegularDirtyChunks := p.numDirtyChunks()
	if p.chunkIndexDirty(p.numChunks() - 1) {
		numRegularDirtyChunks--
		ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
	}
	ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
	return
}

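// Worked example (not in the upstream file) of the arithmetic above: suppose
// a 20 KiB piece with a 16 KiB chunk size, i.e. two chunks of 16 KiB and
// 4 KiB. With both chunks dirty, the short last chunk contributes its real
// length rather than a full chunk size:
//
//	numDirtyBytes = 4 KiB (last chunk) + 1*16 KiB (regular chunk) = 20 KiB
//
// Counting both chunks at the full chunk size would give 32 KiB, more than
// the piece itself, which is what the commented-out sanity check guards
// against.
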
func (p *Piece) length() pp.Integer {
	return p.t.pieceLength(p.index)
}

func (p *Piece) chunkSize() pp.Integer {
	return p.t.chunkSize
}

func (p *Piece) lastChunkIndex() int {
	return p.numChunks() - 1
}

func (p *Piece) bytesLeft() (ret pp.Integer) {
	if p.t.pieceComplete(p.index) {
		return 0
	}
	return p.length() - p.numDirtyBytes()
}

// VerifyData queues a hash check for the piece and blocks until it has been
// verified at least once more after this call.
func (p *Piece) VerifyData() {
	p.t.cl.mu.Lock()
	defer p.t.cl.mu.Unlock()
	target := p.numVerifies + 1
	if p.hashing {
		target++
	}
	// log.Printf("target: %d", target)
	p.t.queuePieceCheck(p.index)
	for p.numVerifies < target {
		// log.Printf("got %d verifies", p.numVerifies)
		p.t.cl.event.Wait()
	}
	// log.Print("done")
}

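// Illustrative sketch, not part of the upstream file: VerifyData takes the
// client lock itself, so it must be called without that lock held. A caller
// that suspects on-disk corruption might force a recheck of every piece along
// these lines; exampleReverifyAll is hypothetical, and t.numPieces and
// t.pieces are assumed from elsewhere in the package.
func exampleReverifyAll(t *Torrent) {
	for i := 0; i < t.numPieces(); i++ {
		t.pieces[i].VerifyData() // blocks until piece i has been re-verified
	}
}
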
func (p *Piece) queuedForHash() bool {
	return p.t.piecesQueuedForHash.Get(p.index)
}