7 "github.com/anacrolix/missinggo/bitmap"
9 "github.com/anacrolix/torrent/metainfo"
10 pp "github.com/anacrolix/torrent/peer_protocol"
11 "github.com/anacrolix/torrent/storage"
14 // Piece priority describes the importance of obtaining a particular piece.
// Higher values are more urgent; ordering is defined by the iota sequence
// of the PiecePriority* constants below.
16 type piecePriority byte
// Raise bumps the receiver up to maybe. Body elided from this excerpt —
// presumably assigns maybe only when it exceeds the current value; confirm
// against the full source.
// NOTE(review): the receiver name pp shadows the peer_protocol import
// alias pp inside this method.
18 func (pp *piecePriority) Raise(maybe piecePriority) {
// Piece priority levels, in increasing order of urgency.
25 PiecePriorityNone piecePriority = iota // Not wanted.
26 PiecePriorityNormal // Wanted.
27 PiecePriorityReadahead // May be required soon.
28 // Succeeds a piece where a read occurred. Currently the same as Now, apparently due to issues with caching.
// NOTE(review): the constant the comment above documents is elided from
// this excerpt.
30 PiecePriorityNow // A Reader is reading in this piece.
34 // The completed piece SHA1 hash, from the metainfo "pieces" field.
38 // Chunks we've written to since the last check. The chunk offset and
39 // length can be determined by the request chunkSize in use.
40 dirtyChunks bitmap.Bitmap
// NOTE(review): semantics not visible in this excerpt — appears to record
// whether the last storage completion query succeeded; confirm.
45 storageCompletionOk bool
// Presumably the piece state as last published to API consumers — verify
// against callers.
47 publicPieceState PieceState
// Current download priority for this piece.
48 priority piecePriority
// pendingWritesMutex guards the pendingWrites counter; noPendingWrites is
// broadcast when that counter drops to zero (see decrementPendingWrites
// and waitNoPendingWrites).
50 pendingWritesMutex sync.Mutex
52 noPendingWrites sync.Cond
// String implements fmt.Stringer, rendering the piece as
// "<infohash-hex>/<piece-index>".
55 func (p *Piece) String() string {
56 return fmt.Sprintf("%s/%d", p.t.infoHash.HexString(), p.index)
// Info returns this piece's metadata from the torrent's info dictionary.
59 func (p *Piece) Info() metainfo.Piece {
60 return p.t.info.Piece(p.index)
// Storage returns the storage-layer handle for this piece.
63 func (p *Piece) Storage() storage.Piece {
64 return p.t.storage.Piece(p.Info())
// pendingChunkIndex reports whether the chunk at chunkIndex still needs to
// be written, i.e. its dirty bit is not set.
67 func (p *Piece) pendingChunkIndex(chunkIndex int) bool {
68 return !p.dirtyChunks.Contains(chunkIndex)
// pendingChunk reports whether the chunk identified by cs (resolved to an
// index via the given chunkSize) has not yet been written.
71 func (p *Piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
72 return p.pendingChunkIndex(chunkIndex(cs, chunkSize))
// hasDirtyChunks reports whether any chunk has been written since the last
// check.
75 func (p *Piece) hasDirtyChunks() bool {
76 return p.dirtyChunks.Len() != 0
// numDirtyChunks returns how many chunks have been written since the last
// check.
79 func (p *Piece) numDirtyChunks() (ret int) {
80 return p.dirtyChunks.Len()
// unpendChunkIndex marks chunk i as written (dirty). Body elided from this
// excerpt — presumably the inverse of pendChunkIndex, i.e.
// dirtyChunks.Add(i); confirm against the full source.
83 func (p *Piece) unpendChunkIndex(i int) {
// pendChunkIndex marks chunk i as needing to be written (again) by
// clearing its dirty bit.
87 func (p *Piece) pendChunkIndex(i int) {
88 p.dirtyChunks.Remove(i)
// numChunks returns the number of chunks in this piece, as computed by the
// owning torrent.
91 func (p *Piece) numChunks() int {
92 return p.t.pieceNumChunks(p.index)
// undirtiedChunkIndices returns a bitmap of the chunks that are NOT dirty:
// a copy of dirtyChunks with every bit in [0, numChunks) flipped. (The
// naked return of the named result is elided from this excerpt.)
95 func (p *Piece) undirtiedChunkIndices() (ret bitmap.Bitmap) {
96 ret = p.dirtyChunks.Copy()
97 ret.FlipRange(0, p.numChunks())
// incrementPendingWrites records the start of an in-flight write to this
// piece, under pendingWritesMutex. (The counter increment itself is elided
// from this excerpt.)
101 func (p *Piece) incrementPendingWrites() {
102 p.pendingWritesMutex.Lock()
104 p.pendingWritesMutex.Unlock()
// decrementPendingWrites records completion of an in-flight write and
// broadcasts noPendingWrites once the counter returns to zero. The lines
// between the two zero-checks are elided from this excerpt — presumably an
// underflow panic followed by the decrement; confirm against the full
// source.
107 func (p *Piece) decrementPendingWrites() {
108 p.pendingWritesMutex.Lock()
109 if p.pendingWrites == 0 {
113 if p.pendingWrites == 0 {
114 p.noPendingWrites.Broadcast()
116 p.pendingWritesMutex.Unlock()
// waitNoPendingWrites blocks until there are no in-flight writes to this
// piece. Standard condition-variable loop: Wait releases
// pendingWritesMutex while blocked and reacquires it before re-testing.
119 func (p *Piece) waitNoPendingWrites() {
120 p.pendingWritesMutex.Lock()
121 for p.pendingWrites != 0 {
122 p.noPendingWrites.Wait()
124 p.pendingWritesMutex.Unlock()
// chunkIndexDirty reports whether the given chunk has been written since
// the last check.
127 func (p *Piece) chunkIndexDirty(chunk int) bool {
128 return p.dirtyChunks.Contains(chunk)
// chunkIndexSpec returns the (offset, length) request spec for the given
// chunk within this piece, accounting for the piece's length and the chunk
// size in use.
131 func (p *Piece) chunkIndexSpec(chunk int) chunkSpec {
132 return chunkIndexSpec(chunk, p.length(), p.chunkSize())
// numDirtyBytes returns the total bytes written since the last check. The
// final chunk may be shorter than chunkSize, so when it is dirty it is
// measured via its chunkSpec and excluded from the regular-chunk count.
135 func (p *Piece) numDirtyBytes() (ret pp.Integer) {
137 // if ret > p.length() {
138 // panic("too many dirty bytes")
141 numRegularDirtyChunks := p.numDirtyChunks()
142 if p.chunkIndexDirty(p.numChunks() - 1) {
143 numRegularDirtyChunks--
144 ret += p.chunkIndexSpec(p.lastChunkIndex()).Length
146 ret += pp.Integer(numRegularDirtyChunks) * p.chunkSize()
// length returns this piece's length in bytes, as computed by the torrent.
150 func (p *Piece) length() pp.Integer {
151 return p.t.pieceLength(p.index)
// chunkSize returns the request chunk size in use for this piece. Body
// elided from this excerpt — presumably delegates to the torrent; confirm
// against the full source.
154 func (p *Piece) chunkSize() pp.Integer {
// lastChunkIndex returns the index of the final (possibly short) chunk.
158 func (p *Piece) lastChunkIndex() int {
159 return p.numChunks() - 1
// bytesLeft returns how many bytes of the piece remain to be obtained:
// length minus the dirty bytes, or zero when the piece is already complete
// (the early return inside the if is elided from this excerpt).
162 func (p *Piece) bytesLeft() (ret pp.Integer) {
163 if p.t.pieceComplete(p.index) {
166 return p.length() - p.numDirtyBytes()
// VerifyData queues a hash check of this piece and blocks until the
// verification count passes the value observed on entry. Runs under the
// client lock (the matching Lock call and the loop's wait statement are
// elided from this excerpt — confirm against the full source).
169 func (p *Piece) VerifyData() {
171 defer p.t.cl.mu.Unlock()
172 target := p.numVerifies + 1
176 // log.Printf("target: %d", target)
177 p.t.queuePieceCheck(p.index)
178 for p.numVerifies < target {
179 // log.Printf("got %d verifies", p.numVerifies)
// queuedForHash reports whether this piece is currently queued for hash
// checking by the owning torrent.
185 func (p *Piece) queuedForHash() bool {
186 return p.t.piecesQueuedForHash.Get(p.index)