piece.go

package torrent

import (
	"math/rand"
	"sync"

	"github.com/bradfitz/iter"

	pp "github.com/anacrolix/torrent/peer_protocol"
)

// piecePriority describes the importance of obtaining a particular piece.
type piecePriority byte

const (
	PiecePriorityNone      piecePriority = iota // Not wanted.
	PiecePriorityNormal                         // Wanted.
	PiecePriorityReadahead                      // May be required soon.
	PiecePriorityNext                           // Succeeds a piece where a read occurred.
	PiecePriorityNow                            // A read occurred in this piece.
)
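
// The constants above are declared in increasing order of urgency, so
// piecePriority values can be compared numerically: a higher value means the
// piece is needed sooner.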

type piece struct {
	// The completed piece SHA1 hash, from the metainfo "pieces" field.
	Hash pieceSum
	// Chunks we've written to since the last check. The chunk offset and
	// length can be determined by the request chunkSize in use.
	DirtyChunks []bool
	// Whether the piece is currently being hashed.
	Hashing bool
	// Whether the piece is queued for a hash check.
	QueuedForHash bool
	// Whether the piece has been hashed at least once.
	EverHashed bool
	// The piece state as exposed through the public API.
	PublicPieceState PieceState
	// The cached priority of the piece.
	priority piecePriority

	// Guards pendingWrites, and serves as the Locker for noPendingWrites.
	pendingWritesMutex sync.Mutex
	pendingWrites      int
	// Signalled when pendingWrites drops to zero. Its L field must point at
	// pendingWritesMutex before Wait is called.
	noPendingWrites sync.Cond
}
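
// Note that sync.Cond's zero value has a nil Locker, so Wait would panic on an
// unwired piece. A minimal initialization sketch, assuming the wiring happens
// wherever the torrent constructs its pieces:
//
//	p := new(piece)
//	p.noPendingWrites.L = &p.pendingWritesMutex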

// pendingChunk returns whether the chunk with the given spec is still wanted,
// i.e. it hasn't been written since the last hash check.
func (p *piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
	ci := chunkIndex(cs, chunkSize)
	if ci >= len(p.DirtyChunks) {
		return true
	}
	return !p.DirtyChunks[ci]
}

// numDirtyChunks counts the chunks that have been written since the last
// hash check.
func (p *piece) numDirtyChunks() (ret int) {
	for _, dirty := range p.DirtyChunks {
		if dirty {
			ret++
		}
	}
	return
}

// unpendChunkIndex marks the chunk at index i as dirty: it has been written,
// so it's no longer pending. DirtyChunks grows lazily as needed.
func (p *piece) unpendChunkIndex(i int) {
	for i >= len(p.DirtyChunks) {
		p.DirtyChunks = append(p.DirtyChunks, false)
	}
	p.DirtyChunks[i] = true
}

// pendChunkIndex marks the chunk at index i as pending again.
func (p *piece) pendChunkIndex(i int) {
	if i >= len(p.DirtyChunks) {
		// Chunks beyond the slice are already implicitly pending.
		return
	}
	p.DirtyChunks[i] = false
}

// chunkIndexSpec returns the chunkSpec for the chunk at the given index
// within a piece, truncating the final chunk at the piece boundary.
func chunkIndexSpec(index int, pieceLength, chunkSize pp.Integer) chunkSpec {
	ret := chunkSpec{pp.Integer(index) * chunkSize, chunkSize}
	if ret.Begin+ret.Length > pieceLength {
		ret.Length = pieceLength - ret.Begin
	}
	return ret
}
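
// A worked example: with pieceLength 41 and chunkSize 16, chunkIndexSpec(2, 41, 16)
// returns chunkSpec{Begin: 32, Length: 9}, because 32+16 overruns the piece,
// leaving only 41-32 = 9 bytes for the final chunk.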

// shuffledPendingChunkSpecs returns the specs for this piece's pending chunks,
// in random order.
func (p *piece) shuffledPendingChunkSpecs(t *torrent, piece int) (css []chunkSpec) {
	numPending := t.pieceNumPendingChunks(piece)
	if numPending == 0 {
		return
	}
	css = make([]chunkSpec, 0, numPending)
	for ci := range iter.N(t.pieceNumChunks(piece)) {
		if ci >= len(p.DirtyChunks) || !p.DirtyChunks[ci] {
			css = append(css, t.chunkIndexSpec(ci, piece))
		}
	}
	if len(css) <= 1 {
		return
	}
	// Fisher-Yates shuffle.
	for i := range css {
		j := rand.Intn(i + 1)
		css[i], css[j] = css[j], css[i]
	}
	return
}

// incrementPendingWrites records that a write to this piece is in flight.
func (p *piece) incrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	p.pendingWrites++
	p.pendingWritesMutex.Unlock()
}

// decrementPendingWrites records that an in-flight write has completed,
// waking any waiters when the count reaches zero.
func (p *piece) decrementPendingWrites() {
	p.pendingWritesMutex.Lock()
	if p.pendingWrites == 0 {
		panic("pendingWrites underflow")
	}
	p.pendingWrites--
	if p.pendingWrites == 0 {
		p.noPendingWrites.Broadcast()
	}
	p.pendingWritesMutex.Unlock()
}

// waitNoPendingWrites blocks until there are no writes in flight for this
// piece, e.g. so its data can be safely read back for hashing.
func (p *piece) waitNoPendingWrites() {
	p.pendingWritesMutex.Lock()
	for p.pendingWrites != 0 {
		p.noPendingWrites.Wait()
	}
	p.pendingWritesMutex.Unlock()
}
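
// A usage sketch with hypothetical call sites: a writer brackets each
// outstanding chunk write, and a hash check waits for the piece to quiesce
// before reading its data back:
//
//	p.incrementPendingWrites()
//	go func() {
//		defer p.decrementPendingWrites()
//		// ... write the chunk to storage ...
//	}()
//
//	// Elsewhere, before hashing:
//	p.waitNoPendingWrites()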