Sergey Matveev's repositories - btrtrc.git/blob - requesting.go
Add Torrent-level request cancel for consistency
[btrtrc.git] / requesting.go
1 package torrent
2
import (
	"container/heap"
	"context"
	"encoding/gob"
	"fmt"
	"reflect"
	"runtime/pprof"
	"time"
	"unsafe"

	"github.com/anacrolix/log"
	"github.com/anacrolix/multiless"

	request_strategy "github.com/anacrolix/torrent/request-strategy"
)
17
18 func (t *Torrent) requestStrategyPieceOrderState(i int) request_strategy.PieceRequestOrderState {
19         return request_strategy.PieceRequestOrderState{
20                 Priority:     t.piece(i).purePriority(),
21                 Partial:      t.piecePartiallyDownloaded(i),
22                 Availability: t.piece(i).availability,
23         }
24 }
25
// Register peerId with gob so values containing it can be encoded and
// decoded (see GobEncode/GobDecode below for the custom wire form).
func init() {
        gob.Register(peerId{})
}
29
// peerId is a gob-serializable identity for a Peer. ptr is expected to
// hold the same address as the embedded *Peer — GobDecode fills both
// fields from the same bytes.
type peerId struct {
        *Peer
        ptr uintptr // raw address of the Peer, used as a stable identity value
}
34
// Uintptr exposes the raw pointer value backing this peer identity.
func (p peerId) Uintptr() uintptr {
        return p.ptr
}
38
39 func (p peerId) GobEncode() (b []byte, _ error) {
40         *(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
41                 Data: uintptr(unsafe.Pointer(&p.ptr)),
42                 Len:  int(unsafe.Sizeof(p.ptr)),
43                 Cap:  int(unsafe.Sizeof(p.ptr)),
44         }
45         return
46 }
47
48 func (p *peerId) GobDecode(b []byte) error {
49         if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
50                 panic(len(b))
51         }
52         ptr := unsafe.Pointer(&b[0])
53         p.ptr = *(*uintptr)(ptr)
54         log.Printf("%p", ptr)
55         dst := reflect.SliceHeader{
56                 Data: uintptr(unsafe.Pointer(&p.Peer)),
57                 Len:  int(unsafe.Sizeof(p.Peer)),
58                 Cap:  int(unsafe.Sizeof(p.Peer)),
59         }
60         copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
61         return nil
62 }
63
type (
        // RequestIndex is a torrent-wide chunk index: piece index times
        // chunks-per-regular-piece, plus the chunk's index within the piece
        // (see the divisions in peerRequests.Less and the offset arithmetic
        // in getDesiredRequestState).
        RequestIndex   = request_strategy.RequestIndex
        // chunkIndexType indexes a chunk within a single piece.
        chunkIndexType = request_strategy.ChunkIndex
)
68
// peerRequests implements heap.Interface over candidate request indexes
// for a single peer, so the most desirable request (per Less) pops first.
type peerRequests struct {
        requestIndexes []RequestIndex // heap storage of candidate requests
        peer           *Peer          // the peer the candidates would be sent to
}
73
// Len reports the number of candidate request indexes in the heap.
func (p *peerRequests) Len() int {
        return len(p.requestIndexes)
}
77
// Less orders candidate requests by desirability for this peer. Earlier
// criteria dominate (multiless short-circuits): serviceability while
// choked, whether we/nobody already have the request pending, the
// current claimant's queue size, last-requested time, piece priority,
// then piece availability.
func (p *peerRequests) Less(i, j int) bool {
        leftRequest := p.requestIndexes[i]
        rightRequest := p.requestIndexes[j]
        t := p.peer.t
        // Dividing a request index by chunks-per-piece recovers its piece index.
        leftPieceIndex := leftRequest / t.chunksPerRegularPiece()
        rightPieceIndex := rightRequest / t.chunksPerRegularPiece()
        ml := multiless.New()
        // Push requests that can't be served right now to the end. But we don't throw them away unless
        // there's a better alternative. This is for when we're using the fast extension and get choked
        // but our requests could still be good when we get unchoked.
        if p.peer.peerChoking {
                ml = ml.Bool(
                        !p.peer.peerAllowedFast.Contains(leftPieceIndex),
                        !p.peer.peerAllowedFast.Contains(rightPieceIndex),
                )
        }
        // Prefer requests we already have pending ourselves, then requests
        // that nobody has pending.
        leftPeer := t.pendingRequests[leftRequest]
        rightPeer := t.pendingRequests[rightRequest]
        ml = ml.Bool(rightPeer == p.peer, leftPeer == p.peer)
        ml = ml.Bool(rightPeer == nil, leftPeer == nil)
        if ml.Ok() {
                return ml.MustLess()
        }
        // Past this point both requests have the same claim status. If both
        // are claimed, prefer taking from the claimant with the larger
        // request queue (equal claimants compare equal here).
        if leftPeer != nil {
                ml = ml.Uint64(
                        rightPeer.actualRequestState.Requests.GetCardinality(),
                        leftPeer.actualRequestState.Requests.GetCardinality(),
                )
        }
        // Tie-break on the difference in last-requested timestamps.
        ml = ml.CmpInt64(t.lastRequested[rightRequest].Sub(t.lastRequested[leftRequest]).Nanoseconds())
        leftPiece := t.piece(int(leftPieceIndex))
        rightPiece := t.piece(int(rightPieceIndex))
        // Higher-priority pieces first (negated so larger priority sorts earlier).
        ml = ml.Int(
                // Technically we would be happy with the cached priority here, except we don't actually
                // cache it anymore, and Torrent.piecePriority just does another lookup of *Piece to resolve
                // the priority through Piece.purePriority, which is probably slower.
                -int(leftPiece.purePriority()),
                -int(rightPiece.purePriority()),
        )
        // Rarer pieces (lower availability) first.
        ml = ml.Int(
                int(leftPiece.availability),
                int(rightPiece.availability))
        return ml.Less()
}
122
123 func (p *peerRequests) Swap(i, j int) {
124         p.requestIndexes[i], p.requestIndexes[j] = p.requestIndexes[j], p.requestIndexes[i]
125 }
126
127 func (p *peerRequests) Push(x interface{}) {
128         p.requestIndexes = append(p.requestIndexes, x.(RequestIndex))
129 }
130
131 func (p *peerRequests) Pop() interface{} {
132         last := len(p.requestIndexes) - 1
133         x := p.requestIndexes[last]
134         p.requestIndexes = p.requestIndexes[:last]
135         return x
136 }
137
// desiredRequestState is the request state we would like a peer to have:
// the prioritized candidate requests, and whether we need to signal
// interest to make (some of) them.
type desiredRequestState struct {
        Requests   peerRequests
        Interested bool
}
142
// getDesiredRequestState computes the ideal request set for this peer
// from the shared request-strategy input: every undirtied chunk of each
// requestable piece the peer has, plus whether we must be interested to
// request any of it. Returns the zero value before torrent info is known.
func (p *Peer) getDesiredRequestState() (desired desiredRequestState) {
        if !p.t.haveInfo() {
                return
        }
        input := p.t.getRequestStrategyInput()
        requestHeap := peerRequests{
                peer: p,
        }
        request_strategy.GetRequestablePieces(
                input,
                p.t.cl.pieceRequestOrder[p.t.storage.Capacity],
                func(ih InfoHash, pieceIndex int) {
                        // The piece request order is keyed by storage capacity and can
                        // span torrents; skip pieces belonging to other torrents.
                        if ih != p.t.infoHash {
                                return
                        }
                        if !p.peerHasPiece(pieceIndex) {
                                return
                        }
                        allowedFast := p.peerAllowedFast.ContainsInt(pieceIndex)
                        p.t.piece(pieceIndex).undirtiedChunksIter.Iter(func(ci request_strategy.ChunkIndex) {
                                // Convert the per-piece chunk index to a torrent-wide request index.
                                r := p.t.pieceRequestIndexOffset(pieceIndex) + ci
                                // if p.t.pendingRequests.Get(r) != 0 && !p.actualRequestState.Requests.Contains(r) {
                                //      return
                                // }
                                if !allowedFast {
                                        // We must signal interest to request this. TODO: We could set interested if the
                                        // peers pieces (minus the allowed fast set) overlap with our missing pieces if
                                        // there are any readers, or any pending pieces.
                                        desired.Interested = true
                                        // We can make or will allow sustaining a request here if we're not choked, or
                                        // have made the request previously (presumably while unchoked), and haven't had
                                        // the peer respond yet (and the request was retained because we are using the
                                        // fast extension).
                                        if p.peerChoking && !p.actualRequestState.Requests.Contains(r) {
                                                // We can't request this right now.
                                                return
                                        }
                                }
                                requestHeap.requestIndexes = append(requestHeap.requestIndexes, r)
                        })
                },
        )
        p.t.assertPendingRequests()
        desired.Requests = requestHeap
        return
}
189
190 func (p *Peer) maybeUpdateActualRequestState() bool {
191         if p.needRequestUpdate == "" {
192                 return true
193         }
194         var more bool
195         pprof.Do(
196                 context.Background(),
197                 pprof.Labels("update request", p.needRequestUpdate),
198                 func(_ context.Context) {
199                         next := p.getDesiredRequestState()
200                         more = p.applyRequestState(next)
201                 },
202         )
203         return more
204 }
205
// Transmit/action the request state to the peer: set our interest, then
// pop candidates off the desired heap (best first) and issue them until
// the peer's request limit is hit or a send fails. Returns false when a
// send failed, leaving needRequestUpdate flagged so the update retries.
func (p *Peer) applyRequestState(next desiredRequestState) bool {
        current := &p.actualRequestState
        if !p.setInterested(next.Interested) {
                return false
        }
        more := true
        requestHeap := &next.Requests
        t := p.t
        heap.Init(requestHeap)
        for requestHeap.Len() != 0 && maxRequests(current.Requests.GetCardinality()) < p.nominalMaxRequests() {
                req := heap.Pop(requestHeap).(RequestIndex)
                if p.cancelledRequests.Contains(req) {
                        // Waiting for a reject or piece message, which will suitably trigger us to update our
                        // requests, so we can skip this one with no additional consideration.
                        continue
                }
                existing := t.requestingPeer(req)
                if existing != nil && existing != p && existing.uncancelledRequests() > current.Requests.GetCardinality() {
                        // Another peer holds the request and has a longer outstanding
                        // queue than ours: cancel it there before requesting it ourselves.
                        t.cancelRequest(req)
                }
                more = p.mustRequest(req)
                if !more {
                        break
                }
        }
        // TODO: This may need to change, we might want to update even if there were no requests due to
        // filtering them for being recently requested already.
        p.updateRequestsTimer.Stop()
        if more {
                p.needRequestUpdate = ""
                if current.Interested {
                        // Re-arm a periodic refresh while we remain interested.
                        p.updateRequestsTimer.Reset(3 * time.Second)
                }
        }
        return more
}