[btrtrc.git] / requesting.go
Always count unhandled requests as pending
package torrent

import (
        "container/heap"
        "context"
        "encoding/gob"
        "math/rand"
        "reflect"
        "runtime/pprof"
        "time"
        "unsafe"

        "github.com/RoaringBitmap/roaring"
        "github.com/anacrolix/log"
        "github.com/anacrolix/multiless"

        request_strategy "github.com/anacrolix/torrent/request-strategy"
)

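// tickleRequester wakes anything waiting on the Client's updateRequests condition so that peer
// request states get recomputed.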
func (cl *Client) tickleRequester() {
        cl.updateRequests.Broadcast()
}

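// getRequestStrategyInput snapshots every torrent that has info, along with its pieces and peers,
// into the form consumed by the request-strategy package.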
func (cl *Client) getRequestStrategyInput() request_strategy.Input {
        ts := make([]request_strategy.Torrent, 0, len(cl.torrents))
        for _, t := range cl.torrents {
                if !t.haveInfo() {
                        // This check would go away if metadata were handled here. We have to guard
                        // against not knowing the piece size; if we have no info, we have no pieces
                        // either, so the end result is the same.
                        continue
                }
                rst := request_strategy.Torrent{
                        InfoHash:       t.infoHash,
                        ChunksPerPiece: t.chunksPerRegularPiece(),
                }
                if t.storage != nil {
                        rst.Capacity = t.storage.Capacity
                }
                rst.Pieces = make([]request_strategy.Piece, 0, len(t.pieces))
                for i := range t.pieces {
                        p := &t.pieces[i]
                        rst.Pieces = append(rst.Pieces, request_strategy.Piece{
                                Request:           !t.ignorePieceForRequests(i),
                                Priority:          p.purePriority(),
                                Partial:           t.piecePartiallyDownloaded(i),
                                Availability:      p.availability,
                                Length:            int64(p.length()),
                                NumPendingChunks:  int(t.pieceNumPendingChunks(i)),
                                IterPendingChunks: &p.undirtiedChunksIter,
                        })
                }
                t.iterPeers(func(p *Peer) {
                        if p.closed.IsSet() {
                                return
                        }
                        if p.piecesReceivedSinceLastRequestUpdate > p.maxPiecesReceivedBetweenRequestUpdates {
                                p.maxPiecesReceivedBetweenRequestUpdates = p.piecesReceivedSinceLastRequestUpdate
                        }
                        p.piecesReceivedSinceLastRequestUpdate = 0
                        rst.Peers = append(rst.Peers, request_strategy.Peer{
                                Pieces:           *p.newPeerPieces(),
                                MaxRequests:      p.nominalMaxRequests(),
                                ExistingRequests: p.actualRequestState.Requests,
                                Choking:          p.peerChoking,
                                PieceAllowedFast: p.peerAllowedFast,
                                DownloadRate:     p.downloadRate(),
                                Age:              time.Since(p.completedHandshake),
                                Id: peerId{
                                        Peer: p,
                                        ptr:  uintptr(unsafe.Pointer(p)),
                                },
                        })
                })
                ts = append(ts, rst)
        }
        return request_strategy.Input{
                Torrents:           ts,
                MaxUnverifiedBytes: cl.config.MaxUnverifiedBytes,
        }
}

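// peerId is registered with gob so that values containing it, such as the request-strategy input,
// can be encoded; presumably this is used when dumping strategy state for diagnosis.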
func init() {
        gob.Register(peerId{})
}

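// peerId identifies a Peer to the request strategy by its pointer value, which is stable for the
// lifetime of the Peer within this process.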
type peerId struct {
        *Peer
        ptr uintptr
}

func (p peerId) Uintptr() uintptr {
        return p.ptr
}

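// GobEncode writes out just the pointer value, reinterpreting the uintptr field as its raw bytes
// via unsafe.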
func (p peerId) GobEncode() (b []byte, _ error) {
        *(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
                Data: uintptr(unsafe.Pointer(&p.ptr)),
                Len:  int(unsafe.Sizeof(p.ptr)),
                Cap:  int(unsafe.Sizeof(p.ptr)),
        }
        return
}

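// GobDecode reconstructs the pointer value from the raw bytes produced by GobEncode. This is only
// meaningful within the same process.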
func (p *peerId) GobDecode(b []byte) error {
        if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
                panic(len(b))
        }
        ptr := unsafe.Pointer(&b[0])
        p.ptr = *(*uintptr)(ptr)
        log.Printf("%p", ptr)
        dst := reflect.SliceHeader{
                Data: uintptr(unsafe.Pointer(&p.Peer)),
                Len:  int(unsafe.Sizeof(p.Peer)),
                Cap:  int(unsafe.Sizeof(p.Peer)),
        }
        copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
        return nil
}

type RequestIndex = request_strategy.RequestIndex
type chunkIndexType = request_strategy.ChunkIndex

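// peerRequests is a heap.Interface over candidate request indexes for a single peer; the most
// desirable request sorts first.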
type peerRequests struct {
        requestIndexes       []RequestIndex
        peer                 *Peer
        torrentStrategyInput request_strategy.Torrent
}

func (p *peerRequests) Len() int {
        return len(p.requestIndexes)
}

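// Less orders candidate requests for the heap: while we're choked, requests outside the peer's
// allowed-fast set sink to the end; then requests with fewer outstanding requests elsewhere come
// first, then requests we already have in flight, then higher piece priority, then lower piece
// availability, and finally piece and request index for determinism.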
func (p *peerRequests) Less(i, j int) bool {
        leftRequest := p.requestIndexes[i]
        rightRequest := p.requestIndexes[j]
        t := p.peer.t
        leftPieceIndex := leftRequest / p.torrentStrategyInput.ChunksPerPiece
        rightPieceIndex := rightRequest / p.torrentStrategyInput.ChunksPerPiece
        leftCurrent := p.peer.actualRequestState.Requests.Contains(leftRequest)
        rightCurrent := p.peer.actualRequestState.Requests.Contains(rightRequest)
        pending := func(index RequestIndex, current bool) int {
                ret := t.pendingRequests.Get(index)
                if current {
                        ret--
                }
                // See https://github.com/anacrolix/torrent/issues/679 for possible issues. This should be
                // resolved.
                if ret < 0 {
                        panic(ret)
                }
                return ret
        }
        ml := multiless.New()
        // Push requests that can't be served right now to the end. But we don't throw them away unless
        // there's a better alternative. This is for when we're using the fast extension and get choked
        // but our requests could still be good when we get unchoked.
        if p.peer.peerChoking {
                ml = ml.Bool(
                        !p.peer.peerAllowedFast.Contains(leftPieceIndex),
                        !p.peer.peerAllowedFast.Contains(rightPieceIndex),
                )
        }
        ml = ml.Int(
                pending(leftRequest, leftCurrent),
                pending(rightRequest, rightCurrent))
        ml = ml.Bool(!leftCurrent, !rightCurrent)
        ml = ml.Int(
                -int(p.torrentStrategyInput.Pieces[leftPieceIndex].Priority),
                -int(p.torrentStrategyInput.Pieces[rightPieceIndex].Priority),
        )
        ml = ml.Int(
                int(p.torrentStrategyInput.Pieces[leftPieceIndex].Availability),
                int(p.torrentStrategyInput.Pieces[rightPieceIndex].Availability))
        ml = ml.Uint32(leftPieceIndex, rightPieceIndex)
        ml = ml.Uint32(leftRequest, rightRequest)
        return ml.MustLess()
}

func (p *peerRequests) Swap(i, j int) {
        p.requestIndexes[i], p.requestIndexes[j] = p.requestIndexes[j], p.requestIndexes[i]
}

func (p *peerRequests) Push(x interface{}) {
        p.requestIndexes = append(p.requestIndexes, x.(RequestIndex))
}

func (p *peerRequests) Pop() interface{} {
        last := len(p.requestIndexes) - 1
        x := p.requestIndexes[last]
        p.requestIndexes = p.requestIndexes[:last]
        return x
}

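// getDesiredRequestState computes the set of requests we would like to have outstanding to this
// peer: it collects the requestable chunks the peer can serve, then pops the best candidates off a
// heap until the peer's nominal request limit is reached.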
func (p *Peer) getDesiredRequestState() (desired requestState) {
        input := p.t.cl.getRequestStrategyInput()
        requestHeap := peerRequests{
                peer: p,
        }
        for _, t := range input.Torrents {
                if t.InfoHash == p.t.infoHash {
                        requestHeap.torrentStrategyInput = t
                        break
                }
        }
        request_strategy.GetRequestablePieces(
                input,
                func(t *request_strategy.Torrent, rsp *request_strategy.Piece, pieceIndex int) {
                        if t.InfoHash != p.t.infoHash {
                                return
                        }
                        if !p.peerHasPiece(pieceIndex) {
                                return
                        }
                        allowedFast := p.peerAllowedFast.ContainsInt(pieceIndex)
                        rsp.IterPendingChunks.Iter(func(ci request_strategy.ChunkIndex) {
                                req := p.t.pieceRequestIndexOffset(pieceIndex) + ci
                                if !allowedFast {
                                        // We must signal interest to request this.
                                        desired.Interested = true
                                        // We can make a new request, or sustain an existing one, if we're not
                                        // choked, or if we made the request previously (presumably while
                                        // unchoked) and the peer hasn't responded yet (the request having been
                                        // retained because we're using the fast extension).
                                        if p.peerChoking && !p.actualRequestState.Requests.Contains(req) {
                                                // We can't request this right now.
                                                return
                                        }
                                }
                                requestHeap.requestIndexes = append(requestHeap.requestIndexes, req)
                        })
                },
        )
        p.t.assertPendingRequests()
        heap.Init(&requestHeap)
        for requestHeap.Len() != 0 && desired.Requests.GetCardinality() < uint64(p.nominalMaxRequests()) {
                requestIndex := heap.Pop(&requestHeap).(RequestIndex)
                desired.Requests.Add(requestIndex)
        }
        return
}

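// maybeUpdateActualRequestState recomputes and applies the desired request state if an update has
// been flagged, labelling the work for pprof. It reports whether more messages may be written to
// the peer.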
func (p *Peer) maybeUpdateActualRequestState() bool {
        if p.needRequestUpdate == "" {
                return true
        }
        var more bool
        pprof.Do(
                context.Background(),
                pprof.Labels("update request", p.needRequestUpdate),
                func(_ context.Context) {
                        next := p.getDesiredRequestState()
                        more = p.applyRequestState(next)
                },
        )
        return more
}

// Transmit/action the request state to the peer.
func (p *Peer) applyRequestState(next requestState) bool {
        current := &p.actualRequestState
        if !p.setInterested(next.Interested) {
                return false
        }
        more := true
        cancel := roaring.AndNot(&current.Requests, &next.Requests)
        cancel.Iterate(func(req uint32) bool {
                more = p.cancel(req)
                return more
        })
        if !more {
                return false
        }
        // We randomize the order in which requests are issued to reduce overlap with requests made to
        // other peers. Strictly this depends on the order in which the peer services requests, but if
        // we are only able to issue some requests before buffering occurs, or the peer starts handling
        // our requests before they've all arrived, the randomization should reduce overlap. Note,
        // however, that if the desired requests came to us in priority order, randomizing throws that
        // benefit away.
        for _, x := range rand.Perm(int(next.Requests.GetCardinality())) {
                req, err := next.Requests.Select(uint32(x))
                if err != nil {
                        panic(err)
                }
                if p.cancelledRequests.Contains(req) {
                        // Waiting for a reject or piece message, which will suitably trigger us to update our
                        // requests, so we can skip this one with no additional consideration.
                        continue
                }
                // The cardinality of our desired requests shouldn't exceed the max, since the max is used
                // when computing them. However, with the fast extension enabled, if we cancelled requests
                // that haven't been rejected or serviced yet, we can end up with extra outstanding
                // requests. We could subtract the number of outstanding cancels from the next request
                // cardinality, but peers might not like that.
                if maxRequests(current.Requests.GetCardinality()) >= p.nominalMaxRequests() {
                        //log.Printf("not assigning all requests [desired=%v, cancelled=%v, current=%v, max=%v]",
                        //      next.Requests.GetCardinality(),
                        //      p.cancelledRequests.GetCardinality(),
                        //      current.Requests.GetCardinality(),
                        //      p.nominalMaxRequests(),
                        //)
                        break
                }
                more, err = p.request(req)
                if err != nil {
                        panic(err)
                }
                if !more {
                        break
                }
        }
        p.updateRequestsTimer.Stop()
        if more {
                p.needRequestUpdate = ""
                if !current.Requests.IsEmpty() {
                        p.updateRequestsTimer.Reset(3 * time.Second)
                }
        }
        return more
}