package torrent

import (
        "context"
        "encoding/gob"
        "fmt"
        "reflect"
        "runtime/pprof"
        "time"
        "unsafe"

        g "github.com/anacrolix/generics"

        "github.com/RoaringBitmap/roaring"
        "github.com/anacrolix/generics/heap"
        "github.com/anacrolix/log"
        "github.com/anacrolix/multiless"

        requestStrategy "github.com/anacrolix/torrent/request-strategy"
        typedRoaring "github.com/anacrolix/torrent/typed-roaring"
)

type (
        // Since we have to store all the requests in memory, we can't reasonably exceed what could be
        // indexed with the memory space available.
        maxRequests = int
)

func (t *Torrent) requestStrategyPieceOrderState(i int) requestStrategy.PieceRequestOrderState {
        return requestStrategy.PieceRequestOrderState{
                Priority:     t.piece(i).purePriority(),
                Partial:      t.piecePartiallyDownloaded(i),
                Availability: t.piece(i).availability(),
        }
}

func init() {
        gob.Register(peerId{})
}

// peerId is a gob-encodable stand-in for a Peer: it carries the Peer pointer alongside its numeric
// address, which is what actually gets serialized. It's only meaningful within the current process.
type peerId struct {
        *Peer
        ptr uintptr
}

func (p peerId) Uintptr() uintptr {
        return p.ptr
}

// GobEncode serializes just the pointer value, by aliasing a byte slice directly over the ptr
// field.
func (p peerId) GobEncode() (b []byte, _ error) {
        *(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
                Data: uintptr(unsafe.Pointer(&p.ptr)),
                Len:  int(unsafe.Sizeof(p.ptr)),
                Cap:  int(unsafe.Sizeof(p.ptr)),
        }
        return
}
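
// On Go 1.17+ the same aliasing can be written without reflect.SliceHeader, which is now
// deprecated. A minimal sketch (gobEncodeAlt is hypothetical, not part of this file):
//
//	func (p peerId) gobEncodeAlt() ([]byte, error) {
//		return unsafe.Slice((*byte)(unsafe.Pointer(&p.ptr)), int(unsafe.Sizeof(p.ptr))), nil
//	}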

// GobDecode reconstructs the pointer: both the ptr field and the embedded Peer pointer are
// overwritten with the decoded address.
func (p *peerId) GobDecode(b []byte) error {
        if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
                panic(len(b))
        }
        ptr := unsafe.Pointer(&b[0])
        p.ptr = *(*uintptr)(ptr)
        log.Printf("%p", ptr)
        dst := reflect.SliceHeader{
                Data: uintptr(unsafe.Pointer(&p.Peer)),
                Len:  int(unsafe.Sizeof(p.Peer)),
                Cap:  int(unsafe.Sizeof(p.Peer)),
        }
        copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
        return nil
}
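
// Likewise, the decode side could copy via unsafe.Slice instead of a hand-built
// reflect.SliceHeader. A hypothetical sketch (gobDecodeAlt is not part of this file):
//
//	func (p *peerId) gobDecodeAlt(b []byte) error {
//		p.ptr = *(*uintptr)(unsafe.Pointer(&b[0]))
//		copy(unsafe.Slice((*byte)(unsafe.Pointer(&p.Peer)), int(unsafe.Sizeof(p.Peer))), b)
//		return nil
//	}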

type (
        RequestIndex   = requestStrategy.RequestIndex
        chunkIndexType = requestStrategy.ChunkIndex
)

// desiredPeerRequests is the set of requests we would like to have outstanding to a peer, ordered
// by lessByValue for use as a heap.
type desiredPeerRequests struct {
        requestIndexes []RequestIndex
        peer           *Peer
        pieceStates    []g.Option[requestStrategy.PieceRequestOrderState]
}

func (p *desiredPeerRequests) lessByValue(leftRequest, rightRequest RequestIndex) bool {
        t := p.peer.t
        leftPieceIndex := t.pieceIndexOfRequestIndex(leftRequest)
        rightPieceIndex := t.pieceIndexOfRequestIndex(rightRequest)
        ml := multiless.New()
        // Push requests that can't be served right now to the end. But we don't throw them away unless
        // there's a better alternative. This is for when we're using the fast extension and get choked
        // but our requests could still be good when we get unchoked.
        if p.peer.peerChoking {
                ml = ml.Bool(
                        !p.peer.peerAllowedFast.Contains(leftPieceIndex),
                        !p.peer.peerAllowedFast.Contains(rightPieceIndex),
                )
        }
        leftPiece := p.pieceStates[leftPieceIndex].UnwrapPtr()
        rightPiece := p.pieceStates[rightPieceIndex].UnwrapPtr()
        // Putting this first means we can steal requests from lesser-performing peers for our first few
        // new requests.
        priority := func() PiecePriority {
                // Technically we would be happy with the cached priority here, except we don't actually
                // cache it anymore, and Torrent.PiecePriority just does another lookup of *Piece to resolve
                // the priority through Piece.purePriority, which is probably slower.
                leftPriority := leftPiece.Priority
                rightPriority := rightPiece.Priority
                ml = ml.Int(
                        -int(leftPriority),
                        -int(rightPriority),
                )
                if !ml.Ok() {
                        if leftPriority != rightPriority {
                                panic("expected equal")
                        }
                }
                return leftPriority
        }()
        if ml.Ok() {
                return ml.MustLess()
        }
        leftRequestState := t.requestState[leftRequest]
        rightRequestState := t.requestState[rightRequest]
        leftPeer := leftRequestState.peer
        rightPeer := rightRequestState.peer
        // Prefer chunks already requested from this peer.
        ml = ml.Bool(rightPeer == p.peer, leftPeer == p.peer)
        // Prefer unrequested chunks.
        ml = ml.Bool(rightPeer == nil, leftPeer == nil)
        if ml.Ok() {
                return ml.MustLess()
        }
        if leftPeer != nil {
                // The right peer should also be set, or we'd have resolved the comparison by now.
                ml = ml.Uint64(
                        rightPeer.requestState.Requests.GetCardinality(),
                        leftPeer.requestState.Requests.GetCardinality(),
                )
                // Neither last-requested time should be zero here: the existing-peer checks above ensure
                // both requests have actually been made.
                leftLast := leftRequestState.when
                rightLast := rightRequestState.when
                if leftLast.IsZero() || rightLast.IsZero() {
                        panic("expected non-zero last requested times")
                }
                // We want the most recently requested on the left. Clients like Transmission serve requests
                // in received order, so the most recently requested is the one that has the longest until
                // it will be served and is therefore the best candidate to cancel.
                ml = ml.CmpInt64(rightLast.Sub(leftLast).Nanoseconds())
        }
        ml = ml.Int(
                leftPiece.Availability,
                rightPiece.Availability)
        if priority == PiecePriorityReadahead {
                // TODO: For readahead in particular, it would be even better to consider distance from the
                // reader position so that reads earlier in a torrent don't starve reads later in the
                // torrent. This would probably require reconsideration of how readahead priority works.
                ml = ml.Int(leftPieceIndex, rightPieceIndex)
        } else {
                ml = ml.Int(t.pieceRequestOrder[leftPieceIndex], t.pieceRequestOrder[rightPieceIndex])
        }
        return ml.Less()
}
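
// For reference, multiless builds a lexicographic comparison: each call adds a tie-breaker that
// only matters if everything before it compared equal. A minimal sketch with made-up values:
//
//	ml := multiless.New().
//		Bool(false, true). // left is less: false sorts before true
//		Int(3, 3)          // ignored; the Bool above already decided
//	_ = ml.Less()              // true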

// desiredRequestState is the outcome of getDesiredRequestState: the requests we would like to have
// outstanding, and whether we should be interested in the peer at all.
type desiredRequestState struct {
        Requests   desiredPeerRequests
        Interested bool
}

func (p *Peer) getDesiredRequestState() (desired desiredRequestState) {
        t := p.t
        if !t.haveInfo() {
                return
        }
        if t.closed.IsSet() {
                return
        }
        if t.dataDownloadDisallowed.Bool() {
                return
        }
        input := t.getRequestStrategyInput()
        requestHeap := desiredPeerRequests{
                peer:           p,
                pieceStates:    t.requestPieceStates,
                requestIndexes: t.requestIndexes,
        }
        clear(requestHeap.pieceStates)
        // Caller-provided allocation for roaring bitmap iteration.
        var it typedRoaring.Iterator[RequestIndex]
        requestStrategy.GetRequestablePieces(
                input,
                t.getPieceRequestOrder(),
                func(ih InfoHash, pieceIndex int, pieceExtra requestStrategy.PieceRequestOrderState) bool {
                        if ih != *t.canonicalShortInfohash() {
                                return false
                        }
                        if !p.peerHasPiece(pieceIndex) {
                                return false
                        }
                        requestHeap.pieceStates[pieceIndex].Set(pieceExtra)
                        allowedFast := p.peerAllowedFast.Contains(pieceIndex)
                        t.iterUndirtiedRequestIndexesInPiece(&it, pieceIndex, func(r requestStrategy.RequestIndex) {
                                if !allowedFast {
                                        // We must signal interest to request this. TODO: We could set interested if the
                                        // peer's pieces (minus the allowed fast set) overlap with our missing pieces,
                                        // and there are any readers or pending pieces.
                                        desired.Interested = true
                                        // We can make, or keep sustaining, a request here if we're not choked, or if
                                        // we made the request previously (presumably while unchoked) and the peer
                                        // hasn't responded yet (the request was retained because we're using the fast
                                        // extension).
                                        if p.peerChoking && !p.requestState.Requests.Contains(r) {
                                                // We can't request this right now.
                                                return
                                        }
                                }
                                cancelled := &p.requestState.Cancelled
                                if !cancelled.IsEmpty() && cancelled.Contains(r) {
                                        // Can't re-request while awaiting acknowledgement.
                                        return
                                }
                                requestHeap.requestIndexes = append(requestHeap.requestIndexes, r)
                        })
                        return true
                },
        )
        t.assertPendingRequests()
        desired.Requests = requestHeap
        return
}
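
// A note on the reset pattern above: the builtin clear zeroes every element of the slice, and the
// zero value of g.Option is the unset state, so only pieces the callback actually visits end up
// Set. A tiny sketch with made-up values:
//
//	states := make([]g.Option[int], 4)
//	states[2].Set(42)
//	clear(states) // all four are unset again; only re-Set entries are safe to UnwrapPtr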

func (p *Peer) maybeUpdateActualRequestState() {
        if p.closed.IsSet() {
                return
        }
        if p.needRequestUpdate == "" {
                return
        }
        if p.needRequestUpdate == peerUpdateRequestsTimerReason {
                since := time.Since(p.lastRequestUpdate)
                if since < updateRequestsTimerDuration {
                        panic(since)
                }
        }
        pprof.Do(
                context.Background(),
                pprof.Labels("update request", p.needRequestUpdate),
                func(_ context.Context) {
                        next := p.getDesiredRequestState()
                        p.applyRequestState(next)
                        p.t.cacheNextRequestIndexesForReuse(next.Requests.requestIndexes)
                },
        )
}
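
// pprof.Do above tags everything the callback does with an "update request" profiler label, so
// CPU profiles can attribute request-update time to the reason that triggered it (e.g. filtered
// with go tool pprof's -tagfocus flag). A minimal sketch of the same mechanism:
//
//	pprof.Do(context.Background(), pprof.Labels("update request", "demo"), func(context.Context) {
//		// work to be attributed to the label
//	})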

func (t *Torrent) cacheNextRequestIndexesForReuse(slice []RequestIndex) {
        // The incoming slice can be smaller when getDesiredRequestState short-circuits on some
        // conditions.
        if cap(slice) > cap(t.requestIndexes) {
                t.requestIndexes = slice[:0]
        }
}
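
// This is the usual Go trick for retaining a backing array: re-slicing to zero length drops the
// contents but keeps the capacity for future appends. Sketch:
//
//	buf := make([]RequestIndex, 0, 1024)
//	buf = append(buf, 1, 2, 3)
//	buf = buf[:0] // len 0, cap still 1024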

// Whether we should allow sending not interested ("losing interest") to the peer. I noticed
// qBitTorrent seems to punish us for sending not interested when we're streaming and don't
// currently need anything.
func (p *Peer) allowSendNotInterested() bool {
        // Except for caching, we're not likely to lose pieces very soon.
        if p.t.haveAllPieces() {
                return true
        }
        all, known := p.peerHasAllPieces()
        if all || !known {
                return false
        }
        // Allow losing interest if we have all the pieces the peer has.
        return roaring.AndNot(p.peerPieces(), &p.t._completedPieces).IsEmpty()
}
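
// roaring.AndNot(a, b) returns the set difference of a minus b, so the expression above is empty
// exactly when the peer has no piece we haven't completed. A small sketch with made-up pieces:
//
//	a := roaring.BitmapOf(1, 2, 3)
//	b := roaring.BitmapOf(2, 3, 4)
//	_ = roaring.AndNot(a, b).IsEmpty() // false: piece 1 remains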

// Transmit/action the request state to the peer.
func (p *Peer) applyRequestState(next desiredRequestState) {
        current := &p.requestState
        // Make interest sticky.
        if !next.Interested && p.requestState.Interested {
                if !p.allowSendNotInterested() {
                        next.Interested = true
                }
        }
        if !p.setInterested(next.Interested) {
                return
        }
        more := true
        orig := next.Requests.requestIndexes
        requestHeap := heap.InterfaceForSlice(
                &next.Requests.requestIndexes,
                next.Requests.lessByValue,
        )
        heap.Init(requestHeap)

        t := p.t
        originalRequestCount := current.Requests.GetCardinality()
        for {
                if requestHeap.Len() == 0 {
                        break
                }
                numPending := maxRequests(current.Requests.GetCardinality() + current.Cancelled.GetCardinality())
                if numPending >= p.nominalMaxRequests() {
                        break
                }
                req := heap.Pop(requestHeap)
                if cap(next.Requests.requestIndexes) != cap(orig) {
                        panic("changed")
                }
                existing := t.requestingPeer(req)
                if existing != nil && existing != p {
                        // Don't steal from the poor.
                        diff := int64(current.Requests.GetCardinality()) + 1 - (int64(existing.uncancelledRequests()) - 1)
                        // Steal a request that leaves us with one more request than the existing peer
                        // connection, if the stealer more recently received a chunk.
                        if diff > 1 || (diff == 1 && p.lastUsefulChunkReceived.Before(existing.lastUsefulChunkReceived)) {
                                continue
                        }
                        t.cancelRequest(req)
                }
                more = p.mustRequest(req)
                if !more {
                        break
                }
        }
        if !more {
                // This might fail if we incorrectly determine that we can fit up to the maximum allowed
                // requests into the available write buffer space. We don't want that to happen because it
                // makes our peak requests dependent on how much was already in the buffer.
                panic(fmt.Sprintf(
                        "couldn't apply entire request state [newRequests=%v]",
                        current.Requests.GetCardinality()-originalRequestCount))
        }
        newPeakRequests := maxRequests(current.Requests.GetCardinality() - originalRequestCount)
        // log.Printf(
        //      "requests %v->%v (peak %v->%v) reason %q (peer %v)",
        //      originalRequestCount, current.Requests.GetCardinality(), p.peakRequests, newPeakRequests, p.needRequestUpdate, p)
        p.peakRequests = newPeakRequests
        p.needRequestUpdate = ""
        p.lastRequestUpdate = time.Now()
        if enableUpdateRequestsTimer {
                p.updateRequestsTimer.Reset(updateRequestsTimerDuration)
        }
}
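
// heap.InterfaceForSlice from anacrolix/generics adapts a slice and a less function into a heap
// without copying: Pop reorders the shared backing array in place, which is why the cap check
// above catches accidental reallocation. A minimal sketch:
//
//	vals := []int{3, 1, 2}
//	h := heap.InterfaceForSlice(&vals, func(l, r int) bool { return l < r })
//	heap.Init(h)
//	smallest := heap.Pop(h) // 1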

// This could be set to 10s to match the unchoke/request update interval recommended by some
// specifications. I've set it shorter to trigger it more often for testing for now.
const (
        updateRequestsTimerDuration = 3 * time.Second
        enableUpdateRequestsTimer   = false
)