[btrtrc.git] / requesting.go
package torrent

import (
	"context"
	"encoding/gob"
	"fmt"
	"reflect"
	"runtime/pprof"
	"time"
	"unsafe"

	"github.com/anacrolix/generics/heap"
	"github.com/anacrolix/log"
	"github.com/anacrolix/multiless"

	requestStrategy "github.com/anacrolix/torrent/request-strategy"
	typedRoaring "github.com/anacrolix/torrent/typed-roaring"
)

type (
	// Since we have to store all the requests in memory, we can't reasonably exceed what could be
	// indexed with the memory space available.
	maxRequests = int
)
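// requestStrategyPieceOrderState gathers the inputs the request strategy uses to order piece i:
// its pure priority, whether it's partially downloaded, and its availability.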
func (t *Torrent) requestStrategyPieceOrderState(i int) requestStrategy.PieceRequestOrderState {
	return requestStrategy.PieceRequestOrderState{
		Priority:     t.piece(i).purePriority(),
		Partial:      t.piecePartiallyDownloaded(i),
		Availability: t.piece(i).availability(),
	}
}

func init() {
	gob.Register(peerId{})
}
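// peerId identifies a Peer by pointer, carrying the raw pointer value alongside so it can be
// serialized (see GobEncode/GobDecode below).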
type peerId struct {
	*Peer
	ptr uintptr
}
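// Uintptr exposes the peer's pointer value for use as a lightweight identifier.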
func (p peerId) Uintptr() uintptr {
	return p.ptr
}
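// GobEncode emits the raw bytes of the ptr field by aliasing it through an unsafe slice header,
// avoiding any allocation or reflection-based encoding.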
func (p peerId) GobEncode() (b []byte, _ error) {
	*(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(&p.ptr)),
		Len:  int(unsafe.Sizeof(p.ptr)),
		Cap:  int(unsafe.Sizeof(p.ptr)),
	}
	return
}
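// GobDecode reverses GobEncode: it copies the encoded pointer bytes back into ptr and the
// embedded *Peer. The input must be exactly one pointer wide.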
func (p *peerId) GobDecode(b []byte) error {
	if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
		panic(len(b))
	}
	ptr := unsafe.Pointer(&b[0])
	p.ptr = *(*uintptr)(ptr)
	log.Printf("%p", ptr)
	dst := reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(&p.Peer)),
		Len:  int(unsafe.Sizeof(p.Peer)),
		Cap:  int(unsafe.Sizeof(p.Peer)),
	}
	copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
	return nil
}
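// Index types shared with the request-strategy package.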
type (
	RequestIndex   = requestStrategy.RequestIndex
	chunkIndexType = requestStrategy.ChunkIndex
)
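// desiredPeerRequests holds the candidate request indexes we'd like outstanding on a peer,
// together with the piece state needed to order them by desirability.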
type desiredPeerRequests struct {
	requestIndexes []RequestIndex
	peer           *Peer
	pieceStates    []requestStrategy.PieceRequestOrderState
}
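// lessByValue reports whether leftRequest should be requested before rightRequest for this peer.
// It's the comparison function for the request heap built in applyRequestState.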
func (p *desiredPeerRequests) lessByValue(leftRequest, rightRequest RequestIndex) bool {
	t := p.peer.t
	leftPieceIndex := t.pieceIndexOfRequestIndex(leftRequest)
	rightPieceIndex := t.pieceIndexOfRequestIndex(rightRequest)
	ml := multiless.New()
	// Push requests that can't be served right now to the end. But we don't throw them away unless
	// there's a better alternative. This is for when we're using the fast extension and get choked
	// but our requests could still be good when we get unchoked.
	if p.peer.peerChoking {
		ml = ml.Bool(
			!p.peer.peerAllowedFast.Contains(leftPieceIndex),
			!p.peer.peerAllowedFast.Contains(rightPieceIndex),
		)
	}
	leftPiece := &p.pieceStates[leftPieceIndex]
	rightPiece := &p.pieceStates[rightPieceIndex]
	// Putting this first means we can steal requests from lesser-performing peers for our first few
	// new requests.
	priority := func() piecePriority {
		// Technically we would be happy with the cached priority here, except we don't actually
		// cache it anymore, and Torrent.piecePriority just does another lookup of *Piece to resolve
		// the priority through Piece.purePriority, which is probably slower.
		leftPriority := leftPiece.Priority
		rightPriority := rightPiece.Priority
		ml = ml.Int(
			-int(leftPriority),
			-int(rightPriority),
		)
		if !ml.Ok() {
			if leftPriority != rightPriority {
				panic("expected equal")
			}
		}
		return leftPriority
	}()
	if ml.Ok() {
		return ml.MustLess()
	}
	leftRequestState := t.requestState[leftRequest]
	rightRequestState := t.requestState[rightRequest]
	leftPeer := leftRequestState.peer
	rightPeer := rightRequestState.peer
	// Prefer chunks already requested from this peer.
	ml = ml.Bool(rightPeer == p.peer, leftPeer == p.peer)
	// Prefer unrequested chunks.
	ml = ml.Bool(rightPeer == nil, leftPeer == nil)
	if ml.Ok() {
		return ml.MustLess()
	}
	if leftPeer != nil {
		// The right peer should also be set, or we'd have resolved the computation by now.
		ml = ml.Uint64(
			rightPeer.requestState.Requests.GetCardinality(),
			leftPeer.requestState.Requests.GetCardinality(),
		)
		// Could either of the lastRequested times be zero? Checking for an existing peer above
		// should rule that out.
		leftLast := leftRequestState.when
		rightLast := rightRequestState.when
		if leftLast.IsZero() || rightLast.IsZero() {
			panic("expected non-zero last requested times")
		}
		// We want the most recently requested on the left. Clients like Transmission serve requests
		// in received order, so the most recently requested chunk has the longest wait before it
		// will be served and is therefore the best candidate to cancel.
		ml = ml.CmpInt64(rightLast.Sub(leftLast).Nanoseconds())
	}
	ml = ml.Int(
		leftPiece.Availability,
		rightPiece.Availability)
	if priority == PiecePriorityReadahead {
		// TODO: For readahead in particular, it would be even better to consider distance from the
		// reader position so that reads earlier in a torrent don't starve reads later in the
		// torrent. This would probably require reconsideration of how readahead priority works.
		ml = ml.Int(leftPieceIndex, rightPieceIndex)
	} else {
		ml = ml.Int(t.pieceRequestOrder[leftPieceIndex], t.pieceRequestOrder[rightPieceIndex])
	}
	return ml.Less()
}
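// desiredRequestState is the outcome of getDesiredRequestState: the candidate requests for a peer
// and whether we should be interested in it.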
type desiredRequestState struct {
	Requests   desiredPeerRequests
	Interested bool
}
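// getDesiredRequestState walks the requestable pieces this peer has and collects the request
// indexes we'd like outstanding on it, along with whether we should express interest.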
func (p *Peer) getDesiredRequestState() (desired desiredRequestState) {
	t := p.t
	if !t.haveInfo() {
		return
	}
	if t.closed.IsSet() {
		return
	}
	input := t.getRequestStrategyInput()
	requestHeap := desiredPeerRequests{
		peer:           p,
		pieceStates:    t.requestPieceStates,
		requestIndexes: t.requestIndexes,
	}
	// Caller-provided allocation for roaring bitmap iteration.
	var it typedRoaring.Iterator[RequestIndex]
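	// Visit each piece the request strategy considers requestable, keeping only pieces this peer
	// has and collecting the undirtied chunks we could request from it.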
	requestStrategy.GetRequestablePieces(
		input,
		t.getPieceRequestOrder(),
		func(ih InfoHash, pieceIndex int, pieceExtra requestStrategy.PieceRequestOrderState) {
			if ih != t.infoHash {
				return
			}
			if !p.peerHasPiece(pieceIndex) {
				return
			}
			requestHeap.pieceStates[pieceIndex] = pieceExtra
			allowedFast := p.peerAllowedFast.Contains(pieceIndex)
			t.iterUndirtiedRequestIndexesInPiece(&it, pieceIndex, func(r requestStrategy.RequestIndex) {
				if !allowedFast {
					// We must signal interest to request this. TODO: We could set interested if the
					// peer's pieces (minus the allowed fast set) overlap with our missing pieces, if
					// there are any readers or any pending pieces.
					desired.Interested = true
					// We can make a request, or sustain an existing one, if we're not choked, or if
					// we made the request previously (presumably while unchoked) and the peer hasn't
					// responded yet (the request was retained because we're using the fast
					// extension).
					if p.peerChoking && !p.requestState.Requests.Contains(r) {
						// We can't request this right now.
						return
					}
				}
				if p.requestState.Cancelled.Contains(r) {
					// Can't re-request while awaiting acknowledgement.
					return
				}
				requestHeap.requestIndexes = append(requestHeap.requestIndexes, r)
			})
		},
	)
	t.assertPendingRequests()
	desired.Requests = requestHeap
	return
}
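// maybeUpdateActualRequestState recomputes and applies the peer's request state if an update has
// been flagged via needRequestUpdate.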
func (p *Peer) maybeUpdateActualRequestState() {
	if p.closed.IsSet() {
		return
	}
	if p.needRequestUpdate == "" {
		return
	}
	if p.needRequestUpdate == peerUpdateRequestsTimerReason {
		since := time.Since(p.lastRequestUpdate)
		if since < updateRequestsTimerDuration {
			panic(since)
		}
	}
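	// Run the update under a pprof label so the triggering reason is visible in profiles.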
	pprof.Do(
		context.Background(),
		pprof.Labels("update request", p.needRequestUpdate),
		func(_ context.Context) {
			next := p.getDesiredRequestState()
			p.applyRequestState(next)
			p.t.requestIndexes = next.Requests.requestIndexes[:0]
		},
	)
}
// applyRequestState transmits and applies the desired request state to the peer.
func (p *Peer) applyRequestState(next desiredRequestState) {
	current := &p.requestState
	if !p.setInterested(next.Interested) {
		return
	}
	more := true
	requestHeap := heap.InterfaceForSlice(&next.Requests.requestIndexes, next.Requests.lessByValue)
	heap.Init(requestHeap)

	t := p.t
	originalRequestCount := current.Requests.GetCardinality()
	// We're either here on a timer, or because we ran out of requests. Both are valid reasons to
	// alter peakRequests.
	if originalRequestCount != 0 && p.needRequestUpdate != peerUpdateRequestsTimerReason {
		panic(fmt.Sprintf(
			"expected zero existing requests (%v) for update reason %q",
			originalRequestCount, p.needRequestUpdate))
	}
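	// Pop the most desirable requests until we hit the peer's nominal request limit. Cancelled
	// requests still count against the limit until the peer acknowledges the cancel.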
	for requestHeap.Len() != 0 && maxRequests(current.Requests.GetCardinality()+current.Cancelled.GetCardinality()) < p.nominalMaxRequests() {
		req := heap.Pop(requestHeap)
		existing := t.requestingPeer(req)
		if existing != nil && existing != p {
			// Don't steal from the poor.
			diff := int64(current.Requests.GetCardinality()) + 1 - (int64(existing.uncancelledRequests()) - 1)
			// Steal a request that leaves us with one more request than the existing peer
			// connection if the stealer more recently received a chunk.
			if diff > 1 || (diff == 1 && p.lastUsefulChunkReceived.Before(existing.lastUsefulChunkReceived)) {
				continue
			}
			t.cancelRequest(req)
		}
		more = p.mustRequest(req)
		if !more {
			break
		}
	}
	if !more {
		// This might fail if we incorrectly determine that we can fit up to the maximum allowed
		// requests into the available write buffer space. We don't want that to happen because it
		// makes our peak requests dependent on how much was already in the buffer.
		panic(fmt.Sprintf(
			"couldn't apply entire request state [newRequests=%v]",
			current.Requests.GetCardinality()-originalRequestCount))
	}
	newPeakRequests := maxRequests(current.Requests.GetCardinality() - originalRequestCount)
	// log.Printf(
	// 	"requests %v->%v (peak %v->%v) reason %q (peer %v)",
	// 	originalRequestCount, current.Requests.GetCardinality(), p.peakRequests, newPeakRequests, p.needRequestUpdate, p)
	p.peakRequests = newPeakRequests
	p.needRequestUpdate = ""
	p.lastRequestUpdate = time.Now()
	if enableUpdateRequestsTimer {
		p.updateRequestsTimer.Reset(updateRequestsTimerDuration)
	}
}
// This could be set to 10s to match the unchoke/request update interval recommended by some
// specifications. I've set it shorter to trigger it more often for testing for now.
const (
	updateRequestsTimerDuration = 3 * time.Second
	enableUpdateRequestsTimer   = false
)