Use reusable roaring iterators
[btrtrc.git] / requesting.go
package torrent

import (
	"context"
	"encoding/gob"
	"fmt"
	"reflect"
	"runtime/pprof"
	"time"
	"unsafe"

	"github.com/anacrolix/log"
	"github.com/anacrolix/multiless"
	"github.com/anacrolix/torrent/typed-roaring"
	"github.com/lispad/go-generics-tools/binheap"

	"github.com/anacrolix/torrent/request-strategy"
)

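// requestStrategyPieceOrderState snapshots piece i's priority, partial-download state and
// availability in the form the request-strategy package consumes.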
func (t *Torrent) requestStrategyPieceOrderState(i int) request_strategy.PieceRequestOrderState {
	return request_strategy.PieceRequestOrderState{
		Priority:     t.piece(i).purePriority(),
		Partial:      t.piecePartiallyDownloaded(i),
		Availability: t.piece(i).availability(),
	}
}

func init() {
	gob.Register(peerId{})
}

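// peerId wraps a *Peer together with its pointer value so a peer can be identified, and
// gob-encoded, by address alone; see GobEncode/GobDecode below.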
type peerId struct {
	*Peer
	ptr uintptr
}

func (p peerId) Uintptr() uintptr {
	return p.ptr
}

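// GobEncode returns the raw bytes of p.ptr by aliasing a byte slice over it, rather than encoding
// the Peer itself.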
func (p peerId) GobEncode() (b []byte, _ error) {
	*(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(&p.ptr)),
		Len:  int(unsafe.Sizeof(p.ptr)),
		Cap:  int(unsafe.Sizeof(p.ptr)),
	}
	return
}

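// GobDecode restores the pointer value written by GobEncode, copying the raw bytes back over both
// the uintptr and the embedded *Peer.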
func (p *peerId) GobDecode(b []byte) error {
	if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
		panic(len(b))
	}
	ptr := unsafe.Pointer(&b[0])
	p.ptr = *(*uintptr)(ptr)
	log.Printf("%p", ptr)
	dst := reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(&p.Peer)),
		Len:  int(unsafe.Sizeof(p.Peer)),
		Cap:  int(unsafe.Sizeof(p.Peer)),
	}
	copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
	return nil
}

type (
	RequestIndex   = request_strategy.RequestIndex
	chunkIndexType = request_strategy.ChunkIndex
)

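// desiredPeerRequests holds the candidate request indexes for a single peer, along with the piece
// states needed to order them. Its Len/Less/Swap/Push/Pop methods satisfy container/heap's
// Interface.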
type desiredPeerRequests struct {
	requestIndexes []RequestIndex
	peer           *Peer
	pieceStates    []request_strategy.PieceRequestOrderState
}

func (p *desiredPeerRequests) Len() int {
	return len(p.requestIndexes)
}

func (p *desiredPeerRequests) Less(i, j int) bool {
	return p.lessByValue(p.requestIndexes[i], p.requestIndexes[j])
}

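// lessByValue orders candidate requests for a peer: requests it can serve right now and requests
// for higher-priority pieces come first, then ties are broken by existing ownership, the competing
// peer's load and request recency, piece availability, and finally piece order.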
func (p *desiredPeerRequests) lessByValue(leftRequest, rightRequest RequestIndex) bool {
	t := p.peer.t
	leftPieceIndex := t.pieceIndexOfRequestIndex(leftRequest)
	rightPieceIndex := t.pieceIndexOfRequestIndex(rightRequest)
	ml := multiless.New()
	// Push requests that can't be served right now to the end. But we don't throw them away unless
	// there's a better alternative. This is for when we're using the fast extension and get choked
	// but our requests could still be good when we get unchoked.
	if p.peer.peerChoking {
		ml = ml.Bool(
			!p.peer.peerAllowedFast.Contains(leftPieceIndex),
			!p.peer.peerAllowedFast.Contains(rightPieceIndex),
		)
	}
	leftPiece := &p.pieceStates[leftPieceIndex]
	rightPiece := &p.pieceStates[rightPieceIndex]
	// Putting this first means we can steal requests from lesser-performing peers for our first few
	// new requests.
	priority := func() piecePriority {
		// Technically we would be happy with the cached priority here, except we don't actually
		// cache it anymore, and Torrent.piecePriority just does another lookup of *Piece to resolve
		// the priority through Piece.purePriority, which is probably slower.
		leftPriority := leftPiece.Priority
		rightPriority := rightPiece.Priority
		ml = ml.Int(
			-int(leftPriority),
			-int(rightPriority),
		)
		if !ml.Ok() {
			if leftPriority != rightPriority {
				panic("expected equal")
			}
		}
		return leftPriority
	}()
	if ml.Ok() {
		return ml.MustLess()
	}
	leftRequestState := t.requestState[leftRequest]
	rightRequestState := t.requestState[rightRequest]
	leftPeer := leftRequestState.peer
	rightPeer := rightRequestState.peer
	// Prefer chunks already requested from this peer.
	ml = ml.Bool(rightPeer == p.peer, leftPeer == p.peer)
	// Prefer unrequested chunks.
	ml = ml.Bool(rightPeer == nil, leftPeer == nil)
	if ml.Ok() {
		return ml.MustLess()
	}
	if leftPeer != nil {
		// The right peer should also be set, or we'd have resolved the comparison by now.
		ml = ml.Uint64(
			rightPeer.requestState.Requests.GetCardinality(),
			leftPeer.requestState.Requests.GetCardinality(),
		)
		// Neither last-requested time should be zero: the existing-peer checks above ensure both
		// requests have actually been made.
		leftLast := leftRequestState.when
		rightLast := rightRequestState.when
		if leftLast.IsZero() || rightLast.IsZero() {
			panic("expected non-zero last requested times")
		}
		// We want the most recently requested on the left. Clients like Transmission serve requests
		// in received order, so the most recently requested chunk has the longest wait until it
		// will be served and is therefore the best candidate to cancel.
		ml = ml.CmpInt64(rightLast.Sub(leftLast).Nanoseconds())
	}
	ml = ml.Int(
		leftPiece.Availability,
		rightPiece.Availability)
	if priority == PiecePriorityReadahead {
		// TODO: For readahead in particular, it would be even better to consider distance from the
		// reader position so that reads earlier in a torrent don't starve reads later in the
		// torrent. This would probably require reconsideration of how readahead priority works.
		ml = ml.Int(leftPieceIndex, rightPieceIndex)
	} else {
		ml = ml.Int(t.pieceRequestOrder[leftPieceIndex], t.pieceRequestOrder[rightPieceIndex])
	}
	return ml.Less()
}

func (p *desiredPeerRequests) Swap(i, j int) {
	p.requestIndexes[i], p.requestIndexes[j] = p.requestIndexes[j], p.requestIndexes[i]
}

func (p *desiredPeerRequests) Push(x interface{}) {
	p.requestIndexes = append(p.requestIndexes, x.(RequestIndex))
}

func (p *desiredPeerRequests) Pop() interface{} {
	last := len(p.requestIndexes) - 1
	x := p.requestIndexes[last]
	p.requestIndexes = p.requestIndexes[:last]
	return x
}

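// desiredRequestState is the outcome of getDesiredRequestState: the requests we would like to have
// outstanding with a peer, and whether we should be interested in it.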
type desiredRequestState struct {
	Requests   desiredPeerRequests
	Interested bool
}

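// getDesiredRequestState collects the undirtied, requestable chunks this peer could provide,
// reusing the Torrent's scratch requestIndexes and requestPieceStates slices.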
func (p *Peer) getDesiredRequestState() (desired desiredRequestState) {
	t := p.t
	if !t.haveInfo() {
		return
	}
	if t.closed.IsSet() {
		return
	}
	input := t.getRequestStrategyInput()
	requestHeap := desiredPeerRequests{
		peer:           p,
		pieceStates:    t.requestPieceStates,
		requestIndexes: t.requestIndexes,
	}
	// Caller-provided allocation for roaring bitmap iteration.
	var it typedRoaring.Iterator[RequestIndex]
	request_strategy.GetRequestablePieces(
		input,
		t.getPieceRequestOrder(),
		func(ih InfoHash, pieceIndex int, pieceExtra request_strategy.PieceRequestOrderState) {
			if ih != t.infoHash {
				return
			}
			if !p.peerHasPiece(pieceIndex) {
				return
			}
			requestHeap.pieceStates[pieceIndex] = pieceExtra
			allowedFast := p.peerAllowedFast.Contains(pieceIndex)
			t.iterUndirtiedRequestIndexesInPiece(&it, pieceIndex, func(r request_strategy.RequestIndex) {
				if !allowedFast {
					// We must signal interest to request this. TODO: We could set interested if the
					// peer's pieces (minus the allowed fast set) overlap with our missing pieces if
					// there are any readers, or any pending pieces.
					desired.Interested = true
					// We can make, or keep sustaining, a request here if we're not choked, or if we
					// made the request previously (presumably while unchoked) and the peer hasn't
					// responded yet (the request was retained because we're using the fast
					// extension).
					if p.peerChoking && !p.requestState.Requests.Contains(r) {
						// We can't request this right now.
						return
					}
				}
				if p.requestState.Cancelled.Contains(r) {
					// Can't re-request while awaiting acknowledgement.
					return
				}
				requestHeap.requestIndexes = append(requestHeap.requestIndexes, r)
			})
		},
	)
	t.assertPendingRequests()
	desired.Requests = requestHeap
	return
}

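// maybeUpdateActualRequestState recomputes and applies the desired request state if an update has
// been requested via needRequestUpdate.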
func (p *Peer) maybeUpdateActualRequestState() {
	if p.closed.IsSet() {
		return
	}
	if p.needRequestUpdate == "" {
		return
	}
	if p.needRequestUpdate == peerUpdateRequestsTimerReason {
		since := time.Since(p.lastRequestUpdate)
		if since < updateRequestsTimerDuration {
			panic(since)
		}
	}
	pprof.Do(
		context.Background(),
		pprof.Labels("update request", p.needRequestUpdate),
		func(_ context.Context) {
			next := p.getDesiredRequestState()
			p.applyRequestState(next)
			p.t.requestIndexes = next.Requests.requestIndexes[:0]
		},
	)
}

// Transmit/apply the request state to the peer.
func (p *Peer) applyRequestState(next desiredRequestState) {
	current := &p.requestState
	if !p.setInterested(next.Interested) {
		panic("insufficient write buffer")
	}
	more := true
	requestHeap := binheap.FromSlice(next.Requests.requestIndexes, next.Requests.lessByValue)
	t := p.t
	originalRequestCount := current.Requests.GetCardinality()
	// We're either here on a timer, or because we ran out of requests. Both are valid reasons to
	// alter peakRequests.
	if originalRequestCount != 0 && p.needRequestUpdate != peerUpdateRequestsTimerReason {
		panic(fmt.Sprintf(
			"expected zero existing requests (%v) for update reason %q",
			originalRequestCount, p.needRequestUpdate))
	}
	for requestHeap.Len() != 0 && maxRequests(current.Requests.GetCardinality()+current.Cancelled.GetCardinality()) < p.nominalMaxRequests() {
		req := requestHeap.Pop()
		existing := t.requestingPeer(req)
		if existing != nil && existing != p {
			// Don't steal from the poor.
			diff := int64(current.Requests.GetCardinality()) + 1 - (int64(existing.uncancelledRequests()) - 1)
			// Steal a request that leaves us with one more request than the existing peer
			// connection only if the stealer more recently received a chunk.
			if diff > 1 || (diff == 1 && p.lastUsefulChunkReceived.Before(existing.lastUsefulChunkReceived)) {
				continue
			}
			t.cancelRequest(req)
		}
		more = p.mustRequest(req)
		if !more {
			break
		}
	}
	if !more {
		// This might fail if we incorrectly determine that we can fit up to the maximum allowed
		// requests into the available write buffer space. We don't want that to happen because it
		// makes our peak requests dependent on how much was already in the buffer.
		panic(fmt.Sprintf(
			"couldn't apply entire request state [newRequests=%v]",
			current.Requests.GetCardinality()-originalRequestCount))
	}
	newPeakRequests := maxRequests(current.Requests.GetCardinality() - originalRequestCount)
	// log.Printf(
	// 	"requests %v->%v (peak %v->%v) reason %q (peer %v)",
	// 	originalRequestCount, current.Requests.GetCardinality(), p.peakRequests, newPeakRequests, p.needRequestUpdate, p)
	p.peakRequests = newPeakRequests
	p.needRequestUpdate = ""
	p.lastRequestUpdate = time.Now()
	if enableUpdateRequestsTimer {
		p.updateRequestsTimer.Reset(updateRequestsTimerDuration)
	}
}

// This could be set to 10s to match the unchoke/request update interval recommended by some
// specifications. I've set it shorter to trigger it more often for testing for now.
const (
	updateRequestsTimerDuration = 3 * time.Second
	enableUpdateRequestsTimer   = false
)