package torrent

import (
        "context"
        "encoding/gob"
        "fmt"
        "reflect"
        "runtime/pprof"
        "time"
        "unsafe"

        "github.com/anacrolix/multiless"
        "github.com/lispad/go-generics-tools/binheap"

        request_strategy "github.com/anacrolix/torrent/request-strategy"
)

func (t *Torrent) requestStrategyPieceOrderState(i int) request_strategy.PieceRequestOrderState {
        return request_strategy.PieceRequestOrderState{
                Priority:     t.piece(i).purePriority(),
                Partial:      t.piecePartiallyDownloaded(i),
                Availability: t.piece(i).availability(),
        }
}

func init() {
        gob.Register(peerId{})
}

// peerId identifies a Peer by its pointer value, giving the request strategy
// a stable, gob-encodable key for each peer connection.
type peerId struct {
        *Peer
        ptr uintptr
}

func (p peerId) Uintptr() uintptr {
        return p.ptr
}

// GobEncode returns a slice that aliases the bytes of p.ptr directly, avoiding
// a copy; the slice is only valid while p is live.
func (p peerId) GobEncode() (b []byte, _ error) {
        *(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
                Data: uintptr(unsafe.Pointer(&p.ptr)),
                Len:  int(unsafe.Sizeof(p.ptr)),
                Cap:  int(unsafe.Sizeof(p.ptr)),
        }
        return
}

// GobDecode reads the pointer bits back out of b, restoring both the uintptr
// and the *Peer field.
func (p *peerId) GobDecode(b []byte) error {
        if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
                panic(len(b))
        }
        ptr := unsafe.Pointer(&b[0])
        p.ptr = *(*uintptr)(ptr)
        dst := reflect.SliceHeader{
                Data: uintptr(unsafe.Pointer(&p.Peer)),
                Len:  int(unsafe.Sizeof(p.Peer)),
                Cap:  int(unsafe.Sizeof(p.Peer)),
        }
        copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
        return nil
}
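
// A sketch of an equivalent round-trip using encoding/binary instead of the
// in-place aliasing above (assumes a 64-bit uintptr; the names are
// hypothetical and this is not the code path actually used here):
//
//	func encodePeerPtr(ptr uintptr) []byte {
//		b := make([]byte, 8)
//		binary.LittleEndian.PutUint64(b, uint64(ptr))
//		return b
//	}
//
//	func decodePeerPtr(b []byte) uintptr {
//		return uintptr(binary.LittleEndian.Uint64(b))
//	}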

type (
        RequestIndex   = request_strategy.RequestIndex
        chunkIndexType = request_strategy.ChunkIndex
)

type desiredPeerRequests struct {
        requestIndexes []RequestIndex
        peer           *Peer
        pieceStates    []request_strategy.PieceRequestOrderState
}

func (p *desiredPeerRequests) Len() int {
        return len(p.requestIndexes)
}

func (p *desiredPeerRequests) Less(i, j int) bool {
        return p.lessByValue(p.requestIndexes[i], p.requestIndexes[j])
}

func (p *desiredPeerRequests) lessByValue(leftRequest, rightRequest RequestIndex) bool {
        t := p.peer.t
        leftPieceIndex := t.pieceIndexOfRequestIndex(leftRequest)
        rightPieceIndex := t.pieceIndexOfRequestIndex(rightRequest)
        ml := multiless.New()
        // Push requests that can't be served right now to the end, but don't throw them away unless
        // there's a better alternative. This matters when we're using the fast extension and get
        // choked: our outstanding requests may still be good once we're unchoked.
        if p.peer.peerChoking {
                ml = ml.Bool(
                        !p.peer.peerAllowedFast.Contains(leftPieceIndex),
                        !p.peer.peerAllowedFast.Contains(rightPieceIndex),
                )
        }
        leftPiece := &p.pieceStates[leftPieceIndex]
        rightPiece := &p.pieceStates[rightPieceIndex]
        // Putting this first means we can steal requests from lesser-performing peers for our first
        // few new requests.
        priority := func() piecePriority {
                // Technically we would be happy with the cached priority here, except we don't
                // actually cache it anymore, and Torrent.piecePriority just does another lookup of
                // *Piece to resolve the priority through Piece.purePriority, which is probably slower.
                leftPriority := leftPiece.Priority
                rightPriority := rightPiece.Priority
                ml = ml.Int(
                        -int(leftPriority),
                        -int(rightPriority),
                )
                if !ml.Ok() {
                        if leftPriority != rightPriority {
                                panic("expected equal")
                        }
                }
                return leftPriority
        }()
        if ml.Ok() {
                return ml.MustLess()
        }
        leftRequestState := t.requestState[leftRequest]
        rightRequestState := t.requestState[rightRequest]
        leftPeer := leftRequestState.peer
        rightPeer := rightRequestState.peer
        // Prefer chunks already requested from this peer.
        ml = ml.Bool(rightPeer == p.peer, leftPeer == p.peer)
        // Prefer unrequested chunks.
        ml = ml.Bool(rightPeer == nil, leftPeer == nil)
        if ml.Ok() {
                return ml.MustLess()
        }
        if leftPeer != nil {
                // The right peer must also be set, or we'd have resolved the comparison by now.
                ml = ml.Uint64(
                        rightPeer.requestState.Requests.GetCardinality(),
                        leftPeer.requestState.Requests.GetCardinality(),
                )
                // Neither lastRequested time should be zero: the existing-peer checks above
                // guarantee both requests have actually been made.
                leftLast := leftRequestState.when
                rightLast := rightRequestState.when
                if leftLast.IsZero() || rightLast.IsZero() {
                        panic("expected non-zero last requested times")
                }
                // We want the most recently requested on the left. Clients like Transmission serve
                // requests in received order, so the most recently requested is the one that will
                // wait the longest to be served and is therefore the best candidate to cancel.
                ml = ml.CmpInt64(rightLast.Sub(leftLast).Nanoseconds())
        }
        ml = ml.Int(
                leftPiece.Availability,
                rightPiece.Availability)
        if priority == PiecePriorityReadahead {
                // TODO: For readahead in particular, it would be even better to consider distance
                // from the reader position, so that reads earlier in a torrent don't starve reads
                // later in the torrent. This would probably require reconsidering how readahead
                // priority works.
                ml = ml.Int(leftPieceIndex, rightPieceIndex)
        } else {
                ml = ml.Int(t.pieceRequestOrder[leftPieceIndex], t.pieceRequestOrder[rightPieceIndex])
        }
        return ml.Less()
}
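
// lessByValue builds a single multiless chain: each comparison takes effect
// only while all earlier comparisons have tied. A minimal sketch of the idiom
// (the operand values here are made up for illustration):
//
//	ml := multiless.New()
//	ml = ml.Bool(false, true) // false sorts first, so this decides the result
//	ml = ml.Int(3, 1)         // ignored: an earlier key already decided
//	_ = ml.Less()             // true: the left operand sorts first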

func (p *desiredPeerRequests) Swap(i, j int) {
        p.requestIndexes[i], p.requestIndexes[j] = p.requestIndexes[j], p.requestIndexes[i]
}

func (p *desiredPeerRequests) Push(x interface{}) {
        p.requestIndexes = append(p.requestIndexes, x.(RequestIndex))
}

func (p *desiredPeerRequests) Pop() interface{} {
        last := len(p.requestIndexes) - 1
        x := p.requestIndexes[last]
        p.requestIndexes = p.requestIndexes[:last]
        return x
}
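
// Len, Less, Swap, Push and Pop together satisfy container/heap's
// heap.Interface, so the request indexes could also be prioritized lazily
// with the standard library (a sketch; applyRequestState below actually uses
// binheap.FromSlice instead):
//
//	heap.Init(&requests)                       // O(n) heapify
//	best := heap.Pop(&requests).(RequestIndex) // best remaining request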

type desiredRequestState struct {
        Requests   desiredPeerRequests
        Interested bool
}

func (p *Peer) getDesiredRequestState() (desired desiredRequestState) {
        t := p.t
        if !t.haveInfo() {
                return
        }
        if t.closed.IsSet() {
                return
        }
        input := t.getRequestStrategyInput()
        requestHeap := desiredPeerRequests{
                peer:           p,
                pieceStates:    t.requestPieceStates,
                requestIndexes: t.requestIndexes,
        }
        request_strategy.GetRequestablePieces(
                input,
                t.getPieceRequestOrder(),
                func(ih InfoHash, pieceIndex int, pieceExtra request_strategy.PieceRequestOrderState) {
                        if ih != t.infoHash {
                                return
                        }
                        if !p.peerHasPiece(pieceIndex) {
                                return
                        }
                        requestHeap.pieceStates[pieceIndex] = pieceExtra
                        allowedFast := p.peerAllowedFast.Contains(pieceIndex)
                        p.t.piece(pieceIndex).undirtiedChunksIter.Iter(func(ci request_strategy.ChunkIndex) {
                                r := p.t.pieceRequestIndexOffset(pieceIndex) + ci
                                if !allowedFast {
                                        // We must signal interest to request this. TODO: We could set
                                        // interested if the peer's pieces (minus the allowed-fast set)
                                        // overlap with our missing pieces, if there are any readers or
                                        // any pending pieces.
                                        desired.Interested = true
                                        // We can make a new request, or sustain an existing one, if
                                        // we're not choked, or if we made the request previously
                                        // (presumably while unchoked) and the peer hasn't responded
                                        // yet (the request was retained because we're using the fast
                                        // extension).
                                        if p.peerChoking && !p.requestState.Requests.Contains(r) {
                                                // We can't request this right now.
                                                return
                                        }
                                }
                                if p.requestState.Cancelled.Contains(r) {
                                        // Can't re-request while awaiting acknowledgement.
                                        return
                                }
                                requestHeap.requestIndexes = append(requestHeap.requestIndexes, r)
                        })
                },
        )
        t.assertPendingRequests()
        desired.Requests = requestHeap
        return
}
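
// The callback above gates each candidate request on choke state and the
// fast extension. Distilled into a standalone predicate (a sketch; the name
// and parameters are illustrative, not part of the API):
//
//	func requestableNow(choked, allowedFast, alreadyRequested, awaitingCancel bool) bool {
//		if choked && !allowedFast && !alreadyRequested {
//			return false // can't issue a new request while choked
//		}
//		return !awaitingCancel // can't re-request until a cancel is acknowledged
//	}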

func (p *Peer) maybeUpdateActualRequestState() {
        if p.closed.IsSet() {
                return
        }
        if p.needRequestUpdate == "" {
                return
        }
        if p.needRequestUpdate == peerUpdateRequestsTimerReason {
                since := time.Since(p.lastRequestUpdate)
                if since < updateRequestsTimerDuration {
                        panic(since)
                }
        }
        pprof.Do(
                context.Background(),
                pprof.Labels("update request", p.needRequestUpdate),
                func(_ context.Context) {
                        next := p.getDesiredRequestState()
                        p.applyRequestState(next)
                        p.t.requestIndexes = next.Requests.requestIndexes[:0]
                },
        )
}
262
263 // Transmit/action the request state to the peer.
264 func (p *Peer) applyRequestState(next desiredRequestState) {
265         current := &p.requestState
266         if !p.setInterested(next.Interested) {
267                 panic("insufficient write buffer")
268         }
269         more := true
270         requestHeap := binheap.FromSlice(next.Requests.requestIndexes, next.Requests.lessByValue)
271         t := p.t
272         originalRequestCount := current.Requests.GetCardinality()
273         // We're either here on a timer, or because we ran out of requests. Both are valid reasons to
274         // alter peakRequests.
275         if originalRequestCount != 0 && p.needRequestUpdate != peerUpdateRequestsTimerReason {
276                 panic(fmt.Sprintf(
277                         "expected zero existing requests (%v) for update reason %q",
278                         originalRequestCount, p.needRequestUpdate))
279         }
280         for requestHeap.Len() != 0 && maxRequests(current.Requests.GetCardinality()+current.Cancelled.GetCardinality()) < p.nominalMaxRequests() {
281                 req := requestHeap.Pop()
282                 existing := t.requestingPeer(req)
283                 if existing != nil && existing != p {
284                         // Don't steal from the poor.
285                         diff := int64(current.Requests.GetCardinality()) + 1 - (int64(existing.uncancelledRequests()) - 1)
286                         // Steal a request that leaves us with one more request than the existing peer
287                         // connection if the stealer more recently received a chunk.
288                         if diff > 1 || (diff == 1 && p.lastUsefulChunkReceived.Before(existing.lastUsefulChunkReceived)) {
289                                 continue
290                         }
291                         t.cancelRequest(req)
292                 }
293                 more = p.mustRequest(req)
294                 if !more {
295                         break
296                 }
297         }
298         if !more {
299                 // This might fail if we incorrectly determine that we can fit up to the maximum allowed
300                 // requests into the available write buffer space. We don't want that to happen because it
301                 // makes our peak requests dependent on how much was already in the buffer.
302                 panic(fmt.Sprintf(
303                         "couldn't fill apply entire request state [newRequests=%v]",
304                         current.Requests.GetCardinality()-originalRequestCount))
305         }
306         newPeakRequests := maxRequests(current.Requests.GetCardinality() - originalRequestCount)
307         // log.Printf(
308         //      "requests %v->%v (peak %v->%v) reason %q (peer %v)",
309         //      originalRequestCount, current.Requests.GetCardinality(), p.peakRequests, newPeakRequests, p.needRequestUpdate, p)
310         p.peakRequests = newPeakRequests
311         p.needRequestUpdate = ""
312         p.lastRequestUpdate = time.Now()
313         if enableUpdateRequestsTimer {
314                 p.updateRequestsTimer.Reset(updateRequestsTimerDuration)
315         }
316 }
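
// A worked example of the steal heuristic above (illustrative numbers): with
// 4 uncancelled requests here and 7 on the existing peer, diff = (4+1)-(7-1)
// = -1 and we steal outright; with 6 here and 7 there, diff = 1 and we steal
// only if we received a useful chunk more recently than the existing peer;
// with 7 here and 7 there, diff = 2 and the request is left alone.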

// This could be set to 10s to match the unchoke/request update interval recommended by some
// specifications. I've set it shorter to trigger it more often for testing for now.
const (
        updateRequestsTimerDuration = 3 * time.Second
        enableUpdateRequestsTimer   = false
)