package torrent

import (
        "context"
        "encoding/gob"
        "fmt"
        "reflect"
        "runtime/pprof"
        "time"
        "unsafe"

        "github.com/anacrolix/log"
        "github.com/anacrolix/multiless"
        "github.com/lispad/go-generics-tools/binheap"

        requestStrategy "github.com/anacrolix/torrent/request-strategy"
        typedRoaring "github.com/anacrolix/torrent/typed-roaring"
)

type (
        // Since we have to store all the requests in memory, we can't reasonably exceed what could be
        // indexed with the memory space available.
        maxRequests = int
)

func (t *Torrent) requestStrategyPieceOrderState(i int) requestStrategy.PieceRequestOrderState {
        return requestStrategy.PieceRequestOrderState{
                Priority:     t.piece(i).purePriority(),
                Partial:      t.piecePartiallyDownloaded(i),
                Availability: t.piece(i).availability(),
        }
}

func init() {
        gob.Register(peerId{})
}

type peerId struct {
        *Peer
        ptr uintptr
}

func (p peerId) Uintptr() uintptr {
        return p.ptr
}

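// GobEncode and GobDecode below serialize only the raw pointer value: the encoder aliases the
// in-memory bytes of p.ptr (unsafe.Sizeof(uintptr) bytes), and the decoder copies those bytes
// back over both p.ptr and the embedded *Peer. The resulting identity is only meaningful within
// the same process; presumably this exists so the request-strategy plumbing can key peers by a
// compact, gob-encodable identifier without serializing the whole Peer.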
func (p peerId) GobEncode() (b []byte, _ error) {
        *(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
                Data: uintptr(unsafe.Pointer(&p.ptr)),
                Len:  int(unsafe.Sizeof(p.ptr)),
                Cap:  int(unsafe.Sizeof(p.ptr)),
        }
        return
}

func (p *peerId) GobDecode(b []byte) error {
        if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
                panic(len(b))
        }
        ptr := unsafe.Pointer(&b[0])
        p.ptr = *(*uintptr)(ptr)
        log.Printf("%p", ptr)
        dst := reflect.SliceHeader{
                Data: uintptr(unsafe.Pointer(&p.Peer)),
                Len:  int(unsafe.Sizeof(p.Peer)),
                Cap:  int(unsafe.Sizeof(p.Peer)),
        }
        copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
        return nil
}

type (
        RequestIndex   = requestStrategy.RequestIndex
        chunkIndexType = requestStrategy.ChunkIndex
)

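// desiredPeerRequests holds the request indexes we would like outstanding on a single peer,
// ordered by lessByValue. Its Len/Less/Swap/Push/Pop methods match the container/heap.Interface
// method set, though applyRequestState below heapifies the slice directly via binheap.FromSlice.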
type desiredPeerRequests struct {
        requestIndexes []RequestIndex
        peer           *Peer
        pieceStates    []requestStrategy.PieceRequestOrderState
}

func (p *desiredPeerRequests) Len() int {
        return len(p.requestIndexes)
}

func (p *desiredPeerRequests) Less(i, j int) bool {
        return p.lessByValue(p.requestIndexes[i], p.requestIndexes[j])
}

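// lessByValue reports whether leftRequest should be preferred over rightRequest. Roughly, the
// tie-breaks run: when the peer is choking us, pieces in its allowed-fast set first; then higher
// piece priority; then chunks already requested from this peer; then unrequested chunks; then
// chunks whose current holder has more outstanding requests; then the more recently requested
// chunk (the best cancellation candidate); then rarer pieces; and finally piece index (for
// readahead priority) or the piece request order.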
func (p *desiredPeerRequests) lessByValue(leftRequest, rightRequest RequestIndex) bool {
        t := p.peer.t
        leftPieceIndex := t.pieceIndexOfRequestIndex(leftRequest)
        rightPieceIndex := t.pieceIndexOfRequestIndex(rightRequest)
        ml := multiless.New()
        // Push requests that can't be served right now to the end. But we don't throw them away unless
        // there's a better alternative. This is for when we're using the fast extension and get choked
        // but our requests could still be good when we get unchoked.
        if p.peer.peerChoking {
                ml = ml.Bool(
                        !p.peer.peerAllowedFast.Contains(leftPieceIndex),
                        !p.peer.peerAllowedFast.Contains(rightPieceIndex),
                )
        }
        leftPiece := &p.pieceStates[leftPieceIndex]
        rightPiece := &p.pieceStates[rightPieceIndex]
        // Putting this first means we can steal requests from lesser-performing peers for our first few
        // new requests.
        priority := func() piecePriority {
                // Technically we would be happy with the cached priority here, except we don't actually
                // cache it anymore, and Torrent.piecePriority just does another lookup of *Piece to resolve
                // the priority through Piece.purePriority, which is probably slower.
                leftPriority := leftPiece.Priority
                rightPriority := rightPiece.Priority
                ml = ml.Int(
                        -int(leftPriority),
                        -int(rightPriority),
                )
                if !ml.Ok() {
                        if leftPriority != rightPriority {
                                panic("expected equal")
                        }
                }
                return leftPriority
        }()
        if ml.Ok() {
                return ml.MustLess()
        }
        leftRequestState := t.requestState[leftRequest]
        rightRequestState := t.requestState[rightRequest]
        leftPeer := leftRequestState.peer
        rightPeer := rightRequestState.peer
        // Prefer chunks already requested from this peer.
        ml = ml.Bool(rightPeer == p.peer, leftPeer == p.peer)
        // Prefer unrequested chunks.
        ml = ml.Bool(rightPeer == nil, leftPeer == nil)
        if ml.Ok() {
                return ml.MustLess()
        }
        if leftPeer != nil {
                // The right peer should also be set, or we'd have resolved the computation by now.
                ml = ml.Uint64(
                        rightPeer.requestState.Requests.GetCardinality(),
                        leftPeer.requestState.Requests.GetCardinality(),
                )
                // Could either of the lastRequested be Zero? That's what checking an existing peer is for.
                leftLast := leftRequestState.when
                rightLast := rightRequestState.when
                if leftLast.IsZero() || rightLast.IsZero() {
                        panic("expected non-zero last requested times")
                }
                // We want the most-recently requested on the left. Clients like Transmission serve requests
                // in received order, so the most recently-requested is the one that has the longest until
                // it will be served and therefore is the best candidate to cancel.
                ml = ml.CmpInt64(rightLast.Sub(leftLast).Nanoseconds())
        }
        ml = ml.Int(
                leftPiece.Availability,
                rightPiece.Availability)
        if priority == PiecePriorityReadahead {
                // TODO: For readahead in particular, it would be even better to consider distance from the
                // reader position so that reads earlier in a torrent don't starve reads later in the
                // torrent. This would probably require reconsideration of how readahead priority works.
                ml = ml.Int(leftPieceIndex, rightPieceIndex)
        } else {
                ml = ml.Int(t.pieceRequestOrder[leftPieceIndex], t.pieceRequestOrder[rightPieceIndex])
        }
        return ml.Less()
}

func (p *desiredPeerRequests) Swap(i, j int) {
        p.requestIndexes[i], p.requestIndexes[j] = p.requestIndexes[j], p.requestIndexes[i]
}

func (p *desiredPeerRequests) Push(x interface{}) {
        p.requestIndexes = append(p.requestIndexes, x.(RequestIndex))
}

func (p *desiredPeerRequests) Pop() interface{} {
        last := len(p.requestIndexes) - 1
        x := p.requestIndexes[last]
        p.requestIndexes = p.requestIndexes[:last]
        return x
}

type desiredRequestState struct {
        Requests   desiredPeerRequests
        Interested bool
}

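// getDesiredRequestState collects the undirtied request indexes for every requestable piece this
// peer has, and records whether we must signal interest to make those requests. The returned
// indexes are unordered; applyRequestState orders them with lessByValue when building its heap.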
func (p *Peer) getDesiredRequestState() (desired desiredRequestState) {
        t := p.t
        if !t.haveInfo() {
                return
        }
        if t.closed.IsSet() {
                return
        }
        input := t.getRequestStrategyInput()
        requestHeap := desiredPeerRequests{
                peer:           p,
                pieceStates:    t.requestPieceStates,
                requestIndexes: t.requestIndexes,
        }
        // Caller-provided allocation for roaring bitmap iteration.
        var it typedRoaring.Iterator[RequestIndex]
        requestStrategy.GetRequestablePieces(
                input,
                t.getPieceRequestOrder(),
                func(ih InfoHash, pieceIndex int, pieceExtra requestStrategy.PieceRequestOrderState) {
                        if ih != t.infoHash {
                                return
                        }
                        if !p.peerHasPiece(pieceIndex) {
                                return
                        }
                        requestHeap.pieceStates[pieceIndex] = pieceExtra
                        allowedFast := p.peerAllowedFast.Contains(pieceIndex)
                        t.iterUndirtiedRequestIndexesInPiece(&it, pieceIndex, func(r requestStrategy.RequestIndex) {
                                if !allowedFast {
                                        // We must signal interest to request this. TODO: We could set interested if the
                                        // peer's pieces (minus the allowed fast set) overlap with our missing pieces if
                                        // there are any readers, or any pending pieces.
                                        desired.Interested = true
                                        // We can make, or continue to sustain, a request here if we're not choked, or
                                        // if we made the request previously (presumably while unchoked) and the peer
                                        // hasn't responded yet (the request was retained because we're using the fast
                                        // extension).
                                        if p.peerChoking && !p.requestState.Requests.Contains(r) {
                                                // We can't request this right now.
                                                return
                                        }
                                }
                                if p.requestState.Cancelled.Contains(r) {
                                        // Can't re-request while awaiting acknowledgement.
                                        return
                                }
                                requestHeap.requestIndexes = append(requestHeap.requestIndexes, r)
                        })
                },
        )
        t.assertPendingRequests()
        desired.Requests = requestHeap
        return
}

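// maybeUpdateActualRequestState recomputes and applies the desired request state, but only when
// an update has been flagged via needRequestUpdate. The work runs under a pprof label carrying
// the update reason, so request churn shows up attributed in profiles.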
func (p *Peer) maybeUpdateActualRequestState() {
        if p.closed.IsSet() {
                return
        }
        if p.needRequestUpdate == "" {
                return
        }
        if p.needRequestUpdate == peerUpdateRequestsTimerReason {
                since := time.Since(p.lastRequestUpdate)
                if since < updateRequestsTimerDuration {
                        panic(since)
                }
        }
        pprof.Do(
                context.Background(),
                pprof.Labels("update request", p.needRequestUpdate),
                func(_ context.Context) {
                        next := p.getDesiredRequestState()
                        p.applyRequestState(next)
                        p.t.requestIndexes = next.Requests.requestIndexes[:0]
                },
        )
}

// Transmit/action the request state to the peer.
func (p *Peer) applyRequestState(next desiredRequestState) {
        current := &p.requestState
        if !p.setInterested(next.Interested) {
                return
        }
        more := true
        requestHeap := binheap.FromSlice(next.Requests.requestIndexes, next.Requests.lessByValue)
        t := p.t
        originalRequestCount := current.Requests.GetCardinality()
        // We're either here on a timer, or because we ran out of requests. Both are valid reasons to
        // alter peakRequests.
        if originalRequestCount != 0 && p.needRequestUpdate != peerUpdateRequestsTimerReason {
                panic(fmt.Sprintf(
                        "expected zero existing requests (%v) for update reason %q",
                        originalRequestCount, p.needRequestUpdate))
        }
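        // Fill the request state from the best-ranked requests, stopping once we hit the peer's
        // nominal request limit (counting both live and cancelled-but-unacknowledged requests).
        // A rough worked example of the stealing arithmetic below, with hypothetical numbers: if
        // we hold 4 requests and another peer holds this chunk with 6 uncancelled requests, then
        // diff = (4+1) - (6-1) = 0, so we cancel their request and take it. At diff == 1 the
        // steal would leave us exactly one request ahead of them, so we only take it if we
        // received a useful chunk at least as recently as they did.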
        for requestHeap.Len() != 0 && maxRequests(current.Requests.GetCardinality()+current.Cancelled.GetCardinality()) < p.nominalMaxRequests() {
                req := requestHeap.Pop()
                existing := t.requestingPeer(req)
                if existing != nil && existing != p {
                        // Don't steal from the poor.
                        diff := int64(current.Requests.GetCardinality()) + 1 - (int64(existing.uncancelledRequests()) - 1)
                        // Steal a request that leaves us with one more request than the existing peer
                        // connection if the stealer more recently received a chunk.
                        if diff > 1 || (diff == 1 && p.lastUsefulChunkReceived.Before(existing.lastUsefulChunkReceived)) {
                                continue
                        }
                        t.cancelRequest(req)
                }
                more = p.mustRequest(req)
                if !more {
                        break
                }
        }
        if !more {
                // This might fail if we incorrectly determine that we can fit up to the maximum allowed
                // requests into the available write buffer space. We don't want that to happen because it
                // makes our peak requests dependent on how much was already in the buffer.
                panic(fmt.Sprintf(
                        "couldn't apply entire request state [newRequests=%v]",
                        current.Requests.GetCardinality()-originalRequestCount))
        }
        newPeakRequests := maxRequests(current.Requests.GetCardinality() - originalRequestCount)
        // log.Printf(
        //      "requests %v->%v (peak %v->%v) reason %q (peer %v)",
        //      originalRequestCount, current.Requests.GetCardinality(), p.peakRequests, newPeakRequests, p.needRequestUpdate, p)
        p.peakRequests = newPeakRequests
        p.needRequestUpdate = ""
        p.lastRequestUpdate = time.Now()
        if enableUpdateRequestsTimer {
                p.updateRequestsTimer.Reset(updateRequestsTimerDuration)
        }
}

// This could be set to 10s to match the unchoke/request update interval recommended by some
// specifications. I've set it shorter to trigger it more often for testing for now.
const (
        updateRequestsTimerDuration = 3 * time.Second
        enableUpdateRequestsTimer   = false
)