package request_strategy

import (
	"bytes"
	"fmt"
	"sort"
	"sync"

	"github.com/anacrolix/multiless"
	"github.com/anacrolix/torrent/metainfo"
	"github.com/anacrolix/torrent/storage"

	pp "github.com/anacrolix/torrent/peer_protocol"
	"github.com/anacrolix/torrent/types"
)

type (
	Request       = types.Request
	pieceIndex    = types.PieceIndex
	piecePriority = types.PiecePriority
	// This can be made into a type-param later, will be great for testing.
	ChunkSpec = types.ChunkSpec
)

type ClientPieceOrder struct{}

type filterTorrent struct {
	*Torrent
	unverifiedBytes int64
	// Potentially shared with other torrents.
	storageLeft *int64
}

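// sortFilterPieces orders pieces for allocation: higher priority first, then
// partially downloaded pieces, then lower availability, then lower piece
// index, with the torrent infohash as a final tie-break so the order is
// deterministic across torrents.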
func sortFilterPieces(pieces []filterPiece) {
	sort.Slice(pieces, func(_i, _j int) bool {
		i := &pieces[_i]
		j := &pieces[_j]
		return multiless.New().Int(
			int(j.Priority), int(i.Priority),
		).Bool(
			j.Partial, i.Partial,
		).Int64(
			i.Availability, j.Availability,
		).Int(
			i.index, j.index,
		).Lazy(func() multiless.Computation {
			return multiless.New().Cmp(bytes.Compare(
				i.t.InfoHash[:],
				j.t.InfoHash[:],
			))
		}).MustLess()
	})
}

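// requestsPeer accumulates the request state being computed for a peer during
// a run. requestablePiecesRemaining counts how many requestable pieces this
// peer could still be allocated chunks from; it is used to stripe requests
// across peers that are on the last piece they can contribute to.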
type requestsPeer struct {
	Peer
	nextState                  PeerNextRequestState
	requestablePiecesRemaining int
}

func (rp *requestsPeer) canFitRequest() bool {
	return len(rp.nextState.Requests) < rp.MaxRequests
}

func (rp *requestsPeer) addNextRequest(r Request) {
	_, ok := rp.nextState.Requests[r]
	if ok {
		panic("should only add once")
	}
	rp.nextState.Requests[r] = struct{}{}
}

type peersForPieceRequests struct {
	requestsInPiece int
	*requestsPeer
}

func (me *peersForPieceRequests) addNextRequest(r Request) {
	me.requestsPeer.addNextRequest(r)
	me.requestsInPiece++
}

type requestablePiece struct {
	index             pieceIndex
	t                 *Torrent
	alwaysReallocate  bool
	NumPendingChunks  int
	IterPendingChunks ChunksIter
}

type filterPiece struct {
	t     *filterTorrent
	index pieceIndex
	*Piece
}

// Calls f with requestable pieces in order.
func GetRequestablePieces(input Input, f func(t *Torrent, p *Piece, pieceIndex int)) {
	maxPieces := 0
	for i := range input.Torrents {
		maxPieces += len(input.Torrents[i].Pieces)
	}
	pieces := make([]filterPiece, 0, maxPieces)
	// Storage capacity left for this run, keyed by the storage capacity pointer on the storage
	// TorrentImpl. A nil value means no capacity limit.
	storageLeft := make(map[storage.TorrentCapacity]*int64)
	for _t := range input.Torrents {
		// TODO: We could do metainfo requests here.
		t := &filterTorrent{
			Torrent:         &input.Torrents[_t],
			unverifiedBytes: 0,
		}
		key := t.Capacity
		if key != nil {
			if _, ok := storageLeft[key]; !ok {
				capacity, ok := (*key)()
				if ok {
					storageLeft[key] = &capacity
				} else {
					storageLeft[key] = nil
				}
			}
			t.storageLeft = storageLeft[key]
		}
		for i := range t.Pieces {
			pieces = append(pieces, filterPiece{
				t:     t,
				index: i,
				Piece: &t.Pieces[i],
			})
		}
	}
	sortFilterPieces(pieces)
	var allTorrentsUnverifiedBytes int64
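	// Walk the sorted pieces, skipping any that would exceed the remaining
	// storage capacity or the per-torrent / across-torrent unverified-byte
	// limits, and pass the rest to f in order.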
	for _, piece := range pieces {
		if left := piece.t.storageLeft; left != nil {
			if *left < int64(piece.Length) {
				continue
			}
			*left -= int64(piece.Length)
		}
		if !piece.Request || piece.NumPendingChunks == 0 {
			// TODO: Clarify exactly what is verified. Stuff that's being hashed should be
			// considered unverified and hold up further requests.
			continue
		}
		if piece.t.MaxUnverifiedBytes != 0 && piece.t.unverifiedBytes+piece.Length > piece.t.MaxUnverifiedBytes {
			continue
		}
		if input.MaxUnverifiedBytes != 0 && allTorrentsUnverifiedBytes+piece.Length > input.MaxUnverifiedBytes {
			continue
		}
		piece.t.unverifiedBytes += piece.Length
		allTorrentsUnverifiedBytes += piece.Length
		f(piece.t.Torrent, piece.Piece, piece.index)
	}
	return
}

type Input struct {
	Torrents           []Torrent
	MaxUnverifiedBytes int64
}

// TODO: We could do metainfo requests here.
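// Run computes the next request state for every peer across all the torrents
// in the input. Requestable pieces are gathered in priority order, then their
// pending chunks are allocated to the peers that can serve them.
//
// A minimal usage sketch (assuming the caller has already populated the
// Torrent, Piece and Peer values):
//
//	next := request_strategy.Run(request_strategy.Input{
//		Torrents:           torrents,
//		MaxUnverifiedBytes: 64 << 20,
//	})
//	for peerId, state := range next {
//		// Apply state.Interested and state.Requests to the live peer
//		// connection identified by peerId.
//		_ = peerId
//		_ = state
//	}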
func Run(input Input) map[PeerId]PeerNextRequestState {
	var requestPieces []requestablePiece
	GetRequestablePieces(input, func(t *Torrent, piece *Piece, pieceIndex int) {
		requestPieces = append(requestPieces, requestablePiece{
			index:             pieceIndex,
			t:                 t,
			NumPendingChunks:  piece.NumPendingChunks,
			IterPendingChunks: piece.iterPendingChunksWrapper,
			alwaysReallocate:  piece.Priority >= types.PiecePriorityNext,
		})
	})
	torrents := input.Torrents
	allPeers := make(map[metainfo.Hash][]*requestsPeer, len(torrents))
	for _, t := range torrents {
		peers := make([]*requestsPeer, 0, len(t.Peers))
		for _, p := range t.Peers {
			peers = append(peers, &requestsPeer{
				Peer: p,
				nextState: PeerNextRequestState{
					Requests: make(map[Request]struct{}, p.MaxRequests),
				},
			})
		}
		allPeers[t.InfoHash] = peers
	}
	for _, piece := range requestPieces {
		for _, peer := range allPeers[piece.t.InfoHash] {
			if peer.canRequestPiece(piece.index) {
				peer.requestablePiecesRemaining++
			}
		}
	}
	for _, piece := range requestPieces {
		allocatePendingChunks(piece, allPeers[piece.t.InfoHash])
	}
	ret := make(map[PeerId]PeerNextRequestState)
	for _, peers := range allPeers {
		for _, rp := range peers {
			if rp.requestablePiecesRemaining != 0 {
				panic(rp.requestablePiecesRemaining)
			}
			if _, ok := ret[rp.Id]; ok {
				panic(fmt.Sprintf("duplicate peer id: %v", rp.Id))
			}
			ret[rp.Id] = rp.nextState
		}
	}
	return ret
}

// Checks that a sorted peersForPiece slice makes sense.
func ensureValidSortedPeersForPieceRequests(peers *peersForPieceSorter) {
	if !sort.IsSorted(peers) {
		panic("not sorted")
	}
	peerMap := make(map[*peersForPieceRequests]struct{}, peers.Len())
	for _, p := range peers.peersForPiece {
		if _, ok := peerMap[p]; ok {
			panic(p)
		}
		peerMap[p] = struct{}{}
	}
}

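// peersForPiecesPool recycles the per-piece peer slices between
// allocatePendingChunks calls so each run doesn't reallocate them.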
var peersForPiecesPool sync.Pool

func makePeersForPiece(cap int) []*peersForPieceRequests {
	got := peersForPiecesPool.Get()
	if got == nil {
		return make([]*peersForPieceRequests, 0, cap)
	}
	return got.([]*peersForPieceRequests)[:0]
}

type peersForPieceSorter struct {
	peersForPiece []*peersForPieceRequests
	req           *Request
	p             requestablePiece
}

func (me *peersForPieceSorter) Len() int {
	return len(me.peersForPiece)
}

func (me *peersForPieceSorter) Swap(i, j int) {
	me.peersForPiece[i], me.peersForPiece[j] = me.peersForPiece[j], me.peersForPiece[i]
}

func (me *peersForPieceSorter) Less(_i, _j int) bool {
	i := me.peersForPiece[_i]
	j := me.peersForPiece[_j]
	req := me.req
	p := &me.p
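	// When re-sorting for a specific request, prefer peers that already have
	// that request outstanding, so an existing assignment isn't shuffled to a
	// different peer without reason.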
	byHasRequest := func() multiless.Computation {
		ml := multiless.New()
		if req != nil {
			_, iHas := i.nextState.Requests[*req]
			_, jHas := j.nextState.Requests[*req]
			ml = ml.Bool(jHas, iHas)
		}
		return ml
	}()
	ml := multiless.New()
	// We always "reallocate", that is, force even striping amongst peers, when a peer is on
	// the last piece it can contribute to, or when the piece is marked for this behaviour.
	// Striping prevents starving peers of requests, and always re-balances toward the
	// fastest known peers.
	if !p.alwaysReallocate {
		ml = ml.Bool(
			j.requestablePiecesRemaining == 1,
			i.requestablePiecesRemaining == 1)
	}
	if p.alwaysReallocate || j.requestablePiecesRemaining == 1 {
		ml = ml.Int(
			i.requestsInPiece,
			j.requestsInPiece)
	} else {
		ml = ml.AndThen(byHasRequest)
	}
	ml = ml.Int(
		i.requestablePiecesRemaining,
		j.requestablePiecesRemaining,
	).Float64(
		j.DownloadRate,
		i.DownloadRate,
	)
	if ml.Ok() {
		return ml.Less()
	}
	ml = ml.AndThen(byHasRequest)
	return ml.Int64(
		int64(j.Age), int64(i.Age),
		// TODO: Probably peer priority can come next
	).Uintptr(
		i.Id.Uintptr(),
		j.Id.Uintptr(),
	).MustLess()
}

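// allocatePendingChunks distributes the pending chunks of a single requestable
// piece among the peers that can request it, re-sorting the candidate peers
// for each chunk and handling chunks that were already assigned to peers in a
// previous run.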
func allocatePendingChunks(p requestablePiece, peers []*requestsPeer) {
	peersForPiece := makePeersForPiece(len(peers))
	for _, peer := range peers {
		if !peer.canRequestPiece(p.index) {
			continue
		}
		if !peer.canFitRequest() {
			peer.requestablePiecesRemaining--
			continue
		}
		peersForPiece = append(peersForPiece, &peersForPieceRequests{
			requestsInPiece: 0,
			requestsPeer:    peer,
		})
	}
	defer func() {
		for _, peer := range peersForPiece {
			peer.requestablePiecesRemaining--
		}
		peersForPiecesPool.Put(peersForPiece)
	}()
	peersForPieceSorter := peersForPieceSorter{
		peersForPiece: peersForPiece,
		p:             p,
	}
	sortPeersForPiece := func(req *Request) {
		peersForPieceSorter.req = req
		sort.Sort(&peersForPieceSorter)
		//ensureValidSortedPeersForPieceRequests(&peersForPieceSorter)
	}
	// A chunk can be preallocated to several peers if those peers haven't been able to
	// replace their "actual" request state with the "next" one before another request
	// strategy run occurs.
	preallocated := make(map[ChunkSpec][]*peersForPieceRequests, p.NumPendingChunks)
	p.IterPendingChunks(func(spec ChunkSpec) {
		req := Request{pp.Integer(p.index), spec}
		for _, peer := range peersForPiece {
			if h := peer.HasExistingRequest; h == nil || !h(req) {
				continue
			}
			if !peer.canFitRequest() {
				continue
			}
			preallocated[spec] = append(preallocated[spec], peer)
			peer.addNextRequest(req)
		}
	})
	pendingChunksRemaining := int(p.NumPendingChunks)
	p.IterPendingChunks(func(chunk types.ChunkSpec) {
		if _, ok := preallocated[chunk]; ok {
			return
		}
		req := Request{pp.Integer(p.index), chunk}
		defer func() { pendingChunksRemaining-- }()
		sortPeersForPiece(nil)
		for _, peer := range peersForPiece {
			if !peer.canFitRequest() {
				continue
			}
			if !peer.pieceAllowedFastOrDefault(p.index) {
				// TODO: Verify that it's okay to stay uninterested if we request allowed fast pieces.
				peer.nextState.Interested = true
				if peer.Choking {
					continue
				}
			}
			peer.addNextRequest(req)
			break
		}
	})
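	// For each chunk that was preallocated above, drop it from the peers that held it,
	// re-sort with that request as a tie-break, and assign it to the best candidate,
	// which may well be one of the peers that already had it.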
chunk:
	for chunk, prePeers := range preallocated {
		pendingChunksRemaining--
		req := Request{pp.Integer(p.index), chunk}
		for _, pp := range prePeers {
			pp.requestsInPiece--
		}
		sortPeersForPiece(&req)
		for _, pp := range prePeers {
			delete(pp.nextState.Requests, req)
		}
		for _, peer := range peersForPiece {
			if !peer.canFitRequest() {
				continue
			}
			if !peer.pieceAllowedFastOrDefault(p.index) {
				// TODO: Verify that it's okay to stay uninterested if we request allowed fast pieces.
				peer.nextState.Interested = true
				if peer.Choking {
					continue
				}
			}
			peer.addNextRequest(req)
			continue chunk
		}
	}
	if pendingChunksRemaining != 0 {
		panic(pendingChunksRemaining)
	}
}