13 "github.com/anacrolix/log"
14 "github.com/anacrolix/multiless"
16 request_strategy "github.com/anacrolix/torrent/request-strategy"
19 // Returns what is necessary to run request_strategy.GetRequestablePieces for primaryTorrent.
// NOTE(review): interior lines of this function are elided in this view (source numbering
// jumps); comments describe only the visible lines.
20 func (cl *Client) getRequestStrategyInput(primaryTorrent *Torrent) (input request_strategy.Input) {
// Client-wide limit on unverified bytes is passed straight through from config.
21 input.MaxUnverifiedBytes = cl.config.MaxUnverifiedBytes
// Without the info we cannot size pieces/chunks; elided lines presumably return early here.
22 if !primaryTorrent.haveInfo() {
// If the torrent's storage exposes a Capacity func, sample it; the ok-result guards use of cap.
25 if capFunc := primaryTorrent.storage.Capacity; capFunc != nil {
26 if cap, ok := (*capFunc)(); ok {
// No shared storage capacity: only the primary torrent competes for requests, so the input
// contains just that torrent.
31 if input.Capacity == nil {
32 input.Torrents = []request_strategy.Torrent{primaryTorrent.requestStrategyTorrentInput()}
// Shared-capacity path: gather every client torrent that shares the primary torrent's
// storage capacity, pre-sizing the slice to the client's torrent count.
36 input.Torrents = make([]request_strategy.Torrent, 0, len(cl.torrents))
37 for _, t := range cl.torrents {
39 // This would be removed if metadata is handled here. Determining chunks per piece
40 // requires the info. If we have no info, we have no pieces too, so the end result is
// Torrents on a different storage capacity don't contend with the primary torrent;
// the elided body presumably continues the loop.
44 if t.storage.Capacity != primaryTorrent.storage.Capacity {
47 input.Torrents = append(input.Torrents, t.requestStrategyTorrentInput())
// Convenience wrapper: builds the request-strategy input for this torrent via its client.
52 func (t *Torrent) getRequestStrategyInput() request_strategy.Input {
53 return t.cl.getRequestStrategyInput(t)
// Converts this torrent into the request_strategy.Torrent form, materialising one
// request_strategy.Piece per torrent piece. Some field initialisers are elided in this view.
56 func (t *Torrent) requestStrategyTorrentInput() request_strategy.Torrent {
57 rst := request_strategy.Torrent{
59 ChunksPerPiece: t.chunksPerRegularPiece(),
// Pre-size to the piece count, then build each piece snapshot in index order.
61 rst.Pieces = make([]request_strategy.Piece, 0, len(t.pieces))
62 for i := range t.pieces {
63 rst.Pieces = append(rst.Pieces, t.makeRequestStrategyPiece(i))
// Snapshot of the piece-request-order inputs for piece i: its pure priority, whether it is
// partially downloaded, and its current availability among peers.
68 func (t *Torrent) requestStrategyPieceOrderState(i int) request_strategy.PieceRequestOrderState {
69 return request_strategy.PieceRequestOrderState{
70 Priority: t.piece(i).purePriority(),
71 Partial: t.piecePartiallyDownloaded(i),
72 Availability: t.piece(i).availability,
// Builds the request_strategy.Piece view of piece i. The binding of p (presumably
// p := t.piece(i)) is on an elided line — confirm against the full file.
76 func (t *Torrent) makeRequestStrategyPiece(i int) request_strategy.Piece {
78 return request_strategy.Piece{
// Request is false for pieces we must not request (see ignorePieceForRequests).
79 Request: !t.ignorePieceForRequests(i),
80 Priority: p.purePriority(),
81 Partial: t.piecePartiallyDownloaded(i),
82 Availability: p.availability,
83 Length: int64(p.length()),
84 NumPendingChunks: int(t.pieceNumPendingChunks(i)),
// Shared iterator over this piece's undirtied (still-needed) chunks.
85 IterPendingChunks: &p.undirtiedChunksIter,
// Registers peerId with gob so it can be encoded/decoded by name; the enclosing init
// function's header is on an elided line.
90 gob.Register(peerId{})
// Returns the peer's identity as a uintptr; body elided in this view (presumably returns p.ptr).
98 func (p peerId) Uintptr() uintptr {
// GobEncode encodes the peerId as the raw bytes of its ptr field, by aliasing b's
// reflect.SliceHeader directly onto p.ptr's memory via unsafe. No copy is made: b points
// into p for the duration of the call. NOTE(review): reflect.SliceHeader use like this is
// fragile per the reflect docs — verify against current unsafe.Slice guidance.
102 func (p peerId) GobEncode() (b []byte, _ error) {
103 *(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
104 Data: uintptr(unsafe.Pointer(&p.ptr)),
105 Len: int(unsafe.Sizeof(p.ptr)),
106 Cap: int(unsafe.Sizeof(p.ptr)),
// GobDecode reverses GobEncode: it requires exactly Sizeof(p.ptr) bytes, reinstates p.ptr
// from them, then byte-copies the input over p.Peer's memory through a hand-built slice
// header. The error return on the length check is on an elided line.
111 func (p *peerId) GobDecode(b []byte) error {
112 if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
// Reinterpret the first bytes of b as the stored uintptr identity.
115 ptr := unsafe.Pointer(&b[0])
116 p.ptr = *(*uintptr)(ptr)
// NOTE(review): debug logging of a raw pointer left in decode path — consider removing.
117 log.Printf("%p", ptr)
// dst aliases p.Peer's memory as a []byte so copy can overwrite it from b.
118 dst := reflect.SliceHeader{
119 Data: uintptr(unsafe.Pointer(&p.Peer)),
120 Len: int(unsafe.Sizeof(p.Peer)),
121 Cap: int(unsafe.Sizeof(p.Peer)),
123 copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
// Local aliases for the request-strategy package's index types; the surrounding
// type ( ... ) declaration lines are elided in this view.
128 RequestIndex = request_strategy.RequestIndex
129 chunkIndexType = request_strategy.ChunkIndex
// peerRequests is a heap (see Len/Less/Swap/Push/Pop below) of candidate request indexes
// for one peer, ordered against the torrent's strategy input. The peer field referenced by
// the methods is declared on an elided line.
132 type peerRequests struct {
133 requestIndexes []RequestIndex
135 torrentStrategyInput *request_strategy.Torrent
// Len implements sort/heap.Interface: number of candidate request indexes.
138 func (p *peerRequests) Len() int {
139 return len(p.requestIndexes)
// Less orders candidate requests for the heap, most-desirable first, using a multiless
// chain: choked-and-not-allowed-fast last, then fewer peers already requesting, then
// requests we currently hold, then higher piece priority, then lower availability, then
// piece and request index as tiebreakers. Several lines are elided in this view.
142 func (p *peerRequests) Less(i, j int) bool {
143 leftRequest := p.requestIndexes[i]
144 rightRequest := p.requestIndexes[j]
// Integer division maps a request index to its piece index.
146 leftPieceIndex := leftRequest / p.torrentStrategyInput.ChunksPerPiece
147 rightPieceIndex := rightRequest / p.torrentStrategyInput.ChunksPerPiece
// Whether this peer already has each request outstanding.
148 leftCurrent := p.peer.actualRequestState.Requests.Contains(leftRequest)
149 rightCurrent := p.peer.actualRequestState.Requests.Contains(rightRequest)
// NOTE(review): the bare t here must be bound on an elided line (likely t := p.peer.t) —
// confirm against the full file. pending counts other peers' outstanding requests.
150 pending := func(index RequestIndex, current bool) int {
151 ret := t.pendingRequests.Get(index)
155 // See https://github.com/anacrolix/torrent/issues/679 for possible issues. This should be
162 ml := multiless.New()
163 // Push requests that can't be served right now to the end. But we don't throw them away unless
164 // there's a better alternative. This is for when we're using the fast extension and get choked
165 // but our requests could still be good when we get unchoked.
166 if p.peer.peerChoking {
168 !p.peer.peerAllowedFast.Contains(leftPieceIndex),
169 !p.peer.peerAllowedFast.Contains(rightPieceIndex),
// Fewer other-peer pending requests sorts earlier (comparator call elided above this pair).
173 pending(leftRequest, leftCurrent),
174 pending(rightRequest, rightCurrent))
// Prefer requests we already hold (false sorts first, so !current inverts).
175 ml = ml.Bool(!leftCurrent, !rightCurrent)
// Negated so that higher piece priority sorts earlier.
177 -int(p.torrentStrategyInput.Pieces[leftPieceIndex].Priority),
178 -int(p.torrentStrategyInput.Pieces[rightPieceIndex].Priority),
// Rarer pieces (lower availability) sort earlier.
181 int(p.torrentStrategyInput.Pieces[leftPieceIndex].Availability),
182 int(p.torrentStrategyInput.Pieces[rightPieceIndex].Availability))
// Final deterministic tiebreakers: piece index, then request index.
183 ml = ml.Uint32(leftPieceIndex, rightPieceIndex)
184 ml = ml.Uint32(leftRequest, rightRequest)
// Swap implements sort/heap.Interface by exchanging two candidate indexes.
188 func (p *peerRequests) Swap(i, j int) {
189 p.requestIndexes[i], p.requestIndexes[j] = p.requestIndexes[j], p.requestIndexes[i]
// Push implements heap.Interface; x must be a RequestIndex (panics otherwise).
192 func (p *peerRequests) Push(x interface{}) {
193 p.requestIndexes = append(p.requestIndexes, x.(RequestIndex))
// Pop implements heap.Interface: removes and returns the last candidate index
// (the return statement is on an elided line).
196 func (p *peerRequests) Pop() interface{} {
197 last := len(p.requestIndexes) - 1
198 x := p.requestIndexes[last]
199 p.requestIndexes = p.requestIndexes[:last]
// desiredRequestState is the target request set computed for a peer; an Interested field
// (set in getDesiredRequestState) is declared on an elided line.
203 type desiredRequestState struct {
204 Requests []RequestIndex
// Computes the request set we would like this peer to serve: builds the strategy input,
// collects every requestable chunk the peer can serve into a heap, then pops the best
// candidates up to nominalMaxRequests. Several lines are elided in this view.
208 func (p *Peer) getDesiredRequestState() (desired desiredRequestState) {
209 input := p.t.getRequestStrategyInput()
210 requestHeap := peerRequests{
// Locate this torrent's entry in the input (pointer into the slice, so it stays live).
213 for i := range input.Torrents {
214 t := &input.Torrents[i]
215 if t.InfoHash == p.t.infoHash {
216 requestHeap.torrentStrategyInput = t
// Walk requestable pieces per the strategy, keyed by the shared storage capacity's order.
220 request_strategy.GetRequestablePieces(
222 p.t.cl.pieceRequestOrder[p.t.storage.Capacity],
223 func(t *request_strategy.Torrent, rsp *request_strategy.Piece, pieceIndex int) {
// The callback sees every torrent on this capacity; skip pieces of other torrents.
224 if t.InfoHash != p.t.infoHash {
// Skip pieces the peer hasn't announced.
227 if !p.peerHasPiece(pieceIndex) {
230 allowedFast := p.peerAllowedFast.ContainsInt(pieceIndex)
231 rsp.IterPendingChunks.Iter(func(ci request_strategy.ChunkIndex) {
// Convert piece-local chunk index to a torrent-wide request index.
232 r := p.t.pieceRequestIndexOffset(pieceIndex) + ci
233 // if p.t.pendingRequests.Get(r) != 0 && !p.actualRequestState.Requests.Contains(r) {
237 // We must signal interest to request this
238 desired.Interested = true
239 // We can make or will allow sustaining a request here if we're not choked, or
240 // have made the request previously (presumably while unchoked), and haven't had
241 // the peer respond yet (and the request was retained because we are using the
// Choked with no prior outstanding request: can't request now (elided continue/return).
243 if p.peerChoking && !p.actualRequestState.Requests.Contains(r) {
244 // We can't request this right now.
248 requestHeap.requestIndexes = append(requestHeap.requestIndexes, r)
// Heapify once, then take the best candidates up to the per-peer request cap.
252 p.t.assertPendingRequests()
253 heap.Init(&requestHeap)
254 for requestHeap.Len() != 0 && len(desired.Requests) < p.nominalMaxRequests() {
255 requestIndex := heap.Pop(&requestHeap).(RequestIndex)
256 desired.Requests = append(desired.Requests, requestIndex)
// If an update has been requested (needRequestUpdate non-empty), recomputes the desired
// request state and applies it, under a pprof label naming the update reason. The early
// return and the pprof.Do call wrapping the closure are on elided lines.
261 func (p *Peer) maybeUpdateActualRequestState() bool {
262 if p.needRequestUpdate == "" {
267 context.Background(),
268 pprof.Labels("update request", p.needRequestUpdate),
269 func(_ context.Context) {
270 next := p.getDesiredRequestState()
271 more = p.applyRequestState(next)
277 // Transmit/action the request state to the peer.
// NOTE(review): this function is elided here and continues past the end of the visible
// chunk; comments cover only the visible lines. Visible flow: sync interest, cancel
// requests no longer desired, then issue desired requests up to the cap, shuffling the
// tail once another peer already holds a pending request.
278 func (p *Peer) applyRequestState(next desiredRequestState) bool {
279 current := &p.actualRequestState
// If we can't set interest (e.g. write buffer full), stop here (elided return).
280 if !p.setInterested(next.Interested) {
// cancel = currently-outstanding requests minus the ones still desired; the subtraction
// of next.Requests from the clone happens in the elided loop body.
284 cancel := current.Requests.Clone()
285 for _, ri := range next.Requests {
288 cancel.Iterate(func(req uint32) bool {
// Issue the desired requests in heap-pop (best-first) order.
297 for i := 0; i < len(next.Requests); i++ {
298 req := next.Requests[i]
299 if p.cancelledRequests.Contains(req) {
300 // Waiting for a reject or piece message, which will suitably trigger us to update our
301 // requests, so we can skip this one with no additional consideration.
304 // The cardinality of our desired requests shouldn't exceed the max requests since it's used
305 // in the calculation of the requests. However, if we cancelled requests and they haven't
306 // been rejected or serviced yet with the fast extension enabled, we can end up with more
307 // extra outstanding requests. We could subtract the number of outstanding cancels from the
308 // next request cardinality, but peers might not like that.
309 if maxRequests(current.Requests.GetCardinality()) >= p.nominalMaxRequests() {
310 // log.Printf("not assigning all requests [desired=%v, cancelled=%v, current=%v, max=%v]",
311 // next.Requests.GetCardinality(),
312 // p.cancelledRequests.GetCardinality(),
313 // current.Requests.GetCardinality(),
314 // p.nominalMaxRequests(),
// How many other peers already have the head desired request outstanding; lastPending is
// bound on an elided line — confirm against the full file.
318 otherPending := p.t.pendingRequests.Get(next.Requests[0])
319 if p.actualRequestState.Requests.Contains(next.Requests[0]) {
322 if otherPending < lastPending {
323 // Pending should only rise. It's supposed to be the strongest ordering criteria. If it
324 // doesn't, our shuffling condition could be wrong.
327 // If the request has already been requested by another peer, shuffle this and the rest of
328 // the requests (since according to the increasing condition, the rest of the indices
329 // already have an outstanding request with another peer).
330 if !shuffled && otherPending > 0 {
331 shuffleReqs := next.Requests[i:]
// In-place Fisher-Yates shuffle of the remaining tail via math/rand.
332 rand.Shuffle(len(shuffleReqs), func(i, j int) {
333 shuffleReqs[i], shuffleReqs[j] = shuffleReqs[j], shuffleReqs[i]
335 // log.Printf("shuffled reqs [%v:%v]", i, len(next.Requests))
// Actually send the request; more=false presumably means the peer's writer is saturated.
342 more = p.mustRequest(req)
// Housekeeping after applying: stop the pending update timer, clear the update reason,
// and re-arm the timer while requests remain outstanding.
347 p.updateRequestsTimer.Stop()
349 p.needRequestUpdate = ""
350 if !current.Requests.IsEmpty() {
351 p.updateRequestsTimer.Reset(3 * time.Second)