12 "github.com/anacrolix/log"
13 "github.com/anacrolix/multiless"
15 request_strategy "github.com/anacrolix/torrent/request-strategy"
// getRequestStrategyInput snapshots every torrent and peer known to the
// Client into the plain value types consumed by the request_strategy
// package. NOTE(review): several lines of this function are elided in this
// view; comments below describe only what is visible.
18 func (cl *Client) getRequestStrategyInput() request_strategy.Input {
19 ts := make([]request_strategy.Torrent, 0, len(cl.torrents))
20 for _, t := range cl.torrents {
22 // This would be removed if metadata is handled here. We have to guard against not
23 // knowing the piece size. If we have no info, we have no pieces too, so the end result
27 rst := request_strategy.Torrent{
29 ChunksPerPiece: t.chunksPerRegularPiece(),
// Capacity presumably comes from the torrent's storage implementation —
// confirm against the storage package; the assignment is unconditional here.
32 rst.Capacity = t.storage.Capacity
// Pre-size the piece snapshot to avoid append growth.
34 rst.Pieces = make([]request_strategy.Piece, 0, len(t.pieces))
35 for i := range t.pieces {
// One strategy Piece per torrent piece; p here appears to be the piece at
// index i (binding elided in this view — TODO confirm).
37 rst.Pieces = append(rst.Pieces, request_strategy.Piece{
38 Request: !t.ignorePieceForRequests(i),
39 Priority: p.purePriority(),
40 Partial: t.piecePartiallyDownloaded(i),
41 Availability: p.availability,
42 Length: int64(p.length()),
43 NumPendingChunks: int(t.pieceNumPendingChunks(i)),
44 IterPendingChunks: &p.undirtiedChunksIter,
// Snapshot per-peer state. Inside this closure p shadows the outer piece
// variable and is the *Peer.
47 t.iterPeers(func(p *Peer) {
// Track the high-water mark of pieces received between request updates,
// then reset the running counter for the next interval.
51 if p.piecesReceivedSinceLastRequestUpdate > p.maxPiecesReceivedBetweenRequestUpdates {
52 p.maxPiecesReceivedBetweenRequestUpdates = p.piecesReceivedSinceLastRequestUpdate
54 p.piecesReceivedSinceLastRequestUpdate = 0
55 rst.Peers = append(rst.Peers, request_strategy.Peer{
56 Pieces: *p.newPeerPieces(),
57 MaxRequests: p.nominalMaxRequests(),
58 ExistingRequests: p.actualRequestState.Requests,
59 Choking: p.peerChoking,
60 PieceAllowedFast: p.peerAllowedFast,
61 DownloadRate: p.downloadRate(),
62 Age: time.Since(p.completedHandshake),
// Raw pointer used only as a stable identity for the peer within this
// snapshot (see peerId below) — never dereferenced by the strategy.
65 ptr: uintptr(unsafe.Pointer(p)),
71 return request_strategy.Input{
// Global cap on bytes downloaded but not yet hash-verified, from config.
73 MaxUnverifiedBytes: cl.config.MaxUnverifiedBytes,
78 gob.Register(peerId{})
// Uintptr returns the peer's identity pointer as a uintptr, satisfying the
// interface the request-strategy package uses to distinguish peers.
86 func (p peerId) Uintptr() uintptr {
// GobEncode serializes only the ptr field, by aliasing a byte slice directly
// over its memory via a hand-built reflect.SliceHeader.
// NOTE(review): composing a SliceHeader from scratch like this is
// specifically warned against in the reflect docs (SliceHeader is deprecated);
// unsafe.Slice((*byte)(unsafe.Pointer(&p.ptr)), unsafe.Sizeof(p.ptr)) is the
// supported form since Go 1.17 — worth migrating. Also note p is a value
// receiver, so the returned slice aliases a stack copy — presumably gob
// copies the bytes before this frame dies; verify.
90 func (p peerId) GobEncode() (b []byte, _ error) {
91 *(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
92 Data: uintptr(unsafe.Pointer(&p.ptr)),
// Length and capacity are both the in-memory size of the uintptr field.
93 Len: int(unsafe.Sizeof(p.ptr)),
94 Cap: int(unsafe.Sizeof(p.ptr)),
// GobDecode reverses GobEncode: it requires exactly uintptr-sized input,
// reinterprets it as the ptr field, and then memcpys the raw bytes over the
// embedded Peer pointer via another hand-built SliceHeader.
// NOTE(review): same reflect.SliceHeader concern as GobEncode applies; and
// the log.Printf below looks like leftover debug output — confirm whether it
// should be removed or demoted.
99 func (p *peerId) GobDecode(b []byte) error {
// Reject payloads that are not exactly one uintptr wide.
100 if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
103 ptr := unsafe.Pointer(&b[0])
104 p.ptr = *(*uintptr)(ptr)
105 log.Printf("%p", ptr)
// Build a writable byte view over p.Peer and copy the raw pointer bytes in.
106 dst := reflect.SliceHeader{
107 Data: uintptr(unsafe.Pointer(&p.Peer)),
108 Len: int(unsafe.Sizeof(p.Peer)),
109 Cap: int(unsafe.Sizeof(p.Peer)),
111 copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
// Local aliases for the request-strategy package's index types, so the rest
// of this file can name them without the package qualifier.
116 RequestIndex = request_strategy.RequestIndex
117 chunkIndexType = request_strategy.ChunkIndex
// peerRequests is a heap (see the Len/Less/Swap/Push/Pop methods below) of
// candidate request indexes for a single peer, ordered by desirability.
120 type peerRequests struct {
// requestIndexes is the heap's backing slice of candidate requests.
121 requestIndexes []RequestIndex
// torrentStrategyInput is the strategy snapshot for this peer's torrent,
// used by Less for piece priority/availability lookups.
123 torrentStrategyInput request_strategy.Torrent
// Len implements sort.Interface/heap.Interface: number of candidate requests.
126 func (p *peerRequests) Len() int {
127 return len(p.requestIndexes)
// Less implements the heap ordering: "smaller" means more desirable to
// request first. Criteria are chained with multiless in the order written
// below. NOTE(review): some lines are elided in this view; comments describe
// only what is visible.
130 func (p *peerRequests) Less(i, j int) bool {
131 leftRequest := p.requestIndexes[i]
132 rightRequest := p.requestIndexes[j]
// Map each request index back to its piece via chunks-per-piece.
134 leftPieceIndex := leftRequest / p.torrentStrategyInput.ChunksPerPiece
135 rightPieceIndex := rightRequest / p.torrentStrategyInput.ChunksPerPiece
// Whether each request is already outstanding to this peer.
136 leftCurrent := p.peer.actualRequestState.Requests.Contains(leftRequest)
137 rightCurrent := p.peer.actualRequestState.Requests.Contains(rightRequest)
// pending computes a contention score for a request; t here is presumably
// the torrent, bound on an elided line — TODO confirm.
138 pending := func(index RequestIndex, current bool) int {
139 ret := t.pendingRequests.Get(index)
143 // See https://github.com/anacrolix/torrent/issues/679 for possible issues. This should be
150 ml := multiless.New()
151 // Push requests that can't be served right now to the end. But we don't throw them away unless
152 // there's a better alternative. This is for when we're using the fast extension and get choked
153 // but our requests could still be good when we get unchoked.
154 if p.peer.peerChoking {
156 !p.peer.peerAllowedFast.Contains(leftPieceIndex),
157 !p.peer.peerAllowedFast.Contains(rightPieceIndex),
// Fewer other outstanding claims on the request sorts first.
161 pending(leftRequest, leftCurrent),
162 pending(rightRequest, rightCurrent))
// Requests we already have in flight sort first (false < true in Bool).
163 ml = ml.Bool(!leftCurrent, !rightCurrent)
// Higher piece priority first (negated so larger priority sorts smaller).
165 -int(p.torrentStrategyInput.Pieces[leftPieceIndex].Priority),
166 -int(p.torrentStrategyInput.Pieces[rightPieceIndex].Priority),
// Rarer pieces (lower availability) first.
169 int(p.torrentStrategyInput.Pieces[leftPieceIndex].Availability),
170 int(p.torrentStrategyInput.Pieces[rightPieceIndex].Availability))
// Deterministic tie-breakers: piece index, then request index.
171 ml = ml.Uint32(leftPieceIndex, rightPieceIndex)
172 ml = ml.Uint32(leftRequest, rightRequest)
// Swap implements sort.Interface/heap.Interface.
176 func (p *peerRequests) Swap(i, j int) {
177 p.requestIndexes[i], p.requestIndexes[j] = p.requestIndexes[j], p.requestIndexes[i]
// Push implements heap.Interface; x must be a RequestIndex (panics otherwise,
// which is the conventional contract for heap.Push arguments).
180 func (p *peerRequests) Push(x interface{}) {
181 p.requestIndexes = append(p.requestIndexes, x.(RequestIndex))
// Pop implements heap.Interface: remove and return the last element (heap
// has already moved the minimum there before calling this).
184 func (p *peerRequests) Pop() interface{} {
185 last := len(p.requestIndexes) - 1
186 x := p.requestIndexes[last]
187 p.requestIndexes = p.requestIndexes[:last]
// desiredRequestState is the target request set computed for a peer by
// getDesiredRequestState, later actioned by applyRequestState.
191 type desiredRequestState struct {
// Requests, in descending order of desirability (heap pop order).
192 Requests []RequestIndex
// getDesiredRequestState computes which chunk requests this peer should
// ideally have outstanding, by running the client-wide strategy input
// through GetRequestablePieces, collecting candidate request indexes into a
// heap, and popping the best ones up to the peer's request cap.
// NOTE(review): several lines are elided in this view; comments describe
// only what is visible.
196 func (p *Peer) getDesiredRequestState() (desired desiredRequestState) {
197 input := p.t.cl.getRequestStrategyInput()
198 requestHeap := peerRequests{
// Find this peer's torrent in the snapshot so Less can consult its pieces.
201 for _, t := range input.Torrents {
202 if t.InfoHash == p.t.infoHash {
203 requestHeap.torrentStrategyInput = t
207 request_strategy.GetRequestablePieces(
209 func(t *request_strategy.Torrent, rsp *request_strategy.Piece, pieceIndex int) {
// The callback fires for every torrent; skip pieces of other torrents.
210 if t.InfoHash != p.t.infoHash {
// Can't request what the peer doesn't have.
213 if !p.peerHasPiece(pieceIndex) {
216 allowedFast := p.peerAllowedFast.ContainsInt(pieceIndex)
// Walk the piece's still-pending (undirtied) chunks.
217 rsp.IterPendingChunks.Iter(func(ci request_strategy.ChunkIndex) {
218 r := p.t.pieceRequestIndexOffset(pieceIndex) + ci
219 //if p.t.pendingRequests.Get(r) != 0 && !p.actualRequestState.Requests.Contains(r) {
223 // We must signal interest to request this
224 desired.Interested = true
225 // We can make or will allow sustaining a request here if we're not choked, or
226 // have made the request previously (presumably while unchoked), and haven't had
227 // the peer respond yet (and the request was retained because we are using the
229 if p.peerChoking && !p.actualRequestState.Requests.Contains(r) {
230 // We can't request this right now.
234 requestHeap.requestIndexes = append(requestHeap.requestIndexes, r)
238 p.t.assertPendingRequests()
// Heapify once, then pop the most desirable requests up to the cap.
239 heap.Init(&requestHeap)
240 for requestHeap.Len() != 0 && len(desired.Requests) < p.nominalMaxRequests() {
241 requestIndex := heap.Pop(&requestHeap).(RequestIndex)
242 desired.Requests = append(desired.Requests, requestIndex)
// maybeUpdateActualRequestState recomputes and applies the peer's request
// state, but only when an update has been flagged (needRequestUpdate is
// non-empty; the string doubles as the reason, used as a pprof label).
// Returns whatever applyRequestState reports — presumably whether to
// continue writing to the peer; confirm against callers.
247 func (p *Peer) maybeUpdateActualRequestState() bool {
// Nothing requested an update since the last run.
248 if p.needRequestUpdate == "" {
// Label the work with the update reason so it is attributable in profiles.
253 context.Background(),
254 pprof.Labels("update request", p.needRequestUpdate),
255 func(_ context.Context) {
256 next := p.getDesiredRequestState()
257 more = p.applyRequestState(next)
263 // Transmit/action the request state to the peer. Diffs the desired state
// against the actual one: updates interest, cancels requests no longer
// wanted, issues the newly desired ones (respecting the outstanding-request
// cap), then clears the update flag and schedules a timer-based retry while
// requests remain outstanding. NOTE(review): several lines are elided in
// this view; comments describe only what is visible.
264 func (p *Peer) applyRequestState(next desiredRequestState) bool {
265 current := &p.actualRequestState
// If we can't even transmit the interest change, bail out early.
266 if !p.setInterested(next.Interested) {
// Start from everything currently requested; whatever is still desired is
// removed from the set below, leaving only the requests to cancel.
270 cancel := current.Requests.Clone()
271 for _, ri := range next.Requests {
274 cancel.Iterate(func(req uint32) bool {
281 for _, req := range next.Requests {
282 if p.cancelledRequests.Contains(req) {
283 // Waiting for a reject or piece message, which will suitably trigger us to update our
284 // requests, so we can skip this one with no additional consideration.
287 // The cardinality of our desired requests shouldn't exceed the max requests since it's used
288 // in the calculation of the requests. However, if we cancelled requests and they haven't
289 // been rejected or serviced yet with the fast extension enabled, we can end up with more
290 // extra outstanding requests. We could subtract the number of outstanding cancels from the
291 // next request cardinality, but peers might not like that.
292 if maxRequests(current.Requests.GetCardinality()) >= p.nominalMaxRequests() {
293 //log.Printf("not assigning all requests [desired=%v, cancelled=%v, current=%v, max=%v]",
294 // next.Requests.GetCardinality(),
295 // p.cancelledRequests.GetCardinality(),
296 // current.Requests.GetCardinality(),
297 // p.nominalMaxRequests(),
301 more = p.mustRequest(req)
306 p.updateRequestsTimer.Stop()
// State is now in sync; clear the pending-update reason.
308 p.needRequestUpdate = ""
// While requests are outstanding, re-check periodically in case the peer
// never responds.
309 if !current.Requests.IsEmpty() {
310 p.updateRequestsTimer.Reset(3 * time.Second)