12 "github.com/anacrolix/log"
13 "github.com/anacrolix/multiless"
15 request_strategy "github.com/anacrolix/torrent/request-strategy"
// requestStrategyPieceOrderState builds the request-strategy package's view of
// piece i: its current pure priority, whether it is partially downloaded, and
// its availability (presumably a count of connected peers having the piece —
// confirm against the Piece type, which is not visible here).
// NOTE(review): the struct-literal and function closing braces are elided from
// this listing (original numbering jumps 22 -> 27).
18 func (t *Torrent) requestStrategyPieceOrderState(i int) request_strategy.PieceRequestOrderState {
19 return request_strategy.PieceRequestOrderState{
20 Priority: t.piece(i).purePriority(),
21 Partial: t.piecePartiallyDownloaded(i),
22 Availability: t.piece(i).availability,
// NOTE(review): this gob.Register call sits inside a function whose header is
// elided from this listing (original lines 23-26 missing; presumably an init
// func). It registers peerId so values can be gob-encoded through interfaces.
27 gob.Register(peerId{})
// NOTE(review): Uintptr's body is elided (original lines 36-37); judging by the
// signature it exposes the ptr field.
35 func (p peerId) Uintptr() uintptr {
// GobEncode returns the raw bytes of p.ptr without copying: it overwrites b's
// slice header in place so that b aliases the pointer-sized p.ptr field.
// NOTE(review): reflect.SliceHeader manipulated through unsafe.Pointer is the
// pre-Go-1.17 idiom; unsafe.Slice would be the modern replacement. The return
// statement and closing braces are elided from this listing.
39 func (p peerId) GobEncode() (b []byte, _ error) {
40 *(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
41 Data: uintptr(unsafe.Pointer(&p.ptr)),
42 Len: int(unsafe.Sizeof(p.ptr)),
43 Cap: int(unsafe.Sizeof(p.ptr)),
// GobDecode is the inverse of GobEncode: it reinterprets the encoded bytes as
// a uintptr into p.ptr, then copies the same raw bytes over the p.Peer field
// via a manufactured []byte view. The encoded bytes are a live pointer value,
// so this round-trip is only meaningful within the encoding process — TODO
// confirm callers never persist or transmit these bytes.
48 func (p *peerId) GobDecode(b []byte) error {
// Reject payloads that are not exactly pointer-sized.
49 if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
// NOTE(review): the error return for the size mismatch is elided from this
// listing (original lines 50-51).
52 ptr := unsafe.Pointer(&b[0])
53 p.ptr = *(*uintptr)(ptr)
// Build a pointer-sized []byte header over p.Peer and copy the raw bytes in.
55 dst := reflect.SliceHeader{
56 Data: uintptr(unsafe.Pointer(&p.Peer)),
57 Len: int(unsafe.Sizeof(p.Peer)),
58 Cap: int(unsafe.Sizeof(p.Peer)),
// NOTE(review): the nil return and closing brace are elided (original 61-62).
60 copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
// Aliases into the request-strategy package so the rest of this file can use
// short names. NOTE(review): the enclosing `type (...)` grouping lines are
// elided from this listing.
65 RequestIndex = request_strategy.RequestIndex
66 chunkIndexType = request_strategy.ChunkIndex
// peerRequests holds the candidate request indexes for one peer and implements
// heap.Interface (Len/Less/Swap/Push/Pop below). NOTE(review): a `peer` field
// and the struct's closing brace are elided here — Less references p.peer.
69 type peerRequests struct {
70 requestIndexes []RequestIndex
// Len reports the number of candidate request indexes (heap.Interface).
// NOTE(review): closing brace elided from this listing.
74 func (p *peerRequests) Len() int {
75 return len(p.requestIndexes)
// Less orders candidate requests, most desirable first, for the request heap.
// The comparison is built as a multiless chain; several construction lines are
// elided from this listing (original 81, 84, 89, 92-93, 98-102, 105-106, 110,
// 116-117, 120-121), including the binding of `t` (presumably t := p.peer.t —
// confirm) and the `ml` declaration. The visible criteria, in order:
//  1. while the peer is choking us, requests outside its allowed-fast set sink
//     toward the end (kept, not dropped — see the original comment below);
//  2. prefer requests already pending to this peer, then to no peer at all;
//  3. compare the competing peer's outstanding-request cardinality;
//  4. prefer less recently requested (lastRequested timestamps);
//  5. higher piece priority first (negated for ascending compare);
//  6. lower availability first.
78 func (p *peerRequests) Less(i, j int) bool {
79 leftRequest := p.requestIndexes[i]
80 rightRequest := p.requestIndexes[j]
// Map chunk-granularity request indexes back to their piece indexes.
82 leftPieceIndex := leftRequest / t.chunksPerRegularPiece()
83 rightPieceIndex := rightRequest / t.chunksPerRegularPiece()
85 // Push requests that can't be served right now to the end. But we don't throw them away unless
86 // there's a better alternative. This is for when we're using the fast extension and get choked
87 // but our requests could still be good when we get unchoked.
88 if p.peer.peerChoking {
90 !p.peer.peerAllowedFast.Contains(leftPieceIndex),
91 !p.peer.peerAllowedFast.Contains(rightPieceIndex),
// Which peer, if any, currently has each request outstanding.
94 leftPeer := t.pendingRequests[leftRequest]
95 rightPeer := t.pendingRequests[rightRequest]
96 ml = ml.Bool(rightPeer == p.peer, leftPeer == p.peer)
97 ml = ml.Bool(rightPeer == nil, leftPeer == nil)
103 rightPeer.actualRequestState.Requests.GetCardinality(),
104 leftPeer.actualRequestState.Requests.GetCardinality(),
// Older requests sort earlier: compare the time delta between last requests.
107 ml = ml.CmpInt64(t.lastRequested[rightRequest].Sub(t.lastRequested[leftRequest]).Nanoseconds())
108 leftPiece := t.piece(int(leftPieceIndex))
109 rightPiece := t.piece(int(rightPieceIndex))
111 // Technically we would be happy with the cached priority here, except we don't actually
112 // cache it anymore, and Torrent.piecePriority just does another lookup of *Piece to resolve
113 // the priority through Piece.purePriority, which is probably slower.
// Negated so that higher priority compares as smaller (earlier in the heap).
114 -int(leftPiece.purePriority()),
115 -int(rightPiece.purePriority()),
118 int(leftPiece.availability),
119 int(rightPiece.availability))
// Swap exchanges two candidate request indexes (heap.Interface).
// NOTE(review): closing brace elided from this listing.
123 func (p *peerRequests) Swap(i, j int) {
124 p.requestIndexes[i], p.requestIndexes[j] = p.requestIndexes[j], p.requestIndexes[i]
// Push appends a new candidate (heap.Interface; x must be a RequestIndex).
// NOTE(review): closing brace elided from this listing.
127 func (p *peerRequests) Push(x interface{}) {
128 p.requestIndexes = append(p.requestIndexes, x.(RequestIndex))
// Pop removes and returns the last candidate (heap.Interface).
// NOTE(review): the `return x` and closing brace are elided from this listing.
131 func (p *peerRequests) Pop() interface{} {
132 last := len(p.requestIndexes) - 1
133 x := p.requestIndexes[last]
134 p.requestIndexes = p.requestIndexes[:last]
// desiredRequestState is the request state we would like to converge this peer
// to: the heap of requests we want outstanding, plus whether we should be
// interested. NOTE(review): an Interested field is assigned in
// getDesiredRequestState but is elided from this listing, along with the
// struct's closing brace.
138 type desiredRequestState struct {
139 Requests peerRequests
// getDesiredRequestState computes the ideal set of outstanding requests for
// this peer by walking the client's piece request order and collecting every
// undirtied chunk the peer can serve. It also decides whether we should signal
// interest. NOTE(review): many lines are elided from this listing (original
// 144-146, 149-150, 152, 156-157, 159-160, 165-167, 175, 178-180, 182-184,
// 187-188), including early returns/continues, the requestHeap peer binding,
// and the function's return — so the control flow below is partially inferred.
143 func (p *Peer) getDesiredRequestState() (desired desiredRequestState) {
147 input := p.t.getRequestStrategyInput()
148 requestHeap := peerRequests{
// Iterate requestable pieces in client-wide request order for this storage.
151 request_strategy.GetRequestablePieces(
153 p.t.cl.pieceRequestOrder[p.t.storage.Capacity],
154 func(ih InfoHash, pieceIndex int) {
// The request order is shared across torrents with the same storage capacity;
// skip pieces belonging to other torrents (the skip itself is elided here).
155 if ih != p.t.infoHash {
158 if !p.peerHasPiece(pieceIndex) {
161 allowedFast := p.peerAllowedFast.ContainsInt(pieceIndex)
// Consider every chunk of the piece that hasn't been written/dirtied yet.
162 p.t.piece(pieceIndex).undirtiedChunksIter.Iter(func(ci request_strategy.ChunkIndex) {
163 r := p.t.pieceRequestIndexOffset(pieceIndex) + ci
164 // if p.t.pendingRequests.Get(r) != 0 && !p.actualRequestState.Requests.Contains(r) {
168 // We must signal interest to request this. TODO: We could set interested if the
169 // peers pieces (minus the allowed fast set) overlap with our missing pieces if
170 // there are any readers, or any pending pieces.
171 desired.Interested = true
172 // We can make or will allow sustaining a request here if we're not choked, or
173 // have made the request previously (presumably while unchoked), and haven't had
174 // the peer respond yet (and the request was retained because we are using the
176 if p.peerChoking && !p.actualRequestState.Requests.Contains(r) {
177 // We can't request this right now.
181 requestHeap.requestIndexes = append(requestHeap.requestIndexes, r)
// Sanity check of torrent-wide pending-request accounting (body not visible).
185 p.t.assertPendingRequests()
186 desired.Requests = requestHeap
// maybeUpdateActualRequestState recomputes and applies this peer's request
// state, but only when something has flagged an update via needRequestUpdate.
// The work runs under pprof labels so CPU profiles attribute it to the update
// reason. NOTE(review): several lines are elided from this listing (original
// 192-195, 201-204), including the early return, the pprof.Do call line, the
// `more` declaration, and the function's return.
190 func (p *Peer) maybeUpdateActualRequestState() bool {
// Nothing requested an update; presumably returns early (return elided).
191 if p.needRequestUpdate == "" {
196 context.Background(),
197 pprof.Labels("update request", p.needRequestUpdate),
198 func(_ context.Context) {
199 next := p.getDesiredRequestState()
200 more = p.applyRequestState(next)
206 // Transmit/action the request state to the peer.
// applyRequestState pushes the desired state to the wire: sets interest, then
// pops requests off the desired heap (best first) until the peer's nominal
// request limit is reached, skipping requests that are cancelled-in-flight or
// better left with another peer. NOTE(review): this function continues past
// the end of this listing, and interior lines are elided (original 210-212,
// 220-221, 224-225, 227-230, 234), including the loop's break/continue paths
// and the post-mustRequest error handling — annotations only, no code changed.
207 func (p *Peer) applyRequestState(next desiredRequestState) bool {
208 current := &p.actualRequestState
// Interest must be communicated before requests can be made (failure path
// elided from this listing).
209 if !p.setInterested(next.Interested) {
213 requestHeap := &next.Requests
214 heap.Init(requestHeap)
// Keep issuing requests while candidates remain and we're under the limit.
215 for requestHeap.Len() != 0 && maxRequests(current.Requests.GetCardinality()) < p.nominalMaxRequests() {
216 req := heap.Pop(requestHeap).(RequestIndex)
217 if p.cancelledRequests.Contains(req) {
218 // Waiting for a reject or piece message, which will suitably trigger us to update our
219 // requests, so we can skip this one with no additional consideration.
// Leave the request with another peer that has it outstanding and more
// in-flight work than us (the skip itself is elided from this listing).
222 existing := p.t.pendingRequests[req]
223 if existing != nil && existing != p && existing.actualRequestState.Requests.GetCardinality()-existing.cancelledRequests.GetCardinality() > current.Requests.GetCardinality() {
226 more = p.mustRequest(req)
231 // TODO: This may need to change, we might want to update even if there were no requests due to
232 // filtering them for being recently requested already.
233 p.updateRequestsTimer.Stop()
235 p.needRequestUpdate = ""
// While interested, schedule a periodic re-evaluation of our requests.
236 if current.Interested {
237 p.updateRequestsTimer.Reset(3 * time.Second)