"github.com/anacrolix/multiless"
"github.com/lispad/go-generics-tools/binheap"
- request_strategy "github.com/anacrolix/torrent/request-strategy"
+ requestStrategy "github.com/anacrolix/torrent/request-strategy"
+ typedRoaring "github.com/anacrolix/torrent/typed-roaring"
)
-func (t *Torrent) requestStrategyPieceOrderState(i int) request_strategy.PieceRequestOrderState {
- return request_strategy.PieceRequestOrderState{
+type (
+	// Since we have to store all the requests in memory, we can't reasonably exceed what could be
+	// indexed with the memory space available.
+	// Note this is an alias (not a defined type), so values interchange freely with plain int.
+	maxRequests = int
+)
+
+func (t *Torrent) requestStrategyPieceOrderState(i int) requestStrategy.PieceRequestOrderState {
+ return requestStrategy.PieceRequestOrderState{
Priority: t.piece(i).purePriority(),
Partial: t.piecePartiallyDownloaded(i),
Availability: t.piece(i).availability(),
}
type (
-	RequestIndex = request_strategy.RequestIndex
-	chunkIndexType = request_strategy.ChunkIndex
+	// Aliases into the request-strategy package (import renamed request_strategy -> requestStrategy).
+	RequestIndex = requestStrategy.RequestIndex
+	chunkIndexType = requestStrategy.ChunkIndex
)
type desiredPeerRequests struct {
	requestIndexes []RequestIndex
	peer           *Peer
+	// Per-piece request-order state, indexed by piece index. Populated in
+	// Peer.getDesiredRequestState's GetRequestablePieces callback before the heap compares pieces.
+	pieceStates []requestStrategy.PieceRequestOrderState
}
func (p *desiredPeerRequests) Len() int {
!p.peer.peerAllowedFast.Contains(rightPieceIndex),
)
}
- leftPiece := t.piece(leftPieceIndex)
- rightPiece := t.piece(rightPieceIndex)
+ leftPiece := &p.pieceStates[leftPieceIndex]
+ rightPiece := &p.pieceStates[rightPieceIndex]
// Putting this first means we can steal requests from lesser-performing peers for our first few
// new requests.
priority := func() piecePriority {
// Technically we would be happy with the cached priority here, except we don't actually
// cache it anymore, and Torrent.piecePriority just does another lookup of *Piece to resolve
// the priority through Piece.purePriority, which is probably slower.
- leftPriority := leftPiece.purePriority()
- rightPriority := rightPiece.purePriority()
+ leftPriority := leftPiece.Priority
+ rightPriority := rightPiece.Priority
ml = ml.Int(
-int(leftPriority),
-int(rightPriority),
ml = ml.CmpInt64(rightLast.Sub(leftLast).Nanoseconds())
}
ml = ml.Int(
- leftPiece.relativeAvailability,
- rightPiece.relativeAvailability)
+ leftPiece.Availability,
+ rightPiece.Availability)
if priority == PiecePriorityReadahead {
// TODO: For readahead in particular, it would be even better to consider distance from the
// reader position so that reads earlier in a torrent don't starve reads later in the
// torrent. This would probably require reconsideration of how readahead priority works.
ml = ml.Int(leftPieceIndex, rightPieceIndex)
} else {
- // TODO: To prevent unnecessarily requesting from disparate pieces, and to ensure pieces are
- // selected randomly when availability is even, there should be some fixed ordering of
- // pieces.
+ ml = ml.Int(t.pieceRequestOrder[leftPieceIndex], t.pieceRequestOrder[rightPieceIndex])
}
return ml.Less()
}
}
func (p *Peer) getDesiredRequestState() (desired desiredRequestState) {
- if !p.t.haveInfo() {
+ t := p.t
+ if !t.haveInfo() {
return
}
- if p.t.closed.IsSet() {
+ if t.closed.IsSet() {
return
}
- input := p.t.getRequestStrategyInput()
+ input := t.getRequestStrategyInput()
requestHeap := desiredPeerRequests{
- peer: p,
+ peer: p,
+ pieceStates: t.requestPieceStates,
+ requestIndexes: t.requestIndexes,
}
- request_strategy.GetRequestablePieces(
+ // Caller-provided allocation for roaring bitmap iteration.
+ var it typedRoaring.Iterator[RequestIndex]
+ requestStrategy.GetRequestablePieces(
input,
- p.t.getPieceRequestOrder(),
- func(ih InfoHash, pieceIndex int) {
- if ih != p.t.infoHash {
+ t.getPieceRequestOrder(),
+ func(ih InfoHash, pieceIndex int, pieceExtra requestStrategy.PieceRequestOrderState) {
+ if ih != t.infoHash {
return
}
if !p.peerHasPiece(pieceIndex) {
return
}
+ requestHeap.pieceStates[pieceIndex] = pieceExtra
allowedFast := p.peerAllowedFast.Contains(pieceIndex)
- p.t.piece(pieceIndex).undirtiedChunksIter.Iter(func(ci request_strategy.ChunkIndex) {
- r := p.t.pieceRequestIndexOffset(pieceIndex) + ci
+ t.iterUndirtiedRequestIndexesInPiece(&it, pieceIndex, func(r requestStrategy.RequestIndex) {
if !allowedFast {
// We must signal interest to request this. TODO: We could set interested if the
// peers pieces (minus the allowed fast set) overlap with our missing pieces if
})
},
)
- p.t.assertPendingRequests()
+ t.assertPendingRequests()
desired.Requests = requestHeap
return
}
func(_ context.Context) {
next := p.getDesiredRequestState()
p.applyRequestState(next)
+ p.t.requestIndexes = next.Requests.requestIndexes[:0]
},
)
}
p.peakRequests = newPeakRequests
p.needRequestUpdate = ""
p.lastRequestUpdate = time.Now()
- p.updateRequestsTimer.Reset(updateRequestsTimerDuration)
+ if enableUpdateRequestsTimer {
+ p.updateRequestsTimer.Reset(updateRequestsTimerDuration)
+ }
}
// This could be set to 10s to match the unchoke/request update interval recommended by some
// specifications. I've set it shorter to trigger it more often for testing for now.
-const updateRequestsTimerDuration = 3 * time.Second
+const (
+	updateRequestsTimerDuration = 3 * time.Second
+	// Gates the Reset of Peer.updateRequestsTimer after a request update; currently disabled,
+	// so request updates are driven only by explicit triggers.
+	enableUpdateRequestsTimer = false
+)