connsWithAllPieces map[*Peer]struct{}
- // Last active request for each chunks. TODO: Change to PeerConn specific?
+ // Last active PeerConn request for each chunk.
requestState map[RequestIndex]requestState
// Chunks we've written to since the corresponding piece was last checked.
dirtyChunks typedRoaring.Bitmap[RequestIndex]
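For illustration only, a minimal sketch of chunk-keyed dirty tracking using the underlying roaring bitmap directly; `chunksPerPiece`, `chunkIndex`, and the flat indexing below are made-up stand-ins for the package's real RequestIndex mapping, not its API:

```go
package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

// Hypothetical layout: each piece is split into a fixed number of chunks, and a
// chunk's flat index is the key used for both requestState and dirtyChunks.
const chunksPerPiece = 16

func chunkIndex(piece, chunk uint32) uint32 {
	return piece*chunksPerPiece + chunk
}

func main() {
	dirty := roaring.New()

	// Writing chunk 3 of piece 7 marks it dirty until the piece is checked again.
	dirty.Add(chunkIndex(7, 3))
	fmt.Println(dirty.Contains(chunkIndex(7, 3))) // true

	// Once piece 7 is re-checked, its whole chunk range is cleared.
	begin := uint64(chunkIndex(7, 0))
	dirty.RemoveRange(begin, begin+chunksPerPiece)
	fmt.Println(dirty.Contains(chunkIndex(7, 3))) // false
}
```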
aprioriHeap := heap.InterfaceForSlice(
&heapSlice,
func(l heapElem, r heapElem) bool {
- // Prefer the highest priority, then existing requests, then longest remaining file extent.
+ // Prefer the highest priority, then existing requests, then largest files.
return cmp.Or(
-cmp.Compare(l.priority, r.priority),
// Existing requests are assigned the priority of the piece they're reading next.
compareBool(l.existingWebseedRequest == nil, r.existingWebseedRequest == nil),
- // This won't thrash because we already preferred existing requests, so we'll finish out small extents.
+ // Note this isn't correct if the starting piece is split across multiple files, but I
+ // plan to refactor to key on the starting piece to handle this case.
-cmp.Compare(
- l.t.Files()[l.fileIndex].length-l.startOffset,
- r.t.Files()[r.fileIndex].length-r.startOffset),
+ l.t.Files()[l.fileIndex].length,
+ r.t.Files()[r.fileIndex].length),
) < 0
},
)
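As a standalone illustration of the ordering above: `cmp.Or` returns the first non-zero comparison, and the negated `cmp.Compare` calls make larger values sort first. The sketch below uses a simplified element type and a stand-in `compareBool`; none of the names are the package's real ones.

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
)

// compareBool is a stand-in for the helper used by the less function above:
// false orders before true.
func compareBool(l, r bool) int {
	if l == r {
		return 0
	}
	if l {
		return 1
	}
	return -1
}

// elem is a simplified, hypothetical stand-in for heapElem with just the
// fields the comparator consults.
type elem struct {
	priority    int
	hasExisting bool
	fileLength  int64
}

func main() {
	elems := []elem{
		{priority: 1, hasExisting: false, fileLength: 100},
		{priority: 1, hasExisting: true, fileLength: 50},
		{priority: 2, hasExisting: false, fileLength: 10},
	}
	// Same shape as the heap's less function: highest priority wins, then
	// elements that already have a request, then the largest file.
	slices.SortFunc(elems, func(l, r elem) int {
		return cmp.Or(
			-cmp.Compare(l.priority, r.priority),
			// Mirrors compareBool(l.existingWebseedRequest == nil, ...).
			compareBool(!l.hasExisting, !r.hasExisting),
			-cmp.Compare(l.fileLength, r.fileLength),
		)
	})
	fmt.Println(elems) // [{2 false 10} {1 true 50} {1 false 100}]
}
```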
Logger *slog.Logger
HttpClient *http.Client
Url string
- // Max concurrent requests to a WebSeed for a given torrent.
+ // Max concurrent requests to a WebSeed for a given torrent. TODO: Unused.
MaxRequests int
fileIndex *segments.Index
// given that's how requests are mapped to webseeds, but the torrent.Client works at the piece
// level. We can map our file-level adjustments to the pieces here. This probably needs to be
// private in the future, if Client ever starts removing pieces. TODO: This belongs in
- // webseedPeer.
+ // webseedPeer. TODO: Unused.
Pieces roaring.Bitmap
// This wraps http.Response bodies, for example to limit the download rate.
ResponseBodyWrapper ResponseBodyWrapper
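The wrapper's shape isn't shown in this hunk; here is a minimal rate-limiting sketch, assuming ResponseBodyWrapper is a `func(io.Reader) io.Reader` and using golang.org/x/time/rate. `wrapperCappedAt` and the 1 MiB/s figure are illustrative only, not part of this change.

```go
package main

import (
	"context"
	"io"
	"strings"

	"golang.org/x/time/rate"
)

// rateLimitedReader waits on a token-bucket limiter for each read, capping the
// effective download rate of a wrapped response body.
type rateLimitedReader struct {
	r       io.Reader
	limiter *rate.Limiter
}

func (rl rateLimitedReader) Read(p []byte) (int, error) {
	// Never ask the limiter for more than its burst, or WaitN would fail.
	if len(p) > rl.limiter.Burst() {
		p = p[:rl.limiter.Burst()]
	}
	n, err := rl.r.Read(p)
	if n > 0 {
		if waitErr := rl.limiter.WaitN(context.Background(), n); waitErr != nil {
			return n, waitErr
		}
	}
	return n, err
}

// wrapperCappedAt builds a body wrapper that limits reads to roughly
// bytesPerSecond, sharing one limiter across all wrapped bodies.
func wrapperCappedAt(bytesPerSecond int) func(io.Reader) io.Reader {
	limiter := rate.NewLimiter(rate.Limit(bytesPerSecond), bytesPerSecond)
	return func(body io.Reader) io.Reader {
		return rateLimitedReader{r: body, limiter: limiter}
	}
}

func main() {
	wrap := wrapperCappedAt(1 << 20) // ~1 MiB/s
	limited := wrap(strings.NewReader("response body stand-in"))
	io.Copy(io.Discard, limited)
}
```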