type legacyPeerImpl interface {
// Trigger the actual request state to get updated
handleOnNeedUpdateRequests()
- writeInterested(interested bool) bool
// Actually go ahead and modify the pending requests.
updateRequests()
return
}
-func (cn *Peer) setInterested(interested bool) bool {
+func (cn *PeerConn) setInterested(interested bool) bool {
if cn.requestState.Interested == interested {
return true
}
type (
 // A request index is a chunk indexed across the entire torrent. It's a single integer and can
 // be converted to a protocol request. TODO: This should be private.
- RequestIndex = requestStrategy.RequestIndex
+ RequestIndex = requestStrategy.RequestIndex
+ // Like RequestIndex, but relative to the start of a single piece rather than the whole
+ // torrent.
 chunkIndexType = requestStrategy.ChunkIndex
)
p.drop()
}
-func (t *Torrent) haveChunk(r Request) (ret bool) {
+// haveRequestIndexChunk reports whether the chunk identified by a torrent-wide
+// request index has already been obtained. Convenience wrapper converting the
+// index to a protocol Request for haveChunk.
+func (t *Torrent) haveRequestIndexChunk(reqIndex RequestIndex) bool {
+ return t.haveChunk(t.requestIndexToRequest(reqIndex))
+}
+
+func (t *Torrent) haveChunk(r Request) bool {
if !t.haveInfo() {
return false
}
end := intCeilDiv(uint64(f.offset)+uint64(f.length), t.chunkSize.Uint64())
return RequestIndex(end)
}
+
+// wantReceiveChunk reports whether receiving the chunk at reqIndex would still
+// be useful: the piece containing it must be wanted, and the chunk must not
+// already be stored.
+func (t *Torrent) wantReceiveChunk(reqIndex RequestIndex) bool {
+ pieceIndex := t.pieceIndexOfRequestIndex(reqIndex)
+ return t.wantPieceIndex(pieceIndex) && !t.haveRequestIndexChunk(reqIndex)
+}
})
}
-func (ws *webseedPeer) writeInterested(interested bool) bool {
- return true
-}
-
-func (ws *webseedPeer) handleCancel(r RequestIndex) {
- for wr := range ws.activeRequestsForIndex(r) {
- wr.Cancel()
- }
-}
+// handleCancel is deliberately a no-op for webseeds: rather than cancelling
+// in-flight HTTP requests, webseeds check that the next request is wanted
+// before reading it, so unwanted chunks are skipped or the read is abandoned.
+func (ws *webseedPeer) handleCancel(RequestIndex) {}
func (ws *webseedPeer) activeRequestsForIndex(r RequestIndex) iter.Seq[*webseedRequest] {
return func(yield func(*webseedRequest) bool) {
})
}
+// Do we want a chunk, assuming it's valid etc. Delegates to the Torrent, which
+// checks the containing piece is wanted and the chunk isn't already held.
+func (ws *webseedPeer) wantChunk(ri RequestIndex) bool {
+ return ws.peer.t.wantReceiveChunk(ri)
+}
+
+// maxChunkDiscard returns the maximum number of consecutive unwanted chunks
+// we're willing to read and throw away from a webseed response, i.e.
+// webseed.MaxDiscardBytes converted to whole chunks, rounding up.
+func (ws *webseedPeer) maxChunkDiscard() RequestIndex {
+ return RequestIndex(int(intCeilDiv(webseed.MaxDiscardBytes, ws.peer.t.chunkSize)))
+}
+
+// keepReading reports whether it's worth continuing to read from this webseed
+// request: some chunk within the discard window starting at wr.next is still
+// wanted. Otherwise the response body should be abandoned.
+func (ws *webseedPeer) keepReading(wr *webseedRequest) bool {
+ // The scan is bounded by both the request's end and the discard window.
+ limit := min(wr.end, wr.next+ws.maxChunkDiscard())
+ for ri := wr.next; ri < limit; ri++ {
+ if ws.wantChunk(ri) {
+ return true
+ }
+ }
+ return false
+}
+
func (ws *webseedPeer) readChunks(wr *webseedRequest) (err error) {
t := ws.peer.t
buf := t.getChunkBuffer()
msg := pp.Message{
Type: pp.Piece,
}
- for wr.next < wr.end {
+ for ws.keepReading(wr) {
reqSpec := t.requestIndexToRequest(wr.next)
chunkLen := reqSpec.Length.Int()
buf = buf[:chunkLen]
package torrent
import (
+ "fmt"
+
"github.com/anacrolix/torrent/webseed"
)
// Record that it was exceptionally cancelled.
func (me *webseedRequest) Cancel() {
- me.cancelled = true
 me.request.Cancel()
+ // Guard so the flag is set and the debug line printed only on the first
+ // cancellation; Cancel may be invoked more than once for a request.
+ if !me.cancelled {
+ me.cancelled = true
+ if webseed.PrintDebug {
+ fmt.Printf("cancelled webseed request\n")
+ }
+ }
}
"github.com/RoaringBitmap/roaring"
"github.com/anacrolix/missinggo/v2/panicif"
+ "github.com/dustin/go-humanize"
"github.com/anacrolix/torrent/metainfo"
"github.com/anacrolix/torrent/segments"
)
+// How many consecutive bytes (48 KiB) to allow discarding from responses. This number is based
+// on https://archive.org/download/BloodyPitOfHorror/BloodyPitOfHorror.asr.srt. It seems that
+// archive.org might be using a webserver implementation that refuses to do partial responses to
+// small files, so we tolerate skipping up to this much of a full (200) response.
+const MaxDiscardBytes = 48 << 10
+
+// Output debug information to stdout. Compile-time switch; not configurable at runtime.
+const PrintDebug = false
+
type RequestSpec = segments.Extent
type requestPart struct {
// Max concurrent requests to a WebSeed for a given torrent.
MaxRequests int
+ // TODO: Share this with Torrent.
fileIndex segments.Index
info *metainfo.Info
// The pieces we can request with the Url. We're more likely to ban/block at the file-level
// given that's how requests are mapped to webseeds, but the torrent.Client works at the piece
// level. We can map our file-level adjustments to the pieces here. This probably need to be
- // private in the future, if Client ever starts removing pieces.
+ // private in the future, if Client ever starts removing pieces. TODO: This belongs in
+ // webseedPeer.
Pieces roaring.Bitmap
// This wraps http.Response bodies, for example to limit the download rate.
ResponseBodyWrapper ResponseBodyWrapper
responseBodyWrapper: ws.ResponseBodyWrapper,
}
 part.do = func() (*http.Response, error) {
+ // Trace the outgoing ranged request (URL, target file size, Range header)
+ // when debug output is enabled.
+ if PrintDebug {
+ fmt.Printf(
+ "doing request for %q (file size %v), Range: %q\n",
+ req.URL,
+ humanize.Bytes(uint64(ws.fileIndex.Index(i).Length)),
+ req.Header.Get("Range"),
+ )
+ }
 return ws.HttpClient.Do(req)
 }
requestParts = append(requestParts, part)
case http.StatusOK:
// The response is from the beginning.
me.checkContentLength(resp, part, part.e.End())
- // This number is based on
- // https://archive.org/download/BloodyPitOfHorror/BloodyPitOfHorror.asr.srt. It seems that
- // archive.org might be using a webserver implementation that refuses to do partial
- // responses to small files.
discard := part.e.Start
- if discard > 48<<10 {
+ if discard > MaxDiscardBytes {
return ErrBadResponse{"resp status ok but requested range", resp}
}
if discard != 0 {