From b54d454824f964334139a5637663e7e147a8e730 Mon Sep 17 00:00:00 2001 From: Matt Joiner Date: Mon, 14 Jul 2025 16:05:02 +1000 Subject: [PATCH] Reduce webseed download waste Don't close request on cancellation, try to drain it. Shorten webseed requests to wanted region. --- webseed-peer.go | 11 +++++++++-- webseed-request.go | 2 +- webseed-requesting.go | 25 ++++++++++++++++++++----- webseed/client.go | 7 ++++++- 4 files changed, 36 insertions(+), 9 deletions(-) diff --git a/webseed-peer.go b/webseed-peer.go index eb6320ce..cba9d3e6 100644 --- a/webseed-peer.go +++ b/webseed-peer.go @@ -317,6 +317,9 @@ func (ws *webseedPeer) readChunks(wr *webseedRequest) (err error) { var n int n, err = io.ReadFull(wr.request.Body, buf) ws.peer.readBytes(int64(n)) + if webseed.PrintDebug && wr.cancelled.Load() { + fmt.Printf("webseed read error after cancellation: %v\n", err) + } if err != nil { err = fmt.Errorf("reading chunk: %w", err) return @@ -332,14 +335,18 @@ func (ws *webseedPeer) readChunks(wr *webseedRequest) (err error) { // webseed requests are triggered, we want to ensure our existing request is up to date. wr.next++ err = ws.peer.receiveChunk(&msg) - stop := err != nil || !ws.keepReading(wr) + stop := err != nil || wr.next >= wr.end + if !stop { + if !ws.keepReading(wr) { + wr.Cancel() + } + } ws.peer.locker().Unlock() if err != nil { err = fmt.Errorf("processing chunk: %w", err) } if stop { - // TODO: Keep reading until the buffer is drained. return } } diff --git a/webseed-request.go b/webseed-request.go index 43f36883..62e2f1ea 100644 --- a/webseed-request.go +++ b/webseed-request.go @@ -21,7 +21,7 @@ type webseedRequest struct { } func (me *webseedRequest) Close() { - me.request.Cancel() + me.request.Close() } // Record that it was exceptionally cancelled. 
diff --git a/webseed-requesting.go b/webseed-requesting.go index 23616163..91e3f75f 100644 --- a/webseed-requesting.go +++ b/webseed-requesting.go @@ -159,15 +159,30 @@ func (cl *Client) globalUpdateWebSeedRequests() { continue } t := requestKey.t - // Run the request to the end of the file for now. TODO: Set a reasonable end so the - // remote doesn't oversend. peer := t.webSeeds[requestKey.url] panicif.NotEq(peer.hostKey, costKey) printPlan() begin := t.getRequestIndexContainingOffset(requestKey.startOffset) - // TODO: Find an actual end, so we don't lose lots of data when requests are cancelled. - end := t.endRequestIndexForFileIndex(requestKey.fileIndex) - panicif.Eq(begin, end) + fileEnd := t.endRequestIndexForFileIndex(requestKey.fileIndex) + last := begin + for { + if !t.wantReceiveChunk(last) { + break + } + if last >= fileEnd-1 { + break + } + last++ + } + // Request shouldn't exist if this occurs. + panicif.LessThan(last, begin) + // Hello C++ my old friend. + end := last + 1 + if webseed.PrintDebug { + fmt.Printf("shortened webseed request for %v: [%v-%v) to [%v-%v)\n", + requestKey.filePath(), begin, fileEnd, begin, end) + } + panicif.GreaterThan(end, fileEnd) peer.spawnRequest(begin, end) } } diff --git a/webseed/client.go b/webseed/client.go index a3a1a2d6..9cd59c1f 100644 --- a/webseed/client.go +++ b/webseed/client.go @@ -47,7 +47,12 @@ type Request struct { func (r Request) Cancel() { r.cancel() - r.bodyPipe.CloseWithError(context.Canceled) +} + +func (r Request) Close() { + // We aren't cancelling because we want to know if we can keep receiving buffered data after + // cancellation. PipeReader.Close always returns nil. + _ = r.bodyPipe.Close() } type Client struct { -- 2.51.0