package torrent
+
+// I don't trust errors.New not to allocate, and I know I can use unique.Handle if I get desperate.
+type stringError string
+
+func (e stringError) Error() string {
+ return string(e)
+}
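// Illustrative sketch, not part of this change: stringError lets a plain string
// be used directly as an error, so it can be handed to a context.CancelCauseFunc
// and recovered later with context.Cause. The function name is hypothetical and
// the standard "context" import is assumed.
func exampleStringErrorAsCause() {
	ctx, cancel := context.WithCancelCause(context.Background())
	// Cancel with a stringError; anyone holding ctx can read it back via context.Cause.
	cancel(stringError("no wanted chunks in discard window"))
	<-ctx.Done()
	_ = context.Cause(ctx) // stringError("no wanted chunks in discard window")
}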
}
func (ws *webseedPeer) spawnRequest(begin, end RequestIndex, logger *slog.Logger) {
- extWsReq := ws.client.StartNewRequest(ws.intoSpec(begin, end), logger)
+ extWsReq := ws.client.StartNewRequest(ws.peer.closedCtx, ws.intoSpec(begin, end), logger)
wsReq := webseedRequest{
logger: logger,
request: extWsReq,
return RequestIndex(int(intCeilDiv(webseed.MaxDiscardBytes, ws.peer.t.chunkSize)))
}
-func (ws *webseedPeer) keepReading(wr *webseedRequest) bool {
+func (ws *webseedPeer) wantedChunksInDiscardWindow(wr *webseedRequest) bool {
+ // Shouldn't be called if the request is already at the end.
+ panicif.GreaterThanOrEqual(wr.next, wr.end)
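// Scan forward from the next chunk in the stream, up to the discard limit, for one we still want.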
for ri := wr.next; ri < wr.end && ri <= wr.next+ws.maxChunkDiscard(); ri++ {
if ws.wantChunk(ri) {
return true
err = ws.peer.receiveChunk(&msg)
stop := err != nil || wr.next >= wr.end
if !stop {
- if !ws.keepReading(wr) {
- wr.Cancel("finished or discarded")
+ if !ws.wantedChunksInDiscardWindow(wr) {
+ // This cancels the stream, but we don't stop reading, to make the most of the
+ // buffered body.
+ wr.Cancel("no wanted chunks in discard window")
}
}
ws.peer.locker().Unlock()
}
// Record that it was exceptionally cancelled.
-func (me *webseedRequest) Cancel(reason string) {
- me.request.Cancel()
+func (me *webseedRequest) Cancel(cause string) {
+ me.request.Cancel(stringError(cause))
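// Swap returns the previous value, so only the first cancellation gets logged.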
if !me.cancelled.Swap(true) {
if webseed.PrintDebug {
- me.logger.Debug("cancelled", "reason", reason)
+ me.logger.Debug("cancelled", "cause", cause)
}
}
}
}
type Request struct {
- cancel func()
+ cancel context.CancelCauseFunc
Body io.Reader
// Closed with error to unstick copy routine when context isn't checked.
bodyPipe *io.PipeReader
}
-func (r Request) Cancel() {
- r.cancel()
+func (r Request) Cancel(cause error) {
+ r.cancel(cause)
}
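// Hypothetical usage sketch (parentCtx, spec and logger are placeholders, and
// the usual "context", "errors", "io" and "log/slog" imports are assumed):
// because the Request's context now comes from context.WithCancelCause,
// cancelling with a cause aborts the underlying transfer and a reader of Body
// is expected to unblock with an error shortly after.
func exampleCancelWithCause(ws *Client, spec RequestSpec, parentCtx context.Context, logger *slog.Logger) {
	req := ws.StartNewRequest(parentCtx, spec, logger)
	defer req.Close()
	// Tear the transfer down, recording why.
	req.Cancel(errors.New("no longer needed"))
	// The copy should return soon after the cancellation propagates.
	_, _ = io.Copy(io.Discard, req.Body)
}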
func (r Request) Close() {
return urlForFileIndex(ws.Url, fileIndex, ws.info, ws.PathEscaper)
}
-func (ws *Client) StartNewRequest(r RequestSpec, debugLogger *slog.Logger) Request {
- ctx, cancel := context.WithCancel(context.TODO())
+func (ws *Client) StartNewRequest(ctx context.Context, r RequestSpec, debugLogger *slog.Logger) Request {
+ ctx, cancel := context.WithCancelCause(ctx)
var requestParts []requestPart
for i, e := range ws.fileIndex.LocateIter(r) {
req, err := newRequest(