type webseedPeer struct {
// First field for stats alignment.
peer Peer
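+ // Structured logger for webseed events on this peer.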
+ logger *slog.Logger
client webseed.Client
activeRequests map[*webseedRequest]struct{}
locker sync.Locker
// Is there any point to this? Won't we fail to receive a chunk and cancel anyway? Should we
// Close requests instead?
for req := range me.activeRequests {
- req.Cancel()
+ req.Cancel("all requests cancelled")
}
}
return webseed.RequestSpec{start, endOff - start}
}
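+ // spawnRequest creates and starts a webseed request for the half-open range [begin, end),
+ // attaching the provided logger for debug output.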
-func (ws *webseedPeer) spawnRequest(begin, end RequestIndex) {
- extWsReq := ws.client.StartNewRequest(ws.intoSpec(begin, end))
+func (ws *webseedPeer) spawnRequest(begin, end RequestIndex, logger *slog.Logger) {
+ extWsReq := ws.client.StartNewRequest(ws.intoSpec(begin, end), logger)
wsReq := webseedRequest{
+ logger: logger,
request: extWsReq,
begin: begin,
next: begin,
}
if ws.hasOverlappingRequests(begin, end) {
if webseed.PrintDebug {
- fmt.Printf("webseedPeer.spawnRequest: overlapping request for %v[%v-%v)\n", ws.peer.t.name(), begin, end)
+ logger.Warn("webseedPeer.spawnRequest: request overlaps existing")
}
ws.peer.t.cl.dumpCurrentWebseedRequests()
}
stop := err != nil || wr.next >= wr.end
if !stop {
if !ws.keepReading(wr) {
- wr.Cancel()
+ wr.Cancel("finished or discarded")
}
}
ws.peer.locker().Unlock()
import (
"fmt"
+ "log/slog"
"sync/atomic"
"github.com/anacrolix/torrent/webseed"
type webseedRequest struct {
// Fingers out.
request webseed.Request
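+ // Request-scoped logger, carrying context such as the webseed URL and file index.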
+ logger *slog.Logger
// First assigned in the range.
begin RequestIndex
// The next to be read.
next RequestIndex
// One past the last index in the range.
end RequestIndex
// Whether the request was exceptionally cancelled (see Cancel).
cancelled atomic.Bool
}
// Record that it was exceptionally cancelled.
-func (me *webseedRequest) Cancel() {
+func (me *webseedRequest) Cancel(reason string) {
me.request.Cancel()
if !me.cancelled.Swap(true) {
if webseed.PrintDebug {
- fmt.Printf("cancelled webseed request\n")
+ me.logger.Debug("webseed request cancelled", "reason", reason)
}
}
}
"cmp"
"fmt"
"iter"
+ "log/slog"
"maps"
"os"
"strconv"
g "github.com/anacrolix/generics"
"github.com/anacrolix/generics/heap"
"github.com/anacrolix/missinggo/v2/panicif"
+ pp "github.com/anacrolix/torrent/peer_protocol"
"github.com/anacrolix/torrent/internal/request-strategy"
"github.com/anacrolix/torrent/metainfo"
}
// Cancel any existing requests that are no longer wanted.
- for key, value := range unwantedExistingRequests {
- if webseed.PrintDebug {
- fmt.Printf("cancelling deprioritized existing webseed request %v\n", key)
- }
- key.t.slogger().Debug("cancelling deprioritized existing webseed request", "webseedUrl", key.url, "fileIndex", key.fileIndex)
- value.existingWebseedRequest.Cancel()
+ for _, value := range unwantedExistingRequests {
+ value.existingWebseedRequest.Cancel("deprioritized")
}
printPlan := sync.OnceFunc(func() {
}
last++
}
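+ // Per-request debug logger; the attached webseed URL and file index let output
+ // from the rest of the request's lifecycle be correlated with this request.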
+ debugLogger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
+ Level: slog.LevelDebug,
+ AddSource: true,
+ })).With(
+ "webseedUrl", requestKey.url,
+ "fileIndex", requestKey.fileIndex)
// Request shouldn't exist if this occurs.
panicif.LessThan(last, begin)
// Hello C++ my old friend.
end := last + 1
if webseed.PrintDebug && end != fileEnd {
- fmt.Printf("shortened webseed request for %v: [%v-%v) to [%v-%v)\n",
- requestKey.filePath(), begin, fileEnd, begin, end)
+ debugLogger.Debug(
+ "shortened webseed request",
+ "firstFile", requestKey.filePath(),
+ "from", endExclusiveString(begin, fileEnd),
+ "to", endExclusiveString(begin, end))
}
panicif.GreaterThan(end, fileEnd)
- peer.spawnRequest(begin, end)
+ peer.spawnRequest(begin, end, debugLogger)
}
}
}
panicif.True(cl.webseedRequestTimer.Reset(webseedRequestUpdateTimerInterval))
}
+
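+ // endExclusive is a half-open interval rendered as "[start-end)" in logs.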
+type endExclusive[T any] struct {
+ start, end T
+}
+
+func (me endExclusive[T]) String() string {
+ return fmt.Sprintf("[%v-%v)", me.start, me.end)
+}
+
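+ // endExclusiveString is a convenience for logging a half-open interval.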
+func endExclusiveString[T any](start, end T) string {
+ return endExclusive[T]{start, end}.String()
+}
return urlForFileIndex(ws.Url, fileIndex, ws.info, ws.PathEscaper)
}
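+ // StartNewRequest begins an HTTP request for the given spec, splitting it into parts across
+ // file boundaries; debugLogger receives per-part debug output.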
-func (ws *Client) StartNewRequest(r RequestSpec) Request {
+func (ws *Client) StartNewRequest(r RequestSpec, debugLogger *slog.Logger) Request {
ctx, cancel := context.WithCancel(context.TODO())
var requestParts []requestPart
if !ws.fileIndex.Locate(r, func(i int, e segments.Extent) bool {
}
part.do = func() (*http.Response, error) {
if PrintDebug {
- fmt.Printf(
- "doing request for %q (file size %v), Range: %q\n",
- req.URL,
- humanize.Bytes(uint64(ws.fileIndex.Index(i).Length)),
- req.Header.Get("Range"),
- )
+ debugLogger.Debug(
+ "doing request for part",
+ "url", req.URL,
+ "fileSize", humanize.Bytes(uint64(ws.fileIndex.Index(i).Length)),
+ "range", req.Header.Get("Range"))
}
return ws.HttpClient.Do(req)
}