Sergey Matveev's repositories - btrtrc.git/commitdiff
Rejigger the webseed debug logging
author: Matt Joiner <anacrolix@gmail.com>
Thu, 31 Jul 2025 01:54:06 +0000 (11:54 +1000)
committer: Matt Joiner <anacrolix@gmail.com>
Thu, 31 Jul 2025 01:54:06 +0000 (11:54 +1000)
webseed-peer.go
webseed-request.go
webseed-requesting.go
webseed/client.go

index d32aab93d40e0458956688265d830614247eba5f..6026d9c7c3c92ac9a56061250f5bc88b7c6be043 100644 (file)
@@ -25,6 +25,7 @@ import (
 type webseedPeer struct {
        // First field for stats alignment.
        peer             Peer
+       logger           *slog.Logger
        client           webseed.Client
        activeRequests   map[*webseedRequest]struct{}
        locker           sync.Locker
@@ -40,7 +41,7 @@ func (me *webseedPeer) cancelAllRequests() {
        // Is there any point to this? Won't we fail to receive a chunk and cancel anyway? Should we
        // Close requests instead?
        for req := range me.activeRequests {
-               req.Cancel()
+               req.Cancel("all requests cancelled")
        }
 }
 
@@ -131,9 +132,10 @@ func (ws *webseedPeer) intoSpec(begin, end RequestIndex) webseed.RequestSpec {
        return webseed.RequestSpec{start, endOff - start}
 }
 
-func (ws *webseedPeer) spawnRequest(begin, end RequestIndex) {
-       extWsReq := ws.client.StartNewRequest(ws.intoSpec(begin, end))
+func (ws *webseedPeer) spawnRequest(begin, end RequestIndex, logger *slog.Logger) {
+       extWsReq := ws.client.StartNewRequest(ws.intoSpec(begin, end), logger)
        wsReq := webseedRequest{
+               logger:  logger,
                request: extWsReq,
                begin:   begin,
                next:    begin,
@@ -141,7 +143,7 @@ func (ws *webseedPeer) spawnRequest(begin, end RequestIndex) {
        }
        if ws.hasOverlappingRequests(begin, end) {
                if webseed.PrintDebug {
-                       fmt.Printf("webseedPeer.spawnRequest: overlapping request for %v[%v-%v)\n", ws.peer.t.name(), begin, end)
+                       logger.Warn("webseedPeer.spawnRequest: request overlaps existing")
                }
                ws.peer.t.cl.dumpCurrentWebseedRequests()
        }
@@ -304,7 +306,7 @@ func (ws *webseedPeer) readChunks(wr *webseedRequest) (err error) {
                stop := err != nil || wr.next >= wr.end
                if !stop {
                        if !ws.keepReading(wr) {
-                               wr.Cancel()
+                               wr.Cancel("finished or discarded")
                        }
                }
                ws.peer.locker().Unlock()
index 62e2f1eaf60c1fe5ea79c3982dab368aeacbe718..c0db9dbe5041011e00447d9fbf3132bbbbb8f85d 100644 (file)
@@ -2,6 +2,7 @@ package torrent
 
 import (
        "fmt"
+       "log/slog"
        "sync/atomic"
 
        "github.com/anacrolix/torrent/webseed"
@@ -11,6 +12,7 @@ import (
 type webseedRequest struct {
        // Fingers out.
        request webseed.Request
+       logger  *slog.Logger
        // First assigned in the range.
        begin RequestIndex
        // The next to be read.
@@ -25,11 +27,11 @@ func (me *webseedRequest) Close() {
 }
 
 // Record that it was exceptionally cancelled.
-func (me *webseedRequest) Cancel() {
+func (me *webseedRequest) Cancel(reason string) {
        me.request.Cancel()
        if !me.cancelled.Swap(true) {
                if webseed.PrintDebug {
-                       fmt.Printf("cancelled webseed request\n")
+                       me.logger.Debug("cancelled", "reason", reason)
                }
        }
 }
index 48d7eea4409d00f68e6ee3cf577b83aff1b3d2da..ee1f93960eda6ce2d282864b4895d9851d5e651b 100644 (file)
@@ -4,6 +4,7 @@ import (
        "cmp"
        "fmt"
        "iter"
+       "log/slog"
        "maps"
        "os"
        "strconv"
@@ -14,6 +15,7 @@ import (
        g "github.com/anacrolix/generics"
        "github.com/anacrolix/generics/heap"
        "github.com/anacrolix/missinggo/v2/panicif"
+       pp "github.com/anacrolix/torrent/peer_protocol"
 
        "github.com/anacrolix/torrent/internal/request-strategy"
        "github.com/anacrolix/torrent/metainfo"
@@ -156,12 +158,8 @@ func (cl *Client) updateWebseedRequests() {
        }
 
        // Cancel any existing requests that are no longer wanted.
-       for key, value := range unwantedExistingRequests {
-               if webseed.PrintDebug {
-                       fmt.Printf("cancelling deprioritized existing webseed request %v\n", key)
-               }
-               key.t.slogger().Debug("cancelling deprioritized existing webseed request", "webseedUrl", key.url, "fileIndex", key.fileIndex)
-               value.existingWebseedRequest.Cancel()
+       for _, value := range unwantedExistingRequests {
+               value.existingWebseedRequest.Cancel("deprioritized")
        }
 
        printPlan := sync.OnceFunc(func() {
@@ -197,16 +195,25 @@ func (cl *Client) updateWebseedRequests() {
                                }
                                last++
                        }
+                       debugLogger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
+                               Level:     slog.LevelDebug,
+                               AddSource: true,
+                       })).With(
+                               "webseedUrl", requestKey.url,
+                               "fileIndex", requestKey.fileIndex)
                        // Request shouldn't exist if this occurs.
                        panicif.LessThan(last, begin)
                        // Hello C++ my old friend.
                        end := last + 1
                        if webseed.PrintDebug && end != fileEnd {
-                               fmt.Printf("shortened webseed request for %v: [%v-%v) to [%v-%v)\n",
-                                       requestKey.filePath(), begin, fileEnd, begin, end)
+                               debugLogger.Debug(
+                                       "shortened webseed request",
+                                       "first-file", requestKey.filePath(),
+                                       "from", endExclusiveString(begin, fileEnd),
+                                       "to", endExclusiveString(begin, end))
                        }
                        panicif.GreaterThan(end, fileEnd)
-                       peer.spawnRequest(begin, end)
+                       peer.spawnRequest(begin, end, debugLogger)
                }
        }
 }
@@ -390,3 +397,15 @@ func (cl *Client) updateWebseedRequestsAndResetTimer() {
        panicif.True(cl.webseedRequestTimer.Reset(webseedRequestUpdateTimerInterval))
 
 }
+
+type endExclusive[T any] struct {
+       start, end T
+}
+
+func (me endExclusive[T]) String() string {
+       return fmt.Sprintf("[%v-%v)", me.start, me.end)
+}
+
+func endExclusiveString[T any](start, end T) string {
+       return endExclusive[T]{start, end}.String()
+}
index babbe0f507f4fb25b9cc6a78278f546fde7418e3..50d5efe5b13c237188616ae95b42bce520cf9ffb 100644 (file)
@@ -104,7 +104,7 @@ func (ws *Client) UrlForFileIndex(fileIndex int) string {
        return urlForFileIndex(ws.Url, fileIndex, ws.info, ws.PathEscaper)
 }
 
-func (ws *Client) StartNewRequest(r RequestSpec) Request {
+func (ws *Client) StartNewRequest(r RequestSpec, debugLogger *slog.Logger) Request {
        ctx, cancel := context.WithCancel(context.TODO())
        var requestParts []requestPart
        if !ws.fileIndex.Locate(r, func(i int, e segments.Extent) bool {
@@ -123,12 +123,11 @@ func (ws *Client) StartNewRequest(r RequestSpec) Request {
                }
                part.do = func() (*http.Response, error) {
                        if PrintDebug {
-                               fmt.Printf(
-                                       "doing request for %q (file size %v), Range: %q\n",
-                                       req.URL,
-                                       humanize.Bytes(uint64(ws.fileIndex.Index(i).Length)),
-                                       req.Header.Get("Range"),
-                               )
+                               debugLogger.Debug(
+                                       "doing request for part",
+                                       "url", req.URL,
+                                       "file size", humanize.Bytes(uint64(e.Length)),
+                                       "range", req.Header.Get("Range"))
                        }
                        return ws.HttpClient.Do(req)
                }